| id (string, length 3-8) | content (string, length 100-981k) |
|---|---|
1616468
|
import os
import cv2
import xml.etree.ElementTree as ET
import numpy as np
# Loads a single sample.
# Given the paths to an image file and its XML annotation, returns the image, bounding boxes, and class names.
def read_sample(image_path, label_path):
image_path = image_path.strip("\n")
label_path = label_path.strip("\n")
assert os.path.exists(image_path), "Image file does not exist."
assert os.path.exists(label_path), "Label file does not exist."
image = cv2.imread(image_path) # read image in bgr format
bboxes, classes = [], []
xml_root = ET.parse(label_path).getroot()
objects = xml_root.findall("object")
for i, obj in enumerate(objects):
name = obj.find("name").text
bndbox = obj.find("bndbox")
# use float() because some bndbox values are floats
xmin = float(bndbox.find("xmin").text)
ymin = float(bndbox.find("ymin").text)
xmax = float(bndbox.find("xmax").text)
ymax = float(bndbox.find("ymax").text)
bboxes.append([xmin, ymin, xmax, ymax])
classes.append(name)
return np.array(image, dtype=np.float64), np.array(bboxes, dtype=np.float64), classes  # np.float is removed in recent NumPy; use np.float64
# Creates and returns a list of strings, each containing the paths of an image file and its XML file.
# Takes a split file as input, which contains one image file name per line.
def generate_samples_from_split(split_file, images_dir, xml_dir):
assert os.path.isfile(split_file), "split_file does not exist."
assert os.path.isdir(images_dir), "images_dir is not a directory."
assert os.path.isdir(xml_dir), "xml_dir is not a directory."
samples = []
with open(split_file, "r") as split_file:
lines = split_file.readlines()
for line in lines:
image_file = os.path.join(images_dir, line.strip("\n") + ".jpg")
xml_file = os.path.join(xml_dir, line.strip("\n") + ".xml")
sample = f"{image_file} {xml_file}"
samples.append(sample)
return samples
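# A minimal usage sketch combining the two helpers above (directory and file names are illustrative):
# samples = generate_samples_from_split("ImageSets/Main/train.txt", "JPEGImages", "Annotations")
# image_path, label_path = samples[0].split(" ")
# image, bboxes, class_names = read_sample(image_path, label_path)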
|
1616481
|
import bg_helper as bh
import fs_helper as fh
import input_helper as ih
import settings_helper as sh
from redis import StrictRedis, ConnectionError
from time import sleep
__doc__ = """Create an instance of `redis_helper.Collection` and use it
import redis_helper as rh
model = rh.Collection(...)
"""
logger = fh.get_logger(__name__)
SETTINGS = sh.get_all_settings(__name__).get(sh.APP_ENV, {})
REDIS_URL = SETTINGS.get('redis_url')
REDIS = None
def zshow(key, start=0, end=-1, desc=True, withscores=True):
"""Wrapper to REDIS.zrange"""
return REDIS.zrange(key, start, end, withscores=withscores, desc=desc)
def identity(x):
"""Return x, unmodified"""
return x
def _settings_for_docker_ok(exception=False):
"""Return True if settings.ini has the required values set
- exception: if True, raise an exception if settings are not ok (after
optional sync attempt)
If any are missing, prompt to sync settings with vimdiff
"""
global SETTINGS
missing_settings = set(['container_name', 'image_version', 'port', 'rm']) - set(SETTINGS.keys())
if missing_settings != set():
message = 'Update your settings.ini to have: {}'.format(sorted(list(missing_settings)))
print(message)
resp = ih.user_input('Sync settings.ini with vimdiff? (y/n)')
if resp.lower().startswith('y'):
sh.sync_settings_file(__name__)
SETTINGS = sh.get_all_settings(__name__).get(sh.APP_ENV, {})
missing_settings = set(['container_name', 'image_version', 'port', 'rm']) - set(SETTINGS.keys())
if missing_settings == set():
return True
elif exception:
message = 'Update your settings.ini to have: {}'.format(sorted(list(missing_settings)))
raise Exception(message)
else:
if exception:
raise Exception(message)
else:
return True
def start_docker(data_dir=None, aof=True, exception=False, show=False, force=False):
"""Start docker container for redis using values from settings.ini file
- data_dir: directory that will map to container's /data
- specify absolute path or subdirectory of current directory
- aof: if True, use appendonly.aof file
- exception: if True and docker has an error response, raise an exception
- show: if True, show the docker commands and output
- force: if True, stop the container and remove it before re-creating
"""
ok = _settings_for_docker_ok(exception=exception)
if not ok:
return False
return bh.tools.docker_redis_start(
SETTINGS['container_name'],
version=SETTINGS['image_version'],
port=SETTINGS['port'],
rm=SETTINGS['rm'],
data_dir=data_dir,
aof=aof,
exception=exception,
show=show,
force=force
)
def stop_docker(exception=False, show=False):
"""Stop docker container for redis using values from settings.ini file
- exception: if True and docker has an error response, raise an exception
- show: if True, show the docker commands and output
"""
if 'container_name' not in SETTINGS:
message = 'Update your settings.ini to have: container_name'
if exception is True:
raise Exception(message)
elif show is True:
print(message)
return False
return bh.tools.docker_stop(SETTINGS['container_name'], exception=exception, show=show)
def connect_to_server(url=REDIS_URL, attempt_docker=True, exception=False, show=False):
"""Connect to the redis server and set the REDIS variable
- url: if no url is specified, use the redis_url from settings.ini (or check
REDIS_URL environment variable)
- redis://[:somepassword@]somehost:someport/dbnumber
- attempt_docker: if True, and unable to connect initially, call start_docker
- exception: if True and unable to connect, raise an exception
- show: if True, show the docker commands and output
If successful, return (True, dbsize); otherwise, if exception is False,
return (False, float('inf'))
"""
global REDIS_URL, REDIS
REDIS_URL = url
REDIS = StrictRedis.from_url(REDIS_URL)
try:
size = REDIS.dbsize()
except (ConnectionError, AttributeError):
if attempt_docker:
start_docker(exception=exception, show=show)
sleep(1)
REDIS = StrictRedis.from_url(REDIS_URL)
try:
size = REDIS.dbsize()
except (ConnectionError, AttributeError):
REDIS = None
if exception is True:
raise
return (False, float('inf'))
else:
return (True, size)
else:
REDIS = None
if exception is True:
raise
return (False, float('inf'))
else:
return (True, size)
from .collection import Collection
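# A minimal usage sketch of this module (assumes a reachable redis server or a local docker setup):
# import redis_helper as rh
# connected, dbsize = rh.connect_to_server()    # returns (True, dbsize) on success
# if connected:
#     model = rh.Collection(...)                # see the module __doc__ above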
|
1616486
|
from django.db import models
class Person(models.Model):
COUNTRIES = ('Germany', 'France', 'Italy')
first_name = models.CharField('First name', max_length=64)
last_name = models.CharField('Last name', max_length=64, blank=True)
country = models.CharField(
'Country', max_length=32, choices=list(zip(COUNTRIES, COUNTRIES)))  # list() so the choices iterator is not exhausted
birthdate = models.DateField('Birthdate', blank=True, null=True)
reminder = models.DateTimeField('Next reminder')
approved = models.BooleanField('Is approved')
stars = models.IntegerField('Stars', choices=list(zip(range(1, 6), range(1, 6))))
class Meta:
verbose_name = 'Person'
verbose_name_plural = 'Persons'
def __str__(self):
if self.last_name:
return '{} {}'.format(self.first_name, self.last_name)
return self.first_name
class RelatedPerson(models.Model):
person = models.ForeignKey(Person, on_delete=models.PROTECT)
first_name = models.CharField('First name', max_length=64)
last_name = models.CharField('Last name', max_length=64, blank=True)
is_child = models.BooleanField('Is child')
class Meta:
verbose_name = 'Related person'
verbose_name_plural = 'Related persons'
def __str__(self):
return '{} ({})'.format(self.first_name, self.person)
|
1616588
|
import numpy as np
from . import _fastmath_ext
__all__ = ['polar_dec']
def polar_dec(matrices):
"""
Batched polar decomposition of an array of stacked matrices,
e.g. given matrices [M1, M2, ..., Mn], decomposes each matrix
into a rotation matrix and a symmetric positive semi-definite stretch matrix.
>>> matrices = np.random.random((10, 3, 3))
>>> rotations, stretches = polar_dec(matrices)
>>> np.allclose([np.linalg.det(R) for R in rotations], 1.0)
True
"""
matrices = np.asarray(matrices)
if matrices.ndim == 2:
matrices = matrices[np.newaxis]
single_matrix = True
else:
single_matrix = False
Rs, Ss = _fastmath_ext.polar_dec(matrices)
if single_matrix:
return Rs[0], Ss[0]
else:
return Rs, Ss
|
1616595
|
import json
import typing
from flask import Response as FlaskResponse
from cauldron import environ
from cauldron.cli import server
Responses = typing.NamedTuple('TestResponses', [
('flask', FlaskResponse),
('response', 'environ.Response')
])
def create_test_app():
"""..."""
return server.server_run.APPLICATION.test_client()
def get(app, endpoint: str, **kwargs) -> Responses:
""" send get request to the test flask application."""
flask_response = app.get(endpoint, **kwargs)
response = deserialize_flask_response(flask_response)
return Responses(flask_response, response)
def post(app, endpoint: str, data=None, **kwargs) -> Responses:
""" send post request to the test flask application."""
args = json.dumps(data) if data else None
flask_response = app.post(
endpoint,
data=args,
content_type='application/json',
**kwargs
)
response = deserialize_flask_response(flask_response)
return Responses(flask_response, response)
def deserialize_flask_response(
flask_response: FlaskResponse
) -> 'environ.Response':
"""..."""
try:
data = json.loads(flask_response.data.decode('utf-8', 'ignore'))
response = environ.Response.deserialize(data)
except Exception as error:
response = environ.Response().fail(
code='DESERIALIZE_FLASK_RESPONSE',
message='Failed to deserialize flask response',
error=error
)
return response
|
1616630
|
import copy
import numpy as np
import torch
class Memory:
def __init__(self, memory_size, nb_total_classes, rehearsal, fixed=True):
self.memory_size = memory_size
self.nb_total_classes = nb_total_classes
self.rehearsal = rehearsal
self.fixed = fixed
self.x = self.y = self.t = None
self.nb_classes = 0
@property
def memory_per_class(self):
if self.fixed:
return self.memory_size // self.nb_total_classes
return self.memory_size // self.nb_classes if self.nb_classes > 0 else self.memory_size
def get_dataset(self, base_dataset):
dataset = copy.deepcopy(base_dataset)
dataset._x = self.x
dataset._y = self.y
dataset._t = self.t
return dataset
def get(self):
return self.x, self.y, self.t
def __len__(self):
return len(self.x) if self.x is not None else 0
def save(self, path):
np.savez(
path,
x=self.x, y=self.y, t=self.t
)
def load(self, path):
data = np.load(path)
self.x = data["x"]
self.y = data["y"]
self.t = data["t"]
assert len(self) <= self.memory_size, len(self)
self.nb_classes = len(np.unique(self.y))
def reduce(self):
x, y, t = [], [], []
for class_id in np.unique(self.y):
indexes = np.where(self.y == class_id)[0]
x.append(self.x[indexes[:self.memory_per_class]])
y.append(self.y[indexes[:self.memory_per_class]])
t.append(self.t[indexes[:self.memory_per_class]])
self.x = np.concatenate(x)
self.y = np.concatenate(y)
self.t = np.concatenate(t)
def add(self, dataset, model, nb_new_classes):
self.nb_classes += nb_new_classes
x, y, t = herd_samples(dataset, model, self.memory_per_class, self.rehearsal)
#assert len(y) == self.memory_per_class * nb_new_classes, (len(y), self.memory_per_class, nb_new_classes)
if self.x is None:
self.x, self.y, self.t = x, y, t
else:
if not self.fixed:
self.reduce()
self.x = np.concatenate((self.x, x))
self.y = np.concatenate((self.y, y))
self.t = np.concatenate((self.t, t))
def herd_samples(dataset, model, memory_per_class, rehearsal):
x, y, t = dataset._x, dataset._y, dataset._t
if rehearsal == "random":
indexes = []
for class_id in np.unique(y):
class_indexes = np.where(y == class_id)[0]
indexes.append(
np.random.choice(class_indexes, size=memory_per_class)
)
indexes = np.concatenate(indexes)
return x[indexes], y[indexes], t[indexes]
elif "closest" in rehearsal:
if rehearsal == 'closest_token':
handling = 'last'
else:
handling = 'all'
features, targets = extract_features(dataset, model, handling)
indexes = []
for class_id in np.unique(y):
class_indexes = np.where(y == class_id)[0]
class_features = features[class_indexes]
class_mean = np.mean(class_features, axis=0, keepdims=True)
distances = np.power(class_features - class_mean, 2).sum(-1)
class_closest_indexes = np.argsort(distances)
indexes.append(
class_indexes[class_closest_indexes[:memory_per_class]]
)
indexes = np.concatenate(indexes)
return x[indexes], y[indexes], t[indexes]
elif "furthest" in rehearsal:
if rehearsal == 'furthest_token':
handling = 'last'
else:
handling = 'all'
features, targets = extract_features(dataset, model, handling)
indexes = []
for class_id in np.unique(y):
class_indexes = np.where(y == class_id)[0]
class_features = features[class_indexes]
class_mean = np.mean(class_features, axis=0, keepdims=True)
distances = np.power(class_features - class_mean, 2).sum(-1)
class_furthest_indexes = np.argsort(distances)[::-1]
indexes.append(
class_indexes[class_furthest_indexes[:memory_per_class]]
)
indexes = np.concatenate(indexes)
return x[indexes], y[indexes], t[indexes]
elif "icarl":
if rehearsal == 'icarl_token':
handling = 'last'
else:
handling = 'all'
features, targets = extract_features(dataset, model, handling)
indexes = []
for class_id in np.unique(y):
class_indexes = np.where(y == class_id)[0]
class_features = features[class_indexes]
indexes.append(
class_indexes[icarl_selection(class_features, memory_per_class)]
)
indexes = np.concatenate(indexes)
return x[indexes], y[indexes], t[indexes]
else:
raise ValueError(f"Unknown rehearsal method {rehearsal}!")
def extract_features(dataset, model, ensemble_handling='last'):
#transform = copy.deepcopy(dataset.trsf.transforms)
#dataset.trsf = transforms.Compose(transform[-2:])
loader = torch.utils.data.DataLoader(
dataset,
batch_size=128,
num_workers=2,
pin_memory=True,
drop_last=False,
shuffle=False
)
features, targets = [], []
with torch.no_grad():
for x, y, _ in loader:
if hasattr(model, 'module'):
feats, _, _ = model.module.forward_features(x.cuda())
else:
feats, _, _ = model.forward_features(x.cuda())
if isinstance(feats, list):
if ensemble_handling == 'last':
feats = feats[-1]
elif ensemble_handling == 'all':
feats = torch.cat(feats, dim=1)
else:
raise NotImplementedError(f'Unknown handling of multiple features {ensemble_handling}')
elif len(feats.shape) == 3: # joint tokens
if ensemble_handling == 'last':
feats = feats[-1]
elif ensemble_handling == 'all':
feats = feats.permute(1, 0, 2).view(len(x), -1)
else:
raise NotImplementedError(f'Unknown handling of multiple features {ensemble_handling}')
feats = feats.cpu().numpy()
y = y.numpy()
features.append(feats)
targets.append(y)
features = np.concatenate(features)
targets = np.concatenate(targets)
#dataset.trsf = transforms.Compose(transform)
return features, targets
def icarl_selection(features, nb_examplars):
D = features.T
D = D / (np.linalg.norm(D, axis=0) + 1e-8)
mu = np.mean(D, axis=1)
herding_matrix = np.zeros((features.shape[0],))
w_t = mu
iter_herding, iter_herding_eff = 0, 0
while not (
np.sum(herding_matrix != 0) == min(nb_examplars, features.shape[0])
) and iter_herding_eff < 1000:
tmp_t = np.dot(w_t, D)
ind_max = np.argmax(tmp_t)
iter_herding_eff += 1
if herding_matrix[ind_max] == 0:
herding_matrix[ind_max] = 1 + iter_herding
iter_herding += 1
w_t = w_t + mu - D[:, ind_max]
herding_matrix[np.where(herding_matrix == 0)[0]] = 10000
return herding_matrix.argsort()[:nb_examplars]
def get_finetuning_dataset(dataset, memory, finetuning='balanced'):
if finetuning == 'balanced':
x, y, t = memory.get()
new_dataset = copy.deepcopy(dataset)
new_dataset._x = x
new_dataset._y = y
new_dataset._t = t
elif finetuning in ('all', 'none'):
new_dataset = dataset
else:
raise NotImplementedError(f'Unknown finetuning method {finetuning}')
return new_dataset
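# A minimal usage sketch (the dataset/model objects and sizes are illustrative, not from the original code):
# memory = Memory(memory_size=2000, nb_total_classes=100, rehearsal="random", fixed=True)
# memory.add(task_dataset, model, nb_new_classes=10)   # keeps memory_per_class samples per new class
# finetune_set = get_finetuning_dataset(task_dataset, memory, finetuning='balanced')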
|
1616637
|
import os
from dagster.core.storage.pipeline_run import PipelineRunStatus
from dagster.core.test_utils import poll_for_finished_run
from dagster.utils.merger import merge_dicts
from dagster.utils.yaml_utils import merge_yamls
from dagster_test.test_project import (
ReOriginatedExternalPipelineForTest,
find_local_test_image,
get_buildkite_registry_config,
get_test_project_docker_image,
get_test_project_environments_path,
get_test_project_recon_pipeline,
get_test_project_workspace_and_external_pipeline,
)
from . import IS_BUILDKITE, docker_postgres_instance
def test_image_on_pipeline():
docker_image = get_test_project_docker_image()
launcher_config = {
"env_vars": [
"AWS_ACCESS_KEY_ID",
"AWS_SECRET_ACCESS_KEY",
],
"networks": ["container:test-postgres-db-docker"],
"container_kwargs": {
"auto_remove": True,
"volumes": ["/var/run/docker.sock:/var/run/docker.sock"],
},
}
if IS_BUILDKITE:
launcher_config["registry"] = get_buildkite_registry_config()
else:
find_local_test_image(docker_image)
executor_config = {
"execution": {"docker": {"config": {}}},
}
run_config = merge_dicts(
merge_yamls(
[
os.path.join(get_test_project_environments_path(), "env.yaml"),
os.path.join(get_test_project_environments_path(), "env_s3.yaml"),
]
),
executor_config,
)
with docker_postgres_instance(
overrides={
"run_launcher": {
"class": "DockerRunLauncher",
"module": "dagster_docker",
"config": launcher_config,
}
}
) as instance:
recon_pipeline = get_test_project_recon_pipeline("demo_pipeline_docker", docker_image)
with get_test_project_workspace_and_external_pipeline(
instance, "demo_pipeline_docker", container_image=docker_image
) as (
workspace,
orig_pipeline,
):
external_pipeline = ReOriginatedExternalPipelineForTest(
orig_pipeline, container_image=docker_image
)
run = instance.create_run_for_pipeline(
pipeline_def=recon_pipeline.get_definition(),
run_config=run_config,
external_pipeline_origin=external_pipeline.get_external_origin(),
pipeline_code_origin=external_pipeline.get_python_origin(),
)
instance.launch_run(run.run_id, workspace)
poll_for_finished_run(instance, run.run_id, timeout=60)
for log in instance.all_logs(run.run_id):
print(log) # pylint: disable=print-call
assert instance.get_run_by_id(run.run_id).status == PipelineRunStatus.SUCCESS
|
1616641
|
import subprocess
from django.conf import settings
from django.core.files.temp import NamedTemporaryFile
class PenthouseCommand(object):
command = '{phantomjs} {penthouse} {htmlurl} {csspath}'
encoding = 'utf8'
def __init__(self, phantomjs=None, penthouse=None):
self.phantomjs = phantomjs or settings.CRITICAL_PHANTOMJS_PATH
self.penthouse = penthouse or settings.CRITICAL_PENTHOUSE_PATH
def run(self, html, css):
with NamedTemporaryFile(mode='wb', suffix='.html') as htmlfile,\
NamedTemporaryFile(mode='wb', suffix='.css') as cssfile:
htmlfile.write(html.encode(self.encoding))
htmlfile.flush()
cssfile.write(css.encode(self.encoding))  # encode like the html above since the file is opened in binary mode
cssfile.flush()
command = self.command.format(
phantomjs=self.phantomjs,
penthouse=self.penthouse,
htmlurl='file:{}'.format(htmlfile.name),
csspath=cssfile.name)
proc = subprocess.Popen(command, shell=True,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
stdout, stderr = proc.communicate()
proc.wait()
if proc.returncode:
raise Exception(
'Penthouse command failed ({code}): {message}'.format(
code=proc.returncode,
message=stderr,
))
return stdout
def get_critical_css(html, css):
return PenthouseCommand().run(html, css)
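# Usage sketch (inputs are hypothetical; requires CRITICAL_PHANTOMJS_PATH and CRITICAL_PENTHOUSE_PATH in settings):
# critical_css = get_critical_css('<html><body>...</body></html>', 'body { margin: 0; }')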
|
1616652
|
from unittest.mock import MagicMock
from mpf.platforms.interfaces.driver_platform_interface import PulseSettings
from mpf.core.platform import SwitchSettings, DriverSettings
from mpf.tests.MpfTestCase import MpfTestCase
class TestKickback(MpfTestCase):
def get_config_file(self):
return 'config.yaml'
def get_machine_path(self):
return 'tests/machine_files/kickback/'
def test_kickback_with_ball_save(self):
self.machine.default_platform.set_pulse_on_hit_rule = MagicMock()
self.mock_event("kickback_kickback_test_fired")
self.assertFalse(self.machine.ball_saves["kickback_save"].enabled)
# kickback is not enabled. nothing should happen
self.hit_and_release_switch("s_kickback")
self.advance_time_and_run(.01)
self.assertEventNotCalled("kickback_kickback_test_fired")
# enable kickback
self.post_event("kickback_enable")
self.advance_time_and_run(.01)
# should write a hw rule
self.machine.default_platform.set_pulse_on_hit_rule.assert_called_once_with(
SwitchSettings(hw_switch=self.machine.switches["s_kickback"].hw_switch, invert=False, debounce=False),
DriverSettings(hw_driver=self.machine.coils["kickback_coil"].hw_driver,
pulse_settings=PulseSettings(power=1.0, duration=100), hold_settings=None, recycle=True)
)
# a hit should fire it
self.hit_and_release_switch("s_kickback")
self.advance_time_and_run(.01)
self.assertEventCalled("kickback_kickback_test_fired")
# ball save should be enabled just in case
self.assertTrue(self.machine.ball_saves["kickback_save"].enabled)
# but disable after 6s
self.advance_time_and_run(6.1)
self.assertFalse(self.machine.ball_saves["kickback_save"].enabled)
# it only works once though
self.mock_event("kickback_kickback_test_fired")
self.hit_and_release_switch("s_kickback")
self.advance_time_and_run(.01)
self.assertEventNotCalled("kickback_kickback_test_fired")
|
1616668
|
import sphinx_rtd_theme
def get_version():
import pandas_plink
return pandas_plink.__version__
extensions = [
"sphinx.ext.autodoc",
"sphinx.ext.autosummary",
"sphinx.ext.doctest",
"sphinx.ext.intersphinx",
"sphinx.ext.napoleon",
"sphinx.ext.viewcode",
"sphinx_autodoc_typehints",
"sphinx.ext.autosectionlabel",
]
templates_path = ["_templates"]
# Change to True when developing it.
autosummary_generate = False
autosectionlabel_prefix_document = True
napoleon_numpy_docstring = True
source_suffix = ".rst"
master_doc = "index"
project = "pandas-plink"
copyright = "2018, <NAME>"
author = "<NAME>"
version = get_version()
release = version
language = None
exclude_patterns = ["_build", "Thumbs.db", ".DS_Store", "conf.py"]
pygments_style = "default"
todo_include_todos = False
html_theme = "sphinx_rtd_theme"
html_theme_path = [sphinx_rtd_theme.get_html_theme_path()]
html_sidebars = {"**": ["relations.html", "searchbox.html"]}
htmlhelp_basename = "pandas-plinkdoc"
man_pages = [(master_doc, "pandas-plink", "pandas-plink Documentation", [author], 1)]
epub_title = project
epub_author = author
epub_publisher = author
epub_copyright = copyright
epub_exclude_files = ["search.html"]
intersphinx_mapping = {
"python": ("https://docs.python.org/3/", None),
"numpy": ("https://numpy.org/doc/stable/", None),
"xarray": ("http://xarray.pydata.org/en/stable/", None),
"pandas": ("http://pandas.pydata.org/pandas-docs/stable/", None),
"dask": ("http://docs.dask.org/en/latest/", None),
}
|
1616669
|
import os
from typing import Dict
from google.cloud import datastore
from testcontainers.core.container import DockerContainer
from testcontainers.core.waiting_utils import wait_for_logs
from tests.integration.feature_repos.universal.online_store_creator import (
OnlineStoreCreator,
)
class DatastoreOnlineStoreCreator(OnlineStoreCreator):
def __init__(self, project_name: str, **kwargs):
super().__init__(project_name)
self.container = (
DockerContainer(
"gcr.io/google.com/cloudsdktool/cloud-sdk:380.0.0-emulators"
)
.with_command(
"gcloud beta emulators datastore start --project test-project --host-port 0.0.0.0:8081"
)
.with_exposed_ports("8081")
)
def create_online_store(self) -> Dict[str, str]:
self.container.start()
log_string_to_wait_for = r"\[datastore\] Dev App Server is now running"
wait_for_logs(
container=self.container, predicate=log_string_to_wait_for, timeout=5
)
exposed_port = self.container.get_exposed_port("8081")
os.environ[datastore.client.DATASTORE_EMULATOR_HOST] = f"0.0.0.0:{exposed_port}"
return {"type": "datastore", "project_id": "test-project"}
def teardown(self):
del os.environ[datastore.client.DATASTORE_EMULATOR_HOST]
self.container.stop()
|
1616716
|
from math import log, exp, sqrt, tanh, sin, cos, tan, atan2, ceil, pi
import numpy as np
from OpenGL.GL import *
from PyEngine3D.Common import logger
from PyEngine3D.App import CoreManager
from PyEngine3D.OpenGLContext import CreateTexture, Texture2D, Texture2DArray, Texture3D, FrameBuffer
from PyEngine3D.Render import RenderTarget, ScreenQuad, Plane
from PyEngine3D.Utilities import *
from .Constants import *
def sqr(x):
return x * x
def omega(k):
return sqrt(9.81 * k * (1.0 + sqr(k / km)))
def frandom(seed_data):
return (seed_data >> (31 - 24)) / float(1 << 24)
def bitReverse(i, N):
i = int(i)
N = int(N)
j = i
M = N
Sum = 0
W = 1
M = int(M / 2)
while M != 0:
j = (i & M) > (M - 1)
Sum += j * W
W *= 2
M = int(M / 2)
return int(Sum)
def computeWeight(N, k):
return cos(2.0 * pi * k / float(N)), sin(2.0 * pi * k / float(N))
class Ocean:
DEFAULT_FFT_SEED = 1234
def __init__(self, **object_data):
self.name = object_data.get('name', 'ocean')
self.height = object_data.get('height', 0.0)
self.wind = object_data.get('wind', WIND)
self.omega = object_data.get('omega', OMEGA)
self.amplitude = object_data.get('amplitude', AMPLITUDE)
self.simulation_wind = object_data.get('simulation_wind', 1.0)
self.simulation_amplitude = object_data.get('simulation_amplitude', 3.0)
self.simulation_scale = object_data.get('simulation_scale', 1.0)
self.is_render_ocean = object_data.get('is_render_ocean', True)
self.attributes = Attributes()
self.acc_time = 0.0
self.fft_seed = Ocean.DEFAULT_FFT_SEED
self.simulation_size = GRID_SIZES * self.simulation_scale
self.renderer = CoreManager.instance().renderer
self.scene_manager = CoreManager.instance().scene_manager
self.resource_manager = CoreManager.instance().resource_manager
self.fft_init = None
self.fft_x = None
self.fft_y = None
self.fft_render = None
self.fft_variance = None
self.texture_spectrum_1_2 = None
self.texture_spectrum_3_4 = None
self.texture_slope_variance = None
self.texture_butterfly = None
self.quad = None
self.fft_grid = None
self.caustic_index = 0
self.texture_caustics = []
self.texture_foam = None
self.texture_noise = None
def initialize(self):
self.fft_seed = Ocean.DEFAULT_FFT_SEED
self.fft_init = self.resource_manager.get_material_instance('fft_ocean.init')
self.fft_x = self.resource_manager.get_material_instance('fft_ocean.fft_x')
self.fft_y = self.resource_manager.get_material_instance('fft_ocean.fft_y')
self.fft_render = self.resource_manager.get_material_instance('fft_ocean.render')
self.fft_variance = self.resource_manager.get_material_instance('fft_ocean.fft_variance')
self.texture_spectrum_1_2 = self.resource_manager.get_texture("fft_ocean.spectrum_1_2", default_texture=False)
self.texture_spectrum_3_4 = self.resource_manager.get_texture("fft_ocean.spectrum_3_4", default_texture=False)
self.texture_slope_variance = self.resource_manager.get_texture("fft_ocean.slope_variance", default_texture=False)
self.texture_butterfly = self.resource_manager.get_texture("fft_ocean.butterfly", default_texture=False)
self.quad = ScreenQuad.get_vertex_array_buffer()
self.fft_grid = Plane("FFT_Grid", mode=GL_QUADS, width=GRID_VERTEX_COUNT, height=GRID_VERTEX_COUNT, xz_plane=False)
if None in (self.texture_spectrum_1_2, self.texture_spectrum_3_4, self.texture_slope_variance, self.texture_butterfly):
self.generate_texture()
self.caustic_index = 0
self.texture_caustics = []
i = 0
while True:
resource_name = "common.water_caustic_%02d" % i
if self.resource_manager.texture_loader.hasResource(resource_name):
self.texture_caustics.append(self.resource_manager.get_texture(resource_name))
i += 1
continue
break
self.texture_foam = self.resource_manager.get_texture("common.water_foam")
self.texture_noise = self.resource_manager.get_texture("common.noise")
def get_attribute(self):
self.attributes.set_attribute('is_render_ocean', self.is_render_ocean)
self.attributes.set_attribute('height', self.height)
self.attributes.set_attribute('wind', self.wind)
self.attributes.set_attribute('omega', self.omega)
self.attributes.set_attribute('amplitude', self.amplitude)
self.attributes.set_attribute('simulation_wind', self.simulation_wind)
self.attributes.set_attribute('simulation_amplitude', self.simulation_amplitude)
self.attributes.set_attribute('simulation_scale', self.simulation_scale)
return self.attributes
def set_attribute(self, attribute_name, attribute_value, item_info_history, attribute_index):
if hasattr(self, attribute_name):
setattr(self, attribute_name, attribute_value)
# recreate resources
if attribute_name in ('amplitude', 'wind', 'omega'):
self.generate_texture()
elif attribute_name == 'simulation_scale':
self.simulation_size = GRID_SIZES * self.simulation_scale
return self.attributes
def get_save_data(self):
save_data = dict(
is_render_ocean=self.is_render_ocean,
texture_type=self.__class__.__name__,
height=self.height,
wind=self.wind,
omega=self.omega,
amplitude=self.amplitude,
)
return save_data
def getSlopeVariance(self, kx, ky, spectrumSample0, spectrumSample1):
kSquare = kx * kx + ky * ky
real = spectrumSample0
img = spectrumSample1
hSquare = real * real + img * img
return kSquare * hSquare * 2.0
def spectrum(self, kx, ky, omnispectrum=False):
U10 = max(0.001, self.wind)
Omega = self.omega
Amp = self.amplitude
k = sqrt(kx * kx + ky * ky)
c = omega(k) / k
# spectral peak
kp = 9.81 * sqr(Omega / U10)
cp = omega(kp) / kp
# friction velocity
z0 = 3.7e-5 * sqr(U10) / 9.81 * pow(U10 / cp, 0.9)
u_star = 0.41 * U10 / log(10.0 / z0)
Lpm = exp(- 5.0 / 4.0 * sqr(kp / k))
gamma = 1.7 if Omega < 1.0 else 1.7 + 6.0 * log(Omega)
sigma = 0.08 * (1.0 + 4.0 / pow(Omega, 3.0))
Gamma = exp(-1.0 / (2.0 * sqr(sigma)) * sqr(sqrt(k / kp) - 1.0))
Jp = pow(gamma, Gamma)
Fp = Lpm * Jp * exp(- Omega / sqrt(10.0) * (sqrt(k / kp) - 1.0))
alphap = 0.006 * sqrt(Omega)
Bl = 0.5 * alphap * cp / c * Fp
alpham = 0.01
if u_star < cm:
alpham *= (1.0 + log(u_star / cm))
else:
alpham *= (1.0 + 3.0 * log(u_star / cm))
Fm = exp(-0.25 * sqr(k / km - 1.0))
Bh = 0.5 * alpham * cm / c * Fm * Lpm
if omnispectrum:
return Amp * (Bl + Bh) / (k * sqr(k))
a0 = log(2.0) / 4.0
ap = 4.0
am = 0.13 * u_star / cm
Delta = tanh(a0 + ap * pow(c / cp, 2.5) + am * pow(cm / c, 2.5))
phi = atan2(ky, kx)
if kx < 0.0:
return 0.0
else:
Bl *= 2.0
Bh *= 2.0
return Amp * (Bl + Bh) * (1.0 + Delta * cos(2.0 * phi)) / (2.0 * pi * sqr(sqr(k)))
def getSpectrumSample(self, i, j, lengthScale, kMin):
dk = 2.0 * pi / lengthScale
kx = i * dk
ky = j * dk
if abs(kx) < kMin and abs(ky) < kMin:
return 0.0, 0.0
else:
S = self.spectrum(kx, ky)
h = sqrt(S / 2.0) * dk
self.fft_seed = (self.fft_seed * 1103515245 + 12345) & 0x7FFFFFFF
phi = frandom(self.fft_seed) * 2.0 * pi
return h * cos(phi), h * sin(phi)
def computeButterflyLookupTexture(self, butterfly_data):
for i in range(PASSES):
nBlocks = int(pow(2.0, float(PASSES - 1 - i)))
nHInputs = int(pow(2.0, float(i)))
for j in range(nBlocks):
for k in range(nHInputs):
i1, i2, j1, j2 = 0, 0, 0, 0
if i == 0:
i1 = j * nHInputs * 2 + k
i2 = j * nHInputs * 2 + nHInputs + k
j1 = bitReverse(i1, FFT_SIZE)
j2 = bitReverse(i2, FFT_SIZE)
else:
i1 = j * nHInputs * 2 + k
i2 = j * nHInputs * 2 + nHInputs + k
j1 = i1
j2 = i2
wr, wi = computeWeight(FFT_SIZE, k * nBlocks)
offset1 = 4 * (i1 + i * FFT_SIZE)
butterfly_data[offset1 + 0] = (j1 + 0.5) / FFT_SIZE
butterfly_data[offset1 + 1] = (j2 + 0.5) / FFT_SIZE
butterfly_data[offset1 + 2] = wr
butterfly_data[offset1 + 3] = wi
offset2 = 4 * (i2 + i * FFT_SIZE)
butterfly_data[offset2 + 0] = (j1 + 0.5) / FFT_SIZE
butterfly_data[offset2 + 1] = (j2 + 0.5) / FFT_SIZE
butterfly_data[offset2 + 2] = -wr
butterfly_data[offset2 + 3] = -wi
def generateWavesSpectrum(self, spectrum12_data, spectrum34_data):
for y in range(FFT_SIZE):
for x in range(FFT_SIZE):
offset = 4 * (x + y * FFT_SIZE)
i = (x - FFT_SIZE) if (x >= FFT_SIZE / 2) else x
j = (y - FFT_SIZE) if (y >= FFT_SIZE / 2) else y
s12_0, s12_1 = self.getSpectrumSample(i, j, GRID1_SIZE, pi / GRID1_SIZE)
s12_2, s12_3 = self.getSpectrumSample(i, j, GRID2_SIZE, pi * FFT_SIZE / GRID1_SIZE)
s34_0, s34_1 = self.getSpectrumSample(i, j, GRID3_SIZE, pi * FFT_SIZE / GRID2_SIZE)
s34_2, s34_3 = self.getSpectrumSample(i, j, GRID4_SIZE, pi * FFT_SIZE / GRID3_SIZE)
spectrum12_data[offset: offset+4] = s12_0, s12_1, s12_2, s12_3
spectrum34_data[offset: offset+4] = s34_0, s34_1, s34_2, s34_3
def computeSlopeVarianceTex(self, spectrum12_data, spectrum34_data):
theoreticSlopeVariance = 0.0
k = 5e-3
while k < 1e3:
nextK = k * 1.001
theoreticSlopeVariance += k * k * self.spectrum(k, 0, True) * (nextK - k)
k = nextK
totalSlopeVariance = 0.0
for y in range(FFT_SIZE):
for x in range(FFT_SIZE):
offset = 4 * (x + y * FFT_SIZE)
i = 2.0 * pi * ((x - FFT_SIZE) if (x >= FFT_SIZE / 2) else x)
j = 2.0 * pi * ((y - FFT_SIZE) if (y >= FFT_SIZE / 2) else y)
s12_0, s12_1, s12_2, s12_3 = spectrum12_data[offset: offset + 4]
s34_0, s34_1, s34_2, s34_3 = spectrum34_data[offset: offset + 4]
totalSlopeVariance += self.getSlopeVariance(i/GRID1_SIZE, j/GRID1_SIZE, s12_0, s12_1)
totalSlopeVariance += self.getSlopeVariance(i/GRID2_SIZE, j/GRID2_SIZE, s12_2, s12_3)
totalSlopeVariance += self.getSlopeVariance(i/GRID3_SIZE, j/GRID3_SIZE, s34_0, s34_1)
totalSlopeVariance += self.getSlopeVariance(i/GRID4_SIZE, j/GRID4_SIZE, s34_2, s34_3)
self.fft_variance.use_program()
self.fft_variance.bind_uniform_data("GRID_SIZES", GRID_SIZES)
self.fft_variance.bind_uniform_data("slopeVarianceDelta", (theoreticSlopeVariance - totalSlopeVariance) * 0.5)
self.fft_variance.bind_uniform_data("N_SLOPE_VARIANCE", N_SLOPE_VARIANCE)
self.fft_variance.bind_uniform_data("spectrum_1_2_Sampler", self.texture_spectrum_1_2)
self.fft_variance.bind_uniform_data("spectrum_3_4_Sampler", self.texture_spectrum_3_4)
self.fft_variance.bind_uniform_data("FFT_SIZE", FFT_SIZE)
for layer in range(N_SLOPE_VARIANCE):
self.renderer.framebuffer_manager.bind_framebuffer(self.texture_slope_variance, target_layer=layer)
self.fft_variance.bind_uniform_data("c", layer)
self.quad.draw_elements()
def save_texture(self, texture):
resource = self.resource_manager.texture_loader.get_resource(texture.name)
if resource is None:
resource = self.resource_manager.texture_loader.create_resource(texture.name, texture)
self.resource_manager.texture_loader.save_resource(resource.name)
else:
old_texture = resource.get_data()
old_texture.delete()
resource.set_data(texture)
def generate_texture(self):
glPolygonMode(GL_FRONT_AND_BACK, GL_FILL)
glDepthFunc(GL_LEQUAL)
glEnable(GL_CULL_FACE)
glFrontFace(GL_CCW)
glEnable(GL_DEPTH_TEST)
glDepthMask(True)
glDisable(GL_BLEND)
glClearColor(0.0, 0.0, 0.0, 1.0)
glClearDepth(1.0)
spectrum12_data = np.zeros(FFT_SIZE * FFT_SIZE * 4, dtype=np.float32)
spectrum34_data = np.zeros(FFT_SIZE * FFT_SIZE * 4, dtype=np.float32)
butterfly_data = np.zeros(FFT_SIZE * PASSES * 4, dtype=np.float32)
self.generateWavesSpectrum(spectrum12_data, spectrum34_data)
self.computeButterflyLookupTexture(butterfly_data)
# create render targets
self.texture_spectrum_1_2 = CreateTexture(
name='fft_ocean.spectrum_1_2',
texture_type=Texture2D,
image_mode='RGBA',
width=FFT_SIZE,
height=FFT_SIZE,
internal_format=GL_RGBA16F,
texture_format=GL_RGBA,
min_filter=GL_NEAREST,
mag_filter=GL_NEAREST,
data_type=GL_FLOAT,
wrap=GL_REPEAT,
data=spectrum12_data,
)
self.texture_spectrum_3_4 = CreateTexture(
name='fft_ocean.spectrum_3_4',
texture_type=Texture2D,
image_mode='RGBA',
width=FFT_SIZE,
height=FFT_SIZE,
internal_format=GL_RGBA16F,
texture_format=GL_RGBA,
min_filter=GL_NEAREST,
mag_filter=GL_NEAREST,
data_type=GL_FLOAT,
wrap=GL_REPEAT,
data=spectrum34_data,
)
self.texture_slope_variance = CreateTexture(
name='fft_ocean.slope_variance',
texture_type=Texture3D,
image_mode='RGBA',
width=N_SLOPE_VARIANCE,
height=N_SLOPE_VARIANCE,
depth=N_SLOPE_VARIANCE,
internal_format=GL_RGBA16F,
texture_format=GL_RGBA,
min_filter=GL_LINEAR,
mag_filter=GL_LINEAR,
wrap=GL_CLAMP_TO_EDGE,
data_type=GL_FLOAT,
)
self.texture_butterfly = CreateTexture(
name='fft_ocean.butterfly',
texture_type=Texture2D,
image_mode='RGBA',
width=FFT_SIZE,
height=PASSES,
internal_format=GL_RGBA16F,
texture_format=GL_RGBA,
min_filter=GL_NEAREST,
mag_filter=GL_NEAREST,
wrap=GL_CLAMP_TO_EDGE,
data_type=GL_FLOAT,
data=butterfly_data,
)
self.computeSlopeVarianceTex(spectrum12_data, spectrum34_data)
self.save_texture(self.texture_spectrum_1_2)
self.save_texture(self.texture_spectrum_3_4)
self.save_texture(self.texture_slope_variance)
self.save_texture(self.texture_butterfly)
def update(self, delta):
self.acc_time += delta
self.caustic_index = int((self.acc_time * 20.0) % len(self.texture_caustics))
def simulateFFTWaves(self):
framebuffer_manager = CoreManager.instance().renderer.framebuffer_manager
RenderTargets = RenderTarget.RenderTargets
fft_a_framebuffer = framebuffer_manager.get_framebuffer(RenderTargets.FFT_A,
RenderTargets.FFT_A,
RenderTargets.FFT_A,
RenderTargets.FFT_A,
RenderTargets.FFT_A)
fft_b_framebuffer = framebuffer_manager.get_framebuffer(RenderTargets.FFT_B,
RenderTargets.FFT_B,
RenderTargets.FFT_B,
RenderTargets.FFT_B,
RenderTargets.FFT_B)
# initialize
fft_a_framebuffer.bind_framebuffer()
glClear(GL_COLOR_BUFFER_BIT)
self.fft_init.use_program()
self.fft_init.bind_uniform_data("FFT_SIZE", FFT_SIZE)
self.fft_init.bind_uniform_data("INVERSE_GRID_SIZES", INVERSE_GRID_SIZES)
self.fft_init.bind_uniform_data("spectrum_1_2_Sampler", self.texture_spectrum_1_2)
self.fft_init.bind_uniform_data("spectrum_3_4_Sampler", self.texture_spectrum_3_4)
self.fft_init.bind_uniform_data("t", self.acc_time * self.simulation_wind)
self.quad.draw_elements()
# fft passes
self.fft_x.use_program()
self.fft_x.bind_uniform_data("butterflySampler", self.texture_butterfly)
for i in range(PASSES):
self.fft_x.bind_uniform_data("pass", float(i + 0.5) / PASSES)
if i % 2 == 0:
self.fft_x.bind_uniform_data("imgSampler", RenderTargets.FFT_A)
fft_b_framebuffer.bind_framebuffer()
else:
self.fft_x.bind_uniform_data("imgSampler", RenderTargets.FFT_B)
fft_a_framebuffer.bind_framebuffer()
self.quad.draw_elements()
self.fft_y.use_program()
self.fft_y.bind_uniform_data("butterflySampler", self.texture_butterfly)
for i in range(PASSES, PASSES * 2, 1):
self.fft_y.bind_uniform_data("pass", float(i - PASSES + 0.5) / PASSES)
if i % 2 == 0:
self.fft_y.bind_uniform_data("imgSampler", RenderTargets.FFT_A)
fft_b_framebuffer.bind_framebuffer()
else:
self.fft_y.bind_uniform_data("imgSampler", RenderTargets.FFT_B)
fft_a_framebuffer.bind_framebuffer()
self.quad.draw_elements()
RenderTargets.FFT_A.generate_mipmap()
def render_ocean(self, atmosphere, texture_scene, texture_linear_depth, texture_probe, texture_shadow):
self.fft_render.use_program()
self.fft_render.bind_material_instance()
self.fft_render.bind_uniform_data("height", self.height)
self.fft_render.bind_uniform_data("simulation_wind", self.simulation_wind)
self.fft_render.bind_uniform_data("simulation_amplitude", self.simulation_amplitude)
self.fft_render.bind_uniform_data("simulation_size", self.simulation_size)
self.fft_render.bind_uniform_data("cell_size", GRID_CELL_SIZE)
self.fft_render.bind_uniform_data("t", self.acc_time * self.simulation_wind)
self.fft_render.bind_uniform_data("fftWavesSampler", RenderTarget.RenderTargets.FFT_A)
self.fft_render.bind_uniform_data("slopeVarianceSampler", self.texture_slope_variance)
self.fft_render.bind_uniform_data('texture_scene', texture_scene)
self.fft_render.bind_uniform_data('texture_linear_depth', texture_linear_depth)
self.fft_render.bind_uniform_data('texture_probe', texture_probe)
self.fft_render.bind_uniform_data('texture_shadow', texture_shadow)
self.fft_render.bind_uniform_data('texture_noise', self.texture_noise)
self.fft_render.bind_uniform_data('texture_caustic', self.texture_caustics[self.caustic_index])
self.fft_render.bind_uniform_data('texture_foam', self.texture_foam)
# Bind Atmosphere
atmosphere.bind_precomputed_atmosphere(self.fft_render)
self.fft_grid.get_geometry().draw_elements()
|
1616764
|
import sys
import numpy as np
import math
import librosa
import soundfile as sf
import json
from librosa.core.spectrum import power_to_db
import scipy
file_path = sys.argv[1]
data, samplerate = sf.read(file_path)
#data = np.clip(data*3, -1, 1)
with open("MfccConfig.json", "r") as f:
config = json.load(f)
frame_size = config['frame_size']
frame_step = config['frame_step']
n_fft = config['n_fft']
n_mels = config['mfcc_bank_cnt']
fmin = config['fmin']
fmax = config['fmax']
dtype = config.get('dtype', "int")
high_prec = config.get('use_high_prec', False) or dtype == "fix32_scal"
use_power = False
rad4 = round(math.log(n_fft//2, 4)) == math.log(n_fft//2, 4)
ndct = config.get('n_dct', False)
from librosa.filters import get_window
from librosa import util
librosa_fft_window = get_window("hann", frame_size, fftbins=True)
# Pad the window out to n_fft size
librosa_fft_window = util.pad_center(librosa_fft_window, n_fft)
stft = librosa.core.spectrum.stft(data, n_fft, frame_step, frame_size, center=False, pad_mode="constant")
spect = np.abs(stft) ** (1 if not use_power else 2)
mel_basis = librosa.filters.mel(samplerate, n_fft, n_mels, fmin, fmax)
mel_spect = np.dot(mel_basis, spect)
logmel = power_to_db(mel_spect, top_db=None)
mfcc = scipy.fftpack.dct(logmel, axis=0, type=2, norm=None)
with open("ground_truth.h", "w") as f:
f.write(f"float ground_truth[] = {{\n")
for elem in mfcc.T.flatten():
f.write(f"{elem}, ")
f.write("};\n")
|
1616767
|
import logging
from django.db import connection
from django.core.management.base import BaseCommand
from usaspending_api.etl.management.helpers.recent_periods import retrieve_recent_periods
logger = logging.getLogger("script")
UPDATE_AWARDS_SQL = """
WITH recent_covid_awards AS (
SELECT
DISTINCT ON
(faba.award_id) faba.award_id,
sa.is_final_balances_for_fy
FROM
financial_accounts_by_awards faba
INNER JOIN disaster_emergency_fund_code defc ON
defc.code = faba.disaster_emergency_fund_code
AND defc.group_name = 'covid_19'
INNER JOIN submission_attributes sa ON
sa.reporting_period_start >= '2020-04-01'
AND faba.submission_id = sa.submission_id
INNER JOIN dabs_submission_window_schedule dabs ON
dabs.id = sa.submission_window_id
AND dabs.submission_reveal_date <= now()
WHERE
faba.award_id IS NOT NULL
ORDER BY
faba.award_id, submission_reveal_date DESC, is_quarter
),
last_periods_covid_awards AS (
SELECT
DISTINCT award_id
FROM
financial_accounts_by_awards faba
INNER JOIN submission_attributes sa ON
faba.submission_id = sa.submission_id
INNER JOIN dabs_submission_window_schedule dabs ON
dabs.id = sa.submission_window_id
INNER JOIN disaster_emergency_fund_code defc ON
defc.code = faba.disaster_emergency_fund_code
AND defc.group_name = 'covid_19'
WHERE
(
submission_fiscal_year = {last_months_year}
AND submission_fiscal_month = {last_months_month}
AND is_quarter = FALSE
)
OR (
submission_fiscal_year = {last_quarters_year}
AND submission_fiscal_month = {last_quarters_month}
AND is_quarter = TRUE
)
)
{operation_sql}
WHERE
id IN (
SELECT
*
FROM
last_periods_covid_awards
)
AND id IN (
SELECT
award_id
FROM
recent_covid_awards
WHERE
recent_covid_awards.is_final_balances_for_fy = FALSE
)
AND update_date < '{submission_reveal_date}'
"""
UPDATE_AWARDS_ALL_SQL = """
WITH covid_awards AS (
SELECT
DISTINCT ON
(faba.award_id) faba.award_id,
sa.is_final_balances_for_fy
FROM
financial_accounts_by_awards faba
INNER JOIN disaster_emergency_fund_code defc ON
defc.code = faba.disaster_emergency_fund_code
AND defc.group_name = 'covid_19'
INNER JOIN submission_attributes sa ON
sa.reporting_period_start >= '2020-04-01'
AND faba.submission_id = sa.submission_id
INNER JOIN dabs_submission_window_schedule dabs ON
dabs.id = sa.submission_window_id
AND dabs.submission_reveal_date <= now()
WHERE
faba.award_id IS NOT NULL
ORDER BY
faba.award_id, submission_reveal_date DESC, is_quarter
)
{operation_sql}
WHERE
id IN (
SELECT
award_id
FROM
covid_awards
WHERE
covid_awards.is_final_balances_for_fy = FALSE
)
AND update_date < '{submission_reveal_date}'
"""
UPDATE_OPERATION_SQL = """
UPDATE
awards
SET
update_date = NOW()
"""
COUNT_OPERATION_SQL = """
SELECT
count(*)
FROM
awards AS award_to_update_count
"""
class Command(BaseCommand):
"""
NOTE: This command should be run on the `submission_reveal_date` of each period. This
will ensure that covid values in Elasticsearch are correct for each period. Running the
command more frequently (e.g. daily) will not result in values being constantly recalculated
because an award's `update_date` is compared against the `submission_reveal_date`
when determining which awards to update.
"""
help = (
"This command sets the 'update_date' field on award records with Covid "
"faba records present in a submission from the previous submission but "
"not in the current period's submission."
)
def add_arguments(self, parser):
parser.add_argument(
"--all",
action="store_true",
default=False,
help="If this option is selected, ALL covid awards not present in the current period will be updated",
)
parser.add_argument(
"--dry-run",
action="store_true",
default=False,
help="If this option is selected, awards will not be updated. The count of awards that would have been updated will instead be logged",
)
def handle(self, *args, **options):
periods = retrieve_recent_periods()
submission_reveal_date = periods["this_month"]["submission_reveal_date"]
dry_run = options["dry_run"]
operation_sql = UPDATE_OPERATION_SQL
if dry_run:
logger.info("Dry run flag provided. No records will be updated.")
operation_sql = COUNT_OPERATION_SQL
if not options["all"]:
formatted_update_sql = UPDATE_AWARDS_SQL.format(
last_months_year=periods["last_month"]["year"],
last_months_month=periods["last_month"]["month"],
last_quarters_year=periods["last_quarter"]["year"],
last_quarters_month=periods["last_quarter"]["month"],
submission_reveal_date=submission_reveal_date,
operation_sql=operation_sql,
)
else:
logger.info("All flag provided. Updating all Covid awards not reported in the latest submission")
formatted_update_sql = UPDATE_AWARDS_ALL_SQL.format(
submission_reveal_date=submission_reveal_date, operation_sql=operation_sql
)
self.execute_sql(formatted_update_sql, dry_run)
def execute_sql(self, update_sql, dry_run):
# Open connection to database
with connection.cursor() as cursor:
cursor.execute(update_sql)
# Log results
if dry_run:
count = cursor.fetchone()[0]
logger.info(
f"There are {count:,} award records which should be reloaded into Elasticsearch for data consistency."
)
else:
logger.info(f"Update message (records updated): {cursor.statusmessage}")
|
1616783
|
from unittest.mock import MagicMock, patch
import pytest
from PySide2.QtGui import QClipboard
from PySide2.QtTest import QTest
from node_launcher.gui.menu.menu import Menu
@pytest.fixture
def menu() -> Menu:
system_tray = MagicMock()
node_set = MagicMock()
node_set.tor_node.network = 'tor'
node_set.bitcoind_node.network = 'bitcoin'
node_set.lnd_node.network = 'lnd'
node_set.lnd_node.configuration.rest_url = 'test rest'
node_set.lnd_node.configuration.macaroon_path = 'test macaroon'
menu = Menu(node_set=node_set, system_tray=system_tray)
return menu
@patch('node_launcher.gui.menu.menu.webbrowser')
@patch('node_launcher.gui.menu.menu.reveal')
class TestMenu(object):
def test_joule_url_action(self,
reveal_patch: MagicMock,
webbrowser_patch: MagicMock,
menu: Menu,
qtbot: QTest):
menu.joule_url_action.trigger()
assert QClipboard().text() == 'test rest'
def test_joule_macaroons_action(self,
reveal_patch: MagicMock,
webbrowser_patch: MagicMock,
menu: Menu,
qtbot: QTest):
menu.joule_macaroons_action.trigger()
reveal_patch.assert_called_with('test macaroon')
def test_zap_open_action(self,
reveal_patch: MagicMock,
webbrowser_patch: MagicMock,
menu: Menu,
qtbot: QTest):
menu.zap_open_action.trigger()
webbrowser_patch.open.assert_called_once()
|
1616823
|
import os
import sys
import subprocess
import platform
import helpers
from io import open
def run():
test_name = 'optest'
config = 'Release'
libExt = helpers.static_lib_extensions()
dc = helpers.detect_compiler()
extra_args = list()
helpers.setup_args(extra_args, config=config)
if (helpers.is_windows()):
extra_args.append(f'-L{test_name}{libExt}')
else:
extra_args.append(f'-L-l{test_name}')
prev_cwd = os.getcwd()
rootPath = os.path.dirname(sys.argv[0])
path = (rootPath + '/' + __package__)
os.chdir(path)
try:
os.mkdir('temp')
except FileExistsError:
pass
os.chdir('temp')
with open('out.txt', 'w') as out, open('err.txt', 'w') as err:
kwargs = dict(stdout=out, stderr=err)
try:
try:
helpers.cmake_run('../cpp', config=config, **kwargs)
except subprocess.CalledProcessError:
raise ValueError("C++ compilation error")
try:
subprocess.run(f'../../../build/gentool ../{test_name}.json'.split(), check=True, **kwargs)  # check=True so CalledProcessError is actually raised
except subprocess.CalledProcessError:
raise ValueError("Binding generation failed")
try:
subprocess.run(f'{dc} ../d/{test_name}.d generated.d {" ".join(extra_args)} -unittest -main -g'.split(), check=True)
except subprocess.CalledProcessError:
raise ValueError("D compilation error")
subprocess.run(f'./{test_name}', check=True)
finally:
os.chdir(prev_cwd)
|
1616830
|
import unittest
import torch
from laia.decoders import CTCNBestDecoder
class CTCNBestDecoderTest(unittest.TestCase):
def test(self):
x = torch.tensor(
[
[[1.0, 3.0, -1.0, 0.0]],
[[-1.0, 2.0, -2.0, 3.0]],
[[1.0, 5.0, 9.0, 2.0]],
[[-1.0, -2.0, -3.0, -4.0]],
]
)
decoder = CTCNBestDecoder(4)
r = decoder(x)
paths = ([1, 3, 2, 0], [1, 1, 2, 0], [1, 3, 2, 1], [1, 1, 2, 1])
e = [[(sum(x[i, 0, v] for i, v in enumerate(p)).item(), p) for p in paths]]
self.assertEqual(e, r)
if __name__ == "__main__":
unittest.main()
|
1616854
|
import KratosMultiphysics as KM
import KratosMultiphysics.KratosUnittest as KratosUnittest
class TestPoint(KratosUnittest.TestCase):
def test_point_constructor_with_kratos_array(self):
coords = [1.0, -2.5, 3.3]
arr = KM.Array3(coords)
point = KM.Point(arr)
self.assertAlmostEqual(point.X, coords[0])
self.assertAlmostEqual(point.Y, coords[1])
self.assertAlmostEqual(point.Z, coords[2])
def test_point_constructor_with_kratos_vector(self):
coords = [1.0, -2.5, 3.3]
vec = KM.Vector(coords)
point = KM.Point(vec)
self.assertAlmostEqual(point.X, coords[0])
self.assertAlmostEqual(point.Y, coords[1])
self.assertAlmostEqual(point.Z, coords[2])
if __name__ == '__main__':
KratosUnittest.main()
|
1616931
|
from itertools import islice
from typing import Iterable, TypeVar
import numpy as np
from six import string_types
# alphabets:
from kipoiseq import Variant
DNA = ["A", "C", "G", "T"]
RNA = ["A", "C", "G", "U"]
AMINO_ACIDS = ["A", "R", "N", "D", "B", "C", "E", "Q", "Z", "G", "H",
"I", "L", "K", "M", "F", "P", "S", "T", "W", "Y", "V"]
alphabets = {"DNA": DNA,
"RNA": RNA,
"AMINO_ACIDS": AMINO_ACIDS}
def to_scalar(obj):
"""Convert numpy scalar to native scalar
"""
if isinstance(obj, np.generic):
return obj.item()
else:
return obj
def parse_alphabet(alphabet):
if isinstance(alphabet, str):
return list(alphabet)
else:
return alphabet
def parse_dtype(dtype):
if isinstance(dtype, string_types):
try:
return eval(dtype)
except Exception as e:
raise ValueError("Unable to parse dtype: {}. \nException: {}".format(dtype, e))
else:
return dtype
T = TypeVar('T')
def batch_iter(items: Iterable[T], batch_size: int) -> Iterable[Iterable[T]]:
# ensure this is an iterator
item_iter = iter(items)
while True:
# create next `batch_size` number of items;
batch = list(islice(item_iter, batch_size))
if len(batch) == 0:
break
yield batch
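# Small example of batch_iter (the values shown are what the generator yields):
# list(batch_iter(range(5), batch_size=2)) -> [[0, 1], [2, 3], [4]]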
|
1616941
|
import random
import os
import pickle
import librosa as lb
import numpy as np
import musdb
import yaml
# ignore warning about unsafe loaders in pyYAML 5.1 (used in musdb)
# https://github.com/yaml/pyyaml/wiki/PyYAML-yaml.load(input)-Deprecation
yaml.warnings({'YAMLLoadWarning': False})
def musdb_pre_processing(path_to_musdb, path_to_save_data, target_sr,
frame_length):
"""
This function splits all MUSDB tracks into frames of a given length, downsamples them to a given sampling rate,
converts them to mono and saves each frame as .npy-file. It randomly splits the training partition into a training
(80 tracks) and a validation (20 tracks) set.
"""
path_to_save_train_set = os.path.join(path_to_save_data, 'train')
path_to_save_val_set = os.path.join(path_to_save_data, 'val')
path_to_save_test_set = os.path.join(path_to_save_data, 'test')
if not os.path.exists(path_to_save_data):
os.makedirs(path_to_save_data)
if not os.path.exists(path_to_save_train_set):
os.makedirs(path_to_save_train_set)
if not os.path.exists(path_to_save_val_set):
os.makedirs(path_to_save_val_set)
if not os.path.exists(path_to_save_test_set):
os.makedirs(path_to_save_test_set)
# load the musdb train and test partition with the parser musdb (https://github.com/sigsep/sigsep-mus-db)
musdb_corpus = musdb.DB(root_dir=path_to_musdb)
training_tracks = musdb_corpus.load_mus_tracks(subsets=['train'])
test_tracks = musdb_corpus.load_mus_tracks(subsets=['test'])
# randomly select 20 tracks from the training partition that will be the validation set
all_idx = list(np.arange(0, 100))
random.seed(1)
val_idx = random.sample(population=all_idx, k=20) # track indices of validation set tracks
train_idx = [idx for idx in all_idx if idx not in val_idx] # track indices of training set tracks
# process and save training set
train_file_list = []
for idx in train_idx:
track = training_tracks[idx]
track_name = track.name.split('-')
track_name = track_name[0][0:6] + "_" + track_name[1][1:6]
track_name = track_name.replace(" ", "_")
track_audio = track.targets['accompaniment'].audio
track_audio_mono = lb.to_mono(track_audio.T)
track_audio_mono_resampled = lb.core.resample(track_audio_mono, track.rate, target_sr)
frames = lb.util.frame(y=track_audio_mono_resampled, frame_length=frame_length, hop_length=frame_length)
number_of_frames = frames.shape[1]
for n in range(number_of_frames):
file_name = track_name + '_{}.npy'.format(n)
np.save(os.path.join(path_to_save_train_set, file_name), frames[:, n])
train_file_list.append(file_name)
pickle_out = open(os.path.join(path_to_save_train_set, "train_file_list.pickle"), "wb")
pickle.dump(train_file_list, pickle_out)
pickle_out.close()
# process and save validation set
val_file_list = []
for idx in val_idx:
track = training_tracks[idx]
track_name = track.name.split('-')
track_name = track_name[0][0:6] + "_" + track_name[1][1:6]
track_name = track_name.replace(" ", "_")
track_audio = track.targets['accompaniment'].audio
track_audio_mono = lb.to_mono(track_audio.T)
track_audio_mono_resampled = lb.core.resample(track_audio_mono, track.rate, target_sr)
frames = lb.util.frame(y=track_audio_mono_resampled, frame_length=frame_length, hop_length=frame_length)
number_of_frames = frames.shape[1]
for n in range(number_of_frames):
file_name = track_name + '_{}.npy'.format(n)
np.save(os.path.join(path_to_save_val_set, file_name), frames[:, n])
val_file_list.append(file_name)
pickle_out = open(os.path.join(path_to_save_val_set, "val_file_list.pickle"), "wb")
pickle.dump(val_file_list, pickle_out)
pickle_out.close()
# process and save test set
test_file_list = []
for idx in range(50):
track = test_tracks[idx]
track_name = track.name.split('-')
track_name = track_name[0][0:6] + "_" + track_name[1][1:6]
track_name = track_name.replace(" ", "_")
track_audio = track.targets['accompaniment'].audio
track_audio_mono = lb.to_mono(track_audio.T)
track_audio_mono_resampled = lb.core.resample(track_audio_mono, track.rate, target_sr)
frames = lb.util.frame(y=track_audio_mono_resampled, frame_length=frame_length, hop_length=frame_length)
number_of_frames = frames.shape[1]
for n in range(number_of_frames):
file_name = track_name + '_{}.npy'.format(n)
np.save(os.path.join(path_to_save_test_set, file_name), frames[:, n])
test_file_list.append(file_name)
pickle_out = open(os.path.join(path_to_save_test_set, "test_file_list.pickle"), "wb")
pickle.dump(test_file_list, pickle_out)
pickle_out.close()
if __name__ == '__main__':
path_to_musdb = '../Datasets/MUSDB18'
path_to_save_data = '../Datasets/MUSDB_accompaniments'
target_sr = 16000
frame_length = 131584
musdb_pre_processing(path_to_musdb, path_to_save_data, target_sr=target_sr, frame_length=frame_length)
|
1616972
|
from burp import IBurpExtender
from burp import IHttpListener
from burp import IProxyListener
import re
import sys
import os
urls_in_scope=['testblah.com','qa.ooboob.com']
#Adding directory to the path where Python searches for modules
module_folder = os.path.dirname('/home/arvind/Documents/Me/My_Projects/Git/WebAppsec/BurpExtensions/modules/')
sys.path.insert(0, module_folder)
import webcommon
class BurpExtender(IBurpExtender, IHttpListener, IProxyListener):
def registerExtenderCallbacks(self,callbacks):
# Get a reference to the Burp helpers object
self._helpers = callbacks.getHelpers()
# set our extension name
callbacks.setExtensionName("Third Party Referer")
# register ourselves as an HTTP listener
callbacks.registerHttpListener(self)
# register ourselves as a Proxy listener
callbacks.registerProxyListener(self)
def processProxyMessage(self,messageIsRequest,message):
request_http_service=message.getMessageInfo().getHttpService()
request_byte_array=message.getMessageInfo().getRequest()
request_object=self._helpers.analyzeRequest(request_http_service, request_byte_array)
#Extract hostname from header
hostname=webcommon.get_host_header_from_request(self,request_object)
#Check if the URL is NOT in scope. We want to look at referers for the requests that are made to OTHER domains.
if (hostname) and (hostname[1] not in urls_in_scope):
#Extract referer from header
referer=webcommon.get_referer_header_from_request(self,request_object)
if referer:
t1=referer[1].split('/')
if t1[2] in urls_in_scope:
print referer[1]
|
1616979
|
SPARQL_ENDPOINTS = {
'ontocompchem': 'http://theworldavatar.com/blazegraph/namespace/ontocompchem/sparql',
'ontospecies': 'http://theworldavatar.com/blazegraph/namespace/ontospecies/sparql',
'ontopesscan': 'http://theworldavatar.com/blazegraph/namespace/ontopesscan/sparql'
}
|
1616987
|
import torch
import numpy as np
import pandas as pd
import os
class MNIST:
def __init__(self, DATASET_DIR='./dataset/MNIST/'):
self.DATASET_DIR = DATASET_DIR
def fit_normalizer(self, x):
self.min = np.min(x)
self.max = np.max(x)
def transform_normalizer(self, x):
return (x - self.min)/(self.max - self.min)
def inv_transform_normalizer(self, x):
return (x * (self.max - self.min)) + self.min
def load_dataset(self):
test = pd.read_csv(self.DATASET_DIR+'test.csv')
test = test.values
train = pd.read_csv(self.DATASET_DIR+'train.csv')
train = train.values
test_x = test.T[1:].T
test_y = test.T[0]
train_x = train.T[1:].T
train_y = train.T[0]
train_x, test_x = train_x.astype(np.float32), test_x.astype(np.float32)
self.fit_normalizer(train_x)
train_x = self.transform_normalizer(train_x)
test_x = self.transform_normalizer(test_x)
train_x, train_y, test_x, test_y = torch.from_numpy(train_x), torch.from_numpy(train_y), torch.from_numpy(test_x), torch.from_numpy(test_y)
return train_x, train_y, test_x, test_y
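if __name__ == '__main__':
    # usage sketch; assumes ./dataset/MNIST/train.csv and test.csv exist in the
    # "label, pixel0, ..., pixel783" CSV layout this loader expects
    mnist = MNIST()
    train_x, train_y, test_x, test_y = mnist.load_dataset()
    print(train_x.shape, train_y.shape, test_x.shape, test_y.shape)
    # pixel values are min-max scaled to [0, 1] with statistics fitted on the training split
    print(float(train_x.min()), float(train_x.max()))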
|
1617027
|
from re import T
import re
import bpy
import os
import math
import bmesh
from . import Global
from . import DtbShapeKeys
from . import Versions
from . import DataBase
from . import Util
class ToRigify:
notEnglish = False
amtr_objs = []
METARIG = None
RIG = None
chest_upper_tail = []
neck_lower_head = []
def __init__(self, dtu):
self.dtu = dtu
def find_amtr_objs(self):
for d in Util.myccobjs():
if d.type == "MESH":
for modifier in d.modifiers:
if modifier.type == "ARMATURE":
if modifier.object.name == Global.get_Amtr_name():
self.amtr_objs.append(d.name)
def del_eyesdriver(self):
dobj = Global.getBody()
sks = dobj.data.shape_keys
if sks is None:
return
for didx, dvr in enumerate(sks.animation_data.drivers):
d = dvr.driver
dp = dvr.data_path
if len(dp) > 24:
dp = dp[12:]
dp = dp[: len(dp) - 8]
if dp == "EyesUpDown" or dp == "EyesSideSide":
dobj.data.shape_keys.key_blocks[dp].driver_remove("value")
def prepare_scene(self):
self.chest_upper_tail = []
self.neck_lower_head = []
self.amtr_objs = []
self.RIG = None
self.METARIG = None
Versions.set_english()
for scene in bpy.data.scenes:
if scene.name == "Scene":
scene.tool_settings.use_keyframe_insert_auto = False
    def check_if_possible(self):
        # returns True when the conversion cannot proceed: Rigify already ran, or body/armature is missing
if Global.find_RGFY_all(): # Check if Rigify ran
return True
if Global.getBody() is None:
return True
if Global.getAmtr() is None:
return True
def prepare_bone_list(self, dobj):
blist = []
for bone in dobj.data.edit_bones:
if bone.name.lower() == "chestupper":
for i in range(3):
self.chest_upper_tail.append(bone.tail[i])
elif bone.name.lower() == "necklower":
for i in range(3):
self.neck_lower_head.append(bone.head[i])
b10 = [
bone.name,
bone.head[0],
bone.head[1],
bone.head[2],
bone.tail[0],
bone.tail[1],
bone.tail[2],
bone.roll,
bone.use_connect,
]
blist.append(b10)
Versions.do_chest_upper(blist, self.neck_lower_head)
return blist
def toRigify(self, db, main):
# Prepare for Rigify
self.prepare_scene()
bpy.ops.mesh.primitive_circle_add()
wm = bpy.context.window_manager
wm.progress_begin(0, 100)
Global.decide_HERO()
self.find_amtr_objs()
Global.setOpsMode("OBJECT")
Versions.pivot_active_element_and_center_and_trnormal()
if self.check_if_possible():
return
Global.convert_vgroups() # Updates VertexGroups
if len(Global.get_bone_limit()) == 0: # Seems not Necessary
Global.bone_limit_modify()
wm.progress_update(5)
dobj = Global.getAmtr()
Versions.select(dobj, True)
Versions.active_object(dobj)
Global.setOpsMode("EDIT")
blist = []
blist = self.prepare_bone_list(dobj)
Global.setOpsMode("OBJECT")
wm.progress_update(10)
# Create Rig
rtn = self.make_metarig()
if rtn != "":
main.report({"ERROR"}, rtn)
return
wm.progress_update(15)
Global.setOpsMode("EDIT")
self.fit2Rig(blist, db, 0)
self.fitMetaFace(db)
wm.progress_update(20)
Global.setOpsMode("OBJECT")
rtn = self.generate_rig()
if rtn != "":
main.report({"ERROR"}, rtn)
return
Global.setOpsMode("EDIT")
wm.progress_update(30)
self.all_rigity_bone(db)
wm.progress_update(40)
Global.setOpsMode("EDIT")
self.fit2Rig(blist, db, 2)
wm.progress_update(50)
self.adjust_tweak()
wm.progress_update(55)
Global.setOpsMode("OBJECT")
for oname in self.amtr_objs:
self.toToeWeight1(Util.myccobjs().get(oname))
Global.deselect()
wm.progress_update(60)
for ao in self.amtr_objs:
cobj = Util.myccobjs().get(ao)
Versions.select(cobj, True)
washide = False
if Versions.is_hide_view(cobj):
washide = True
Versions.hide_view(cobj, False)
Versions.active_object(cobj)
bpy.ops.object.parent_clear(type="CLEAR")
if washide:
Versions.hide_view(cobj, True)
Global.deselect()
Versions.select(Global.getAmtr(), True)
Versions.active_object(Global.getAmtr())
self.omit_g8(db)
Util.allobjs().remove(self.METARIG)
wm.progress_update(70)
Global.deselect()
amtr = self.RIG
for ao in self.amtr_objs:
if ao in Util.myccobjs():
d = Util.myccobjs().get(ao)
Versions.select(d, True)
Versions.select(amtr, True)
Versions.active_object(amtr)
bpy.ops.object.parent_set(type="ARMATURE")
Versions.select(d, False)
wm.progress_update(75)
Global.decide_HERO()
Versions.select(Global.getBody(), True)
Versions.active_object(Global.getBody())
dsk = DtbShapeKeys.DtbShapeKeys(True, self.dtu)
self.swap_morph_driver(db, dsk)
wm.progress_update(80)
dsk.swap_fvgroup(db, self.amtr_objs)
wm.progress_update(90)
Global.deselect()
Versions.select(Global.getRgfy(), True)
wm.progress_update(95)
Versions.active_object(Global.getRgfy())
self.finish_job()
wm.progress_update(100)
self.hide_finger_tool()
def omit_g8(self, db):
dobj = Global.getAmtr()
Global.setOpsMode("EDIT")
dobjname = dobj.name
othbns = ["lShin_IK", "lHand_IK", "lShin_P", "lEye_H", "mainEye_H"]
parent_combis = []
for eb in dobj.data.edit_bones:
if eb.parent is not None:
parent_combis.append([eb.name, eb.parent.name])
for eb in dobj.data.edit_bones:
ebname = eb.name
for dbn in db.tbl_basic_bones:
if (("Toe" in dbn[0]) and len(dbn[0]) > 4) == False:
if dbn[0] == ebname:
dobj.data.edit_bones.remove(eb)
for ob in othbns:
if (("Toe" in dbn[0]) and len(dbn[0]) > 4) == False:
if ob == ebname or ("r" + ob[1:]) == ebname:
dobj.data.edit_bones.remove(eb)
dbones = ["sight", "root"]
for dbs in dbones:
if dobj.data.edit_bones.get(dbs) is not None:
dobj.data.edit_bones.remove(dobj.data.edit_bones[dbs])
bnum = len(dobj.data.edit_bones)
Global.setOpsMode("OBJECT")
if bnum < 2:
Versions.active_object(dobj)
bpy.ops.object.delete(use_global=False)
else:
Global.deselect()
aobj = Util.myccobjs().get(dobjname)
amtr_bones = []
for b in aobj.data.bones:
amtr_bones.append(b.name)
Versions.select(aobj, True)
robj = self.RIG
Versions.select(robj, True)
Versions.active_object(robj)
bpy.ops.object.join()
Global.setOpsMode("EDIT")
for eb in robj.data.edit_bones:
if eb.name in amtr_bones and eb.parent is None:
find = False
if "Toe" in eb.name:
if eb.name.startswith("r"):
eb.parent = robj.data.edit_bones["toe.R"]
self.to_layer(eb, 18)
else:
eb.parent = robj.data.edit_bones["toe.L"]
self.to_layer(eb, 15)
find = True
else:
for combi in parent_combis:
if find == True:
break
if eb.name == combi[0]:
mparent = combi[1]
for torigify in db.toRigify:
if torigify[0] >= 2:
ops_torigify = "r" + torigify[1][1:]
nparent = ""
if mparent == ops_torigify:
nparent = "DEF-" + torigify[2].replace(
".L", ".R"
)
elif mparent == torigify[1]:
nparent = "DEF-" + torigify[2]
if (
nparent != ""
and nparent in robj.data.edit_bones
):
eb.parent = robj.data.edit_bones.get(
nparent
)
self.to_layer(eb, 3)
find = True
break
if find == False:
eb.parent = robj.data.edit_bones["torso"]
self.to_layer(eb, 3)
def to_layer(self, ebone, lnum):
ebone.layers[lnum] = True
for i in range(32):
if i != lnum:
ebone.layers[i] = False
def toToeWeight1(self, dobj):
Versions.active_object(dobj)
Global.toMergeWeight_str(dobj, "lMetatarsals", ["lFoot"], True, False)
Global.toMergeWeight_str(dobj, "rMetatarsals", ["rFoot"], True, False)
def swap_morph_driver(self, db, dsk):
for ao in self.amtr_objs:
if (ao in Util.myccobjs()) == False:
continue
dobj = Util.myccobjs().get(ao)
self.changeVgroup(dobj, db)
sks = dobj.data.shape_keys
if sks is None:
continue
if sks.animation_data is None:
continue
for didx, dvr in enumerate(sks.animation_data.drivers):
d = dvr.driver
dp = dvr.data_path
if len(dp) > 24:
dp = dp[12:]
dp = dp[: len(dp) - 8]
if dp in dobj.data.shape_keys.key_blocks:
dobj.data.shape_keys.key_blocks[dp].driver_remove("value")
dsk.makeDrive(dobj, db)
for modifier in Util.myccobjs().get(ao).modifiers:
if modifier.type == "ARMATURE":
modifier.use_deform_preserve_volume = True
def finish_job(self):
Global.setOpsMode("POSE")
for ob in Util.myccobjs():
if Global.isRiggedObject(ob):
for m in ob.modifiers:
if m.name == "Armature":
m.show_on_cage = True
m.show_in_editmode = True
ob.use_shape_key_edit_mode = True
rig = Global.getRgfy()
iks = ["thigh_parent", "upper_arm_parent", "MCH-thigh_ik", "MCH-upper_arm"]
lrs = [".L", ".R"]
for ik in iks:
for lr in lrs:
for pb in rig.pose.bones:
if pb.name == ik + lr:
pb["IK_Stretch"] = 0
Versions.show_x_ray(Global.getRgfy())
bs = ["DEF-pelvis.R", "head", "tweak_spine", "WGT-rig_breast.R"]
if bs[0] in Global.getRgfyBones():
Global.getRgfyBones()[bs[0]].hide = True
if bs[1] in Global.getRgfyBones():
Global.getRgfyBones()[bs[1]].hide = True
if bs[2] in Global.getRgfyBones():
pbs = Global.getRgfy().pose.bones
if bs[2] in pbs:
pbs[bs[2]].custom_shape = Util.allobjs().get(bs[3])
                # Blender 3.0 breaking change
Versions.handle_custom_shape_scale(pbs[bs[2]], 6.0)
Global.getRgfyBones()[bs[2]].layers[3] = True
Global.getRgfyBones()[bs[2]].layers[4] = False
for i in range(3):
Global.getRgfy().pose.bones[bs[2]].lock_rotation[i] = False
Global.setRgfy_name("rig" + Util.get_dzidx())
Versions.reverse_language()
nper = ["tweak_spine.003", "tweak_spine.002"]
for i in range(2):
add = 1
if i == 1:
add = -1
add = add * (0.01 * Global.get_size())
Global.getRgfy().pose.bones[nper[i]].location[1] += add
Versions.rigify_finger()
self.finish_toes()
def finish_toes(self):
pbones = Global.getRgfy().pose.bones
for pb in pbones:
if ("Toe" in pb.name) and len(pb.name) > 4:
wtg = Util.allobjs().get("Circle")
if wtg is not None:
pb.custom_shape = wtg
                    # Blender 3.0 breaking change
Versions.handle_custom_shape_scale(pb, 0.2)
def delete001_sk(self):
Global.setOpsMode("OBJECT")
obj = Global.getBody()
Versions.select(obj, True)
Versions.active_object(obj)
sp = obj.data.shape_keys
if sp is not None:
max = len(sp.key_blocks)
i = 0
for notouch in range(max):
obj.active_shape_key_index = i
if obj.active_shape_key.name.endswith(".001"):
bpy.ops.object.shape_key_remove(all=False)
max = max - 1
else:
i = i + 1
def avg_pos(self, vlist, all_vs, db):
pos3 = [0, 0, 0]
sum = len(vlist) - 2
for i, v in enumerate(vlist):
if i < 2:
continue
if Global.getIsGen():
v = Global.toGeniVIndex(v)
for j in range(3):
pos3[j] += all_vs[v].co[j]
for j in range(3):
pos3[j] = pos3[j] / sum
return pos3
def fitMetaFace(self, db):
bobj = Global.getBody()
all_vs = bobj.data.vertices
amtr = Util.myccobjs().get("metarig")
tbl = None
if Global.getIsMan():
tbl = db.tometaface_m
else:
tbl = db.tometaface_f
for b in amtr.data.edit_bones:
bname = b.name
for dbn in tbl:
ops_dbn = dbn[1].replace(".L", ".R")
bool_ops = bname == ops_dbn
if bname == dbn[1] or bool_ops:
pos3 = self.avg_pos(dbn, all_vs, db)
if bool_ops:
pos3[0] = 0 - pos3[0]
if dbn[0] == 0:
b.tail = pos3
else:
b.head = pos3
clist = []
for z in range(2):
for b in amtr.data.edit_bones:
bname = b.name
ops_bname = bname.replace(".R", ".L")
for tfc in db.tometaface_couple:
if z == 0 and bname == tfc[1]:
clist.append(
[
b.name,
[b.tail[0], b.tail[1], b.tail[2]],
[b.head[0], b.head[1], b.head[2]],
]
)
if z == 1 and (bname == tfc[2] or ops_bname == tfc[2]):
bool_ops = ops_bname == tfc[2] and bname != tfc[2]
for cl in clist:
# five is tail to head
if cl[0] == tfc[1]:
if tfc[0] == 0:
b.tail = cl[1]
elif tfc[0] == 1:
b.head = cl[2]
elif tfc[0] == 5:
b.tail = cl[2]
elif tfc[0] == 9:
b.head = cl[1]
break
if bool_ops:
if tfc[0] == 0 or tfc[0] == 5:
b.tail[0] = 0 - b.tail[0]
else:
b.head[0] = 0 - b.head[0]
def getPlainRol(self, db, plainbone):
for r in DataBase.tbl_brollfix:
if r[0] == plainbone or (
r[0].startswith("-") and r[0][1:] == plainbone[1:]
):
roll = r[1]
if r[0].startswith("-") and plainbone.startswith("l"):
roll = 0 - roll
return roll
return 0
def hide_finger_tool(self):
bs = Global.getRgfy().data
ls = ["f_index", "thumb", "f_middle", "f_ring", "f_pinky"]
rs = [".01_master.L", ".01_master.R"]
for l in ls:
for r in rs:
if bs.bones.get(l + r) is not None:
bs.bones.get(l + r).hide = True
def all_rigity_bone(self, db):
rig = bpy.context.active_object
Global.setOpsMode("EDIT")
b5 = [
["thigh", "Thigh", "ThighBend"],
["shin", "Shin", "Shin"],
["upper_arm", "Shldr", "ShldrBend"],
["forearm", "Forearm", "ForearmBend"],
["foot", "Foot", "Foot"],
["hand", "Hand", "Hand"],
]
lr2 = ["L", "R"]
find = False
miss = ""
get = ""
for bone in rig.data.edit_bones:
if "arm" in bone.name or "hand" in bone.name:
r = bone.roll
if r < 0:
r = r - math.radians(30)
else:
r = r + math.radians(30)
bone.roll = r
continue
if "thigh" in bone.name:
if ".L" in bone.name:
if bone.name == "MCH-thigh_ik_target.L":
bone.roll = math.radians(-94)
else:
bone.roll = math.radians(-11)
else:
if bone.name == "MCH-thigh_ik_target.R":
bone.roll = math.radians(94)
else:
bone.roll = math.radians(11)
mch_ti = ["MCH-thigh_ik.L", "MCH-thigh_ik.R"]
for mt in mch_ti:
if bone.name == mt:
if bone.head[1] > 0:
bone.head[1] = 0
continue
if "shin" in bone.name:
mch_ti = ["MCH-shin_ik.L", "MCH-shin_ik.R"]
for mt in mch_ti:
if bone.name == mt:
if bone.head[1] > -0.002 * Global.get_size():
bone.head[1] = -0.002 * Global.get_size()
if ".L" in bone.name:
bone.roll = math.radians(-8)
else:
bone.roll = math.radians(8)
continue
if bone.name.startswith("DEF-foot"):
if ".L" in bone.name:
bone.roll = math.radians(-84)
else:
bone.roll = math.radians(84)
continue
if "breast" in bone.name:
if ".L" in bone.name:
bone.roll = math.radians(-50)
else:
bone.roll = math.radians(50)
if bone.name.startswith("DEF-breast."):
if "DEF-spine.002" in self.RIG.data.bones:
bone.parent = self.RIG.data.edit_bones["DEF-spine.002"]
oeyes = [
"ORG-eye.L",
"ORG-eye.R",
"ORG-teeth.T",
"ORG-teeth.B",
"ear.L",
"ear.R",
]
for oe in oeyes:
if bone.name == oe:
bone.use_deform = True
find = False
Global.setOpsMode("POSE")
for pb in rig.pose.bones:
find = False
mix = [0, 0, 0, 0, 0, 0]
if (
(
("fk." in pb.name)
and pb.name.startswith("MCH-") == False
and ("hand" in pb.name) == False
and ("upper_arm." in pb.name) == False
)
or pb.name[0 : len(pb.name) - 1] == "thumb.01."
or pb.name[0 : len(pb.name) - 1] == "toe."
):
pb.rotation_mode = "YZX"
else:
pb.rotation_mode = "XYZ"
for b in b5:
if b[0] in pb.name:
for lr in lr2:
for i, k9 in enumerate(db.kind9(b[0], lr)):
if i > 0 and i != 2 and i != 3:
continue
if pb.name == k9:
b1 = b[1]
if k9.startswith("MCH-thigh_ik."):
b1 = "Shin"
lr = lr.lower()
mix = db.mix_range(lr + b1)
find = True
break
if find:
break
if find:
break
if find == False:
for tbr in db.tbl_blimit_rgfy:
if tbr[0] == pb.name:
mix = tbr[1]
find = True
break
if find == False:
for tbl in db.toRigify:
if tbl[0] < 2:
continue
ops_tbl = [tbl[0], "r" + tbl[1][1:], tbl[2].replace(".L", ".R")]
bool_ops = (".L" in tbl[2]) and pb.name == ops_tbl[2]
if pb.name == tbl[2] or bool_ops:
for gbl in Global.get_bone_limit():
if (gbl[0] == tbl[1] and bool_ops == False) or (
bool_ops and gbl[0] == ops_tbl[1]
):
line = ""
for i in range(6):
mix[i] = gbl[2 + i]
line = line + str(gbl[2 + i]) + ","
find = True
break
if find == True:
break
if find == True:
if "arm" in pb.name:
for i in range(3):
if i == 1:
continue
wk = mix[i * 2]
mix[i * 2] = 0 - mix[i * 2 + 1]
mix[i * 2 + 1] = 0 - wk
if "toe" in pb.name:
for i in range(3):
wk = mix[i * 2]
mix[i * 2] = 0 - mix[i * 2 + 1]
mix[i * 2 + 1] = 0 - wk
skips = [
"foot_ik.L",
"foot_ik.R",
"hand_ik.L",
"hand_ik.R",
"ORG-hand.L",
"ORG-hand.R",
]
skip_ik = False
for skip in skips:
if pb.name == skip:
skip_ik = True
break
if skip_ik == False:
lr = pb.constraints.new("LIMIT_ROTATION")
lr.owner_space = "LOCAL"
lr.use_limit_x = True
lr.min_x = math.radians(mix[0])
lr.max_x = math.radians(mix[1])
lr.use_limit_y = True
lr.min_y = math.radians(mix[2])
lr.max_y = math.radians(mix[3])
lr.use_limit_z = True
lr.min_z = math.radians(mix[4])
lr.max_z = math.radians(mix[5])
lr.use_transform_limit = True
self.adjust_pose_bones(pb)
def adjust_pose_bones(self, pb):
if pb.name.startswith("f_") and (
pb.name.endswith(".L") or pb.name.endswith(".R")
):
for c in pb.constraints:
if c.name == "Copy Rotation":
c.use_x = False
c.use_z = True
if pb.name.startswith("DEF-breast.") and len(pb.name) == 12:
cr = pb.constraints.new("COPY_TRANSFORMS")
cr.target = self.RIG # Util.myccobjs().get('rig')
if pb.name.endswith("L"):
cr.subtarget = "breast.L"
else:
cr.subtarget = "breast.R"
cr.target_space = "WORLD"
cr.owner_space = "WORLD"
cr.influence = 0.4
cr.head_tail = 0.1
pb.scale[0] = 0.9
if Versions.getBV() > 2.80:
pb.scale[1] = 0.90
else:
pb.scale[1] = 0.95
pb.scale[2] = 0.9
if pb.name == "head":
cr = pb.constraints.new("COPY_ROTATION")
cr.target = self.RIG # Util.myccobjs().get('rig')
cr.subtarget = "neck"
cr.use_x = True
cr.use_y = True
cr.use_z = True
cr.target_space = "LOCAL"
cr.owner_space = "LOCAL"
pb.lock_rotation = [True] * 3
if pb.name == "DEF-spine.002":
for c in pb.constraints:
if c.name == "Stretch To":
c.mute = True
elif c.name == "Copy Transforms":
c.head_tail = Versions.get_defspine002_heatail()
elif c.name == "Damped Track":
c.head_tail = 0.5
if pb.name == "torso":
pb["head_follow"] = 0.8
pb["neck_follow"] = 0.8
if pb.name == "DEF-shin.L" or pb.name == "DEF-shin.R":
for c in pb.constraints:
if c.name == "Copy Transforms":
c.mute = True
back_reverses = [
["upper_arm_tweak", "upper_arm_fk"],
["thigh_tweak", "thigh_fk"],
["forearm_tweak", "forearm_fk"],
]
lrs = [".L", ".R"]
idx = 0
influ = [1.0, 0.75, 0.5]
for br in back_reverses:
for lr in lrs:
if pb.name == br[0] + lr:
cr = pb.constraints.new("COPY_ROTATION")
cr.target = self.RIG # Util.myccobjs().get('rig')
cr.subtarget = br[1] + lr
cr.use_x = False
cr.use_y = True
cr.invert_y = True
cr.use_z = False
cr.influence = influ[idx]
cr.target_space = "LOCAL"
cr.owner_space = "LOCAL"
idx = idx + 1
def ik2fk(self, idx):
rig_id = Global.getRgfy().data["rig_id"]
parents = ["thigh_parent", "upper_arm_parent"]
len11 = [
["thigh_fk", "thigh_fk"],
["shin_fk", "shin_fk"],
["mfoot_fk", "MCH-foot_fk"],
["foot_fk", "foot_fk"],
["thigh_ik", "thigh_ik"],
["shin_ik", "MCH-thigh_ik"],
["foot_ik", "foot_ik"],
["footroll", "foot_heel_ik"],
["pole", "thigh_ik_target"],
["mfoot_ik", "MCH-thigh_ik_target"],
["main_parent", "thigh_parent"],
]
arm8 = [
["uarm_fk", "upper_arm_fk"],
["farm_fk", "forearm_fk"],
["hand_fk", "hand_fk"],
["uarm_ik", "upper_arm_ik"],
["farm_ik", "MCH-upper_arm_ik"],
["hand_ik", "hand_ik"],
["pole", "upper_arm_ik_target"],
["main_parent", "upper_arm_parent"],
]
lr = [".R", ".L"]
for i in range(2):
leg_ik2fk = "bpy.ops.pose.rigify_leg_ik2fk_" + rig_id + "("
for l in len11:
leg_ik2fk += l[0] + " = '" + l[1] + lr[i] + "',"
leg_ik2fk += ")"
arm_ik2fk = "bpy.ops.pose.rigify_arm_ik2fk_" + rig_id + "("
for l in arm8:
arm_ik2fk += l[0] + " = '" + l[1] + lr[i] + "',"
arm_ik2fk += ")"
if idx < 0 or i == idx:
exec(arm_ik2fk)
if idx < 0 or idx == i + 2:
exec(leg_ik2fk)
if idx < 0:
Global.getRgfy().pose.bones[parents[0] + lr[i]]["IK_FK"] = 0
Global.getRgfy().pose.bones[parents[1] + lr[i]]["IK_FK"] = 0
def match_ikfk(self, influence4):
for i, inf in enumerate(influence4):
if inf > 0.5:
self.ik2fk(i)
else:
self.fk2ik(i)
def fk2ik(self, idx):
rig_id = Global.getRgfy().data["rig_id"]
arm6 = [
["uarm_fk", "upper_arm_fk"],
["farm_fk", "forearm_fk"],
["hand_fk", "hand_fk"],
["uarm_ik", "upper_arm_ik"],
["farm_ik", "MCH-upper_arm_ik"],
["hand_ik", "hand_ik"],
]
leg8 = [
["thigh_fk", "thigh_fk"],
["shin_fk", "shin_fk"],
["foot_fk", "foot_fk"],
["mfoot_fk", "MCH-foot_fk"],
["thigh_ik", "thigh_ik"],
["shin_ik", "MCH-thigh_ik"],
["foot_ik", "MCH-thigh_ik_target"],
["mfoot_ik", "MCH-thigh_ik_target"],
]
lr = [".R", ".L"]
parents = ["thigh_parent", "upper_arm_parent"]
for i in range(2):
leg_fk2ik = "bpy.ops.pose.rigify_leg_fk2ik_" + rig_id + "("
for l in leg8:
leg_fk2ik += l[0] + " = '" + l[1] + lr[i] + "',"
leg_fk2ik += ")"
arm_fk2ik = "bpy.ops.pose.rigify_arm_fk2ik_" + rig_id + "("
for l in arm6:
arm_fk2ik += l[0] + " = '" + l[1] + lr[i] + "',"
arm_fk2ik += ")"
if idx < 0 or idx == i:
exec(arm_fk2ik)
if idx < 0 or idx == i + 2:
exec(leg_fk2ik)
if idx < 0:
Global.getRgfy().pose.bones[parents[0] + lr[i]]["IK_FK"] = 1
Global.getRgfy().pose.bones[parents[1] + lr[i]]["IK_FK"] = 1
def ik_stretch_mute(self, flg_mute):
strech_iks = ["thigh_ik", "MCH-thigh_ik", "upper_arm_ik", "MCH-upper_arm_ik"]
lr = [".L", ".R"]
rig = Global.getRgfy()
for pb in rig.pose.bones:
for c in pb.constraints:
if c.name == "IK":
c.use_stretch = flg_mute == False
break
for i in range(len(lr)):
for siks in strech_iks:
if pb.name == (siks + lr[i]):
if flg_mute:
pb.ik_stretch = 0
else:
pb.ik_stretch = 0.1
break
def generate_rig(self):
Global.setOpsMode("OBJECT")
try:
bpy.ops.pose.rigify_generate()
except:
return "Generate Rig Error"
rig = bpy.context.active_object
self.RIG = rig
Versions.select(rig, True)
return ""
def fit2Rig(self, blist, db, sw):
rig = None
if sw == 0:
rig = self.METARIG
else:
rig = self.RIG
for meb in rig.data.edit_bones:
for dmr in db.toRigify:
if (sw < 2 and dmr[0] >= 6) or (sw == 2 and dmr[0] < 2):
continue
key = dmr[2]
keep_key = key
if sw == 1:
key = "ORG-" + key
elif sw == 2:
key = "DEF-" + key
ops_key = key.replace(".L", ".R")
bool_ops = (".L" in key) and (ops_key == meb.name)
ops_keep_key = keep_key.replace(".L", ".R")
bool_keep_ops = (".L" in keep_key) and (ops_keep_key == meb.name)
if key == meb.name or bool_ops:
for b8 in blist:
if (bool_ops == False and b8[0] == dmr[1]) or (
bool_ops
and dmr[1].startswith("l")
and b8[0] == "r" + dmr[1][1:]
):
if (
sw > 0
and (dmr[0] > 0 and dmr[0] != 6)
and ("Toe" in dmr[1]) == False
):
meb.use_connect = b8[8]
for i in range(3):
if dmr[0] != 4:
if dmr[0] >= 2 or dmr[0] == 1 or dmr[0] == 7:
if dmr[0] != 6:
meb.head[i] = b8[1 + i]
if dmr[0] >= 2 or dmr[0] == 0 or dmr[0] == 6:
if dmr[0] != 7:
meb.tail[i] = b8[4 + i]
if sw == 0:
meb.roll = b8[7]
elif keep_key.startswith("f_") and (
keep_key.endswith(".R") or keep_key.endswith(".L")
):
if keep_key == meb.name or bool_keep_ops:
for b8 in blist:
if (bool_keep_ops == False and b8[0] == dmr[1]) or (
bool_keep_ops
and dmr[1].startswith("l")
and b8[0] == "r" + dmr[1][1:]
):
meb.roll = b8[7]
if len(meb.name) == 22 and meb.name.startswith("MCH-upper_arm_parent."):
meb.roll = 0.0
def adjust_tweak(self):
deb = self.RIG.data.edit_bones
at = [
["tweak_spine.003", "DEF-spine.002"],
["tweak_spine.004", "DEF-spine.004"],
]
for a in at:
if (a[0] in deb) and (a[1] in deb):
deb[a[0]].tail[1] = deb[a[1]].tail[1]
deb[a[0]].head[1] = deb[a[1]].tail[1]
at1 = [
["DEF-spine.003", "tweak_spine.003"],
["ORG-spine.003", "tweak_spine.003"],
]
for a in at1:
if (a[0] in deb) and (a[1] in deb):
deb[a[0]].head[1] = deb[a[1]].head[1]
at2 = [
["tweak_spine.005", "DEF-spine.005", "DEF-spine.004"],
["shin_tweak.L", "DEF-thigh.L.001", "DEF-shin.L"],
["shin_tweak.R", "DEF-thigh.R.001", "DEF-shin.R"],
]
for a in at2:
if (a[0] in deb) and (a[1] in deb) and (a[2] in deb):
deb[a[0]].tail[1] = (deb[a[1]].tail[1] + deb[a[2]].tail[1]) / 2
deb[a[0]].head[1] = (deb[a[1]].tail[1] + deb[a[2]].tail[1]) / 2
at3 = ["DEF-spine.002", "tweak_spine.002"]
if (at3[0] in deb) and (at3[1] in deb):
deb[at3[0]].use_connect = False
h = deb[at3[1]].head
t = deb[at3[1]].tail
y = int((t[2] - h[2]) / 6)
h[2] = h[2] + y
h[1] = h[1] + int(y / 2)
deb[at3[0]].head = h
at4 = ["shin_tweak.R", "shin_tweak.L"]
knees = [["DEF-thigh.R.001", "DEF-shin.R"], ["DEF-thigh.L.001", "DEF-shin.L"]]
for aidx, a in enumerate(at4):
if a in deb:
deb[a].roll = 0
z = -1
for kidx, k in enumerate(knees[aidx]):
if k in deb:
if kidx == 0:
z = deb.get(k).tail[2]
elif kidx == 1:
z += deb.get(k).head[2]
z = z / 2
deb.get(a).head[2] = z
at5 = ["thigh_tweak.R.001", "thigh_tweak.L.001"]
for i, a in enumerate(at5):
if a in deb:
deb[a].roll = math.radians(20 + (-40 * i))
arm_d = ["DEF-forearm.L", "DEF-forearm.R"]
for a in arm_d:
if a in deb:
deb[a].use_connect = False
for i in range(3):
deb[a].head[i] = (
deb[a].head[i] + (deb[a].tail[i] - deb[a].head[i]) / 6
)
def make_metarig(self):
error = ""
try:
bpy.ops.object.armature_human_metarig_add()
except AttributeError:
error = "Missing Addon: 'Rigify'"
except:
error = (
"Rigify: Broken... Something's wrong with Rigify. Please report this"
)
if ("metarig" in Util.myccobjs()) == False:
error = "Missing Addon: 'Rigify'"
if error != "":
return error
self.METARIG = bpy.context.active_object
Versions.select(self.METARIG, True)
for i in range(3):
self.METARIG.scale[i] = Global.get_size()
bpy.ops.object.transform_apply(location=True, rotation=True, scale=True)
return ""
def changeVgroup(self, dobj, db):
for vname in dobj.vertex_groups.keys():
for dbone in db.toRigify:
if dbone[0] < 2 or dbone[0] == 4:
continue
ops_dbone1 = "r" + dbone[1][1:]
bool_ops = ops_dbone1 == vname
if dbone[1] == vname or bool_ops:
if bool_ops:
dobj.vertex_groups[vname].name = "DEF-" + dbone[2].replace(
".L", ".R"
)
else:
dobj.vertex_groups[vname].name = "DEF-" + dbone[2]
break
|
1617050
|
import boto3
import pytest
from botocore.exceptions import ClientError
import xoto3.dynamodb.write_versioned as wv
from xoto3.dynamodb.write_versioned import delete
from xoto3.dynamodb.write_versioned.ddb_api import (
built_transaction_to_transact_write_items_args,
is_cancelled_and_retryable,
known_key_schema,
make_transact_multiple_but_optimize_single,
)
from xoto3.dynamodb.write_versioned.errors import TableSchemaUnknownError, VersionedTransaction
from xoto3.dynamodb.write_versioned.prepare import items_and_keys_to_clean_table_data
def test_is_cancelled_and_retryable():
assert is_cancelled_and_retryable(
ClientError({"Error": {"Code": "TransactionCanceledException"}}, "transact_write_items")
)
assert not is_cancelled_and_retryable(
ClientError({"Error": {"Code": "ResourceNotFoundException"}}, "transact_write_items")
)
assert not is_cancelled_and_retryable(
ClientError(
{
"Error": {
"Code": "TransactionCanceledException",
"CancellationReasons": [
dict(Code="ItemCollectionSizeLimitExceeded"), # this is not legal for retry
dict(Code="ConditionalCheckFailed"),
dict(Code="TransactionConflict"),
dict(Code="ThrottlingError"),
dict(Code="ProvisionedThroughputExceeded"),
],
}
},
"transact_write_items",
)
)
def test_key_schema_unfetchable():
try:
table = boto3.resource("dynamodb").Table("thistabledoesnotexist")
with pytest.raises(TableSchemaUnknownError):
known_key_schema(table)
except: # noqa
pass # test cannot run at all without access to DynamoDB
def test_built_transaction_includes_unmodified():
tx = VersionedTransaction(
tables=dict(
Common=items_and_keys_to_clean_table_data(
("id",), [dict(id="unmodified")], [dict(id="delete", val=4)]
)
)
)
tx = delete(tx, "Common", dict(id="delete"))
args = built_transaction_to_transact_write_items_args(tx, "adatetimestring")
assert {
"TransactItems": [
{
"Delete": {
"TableName": "Common",
"Key": {"id": {"S": "delete"}},
"ExpressionAttributeNames": {
"#itemVersion": "item_version",
"#idThatExists": "id",
},
"ExpressionAttributeValues": {":curItemVersion": {"N": "0"}},
"ConditionExpression": "#itemVersion = :curItemVersion OR ( attribute_not_exists(#itemVersion) AND attribute_exists(#idThatExists) )",
}
},
{
"ConditionCheck": {
"TableName": "Common",
"Key": {"id": {"S": "unmodified"}},
"ExpressionAttributeNames": {"#itemVersion": "item_version"},
"ExpressionAttributeValues": {":curItemVersion": {"N": "0"}},
"ConditionExpression": "#itemVersion = :curItemVersion OR attribute_not_exists(#itemVersion)",
}
},
]
} == args
def test_built_transaction_does_not_write_deep_equal_items():
tx = VersionedTransaction(dict())
table = wv.ItemTable("Foo")
item = dict(id="steve", val=3)
tx = table.presume(dict(id="steve"), item)(tx)
tx = table.put(item)(tx)
args = built_transaction_to_transact_write_items_args(tx, "adatetimestring")
effects = args["TransactItems"]
assert len(effects) == 1
assert set(effects[0]) == {"ConditionCheck"}
def test_dont_call_the_client_if_theres_nothing_to_do():
no_client_transact = make_transact_multiple_but_optimize_single(None)
with pytest.raises(AttributeError):
no_client_transact([dict(Put=dict(some=1))])
no_client_transact([dict(ConditionCheck=dict(what=3))])
no_client_transact([])
|
1617089
|
import networkx as nx
import numpy as np
import pandas as pd
from tqdm import tqdm
from feature_engineering.tools import lit_eval_nan_proof
# this script computes the katz and katz_2 features by considering the bidirectional graph of citations:
# up to 10 shortest paths per pair are enumerated and weighted by path_length * (beta ** path_length), with beta = 0.98 and 0.90
# approx 10 minutes to run it
# progress bar for pandas
tqdm.pandas(tqdm())
# path
path_to_data = "data/"
# loading data
converter_dict = {'authors': lit_eval_nan_proof, 'journal': lit_eval_nan_proof,
'title': lit_eval_nan_proof, 'abstract': lit_eval_nan_proof}
nodes = pd.read_csv(path_to_data + "nodes_preprocessed.csv", converters=converter_dict)
nodes.set_index("id", inplace=True)
training = pd.read_csv(path_to_data + "training_features.txt")
training.set_index("my_index", inplace=True)
testing = pd.read_csv(path_to_data + "testing_features.txt")
testing.set_index("my_index", inplace=True)
G = nx.Graph()
G.add_nodes_from(nodes.index.values)
G.add_edges_from(zip(training[training["target"] == 1]["id1"], training[training["target"] == 1]["id2"]))
# IDs for training set
id1 = training['id1'].values
id2 = training['id2'].values
# placeholder for feature
n = len(id1)
katz = np.zeros(n)
katz_2 = np.zeros(n)
beta = 0.98
beta_2 = 0.90
breaking_point = 10
# computing features for training set
for i in tqdm(range(len(id1))):
if training.at[str(id1[i]) + "|" + str(id2[i]), "target"] == 1:
G.remove_edge(id1[i], id2[i])
katz_acc = 0.0
katz_2_acc = 0.0
counter = 0
try:
iterator = nx.all_shortest_paths(G, source=id1[i], target=id2[i])
for p in iterator:
len_p = len(p)
katz_acc += len_p * (beta ** len_p)
katz_2_acc += len_p * (beta_2 ** len_p)
counter += 1
if counter >= breaking_point:
break
katz[i] = katz_acc
        katz_2[i] = katz_2_acc
except:
katz[i] = -1
katz_2[i] = -1
if training.at[str(id1[i]) + "|" + str(id2[i]), "target"] == 1:
G.add_edge(id1[i], id2[i])
# add feature to data-frame
training["katz"] = katz
training["katz_2"] = katz_2
# IDs for testing set
id1 = testing['id1'].values
id2 = testing['id2'].values
# placeholder for feature
n = len(id1)
katz = np.zeros(n)
katz_2 = np.zeros(n)
# computing features for testing set
for i in tqdm(range(len(id1))):
katz_acc = 0.0
katz_2_acc = 0.0
counter = 0
try:
iterator = nx.all_shortest_paths(G, source=id1[i], target=id2[i])
for p in iterator:
len_p = len(p)
katz_acc += len_p * (beta ** len_p)
katz_2_acc += len_p * (beta_2 ** len_p)
counter += 1
if counter >= breaking_point:
break
katz[i] = katz_acc
        katz_2[i] = katz_2_acc
except:
katz[i] = -1
katz_2[i] = -1
# add feature to data-frame
testing["katz"] = katz
testing["katz_2"] = katz_2
# save data-frame
training.to_csv(path_to_data + "training_features.txt")
testing.to_csv(path_to_data + "testing_features.txt")
|
1617109
|
import json
import logging
import pathlib
import typing
import yaml
import znjson
log = logging.getLogger(__name__)
def read_file(file: pathlib.Path) -> dict:
"""Read a json/yaml file without the znjson.Decoder
Parameters
----------
file: pathlib.Path
The file to read
Returns
-------
dict:
Content of the json/yaml file
"""
if file.suffix in [".yaml", ".yml"]:
with file.open("r") as f:
file_content = yaml.safe_load(f)
elif file.suffix == ".json":
file_content = json.loads(file.read_text())
else:
raise ValueError(f"File with suffix {file.suffix} is not supported")
return file_content
def write_file(file: pathlib.Path, value: dict, mkdir: bool = True):
"""Save dict to file
Store dictionary to json or yaml file
Parameters
----------
file: pathlib.Path
File to save to
value: dict
Any serializable data to save
mkdir: bool
Create a parent directory if necessary
"""
if mkdir:
file.parent.mkdir(exist_ok=True, parents=True)
if file.suffix in [".yaml", ".yml"]:
file.write_text(yaml.safe_dump(value, indent=4))
elif file.suffix == ".json":
file.write_text(json.dumps(value, indent=4, cls=znjson.ZnEncoder))
else:
raise ValueError(f"File with suffix {file.suffix} is not supported")
def clear_config_file(file: pathlib.Path, node_name: str):
"""Clear the entries in the files for the given node name
Parameters
----------
file: pathlib.Path
The file to read from, e.g. params.yaml / zntrack.json
node_name: str
The name of the Node
"""
try:
file_content = read_file(file)
except FileNotFoundError:
file_content = {}
_ = file_content.pop(node_name, None)
write_file(file, value=file_content)
def update_config_file(
file: pathlib.Path,
node_name: typing.Union[str, None],
value_name: typing.Union[str, None],
value,
):
"""Update a configuration file
The file structure for node_name is not None is something like
>>> {node_name: {value_name: value}}
and for node_name is None:
>>> {value_name: value}
Parameters
----------
file: pathlib.Path
The file to save to
node_name: str|None
the node_name, if None the file is assumed to be {value_name: value}
value_name: str|None
the key of the value to update, if None the file is assumed to
be {node_name: value}.
value:
The value to write to the file
"""
# Read file
if node_name is None and value_name is None:
raise ValueError("Either node_name or value_name must not be None")
try:
file_content = read_file(file)
except FileNotFoundError:
file_content = {}
log.debug(f"Loading <{file}> content: {file_content}")
if node_name is None:
log.debug(f"Update <{value_name}> with: {value}")
file_content[value_name] = value
elif value_name is None:
log.debug(f"Update <{node_name}> with: {value}")
file_content[node_name] = value
else:
# select primary node name key
node_content = file_content.get(node_name, {})
log.debug(f"Gathered <{node_name}> content: {node_content}")
# update with new value
node_content[value_name] = value
log.debug(f"Update <{value_name}> with: {value}")
# save to file
file_content[node_name] = node_content
write_file(file, value=file_content)
log.debug(f"Update <{file}> with: {file_content}")
|
1617202
|
import os
import pathlib
from os.path import dirname
from os.path import join
from astride.detect import Streak
from astride.utils.logger import Logger
def test(file_path = '/Users/Owner/Desktop/Fits Files/Fits'):
logger = Logger().getLogger()
logger.info('Start.')
module_path = dirname(__file__)
directory = os.fsencode(file_path)
print(directory)
for file in os.listdir(directory):
filename = os.fsdecode(file)
print(filename)
if filename.endswith(".fits"):
            print(os.path.join(file_path, filename))
continue
else:
continue
#1328
# logger.info('Reading file 1328')
# streak = Streak(r'C:\Users\Owner\Desktop\Fits Files\Fits\IMG01328.fits')
# streak.output_path = r'/Users/Owner/Desktop/Fits Files/Pics/IMG01328 '
# logger.info('Detecting...')
# streak.detect()
# logger.info('Output')
# streak.write_outputs()
# streak.plot_figures()
# # 1329
# logger.info('Reading file 1329')
# streak = Streak(r'C:\Users\Owner\Desktop\Fits Files\Fits\IMG01329.fits')
# streak.output_path = '/Users/Owner/Desktop/Fits Files/Pics/IMG01329 '
# logger.info('Detecting...')
# streak.detect()
# logger.info('Output')
# streak.write_outputs()
# streak.plot_figures()
# # 1330
# logger.info('Reading file 1330')
# streak = Streak(r'C:\Users\Owner\Desktop\Fits Files\Fits\IMG01330.fits')
# streak.output_path = '/Users/Owner/Desktop/Fits Files/Pics/IMG01330 '
# logger.info('Detecting...')
# streak.detect()
# logger.info('Output')
# streak.write_outputs()
# streak.plot_figures()
#from astride.utils.outlier import Outlier
#logger.info('Search by Machine Learning..')
#Outlier(streak.raw_borders)
#import sys
#sys.exit()
# logger.info('Save figures and write outputs to %s' %
# streak.output_path)
# streak.write_outputs()
# streak.plot_figures()
# logger.info('Done.')
# logger.handlers = []
if __name__ == '__main__':
test()
|
1617215
|
from elote.competitors.elo import EloCompetitor
from elote.competitors.glicko import GlickoCompetitor
from elote.competitors.ecf import ECFCompetitor
from elote.competitors.dwz import DWZCompetitor
from elote.competitors.ensemble import BlendedCompetitor
from elote.arenas.lambda_arena import LambdaArena
__all__ = [
"EloCompetitor",
"ECFCompetitor",
"DWZCompetitor",
"GlickoCompetitor",
"LambdaArena",
"BlendedCompetitor"
]
|
1617216
|
from django.contrib import admin
from .models import Mirror
class MirrorAdmin(admin.ModelAdmin):
list_display = ['name', 'ip', 'hostname', 'content_url', 'enabled',]
list_editable = ['ip', 'hostname', 'content_url', 'enabled',]
admin.site.register(Mirror, MirrorAdmin)
|
1617249
|
import unittest
import warnings
from pathlib import Path
import pronto
class TestOboJsonExamples(unittest.TestCase):
def setUp(self):
warnings.simplefilter("error")
def tearDown(self):
warnings.simplefilter(warnings.defaultaction)
@staticmethod
def get_path(name):
folder = Path(__file__).parent.parent / "data" / "obographs"
return str(folder / f"{name}.json")
def test_abox(self):
with warnings.catch_warnings():
warnings.simplefilter("ignore", pronto.warnings.NotImplementedWarning)
ont = pronto.Ontology(self.get_path("abox"))
self.assertEqual(len(ont.terms()), 3) # Person (implicit), Male and Female
@unittest.expectedFailure
def test_basic(self):
ont = pronto.Ontology(self.get_path("basic"))
self.assertIn("test manus ontology", ont.metadata.remarks)
self.assertIn("UBERON:0002101", ont)
self.assertIn("UBERON:0002470", ont)
self.assertIn("UBERON:0002102", ont)
self.assertIn("UBERON:0002398", ont)
self.assertIn(ont["UBERON:0002398"], ont["UBERON:0002470"].subclasses().to_set())
self.assertIn(ont["UBERON:0002102"], ont["UBERON:0002101"].subclasses().to_set())
self.assertIn(ont["UBERON:0002102"], ont["UBERON:0002398"].relationships[ont["part_of"]])
def test_equiv_node_set(self):
ont = pronto.Ontology(self.get_path("equivNodeSetTest"))
self.assertIn("DOID:0001816", ont)
self.assertIn("NCIT:C3088", ont)
self.assertIn("Orphanet:263413", ont)
self.assertIn(ont["DOID:0001816"], ont["NCIT:C3088"].equivalent_to)
self.assertIn(ont["NCIT:C3088"], ont["DOID:0001816"].equivalent_to)
self.assertIn(ont["DOID:0001816"], ont["Orphanet:263413"].equivalent_to)
self.assertIn(ont["Orphanet:263413"], ont["DOID:0001816"].equivalent_to)
def test_obsoletion_example(self):
ont = pronto.Ontology(self.get_path("obsoletion_example"))
self.assertIn("X:1", ont)
self.assertIn("X:2", ont)
self.assertIn("Y:1", ont)
self.assertIn("Y:2", ont)
self.assertTrue(ont["X:2"].obsolete)
self.assertIn(ont["X:1"], ont["X:2"].replaced_by)
self.assertTrue(ont["Y:2"].obsolete)
self.assertTrue(ont["Y:1"], ont["Y:2"].replaced_by)
def test_nucleus(self):
ont = pronto.Ontology(self.get_path("equivNodeSetTest"))
|
1617299
|
class ZaloAppInfo:
def __init__(self, app_id, secret_key):
self.app_id = app_id
self.secret_key = secret_key
self.callback_url = None
|
1617312
|
from typing import Callable
from telegram import InlineKeyboardMarkup, InlineKeyboardButton
def paginate(total: int, offset: int, items_per_page: int,
callback_data_generator: Callable[[int], str]) -> InlineKeyboardMarkup:
"""
make `InlineKeyboardMarkup` with "back" / "forward" buttons if appropriate.
:param total: total number of items in a list.
:param offset: current offset, starting at 0.
:param items_per_page: maximum number of items displayed in a single message.
    :param callback_data_generator: function that generates `callback_data` for
        inline buttons that switch to the page with a given offset; it is called
        with the offset (item index) at which the newly opened page should start.
"""
assert offset <= total
assert items_per_page >= 1
row = []
if offset > 0:
row.append(
InlineKeyboardButton(
"\u25c0\ufe0f previous",
callback_data=callback_data_generator(max(0, offset - items_per_page))))
if offset + items_per_page < total:
row.append(
InlineKeyboardButton(
"next \u25b6\ufe0f",
callback_data=callback_data_generator(offset + items_per_page)))
return InlineKeyboardMarkup([row])
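if __name__ == "__main__":
    # usage sketch; the "list_page:" callback-data prefix is only an illustrative
    # convention, not something this module defines
    markup = paginate(
        total=25,
        offset=10,
        items_per_page=10,
        callback_data_generator=lambda new_offset: f"list_page:{new_offset}",
    )
    # with 25 items and offset 10, both a "previous" (offset 0) and a "next" (offset 20)
    # button are produced
    print(markup.to_dict())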
|
1617321
|
from collections import defaultdict
from dataclasses import dataclass, fields
from itertools import chain, filterfalse
from typing import (
Any,
Callable,
Collection,
Iterable,
Iterator,
Mapping,
Optional,
Set,
Tuple,
TypeVar,
)
flatten = chain.from_iterable
_T1 = TypeVar("_T1")
_T2 = TypeVar("_T2")
_Ttype = TypeVar("_Ttype", bound=type)
# adapted from itertools recipe
def unique(iterable: Iterable[_T1]) -> Iterable[_T1]:
"List unique elements, preserving order."
seen: Set[_T1] = set()
seen_add = seen.add
for element in filterfalse(seen.__contains__, iterable):
seen_add(element)
yield element
def groupby(
it: Iterable[_T1], *, key: Callable[[_T1], _T2]
) -> Mapping[_T2, Collection[_T1]]:
"Group items into a dict by key"
grouped = defaultdict(list)
for i in it:
grouped[key(i)].append(i)
return grouped
@dataclass(frozen=True, repr=False)
class compose:
"Funtion composition"
__slots__ = ("_functions",)
_functions: Tuple[Callable[[Any], Any], ...]
def __init__(self, *functions: Any) -> None:
object.__setattr__(self, "_functions", functions)
def __call__(self, value: Any) -> Any:
for f in reversed(self._functions):
value = f(value)
return value
# I'd like for this to be a proper Protocol,
# but mypy won't allow `object` to be recognized as such.
# So let's just go with object.
SupportsBool = object
Predicate = Callable[[_T1], SupportsBool]
def both(__a: Predicate[_T1], __b: Predicate[_T1]) -> Predicate[_T1]:
return lambda x: __a(x) and __b(x)
def map_optional(
f: Callable[[_T1], Optional[_T2]], it: Iterable[_T1]
) -> Iterator[_T2]:
return filterfalse(_is_none, map(f, it)) # type: ignore
def _is_none(x: object) -> bool:
return x is None
# From https://github.com/ericvsmith/dataclasses/blob/master/dataclass_tools.py
# License: https://github.com/ericvsmith/dataclasses/blob/master/LICENSE.txt
# Changed only `dataclass.fields` naming
def add_slots(cls: _Ttype) -> _Ttype: # pragma: no cover
# Need to create a new class, since we can't set __slots__
# after a class has been created.
# Make sure __slots__ isn't already set.
if "__slots__" in cls.__dict__:
raise TypeError(f"{cls.__name__} already specifies __slots__")
# Create a new dict for our new class.
cls_dict = dict(cls.__dict__)
field_names = tuple(f.name for f in fields(cls))
cls_dict["__slots__"] = field_names
for field_name in field_names:
# Remove our attributes, if present. They'll still be
# available in _MARKER.
cls_dict.pop(field_name, None)
# Remove __dict__ itself.
cls_dict.pop("__dict__", None)
# And finally create the class.
qualname = getattr(cls, "__qualname__", None)
cls = type(cls)(cls.__name__, cls.__bases__, cls_dict)
if qualname is not None:
cls.__qualname__ = qualname
return cls
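if __name__ == "__main__":  # pragma: no cover
    # small demonstration of the helpers above on made-up data
    print(list(unique([3, 1, 3, 2, 1])))  # [3, 1, 2]
    print(dict(groupby(["apple", "avocado", "beet"], key=lambda word: word[0])))
    # {'a': ['apple', 'avocado'], 'b': ['beet']}
    add_one_then_double = compose(lambda x: 2 * x, lambda x: x + 1)
    print(add_one_then_double(3))  # 8 -- composed functions apply right to left
    print(list(map_optional(lambda x: x if x > 0 else None, [-1, 2, 0, 3])))  # [2, 3]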
|
1617324
|
from django.db import models
class Resource(models.Model):
"""Model representing a Resource that contains the resource's name, description
and an external link to the resource. All the fields are required."""
name = models.CharField(max_length=50)
link = models.URLField(max_length=200)
description = models.TextField(max_length=400, default="")
def __str__(self):
return self.name
|
1617326
|
from humpday.objectives.classic import CLASSIC_OBJECTIVES
import logging
import numpy as np
import math
import warnings
try:
from hebo.design_space.design_space import DesignSpace
from hebo.optimizers.hebo import HEBO
using_hebo = True
except ImportError:
using_hebo = False
if using_hebo:
logging.getLogger('hebo').setLevel(logging.ERROR)
def hebo_cube_factory(objective, n_trials, n_dim, with_count,n_suggestions=5):
global feval_count
feval_count = 0
variables = [{'name': 'u' + str(i), 'type': 'num', 'lb': 0., 'ub': 1.} for i in range(n_dim)]
space = DesignSpace().parse(variables)
opt = HEBO(space)
def _objective(params) -> np.ndarray:
global feval_count
feval_count += len(params.index)
return np.array([ objective(ui) for ui in params.values ])
n_batches = int(math.floor(n_trials/n_suggestions))
n_remainder = n_trials - n_suggestions*n_batches
for i in range(n_batches):
rec = opt.suggest(n_suggestions=n_suggestions) # <-- don't change this
opt.observe(rec, _objective(rec))
for i in range(n_remainder):
rec = opt.suggest(n_suggestions=1) # <-- don't change this
opt.observe(rec, _objective(rec))
best_val = opt.y.min()
best_ndx = np.argmin([y[0] for y in opt.y]) # I mean seriously, why make the user do this?
best_x = list(opt.X.values[best_ndx])
return (best_val, best_x, feval_count) if with_count else (best_val, best_x)
def hebo_sequential_cube(objective, n_trials, n_dim, with_count):
return hebo_cube_factory(objective=objective, n_trials=n_trials, n_dim=n_dim, with_count=with_count, n_suggestions=1)
def hebo_batch_cube(objective, n_trials, n_dim, with_count):
return hebo_cube_factory(objective=objective, n_trials=n_trials, n_dim=n_dim, with_count=with_count, n_suggestions=10)
HEBO_OPTIMIZERS = [hebo_sequential_cube, hebo_batch_cube]
else:
HEBO_OPTIMIZERS = []
if __name__=='__main__':
for objective in CLASSIC_OBJECTIVES:
print(' ')
print(objective.__name__)
import time
for optimizer in HEBO_OPTIMIZERS:
print(optimizer.__name__+'...')
st = time.time()
print((optimizer(objective, n_trials=12, n_dim=4, with_count=True)))
print(' ... took '+str(time.time()-st)+' seconds.')
|
1617330
|
def exchange_sort(numbers: list[int]) -> list[int]:
"""
Uses exchange sort to sort a list of numbers.
Source: https://en.wikipedia.org/wiki/Sorting_algorithm#Exchange_sort
>>> exchange_sort([5, 4, 3, 2, 1])
[1, 2, 3, 4, 5]
>>> exchange_sort([-1, -2, -3])
[-3, -2, -1]
>>> exchange_sort([1, 2, 3, 4, 5])
[1, 2, 3, 4, 5]
>>> exchange_sort([0, 10, -2, 5, 3])
[-2, 0, 3, 5, 10]
>>> exchange_sort([])
[]
"""
numbers_length = len(numbers)
for i in range(numbers_length):
for j in range(i + 1, numbers_length):
if numbers[j] < numbers[i]:
numbers[i], numbers[j] = numbers[j], numbers[i]
return numbers
if __name__ == "__main__":
user_input = input("Enter numbers separated by a comma:\n").strip()
unsorted = [int(item) for item in user_input.split(",")]
print(exchange_sort(unsorted))
|
1617335
|
import pickle
import numpy as np
from tqdm.auto import tqdm
import moses
from moses import CharVocab
class NGram:
def __init__(self, max_context_len=10, verbose=False):
self.max_context_len = max_context_len
self._dict = dict()
self.vocab = None
self.default_probs = None
self.zero_probs = None
self.verbose = verbose
def fit(self, data):
self.vocab = CharVocab.from_data(data)
self.default_probs = np.hstack([np.ones(len(self.vocab)-4),
np.array([0., 1., 0., 0.])])
self.zero_probs = np.zeros(len(self.vocab))
if self.verbose:
print('fitting...')
data = tqdm(data, total=len(data))
for line in data:
t_line = tuple(self.vocab.string2ids(line, True, True))
for i in range(len(t_line)):
for shift in range(self.max_context_len):
if i + shift + 1 >= len(t_line):
break
context = t_line[i:i+shift+1]
cid = t_line[i+shift+1]
probs = self._dict.get(context, self.zero_probs.copy())
probs[cid] += 1.
self._dict[context] = probs
def fit_update(self, data):
if self.verbose:
print('fitting...')
data = tqdm(data, total=len(data))
for line in data:
t_line = tuple(self.vocab.string2ids(line, True, True))
for i in range(len(t_line)):
for shift in range(self.max_context_len):
if i + shift + 1 >= len(t_line):
break
context = t_line[i:i+shift+1]
cid = t_line[i+shift+1]
probs = self._dict.get(context, self.zero_probs.copy())
probs[cid] += 1.
self._dict[context] = probs
def generate_one(self, l_smooth=0.01, context_len=None, max_len=100):
if self.vocab is None:
raise RuntimeError('Error: Fit the model before generating')
if context_len is None:
context_len = self.max_context_len
elif context_len <= 0 or context_len > self.max_context_len:
context_len = self.max_context_len
res = [self.vocab.bos]
while res[-1] != self.vocab.eos and len(res) < max_len:
begin_index = max(len(res)-context_len, 0)
context = tuple(res[begin_index:])
while context not in self._dict:
context = context[1:]
probs = self._dict[context]
smoothed = probs + self.default_probs*l_smooth
normed = smoothed / smoothed.sum()
next_symbol = np.random.choice(len(self.vocab), p=normed)
res.append(next_symbol)
return self.vocab.ids2string(res)
def nll(self, smiles, l_smooth=0.01, context_len=None):
if self.vocab is None:
raise RuntimeError('Error: model is not trained')
if context_len is None:
context_len = self.max_context_len
elif context_len <= 0 or context_len > self.max_context_len:
context_len = self.max_context_len
tokens = tuple(self.vocab.string2ids(smiles, True, True))
likelihood = 0.
for i in range(1, len(tokens)):
begin_index = max(i-context_len, 0)
context = tokens[begin_index:i]
while context not in self._dict:
context = context[1:]
probs = self._dict[context] + self.default_probs
normed = probs / probs.sum()
prob = normed[tokens[i]]
if prob == 0.:
return np.inf
likelihood -= np.log(prob)
return likelihood
def generate(self, n, l_smooth=0.01, context_len=None, max_len=100):
generator = (self.generate_one(l_smooth,
context_len,
max_len) for i in range(n))
if self.verbose:
print('generating...')
generator = tqdm(generator, total=n)
return list(generator)
def save(self, path):
"""
Saves a model using pickle
Arguments:
path: path to .pkl file for saving
"""
if self.vocab is None:
raise RuntimeError("Can't save empty model."
" Fit the model first")
data = {
'_dict': self._dict,
'vocab': self.vocab,
'default_probs': self.default_probs,
'zero_probs': self.zero_probs,
'max_context_len': self.max_context_len
}
with open(path, 'wb') as f:
pickle.dump(data, f)
@classmethod
def load(cls, path):
"""
Loads saved model
Arguments:
path: path to saved .pkl file
Returns:
Loaded NGramGenerator
"""
with open(path, "rb") as f:
data = pickle.load(f)
model = cls()
model._dict = data['_dict']
model.vocab = data['vocab']
model.default_probs = data['default_probs']
model.zero_probs = data['zero_probs']
model.max_context_len = data['max_context_len']
return model
def reproduce(seed, samples_path=None, metrics_path=None,
n_jobs=1, device='cpu', verbose=False,
samples=30000):
data = moses.get_dataset('train')
model = NGram(10, verbose=verbose)
model.fit(data)
np.random.seed(seed)
smiles = model.generate(samples, l_smooth=0.01)
metrics = moses.get_all_metrics(smiles, n_jobs=n_jobs, device=device)
if samples_path is not None:
with open(samples_path, 'w') as out:
out.write('SMILES\n')
for s in smiles:
out.write(s+'\n')
if metrics_path is not None:
with open(metrics_path, 'w') as out:
for key, value in metrics.items():
out.write("%s,%f\n" % (key, value))
return smiles, metrics
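if __name__ == '__main__':
    # minimal usage sketch on a handful of made-up SMILES strings; `reproduce` above
    # runs the same steps against the full MOSES training set instead
    toy_data = ['CCO', 'CCN', 'CCC', 'CCCl']
    model = NGram(max_context_len=3)
    model.fit(toy_data)
    print(model.generate(2, l_smooth=0.01))
    print(model.nll(toy_data[0]))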
|
1617336
|
from django.core.exceptions import ValidationError
from django.db.models import Q
from django.forms import widgets
from django.forms.fields import BooleanField, ChoiceField, MultipleChoiceField
from django.utils.safestring import mark_safe
from django.utils.text import format_lazy
from django.utils.translation import ngettext_lazy, gettext_lazy as _
from cms.plugin_pool import plugin_pool
from entangled.forms import EntangledModelFormMixin
from cmsplugin_cascade import app_settings
from cmsplugin_cascade.bootstrap4.grid import Breakpoint
from cmsplugin_cascade.forms import ManageChildrenFormMixin
from .plugin_base import BootstrapPluginBase
from . import grid
def get_widget_choices():
breakpoints = app_settings.CMSPLUGIN_CASCADE['bootstrap4']['fluid_bounds']
widget_choices = []
for index, (bp, bound) in enumerate(breakpoints.items()):
if index == 0:
widget_choices.append((bp.name, "{} (<{:.1f}px)".format(bp.label, bound.max)))
elif index == len(breakpoints) - 1:
widget_choices.append((bp.name, "{} (≥{:.1f}px)".format(bp.label, bound.min)))
else:
widget_choices.append((bp.name, "{} (≥{:.1f}px and <{:.1f}px)".format(bp.label, bound.min, bound.max)))
return widget_choices
class ContainerBreakpointsWidget(widgets.CheckboxSelectMultiple):
template_name = 'cascade/admin/widgets/container_breakpoints.html'
class ContainerFormMixin(EntangledModelFormMixin):
breakpoints = MultipleChoiceField(
label=_('Available Breakpoints'),
choices=get_widget_choices(),
widget=ContainerBreakpointsWidget(choices=get_widget_choices()),
initial=[bp.name for bp in app_settings.CMSPLUGIN_CASCADE['bootstrap4']['fluid_bounds'].keys()],
help_text=_("Supported display widths for Bootstrap's grid system."),
)
fluid = BooleanField(
label=_('Fluid Container'),
initial=False,
required=False,
help_text=_("Changing your outermost '.container' to '.container-fluid'.")
)
class Meta:
entangled_fields = {'glossary': ['breakpoints', 'fluid']}
    def clean_breakpoints(self):
        if len(self.cleaned_data['breakpoints']) == 0:
            raise ValidationError(_("At least one breakpoint must be selected."))
        return self.cleaned_data['breakpoints']
class ContainerGridMixin:
def get_grid_instance(self):
fluid = self.glossary.get('fluid', False)
try:
breakpoints = [getattr(grid.Breakpoint, bp) for bp in self.glossary['breakpoints']]
except KeyError:
breakpoints = [bp for bp in grid.Breakpoint]
if fluid:
bounds = dict((bp, grid.fluid_bounds[bp]) for bp in breakpoints)
else:
bounds = dict((bp, grid.default_bounds[bp]) for bp in breakpoints)
return grid.Bootstrap4Container(bounds=bounds)
class BootstrapContainerPlugin(BootstrapPluginBase):
name = _("Container")
parent_classes = None
require_parent = False
model_mixins = (ContainerGridMixin,)
form = ContainerFormMixin
footnote_html = """<p>
For more information about the Container please read the
<a href="https://getbootstrap.com/docs/4.3/layout/overview/#containers" target="_new">Bootstrap documentation</a>.
</p>"""
@classmethod
def get_identifier(cls, obj):
breakpoints = obj.glossary.get('breakpoints')
content = obj.glossary.get('fluid') and '(fluid) ' or ''
if breakpoints:
BREAKPOINTS = app_settings.CMSPLUGIN_CASCADE['bootstrap4']['fluid_bounds']
devices = ', '.join([str(bp.label) for bp in BREAKPOINTS if bp.name in breakpoints])
content = _("{0}for {1}").format(content, devices)
return mark_safe(content)
@classmethod
def get_css_classes(cls, obj):
css_classes = cls.super(BootstrapContainerPlugin, cls).get_css_classes(obj)
if obj.glossary.get('fluid'):
css_classes.append('container-fluid')
else:
css_classes.append('container')
return css_classes
def save_model(self, request, obj, form, change):
super().save_model(request, obj, form, change)
obj.sanitize_children()
plugin_pool.register_plugin(BootstrapContainerPlugin)
class BootstrapRowFormMixin(ManageChildrenFormMixin, EntangledModelFormMixin):
"""
Form class to add non-materialized field to count the number of children.
"""
ROW_NUM_COLUMNS = [1, 2, 3, 4, 6, 12]
num_children = ChoiceField(
label=_('Columns'),
choices=[(i, ngettext_lazy('{0} column', '{0} columns', i).format(i)) for i in ROW_NUM_COLUMNS],
initial=3,
help_text=_('Number of columns to be created with this row.'),
)
class Meta:
untangled_fields = ['num_children']
class RowGridMixin:
def get_grid_instance(self):
row = grid.Bootstrap4Row()
query = Q(plugin_type='BootstrapContainerPlugin') | Q(plugin_type='BootstrapColumnPlugin') \
| Q(plugin_type='BootstrapJumbotronPlugin')
container = self.get_ancestors().order_by('depth').filter(query).last().get_bound_plugin().get_grid_instance()
container.add_row(row)
return row
class BootstrapRowPlugin(BootstrapPluginBase):
name = _("Row")
default_css_class = 'row'
parent_classes = ['BootstrapContainerPlugin', 'BootstrapColumnPlugin', 'BootstrapJumbotronPlugin']
model_mixins = (RowGridMixin,)
form = BootstrapRowFormMixin
@classmethod
def get_identifier(cls, obj):
num_cols = obj.get_num_children()
content = ngettext_lazy("with {0} column", "with {0} columns", num_cols).format(num_cols)
return mark_safe(content)
def save_model(self, request, obj, form, change):
wanted_children = int(form.cleaned_data.get('num_children'))
super().save_model(request, obj, form, change)
child_glossary = {'xs-column-width': 'col'}
self.extend_children(obj, wanted_children, BootstrapColumnPlugin, child_glossary=child_glossary)
plugin_pool.register_plugin(BootstrapRowPlugin)
class ColumnGridMixin:
    valid_keys = ['xs-column-width', 'sm-column-width', 'md-column-width', 'lg-column-width', 'xl-column-width',
                  'xs-column-offset', 'sm-column-offset', 'md-column-offset', 'lg-column-offset', 'xl-column-offset']
def get_grid_instance(self):
column = None
query = Q(plugin_type='BootstrapRowPlugin')
row_obj = self.get_ancestors().order_by('depth').filter(query).last().get_bound_plugin()
# column_siblings = row_obj.get_descendants().order_by('depth').filter(plugin_type='BootstrapColumnPlugin')
row = row_obj.get_grid_instance()
for column_sibling in self.get_siblings():
classes = [val for key, val in column_sibling.get_bound_plugin().glossary.items()
if key in self.valid_keys and val]
if column_sibling.pk == self.pk:
column = grid.Bootstrap4Column(classes)
row.add_column(column)
else:
row.add_column(grid.Bootstrap4Column(classes))
return column
class BootstrapColumnPlugin(BootstrapPluginBase):
name = _("Column")
parent_classes = ['BootstrapRowPlugin']
child_classes = ['BootstrapJumbotronPlugin']
alien_child_classes = True
default_css_attributes = [fmt.format(bp.name) for bp in grid.Breakpoint
for fmt in ('{}-column-width', '{}-column-offset', '{}-column-ordering', '{}-responsive-utils')]
model_mixins = (ColumnGridMixin,)
def get_form(self, request, obj=None, **kwargs):
def choose_help_text(*phrases):
bounds = 'fluid_bounds' if container.glossary.get('fluid') else 'default_bounds'
bs4_breakpoints = app_settings.CMSPLUGIN_CASCADE['bootstrap4'][bounds]
if last:
return phrases[0].format(bs4_breakpoints[last].max)
elif len(breakpoints) > 1:
return phrases[1].format(bs4_breakpoints[first].min)
else:
return phrases[2]
        if 'parent' in self._cms_initial_attributes:
            container = self._cms_initial_attributes['parent'].get_ancestors().order_by('depth').last().get_bound_plugin()
        else:
            containers = obj.get_ancestors().filter(plugin_type='BootstrapContainerPlugin')
            if containers:
                container = containers.order_by('depth').last().get_bound_plugin()
            else:
                jumbotrons = obj.get_ancestors().filter(plugin_type='BootstrapJumbotronPlugin')
                container = jumbotrons.order_by('depth').last().get_bound_plugin()
breakpoints = container.glossary['breakpoints']
width_fields, offset_fields, reorder_fields, responsive_fields = {}, {}, {}, {}
units = [ngettext_lazy("{} unit", "{} units", i).format(i) for i in range(0, 13)]
for bp in breakpoints:
try:
                last = getattr(grid.Breakpoint, breakpoints[breakpoints.index(bp) + 1])
except IndexError:
last = None
finally:
first = getattr(grid.Breakpoint, bp)
devices = ', '.join([str(b.label) for b in grid.Breakpoint.range(first, last)])
if bp == 'xs':
choices = [('col', _("Flex column"))]
choices.extend(('col-{}'.format(i), _("{} fixed column").format(units[i])) for i in range(1, 13))
choices.append(('col-auto', _("Auto column")))
else:
choices = [('col-{}'.format(bp), _("Flex column"))]
choices.extend(('col-{}-{}'.format(bp, i), _("{} fixed column").format(units[i])) for i in range(1, 13))
choices.append(('col-{}-auto'.format(bp), _("Auto column")))
if breakpoints.index(bp) == 0:
# first breakpoint
field_name = '{}-column-width'.format(bp)
width_fields[field_name] = ChoiceField(
choices=choices,
label=_("Column width for {}").format(devices),
initial='col' if bp == 'xs' else 'col-{}'.format(bp),
help_text=choose_help_text(
_("Column width for devices narrower than {:.1f} pixels."),
_("Column width for devices wider than {:.1f} pixels."),
_("Column width for all devices."),
)
)
else:
# wider breakpoints may inherit from next narrower ones
choices.insert(0, ('', format_lazy(_("Inherit column width from {}"), previous_devices)))
field_name = '{}-column-width'.format(bp)
width_fields[field_name] = ChoiceField(
choices=choices,
label=_("Column width for {}").format(devices),
initial='',
required=False,
help_text=choose_help_text(
_("Override column width for devices narrower than {:.1f} pixels."),
_("Override column width for devices wider than {:.1f} pixels."),
_("Override column width for all devices."),
)
)
previous_devices = devices
# handle offset
if breakpoints.index(bp) == 0:
choices = [('', _("No offset"))]
offset_range = range(1, 13)
else:
choices = [('', format_lazy(_("Inherit offset from {}"), previous_label))]
offset_range = range(0, 13)
                previous_label = grid.Breakpoint[bp].label
if bp == 'xs':
choices.extend(('offset-{}'.format(i), units[i]) for i in offset_range)
else:
choices.extend(('offset-{}-{}'.format(bp, i), units[i]) for i in offset_range)
label = _("Offset for {}").format(devices)
help_text = choose_help_text(
_("Offset width for devices narrower than {:.1f} pixels."),
_("Offset width for devices wider than {:.1f} pixels."),
_("Offset width for all devices.")
)
field_name = '{}-column-offset'.format(bp)
offset_fields[field_name] = ChoiceField(
choices=choices,
label=label,
required=False,
help_text=help_text,
)
# handle column reordering
choices = [('', _("No reordering"))]
if bp == 'xs':
choices.extend(('order-{}'.format(i), _("Reorder by {}").format(units[i])) for i in range(1, 13))
else:
choices.extend(('order-{}-{}'.format(bp, i), _("Reorder by {}").format(units[i])) for i in range(1, 13))
label = _("Reordering for {}").format(devices)
help_text = choose_help_text(
_("Reordering for devices narrower than {:.1f} pixels."),
_("Reordering for devices wider than {:.1f} pixels."),
_("Reordering for all devices.")
)
field_name = '{}-column-ordering'.format(bp)
reorder_fields[field_name] = ChoiceField(
choices=choices,
label=label,
required=False,
help_text=help_text,
)
# handle responsive utilities
choices = [('', _("Default")), ('visible-{}'.format(bp), _("Visible")), ('hidden-{}'.format(bp), _("Hidden"))]
label = _("Responsive utilities for {}").format(devices)
help_text = choose_help_text(
_("Utility classes for showing and hiding content by devices narrower than {:.1f} pixels."),
_("Utility classes for showing and hiding content by devices wider than {:.1f} pixels."),
_("Utility classes for showing and hiding content for all devices.")
)
field_name = '{}-responsive-utils'.format(bp)
responsive_fields[field_name] = ChoiceField(
choices=choices,
label=label,
initial='',
widget=widgets.RadioSelect,
required=False,
help_text=help_text,
)
glossary_fields = list(width_fields.keys())
glossary_fields.extend(offset_fields.keys())
glossary_fields.extend(reorder_fields.keys())
glossary_fields.extend(responsive_fields.keys())
class Meta:
entangled_fields = {'glossary': glossary_fields}
attrs = dict(width_fields, **offset_fields, **reorder_fields, **responsive_fields, Meta=Meta)
kwargs['form'] = type('ColumnForm', (EntangledModelFormMixin,), attrs)
return super().get_form(request, obj, **kwargs)
def save_model(self, request, obj, form, change):
super().save_model(request, obj, form, change)
obj.sanitize_children()
@classmethod
def sanitize_model(cls, obj):
sanitized = super().sanitize_model(obj)
return sanitized
@classmethod
def get_identifier(cls, obj):
glossary = obj.get_complete_glossary()
widths = []
for bp in glossary.get('breakpoints', []):
width = obj.glossary.get('{0}-column-width'.format(bp), '').replace('col-{0}-'.format(bp), '')
if width:
widths.append(width)
if len(widths) > 0:
content = _("widths: {}").format(' / '.join(widths))
else:
content = _("unknown width")
return mark_safe(content)
plugin_pool.register_plugin(BootstrapColumnPlugin)
|
1617391
|
class GeneralPogoException(Exception):
"""Throw an exception that moves up to the start, and reboots"""
|
1617423
|
from __future__ import absolute_import
from . import data
from . import data_augmentation
from . import data_normalization
from . import iterator
from . import standardizer
from . import tta
from . import preprocessor
|
1617494
|
import tensorflow as tf
import pickle
from models.nets.CPM import CPM
class CPM_Model(CPM):
def __init__(self, input_size, heatmap_size, stages, joints, img_type='RGB', is_training=True):
self.stages = stages
self.stage_heatmap = []
self.stage_loss = [0 for _ in range(stages)]
self.total_loss = 0
self.input_image = None
self.center_map = None
self.gt_heatmap = None
self.init_lr = 0
self.merged_summary = None
self.joints = joints
self.batch_size = 0
self.inference_type = 'Train'
if img_type == 'RGB':
self.input_images = tf.placeholder(dtype=tf.float32,
shape=(None, input_size, input_size, 3),
name='input_placeholder')
elif img_type == 'GRAY':
self.input_images = tf.placeholder(dtype=tf.float32,
shape=(None, input_size, input_size, 1),
name='input_placeholder')
self.cmap_placeholder = tf.placeholder(dtype=tf.float32,
shape=(None, input_size, input_size, 1),
name='cmap_placeholder')
self.gt_hmap_placeholder = tf.placeholder(dtype=tf.float32,
shape=(None, heatmap_size, heatmap_size, joints + 1),
name='gt_hmap_placeholder')
self._build_model()
def _build_model(self):
with tf.variable_scope('pooled_center_map'):
self.center_map = tf.layers.average_pooling2d(inputs=self.cmap_placeholder,
pool_size=[9, 9],
strides=[8, 8],
padding='same',
name='center_map')
with tf.variable_scope('sub_stages'):
sub_conv1 = tf.layers.conv2d(inputs=self.input_images,
filters=64,
kernel_size=[3, 3],
strides=[1, 1],
padding='same',
activation=tf.nn.relu,
kernel_initializer=tf.contrib.layers.xavier_initializer(),
name='sub_conv1')
sub_conv2 = tf.layers.conv2d(inputs=sub_conv1,
filters=64,
kernel_size=[3, 3],
strides=[1, 1],
padding='same',
activation=tf.nn.relu,
kernel_initializer=tf.contrib.layers.xavier_initializer(),
name='sub_conv2')
sub_pool1 = tf.layers.max_pooling2d(inputs=sub_conv2,
pool_size=[2, 2],
strides=2,
padding='valid',
name='sub_pool1')
sub_conv3 = tf.layers.conv2d(inputs=sub_pool1,
filters=128,
kernel_size=[3, 3],
strides=[1, 1],
padding='same',
activation=tf.nn.relu,
kernel_initializer=tf.contrib.layers.xavier_initializer(),
name='sub_conv3')
sub_conv4 = tf.layers.conv2d(inputs=sub_conv3,
filters=128,
kernel_size=[3, 3],
strides=[1, 1],
padding='same',
activation=tf.nn.relu,
kernel_initializer=tf.contrib.layers.xavier_initializer(),
name='sub_conv4')
sub_pool2 = tf.layers.max_pooling2d(inputs=sub_conv4,
pool_size=[2, 2],
strides=2,
padding='valid',
name='sub_pool2')
sub_conv5 = tf.layers.conv2d(inputs=sub_pool2,
filters=256,
kernel_size=[3, 3],
strides=[1, 1],
padding='same',
activation=tf.nn.relu,
kernel_initializer=tf.contrib.layers.xavier_initializer(),
name='sub_conv5')
sub_conv6 = tf.layers.conv2d(inputs=sub_conv5,
filters=256,
kernel_size=[3, 3],
strides=[1, 1],
padding='same',
activation=tf.nn.relu,
kernel_initializer=tf.contrib.layers.xavier_initializer(),
name='sub_conv6')
sub_conv7 = tf.layers.conv2d(inputs=sub_conv6,
filters=256,
kernel_size=[3, 3],
strides=[1, 1],
padding='same',
activation=tf.nn.relu,
kernel_initializer=tf.contrib.layers.xavier_initializer(),
name='sub_conv7')
sub_conv8 = tf.layers.conv2d(inputs=sub_conv7,
filters=256,
kernel_size=[3, 3],
strides=[1, 1],
padding='same',
activation=tf.nn.relu,
kernel_initializer=tf.contrib.layers.xavier_initializer(),
name='sub_conv8')
sub_pool3 = tf.layers.max_pooling2d(inputs=sub_conv8,
pool_size=[2, 2],
strides=2,
padding='valid',
name='sub_pool3')
sub_conv9 = tf.layers.conv2d(inputs=sub_pool3,
filters=512,
kernel_size=[3, 3],
strides=[1, 1],
padding='same',
activation=tf.nn.relu,
kernel_initializer=tf.contrib.layers.xavier_initializer(),
name='sub_conv9')
sub_conv10 = tf.layers.conv2d(inputs=sub_conv9,
filters=512,
kernel_size=[3, 3],
strides=[1, 1],
padding='same',
activation=tf.nn.relu,
kernel_initializer=tf.contrib.layers.xavier_initializer(),
name='sub_conv10')
sub_conv11 = tf.layers.conv2d(inputs=sub_conv10,
filters=512,
kernel_size=[3, 3],
strides=[1, 1],
padding='same',
activation=tf.nn.relu,
kernel_initializer=tf.contrib.layers.xavier_initializer(),
name='sub_conv11')
sub_conv12 = tf.layers.conv2d(inputs=sub_conv11,
filters=512,
kernel_size=[3, 3],
strides=[1, 1],
padding='same',
activation=tf.nn.relu,
kernel_initializer=tf.contrib.layers.xavier_initializer(),
name='sub_conv12')
sub_conv13 = tf.layers.conv2d(inputs=sub_conv12,
filters=512,
kernel_size=[3, 3],
strides=[1, 1],
padding='same',
activation=tf.nn.relu,
kernel_initializer=tf.contrib.layers.xavier_initializer(),
name='sub_conv13')
sub_conv14 = tf.layers.conv2d(inputs=sub_conv13,
filters=512,
kernel_size=[3, 3],
strides=[1, 1],
padding='same',
activation=tf.nn.relu,
kernel_initializer=tf.contrib.layers.xavier_initializer(),
name='sub_conv14')
self.sub_stage_img_feature = tf.layers.conv2d(inputs=sub_conv14,
filters=128,
kernel_size=[3, 3],
strides=[1, 1],
padding='same',
activation=tf.nn.relu,
kernel_initializer=tf.contrib.layers.xavier_initializer(),
name='sub_stage_img_feature')
with tf.variable_scope('stage_1'):
conv1 = tf.layers.conv2d(inputs=self.sub_stage_img_feature,
filters=512,
kernel_size=[1, 1],
strides=[1, 1],
padding='valid',
activation=tf.nn.relu,
kernel_initializer=tf.contrib.layers.xavier_initializer(),
name='conv1')
self.stage_heatmap.append(tf.layers.conv2d(inputs=conv1,
filters=self.joints+1,
kernel_size=[1, 1],
strides=[1, 1],
padding='valid',
activation=None,
kernel_initializer=tf.contrib.layers.xavier_initializer(),
name='stage_heatmap'))
for stage in range(2, self.stages + 1):
self._middle_conv(stage)
def _middle_conv(self, stage):
with tf.variable_scope('stage_' + str(stage)):
self.current_featuremap = tf.concat([self.stage_heatmap[stage - 2],
self.sub_stage_img_feature,
# self.center_map],
],
axis=3)
mid_conv1 = tf.layers.conv2d(inputs=self.current_featuremap,
filters=128,
kernel_size=[7, 7],
strides=[1, 1],
padding='same',
activation=tf.nn.relu,
kernel_initializer=tf.contrib.layers.xavier_initializer(),
name='mid_conv1')
mid_conv2 = tf.layers.conv2d(inputs=mid_conv1,
filters=128,
kernel_size=[7, 7],
strides=[1, 1],
padding='same',
activation=tf.nn.relu,
kernel_initializer=tf.contrib.layers.xavier_initializer(),
name='mid_conv2')
mid_conv3 = tf.layers.conv2d(inputs=mid_conv2,
filters=128,
kernel_size=[7, 7],
strides=[1, 1],
padding='same',
activation=tf.nn.relu,
kernel_initializer=tf.contrib.layers.xavier_initializer(),
name='mid_conv3')
mid_conv4 = tf.layers.conv2d(inputs=mid_conv3,
filters=128,
kernel_size=[7, 7],
strides=[1, 1],
padding='same',
activation=tf.nn.relu,
kernel_initializer=tf.contrib.layers.xavier_initializer(),
name='mid_conv4')
mid_conv5 = tf.layers.conv2d(inputs=mid_conv4,
filters=128,
kernel_size=[7, 7],
strides=[1, 1],
padding='same',
activation=tf.nn.relu,
kernel_initializer=tf.contrib.layers.xavier_initializer(),
name='mid_conv5')
mid_conv6 = tf.layers.conv2d(inputs=mid_conv5,
filters=128,
kernel_size=[1, 1],
strides=[1, 1],
padding='valid',
activation=tf.nn.relu,
kernel_initializer=tf.contrib.layers.xavier_initializer(),
name='mid_conv6')
self.current_heatmap = tf.layers.conv2d(inputs=mid_conv6,
filters=self.joints+1,
kernel_size=[1, 1],
strides=[1, 1],
padding='valid',
activation=None,
kernel_initializer=tf.contrib.layers.xavier_initializer(),
name='mid_conv7')
self.stage_heatmap.append(self.current_heatmap)
def build_loss(self, lr, lr_decay_rate, lr_decay_step, optimizer='Adam'):
self.total_loss = 0
self.total_loss_eval = 0
self.init_lr = lr
self.lr_decay_rate = lr_decay_rate
self.lr_decay_step = lr_decay_step
self.optimizer = optimizer
self.batch_size = tf.cast(tf.shape(self.input_images)[0], dtype=tf.float32)
for stage in range(self.stages):
with tf.variable_scope('stage' + str(stage + 1) + '_loss'):
self.stage_loss[stage] = tf.nn.l2_loss(self.stage_heatmap[stage] - self.gt_hmap_placeholder,
name='l2_loss') / self.batch_size
tf.summary.scalar('stage' + str(stage + 1) + '_loss', self.stage_loss[stage])
with tf.variable_scope('total_loss'):
for stage in range(self.stages):
self.total_loss += self.stage_loss[stage]
tf.summary.scalar('total loss train', self.total_loss)
with tf.variable_scope('total_loss_eval'):
for stage in range(self.stages):
self.total_loss_eval += self.stage_loss[stage]
            tf.summary.scalar('total loss eval', self.total_loss_eval)
with tf.variable_scope('train'):
self.global_step = tf.contrib.framework.get_or_create_global_step()
self.cur_lr = tf.train.exponential_decay(self.init_lr,
global_step=self.global_step,
decay_rate=self.lr_decay_rate,
decay_steps=self.lr_decay_step)
tf.summary.scalar('global learning rate', self.cur_lr)
self.train_op = tf.contrib.layers.optimize_loss(loss=self.total_loss,
global_step=self.global_step,
learning_rate=self.cur_lr,
optimizer=self.optimizer)
def load_weights_from_file(self, weight_file_path, sess, finetune=True):
# weight_file_object = open(weight_file_path, 'rb')
weights = pickle.load(open(weight_file_path, 'rb'))#, encoding='latin1')
with tf.variable_scope('', reuse=True):
## Pre stage conv
# conv1
for layer in range(1, 3):
conv_kernel = tf.get_variable('sub_stages/sub_conv' + str(layer) + '/kernel')
conv_bias = tf.get_variable('sub_stages/sub_conv' + str(layer) + '/bias')
loaded_kernel = weights['conv1_' + str(layer)]
loaded_bias = weights['conv1_' + str(layer) + '_b']
sess.run(tf.assign(conv_kernel, loaded_kernel))
sess.run(tf.assign(conv_bias, loaded_bias))
# conv2
for layer in range(1, 3):
conv_kernel = tf.get_variable('sub_stages/sub_conv' + str(layer + 2) + '/kernel')
conv_bias = tf.get_variable('sub_stages/sub_conv' + str(layer + 2) + '/bias')
loaded_kernel = weights['conv2_' + str(layer)]
loaded_bias = weights['conv2_' + str(layer) + '_b']
sess.run(tf.assign(conv_kernel, loaded_kernel))
sess.run(tf.assign(conv_bias, loaded_bias))
# conv3
for layer in range(1, 5):
conv_kernel = tf.get_variable('sub_stages/sub_conv' + str(layer + 4) + '/kernel')
conv_bias = tf.get_variable('sub_stages/sub_conv' + str(layer + 4) + '/bias')
loaded_kernel = weights['conv3_' + str(layer)]
loaded_bias = weights['conv3_' + str(layer) + '_b']
sess.run(tf.assign(conv_kernel, loaded_kernel))
sess.run(tf.assign(conv_bias, loaded_bias))
# conv4
for layer in range(1, 5):
conv_kernel = tf.get_variable('sub_stages/sub_conv' + str(layer + 8) + '/kernel')
conv_bias = tf.get_variable('sub_stages/sub_conv' + str(layer + 8) + '/bias')
loaded_kernel = weights['conv4_' + str(layer)]
loaded_bias = weights['conv4_' + str(layer) + '_b']
sess.run(tf.assign(conv_kernel, loaded_kernel))
sess.run(tf.assign(conv_bias, loaded_bias))
# conv5
for layer in range(1, 3):
conv_kernel = tf.get_variable('sub_stages/sub_conv' + str(layer + 12) + '/kernel')
conv_bias = tf.get_variable('sub_stages/sub_conv' + str(layer + 12) + '/bias')
loaded_kernel = weights['conv5_' + str(layer)]
loaded_bias = weights['conv5_' + str(layer) + '_b']
sess.run(tf.assign(conv_kernel, loaded_kernel))
sess.run(tf.assign(conv_bias, loaded_bias))
# conv5_3_CPM
conv_kernel = tf.get_variable('sub_stages/sub_stage_img_feature/kernel')
conv_bias = tf.get_variable('sub_stages/sub_stage_img_feature/bias')
loaded_kernel = weights['conv5_3_CPM']
loaded_bias = weights['conv5_3_CPM_b']
sess.run(tf.assign(conv_kernel, loaded_kernel))
sess.run(tf.assign(conv_bias, loaded_bias))
## stage 1
conv_kernel = tf.get_variable('stage_1/conv1/kernel')
conv_bias = tf.get_variable('stage_1/conv1/bias')
loaded_kernel = weights['conv6_1_CPM']
loaded_bias = weights['conv6_1_CPM_b']
sess.run(tf.assign(conv_kernel, loaded_kernel))
sess.run(tf.assign(conv_bias, loaded_bias))
            if not finetune:
conv_kernel = tf.get_variable('stage_1/stage_heatmap/kernel')
conv_bias = tf.get_variable('stage_1/stage_heatmap/bias')
loaded_kernel = weights['conv6_2_CPM']
loaded_bias = weights['conv6_2_CPM_b']
sess.run(tf.assign(conv_kernel, loaded_kernel))
sess.run(tf.assign(conv_bias, loaded_bias))
## Stage 2 and behind
for stage in range(2, self.stages + 1):
for layer in range(1, 8):
conv_kernel = tf.get_variable('stage_' + str(stage) + '/mid_conv' + str(layer) + '/kernel')
conv_bias = tf.get_variable('stage_' + str(stage) + '/mid_conv' + str(layer) + '/bias')
loaded_kernel = weights['Mconv' + str(layer) + '_stage' + str(stage)]
loaded_bias = weights['Mconv' + str(layer) + '_stage' + str(stage) + '_b']
sess.run(tf.assign(conv_kernel, loaded_kernel))
sess.run(tf.assign(conv_bias, loaded_bias))
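# Minimal usage sketch (not part of the original module). The hyper-parameter
# values below are illustrative placeholders, not the ones used upstream.
if __name__ == "__main__":
    model = CPM_Model(input_size=368, heatmap_size=46, stages=6, joints=21)
    model.build_loss(lr=1e-4, lr_decay_rate=0.9, lr_decay_step=1000)
    print('Built {} stages, total_loss tensor: {}'.format(model.stages, model.total_loss))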
|
1617507
|
import os
from itertools import product
from typing import Dict, List, Optional, Tuple, Union
import torch
from torch import Tensor
from torch.nn.functional import log_softmax
from torch.nn.utils.rnn import PackedSequence, pack_padded_sequence
from transformers import PreTrainedTokenizer
from diagnnose.activations.selection_funcs import final_sen_token
from diagnnose.attribute import ShapleyTensor
from diagnnose.corpus import Corpus
from diagnnose.extract import Extractor
from diagnnose.models import LanguageModel
from diagnnose.typedefs.activations import (
ActivationDict,
ActivationName,
ActivationNames,
)
from diagnnose.utils import __file__ as diagnnose_utils_init
from diagnnose.utils.misc import suppress_print
from diagnnose.utils.pickle import load_pickle
class RecurrentLM(LanguageModel):
"""Base class for RNN LM with intermediate activations.
This class contains all the base logic (including forward passes)
for LSTM-type LMs, except for loading in the weights of a specific
model.
"""
is_causal: bool = True
forget_offset: int = 0
ih_concat_order: List[str] = ["h", "i"]
split_order: List[str]
use_char_embs: bool = False
use_peepholes: bool = False
init_states: ActivationDict = {}
def __init__(self, device: str = "cpu"):
super().__init__(device)
# layer index -> layer weights
self.weight: Dict[int, Tensor] = {}
self.bias: Dict[int, Tensor] = {}
# Projects cell state dimension (8192) back to hidden dimension (1024)
self.weight_P: Dict[int, Tensor] = {}
# The 3 peepholes are weighted by a diagonal matrix
self.peepholes: ActivationDict = {}
self.decoder_w: Optional[Tensor] = None
self.decoder_b: Optional[Tensor] = None
def create_inputs_embeds(self, input_ids: Tensor) -> Tensor:
return self.word_embeddings[input_ids]
def decode(self, hidden_state: Tensor) -> Tensor:
return hidden_state @ self.decoder_w.t() + self.decoder_b
@property
def num_layers(self) -> int:
return max(layer for layer, _name in self.sizes) + 1
@property
def top_layer(self) -> int:
return self.num_layers - 1
@property
def output_size(self) -> int:
return self.sizes[self.top_layer, "hx"]
def nhid(self, activation_name: ActivationName) -> int:
"""Returns number of hidden units for a (layer, name) tuple.
If `name` != emb/hx/cx returns the size of (layer, `cx`).
"""
layer, name = activation_name
return self.sizes.get((layer, name), self.sizes[layer, "cx"])
def activation_names(self, compute_out: bool = False) -> ActivationNames:
"""Returns a list of all the model's activation names.
Parameters
----------
compute_out : bool, optional
Toggles the computation of the final decoder projection.
If set to False this projection is not calculated.
            Defaults to False.
Returns
-------
activation_names : ActivationNames
List of (layer, name) tuples.
"""
lstm_names = ["hx", "cx", "f_g", "i_g", "o_g", "c_tilde_g"]
activation_names = list(product(range(self.num_layers), lstm_names))
activation_names.append((0, "emb"))
if compute_out:
activation_names.append((self.top_layer, "out"))
return activation_names
def forward(
self,
input_ids: Optional[Tensor] = None,
inputs_embeds: Optional[Union[Tensor, ShapleyTensor]] = None,
input_lengths: Optional[Tensor] = None,
calc_causal_lm_probs: bool = False,
compute_out: bool = False,
only_return_top_embs: bool = False,
) -> Union[ActivationDict, Tensor]:
if input_ids is not None and inputs_embeds is not None:
raise ValueError(
"You cannot specify both input_ids and inputs_embeds at the same time"
)
if inputs_embeds is None and input_ids is None:
raise ValueError("inputs_embeds or input_ids must be provided")
if inputs_embeds is None:
inputs_embeds = self.create_inputs_embeds(input_ids)
if len(inputs_embeds.shape) == 2:
inputs_embeds = inputs_embeds.unsqueeze(0)
inputs_embeds = inputs_embeds.to(self.device)
iterator, unsorted_indices = self._create_iterator(inputs_embeds, input_lengths)
all_activations = self._init_activations(inputs_embeds, compute_out)
cur_activations = self.init_hidden(inputs_embeds.size(0))
for w_idx, input_ in enumerate(iterator):
num_input = input_.shape[0]
for a_name in cur_activations:
cur_activations[a_name] = cur_activations[a_name][:num_input]
cur_activations = self.forward_step(
input_, cur_activations, compute_out=compute_out
)
for a_name in all_activations:
all_activations[a_name][:num_input, w_idx] = cur_activations[a_name]
# Batch had been sorted and needs to be unsorted to retain the original order
for a_name, activations in all_activations.items():
all_activations[a_name] = activations[unsorted_indices]
if calc_causal_lm_probs:
output_ids = input_ids[:, 1:].unsqueeze(-1)
logits = all_activations[self.top_layer, "out"]
probs = log_softmax(logits[:, :-1], dim=-1)
all_activations[self.top_layer, "out"] = torch.gather(probs, -1, output_ids)
if only_return_top_embs and compute_out:
return all_activations[self.top_layer, "out"]
elif only_return_top_embs:
return all_activations[self.top_layer, "hx"]
return all_activations
def forward_step(
self,
token_embeds: Tensor,
prev_activations: ActivationDict,
compute_out: bool = False,
) -> ActivationDict:
"""Performs a forward pass of one step across all layers.
Parameters
----------
token_embeds : Tensor
Tensor of word embeddings at the current sentence position.
prev_activations : ActivationDict
Dict mapping the activation names of the previous hidden
and cell states to their corresponding Tensors.
compute_out : bool, optional
Toggles the computation of the final decoder projection.
If set to False this projection is not calculated.
            Defaults to False.
Returns
-------
all_activations : ActivationDict
Dictionary mapping activation names to tensors of shape:
batch_size x max_sen_len x nhid.
"""
cur_activations: ActivationDict = {}
input_ = token_embeds
for layer in range(self.num_layers):
prev_hx = prev_activations[layer, "hx"]
prev_cx = prev_activations[layer, "cx"]
layer_activations = self.forward_cell(layer, input_, prev_hx, prev_cx)
cur_activations.update(layer_activations)
input_ = cur_activations[layer, "hx"]
if compute_out:
out = input_ @ self.decoder_w.t()
out += self.decoder_b
cur_activations[self.top_layer, "out"] = out
return cur_activations
def forward_cell(
self, layer: int, input_: Tensor, prev_hx: Tensor, prev_cx: Tensor
) -> ActivationDict:
"""Performs the forward step of 1 LSTM cell.
Parameters
----------
layer : int
Current RNN layer.
input_ : Tensor
Current input embedding. In higher layers this is h^l-1_t.
Size: batch_size x nhid
prev_hx : Tensor
Previous hidden state. Size: batch_size x nhid
prev_cx : Tensor
Previous cell state. Size: batch_size x nhid
Returns
-------
all_activations : ActivationDict
Dictionary mapping activation names to tensors of shape:
batch_size x max_sen_len x nhid.
"""
# Shape: (bsz, nhid_h+emb_size)
if self.ih_concat_order == ["h", "i"]:
ih_concat = torch.cat((prev_hx, input_), dim=1)
else:
ih_concat = torch.cat((input_, prev_hx), dim=1)
# Shape: (bsz, 4*nhid_c)
proj = ih_concat @ self.weight[layer]
if layer in self.bias:
proj += self.bias[layer]
split_proj: Dict[str, Tensor] = dict(
zip(self.split_order, torch.split(proj, self.sizes[layer, "cx"], dim=1))
)
if self.use_peepholes:
split_proj["f"] += prev_cx * self.peepholes[layer, "f"]
split_proj["i"] += prev_cx * self.peepholes[layer, "i"]
# Shapes: (bsz, nhid_c)
f_g = torch.sigmoid(split_proj["f"])
i_g = torch.sigmoid(split_proj["i"])
c_tilde_g = torch.tanh(split_proj["g"])
cx = f_g * prev_cx + i_g * c_tilde_g
if self.use_peepholes:
split_proj["o"] += cx * self.peepholes[layer, "o"]
o_g = torch.sigmoid(split_proj["o"])
hx = o_g * torch.tanh(cx)
if self.sizes[layer, "hx"] != self.sizes[layer, "cx"]:
hx = hx @ self.weight_P[layer]
activation_dict = {
(layer, "hx"): hx,
(layer, "cx"): cx,
(layer, "f_g"): f_g,
(layer, "i_g"): i_g,
(layer, "o_g"): o_g,
(layer, "c_tilde_g"): c_tilde_g,
}
if layer == 0:
activation_dict[0, "emb"] = input_
return activation_dict
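    # For reference, forward_cell implements the standard LSTM update
    # (a worked restatement of the code above, not additional functionality):
    #   f_t = sigmoid(W_f @ [h_{t-1}; x_t] + b_f)        -> "f_g"
    #   i_t = sigmoid(W_i @ [h_{t-1}; x_t] + b_i)        -> "i_g"
    #   g_t = tanh(W_g @ [h_{t-1}; x_t] + b_g)           -> "c_tilde_g"
    #   c_t = f_t * c_{t-1} + i_t * g_t                  -> "cx"
    #   o_t = sigmoid(W_o @ [h_{t-1}; x_t] + b_o)        -> "o_g"
    #   h_t = o_t * tanh(c_t)                            -> "hx"
    # with optional peephole terms added to the f, i and o projections, and an
    # optional projection `weight_P` when the hidden size differs from the
    # cell size.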
@staticmethod
def _create_iterator(
inputs_embeds: Tensor, input_lengths: Optional[Tensor]
) -> Tuple[Tuple[Tensor, ...], Tensor]:
"""Creates a PackedSequence that handles batching for the RNN.
Batch items are sorted based on sentence length, allowing
<pad> tokens to be skipped efficiently during the forward pass.
Returns
-------
iterator : Tuple[Tensor, ...]
Tuple of input tensors for each step in the sequence.
unsorted_indices : Tensor
Original order of the corpus prior to sorting.
"""
if input_lengths is None:
batch_size = inputs_embeds.shape[0]
input_lengths = torch.tensor(batch_size * [inputs_embeds.shape[1]])
packed_batch: PackedSequence = pack_padded_sequence(
inputs_embeds,
lengths=input_lengths.cpu(),
batch_first=True,
enforce_sorted=False,
)
iterator = torch.split(packed_batch.data, list(packed_batch.batch_sizes))
return iterator, packed_batch.unsorted_indices
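    # Example of the packing behaviour assumed above (illustrative): for a
    # padded batch with lengths [3, 1, 2], pack_padded_sequence yields
    # batch_sizes [3, 2, 1], i.e. step 0 processes all 3 items, step 1 the two
    # longest, and step 2 only the longest; `unsorted_indices` restores the
    # original batch order afterwards.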
def _init_activations(
self, inputs_embeds: Tensor, compute_out: bool
) -> ActivationDict:
"""Returns a dictionary mapping activation names to tensors.
If the input is a ShapleyTensor this dict will store the
ShapleyTensors as well.
Returns
-------
all_activations : ActivationDict
Dictionary mapping activation names to tensors of shape:
batch_size x max_sen_len x nhid.
"""
batch_size, max_sen_len = inputs_embeds.shape[:2]
all_activations: ActivationDict = {
a_name: torch.zeros(batch_size, max_sen_len, self.nhid(a_name))
for a_name in self.activation_names(compute_out)
}
if isinstance(inputs_embeds, ShapleyTensor):
for a_name, activations in all_activations.items():
all_activations[a_name] = type(inputs_embeds)(activations)
return all_activations
def init_hidden(self, batch_size: int) -> ActivationDict:
"""Creates a batch of initial states.
Parameters
----------
batch_size : int
Size of batch for which states are created.
Returns
-------
init_states : ActivationTensors
Dictionary mapping hidden and cell state to init tensors.
"""
batch_init_states: ActivationDict = {}
for layer in range(self.num_layers):
for hc in ["hx", "cx"]:
# Shape: (batch_size, nhid)
batched_state = self.init_states[layer, hc].repeat(batch_size, 1)
batch_init_states[layer, hc] = batched_state
return batch_init_states
def final_hidden(self, hidden: ActivationDict) -> Tensor:
"""Returns the final hidden state.
Parameters
----------
hidden : ActivationTensors
Dictionary of extracted activations.
Returns
-------
final_hidden : Tensor
Tensor of the final hidden state.
"""
return hidden[self.top_layer, "hx"].squeeze()
def set_init_states(
self,
pickle_path: Optional[str] = None,
corpus_path: Optional[str] = None,
use_default: bool = False,
tokenizer: Optional[PreTrainedTokenizer] = None,
save_init_states_to: Optional[str] = None,
) -> None:
"""Set up the initial LM states.
If no path is provided 0-valued embeddings will be used.
Note that the loaded init should provide tensors for `hx`
and `cx` in all layers of the LM.
Note that `init_states_pickle` takes precedence over
`init_states_corpus` in case both are provided.
Parameters
----------
pickle_path : str, optional
Path to pickled file with initial lstm states. If not
provided zero-valued init states will be created.
corpus_path : str, optional
Path to corpus of which the final hidden state will be used
as initial states.
use_default : bool
Toggle to use the default initial sentence `. <eos>`.
tokenizer : PreTrainedTokenizer, optional
Tokenizer that must be provided when creating the init
states from a corpus.
save_init_states_to : str, optional
Path to which the newly computed init_states will be saved.
If not provided these states won't be dumped.
Returns
-------
init_states : ActivationTensors
ActivationTensors containing the init states for each layer.
"""
if use_default:
diagnnose_utils_dir = os.path.dirname(diagnnose_utils_init)
corpus_path = os.path.join(diagnnose_utils_dir, "init_sentence.txt")
if pickle_path is not None:
init_states = self._create_init_states_from_pickle(pickle_path)
elif corpus_path is not None:
init_states = self._create_init_states_from_corpus(
corpus_path, tokenizer, save_init_states_to
)
else:
init_states = self._create_zero_states()
self.init_states = init_states
def _create_zero_states(self) -> ActivationDict:
"""Zero-initialized states if no init state is provided.
Returns
-------
init_states : ActivationTensors
Dictionary mapping (layer, name) tuple to zero-tensor.
"""
init_states: ActivationDict = {
a_name: torch.zeros((1, self.nhid(a_name)), device=self.device)
for a_name in product(range(self.num_layers), ["cx", "hx"])
}
return init_states
@suppress_print
def _create_init_states_from_corpus(
self,
init_states_corpus: str,
tokenizer: PreTrainedTokenizer,
save_init_states_to: Optional[str] = None,
) -> ActivationDict:
assert (
tokenizer is not None
), "Tokenizer must be provided when creating init states from corpus"
corpus: Corpus = Corpus.create(init_states_corpus, tokenizer=tokenizer)
activation_names: ActivationNames = [
(layer, name) for layer in range(self.num_layers) for name in ["hx", "cx"]
]
extractor = Extractor(
self,
corpus,
activation_names,
activations_dir=save_init_states_to,
selection_func=final_sen_token,
)
init_states = extractor.extract().activation_dict
return init_states
def _create_init_states_from_pickle(self, pickle_path: str) -> ActivationDict:
init_states: ActivationDict = load_pickle(pickle_path)
self._validate_init_states_from_pickle(init_states)
return init_states
def _validate_init_states_from_pickle(self, init_states: ActivationDict) -> None:
        num_init_layers = max(layer for layer, _name in init_states) + 1
assert (
num_init_layers == self.num_layers
), "Number of initial layers not correct"
for (layer, name), size in self.sizes.items():
if name in ["hx", "cx"]:
assert (
layer,
name,
) in init_states.keys(), (
f"Activation {layer},{name} is not found in init states"
)
init_size = init_states[layer, name].size(1)
assert init_size == size, (
f"Initial activation size for {name} is incorrect: "
f"{name}: {init_size}, should be {size}"
)
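# Hypothetical usage sketch (not part of the original module). It assumes a
# concrete subclass that fills in `sizes`, `split_order`, `word_embeddings`,
# the per-layer `weight`/`bias` dicts and the decoder parameters:
#
#     model = MyLSTM(device="cpu")
#     model.set_init_states(use_default=True, tokenizer=tokenizer)
#     activations = model.forward(input_ids=input_ids,
#                                 input_lengths=lengths,
#                                 compute_out=True)
#     logits = activations[model.top_layer, "out"]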
|
1617553
|
import chex
import jax
import jax.numpy as jnp
from typing import Callable, Optional, Tuple
from .moment import MomentTransform, MomentTransformClass
from chex import Array, dataclass
import tensorflow_probability.substrates.jax as tfp
dist = tfp.distributions
class UnscentedTransform(MomentTransformClass):
def __init__(
self,
gp_pred,
n_features: int,
alpha: float = 1.0,
beta: float = 1.0,
kappa: Optional[float] = None,
):
self.gp_pred = gp_pred
self.sigma_pts = get_unscented_sigma_points(n_features, kappa, alpha)
self.Wm, self.wc = get_unscented_weights(n_features, kappa, alpha, beta)
self.Wc = jnp.diag(self.wc)
def predict_mean(self, x, x_cov):
# cholesky decomposition
L = jnp.linalg.cholesky(x_cov)
# calculate sigma points
# (D,M) = (D,1) + (D,D)@(D,M)
x_sigma_samples = x[..., None] + L @ self.sigma_pts
# ===================
# Mean
# ===================
# function predictions over mc samples
# (P,M) = (D,M)
y_mu_sigma = jax.vmap(self.gp_pred.predict_mean, in_axes=2, out_axes=1)(
x_sigma_samples
)
# mean of mc samples
# (N,M,P) @ (M,) -> (N,P)
y_mu = jnp.einsum("ijk,j->ik", y_mu_sigma, self.Wm)
return y_mu
def predict_f(self, x, x_cov, full_covariance=False):
# cholesky decomposition
L = jnp.linalg.cholesky(x_cov)
# calculate sigma points
# (D,M) = (D,1) + (D,D)@(D,M)
x_sigma_samples = x[..., None] + L @ self.sigma_pts
# ===================
# Mean
# ===================
# function predictions over mc samples
# (N,M,P) = (D,M)
y_mu_sigma = jax.vmap(self.gp_pred.predict_mean, in_axes=2, out_axes=1)(
x_sigma_samples
)
# mean of mc samples
# (N,M,P) @ (M,) -> (N,P)
y_mu = jnp.einsum("ijk,j->ik", y_mu_sigma, self.Wm)
# ===================
# Covariance
# ===================
if full_covariance:
# (N,P,M) - (N,P,1) -> (N,P,M)
dfydx = y_mu_sigma - y_mu[..., None]
# (N,M,P) @ (M,M) @ (N,M,P) -> (N,P,D)
cov = jnp.einsum("ijk,jl,mlk->ikm", dfydx, self.Wc, dfydx.T)
return y_mu, cov
else:
# (N,P,M) - (N,P,1) -> (N,P,M)
dfydx = y_mu_sigma - y_mu[..., None]
# (N,M,P) @ (M,) -> (N,P)
var = jnp.einsum("ijk,j->ik", dfydx ** 2, self.wc)
return y_mu, var
def predict_cov(self, key, x, x_cov):
# cholesky decomposition
L = jnp.linalg.cholesky(x_cov)
# calculate sigma points
# (D,M) = (D,1) + (D,D)@(D,M)
x_sigma_samples = x[..., None] + L @ self.sigma_pts
# ===================
# Mean
# ===================
# function predictions over mc samples
# (N,P,M) = (D,M)
y_mu_sigma = jax.vmap(self.gp_pred.predict_mean, in_axes=2, out_axes=1)(
x_sigma_samples
)
# mean of mc samples
# (N,P,M) @ (M,) -> (N,P)
y_mu = jnp.einsum("ijk,j->ik", y_mu_sigma, self.Wm)
# ===================
# Covariance
# ===================
# (N,P,M) - (N,P,1) -> (N,P,M)
dfydx = y_mu_sigma - y_mu[..., None]
# (N,M,P) @ (M,M) @ (N,M,P) -> (N,P,D)
y_cov = jnp.einsum("ijk,jl,mlk->ikm", dfydx, self.Wc, dfydx.T)
return y_cov
def predict_var(self, key, x, x_cov):
# cholesky decomposition
L = jnp.linalg.cholesky(x_cov)
# calculate sigma points
# (D,M) = (D,1) + (D,D)@(D,M)
x_sigma_samples = x[..., None] + L @ self.sigma_pts
# ===================
# Mean
# ===================
# function predictions over mc samples
# (N,P,M) = (D,M)
y_mu_sigma = jax.vmap(self.gp_pred.predict_mean, in_axes=2, out_axes=1)(
x_sigma_samples
)
# mean of mc samples
# (N,P,M) @ (M,) -> (N,P)
y_mu = jnp.einsum("ijk,j->ik", y_mu_sigma, self.Wm)
# ===================
# Variance
# ===================
# (N,P,M) - (N,P,1) -> (N,P,M)
dfydx = y_mu_sigma - y_mu[..., None]
# (N,M,P) @ (M,) -> (N,P)
var = jnp.einsum("ijk,j->ik", dfydx ** 2, self.wc)
return var
class SphericalTransform(UnscentedTransform):
def __init__(self, gp_pred, n_features: int):
super().__init__(
gp_pred=gp_pred, n_features=n_features, alpha=1.0, beta=0.0, kappa=0.0
)
def get_unscented_sigma_points(
n_features: int, kappa: Optional[float] = None, alpha: float = 1.0
) -> Tuple[chex.Array, chex.Array]:
"""Generate Unscented samples"""
# calculate kappa value
if kappa is None:
kappa = jnp.maximum(3.0 - n_features, 0.0)
lam = alpha ** 2 * (n_features + kappa) - n_features
c = jnp.sqrt(n_features + lam)
return jnp.hstack(
(jnp.zeros((n_features, 1)), c * jnp.eye(n_features), -c * jnp.eye(n_features))
)
def get_unscented_weights(
n_features: int,
kappa: Optional[float] = None,
alpha: float = 1.0,
beta: float = 2.0,
) -> Tuple[float, float]:
"""Generate normalizers for MCMC samples"""
# calculate kappa value
if kappa is None:
kappa = jnp.maximum(3.0 - n_features, 0.0)
lam = alpha ** 2 * (n_features + kappa) - n_features
wm = 1.0 / (2.0 * (n_features + lam)) * jnp.ones(2 * n_features + 1)
wc = wm.copy()
wm = jax.ops.index_update(wm, 0, lam / (n_features + lam))
wc = jax.ops.index_update(wc, 0, wm[0] + (1 - alpha ** 2 + beta))
return wm, wc
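if __name__ == "__main__":
    # Minimal sanity-check sketch (not part of the original module). It relies
    # on the legacy `jax.ops.index_update` API used above, so it assumes a JAX
    # version that still provides it. For D input features the transform uses
    # 2*D + 1 sigma points and the mean weights sum to one.
    D = 2
    sigma_pts = get_unscented_sigma_points(D)
    wm, wc = get_unscented_weights(D)
    print(sigma_pts.shape, wm.shape, float(jnp.sum(wm)))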
|
1617559
|
from ..utils import attack, change_parameter
XSS_STRING = u'<script>alert("XSS_STRING");</script>'
def attack_post(client, log, form):
    # A helper function for modifying values of the parameter list.
def modify_parameter(target_name, value):
parameters = dict(form.get_parameters())
parameters[target_name] = value
return parameters
for parameter_name, parameter_value in form.get_parameters():
# Replace value with XSS_STRING
parameters = modify_parameter(parameter_name, XSS_STRING)
# Send the form
try:
attacked_page = form.send(client, parameters)
except Exception as e:
            log('warn', form.action,
                'HTTP errors occurred when confronted with HTML input',
                "in parameter " + parameter_name)
return
# Determine if the string is unfiltered on the page.
if XSS_STRING in attacked_page.html:
# Oh no! It is!
log('vuln', attacked_page.url, "XSS",
"in parameter " + parameter_name,
request=attacked_page.request)
def attack_get(client, log, url, parameter):
# Replace the value of the parameter with XSS_STRING
attack_url = change_parameter(url, parameter, XSS_STRING)
# To run the attack, we just request the site.
attacked_page = client.download_page(attack_url)
    # If XSS_STRING is found unfiltered on the page, we have a problem.
if XSS_STRING in attacked_page.html:
log('vuln', attacked_page.url, "XSS", "in URL parameter " + parameter)
def search(page):
for form in page.get_forms():
yield ('post', form)
for parameter, _ in page.url_parameters:
yield ('get', page.url, parameter)
@attack(search)
def xss(client, log, target_type, *args):
globals()['attack_' + target_type](client, log, *args)
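# Illustration of the core detection idea (not part of the original module):
# a page that reflects the payload back verbatim, without escaping it, is
# reported as vulnerable.
#
#     html = '<p>You searched for <script>alert("XSS_STRING");</script></p>'
#     assert XSS_STRING in html   # would be logged as an XSS finding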
|
1617587
|
import matplotlib.pyplot as plt
import numpy as np
import torch
import cv2
def draw_figure(fig):
fig.canvas.draw()
fig.canvas.flush_events()
plt.pause(0.001)
def show_tensor(a: torch.Tensor, fig_num = None, title = None, range=(None, None), ax=None):
"""Display a 2D tensor.
args:
fig_num: Figure number.
title: Title of figure.
"""
a_np = a.squeeze().cpu().clone().detach().numpy()
if a_np.ndim == 3:
a_np = np.transpose(a_np, (1, 2, 0))
if ax is None:
fig = plt.figure(fig_num)
plt.tight_layout()
plt.cla()
plt.imshow(a_np, vmin=range[0], vmax=range[1])
plt.axis('off')
plt.axis('equal')
if title is not None:
plt.title(title)
draw_figure(fig)
else:
ax.cla()
ax.imshow(a_np, vmin=range[0], vmax=range[1])
ax.set_axis_off()
ax.axis('equal')
if title is not None:
ax.set_title(title)
draw_figure(plt.gcf())
def plot_graph(a: torch.Tensor, fig_num = None, title = None):
"""Plot graph. Data is a 1D tensor.
args:
fig_num: Figure number.
title: Title of figure.
"""
a_np = a.squeeze().cpu().clone().detach().numpy()
if a_np.ndim > 1:
raise ValueError
fig = plt.figure(fig_num)
# plt.tight_layout()
plt.cla()
plt.plot(a_np)
if title is not None:
plt.title(title)
draw_figure(fig)
def show_image_with_boxes(im, boxes, iou_pred=None, disp_ids=None):
im_np = im.clone().cpu().squeeze().numpy()
im_np = np.ascontiguousarray(im_np.transpose(1, 2, 0).astype(np.uint8))
boxes = boxes.view(-1, 4).cpu().numpy().round().astype(int)
# Draw proposals
for i_ in range(boxes.shape[0]):
if disp_ids is None or disp_ids[i_]:
bb = boxes[i_, :]
disp_color = (i_*38 % 256, (255 - i_*97) % 256, (123 + i_*66) % 256)
cv2.rectangle(im_np, (bb[0], bb[1]), (bb[0] + bb[2], bb[1] + bb[3]),
disp_color, 1)
if iou_pred is not None:
text_pos = (bb[0], bb[1] - 5)
cv2.putText(im_np, 'ID={} IOU = {:3.2f}'.format(i_, iou_pred[i_]), text_pos,
cv2.FONT_HERSHEY_SIMPLEX, 0.5, (0, 255, 0), 1, bottomLeftOrigin=False)
im_tensor = torch.from_numpy(im_np.transpose(2, 0, 1)).float()
return im_tensor
def _pascal_color_map(N=256, normalized=False):
"""
Python implementation of the color map function for the PASCAL VOC data set.
Official Matlab version can be found in the PASCAL VOC devkit
http://host.robots.ox.ac.uk/pascal/VOC/voc2012/index.html#devkit
"""
def bitget(byteval, idx):
return (byteval & (1 << idx)) != 0
dtype = 'float32' if normalized else 'uint8'
cmap = np.zeros((N, 3), dtype=dtype)
for i in range(N):
r = g = b = 0
c = i
for j in range(8):
r = r | (bitget(c, 0) << 7 - j)
g = g | (bitget(c, 1) << 7 - j)
b = b | (bitget(c, 2) << 7 - j)
c = c >> 3
cmap[i] = np.array([r, g, b])
cmap = cmap / 255 if normalized else cmap
return cmap
def overlay_mask(im, ann, alpha=0.5, colors=None, contour_thickness=None):
""" Overlay mask over image.
Source: https://github.com/albertomontesg/davis-interactive/blob/master/davisinteractive/utils/visualization.py
This function allows you to overlay a mask over an image with some
transparency.
# Arguments
im: Numpy Array. Array with the image. The shape must be (H, W, 3) and
the pixels must be represented as `np.uint8` data type.
ann: Numpy Array. Array with the mask. The shape must be (H, W) and the
            values must be integers
alpha: Float. Proportion of alpha to apply at the overlaid mask.
colors: Numpy Array. Optional custom colormap. It must have shape (N, 3)
being N the maximum number of colors to represent.
contour_thickness: Integer. Thickness of each object index contour draw
over the overlay. This function requires to have installed the
package `opencv-python`.
# Returns
Numpy Array: Image of the overlay with shape (H, W, 3) and data type
`np.uint8`.
"""
im, ann = np.asarray(im, dtype=np.uint8), np.asarray(ann, dtype=np.int)
if im.shape[:-1] != ann.shape:
raise ValueError('First two dimensions of `im` and `ann` must match')
if im.shape[-1] != 3:
        raise ValueError('im must have three channels in the third dimension')
colors = colors or _pascal_color_map()
colors = np.asarray(colors, dtype=np.uint8)
mask = colors[ann]
fg = im * alpha + (1 - alpha) * mask
img = im.copy()
img[ann > 0] = fg[ann > 0]
if contour_thickness: # pragma: no cover
import cv2
for obj_id in np.unique(ann[ann > 0]):
contours = cv2.findContours((ann == obj_id).astype(
np.uint8), cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)[-2:]
cv2.drawContours(img, contours[0], -1, colors[obj_id].tolist(),
contour_thickness)
return img
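if __name__ == "__main__":
    # Minimal sketch with synthetic data (not part of the original module);
    # it assumes a NumPy version where the `np.int` alias used above still exists.
    image = np.full((64, 64, 3), 128, dtype=np.uint8)
    mask = np.zeros((64, 64), dtype=np.int64)
    mask[8:24, 8:24] = 1
    mask[32:56, 32:56] = 2
    blended = overlay_mask(image, mask, alpha=0.6, contour_thickness=1)
    print(blended.shape, blended.dtype)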
|
1617603
|
import json
from django.http import HttpResponse
from gerapy.server.core.encoder import JSONEncoder
class JsonResponse(HttpResponse):
"""
An HTTP response class that consumes data to be serialized to JSON.
:param data: Data to be dumped into json. By default only ``dict`` objects
are allowed to be passed due to a security flaw before EcmaScript 5. See
the ``safe`` parameter for more information.
    :param encoder: Should be a JSON encoder class. Defaults to the project's
      ``gerapy.server.core.encoder.JSONEncoder``.
    :param safe: Controls if only ``dict`` objects may be serialized. Defaults
      to ``False`` here, so non-dict objects are allowed.
:param json_dumps_params: A dictionary of kwargs passed to json.dumps().
"""
def __init__(self, data, encoder=JSONEncoder, safe=False,
json_dumps_params=None, **kwargs):
if safe and not isinstance(data, dict):
raise TypeError(
'In order to allow non-dict objects to be serialized set the '
'safe parameter to False.'
)
if json_dumps_params is None:
json_dumps_params = {}
kwargs.setdefault('content_type', 'application/json')
data = json.dumps(data, cls=encoder, **json_dumps_params)
super(JsonResponse, self).__init__(content=data, **kwargs)
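# Example (hypothetical view, not part of this module):
#
#     def status(request):
#         return JsonResponse({'status': 'ok', 'count': 3})
#
# Because ``safe`` defaults to ``False`` here, non-dict payloads such as lists
# can also be returned, unlike Django's built-in ``JsonResponse``.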
|
1617648
|
from django.utils.translation import ugettext_lazy as _
EQUAL = 0
LESS_THAN = 1
LESS_THAN_EQUAL = 2
GREATER_THAN = 3
GREATER_THAN_EQUAL = 4
CONTAIN = 5
IS = 10
IS_NOT = 11
IS_VALID = 12
IS_NOT_VALID = 13
NUMBER_OPERATORS = (
(EQUAL, _(u"Equal to")),
(LESS_THAN, _(u"Less than")),
(LESS_THAN_EQUAL, _(u"Less than or equal to")),
(GREATER_THAN, _(u"Greater than")),
(GREATER_THAN_EQUAL, _(u"Greater than or equal to")),
)
STRING_OPERATORS = (
(EQUAL, _(u"Equal to")),
(CONTAIN, _(u"Contain")),
)
SELECT_OPERATORS = (
(IS, _(u"Is")),
(IS_NOT, _(u"Is not")),
(IS_VALID, _(u"Is valid")),
(IS_NOT_VALID, _(u"Is not valid")),
)
|
1617684
|
import pickle # Save model
#import matplotlib.pyplot as plt
import re # regular expression library
from random import random, choice # for random strategy
from operator import itemgetter
import numpy as np
from scipy.sparse import csgraph
from scipy.spatial import distance
from sklearn.cluster import KMeans
from sklearn.cluster import SpectralClustering
from sklearn.decomposition import TruncatedSVD
def generateUserFeature(W):
svd = TruncatedSVD(n_components=25)
result = svd.fit(W).transform(W)
return result
def vectorize(M):
temp = []
for i in range(M.shape[0]*M.shape[1]):
temp.append(M.T.item(i))
V = np.asarray(temp)
return V
def matrixize(V, C_dimension):
temp = np.zeros(shape = (C_dimension, len(V)/C_dimension))
for i in range(len(V)/C_dimension):
temp.T[i] = V[i*C_dimension : (i+1)*C_dimension]
W = temp
return W
def readFeatureVectorFile(FeatureVectorsFileName):
FeatureVectors = {}
with open(FeatureVectorsFileName, 'r') as f:
f.readline()
for line in f:
line = line.split("\t")
vec = line[1].strip('[]').strip('\n').split(';')
FeatureVectors[int(line[0])] = np.array(vec).astype(np.float)
return FeatureVectors
# This code simply reads one line from the source files of Yahoo!
def parseLine(line):
userID, tim, pool_articles = line.split("\t")
userID, tim = int(userID), int(tim)
pool_articles = np.array(pool_articles.strip('[').strip(']').strip('\n').split(','))
return userID, tim, pool_articles
def save_to_file(fileNameWrite, recordedStats, tim):
with open(fileNameWrite, 'a+') as f:
f.write('data') # the observation line starts with data;
f.write(',' + str(tim))
f.write(',' + ';'.join([str(x) for x in recordedStats]))
f.write('\n')
def initializeGW( Gepsilon ,n, relationFileName):
W = np.identity(n)
with open(relationFileName) as f:
for line in f:
line = line.split('\t')
if line[0] != 'userID':
if int(line[0])<=n and int(line[1]) <=n:
W[int(line[0])][int(line[1])] +=1
G = W
L = csgraph.laplacian(G, normed = False)
I = np.identity(n)
GW = I + Gepsilon*L # W is a double stochastic matrix
print GW
return GW.T
# generate graph W(No clustering)
def initializeW(n,relationFileName):
W = np.identity(n)
with open(relationFileName) as f:
for line in f:
line = line.split('\t')
if line[0] != 'userID':
if int(line[0])<=n and int(line[1]) <=n:
W[int(line[0])][int(line[1])] +=1
#print W[int(line[0])][int(line[1])]
row_sums = W.sum(axis=1)
NormalizedW = W / row_sums[:, np.newaxis]
W = NormalizedW
print W.T
print 'Wtype', type(W)
#initializeW_clustering(n,relationFileName, 5)
return W.T
def initializeW_clustering(n,relationFileName, nClusters):
W = np.identity(n+1)
with open(relationFileName) as f:
f.readline()
for line in f:
line = line.split('\t')
if int(line[0])<=n and int(line[1]) <=n:
W[int(line[0])][int(line[1])] +=1
#KMeans
#SpectralClustering
spc = SpectralClustering(n_clusters=nClusters, affinity = "precomputed")
#spc = SpectralClustering(n_clusters=nClusters)
spc.fit(W) # What is the meaning
label = spc.labels_
with open(relationFileName+'.cluster','w') as f:
for i in range(n):
f.write(str(label[i])+'\n')
NeighborW = np.zeros(shape=(nClusters, nClusters))
for i in range(n):
for j in range(n):
if label[i]==label[j]:
NeighborW[label[i]][label[j]] = 0
else:
NeighborW[label[i]][label[j]] += W[i][j]
NormalizedNeighborW = normalizeByRow(NeighborW)
newW = np.identity(nClusters) + NormalizedNeighborW
print 'newW', newW
NormalizednewW = normalizeByRow(newW)
print 'NormalizednewW', NormalizednewW.T
return NormalizednewW.T, newW, label
def initializeGW_clustering(Gepsilon, relationFileName, newW):
G = newW
n = newW.shape[0]
L = csgraph.laplacian(G, normed = False)
I = np.identity(n)
GW = I + Gepsilon*L # W is a double stochastic matrix
print GW
return GW.T
def initializeGW_label(Gepsilon ,n, relationFileName, label, diagnol):
W = np.identity(n)
with open(relationFileName) as f:
for line in f:
line = line.split('\t')
if line[0] != 'userID' and label[int(line[0])]!=10000 and label[int(line[1])]!=10000: #10000 means not top 100 user.
W[label[int(line[0])]][label[int(line[1])]] += 1
# don't need it
'''
if diagnol=='1' or diagnol=='0':
for i in range(n):
W[i][i] = int(diagnol)
'''
G = W
L = csgraph.laplacian(G, normed = False)
I = np.identity(n)
GW = I + Gepsilon*L # W is a double stochastic matrix
print GW
return GW.T
# generate graph W(No clustering)
def initializeW_label(n,relationFileName, label, diagnol, show_heatmap):
W = np.identity(n)
with open(relationFileName) as f:
for line in f:
line = line.split('\t')
if line[0] != 'userID' and label[int(line[0])]!=10000 and label[int(line[1])]!=10000: #10000 means not top 100 user.
W[label[int(line[0])]][label[int(line[1])]] += 1
if show_heatmap:
heatmap(W)
# normalize
if is_number(diagnol):
for i in range(n):
W[i][i] = 0
W = normalizeByRow(W)
if show_heatmap:
heatmap(W)
for i in range(n):
W[i][i] = float(diagnol)
if show_heatmap:
heatmap(W)
if diagnol == 'Max':
for i in range(n):
W[i][i] = 0
W = normalizeByRow(W)
if show_heatmap:
heatmap(W)
for i in range(n):
maxi = max(W[i])
W[i][i] = maxi
print W
if show_heatmap:
heatmap(W)
if diagnol == 'Opt':
for i in range(n):
W[i][i] =0
if sum(W[i]!=0):
W[i][i] = np.linalg.norm(W[i])**2/sum(W[i])
else:
W[i][i] =1
print W
if show_heatmap:
heatmap(W)
W = normalizeByRow(W)
if show_heatmap:
heatmap(W)
print W.T
return W.T
def read_cluster_label(labelfile):
label = [0]
#fin = open(labelfile,'r')
for line in labelfile:
label.append(int(line))
return np.array(label)
def heatmap(X):
    import matplotlib.pyplot as plt  # local import: the module-level import is commented out above
    plt.pcolor(X)
plt.colorbar()
plt.show()
def normalizeByRow(Matrix):
row_sums = Matrix.sum(axis=1)
for i in range(len(row_sums)):
if row_sums[i] ==0:
row_sums[i] =0.00000000000001
print row_sums
NormalizednewMatrix = Matrix / row_sums[:, np.newaxis]
return NormalizednewMatrix
def is_number(s):
try:
float(s)
return True
except ValueError:
return False
def model_dump(obj, filename, linenum):
fout = open(filename +'.txt', 'w')
fout.write("line\t"+str(linenum))
fout.close()
fout = open(filename +'.model', 'w')
pickle.dump(obj, fout)
fout.close()
def getcons(dim):
cons = []
cons.append({'type': 'eq','fun': lambda x : np.sum(x)-1})
for i in range(dim):
        cons.append({'type': 'ineq', 'fun': lambda x, i=i: x[i]})  # bind i now; a plain lambda would capture only the final i
        cons.append({'type': 'ineq', 'fun': lambda x, i=i: 1 - x[i]})
return tuple(cons)
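# Example (illustrative, not part of the original module): the constraints
# returned by getcons() describe the probability simplex (entries in [0, 1]
# that sum to 1) and can be passed straight to scipy.optimize.minimize:
#
#     from scipy.optimize import minimize
#     res = minimize(lambda x: np.sum((x - 0.5) ** 2),
#                    x0=np.ones(3) / 3.0, constraints=getcons(3))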
|
1617693
|
from .coordattention import CoordAttention, H_Sigmoid, H_Swish
from .involution import Involution
from .identity import Identity
from .droppath import DropPath, droppath
|
1617718
|
import math
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.autograd import Variable
import ConfigSpace
from autoPyTorch.components.networks.base_net import BaseImageNet
from autoPyTorch.utils.config_space_hyperparameter import add_hyperparameter, get_hyperparameter
from autoPyTorch.components.networks.image.utils.utils import initialize_weights
from autoPyTorch.components.networks.image.utils.shakeshakeblock import shake_shake, generate_alpha_beta
from autoPyTorch.components.networks.image.utils.shakedrop import shake_drop, generate_alpha_beta_single
class SkipConnection(nn.Module):
def __init__(self, in_channels, out_channels, stride):
super(SkipConnection, self).__init__()
self.s1 = nn.Sequential()
self.s1.add_module('Skip_1_AvgPool',
nn.AvgPool2d(1, stride=stride))
self.s1.add_module('Skip_1_Conv',
nn.Conv2d(in_channels,
int(out_channels / 2),
kernel_size=1,
stride=1,
padding=0,
bias=False))
self.s2 = nn.Sequential()
self.s2.add_module('Skip_2_AvgPool',
nn.AvgPool2d(1, stride=stride))
self.s2.add_module('Skip_2_Conv',
nn.Conv2d(in_channels,
int(out_channels / 2) if out_channels % 2 == 0 else int(out_channels / 2) + 1,
kernel_size=1,
stride=1,
padding=0,
bias=False))
self.batch_norm = nn.BatchNorm2d(out_channels)
def forward(self, x):
out1 = F.relu(x, inplace=False)
out1 = self.s1(out1)
out2 = F.pad(x[:, :, 1:, 1:], (0, 1, 0, 1))
out2 = self.s2(out2)
out = torch.cat([out1, out2], dim=1)
out = self.batch_norm(out)
return out
class ResidualBranch(nn.Module):
def __init__(self, in_channels, out_channels, filter_size, stride, branch_index):
super(ResidualBranch, self).__init__()
self.residual_branch = nn.Sequential()
self.residual_branch.add_module('Branch_{}:ReLU_1'.format(branch_index),
nn.ReLU(inplace=False))
self.residual_branch.add_module('Branch_{}:Conv_1'.format(branch_index),
nn.Conv2d(in_channels,
out_channels,
kernel_size=filter_size,
stride=stride,
padding=round(filter_size / 3),
bias=False))
self.residual_branch.add_module('Branch_{}:BN_1'.format(branch_index),
nn.BatchNorm2d(out_channels))
self.residual_branch.add_module('Branch_{}:ReLU_2'.format(branch_index),
nn.ReLU(inplace=False))
self.residual_branch.add_module('Branch_{}:Conv_2'.format(branch_index),
nn.Conv2d(out_channels,
out_channels,
kernel_size=filter_size,
stride=1,
padding=round(filter_size / 3),
bias=False))
self.residual_branch.add_module('Branch_{}:BN_2'.format(branch_index),
nn.BatchNorm2d(out_channels))
def forward(self, x):
return self.residual_branch(x)
class BasicBlock(nn.Module):
def __init__(self, n_input_plane, n_output_plane, filter_size, res_branches, stride, shake_config):
super(BasicBlock, self).__init__()
self.shake_config = shake_config
self.branches = nn.ModuleList([ResidualBranch(n_input_plane, n_output_plane, filter_size, stride, branch + 1) for branch in range(res_branches)])
# Skip connection
self.skip = nn.Sequential()
if n_input_plane != n_output_plane or stride != 1:
self.skip.add_module('Skip_connection',
SkipConnection(n_input_plane, n_output_plane, stride))
def forward(self, x):
if len(self.branches) == 1:
out = self.branches[0](x)
if self.config.apply_shakeDrop:
alpha, beta = generate_alpha_beta_single(out.size(), self.shake_config if self.training else (False, False, False), x.is_cuda)
out = shake_drop(out, alpha, beta, self.config.death_rate, self.training)
else:
if self.config.apply_shakeShake:
alpha, beta = generate_alpha_beta(len(self.branches), x.size(0), self.shake_config if self.training else (False, False, False), x.is_cuda)
branches = [self.branches[i](x) for i in range(len(self.branches))]
out = shake_shake(alpha, beta, *branches)
else:
out = sum([self.branches[i](x) for i in range(len(self.branches))])
return out + self.skip(x)
class ResidualGroup(nn.Module):
def __init__(self, block, n_input_plane, n_output_plane, n_blocks, filter_size, res_branches, stride, shake_config):
super(ResidualGroup, self).__init__()
self.group = nn.Sequential()
self.n_blocks = n_blocks
# The first residual block in each group is responsible for the input downsampling
self.group.add_module('Block_1',
block(n_input_plane,
n_output_plane,
filter_size,
res_branches,
stride=stride,
shake_config=shake_config))
# The following residual block do not perform any downsampling (stride=1)
for block_index in range(2, n_blocks + 1):
block_name = 'Block_{}'.format(block_index)
self.group.add_module(block_name,
block(n_output_plane,
n_output_plane,
filter_size,
res_branches,
stride=1,
shake_config=shake_config))
def forward(self, x):
return self.group(x)
class ResNet(BaseImageNet):
def __init__(self, config, in_features, out_features, final_activation, **kwargs):
super(ResNet, self).__init__(config, in_features, out_features, final_activation)
nn.Module.config = config
self.final_activation = final_activation
self.nr_main_blocks = config['nr_main_blocks']
config.initial_filters = config['initial_filters']
config.death_rate = config['death_rate']
config.forward_shake = True
config.backward_shake = True
config.shake_image = True
config.apply_shakeDrop = True
config.apply_shakeShake = True
self.nr_residual_blocks = dict([
('Group_%d' % (i+1), config['nr_residual_blocks_%i' % (i+1)])
for i in range(self.nr_main_blocks)])
self.widen_factors = dict([
('Group_%d' % (i+1), config['widen_factor_%i' % (i+1)])
for i in range(self.nr_main_blocks)])
self.res_branches = dict([
('Group_%d' % (i+1), config['res_branches_%i' % (i+1)])
for i in range(self.nr_main_blocks)])
self.filters_size = dict([
('Group_%d' % (i+1), 3) #config['filters_size_%i' % (i+1)])
for i in range(self.nr_main_blocks)])
shake_config = (config.forward_shake, config.backward_shake,
config.shake_image)
##########
self.model = nn.Sequential()
# depth = sum([config.nr_convs * self.nr_residual_blocks['Group_{}'.format(i)] + 2 for i in range(1, self.nr_main_blocks + 1)])
# print(' | Multi-branch ResNet-' + str(depth) + ' CIFAR-10')
block = BasicBlock
im_size = max(self.ih, self.iw)
self.model.add_module('Conv_0',
nn.Conv2d(self.channels,
config.initial_filters,
kernel_size=7 if im_size > 200 else 3,
stride=2 if im_size > 200 else 1,
padding=3 if im_size > 200 else 1,
bias=False))
self.model.add_module('BN_0',
nn.BatchNorm2d(config.initial_filters))
if im_size > 200:
self.model.add_module('ReLU_0', nn.ReLU(inplace=True))
self.model.add_module('Pool_0', nn.MaxPool2d(kernel_size=3, stride=2, padding=1))
feature_maps_in = int(round(config.initial_filters * self.widen_factors['Group_1']))
self.model.add_module('Group_1',
ResidualGroup(block,
config.initial_filters,
feature_maps_in,
self.nr_residual_blocks['Group_1'],
self.filters_size['Group_1'],
self.res_branches['Group_1'],
1, #2 if im_size > 100 else 1,
shake_config))
# image_size, min_image_size = min(self.iw, self.ih), 5
# division_steps = math.floor(math.log2(image_size) - math.log2(min_image_size) - 1e-5)
for main_block_nr in range(2, self.nr_main_blocks + 1):
feature_maps_out = int(round(feature_maps_in * self.widen_factors['Group_{}'.format(main_block_nr)]))
self.model.add_module('Group_{}'.format(main_block_nr),
ResidualGroup(block,
feature_maps_in,
feature_maps_out,
self.nr_residual_blocks['Group_{}'.format(main_block_nr)],
self.filters_size['Group_{}'.format(main_block_nr)],
self.res_branches['Group_{}'.format(main_block_nr)],
2, # if main_block_nr > self.nr_main_blocks - division_steps else 1,
shake_config))
#image_size = math.floor((image_size+1)/2.0) if main_block_nr > self.nr_main_blocks - division_steps else image_size
feature_maps_in = feature_maps_out
self.feature_maps_out = feature_maps_in
        # use a distinct name so this does not collide with the stem 'ReLU_0' added for large inputs
        self.model.add_module('ReLU_1', nn.ReLU(inplace=True))
self.model.add_module('AveragePool', nn.AdaptiveAvgPool2d(1))
self.fc = nn.Linear(self.feature_maps_out, out_features)
self.apply(initialize_weights)
self.layers = nn.Sequential(self.model)
def forward(self, x):
x = self.model(x)
x = x.view(-1, self.feature_maps_out)
x = self.fc(x)
if not self.training and self.final_activation is not None:
x = self.final_activation(x)
return x
@staticmethod
def get_config_space( nr_main_blocks=[1, 8], nr_residual_blocks=([1, 16], True), initial_filters=([8, 32], True), widen_factor=([0.5, 4], True),
res_branches=([1, 5], False), filters_size=[3, 3], **kwargs):
import ConfigSpace as CS
import ConfigSpace.hyperparameters as CSH
cs = CS.ConfigurationSpace()
        nr_main_blocks_hp = get_hyperparameter(CSH.UniformIntegerHyperparameter, "nr_main_blocks", nr_main_blocks)
        cs.add_hyperparameter(nr_main_blocks_hp)
        initial_filters_hp = get_hyperparameter(CSH.UniformIntegerHyperparameter, "initial_filters", initial_filters)
        cs.add_hyperparameter(initial_filters_hp)
        # add_hyperparameter(cs, CSH.UniformIntegerHyperparameter, 'nr_convs', nr_convs, log=True)
        death_rate_hp = get_hyperparameter(CSH.UniformFloatHyperparameter, "death_rate", ([0, 1], False))
cs.add_hyperparameter(death_rate_hp)
if type(nr_main_blocks[0]) is int:
main_blocks_min = nr_main_blocks[0]
main_blocks_max = nr_main_blocks[1]
else:
main_blocks_min = nr_main_blocks[0][0]
main_blocks_max = nr_main_blocks[0][1]
for i in range(1, main_blocks_max + 1):
            blocks_hp = get_hyperparameter(CSH.UniformIntegerHyperparameter, 'nr_residual_blocks_%d' % i, nr_residual_blocks)
            blocks = cs.add_hyperparameter(blocks_hp)
            widen_hp = get_hyperparameter(CSH.UniformFloatHyperparameter, 'widen_factor_%d' % i, widen_factor)
            widen = cs.add_hyperparameter(widen_hp)
            branches_hp = get_hyperparameter(CSH.UniformIntegerHyperparameter, 'res_branches_%d' % i, res_branches)
branches = cs.add_hyperparameter(branches_hp)
# filters = add_hyperparameter(cs, CSH.UniformIntegerHyperparameter, 'filters_size_%d' % i, filters_size, log=False)
if i > main_blocks_min:
cs.add_condition(CS.GreaterThanCondition(blocks_hp, nr_main_blocks_hp, i-1))
cs.add_condition(CS.GreaterThanCondition(widen_hp, nr_main_blocks_hp, i-1))
cs.add_condition(CS.GreaterThanCondition(branches_hp, nr_main_blocks_hp, i-1))
# cs.add_condition(CS.GreaterThanCondition(filters, main_blocks, i-1))
return cs
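# Minimal, self-contained sketch (not part of the original module) of the conditional
# hyperparameter pattern used in get_config_space above: per-group hyperparameters are
# only active when nr_main_blocks is large enough. The bounds below are illustrative.
if __name__ == "__main__":
    import ConfigSpace as CS
    import ConfigSpace.hyperparameters as CSH

    cs = CS.ConfigurationSpace()
    nr_main_blocks = CSH.UniformIntegerHyperparameter("nr_main_blocks", lower=1, upper=3)
    cs.add_hyperparameter(nr_main_blocks)
    for i in range(1, 4):
        widen = CSH.UniformFloatHyperparameter("widen_factor_%d" % i, lower=0.5, upper=4, log=True)
        cs.add_hyperparameter(widen)
        if i > 1:
            # widen_factor_i only exists when at least i main blocks are requested
            cs.add_condition(CS.GreaterThanCondition(widen, nr_main_blocks, i - 1))
    print(cs.sample_configuration())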
|
1617756
|
from __future__ import absolute_import
from torch import nn
from torch.nn import functional as F
from torch.nn import init
import torchvision
import torch
import random
from .gem import GeneralizedMeanPoolingP
__all__ = ['ResNet', 'resnet18', 'resnet34', 'resnet50', 'resnet101',
'resnet152']
class Waveblock(nn.Module):
def __init__(self):
super().__init__()
def forward(self, x):
if self.training:
h, w = x.size()[-2:]
rh = round(0.3 * h)
sx = random.randint(0, h-rh)
mask = (x.new_ones(x.size()))*1.5
mask[:, :, sx:sx+rh, :] = 1
x = x * mask
return x
class ResNet(nn.Module):
__factory = {
18: torchvision.models.resnet18,
34: torchvision.models.resnet34,
50: torchvision.models.resnet50,
101: torchvision.models.resnet101,
152: torchvision.models.resnet152,
}
def __init__(self, depth, pretrained=True, cut_at_pooling=False,
num_features=0, norm=False, dropout=0, num_classes=0,
dev = None):
super(ResNet, self).__init__()
self.pretrained = pretrained
self.depth = depth
self.cut_at_pooling = cut_at_pooling
# Construct base (pretrained) resnet
if depth not in ResNet.__factory:
raise KeyError("Unsupported depth:", depth)
resnet = ResNet.__factory[depth](pretrained=pretrained)
resnet.layer4[0].conv2.stride = (1,1)
resnet.layer4[0].downsample[0].stride = (1,1)
gap = GeneralizedMeanPoolingP() #nn.AdaptiveAvgPool2d(1)
print("The init norm is ",gap)
waveblock = Waveblock()
self.base = nn.Sequential(
resnet.conv1, resnet.bn1, resnet.maxpool, resnet.relu,
resnet.layer1,
resnet.layer2, waveblock,
resnet.layer3, waveblock,
resnet.layer4, gap
).cuda()
if not self.cut_at_pooling:
self.num_features = 2048*4
self.norm = norm
self.dropout = dropout
self.has_embedding = num_features > 0
self.num_classes = num_classes
out_planes = resnet.fc.in_features
if self.dropout > 0:
self.drop = nn.Dropout(self.dropout)
if self.num_classes > 0:
projecter = nn.Sequential(
nn.Linear(2048, 2048*2),
nn.BatchNorm1d(2048*2),
nn.LeakyReLU(0.2, inplace = True),
nn.Linear(2048*2, 2048*4)
)
assert num_classes % 4 == 0
self.classifier_0 = nn.Linear(self.num_features, self.num_classes//4, bias=False).cuda()
init.normal_(self.classifier_0.weight, std=0.001)
self.classifier_1 = nn.Linear(self.num_features, self.num_classes//4, bias=False).cuda()
init.normal_(self.classifier_1.weight, std=0.001)
self.classifier_2 = nn.Linear(self.num_features, self.num_classes//4, bias=False).cuda()
init.normal_(self.classifier_2.weight, std=0.001)
self.classifier_3 = nn.Linear(self.num_features, self.num_classes//4, bias=False).cuda()
init.normal_(self.classifier_3.weight, std=0.001)
# Append new layers
if self.has_embedding:
self.feat = nn.Linear(out_planes, self.num_features)
self.feat_bn = nn.BatchNorm1d(self.num_features)
init.kaiming_normal_(self.feat.weight, mode='fan_out')
init.constant_(self.feat.bias, 0)
else:
# Change the num_features to CNN output channels
self.num_features = 2048*4
feat_bn = nn.BatchNorm1d(self.num_features)
feat_bn.bias.requires_grad_(False)
init.constant_(feat_bn.weight, 1)
init.constant_(feat_bn.bias, 0)
self.projector_feat_bn = nn.Sequential(
projecter,
feat_bn
).cuda()
def forward(self, x, feature_withbn=False):
x = self.base(x)
x = x.view(x.size(0), -1)
bn_x = self.projector_feat_bn(x)
# Split FC->
prob = [None for _ in range(4)]
prob[0] = self.classifier_0(bn_x.cuda())
prob[1] = self.classifier_1(bn_x.cuda())
prob[2] = self.classifier_2(bn_x.cuda())
prob[3] = self.classifier_3(bn_x.cuda())
prob = torch.cat(prob, dim = 1)
# <-Split FC
return x, prob
def resnet18(**kwargs):
return ResNet(18, **kwargs)
def resnet34(**kwargs):
return ResNet(34, **kwargs)
def resnet50(**kwargs):
return ResNet(50, **kwargs)
def resnet101(**kwargs):
return ResNet(101, **kwargs)
def resnet152(**kwargs):
return ResNet(152, **kwargs)
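# Illustrative sketch (not part of the original module): Waveblock only acts in
# training mode, scaling all activations by 1.5 except one random horizontal band
# of roughly 30% of the feature-map height, which is left unscaled.
if __name__ == "__main__":
    import torch
    block = Waveblock()
    block.train()
    out = block(torch.ones(1, 1, 10, 10))
    print(out.unique())   # expected values: 1.0 (inside the band) and 1.5 (elsewhere)
    block.eval()
    print(block(torch.ones(1, 1, 10, 10)).unique())   # unchanged in eval mode: only 1.0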
|
1617768
|
from verifai.simulators.webots.webots_task import webots_task
from verifai.simulators.webots.client_webots import ClientWebots
from math import sin
from math import cos
import numpy as np
from math import atan2
from collections import namedtuple
import os
from dotmap import DotMap
import pickle
from shapely.geometry import Point, Polygon
try:
from controller import Supervisor
except ModuleNotFoundError:
import sys
sys.exit('This functionality requires webots to be installed')
# get distance from x, y to point i
def getDist(data, xy):
return np.linalg.norm(np.array(data) - np.array(xy), axis=1)
# get two nearest points
Line = namedtuple('Line', ['x1', 'y1', 'x2', 'y2'])
def getLine(x, y, data):
dists = getDist(data=data, xy=[x, y])
dist_pos = np.argpartition(dists, 2)
i, j = dist_pos[0], dist_pos[1]
x1, y1 = data[min(i, j)][0], data[min(i, j)][1]
x2, y2 = data[max(i, j)][0], data[max(i, j)][1]
return Line(x1=x1, y1=y1, x2=x2, y2=y2)
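# Illustrative sketch (not invoked by the controller): with the hypothetical waypoints
# below and the query point (1.2, 0.1), getLine returns the segment between the two
# nearest waypoints, ordered by their index in the data: Line(x1=1, y1=0, x2=2, y2=0).
def _example_get_line():
    waypoints = [(0, 0), (1, 0), (2, 0), (3, 0)]
    print(getDist(data=waypoints, xy=[1.2, 0.1]))
    print(getLine(1.2, 0.1, waypoints))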
curr_dir = os.getcwd()
par_dir = curr_dir
# Defining the task as a webots task
class scenic_intersection(webots_task):
def __init__(self, N_SIM_STEPS, supervisor):
super().__init__(N_SIM_STEPS, supervisor)
def use_sample(self, sample):
        print('Sample received')
print(sample)
self.data = sample.params.turnWaypoints
car_id = 0
for obj in sample.objects:
if obj.webotsType == 'TurningCar':
object = self.supervisor.getFromDef('TurningCar')
turning_car = object
offset = -20
if obj.webotsType == 'Ego':
object = self.supervisor.getFromDef('EgoCar')
ego_car = object
offset = 30
if obj.webotsType == 'ToyotaPrius':
object = self.supervisor.getFromDef('waiting_car'+str(car_id+3))
car_id +=1
offset = 0
obj_pos = object.getField('translation').getSFVec3f()
pos = obj.position
object.getField('translation').setSFVec3f([pos[0], obj_pos[1], pos[1]+offset])
rot = [0, 1, 0, -obj.heading]
object.getField('rotation').setSFRotation(rot)
return ego_car, turning_car
def run_task(self, sample):
ego_car, turning_car = self.use_sample(sample)
car_length = 2.995
intersection = [(-28, 25), (-28, 17), (-7, 17), (-7, 25)]
destination = max([y for (x, y) in intersection])
intersection_safe = 1
for _ in range(self.N_SIM_STEPS):
self.supervisor.step(1)
# get car position
turning_x = turning_car.getPosition()[0]
turning_y = turning_car.getPosition()[2]
turning_th = atan2(turning_car.getOrientation()[2], turning_car.getOrientation()[0])
# get position of front of the car
turning_y = turning_y + car_length * cos(turning_th)
turning_x = turning_x + car_length * sin(turning_th)
# find nearest segment
line = getLine(x=turning_x, y=turning_y, data=self.data)
# send data to the controller
write_data = DotMap()
write_data.turning.theta = turning_th
write_data.turning.x = turning_x
write_data.turning.y = turning_y
write_data.line.x1 = line.x1
write_data.line.y1 = line.y1
write_data.line.x2 = line.x2
write_data.line.y2 = line.y2
pickle.dump(write_data, open(par_dir + '/data_turning.pickle', 'wb'))
ego_x = ego_car.getPosition()[0]
ego_y = ego_car.getPosition()[2]
ego_th = atan2(ego_car.getOrientation()[2], ego_car.getOrientation()[0])
ego_y = ego_y + car_length * cos(ego_th)
ego_x = ego_x + car_length * sin(ego_th)
intersection_buffer = 2.5 # This decides how far you are from the intersection to provide the warning
# If 0 then you provide warning when you just enter the intersection
intersection_polygon = Polygon(intersection)
intersection_polygon = intersection_polygon.buffer(intersection_buffer + 0.5)
intersection_safe = not Point(turning_x, turning_y).within(intersection_polygon)
write_data = DotMap()
write_data.turning.theta = turning_th
write_data.turning.x = turning_x
write_data.turning.y = turning_y
write_data.braking_info.safe = intersection_safe
write_data.braking_info.dist = ego_y - destination
pickle.dump(write_data, open(par_dir + '/data_ego.pickle', 'wb'))
return
PORT = 8888
BUFSIZE = 4096
N_SIM_STEPS = 300
supervisor = Supervisor()
simulation_data = DotMap()
simulation_data.port = PORT
simulation_data.bufsize = BUFSIZE
simulation_data.task = scenic_intersection(N_SIM_STEPS=N_SIM_STEPS, supervisor=supervisor)
client_task = ClientWebots(simulation_data)
if not client_task.run_client():
print("End of accident scenario generation")
supervisor.simulationQuit(True)
|
1617777
|
from enum import Enum
class LogType(Enum):
Info = 0
Success = 1
Fail = 2
Error = 3
Subtle = 4
Process = 5
|
1617791
|
from numpy import loadtxt, ndarray, min, max
from sklearn.metrics import adjusted_mutual_info_score, adjusted_rand_score, fowlkes_mallows_score
from SNNDPC import SNNDPC
if __name__ == '__main__':
# Parameter
# --------------------------------------------------------------------------------
# pathData = "../data/Flame.tsv"
# k = 5
# nc = 2
pathData = "../data/Aggregation.tsv"
k = 15
nc = 7
# Run
# --------------------------------------------------------------------------------
data: ndarray = loadtxt(pathData)
label = data[:, -1]
data: ndarray = data[:, :-1]
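    # Min-max normalise every feature column to [0, 1] before clustering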
data = (data - min(data, axis=0)) / (max(data, axis=0) - min(data, axis=0))
centroid, assignment = SNNDPC(k, nc, data)
print(f"Centroids = {centroid.tolist()}")
print(f"AMI = {adjusted_mutual_info_score(label, assignment):.4f}")
print(f"ARI = {adjusted_rand_score(label, assignment):.4f}")
print(f"FMI = {fowlkes_mallows_score(label, assignment):.4f}")
|
1617818
|
import numpy as np
import matplotlib.pyplot as plt
from matplotlib.colors import LinearSegmentedColormap
# Lots of different places that widgets could come from...
try:
from ipywidgets import interact, FloatSlider, IntSlider
except ImportError:
import warnings
# ignore ShimWarning raised by IPython, see GH #892
with warnings.catch_warnings():
warnings.simplefilter("ignore")
try:
from IPython.html.widgets import interact, FloatSlider, IntSlider
except ImportError:
try:
from IPython.html.widgets import (interact,
FloatSliderWidget,
IntSliderWidget)
FloatSlider = FloatSliderWidget
IntSlider = IntSliderWidget
except ImportError:
pass
from .miscplot import palplot
from .palettes import (color_palette, dark_palette, light_palette,
diverging_palette, cubehelix_palette)
__all__ = ["choose_colorbrewer_palette", "choose_cubehelix_palette",
"choose_dark_palette", "choose_light_palette",
"choose_diverging_palette"]
def _init_mutable_colormap():
"""Create a matplotlib colormap that will be updated by the widgets."""
greys = color_palette("Greys", 256)
cmap = LinearSegmentedColormap.from_list("interactive", greys)
cmap._init()
cmap._set_extremes()
return cmap
def _update_lut(cmap, colors):
"""Change the LUT values in a matplotlib colormap in-place."""
cmap._lut[:256] = colors
cmap._set_extremes()
def _show_cmap(cmap):
"""Show a continuous matplotlib colormap."""
from .rcmod import axes_style # Avoid circular import
with axes_style("white"):
f, ax = plt.subplots(figsize=(8.25, .75))
ax.set(xticks=[], yticks=[])
x = np.linspace(0, 1, 256)[np.newaxis, :]
ax.pcolormesh(x, cmap=cmap)
def choose_colorbrewer_palette(data_type, as_cmap=False):
"""Select a palette from the ColorBrewer set.
These palettes are built into matplotlib and can be used by name in
many seaborn functions, or by passing the object returned by this function.
Parameters
----------
data_type : {'sequential', 'diverging', 'qualitative'}
This describes the kind of data you want to visualize. See the seaborn
color palette docs for more information about how to choose this value.
    Note that you can pass substrings (e.g. 'q' for 'qualitative').
as_cmap : bool
If True, the return value is a matplotlib colormap rather than a
list of discrete colors.
Returns
-------
pal or cmap : list of colors or matplotlib colormap
Object that can be passed to plotting functions.
See Also
--------
dark_palette : Create a sequential palette with dark low values.
light_palette : Create a sequential palette with bright low values.
diverging_palette : Create a diverging palette from selected colors.
cubehelix_palette : Create a sequential palette or colormap using the
cubehelix system.
"""
if data_type.startswith("q") and as_cmap:
raise ValueError("Qualitative palettes cannot be colormaps.")
pal = []
if as_cmap:
cmap = _init_mutable_colormap()
if data_type.startswith("s"):
opts = ["Greys", "Reds", "Greens", "Blues", "Oranges", "Purples",
"BuGn", "BuPu", "GnBu", "OrRd", "PuBu", "PuRd", "RdPu", "YlGn",
"PuBuGn", "YlGnBu", "YlOrBr", "YlOrRd"]
variants = ["regular", "reverse", "dark"]
@interact
def choose_sequential(name=opts, n=(2, 18),
desat=FloatSlider(min=0, max=1, value=1),
variant=variants):
if variant == "reverse":
name += "_r"
elif variant == "dark":
name += "_d"
if as_cmap:
colors = color_palette(name, 256, desat)
_update_lut(cmap, np.c_[colors, np.ones(256)])
_show_cmap(cmap)
else:
pal[:] = color_palette(name, n, desat)
palplot(pal)
elif data_type.startswith("d"):
opts = ["RdBu", "RdGy", "PRGn", "PiYG", "BrBG",
"RdYlBu", "RdYlGn", "Spectral"]
variants = ["regular", "reverse"]
@interact
def choose_diverging(name=opts, n=(2, 16),
desat=FloatSlider(min=0, max=1, value=1),
variant=variants):
if variant == "reverse":
name += "_r"
if as_cmap:
colors = color_palette(name, 256, desat)
_update_lut(cmap, np.c_[colors, np.ones(256)])
_show_cmap(cmap)
else:
pal[:] = color_palette(name, n, desat)
palplot(pal)
elif data_type.startswith("q"):
opts = ["Set1", "Set2", "Set3", "Paired", "Accent",
"Pastel1", "Pastel2", "Dark2"]
@interact
def choose_qualitative(name=opts, n=(2, 16),
desat=FloatSlider(min=0, max=1, value=1)):
pal[:] = color_palette(name, n, desat)
palplot(pal)
if as_cmap:
return cmap
return pal
def choose_dark_palette(input="husl", as_cmap=False):
"""Launch an interactive widget to create a dark sequential palette.
This corresponds with the :func:`dark_palette` function. This kind
of palette is good for data that range between relatively uninteresting
low values and interesting high values.
Requires IPython 2+ and must be used in the notebook.
Parameters
----------
input : {'husl', 'hls', 'rgb'}
Color space for defining the seed value. Note that the default is
different than the default input for :func:`dark_palette`.
as_cmap : bool
If True, the return value is a matplotlib colormap rather than a
list of discrete colors.
Returns
-------
pal or cmap : list of colors or matplotlib colormap
Object that can be passed to plotting functions.
See Also
--------
dark_palette : Create a sequential palette with dark low values.
light_palette : Create a sequential palette with bright low values.
cubehelix_palette : Create a sequential palette or colormap using the
cubehelix system.
"""
pal = []
if as_cmap:
cmap = _init_mutable_colormap()
if input == "rgb":
@interact
def choose_dark_palette_rgb(r=(0., 1.),
g=(0., 1.),
b=(0., 1.),
n=(3, 17)):
color = r, g, b
if as_cmap:
colors = dark_palette(color, 256, input="rgb")
_update_lut(cmap, colors)
_show_cmap(cmap)
else:
pal[:] = dark_palette(color, n, input="rgb")
palplot(pal)
elif input == "hls":
@interact
def choose_dark_palette_hls(h=(0., 1.),
l=(0., 1.), # noqa: E741
s=(0., 1.),
n=(3, 17)):
color = h, l, s
if as_cmap:
colors = dark_palette(color, 256, input="hls")
_update_lut(cmap, colors)
_show_cmap(cmap)
else:
pal[:] = dark_palette(color, n, input="hls")
palplot(pal)
elif input == "husl":
@interact
def choose_dark_palette_husl(h=(0, 359),
s=(0, 99),
l=(0, 99), # noqa: E741
n=(3, 17)):
color = h, s, l
if as_cmap:
colors = dark_palette(color, 256, input="husl")
_update_lut(cmap, colors)
_show_cmap(cmap)
else:
pal[:] = dark_palette(color, n, input="husl")
palplot(pal)
if as_cmap:
return cmap
return pal
def choose_light_palette(input="husl", as_cmap=False):
"""Launch an interactive widget to create a light sequential palette.
This corresponds with the :func:`light_palette` function. This kind
of palette is good for data that range between relatively uninteresting
low values and interesting high values.
Requires IPython 2+ and must be used in the notebook.
Parameters
----------
input : {'husl', 'hls', 'rgb'}
Color space for defining the seed value. Note that the default is
different than the default input for :func:`light_palette`.
as_cmap : bool
If True, the return value is a matplotlib colormap rather than a
list of discrete colors.
Returns
-------
pal or cmap : list of colors or matplotlib colormap
Object that can be passed to plotting functions.
See Also
--------
light_palette : Create a sequential palette with bright low values.
dark_palette : Create a sequential palette with dark low values.
cubehelix_palette : Create a sequential palette or colormap using the
cubehelix system.
"""
pal = []
if as_cmap:
cmap = _init_mutable_colormap()
if input == "rgb":
@interact
def choose_light_palette_rgb(r=(0., 1.),
g=(0., 1.),
b=(0., 1.),
n=(3, 17)):
color = r, g, b
if as_cmap:
colors = light_palette(color, 256, input="rgb")
_update_lut(cmap, colors)
_show_cmap(cmap)
else:
pal[:] = light_palette(color, n, input="rgb")
palplot(pal)
elif input == "hls":
@interact
def choose_light_palette_hls(h=(0., 1.),
l=(0., 1.), # noqa: E741
s=(0., 1.),
n=(3, 17)):
color = h, l, s
if as_cmap:
colors = light_palette(color, 256, input="hls")
_update_lut(cmap, colors)
_show_cmap(cmap)
else:
pal[:] = light_palette(color, n, input="hls")
palplot(pal)
elif input == "husl":
@interact
def choose_light_palette_husl(h=(0, 359),
s=(0, 99),
l=(0, 99), # noqa: E741
n=(3, 17)):
color = h, s, l
if as_cmap:
colors = light_palette(color, 256, input="husl")
_update_lut(cmap, colors)
_show_cmap(cmap)
else:
pal[:] = light_palette(color, n, input="husl")
palplot(pal)
if as_cmap:
return cmap
return pal
def choose_diverging_palette(as_cmap=False):
"""Launch an interactive widget to choose a diverging color palette.
This corresponds with the :func:`diverging_palette` function. This kind
of palette is good for data that range between interesting low values
and interesting high values with a meaningful midpoint. (For example,
change scores relative to some baseline value).
Requires IPython 2+ and must be used in the notebook.
Parameters
----------
as_cmap : bool
If True, the return value is a matplotlib colormap rather than a
list of discrete colors.
Returns
-------
pal or cmap : list of colors or matplotlib colormap
Object that can be passed to plotting functions.
See Also
--------
diverging_palette : Create a diverging color palette or colormap.
choose_colorbrewer_palette : Interactively choose palettes from the
colorbrewer set, including diverging palettes.
"""
pal = []
if as_cmap:
cmap = _init_mutable_colormap()
@interact
def choose_diverging_palette(
h_neg=IntSlider(min=0,
max=359,
value=220),
h_pos=IntSlider(min=0,
max=359,
value=10),
s=IntSlider(min=0, max=99, value=74),
l=IntSlider(min=0, max=99, value=50), # noqa: E741
sep=IntSlider(min=1, max=50, value=10),
n=(2, 16),
center=["light", "dark"]
):
if as_cmap:
colors = diverging_palette(h_neg, h_pos, s, l, sep, 256, center)
_update_lut(cmap, colors)
_show_cmap(cmap)
else:
pal[:] = diverging_palette(h_neg, h_pos, s, l, sep, n, center)
palplot(pal)
if as_cmap:
return cmap
return pal
def choose_cubehelix_palette(as_cmap=False):
"""Launch an interactive widget to create a sequential cubehelix palette.
This corresponds with the :func:`cubehelix_palette` function. This kind
of palette is good for data that range between relatively uninteresting
low values and interesting high values. The cubehelix system allows the
palette to have more hue variance across the range, which can be helpful
for distinguishing a wider range of values.
Requires IPython 2+ and must be used in the notebook.
Parameters
----------
as_cmap : bool
If True, the return value is a matplotlib colormap rather than a
list of discrete colors.
Returns
-------
pal or cmap : list of colors or matplotlib colormap
Object that can be passed to plotting functions.
See Also
--------
cubehelix_palette : Create a sequential palette or colormap using the
cubehelix system.
"""
pal = []
if as_cmap:
cmap = _init_mutable_colormap()
@interact
def choose_cubehelix(n_colors=IntSlider(min=2, max=16, value=9),
start=FloatSlider(min=0, max=3, value=0),
rot=FloatSlider(min=-1, max=1, value=.4),
gamma=FloatSlider(min=0, max=5, value=1),
hue=FloatSlider(min=0, max=1, value=.8),
light=FloatSlider(min=0, max=1, value=.85),
dark=FloatSlider(min=0, max=1, value=.15),
reverse=False):
if as_cmap:
colors = cubehelix_palette(256, start, rot, gamma,
hue, light, dark, reverse)
_update_lut(cmap, np.c_[colors, np.ones(256)])
_show_cmap(cmap)
else:
pal[:] = cubehelix_palette(n_colors, start, rot, gamma,
hue, light, dark, reverse)
palplot(pal)
if as_cmap:
return cmap
return pal
|
1617845
|
import argparse
import os
import torch
import yaml
import numpy as np
import torch.nn.functional as F
import config_folder as cf
from data_loaders.Chairs import Chairs
from data_loaders.kitti import KITTI
from data_loaders.sintel import Sintel
from model import MaskFlownet, MaskFlownet_S, Upsample, EpeLossWithMask
from skimage.io import imread
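# Subtract the per-channel mean colour computed jointly over both frames;
# the mean is returned alongside the centred images.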
def centralize(img1, img2):
rgb_mean = torch.cat((img1, img2), 2)
rgb_mean = rgb_mean.view(rgb_mean.shape[0], 3, -1).mean(2)
rgb_mean = rgb_mean.view(rgb_mean.shape[0], 3, 1, 1)
return img1 - rgb_mean, img2-rgb_mean, rgb_mean
parser = argparse.ArgumentParser()
parser.add_argument('config', type=str, nargs='?', default=None)
parser.add_argument('--dataset_cfg', type=str, default='chairs.yaml')
parser.add_argument('-c', '--checkpoint', type=str, default=None,
help='model checkpoint to load')
parser.add_argument('-b', '--batch', type=int, default=1,
help='Batch Size')
parser.add_argument('-f', '--root_folder', type=str, default=None,
help='Root folder of KITTI')
parser.add_argument('--resize', type=str, default='')
args = parser.parse_args()
resize = (int(args.resize.split(',')[0]), int(args.resize.split(',')[1])) if args.resize else None
num_workers = 2
with open(os.path.join('config_folder', args.dataset_cfg)) as f:
    config = cf.Reader(yaml.safe_load(f))
with open(os.path.join('config_folder', args.config)) as f:
    config_model = cf.Reader(yaml.safe_load(f))
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
net = eval(config_model.value['network']['class'])(config)
checkpoint = torch.load(os.path.join('weights', args.checkpoint))
net.load_state_dict(checkpoint)
net = net.to(device)
im0 = torch.from_numpy(imread("example/0img0.ppm")).float()[None] / 255
im1 = torch.from_numpy(imread("example/0img1.ppm")).float()[None] / 255
print(im0.max())
with torch.no_grad():
im0 = im0.permute(0, 3, 1, 2)
im1 = im1.permute(0, 3, 1, 2)
im0, im1, _ = centralize(im0, im1)
shape = im0.shape
pad_h = (64 - shape[2] % 64) % 64
pad_w = (64 - shape[3] % 64) % 64
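    # Resize the inputs up to the next multiple of 64 (assumed stride of the coarsest
    # feature level); the predicted flow is resized back to the original shape below.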
if pad_h != 0 or pad_w != 0:
im0 = F.interpolate(im0, size=[shape[2] + pad_h, shape[3] + pad_w], mode='bilinear')
im1 = F.interpolate(im1, size=[shape[2] + pad_h, shape[3] + pad_w], mode='bilinear')
im0 = im0.to(device)
im1 = im1.to(device)
pred, flows, warpeds = net(im0, im1)
up_flow = Upsample(pred[-1], 4)
up_occ_mask = Upsample(flows[0], 4)
if pad_h != 0 or pad_w != 0:
up_flow = F.interpolate(up_flow, size=[shape[2], shape[3]], mode='bilinear') * \
torch.tensor([shape[d] / up_flow.shape[d] for d in (2, 3)], device=device).view(1, 2, 1, 1)
up_occ_mask = F.interpolate(up_occ_mask, size=[shape[2], shape[3]], mode='bilinear')
print('done')
|
1617919
|
import os,argparse,time
import numpy as np
from omegaconf import OmegaConf
import torch
import torch.backends.cudnn as cudnn
import torch.utils.data
import utils
import wandb
tstart=time.time()
# Arguments
parser = argparse.ArgumentParser(description='RRR')
parser.add_argument('--config', type=str, default='./configs/config_cub_rrr.yaml')
parser.add_argument('--name', type=str, default='')
parser.add_argument('overrides', nargs='*', help="Any key=svalue arguments to override config values "
"(use dots for.nested=overrides)")
flags = parser.parse_args()
overrides = OmegaConf.from_cli(flags.overrides)
cfg = OmegaConf.load(flags.config)
args = OmegaConf.merge(cfg, overrides)
########################################################################################################################
# Args -- Data generator
from dataloaders import datagenerator
# Args -- Approach
from approaches.rrr import RRR as approach
# Args -- Network
if args.experiment.dataset == 'cifar100':
from networks import resnet_cifar as network
# args.architecture.target_layer = "features.layer4.1.conv2" # resnet_cifar
args.architecture.target_layer = "m_8_0.3" # resnet used in itaml
else:
from networks import resnet as network
if args.architecture.backbone == 'resnet18':
args.architecture.target_layer = "features.7.1.conv2"
elif args.architecture.backbone == 'densenet121':
args.architecture.target_layer = "features.0.denseblock4.denselayer16.conv2"
elif args.architecture.backbone == 'alexnet':
args.architecture.target_layer = "features.0.10"
elif args.architecture.backbone == 'vgg11':
args.architecture.target_layer = "features.0.18"
elif args.architecture.backbone == 'squeezenet1_1':
args.architecture.target_layer = "features.0.12.expand3x3"
elif args.architecture.backbone == 'googlenet':
args.architecture.target_layer = 'features.15.branch4.1.conv'
########################################################################################################################
def run(args, run_id):
np.random.seed(args.seed)
torch.manual_seed(args.seed)
if torch.cuda.is_available():
torch.cuda.manual_seed(args.seed)
torch.backends.cudnn.deterministic = True
torch.backends.cudnn.benchmark = False
# Data loader
print('Instantiate data generators and model...')
dataset = datagenerator.DatasetGen(args)
args.taskcla, args.inputsize = dataset.taskcla, dataset.inputsize
args.num_classes = dataset.num_classes
# Network
net = network.Net(args)
net.print_model_size()
if args.device.multi:
net = network._CustomDataParallel(net)
net = net.to(device=args.device.name)
for n,p in net.named_parameters():
print (n, p.size())
if args.device.multi:
args.architecture.target_layer = 'module.'+ args.architecture.target_layer
# Approach
appr = approach(net, args, dataset=dataset, network=network)
# Loop tasks
perf =np.zeros((len(args.taskcla),len(args.taskcla)),dtype=np.float32)
avg_rii = np.zeros((args.experiment.ntasks, 2))
for t,ncla in args.taskcla:
# Train and test
appr.train(t, perf)
def main(args):
utils.print_time(start=True)
args.path.checkpoint, args.wandb.notes = utils.make_directories(args)
if args.wandb.log:
wandb.init(project=args.wandb.project,name=args.wandb.notes,
config=args.config,notes=args.wandb.notes,
allow_val_change=True)
utils.save_code(args)
print('=' * 100)
print('Arguments =')
for arg in vars(args):
print('\t' + arg + ':', getattr(args, arg))
print('=' * 100)
for n in range(args.train.num_runs):
args.seed = n+1
args.experiment.memory_budget = int(args.experiment.memory_budget)
args.path.output = 'Run_{}_{}.txt'.format(n+1, args.wandb.notes)
if args.wandb.log:
wandb.config.update(args, allow_val_change=True)
print (">"*30, "Run #", n+1)
run(args, n)
print ("All Done! ")
print('[Elapsed time = {:.1f} min - {:0.1f} hours]'.format((time.time()-tstart)/(60), (time.time()-tstart)/(3600)))
utils.print_time(start=False)
#######################################################################################################################
if __name__ == '__main__':
main(args)
|
1617968
|
import weakref
from yggdrasil.metaschema import MetaschemaTypeError
from yggdrasil.metaschema.properties.MetaschemaProperty import (
MetaschemaProperty)
from yggdrasil.metaschema.properties.JSONArrayMetaschemaProperties import (
ItemsMetaschemaProperty)
class ArgsMetaschemaProperty(MetaschemaProperty):
r"""Property class for 'args' property."""
name = 'args'
schema = {'description': ('Arguments required to recreate a class instance.'),
'type': 'array'}
_instance_dict_attr = ['input_arguments', 'input_args']
@classmethod
def instance2args(cls, instance):
r"""Get input arguments from a class instance.
Args:
instance (object): Instance of a Python class.
Returns:
dict: Input arguments for re-creating the instance.
"""
out = None
for k in cls._instance_dict_attr:
if out is not None:
break
if hasattr(instance, k):
out = getattr(instance, k)
elif hasattr(instance, 'get_' + k):
out = getattr(instance, 'get_' + k)()
elif hasattr(instance, '_' + k):
out = getattr(instance, '_' + k)
if isinstance(out, (list, tuple)):
out_real = []
for x in out:
if isinstance(x, weakref.ReferenceType):
out_real.append(x())
else:
out_real.append(x)
return out_real
elif isinstance(out, dict):
out_real = {}
for k, v in out.items():
if isinstance(v, weakref.ReferenceType):
out_real[k] = v()
else:
out_real[k] = v
return out_real
else:
raise MetaschemaTypeError('Could not locate dictionary of arguments.')
@classmethod
def encode(cls, instance, typedef=None):
r"""Encoder for the 'args' property."""
typedef_args = None
# if isinstance(typedef, dict) and ('args' in typedef):
# typedef_args = typedef['args']
args = cls.instance2args(instance)
return ItemsMetaschemaProperty.encode(args, typedef_args)
@classmethod
def compare(cls, *args, **kwargs):
r"""Comparison method for 'args' container property."""
for e in ItemsMetaschemaProperty.compare(*args, **kwargs):
yield e
|
1618004
|
from requests import get
from sqlalchemy import ForeignKey, Integer
from eNMS.database import db
from eNMS.forms import ServiceForm
from eNMS.fields import HiddenField
from eNMS.models.automation import Service
from eNMS.variables import vs
class SwissArmyKnifeService(Service):
__tablename__ = "swiss_army_knife_service"
pretty_name = "<NAME>"
id = db.Column(Integer, ForeignKey("service.id"), primary_key=True)
__mapper_args__ = {"polymorphic_identity": "swiss_army_knife_service"}
def job(self, *args, **kwargs):
return getattr(self, self.scoped_name)(*args, **kwargs)
def Start(self, *args, **kwargs): # noqa: N802
return {"success": True}
def End(self, *args, **kwargs): # noqa: N802
return {"success": True}
def Placeholder(self, *args, **kwargs): # noqa: N802
return {"success": True}
def cluster_monitoring(self, run):
protocol = vs.settings["cluster"]["scan_protocol"]
for instance in db.fetch_all("instance"):
db.factory(
"instance",
**get(
f"{protocol}://{instance.ip_address}/rest/is_alive",
timeout=vs.settings["cluster"]["scan_timeout"],
).json(),
)
return {"success": True}
def process_payload1(self, run, device):
get_facts = run.get_result("NAPALM: Get Facts", device.name)
get_interfaces = run.get_result("NAPALM: Get interfaces", device.name)
uptime_less_than_50000 = get_facts["result"]["get_facts"]["uptime"] < 50000
        mgmt1_is_up = get_interfaces["result"]["get_interfaces"]["Management1"]["is_up"]
        return {
            "success": True,
            "uptime_less_5000": uptime_less_than_50000,
            "Management1 is UP": mgmt1_is_up,
}
class SwissArmyKnifeForm(ServiceForm):
form_type = HiddenField(default="swiss_army_knife_service")
|
1618010
|
from datetime import timedelta
from django.test import TestCase
from ..models import GameSession
from ..resources import GamesPlayedResource
from .factories import GameSessionFactory
class ExportTests(TestCase):
def test_export_correctly(self):
GameSessionFactory.create(game__name='Overwatch', duration=timedelta(hours=1))
GameSessionFactory.create(game__name='Overwatch', duration=timedelta(minutes=30))
GameSessionFactory.create(game__name='Battlefield 4', duration=timedelta(minutes=30))
GameSessionFactory.create(game__name='Battlefield 4', duration=timedelta(minutes=30))
GameSessionFactory.create(game__name='Battlefield 4', duration=timedelta(hours=1))
queryset = GameSession.objects.get_game_durations()
resource = GamesPlayedResource()
dataset = resource.export(queryset)
self.assertEqual(dataset.headers, ['game', 'time (hours)', 'num_players'])
self.assertEqual(dataset[0], ('Battlefield 4', 2, 3))
self.assertEqual(dataset[1], ('Overwatch', 1.5, 2))
|
1618069
|
import sqlite3
import os
MAX_DEPTH_CHAIN = 10
P_INSTANCE_OF = 31
P_SUBCLASS = 279
MAX_ITEMS_CACHE = 100000
conn = None
entity_cache = {}
chain_cache = {}
DB_DEFAULT_PATH = os.path.abspath(__file__ + '/../../data_spacy_entity_linker/wikidb_filtered.db')
wikidata_instance = None
def get_wikidata_instance():
global wikidata_instance
if wikidata_instance is None:
wikidata_instance = WikidataQueryController()
return wikidata_instance
class WikidataQueryController:
def __init__(self):
self.conn = None
self.cache = {
"entity": {},
"chain": {},
"name": {}
}
self.init_database_connection()
def _get_cached_value(self, cache_type, key):
return self.cache[cache_type][key]
def _is_cached(self, cache_type, key):
return key in self.cache[cache_type]
def _add_to_cache(self, cache_type, key, value):
if len(self.cache[cache_type]) < MAX_ITEMS_CACHE:
self.cache[cache_type][key] = value
def init_database_connection(self, path=DB_DEFAULT_PATH):
self.conn = sqlite3.connect(path)
def clear_cache(self):
self.cache["entity"].clear()
self.cache["chain"].clear()
self.cache["name"].clear()
def get_entities_from_alias(self, alias):
c = self.conn.cursor()
if self._is_cached("entity", alias):
return self._get_cached_value("entity", alias).copy()
query_alias = """SELECT j.item_id,j.en_label, j.en_description,j.views,j.inlinks,a.en_alias from aliases as a
LEFT JOIN joined as j ON a.item_id = j.item_id
WHERE a.en_alias_lowercase = ? and j.item_id NOT NULL"""
c.execute(query_alias, [alias.lower()])
fetched_rows = c.fetchall()
self._add_to_cache("entity", alias, fetched_rows)
return fetched_rows
def get_instances_of(self, item_id, properties=[P_INSTANCE_OF, P_SUBCLASS], count=1000):
query = "SELECT source_item_id from statements where target_item_id={} and edge_property_id IN ({}) LIMIT {}".format(
item_id, ",".join([str(prop) for prop in properties]), count)
c = self.conn.cursor()
c.execute(query)
res = c.fetchall()
return [e[0] for e in res]
def get_entity_name(self, item_id):
if self._is_cached("name", item_id):
return self._get_cached_value("name", item_id)
c = self.conn.cursor()
query = "SELECT en_label from joined WHERE item_id=?"
c.execute(query, [item_id])
res = c.fetchone()
if res and len(res):
            if res[0] is None:
self._add_to_cache("name", item_id, 'no label')
else:
self._add_to_cache("name", item_id, res[0])
else:
self._add_to_cache("name", item_id, '<none>')
return self._get_cached_value("name", item_id)
def get_entity(self, item_id):
c = self.conn.cursor()
query = "SELECT j.item_id,j.en_label,j.en_description,j.views,j.inlinks from joined as j " \
"WHERE j.item_id=={}".format(item_id)
res = c.execute(query)
return res.fetchone()
def get_children(self, item_id, limit=100):
c = self.conn.cursor()
query = "SELECT j.item_id,j.en_label,j.en_description,j.views,j.inlinks from joined as j " \
"JOIN statements as s on j.item_id=s.source_item_id " \
"WHERE s.target_item_id={} and s.edge_property_id IN (279,31) LIMIT {}".format(item_id, limit)
res = c.execute(query)
return res.fetchall()
def get_parents(self, item_id, limit=100):
c = self.conn.cursor()
query = "SELECT j.item_id,j.en_label,j.en_description,j.views,j.inlinks from joined as j " \
"JOIN statements as s on j.item_id=s.target_item_id " \
"WHERE s.source_item_id={} and s.edge_property_id IN (279,31) LIMIT {}".format(item_id, limit)
res = c.execute(query)
return res.fetchall()
def get_categories(self, item_id, max_depth=10):
chain = []
edges = []
self._append_chain_elements(item_id, 0, chain, edges, max_depth, [P_INSTANCE_OF, P_SUBCLASS])
return [el[0] for el in chain]
def get_chain(self, item_id, max_depth=10, property=P_INSTANCE_OF):
chain = []
edges = []
self._append_chain_elements(item_id, 0, chain, edges, max_depth, property)
return chain
def get_recursive_edges(self, item_id):
chain = []
edges = []
        self._append_chain_elements(item_id, 0, chain, edges)
return edges
def _append_chain_elements(self, item_id, level=0, chain=[], edges=[], max_depth=10, property=P_INSTANCE_OF):
properties = property
if type(property) != list:
properties = [property]
if self._is_cached("chain", (item_id, max_depth)):
chain += self._get_cached_value("chain", (item_id, max_depth)).copy()
return
# prevent infinite recursion
if level >= max_depth:
return
c = self.conn.cursor()
query = "SELECT target_item_id,edge_property_id from statements where source_item_id={} and edge_property_id IN ({})".format(
item_id, ",".join([str(prop) for prop in properties]))
# set value for current item in order to prevent infinite recursion
self._add_to_cache("chain", (item_id, max_depth), [])
for target_item in c.execute(query):
chain_ids = [el[0] for el in chain]
if not (target_item[0] in chain_ids):
chain += [(target_item[0], level + 1)]
edges.append((item_id, target_item[0], target_item[1]))
self._append_chain_elements(target_item[0], level=level + 1, chain=chain, edges=edges,
max_depth=max_depth,
property=property)
self._add_to_cache("chain", (item_id, max_depth), chain)
if __name__ == '__main__':
queryInstance = WikidataQueryController()
queryInstance.init_database_connection()
print(queryInstance.get_categories(13191, max_depth=1))
print(queryInstance.get_categories(13191, max_depth=1))
|
1618092
|
from uiautomator import Device
device = Device()
resource_id_dict = {
'salary': 'com.hpbr.bosszhipin:id/tv_position_salary',
'company': 'com.hpbr.bosszhipin:id/tv_company_name',
'address': 'com.hpbr.bosszhipin:id/tv_location',
    'experience': 'com.hpbr.bosszhipin:id/tv_work_exp',
'degree': 'com.hpbr.bosszhipin:id/tv_degree'}
def crawl():
for job in device(resourceId='com.hpbr.bosszhipin:id/rl_section_1'):
result_dict = {}
job_info_box = job.child(resourceId='com.hpbr.bosszhipin:id/ll_position')
job_name = job_info_box.child(resourceId='com.hpbr.bosszhipin:id/tv_position_name')
if not job_name.exists:
return
result_dict['job_name'] = job_name.text
for key, resource_id in resource_id_dict.items():
value = job.child(resourceId=resource_id)
if not value.exists:
return
result_dict[key] = value.text
print(result_dict)
def scroll():
device(scrollable=True).scroll.vert.forward()
if __name__ == '__main__':
while True:
crawl()
scroll()
|
1618097
|
from diagrams import Diagram
from diagrams.onprem.client import Users
from diagrams.onprem.container import Docker
from diagrams.programming.framework import Spring
graph_attr = {
"fontsize": "20",
"bgcolor": "white" # transparent
}
with Diagram("", direction="LR", graph_attr=graph_attr, outformat="png", filename="testing_architecture"):
users = Users("Integration Tests")
backend = Spring("Application")
database = Docker("PostgreSQL database")
backend >> database
users >> backend
|
1618130
|
from aiogram.types import ChatMemberUpdated
from aiogram.dispatcher.filters import BaseFilter
"""
Note: Currently these filters don't check for group ownership transfer
Consider this a #TODO
"""
class AdminAdded(BaseFilter):
async def __call__(self, event: ChatMemberUpdated) -> bool:
return event.new_chat_member.status in ("creator", "administrator")
class AdminRemoved(BaseFilter):
async def __call__(self, event: ChatMemberUpdated) -> bool:
return event.old_chat_member.status in ("creator", "administrator") \
and event.new_chat_member.status not in ("creator", "administrator")
|
1618172
|
from pathlib import Path
import pandas as pd
from autofe.get_feature import get_baseline_total_data, train_and_evaluate
from xgboost import XGBRegressor
if __name__ == '__main__':
ROOTDIR = Path('./')
PROCESSED_DATA_DIR = ROOTDIR / 'data/processed_data/house/'
train_datafile = PROCESSED_DATA_DIR / 'train_data.csv'
test_datafile = PROCESSED_DATA_DIR / 'test_data.csv'
train_data = pd.read_csv(PROCESSED_DATA_DIR / 'train_data.csv')
test_data = pd.read_csv(PROCESSED_DATA_DIR / 'test_data.csv')
total_data = pd.concat([train_data, test_data]).reset_index(drop=True)
len_train = len(train_data)
target_name = 'SalePrice'
classfier = XGBRegressor()
"""xgboost baseline"""
# r2_score: 0.9016007093834981
total_data_base = get_baseline_total_data(total_data)
print('xgboost baseline:')
score = train_and_evaluate(
total_data_base,
target_name,
len_train,
classfier,
task_type='regression')
param = {
'subsample': 0.3653289846869241,
'colsample_bytree': 0.9545839225514163,
'learning_rate': 0.02858607644743819,
'max_depth': 24,
'n_estimators': 600
}
classfier = XGBRegressor(**param)
total_data_base = get_baseline_total_data(total_data)
score = train_and_evaluate(
total_data_base,
target_name,
len_train,
classfier,
task_type='regression')
|
1618188
|
from ..common.optim import SGD as optimizer
from ..common.coco_schedule import lr_multiplier_1x as lr_multiplier
from ..common.data.coco import dataloader
from ..common.models.mask_rcnn_fpn import model
from ..common.train import train
model.backbone.bottom_up.freeze_at = 2
train.init_checkpoint = "detectron2://ImageNetPretrained/MSRA/R-50.pkl"
|
1618202
|
from django.contrib.auth.models import AnonymousUser
class FakeSuperuserMiddleware(object):
def process_request(self, request):
request.user = AnonymousUser()
request.user.is_superuser = True
|
1618260
|
from AdminPage import AdminPage
# Set this to False if you want to allow everyone to access secure pages
# with no login required. This should instead come from a config file.
requireLogin = True
if not requireLogin:
class AdminSecurity(AdminPage):
def writeHTML(self):
session = self.session()
request = self.request()
# Are they logging out?
if request.hasField('logout'):
# They are logging out. Clear all session variables:
session.values().clear()
# write the page
AdminPage.writeHTML(self)
else:
class AdminSecurity(AdminPage):
def writeHTML(self):
session = self.session()
request = self.request()
trans = self.transaction()
app = self.application()
# Are they logging in?
if (request.hasField('login') and request.hasField('username')
and request.hasField('password')):
# They are logging in. Get login id and clear session:
loginid = session.value('loginid', None)
session.values().clear()
# Check if this is a valid user/password
username = request.field('username')
password = request.field('password')
if (self.isValidUserAndPassword(username, password)
and request.field('loginid', 'nologin') == loginid):
# Success; log them in and send the page:
session.setValue('authenticated_user_admin', username)
AdminPage.writeHTML(self)
else:
# Failed login attempt; have them try again:
request.fields()['extra'] = ('Login failed.'
' Please try again.'
' (And make sure cookies are enabled.)')
app.forward(trans, 'LoginPage')
return
# Are they logging out?
elif request.hasField('logout'):
# They are logging out. Clear all session variables:
session.values().clear()
request.fields()['extra'] = 'You have been logged out.'
app.forward(trans, 'LoginPage')
return
# Are they already logged in?
elif session.value('authenticated_user_admin', None):
# They are already logged in; write the HTML for this page:
AdminPage.writeHTML(self)
else:
# They need to log in.
app.forward(trans, 'LoginPage')
return
def isValidUserAndPassword(self, username, password):
# Replace this with a database lookup, or whatever you're using
# for authentication...
adminPassword = self.application().setting('AdminPassword')
return (username == 'admin'
and adminPassword and password == <PASSWORD>)
|
1618277
|
from garcon import log
class MockLogClient(log.GarconLogger):
"""Mock of an object for which we want to add a Garcon logger
"""
domain = 'test_domain'
# Valid execution context
execution_context = {
'execution.domain': 'dev',
'execution.run_id': '123abc=',
'execution.workflow_id': 'test-workflow-id'}
# Invalid execution context. Keys are incorrect
invalid_execution_context = {
'abcd.domain': 'dev',
'123.run_id': '123abc=',
'XYZ.workflow_id': 'test-workflow-id'}
def log_enabled_object():
"""Creates a mock object with log enabled
"""
mock = MockLogClient()
mock.set_log_context(execution_context)
return mock
def log_disabled_object():
"""Creates a mock object with no log
"""
mock = MockLogClient()
mock.set_log_context(invalid_execution_context)
return mock
|
1618305
|
from instapi.client_api.base import BaseClient
from ..conftest import random_string
def test_redirect_to_base(mocker):
mocker.patch("instagram_private_api.client.Client.__init__", return_value=None)
mock = mocker.patch("instagram_private_api.client.Client._call_api", return_value=None)
client = BaseClient(random_string(), random_string())
client._call_api(random_string())
mock.assert_called_once()
|
1618322
|
import base64
from datetime import timedelta
from django.conf import settings
from django.contrib.auth.base_user import BaseUserManager
from django.contrib.auth.hashers import make_password
from django.utils import timezone
from rest_framework.authtoken.models import Token
from rest_framework.authtoken.views import ObtainAuthToken
from rest_framework.permissions import AllowAny
from rest_framework.response import Response
from rest_framework.views import APIView
from pyplan.pyplan.auth.serializers.passwordResetSerializer import \
PasswordResetSerializer
from pyplan.pyplan.auth.serializers.sendPasswordResetEmailSerializer import \
SendPasswordResetEmailSerializer
# enums
from pyplan.pyplan.common.email.classes.eEmailType import eEmailType
# services
from pyplan.pyplan.common.email.service import EmailService
# serializers
from pyplan.pyplan.companies.serializers import CompanySerializer
# models
from pyplan.pyplan.users.models import User
class CustomAuthToken(ObtainAuthToken):
def post(self, request, *args, **kwargs):
serializer = self.serializer_class(
data=request.data,
context={'request': request}
)
serializer.is_valid(raise_exception=True)
user = serializer.validated_data['user']
token, created = Token.objects.get_or_create(user=user)
# token_expire_handler will check, if the token is expired it will generate new one
is_expired, token = _TokenHelpers.token_expire_handler(token)
companies = CompanySerializer(user.companies.filter(active=True), many=True).data
return Response({
'id': user.pk,
'token': token.key,
'email': user.email,
'companies': companies,
})
class SendPasswordResetEmail(APIView):
permission_classes = (AllowAny,)
def post(self, request, *args, **kwargs):
try:
serializer = SendPasswordResetEmailSerializer(data=request.data)
serializer.is_valid(raise_exception=True)
username = serializer.validated_data['username']
user = User.objects.get(username=username)
token, created = Token.objects.get_or_create(user=user)
# token_expire_handler will check, if the token is expired it will generate new one
is_expired, token = _TokenHelpers.token_expire_handler(token)
# A new token has been created, send a new email
key = f"id={username}&static={token.key}"
encriptedKey = str(base64.b64encode(key.encode()), "utf-8")
link = f"{serializer.validated_data['publicUrl']}#resetpassword/query={encriptedKey}"
email = {
'email_to': user.email,
'name_to': username,
'email_type': eEmailType.RESET_PASSWORD,
'context': {
"username": username,
"link": link
}
}
service = EmailService(request)
if service.addToQueue(email):
if service.sendQueue():
return Response(True)
return Response(False)
except Exception as ex:
return Response(True)
class PasswordReset(APIView):
permission_classes = (AllowAny,)
def post(self, request, *args, **kwargs):
try:
serializer = PasswordResetSerializer(data=request.data)
serializer.is_valid(raise_exception=True)
query = serializer.validated_data['query'][6:].encode()
            decryptedQuery = base64.decodebytes(query).decode()
            username = decryptedQuery[3:decryptedQuery.find('&')]
            static = decryptedQuery[decryptedQuery.find('&')+8:]
user = User.objects.get(username=username)
token = Token.objects.get(key=static)
if not _TokenHelpers.is_token_expired(token):
newPassword = BaseUserManager().make_random_password(8)
hasshedPassword = make_password(newPassword)
user.password = <PASSWORD>
user.save()
email = {
'email_to': user.email,
'name_to': username,
'email_type': eEmailType.CHANGED_PASSWORD,
'context': {
"username": username,
"password": <PASSWORD>
}
}
service = EmailService(request)
if service.addToQueue(email):
if service.sendQueue():
return Response(True)
return Response(False)
return Response(False)
except Exception as ex:
return Response(False)
class _TokenHelpers():
is_expired = False
# this return left time
@classmethod
def expires_in(cls, token):
time_elapsed = timezone.now() - token.created
left_time = timedelta(seconds=settings.TOKEN_EXPIRED_AFTER_SECONDS) - time_elapsed
return left_time
# token checker if token expired or not
@classmethod
def is_token_expired(cls, token):
return cls.expires_in(token) < timedelta(seconds=0)
# if token is expired new token will be established
# If token is expired then it will be removed
# and new one with different key will be created
@classmethod
def token_expire_handler(cls, token):
cls.is_expired = cls.is_token_expired(token)
if cls.is_expired:
token.delete()
token = Token.objects.create(user=token.user)
return cls.is_expired, token
|
1618343
|
class Plugin(object):
def onNew(self, view):
pass
def onClone(self, view):
pass
def onLoad(self, view):
pass
def onClose(self, view):
pass
def onPreSave(self, view):
pass
def onPostSave(self, view):
pass
def onModified(self, view):
pass
def onSelectionModified(self, view):
pass
def onActivated(self, view):
pass
def onProjectLoad(self, window):
pass
def onProjectClose(self, window):
pass
class ApplicationCommand(Plugin):
pass
class WindowCommand(Plugin):
pass
class TextCommand(Plugin):
def run(self, view, args):
pass
def isEnabled(self, view, args):
pass
|
1618352
|
import logging
from rest_framework.permissions import IsAuthenticated
from rest_framework.response import Response
from helium.common.views.views import HeliumAPIView
from helium.planner.models import Course, CourseSchedule
from helium.planner.schemas import CourseScheduleDetailSchema
from helium.planner.serializers.eventserializer import EventSerializer
from helium.planner.services import coursescheduleservice
__author__ = "<NAME>"
__copyright__ = "Copyright 2019, Helium Edu"
__version__ = "1.4.38"
logger = logging.getLogger(__name__)
class CourseScheduleAsEventsResourceView(HeliumAPIView):
"""
get:
Return all course schedules as a list of event instances.
"""
permission_classes = (IsAuthenticated,)
schema = CourseScheduleDetailSchema()
def get(self, request, *args, **kwargs):
user = self.request.user
course = Course.objects.get(pk=self.kwargs['course'])
course_schedules = CourseSchedule.objects.for_user(user.pk).for_course(course.pk)
events = coursescheduleservice.course_schedules_to_events(course, course_schedules)
serializer = EventSerializer(events, many=True)
return Response(serializer.data)
|
1618361
|
from __future__ import absolute_import, print_function, unicode_literals
try:
from unittest.mock import patch, MagicMock
except ImportError:
from mock import MagicMock, patch
from rest_framework_latex import renderers
from tests.testrenderers.tests.test_latex import RendererTestCase
class CallbackTestCase(RendererTestCase):
"""Test the LatexRenderer
"""
def setUp(self):
"""Reset the renderer for testing
"""
self.renderer = renderers.LatexRenderer()
self.view = _FakeView()
@patch('rest_framework_latex.renderers.shutil')
@patch('rest_framework_latex.renderers.settings')
@patch('rest_framework_latex.renderers.open')
@patch('rest_framework_latex.renderers.Popen')
def test_callbacks(self, Popen, open_util, settings, shutil):
"""Assume a render was successful.
"""
request = MagicMock()
response = MagicMock()
settings.LATEX_RESOURCES = 'output'
self.mock_output(open_util, 'Output')
Popen.return_value = self.get_proc()
self.renderer.render(
{'key': 'value'},
renderer_context={
'view': self.view,
'request': request,
'response': response,
}
)
self.assertTrue(self.view.called_pre_latex)
self.assertTrue(self.view.called_post_latex)
class _FakeView(object):
"""
"""
latex_name = 'output.tex'
def __init__(self):
self.called_pre_latex = False
self.called_post_latex = False
def pre_latex(self, t_dir, context):
assert context['key'] == 'value'
self.called_pre_latex = True
def post_latex(self, t_dir, context):
assert context['key'] == 'value'
self.called_post_latex = True
|
1618381
|
import os
import asyncio
import hashlib
import pathlib
import synapse.tests.utils as s_t_utils
import synapse.tools.pullfile as s_pullfile
class TestPullFile(s_t_utils.SynTest):
async def test_pullfile(self):
async with self.getTestAxon() as axon:
axonurl = axon.getLocalUrl()
testhash = hashlib.sha256(b'test').hexdigest()
visihash = hashlib.sha256(b'visi').hexdigest()
nonehash = hashlib.sha256(b'none').hexdigest()
testbash = hashlib.sha256(b'test').digest()
visibash = hashlib.sha256(b'visi').digest()
self.eq(((4, visibash), (4, testbash)), await axon.puts([b'visi', b'test']))
with self.getTestDir() as wdir:
outp = self.getTestOutp()
self.eq(0, await s_pullfile.main(['-a', axonurl,
'-o', wdir,
'-l', testhash,
'-l', nonehash], outp))
oldcwd = os.getcwd()
os.chdir(wdir)
self.eq(0, await s_pullfile.main(['-a', axonurl,
'-l', visihash], outp))
os.chdir(oldcwd)
with open(pathlib.Path(wdir, testhash), 'rb') as fd:
self.eq(b'test', fd.read())
with open(pathlib.Path(wdir, visihash), 'rb') as fd:
self.eq(b'visi', fd.read())
self.true(outp.expect(f'{nonehash} not in axon store'))
self.true(outp.expect(f'Fetching {testhash} to file'))
self.true(outp.expect(f'Fetching {visihash} to file'))
|
1618382
|
from torch import nn
import os
from src.encoder import encoder_dict
from src.neuralblox import models, training, training_fusion
from src.neuralblox import generation, generation_fusion
from src import data
from src.common import decide_total_volume_range, update_reso
def get_model(cfg, device=None, dataset=None, **kwargs):
''' Return the Occupancy Network model.
Args:
cfg (dict): imported yaml config
device (device): pytorch device
dataset (dataset): dataset
'''
decoder = cfg['model']['decoder']
encoder = cfg['model']['encoder']
dim = cfg['data']['dim']
c_dim = cfg['model']['c_dim']
decoder_kwargs = cfg['model']['decoder_kwargs']
encoder_kwargs = cfg['model']['encoder_kwargs']
padding = cfg['data']['padding']
# for pointcloud_crop
try:
encoder_kwargs['unit_size'] = cfg['data']['unit_size']
decoder_kwargs['unit_size'] = cfg['data']['unit_size']
    except KeyError:
pass
# local positional encoding
if 'local_coord' in cfg['model'].keys():
encoder_kwargs['local_coord'] = cfg['model']['local_coord']
decoder_kwargs['local_coord'] = cfg['model']['local_coord']
if 'pos_encoding' in cfg['model']:
encoder_kwargs['pos_encoding'] = cfg['model']['pos_encoding']
decoder_kwargs['pos_encoding'] = cfg['model']['pos_encoding']
# update the feature volume/plane resolution
if cfg['data']['input_type'] == 'pointcloud_crop':
fea_type = cfg['model']['encoder_kwargs']['plane_type']
if dataset is not None:
if (dataset.split == 'train') or (cfg['generation']['sliding_window']):
recep_field = 2**(cfg['model']['encoder_kwargs']['unet3d_kwargs']['num_levels'] + 2)
reso = cfg['data']['query_vol_size'] + recep_field - 1
if 'grid' in fea_type:
encoder_kwargs['grid_resolution'] = update_reso(reso, dataset.depth)
encoder_kwargs['grid_resolution'] = cfg['data']['grid_resolution']
if bool(set(fea_type) & set(['xz', 'xy', 'yz'])):
encoder_kwargs['plane_resolution'] = update_reso(reso, dataset.depth)
# if dataset.split == 'val': #TODO run validation in room level during training
else:
if 'grid' in fea_type:
encoder_kwargs['grid_resolution'] = dataset.total_reso
if bool(set(fea_type) & set(['xz', 'xy', 'yz'])):
encoder_kwargs['plane_resolution'] = dataset.total_reso
else:
encoder_kwargs['grid_resolution'] = cfg['data']['grid_resolution']
if cfg['data']['input_type'] == 'pointcloud_merge' or cfg['data']['input_type'] == 'pointcloud_sequential':
fea_type = cfg['model']['encoder_kwargs']['plane_type']
# calculate the volume boundary
query_vol_metric = cfg['data']['padding'] + 1
unit_size = cfg['data']['unit_size']
recep_field = 2 ** (cfg['model']['encoder_kwargs']['unet3d_kwargs']['num_levels'] + 2)
if 'unet' in cfg['model']['encoder_kwargs']:
depth = cfg['model']['encoder_kwargs']['unet_kwargs']['depth']
elif 'unet3d' in cfg['model']['encoder_kwargs']:
depth = cfg['model']['encoder_kwargs']['unet3d_kwargs']['num_levels']
vol_info = decide_total_volume_range(query_vol_metric, recep_field, unit_size, depth)
grid_reso = cfg['data']['grid_resolution']
input_vol_size = cfg['data']['input_vol']
query_vol_size = cfg['data']['query_vol']
        if 'grid' in fea_type:
            encoder_kwargs['grid_resolution'] = grid_reso
encoder_kwargs['input_crop_size'] = input_vol_size
encoder_kwargs['query_crop_size'] = query_vol_size
decoder = models.decoder_dict[decoder](
dim=dim, c_dim=c_dim, padding=padding,
**decoder_kwargs
)
if encoder == 'idx':
encoder = nn.Embedding(len(dataset), c_dim)
elif encoder is not None:
encoder = encoder_dict[encoder](
dim=dim, c_dim=c_dim, padding=padding,
**encoder_kwargs
)
else:
encoder = None
model = models.ConvolutionalOccupancyNetwork(
decoder, encoder, device=device
)
if cfg['data']['input_type'] == 'pointcloud_sequential':
return model, input_vol_size, query_vol_size, grid_reso
else:
return model
def get_trainer(model, optimizer, cfg, device, **kwargs):
''' Returns the trainer object.
Args:
model (nn.Module): the Occupancy Network model
optimizer (optimizer): pytorch optimizer object
cfg (dict): imported yaml config
device (device): pytorch device
'''
threshold = cfg['test']['threshold']
out_dir = cfg['training']['out_dir']
vis_dir = os.path.join(out_dir, 'vis')
input_type = cfg['data']['input_type']
trainer = training.Trainer(
model, optimizer,
device=device, input_type=input_type,
vis_dir=vis_dir, threshold=threshold,
eval_sample=cfg['training']['eval_sample'],
)
return trainer
def get_trainer_sequence(model, model_merge, optimizer, cfg, device, **kwargs):
''' Returns the trainer object.
Args:
model (nn.Module): the Occupancy Network model
optimizer (optimizer): pytorch optimizer object
cfg (dict): imported yaml config
device (device): pytorch device
'''
threshold = cfg['test']['threshold']
out_dir = cfg['training']['out_dir']
vis_dir = os.path.join(out_dir, 'vis')
input_type = cfg['data']['input_type']
query_n = cfg['data']['points_subsample']
unet_hdim = cfg['model']['encoder_kwargs']['unet3d_kwargs']['f_maps']
unet_depth = cfg['model']['encoder_kwargs']['unet3d_kwargs']['num_levels'] - 1
trainer = training_fusion.Trainer(
model, model_merge, optimizer,
device=device, input_type=input_type,
vis_dir=vis_dir, threshold=threshold,
eval_sample=cfg['training']['eval_sample'],
query_n = query_n,
unet_hdim = unet_hdim,
unet_depth = unet_depth
)
return trainer
def get_generator(model, cfg, device, **kwargs):
''' Returns the generator object.
Args:
model (nn.Module): Occupancy Network model
cfg (dict): imported yaml config
device (device): pytorch device
'''
if cfg['data']['input_type'] == 'pointcloud_crop':
# calculate the volume boundary
query_vol_metric = cfg['data']['padding'] + 1
unit_size = cfg['data']['unit_size']
recep_field = 2 ** (cfg['model']['encoder_kwargs']['unet3d_kwargs']['num_levels'] + 2)
if 'unet' in cfg['model']['encoder_kwargs']:
depth = cfg['model']['encoder_kwargs']['unet_kwargs']['depth']
elif 'unet3d' in cfg['model']['encoder_kwargs']:
depth = cfg['model']['encoder_kwargs']['unet3d_kwargs']['num_levels']
vol_info = decide_total_volume_range(query_vol_metric, recep_field, unit_size, depth)
grid_reso = cfg['data']['query_vol_size'] + recep_field - 1
grid_reso = update_reso(grid_reso, depth)
query_vol_size = cfg['data']['query_vol_size'] * unit_size
input_vol_size = grid_reso * unit_size
# only for the sliding window case
vol_bound = None
if cfg['generation']['sliding_window']:
vol_bound = {'query_crop_size': query_vol_size,
'input_crop_size': input_vol_size,
'fea_type': cfg['model']['encoder_kwargs']['plane_type'],
'reso': grid_reso}
else:
vol_bound = None
vol_info = None
generator = generation.Generator3D(
model,
device=device,
threshold=cfg['test']['threshold'],
resolution0=cfg['generation']['resolution_0'],
upsampling_steps=cfg['generation']['upsampling_steps'],
sample=cfg['generation']['use_sampling'],
refinement_step=cfg['generation']['refinement_step'],
simplify_nfaces=cfg['generation']['simplify_nfaces'],
input_type=cfg['data']['input_type'],
padding=cfg['data']['padding'],
vol_info=vol_info,
vol_bound=vol_bound,
)
return generator
def get_generator_fusion(model, model_merge, sample_points, cfg, device, **kwargs):
''' Returns the generator object.
Args:
model (nn.Module): the backbone encoder and decoder which are used
model_merge : the fusion network
sample_points : points sampled to define scene ranges
cfg (dict): config dictionary
device (device): pytorch device
'''
if cfg['data']['input_type'] == 'pointcloud_crop':
# calculate the volume boundary
query_vol_metric = cfg['data']['padding'] + 1
unit_size = cfg['data']['unit_size']
recep_field = 2 ** (cfg['model']['encoder_kwargs']['unet3d_kwargs']['num_levels'] + 2)
if 'unet' in cfg['model']['encoder_kwargs']:
depth = cfg['model']['encoder_kwargs']['unet_kwargs']['depth']
elif 'unet3d' in cfg['model']['encoder_kwargs']:
depth = cfg['model']['encoder_kwargs']['unet3d_kwargs']['num_levels']
vol_info = decide_total_volume_range(query_vol_metric, recep_field, unit_size, depth)
grid_reso = cfg['data']['grid_resolution']
input_vol_size = cfg['data']['input_vol']
query_vol_size = cfg['data']['query_vol']
voxel_threshold = cfg['generation']['voxel_threshold']
boundary_interpolation = cfg['generation'].get("boundary_interpolation", True)
unet_hdim = cfg['model']['encoder_kwargs']['unet3d_kwargs']['f_maps']
unet_depth = cfg['model']['encoder_kwargs']['unet3d_kwargs']['num_levels'] - 1
vol_bound = {'query_crop_size': query_vol_size,
'input_crop_size': input_vol_size,
'fea_type': cfg['model']['encoder_kwargs']['plane_type'],
'reso': grid_reso}
else:
vol_bound = None
vol_info = None
generator = generation_fusion.Generator3D(
model,
model_merge,
sample_points,
device=device,
threshold=cfg['test']['threshold'],
resolution0=cfg['generation']['resolution_0'],
upsampling_steps=cfg['generation']['upsampling_steps'],
refinement_step=cfg['generation']['refinement_step'],
input_type=cfg['data']['input_type'],
padding=cfg['data']['padding'],
vol_info=vol_info,
vol_bound=vol_bound,
voxel_threshold=voxel_threshold,
boundary_interpolation=boundary_interpolation,
unet_hdim = unet_hdim,
unet_depth = unet_depth
)
return generator
def get_data_fields(mode, cfg):
''' Returns the data fields.
Args:
mode (str): the mode which is used
cfg (dict): imported yaml config
'''
points_transform = data.SubsamplePoints(cfg['data']['points_subsample'])
input_type = cfg['data']['input_type']
fields = {}
if cfg['data']['points_file'] is not None:
if input_type != 'pointcloud_crop':
fields['points'] = data.PointsField(
cfg['data']['points_file'], points_transform,
unpackbits=cfg['data']['points_unpackbits'],
multi_files=cfg['data']['multi_files']
)
else:
fields['points'] = data.PatchPointsField(
cfg['data']['points_file'],
transform=points_transform,
unpackbits=cfg['data']['points_unpackbits'],
multi_files=cfg['data']['multi_files']
)
if mode in ('val', 'test'):
points_iou_file = cfg['data']['points_iou_file']
voxels_file = cfg['data']['voxels_file']
if points_iou_file is not None:
if input_type == 'pointcloud_crop':
fields['points_iou'] = data.PatchPointsField(
points_iou_file,
unpackbits=cfg['data']['points_unpackbits'],
multi_files=cfg['data']['multi_files']
)
else:
fields['points_iou'] = data.PointsField(
points_iou_file,
unpackbits=cfg['data']['points_unpackbits'],
multi_files=cfg['data']['multi_files']
)
if voxels_file is not None:
fields['voxels'] = data.VoxelsField(voxels_file)
return fields
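# Hedged usage sketch (added for illustration, not part of the original module):
# how the factory functions above are typically chained from a parsed yaml config.
# The config path and the optimizer choice are assumptions; the real entry points
# are the project's train/generate scripts.
if __name__ == '__main__':
    import torch
    import yaml

    with open('configs/default.yaml') as f:  # hypothetical config path
        _cfg = yaml.safe_load(f)
    _device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
    _model = get_model(_cfg, device=_device)
    _optimizer = torch.optim.Adam(_model.parameters(), lr=1e-4)
    _trainer = get_trainer(_model, _optimizer, _cfg, _device)
    _generator = get_generator(_model, _cfg, _device)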
|
1618416
|
from bson.objectid import ObjectId
from pymongo.results import UpdateResult
from project.infrastructure.drivers.mongo.adapter import MongoAdapter
class MongoDataLayer(MongoAdapter):
def __init__(self, collection: str) -> None:
"""
Generic access data layer to MongoDB
"""
super().__init__()
self.collection = collection
def get_collection(self):
"""
Returns:
Collection: returns a collection instance
"""
database = self.client()
collection = database[self.collection]
return collection
async def get_by_id(self, _id: ObjectId):
"""
Args:
_id (ObjectId): Id of document
Returns:
object_result (Document)
"""
collection = self.get_collection()
object_result = await collection.find_one({"_id": _id})
return object_result
async def get_by_filter(self, _filter: dict):
"""
Args:
_filter (dict): dict of parameters to filter find method
Returns:
object_result (List[Document]))
"""
collection = self.get_collection()
cursor = collection.find(_filter)
object_result = await cursor.to_list(None)
await cursor.close()
return object_result
async def save(self, _object: dict):
"""
Args:
_object (dict): the object that will be saved
Returns:
object_result (Document)
"""
collection = self.get_collection()
save_result = await collection.insert_one(_object)
object_result = await self.get_by_id(save_result.inserted_id)
return object_result
async def update(self, criteria: dict, _object: dict) -> UpdateResult:
"""
Args:
criteria (dict): criteria by which the object(s) will be updated
_object (dict): the parameters that will be updated
Returns:
object_result (UpdateResult)
"""
collection = self.get_collection()
object_result = await collection.update_one(criteria, {"$set": _object})
return object_result
async def delete(self, criteria: dict):
"""[summary]
Args:
criteria (dict): criteria by which the object(s) will be deleted
Returns:
object_result (DeleteResult)
"""
collection = self.get_collection()
object_result = await collection.delete_many(criteria)
return object_result
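# Hedged usage sketch (added for illustration, not part of the original module):
# how an async caller might exercise this data layer. The collection name and
# document shape are placeholders; MongoAdapter is expected to provide client().
if __name__ == '__main__':
    import asyncio

    async def _demo():
        layer = MongoDataLayer(collection='users')  # hypothetical collection
        saved = await layer.save({'name': 'alice', 'active': True})
        await layer.update({'_id': saved['_id']}, {'active': False})
        found = await layer.get_by_filter({'active': False})
        await layer.delete({'_id': saved['_id']})
        return found

    asyncio.run(_demo())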
|
1618447
|
import requests
from typing import List
from .util import PlacementPreference
from .storyprovider import StoryProvider
from .story import Story
class WeatherStoryProvider(StoryProvider):
def __init__(self, woe: str = "2358820", F: bool = True):
self.woe = woe
self.F = F
def CtoF(self, temp: float) -> float:
return (temp * 9 / 5) + 32
def get_stories(self, limit: int = 1) -> List[Story]:
weatherReq = requests.get(
f"https://www.metaweather.com/api/location/{self.woe}/"
).json()
weather = weatherReq["consolidated_weather"][0]
weatherTitle = weatherReq["title"]
if self.F:
headline = f"{int(self.CtoF(weather['the_temp']))}ºF with {weather['weather_state_name']} in {weatherTitle}"
body_html = f"""
<img
src="https://www.metaweather.com/static/img/weather/png/64/{weather['weather_state_abbr']}.png"
width="42" />
{int(self.CtoF(weather['min_temp']))} – {int(self.CtoF(weather['max_temp']))}ºF, Winds {weather['wind_direction_compass']}
"""
else:
headline = f"{weather['the_temp']:.1f}ºC with {weather['weather_state_name']} in {weatherTitle}"
body_html = f"""
<img
src="https://www.metaweather.com/static/img/weather/png/64/{weather['weather_state_abbr']}.png"
width="42" />
{weather['min_temp']:.1f} – {weather['max_temp']:.1f}ºC, Winds {weather['wind_direction_compass']}
"""
return [
Story(
headline=headline,
body_html=body_html,
placement_preference=PlacementPreference.EAR,
)
]
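# Hedged sanity check (added for illustration, not part of the original module):
# the Celsius-to-Fahrenheit conversion above maps 0 °C to 32 °F and 20 °C to 68 °F.
if __name__ == '__main__':
    _provider = WeatherStoryProvider()
    assert _provider.CtoF(0.0) == 32.0
    assert _provider.CtoF(20.0) == 68.0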
|
1618470
|
import sqlite3
from hacksport.problem import files_from_directory, PHPApp, ProtectedFile
class Problem(PHPApp):
files = files_from_directory("webroot/") + [ProtectedFile("users.db")]
php_root = "webroot/"
num_workers = 5
def setup(self):
conn = sqlite3.connect("users.db")
c = conn.cursor()
c.execute("CREATE TABLE users (name text, password text, admin integer);")
# This is static. However, there is no reason it couldn't be autogenerated!
c.execute(
"""INSERT INTO users VALUES ('admin', 'pbkdf2:sha1:1000$bTY1abU0$5503ae46ff1a45b14ff19d5a2ae08acf1d2aacde', 1)"""
)
conn.commit()
conn.close()
|
1618531
|
from pyinstagram.entities import Account, Comment, Location, Media, Story, Tag
import pytest
from random import randint, choice
from string import ascii_uppercase, ascii_lowercase, digits
def setup_function():
Account.clear_cache()
Comment.clear_cache()
Location.clear_cache()
Media.clear_cache()
Story.clear_cache()
Tag.clear_cache()
def id():
return "".join(
choice(ascii_uppercase + ascii_lowercase + digits) for _ in range(randint(1, 50))
)
@pytest.mark.parametrize("id", [id() for _ in range(3)])
def test_account_creation(id):
account = Account(id)
assert getattr(account, account.primary_key) == id
assert len(Account.cache) == 1 and Account.cache[id] is account
@pytest.mark.parametrize("id", [id() for _ in range(3)])
def test_media_creation(id):
media = Media(id)
assert getattr(media, media.primary_key) == id
assert len(Media.cache) == 1 and Media.cache[id] is media
@pytest.mark.parametrize("id", [id() for _ in range(3)])
def test_location_creation(id):
location = Location(id)
assert getattr(location, location.primary_key) == id
assert len(Location.cache) == 1 and Location.cache[id] is location
@pytest.mark.parametrize("id", [id() for _ in range(3)])
def test_tag_creation(id):
tag = Tag(id)
assert getattr(tag, tag.primary_key) == id
assert len(Tag.cache) == 1 and Tag.cache[id] is tag
@pytest.mark.parametrize("id", [id() for _ in range(3)])
def test_comment_creation(id):
account = Account("test")
media = Media("test")
comment = Comment(id, media=media, owner=account, text="test", created_at=0)
assert getattr(comment, comment.primary_key) == id
assert comment.media is media
assert comment.owner is account
assert comment.text == "test"
assert comment.created_at == 0
assert len(Comment.cache) == 1 and Comment.cache[id] is comment
@pytest.mark.parametrize("id", [id() for _ in range(3)])
def test_story_creation(id):
story = Story(id)
assert getattr(story, story.primary_key) == id
assert len(Story.cache) == 1 and Story.cache[id] is story
|
1618598
|
from seamless.highlevel import Context
import traceback
ctx = Context()
ctx.code = "head -$lines testdata > RESULT"
ctx.code.celltype = "text"
ctx.tf = lambda lines, testdata: None
ctx.tf.language = "bash"
ctx.tf.docker_image = "ubuntu"
ctx.tf.testdata = "a \nb \nc \nd \ne \nf \n"
ctx.tf.testdata.celltype = "text"
ctx.tf.lines = 3
ctx.tf.code = ctx.code
ctx.result = ctx.tf
ctx.result.celltype = "mixed"
ctx.compute()
print(ctx.result.value)
ctx.tf.debug.enable("sandbox", sandbox_name="docker-shell")
|
1618613
|
import ssl
import logging
from vibora.tests import TestSuite
from vibora import client
class TestSSLErrors(TestSuite):
def setUp(self):
        # Python always warns about SSL errors, but since we are forcing them to occur
# there is no reason to fill the testing console with these messages.
logging.disable(logging.CRITICAL)
def tearDown(self):
logging.disable(logging.NOTSET)
async def test_expired_ssl__expects_exception(self):
try:
await client.get('https://expired.badssl.com/')
self.fail('Client trusted in an expired SSL certificate.')
except ssl.SSLError:
pass
async def test_expired_ssl__expects_ignored(self):
try:
await client.get('https://expired.badssl.com/', ssl=False)
except ssl.SSLError:
self.fail('Client raised an exception for an expired SSL certificate '
'even when explicitly told to not do so.')
async def test_wrong_host_ssl__expects_exception(self):
try:
await client.get('https://wrong.host.badssl.com/')
self.fail('Client trusted in an SSL certificate with an invalid hostname.')
except (ssl.CertificateError, ssl.SSLError):
pass
async def test_wrong_host_ssl__expects_ignored(self):
try:
await client.get('https://wrong.host.badssl.com/', ssl=False)
except ssl.CertificateError:
self.fail('Failed to ignore SSL verification.')
async def test_self_signed_certificate__expects_exception(self):
try:
await client.get('https://self-signed.badssl.com/')
            self.fail('Client trusted in a self-signed certificate.')
except ssl.SSLError:
pass
async def test_self_signed_certificate__expects_ignored(self):
try:
await client.get('https://self-signed.badssl.com/', ssl=False)
except ssl.SSLError:
self.fail('Failed to ignore SSL verification.')
async def test_untrusted_root_certificate__expects_exception(self):
try:
await client.get('https://untrusted-root.badssl.com/')
self.fail('Client trusted in an untrusted root certificate.')
except ssl.SSLError:
pass
async def test_untrusted_root_certificate__expects_ignored(self):
try:
await client.get('https://untrusted-root.badssl.com/', ssl=False)
except ssl.SSLError:
self.fail('Failed to ignore SSL verification.')
async def test_trusted_certificate__expects_allowed(self):
try:
await client.get('https://google.com/')
except ssl.SSLError:
self.fail('Failed to validate Google certificate.')
# Pending OCSP/CRL SSL implementation.
# def test_revoked_certificate__expects_exception(self):
# try:
# http.get('https://revoked.badssl.com/')
# self.fail('Client trusted in a revoked certificate.')
# except ssl.SSLError:
# pass
# Pending OCSP/CRL SSL implementation.
# def test_revoked_certificate__expects_ignored(self):
# try:
# http.get('https://revoked.badssl.com/', verify=False)
# except ssl.SSLError:
# self.fail('Client failed to ignore a revoked certificate.')
|
1618626
|
import wandb
from transformers import is_torch_tpu_available
from transformers.integrations import WandbCallback
import os
from transformers.utils import logging
logger = logging.get_logger(__name__)
class MyWandbCallback(WandbCallback):
"""
    A :class:`~transformers.TrainerCallback` that sends the logs to `Weights & Biases
<https://www.wandb.com/>`__.
"""
def setup(self, args, state, model):
"""
        Set up the optional Weights & Biases (`wandb`) integration.
One can subclass and override this method to customize the setup if needed. Find more information
`here <https://docs.wandb.com/huggingface>`__. You can also override the following environment variables:
Environment:
WANDB_WATCH (:obj:`str`, `optional` defaults to :obj:`"gradients"`):
Can be :obj:`"gradients"`, :obj:`"all"` or :obj:`"false"`. Set to :obj:`"false"` to disable gradient
logging or :obj:`"all"` to log gradients and parameters.
WANDB_PROJECT (:obj:`str`, `optional`, defaults to :obj:`"huggingface"`):
Set this to a custom string to store results in a different project.
WANDB_DISABLED (:obj:`bool`, `optional`, defaults to :obj:`False`):
Whether or not to disable wandb entirely.
"""
# self._initialized = True
if state.is_world_process_zero:
logger.info(
'Automatic Weights & Biases logging enabled, to disable set os.environ["WANDB_DISABLED"] = "true"'
)
combined_dict = {**args.to_sanitized_dict()}
if getattr(model, "config", None) is not None:
combined_dict = {**model.config.to_dict(), **combined_dict}
wandb.init(project=os.getenv("WANDB_PROJECT", "huggingface"), config=combined_dict, name=args.run_name,
reinit=True) # CUSTOM LOGIC TO REINIT WANDB AFTER EACH RUN
# keep track of model topology and gradients, unsupported on TPU
if not is_torch_tpu_available() and os.getenv("WANDB_WATCH") != "false":
wandb.watch(model, log=os.getenv("WANDB_WATCH", "gradients"), log_freq=max(100, args.logging_steps))
def on_train_begin(self, args, state, control, model=None, **kwargs):
"""
:param args:
:param state:
:param control:
:param model:
:param kwargs:
"""
if not self._initialized:
self.setup(args, state, model)
def on_log(self, args, state, control, model=None, logs=None, **kwargs):
"""
:param args:
:param state:
:param control:
:param model:
:param logs:
:param kwargs:
"""
# if not self._initialized:
# self.setup(args, state, model)
if state.is_world_process_zero:
wandb.log(logs, step=state.global_step)
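# Hedged usage sketch (added for illustration, not part of the original module):
# registering this callback with a Hugging Face Trainer. The model, dataset and
# project name below are placeholders only.
# from transformers import Trainer, TrainingArguments
#
# os.environ["WANDB_PROJECT"] = "my-project"  # hypothetical project name
# training_args = TrainingArguments(output_dir="out", run_name="run-1")
# trainer = Trainer(model=model, args=training_args,
#                   train_dataset=train_dataset,
#                   callbacks=[MyWandbCallback()])
# trainer.train()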
|
1618684
|
import nltk
import numpy as np
import pyjsonrpc
from features import Feature
from stst.data import dict_utils
from stst.libs.kernel import vector_kernel as vk
class Embedding(object):
def __init__(self):
self.http_client = pyjsonrpc.HttpClient(
url="http://localhost:8084",
)
def get_word2vec(self, word):
"""
:param word:
:return: (st, vec)
"""
vec = self.http_client.word2vec(word)
return vec
def get_glove(self, word):
vec = self.http_client.glove(word)
return vec
def get_paragram(self, word):
vec = self.http_client.paragram(word)
return vec
def get_glove300(self, word):
vec = self.http_client.glove300(word)
return vec
def pooling(word_sa, emb_type, dim, pooling_types='avg', convey='idf'):
idf_weight = dict_utils.DictLoader().load_dict('idf')
embedding = Embedding()
vdist = nltk.FreqDist(word_sa)
length = float(len(word_sa))
if pooling_types == 'avg':
function = np.average
elif pooling_types == 'min':
function = np.amin
elif pooling_types == 'max':
function = np.amax
else:
print(pooling_types)
raise NotImplementedError
vec = []
for word in word_sa:
if emb_type == 'word2vec':
st, w2v = embedding.get_word2vec(word)
elif emb_type == 'glove':
st, w2v = embedding.get_glove(word)
elif emb_type == 'paragram':
st, w2v = embedding.get_paragram(word)
elif emb_type == 'glove300':
st, w2v = embedding.get_glove300(word)
if convey == 'idf':
w = idf_weight.get(word, 10.0)
elif convey == 'tfidf':
w = vdist[word] * idf_weight.get(word, 10.0)
else:
raise NotImplementedError
w2v = w * np.array(w2v)
vec.append(w2v)
if len(vec) == 0:
vec = np.zeros((dim,))
else:
vec = function(vec, axis=0)
return vec
def minavgmaxpooling(word_sa, emb_type, dim, convey='idf'):
vecs = []
for pooling_types in ['avg', 'min', 'max']:
vec = pooling(word_sa, emb_type, dim, pooling_types, convey)
vecs.append(vec)
vecs = np.reshape(vecs, [-1])
return vecs
class MinAvgMaxEmbeddingFeature(Feature):
def __init__(self, emb_type, dim, lower=True, **kwargs):
super(MinAvgMaxEmbeddingFeature, self).__init__(**kwargs)
self.lower = lower
        if emb_type is None:
print('please init with emb_type and dimension!')
exit()
self.emb_type = emb_type
self.dim = dim
self.feature_name = self.feature_name + '-%s' % (emb_type)
def extract(self, train_instance):
lower = self.lower
emb_type = self.emb_type
dim = self.dim
word_sa, word_sb = train_instance.get_word(type='word', stopwords=True, lower=lower)
pooling_vec_sa = minavgmaxpooling(word_sa, emb_type, dim)
pooling_vec_sb = minavgmaxpooling(word_sb, emb_type, dim)
all_feats, all_names = vk.get_all_kernel(pooling_vec_sa, pooling_vec_sb)
features = all_feats
infos = [emb_type, lower]
return features, infos
class MinAvgMaxPoolingFeature(Feature):
def __init__(self, emb_name, dim, emb_file, binary=False, lower=True, **kwargs):
pass
|
1618718
|
import itertools
import re
import urllib
from django.views.generic import TemplateView
from django.http import Http404
from django.template import Template, Context
from billy.models import db, Metadata
def templatename(name):
return 'billy/web/public/%s.html' % name
def mongo_fields(*fields):
fields = dict(zip(fields, itertools.repeat(1)))
    # Return the mapping (an earlier version only assigned it and never returned it).
return fields
def normalize_whitespace(s):
return re.sub(ur'\s+', ' ', s)
class ListViewBase(TemplateView):
'''Base class for VoteList, FeedList, etc.
I tried using generic views for bill lists to cut down the
boilerplate, but I'm not sure that has succeeded. One downside
has been the reuse of what attempts to be a generic sort of
template but in reality has become an awful monster template,
named "object_list.html." Possibly more tuning needed.
Context:
- column_headers
- rowtemplate_name
- description_template
- object_list
- nav_active
- abbr
- metadata
- url
- use_table
'''
template_name = templatename('object_list')
nav_active = None
def get_context_data(self, *args, **kwargs):
super(ListViewBase, self).get_context_data(*args, **kwargs)
abbr = self.kwargs['abbr']
if abbr == 'all':
metadata = None
else:
metadata = Metadata.get_object(abbr)
context = {}
context.update(column_headers_tmplname=self.column_headers_tmplname,
rowtemplate_name=self.rowtemplate_name,
description_template=self.description_template,
object_list=self.get_queryset(),
nav_active=self.nav_active,
abbr=abbr,
metadata=metadata,
url=self.request.path,
use_table=getattr(self, 'use_table', False))
        # Include the kwargs to enable references to URL parameters.
context.update(**kwargs)
# Get the formatted page title and description.
# Wait to render until get_object has been called in subclasses.
if not getattr(self, 'defer_rendering_title', False):
for attr in ('title', 'description'):
if attr not in context:
context[attr] = self._render(attr, context,
request=self.request)
# Add the correct path to paginated links. Yuck.
if self.request.GET:
params = dict(self.request.GET.items())
if 'page' in params:
del params['page']
for k, v in params.items():
params[k] = unicode(v).encode('utf8')
context.update(get_params=urllib.urlencode(params))
return context
def _render(self, attr, context, **extra_context):
try:
template = getattr(self, '%s_template' % attr)
except AttributeError:
return
template = Template(normalize_whitespace(template))
context.update(**extra_context)
context = Context(context)
return template.render(context)
class RelatedObjectsList(ListViewBase):
'''A generic list view where there's a main object, like a
legislator or metadata, and we want to display all of the main
object's "sponsored_bills" or "introduced_bills." This class
basically hacks the ListViewBase to add the main object into
the template context so it can be used to generate a phrase like
'showing all sponsored bills for Wesley Chesebro.'
Context:
- obj
- collection_name
Templates:
- defined in subclasses
'''
defer_rendering_title = True
def get_context_data(self, *args, **kwargs):
context = super(RelatedObjectsList, self).get_context_data(*args,
**kwargs)
context.update(
obj=self.get_object(),
collection_name=self.collection_name)
# Get the formatted page title and description.
for attr in ('title', 'description'):
if attr not in context:
context[attr] = self._render(attr, context)
return context
def get_object(self):
try:
return self.obj
except AttributeError:
pass
try:
collection_name = self.kwargs['collection_name']
except KeyError:
collection_name = self.collection_name
try:
_id = self.kwargs['_id']
except KeyError:
_id = self.kwargs['abbr']
if _id == 'all':
return None
# Get the related object.
collection = getattr(db, collection_name)
obj = collection.find_one(_id)
self.obj = obj
return obj
def get_queryset(self):
get = self.request.GET.get
# Setup the paginator arguments.
show_per_page = getattr(self, 'show_per_page', 10)
show_per_page = int(get('show_per_page', show_per_page))
page = int(get('page', 1))
if 100 < show_per_page:
show_per_page = 100
obj = self.get_object()
if obj is None:
raise Http404('RelatedObjectsList.get_object returned None.')
objects = getattr(obj, self.query_attr)
# The related collection of objects might be a
# function or a manager class.
# This is to work around a pain-point in models.py.
if callable(objects):
kwargs = {}
sort = getattr(self, 'mongo_sort', None)
if sort is not None:
kwargs['sort'] = sort
objects = objects(**kwargs)
# Apply any specified sorting.
sort_func = getattr(self, 'sort_func', None)
sort_reversed = bool(getattr(self, 'sort_reversed', None))
if sort_func:
objects = sorted(objects, key=sort_func,
reverse=sort_reversed)
paginator = self.paginator(objects, page=page,
show_per_page=show_per_page)
return paginator
def _render(self, attr, context):
try:
template = getattr(self, '%s_template' % attr)
except AttributeError:
return
template = Template(normalize_whitespace(template))
context = Context(context)
return template.render(context)
# Source: https://gist.github.com/mishari/5ecfccd219925c04ac32
# DC and PR were found by point-and-click, using "What's here?" on Google Maps
# US is maximum extent of all listed states
GEO_BOUNDS = {
"US": [
[-124.836097717285, 17.811],
[-65.221, 49.3844909667969]
],
"AL": [
[-88.4731369018555, 30.1375217437744],
[-84.8882446289062, 35.0080299377441]
],
"AR": [
[-94.6178131103516, 33.0041046142578],
[-89.6422424316406, 36.4996032714844]
],
"AZ": [
[-114.818359375, 31.3321762084961],
[-109.045196533203, 37.0042610168457]
],
"CA": [
[-124.482009887695, 32.5295219421387],
[-114.13077545166, 42.0095024108887]
],
"CO": [
[-109.060256958008, 36.9924240112305],
[-102.041580200195, 41.0023612976074]
],
"CT": [
[-73.7277755737305, 40.9667053222656],
[-71.7869873046875, 42.0505905151367]
],
"DC": [
[-77.119760, 38.791647],
[-76.909397, 38.995551]
],
"DE": [
[-75.7890472412109, 38.4511260986328],
[-74.9846343994141, 39.8394355773926]
],
"FL": [
[-87.6349029541016, 24.3963069915771],
[-79.9743041992188, 31.0009689331055]
],
"GA": [
[-85.6051712036133, 30.3557567596436],
[-80.7514266967773, 35.0008316040039]
],
"IA": [
[-96.6397171020508, 40.3755989074707],
[-90.1400604248047, 43.5011367797852]
],
"ID": [
[-117.243034362793, 41.9880561828613],
[-111.043563842773, 49.000846862793]
],
"IL": [
[-91.513053894043, 36.9701309204102],
[-87.0199203491211, 42.5083045959473]
],
"IN": [
[-88.0997085571289, 37.7717399597168],
[-84.7845764160156, 41.7613716125488]
],
"KS": [
[-102.0517578125, 36.9930801391602],
[-94.5882034301758, 40.0030975341797]
],
"KY": [
[-89.5715103149414, 36.4967155456543],
[-81.9645385742188, 39.1474609375]
],
"LA": [
[-94.0431518554688, 28.9210300445557],
[-88.817008972168, 33.019458770752]
],
"MA": [
[-73.5081481933594, 41.1863288879395],
[-69.8615341186523, 42.8867149353027]
],
"MD": [
[-79.4871978759766, 37.8856391906738],
[-75.0395584106445, 39.7229347229004]
],
"ME": [
[-71.0841751098633, 42.9561233520508],
[-66.9250717163086, 47.4598426818848]
],
"MI": [
[-90.4186248779297, 41.6960868835449],
[-82.122802734375, 48.3060646057129]
],
"MN": [
[-97.2392654418945, 43.4994277954102],
[-89.4833831787109, 49.3844909667969]
],
"MO": [
[-95.7741470336914, 35.9956817626953],
[-89.0988388061523, 40.6136360168457]
],
"MS": [
[-91.6550140380859, 30.1477890014648],
[-88.0980072021484, 34.9960556030273]
],
"MT": [
[-116.050003051758, 44.3582191467285],
[-104.039558410645, 49.0011100769043]
],
"NC": [
[-84.3218765258789, 33.7528762817383],
[-75.4001159667969, 36.5880393981934]
],
"ND": [
[-104.049270629883, 45.9350357055664],
[-96.5543899536133, 49.0004920959473]
],
"NE": [
[-104.053520202637, 39.9999961853027],
[-95.3080520629883, 43.0017013549805]
],
"NH": [
[-72.55712890625, 42.6970405578613],
[-70.534065246582, 45.3057823181152]
],
"NJ": [
[-75.5633926391602, 38.7887535095215],
[-73.8850555419922, 41.3574256896973]
],
"NM": [
[-109.050178527832, 31.3323001861572],
[-103.000862121582, 37.0001411437988]
],
"NV": [
[-120.005729675293, 35.0018730163574],
[-114.039642333984, 42.0022087097168]
],
"NY": [
[-79.7625122070312, 40.4773979187012],
[-71.8527069091797, 45.0158615112305]
],
"OH": [
[-84.8203430175781, 38.4031982421875],
[-80.5189895629883, 42.3232383728027]
],
"OK": [
[-103.002571105957, 33.6191940307617],
[-94.4312133789062, 37.0021362304688]
],
"OR": [
[-124.703544616699, 41.9917907714844],
[-116.463500976562, 46.2991027832031]
],
"PA": [
[-80.5210876464844, 39.7197647094727],
[-74.6894989013672, 42.5146903991699]
],
"PR": [
[-67.945, 17.881],
[-65.221, 18.515]
],
"RI": [
[-71.9070053100586, 41.055534362793],
[-71.1204681396484, 42.018856048584]
],
"SC": [
[-83.35400390625, 32.0333099365234],
[-78.4992980957031, 35.2155418395996]
],
"SD": [
[-104.05770111084, 42.4798889160156],
[-96.4363327026367, 45.9454536437988]
],
"TN": [
[-90.310302734375, 34.9829788208008],
[-81.6468963623047, 36.6781196594238]
],
"TX": [
[-106.645652770996, 25.8370609283447],
[-93.5078201293945, 36.5007057189941]
],
"UT": [
[-114.053932189941, 36.9979667663574],
[-109.041069030762, 42.0013885498047]
],
"VA": [
[-83.6754150390625, 36.5407867431641],
[-75.2312240600586, 39.4660148620605]
],
"VT": [
[-73.437744140625, 42.7269325256348],
[-71.4653549194336, 45.0166664123535]
],
"WA": [
[-124.836097717285, 45.5437202453613],
[-116.917427062988, 49.00244140625]
],
"WI": [
[-92.8881149291992, 42.491943359375],
[-86.2495422363281, 47.3025016784668]
],
"WV": [
[-82.6447448730469, 37.2014808654785],
[-77.7190246582031, 40.638801574707]
],
"WY": [
[-111.05689239502, 40.9948768615723],
[-104.052154541016, 45.0034217834473]
]
}
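# Hedged helper (added for illustration, not part of the original module): each
# GEO_BOUNDS entry is a [south-west, north-east] pair of [longitude, latitude]
# corners, so a point-in-bounds check reads as follows. The helper name is ours.
def _point_in_bounds(abbr, lng, lat):
    (min_lng, min_lat), (max_lng, max_lat) = GEO_BOUNDS[abbr]
    return min_lng <= lng <= max_lng and min_lat <= lat <= max_lat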
|
1618751
|
import os
import torch
from torch.utils import data
import numpy as np
from data.pose_graph_tools import Graph
from data.sample_data import random_sample, sequential_sample
def build_sub_graph(runs, data_dir):
"""
Build a pose graph from the stored data files. We build the pose graph using the tools provided in
https://github.com/utiasASRL/vtr-dataset-tools. We use the data provided by the UTIAS Long-Term Localization
and Mapping Dataset that can be found at http://asrl.utias.utoronto.ca/datasets/2020-vtr-dataset/index.html.
Args:
runs (list[int]): the ids of the runs from the dataset that we want to include in the pose graph.
data_dir (string): the top-level directory that holds the data for the path for which we build a pose graph.
Returns:
graph (Graph): a pose graph built from the runs that was provided.
"""
teach_file = f'{data_dir}/run_000000/transforms_temporal.txt'
repeat_files = []
for run in runs:
if run != 0:
run_str = str(run).zfill(6)
run_file = f'{data_dir}/run_{run_str}/transforms_spatial.txt'
if os.path.isfile(run_file):
repeat_files.append(run_file)
else:
print(f'Building pose graph, run_file does not exist: {run_file}')
exit(1)
return Graph(teach_file, repeat_files)
def build_random_loc_dataset(data_path, path_names, runs, num_samples, temporal_length):
"""
Build a dataset that localizes vertices of one run in the pose graph to another. We sample vertex pairs
randomly from the pose graph.
Record the pose transforms as 4x4 matrices and the 6 DOF vector equivalents. The pose from vertex, v1, to
vertex, v2, is given as T_v2_v1. Create sample ids from the vertex ids. A vertex id consists of the id of the
run the vertex belongs to and the id of the pose along that run, i.e. vertex_id = (run_id, pose_id). The sample
    id corresponding to pose transform T_v2_v1 is of the form pathname_runid1_poseid1_runid2_poseid2, for
    instance: multiseason_1_531_5_542.
Args:
data_path (string): path to where the different runs of data are stored.
path_names (list[string]): the paths we will use for sampling. One pose graph is created for each path.
runs (dict): map from the path names to a list of the runs to use for localization pose sampling for the
given path.
num_samples (dict): map from the path names to the number of samples to generate for the given paths.
        temporal_length (int): we can 'walk along' the pose graph to pair vertices that are further apart (i.e.
not the closest pair). We set a fixed topological distance/steps we move away
from the start vertex.
Returns:
samples (list[string]): list of all the sample ids.
labels_se3 (dict): dictionary mapping sample id to pose transform 4x4 matrix provided as a torch.Tensor.
labels_log (dict): dictionary mapping sample id to pose transform 6 DOF vector provided as a torch.Tensor.
"""
all_ids = []
labels_se3 = {}
labels_log = {}
for path_name in path_names:
num_samples_path = num_samples[path_name]
num_runs = len(runs[path_name])
print(f'\nRandom dataset from path: {path_name}')
print(f'Sample from runs: {runs[path_name]}')
print(f'Collect {num_samples_path} samples \n')
data_dir = f'{data_path}/{path_name}'
pose_graph = build_sub_graph(runs[path_name], data_dir)
path_ids, path_labels_se3, path_labels_log = random_sample(path_name, pose_graph, runs[path_name],
num_samples_path, temporal_length)
all_ids = all_ids + path_ids
labels_se3 = {**labels_se3, **path_labels_se3}
labels_log = {**labels_log, **path_labels_log}
print(f'\nRandom dataset total samples: {len(all_ids)}\n')
return all_ids, labels_se3, labels_log
def build_sequential_loc_dataset(data_path, path_name, map_run_id, live_run_ids, temporal_length):
"""
Build a dataset that localizes all the vertices of one or more runs in the pose graph to the vertices on one
map (or teach) run. I.e. we localize one or more live (or repeat) runs to one run that we choose as the map
run. We get relative pose transforms for each localized vertex in the order that the vertices were created when
driving the robot during data collection.
Record the pose transforms as 4x4 matrices and the 6 DOF vector equivalents. The pose from vertex, v1, to
vertex, v2, is given as T_v2_v1. Create sample ids from the vertex ids. A vertex id consists of the id of the
run the vertex belongs to and the id of the pose along that run, i.e. vertex_id = (run_id, pose_id). The sample
    id corresponding to pose transform T_v2_v1 is of the form pathname_runid1_poseid1_runid2_poseid2, for
    instance: multiseason_1_531_5_542.
Args:
data_path (string): path to where the different runs of data are stored.
path_name (string): name given to the path that the pose graph represents.
map_run_id (int): id of the run to localize to, i.e. compute the relative pose to vertices on this run.
live_run_ids (list[int]): the runs we localize to the map run, i.e. compute relative pose from vertices on
these runs.
        temporal_length (int): we can 'walk along' the pose graph to pair vertices that are further apart (i.e.
not the closest pair). We set a fixed topological distance/steps we move away
from the start vertex.
Returns:
samples (list[string]): list of all the sample ids.
labels_se3 (dict): dictionary mapping sample id to pose transform 4x4 matrix provided as a torch.Tensor.
labels_log (dict): dictionary mapping sample id to pose transform 6 DOF vector provided as a torch.Tensor.
"""
print(f'\nSequential dataset from path: {path_name}')
print(f'Map (teach) run: {map_run_id}')
print(f'Live (repeat) runs to localize: {live_run_ids} \n')
data_dir = f'{data_path}/{path_name}'
pose_graph = build_sub_graph([map_run_id] + live_run_ids, data_dir)
return sequential_sample(path_name, pose_graph, map_run_id, live_run_ids, temporal_length)
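# Hedged usage sketch (added for illustration, not part of the original module):
# how the two dataset builders above might be invoked. The data path, path name
# and run ids are placeholders only.
# data_path = '/path/to/vtr/data'
# runs = {'multiseason': [0, 1, 5]}
# ids, labels_se3, labels_log = build_random_loc_dataset(
#     data_path, ['multiseason'], runs, num_samples={'multiseason': 1000},
#     temporal_length=0)
# seq_ids, seq_se3, seq_log = build_sequential_loc_dataset(
#     data_path, 'multiseason', map_run_id=0, live_run_ids=[1, 5],
#     temporal_length=0)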
|
1618758
|
import coloredlogs
from dynaconf.utils.boxing import DynaBox
import logging
import sys
def setup(config: DynaBox, name: str):
fmt = "%(asctime)s %(levelname)-8s [%(name)s] %(message)s"
colored_formatter = coloredlogs.ColoredFormatter(fmt)
plain_formatter = logging.Formatter(fmt)
logger = logging.getLogger(name)
if config.file:
fh = logging.FileHandler(config.filename)
fhLevel = logging.getLevelName(config.file_verbosity.upper())
logger.setLevel(fhLevel)
fh.setLevel(fhLevel)
fh.setFormatter(plain_formatter)
logger.addHandler(fh)
if config.console:
ch = logging.StreamHandler()
chLevel = logging.getLevelName(config.console_verbosity.upper())
ch.setLevel(chLevel)
if logger.level > chLevel or logger.level == 0:
logger.setLevel(chLevel)
ch.setFormatter(colored_formatter)
logger.addHandler(ch)
class ShutdownHandler(logging.Handler):
"""Exit application with CRITICAL logs"""
def emit(self, record):
logging.shutdown()
sys.exit(1)
sh = ShutdownHandler(level=50)
sh.setFormatter(colored_formatter)
logger.addHandler(sh)
return logger
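# Hedged usage sketch (added for illustration, not part of the original module):
# setup() only reads the attributes used above (file, filename, file_verbosity,
# console, console_verbosity), so a plain namespace stands in for a DynaBox here.
if __name__ == '__main__':
    from types import SimpleNamespace

    _demo_config = SimpleNamespace(
        file=False, filename='app.log', file_verbosity='info',
        console=True, console_verbosity='debug')
    _log = setup(_demo_config, 'demo')
    _log.debug('logger configured')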
|
1618766
|
import os, torch, random, cv2, math, glob
import numpy as np
from torch.utils import data
from torchvision import transforms as T
from PIL import Image
from torch.nn import functional as F
from collections import defaultdict
import copy
from torch.utils.data.sampler import Sampler
class IdentityCameraSampler(Sampler):
def __init__(self, data_source, batch_size, num_instances,cams_of_dataset=None,len_of_real_data=None):
if batch_size < num_instances:
raise ValueError('batch_size={} must be no less '
'than num_instances={}'.format(batch_size, num_instances))
self.data_source = data_source
self.batch_size = batch_size
self.num_instances = num_instances
self.num_pids_per_batch = self.batch_size // self.num_instances # approximate
self.num_cams_per_batch = 8
self.index_dic = defaultdict(list)
self.cam_index_dic = dict()
self.num_pids_per_cam = self.num_pids_per_batch//self.num_cams_per_batch
for index, (_, pid, camid) in enumerate(self.data_source):
self.index_dic[pid].append(index)
if camid not in self.cam_index_dic.keys():
self.cam_index_dic[camid]=defaultdict(list)
self.cam_index_dic[camid][pid].append(index)
self.pids = list(self.index_dic.keys())
self.cams_of_dataset=cams_of_dataset
self.len_of_real_data = len_of_real_data
def __iter__(self):
final_idxs = []
length = 2*self.len_of_real_data if self.len_of_real_data is not None else len(self.data_source)
# F setting
#length = len(self.data_source)
while(len(final_idxs) < length):
if self.cams_of_dataset is not None:
# C setting
#c_rnd = np.random.choice(list(self.cam_index_dic.keys()),size=1)[0]
#for cams_of_data in self.cams_of_dataset:
# if c_rnd in cams_of_data:
# cams = np.random.choice(list(cams_of_data),size=self.num_cams_per_batch,replace=True)
# break
# D setting
c_rnd = np.random.choice([i for i in range(len(self.cams_of_dataset))],size=1)[0]
cams = np.random.choice(list(self.cams_of_dataset[c_rnd]),size=self.num_cams_per_batch,replace=True)
else:
cams = np.random.choice(list(self.cam_index_dic.keys()),size=self.num_cams_per_batch,replace=True)
for c in cams:
pids = np.random.choice(list(self.cam_index_dic[c].keys()),size=self.num_pids_per_cam, replace=True)
for p in pids:
idxs =np.random.choice(self.cam_index_dic[c][p],size=self.num_instances,replace=True)
random.shuffle(idxs)
final_idxs.extend(idxs)
self.length=len(final_idxs)
return iter(final_idxs)
def __len__(self):
return self.length
class RandomErasing(object):
def __init__(self, EPSILON=0.5, mean=[0.485, 0.456, 0.406]):
self.EPSILON = EPSILON
self.mean = mean
def __call__(self, img):
if random.uniform(0, 1) > self.EPSILON:
return img
for attempt in range(100):
area = img.size()[1] * img.size()[2]
target_area = random.uniform(0.02, 0.2) * area
aspect_ratio = random.uniform(0.3, 3)
h = int(round(math.sqrt(target_area * aspect_ratio)))
w = int(round(math.sqrt(target_area / aspect_ratio)))
if w <= img.size()[2] and h <= img.size()[1]:
x1 = random.randint(0, img.size()[1] - h)
y1 = random.randint(0, img.size()[2] - w)
img[0, x1:x1 + h, y1:y1 + w] = self.mean[0]
img[1, x1:x1 + h, y1:y1 + w] = self.mean[1]
img[2, x1:x1 + h, y1:y1 + w] = self.mean[2]
return img
return img
normalizer = T.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225])
train_transform = T.Compose([
T.Resize((256,128)),
T.RandomHorizontalFlip(),
T.ToTensor(),
normalizer,
RandomErasing(EPSILON=0.5)
])
test_transform = T.Compose([
T.Resize((256,128)),
T.ToTensor(),
normalizer ])
class imgdataset_withsource(data.Dataset):
def __init__(self, data_source):
self.data_source = data_source
self.transform = train_transform
def __getitem__(self,index):
im_path, pid, cam = self.data_source[index]
image = Image.open(im_path).convert('RGB')
image = self.transform(image)
return image,pid, cam
def __len__(self):
return len(self.data_source)
class imgdataset(data.Dataset):
def __init__(self, dataset_dir, txt_path, transformer = 'train'):
self.mode = transformer
self.transform = train_transform if transformer == 'train' else test_transform
with open(txt_path) as f:
line = f.readlines()
self.img_list = [os.path.join(dataset_dir, i.split()[0]) for i in line]
self.label_list = [int(i.split()[1]) for i in line]
self.cam_list = [int(i.split()[2]) for i in line]
if self.mode=='test':
self.frame_list = [int(i.split()[3]) for i in line]
#self.cam_list = [int(i.split('c')[1][0]) for i in line]
self.cams = np.unique(self.cam_list)
self.pids = np.unique(self.label_list)
pid2label = {pid:ind for ind,pid in enumerate(self.pids)}
labels = []
for l in self.label_list:
labels.append(pid2label[l])
self.label_list = labels
self.data_source = []
for i in range(len(self.label_list)):
self.data_source.append((self.img_list[i],self.label_list[i],self.cam_list[i]))
def __getitem__(self, index):
im_path = self.img_list[index]
image = Image.open(im_path).convert('RGB')
image = self.transform(image)
if self.mode=='train':
return image, self.label_list[index], self.cam_list[index]
elif self.mode=='test':
return image, self.label_list[index], self.cam_list[index], self.frame_list[index]
def __len__(self):
return len(self.label_list)
class imgdataset_cam(data.Dataset):
def __init__(self, dataset_dir, txt_path,camid, transformer = 'train'):
self.mode = transformer
self.transform = train_transform if transformer == 'train' else test_transform
with open(txt_path) as f:
line = f.readlines()
self.img_list = np.array([os.path.join(dataset_dir, i.split()[0]) for i in line])
self.label_list = np.array([int(i.split()[1]) for i in line])
self.cam_list = np.array([int(i.split()[2]) for i in line])
self.query_list = np.array([True if 'query' in i else False for i in line])
if self.mode=='test':
self.frame_list =np.array([int(i.split()[3]) for i in line])
select = self.cam_list==camid
self.img_list = self.img_list[select]
self.label_list = self.label_list[select]
self.cam_list = self.cam_list[select]
self.frame_list = self.frame_list[select]
self.query_list = self.query_list[select]
#self.cam_list = [int(i.split('c')[1][0]) for i in line]
self.cams = np.unique(self.cam_list)
def __getitem__(self, index):
im_path = self.img_list[index]
image = Image.open(im_path).convert('RGB')
image = self.transform(image)
if self.mode=='train':
return image, self.label_list[index], self.cam_list[index]
elif self.mode=='test':
return image, self.label_list[index], self.cam_list[index], self.frame_list[index], self.query_list[index]
def __len__(self):
return len(self.label_list)
class imgdataset_camtrans(data.Dataset):
def __init__(self, dataset_dir, txt_path, transformer = 'train', num_cam=8, K=4):
self.num_cam = num_cam
self.mode = transformer
self.transform = train_transform if transformer == 'train' else test_transform
self.K = K
with open(txt_path) as f:
line = f.readlines()
self.img_list = [os.path.join(dataset_dir, i.split()[0]) for i in line]
self.label_list = [int(i.split()[1]) for i in line]
#self.cam_list = [int(i.split('c')[1][0]) for i in line]
self.cam_list = [int(i.split()[2]) for i in line]
def __getitem__(self, index):
im_path = self.img_list[index]
camid = self.cam_list[index]
cams = torch.randperm(self.num_cam) + 1
imgs = []
cam_labels = []
index_labels = []
for sel_cam in cams[0:self.K]:
if sel_cam != camid:
if 'msmt' in im_path:
im_path_cam = im_path[:-4]+'_fake_'+str(sel_cam.numpy())+'.jpg'
else:
im_path_cam = im_path[:-4] + '_fake_' + str(camid) + 'to' + str(sel_cam.numpy()) + '.jpg'
else:
im_path_cam = im_path
#print('im_path', camid, sel_cam,im_path_cam)
image = Image.open(im_path_cam).convert('RGB')
image = self.transform(image)
imgs.append(image.numpy())
#imgs.append(image)
cam_labels.append(sel_cam)
index_labels.append(index)
imgs = np.array(imgs, np.float32)
imgs = torch.from_numpy(imgs).float()
cam_labels = np.array(cam_labels)
cam_labels = torch.from_numpy(cam_labels)
index_labels = np.array(index_labels)
index_labels = torch.from_numpy(index_labels)
return imgs, self.label_list[index], index_labels, cam_labels
def __len__(self):
return len(self.label_list)
class NormalCollateFn:
def __call__(self, batch):
img_tensor = [x[0] for x in batch]
pids = np.array([x[1] for x in batch])
camids = np.array([x[2] for x in batch])
return torch.stack(img_tensor, dim=0), torch.from_numpy(pids), torch.from_numpy(np.array(camids))
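# Hedged usage sketch (added for illustration, not part of the original module):
# wiring the dataset, sampler and collate function above into a DataLoader.
# The dataset directory and list file are placeholders only.
# train_set = imgdataset('/path/to/images', '/path/to/train_list.txt', transformer='train')
# sampler = IdentityCameraSampler(train_set.data_source, batch_size=64, num_instances=4)
# loader = data.DataLoader(train_set, batch_size=64, sampler=sampler,
#                          collate_fn=NormalCollateFn(), num_workers=4)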
|
1618810
|
import unittest
import numpy
from templevel import TempLevel
from pymclevel.box import BoundingBox
__author__ = 'Rio'
class TestJavaLevel(unittest.TestCase):
def setUp(self):
self.creativelevel = TempLevel("Dojo_64_64_128.dat")
self.indevlevel = TempLevel("hell.mclevel")
def testCopy(self):
indevlevel = self.indevlevel.level
creativelevel = self.creativelevel.level
creativelevel.copyBlocksFrom(indevlevel, BoundingBox((0, 0, 0), (64, 64, 64,)), (0, 0, 0))
assert(numpy.array((indevlevel.Blocks[0:64, 0:64, 0:64]) == (creativelevel.Blocks[0:64, 0:64, 0:64])).all())
creativelevel.saveInPlace()
# xxx old survival levels
|
1618827
|
from torch.optim import Adam, SGD, AdamW
import torch
from torch.optim.lr_scheduler import OneCycleLR
import numpy as np
import os
import time
from torch.utils.data import DataLoader
from dataset.vocab import Vocab
from dataset.add_noise import SynthesizeData
from params import *
from models.seq2seq import Seq2Seq
from models.seq2seq_without_attention import Seq2Seq_WithoutAtt
from utils.logger import Logger
from dataset.autocorrect_dataset import AutoCorrectDataset
from models.loss import LabelSmoothingLoss
from utils.utils import translate, translate_beam_search, batch_translate_beam_search
from utils.metrics import compute_accuracy
class Trainer():
def __init__(self, alphabets_, list_ngram):
self.vocab = Vocab(alphabets_)
self.synthesizer = SynthesizeData(vocab_path="")
self.list_ngrams_train, self.list_ngrams_valid = self.train_test_split(list_ngram, test_size=0.1)
print("Loaded data!!!")
print("Total training samples: ", len(self.list_ngrams_train))
print("Total valid samples: ", len(self.list_ngrams_valid))
INPUT_DIM = self.vocab.__len__()
OUTPUT_DIM = self.vocab.__len__()
self.device = DEVICE
self.num_iters = NUM_ITERS
self.beamsearch = BEAM_SEARCH
self.batch_size = BATCH_SIZE
self.print_every = PRINT_PER_ITER
self.valid_every = VALID_PER_ITER
self.checkpoint = CHECKPOINT
self.export_weights = EXPORT
self.metrics = MAX_SAMPLE_VALID
logger = LOG
if logger:
self.logger = Logger(logger)
self.iter = 0
self.model = Seq2Seq(input_dim=INPUT_DIM, output_dim=OUTPUT_DIM, encoder_embbeded=ENC_EMB_DIM,
decoder_embedded=DEC_EMB_DIM,
encoder_hidden=ENC_HID_DIM, decoder_hidden=DEC_HID_DIM, encoder_dropout=ENC_DROPOUT,
decoder_dropout=DEC_DROPOUT)
self.optimizer = AdamW(self.model.parameters(), betas=(0.9, 0.98), eps=1e-09)
self.scheduler = OneCycleLR(self.optimizer, total_steps=self.num_iters, pct_start=PCT_START, max_lr=MAX_LR)
self.criterion = LabelSmoothingLoss(len(self.vocab), padding_idx=self.vocab.pad, smoothing=0.1)
self.train_gen = self.data_gen(self.list_ngrams_train, self.synthesizer, self.vocab, is_train=True)
self.valid_gen = self.data_gen(self.list_ngrams_valid, self.synthesizer, self.vocab, is_train=False)
self.train_losses = []
# to device
self.model.to(self.device)
self.criterion.to(self.device)
def train_test_split(self, list_phrases, test_size=0.1):
train_idx = int(len(list_phrases) * (1 - test_size))
list_phrases_train = list_phrases[:train_idx]
list_phrases_valid = list_phrases[train_idx:]
return list_phrases_train, list_phrases_valid
def data_gen(self, list_ngrams_np, synthesizer, vocab, is_train=True):
dataset = AutoCorrectDataset(list_ngrams_np, transform_noise=synthesizer, vocab=vocab, maxlen=MAXLEN)
shuffle = True if is_train else False
gen = DataLoader(
dataset,
batch_size=BATCH_SIZE,
shuffle=shuffle,
drop_last=False)
return gen
def step(self, batch):
self.model.train()
batch = self.batch_to_device(batch)
src, tgt = batch['src'], batch['tgt']
src, tgt = src.transpose(1, 0), tgt.transpose(1, 0) # batch x src_len -> src_len x batch
outputs = self.model(src, tgt) # src : src_len x B, outpus : B x tgt_len x vocab
# loss = self.criterion(rearrange(outputs, 'b t v -> (b t) v'), rearrange(tgt_output, 'b o -> (b o)'))
outputs = outputs.view(-1, outputs.size(2)) # flatten(0, 1)
tgt_output = tgt.transpose(0, 1).reshape(-1) # flatten() # tgt: tgt_len xB , need convert to B x tgt_len
loss = self.criterion(outputs, tgt_output)
self.optimizer.zero_grad()
loss.backward()
torch.nn.utils.clip_grad_norm_(self.model.parameters(), 1)
self.optimizer.step()
self.scheduler.step()
loss_item = loss.item()
return loss_item
def train(self):
print("Begin training from iter: ", self.iter)
total_loss = 0
total_loader_time = 0
total_gpu_time = 0
best_acc = -1
data_iter = iter(self.train_gen)
for i in range(self.num_iters):
self.iter += 1
start = time.time()
try:
batch = next(data_iter)
except StopIteration:
data_iter = iter(self.train_gen)
batch = next(data_iter)
total_loader_time += time.time() - start
start = time.time()
loss = self.step(batch)
total_gpu_time += time.time() - start
total_loss += loss
self.train_losses.append((self.iter, loss))
if self.iter % self.print_every == 0:
info = 'iter: {:06d} - train loss: {:.3f} - lr: {:.2e} - load time: {:.2f} - gpu time: {:.2f}'.format(
self.iter,
total_loss / self.print_every, self.optimizer.param_groups[0]['lr'],
total_loader_time, total_gpu_time)
total_loss = 0
total_loader_time = 0
total_gpu_time = 0
print(info)
self.logger.log(info)
if self.iter % self.valid_every == 0:
val_loss, preds, actuals, inp_sents = self.validate()
acc_full_seq, acc_per_char, cer = self.precision(self.metrics)
info = 'iter: {:06d} - valid loss: {:.3f} - acc full seq: {:.4f} - acc per char: {:.4f} - CER: {:.4f} '.format(
self.iter, val_loss, acc_full_seq, acc_per_char, cer)
print(info)
print("--- Sentence predict ---")
for pred, inp, label in zip(preds, inp_sents, actuals):
infor_predict = 'Pred: {} - Inp: {} - Label: {}'.format(pred, inp, label)
print(infor_predict)
self.logger.log(infor_predict)
self.logger.log(info)
if acc_full_seq > best_acc:
self.save_weights(self.export_weights)
best_acc = acc_full_seq
self.save_checkpoint(self.checkpoint)
def validate(self):
self.model.eval()
total_loss = []
max_step = self.metrics / self.batch_size
with torch.no_grad():
for step, batch in enumerate(self.valid_gen):
batch = self.batch_to_device(batch)
src, tgt = batch['src'], batch['tgt']
src, tgt = src.transpose(1, 0), tgt.transpose(1, 0)
outputs = self.model(src, tgt, 0) # turn off teaching force
outputs = outputs.flatten(0, 1)
tgt_output = tgt.flatten()
loss = self.criterion(outputs, tgt_output)
total_loss.append(loss.item())
preds, actuals, inp_sents, probs = self.predict(5)
del outputs
del loss
if step > max_step:
break
total_loss = np.mean(total_loss)
self.model.train()
return total_loss, preds[:3], actuals[:3], inp_sents[:3]
def predict(self, sample=None):
pred_sents = []
actual_sents = []
inp_sents = []
for batch in self.valid_gen:
batch = self.batch_to_device(batch)
if self.beamsearch:
translated_sentence = batch_translate_beam_search(batch['src'], self.model)
prob = None
else:
translated_sentence, prob = translate(batch['src'], self.model)
pred_sent = self.vocab.batch_decode(translated_sentence.tolist())
actual_sent = self.vocab.batch_decode(batch['tgt'].tolist())
inp_sent = self.vocab.batch_decode(batch['src'].tolist())
pred_sents.extend(pred_sent)
actual_sents.extend(actual_sent)
inp_sents.extend(inp_sent)
if sample is not None and len(pred_sents) > sample:
break
return pred_sents, actual_sents, inp_sents, prob
def precision(self, sample=None):
pred_sents, actual_sents, _, _ = self.predict(sample=sample)
acc_full_seq = compute_accuracy(actual_sents, pred_sents, mode='full_sequence')
acc_per_char = compute_accuracy(actual_sents, pred_sents, mode='per_char')
cer = compute_accuracy(actual_sents, pred_sents, mode='CER')
return acc_full_seq, acc_per_char, cer
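# The mode strings above are passed straight to compute_accuracy; presumably
# 'full_sequence' is the fraction of predictions matching the label exactly,
# 'per_char' is character-level accuracy, and 'CER' is the character error rate
# (lower is better). The exact definitions live in compute_accuracy, not shown here.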
def visualize_prediction(self, sample=16, errorcase=False, fontname='serif', fontsize=16):
pred_sents, actual_sents, inp_sents, probs = self.predict(sample)
if errorcase:
wrongs = []
for i in range(len(inp_sents)):
if pred_sents[i] != actual_sents[i]:
wrongs.append(i)
pred_sents = [pred_sents[i] for i in wrongs]
actual_sents = [actual_sents[i] for i in wrongs]
inp_sents = [inp_sents[i] for i in wrongs]
probs = [probs[i] for i in wrongs]
inp_sents = inp_sents[:sample]
fontdict = {
'family': fontname,
'size': fontsize
}
def visualize_dataset(self, sample=16, fontname='serif'):
n = 0
for batch in self.train_gen:
for i in range(self.batch_size):
# batches from data_gen carry token ids under 'src'/'tgt'; there are no image tensors here
noisy = self.vocab.decode(batch['src'][i].tolist())
sent = self.vocab.decode(batch['tgt'][i].tolist())
n += 1
if n >= sample:
return
def load_checkpoint(self, filename):
checkpoint = torch.load(filename, map_location=torch.device(self.device))
self.optimizer.load_state_dict(checkpoint['optimizer'])
self.scheduler.load_state_dict(checkpoint['scheduler'])
self.model.load_state_dict(checkpoint['state_dict'])
self.iter = checkpoint['iter']
self.train_losses = checkpoint['train_losses']
def save_checkpoint(self, filename):
state = {'iter': self.iter, 'state_dict': self.model.state_dict(),
'optimizer': self.optimizer.state_dict(), 'train_losses': self.train_losses,
'scheduler': self.scheduler.state_dict()}
path, _ = os.path.split(filename)
os.makedirs(path, exist_ok=True)
torch.save(state, filename)
def load_weights(self, filename):
state_dict = torch.load(filename, map_location=torch.device(self.device))
for name, param in self.model.named_parameters():
if name not in state_dict:
print('{} not found'.format(name))
elif state_dict[name].shape != param.shape:
print(
'{} shape mismatch, expected {} but found {}'.format(name, param.shape, state_dict[name].shape))
del state_dict[name]
self.model.load_state_dict(state_dict, strict=False)
def save_weights(self, filename):
path, _ = os.path.split(filename)
os.makedirs(path, exist_ok=True)
torch.save(self.model.state_dict(), filename)
def batch_to_device(self, batch):
src = batch['src'].to(self.device, non_blocking=True)
tgt = batch['tgt'].to(self.device, non_blocking=True)
batch = {
'src': src,
'tgt': tgt
}
return batch
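# Illustrative, self-contained sketch of the shape bookkeeping used in step() and
# validate() above, run on dummy tensors (independent of the trainer class):
if __name__ == '__main__':
    import torch
    import torch.nn as nn
    B, T, V = 4, 7, 100                     # batch size, target length, vocab size
    logits = torch.randn(B, T, V)           # model output: B x tgt_len x vocab
    targets = torch.randint(0, V, (T, B))   # targets kept time-major (tgt_len x B), as in step()
    criterion = nn.CrossEntropyLoss()
    flat_logits = logits.view(-1, V)                    # (B * T) x V
    flat_targets = targets.transpose(0, 1).reshape(-1)  # tgt_len x B -> B x tgt_len -> (B * T,)
    loss = criterion(flat_logits, flat_targets)
    print(loss.item())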
|
1618866
|
from board_tests.test_support.doubles.fake_help_repository import FakeHelpRepository
from board_tests.test_support.doubles.fake_new_face_repository import FakeNewFaceRepository
from board_tests.test_support.doubles.fake_team_repository import FakeTeamRepository
from board_tests.test_support.doubles.gui_spy import GuiSpy
def before_scenario(context, scenario):
context.gui = GuiSpy()
context.team_repository = FakeTeamRepository()
context.new_face_repository = FakeNewFaceRepository()
context.help_repository = FakeHelpRepository()
|
1618869
|
from django.contrib.auth.models import User
from django.test import TestCase
from dfirtrack_config.forms import SystemImporterFileCsvConfigForm
from dfirtrack_main.models import (
Analysisstatus,
Case,
Company,
Dnsname,
Domain,
Location,
Os,
Reason,
Recommendation,
Serviceprovider,
Systemstatus,
Systemtype,
)
class SystemImporterFileCsvConfigFormCsvVsDbTestCase(TestCase):
"""system importer file CSV config form tests"""
@classmethod
def setUpTestData(cls):
# create user
testuser = User.objects.create_user(
username='testuser_system_importer_file_csv_config',
password='<PASSWORD>',
)
# create objects
Analysisstatus.objects.create(analysisstatus_name='analysisstatus_1')
Case.objects.create(
case_name='case_1',
case_is_incident=False,
case_created_by_user_id=testuser,
)
Company.objects.create(company_name='company_1')
Dnsname.objects.create(dnsname_name='dnsname_1')
Domain.objects.create(domain_name='domain_1')
Location.objects.create(location_name='location_1')
Os.objects.create(os_name='os_1')
Reason.objects.create(reason_name='reason_1')
Recommendation.objects.create(recommendation_name='recommendation_1')
Serviceprovider.objects.create(serviceprovider_name='serviceprovider_1')
Systemstatus.objects.create(systemstatus_name='systemstatus_1')
Systemtype.objects.create(systemtype_name='systemtype_1')
""" ip """
def test_system_importer_file_csv_config_form_ip_choice_only(self):
"""test field validation"""
# get user
testuser = User.objects.get(
username='testuser_system_importer_file_csv_config'
).id
# get objects
analysisstatus_1 = Analysisstatus.objects.get(
analysisstatus_name='analysisstatus_1'
).analysisstatus_id
systemstatus_1 = Systemstatus.objects.get(
systemstatus_name='systemstatus_1'
).systemstatus_id
# get form
form = SystemImporterFileCsvConfigForm(
data={
'csv_column_system': '1',
'csv_import_path': '/tmp',
'csv_import_filename': 'systems.csv',
'csv_import_username': str(testuser),
'csv_default_systemstatus': str(systemstatus_1),
'csv_default_analysisstatus': str(analysisstatus_1),
'csv_default_tagfree_systemstatus': str(systemstatus_1),
'csv_default_tagfree_analysisstatus': str(analysisstatus_1),
'csv_tag_lock_systemstatus': 'LOCK_SYSTEMSTATUS',
'csv_tag_lock_analysisstatus': 'LOCK_ANALYSISSTATUS',
'csv_remove_tag': 'tag_remove_prefix',
'csv_field_delimiter': 'field_comma',
'csv_text_quote': 'text_double_quotation_marks',
'csv_ip_delimiter': 'ip_semicolon',
'csv_tag_delimiter': 'tag_space',
'csv_choice_ip': True,
'csv_column_ip': None,
}
)
# compare
self.assertFalse(form.is_valid())
self.assertEqual(form.errors['csv_choice_ip'], ['Add CSV column.'])
def test_system_importer_file_csv_config_form_ip_column_only(self):
"""test field validation"""
# get user
testuser = User.objects.get(
username='testuser_system_importer_file_csv_config'
).id
# get objects
analysisstatus_1 = Analysisstatus.objects.get(
analysisstatus_name='analysisstatus_1'
).analysisstatus_id
systemstatus_1 = Systemstatus.objects.get(
systemstatus_name='systemstatus_1'
).systemstatus_id
# get form
form = SystemImporterFileCsvConfigForm(
data={
'csv_column_system': '1',
'csv_import_path': '/tmp',
'csv_import_filename': 'systems.csv',
'csv_import_username': str(testuser),
'csv_default_systemstatus': str(systemstatus_1),
'csv_default_analysisstatus': str(analysisstatus_1),
'csv_default_tagfree_systemstatus': str(systemstatus_1),
'csv_default_tagfree_analysisstatus': str(analysisstatus_1),
'csv_tag_lock_systemstatus': 'LOCK_SYSTEMSTATUS',
'csv_tag_lock_analysisstatus': 'LOCK_ANALYSISSTATUS',
'csv_remove_tag': 'tag_remove_prefix',
'csv_field_delimiter': 'field_comma',
'csv_text_quote': 'text_double_quotation_marks',
'csv_ip_delimiter': 'ip_semicolon',
'csv_tag_delimiter': 'tag_space',
'csv_choice_ip': False,
'csv_column_ip': '2',
}
)
# compare
self.assertFalse(form.is_valid())
self.assertEqual(form.errors['csv_choice_ip'], ['Forgot to choose CSV?'])
def test_system_importer_file_csv_config_form_ip_from_csv(self):
"""test field validation"""
# get user
testuser = User.objects.get(
username='testuser_system_importer_file_csv_config'
).id
# get objects
analysisstatus_1 = Analysisstatus.objects.get(
analysisstatus_name='analysisstatus_1'
).analysisstatus_id
systemstatus_1 = Systemstatus.objects.get(
systemstatus_name='systemstatus_1'
).systemstatus_id
# get form
form = SystemImporterFileCsvConfigForm(
data={
'csv_column_system': '1',
'csv_import_path': '/tmp',
'csv_import_filename': 'systems.csv',
'csv_import_username': str(testuser),
'csv_default_systemstatus': str(systemstatus_1),
'csv_default_analysisstatus': str(analysisstatus_1),
'csv_default_tagfree_systemstatus': str(systemstatus_1),
'csv_default_tagfree_analysisstatus': str(analysisstatus_1),
'csv_tag_lock_systemstatus': 'LOCK_SYSTEMSTATUS',
'csv_tag_lock_analysisstatus': 'LOCK_ANALYSISSTATUS',
'csv_remove_tag': 'tag_remove_prefix',
'csv_field_delimiter': 'field_comma',
'csv_text_quote': 'text_double_quotation_marks',
'csv_ip_delimiter': 'ip_semicolon',
'csv_tag_delimiter': 'tag_space',
'csv_choice_ip': True,
'csv_column_ip': '2',
}
)
# compare
self.assertTrue(form.is_valid())
""" dnsname """
def test_system_importer_file_csv_config_form_dnsname_choice_only(self):
"""test field validation"""
# get user
testuser = User.objects.get(
username='testuser_system_importer_file_csv_config'
).id
# get objects
analysisstatus_1 = Analysisstatus.objects.get(
analysisstatus_name='analysisstatus_1'
).analysisstatus_id
systemstatus_1 = Systemstatus.objects.get(
systemstatus_name='systemstatus_1'
).systemstatus_id
# get form
form = SystemImporterFileCsvConfigForm(
data={
'csv_column_system': '1',
'csv_import_path': '/tmp',
'csv_import_filename': 'systems.csv',
'csv_import_username': str(testuser),
'csv_default_systemstatus': str(systemstatus_1),
'csv_default_analysisstatus': str(analysisstatus_1),
'csv_default_tagfree_systemstatus': str(systemstatus_1),
'csv_default_tagfree_analysisstatus': str(analysisstatus_1),
'csv_tag_lock_systemstatus': 'LOCK_SYSTEMSTATUS',
'csv_tag_lock_analysisstatus': 'LOCK_ANALYSISSTATUS',
'csv_remove_tag': 'tag_remove_prefix',
'csv_field_delimiter': 'field_comma',
'csv_text_quote': 'text_double_quotation_marks',
'csv_ip_delimiter': 'ip_semicolon',
'csv_tag_delimiter': 'tag_space',
'csv_choice_dnsname': True,
'csv_column_dnsname': None,
}
)
# compare
self.assertFalse(form.is_valid())
self.assertEqual(form.errors['csv_choice_dnsname'], ['Add CSV column.'])
def test_system_importer_file_csv_config_form_dnsname_column_only(self):
"""test field validation"""
# get user
testuser = User.objects.get(
username='testuser_system_importer_file_csv_config'
).id
# get objects
analysisstatus_1 = Analysisstatus.objects.get(
analysisstatus_name='analysisstatus_1'
).analysisstatus_id
systemstatus_1 = Systemstatus.objects.get(
systemstatus_name='systemstatus_1'
).systemstatus_id
# get form
form = SystemImporterFileCsvConfigForm(
data={
'csv_column_system': '1',
'csv_import_path': '/tmp',
'csv_import_filename': 'systems.csv',
'csv_import_username': str(testuser),
'csv_default_systemstatus': str(systemstatus_1),
'csv_default_analysisstatus': str(analysisstatus_1),
'csv_default_tagfree_systemstatus': str(systemstatus_1),
'csv_default_tagfree_analysisstatus': str(analysisstatus_1),
'csv_tag_lock_systemstatus': 'LOCK_SYSTEMSTATUS',
'csv_tag_lock_analysisstatus': 'LOCK_ANALYSISSTATUS',
'csv_remove_tag': 'tag_remove_prefix',
'csv_field_delimiter': 'field_comma',
'csv_text_quote': 'text_double_quotation_marks',
'csv_ip_delimiter': 'ip_semicolon',
'csv_tag_delimiter': 'tag_space',
'csv_choice_dnsname': False,
'csv_column_dnsname': '2',
}
)
# compare
self.assertFalse(form.is_valid())
self.assertEqual(form.errors['csv_choice_dnsname'], ['Forgot to choose CSV?'])
def test_system_importer_file_csv_config_form_dnsname_choice_and_db(self):
"""test field validation"""
# get user
testuser = User.objects.get(
username='testuser_system_importer_file_csv_config'
).id
# get objects
analysisstatus_1 = Analysisstatus.objects.get(
analysisstatus_name='analysisstatus_1'
).analysisstatus_id
dnsname_1 = Dnsname.objects.get(dnsname_name='dnsname_1').dnsname_id
systemstatus_1 = Systemstatus.objects.get(
systemstatus_name='systemstatus_1'
).systemstatus_id
# get form
form = SystemImporterFileCsvConfigForm(
data={
'csv_column_system': '1',
'csv_import_path': '/tmp',
'csv_import_filename': 'systems.csv',
'csv_import_username': str(testuser),
'csv_default_systemstatus': str(systemstatus_1),
'csv_default_analysisstatus': str(analysisstatus_1),
'csv_default_tagfree_systemstatus': str(systemstatus_1),
'csv_default_tagfree_analysisstatus': str(analysisstatus_1),
'csv_tag_lock_systemstatus': 'LOCK_SYSTEMSTATUS',
'csv_tag_lock_analysisstatus': 'LOCK_ANALYSISSTATUS',
'csv_remove_tag': 'tag_remove_prefix',
'csv_field_delimiter': 'field_comma',
'csv_text_quote': 'text_double_quotation_marks',
'csv_ip_delimiter': 'ip_semicolon',
'csv_tag_delimiter': 'tag_space',
'csv_choice_dnsname': True,
'csv_default_dnsname': str(dnsname_1),
}
)
# compare
self.assertFalse(form.is_valid())
self.assertEqual(
form.errors['csv_choice_dnsname'],
['Decide between CSV or database or nothing.'],
)
def test_system_importer_file_csv_config_form_dnsname_column_and_db(self):
"""test field validation"""
# get user
testuser = User.objects.get(
username='testuser_system_importer_file_csv_config'
).id
# get objects
analysisstatus_1 = Analysisstatus.objects.get(
analysisstatus_name='analysisstatus_1'
).analysisstatus_id
dnsname_1 = Dnsname.objects.get(dnsname_name='dnsname_1').dnsname_id
systemstatus_1 = Systemstatus.objects.get(
systemstatus_name='systemstatus_1'
).systemstatus_id
# get form
form = SystemImporterFileCsvConfigForm(
data={
'csv_column_system': '1',
'csv_import_path': '/tmp',
'csv_import_filename': 'systems.csv',
'csv_import_username': str(testuser),
'csv_default_systemstatus': str(systemstatus_1),
'csv_default_analysisstatus': str(analysisstatus_1),
'csv_default_tagfree_systemstatus': str(systemstatus_1),
'csv_default_tagfree_analysisstatus': str(analysisstatus_1),
'csv_tag_lock_systemstatus': 'LOCK_SYSTEMSTATUS',
'csv_tag_lock_analysisstatus': 'LOCK_ANALYSISSTATUS',
'csv_remove_tag': 'tag_remove_prefix',
'csv_field_delimiter': 'field_comma',
'csv_text_quote': 'text_double_quotation_marks',
'csv_ip_delimiter': 'ip_semicolon',
'csv_tag_delimiter': 'tag_space',
'csv_column_dnsname': '2',
'csv_default_dnsname': str(dnsname_1),
}
)
# compare
self.assertFalse(form.is_valid())
self.assertEqual(
form.errors['csv_choice_dnsname'],
['Decide between CSV or database or nothing.'],
)
def test_system_importer_file_csv_config_form_dnsname_choice_column_and_db(self):
"""test field validation"""
# get user
testuser = User.objects.get(
username='testuser_system_importer_file_csv_config'
).id
# get objects
analysisstatus_1 = Analysisstatus.objects.get(
analysisstatus_name='analysisstatus_1'
).analysisstatus_id
dnsname_1 = Dnsname.objects.get(dnsname_name='dnsname_1').dnsname_id
systemstatus_1 = Systemstatus.objects.get(
systemstatus_name='systemstatus_1'
).systemstatus_id
# get form
form = SystemImporterFileCsvConfigForm(
data={
'csv_column_system': '1',
'csv_import_path': '/tmp',
'csv_import_filename': 'systems.csv',
'csv_import_username': str(testuser),
'csv_default_systemstatus': str(systemstatus_1),
'csv_default_analysisstatus': str(analysisstatus_1),
'csv_default_tagfree_systemstatus': str(systemstatus_1),
'csv_default_tagfree_analysisstatus': str(analysisstatus_1),
'csv_tag_lock_systemstatus': 'LOCK_SYSTEMSTATUS',
'csv_tag_lock_analysisstatus': 'LOCK_ANALYSISSTATUS',
'csv_remove_tag': 'tag_remove_prefix',
'csv_field_delimiter': 'field_comma',
'csv_text_quote': 'text_double_quotation_marks',
'csv_ip_delimiter': 'ip_semicolon',
'csv_tag_delimiter': 'tag_space',
'csv_choice_dnsname': True,
'csv_column_dnsname': '2',
'csv_default_dnsname': str(dnsname_1),
}
)
# compare
self.assertFalse(form.is_valid())
self.assertEqual(
form.errors['csv_choice_dnsname'],
['Decide between CSV or database or nothing.'],
)
def test_system_importer_file_csv_config_form_dnsname_from_csv(self):
"""test field validation"""
# get user
testuser = User.objects.get(
username='testuser_system_importer_file_csv_config'
).id
# get objects
analysisstatus_1 = Analysisstatus.objects.get(
analysisstatus_name='analysisstatus_1'
).analysisstatus_id
systemstatus_1 = Systemstatus.objects.get(
systemstatus_name='systemstatus_1'
).systemstatus_id
# get form
form = SystemImporterFileCsvConfigForm(
data={
'csv_column_system': '1',
'csv_import_path': '/tmp',
'csv_import_filename': 'systems.csv',
'csv_import_username': str(testuser),
'csv_default_systemstatus': str(systemstatus_1),
'csv_default_analysisstatus': str(analysisstatus_1),
'csv_default_tagfree_systemstatus': str(systemstatus_1),
'csv_default_tagfree_analysisstatus': str(analysisstatus_1),
'csv_tag_lock_systemstatus': 'LOCK_SYSTEMSTATUS',
'csv_tag_lock_analysisstatus': 'LOCK_ANALYSISSTATUS',
'csv_remove_tag': 'tag_remove_prefix',
'csv_field_delimiter': 'field_comma',
'csv_text_quote': 'text_double_quotation_marks',
'csv_ip_delimiter': 'ip_semicolon',
'csv_tag_delimiter': 'tag_space',
'csv_choice_dnsname': True,
'csv_column_dnsname': '2',
}
)
# compare
self.assertTrue(form.is_valid())
def test_system_importer_file_csv_config_form_dnsname_from_db(self):
"""test field validation"""
# get user
testuser = User.objects.get(
username='testuser_system_importer_file_csv_config'
).id
# get objects
analysisstatus_1 = Analysisstatus.objects.get(
analysisstatus_name='analysisstatus_1'
).analysisstatus_id
dnsname_1 = Dnsname.objects.get(dnsname_name='dnsname_1').dnsname_id
systemstatus_1 = Systemstatus.objects.get(
systemstatus_name='systemstatus_1'
).systemstatus_id
# get form
form = SystemImporterFileCsvConfigForm(
data={
'csv_column_system': '1',
'csv_import_path': '/tmp',
'csv_import_filename': 'systems.csv',
'csv_import_username': str(testuser),
'csv_default_systemstatus': str(systemstatus_1),
'csv_default_analysisstatus': str(analysisstatus_1),
'csv_default_tagfree_systemstatus': str(systemstatus_1),
'csv_default_tagfree_analysisstatus': str(analysisstatus_1),
'csv_tag_lock_systemstatus': 'LOCK_SYSTEMSTATUS',
'csv_tag_lock_analysisstatus': 'LOCK_ANALYSISSTATUS',
'csv_remove_tag': 'tag_remove_prefix',
'csv_field_delimiter': 'field_comma',
'csv_text_quote': 'text_double_quotation_marks',
'csv_ip_delimiter': 'ip_semicolon',
'csv_tag_delimiter': 'tag_space',
'csv_default_dnsname': str(dnsname_1),
}
)
# compare
self.assertTrue(form.is_valid())
""" domain """
def test_system_importer_file_csv_config_form_domain_choice_only(self):
"""test field validation"""
# get user
testuser = User.objects.get(
username='testuser_system_importer_file_csv_config'
).id
# get objects
analysisstatus_1 = Analysisstatus.objects.get(
analysisstatus_name='analysisstatus_1'
).analysisstatus_id
systemstatus_1 = Systemstatus.objects.get(
systemstatus_name='systemstatus_1'
).systemstatus_id
# get form
form = SystemImporterFileCsvConfigForm(
data={
'csv_column_system': '1',
'csv_import_path': '/tmp',
'csv_import_filename': 'systems.csv',
'csv_import_username': str(testuser),
'csv_default_systemstatus': str(systemstatus_1),
'csv_default_analysisstatus': str(analysisstatus_1),
'csv_default_tagfree_systemstatus': str(systemstatus_1),
'csv_default_tagfree_analysisstatus': str(analysisstatus_1),
'csv_tag_lock_systemstatus': 'LOCK_SYSTEMSTATUS',
'csv_tag_lock_analysisstatus': 'LOCK_ANALYSISSTATUS',
'csv_remove_tag': 'tag_remove_prefix',
'csv_field_delimiter': 'field_comma',
'csv_text_quote': 'text_double_quotation_marks',
'csv_ip_delimiter': 'ip_semicolon',
'csv_tag_delimiter': 'tag_space',
'csv_choice_domain': True,
'csv_column_domain': None,
}
)
# compare
self.assertFalse(form.is_valid())
self.assertEqual(form.errors['csv_choice_domain'], ['Add CSV column.'])
def test_system_importer_file_csv_config_form_domain_column_only(self):
"""test field validation"""
# get user
testuser = User.objects.get(
username='testuser_system_importer_file_csv_config'
).id
# get objects
analysisstatus_1 = Analysisstatus.objects.get(
analysisstatus_name='analysisstatus_1'
).analysisstatus_id
systemstatus_1 = Systemstatus.objects.get(
systemstatus_name='systemstatus_1'
).systemstatus_id
# get form
form = SystemImporterFileCsvConfigForm(
data={
'csv_column_system': '1',
'csv_import_path': '/tmp',
'csv_import_filename': 'systems.csv',
'csv_import_username': str(testuser),
'csv_default_systemstatus': str(systemstatus_1),
'csv_default_analysisstatus': str(analysisstatus_1),
'csv_default_tagfree_systemstatus': str(systemstatus_1),
'csv_default_tagfree_analysisstatus': str(analysisstatus_1),
'csv_tag_lock_systemstatus': 'LOCK_SYSTEMSTATUS',
'csv_tag_lock_analysisstatus': 'LOCK_ANALYSISSTATUS',
'csv_remove_tag': 'tag_remove_prefix',
'csv_field_delimiter': 'field_comma',
'csv_text_quote': 'text_double_quotation_marks',
'csv_ip_delimiter': 'ip_semicolon',
'csv_tag_delimiter': 'tag_space',
'csv_choice_domain': False,
'csv_column_domain': '2',
}
)
# compare
self.assertFalse(form.is_valid())
self.assertEqual(form.errors['csv_choice_domain'], ['Forgot to choose CSV?'])
def test_system_importer_file_csv_config_form_domain_choice_and_db(self):
"""test field validation"""
# get user
testuser = User.objects.get(
username='testuser_system_importer_file_csv_config'
).id
# get objects
analysisstatus_1 = Analysisstatus.objects.get(
analysisstatus_name='analysisstatus_1'
).analysisstatus_id
domain_1 = Domain.objects.get(domain_name='domain_1').domain_id
systemstatus_1 = Systemstatus.objects.get(
systemstatus_name='systemstatus_1'
).systemstatus_id
# get form
form = SystemImporterFileCsvConfigForm(
data={
'csv_column_system': '1',
'csv_import_path': '/tmp',
'csv_import_filename': 'systems.csv',
'csv_import_username': str(testuser),
'csv_default_systemstatus': str(systemstatus_1),
'csv_default_analysisstatus': str(analysisstatus_1),
'csv_default_tagfree_systemstatus': str(systemstatus_1),
'csv_default_tagfree_analysisstatus': str(analysisstatus_1),
'csv_tag_lock_systemstatus': 'LOCK_SYSTEMSTATUS',
'csv_tag_lock_analysisstatus': 'LOCK_ANALYSISSTATUS',
'csv_remove_tag': 'tag_remove_prefix',
'csv_field_delimiter': 'field_comma',
'csv_text_quote': 'text_double_quotation_marks',
'csv_ip_delimiter': 'ip_semicolon',
'csv_tag_delimiter': 'tag_space',
'csv_choice_domain': True,
'csv_default_domain': str(domain_1),
}
)
# compare
self.assertFalse(form.is_valid())
self.assertEqual(
form.errors['csv_choice_domain'],
['Decide between CSV or database or nothing.'],
)
def test_system_importer_file_csv_config_form_domain_column_and_db(self):
"""test field validation"""
# get user
testuser = User.objects.get(
username='testuser_system_importer_file_csv_config'
).id
# get objects
analysisstatus_1 = Analysisstatus.objects.get(
analysisstatus_name='analysisstatus_1'
).analysisstatus_id
domain_1 = Domain.objects.get(domain_name='domain_1').domain_id
systemstatus_1 = Systemstatus.objects.get(
systemstatus_name='systemstatus_1'
).systemstatus_id
# get form
form = SystemImporterFileCsvConfigForm(
data={
'csv_column_system': '1',
'csv_import_path': '/tmp',
'csv_import_filename': 'systems.csv',
'csv_import_username': str(testuser),
'csv_default_systemstatus': str(systemstatus_1),
'csv_default_analysisstatus': str(analysisstatus_1),
'csv_default_tagfree_systemstatus': str(systemstatus_1),
'csv_default_tagfree_analysisstatus': str(analysisstatus_1),
'csv_tag_lock_systemstatus': 'LOCK_SYSTEMSTATUS',
'csv_tag_lock_analysisstatus': 'LOCK_ANALYSISSTATUS',
'csv_remove_tag': 'tag_remove_prefix',
'csv_field_delimiter': 'field_comma',
'csv_text_quote': 'text_double_quotation_marks',
'csv_ip_delimiter': 'ip_semicolon',
'csv_tag_delimiter': 'tag_space',
'csv_column_domain': '2',
'csv_default_domain': str(domain_1),
}
)
# compare
self.assertFalse(form.is_valid())
self.assertEqual(
form.errors['csv_choice_domain'],
['Decide between CSV or database or nothing.'],
)
def test_system_importer_file_csv_config_form_domain_choice_column_and_db(self):
"""test field validation"""
# get user
testuser = User.objects.get(
username='testuser_system_importer_file_csv_config'
).id
# get objects
analysisstatus_1 = Analysisstatus.objects.get(
analysisstatus_name='analysisstatus_1'
).analysisstatus_id
domain_1 = Domain.objects.get(domain_name='domain_1').domain_id
systemstatus_1 = Systemstatus.objects.get(
systemstatus_name='systemstatus_1'
).systemstatus_id
# get form
form = SystemImporterFileCsvConfigForm(
data={
'csv_column_system': '1',
'csv_import_path': '/tmp',
'csv_import_filename': 'systems.csv',
'csv_import_username': str(testuser),
'csv_default_systemstatus': str(systemstatus_1),
'csv_default_analysisstatus': str(analysisstatus_1),
'csv_default_tagfree_systemstatus': str(systemstatus_1),
'csv_default_tagfree_analysisstatus': str(analysisstatus_1),
'csv_tag_lock_systemstatus': 'LOCK_SYSTEMSTATUS',
'csv_tag_lock_analysisstatus': 'LOCK_ANALYSISSTATUS',
'csv_remove_tag': 'tag_remove_prefix',
'csv_field_delimiter': 'field_comma',
'csv_text_quote': 'text_double_quotation_marks',
'csv_ip_delimiter': 'ip_semicolon',
'csv_tag_delimiter': 'tag_space',
'csv_choice_domain': True,
'csv_column_domain': '2',
'csv_default_domain': str(domain_1),
}
)
# compare
self.assertFalse(form.is_valid())
self.assertEqual(
form.errors['csv_choice_domain'],
['Decide between CSV or database or nothing.'],
)
def test_system_importer_file_csv_config_form_domain_from_csv(self):
"""test field validation"""
# get user
testuser = User.objects.get(
username='testuser_system_importer_file_csv_config'
).id
# get objects
analysisstatus_1 = Analysisstatus.objects.get(
analysisstatus_name='analysisstatus_1'
).analysisstatus_id
systemstatus_1 = Systemstatus.objects.get(
systemstatus_name='systemstatus_1'
).systemstatus_id
# get form
form = SystemImporterFileCsvConfigForm(
data={
'csv_column_system': '1',
'csv_import_path': '/tmp',
'csv_import_filename': 'systems.csv',
'csv_import_username': str(testuser),
'csv_default_systemstatus': str(systemstatus_1),
'csv_default_analysisstatus': str(analysisstatus_1),
'csv_default_tagfree_systemstatus': str(systemstatus_1),
'csv_default_tagfree_analysisstatus': str(analysisstatus_1),
'csv_tag_lock_systemstatus': 'LOCK_SYSTEMSTATUS',
'csv_tag_lock_analysisstatus': 'LOCK_ANALYSISSTATUS',
'csv_remove_tag': 'tag_remove_prefix',
'csv_field_delimiter': 'field_comma',
'csv_text_quote': 'text_double_quotation_marks',
'csv_ip_delimiter': 'ip_semicolon',
'csv_tag_delimiter': 'tag_space',
'csv_choice_domain': True,
'csv_column_domain': '2',
}
)
# compare
self.assertTrue(form.is_valid())
def test_system_importer_file_csv_config_form_domain_from_db(self):
"""test field validation"""
# get user
testuser = User.objects.get(
username='testuser_system_importer_file_csv_config'
).id
# get objects
analysisstatus_1 = Analysisstatus.objects.get(
analysisstatus_name='analysisstatus_1'
).analysisstatus_id
domain_1 = Domain.objects.get(domain_name='domain_1').domain_id
systemstatus_1 = Systemstatus.objects.get(
systemstatus_name='systemstatus_1'
).systemstatus_id
# get form
form = SystemImporterFileCsvConfigForm(
data={
'csv_column_system': '1',
'csv_import_path': '/tmp',
'csv_import_filename': 'systems.csv',
'csv_import_username': str(testuser),
'csv_default_systemstatus': str(systemstatus_1),
'csv_default_analysisstatus': str(analysisstatus_1),
'csv_default_tagfree_systemstatus': str(systemstatus_1),
'csv_default_tagfree_analysisstatus': str(analysisstatus_1),
'csv_tag_lock_systemstatus': 'LOCK_SYSTEMSTATUS',
'csv_tag_lock_analysisstatus': 'LOCK_ANALYSISSTATUS',
'csv_remove_tag': 'tag_remove_prefix',
'csv_field_delimiter': 'field_comma',
'csv_text_quote': 'text_double_quotation_marks',
'csv_ip_delimiter': 'ip_semicolon',
'csv_tag_delimiter': 'tag_space',
'csv_default_domain': str(domain_1),
}
)
# compare
self.assertTrue(form.is_valid())
""" location """
def test_system_importer_file_csv_config_form_location_choice_only(self):
"""test field validation"""
# get user
testuser = User.objects.get(
username='testuser_system_importer_file_csv_config'
).id
# get objects
analysisstatus_1 = Analysisstatus.objects.get(
analysisstatus_name='analysisstatus_1'
).analysisstatus_id
systemstatus_1 = Systemstatus.objects.get(
systemstatus_name='systemstatus_1'
).systemstatus_id
# get form
form = SystemImporterFileCsvConfigForm(
data={
'csv_column_system': '1',
'csv_import_path': '/tmp',
'csv_import_filename': 'systems.csv',
'csv_import_username': str(testuser),
'csv_default_systemstatus': str(systemstatus_1),
'csv_default_analysisstatus': str(analysisstatus_1),
'csv_default_tagfree_systemstatus': str(systemstatus_1),
'csv_default_tagfree_analysisstatus': str(analysisstatus_1),
'csv_tag_lock_systemstatus': 'LOCK_SYSTEMSTATUS',
'csv_tag_lock_analysisstatus': 'LOCK_ANALYSISSTATUS',
'csv_remove_tag': 'tag_remove_prefix',
'csv_field_delimiter': 'field_comma',
'csv_text_quote': 'text_double_quotation_marks',
'csv_ip_delimiter': 'ip_semicolon',
'csv_tag_delimiter': 'tag_space',
'csv_choice_location': True,
'csv_column_location': None,
}
)
# compare
self.assertFalse(form.is_valid())
self.assertEqual(form.errors['csv_choice_location'], ['Add CSV column.'])
def test_system_importer_file_csv_config_form_location_column_only(self):
"""test field validation"""
# get user
testuser = User.objects.get(
username='testuser_system_importer_file_csv_config'
).id
# get objects
analysisstatus_1 = Analysisstatus.objects.get(
analysisstatus_name='analysisstatus_1'
).analysisstatus_id
systemstatus_1 = Systemstatus.objects.get(
systemstatus_name='systemstatus_1'
).systemstatus_id
# get form
form = SystemImporterFileCsvConfigForm(
data={
'csv_column_system': '1',
'csv_import_path': '/tmp',
'csv_import_filename': 'systems.csv',
'csv_import_username': str(testuser),
'csv_default_systemstatus': str(systemstatus_1),
'csv_default_analysisstatus': str(analysisstatus_1),
'csv_default_tagfree_systemstatus': str(systemstatus_1),
'csv_default_tagfree_analysisstatus': str(analysisstatus_1),
'csv_tag_lock_systemstatus': 'LOCK_SYSTEMSTATUS',
'csv_tag_lock_analysisstatus': 'LOCK_ANALYSISSTATUS',
'csv_remove_tag': 'tag_remove_prefix',
'csv_field_delimiter': 'field_comma',
'csv_text_quote': 'text_double_quotation_marks',
'csv_ip_delimiter': 'ip_semicolon',
'csv_tag_delimiter': 'tag_space',
'csv_choice_location': False,
'csv_column_location': '2',
}
)
# compare
self.assertFalse(form.is_valid())
self.assertEqual(form.errors['csv_choice_location'], ['Forgot to choose CSV?'])
def test_system_importer_file_csv_config_form_location_choice_and_db(self):
"""test field validation"""
# get user
testuser = User.objects.get(
username='testuser_system_importer_file_csv_config'
).id
# get objects
analysisstatus_1 = Analysisstatus.objects.get(
analysisstatus_name='analysisstatus_1'
).analysisstatus_id
location_1 = Location.objects.get(location_name='location_1').location_id
systemstatus_1 = Systemstatus.objects.get(
systemstatus_name='systemstatus_1'
).systemstatus_id
# get form
form = SystemImporterFileCsvConfigForm(
data={
'csv_column_system': '1',
'csv_import_path': '/tmp',
'csv_import_filename': 'systems.csv',
'csv_import_username': str(testuser),
'csv_default_systemstatus': str(systemstatus_1),
'csv_default_analysisstatus': str(analysisstatus_1),
'csv_default_tagfree_systemstatus': str(systemstatus_1),
'csv_default_tagfree_analysisstatus': str(analysisstatus_1),
'csv_tag_lock_systemstatus': 'LOCK_SYSTEMSTATUS',
'csv_tag_lock_analysisstatus': 'LOCK_ANALYSISSTATUS',
'csv_remove_tag': 'tag_remove_prefix',
'csv_field_delimiter': 'field_comma',
'csv_text_quote': 'text_double_quotation_marks',
'csv_ip_delimiter': 'ip_semicolon',
'csv_tag_delimiter': 'tag_space',
'csv_choice_location': True,
'csv_default_location': str(location_1),
}
)
# compare
self.assertFalse(form.is_valid())
self.assertEqual(
form.errors['csv_choice_location'],
['Decide between CSV or database or nothing.'],
)
def test_system_importer_file_csv_config_form_location_column_and_db(self):
"""test field validation"""
# get user
testuser = User.objects.get(
username='testuser_system_importer_file_csv_config'
).id
# get objects
analysisstatus_1 = Analysisstatus.objects.get(
analysisstatus_name='analysisstatus_1'
).analysisstatus_id
location_1 = Location.objects.get(location_name='location_1').location_id
systemstatus_1 = Systemstatus.objects.get(
systemstatus_name='systemstatus_1'
).systemstatus_id
# get form
form = SystemImporterFileCsvConfigForm(
data={
'csv_column_system': '1',
'csv_import_path': '/tmp',
'csv_import_filename': 'systems.csv',
'csv_import_username': str(testuser),
'csv_default_systemstatus': str(systemstatus_1),
'csv_default_analysisstatus': str(analysisstatus_1),
'csv_default_tagfree_systemstatus': str(systemstatus_1),
'csv_default_tagfree_analysisstatus': str(analysisstatus_1),
'csv_tag_lock_systemstatus': 'LOCK_SYSTEMSTATUS',
'csv_tag_lock_analysisstatus': 'LOCK_ANALYSISSTATUS',
'csv_remove_tag': 'tag_remove_prefix',
'csv_field_delimiter': 'field_comma',
'csv_text_quote': 'text_double_quotation_marks',
'csv_ip_delimiter': 'ip_semicolon',
'csv_tag_delimiter': 'tag_space',
'csv_column_location': '2',
'csv_default_location': str(location_1),
}
)
# compare
self.assertFalse(form.is_valid())
self.assertEqual(
form.errors['csv_choice_location'],
['Decide between CSV or database or nothing.'],
)
def test_system_importer_file_csv_config_form_location_choice_column_and_db(self):
"""test field validation"""
# get user
testuser = User.objects.get(
username='testuser_system_importer_file_csv_config'
).id
# get objects
analysisstatus_1 = Analysisstatus.objects.get(
analysisstatus_name='analysisstatus_1'
).analysisstatus_id
location_1 = Location.objects.get(location_name='location_1').location_id
systemstatus_1 = Systemstatus.objects.get(
systemstatus_name='systemstatus_1'
).systemstatus_id
# get form
form = SystemImporterFileCsvConfigForm(
data={
'csv_column_system': '1',
'csv_import_path': '/tmp',
'csv_import_filename': 'systems.csv',
'csv_import_username': str(testuser),
'csv_default_systemstatus': str(systemstatus_1),
'csv_default_analysisstatus': str(analysisstatus_1),
'csv_default_tagfree_systemstatus': str(systemstatus_1),
'csv_default_tagfree_analysisstatus': str(analysisstatus_1),
'csv_tag_lock_systemstatus': 'LOCK_SYSTEMSTATUS',
'csv_tag_lock_analysisstatus': 'LOCK_ANALYSISSTATUS',
'csv_remove_tag': 'tag_remove_prefix',
'csv_field_delimiter': 'field_comma',
'csv_text_quote': 'text_double_quotation_marks',
'csv_ip_delimiter': 'ip_semicolon',
'csv_tag_delimiter': 'tag_space',
'csv_choice_location': True,
'csv_column_location': '2',
'csv_default_location': str(location_1),
}
)
# compare
self.assertFalse(form.is_valid())
self.assertEqual(
form.errors['csv_choice_location'],
['Decide between CSV or database or nothing.'],
)
def test_system_importer_file_csv_config_form_location_from_csv(self):
"""test field validation"""
# get user
testuser = User.objects.get(
username='testuser_system_importer_file_csv_config'
).id
# get objects
analysisstatus_1 = Analysisstatus.objects.get(
analysisstatus_name='analysisstatus_1'
).analysisstatus_id
systemstatus_1 = Systemstatus.objects.get(
systemstatus_name='systemstatus_1'
).systemstatus_id
# get form
form = SystemImporterFileCsvConfigForm(
data={
'csv_column_system': '1',
'csv_import_path': '/tmp',
'csv_import_filename': 'systems.csv',
'csv_import_username': str(testuser),
'csv_default_systemstatus': str(systemstatus_1),
'csv_default_analysisstatus': str(analysisstatus_1),
'csv_default_tagfree_systemstatus': str(systemstatus_1),
'csv_default_tagfree_analysisstatus': str(analysisstatus_1),
'csv_tag_lock_systemstatus': 'LOCK_SYSTEMSTATUS',
'csv_tag_lock_analysisstatus': 'LOCK_ANALYSISSTATUS',
'csv_remove_tag': 'tag_remove_prefix',
'csv_field_delimiter': 'field_comma',
'csv_text_quote': 'text_double_quotation_marks',
'csv_ip_delimiter': 'ip_semicolon',
'csv_tag_delimiter': 'tag_space',
'csv_choice_location': True,
'csv_column_location': '2',
}
)
# compare
self.assertTrue(form.is_valid())
def test_system_importer_file_csv_config_form_location_from_db(self):
"""test field validation"""
# get user
testuser = User.objects.get(
username='testuser_system_importer_file_csv_config'
).id
# get objects
analysisstatus_1 = Analysisstatus.objects.get(
analysisstatus_name='analysisstatus_1'
).analysisstatus_id
location_1 = Location.objects.get(location_name='location_1').location_id
systemstatus_1 = Systemstatus.objects.get(
systemstatus_name='systemstatus_1'
).systemstatus_id
# get form
form = SystemImporterFileCsvConfigForm(
data={
'csv_column_system': '1',
'csv_import_path': '/tmp',
'csv_import_filename': 'systems.csv',
'csv_import_username': str(testuser),
'csv_default_systemstatus': str(systemstatus_1),
'csv_default_analysisstatus': str(analysisstatus_1),
'csv_default_tagfree_systemstatus': str(systemstatus_1),
'csv_default_tagfree_analysisstatus': str(analysisstatus_1),
'csv_tag_lock_systemstatus': 'LOCK_SYSTEMSTATUS',
'csv_tag_lock_analysisstatus': 'LOCK_ANALYSISSTATUS',
'csv_remove_tag': 'tag_remove_prefix',
'csv_field_delimiter': 'field_comma',
'csv_text_quote': 'text_double_quotation_marks',
'csv_ip_delimiter': 'ip_semicolon',
'csv_tag_delimiter': 'tag_space',
'csv_default_location': str(location_1),
}
)
# compare
self.assertTrue(form.is_valid())
""" os """
def test_system_importer_file_csv_config_form_os_choice_only(self):
"""test field validation"""
# get user
testuser = User.objects.get(
username='testuser_system_importer_file_csv_config'
).id
# get objects
analysisstatus_1 = Analysisstatus.objects.get(
analysisstatus_name='analysisstatus_1'
).analysisstatus_id
systemstatus_1 = Systemstatus.objects.get(
systemstatus_name='systemstatus_1'
).systemstatus_id
# get form
form = SystemImporterFileCsvConfigForm(
data={
'csv_column_system': '1',
'csv_import_path': '/tmp',
'csv_import_filename': 'systems.csv',
'csv_import_username': str(testuser),
'csv_default_systemstatus': str(systemstatus_1),
'csv_default_analysisstatus': str(analysisstatus_1),
'csv_default_tagfree_systemstatus': str(systemstatus_1),
'csv_default_tagfree_analysisstatus': str(analysisstatus_1),
'csv_tag_lock_systemstatus': 'LOCK_SYSTEMSTATUS',
'csv_tag_lock_analysisstatus': 'LOCK_ANALYSISSTATUS',
'csv_remove_tag': 'tag_remove_prefix',
'csv_field_delimiter': 'field_comma',
'csv_text_quote': 'text_double_quotation_marks',
'csv_ip_delimiter': 'ip_semicolon',
'csv_tag_delimiter': 'tag_space',
'csv_choice_os': True,
'csv_column_os': None,
}
)
# compare
self.assertFalse(form.is_valid())
self.assertEqual(form.errors['csv_choice_os'], ['Add CSV column.'])
def test_system_importer_file_csv_config_form_os_column_only(self):
"""test field validation"""
# get user
testuser = User.objects.get(
username='testuser_system_importer_file_csv_config'
).id
# get objects
analysisstatus_1 = Analysisstatus.objects.get(
analysisstatus_name='analysisstatus_1'
).analysisstatus_id
systemstatus_1 = Systemstatus.objects.get(
systemstatus_name='systemstatus_1'
).systemstatus_id
# get form
form = SystemImporterFileCsvConfigForm(
data={
'csv_column_system': '1',
'csv_import_path': '/tmp',
'csv_import_filename': 'systems.csv',
'csv_import_username': str(testuser),
'csv_default_systemstatus': str(systemstatus_1),
'csv_default_analysisstatus': str(analysisstatus_1),
'csv_default_tagfree_systemstatus': str(systemstatus_1),
'csv_default_tagfree_analysisstatus': str(analysisstatus_1),
'csv_tag_lock_systemstatus': 'LOCK_SYSTEMSTATUS',
'csv_tag_lock_analysisstatus': 'LOCK_ANALYSISSTATUS',
'csv_remove_tag': 'tag_remove_prefix',
'csv_field_delimiter': 'field_comma',
'csv_text_quote': 'text_double_quotation_marks',
'csv_ip_delimiter': 'ip_semicolon',
'csv_tag_delimiter': 'tag_space',
'csv_choice_os': False,
'csv_column_os': '2',
}
)
# compare
self.assertFalse(form.is_valid())
self.assertEqual(form.errors['csv_choice_os'], ['Forgot to choose CSV?'])
def test_system_importer_file_csv_config_form_os_choice_and_db(self):
"""test field validation"""
# get user
testuser = User.objects.get(
username='testuser_system_importer_file_csv_config'
).id
# get objects
analysisstatus_1 = Analysisstatus.objects.get(
analysisstatus_name='analysisstatus_1'
).analysisstatus_id
os_1 = Os.objects.get(os_name='os_1').os_id
systemstatus_1 = Systemstatus.objects.get(
systemstatus_name='systemstatus_1'
).systemstatus_id
# get form
form = SystemImporterFileCsvConfigForm(
data={
'csv_column_system': '1',
'csv_import_path': '/tmp',
'csv_import_filename': 'systems.csv',
'csv_import_username': str(testuser),
'csv_default_systemstatus': str(systemstatus_1),
'csv_default_analysisstatus': str(analysisstatus_1),
'csv_default_tagfree_systemstatus': str(systemstatus_1),
'csv_default_tagfree_analysisstatus': str(analysisstatus_1),
'csv_tag_lock_systemstatus': 'LOCK_SYSTEMSTATUS',
'csv_tag_lock_analysisstatus': 'LOCK_ANALYSISSTATUS',
'csv_remove_tag': 'tag_remove_prefix',
'csv_field_delimiter': 'field_comma',
'csv_text_quote': 'text_double_quotation_marks',
'csv_ip_delimiter': 'ip_semicolon',
'csv_tag_delimiter': 'tag_space',
'csv_choice_os': True,
'csv_default_os': str(os_1),
}
)
# compare
self.assertFalse(form.is_valid())
self.assertEqual(
form.errors['csv_choice_os'], ['Decide between CSV or database or nothing.']
)
def test_system_importer_file_csv_config_form_os_column_and_db(self):
"""test field validation"""
# get user
testuser = User.objects.get(
username='testuser_system_importer_file_csv_config'
).id
# get objects
analysisstatus_1 = Analysisstatus.objects.get(
analysisstatus_name='analysisstatus_1'
).analysisstatus_id
os_1 = Os.objects.get(os_name='os_1').os_id
systemstatus_1 = Systemstatus.objects.get(
systemstatus_name='systemstatus_1'
).systemstatus_id
# get form
form = SystemImporterFileCsvConfigForm(
data={
'csv_column_system': '1',
'csv_import_path': '/tmp',
'csv_import_filename': 'systems.csv',
'csv_import_username': str(testuser),
'csv_default_systemstatus': str(systemstatus_1),
'csv_default_analysisstatus': str(analysisstatus_1),
'csv_default_tagfree_systemstatus': str(systemstatus_1),
'csv_default_tagfree_analysisstatus': str(analysisstatus_1),
'csv_tag_lock_systemstatus': 'LOCK_SYSTEMSTATUS',
'csv_tag_lock_analysisstatus': 'LOCK_ANALYSISSTATUS',
'csv_remove_tag': 'tag_remove_prefix',
'csv_field_delimiter': 'field_comma',
'csv_text_quote': 'text_double_quotation_marks',
'csv_ip_delimiter': 'ip_semicolon',
'csv_tag_delimiter': 'tag_space',
'csv_column_os': '2',
'csv_default_os': str(os_1),
}
)
# compare
self.assertFalse(form.is_valid())
self.assertEqual(
form.errors['csv_choice_os'], ['Decide between CSV or database or nothing.']
)
def test_system_importer_file_csv_config_form_os_choice_column_and_db(self):
"""test field validation"""
# get user
testuser = User.objects.get(
username='testuser_system_importer_file_csv_config'
).id
# get objects
analysisstatus_1 = Analysisstatus.objects.get(
analysisstatus_name='analysisstatus_1'
).analysisstatus_id
os_1 = Os.objects.get(os_name='os_1').os_id
systemstatus_1 = Systemstatus.objects.get(
systemstatus_name='systemstatus_1'
).systemstatus_id
# get form
form = SystemImporterFileCsvConfigForm(
data={
'csv_column_system': '1',
'csv_import_path': '/tmp',
'csv_import_filename': 'systems.csv',
'csv_import_username': str(testuser),
'csv_default_systemstatus': str(systemstatus_1),
'csv_default_analysisstatus': str(analysisstatus_1),
'csv_default_tagfree_systemstatus': str(systemstatus_1),
'csv_default_tagfree_analysisstatus': str(analysisstatus_1),
'csv_tag_lock_systemstatus': 'LOCK_SYSTEMSTATUS',
'csv_tag_lock_analysisstatus': 'LOCK_ANALYSISSTATUS',
'csv_remove_tag': 'tag_remove_prefix',
'csv_field_delimiter': 'field_comma',
'csv_text_quote': 'text_double_quotation_marks',
'csv_ip_delimiter': 'ip_semicolon',
'csv_tag_delimiter': 'tag_space',
'csv_choice_os': True,
'csv_column_os': '2',
'csv_default_os': str(os_1),
}
)
# compare
self.assertFalse(form.is_valid())
self.assertEqual(
form.errors['csv_choice_os'], ['Decide between CSV or database or nothing.']
)
def test_system_importer_file_csv_config_form_os_from_csv(self):
"""test field validation"""
# get user
testuser = User.objects.get(
username='testuser_system_importer_file_csv_config'
).id
# get objects
analysisstatus_1 = Analysisstatus.objects.get(
analysisstatus_name='analysisstatus_1'
).analysisstatus_id
systemstatus_1 = Systemstatus.objects.get(
systemstatus_name='systemstatus_1'
).systemstatus_id
# get form
form = SystemImporterFileCsvConfigForm(
data={
'csv_column_system': '1',
'csv_import_path': '/tmp',
'csv_import_filename': 'systems.csv',
'csv_import_username': str(testuser),
'csv_default_systemstatus': str(systemstatus_1),
'csv_default_analysisstatus': str(analysisstatus_1),
'csv_default_tagfree_systemstatus': str(systemstatus_1),
'csv_default_tagfree_analysisstatus': str(analysisstatus_1),
'csv_tag_lock_systemstatus': 'LOCK_SYSTEMSTATUS',
'csv_tag_lock_analysisstatus': 'LOCK_ANALYSISSTATUS',
'csv_remove_tag': 'tag_remove_prefix',
'csv_field_delimiter': 'field_comma',
'csv_text_quote': 'text_double_quotation_marks',
'csv_ip_delimiter': 'ip_semicolon',
'csv_tag_delimiter': 'tag_space',
'csv_choice_os': True,
'csv_column_os': '2',
}
)
# compare
self.assertTrue(form.is_valid())
def test_system_importer_file_csv_config_form_os_from_db(self):
"""test field validation"""
# get user
testuser = User.objects.get(
username='testuser_system_importer_file_csv_config'
).id
# get objects
analysisstatus_1 = Analysisstatus.objects.get(
analysisstatus_name='analysisstatus_1'
).analysisstatus_id
os_1 = Os.objects.get(os_name='os_1').os_id
systemstatus_1 = Systemstatus.objects.get(
systemstatus_name='systemstatus_1'
).systemstatus_id
# get form
form = SystemImporterFileCsvConfigForm(
data={
'csv_column_system': '1',
'csv_import_path': '/tmp',
'csv_import_filename': 'systems.csv',
'csv_import_username': str(testuser),
'csv_default_systemstatus': str(systemstatus_1),
'csv_default_analysisstatus': str(analysisstatus_1),
'csv_default_tagfree_systemstatus': str(systemstatus_1),
'csv_default_tagfree_analysisstatus': str(analysisstatus_1),
'csv_tag_lock_systemstatus': 'LOCK_SYSTEMSTATUS',
'csv_tag_lock_analysisstatus': 'LOCK_ANALYSISSTATUS',
'csv_remove_tag': 'tag_remove_prefix',
'csv_field_delimiter': 'field_comma',
'csv_text_quote': 'text_double_quotation_marks',
'csv_ip_delimiter': 'ip_semicolon',
'csv_tag_delimiter': 'tag_space',
'csv_default_os': str(os_1),
}
)
# compare
self.assertTrue(form.is_valid())
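# Note: the data dictionaries in these tests repeat the same baseline configuration.
# A small helper along the lines of the hypothetical sketch below (not part of
# dfirtrack) could build that baseline once and let each test override only the
# fields it exercises:
def minimal_csv_config_data(user_id, systemstatus_id, analysisstatus_id, **overrides):
    """Return the baseline POST data shared by these form tests (sketch only)."""
    data = {
        'csv_column_system': '1',
        'csv_import_path': '/tmp',
        'csv_import_filename': 'systems.csv',
        'csv_import_username': str(user_id),
        'csv_default_systemstatus': str(systemstatus_id),
        'csv_default_analysisstatus': str(analysisstatus_id),
        'csv_default_tagfree_systemstatus': str(systemstatus_id),
        'csv_default_tagfree_analysisstatus': str(analysisstatus_id),
        'csv_tag_lock_systemstatus': 'LOCK_SYSTEMSTATUS',
        'csv_tag_lock_analysisstatus': 'LOCK_ANALYSISSTATUS',
        'csv_remove_tag': 'tag_remove_prefix',
        'csv_field_delimiter': 'field_comma',
        'csv_text_quote': 'text_double_quotation_marks',
        'csv_ip_delimiter': 'ip_semicolon',
        'csv_tag_delimiter': 'tag_space',
    }
    data.update(overrides)
    return data
# Usage (inside a test): SystemImporterFileCsvConfigForm(data=minimal_csv_config_data(
#     testuser, systemstatus_1, analysisstatus_1, csv_choice_os=True, csv_column_os='2'))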
""" reason """
def test_system_importer_file_csv_config_form_reason_choice_only(self):
"""test field validation"""
# get user
testuser = User.objects.get(
username='testuser_system_importer_file_csv_config'
).id
# get objects
analysisstatus_1 = Analysisstatus.objects.get(
analysisstatus_name='analysisstatus_1'
).analysisstatus_id
systemstatus_1 = Systemstatus.objects.get(
systemstatus_name='systemstatus_1'
).systemstatus_id
# get form
form = SystemImporterFileCsvConfigForm(
data={
'csv_column_system': '1',
'csv_import_path': '/tmp',
'csv_import_filename': 'systems.csv',
'csv_import_username': str(testuser),
'csv_default_systemstatus': str(systemstatus_1),
'csv_default_analysisstatus': str(analysisstatus_1),
'csv_default_tagfree_systemstatus': str(systemstatus_1),
'csv_default_tagfree_analysisstatus': str(analysisstatus_1),
'csv_tag_lock_systemstatus': 'LOCK_SYSTEMSTATUS',
'csv_tag_lock_analysisstatus': 'LOCK_ANALYSISSTATUS',
'csv_remove_tag': 'tag_remove_prefix',
'csv_field_delimiter': 'field_comma',
'csv_text_quote': 'text_double_quotation_marks',
'csv_ip_delimiter': 'ip_semicolon',
'csv_tag_delimiter': 'tag_space',
'csv_choice_reason': True,
'csv_column_reason': None,
}
)
# compare
self.assertFalse(form.is_valid())
self.assertEqual(form.errors['csv_choice_reason'], ['Add CSV column.'])
def test_system_importer_file_csv_config_form_reason_column_only(self):
"""test field validation"""
# get user
testuser = User.objects.get(
username='testuser_system_importer_file_csv_config'
).id
# get objects
analysisstatus_1 = Analysisstatus.objects.get(
analysisstatus_name='analysisstatus_1'
).analysisstatus_id
systemstatus_1 = Systemstatus.objects.get(
systemstatus_name='systemstatus_1'
).systemstatus_id
# get form
form = SystemImporterFileCsvConfigForm(
data={
'csv_column_system': '1',
'csv_import_path': '/tmp',
'csv_import_filename': 'systems.csv',
'csv_import_username': str(testuser),
'csv_default_systemstatus': str(systemstatus_1),
'csv_default_analysisstatus': str(analysisstatus_1),
'csv_default_tagfree_systemstatus': str(systemstatus_1),
'csv_default_tagfree_analysisstatus': str(analysisstatus_1),
'csv_tag_lock_systemstatus': 'LOCK_SYSTEMSTATUS',
'csv_tag_lock_analysisstatus': 'LOCK_ANALYSISSTATUS',
'csv_remove_tag': 'tag_remove_prefix',
'csv_field_delimiter': 'field_comma',
'csv_text_quote': 'text_double_quotation_marks',
'csv_ip_delimiter': 'ip_semicolon',
'csv_tag_delimiter': 'tag_space',
'csv_choice_reason': False,
'csv_column_reason': '2',
}
)
# compare
self.assertFalse(form.is_valid())
self.assertEqual(form.errors['csv_choice_reason'], ['Forgot to choose CSV?'])
def test_system_importer_file_csv_config_form_reason_choice_and_db(self):
"""test field validation"""
# get user
testuser = User.objects.get(
username='testuser_system_importer_file_csv_config'
).id
# get objects
analysisstatus_1 = Analysisstatus.objects.get(
analysisstatus_name='analysisstatus_1'
).analysisstatus_id
reason_1 = Reason.objects.get(reason_name='reason_1').reason_id
systemstatus_1 = Systemstatus.objects.get(
systemstatus_name='systemstatus_1'
).systemstatus_id
# get form
form = SystemImporterFileCsvConfigForm(
data={
'csv_column_system': '1',
'csv_import_path': '/tmp',
'csv_import_filename': 'systems.csv',
'csv_import_username': str(testuser),
'csv_default_systemstatus': str(systemstatus_1),
'csv_default_analysisstatus': str(analysisstatus_1),
'csv_default_tagfree_systemstatus': str(systemstatus_1),
'csv_default_tagfree_analysisstatus': str(analysisstatus_1),
'csv_tag_lock_systemstatus': 'LOCK_SYSTEMSTATUS',
'csv_tag_lock_analysisstatus': 'LOCK_ANALYSISSTATUS',
'csv_remove_tag': 'tag_remove_prefix',
'csv_field_delimiter': 'field_comma',
'csv_text_quote': 'text_double_quotation_marks',
'csv_ip_delimiter': 'ip_semicolon',
'csv_tag_delimiter': 'tag_space',
'csv_choice_reason': True,
'csv_default_reason': str(reason_1),
}
)
# compare
self.assertFalse(form.is_valid())
self.assertEqual(
form.errors['csv_choice_reason'],
['Decide between CSV or database or nothing.'],
)
def test_system_importer_file_csv_config_form_reason_column_and_db(self):
"""test field validation"""
# get user
testuser = User.objects.get(
username='testuser_system_importer_file_csv_config'
).id
# get objects
analysisstatus_1 = Analysisstatus.objects.get(
analysisstatus_name='analysisstatus_1'
).analysisstatus_id
reason_1 = Reason.objects.get(reason_name='reason_1').reason_id
systemstatus_1 = Systemstatus.objects.get(
systemstatus_name='systemstatus_1'
).systemstatus_id
# get form
form = SystemImporterFileCsvConfigForm(
data={
'csv_column_system': '1',
'csv_import_path': '/tmp',
'csv_import_filename': 'systems.csv',
'csv_import_username': str(testuser),
'csv_default_systemstatus': str(systemstatus_1),
'csv_default_analysisstatus': str(analysisstatus_1),
'csv_default_tagfree_systemstatus': str(systemstatus_1),
'csv_default_tagfree_analysisstatus': str(analysisstatus_1),
'csv_tag_lock_systemstatus': 'LOCK_SYSTEMSTATUS',
'csv_tag_lock_analysisstatus': 'LOCK_ANALYSISSTATUS',
'csv_remove_tag': 'tag_remove_prefix',
'csv_field_delimiter': 'field_comma',
'csv_text_quote': 'text_double_quotation_marks',
'csv_ip_delimiter': 'ip_semicolon',
'csv_tag_delimiter': 'tag_space',
'csv_column_reason': '2',
'csv_default_reason': str(reason_1),
}
)
# compare
self.assertFalse(form.is_valid())
self.assertEqual(
form.errors['csv_choice_reason'],
['Decide between CSV or database or nothing.'],
)
def test_system_importer_file_csv_config_form_reason_choice_column_and_db(self):
"""test field validation"""
# get user
testuser = User.objects.get(
username='testuser_system_importer_file_csv_config'
).id
# get objects
analysisstatus_1 = Analysisstatus.objects.get(
analysisstatus_name='analysisstatus_1'
).analysisstatus_id
reason_1 = Reason.objects.get(reason_name='reason_1').reason_id
systemstatus_1 = Systemstatus.objects.get(
systemstatus_name='systemstatus_1'
).systemstatus_id
# get form
form = SystemImporterFileCsvConfigForm(
data={
'csv_column_system': '1',
'csv_import_path': '/tmp',
'csv_import_filename': 'systems.csv',
'csv_import_username': str(testuser),
'csv_default_systemstatus': str(systemstatus_1),
'csv_default_analysisstatus': str(analysisstatus_1),
'csv_default_tagfree_systemstatus': str(systemstatus_1),
'csv_default_tagfree_analysisstatus': str(analysisstatus_1),
'csv_tag_lock_systemstatus': 'LOCK_SYSTEMSTATUS',
'csv_tag_lock_analysisstatus': 'LOCK_ANALYSISSTATUS',
'csv_remove_tag': 'tag_remove_prefix',
'csv_field_delimiter': 'field_comma',
'csv_text_quote': 'text_double_quotation_marks',
'csv_ip_delimiter': 'ip_semicolon',
'csv_tag_delimiter': 'tag_space',
'csv_choice_reason': True,
'csv_column_reason': '2',
'csv_default_reason': str(reason_1),
}
)
# compare
self.assertFalse(form.is_valid())
self.assertEqual(
form.errors['csv_choice_reason'],
['Decide between CSV or database or nothing.'],
)
def test_system_importer_file_csv_config_form_reason_from_csv(self):
"""test field validation"""
# get user
testuser = User.objects.get(
username='testuser_system_importer_file_csv_config'
).id
# get objects
analysisstatus_1 = Analysisstatus.objects.get(
analysisstatus_name='analysisstatus_1'
).analysisstatus_id
systemstatus_1 = Systemstatus.objects.get(
systemstatus_name='systemstatus_1'
).systemstatus_id
# get form
form = SystemImporterFileCsvConfigForm(
data={
'csv_column_system': '1',
'csv_import_path': '/tmp',
'csv_import_filename': 'systems.csv',
'csv_import_username': str(testuser),
'csv_default_systemstatus': str(systemstatus_1),
'csv_default_analysisstatus': str(analysisstatus_1),
'csv_default_tagfree_systemstatus': str(systemstatus_1),
'csv_default_tagfree_analysisstatus': str(analysisstatus_1),
'csv_tag_lock_systemstatus': 'LOCK_SYSTEMSTATUS',
'csv_tag_lock_analysisstatus': 'LOCK_ANALYSISSTATUS',
'csv_remove_tag': 'tag_remove_prefix',
'csv_field_delimiter': 'field_comma',
'csv_text_quote': 'text_double_quotation_marks',
'csv_ip_delimiter': 'ip_semicolon',
'csv_tag_delimiter': 'tag_space',
'csv_choice_reason': True,
'csv_column_reason': '2',
}
)
# compare
self.assertTrue(form.is_valid())
def test_system_importer_file_csv_config_form_reason_from_db(self):
"""test field validation"""
# get user
testuser = User.objects.get(
username='testuser_system_importer_file_csv_config'
).id
# get objects
analysisstatus_1 = Analysisstatus.objects.get(
analysisstatus_name='analysisstatus_1'
).analysisstatus_id
reason_1 = Reason.objects.get(reason_name='reason_1').reason_id
systemstatus_1 = Systemstatus.objects.get(
systemstatus_name='systemstatus_1'
).systemstatus_id
# get form
form = SystemImporterFileCsvConfigForm(
data={
'csv_column_system': '1',
'csv_import_path': '/tmp',
'csv_import_filename': 'systems.csv',
'csv_import_username': str(testuser),
'csv_default_systemstatus': str(systemstatus_1),
'csv_default_analysisstatus': str(analysisstatus_1),
'csv_default_tagfree_systemstatus': str(systemstatus_1),
'csv_default_tagfree_analysisstatus': str(analysisstatus_1),
'csv_tag_lock_systemstatus': 'LOCK_SYSTEMSTATUS',
'csv_tag_lock_analysisstatus': 'LOCK_ANALYSISSTATUS',
'csv_remove_tag': 'tag_remove_prefix',
'csv_field_delimiter': 'field_comma',
'csv_text_quote': 'text_double_quotation_marks',
'csv_ip_delimiter': 'ip_semicolon',
'csv_tag_delimiter': 'tag_space',
'csv_default_reason': str(reason_1),
}
)
# compare
self.assertTrue(form.is_valid())
""" recommendation """
def test_system_importer_file_csv_config_form_recommendation_choice_only(self):
"""test field validation"""
# get user
testuser = User.objects.get(
username='testuser_system_importer_file_csv_config'
).id
# get objects
analysisstatus_1 = Analysisstatus.objects.get(
analysisstatus_name='analysisstatus_1'
).analysisstatus_id
systemstatus_1 = Systemstatus.objects.get(
systemstatus_name='systemstatus_1'
).systemstatus_id
# get form
form = SystemImporterFileCsvConfigForm(
data={
'csv_column_system': '1',
'csv_import_path': '/tmp',
'csv_import_filename': 'systems.csv',
'csv_import_username': str(testuser),
'csv_default_systemstatus': str(systemstatus_1),
'csv_default_analysisstatus': str(analysisstatus_1),
'csv_default_tagfree_systemstatus': str(systemstatus_1),
'csv_default_tagfree_analysisstatus': str(analysisstatus_1),
'csv_tag_lock_systemstatus': 'LOCK_SYSTEMSTATUS',
'csv_tag_lock_analysisstatus': 'LOCK_ANALYSISSTATUS',
'csv_remove_tag': 'tag_remove_prefix',
'csv_field_delimiter': 'field_comma',
'csv_text_quote': 'text_double_quotation_marks',
'csv_ip_delimiter': 'ip_semicolon',
'csv_tag_delimiter': 'tag_space',
'csv_choice_recommendation': True,
'csv_column_recommendation': None,
}
)
# compare
self.assertFalse(form.is_valid())
self.assertEqual(form.errors['csv_choice_recommendation'], ['Add CSV column.'])
def test_system_importer_file_csv_config_form_recommendation_column_only(self):
"""test field validation"""
# get user
testuser = User.objects.get(
username='testuser_system_importer_file_csv_config'
).id
# get objects
analysisstatus_1 = Analysisstatus.objects.get(
analysisstatus_name='analysisstatus_1'
).analysisstatus_id
systemstatus_1 = Systemstatus.objects.get(
systemstatus_name='systemstatus_1'
).systemstatus_id
# get form
form = SystemImporterFileCsvConfigForm(
data={
'csv_column_system': '1',
'csv_import_path': '/tmp',
'csv_import_filename': 'systems.csv',
'csv_import_username': str(testuser),
'csv_default_systemstatus': str(systemstatus_1),
'csv_default_analysisstatus': str(analysisstatus_1),
'csv_default_tagfree_systemstatus': str(systemstatus_1),
'csv_default_tagfree_analysisstatus': str(analysisstatus_1),
'csv_tag_lock_systemstatus': 'LOCK_SYSTEMSTATUS',
'csv_tag_lock_analysisstatus': 'LOCK_ANALYSISSTATUS',
'csv_remove_tag': 'tag_remove_prefix',
'csv_field_delimiter': 'field_comma',
'csv_text_quote': 'text_double_quotation_marks',
'csv_ip_delimiter': 'ip_semicolon',
'csv_tag_delimiter': 'tag_space',
'csv_choice_recommendation': False,
'csv_column_recommendation': '2',
}
)
# compare
self.assertFalse(form.is_valid())
self.assertEqual(
form.errors['csv_choice_recommendation'], ['Forgot to choose CSV?']
)
def test_system_importer_file_csv_config_form_recommendation_choice_and_db(self):
"""test field validation"""
# get user
testuser = User.objects.get(
username='testuser_system_importer_file_csv_config'
).id
# get objects
analysisstatus_1 = Analysisstatus.objects.get(
analysisstatus_name='analysisstatus_1'
).analysisstatus_id
recommendation_1 = Recommendation.objects.get(
recommendation_name='recommendation_1'
).recommendation_id
systemstatus_1 = Systemstatus.objects.get(
systemstatus_name='systemstatus_1'
).systemstatus_id
# get form
form = SystemImporterFileCsvConfigForm(
data={
'csv_column_system': '1',
'csv_import_path': '/tmp',
'csv_import_filename': 'systems.csv',
'csv_import_username': str(testuser),
'csv_default_systemstatus': str(systemstatus_1),
'csv_default_analysisstatus': str(analysisstatus_1),
'csv_default_tagfree_systemstatus': str(systemstatus_1),
'csv_default_tagfree_analysisstatus': str(analysisstatus_1),
'csv_tag_lock_systemstatus': 'LOCK_SYSTEMSTATUS',
'csv_tag_lock_analysisstatus': 'LOCK_ANALYSISSTATUS',
'csv_remove_tag': 'tag_remove_prefix',
'csv_field_delimiter': 'field_comma',
'csv_text_quote': 'text_double_quotation_marks',
'csv_ip_delimiter': 'ip_semicolon',
'csv_tag_delimiter': 'tag_space',
'csv_choice_recommendation': True,
'csv_default_recommendation': str(recommendation_1),
}
)
# compare
self.assertFalse(form.is_valid())
self.assertEqual(
form.errors['csv_choice_recommendation'],
['Decide between CSV or database or nothing.'],
)
def test_system_importer_file_csv_config_form_recommendation_column_and_db(self):
"""test field validation"""
# get user
testuser = User.objects.get(
username='testuser_system_importer_file_csv_config'
).id
# get objects
analysisstatus_1 = Analysisstatus.objects.get(
analysisstatus_name='analysisstatus_1'
).analysisstatus_id
recommendation_1 = Recommendation.objects.get(
recommendation_name='recommendation_1'
).recommendation_id
systemstatus_1 = Systemstatus.objects.get(
systemstatus_name='systemstatus_1'
).systemstatus_id
# get form
form = SystemImporterFileCsvConfigForm(
data={
'csv_column_system': '1',
'csv_import_path': '/tmp',
'csv_import_filename': 'systems.csv',
'csv_import_username': str(testuser),
'csv_default_systemstatus': str(systemstatus_1),
'csv_default_analysisstatus': str(analysisstatus_1),
'csv_default_tagfree_systemstatus': str(systemstatus_1),
'csv_default_tagfree_analysisstatus': str(analysisstatus_1),
'csv_tag_lock_systemstatus': 'LOCK_SYSTEMSTATUS',
'csv_tag_lock_analysisstatus': 'LOCK_ANALYSISSTATUS',
'csv_remove_tag': 'tag_remove_prefix',
'csv_field_delimiter': 'field_comma',
'csv_text_quote': 'text_double_quotation_marks',
'csv_ip_delimiter': 'ip_semicolon',
'csv_tag_delimiter': 'tag_space',
'csv_column_recommendation': '2',
'csv_default_recommendation': str(recommendation_1),
}
)
# compare
self.assertFalse(form.is_valid())
self.assertEqual(
form.errors['csv_choice_recommendation'],
['Decide between CSV or database or nothing.'],
)
def test_system_importer_file_csv_config_form_recommendation_choice_column_and_db(
self,
):
"""test field validation"""
# get user
testuser = User.objects.get(
username='testuser_system_importer_file_csv_config'
).id
# get objects
analysisstatus_1 = Analysisstatus.objects.get(
analysisstatus_name='analysisstatus_1'
).analysisstatus_id
recommendation_1 = Recommendation.objects.get(
recommendation_name='recommendation_1'
).recommendation_id
systemstatus_1 = Systemstatus.objects.get(
systemstatus_name='systemstatus_1'
).systemstatus_id
# get form
form = SystemImporterFileCsvConfigForm(
data={
'csv_column_system': '1',
'csv_import_path': '/tmp',
'csv_import_filename': 'systems.csv',
'csv_import_username': str(testuser),
'csv_default_systemstatus': str(systemstatus_1),
'csv_default_analysisstatus': str(analysisstatus_1),
'csv_default_tagfree_systemstatus': str(systemstatus_1),
'csv_default_tagfree_analysisstatus': str(analysisstatus_1),
'csv_tag_lock_systemstatus': 'LOCK_SYSTEMSTATUS',
'csv_tag_lock_analysisstatus': 'LOCK_ANALYSISSTATUS',
'csv_remove_tag': 'tag_remove_prefix',
'csv_field_delimiter': 'field_comma',
'csv_text_quote': 'text_double_quotation_marks',
'csv_ip_delimiter': 'ip_semicolon',
'csv_tag_delimiter': 'tag_space',
'csv_choice_recommendation': True,
'csv_column_recommendation': '2',
'csv_default_recommendation': str(recommendation_1),
}
)
# compare
self.assertFalse(form.is_valid())
self.assertEqual(
form.errors['csv_choice_recommendation'],
['Decide between CSV or database or nothing.'],
)
def test_system_importer_file_csv_config_form_recommendation_from_csv(self):
"""test field validation"""
# get user
testuser = User.objects.get(
username='testuser_system_importer_file_csv_config'
).id
# get objects
analysisstatus_1 = Analysisstatus.objects.get(
analysisstatus_name='analysisstatus_1'
).analysisstatus_id
systemstatus_1 = Systemstatus.objects.get(
systemstatus_name='systemstatus_1'
).systemstatus_id
# get form
form = SystemImporterFileCsvConfigForm(
data={
'csv_column_system': '1',
'csv_import_path': '/tmp',
'csv_import_filename': 'systems.csv',
'csv_import_username': str(testuser),
'csv_default_systemstatus': str(systemstatus_1),
'csv_default_analysisstatus': str(analysisstatus_1),
'csv_default_tagfree_systemstatus': str(systemstatus_1),
'csv_default_tagfree_analysisstatus': str(analysisstatus_1),
'csv_tag_lock_systemstatus': 'LOCK_SYSTEMSTATUS',
'csv_tag_lock_analysisstatus': 'LOCK_ANALYSISSTATUS',
'csv_remove_tag': 'tag_remove_prefix',
'csv_field_delimiter': 'field_comma',
'csv_text_quote': 'text_double_quotation_marks',
'csv_ip_delimiter': 'ip_semicolon',
'csv_tag_delimiter': 'tag_space',
'csv_choice_recommendation': True,
'csv_column_recommendation': '2',
}
)
# compare
self.assertTrue(form.is_valid())
def test_system_importer_file_csv_config_form_recommendation_from_db(self):
"""test field validation"""
# get user
testuser = User.objects.get(
username='testuser_system_importer_file_csv_config'
).id
# get objects
analysisstatus_1 = Analysisstatus.objects.get(
analysisstatus_name='analysisstatus_1'
).analysisstatus_id
recommendation_1 = Recommendation.objects.get(
recommendation_name='recommendation_1'
).recommendation_id
systemstatus_1 = Systemstatus.objects.get(
systemstatus_name='systemstatus_1'
).systemstatus_id
# get form
form = SystemImporterFileCsvConfigForm(
data={
'csv_column_system': '1',
'csv_import_path': '/tmp',
'csv_import_filename': 'systems.csv',
'csv_import_username': str(testuser),
'csv_default_systemstatus': str(systemstatus_1),
'csv_default_analysisstatus': str(analysisstatus_1),
'csv_default_tagfree_systemstatus': str(systemstatus_1),
'csv_default_tagfree_analysisstatus': str(analysisstatus_1),
'csv_tag_lock_systemstatus': 'LOCK_SYSTEMSTATUS',
'csv_tag_lock_analysisstatus': 'LOCK_ANALYSISSTATUS',
'csv_remove_tag': 'tag_remove_prefix',
'csv_field_delimiter': 'field_comma',
'csv_text_quote': 'text_double_quotation_marks',
'csv_ip_delimiter': 'ip_semicolon',
'csv_tag_delimiter': 'tag_space',
'csv_default_recommendation': str(recommendation_1),
}
)
# compare
self.assertTrue(form.is_valid())
""" serviceprovider """
def test_system_importer_file_csv_config_form_serviceprovider_choice_only(self):
"""test field validation"""
# get user
testuser = User.objects.get(
username='testuser_system_importer_file_csv_config'
).id
# get objects
analysisstatus_1 = Analysisstatus.objects.get(
analysisstatus_name='analysisstatus_1'
).analysisstatus_id
systemstatus_1 = Systemstatus.objects.get(
systemstatus_name='systemstatus_1'
).systemstatus_id
# get form
form = SystemImporterFileCsvConfigForm(
data={
'csv_column_system': '1',
'csv_import_path': '/tmp',
'csv_import_filename': 'systems.csv',
'csv_import_username': str(testuser),
'csv_default_systemstatus': str(systemstatus_1),
'csv_default_analysisstatus': str(analysisstatus_1),
'csv_default_tagfree_systemstatus': str(systemstatus_1),
'csv_default_tagfree_analysisstatus': str(analysisstatus_1),
'csv_tag_lock_systemstatus': 'LOCK_SYSTEMSTATUS',
'csv_tag_lock_analysisstatus': 'LOCK_ANALYSISSTATUS',
'csv_remove_tag': 'tag_remove_prefix',
'csv_field_delimiter': 'field_comma',
'csv_text_quote': 'text_double_quotation_marks',
'csv_ip_delimiter': 'ip_semicolon',
'csv_tag_delimiter': 'tag_space',
'csv_choice_serviceprovider': True,
'csv_column_serviceprovider': None,
}
)
# compare
self.assertFalse(form.is_valid())
self.assertEqual(form.errors['csv_choice_serviceprovider'], ['Add CSV column.'])
def test_system_importer_file_csv_config_form_serviceprovider_column_only(self):
"""test field validation"""
# get user
testuser = User.objects.get(
username='testuser_system_importer_file_csv_config'
).id
# get objects
analysisstatus_1 = Analysisstatus.objects.get(
analysisstatus_name='analysisstatus_1'
).analysisstatus_id
systemstatus_1 = Systemstatus.objects.get(
systemstatus_name='systemstatus_1'
).systemstatus_id
# get form
form = SystemImporterFileCsvConfigForm(
data={
'csv_column_system': '1',
'csv_import_path': '/tmp',
'csv_import_filename': 'systems.csv',
'csv_import_username': str(testuser),
'csv_default_systemstatus': str(systemstatus_1),
'csv_default_analysisstatus': str(analysisstatus_1),
'csv_default_tagfree_systemstatus': str(systemstatus_1),
'csv_default_tagfree_analysisstatus': str(analysisstatus_1),
'csv_tag_lock_systemstatus': 'LOCK_SYSTEMSTATUS',
'csv_tag_lock_analysisstatus': 'LOCK_ANALYSISSTATUS',
'csv_remove_tag': 'tag_remove_prefix',
'csv_field_delimiter': 'field_comma',
'csv_text_quote': 'text_double_quotation_marks',
'csv_ip_delimiter': 'ip_semicolon',
'csv_tag_delimiter': 'tag_space',
'csv_choice_serviceprovider': False,
'csv_column_serviceprovider': '2',
}
)
# compare
self.assertFalse(form.is_valid())
self.assertEqual(
form.errors['csv_choice_serviceprovider'], ['Forgot to choose CSV?']
)
def test_system_importer_file_csv_config_form_serviceprovider_choice_and_db(self):
"""test field validation"""
# get user
testuser = User.objects.get(
username='testuser_system_importer_file_csv_config'
).id
# get objects
analysisstatus_1 = Analysisstatus.objects.get(
analysisstatus_name='analysisstatus_1'
).analysisstatus_id
serviceprovider_1 = Serviceprovider.objects.get(
serviceprovider_name='serviceprovider_1'
).serviceprovider_id
systemstatus_1 = Systemstatus.objects.get(
systemstatus_name='systemstatus_1'
).systemstatus_id
# get form
form = SystemImporterFileCsvConfigForm(
data={
'csv_column_system': '1',
'csv_import_path': '/tmp',
'csv_import_filename': 'systems.csv',
'csv_import_username': str(testuser),
'csv_default_systemstatus': str(systemstatus_1),
'csv_default_analysisstatus': str(analysisstatus_1),
'csv_default_tagfree_systemstatus': str(systemstatus_1),
'csv_default_tagfree_analysisstatus': str(analysisstatus_1),
'csv_tag_lock_systemstatus': 'LOCK_SYSTEMSTATUS',
'csv_tag_lock_analysisstatus': 'LOCK_ANALYSISSTATUS',
'csv_remove_tag': 'tag_remove_prefix',
'csv_field_delimiter': 'field_comma',
'csv_text_quote': 'text_double_quotation_marks',
'csv_ip_delimiter': 'ip_semicolon',
'csv_tag_delimiter': 'tag_space',
'csv_choice_serviceprovider': True,
'csv_default_serviceprovider': str(serviceprovider_1),
}
)
# compare
self.assertFalse(form.is_valid())
self.assertEqual(
form.errors['csv_choice_serviceprovider'],
['Decide between CSV or database or nothing.'],
)
def test_system_importer_file_csv_config_form_serviceprovider_column_and_db(self):
"""test field validation"""
# get user
testuser = User.objects.get(
username='testuser_system_importer_file_csv_config'
).id
# get objects
analysisstatus_1 = Analysisstatus.objects.get(
analysisstatus_name='analysisstatus_1'
).analysisstatus_id
serviceprovider_1 = Serviceprovider.objects.get(
serviceprovider_name='serviceprovider_1'
).serviceprovider_id
systemstatus_1 = Systemstatus.objects.get(
systemstatus_name='systemstatus_1'
).systemstatus_id
# get form
form = SystemImporterFileCsvConfigForm(
data={
'csv_column_system': '1',
'csv_import_path': '/tmp',
'csv_import_filename': 'systems.csv',
'csv_import_username': str(testuser),
'csv_default_systemstatus': str(systemstatus_1),
'csv_default_analysisstatus': str(analysisstatus_1),
'csv_default_tagfree_systemstatus': str(systemstatus_1),
'csv_default_tagfree_analysisstatus': str(analysisstatus_1),
'csv_tag_lock_systemstatus': 'LOCK_SYSTEMSTATUS',
'csv_tag_lock_analysisstatus': 'LOCK_ANALYSISSTATUS',
'csv_remove_tag': 'tag_remove_prefix',
'csv_field_delimiter': 'field_comma',
'csv_text_quote': 'text_double_quotation_marks',
'csv_ip_delimiter': 'ip_semicolon',
'csv_tag_delimiter': 'tag_space',
'csv_column_serviceprovider': '2',
'csv_default_serviceprovider': str(serviceprovider_1),
}
)
# compare
self.assertFalse(form.is_valid())
self.assertEqual(
form.errors['csv_choice_serviceprovider'],
['Decide between CSV or database or nothing.'],
)
def test_system_importer_file_csv_config_form_serviceprovider_choice_column_and_db(
self,
):
"""test field validation"""
# get user
testuser = User.objects.get(
username='testuser_system_importer_file_csv_config'
).id
# get objects
analysisstatus_1 = Analysisstatus.objects.get(
analysisstatus_name='analysisstatus_1'
).analysisstatus_id
serviceprovider_1 = Serviceprovider.objects.get(
serviceprovider_name='serviceprovider_1'
).serviceprovider_id
systemstatus_1 = Systemstatus.objects.get(
systemstatus_name='systemstatus_1'
).systemstatus_id
# get form
form = SystemImporterFileCsvConfigForm(
data={
'csv_column_system': '1',
'csv_import_path': '/tmp',
'csv_import_filename': 'systems.csv',
'csv_import_username': str(testuser),
'csv_default_systemstatus': str(systemstatus_1),
'csv_default_analysisstatus': str(analysisstatus_1),
'csv_default_tagfree_systemstatus': str(systemstatus_1),
'csv_default_tagfree_analysisstatus': str(analysisstatus_1),
'csv_tag_lock_systemstatus': 'LOCK_SYSTEMSTATUS',
'csv_tag_lock_analysisstatus': 'LOCK_ANALYSISSTATUS',
'csv_remove_tag': 'tag_remove_prefix',
'csv_field_delimiter': 'field_comma',
'csv_text_quote': 'text_double_quotation_marks',
'csv_ip_delimiter': 'ip_semicolon',
'csv_tag_delimiter': 'tag_space',
'csv_choice_serviceprovider': True,
'csv_column_serviceprovider': '2',
'csv_default_serviceprovider': str(serviceprovider_1),
}
)
# compare
self.assertFalse(form.is_valid())
self.assertEqual(
form.errors['csv_choice_serviceprovider'],
['Decide between CSV or database or nothing.'],
)
def test_system_importer_file_csv_config_form_serviceprovider_from_csv(self):
"""test field validation"""
# get user
testuser = User.objects.get(
username='testuser_system_importer_file_csv_config'
).id
# get objects
analysisstatus_1 = Analysisstatus.objects.get(
analysisstatus_name='analysisstatus_1'
).analysisstatus_id
systemstatus_1 = Systemstatus.objects.get(
systemstatus_name='systemstatus_1'
).systemstatus_id
# get form
form = SystemImporterFileCsvConfigForm(
data={
'csv_column_system': '1',
'csv_import_path': '/tmp',
'csv_import_filename': 'systems.csv',
'csv_import_username': str(testuser),
'csv_default_systemstatus': str(systemstatus_1),
'csv_default_analysisstatus': str(analysisstatus_1),
'csv_default_tagfree_systemstatus': str(systemstatus_1),
'csv_default_tagfree_analysisstatus': str(analysisstatus_1),
'csv_tag_lock_systemstatus': 'LOCK_SYSTEMSTATUS',
'csv_tag_lock_analysisstatus': 'LOCK_ANALYSISSTATUS',
'csv_remove_tag': 'tag_remove_prefix',
'csv_field_delimiter': 'field_comma',
'csv_text_quote': 'text_double_quotation_marks',
'csv_ip_delimiter': 'ip_semicolon',
'csv_tag_delimiter': 'tag_space',
'csv_choice_serviceprovider': True,
'csv_column_serviceprovider': '2',
}
)
# compare
self.assertTrue(form.is_valid())
def test_system_importer_file_csv_config_form_serviceprovider_from_db(self):
"""test field validation"""
# get user
testuser = User.objects.get(
username='testuser_system_importer_file_csv_config'
).id
# get objects
analysisstatus_1 = Analysisstatus.objects.get(
analysisstatus_name='analysisstatus_1'
).analysisstatus_id
serviceprovider_1 = Serviceprovider.objects.get(
serviceprovider_name='serviceprovider_1'
).serviceprovider_id
systemstatus_1 = Systemstatus.objects.get(
systemstatus_name='systemstatus_1'
).systemstatus_id
# get form
form = SystemImporterFileCsvConfigForm(
data={
'csv_column_system': '1',
'csv_import_path': '/tmp',
'csv_import_filename': 'systems.csv',
'csv_import_username': str(testuser),
'csv_default_systemstatus': str(systemstatus_1),
'csv_default_analysisstatus': str(analysisstatus_1),
'csv_default_tagfree_systemstatus': str(systemstatus_1),
'csv_default_tagfree_analysisstatus': str(analysisstatus_1),
'csv_tag_lock_systemstatus': 'LOCK_SYSTEMSTATUS',
'csv_tag_lock_analysisstatus': 'LOCK_ANALYSISSTATUS',
'csv_remove_tag': 'tag_remove_prefix',
'csv_field_delimiter': 'field_comma',
'csv_text_quote': 'text_double_quotation_marks',
'csv_ip_delimiter': 'ip_semicolon',
'csv_tag_delimiter': 'tag_space',
'csv_default_serviceprovider': str(serviceprovider_1),
}
)
# compare
self.assertTrue(form.is_valid())
""" systemtype """
def test_system_importer_file_csv_config_form_systemtype_choice_only(self):
"""test field validation"""
# get user
testuser = User.objects.get(
username='testuser_system_importer_file_csv_config'
).id
# get objects
analysisstatus_1 = Analysisstatus.objects.get(
analysisstatus_name='analysisstatus_1'
).analysisstatus_id
systemstatus_1 = Systemstatus.objects.get(
systemstatus_name='systemstatus_1'
).systemstatus_id
# get form
form = SystemImporterFileCsvConfigForm(
data={
'csv_column_system': '1',
'csv_import_path': '/tmp',
'csv_import_filename': 'systems.csv',
'csv_import_username': str(testuser),
'csv_default_systemstatus': str(systemstatus_1),
'csv_default_analysisstatus': str(analysisstatus_1),
'csv_default_tagfree_systemstatus': str(systemstatus_1),
'csv_default_tagfree_analysisstatus': str(analysisstatus_1),
'csv_tag_lock_systemstatus': 'LOCK_SYSTEMSTATUS',
'csv_tag_lock_analysisstatus': 'LOCK_ANALYSISSTATUS',
'csv_remove_tag': 'tag_remove_prefix',
'csv_field_delimiter': 'field_comma',
'csv_text_quote': 'text_double_quotation_marks',
'csv_ip_delimiter': 'ip_semicolon',
'csv_tag_delimiter': 'tag_space',
'csv_choice_systemtype': True,
'csv_column_systemtype': None,
}
)
# compare
self.assertFalse(form.is_valid())
self.assertEqual(form.errors['csv_choice_systemtype'], ['Add CSV column.'])
def test_system_importer_file_csv_config_form_systemtype_column_only(self):
"""test field validation"""
# get user
testuser = User.objects.get(
username='testuser_system_importer_file_csv_config'
).id
# get objects
analysisstatus_1 = Analysisstatus.objects.get(
analysisstatus_name='analysisstatus_1'
).analysisstatus_id
systemstatus_1 = Systemstatus.objects.get(
systemstatus_name='systemstatus_1'
).systemstatus_id
# get form
form = SystemImporterFileCsvConfigForm(
data={
'csv_column_system': '1',
'csv_import_path': '/tmp',
'csv_import_filename': 'systems.csv',
'csv_import_username': str(testuser),
'csv_default_systemstatus': str(systemstatus_1),
'csv_default_analysisstatus': str(analysisstatus_1),
'csv_default_tagfree_systemstatus': str(systemstatus_1),
'csv_default_tagfree_analysisstatus': str(analysisstatus_1),
'csv_tag_lock_systemstatus': 'LOCK_SYSTEMSTATUS',
'csv_tag_lock_analysisstatus': 'LOCK_ANALYSISSTATUS',
'csv_remove_tag': 'tag_remove_prefix',
'csv_field_delimiter': 'field_comma',
'csv_text_quote': 'text_double_quotation_marks',
'csv_ip_delimiter': 'ip_semicolon',
'csv_tag_delimiter': 'tag_space',
'csv_choice_systemtype': False,
'csv_column_systemtype': '2',
}
)
# compare
self.assertFalse(form.is_valid())
self.assertEqual(
form.errors['csv_choice_systemtype'], ['Forgot to choose CSV?']
)
def test_system_importer_file_csv_config_form_systemtype_choice_and_db(self):
"""test field validation"""
# get user
testuser = User.objects.get(
username='testuser_system_importer_file_csv_config'
).id
# get objects
analysisstatus_1 = Analysisstatus.objects.get(
analysisstatus_name='analysisstatus_1'
).analysisstatus_id
systemstatus_1 = Systemstatus.objects.get(
systemstatus_name='systemstatus_1'
).systemstatus_id
systemtype_1 = Systemtype.objects.get(
systemtype_name='systemtype_1'
).systemtype_id
# get form
form = SystemImporterFileCsvConfigForm(
data={
'csv_column_system': '1',
'csv_import_path': '/tmp',
'csv_import_filename': 'systems.csv',
'csv_import_username': str(testuser),
'csv_default_systemstatus': str(systemstatus_1),
'csv_default_analysisstatus': str(analysisstatus_1),
'csv_default_tagfree_systemstatus': str(systemstatus_1),
'csv_default_tagfree_analysisstatus': str(analysisstatus_1),
'csv_tag_lock_systemstatus': 'LOCK_SYSTEMSTATUS',
'csv_tag_lock_analysisstatus': 'LOCK_ANALYSISSTATUS',
'csv_remove_tag': 'tag_remove_prefix',
'csv_field_delimiter': 'field_comma',
'csv_text_quote': 'text_double_quotation_marks',
'csv_ip_delimiter': 'ip_semicolon',
'csv_tag_delimiter': 'tag_space',
'csv_choice_systemtype': True,
'csv_default_systemtype': str(systemtype_1),
}
)
# compare
self.assertFalse(form.is_valid())
self.assertEqual(
form.errors['csv_choice_systemtype'],
['Decide between CSV or database or nothing.'],
)
def test_system_importer_file_csv_config_form_systemtype_column_and_db(self):
"""test field validation"""
# get user
testuser = User.objects.get(
username='testuser_system_importer_file_csv_config'
).id
# get objects
analysisstatus_1 = Analysisstatus.objects.get(
analysisstatus_name='analysisstatus_1'
).analysisstatus_id
systemstatus_1 = Systemstatus.objects.get(
systemstatus_name='systemstatus_1'
).systemstatus_id
systemtype_1 = Systemtype.objects.get(
systemtype_name='systemtype_1'
).systemtype_id
# get form
form = SystemImporterFileCsvConfigForm(
data={
'csv_column_system': '1',
'csv_import_path': '/tmp',
'csv_import_filename': 'systems.csv',
'csv_import_username': str(testuser),
'csv_default_systemstatus': str(systemstatus_1),
'csv_default_analysisstatus': str(analysisstatus_1),
'csv_default_tagfree_systemstatus': str(systemstatus_1),
'csv_default_tagfree_analysisstatus': str(analysisstatus_1),
'csv_tag_lock_systemstatus': 'LOCK_SYSTEMSTATUS',
'csv_tag_lock_analysisstatus': 'LOCK_ANALYSISSTATUS',
'csv_remove_tag': 'tag_remove_prefix',
'csv_field_delimiter': 'field_comma',
'csv_text_quote': 'text_double_quotation_marks',
'csv_ip_delimiter': 'ip_semicolon',
'csv_tag_delimiter': 'tag_space',
'csv_column_systemtype': '2',
'csv_default_systemtype': str(systemtype_1),
}
)
# compare
self.assertFalse(form.is_valid())
self.assertEqual(
form.errors['csv_choice_systemtype'],
['Decide between CSV or database or nothing.'],
)
def test_system_importer_file_csv_config_form_systemtype_choice_column_and_db(self):
"""test field validation"""
# get user
testuser = User.objects.get(
username='testuser_system_importer_file_csv_config'
).id
# get objects
analysisstatus_1 = Analysisstatus.objects.get(
analysisstatus_name='analysisstatus_1'
).analysisstatus_id
systemstatus_1 = Systemstatus.objects.get(
systemstatus_name='systemstatus_1'
).systemstatus_id
systemtype_1 = Systemtype.objects.get(
systemtype_name='systemtype_1'
).systemtype_id
# get form
form = SystemImporterFileCsvConfigForm(
data={
'csv_column_system': '1',
'csv_import_path': '/tmp',
'csv_import_filename': 'systems.csv',
'csv_import_username': str(testuser),
'csv_default_systemstatus': str(systemstatus_1),
'csv_default_analysisstatus': str(analysisstatus_1),
'csv_default_tagfree_systemstatus': str(systemstatus_1),
'csv_default_tagfree_analysisstatus': str(analysisstatus_1),
'csv_tag_lock_systemstatus': 'LOCK_SYSTEMSTATUS',
'csv_tag_lock_analysisstatus': 'LOCK_ANALYSISSTATUS',
'csv_remove_tag': 'tag_remove_prefix',
'csv_field_delimiter': 'field_comma',
'csv_text_quote': 'text_double_quotation_marks',
'csv_ip_delimiter': 'ip_semicolon',
'csv_tag_delimiter': 'tag_space',
'csv_choice_systemtype': True,
'csv_column_systemtype': '2',
'csv_default_systemtype': str(systemtype_1),
}
)
# compare
self.assertFalse(form.is_valid())
self.assertEqual(
form.errors['csv_choice_systemtype'],
['Decide between CSV or database or nothing.'],
)
def test_system_importer_file_csv_config_form_systemtype_from_csv(self):
"""test field validation"""
# get user
testuser = User.objects.get(
username='testuser_system_importer_file_csv_config'
).id
# get objects
analysisstatus_1 = Analysisstatus.objects.get(
analysisstatus_name='analysisstatus_1'
).analysisstatus_id
systemstatus_1 = Systemstatus.objects.get(
systemstatus_name='systemstatus_1'
).systemstatus_id
# get form
form = SystemImporterFileCsvConfigForm(
data={
'csv_column_system': '1',
'csv_import_path': '/tmp',
'csv_import_filename': 'systems.csv',
'csv_import_username': str(testuser),
'csv_default_systemstatus': str(systemstatus_1),
'csv_default_analysisstatus': str(analysisstatus_1),
'csv_default_tagfree_systemstatus': str(systemstatus_1),
'csv_default_tagfree_analysisstatus': str(analysisstatus_1),
'csv_tag_lock_systemstatus': 'LOCK_SYSTEMSTATUS',
'csv_tag_lock_analysisstatus': 'LOCK_ANALYSISSTATUS',
'csv_remove_tag': 'tag_remove_prefix',
'csv_field_delimiter': 'field_comma',
'csv_text_quote': 'text_double_quotation_marks',
'csv_ip_delimiter': 'ip_semicolon',
'csv_tag_delimiter': 'tag_space',
'csv_choice_systemtype': True,
'csv_column_systemtype': '2',
}
)
# compare
self.assertTrue(form.is_valid())
def test_system_importer_file_csv_config_form_systemtype_from_db(self):
"""test field validation"""
# get user
testuser = User.objects.get(
username='testuser_system_importer_file_csv_config'
).id
# get objects
analysisstatus_1 = Analysisstatus.objects.get(
analysisstatus_name='analysisstatus_1'
).analysisstatus_id
systemstatus_1 = Systemstatus.objects.get(
systemstatus_name='systemstatus_1'
).systemstatus_id
systemtype_1 = Systemtype.objects.get(
systemtype_name='systemtype_1'
).systemtype_id
# get form
form = SystemImporterFileCsvConfigForm(
data={
'csv_column_system': '1',
'csv_import_path': '/tmp',
'csv_import_filename': 'systems.csv',
'csv_import_username': str(testuser),
'csv_default_systemstatus': str(systemstatus_1),
'csv_default_analysisstatus': str(analysisstatus_1),
'csv_default_tagfree_systemstatus': str(systemstatus_1),
'csv_default_tagfree_analysisstatus': str(analysisstatus_1),
'csv_tag_lock_systemstatus': 'LOCK_SYSTEMSTATUS',
'csv_tag_lock_analysisstatus': 'LOCK_ANALYSISSTATUS',
'csv_remove_tag': 'tag_remove_prefix',
'csv_field_delimiter': 'field_comma',
'csv_text_quote': 'text_double_quotation_marks',
'csv_ip_delimiter': 'ip_semicolon',
'csv_tag_delimiter': 'tag_space',
'csv_default_systemtype': str(systemtype_1),
}
)
# compare
self.assertTrue(form.is_valid())
""" case """
def test_system_importer_file_csv_config_form_case_choice_only(self):
"""test field validation"""
# get user
testuser = User.objects.get(
username='testuser_system_importer_file_csv_config'
).id
# get objects
analysisstatus_1 = Analysisstatus.objects.get(
analysisstatus_name='analysisstatus_1'
).analysisstatus_id
systemstatus_1 = Systemstatus.objects.get(
systemstatus_name='systemstatus_1'
).systemstatus_id
# get form
form = SystemImporterFileCsvConfigForm(
data={
'csv_column_system': '1',
'csv_import_path': '/tmp',
'csv_import_filename': 'systems.csv',
'csv_import_username': str(testuser),
'csv_default_systemstatus': str(systemstatus_1),
'csv_default_analysisstatus': str(analysisstatus_1),
'csv_default_tagfree_systemstatus': str(systemstatus_1),
'csv_default_tagfree_analysisstatus': str(analysisstatus_1),
'csv_tag_lock_systemstatus': 'LOCK_SYSTEMSTATUS',
'csv_tag_lock_analysisstatus': 'LOCK_ANALYSISSTATUS',
'csv_remove_tag': 'tag_remove_prefix',
'csv_field_delimiter': 'field_comma',
'csv_text_quote': 'text_double_quotation_marks',
'csv_ip_delimiter': 'ip_semicolon',
'csv_tag_delimiter': 'tag_space',
'csv_choice_case': True,
'csv_column_case': None,
}
)
# compare
self.assertFalse(form.is_valid())
self.assertEqual(form.errors['csv_choice_case'], ['Add CSV column.'])
def test_system_importer_file_csv_config_form_case_column_only(self):
"""test field validation"""
# get user
testuser = User.objects.get(
username='testuser_system_importer_file_csv_config'
).id
# get objects
analysisstatus_1 = Analysisstatus.objects.get(
analysisstatus_name='analysisstatus_1'
).analysisstatus_id
systemstatus_1 = Systemstatus.objects.get(
systemstatus_name='systemstatus_1'
).systemstatus_id
# get form
form = SystemImporterFileCsvConfigForm(
data={
'csv_column_system': '1',
'csv_import_path': '/tmp',
'csv_import_filename': 'systems.csv',
'csv_import_username': str(testuser),
'csv_default_systemstatus': str(systemstatus_1),
'csv_default_analysisstatus': str(analysisstatus_1),
'csv_default_tagfree_systemstatus': str(systemstatus_1),
'csv_default_tagfree_analysisstatus': str(analysisstatus_1),
'csv_tag_lock_systemstatus': 'LOCK_SYSTEMSTATUS',
'csv_tag_lock_analysisstatus': 'LOCK_ANALYSISSTATUS',
'csv_remove_tag': 'tag_remove_prefix',
'csv_field_delimiter': 'field_comma',
'csv_text_quote': 'text_double_quotation_marks',
'csv_ip_delimiter': 'ip_semicolon',
'csv_tag_delimiter': 'tag_space',
'csv_choice_case': False,
'csv_column_case': '2',
}
)
# compare
self.assertFalse(form.is_valid())
self.assertEqual(form.errors['csv_choice_case'], ['Forgot to choose CSV?'])
def test_system_importer_file_csv_config_form_case_choice_and_db(self):
"""test field validation"""
# get user
testuser = User.objects.get(
username='testuser_system_importer_file_csv_config'
).id
# get objects
analysisstatus_1 = Analysisstatus.objects.get(
analysisstatus_name='analysisstatus_1'
).analysisstatus_id
case_1 = Case.objects.get(case_name='case_1').case_id
systemstatus_1 = Systemstatus.objects.get(
systemstatus_name='systemstatus_1'
).systemstatus_id
# get form
form = SystemImporterFileCsvConfigForm(
data={
'csv_column_system': '1',
'csv_import_path': '/tmp',
'csv_import_filename': 'systems.csv',
'csv_import_username': str(testuser),
'csv_default_systemstatus': str(systemstatus_1),
'csv_default_analysisstatus': str(analysisstatus_1),
'csv_default_tagfree_systemstatus': str(systemstatus_1),
'csv_default_tagfree_analysisstatus': str(analysisstatus_1),
'csv_tag_lock_systemstatus': 'LOCK_SYSTEMSTATUS',
'csv_tag_lock_analysisstatus': 'LOCK_ANALYSISSTATUS',
'csv_remove_tag': 'tag_remove_prefix',
'csv_field_delimiter': 'field_comma',
'csv_text_quote': 'text_double_quotation_marks',
'csv_ip_delimiter': 'ip_semicolon',
'csv_tag_delimiter': 'tag_space',
'csv_choice_case': True,
'csv_default_case': [
str(case_1),
],
}
)
# compare
self.assertFalse(form.is_valid())
self.assertEqual(
form.errors['csv_choice_case'],
['Decide between CSV or database or nothing.'],
)
def test_system_importer_file_csv_config_form_case_column_and_db(self):
"""test field validation"""
# get user
testuser = User.objects.get(
username='testuser_system_importer_file_csv_config'
).id
# get objects
analysisstatus_1 = Analysisstatus.objects.get(
analysisstatus_name='analysisstatus_1'
).analysisstatus_id
case_1 = Case.objects.get(case_name='case_1').case_id
systemstatus_1 = Systemstatus.objects.get(
systemstatus_name='systemstatus_1'
).systemstatus_id
# get form
form = SystemImporterFileCsvConfigForm(
data={
'csv_column_system': '1',
'csv_import_path': '/tmp',
'csv_import_filename': 'systems.csv',
'csv_import_username': str(testuser),
'csv_default_systemstatus': str(systemstatus_1),
'csv_default_analysisstatus': str(analysisstatus_1),
'csv_default_tagfree_systemstatus': str(systemstatus_1),
'csv_default_tagfree_analysisstatus': str(analysisstatus_1),
'csv_tag_lock_systemstatus': 'LOCK_SYSTEMSTATUS',
'csv_tag_lock_analysisstatus': 'LOCK_ANALYSISSTATUS',
'csv_remove_tag': 'tag_remove_prefix',
'csv_field_delimiter': 'field_comma',
'csv_text_quote': 'text_double_quotation_marks',
'csv_ip_delimiter': 'ip_semicolon',
'csv_tag_delimiter': 'tag_space',
'csv_column_case': '2',
'csv_default_case': [
str(case_1),
],
}
)
# compare
self.assertFalse(form.is_valid())
self.assertEqual(
form.errors['csv_choice_case'],
['Decide between CSV or database or nothing.'],
)
def test_system_importer_file_csv_config_form_case_choice_column_and_db(self):
"""test field validation"""
# get user
testuser = User.objects.get(
username='testuser_system_importer_file_csv_config'
).id
# get objects
analysisstatus_1 = Analysisstatus.objects.get(
analysisstatus_name='analysisstatus_1'
).analysisstatus_id
case_1 = Case.objects.get(case_name='case_1').case_id
systemstatus_1 = Systemstatus.objects.get(
systemstatus_name='systemstatus_1'
).systemstatus_id
# get form
form = SystemImporterFileCsvConfigForm(
data={
'csv_column_system': '1',
'csv_import_path': '/tmp',
'csv_import_filename': 'systems.csv',
'csv_import_username': str(testuser),
'csv_default_systemstatus': str(systemstatus_1),
'csv_default_analysisstatus': str(analysisstatus_1),
'csv_default_tagfree_systemstatus': str(systemstatus_1),
'csv_default_tagfree_analysisstatus': str(analysisstatus_1),
'csv_tag_lock_systemstatus': 'LOCK_SYSTEMSTATUS',
'csv_tag_lock_analysisstatus': 'LOCK_ANALYSISSTATUS',
'csv_remove_tag': 'tag_remove_prefix',
'csv_field_delimiter': 'field_comma',
'csv_text_quote': 'text_double_quotation_marks',
'csv_ip_delimiter': 'ip_semicolon',
'csv_tag_delimiter': 'tag_space',
'csv_choice_case': True,
'csv_column_case': '2',
'csv_default_case': [
str(case_1),
],
}
)
# compare
self.assertFalse(form.is_valid())
self.assertEqual(
form.errors['csv_choice_case'],
['Decide between CSV or database or nothing.'],
)
def test_system_importer_file_csv_config_form_case_from_csv(self):
"""test field validation"""
# get user
testuser = User.objects.get(
username='testuser_system_importer_file_csv_config'
).id
# get objects
analysisstatus_1 = Analysisstatus.objects.get(
analysisstatus_name='analysisstatus_1'
).analysisstatus_id
systemstatus_1 = Systemstatus.objects.get(
systemstatus_name='systemstatus_1'
).systemstatus_id
# get form
form = SystemImporterFileCsvConfigForm(
data={
'csv_column_system': '1',
'csv_import_path': '/tmp',
'csv_import_filename': 'systems.csv',
'csv_import_username': str(testuser),
'csv_default_systemstatus': str(systemstatus_1),
'csv_default_analysisstatus': str(analysisstatus_1),
'csv_default_tagfree_systemstatus': str(systemstatus_1),
'csv_default_tagfree_analysisstatus': str(analysisstatus_1),
'csv_tag_lock_systemstatus': 'LOCK_SYSTEMSTATUS',
'csv_tag_lock_analysisstatus': 'LOCK_ANALYSISSTATUS',
'csv_remove_tag': 'tag_remove_prefix',
'csv_field_delimiter': 'field_comma',
'csv_text_quote': 'text_double_quotation_marks',
'csv_ip_delimiter': 'ip_semicolon',
'csv_tag_delimiter': 'tag_space',
'csv_choice_case': True,
'csv_column_case': '2',
}
)
# compare
self.assertTrue(form.is_valid())
def test_system_importer_file_csv_config_form_case_from_db(self):
"""test field validation"""
# get user
testuser = User.objects.get(
username='testuser_system_importer_file_csv_config'
).id
# get objects
analysisstatus_1 = Analysisstatus.objects.get(
analysisstatus_name='analysisstatus_1'
).analysisstatus_id
case_1 = Case.objects.get(case_name='case_1').case_id
systemstatus_1 = Systemstatus.objects.get(
systemstatus_name='systemstatus_1'
).systemstatus_id
# get form
form = SystemImporterFileCsvConfigForm(
data={
'csv_column_system': '1',
'csv_import_path': '/tmp',
'csv_import_filename': 'systems.csv',
'csv_import_username': str(testuser),
'csv_default_systemstatus': str(systemstatus_1),
'csv_default_analysisstatus': str(analysisstatus_1),
'csv_default_tagfree_systemstatus': str(systemstatus_1),
'csv_default_tagfree_analysisstatus': str(analysisstatus_1),
'csv_tag_lock_systemstatus': 'LOCK_SYSTEMSTATUS',
'csv_tag_lock_analysisstatus': 'LOCK_ANALYSISSTATUS',
'csv_remove_tag': 'tag_remove_prefix',
'csv_field_delimiter': 'field_comma',
'csv_text_quote': 'text_double_quotation_marks',
'csv_ip_delimiter': 'ip_semicolon',
'csv_tag_delimiter': 'tag_space',
'csv_default_case': [
str(case_1),
],
}
)
# compare
self.assertTrue(form.is_valid())
""" company """
def test_system_importer_file_csv_config_form_company_choice_only(self):
"""test field validation"""
# get user
testuser = User.objects.get(
username='testuser_system_importer_file_csv_config'
).id
# get objects
analysisstatus_1 = Analysisstatus.objects.get(
analysisstatus_name='analysisstatus_1'
).analysisstatus_id
systemstatus_1 = Systemstatus.objects.get(
systemstatus_name='systemstatus_1'
).systemstatus_id
# get form
form = SystemImporterFileCsvConfigForm(
data={
'csv_column_system': '1',
'csv_import_path': '/tmp',
'csv_import_filename': 'systems.csv',
'csv_import_username': str(testuser),
'csv_default_systemstatus': str(systemstatus_1),
'csv_default_analysisstatus': str(analysisstatus_1),
'csv_default_tagfree_systemstatus': str(systemstatus_1),
'csv_default_tagfree_analysisstatus': str(analysisstatus_1),
'csv_tag_lock_systemstatus': 'LOCK_SYSTEMSTATUS',
'csv_tag_lock_analysisstatus': 'LOCK_ANALYSISSTATUS',
'csv_remove_tag': 'tag_remove_prefix',
'csv_field_delimiter': 'field_comma',
'csv_text_quote': 'text_double_quotation_marks',
'csv_ip_delimiter': 'ip_semicolon',
'csv_tag_delimiter': 'tag_space',
'csv_choice_company': True,
'csv_column_company': None,
}
)
# compare
self.assertFalse(form.is_valid())
self.assertEqual(form.errors['csv_choice_company'], ['Add CSV column.'])
def test_system_importer_file_csv_config_form_company_column_only(self):
"""test field validation"""
# get user
testuser = User.objects.get(
username='testuser_system_importer_file_csv_config'
).id
# get objects
analysisstatus_1 = Analysisstatus.objects.get(
analysisstatus_name='analysisstatus_1'
).analysisstatus_id
systemstatus_1 = Systemstatus.objects.get(
systemstatus_name='systemstatus_1'
).systemstatus_id
# get form
form = SystemImporterFileCsvConfigForm(
data={
'csv_column_system': '1',
'csv_import_path': '/tmp',
'csv_import_filename': 'systems.csv',
'csv_import_username': str(testuser),
'csv_default_systemstatus': str(systemstatus_1),
'csv_default_analysisstatus': str(analysisstatus_1),
'csv_default_tagfree_systemstatus': str(systemstatus_1),
'csv_default_tagfree_analysisstatus': str(analysisstatus_1),
'csv_tag_lock_systemstatus': 'LOCK_SYSTEMSTATUS',
'csv_tag_lock_analysisstatus': 'LOCK_ANALYSISSTATUS',
'csv_remove_tag': 'tag_remove_prefix',
'csv_field_delimiter': 'field_comma',
'csv_text_quote': 'text_double_quotation_marks',
'csv_ip_delimiter': 'ip_semicolon',
'csv_tag_delimiter': 'tag_space',
'csv_choice_company': False,
'csv_column_company': '2',
}
)
# compare
self.assertFalse(form.is_valid())
self.assertEqual(form.errors['csv_choice_company'], ['Forgot to choose CSV?'])
def test_system_importer_file_csv_config_form_company_choice_and_db(self):
"""test field validation"""
# get user
testuser = User.objects.get(
username='testuser_system_importer_file_csv_config'
).id
# get objects
analysisstatus_1 = Analysisstatus.objects.get(
analysisstatus_name='analysisstatus_1'
).analysisstatus_id
company_1 = Company.objects.get(company_name='company_1').company_id
systemstatus_1 = Systemstatus.objects.get(
systemstatus_name='systemstatus_1'
).systemstatus_id
# get form
form = SystemImporterFileCsvConfigForm(
data={
'csv_column_system': '1',
'csv_import_path': '/tmp',
'csv_import_filename': 'systems.csv',
'csv_import_username': str(testuser),
'csv_default_systemstatus': str(systemstatus_1),
'csv_default_analysisstatus': str(analysisstatus_1),
'csv_default_tagfree_systemstatus': str(systemstatus_1),
'csv_default_tagfree_analysisstatus': str(analysisstatus_1),
'csv_tag_lock_systemstatus': 'LOCK_SYSTEMSTATUS',
'csv_tag_lock_analysisstatus': 'LOCK_ANALYSISSTATUS',
'csv_remove_tag': 'tag_remove_prefix',
'csv_field_delimiter': 'field_comma',
'csv_text_quote': 'text_double_quotation_marks',
'csv_ip_delimiter': 'ip_semicolon',
'csv_tag_delimiter': 'tag_space',
'csv_choice_company': True,
'csv_default_company': [
str(company_1),
],
}
)
# compare
self.assertFalse(form.is_valid())
self.assertEqual(
form.errors['csv_choice_company'],
['Decide between CSV or database or nothing.'],
)
def test_system_importer_file_csv_config_form_company_column_and_db(self):
"""test field validation"""
# get user
testuser = User.objects.get(
username='testuser_system_importer_file_csv_config'
).id
# get objects
analysisstatus_1 = Analysisstatus.objects.get(
analysisstatus_name='analysisstatus_1'
).analysisstatus_id
company_1 = Company.objects.get(company_name='company_1').company_id
systemstatus_1 = Systemstatus.objects.get(
systemstatus_name='systemstatus_1'
).systemstatus_id
# get form
form = SystemImporterFileCsvConfigForm(
data={
'csv_column_system': '1',
'csv_import_path': '/tmp',
'csv_import_filename': 'systems.csv',
'csv_import_username': str(testuser),
'csv_default_systemstatus': str(systemstatus_1),
'csv_default_analysisstatus': str(analysisstatus_1),
'csv_default_tagfree_systemstatus': str(systemstatus_1),
'csv_default_tagfree_analysisstatus': str(analysisstatus_1),
'csv_tag_lock_systemstatus': 'LOCK_SYSTEMSTATUS',
'csv_tag_lock_analysisstatus': 'LOCK_ANALYSISSTATUS',
'csv_remove_tag': 'tag_remove_prefix',
'csv_field_delimiter': 'field_comma',
'csv_text_quote': 'text_double_quotation_marks',
'csv_ip_delimiter': 'ip_semicolon',
'csv_tag_delimiter': 'tag_space',
'csv_column_company': '2',
'csv_default_company': [
str(company_1),
],
}
)
# compare
self.assertFalse(form.is_valid())
self.assertEqual(
form.errors['csv_choice_company'],
['Decide between CSV or database or nothing.'],
)
def test_system_importer_file_csv_config_form_company_choice_column_and_db(self):
"""test field validation"""
# get user
testuser = User.objects.get(
username='testuser_system_importer_file_csv_config'
).id
# get objects
analysisstatus_1 = Analysisstatus.objects.get(
analysisstatus_name='analysisstatus_1'
).analysisstatus_id
company_1 = Company.objects.get(company_name='company_1').company_id
systemstatus_1 = Systemstatus.objects.get(
systemstatus_name='systemstatus_1'
).systemstatus_id
# get form
form = SystemImporterFileCsvConfigForm(
data={
'csv_column_system': '1',
'csv_import_path': '/tmp',
'csv_import_filename': 'systems.csv',
'csv_import_username': str(testuser),
'csv_default_systemstatus': str(systemstatus_1),
'csv_default_analysisstatus': str(analysisstatus_1),
'csv_default_tagfree_systemstatus': str(systemstatus_1),
'csv_default_tagfree_analysisstatus': str(analysisstatus_1),
'csv_tag_lock_systemstatus': 'LOCK_SYSTEMSTATUS',
'csv_tag_lock_analysisstatus': 'LOCK_ANALYSISSTATUS',
'csv_remove_tag': 'tag_remove_prefix',
'csv_field_delimiter': 'field_comma',
'csv_text_quote': 'text_double_quotation_marks',
'csv_ip_delimiter': 'ip_semicolon',
'csv_tag_delimiter': 'tag_space',
'csv_choice_company': True,
'csv_column_company': '2',
'csv_default_company': [
str(company_1),
],
}
)
# compare
self.assertFalse(form.is_valid())
self.assertEqual(
form.errors['csv_choice_company'],
['Decide between CSV or database or nothing.'],
)
def test_system_importer_file_csv_config_form_company_from_csv(self):
"""test field validation"""
# get user
testuser = User.objects.get(
username='testuser_system_importer_file_csv_config'
).id
# get objects
analysisstatus_1 = Analysisstatus.objects.get(
analysisstatus_name='analysisstatus_1'
).analysisstatus_id
systemstatus_1 = Systemstatus.objects.get(
systemstatus_name='systemstatus_1'
).systemstatus_id
# get form
form = SystemImporterFileCsvConfigForm(
data={
'csv_column_system': '1',
'csv_import_path': '/tmp',
'csv_import_filename': 'systems.csv',
'csv_import_username': str(testuser),
'csv_default_systemstatus': str(systemstatus_1),
'csv_default_analysisstatus': str(analysisstatus_1),
'csv_default_tagfree_systemstatus': str(systemstatus_1),
'csv_default_tagfree_analysisstatus': str(analysisstatus_1),
'csv_tag_lock_systemstatus': 'LOCK_SYSTEMSTATUS',
'csv_tag_lock_analysisstatus': 'LOCK_ANALYSISSTATUS',
'csv_remove_tag': 'tag_remove_prefix',
'csv_field_delimiter': 'field_comma',
'csv_text_quote': 'text_double_quotation_marks',
'csv_ip_delimiter': 'ip_semicolon',
'csv_tag_delimiter': 'tag_space',
'csv_choice_company': True,
'csv_column_company': '2',
}
)
# compare
self.assertTrue(form.is_valid())
def test_system_importer_file_csv_config_form_company_from_db(self):
"""test field validation"""
# get user
testuser = User.objects.get(
username='testuser_system_importer_file_csv_config'
).id
# get objects
analysisstatus_1 = Analysisstatus.objects.get(
analysisstatus_name='analysisstatus_1'
).analysisstatus_id
company_1 = Company.objects.get(company_name='company_1').company_id
systemstatus_1 = Systemstatus.objects.get(
systemstatus_name='systemstatus_1'
).systemstatus_id
# get form
form = SystemImporterFileCsvConfigForm(
data={
'csv_column_system': '1',
'csv_import_path': '/tmp',
'csv_import_filename': 'systems.csv',
'csv_import_username': str(testuser),
'csv_default_systemstatus': str(systemstatus_1),
'csv_default_analysisstatus': str(analysisstatus_1),
'csv_default_tagfree_systemstatus': str(systemstatus_1),
'csv_default_tagfree_analysisstatus': str(analysisstatus_1),
'csv_tag_lock_systemstatus': 'LOCK_SYSTEMSTATUS',
'csv_tag_lock_analysisstatus': 'LOCK_ANALYSISSTATUS',
'csv_remove_tag': 'tag_remove_prefix',
'csv_field_delimiter': 'field_comma',
'csv_text_quote': 'text_double_quotation_marks',
'csv_ip_delimiter': 'ip_semicolon',
'csv_tag_delimiter': 'tag_space',
'csv_default_company': [
str(company_1),
],
}
)
# compare
self.assertTrue(form.is_valid())
|
1618873
|
import unittest
import numpy as np
import scipy.sparse as sp
from multimodal.lib.array_utils import normalize_features
from multimodal.evaluation import (evaluate_label_reco,
evaluate_NN_label,
chose_examples)
class TestLabelEvaluation(unittest.TestCase):
def test(self):
labels = [2, 0]
reco = np.array([[.1, .5, .6, .1],
[.6, .5, .2, .1]])
good = evaluate_label_reco(reco, labels)
self.assertEqual(good, 1.)
bad = evaluate_label_reco(reco[[1, 0], :], labels)
self.assertEqual(bad, 0.)
medium = evaluate_label_reco(reco[[1, 1], :], labels)
self.assertEqual(medium, .5)
def test_fails_on_multiple_labels(self):
labels = [[2], [0]]
reco = np.array([[.1, .5, .6, .1],
[.6, .5, .2, .1]])
with self.assertRaises(AssertionError):
evaluate_label_reco(reco, labels)
class TestNNEvaluation(unittest.TestCase):
def setUp(self):
self.labels_a = np.random.randint(10, size=13)
self.labels_b = [i for i in reversed(range(10))]
# Encode label on third coordinate of a and fourth of b
self.a = np.random.random((13, 5))
for i in range(13):
self.a[i, 2] = self.labels_a[i]
self.b = np.random.random((10, 5))
for i in range(10):
self.b[i, 3] = self.labels_b[i]
def fake_metrics(self, a, b, axis=-1):
assert(axis == -1) # Test does not work if not...
return 1. - (a[:, :, 2] == b[:, :, 3])
def test_good_on_fake_measure(self):
self.assertEqual(evaluate_NN_label(self.a, self.b, self.labels_a,
self.labels_b, self.fake_metrics
), 1.)
def test_bad_on_fake_measure(self):
self.assertEqual(evaluate_NN_label(self.a, 1 + self.b,
self.labels_a, self.labels_b,
self.fake_metrics), 0.)
def test_on_fake_measure_sparse(self):
a = sp.lil_matrix(self.a).tocsr()
b = sp.lil_matrix(self.b).tocsr()
self.assertEqual(
evaluate_NN_label(a, b, self.labels_a, self.labels_b,
self.fake_metrics),
1.)
self.assertEqual(
evaluate_NN_label(a, 1 + self.b, self.labels_a,
self.labels_b, self.fake_metrics),
0.)
class TestChoseExamples(unittest.TestCase):
def setUp(self):
self.label_set = list(range(3))
self.labels = self.label_set * 5
np.random.seed(0)
np.random.shuffle(self.labels)
def test_choses_as_many_examples_as_labels(self):
r = chose_examples(self.labels, self.label_set)
self.assertEqual(len(r), len(self.label_set))
r = chose_examples(self.labels) # And without giving labels
self.assertEqual(len(r), len(self.label_set))
def test_choses_twice_as_many_examples_as_labels(self):
r = chose_examples(self.labels, self.label_set, number=2)
self.assertEqual(len(r), 2 * len(self.label_set))
def test_all_chosen_are_indices(self):
r = chose_examples(self.labels, self.label_set, number=2)
assert(all([0 <= i < len(self.labels) for i in r]))
def test_all_labels_are_chosen_once(self):
r = chose_examples(self.labels, self.label_set)
lab = [self.labels[i] for i in r]
assert(all([lab.count(l) == 1 for l in self.label_set]))
def test_all_labels_are_chosen_twice(self):
r = chose_examples(self.labels, self.label_set, number=2)
lab = [self.labels[i] for i in r]
assert(all([lab.count(l) == 2 for l in self.label_set]))
class TestNormalizeFeatures(unittest.TestCase):
def setUp(self):
self.mat = np.random.random((32, 13))
self.mat = 10. * self.mat * (self.mat < .2)
def test_on_sparse_same_shape(self):
m = self.mat
m[0, :] += 1 # Ensures that no column has zero sum
m = sp.csc_matrix(m)
norm = normalize_features(m)
assert(np.allclose(norm.sum(axis=0), 1))
self.assertEqual(norm.shape, m.shape)
def test_removes_columns_sparse(self):
m = self.mat
m[0, :] += 1 # Ensures that no column has zero sum
m[:, [1, 3]] = 0 # Ensures column 1 and 3 have zero sum
m = sp.csc_matrix(m)
norm = normalize_features(m)
assert(np.allclose(norm.sum(axis=0), 1))
self.assertEqual(norm.shape, (m.shape[0], m.shape[1] - 2))
def test_on_dense_same_shape(self):
m = self.mat
m[0, :] += 1 # Ensures that no column has zero sum
norm = normalize_features(m)
assert(np.allclose(norm.sum(axis=0), 1))
self.assertEqual(norm.shape, m.shape)
def test_removes_columns_dense(self):
m = self.mat
m[0, :] += 1 # Ensures that no column has zero sum
m[:, [1, 3]] = 0 # Ensures column 1 and 3 have zero sum
norm = normalize_features(m)
assert(np.allclose(norm.sum(axis=0), 1))
self.assertEqual(norm.shape, (m.shape[0], m.shape[1] - 2))
def test_same_on_dense_and_sparse(self):
m1 = sp.csc_matrix(self.mat)
m2 = sp.csr_matrix(self.mat)
n = normalize_features(self.mat)
n1 = normalize_features(m1)
n2 = normalize_features(m2)
assert(np.allclose(n1.todense(), n))
assert(np.allclose(n2.todense(), n))
def test_does_not_modify(self):
m = self.mat.copy()
normalize_features(m)
ms = sp.csr_matrix(m)
normalize_features(ms)
assert(np.allclose(m, self.mat))
assert(np.allclose(ms.todense(), self.mat))
def test_OK(self):
n = normalize_features(np.array([[1., 0., 1.5, .1],
[1., 0., .5, .3]]))
ok = np.array([[.5, .75, .25],
[.5, .25, .75]])
assert(np.allclose(n, ok))
|
1618875
|
import os
def fixdir(d):
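    # walk d recursively: every .wmf/.WMF file is rewritten in place with its
    # first 0xC (12) bytes stripped; any other entry is assumed to be a directory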
for e in os.listdir(d):
p = os.path.join(d,e)
if e.endswith('.wmf') or e.endswith('.WMF'):
with open(p, 'rb') as f:
b = f.read()
with open(p, 'wb') as f:
f.write(b[0xC:])
else:
fixdir(p)
fixdir('upic')
|
1618938
|
import time
from hmac import compare_digest
import jwt
from quart import Blueprint, abort, current_app, request, jsonify
from werkzeug.exceptions import HTTPException
home = Blueprint("home", __name__)
_SECRETS = {"worker1": "f0fdeb1f1584fd5431c4250b2e859457"}
def _400(desc):
exc = HTTPException()
exc.code = 400
exc.description = desc
return error_handling(exc)
def error_handling(error):
if isinstance(error, HTTPException):
result = {
"code": error.code,
"description": error.description,
"message": str(error),
}
else:
description = abort.mapping[500].description
result = {"code": 500, "description": description, "message": str(error)}
resp = jsonify(result)
resp.status_code = result["code"]
return resp
@home.route("/.well-known/jwks.json")
async def _jwks():
"""Returns the public key in the Json Web Key (JWK) format"""
with open(current_app.config["PUBLIC_KEY"]) as f:
key = f.read()
key = {
"alg": "RS512",
"e": "AQAB",
"n": key,
"kty": "RSA",
"use": "sig",
}
return jsonify([key])
def is_authorized_app(client_id, client_secret):
    # unknown client ids fall back to an empty secret so compare_digest() gets a
    # string instead of None and the caller can return 401 cleanly
    return compare_digest(_SECRETS.get(client_id, ""), client_secret)
@home.route("/oauth/token", methods=["POST"])
async def create_token():
with open(current_app.config["PRIVATE_KEY"]) as f:
key = f.read()
try:
data = await request.form
if data.get("grant_type") != "client_credentials":
return _400(f"Wrong grant_type {data.get('grant_type')}")
client_id = data.get("client_id")
client_secret = data.get("client_secret")
aud = data.get("audience", "")
if not is_authorized_app(client_id, client_secret):
return abort(401)
now = int(time.time())
token = {
"iss": "https://tokendealer.example.com",
"aud": aud,
"iat": now,
"exp": now + 3600 * 24,
}
token = jwt.encode(token, key, algorithm="RS512")
return {"access_token": token}
except Exception as e:
return _400(str(e))
@home.route("/verify_token", methods=["POST"])
async def verify_token():
with open(current_app.config["PUBLIC_KEY"]) as f:
key = f.read()
try:
json_body = await request.form
token = json_body["access_token"]
audience = json_body.get("audience", "")
print(token, audience)
return jwt.decode(token, key, algorithms=["RS512"], audience=audience)
except Exception as e:
return _400(str(e))
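# --- usage sketch (not part of the original blueprint) ---
# A minimal client-side walk-through of the routes above, assuming the app is
# served at `base_url`; the audience value and the use of `requests` are
# assumptions of this sketch, not requirements of the module.
def _example_token_roundtrip(base_url="http://localhost:5000"):
    import requests  # assumed available only for this sketch
    creds = {
        "grant_type": "client_credentials",
        "client_id": "worker1",
        "client_secret": _SECRETS["worker1"],
        "audience": "https://api.example.com",
    }
    # obtain a signed token from /oauth/token ...
    token = requests.post(base_url + "/oauth/token", data=creds).json()["access_token"]
    # ... then ask the same service to verify it
    return requests.post(
        base_url + "/verify_token",
        data={"access_token": token, "audience": "https://api.example.com"},
    ).json()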
|
1618941
|
import pickle
import lmdb
import os
import glob
from PIL import Image
from pathlib import Path
from tqdm import tqdm
import numpy as np
from scipy import ndimage
def to_lmdb(root_path, lmdb_path):
image_paths = [x.split(".")[0] for x in os.listdir(os.path.join(root_path, "color"))]
print('#images: ', len(image_paths))
print("Generate LMDB to %s" % lmdb_path)
image_size = Image.open(os.path.join(root_path, "color", f'{image_paths[0]}.jpg')).size
pixels = image_size[0] * image_size[1] * len(image_paths)
print("Pixels in split: ", pixels)
map_size = pixels * 4 + 1500 * 320 * 240 * 4
print("Estimated Size: ", map_size / (1024 * 1024 * 1024))
isdir = os.path.isdir(lmdb_path)
db = lmdb.open(lmdb_path, subdir=isdir, map_size=map_size, readonly=False, meminit=False, map_async=True, writemap=True)
txn = db.begin(write=True)
key_list = []
for idx, path in tqdm(enumerate(image_paths)):
jpg_path = os.path.join(root_path, 'color', f'{path}.jpg')
png_path = os.path.join(root_path, 'label', f'{path}.png')
image = np.array(Image.open(jpg_path).convert('RGB'), dtype=np.uint8)
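        # labels come 1-indexed; shift to 0-indexed and push everything outside
        # the 40 classes (including the wrapped-around 0) to the ignore index 255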
label = np.array(Image.open(png_path), dtype=np.uint8)
label -= 1
label[label == -1] = 255
label[label >= 40] = 255
txn.put(u'{}'.format(path).encode('ascii'), pickle.dumps(np.dstack((image, label)), protocol=3))
key_list.append(path)
print('Committing..')
txn.commit()
print('Writing keys..')
keys = [u'{}'.format(k).encode('ascii') for k in key_list]
with db.begin(write=True) as txn:
txn.put(b'__keys__', pickle.dumps(keys, protocol=3))
txn.put(b'__len__', pickle.dumps(len(keys), protocol=3))
print('Syncing..')
db.sync()
db.close()
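# --- usage sketch (not part of the original script) ---
# A minimal read-back of the database written above, illustrating the stored
# layout: every entry is a pickled HxWx4 uint8 array with RGB in the first
# three channels and the label map in the fourth.
def read_sample(lmdb_path, index=0):
    db = lmdb.open(lmdb_path, subdir=os.path.isdir(lmdb_path), readonly=True, lock=False)
    with db.begin(write=False) as txn:
        keys = pickle.loads(txn.get(b'__keys__'))
        sample = pickle.loads(txn.get(keys[index]))
    db.close()
    return sample[:, :, :3], sample[:, :, 3]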
if __name__ == '__main__':
PATH_TO_SELECTIONS = "ViewAL/dataset/scannet-sample/raw/selections"
PATH_TO_LMDB = "ViewAL/dataset/scannet-sample/dataset.lmdb"
to_lmdb(PATH_TO_SELECTIONS, PATH_TO_LMDB)
|
1618978
|
import importlib
import re
import pkg_resources
from pkg_resources import VersionConflict
from omniduct._version import __optional_dependencies__
from omniduct.utils.debug import logger
def check_dependencies(protocols, message=None):
if protocols is None:
return
dependencies = []
for protocol in protocols:
dependencies.extend(__optional_dependencies__.get(protocol, []))
missing_deps = []
warning_deps = {}
for dep in dependencies:
        m = re.match('^[a-z_][a-z0-9]*', dep)
        if not m:
            logger.warning('Invalid dependency requested: {}'.format(dep))
            continue
        package_name = m.group(0)
accept_any_version = package_name == dep
try:
pkg_resources.get_distribution(dep)
except VersionConflict:
warning_deps[dep] = "{}=={}".format(package_name, pkg_resources.get_distribution(m.group(0)).version)
except:
# Some packages may be available, but not installed. If so, we
# should accept them with warnings (if version specified in dep).
try:
importlib.import_module(package_name)
if not accept_any_version:
                    warning_deps[dep] = '{}==<not installed>'.format(package_name)
except: # ImportError in python 2, ModuleNotFoundError in Python 3
missing_deps.append(dep)
    if warning_deps:
        warning_message = "You may have some outdated packages:\n"
        for key in sorted(warning_deps):
            warning_message += '\t- Want {}, found {}\n'.format(key, warning_deps[key])
        logger.warning(warning_message)
if missing_deps:
message = message or "Whoops! You do not seem to have all the dependencies required."
fix = ("You can fix this by running:\n\n"
"\t{install_command}\n\n"
"Note: Depending on your system's installation of Python, you may "
"need to use `pip2` or `pip3` instead of `pip`.").format(install_command='pip install --upgrade ' + ' '.join(missing_deps))
raise RuntimeError('\n\n'.join([message, fix]))
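# --- usage sketch (not part of the original module) ---
# __optional_dependencies__ maps protocol names to lists of pip requirement
# strings; 'hiveserver2' below is only a guess at one of its keys, so substitute
# a protocol that your installation actually declares.
def _example_check():
    # logs a warning for outdated packages and raises RuntimeError (with install
    # instructions) if anything is missing entirely
    check_dependencies(['hiveserver2'], message="HiveServer2 support needs extra packages.")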
|
1618979
|
try:
    from browser import ajax, timer
except ImportError:
    # outside a Brython environment, fall back to a stub so the module imports
    class ajax:
        pass
from . import futures
class HTTPException(Exception):
"""
A class representing a HTTPRequest error
"""
def __init__(self, request):
super(HTTPException, self).__init__()
self.req = request
class HTTPRequest(futures.Future):
"""
A class representing a Future HTTPRequest result.
"""
METHOD_POST = 'POST'
METHOD_GET = 'GET'
def __init__(self, url, method='GET', data=None, **kwargs):
super(HTTPRequest, self).__init__(**kwargs)
self._url = url
self._req = ajax.ajax()
self._req.bind("complete", self._complete_handler)
self._data = data
self._method = method
self._req.open(self._method, self._url, True)
self._req.set_header('content-type', 'application/x-www-form-urlencoded')
if self._data is None:
self._req.send()
else:
self._req.send(self._data)
def _complete_handler(self, req):
if req.status == 200 or req.status == 0:
self.set_result(req)
else:
self.set_exception(HTTPException(req))
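# --- usage sketch (not part of the original module) ---
# A rough illustration of how the Future-based request above might be consumed
# from Brython; the URL is a placeholder and add_done_callback()/result() are
# assumed to be provided by the local `futures` module.
def _example_request():
    def on_done(fut):
        try:
            req = fut.result()  # the completed Brython ajax object
            print(req.text)
        except HTTPException as exc:
            print("request failed with status", exc.req.status)
    request = HTTPRequest('/api/data', method=HTTPRequest.METHOD_GET)
    request.add_done_callback(on_done)
    return request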
|
1618984
|
import socket
addr = input("Enter IP address: ")
try:
    socket.inet_aton(addr)
    print("IP address is valid")
except socket.error:
    print("IP address is NOT valid")
|