| id | content |
|---|---|
153768
|
import dill
import json
import os
def load(path):
"""
Loads a saved model and returns it.
Args:
path: Name of the model or full path to model.
Example::
import backprop
backprop.save(model_object, "my_model")
model = backprop.load("my_model")
"""
# Try to look in cache folder
cache_path = os.path.expanduser(f"~/.cache/backprop/{path}")
cache_model_path = os.path.join(cache_path, "model.bin")
if os.path.exists(cache_model_path):
path = cache_model_path
else:
model_path = os.path.join(path, "model.bin")
if not os.path.isabs(model_path):
model_path = os.path.join(os.getcwd(), model_path)
if not os.path.exists(model_path):
raise ValueError("model not found!")
path = model_path
    with open(path, "rb") as f:
model = dill.load(f)
return model
|
153776
|
import os
import re
from weakref import WeakKeyDictionary
from io import StringIO
import trafaret as _trafaret
from yaml import load, dump, ScalarNode
from yaml.scanner import ScannerError
try:
from yaml import CSafeLoader as SafeLoader
except ImportError:
from yaml import SafeLoader
from .error import ConfigError, ErrorLine
VARS_REGEX = re.compile(r'\$(\w+)|\$\{([^}]+)\}')
try:
STR_TYPES = (str, unicode)
UNICODE_TYPE = unicode
except NameError:
STR_TYPES = str
UNICODE_TYPE = str
class ConfigDict(dict):
__slots__ = ('marks', 'extra')
def __init__(self, data, marks, extra):
dict.__init__(self, data)
self.marks = marks
self.extra = extra
class ConfigList(list):
__slots__ = ('marks', 'extra')
def __init__(self, data, marks, extra):
list.__init__(self, data)
self.marks = marks
self.extra = extra
class SubstInfo(object):
def __init__(self, original, vars):
self.original = original
self.vars = vars
def _trafaret_config_hint(self):
return (
[repr(self.original).lstrip('u')] +
[_format_var(k, v) for k, v in self.vars.items()]
)
def _format_var(key, value):
if value is None:
return 'variable {} is undefined'.format(repr(key).lstrip('u'))
else:
value = UNICODE_TYPE(value)
if value.isdecimal():
kind = 'numeric'
elif value.isalnum():
if all(c.isdecimal() or 97 <= ord(c) <= 102
for c in value.lower()):
kind = 'hexadecimal'
elif value.isalpha():
kind = 'letter'
else:
kind = 'alphanumeric'
else:
kind = 'various'
return 'variable {} consists of {} {} characters'.format(
repr(key).lstrip('u'), len(value), kind)
class ConfigLoader(SafeLoader):
def __init__(self, stream, expand_vars, errors):
SafeLoader.__init__(self, stream)
self.__vars = expand_vars
self.__errors = errors
self.__used_vars = set()
def construct_yaml_map(self, node):
data = ConfigDict({}, {}, {})
yield data
data.update(self.construct_mapping(node))
marks = {'__self__': [node.start_mark, node.end_mark]}
for (key, value) in node.value:
if isinstance(key, ScalarNode):
key_str = self.construct_scalar(key)
marks[key_str] = cur_marks = [key.start_mark, value.end_mark]
if self.__vars is not None and isinstance(value, ScalarNode):
val = self.construct_scalar(value)
if isinstance(val, STR_TYPES):
nval, ext = self.__expand_vars(val, cur_marks)
if nval != val:
data[key_str] = nval
data.extra[key_str] = ext
data.marks = marks
def construct_yaml_seq(self, node):
data = ConfigList([], {}, {})
yield data
data.extend(self.construct_sequence(node))
marks = {'__self__': [node.start_mark, node.end_mark]}
for idx, value in enumerate(node.value):
marks[idx] = cur_marks = [value.start_mark, value.end_mark]
if self.__vars is not None and isinstance(value, ScalarNode):
val = data[idx]
if isinstance(val, str):
data[idx], ext = self.__expand_vars(val, cur_marks)
data.extra[idx] = ext
data.marks = marks
def __expand_vars(self, value, marks):
replaced = {}
def replacer(match):
key = match.group(1)
if not key:
key = match.group(2)
replaced[key] = self.__vars.get(key)
self.__used_vars.add(key)
try:
return self.__vars[key]
except KeyError:
self.__errors.append(ErrorLine(
marks, None,
'variable {} not found'.format(repr(key).lstrip('u')),
value))
return match.group(0)
return VARS_REGEX.sub(replacer, value), SubstInfo(value, replaced)
def get_expanded_vars(self):
return set(self.__used_vars)
ConfigLoader.add_constructor(
'tag:yaml.org,2002:map',
ConfigLoader.construct_yaml_map)
ConfigLoader.add_constructor(
'tag:yaml.org,2002:seq',
ConfigLoader.construct_yaml_seq)
def read_and_validate(filename, trafaret, vars=os.environ):
with open(filename) as input:
return _validate_input(input, trafaret, filename=filename, vars=vars)
def read_and_get_vars(filename, trafaret, vars=os.environ):
with open(filename) as input:
errors = []
loader = ConfigLoader(input, vars, errors)
try:
loader.get_single_data()
except ScannerError as e:
raise ConfigError.from_scanner_error(e, filename, errors)
finally:
loader.dispose()
return loader.get_expanded_vars()
def parse_and_validate(string, trafaret,
filename='<config.yaml>', vars=os.environ):
errors = []
input = StringIO(string)
input.name = filename
return _validate_input(input, trafaret, filename=filename, vars=vars)
def _validate_input(input, trafaret, filename, vars):
errors = []
loader = ConfigLoader(input, vars, errors)
try:
data = loader.get_single_data()
except ScannerError as e:
raise ConfigError.from_scanner_error(e, filename, errors)
finally:
loader.dispose()
try:
result = trafaret.check(data)
except _trafaret.DataError as e:
raise ConfigError.from_data_error(e, data, errors)
if errors:
raise ConfigError.from_loader_errors(errors)
return result
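# Illustrative usage sketch: validating a small YAML config with environment
# variable expansion. The schema and file contents below are hypothetical and
# not part of this module.
#
#     import trafaret as T
#
#     CONFIG = T.Dict({
#         T.Key('host'): T.String(),
#         T.Key('port'): T.Int(),
#     })
#     # config.yaml:
#     #   host: ${HOST}
#     #   port: 8080
#     config = read_and_validate('config.yaml', CONFIG, vars={'HOST': 'localhost'})
#     # -> {'host': 'localhost', 'port': 8080}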
|
153815
|
from symsynd.heuristics import get_ip_register
def test_ip_reg():
assert get_ip_register({'pc': '0x42'}, 'arm7') == int('42', 16)
    assert get_ip_register({}, 'arm7') is None
    assert get_ip_register({}, 'x86') is None
|
153818
|
import cv2
import numpy as np
from PIL import Image
from torch.utils.data import Dataset
# imagenet
imagenet_mean = [0.485, 0.456, 0.406]
imagenet_std = [0.229, 0.224, 0.225]
class CustomDataset(Dataset):
    def __init__(self, all_img_path_list, transform):
self.all_img_paths = all_img_path_list
self.transform = transform
def __len__(self):
return len(self.all_img_paths)
def __getitem__(self, idx):
img_path = self.all_img_paths[idx]
        # decode via np.fromfile + cv2.imdecode so that non-ASCII (e.g. Chinese) file paths load correctly
img = cv2.imdecode(np.fromfile(img_path, dtype=np.uint8), cv2.IMREAD_COLOR)
img = Image.fromarray(img)
img = self.transform(img)
return img, img_path
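# Illustrative usage sketch (hypothetical paths and parameters, not part of the
# original file): compose a torchvision transform using the ImageNet statistics
# defined above and iterate the dataset with a DataLoader.
#
#     from torchvision import transforms
#     from torch.utils.data import DataLoader
#
#     transform = transforms.Compose([
#         transforms.Resize((224, 224)),
#         transforms.ToTensor(),
#         transforms.Normalize(imagenet_mean, imagenet_std),
#     ])
#     dataset = CustomDataset(all_img_path_list, transform)
#     loader = DataLoader(dataset, batch_size=32, shuffle=False)
#     for batch, paths in loader:
#         ...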
|
153849
|
def test_get_uptimez(client):
response = client.get("/uptimez/")
assert response.status_code == 200
def test_get_healthz(client):
response = client.get("/healthz/")
assert response.status_code == 200
|
153874
|
from typing import Optional
import cv2
from pymba import Frame
# TODO: add more colour conversions
PIXEL_FORMATS_CONVERSIONS = {
'BayerRG8': cv2.COLOR_BAYER_RG2RGB,
}
def display_frame(frame: Frame, delay: Optional[int] = 1) -> None:
"""
Displays the acquired frame.
:param frame: The frame object to display.
:param delay: Display delay in milliseconds, use 0 for indefinite.
"""
print('frame {}'.format(frame.data.frameID))
# get a copy of the frame data
image = frame.buffer_data_numpy()
# convert colour space if desired
try:
image = cv2.cvtColor(image, PIXEL_FORMATS_CONVERSIONS[frame.pixel_format])
except KeyError:
pass
# display image
cv2.imshow('Image', image)
cv2.waitKey(delay)
|
153891
|
from sonosco.inputs.audio import SonoscoAudioInput
import webrtcvad
import collections
import pyaudio
import sys
import logging
class VadInput(SonoscoAudioInput):
def __init__(self):
super().__init__()
self.FORMAT = pyaudio.paInt16
self.CHANNELS = 1
self.RATE = 16000
self.CHUNK_DURATION_MS = 30 # supports 10, 20 and 30 (ms)
self.PADDING_DURATION_MS = 1000
self.CHUNK_SIZE = int(self.RATE * self.CHUNK_DURATION_MS / 1000)
self.CHUNK_BYTES = self.CHUNK_SIZE * 2
self.NUM_PADDING_CHUNKS = int(self.PADDING_DURATION_MS / self.CHUNK_DURATION_MS)
self.NUM_WINDOW_CHUNKS = int(240 / self.CHUNK_DURATION_MS)
self.vad = webrtcvad.Vad(2)
pa = pyaudio.PyAudio()
self.stream = pa.open(format=self.FORMAT,
channels=self.CHANNELS,
rate=self.RATE,
input=True,
start=False,
# input_device_index=2,
frames_per_buffer=self.CHUNK_SIZE)
logging.basicConfig()
self.logger = logging.getLogger("VadInput")
def request_audio(self, *args, **kwargs):
got_a_sentence = False
ring_buffer = collections.deque(maxlen=self.NUM_PADDING_CHUNKS)
triggered = False
voiced_frames = []
ring_buffer_flags = [0] * self.NUM_WINDOW_CHUNKS
ring_buffer_index = 0
buffer_in = ''
print("* recording")
self.stream.start_stream()
while not got_a_sentence: # and not leave:
chunk = self.stream.read(self.CHUNK_SIZE)
active = self.vad.is_speech(chunk, self.RATE)
sys.stdout.write('1' if active else '0')
ring_buffer_flags[ring_buffer_index] = 1 if active else 0
ring_buffer_index += 1
ring_buffer_index %= self.NUM_WINDOW_CHUNKS
if not triggered:
ring_buffer.append(chunk)
num_voiced = sum(ring_buffer_flags)
if num_voiced > 0.5 * self.NUM_WINDOW_CHUNKS:
sys.stdout.write('+')
triggered = True
voiced_frames.extend(ring_buffer)
ring_buffer.clear()
else:
voiced_frames.append(chunk)
ring_buffer.append(chunk)
num_unvoiced = self.NUM_WINDOW_CHUNKS - sum(ring_buffer_flags)
if num_unvoiced > 0.9 * self.NUM_WINDOW_CHUNKS:
sys.stdout.write('-')
triggered = False
got_a_sentence = True
sys.stdout.flush()
sys.stdout.write('\n')
data = b''.join(voiced_frames)
self.stream.stop_stream()
print("* done recording")
return data
def __enter__(self):
return self
def __exit__(self, exc_type, exc_value, traceback):
try:
self.stream.close()
except Exception as e:
            self.logger.error(f"Exception while closing audio stream: {e}")
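# Illustrative usage sketch (assumes a working microphone; not part of the
# original module): record a single voiced segment using the context-manager
# protocol defined above.
#
#     if __name__ == "__main__":
#         with VadInput() as vad_input:
#             audio_bytes = vad_input.request_audio()
#             print(f"captured {len(audio_bytes)} bytes of voiced audio")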
|
153900
|
from haystack.query import SearchQuerySet
from search.services.suggest import SuggestBase
class SuggestInvestigator(SuggestBase):
@classmethod
def _query(cls, term):
sqs = SearchQuerySet()
raw_results = sqs.filter(investigator_name=term).order_by('-investigator_complaint_count')[:5]
results = [
cls.entry_format(
suggest_value='{name} ({count})'.format(
name=entry.investigator_name, count=entry.investigator_complaint_count),
tag_value=cls.build_tag_value(
category='allegation__investigator',
value=entry.investigator_id,
display_category='Investigator',
display_value=entry.investigator_name,
)
) for entry in raw_results
]
return {'Investigator': results}
|
153906
|
import behave
@behave.given(u'There are no annotations')
def step_impl(context):
assert True
@behave.when(u'I list all annotations')
def step_impl(context):
context.annotations_list = context.item.annotations.list()
@behave.then(u'I receive a list of all annotations')
def step_impl(context):
assert len(context.annotations_list) == len(context.annotations)
@behave.then(u'The annotations in the list equals the annotations uploaded')
def step_impl(context):
for annotation in context.annotations_list:
ann = {'type': annotation.type,
'label': annotation.label,
'attributes': annotation.attributes,
'coordinates': annotation.coordinates}
# remove 'z' value to match file
for coordinate in ann['coordinates']:
coordinate.pop('z')
assert ann in context.annotations
@behave.then(u'I receive an empty annotations list')
def step_impl(context):
assert len(context.annotations_list) == 0
|
153946
|
import subprocess
import os
os.chdir('./')
ST = 'python '
stand = dict()
conf = dict()
stand['ds'] = 'cifar10'
stand['bs'] = 128
stand['defense'] = 'adr_pgd'
stand['model'] = 'resnet18'
stand['epsilon'] = 0.031
stand['trades_beta'] = 1.0
stand['lccomw'] = 1.0
stand['lcsmtw'] = 1.0
stand['gbcomw'] = 1.0
stand['gbsmtw'] = 1.0
stand['confw'] = 1.0
conf['mnist'] = stand.copy()
conf['mnist']['ds'] = 'mnist'
conf['mnist']['model'] = 'cnn'
conf['mnist']['epsilon'] = 0.3
conf['cifar10'] = stand.copy()
conf['cifar10']['ds'] = 'cifar10'
conf['cifar10']['epsilon'] = 0.031
skip = ['_', '_', '_', '_']
progs = [
'02a_adversarial_training.py ',
'02e_evaluate_robustness.py ',
]
for k in list(conf.keys()):
if k in skip:
continue
for chST in progs:
exp = conf[k]
sub = ' '.join(['--{}={}'.format(t, exp[t]) for t in exp.keys()])
print(sub)
        subprocess.call(ST + chST + sub, shell=True)
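# For illustration, the 'mnist' configuration above expands to a command like:
#   python 02a_adversarial_training.py --ds=mnist --bs=128 --defense=adr_pgd
#     --model=cnn --epsilon=0.3 --trades_beta=1.0 --lccomw=1.0 --lcsmtw=1.0
#     --gbcomw=1.0 --gbsmtw=1.0 --confw=1.0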
|
153954
|
import shelve
import os
import re
from resource_api.interfaces import Resource as BaseResource, Link as BaseLink, AbstractUriPolicy
from resource_api.schema import StringField, DateTimeField, IntegerField
from resource_api.service import Service
from resource_api.errors import ValidationError
RE_SHA1 = re.compile("^[a-f0-9]{40}$")
SHELVE_PATH = "/tmp/school.shelve.db"
class User(object):
def __init__(self, email=None):
if email is None:
self.is_anonymous = True
else:
self.is_anonymous = False
if email == "<EMAIL>":
self.is_admin = True
else:
self.is_admin = False
self.email = email
class ShelveService(Service):
def __init__(self):
super(ShelveService, self).__init__()
self._storage = shelve.open(SHELVE_PATH, writeback=True)
def _get_context(self):
return {"storage": self._storage, "service": self}
def _get_user(self, data):
if data is None:
return User(None)
else:
return User(data.get("email"))
def __del__(self):
self._storage.close()
class Resource(BaseResource):
def __init__(self, context):
super(Resource, self).__init__(context)
self._storage = context["storage"]
def exists(self, user, pk):
return pk in self._storage.get(self.get_name(), {})
def get_data(self, user, pk):
return self._storage.get(self.get_name(), {}).get(pk)
def delete(self, user, pk):
self._storage.get(self.get_name(), {}).pop(pk)
self._storage.sync()
def create(self, user, pk, data):
if self.get_name() not in self._storage:
self._storage[self.get_name()] = {}
self._storage[self.get_name()][pk] = data
self._storage.sync()
def update(self, user, pk, data):
self._storage[self.get_name()][pk].update(data)
self._storage.sync()
def get_uris(self, user, params=None):
return self._storage.get(self.get_name(), {}).keys()
def get_count(self, user, params=None):
        return len(self.get_uris(user, params))
def can_get_data(self, user, pk, data):
""" Only authenticated users can access data """
if user.is_anonymous:
return False
else:
return True
def can_get_uris(self, user):
""" Only authenticated users can access data """
if user.is_anonymous:
return False
else:
return True
class Link(BaseLink):
def __init__(self, context):
super(Link, self).__init__(context)
self._storage = context["storage"]
def exists(self, user, pk, rel_pk):
return rel_pk in self._storage.get((pk, self.get_name()), {})
def get_data(self, user, pk, rel_pk):
return self._storage.get((pk, self.get_name()), {}).get(rel_pk)
def create(self, user, pk, rel_pk, data=None):
key = (pk, self.get_name())
if key not in self._storage:
self._storage[key] = {}
self._storage[key][rel_pk] = data
self._storage.sync()
    def update(self, user, pk, rel_pk, data):
        key = (pk, self.get_name())
        self._storage[key][rel_pk].update(data)
        self._storage.sync()
def delete(self, user, pk, rel_pk):
self._storage.get((pk, self.get_name()), {}).pop(rel_pk)
self._storage.sync()
def get_uris(self, user, pk, params=None):
return self._storage.get((pk, self.get_name()), {}).keys()
def get_count(self, user, pk, params=None):
        return len(self.get_uris(user, pk, params))
def can_get_data(self, user, pk, rel_pk, data):
""" Only authenticated users can access data """
if user.is_anonymous:
return False
else:
return True
def can_get_uris(self, user, pk):
""" Only authenticated users can access data """
if user.is_anonymous:
return False
else:
return True
class Person(Resource):
class Schema:
        email = StringField(regex=r"[^@]+@[^@]+\.[^@]+", pk=True,
                            description="Address to which the notifications shall be sent")
first_name = StringField(description="Given name(s)")
last_name = StringField(description="Family name(s)")
def can_update(self, user, pk):
""" Only a person himself can update his own information """
return user.email == pk or user.is_admin
def can_delete(self, user, pk):
""" Only admins can delete people """
return user.is_admin
class Student(Person):
""" A pupil """
class Schema(Person.Schema):
birthday = DateTimeField()
class Links:
class courses(Link):
""" Courses the student has ever attended """
target = "Course"
related_name = "students"
master = True
class Schema:
grade = IntegerField(min_val=1, max_val=5)
def _is_course_teacher(self, user, pk, rel_pk):
""" Only teachers can add/remove students to/from courses and grade them.
And they can do it with their courses ONLY.
"""
if user.is_admin:
return True
teacher_data = self._storage.get(Teacher.get_name(), {}).get(user.email)
if teacher_data is None:
return False
else:
teacher_courses = self._storage.get((user.email, Teacher.Links.courses.get_name()), {})
return rel_pk in teacher_courses
def can_create(self, user, pk, rel_pk, data=None):
return self._is_course_teacher(user, pk, rel_pk)
can_update = can_create
can_delete = _is_course_teacher
class comments(Link):
""" Comments made by the student """
target = "Comment"
related_name = "student"
class ratings(Link):
""" Ratings given by the student """
target = "TeacherRating"
related_name = "student"
class Teacher(Person):
""" A lecturer """
class Schema(Person.Schema):
category = StringField(description="TQS Category",
choices=["four", "five", "five plus", "six"])
class Links:
class ratings(Link):
""" Ratings given to the teacher """
target = "TeacherRating"
related_name = "teacher"
class courses(Link):
""" Courses the teacher is responsible for """
target = "Course"
related_name = "teacher"
class PersonalLink(Link):
""" Users can link things to their accounts only """
def can_update(self, user, pk, rel_pk, data):
return user.email == rel_pk or user.is_admin
def can_create(self, user, pk, rel_pk, data):
return user.email == rel_pk or user.is_admin
def can_delete(self, user, pk, rel_pk):
return user.email == rel_pk or user.is_admin
class Course(Resource):
""" An educational unit represinting the lessons for a specific set of topics """
class Schema:
name = StringField(pk=True, description="Name of the course. E.g. physics, maths.")
duration = IntegerField(description="Length of the course in weeks")
class Links:
class teacher(PersonalLink):
""" The lecturer of the course """
target = "Teacher"
related_name = "courses"
cardinality = Link.cardinalities.ONE
master = True
required = True
class comments(Link):
""" All comments made about the course """
target = "Comment"
related_name = "course"
class ratings(Link):
""" All ratings that were given to the teachers of the specific course """
target = "TeacherRating"
related_name = "course"
class students(Link):
""" All pupils who attend the course """
target = "Student"
related_name = "courses"
class AutoGenSha1UriPolicy(AbstractUriPolicy):
""" Uses a randomly generated sha1 as a primary key """
@property
def type(self):
return "autogen_policy"
def generate_pk(self, data):
        return os.urandom(20).encode('hex')
def serialize(self, pk):
return pk
def deserialize(self, pk):
if not isinstance(pk, basestring):
raise ValidationError("Has to be string")
        if not RE_SHA1.match(pk):
raise ValidationError("PK is not a valid SHA1")
return pk
class Comment(Resource):
""" Student's comment about the course """
UriPolicy = AutoGenSha1UriPolicy
class Schema:
pk = StringField(pk=True, description="Identifier of the resource")
value = StringField(description="Text of the comment")
creation_time = DateTimeField(description="Time when the comment was added (for sorting purpose)")
class Links:
class student(PersonalLink):
""" The pupil who made the comment """
target = "Student"
related_name = "comments"
cardinality = Link.cardinalities.ONE
master = True
required = True
def can_delete(self, user, pk, rel_pk):
""" It does not make any sense to let anyone touch the comment's student once it is created """
return False
class course(Link):
""" The subject the comment was made about """
target = "Course"
related_name = "comments"
cardinality = Link.cardinalities.ONE
master = True
required = True
def can_delete(self, user, pk, rel_pk):
""" It does not make any sense to let anyone touch the comment's course once it is created """
return False
class TeacherRating(Resource):
""" Student's rating about teacher's performance """
UriPolicy = AutoGenSha1UriPolicy
class Schema:
pk = StringField(pk=True, description="Identifier of the resource")
value = IntegerField(min_val=0, max_val=100, description="Lecturer's performance identifier ")
creation_time = DateTimeField(description="Time when the rating was added (for sorting purpose)")
class Links:
class student(PersonalLink):
""" The pupil who gave the rating to the teacher """
target = "Student"
related_name = "ratings"
cardinality = Link.cardinalities.ONE
master = True
required = True
def can_delete(self, user, pk, rel_pk):
""" It does not make any sense to let anyone touch the rating's student once it is created """
return False
class course(Link):
""" The subject with respect to which the rating was given """
target = "Course"
related_name = "ratings"
cardinality = Link.cardinalities.ONE
master = True
required = True
def can_delete(self, user, pk, rel_pk):
""" It does not make any sense to let anyone touch the ratings's course once it is created """
return False
class teacher(Link):
""" The lecturer to whom the rating is related """
target = "Teacher"
related_name = "ratings"
cardinality = Link.cardinalities.ONE
master = True
required = True
def can_delete(self, user, pk, rel_pk):
""" It does not make any sense to let anyone touch the rating's teacher once it is created """
return False
srv = ShelveService()
srv.register(Student)
srv.register(Teacher)
srv.register(Course)
srv.register(Comment)
srv.register(TeacherRating)
srv.setup()
|
153955
|
import os
import json
from string import Template
from functools import total_ordering
import argparse
from os import path
def generate(sitedir, siteBaseUrl, codeBaseUrl, logoPath):
versions = loadVersions(sitedir)
print(versions.asList())
generateVersions(sitedir, versions)
generateIndex(sitedir, siteBaseUrl, versions.getLastRelease())
with open(logoPath, 'r') as file:
logo = file.read()
generateLatestReleaseResources(sitedir, siteBaseUrl, codeBaseUrl, versions.getLastRelease(), logo)
generateNightlyBuildResources(sitedir, siteBaseUrl, codeBaseUrl, versions.getNightlyBuild(), logo)
generateRedirect(sitedir+'/donate/thanks.html', siteBaseUrl, versions.getLastRelease().name+'/donate-thanks.html')
def loadVersions(sitedir):
versionNames = []
for entry in os.scandir(sitedir):
if entry.is_dir() and entry.name.startswith('v'):
versionNames.append(entry.name)
return Versions(versionNames)
def generateVersions(sitedir, versions):
with open(sitedir+'/versions.json', 'w') as file:
json.dump(versions.asList(), file)
def generateIndex(sitedir, baseUrl, currentVersion):
generateRedirect(sitedir+'/index.html', baseUrl, currentVersion.name)
def generateLatestReleaseResources(sitedir, baseUrl, codeBaseUrl, version, logo):
generateBadge(sitedir, 'latest-release-version', 'latest release', version.name, logo=logo)
generateRedirect(sitedir+'/redirects/latest-release-site.html', baseUrl, version.name)
generateRedirect(sitedir+'/redirects/latest-release-showcase.html', baseUrl, version.name+'/presentation/showcase.html')
generateRedirect(sitedir+'/redirects/latest-release-code.html', codeBaseUrl, version.name)
def generateNightlyBuildResources(sitedir, baseUrl, codeBaseUrl, version, logo):
generateBadge(sitedir, 'nightly-build-version', 'nightly build', version.name, logo=logo)
generateRedirect(sitedir+'/redirects/nightly-build-site.html', baseUrl, version.name)
generateRedirect(sitedir+'/redirects/nightly-build-showcase.html', baseUrl, version.name+'/presentation/showcase.html')
generateRedirect(sitedir+'/redirects/nightly-build-code.html', codeBaseUrl, version.name)
def generateBadge(sitedir, badgeType, label, message, color='lightgrey', logo=None):
jsonContent = {"schemaVersion": 1, "label": label, "message": message, "color": color}
if logo is not None:
if logo.startswith('<?xml') or logo.startswith('<svg'):
jsonContent['logoSvg'] = logo
else:
jsonContent['namedLogo'] = logo
os.makedirs(sitedir+'/badges', exist_ok=True)
with open(sitedir+'/badges/'+badgeType+'.json', 'w') as file:
json.dump(jsonContent, file)
def generateRedirect(htmlFile, baseUrl, target):
tpl = Template('<html><head><meta http-equiv="refresh" content="0; URL=$baseUrl/$target"></head><body></body></html>')
os.makedirs(path.dirname(htmlFile), exist_ok=True)
with open(htmlFile, 'w') as file:
file.write(tpl.substitute(baseUrl=baseUrl, target=target))
@total_ordering
class Version:
def __init__(self, version):
self.name = version
parts = version.split('.')
self.major = parts[0].replace('v', '')
self.minor = parts[1]
self.patch = parts[2].replace('-SNAPSHOT', '')
self.snapshot = parts[2].find('-SNAPSHOT')>=0
def __cmp__(self, other):
        if self.major != other.major:
            return cmp(int(self.major), int(other.major))
        if self.minor != other.minor:
            return cmp(int(self.minor), int(other.minor))
        if self.patch != other.patch:
            return cmp(int(self.patch), int(other.patch))
if self.snapshot != other.snapshot:
return -1 if self.snapshot else 1
return 0
def __lt__(self, other):
return self.__cmp__(other) < 0
def __gt__(self, other):
return self.__cmp__(other) > 0
class Versions:
def __init__(self, versionNames):
self.versions = []
for versionName in versionNames:
self.versions.append(Version(versionName))
self.versions.sort(reverse=True)
def getLast(self):
return self.versions[0]
def getLastRelease(self):
for version in self.versions:
if not version.snapshot:
return version
return None
def getNightlyBuild(self):
for version in self.versions:
if version.snapshot:
return version
return None
def asList(self):
return list(map(lambda v: v.name, self.versions))
def cmp(a, b):
return (a > b) - (a < b)
def parseArgs():
parser = argparse.ArgumentParser(description='Generate versions.json and index.html files for site.')
parser.add_argument('sitedir', help='The directory that contains the site index')
parser.add_argument('--base-url', default='https://groupe-sii.github.io/ogham/', help='The URL of the generated site')
parser.add_argument('--code-base-url', default='https://github.com/groupe-sii/ogham/tree/', help='The URL of the github sources')
parser.add_argument('--logo-svg-path', default='src/docs/resources/images/logo.svg', help='The path to the logo (relative to ogham root directory)')
return parser.parse_args()
if __name__ == '__main__':
args = parseArgs()
generate(args.sitedir, args.base_url, args.code_base_url, os.path.dirname(__file__)+'/../../'+args.logo_svg_path)
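# Illustrative example (hypothetical version directory names, not from the
# actual site): newest versions sort first, snapshots count as nightly builds.
#
#     versions = Versions(['v2.1.0', 'v3.0.0', 'v3.0.1-SNAPSHOT'])
#     versions.asList()          # -> ['v3.0.1-SNAPSHOT', 'v3.0.0', 'v2.1.0']
#     versions.getLastRelease()  # -> Version 'v3.0.0'
#     versions.getNightlyBuild() # -> Version 'v3.0.1-SNAPSHOT'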
|
153976
|
import pytest
from waterbutler.providers.figshare.metadata import (FigshareFileMetadata,
FigshareFolderMetadata,
FigshareFileRevisionMetadata)
from tests.providers.figshare.fixtures import (project_article_type_1_metadata,
project_article_type_3_metadata)
class TestFigshareFileMetadata:
def test_private_file_metadata(self, project_article_type_1_metadata):
base_meta = project_article_type_1_metadata['private']
data = FigshareFileMetadata(base_meta, base_meta['files'][0])
assert data.id == 15562817
assert data.name == 'FigurePrivate01.png'
assert data.article_id == 8305859
assert data.article_name == 'FigurePrivate'
assert data.path == '/8305859/15562817'
assert data.materialized_path == '/FigurePrivate/FigurePrivate01.png'
assert data.upload_path == '/8305859/15562817'
assert data.size == 89281
assert data.size_as_int == 89281
assert type(data.size_as_int) == int
assert data.content_type is None
assert data.modified is None
assert data.modified_utc is None
assert data.created_utc is None
assert data.can_delete is True
assert data.is_public is False
assert data.etag == 'draft:8305859:cae3869aa4b144a3aa5cffe979359836'
assert data.web_view == 'https://figshare.com/account/articles/8305859'
assert data.extra == {
'fileId': 15562817,
'articleId': 8305859,
'status': 'draft',
'downloadUrl': 'https://ndownloader.figshare.com/files/15562817',
'canDelete': True,
'webView': 'https://figshare.com/account/articles/8305859',
'hashingInProgress': False,
'hashes': {
'md5': 'cae3869aa4b144a3aa5cffe979359836'
}
}
assert data.kind == 'file'
assert data.serialized() == {
'extra': {
'fileId': 15562817,
'articleId': 8305859,
'status': 'draft',
'downloadUrl': 'https://ndownloader.figshare.com/files/15562817',
'canDelete': True,
'webView': 'https://figshare.com/account/articles/8305859',
'hashingInProgress': False,
'hashes': {
'md5': 'cae3869aa4b144a3aa5cffe979359836'
}
},
'kind': 'file',
'name': 'FigurePrivate01.png',
'path': '/8305859/15562817',
'provider': 'figshare',
'materialized': '/FigurePrivate/FigurePrivate01.png',
'etag': '0113713d8af08db5fb6a0f6565b115d98d0f6c284b808a58997f1e73bdec397e',
'contentType': None,
'modified': None,
'modified_utc': None,
'created_utc': None,
'size': 89281,
'sizeInt': 89281,
}
api_url = 'http://localhost:7777/v1/resources/cn42d/providers/figshare/8305859/15562817'
assert data.json_api_serialized('cn42d') == {
'id': 'figshare/8305859/15562817',
'type': 'files',
'attributes': {
'extra': {
'fileId': 15562817,
'articleId': 8305859,
'status': 'draft',
'downloadUrl': 'https://ndownloader.figshare.com/files/15562817',
'canDelete': True,
'webView': 'https://figshare.com/account/articles/8305859',
'hashingInProgress': False,
'hashes': {
'md5': 'cae3869aa4b144a3aa5cffe979359836'
}
},
'kind': 'file',
'name': 'FigurePrivate01.png',
'path': '/8305859/15562817',
'provider': 'figshare',
'materialized': '/FigurePrivate/FigurePrivate01.png',
'etag': '0113713d8af08db5fb6a0f6565b115d98d0f6c284b808a58997f1e73bdec397e',
'contentType': None,
'modified': None,
'modified_utc': None,
'created_utc': None,
'size': 89281,
'sizeInt': 89281,
'resource': 'cn42d'
},
'links': {
'move': api_url,
'upload': '{}?kind=file'.format(api_url),
'delete': api_url,
'download': api_url,
}
}
assert data._json_api_links('cn42d') == {
'move': api_url,
'upload': '{}?kind=file'.format(api_url),
'delete': api_url,
'download': api_url,
}
def test_public_file_metadata(self, project_article_type_1_metadata):
item = project_article_type_1_metadata['public']
public_metadata = FigshareFileMetadata(item, item['files'][0])
assert public_metadata.id == 15451592
assert public_metadata.name == 'Figure01.png'
assert public_metadata.article_id == 8263730
assert public_metadata.article_name == 'Figure'
assert public_metadata.is_public is True
assert public_metadata.web_view == 'https://figshare.com/articles/Figure01_png/8263730'
assert public_metadata.extra.get('status') == 'public'
def test_metadata_article_identifier(self, project_article_type_1_metadata):
item = project_article_type_1_metadata['private']
article_metadata = FigshareFileMetadata(item, item['files'][0])
article_metadata.raw['url'] = 'https://api.figshare.com/v2/account/articles/8263730'
assert article_metadata.id == 15562817
assert article_metadata.name == 'FigurePrivate01.png'
assert article_metadata.article_id == 8305859
assert article_metadata.article_name == ''
assert article_metadata.path == '/15562817'
assert article_metadata.materialized_path == '/FigurePrivate01.png'
def test_private_folder_metadata(self, project_article_type_3_metadata):
data = FigshareFolderMetadata(project_article_type_3_metadata['private'])
assert data.id == 8269766
assert data.name == 'DatasetPrivate'
assert data.path == '/8269766/'
assert data.materialized_path == '/DatasetPrivate/'
assert data.size is None
assert data.modified == '2019-06-13T16:00:12Z'
assert data.created_utc is None
assert data.etag == 'draft::::8269766'
assert data.kind == 'folder'
assert data.extra == {
'id': 8269766,
'doi': '',
'status': 'draft'
}
assert data.serialized() == {
'extra': {
'id': 8269766,
'doi': '',
'status': 'draft'
},
'kind': 'folder',
'name': 'DatasetPrivate',
'path': '/8269766/',
'provider': 'figshare',
'materialized': '/DatasetPrivate/',
'etag': 'd7dc67b05e9c50c8adefabc9e2ff2cfaabad26913e8c9916396f067216941389'
}
api_url = 'http://localhost:7777/v1/resources/45hjnz/providers/figshare/8269766/'
assert data.json_api_serialized('45hjnz') == {
'id': 'figshare/8269766/',
'type': 'files',
'attributes': {
'extra': {
'id': 8269766,
'doi': '',
'status': 'draft'
},
'kind': 'folder',
'name': 'DatasetPrivate',
'path': '/8269766/',
'provider': 'figshare',
'materialized': '/DatasetPrivate/',
'etag': 'd7dc67b05e9c50c8adefabc9e2ff2cfaabad26913e8c9916396f067216941389',
'resource': '45hjnz',
'size': None,
'sizeInt': None,
},
'links': {
'move': api_url,
'upload': '{}?kind=file'.format(api_url),
'delete': api_url,
'new_folder': '{}?kind=folder'.format(api_url),
}
}
assert data._json_api_links('45hjnz') == {
'move': api_url,
'upload': '{}?kind=file'.format(api_url),
'delete': api_url,
'new_folder': '{}?kind=folder'.format(api_url),
}
def test_public_folder_metadata(self, project_article_type_3_metadata):
data = FigshareFolderMetadata(project_article_type_3_metadata['public'])
assert data.id == 8263811
assert data.name == 'Dataset'
assert data.extra.get('status') == 'public'
def test_revision_metadata(self):
data = FigshareFileRevisionMetadata()
assert data.modified is None
assert data.modified_utc is None
assert data.version_identifier == 'revision'
assert data.version == 'latest'
assert data.extra == {}
assert data.serialized() == {
'extra': {},
'version': 'latest',
'modified': None,
'modified_utc': None,
'versionIdentifier': 'revision',
}
assert data.json_api_serialized() == {
'id': 'latest',
'type': 'file_versions',
'attributes': {
'extra': {},
'version': 'latest',
'modified': None,
'modified_utc': None,
'versionIdentifier': 'revision',
}
}
|
154026
|
import time
import pytest
from py_ecc import (
bn128,
optimized_bn128,
bls12_381,
optimized_bls12_381,
)
from py_ecc.fields import (
bls12_381_FQ,
bls12_381_FQ2,
bls12_381_FQ12,
bn128_FQ,
bn128_FQ2,
bn128_FQ12,
optimized_bls12_381_FQ,
optimized_bls12_381_FQ2,
optimized_bls12_381_FQ12,
optimized_bn128_FQ,
optimized_bn128_FQ2,
optimized_bn128_FQ12,
)
from py_ecc.fields.field_properties import (
field_properties,
)
@pytest.fixture(params=[bn128, optimized_bn128, bls12_381, optimized_bls12_381])
def lib(request):
return request.param
@pytest.fixture
def FQ(lib):
if lib == bn128:
return bn128_FQ
elif lib == optimized_bn128:
return optimized_bn128_FQ
elif lib == bls12_381:
return bls12_381_FQ
elif lib == optimized_bls12_381:
return optimized_bls12_381_FQ
else:
raise Exception("Library Not Found")
@pytest.fixture
def FQ2(lib):
if lib == bn128:
return bn128_FQ2
elif lib == optimized_bn128:
return optimized_bn128_FQ2
elif lib == bls12_381:
return bls12_381_FQ2
elif lib == optimized_bls12_381:
return optimized_bls12_381_FQ2
else:
raise Exception("Library Not Found")
@pytest.fixture
def FQ12(lib):
if lib == bn128:
return bn128_FQ12
elif lib == optimized_bn128:
return optimized_bn128_FQ12
elif lib == bls12_381:
return bls12_381_FQ12
elif lib == optimized_bls12_381:
return optimized_bls12_381_FQ12
else:
raise Exception("Library Not Found")
@pytest.fixture
def field_modulus(lib):
if lib == bn128 or lib == optimized_bn128:
return field_properties["bn128"]["field_modulus"]
elif lib == bls12_381 or lib == optimized_bls12_381:
return field_properties["bls12_381"]["field_modulus"]
else:
raise Exception("Library Not Found")
@pytest.fixture
def G1(lib):
return lib.G1
@pytest.fixture
def G2(lib):
return lib.G2
@pytest.fixture
def G12(lib):
return lib.G12
@pytest.fixture
def Z1(lib):
return lib.Z1
@pytest.fixture
def Z2(lib):
return lib.Z2
@pytest.fixture
def b(lib):
return lib.b
@pytest.fixture
def b2(lib):
return lib.b2
@pytest.fixture
def b12(lib):
return lib.b12
@pytest.fixture
def is_inf(lib):
return lib.is_inf
@pytest.fixture
def is_on_curve(lib):
return lib.is_on_curve
@pytest.fixture
def eq(lib):
return lib.eq
@pytest.fixture
def add(lib):
return lib.add
@pytest.fixture
def double(lib):
return lib.double
@pytest.fixture
def curve_order(lib):
return lib.curve_order
@pytest.fixture
def multiply(lib):
return lib.multiply
@pytest.fixture
def pairing(lib):
return lib.pairing
@pytest.fixture
def neg(lib):
return lib.neg
@pytest.fixture
def twist(lib):
return lib.twist
def test_FQ_object(FQ, field_modulus):
assert FQ(2) * FQ(2) == FQ(4)
assert FQ(2) / FQ(7) + FQ(9) / FQ(7) == FQ(11) / FQ(7)
assert FQ(2) * FQ(7) + FQ(9) * FQ(7) == FQ(11) * FQ(7)
assert FQ(9) ** field_modulus == FQ(9)
assert FQ(-1).n > 0
def test_FQ2_object(FQ2, field_modulus):
x = FQ2([1, 0])
f = FQ2([1, 2])
fpx = FQ2([2, 2])
one = FQ2.one()
z1, z2 = FQ2([-1, -1]).coeffs
assert x + f == fpx
assert f / f == one
assert one / f + x / f == (one + x) / f
assert one * f + x * f == (one + x) * f
assert x ** (field_modulus ** 2 - 1) == one
if isinstance(z1, int):
assert z1 > 0
assert z2 > 0
else:
assert z1.n > 0
assert z2.n > 0
def test_FQ12_object(FQ12, field_modulus):
x = FQ12([1] + [0] * 11)
f = FQ12([1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12])
fpx = FQ12([2, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12])
one = FQ12.one()
zs = FQ12([-1]*12).coeffs
assert x + f == fpx
assert f / f == one
assert one / f + x / f == (one + x) / f
assert one * f + x * f == (one + x) * f
if isinstance(zs[0], int):
assert all(z > 0 for z in zs)
else:
assert all(z.n > 0 for z in zs)
# This check takes too long
# assert x ** (field_modulus ** 12 - 1) == one
def test_G1_object(G1, eq, double, add, multiply, curve_order, is_inf):
assert eq(add(add(double(G1), G1), G1), double(double(G1)))
assert not eq(double(G1), G1)
assert eq(add(multiply(G1, 9), multiply(G1, 5)), add(multiply(G1, 12), multiply(G1, 2)))
assert is_inf(multiply(G1, curve_order))
def test_G2_object(G2, b2, eq, add, double, multiply, is_inf, curve_order, field_modulus, is_on_curve):
assert eq(add(add(double(G2), G2), G2), double(double(G2)))
assert not eq(double(G2), G2)
assert eq(add(multiply(G2, 9), multiply(G2, 5)), add(multiply(G2, 12), multiply(G2, 2)))
assert is_inf(multiply(G2, curve_order))
assert not is_inf(multiply(G2, 2 * field_modulus - curve_order))
assert is_on_curve(multiply(G2, 9), b2)
def test_G12_object(G12, b12, eq, add, double, multiply, is_on_curve, is_inf, curve_order):
assert eq(add(add(double(G12), G12), G12), double(double(G12)))
assert not eq(double(G12), G12)
assert eq(add(multiply(G12, 9), multiply(G12, 5)), add(multiply(G12, 12), multiply(G12, 2)))
assert is_on_curve(multiply(G12, 9), b12)
assert is_inf(multiply(G12, curve_order))
def test_Z1_object(add, eq, double, FQ, G1, is_inf, multiply, neg, twist, Z1):
assert eq(G1, add(G1, Z1))
assert eq(Z1, double(Z1))
assert eq(Z1, multiply(Z1, 0))
assert eq(Z1, multiply(Z1, 1))
assert eq(Z1, multiply(Z1, 2))
assert eq(Z1, multiply(Z1, 3))
assert is_inf(neg(Z1))
def test_Z2_object(add, eq, double, FQ2, G2, is_inf, multiply, neg, twist, Z2):
assert eq(G2, add(G2, Z2))
assert eq(Z2, double(Z2))
assert eq(Z2, multiply(Z2, 0))
assert eq(Z2, multiply(Z2, 1))
assert eq(Z2, multiply(Z2, 2))
assert eq(Z2, multiply(Z2, 3))
assert is_inf(neg(Z2))
assert is_inf(twist(Z2))
def test_none_point(lib, neg, twist):
if lib not in [optimized_bn128, optimized_bls12_381]:
pytest.skip()
with pytest.raises(Exception):
neg(None)
with pytest.raises(Exception):
twist(None)
def test_pairing_negative_G1(pairing, G1, G2, FQ12, curve_order, multiply, neg):
p1 = pairing(G2, G1)
pn1 = pairing(G2, neg(G1))
assert p1 * pn1 == FQ12.one()
def test_pairing_negative_G2(pairing, G1, G2, FQ12, curve_order, multiply, neg):
p1 = pairing(G2, G1)
pn1 = pairing(G2, neg(G1))
np1 = pairing(neg(G2), G1)
assert p1 * np1 == FQ12.one()
assert pn1 == np1
def test_pairing_output_order(G1, G2, FQ12, pairing, curve_order):
p1 = pairing(G2, G1)
assert p1 ** curve_order == FQ12.one()
def test_pairing_bilinearity_on_G1(G1, G2, neg, multiply, pairing):
p1 = pairing(G2, G1)
p2 = pairing(G2, multiply(G1, 2))
np1 = pairing(neg(G2), G1)
assert p1 * p1 == p2
def test_pairing_is_non_degenerate(G1, G2, neg, pairing, multiply):
p1 = pairing(G2, G1)
p2 = pairing(G2, multiply(G1, 2))
np1 = pairing(neg(G2), G1)
assert p1 != p2 and p1 != np1 and p2 != np1
def test_pairing_bilinearity_on_G2(G1, G2, pairing, multiply):
p1 = pairing(G2, G1)
po2 = pairing(multiply(G2, 2), G1)
assert p1 * p1 == po2
def test_pairing_composit_check(G1, G2, multiply, pairing):
p3 = pairing(multiply(G2, 27), multiply(G1, 37))
po3 = pairing(G2, multiply(G1, 999))
assert p3 == po3
"""
for lib in (bn128, optimized_bn128):
FQ, FQ2, FQ12, field_modulus = lib.FQ, lib.FQ2, lib.FQ12, lib.field_modulus
assert FQ(2) * FQ(2) == FQ(4)
assert FQ(2) / FQ(7) + FQ(9) / FQ(7) == FQ(11) / FQ(7)
assert FQ(2) * FQ(7) + FQ(9) * FQ(7) == FQ(11) * FQ(7)
assert FQ(9) ** field_modulus == FQ(9)
print('FQ works fine')
x = FQ2([1, 0])
f = FQ2([1, 2])
fpx = FQ2([2, 2])
one = FQ2.one()
assert x + f == fpx
assert f / f == one
assert one / f + x / f == (one + x) / f
assert one * f + x * f == (one + x) * f
assert x ** (field_modulus ** 2 - 1) == one
print('FQ2 works fine')
x = FQ12([1] + [0] * 11)
f = FQ12([1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12])
fpx = FQ12([2, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12])
one = FQ12.one()
assert x + f == fpx
assert f / f == one
assert one / f + x / f == (one + x) / f
assert one * f + x * f == (one + x) * f
# This check takes too long
# assert x ** (field_modulus ** 12 - 1) == one
print('FQ12 works fine')
G1, G2, G12, b, b2, b12, is_inf, is_on_curve, eq, add, double, curve_order, multiply = \
lib.G1, lib.G2, lib.G12, lib.b, lib.b2, lib.b12, lib.is_inf, lib.is_on_curve, lib.eq, lib.add, lib.double, lib.curve_order, lib.multiply
assert eq(add(add(double(G1), G1), G1), double(double(G1)))
assert not eq(double(G1), G1)
assert eq(add(multiply(G1, 9), multiply(G1, 5)), add(multiply(G1, 12), multiply(G1, 2)))
assert is_inf(multiply(G1, curve_order))
print('G1 works fine')
assert eq(add(add(double(G2), G2), G2), double(double(G2)))
assert not eq(double(G2), G2)
assert eq(add(multiply(G2, 9), multiply(G2, 5)), add(multiply(G2, 12), multiply(G2, 2)))
assert is_inf(multiply(G2, curve_order))
assert not is_inf(multiply(G2, 2 * field_modulus - curve_order))
assert is_on_curve(multiply(G2, 9), b2)
print('G2 works fine')
assert eq(add(add(double(G12), G12), G12), double(double(G12)))
assert not eq(double(G12), G12)
assert eq(add(multiply(G12, 9), multiply(G12, 5)), add(multiply(G12, 12), multiply(G12, 2)))
assert is_on_curve(multiply(G12, 9), b12)
assert is_inf(multiply(G12, curve_order))
print('G12 works fine')
pairing, neg = lib.pairing, lib.neg
print('Starting pairing tests')
a = time.time()
p1 = pairing(G2, G1)
pn1 = pairing(G2, neg(G1))
assert p1 * pn1 == FQ12.one()
print('Pairing check against negative in G1 passed')
np1 = pairing(neg(G2), G1)
assert p1 * np1 == FQ12.one()
assert pn1 == np1
print('Pairing check against negative in G2 passed')
assert p1 ** curve_order == FQ12.one()
print('Pairing output has correct order')
p2 = pairing(G2, multiply(G1, 2))
assert p1 * p1 == p2
print('Pairing bilinearity in G1 passed')
assert p1 != p2 and p1 != np1 and p2 != np1
print('Pairing is non-degenerate')
po2 = pairing(multiply(G2, 2), G1)
assert p1 * p1 == po2
print('Pairing bilinearity in G2 passed')
p3 = pairing(multiply(G2, 27), multiply(G1, 37))
po3 = pairing(G2, multiply(G1, 999))
assert p3 == po3
print('Composite check passed')
print('Total time for pairings: %.3f' % (time.time() - a))
"""
|
154038
|
import iotbx.file_reader
from cctbx.array_family import flex
def run(hklin):
arrays = iotbx.file_reader.any_file(hklin).file_server.miller_arrays
for arr in arrays:
if not arr.anomalous_flag():
continue
        print(arr.info())
if arr.is_complex_array():
arr = arr.as_amplitude_array() # must be F
ano = arr.anomalous_differences()
ave = arr.average_bijvoet_mates()
ano, ave = ano.common_sets(ave)
print " <d''/mean>=", flex.mean(flex.abs(ano.data()) / ave.data())
print " <d''>/<mean>=", flex.mean(flex.abs(ano.data())) / flex.mean(ave.data())
print
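        # Note (added context, not from the original script): the two statistics differ in
        # weighting. <d''/mean> averages the per-reflection ratio |anomalous difference| / <F>,
        # so weak reflections contribute as much as strong ones; <d''>/<mean> divides the
        # average anomalous difference by the average <F>, which is dominated by strong data.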
if __name__ == "__main__":
import sys
run(sys.argv[1])
|
154068
|
def fahrenheit_to_celsius(F):
C = 0
# Your code goes here: calculate the temperature in Celsius,
# store in a variable (we called it C), and return it.
return C
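# A minimal sketch of one possible solution (the exercise above intentionally
# leaves the body blank; this is only for reference):
#
#     def fahrenheit_to_celsius(F):
#         C = (F - 32) * 5 / 9
#         return C
#
#     fahrenheit_to_celsius(212)  # -> 100.0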
|
154090
|
from django.test import SimpleTestCase
from corehq.form_processor.models import XFormInstanceSQL
class FormDocTypesTest(SimpleTestCase):
def test_doc_types(self):
for doc_type in XFormInstanceSQL.DOC_TYPE_TO_STATE:
self.assertIn(doc_type, XFormInstanceSQL.ALL_DOC_TYPES)
def test_deleted(self):
self.assertIn('XFormInstance-Deleted', XFormInstanceSQL.ALL_DOC_TYPES)
|
154103
|
import sys
from Bio import SeqIO
input_file = sys.argv[1]
output_file = "".join(input_file.split(".")[:-1]) + ".rachel.fa"
print(input_file)
print(output_file)
fasta_sequences = SeqIO.parse(open(input_file,'r'),'fasta')
with open(output_file, 'w') as out_file:
for fasta in fasta_sequences:
name, description, sequence = fasta.id, fasta.description, str(fasta.seq)
new_sequence = ""
num = ""
for letter in sequence:
if letter.isdigit():
num += letter
elif num!="":
new_sequence += " " + num + " " + letter
num = ""
else:
new_sequence += letter
out_file.write(">%s\t%s\n%s\n" %(name, description, new_sequence))
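# Illustrative example of the rewriting above (hypothetical record):
#   input sequence:  "MKV12TPL7A"
#   output sequence: "MKV 12 TPL 7 A"
# i.e. every run of digits is set off with spaces before the following letter.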
|
154121
|
from .abstract_conjunction import AbstractConjunction
from .condition_type import ConditionType
class OrConjunction(AbstractConjunction):
def __init__(self, conditions):
super().__init__(type_=ConditionType.OR.value, conditions=conditions)
|
154123
|
import asyncio
import pytest
from aiohttp.test_utils import AioHTTPTestCase, unittest_run_loop
from aiohttp import web
from asynctest import mock as async_mock
from ....core.in_memory import InMemoryProfile
from ....utils.stats import Collector
from ...wire_format import JsonWireFormat
from ..base import OutboundTransportError
from ..http import HttpTransport
class TestHttpTransport(AioHTTPTestCase):
async def setUpAsync(self):
self.profile = InMemoryProfile.test_profile()
self.message_results = []
self.headers = {}
async def receive_message(self, request):
payload = await request.json()
self.headers = request.headers
self.message_results.append(payload)
raise web.HTTPOk()
async def get_application(self):
"""
        Override the get_application method to return your application.
"""
app = web.Application()
app.add_routes([web.post("/", self.receive_message)])
return app
@unittest_run_loop
async def test_handle_message_no_api_key(self):
server_addr = f"http://localhost:{self.server.port}"
async def send_message(transport, payload, endpoint):
async with transport:
await transport.handle_message(self.profile, payload, endpoint)
transport = HttpTransport()
await asyncio.wait_for(send_message(transport, "{}", endpoint=server_addr), 5.0)
assert self.message_results == [{}]
assert self.headers.get("x-api-key") is None
assert self.headers.get("content-type") == "application/json"
@unittest_run_loop
async def test_handle_message_api_key(self):
server_addr = f"http://localhost:{self.server.port}"
api_key = "test1234"
async def send_message(transport, payload, endpoint, api_key):
async with transport:
await transport.handle_message(
self.profile, payload, endpoint, api_key=api_key
)
transport = HttpTransport()
await asyncio.wait_for(
send_message(transport, "{}", endpoint=server_addr, api_key=api_key), 5.0
)
assert self.message_results == [{}]
assert self.headers.get("x-api-key") == api_key
@unittest_run_loop
async def test_handle_message_packed_compat_mime_type(self):
server_addr = f"http://localhost:{self.server.port}"
async def send_message(transport, payload, endpoint):
async with transport:
await transport.handle_message(self.profile, payload, endpoint)
transport = HttpTransport()
await asyncio.wait_for(
send_message(transport, b"{}", endpoint=server_addr), 5.0
)
assert self.message_results == [{}]
assert self.headers.get("content-type") == "application/ssi-agent-wire"
@unittest_run_loop
async def test_handle_message_packed_standard_mime_type(self):
server_addr = f"http://localhost:{self.server.port}"
async def send_message(transport, payload, endpoint):
async with transport:
await transport.handle_message(self.profile, payload, endpoint)
transport = HttpTransport()
self.profile.settings["emit_new_didcomm_mime_type"] = True
await asyncio.wait_for(
send_message(transport, b"{}", endpoint=server_addr), 5.0
)
assert self.message_results == [{}]
assert self.headers.get("content-type") == "application/didcomm-envelope-enc"
@unittest_run_loop
async def test_stats(self):
server_addr = f"http://localhost:{self.server.port}"
async def send_message(transport, payload, endpoint):
async with transport:
await transport.handle_message(self.profile, payload, endpoint)
transport = HttpTransport()
transport.collector = Collector()
await asyncio.wait_for(
send_message(transport, b"{}", endpoint=server_addr), 5.0
)
results = transport.collector.extract()
assert results["count"] == {
"outbound-http:dns_resolve": 1,
"outbound-http:connect": 1,
"outbound-http:POST": 1,
}
@unittest_run_loop
async def test_transport_coverage(self):
transport = HttpTransport()
assert transport.wire_format is None
transport.wire_format = JsonWireFormat()
assert transport.wire_format is not None
await transport.start()
with pytest.raises(OutboundTransportError):
await transport.handle_message(None, None, None)
with async_mock.patch.object(
transport, "client_session", async_mock.MagicMock()
) as mock_session:
mock_response = async_mock.MagicMock(status=404)
mock_session.post = async_mock.MagicMock(
return_value=async_mock.MagicMock(
__aenter__=async_mock.CoroutineMock(return_value=mock_response)
)
)
with pytest.raises(OutboundTransportError):
await transport.handle_message(None, "dummy", "http://localhost")
await transport.__aexit__(KeyError, KeyError("just a drill"), None)
|
154127
|
import unittest
from unittest import mock
from django.contrib.auth.models import User, Group
from tethys_compute.job_manager import JobManager, JOB_TYPES
from tethys_compute.models.tethys_job import TethysJob
from tethys_compute.models.condor.condor_scheduler import CondorScheduler
from tethys_apps.models import TethysApp
class TestJobManager(unittest.TestCase):
@classmethod
def setUpClass(cls):
cls.app_model = TethysApp(
name='test_app_job_manager',
package='test_app_job_manager'
)
cls.app_model.save()
cls.user_model = User.objects.create_user(
username='test_user_job_manager',
email='<EMAIL>',
password='<PASSWORD>'
)
cls.group_model = Group.objects.create(
name='test_group_job_manager'
)
cls.group_model.user_set.add(cls.user_model)
cls.scheduler = CondorScheduler(
name='test_scheduler',
host='localhost',
)
cls.scheduler.save()
cls.tethysjob = TethysJob(
name='test_tethysjob',
description='test_description',
user=cls.user_model,
label='test_app_job_manager',
)
cls.tethysjob.save()
cls.tethysjob.groups.add(cls.group_model)
@classmethod
def tearDownClass(cls):
cls.tethysjob.delete()
cls.scheduler.delete()
cls.group_model.delete()
cls.user_model.delete()
cls.app_model.delete()
def setUp(self):
pass
def tearDown(self):
pass
def test_JobManager_init(self):
mock_app = mock.MagicMock()
mock_app.package = 'test_label'
ret = JobManager(mock_app)
# Check Result
self.assertEqual(mock_app, ret.app)
self.assertEqual('test_label', ret.label)
def test_JobManager_create_job_custom_class(self):
self.app_model.get_user_workspace = mock.MagicMock()
self.app_model.get_user_workspace().path = 'test_user_workspace'
# Execute
ret_jm = JobManager(self.app_model)
ret_job = ret_jm.create_job(
name='test_create_tethys_job',
user=self.user_model,
job_type=TethysJob,
groups=self.group_model,
)
self.assertEqual(ret_job.name, 'test_create_tethys_job')
self.assertEqual(ret_job.user, self.user_model)
self.assertEqual(ret_job.label, 'test_app_job_manager')
self.assertIn(self.group_model, ret_job.groups.all())
ret_job.delete()
@mock.patch('tethys_compute.job_manager.CondorJob')
def test_JobManager_create_job_string(self, mock_cj):
mock_app = mock.MagicMock()
mock_app.package = 'test_label'
mock_app.get_app_workspace.return_value = 'test_app_workspace'
mock_user_workspace = mock.MagicMock()
mock_app.get_user_workspace.return_value = mock_user_workspace
mock_app.get_user_workspace().path = 'test_user_workspace'
# Execute
ret_jm = JobManager(mock_app)
with mock.patch.dict(JOB_TYPES, {'CONDOR': mock_cj}):
ret_jm.create_job(name='test_name', user='test_user', job_type='CONDOR')
mock_cj.assert_called_with(label='test_label', name='test_name', user='test_user',
workspace='test_user_workspace')
def test_JobManager_list_job_with_user(self):
mgr = JobManager(self.app_model)
ret = mgr.list_jobs(user=self.user_model)
self.assertEqual(ret[0], self.tethysjob)
def test_JobManager_list_job_with_groups(self):
mgr = JobManager(self.app_model)
ret = mgr.list_jobs(groups=[self.group_model])
self.assertEqual(ret[0], self.tethysjob)
def test_JobManager_list_job_value_error(self):
mgr = JobManager(self.app_model)
self.assertRaises(ValueError, mgr.list_jobs, user=self.user_model, groups=[self.group_model])
@mock.patch('tethys_compute.job_manager.TethysJob')
def test_JobManager_get_job(self, mock_tethys_job):
mock_args = mock.MagicMock()
mock_app_package = mock.MagicMock()
mock_args.package = mock_app_package
mock_jobs = mock.MagicMock()
mock_tethys_job.objects.get_subclass.return_value = mock_jobs
mock_job_id = 'fooid'
mock_user = 'bar'
mgr = JobManager(mock_args)
ret = mgr.get_job(job_id=mock_job_id, user=mock_user)
self.assertEqual(ret, mock_jobs)
mock_tethys_job.objects.get_subclass.assert_called_once_with(id='fooid', label=mock_app_package, user='bar')
@mock.patch('tethys_compute.job_manager.TethysJob')
def test_JobManager_get_job_dne(self, mock_tethys_job):
mock_args = mock.MagicMock()
mock_app_package = mock.MagicMock()
mock_args.package = mock_app_package
mock_tethys_job.DoesNotExist = TethysJob.DoesNotExist # Restore original exception
mock_tethys_job.objects.get_subclass.side_effect = TethysJob.DoesNotExist
mock_job_id = 'fooid'
mock_user = 'bar'
mgr = JobManager(mock_args)
ret = mgr.get_job(job_id=mock_job_id, user=mock_user)
self.assertEqual(ret, None)
mock_tethys_job.objects.get_subclass.assert_called_once_with(id='fooid', label=mock_app_package, user='bar')
def test_JobManager_get_job_status_callback_url(self):
mock_args = mock.MagicMock()
mock_request = mock.MagicMock()
mock_job_id = 'foo'
mgr = JobManager(mock_args)
mgr.get_job_status_callback_url(mock_request, mock_job_id)
mock_request.build_absolute_uri.assert_called_once_with(u'/update-job-status/foo/')
|
154182
|
import unittest
import sys
from pathlib import Path
TEST_DIR = str(Path(__file__).parent.resolve())
BASE_DIR = str(Path(__file__).parent.parent.resolve())
sys.path.append(BASE_DIR)
# Run tests without using GPU
import os
os.environ["CUDA_VISIBLE_DEVICES"] = "-1"
from core.sensible_span_extractor import SensibleSpanExtractor, SubsequenceExtractor
class TestSensibleSpanExtractor(unittest.TestCase):
def setUp(self):
self.extractor = SensibleSpanExtractor()
def test_generates_span_1(self):
sent = 'The present invention relates to an epitaxial group-III-nitride buffer layer structure on a heterosubstrate.'
span = 'group-III-nitride buffer layer structure'
self.assertSpan(sent, span)
def test_generates_span_2(self):
sent = 'Another object of the present invention is to provide a fire fighting drone.'
span = 'a fire fighting drone.'
self.assertSpan(sent, span)
def assertSpan(self, sent, span):
self.assertEqual(span, self.extractor.extract_from(sent))
class TestSubsequenceExtractor(unittest.TestCase):
def setUp(self):
self.extractor = SubsequenceExtractor([0, 1, 2, 3])
def test_subsequences_of_zero_length(self):
subseqs = self.extractor.extract(0)
self.assertEqual(0, len(subseqs))
def test_subsequences_of_unit_length(self):
subseqs = self.extractor.extract(1)
self.assertEqual(4, len(subseqs))
def test_subsequences_between_lengths(self):
subseqs = self.extractor.extract(2, 3)
self.assertEqual(5, len(subseqs))
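        # Counting check for the assertion above: a 4-element sequence has
        # 3 contiguous subsequences of length 2 and 2 of length 3, i.e. 5 in
        # total (assuming extract() returns contiguous windows, as the other
        # tests here imply).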
if __name__ == '__main__':
unittest.main()
|
154246
|
import shutil
import tempfile
from unittest import TestCase, mock
import pytest
from lineflow import download
from lineflow.datasets.squad import Squad, get_squad
class SquadTestCase(TestCase):
@classmethod
def setUpClass(cls):
cls.default_cache_root = download.get_cache_root()
cls.temp_dir = tempfile.mkdtemp()
download.set_cache_root(cls.temp_dir)
@classmethod
def tearDownClass(cls):
download.set_cache_root(cls.default_cache_root)
shutil.rmtree(cls.temp_dir)
@pytest.mark.slow
def test_get_squad_v1(self):
raw = get_squad(version=1)
self.assertIn('train', raw)
self.assertEqual(len(raw['train']), 87_599)
self.assertIn('dev', raw)
self.assertEqual(len(raw['dev']), 10_570)
@pytest.mark.slow
def test_get_squad_v1_twice(self):
get_squad(version=1)
with mock.patch('lineflow.datasets.squad.pickle', autospec=True) as mock_pickle:
get_squad(version=1)
mock_pickle.dump.assert_not_called()
self.assertEqual(mock_pickle.load.call_count, 1)
@pytest.mark.slow
def test_get_squad_v2_twice(self):
get_squad(version=2)
with mock.patch('lineflow.datasets.squad.pickle', autospec=True) as mock_pickle:
get_squad(version=2)
mock_pickle.dump.assert_not_called()
self.assertEqual(mock_pickle.load.call_count, 1)
@pytest.mark.slow
def test_get_squad_v2(self):
raw = get_squad(version=2)
self.assertIn('train', raw)
self.assertEqual(len(raw['train']), 130_319)
self.assertIn('dev', raw)
self.assertEqual(len(raw['dev']), 11_873)
@pytest.mark.slow
def test_loads_v1_each_split(self):
train = Squad(split='train', version=1)
self.assertEqual(len(train), 87_599)
dev = Squad(split='dev', version=1)
self.assertEqual(len(dev), 10_570)
@pytest.mark.slow
def test_loads_v2_each_split(self):
train = Squad(split='train', version=2)
self.assertEqual(len(train), 130_319)
dev = Squad(split='dev', version=2)
self.assertEqual(len(dev), 11_873)
def test_raises_value_error_with_invalid_split(self):
with self.assertRaises(ValueError):
Squad(split='invalid_split')
def test_raises_value_error_with_invalid_version(self):
with self.assertRaises(ValueError):
Squad(version=3)
|
154292
|
from __future__ import annotations
from typing import Dict
from whyqd.base import BaseCategoryAction
class Action(BaseCategoryAction):
"""`CATEGORISE` support function which must be run *before* it to derive unique category terms from unique
values in a source data column.
`ASSIGN` subset of unique values (or all booleans) to a specified `CategoryModel` in a destination `ColumnModel`.
Scripts must be 'flat' and are of the form::
"ASSIGN_CATEGORY_UNIQUES > 'destination_field'::'destination_category' < 'source_column'::['unique_source_term', 'unique_source_term', etc.]"
Where:
* `destination_field` is a `FieldModel` and is the destination column. The `::` linked `CategoryModel` defines what
term the source values are to be assigned.
* `list` of `CategoryModel` - unique values from `ColumnModel` - will be assigned `::CategoryModel`.
"""
def __init__(self) -> None:
super().__init__()
self.name = "ASSIGN_CATEGORY_UNIQUES"
self.title = "Assign category uniques"
self.description = (
"Assign unique values in a source data column as categorical unique terms defined in the Schema."
)
self.structure = "unique"
def parse(self, script: str) -> Dict[str, str]:
"""Validates term requirements for this category action script.
Script is of the form::
"ACTION > 'destination_column'::term < 'source_column'::[term]"
Which is inherited as::
{
"action": ACTION,
"destination": 'destination_column',
"category": term,
"source": 'source_column',
"source_category": [term]
}
Parameters
----------
script: str
An action script.
Raises
------
ValueError for any parsing errors.
Returns
-------
dict
Parsed dictionary of validated split strings for further processing.
"""
parsed = super().parse(script)
# Class-based term validation
if parsed["action"] != "ASSIGN_CATEGORY_UNIQUES":
raise ValueError(f"Action not valid for this 'ASSIGN_CATEGORY_UNIQUES' parser ({parsed['action']}).")
if not parsed.get("source_category"):
raise ValueError(
"'ASSIGN_CATEGORY_UNIQUES' category assignment requires unique source category references."
)
return parsed
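# Hedged usage sketch (added, not part of the original module): assuming the script
# grammar shown in the class docstring above, parsing a flat ASSIGN_CATEGORY_UNIQUES
# script should return the inherited dictionary with "action", "destination",
# "category", "source" and "source_category" keys. The field and term names below
# are purely illustrative.
if __name__ == "__main__":
    example_script = (
        "ASSIGN_CATEGORY_UNIQUES > 'occupation'::'employed' < "
        "'employment_status'::['FT', 'PT']"
    )
    parsed = Action().parse(example_script)
    # Expect parsed["action"] == "ASSIGN_CATEGORY_UNIQUES" and a non-empty
    # parsed["source_category"] list, per the validation above.
    print(parsed)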
|
154345
|
from .issuer_credential_revocation_updater import IssuerCredentialRevocationUpdater
from .issuer_credential_status_updater import IssuerCredentialStatusUpdater
def subscribe_issuer_protocol_listeners():
IssuerCredentialStatusUpdater()
IssuerCredentialRevocationUpdater()
|
154353
|
import os
import time
from pyscf import scf
import numpy as np
from mldftdat.lowmem_analyzers import RHFAnalyzer, UHFAnalyzer
from mldftdat.workflow_utils import get_save_dir, SAVE_ROOT, load_mol_ids
from mldftdat.density import get_exchange_descriptors2, LDA_FACTOR, GG_AMIN
from mldftdat.data import get_unique_coord_indexes_spherical
import logging
import yaml
from argparse import ArgumentParser
"""
Script to compile a dataset from the CIDER DB for training a CIDER functional.
"""
def compile_dataset2(DATASET_NAME, MOL_IDS, SAVE_ROOT, CALC_TYPE, FUNCTIONAL, BASIS,
spherical_atom=False, locx=False, lam=0.5,
version='a', **gg_kwargs):
all_descriptor_data = None
all_rho_data = None
all_values = []
all_weights = []
cutoffs = []
if locx:
raise ValueError('locx setting not supported in this version! (but might be later)')
Analyzer = loc_analyzers.UHFAnalyzer if 'U' in CALC_TYPE \
else loc_analyzers.RHFAnalyzer
else:
Analyzer = UHFAnalyzer if 'U' in CALC_TYPE else RHFAnalyzer
for MOL_ID in MOL_IDS:
logging.info('Computing descriptors for {}'.format(MOL_ID))
data_dir = get_save_dir(SAVE_ROOT, CALC_TYPE, BASIS, MOL_ID, FUNCTIONAL)
start = time.monotonic()
analyzer = Analyzer.load(data_dir + '/data.hdf5')
analyzer.get_ao_rho_data()
if type(analyzer.calc) == scf.hf.RHF:
restricted = True
else:
restricted = False
end = time.monotonic()
logging.info('Analyzer load time {}'.format(end - start))
if spherical_atom:
start = time.monotonic()
indexes = get_unique_coord_indexes_spherical(analyzer.grid.coords)
end = time.monotonic()
logging.info('Index scanning time {}'.format(end - start))
start = time.monotonic()
if restricted:
descriptor_data = get_exchange_descriptors2(
analyzer, restricted=True, version=version,
**gg_kwargs
)
else:
descriptor_data_u, descriptor_data_d = \
get_exchange_descriptors2(
analyzer, restricted=False, version=version,
**gg_kwargs
)
descriptor_data = np.append(descriptor_data_u, descriptor_data_d,
axis = 1)
end = time.monotonic()
logging.info('Get descriptor time {}'.format(end - start))
if locx:
logging.info('Getting loc fx with lambda={}'.format(lam))
values = analyzer.get_loc_fx_energy_density(lam = lam, overwrite=True)
if not restricted:
values = 2 * np.append(analyzer.loc_fx_energy_density_u,
analyzer.loc_fx_energy_density_d)
else:
values = analyzer.get_fx_energy_density()
if not restricted:
values = 2 * np.append(analyzer.fx_energy_density_u,
analyzer.fx_energy_density_d)
rho_data = analyzer.rho_data
if not restricted:
rho_data = 2 * np.append(rho_data[0], rho_data[1], axis=1)
if spherical_atom:
values = values[indexes]
descriptor_data = descriptor_data[:,indexes]
rho_data = rho_data[:,indexes]
weights = analyzer.grid.weights[indexes]
else:
weights = analyzer.grid.weights
if all_descriptor_data is None:
all_descriptor_data = descriptor_data
else:
all_descriptor_data = np.append(all_descriptor_data, descriptor_data,
axis = 1)
if all_rho_data is None:
all_rho_data = rho_data
else:
all_rho_data = np.append(all_rho_data, rho_data, axis=1)
all_values = np.append(all_values, values)
all_weights = np.append(all_weights, weights)
if not restricted:
# two copies for unrestricted case
all_weights = np.append(all_weights, weights)
cutoffs.append(all_values.shape[0])
DATASET_NAME = os.path.basename(DATASET_NAME)
save_dir = os.path.join(SAVE_ROOT, 'DATASETS',
FUNCTIONAL, BASIS, version, DATASET_NAME)
if not os.path.isdir(save_dir):
os.makedirs(save_dir, exist_ok=True)
rho_file = os.path.join(save_dir, 'rho.npy')
desc_file = os.path.join(save_dir, 'desc.npy')
val_file = os.path.join(save_dir, 'val.npy')
wt_file = os.path.join(save_dir, 'wt.npy')
cut_file = os.path.join(save_dir, 'cut.npy')
np.save(rho_file, all_rho_data)
np.save(desc_file, all_descriptor_data)
np.save(val_file, all_values)
np.save(wt_file, all_weights)
np.save(cut_file, np.array(cutoffs))
settings = {
'DATASET_NAME': DATASET_NAME,
'MOL_IDS': MOL_IDS,
'SAVE_ROOT': SAVE_ROOT,
'CALC_TYPE': CALC_TYPE,
'FUNCTIONAL': FUNCTIONAL,
'BASIS': BASIS,
'spherical_atom': spherical_atom,
'locx': locx,
'lam': lam,
'version': version
}
settings.update(gg_kwargs)
with open(os.path.join(save_dir, 'settings.yaml'), 'w') as f:
yaml.dump(settings, f)
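# Added note: compile_dataset2 writes its outputs under
# SAVE_ROOT/DATASETS/<FUNCTIONAL>/<BASIS>/<version>/<DATASET_NAME>/ as
# rho.npy, desc.npy, val.npy, wt.npy and cut.npy, plus a settings.yaml
# recording the arguments it was called with (see the code above).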
if __name__ == '__main__':
logging.basicConfig(level=logging.INFO)
m_desc = 'Compile dataset of exchange descriptors'
parser = ArgumentParser(description=m_desc)
parser.add_argument('mol_id_file', type=str,
help='yaml file from which to read mol_ids to parse')
parser.add_argument('basis', metavar='basis', type=str,
help='basis set code')
parser.add_argument('--functional', metavar='functional', type=str, default=None,
help='exchange-correlation functional, HF for Hartree-Fock')
parser.add_argument('--spherical-atom', action='store_true',
default=False, help='whether dataset contains spherical atoms')
parser.add_argument('--locx', action='store_true',
default=False, help='whether to use transformed exchange hole')
parser.add_argument('--lam', default=0.5, type=float,
help='lambda factor for exchange hole, only used if locx=True')
parser.add_argument('--version', default='c', type=str,
help='version of descriptor set. Default c')
parser.add_argument('--gg-a0', default=8.0, type=float)
parser.add_argument('--gg-facmul', default=1.0, type=float)
parser.add_argument('--gg-amin', default=GG_AMIN, type=float)
parser.add_argument('--suffix', default=None, type=str,
help='customize data directories with this suffix')
args = parser.parse_args()
version = args.version.lower()
assert version in ['a', 'b', 'c']
calc_type, mol_ids = load_mol_ids(args.mol_id_file)
assert ('HF' in calc_type) or (args.functional is not None),\
'Must specify functional if not using HF reference.'
if args.mol_id_file.endswith('.yaml'):
mol_id_code = args.mol_id_file[:-5]
else:
mol_id_code = args.mol_id_file
dataname = 'XTR{}_{}'.format(version.upper(), mol_id_code.upper())
if args.spherical_atom:
pass#dataname = 'SPH_' + dataname
if args.locx:
dataname = 'LOCX_' + dataname
if args.suffix is not None:
dataname = dataname + '_' + args.suffix
# TODO remove this if locx supported in the future
args.locx = False
if version == 'c':
compile_dataset2(
dataname, mol_ids, SAVE_ROOT, calc_type, args.functional, args.basis,
spherical_atom=args.spherical_atom, locx=args.locx, lam=args.lam,
version=version, a0=args.gg_a0, fac_mul=args.gg_facmul,
amin=args.gg_amin
)
else:
compile_dataset2(
dataname, mol_ids, SAVE_ROOT, calc_type, args.functional, args.basis,
spherical_atom=args.spherical_atom, locx=args.locx, lam=args.lam,
version=version, a0=args.gg_a0, fac_mul=args.gg_facmul,
amin=args.gg_amin
)
|
154355
|
import os
import numpy as np
import time
import subprocess
import sys
setups = ['spec', 'spec', 'spec']
GPU = 0
script = 'train.py'
if __name__ == '__main__':
start = time.time()
for stp in setups:
str_exec = 'CUDA_VISIBLE_DEVICES=' + str(GPU) + ' python ' + str(script) + ' ' + str(stp)
#str_exec = 'CUDA_VISIBLE_DEVICES=' + str(GPU) + ' python3 ' + str(script) + ' ' + str(stp)
print(str_exec)
try:
retcode = subprocess.call(str_exec, shell=True)
if retcode < 0:
print("Terminated by signal", retcode)
else:
print("Returned", retcode)
except OSError as e:
print("Execution failed:", e)
end = time.time()
print('\nDone! It took: %7.2f hours' % ((end - start) / 3600.0))
|
154393
|
from common import *
from logcatcolor.column import *
from logcatcolor.config import *
from logcatcolor.layout import *
from logcatcolor.profile import *
from logcatcolor.reader import *
import unittest
class ProfileTest(unittest.TestCase):
def setUp(self):
pass
def test_package_name_filter(self):
profile = Profile(name = 'package_filt', packages = ['com.example.test'])
self.assertFalse(profile.include({'message' : 'Start proc com.example.test for activity tw.com.xxxx.android.yyyy/.333Activity: pid=123456 uid=10105 gids={3003}'}))
self.assertTrue(profile.include({'pid' : '123456', 'message' : 'foo bar'}))
def test_package_name_filter_android_51(self):
profile = Profile(name = 'package_filt', packages = ['com.example.test'])
self.assertFalse(profile.include({'message' : 'Start proc 26360:com.example.test/u0a208 for activity tw.com.xxxx.android.yyyy/com.example.test.ui.MainActivity'}))
self.assertTrue(profile.include({'pid' : '26360', 'message' : 'foo bar'}))
def test_empty_package_will_still_work(self):
profile = Profile(name = 'package_filt')
self.assertTrue(profile.include({'message' : 'Start proc com.example.test for activity tw.com.xxxx.android.yyyy/.333Activity: pid=123456 uid=10105 gids={3003}'}))
|
154421
|
from pathlib import Path
from typing import Tuple
import numpy as np
import pandas as pd
import torch
import torch.nn.functional as F
import torchaudio
from constants import INPUT_SAMPLE_RATE, TARGET_SAMPLE_RATE
from torch.utils.data import DataLoader, Dataset
from tqdm import tqdm
class SegmentationDataset(Dataset):
"""Base class for FixedSegmentationDataset and RandomSegmentationDataset"""
def __init__(
self,
path_to_dataset: str,
split_name: str,
) -> None:
"""
Args:
path_to_dataset (str): absolute path to the directory
of _talks.tsv and _segments.tsv for the dataset
split_name (str): name of the dataset split
"""
super().__init__()
self.path_to_dataset = Path(path_to_dataset)
self.split_name = split_name
self.input_sr = INPUT_SAMPLE_RATE
self.target_sr = TARGET_SAMPLE_RATE
self.in_trg_ratio = self.input_sr / self.target_sr
self.trg_in_ratio = 1 / self.in_trg_ratio
# load the talks and the actual segments
self.talks_df = pd.read_csv(
self.path_to_dataset / f"{self.split_name}_talks.tsv", sep="\t", index_col=0
)
self.segments_df = pd.read_csv(
self.path_to_dataset / f"{self.split_name}_segments.tsv",
sep="\t",
index_col=0,
)
self.columns = ["talk_id", "start", "end", "duration", "included"]
# to calculate percentage of positive examples
self.n_pos, self.n_all = 0, 0
def _secs_to_outframes(self, x):
# from seconds to output space
return np.round(x * self.target_sr).astype(int)
def _outframes_to_inframes(self, x):
# from output space to input space
return np.round(x * self.in_trg_ratio).astype(int)
def _inframes_to_outframes(self, x):
# from input space to output space
return np.round(x * self.trg_in_ratio).astype(int)
def _secs_to_inframes(self, x):
# from seconds to input space
return np.round(x * self.input_sr).astype(int)
def _get_targets_for_talk(self, sgm_df: pd.DataFrame, talk_id: str) -> pd.DataFrame:
"""
Given a segmentation of a talk (sgm_df), find for
each random segment the true_starts and true_ends that it includes.
They are in string form, separated by commas.
If there are none, the string 'NA' is assigned.
Args:
sgm_df (pd.DataFrame): a random segmentation of a wav
talk_id (str): unique id for the wav
Returns:
pd.DataFrame: sgm_df but with the 'included' column completed
"""
true_sgm_df = self.segments_df.loc[self.segments_df.talk_id == talk_id]
talk_targets = np.zeros(
self.talks_df.loc[self.talks_df.id == talk_id, "total_frames"].values[0]
)
for idx, sgm in true_sgm_df.iterrows():
talk_targets[sgm.start : sgm.end] = 1
for idx, sgm in sgm_df.iterrows():
sgm_targets = self._get_targets_for_segment(
talk_targets[sgm.start : sgm.end]
)
sgm_df.loc[idx, "included"] = (
",".join([f"{s}:{e}" for s, e in sgm_targets]) if sgm_targets else "NA"
)
return sgm_df
def _get_targets_for_segment(self, true_points: np.array) -> list[list[int]]:
"""
Extracts the start and end points of segments in the output space
from a binary vector defining the labels in the input space
Args:
true_points (np.array):
binary label for each frame in the input space of a random segment
Returns:
list[list[int]]: list of tuples (start, end) in the output space
where each tuple defines the start and end of the true included points
"""
points_of_change = list(np.where(true_points[1:] != true_points[:-1])[0] + 1)
targets = []
for s, e in zip([0] + points_of_change, points_of_change + [len(true_points)]):
if true_points[s] == 1:
s = self._inframes_to_outframes(s)
e = self._inframes_to_outframes(e)
# increase start of next segment if overlaps with end of the prev one
if targets and s <= targets[-1][-1]:
s += 1
targets.append([s, e])
self.n_pos += e - s
self.n_all += self._inframes_to_outframes(len(true_points))
return targets
def _construct_target(self, segment: pd.Series) -> torch.FloatTensor:
"""
Given a random segment, constructs its one-hot target tensor in the output space
"""
target_len = self._inframes_to_outframes(segment.duration)
target = torch.zeros(target_len, dtype=torch.float)
if segment.included != "NA":
for s_e in segment.included.split(","):
s, e = s_e.split(":")
s = int(s)
e = min(int(e), target_len + 1)
target[s:e] = 1
return target
class FixedSegmentationDataset(SegmentationDataset):
def __init__(
self,
path_to_dataset: str,
split_name: str,
segment_length_secs: int = 20,
inference_times: int = 1,
) -> None:
"""
Segmentation dataset to be used during inference
Creates a pool of examples from a fixed-length segmentation of a wav
Args:
path_to_dataset (str): absolute path to the directory
of _talks.tsv and _segments.tsv for the dataset
split_name (str): name of the dataset split
segment_length_secs (int, optional):
The length of the fixed segments in seconds. Defaults to 20.
inference_times (int, optional):
How many times to perform inference on different fixed-length segmentations.
Defaults to 1.
"""
super().__init__(path_to_dataset, split_name)
self.segment_length_inframes = self._secs_to_inframes(segment_length_secs)
self.inference_times = inference_times
def generate_fixed_segments(self, talk_id: str, i: int) -> None:
"""
Generates a fixed-length segmentation of a wav
with "i" controlling the begining of the segmentation
so that different values of "i" produce different segmentations
Args:
talk_id (str): unique wav identifier
i (int): indicates the current inference time
and is used to produce a different fixed-length segmentation
minimum allowed is 0 and maximum allowed is inference_times - 1
"""
talk_info = self.talks_df.loc[self.talks_df["id"] == talk_id]
self.talk_path = talk_info["path"].values[0]
self.duration_outframes = self._inframes_to_outframes(
self.talks_df.loc[self.talks_df["id"] == talk_id, "total_frames"].values[0]
)
self.duration_inframes = int(talk_info["total_frames"])
self.fixed_segments_df = pd.DataFrame(columns=self.columns)
start = round(self.segment_length_inframes / self.inference_times * i)
if start > self.duration_inframes:
start = 0
segmentation = np.arange(
start, self.duration_inframes, self.segment_length_inframes
).astype(int)
if segmentation[0] != 0:
segmentation = np.insert(segmentation, 0, 0)
if segmentation[-1] != self.duration_inframes:
if self.duration_inframes - segmentation[-1] < self._secs_to_inframes(2):
segmentation[-1] = self.duration_inframes
else:
segmentation = np.append(segmentation, self.duration_inframes)
self.fixed_segments_df["talk_id"] = talk_id
self.fixed_segments_df["start"] = segmentation[:-1]
self.fixed_segments_df["end"] = segmentation[1:]
self.fixed_segments_df["duration"] = (
self.fixed_segments_df.end - self.fixed_segments_df.start
)
# fill-in targets
self.fixed_segments_df = self._get_targets_for_talk(
self.fixed_segments_df, talk_id
)
def __len__(self) -> int:
return len(self.fixed_segments_df)
def __getitem__(
self, index: int
) -> Tuple[torch.FloatTensor, torch.FloatTensor, int, int]:
"""
Loads the data for this fixed-length segment
Args:
index (int): segment id in the self.fixed_segments_df
Returns:
Tuple[torch.FloatTensor, torch.FloatTensor, int, int]:
0: waveform of the segment (input space)
1: target tensor of the segment (output space)
2: starting frame of the segment (output space)
3: ending frame of the segment (output space)
"""
segment = self.fixed_segments_df.iloc[index]
waveform, _ = torchaudio.backend.sox_io_backend.load(
self.talk_path, frame_offset=segment.start, num_frames=segment.duration
)
start = self._inframes_to_outframes(segment.start + 1e-6)
end = self._inframes_to_outframes(segment.end + 1e-6)
target = self._construct_target(segment)
return waveform[0], target, start, end
class RandomSegmentationDataset(SegmentationDataset):
def __init__(
self,
path_to_dataset: str,
split_name: str = "train",
segment_length_secs: int = 20,
seed: int = None,
) -> None:
"""
Segmentation dataset to be used during training.
Creates a pool of examples from a random segmentation of collection of wavs
Args:
path_to_dataset (str): absolute path to the directory
of _talks.tsv and _segments.tsv for the dataset
split_name (str): name of the dataset split. Defaults to train.
segment_length_secs (int, optional):
The length of the fixed segments in seconds. Defaults to 20.
seed (int, optional): The random seed to be used for the random segmentation.
Defaults to None
"""
super().__init__(path_to_dataset, split_name)
if seed is not None:
np.random.seed(seed)
self.segment_length_outframes = self._secs_to_outframes(segment_length_secs)
self.max_segment_outframes_overlap = self._secs_to_outframes(
segment_length_secs / 10
)
self.segment_length_inframes = self._secs_to_inframes(segment_length_secs)
# populate the dataset
self.generate_random_segments()
self.pos_class_percentage = self.n_pos / self.n_all
def generate_random_segments(self) -> None:
"""
Creates a new dataset by randomly segmenting each talk
and finding the true targets that correspond to every random segment
"""
print(
f"Generating random segments for {self.path_to_dataset} and {self.split_name} split ..."
)
self.random_segments_df = pd.concat(
[
self._get_targets_for_talk(self._segment_talk(talk), talk["id"])
for _, talk in tqdm(self.talks_df.iterrows())
],
ignore_index=True,
)
def _segment_talk(self, talk: pd.Series) -> pd.DataFrame:
"""
Produces a random segmentation of a given talk from the talks_df
"""
rnd_sgm_df = pd.DataFrame(columns=self.columns)
# sample the segment starts in the output space, then convert back to input frames
start_range = np.arange(
0,
self._inframes_to_outframes(talk["total_frames"]),
step=self.segment_length_outframes - self.max_segment_outframes_overlap,
)
start_range = start_range - np.random.randint(
0, self.max_segment_outframes_overlap, size=len(start_range)
)
start_range = self._outframes_to_inframes(start_range)
rnd_sgm_df[["start", "end"]] = [
(
max(0, start),
min(start + self.segment_length_inframes, talk["total_frames"]),
)
for start in start_range
]
rnd_sgm_df["duration"] = rnd_sgm_df["end"] - rnd_sgm_df["start"]
rnd_sgm_df["talk_id"] = talk["id"]
return rnd_sgm_df
def __len__(self) -> int:
return len(self.random_segments_df)
def __getitem__(
self, index: int
) -> Tuple[torch.FloatTensor, torch.FloatTensor, int]:
"""
Loads the data for this example of a random segment
Args:
index (int): the index of the random segment in the random_segments_df
Returns:
Tuple[torch.FloatTensor, torch.FloatTensor, int, int]:
0: waveform of the segment (input space)
1: target tensor of the segment (output space)
2: starting frame of the segment (output space)
3: ending frame of the segment (output space)
"""
segment = self.random_segments_df.iloc[index]
talk_path = self.talks_df.loc[
self.talks_df.id == segment.talk_id, "path"
].values[0]
# get input
waveform, _ = torchaudio.backend.sox_io_backend.load(
talk_path, frame_offset=segment.start, num_frames=segment.duration
)
target = self._construct_target(segment)
start = self._inframes_to_outframes(segment.start + 1e-6)
end = self._inframes_to_outframes(segment.end + 1e-6)
return waveform[0], target, start, end
class MultRandomSegmentationDataset(RandomSegmentationDataset):
def __init__(
self,
dataset_paths: list[str],
splits: list[str],
segment_length_secs: int = 20,
seed: int = None,
) -> None:
"""
Segmentation dataset to be used during multilingual training.
Creates a pool of examples by randomly segmenting many wav collections
Args:
dataset_paths (list[str]): absolute paths to the directories
of _talks.tsv and _segments.tsv for each dataset
splits (list[str]): names of the dataset splits, one per dataset path
segment_length_secs (int, optional):
The length of the fixed segments in seconds. Defaults to 20.
seed (int, optional): The random seed to be used for the random segmentation.
Defaults to None
"""
# init data variables
self.random_segments_df_parent = pd.DataFrame()
self.talks_df_parent = pd.DataFrame()
self.segments_df_parent = pd.DataFrame()
self.n_pos_parent, self.n_all_parent = 0, 0
# iteratively populate the dataset
for dataset_path, split in zip(dataset_paths, splits):
super().__init__(dataset_path, split, segment_length_secs, seed)
self.random_segments_df_parent = pd.concat(
[self.random_segments_df_parent, self.random_segments_df],
ignore_index=True,
)
self.talks_df_parent = pd.concat(
[self.talks_df_parent, self.talks_df], ignore_index=True
)
self.segments_df_parent = pd.concat(
[self.segments_df_parent, self.segments_df], ignore_index=True
)
self.n_pos_parent += self.n_pos
self.n_all_parent += self.n_all
self.pos_class_percentage = self.n_pos_parent / self.n_all_parent
self.random_segments_df = self.random_segments_df_parent
self.talks_df = self.talks_df_parent
self.segments_df = self.segments_df_parent
class FixedSegmentationDatasetNoTarget(Dataset):
def __init__(
self,
path_to_wav: str,
segment_length: int = 20,
inference_times: int = 1,
) -> None:
"""[summary]
Args:
path_to_wavs (str): [description]
segment_length (int, optional): [description]. Defaults to 20.
inference_times (int, optional): [description]. Defaults to 1.
"""
super().__init__()
self.input_sr = INPUT_SAMPLE_RATE
self.target_sr = TARGET_SAMPLE_RATE
self.in_trg_ratio = self.input_sr / self.target_sr
self.trg_in_ratio = 1 / self.in_trg_ratio
self.segment_length_inframes = self._secs_to_inframes(segment_length)
self.inference_times = inference_times
self.path_to_wav = path_to_wav
self.duration_inframes = torchaudio.info(self.path_to_wav).num_frames
self.duration_outframes = self._inframes_to_outframes(self.duration_inframes)
self.sample_rate = torchaudio.info(self.path_to_wav).sample_rate
assert (
self.sample_rate == self.input_sr
), f"Audio needs to have sample rate of {self.input_sr}"
def _inframes_to_outframes(self, x):
# from input space to output space
return np.round(x * self.trg_in_ratio).astype(int)
def _secs_to_inframes(self, x):
# from seconds to input space
return np.round(x * self.input_sr).astype(int)
def fixed_length_segmentation(self, i: int) -> None:
"""
Generates a fixed-length segmentation of a wav
with "i" controlling the begining of the segmentation
so that different values of "i" produce different segmentations
Args:
talk_id (str): unique wav identifier
i (int): indicates the current inference time
and is used to produce a different fixed-length segmentation
minimum allowed is 0 and maximum allowed is inference_times - 1
"""
start = round(self.segment_length_inframes / self.inference_times * i)
if start > self.duration_inframes:
start = 0
segmentation = np.arange(
start, self.duration_inframes, self.segment_length_inframes
).astype(int)
if segmentation[0] != 0:
segmentation = np.insert(segmentation, 0, 0)
if segmentation[-1] != self.duration_inframes:
if self.duration_inframes - segmentation[-1] < self._secs_to_inframes(2):
segmentation[-1] = self.duration_inframes
else:
segmentation = np.append(segmentation, self.duration_inframes)
self.starts = segmentation[:-1]
self.ends = segmentation[1:]
def __len__(self) -> int:
return len(self.starts)
def __getitem__(
self, index: int
) -> Tuple[torch.FloatTensor, torch.FloatTensor, int, int]:
"""
Loads the data for this fixed-length segment
Args:
index (int): index of the segment in the fixed length segmentation
Returns:
Tuple[torch.FloatTensor, torch.FloatTensor, int, int]:
0: waveform of the segment (input space)
1: None for consistency with datasets that have targets
2: starting frame of the segment (output space)
3: ending frame of the segment (output space)
"""
waveform, _ = torchaudio.backend.sox_io_backend.load(
self.path_to_wav,
frame_offset=self.starts[index],
num_frames=self.ends[index] - self.starts[index],
)
start = self._inframes_to_outframes(self.starts[index] + 1e-6)
end = self._inframes_to_outframes(self.ends[index] + 1e-6)
return waveform[0], None, start, end
class RandomDataloaderGenerator:
def __init__(
self,
dataset_roots: str,
batch_size: int,
split_name: str,
num_workers: int = 0,
segment_length: int = 20,
) -> None:
"""
Helper object to be used in each epoch of training
to produce a different random segmentation of the training data
Args:
dataset_roots (str): absolute path to the directory
of _talks.tsv and _segments.tsv for the dataset
batch_size (int): training batch size (in number of examples)
split_name (str): the name of the dataset split
num_workers (int, optional): number of workers for the dataloader.
Defaults to 0.
segment_length (int, optional):
Length of the segments (in seconds) to be produced during the random segmentation.
Defaults to 20.
"""
self.dataset_roots = dataset_roots
self.num_workers = num_workers
self.split_name = split_name
self.batch_size = batch_size
# for the multilingual training, dataset_roots is comma separated
if "," in self.dataset_roots:
self.is_mult = True
else:
self.is_mult = False
self.segment_length = segment_length
self.max_seed = 2 ** 32 - 1
def generate(self) -> DataLoader:
"""
Generates a random segmentation of the entire dataset
and returns a dataloader object for it
"""
if self.is_mult:
dataset = MultRandomSegmentationDataset(
self.dataset_roots.split(","),
self.split_name.split(","),
segment_length_secs=self.segment_length,
seed=np.random.randint(0, self.max_seed),
)
else:
dataset = RandomSegmentationDataset(
self.dataset_roots,
self.split_name,
segment_length_secs=self.segment_length,
seed=np.random.randint(0, self.max_seed),
)
dataloader = DataLoader(
dataset,
batch_size=self.batch_size,
collate_fn=segm_collate_fn,
num_workers=self.num_workers,
shuffle=True,
)
return dataloader
class FixedDataloaderGenerator:
def __init__(
self,
dataset_root: str,
batch_size: int,
split_name: str,
num_workers: int = 0,
segment_length: int = 20,
inference_times: int = 1,
) -> None:
"""
Helper object to be used during inference in order to generate the
fixed-length segmentations of a wav collection
Args:
dataset_roots (str): absolute path to the directory
of _talks.tsv and _segments.tsv for the dataset
batch_size (int): training batch size (in number of examples)
split_name (str): the name of the dataset split
num_workers (int, optional): number of workers for the dataloader.
Defaults to 0.
segment_length (int, optional):
Length of the segments (in seconds) to be produced during the random segmentation.
Defaults to 20.
inference_times (int, optional):
The number of different fixed-length segmentations to produce
from each wav. Defaults to 1.
"""
self.batch_size = batch_size
self.num_workers = num_workers
self.lang_pair = Path(dataset_root).name
self.dataset = FixedSegmentationDataset(
dataset_root,
split_name,
segment_length_secs=segment_length,
inference_times=inference_times,
)
def generate(self, talk_id: str, i: int) -> DataLoader:
"""
Generates a fixed segmentation of a specific talk_id.
The iteration (<= inference_times) controls the points of the fixed segmentation
to introduce different overlaps. Returns a dataloder for this dataset.
Args:
talk_id (str): unique wav id
i (int): iteration in (0, inference_times)
Returns:
DataLoader: a torch dataloader based on a FixedSegmentationDataset
"""
self.dataset.generate_fixed_segments(talk_id, i)
dataloder = DataLoader(
self.dataset,
batch_size=self.batch_size,
num_workers=self.num_workers,
drop_last=False,
shuffle=False,
collate_fn=segm_collate_fn,
)
return dataloder
def get_talk_ids(self) -> list:
return self.dataset.talks_df["id"].tolist()
def segm_collate_fn(
batch: list,
) -> Tuple[
torch.FloatTensor,
torch.FloatTensor,
torch.LongTensor,
torch.BoolTensor,
list[bool],
list[int],
list[int],
]:
"""
(inference) collate function for the dataloader of the SegmentationDataset
Args:
batch (list): list of examples from SegmentationDataset
Returns:
Tuple[ torch.FloatTensor, torch.FloatTensor, torch.LongTensor, torch.BoolTensor, list[bool], list[int], list[int], ]:
0: 2D tensor, padded and normalized waveforms for each random segment
1: 2D tensor, binary padded targets for each random segment (output space)
2: 2D tensor, binary mask for wav2vec 2.0 (input space)
3: 2D tensor, binary mask for audio-frame-classifier (output space)
4: a '0' indicates that the whole example is empty (torch.zeros)
5: the start frames of the segments (output space)
6: the end frames of the segments (output space)
"""
included = [bool(example[0].sum()) for example in batch]
starts = [example[2] for example in batch]
ends = [example[3] for example in batch]
# sequence lengths
in_seq_len = [len(example[0]) for example in batch]
out_seq_len = [end - start for start, end in zip(starts, ends)]
bs = len(in_seq_len)
# pad and concat
audio = torch.cat(
[
F.pad(example[0], (0, max(in_seq_len) - len(example[0]))).unsqueeze(0)
for example in batch
]
)
# check if the batch contains also targets
if batch[0][1] is not None:
target = torch.cat(
[
F.pad(example[1], (0, max(out_seq_len) - len(example[1]))).unsqueeze(0)
for example in batch
]
)
else:
target = None
# normalize input
# only for inputs that have non-zero elements
included_ = torch.tensor(included).bool()
audio[included_] = (
audio[included_] - torch.mean(audio[included_], dim=1, keepdim=True)
) / torch.std(audio[included_], dim=1, keepdim=True)
# get masks
in_mask = torch.ones(audio.shape, dtype=torch.long)
out_mask = torch.ones([bs, max(out_seq_len)], dtype=torch.bool)
for i, in_sl, out_sl in zip(range(bs), in_seq_len, out_seq_len):
in_mask[i, in_sl:] = 0
out_mask[i, out_sl:] = 0
return (audio, target, in_mask, out_mask, included, starts, ends)
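# Hedged example (added): the *_inframes / *_outframes helpers above are rounded
# rescalings between the raw-audio sample rate and the classifier's output frame
# rate. The concrete rates come from `constants`; the values below (16 kHz input,
# 50 output frames per second) are assumptions for illustration only and may
# differ from the project's actual INPUT_SAMPLE_RATE and TARGET_SAMPLE_RATE.
if __name__ == "__main__":
    assumed_input_sr = 16_000  # hypothetical raw-audio sample rate
    assumed_target_sr = 50     # hypothetical output frames per second
    in_trg_ratio = assumed_input_sr / assumed_target_sr
    secs = 20
    inframes = int(np.round(secs * assumed_input_sr))   # 320000 input samples
    outframes = int(np.round(inframes / in_trg_ratio))  # 1000 output frames
    print(f"{secs}s -> {inframes} input frames -> {outframes} output frames")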
|
154434
|
import torch.nn as nn
import math
import torch
from collections import namedtuple
from maskrcnn_benchmark.layers import FrozenBatchNorm2d
# s0 = top layer idx
# name = sub op name
# s1 = sub layer idx
GraphPath = namedtuple("GraphPath", ['s0', 'name', 's1']) #
def conv_bn(inp, oup, stride, norm_func):
return nn.Sequential(
nn.Conv2d(inp, oup, 3, stride, 1, bias=False),
# nn.BatchNorm2d(oup),
norm_func(oup),
nn.ReLU6(inplace=True)
)
def conv_1x1_bn(inp, oup, norm_func):
return nn.Sequential(
nn.Conv2d(inp, oup, 1, 1, 0, bias=False),
# nn.BatchNorm2d(oup),
norm_func(oup),
nn.ReLU6(inplace=True)
)
class InvertedResidual(nn.Module):
def __init__(self, inp, oup, stride, expand_ratio, norm_func):
super(InvertedResidual, self).__init__()
self.stride = stride
assert stride in [1, 2]
hidden_dim = round(inp * expand_ratio)
self.use_res_connect = self.stride == 1 and inp == oup
if expand_ratio == 1:
self.conv = nn.Sequential(
# dw
nn.Conv2d(hidden_dim, hidden_dim, 3, stride, 1, groups=hidden_dim, bias=False),
# nn.BatchNorm2d(hidden_dim),
norm_func(hidden_dim),
nn.ReLU6(inplace=True),
# pw-linear
nn.Conv2d(hidden_dim, oup, 1, 1, 0, bias=False),
# nn.BatchNorm2d(oup),
norm_func(oup),
)
else:
self.conv = nn.Sequential(
# pw
nn.Conv2d(inp, hidden_dim, 1, 1, 0, bias=False),
# nn.BatchNorm2d(hidden_dim),
norm_func(hidden_dim),
nn.ReLU6(inplace=True),
# dw
nn.Conv2d(hidden_dim, hidden_dim, 3, stride, 1, groups=hidden_dim, bias=False),
# nn.BatchNorm2d(hidden_dim),
norm_func(hidden_dim),
nn.ReLU6(inplace=True),
# pw-linear
nn.Conv2d(hidden_dim, oup, 1, 1, 0, bias=False),
# nn.BatchNorm2d(oup),
norm_func(oup),
)
def forward(self, x):
if self.use_res_connect:
return x + self.conv(x)
else:
return self.conv(x)
class CrossEntropyLabelSmooth(nn.Module):
def __init__(self, num_classes, epsilon=0.1):
super(CrossEntropyLabelSmooth, self).__init__()
self.num_classes = num_classes
self.epsilon = epsilon
self.logsoftmax = nn.LogSoftmax(dim=1)
def forward(self, inputs, targets):
log_probs = self.logsoftmax(inputs)
targets = torch.zeros_like(log_probs).scatter_(1, targets.unsqueeze(1), 1)
targets = (1 - self.epsilon) * targets + self.epsilon / self.num_classes
loss = (-targets * log_probs).mean(0).sum()
return loss
featuremap_indexes = [
GraphPath(3, 'conv', 3), # stride = 4 chn = 144
GraphPath(6, 'conv', 3), # stride = 8 chn = 192
GraphPath(13, 'conv', 3), # stride = 16 chn = 576
GraphPath(17, 'conv', 3), # stride = 32 chn = 1280
]
class MobileNetV2(nn.Module):
def __init__(self, cfg, n_class=1000, input_size=224, width_mult=1., smooth=False):
super(MobileNetV2, self).__init__()
block = InvertedResidual
input_channel = 32
last_channel = 1280
interverted_residual_setting = [
# t, c, n, s
[1, 16, 1, 1],
[6, 24, 2, 2], # stride = 4
[6, 32, 3, 2], # stride = 8
[6, 64, 4, 2], # stride = 16
[6, 96, 3, 1],
[6, 160, 3, 2], # stride = 32
[6, 320, 1, 1],
]
norm_func = nn.BatchNorm2d if cfg.MODEL.MOBILENET.FROZEN_BN == False else FrozenBatchNorm2d
# building first layer
assert input_size % 32 == 0
input_channel = int(input_channel * width_mult)
self.last_channel = int(last_channel * width_mult) if width_mult > 1.0 else last_channel
self.features = [conv_bn(3, input_channel, 2, norm_func)]
# building inverted residual blocks
for t, c, n, s in interverted_residual_setting:
output_channel = int(c * width_mult)
for i in range(n):
if i == 0:
self.features.append(block(input_channel, output_channel, s, expand_ratio=t, norm_func=norm_func))
else:
self.features.append(block(input_channel, output_channel, 1, expand_ratio=t, norm_func=norm_func))
input_channel = output_channel
# building last several layers
self.features.append(conv_1x1_bn(input_channel, self.last_channel, norm_func))
# make it nn.Sequential
self.features = nn.Sequential(*self.features)
# building classifier
self.classifier = nn.Sequential(
# nn.Dropout(0.2),
nn.Linear(self.last_channel, n_class),
)
# self.criterion = CrossEntropyLabelSmooth(n_class)
self.criterion = nn.CrossEntropyLoss() if not smooth else CrossEntropyLabelSmooth(n_class)
self._initialize_weights()
# Optionally freeze (requires_grad=False) parts of the backbone
self._freeze_backbone(cfg.MODEL.BACKBONE.FREEZE_CONV_BODY_AT)
def _freeze_backbone(self, freeze_at):
if freeze_at < 0:
return
for layer_index in range(freeze_at):
for p in self.features[layer_index].parameters():
print(p.size())
p.requires_grad = False
# def forward(self, x, target):
# x = self.features(x)
# x = x.mean(3).mean(2)
# logits = self.classifier(x)
# loss = self.criterion(logits, target)
# return logits, loss.unsqueeze(0)
def forward(self, x):
outputs = []
fm_idx = 0
for index, layer in enumerate(self.features):
# print(index, fm_idx)
if fm_idx < len(featuremap_indexes) and index == featuremap_indexes[fm_idx].s0:
sub = getattr(layer, featuremap_indexes[fm_idx].name)
for layer in sub[:featuremap_indexes[fm_idx].s1]:
x = layer(x)
y = x
for layer in sub[featuremap_indexes[fm_idx].s1:]:
x = layer(x)
fm_idx+=1
# print(y.size())
outputs.append(y)
else:
x = layer(x)
# print(x.size())
# # add last layer
# outputs.append(x)
return outputs
def _initialize_weights(self):
for m in self.modules():
if isinstance(m, nn.Conv2d):
n = m.kernel_size[0] * m.kernel_size[1] * m.out_channels
m.weight.data.normal_(0, math.sqrt(2. / n))
if m.bias is not None:
m.bias.data.zero_()
elif isinstance(m, nn.BatchNorm2d):
m.weight.data.fill_(1)
m.bias.data.zero_()
elif isinstance(m, nn.Linear):
n = m.weight.size(1)
m.weight.data.normal_(0, 0.01)
m.bias.data.zero_()
|
154449
|
from finviz.config import connection_settings
class NoResults(Exception):
""" Raise when there are no results found. """
def __init__(self, query):
super(NoResults, self).__init__(f"No results found for query: {query}")
class InvalidTableType(Exception):
""" Raise when the given table type is invalid. """
def __init__(self, arg):
super(InvalidTableType, self).__init__(f"Invalid table type called: {arg}")
class TooManyRequests(Exception):
""" Raise when HTTP request fails because too many requests were sent to FinViz at once. """
def __init__(self, arg):
super(TooManyRequests, self).__init__(f"Too many HTTP requests at once: {arg}")
class InvalidPortfolioID(Exception):
""" Raise when the given portfolio id is invalid. """
def __init__(self, portfolio_id):
super(InvalidPortfolioID, self).__init__(
f"Invalid portfolio with ID: {portfolio_id}"
)
class NonexistentPortfolioName(Exception):
""" Raise when the given portfolio name is nonexistent. """
def __init__(self, name):
super(NonexistentPortfolioName, self).__init__(
f"Nonexistent portfolio with name: {name}"
)
class NoPortfolio(Exception):
""" Raise when the user has not created a portfolio. """
def __init__(self, func_name):
super(NoPortfolio, self).__init__(
f"Function ({func_name}) cannot be called because "
"there is no existing portfolio."
)
class InvalidTicker(Exception):
""" Raise when the given ticker is nonexistent or unavailable on FinViz. """
def __init__(self, ticker):
super(InvalidTicker, self).__init__(
f"Unable to find {ticker} since it is non-existent or unavailable on FinViz."
)
class ConnectionTimeout(Exception):
""" The request has timed out while trying to connect to the remote server. """
def __init__(self, webpage_link):
super(ConnectionTimeout, self).__init__(
f'Connection timed out after {connection_settings["CONNECTION_TIMEOUT"]} while trying to reach {webpage_link}'
)
|
154471
|
import allure
from pages.web_page import WebPage
from pages.web_elements import *
class ArticlePage(WebPage):
def title(self): return el(self.page, selector='.container > h1')
def author_link(self): return el(self.page, selector='.author')
def subject(self): return el(self.page, selector='div[class*="article-content"] h1')
def publish_button(self): return el(self.page, selector='text="Publish Article"')
def tags_field(self): return el(self.page, selector='input[placeholder="Enter tags"]')
def __init__(self, base_url, article_id, page: Page):
super().__init__(page)
self.base_url = base_url
self.article_id = article_id
@allure.step
def open(self):
self.page.goto("%s/#/article/%s" % self.base_url, self.article_id, wait_until="load")
return self
|
154487
|
from requests.auth import HTTPBasicAuth
def apply_updates(doc, update_dict):
# updates the doc with items from the dict
# returns whether or not any updates were made
should_save = False
for key, value in update_dict.items():
if getattr(doc, key, None) != value:
setattr(doc, key, value)
should_save = True
return should_save
class EndpointMixin(object):
@classmethod
def from_config(cls, config):
return cls(config.url, config.username, config.password)
def _auth(self):
return HTTPBasicAuth(self.username, self.password)
def _urlcombine(self, base, target):
return '{base}{target}'.format(base=base, target=target)
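# Hedged usage sketch (added): EndpointMixin.from_config only needs an object that
# exposes url, username and password attributes. The namedtuple and the Endpoint
# subclass below are illustrative assumptions, not part of any real client here.
if __name__ == "__main__":
    from collections import namedtuple

    Config = namedtuple("Config", ["url", "username", "password"])

    class Endpoint(EndpointMixin):
        def __init__(self, url, username, password):
            self.url = url
            self.username = username
            self.password = password

    ep = Endpoint.from_config(Config("https://example.invalid/api", "user", "pass"))
    print(ep._urlcombine(ep.url, "/status"))  # https://example.invalid/api/status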
|
154495
|
import unittest
from IDM import IDM, IDMAuto
from Constants import *
from LaneChange import LaneChange
from Cars import *
from copy import copy, deepcopy
from CarFactory import *
from Street import *
class MyTestCase(unittest.TestCase):
def test_IDM(self):
# using the initial value of Cars
idm = IDM(112.65 / 3.6, 0.5, 3.0, 3.0, 1.5)
self.assertAlmostEqual(idm.veq_table[4], 0.6543521725647148)
self.assertAlmostEqual(idm.veq_table[20], 10.690092856310587)
self.assertAlmostEqual(idm.veq_table[90], 28.89634917614212)
def test_LaneChange(self):
lc = LaneChange(0.2, 0.3)
def test_Car(self):
lc = LaneChange(0.2, 0.3)
model = IDM(112.65 / 3.6, 0.5, 3.0, 3.0, 1.5)
car1 = Car(0, 10, 0, model, lc, 5)
car2 = copy(car1)
def test_BCCar(self):
model = IDM(112.65 / 3.6, 0.5, 3.0, 3.0, 1.5)
bc = BCCar(0, 10, 0, model, 0)
bc.lane_change = "fuck"
self.assertEqual(bc.lane_change, None)
def test_CarFactory(self):
cf = CarFactory(0.5, 0.5)
car = cf.create_vehicle(1,10,0)
def test_StreetRamp(self):
cf = CarFactory(0.5, 0.5)
road = StreetRamp(3, 10000, cf, 0.2)
for _ in range(100):
road.update(10)
road.report()
# road.assertion()
def test_StreetAuto(self):
road = StreetAuto(3, 10000, 0.2, 0.2)
for _ in range(100):
road.update(10)
road.report()
# road.assertion()
# TODO: there's a situation where human cars may crash with high probability
def test_Street(self):
cf = CarFactory(0.5, 0.5)
road = Street(3, 10000, cf)
road.dt = 0.5
for _ in range(100):
road.update(10)
road.assertion()
if __name__ == '__main__':
unittest.main()
|
154506
|
import os
import json
import re
import subprocess
import sys
import logging
from datetime import datetime
def install(package):
subprocess.check_call([sys.executable, "-m", "pip", "install", package])
install('boto3')
import boto3
LOG_LEVEL = os.getenv("LOG_LEVEL", "INFO").upper()
logger = logging.getLogger()
logger.setLevel(LOG_LEVEL)
region = os.environ.get('Region', 'NoAWSRegionFound')
pipeline_name = os.environ.get('PipelineName', 'NoPipelineNameFound')
cloudwatch = boto3.client("cloudwatch", region)
def get_baseline_drift(feature):
if "violations" in feature:
for violation in feature["violations"]:
if violation["constraint_check_type"] == "baseline_drift_check":
desc = violation["description"]
matches = re.search("distance: (.+) exceeds threshold: (.+)", desc)
if matches:
yield {
"metric_name": f'feature_baseline_drift_{violation["feature_name"]}',
"metric_value": float(matches.group(1)),
"metric_threshold": float(matches.group(2)),
}
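# Added note: get_baseline_drift expects the constraint_violations.json structure it
# indexes above, i.e. roughly
#   {"violations": [{"feature_name": "age",
#                    "constraint_check_type": "baseline_drift_check",
#                    "description": "... distance: 0.3 exceeds threshold: 0.1"}]}
# and yields one metric dict per matching violation, e.g.
#   {"metric_name": "feature_baseline_drift_age", "metric_value": 0.3,
#    "metric_threshold": 0.1}.
# The feature name and numbers are illustrative; only the
# "distance: ... exceeds threshold: ..." fragment matched by the regex matters.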
def put_cloudwatch_metric(pipeline_name: str, metrics: list):
for m in metrics:
logger.info(f'Putting metric: {m["metric_name"]} value: {m["metric_value"]}')
response = cloudwatch.put_metric_data(
Namespace="aws/sagemaker/ModelBuildingPipeline/data-metrics",
MetricData=[
{
"MetricName": m["metric_name"],
"Dimensions": [{"Name": "PipelineName", "Value": pipeline_name}],
"Timestamp": datetime.now(),
"Value": m["metric_value"],
"Unit": "None",
},
],
)
logger.debug(response)
def postprocess_handler():
violations_file = "/opt/ml/processing/output/constraint_violations.json"
if os.path.isfile(violations_file):
f = open(violations_file)
violations = json.load(f)
metrics = list(get_baseline_drift(violations))
put_cloudwatch_metric(pipeline_name, metrics)
logger.info("Violation detected and added to cloudwatch")
else:
logger.info("No constraint_violations file found. All good!")
|
154511
|
from torchsampler.__about__ import * # noqa: F401 F403
from torchsampler.imbalanced import ImbalancedDatasetSampler
__all__ = [
'ImbalancedDatasetSampler',
]
|
154528
|
from django.db import models
# Create your models here.
from myuser.models import TemplateUser
class Event(models.Model):
description = models.CharField(max_length=300, blank=True,null=True, unique=False)
title = models.CharField(max_length=300, blank=False, null=False, unique=False)
enabled = models.BooleanField(null=True)
followers = models.ManyToManyField(TemplateUser, blank=True)
signifance_level= models.IntegerField(blank=True,null=True,unique=False,default=0)
|
154556
|
import os
import sys
import pytest
sys.path.insert(0, os.path.abspath(os.path.dirname(__file__)))
from base import TestBaseClass
class TestClassOelintVarsFileSettingsDouble(TestBaseClass):
@pytest.mark.parametrize('id', ['oelint.vars.filessetting.double'])
@pytest.mark.parametrize('occurrence', [1])
@pytest.mark.parametrize('input',
[
{
'oelint_adv_test.bb':
'''
FILES_${PN} += "${bindir}"
FILES_${PN}-ping = "${base_bindir}/ping.${BPN}"
'''
},
{
'oelint_adv_test.bbappend':
'''
FILES_${PN} += "${bindir}"
FILES_${PN}-ping = "${base_bindir}/ping.${BPN}"
'''
},
{
'oelint_adv_test.bb':
'''
FILES_${PN} += "${bindir}"
'''
},
{
'oelint_adv_test.bb':
'''
FILES_${PN}-doc += "${docdir}"
'''
},
],
)
def test_bad(self, input, id, occurrence):
self.check_for_id(self._create_args(input), id, occurrence)
@pytest.mark.parametrize('id', ['oelint.vars.filessetting.double'])
@pytest.mark.parametrize('occurrence', [2])
@pytest.mark.parametrize('input',
[
{
'oelint_adv_test.bb':
'''
FILES_${PN} += "/opt/other/path"
FILES_${PN}-ping = "${base_bindir}/ping.${BPN}"
FILES_${PN} += "/opt/other/path"
'''
}
],
)
def test_bad_non_default(self, input, id, occurrence):
self.check_for_id(self._create_args(input), id, occurrence)
@pytest.mark.parametrize('id', ['oelint.vars.filessetting.double'])
@pytest.mark.parametrize('occurrence', [0])
@pytest.mark.parametrize('input',
[
{
'oelint_adv_test.bb':
'''
FILES_${PN} += "/opt/other/path"
FILES_${PN}-ping = "${base_bindir}/ping.${BPN}"
'''
}
],
)
def test_good(self, input, id, occurrence):
self.check_for_id(self._create_args(input), id, occurrence)
|
154569
|
from libcloud.compute.types import Provider
from libcloud.compute.providers import get_driver
cls = get_driver(Provider.EC2)
driver = cls('temporary access key', 'temporary secret key',
token='<PASSWORD>', region="us-west-1")
|
154586
|
import torch,math
import torch.nn as nn
import torch.nn.functional as F
from torch.autograd import Variable
from roialign.roi_align.crop_and_resize import CropAndResizeFunction
def log2(x):
"""Implementatin of Log2. Pytorch doesn't have a native implemenation."""
ln2 = Variable(torch.log(torch.FloatTensor([2.0])), requires_grad=False)
if x.is_cuda:
ln2 = ln2.cuda()
return torch.log(x) / ln2
############################################################
# ROIAlign Layer
############################################################
def pyramid_roi_align(inputs, pool_size, image_shape):
"""Implements ROI Pooling on multiple levels of the feature pyramid.
Params:
- pool_size: [height, width] of the output pooled regions. Usually [7, 7]
- image_shape: [height, width, channels]. Shape of input image in pixels
Inputs:
- boxes: [batch, num_boxes, (y1, x1, y2, x2)] in normalized
coordinates.
- Feature maps: List of feature maps from different levels of the pyramid.
Each is [batch, channels, height, width]
Output:
Pooled regions in the shape: [num_boxes, height, width, channels].
The width and height are those specified in pool_size.
"""
# Currently only supports batchsize 1
for i in range(len(inputs)):
inputs[i] = inputs[i].squeeze(0)
# Crop boxes [batch, num_boxes, (y1, x1, y2, x2)] in normalized coords
boxes = inputs[0]
# Feature Maps. List of feature maps from different levels of the
# feature pyramid. Each is [batch, channels, height, width]
feature_maps = inputs[1:]
# Assign each ROI to a level in the pyramid based on the ROI area.
boxes = boxes.view(-1,4)
y1, x1, y2, x2 = boxes.chunk(4, dim=1)
h = y2 - y1
w = x2 - x1
# Equation 1 in the Feature Pyramid Networks paper. Account for
# the fact that our coordinates are normalized here.
# e.g. a 224x224 ROI (in pixels) maps to P4
image_area = Variable(torch.FloatTensor([float(image_shape[0]*image_shape[1])]), requires_grad=False)
if boxes.is_cuda:
image_area = image_area.cuda()
roi_level = 4 + log2(torch.sqrt(h*w)/(224.0/torch.sqrt(image_area)))
roi_level = roi_level.round().int()
roi_level = roi_level.clamp(2,5)
# Loop through levels and apply ROI pooling to each. P2 to P5.
pooled = []
box_to_level = []
for i, level in enumerate(range(2, 6)):
ix = roi_level==level
if not ix.any():
continue
ix = torch.nonzero(ix)[:,0]
level_boxes = boxes[ix.data, :]
# Keep track of which box is mapped to which level
box_to_level.append(ix.data)
# Stop gradient propagation to ROI proposals
level_boxes = level_boxes.detach()
# Crop and Resize
# From Mask R-CNN paper: "We sample four regular locations, so
# that we can evaluate either max or average pooling. In fact,
# interpolating only a single value at each bin center (without
# pooling) is nearly as effective."
#
# Here we use the simplified approach of a single value per bin,
# which is how it's done in tf.crop_and_resize()
# Result: [batch * num_boxes, pool_height, pool_width, channels]
ind = Variable(torch.zeros(level_boxes.size()[0]),requires_grad=False).int()
if level_boxes.is_cuda:
ind = ind.cuda()
feature_maps[i] = feature_maps[i].unsqueeze(0) #CropAndResizeFunction needs batch dimension
pooled_features = CropAndResizeFunction(pool_size, pool_size, 0)(feature_maps[i], level_boxes, ind)
pooled.append(pooled_features)
# Pack pooled features into one tensor
pooled = torch.cat(pooled, dim=0)
# Pack box_to_level mapping into one array and add another
# column representing the order of pooled boxes
box_to_level = torch.cat(box_to_level, dim=0)
# Rearrange pooled features to match the order of the original boxes
_, box_to_level = torch.sort(box_to_level)
pooled = pooled[box_to_level, :, :]
return pooled
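# Added worked example of the level assignment above (comments only): with boxes in
# normalized coordinates, a 224x224-pixel ROI on a 1024x1024 image gives
# sqrt(h*w) = 224/1024 and 224/sqrt(image_area) = 224/1024, so the ratio is 1,
# log2(1) = 0 and roi_level = 4 + 0 = 4, i.e. the ROI is pooled from P4 -- matching
# the "a 224x224 ROI (in pixels) maps to P4" comment. Larger ROIs move towards P5,
# smaller ones towards P2, and the level is clamped to [2, 5].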
def pyramid_roi_align_image(inputs, pool_size, image_shape,istrain=False):
"""Implements ROI Pooling on multiple levels of the feature pyramid.
Params:
- pool_size: [height, width] of the output pooled regions. Usually [7, 7]
- image_shape: [height, width, channels]. Shape of input image in pixels
Inputs:
- boxes: [batch, num_boxes, (y1, x1, y2, x2)] in normalized
coordinates.
- Feature maps: List of feature maps from different levels of the pyramid.
Each is [batch, channels, height, width]
Output:
Pooled regions in the shape: [num_boxes, height, width, channels].
The width and height are those specified in pool_size.
"""
# Currently only supports batchsize 1
if istrain:
start = 1
else:
start = 0
for i in range(start,len(inputs)):
inputs[i] = inputs[i].squeeze(0)
# Crop boxes [batch, num_boxes, (y1, x1, y2, x2)] in normalized coords
boxes = inputs[0]
# Feature Maps. List of feature maps from different levels of the
# feature pyramid. Each is [batch, channels, height, width]
feature_maps = inputs[1:]
# Loop through levels and apply ROI pooling to each. P2 to P5.
pooled = []
ind = Variable(torch.zeros(boxes.size()[0]),requires_grad=False).int()
if boxes.is_cuda:
ind = ind.cuda()
feature_maps[0] = feature_maps[0].unsqueeze(0) #CropAndResizeFunction needs batch dimension
pooled_features = CropAndResizeFunction(pool_size, pool_size, 0)(feature_maps[0], boxes, ind)
return pooled_features
class SamePad2d(nn.Module):
"""Mimics tensorflow's 'SAME' padding.
"""
def __init__(self, kernel_size, stride):
super(SamePad2d, self).__init__()
self.kernel_size = torch.nn.modules.utils._pair(kernel_size)
self.stride = torch.nn.modules.utils._pair(stride)
def forward(self, input):
in_width = input.size()[2]
in_height = input.size()[3]
out_width = math.ceil(float(in_width) / float(self.stride[0]))
out_height = math.ceil(float(in_height) / float(self.stride[1]))
pad_along_width = ((out_width - 1) * self.stride[0] +
self.kernel_size[0] - in_width)
pad_along_height = ((out_height - 1) * self.stride[1] +
self.kernel_size[1] - in_height)
pad_left = math.floor(pad_along_width / 2)
pad_top = math.floor(pad_along_height / 2)
pad_right = pad_along_width - pad_left
pad_bottom = pad_along_height - pad_top
return F.pad(input, (pad_left, pad_right, pad_top, pad_bottom), 'constant', 0)
def __repr__(self):
return self.__class__.__name__
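# Added worked example for SamePad2d (comments only): with kernel_size=3 and
# stride=1 on a 7x7 input, out_width = ceil(7 / 1) = 7, so
# pad_along_width = (7 - 1) * 1 + 3 - 7 = 2, split as pad_left = 1, pad_right = 1.
# The padded tensor is 9x9, and a following 3x3 stride-1 convolution returns a 7x7
# map, reproducing TensorFlow's 'SAME' behaviour for odd kernel sizes.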
############################################################
# FPN Graph
############################################################
class TopDownLayer(nn.Module):
def __init__(self, in_channels, out_channels):
super(TopDownLayer, self).__init__()
self.conv1 = nn.Conv2d(in_channels, out_channels, kernel_size=1, stride=1)
self.padding2 = SamePad2d(kernel_size=3, stride=1)
self.conv2 = nn.Conv2d(out_channels, out_channels, kernel_size=3, stride=1)
def forward(self, x, y):
y = F.upsample(y, scale_factor=2)
x = self.conv1(x)
return self.conv2(self.padding2(x+y))
class FPN(nn.Module):
def __init__(self, C1, C2, C3, C4, C5, out_channels):
super(FPN, self).__init__()
self.out_channels = out_channels
self.C1 = C1
self.C2 = C2
self.C3 = C3
self.C4 = C4
self.C5 = C5
self.P6 = nn.MaxPool2d(kernel_size=1, stride=2)
self.P5_conv1 = nn.Conv2d(2048, self.out_channels, kernel_size=1, stride=1)
self.P5_conv2 = nn.Sequential(
SamePad2d(kernel_size=3, stride=1),
nn.Conv2d(self.out_channels, self.out_channels, kernel_size=3, stride=1),
)
self.P4_conv1 = nn.Conv2d(1024, self.out_channels, kernel_size=1, stride=1)
self.P4_conv2 = nn.Sequential(
SamePad2d(kernel_size=3, stride=1),
nn.Conv2d(self.out_channels, self.out_channels, kernel_size=3, stride=1),
)
self.P3_conv1 = nn.Conv2d(512, self.out_channels, kernel_size=1, stride=1)
self.P3_conv2 = nn.Sequential(
SamePad2d(kernel_size=3, stride=1),
nn.Conv2d(self.out_channels, self.out_channels, kernel_size=3, stride=1),
)
self.P2_conv1 = nn.Conv2d(256, self.out_channels, kernel_size=1, stride=1)
self.P2_conv2 = nn.Sequential(
SamePad2d(kernel_size=3, stride=1),
nn.Conv2d(self.out_channels, self.out_channels, kernel_size=3, stride=1),
)
def forward(self, x):
x = self.C1(x)
x = self.C2(x)
c2_out = x
x = self.C3(x)
c3_out = x
x = self.C4(x)
c4_out = x
x = self.C5(x)
p5_out = self.P5_conv1(x)
p4_out = self.P4_conv1(c4_out) + F.upsample(p5_out, scale_factor=2)
p3_out = self.P3_conv1(c3_out) + F.upsample(p4_out, scale_factor=2)
p2_out = self.P2_conv1(c2_out) + F.upsample(p3_out, scale_factor=2)
p5_out = self.P5_conv2(p5_out)
p4_out = self.P4_conv2(p4_out)
p3_out = self.P3_conv2(p3_out)
p2_out = self.P2_conv2(p2_out)
# P6 is used for the 5th anchor scale in RPN. Generated by
# subsampling from P5 with stride of 2.
p6_out = self.P6(p5_out)
return [p2_out, p3_out, p4_out, p5_out, p6_out]
############################################################
# Resnet Graph
############################################################
class Bottleneck(nn.Module):
expansion = 4
def __init__(self, inplanes, planes, stride=1, downsample=None):
super(Bottleneck, self).__init__()
self.conv1 = nn.Conv2d(inplanes, planes, kernel_size=1, stride=stride)
self.bn1 = nn.BatchNorm2d(planes, eps=0.001, momentum=0.01)
self.padding2 = SamePad2d(kernel_size=3, stride=1)
self.conv2 = nn.Conv2d(planes, planes, kernel_size=3)
self.bn2 = nn.BatchNorm2d(planes, eps=0.001, momentum=0.01)
self.conv3 = nn.Conv2d(planes, planes * 4, kernel_size=1)
self.bn3 = nn.BatchNorm2d(planes * 4, eps=0.001, momentum=0.01)
self.relu = nn.ReLU(inplace=True)
self.downsample = downsample
self.stride = stride
def forward(self, x):
residual = x
out = self.conv1(x)
out = self.bn1(out)
out = self.relu(out)
out = self.padding2(out)
out = self.conv2(out)
out = self.bn2(out)
out = self.relu(out)
out = self.conv3(out)
out = self.bn3(out)
if self.downsample is not None:
residual = self.downsample(x)
out += residual
out = self.relu(out)
return out
class ResNet(nn.Module):
def __init__(self, architecture, stage5=False):
super(ResNet, self).__init__()
assert architecture in ["resnet50", "resnet101"]
self.inplanes = 64
self.layers = [3, 4, {"resnet50": 6, "resnet101": 23}[architecture], 3]
self.block = Bottleneck
self.stage5 = stage5
self.C1 = nn.Sequential(
nn.Conv2d(3, 64, kernel_size=7, stride=2, padding=3),
nn.BatchNorm2d(64, eps=0.001, momentum=0.01),
nn.ReLU(inplace=True),
SamePad2d(kernel_size=3, stride=2),
nn.MaxPool2d(kernel_size=3, stride=2),
)
self.C2 = self.make_layer(self.block, 64, self.layers[0])
self.C3 = self.make_layer(self.block, 128, self.layers[1], stride=2)
self.C4 = self.make_layer(self.block, 256, self.layers[2], stride=2)
if self.stage5:
self.C5 = self.make_layer(self.block, 512, self.layers[3], stride=2)
else:
self.C5 = None
def forward(self, x):
x = self.C1(x)
x = self.C2(x)
x = self.C3(x)
x = self.C4(x)
x = self.C5(x)
return x
def stages(self):
return [self.C1, self.C2, self.C3, self.C4, self.C5]
def make_layer(self, block, planes, blocks, stride=1):
downsample = None
if stride != 1 or self.inplanes != planes * block.expansion:
downsample = nn.Sequential(
nn.Conv2d(self.inplanes, planes * block.expansion,
kernel_size=1, stride=stride),
nn.BatchNorm2d(planes * block.expansion, eps=0.001, momentum=0.01),
)
layers = []
layers.append(block(self.inplanes, planes, stride, downsample))
self.inplanes = planes * block.expansion
for i in range(1, blocks):
layers.append(block(self.inplanes, planes))
return nn.Sequential(*layers)
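# Illustrative wiring sketch (not part of the original training code): the FPN
# above consumes the ResNet stage outputs (256/512/1024/2048 channels for
# C2..C5) and emits five maps with ``out_channels`` channels at strides
# 4, 8, 16, 32 and 64. The input size (256) and out_channels=256 are assumed
# example values, and the shapes assume SamePad2d reproduces TF 'SAME' padding.
def _example_fpn_shapes():
    resnet = ResNet("resnet101", stage5=True)
    fpn = FPN(*resnet.stages(), out_channels=256)
    with torch.no_grad():
        p2, p3, p4, p5, p6 = fpn(torch.zeros(1, 3, 256, 256))
    # Expected roughly: p2 -> [1, 256, 64, 64], p3 -> [1, 256, 32, 32],
    # p4 -> [1, 256, 16, 16], p5 -> [1, 256, 8, 8], p6 -> [1, 256, 4, 4]
    return [p.shape for p in (p2, p3, p4, p5, p6)]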
############################################################
# Region Proposal Network
############################################################
class RPN(nn.Module):
"""Builds the model of Region Proposal Network.
anchors_per_location: number of anchors per pixel in the feature map
anchor_stride: Controls the density of anchors. Typically 1 (anchors for
every pixel in the feature map), or 2 (every other pixel).
Returns:
rpn_logits: [batch, H, W, 2] Anchor classifier logits (before softmax)
    rpn_probs: [batch, H, W, 2] Anchor classifier probabilities.
rpn_bbox: [batch, H, W, (dy, dx, log(dh), log(dw))] Deltas to be
applied to anchors.
"""
def __init__(self, anchors_per_location, anchor_stride, depth):
super(RPN, self).__init__()
self.anchors_per_location = anchors_per_location
self.anchor_stride = anchor_stride
self.depth = depth
self.padding = SamePad2d(kernel_size=3, stride=self.anchor_stride)
self.conv_shared = nn.Conv2d(self.depth, 512, kernel_size=3, stride=self.anchor_stride)
self.relu = nn.ReLU(inplace=True)
self.conv_class = nn.Conv2d(512, 2 * anchors_per_location, kernel_size=1, stride=1)
self.softmax = nn.Softmax(dim=2)
self.conv_bbox = nn.Conv2d(512, 4 * anchors_per_location, kernel_size=1, stride=1)
def forward(self, x):
# Shared convolutional base of the RPN
x = self.relu(self.conv_shared(self.padding(x)))
# Anchor Score. [batch, anchors per location * 2, height, width].
rpn_class_logits = self.conv_class(x)
# Reshape to [batch, 2, anchors]
rpn_class_logits = rpn_class_logits.permute(0,2,3,1)
rpn_class_logits = rpn_class_logits.contiguous()
rpn_class_logits = rpn_class_logits.view(x.size()[0], -1, 2)
# Softmax on last dimension of BG/FG.
rpn_probs = self.softmax(rpn_class_logits)
# Bounding box refinement. [batch, H, W, anchors per location, depth]
# where depth is [x, y, log(w), log(h)]
rpn_bbox = self.conv_bbox(x)
# Reshape to [batch, 4, anchors]
rpn_bbox = rpn_bbox.permute(0,2,3,1)
rpn_bbox = rpn_bbox.contiguous()
rpn_bbox = rpn_bbox.view(x.size()[0], -1, 4)
return [rpn_class_logits, rpn_probs, rpn_bbox]
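# Shape walkthrough for the RPN above (hedged sketch; the concrete numbers are
# assumed, not taken from any config): with anchors_per_location=3, depth=256
# and a 256x32x32 feature map, conv_class produces [1, 6, 32, 32], which the
# permute/view sequence flattens to per-anchor logits of shape
# [1, 32*32*3, 2] = [1, 3072, 2]; rpn_bbox is flattened analogously to
# [1, 3072, 4].
def _example_rpn_shapes():
    rpn = RPN(anchors_per_location=3, anchor_stride=1, depth=256)
    with torch.no_grad():
        logits, probs, bbox = rpn(torch.zeros(1, 256, 32, 32))
    return logits.shape, probs.shape, bbox.shape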
############################################################
# Feature Pyramid Network Heads
############################################################
class Classifier(nn.Module):
def __init__(self, depth, pool_size, image_shape, num_classes):
super(Classifier, self).__init__()
self.depth = depth
self.pool_size = pool_size
self.image_shape = image_shape
self.num_classes = num_classes
self.conv1 = nn.Conv2d(self.depth, 1024, kernel_size=self.pool_size, stride=1)
self.bn1 = nn.BatchNorm2d(1024, eps=0.001, momentum=0.01)
self.conv2 = nn.Conv2d(1024, 1024, kernel_size=1, stride=1)
self.bn2 = nn.BatchNorm2d(1024, eps=0.001, momentum=0.01)
self.relu = nn.ReLU(inplace=True)
self.linear_class = nn.Linear(1024, num_classes)
self.softmax = nn.Softmax(dim=1)
self.linear_bbox = nn.Linear(1024, num_classes * 4)
def forward(self, x, rois):
x = pyramid_roi_align([rois]+x, self.pool_size, self.image_shape)
x = self.conv1(x)
x = self.bn1(x)
x = self.relu(x)
x = self.conv2(x)
x = self.bn2(x)
x = self.relu(x)
x = x.view(-1,1024)
mrcnn_class_logits = self.linear_class(x)
mrcnn_probs = self.softmax(mrcnn_class_logits)
mrcnn_bbox = self.linear_bbox(x)
mrcnn_bbox = mrcnn_bbox.view(mrcnn_bbox.size()[0], -1, 4)
return [mrcnn_class_logits, mrcnn_probs, mrcnn_bbox]
class Mask(nn.Module):
def __init__(self, depth, pool_size, image_shape, num_classes):
super(Mask, self).__init__()
self.depth = depth
self.pool_size = pool_size
self.image_shape = image_shape
self.num_classes = num_classes
self.padding = SamePad2d(kernel_size=3, stride=1)
self.conv1 = nn.Conv2d(self.depth, 256, kernel_size=3, stride=1)
self.bn1 = nn.BatchNorm2d(256, eps=0.001)
self.conv2 = nn.Conv2d(256, 256, kernel_size=3, stride=1)
self.bn2 = nn.BatchNorm2d(256, eps=0.001)
self.conv3 = nn.Conv2d(256, 256, kernel_size=3, stride=1)
self.bn3 = nn.BatchNorm2d(256, eps=0.001)
self.conv4 = nn.Conv2d(256, 256, kernel_size=3, stride=1)
self.bn4 = nn.BatchNorm2d(256, eps=0.001)
self.deconv = nn.ConvTranspose2d(256, 256, kernel_size=2, stride=2)
self.conv5 = nn.Conv2d(256, num_classes, kernel_size=1, stride=1)
self.sigmoid = nn.Sigmoid()
self.relu = nn.ReLU(inplace=True)
    def forward(self, x, rois, cls_feature):
        x = pyramid_roi_align([rois] + x, self.pool_size, self.image_shape)
        x = torch.cat((cls_feature, x), dim=1)
x = self.conv1(self.padding(x))
x = self.bn1(x)
x = self.relu(x)
x = self.conv2(self.padding(x))
x = self.bn2(x)
x = self.relu(x)
x = self.conv3(self.padding(x))
x = self.bn3(x)
x = self.relu(x)
x = self.conv4(self.padding(x))
x = self.bn4(x)
feat = self.relu(x)
x = self.deconv(feat)
x = self.relu(x)
x = self.conv5(x)
# x = self.sigmoid(x)
        return x, feat
class Refine(nn.Module):
def __init__(self, depth, num_classes):
super(Refine, self).__init__()
self.depth = depth
self.padding = SamePad2d(kernel_size=3, stride=1)
self.conv1 = nn.Conv2d(self.depth, 256, kernel_size=3, stride=1)
self.bn1 = nn.BatchNorm2d(256, eps=0.001)
self.conv2 = nn.Conv2d(256, 256, kernel_size=3, stride=1)
self.bn2 = nn.BatchNorm2d(256, eps=0.001)
self.conv3 = nn.Conv2d(256, 256, kernel_size=3, stride=1)
self.bn3 = nn.BatchNorm2d(256, eps=0.001)
self.conv4 = nn.Conv2d(256, 256, kernel_size=3, stride=1)
self.bn4 = nn.BatchNorm2d(256, eps=0.001)
self.deconv = nn.ConvTranspose2d(256, 256, kernel_size=2, stride=2)
self.conv5 = nn.Conv2d(256, num_classes, kernel_size=1, stride=1)
self.sigmoid = nn.Sigmoid()
self.relu = nn.ReLU(inplace=True)
def forward(self, x):
x = self.conv1(self.padding(x))
x = self.bn1(x)
x = self.relu(x)
x = self.conv2(self.padding(x))
x = self.bn2(x)
x = self.relu(x)
x = self.conv3(self.padding(x))
x = self.bn3(x)
x = self.relu(x)
x = self.conv4(self.padding(x))
x = self.bn4(x)
x = self.relu(x)
x = self.conv5(x)
# x = self.sigmoid(x)
return x
|
154597
|
from typing import Sequence
from eth_typing import BLSSignature
from eth_utils import ValidationError
from eth2._utils.bls import bls
from eth2._utils.hash import hash_eth2
from eth2.beacon.attestation_helpers import (
validate_indexed_attestation_aggregate_signature,
)
from eth2.beacon.committee_helpers import get_beacon_committee
from eth2.beacon.epoch_processing_helpers import (
get_attesting_indices,
get_indexed_attestation,
)
from eth2.beacon.helpers import compute_epoch_at_slot, compute_signing_root, get_domain
from eth2.beacon.signature_domain import SignatureDomain
from eth2.beacon.types.aggregate_and_proof import AggregateAndProof
from eth2.beacon.types.attestations import Attestation
from eth2.beacon.types.states import BeaconState
from eth2.beacon.typing import Bitfield, CommitteeIndex, SerializableUint64, Slot
from eth2.configs import Eth2Config
# TODO: TARGET_AGGREGATORS_PER_COMMITTEE is not in Eth2Config now.
TARGET_AGGREGATORS_PER_COMMITTEE = 16
def get_slot_signature(
state: BeaconState, slot: Slot, privkey: int, config: Eth2Config
) -> BLSSignature:
"""
Sign on ``slot`` and return the signature.
"""
domain = get_domain(
state,
SignatureDomain.DOMAIN_BEACON_ATTESTER,
config.SLOTS_PER_EPOCH,
message_epoch=compute_epoch_at_slot(slot, config.SLOTS_PER_EPOCH),
)
signing_root = compute_signing_root(SerializableUint64(slot), domain)
return bls.sign(privkey, signing_root)
def is_aggregator(
state: BeaconState,
slot: Slot,
index: CommitteeIndex,
signature: BLSSignature,
config: Eth2Config,
) -> bool:
"""
Check if the validator is one of the aggregators of the given ``slot``.
.. note::
        - Probabilistically, with enough validators, the aggregator count should
        approach ``TARGET_AGGREGATORS_PER_COMMITTEE``.
        - With ``len(committee)`` of 128 and ``TARGET_AGGREGATORS_PER_COMMITTEE`` of 16,
        the expected number of selected validators is 16.
- It's possible that this algorithm selects *no one* as the aggregator, but with the
above parameters, the chance of having no aggregator has a probability of 3.78E-08.
- Chart analysis: https://docs.google.com/spreadsheets/d/1C7pBqEWJgzk3_jesLkqJoDTnjZOODnGTOJUrxUMdxMA # noqa: E501
"""
committee = get_beacon_committee(state, slot, index, config)
modulo = max(1, len(committee) // TARGET_AGGREGATORS_PER_COMMITTEE)
return int.from_bytes(hash_eth2(signature)[0:8], byteorder="little") % modulo == 0
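# Worked numbers for the selection rule above: with a committee of 128
# validators and TARGET_AGGREGATORS_PER_COMMITTEE == 16, ``modulo`` becomes
# 128 // 16 == 8, so a validator is an aggregator exactly when the low 8 bytes
# of the hashed slot signature are divisible by 8 -- roughly a 1-in-8 chance,
# giving about 16 aggregators per committee on average.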
def get_aggregate_from_valid_committee_attestations(
attestations: Sequence[Attestation]
) -> Attestation:
"""
Return the aggregate attestation.
    The given attestations SHOULD all share the same `data: AttestationData` and be valid.
"""
    assert len(attestations) > 0
    signatures = [attestation.signature for attestation in attestations]
    aggregate_signature = bls.aggregate(*signatures)
    all_aggregation_bits = [
        attestation.aggregation_bits for attestation in attestations
    ]
    aggregation_bits = tuple(map(any, zip(*all_aggregation_bits)))
return Attestation.create(
data=attestations[0].data,
aggregation_bits=Bitfield(aggregation_bits),
signature=aggregate_signature,
)
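# Illustration of the aggregation-bit merge in the function above:
#
#   >>> tuple(map(any, zip((1, 0, 0), (0, 1, 0))))
#   (True, True, False)
#
# i.e. the merged Bitfield marks a validator as participating if it appears in
# any of the input attestations.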
#
# Validation
#
def validate_aggregate_and_proof(
state: BeaconState,
aggregate_and_proof: AggregateAndProof,
attestation_propagation_slot_range: int,
config: Eth2Config,
) -> None:
"""
Validate aggregate_and_proof
Reference: https://github.com/ethereum/eth2.0-specs/blob/master/specs/networking/p2p-interface.md#global-topics # noqa: E501
"""
attestation = aggregate_and_proof.aggregate
validate_attestation_propagation_slot_range(
state, attestation, attestation_propagation_slot_range
)
attesting_indices = get_attesting_indices(
state, attestation.data, attestation.aggregation_bits, config
)
if aggregate_and_proof.aggregator_index not in attesting_indices:
raise ValidationError(
f"The aggregator index ({aggregate_and_proof.aggregator_index}) is not within"
f" the aggregate's committee {attesting_indices}"
)
if not is_aggregator(
state,
attestation.data.slot,
attestation.data.index,
aggregate_and_proof.selection_proof,
config,
):
raise ValidationError(
f"The given validator {aggregate_and_proof.aggregator_index}"
" is not a selected aggregator"
)
validate_aggregator_proof(state, aggregate_and_proof, config)
validate_attestation_signature(state, attestation, config)
def validate_attestation_propagation_slot_range(
state: BeaconState,
attestation: Attestation,
attestation_propagation_slot_range: int,
) -> None:
if (
attestation.data.slot + attestation_propagation_slot_range < state.slot
or attestation.data.slot > state.slot
):
raise ValidationError(
"attestation.data.slot should be within the last"
" {attestation_propagation_slot_range} slots. Got"
f" attestationdata.slot={attestation.data.slot},"
f" current slot={state.slot}"
)
def validate_aggregator_proof(
state: BeaconState, aggregate_and_proof: AggregateAndProof, config: Eth2Config
) -> None:
slot = aggregate_and_proof.aggregate.data.slot
pubkey = state.validators[aggregate_and_proof.aggregator_index].pubkey
domain = get_domain(
state,
SignatureDomain.DOMAIN_BEACON_ATTESTER,
config.SLOTS_PER_EPOCH,
message_epoch=compute_epoch_at_slot(slot, config.SLOTS_PER_EPOCH),
)
signing_root = compute_signing_root(SerializableUint64(slot), domain)
bls.validate(signing_root, aggregate_and_proof.selection_proof, pubkey)
def validate_attestation_signature(
state: BeaconState, attestation: Attestation, config: Eth2Config
) -> None:
indexed_attestation = get_indexed_attestation(state, attestation, config)
validate_indexed_attestation_aggregate_signature(
state, indexed_attestation, config.SLOTS_PER_EPOCH
)
|
154607
|
import pandas as pd
import numpy as np
import glob
import math
import re
import sys
import multiprocessing
def downsampleRow(args):
row, targetSum = args
currentCount = row.sum()
downsampledRow = row.copy()
while currentCount > targetSum and currentCount != 0:
possible = downsampledRow[(downsampledRow > 0)]
desiredTossCount = int(currentCount - targetSum)
probabilities = [p / currentCount for p in possible]
for indexToLower in np.random.choice(
possible.index, max(0, desiredTossCount),
replace=True, p=probabilities):
if downsampledRow[indexToLower] > 0:
downsampledRow[indexToLower] -= 1
currentCount = downsampledRow.sum()
return downsampledRow
# downsample_to = sample to this amount of counts per column
# min_feature_abundance = remove all rows which have less than these counts
def downsampleDataFrame(df, downsample_to, min_feature_abundance=50):
pool = multiprocessing.Pool(8)
try:
df = df.loc[:, df.sum() > downsample_to]
df = df.loc[df.sum(1) > min_feature_abundance, :]
subset = df.transpose()
dfDownsampled = subset.copy()
for idx, drow in enumerate(
pool.map(
downsampleRow, [
(row, downsample_to) for i, row in subset.iterrows()])):
dfDownsampled.iloc[idx, :] = drow
    except Exception as e:
        print(e)
    finally:
        # Make sure the worker processes are released even if downsampling fails.
        pool.close()
    return dfDownsampled.transpose()
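# Minimal usage sketch (toy data, not from the original pipeline): build a
# small count matrix and downsample every kept sample column to ~100 counts.
if __name__ == '__main__':
    np.random.seed(0)
    toy = pd.DataFrame(
        np.random.poisson(30, size=(4, 3)) * 10,
        index=['featA', 'featB', 'featC', 'featD'],
        columns=['sample1', 'sample2', 'sample3'],
    )
    print(downsampleDataFrame(toy, downsample_to=100, min_feature_abundance=10))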
|
154628
|
import torch
from torch import nn as nn
from torch.nn import functional as F
from torch.nn.utils import spectral_norm
from basicsr.utils.registry import ARCH_REGISTRY
class add_attn(nn.Module):
def __init__(self, x_channels, g_channels=256):
super(add_attn, self).__init__()
self.W = nn.Sequential(
nn.Conv2d(x_channels, x_channels, kernel_size=1, stride=1, padding=0), nn.BatchNorm2d(x_channels))
self.theta = nn.Conv2d(x_channels, x_channels, kernel_size=2, stride=2, padding=0, bias=False)
self.phi = nn.Conv2d(g_channels, x_channels, kernel_size=1, stride=1, padding=0, bias=True)
self.psi = nn.Conv2d(x_channels, out_channels=1, kernel_size=1, stride=1, padding=0, bias=True)
def forward(self, x, g):
input_size = x.size()
batch_size = input_size[0]
assert batch_size == g.size(0)
theta_x = self.theta(x)
theta_x_size = theta_x.size()
phi_g = F.interpolate(self.phi(g), size=theta_x_size[2:], mode='bilinear', align_corners=False)
f = F.relu(theta_x + phi_g, inplace=True)
sigm_psi_f = torch.sigmoid(self.psi(f))
sigm_psi_f = F.interpolate(sigm_psi_f, size=input_size[2:], mode='bilinear', align_corners=False)
y = sigm_psi_f.expand_as(x) * x
W_y = self.W(y)
return W_y, sigm_psi_f
class unetCat(nn.Module):
def __init__(self, dim_in, dim_out):
super(unetCat, self).__init__()
norm = spectral_norm
self.convU = norm(nn.Conv2d(dim_in, dim_out, 3, 1, 1, bias=False))
def forward(self, input_1, input_2):
# Upsampling
input_2 = F.interpolate(input_2, scale_factor=2, mode='bilinear', align_corners=False)
output_2 = F.leaky_relu(self.convU(input_2), negative_slope=0.2, inplace=True)
offset = output_2.size()[2] - input_1.size()[2]
padding = 2 * [offset // 2, offset // 2]
output_1 = F.pad(input_1, padding)
y = torch.cat([output_1, output_2], 1)
return y
class UNetDiscriminatorSN(nn.Module):
"""Defines a U-Net discriminator with spectral normalization (SN)"""
def __init__(self, num_in_ch, num_feat=64):
super(UNetDiscriminatorSN, self).__init__()
norm = spectral_norm
self.conv0 = nn.Conv2d(num_in_ch, num_feat, kernel_size=3, stride=1, padding=1)
self.conv1 = norm(nn.Conv2d(num_feat, num_feat * 2, 3, 2, 1, bias=False))
self.conv2 = norm(nn.Conv2d(num_feat * 2, num_feat * 4, 3, 2, 1, bias=False))
# Center
self.conv3 = norm(nn.Conv2d(num_feat * 4, num_feat * 8, 3, 2, 1, bias=False))
self.gating = norm(nn.Conv2d(num_feat * 8, num_feat * 4, 1, 1, 1, bias=False))
# attention Blocks
self.attn_1 = add_attn(x_channels=num_feat * 4, g_channels=num_feat * 4)
self.attn_2 = add_attn(x_channels=num_feat * 2, g_channels=num_feat * 4)
self.attn_3 = add_attn(x_channels=num_feat, g_channels=num_feat * 4)
# Cat
self.cat_1 = unetCat(dim_in=num_feat * 8, dim_out=num_feat * 4)
self.cat_2 = unetCat(dim_in=num_feat * 4, dim_out=num_feat * 2)
self.cat_3 = unetCat(dim_in=num_feat * 2, dim_out=num_feat)
# upsample
self.conv4 = norm(nn.Conv2d(num_feat * 8, num_feat * 4, 3, 1, 1, bias=False))
self.conv5 = norm(nn.Conv2d(num_feat * 4, num_feat * 2, 3, 1, 1, bias=False))
self.conv6 = norm(nn.Conv2d(num_feat * 2, num_feat, 3, 1, 1, bias=False))
# extra
self.conv7 = norm(nn.Conv2d(num_feat, num_feat, 3, 1, 1, bias=False))
self.conv8 = norm(nn.Conv2d(num_feat, num_feat, 3, 1, 1, bias=False))
self.conv9 = nn.Conv2d(num_feat, 1, 3, 1, 1)
def forward(self, x):
x0 = F.leaky_relu(self.conv0(x), negative_slope=0.2, inplace=True)
x1 = F.leaky_relu(self.conv1(x0), negative_slope=0.2, inplace=True)
x2 = F.leaky_relu(self.conv2(x1), negative_slope=0.2, inplace=True)
x3 = F.leaky_relu(self.conv3(x2), negative_slope=0.2, inplace=True)
gated = F.leaky_relu(self.gating(x3), negative_slope=0.2, inplace=True)
# Attention
attn1, ly1 = self.attn_1(x2, gated)
attn2, ly2 = self.attn_2(x1, gated)
attn3, ly3 = self.attn_3(x0, gated)
return (ly1, ly2, ly3)
class multiscale(nn.Module):
def __init__(self, num_in_ch, num_feat=64, num_D=2):
super(multiscale, self).__init__()
self.num_D = num_D
for i in range(num_D):
netD = UNetDiscriminatorSN(num_in_ch, num_feat=num_feat)
setattr(self, 'layer' + str(i), netD)
self.downsample = nn.AvgPool2d(4, stride=2, padding=[1, 1])
def singleD_forward(self, model, input):
return model(input)
def forward(self, input):
num_D = self.num_D
result = []
input_downsampled = input
for i in range(num_D):
model = getattr(self, 'layer' + str(num_D - 1 - i))
result.append(self.singleD_forward(model, input_downsampled))
if i != (num_D - 1):
input_downsampled = self.downsample(input_downsampled)
return result
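# Rough output sketch (input size assumed): for a [1, 3, 256, 256] image and
# num_D=2, ``multiscale`` returns a list of two (ly1, ly2, ly3) tuples -- one
# per scale -- where each ly* is a sigmoid attention map resized to the input
# resolution, which the __main__ block below colourises with
# cv2.applyColorMap and saves as a heat map.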
if __name__ == "__main__":
from torchsummary import summary
from PIL import Image
import argparse
parser = argparse.ArgumentParser()
parser.add_argument('--img_path', default=r"..\inputs\1.png", help='image path')
parser.add_argument('--model_path', default=r"..\experiments\pretrained_models\multi", help='multi model list path')
parser.add_argument('--save_path', default=r".\Visual", help='path to save the heat map')
    parser.add_argument('--Disc_num', default=2, type=int, help='number of discriminator scales')
args = parser.parse_args()
dNum = args.Disc_num
uNet = multiscale(3, num_feat=64, num_D=dNum)
import numpy as np
imgpath = args.img_path
modelpath = args.model_path
save_dir = args.save_path
import cv2
import torchvision.transforms as transforms
img = cv2.imread(imgpath)
import os
import shutil
if not os.path.exists(save_dir):
os.mkdir(save_dir)
else:
shutil.rmtree(save_dir)
os.mkdir(save_dir)
for i in range(5000, 200000, 5000):
path = modelpath + r"\net_d_" + str(i) + ".pth"
l = torch.load(path)
p = uNet.state_dict()
uNet.load_state_dict(l["params"], strict=True)
input = transforms.ToTensor()(img)
input = input.unsqueeze(0)
AList = uNet(input)
DiscNum = 1
for AttentionLayer1, AttentionLayer2, AttentionLayer3 in AList:
A1 = AttentionLayer1.detach().numpy()
A1 = np.squeeze(A1)
A1 = A1 * 255
A1 = cv2.applyColorMap(np.uint8(A1), cv2.COLORMAP_JET)
save_path = save_dir + "\A1_D" + str(DiscNum) + "_" + str(i) + ".png"
cv2.imwrite(save_path, A1)
A2 = AttentionLayer2.detach().numpy()
A2 = np.squeeze(A2)
A2 = A2 * 255
A2 = cv2.applyColorMap(np.uint8(A2), cv2.COLORMAP_JET)
save_path = save_dir + "\A2_D" + str(DiscNum) + "_" + str(i) + ".png"
cv2.imwrite(save_path, A2)
A3 = AttentionLayer3.detach().numpy()
A3 = np.squeeze(A3)
A3 = A3 * 255
A3 = cv2.applyColorMap(np.uint8(A3), cv2.COLORMAP_JET)
save_path = save_dir + "\A3_D" + str(DiscNum) + "_" + str(i) + ".png"
cv2.imwrite(save_path, A3)
DiscNum += 1
|
154635
|
import uuid
from django.db import models
class TimestampedModel(models.Model):
updated_at = models.DateTimeField(auto_now=True)
created_at = models.DateTimeField(auto_now_add=True)
class Meta:
abstract = True
class KeyModel(TimestampedModel):
key = models.CharField(
max_length=255, unique=True, db_index=True, null=False, blank=True
)
class Meta:
abstract = True
@property
def short_key(self):
return self.key[:8]
    def save(self, **kwargs):
        # Assign a unique key on first save: keep drawing UUID4 values until
        # one is not already used by another row.
        if not self.key:
            while True:
                new_key = str(uuid.uuid4())
                try:
                    self.__class__.objects.get(key=new_key)
                    # Collision (extremely unlikely) -- draw another key.
                    continue
                except self.__class__.DoesNotExist:
                    self.key = new_key
                    break
        super().save(**kwargs)
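# Hedged usage sketch (the ApiToken model below is illustrative, not part of
# this app):
#
#   class ApiToken(KeyModel):
#       label = models.CharField(max_length=100)
#
#   token = ApiToken.objects.create(label="ci")
#   token.key        # a UUID4 string assigned automatically on first save
#   token.short_key  # first 8 characters, convenient for display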
|
154648
|
import rq
from rq import Queue
from rq import Connection
from redis import Redis
class Worker(object):
def __init__(self, job_queue_name_list):
self.queues = job_queue_name_list
def run(self, redis):
redis_connection = Redis(redis[0], redis[1], password=redis[2])
with Connection(redis_connection):
            # map() is lazy (and always truthy) in Python 3, so materialise the
            # list first; fall back to the default queue if no names were given.
            qs = list(map(Queue, self.queues)) or [Queue()]
            w = rq.Worker(qs)
            w.work()
        print('Items in queue \'{0}\': {1}'.format(self.queues[0], qs[0].count))
@classmethod
def from_worker(cls, job_queue_name_list):
return cls(job_queue_name_list)
|
154678
|
from .saver import CheckpointSaver
from .data_loader import CheckpointDataLoader, CheckpointSampler
from .data_loader import RandomSampler, SequentialSampler
from .base_trainer import BaseTrainer
from .base_options import BaseOptions
|
154682
|
import json
import logging
import math
import os
import pathlib
import subprocess
import numpy as np
import shapely.geometry
import shapely.affinity
import venn7.bezier
ROOT = pathlib.Path(os.path.realpath(__file__)).parent
class VennDiagram:
"""A simple symmetric monotone Venn diagram. The diagram is encoded discretely
using a set of "row swaps." Creation of path data is performed on the fly.
See README for more info.
Parameters
----------
n : int
The order of the Venn diagram. Must be prime.
matrix_encoding_string : str
A string containing whitespace-separated rows of the "matrix encoding."
See README for example.
"""
def __init__(self, n, matrix_encoding_string, name=None, renderer_args=None):
self.name = name
self.n = n
self.row_swaps = self.parse_matrix_encoding_string(
matrix_encoding_string
)
self.flattened_row_swaps = [y for x in self.row_swaps for y in x]
self.renderer_args = renderer_args
if self.renderer_args is None:
self.renderer_args = {}
self.validate_basic()
self.validate_venn()
def parse_matrix_encoding_string(self, matrix_encoding_string):
rows = matrix_encoding_string.strip().splitlines()
matrix = [[int(c) for c in line.strip()] for line in rows]
row_swaps = []
for column in range(len(matrix[0])):
entry = []
for row in range(len(matrix)):
if matrix[row][column] == 1:
entry.append(row + 1)
row_swaps.append(entry)
return row_swaps
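    # Worked example of the parsing above (illustrative input): the encoding
    # "10\n01" yields matrix [[1, 0], [0, 1]]; column 0 has a 1 in row 0 and
    # column 1 has a 1 in row 1, so the resulting row_swaps is [[1], [2]].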
def validate_basic(self):
"""Check for basic errors in the matrix flattened_row_swaps."""
n = self.n
expected_length = (2 ** n - 2) // n
if len(self.flattened_row_swaps) != expected_length:
raise ValueError(
f"Wrong length: flattened_row_swaps should be of length {expected_length}"
)
last_x = self.flattened_row_swaps[-1]
for x in self.flattened_row_swaps:
if last_x == x:
raise ValueError(
"Immediate repetitions are not allowed in flattened_row_swaps"
)
last_x = x
for k in range(1, n - 1):
expected = math.comb(n, k) // n
count = 0
for x in self.flattened_row_swaps:
if x == k:
count += 1
if count != expected:
raise ValueError(f"Expected {expected} instances of {k}")
def validate_venn(self):
"""Check that this is in fact a Venn diagram."""
n = self.n
# I am not sure if this validation code is correct, sorry
ranks = [False] * (2 ** n)
ranks[0] = ranks[-1] = True
p = list(range(n))
for swap_row in self.full_flattened_row_swaps():
a = swap_row
b = swap_row - 1
p[a], p[b] = p[b], p[a]
rank = sum([2 ** x for x in p[swap_row:]])
if ranks[rank]:
raise ValueError(f"Duplicate rank {rank}")
ranks[rank] = True
if not all(ranks):
raise ValueError(f"Not all ranks represented")
def full_flattened_row_swaps(self):
"""Return the flattened_row_swaps duplicated n times."""
full_flattened_row_swaps = []
for i in range(self.n):
full_flattened_row_swaps += self.flattened_row_swaps
return full_flattened_row_swaps
def get_spline(self, index=0):
renderer = VennDiagramRenderer(self, **self.renderer_args)
        return renderer.get_spline(index)
def get_polygon(self, index=0):
"""Get the shape of a single curve as a polygon."""
spline = self.get_spline(index)
resolution = 10
points = []
for bezier in spline.beziers:
for i in range(resolution):
points.append(bezier(i / resolution))
return points
def check_regions(self):
"""Approximate this Venn diagram with polygons and use Shapely to check
that the diagram is valid."""
original_curve = shapely.geometry.Polygon(self.get_polygon())
curves = []
for i in range(self.n):
angle = 2 * math.pi * i / self.n
curve = shapely.affinity.rotate(
original_curve, angle, origin=(0, 0), use_radians=True
)
curves.append(curve)
# Region at index 0 is an empty set.
regions = [[]]
for rank in range(1, 2 ** self.n):
curves_included = []
curves_excluded = []
tmp_rank = rank
for i in range(self.n):
if tmp_rank % 2 == 0:
curves_excluded.append(curves[i])
else:
curves_included.append(curves[i])
tmp_rank //= 2
region = curves_included[0]
for curve in curves_included[1:]:
region = region.intersection(curve)
for curve in curves_excluded:
region = region.difference(curve)
assert not region.is_empty
def export_json(self):
result = {
"name": self.name,
"n": self.n,
"curve": self.get_spline().as_svg_path(),
}
process = subprocess.run(
["node", str(ROOT / "venn_boolean.js")],
check=True,
capture_output=True,
input=json.dumps(result),
encoding="utf-8",
)
regions = json.loads(process.stdout)
processed_regions = [""]
for region in regions[1:]:
path = venn7.bezier.BezierPath.from_svg_path(region)
path = path.remove_tiny_segments(threshold=1)
processed_regions.append(path.as_svg_path())
result["regions"] = processed_regions
return result
def plot(self):
import matplotlib.pyplot as plt
import matplotlib.patches
import matplotlib.collections
fig, ax = plt.subplots()
polygons = [
            matplotlib.patches.Polygon(self.get_polygon(i)) for i in range(self.n)
]
patches = matplotlib.collections.PatchCollection(polygons, alpha=0.2)
ax.add_collection(patches)
plt.xlim(-100, 100)
plt.ylim(-100, 100)
plt.show()
class VennDiagramRenderer:
"""A class that renders discrete Venn diagrams to splines."""
def __init__(
self,
venn_diagram,
inner_radius=30,
spacing=5,
tension_diagonal=1.0,
tension_default=1.0,
extra_outer_spacing=0,
):
self.n = venn_diagram.n
self.row_swaps = venn_diagram.row_swaps
self.inner_radius = inner_radius
self.spacing = spacing
self.tension_diagonal = tension_diagonal
self.tension_default = tension_default
self.extra_outer_spacing = extra_outer_spacing
# Avoid perfectly coincident endpoints, which causes
# issues for Boolean ops.
self.fudge_factor = 1e-4
def _get_radius_of_row(self, row, use_extra_outer_spacing=True):
adjusted_row = row
if use_extra_outer_spacing:
if row <= 1:
adjusted_row -= self.extra_outer_spacing
if row >= self.n - 1:
adjusted_row += self.extra_outer_spacing
result = self.inner_radius + self.spacing * adjusted_row
return result
def _get_curve_points_on_cylinder(self, index):
"""Get the set of control points (not Bezier but Metafont control
points) if the Venn diagram were unraveled on a cylinder. All these
points lie on a grid.
Each point is of the form (x, y, type). x is the circular coordinate
which wraps around from 0 to len(self.row_swaps). y is the other,
non-circular component which ranges from 0 to self.n - 1 inclusive.
type is a string used to tag points with information about the point.
        This method generates two point types:
- intersection_+ means that the curve is going up at this point.
- intersection_- means that the curve is going down at this point.
"""
points = []
row, column = 0, index * len(self.row_swaps)
for i in range(self.n):
for swap_rows in self.row_swaps:
if row + 1 in swap_rows:
points.append((row + 1, column, "intersection_+"))
row += 1
elif row in swap_rows:
points.append((row, column, "intersection_-"))
row -= 1
column += 1
return points
def _add_arc_points(self, points):
"""Given a set of control points on the cylinder, find pairs of points
that are horizontal and insert new arc points to help round out the
curve in that region. It is assumed that all points are intersection type.
"""
squash_factor = len(self.row_swaps)
result = []
for i in range(len(points)):
r1, c1, type_1 = point = points[i]
r2, c2, type_2 = points[(i + 1) % len(points)]
result.append(point)
if r1 == r2:
                radius = (c2 - c1) % (self.n * len(self.row_swaps)) * 0.5
column = c1 + radius
if type_1 == "intersection_+" and type_2 == "intersection_-":
arc_direction = 1
type_ = "arc_+"
elif type_1 == "intersection_-" and type_2 == "intersection_+":
arc_direction = -1
type_ = "arc_-"
else:
raise RuntimeError
vertical_radius = arc_direction * radius * 0.5
ratio = 0.6
#result.append((r1 + vertical_radius, column, type_))
return result
def _get_tensions(self, points):
"""Given a set of control points on the cylinder, determine whether
each pair of points is diagonal or horizontal. If they are diagonal and
both are of "intersection" type, their tension is set to
``tension_diagonal``. Otherwise, their tension is ``tension_default``.
Collect a list of all tensions and return it.
"""
tensions = []
for i in range(len(points)):
r1, c1, type_1 = points[i]
r2, c2, type_2 = points[(i + 1) % len(points)]
if (
type_1.startswith("intersection_") and type_2.startswith("intersection_")
and type_1 == type_2
):
tensions.append(self.tension_diagonal)
else:
tensions.append(self.tension_default)
return tensions
def _convert_cylinder_points_to_polar(self, cylinder_points):
polar_points = []
for row, column, __ in cylinder_points:
radius = self._get_radius_of_row(row)
theta = column * 2 * math.pi / (self.n * len(self.row_swaps))
x = radius * math.cos(theta)
y = radius * math.sin(theta)
polar_points.append((x, y))
return polar_points
def _normalize_rotation_and_scaling(self, spline):
"""Given a spline, rotate and uniformly scale it so that its furthest
point from the origin is transformed to (0, -50)."""
x, y = spline.get_furthest_point_from((0, 0))
angle = np.arctan2(y, x)
scale = np.hypot(x, y)
return spline.transform(
venn7.bezier.get_rotation_matrix(-np.pi * 0.5 - angle) * 50 / scale
)
def _get_angles(self, cylinder_points):
result = []
for row, column, type_ in cylinder_points:
tangent_angle = 2 * np.pi * column / (self.n * len(self.row_swaps)) + np.pi / 2
angle = tangent_angle
dy = self.spacing
dx = self._get_radius_of_row(row) * 2 * np.pi / (self.n * len(self.row_swaps))
tilt_angle = np.arctan2(dy, dx)
if type_ == "intersection_+":
angle -= tilt_angle
elif type_ == "intersection_-":
angle += tilt_angle
angle = angle % (2 * np.pi)
result.append(angle)
return result
def get_spline(self, index=0):
"""Render a single curve of the Venn diagram to a BezierSpline
and return the result.
Parameters
----------
index : int
Which curve to return. For a symmetric Venn diagram, indices
other than 0 are rotations of each other.
"""
cylinder_points = self._get_curve_points_on_cylinder(index)
cylinder_points = self._add_arc_points(cylinder_points)
angles = self._get_angles(cylinder_points)
control_points = self._convert_cylinder_points_to_polar(cylinder_points)
spline = venn7.bezier.AngleSpline(control_points, angles)
spline = self._normalize_rotation_and_scaling(spline)
spline = spline.translate(np.array([self.fudge_factor, 0]))
return spline
DIAGRAMS_LIST = [
"victoria",
"adelaide",
"massey",
"manawatu",
"palmerston_north",
"hamilton",
"5",
]
DIAGRAMS = {
"victoria": VennDiagram(
7,
"""
010000000000
101000001000
010100010101
100010101010
000001010001
000000100000
""",
"Victoria",
),
"adelaide": VennDiagram(
7,
"""
0100000000
1010001000
0101010101
1010101010
0001010001
0000100000
""",
"Adelaide",
),
"massey": VennDiagram(
7,
"""
010000000000
101000000010
010100010101
101010101000
010101000000
001000000000
""",
"Massey",
),
"manawatu": VennDiagram(
7,
"""
00001000000000
10000000100100
01010001010001
00101010001010
00000100100100
01000000000000
""",
"Manawatu",
renderer_args={
"extra_outer_spacing": 2
},
),
"palmerston_north": VennDiagram(
7,
"""
10000000000000
00100000001010
01010100010100
10001010100010
00000001000101
00000000010000
""",
"<NAME>",
renderer_args={
"extra_outer_spacing": 1
},
),
"hamilton": VennDiagram(
7,
"""
0010000000
1000100010
0101010101
1010101010
0101000100
0000000001
""",
"Hamilton",
renderer_args={
"extra_outer_spacing": 1
},
),
"5": VennDiagram(
5,
"""
1000
0101
1010
0001
""",
"Symmetric 5-Venn diagram",
renderer_args={
"inner_radius": 10,
"spacing": 8,
"tension_diagonal": 1,
"tension_default": 1,
},
),
}
if __name__ == "__main__":
import json
import sys
diagrams_json = {}
diagrams_json["diagrams_list"] = DIAGRAMS_LIST
for name, diagram in DIAGRAMS.items():
diagrams_json[name] = diagram.export_json()
with open(sys.argv[1], "w") as f:
f.write("const venn_diagrams = ")
json.dump(diagrams_json, f)
f.write(";")
|
154692
|
import pytest
import util.cli
import core.config
import core.input
import modules.contrib.amixer
@pytest.fixture
def module_mock(request):
def _module_mock(config = []):
return modules.contrib.amixer.Module(
config=core.config.Config(config),
theme=None
)
yield _module_mock
@pytest.fixture
def amixer_mock():
def _mock(device='Master', volume='10%', state='on'):
return """
Simple mixer control '{device}',0
Capabilities: pvolume pswitch pswitch-joined
Playback channels: Front Left - Front Right
Limits: Playback 0 - 65536
Mono:
Front Left: Playback 55705 [{volume}%] [{state}]
Front Right: Playback 55705 [{volume}%] [{state}]
""".format(
device=device,
volume=volume,
state=state
)
return _mock
def test_load_module():
__import__("modules.contrib.amixer")
def test_initial_full_text(module_mock, amixer_mock, mocker):
module = module_mock()
assert module.widget().full_text() == 'n/a'
def test_input_registration(mocker):
input_register = mocker.patch('core.input.register')
module = modules.contrib.amixer.Module(
config=core.config.Config([]),
theme=None
)
input_register.assert_any_call(
module,
button=core.input.WHEEL_DOWN,
cmd=module.decrease_volume
)
input_register.assert_any_call(
module,
button=core.input.WHEEL_UP,
cmd=module.increase_volume
)
input_register.assert_any_call(
module,
button=core.input.LEFT_MOUSE,
cmd=module.toggle
)
def test_volume_update(module_mock, amixer_mock, mocker):
mocker.patch(
'util.cli.execute',
return_value=amixer_mock(volume='25%', state='on')
)
module = module_mock()
widget = module.widget()
module.update()
assert widget.full_text() == '25%'
assert module.state(widget) == ['unmuted']
def test_muted_update(module_mock, amixer_mock, mocker):
mocker.patch(
'util.cli.execute',
return_value=amixer_mock(volume='50%', state='off')
)
module = module_mock()
widget = module.widget()
module.update()
assert widget.full_text() == '50%'
assert module.state(widget) == ['warning', 'muted']
def test_exception_update(module_mock, mocker):
mocker.patch(
'util.cli.execute',
side_effect=Exception
)
module = module_mock()
widget = module.widget()
module.update()
assert widget.full_text() == 'n/a'
def test_unavailable_amixer(module_mock, mocker):
mocker.patch('util.cli.execute', return_value='Invalid')
module = module_mock()
widget = module.widget()
module.update()
assert widget.full_text() == '0%'
def test_toggle(module_mock, mocker):
command = mocker.patch('util.cli.execute')
module = module_mock()
module.toggle(False)
command.assert_called_once_with('amixer -q set Master,0 toggle')
def test_default_volume(module_mock, mocker):
module = module_mock()
command = mocker.patch('util.cli.execute')
module.increase_volume(False)
command.assert_called_once_with('amixer -q set Master,0 4%+')
command = mocker.patch('util.cli.execute')
module.decrease_volume(False)
command.assert_called_once_with('amixer -q set Master,0 4%-')
def test_custom_volume(module_mock, mocker):
module = module_mock(['-p', 'amixer.percent_change=25'])
command = mocker.patch('util.cli.execute')
module.increase_volume(False)
command.assert_called_once_with('amixer -q set Master,0 25%+')
command = mocker.patch('util.cli.execute')
module.decrease_volume(False)
command.assert_called_once_with('amixer -q set Master,0 25%-')
def test_custom_device(module_mock, mocker):
mocker.patch('util.cli.execute')
module = module_mock(['-p', 'amixer.device=CustomMaster'])
command = mocker.patch('util.cli.execute')
module.toggle(False)
command.assert_called_once_with('amixer -q set CustomMaster toggle')
command = mocker.patch('util.cli.execute')
module.increase_volume(False)
command.assert_called_once_with('amixer -q set CustomMaster 4%+')
command = mocker.patch('util.cli.execute')
module.decrease_volume(False)
command.assert_called_once_with('amixer -q set CustomMaster 4%-')
|
154721
|
from unittest import TestCase
from camunda.variables.variables import Variables
class VariablesTest(TestCase):
def test_get_variable_returns_none_when_variable_absent(self):
variables = Variables({})
self.assertIsNone(variables.get_variable("var1"))
def test_get_variable_returns_value_when_variable_present(self):
variables = Variables({"var1": {"value": 1}})
self.assertEqual(1, variables.get_variable("var1"))
def test_get_variable_returns_with_meta(self):
var1_raw = {"value": 1}
variables = Variables({"var1": var1_raw})
self.assertEqual(var1_raw, variables.get_variable("var1", True))
def test_get_variable_returns_without_meta(self):
var1_raw = {"value": 1}
variables = Variables({"var1": var1_raw})
self.assertEqual(1, variables.get_variable("var1", False))
def test_format_returns_empty_dict_when_none_is_passed(self):
variables = None
self.assertDictEqual({}, Variables.format(variables))
def test_format_returns_empty_dict_when_variables_absent(self):
variables = {}
self.assertDictEqual({}, Variables.format(variables))
def test_format_returns_formatted_variables_when_variables_present(self):
variables = {"var1": 1, "var2": True, "var3": "string"}
formatted_vars = Variables.format(variables)
self.assertDictEqual({"var1": {"value": 1},
"var2": {"value": True},
"var3": {"value": "string"}}, formatted_vars)
def test_format_returns_formatted_variables_keeps_already_formatted(self):
variables = {"var1": 1, "var2": True, "var3": "string", "var4": {"value": 1}}
formatted_vars = Variables.format(variables)
self.assertDictEqual({"var1": {"value": 1},
"var2": {"value": True},
"var3": {"value": "string"},
"var4": {"value": 1}}, formatted_vars)
def test_to_dict_returns_variables_as_dict(self):
variables = Variables({"var1": {"value": 1},
"var2": {"value": True},
"var3": {"value": "string"}})
self.assertDictEqual({"var1": 1, "var2": True, "var3": "string"}, variables.to_dict())
|
154724
|
import hashlib
from assemblyline.common import entropy
from assemblyline.common.charset import safe_str
DEFAULT_BLOCKSIZE = 65536
# noinspection PyBroadException
def get_digests_for_file(path, blocksize=DEFAULT_BLOCKSIZE,
calculate_entropy=True,
on_first_block=lambda b, l: {}):
""" Generate digests for file reading only 'blocksize bytes at a time."""
bc = None
if calculate_entropy:
try:
bc = entropy.BufferedCalculator()
except: # pylint: disable=W0702
calculate_entropy = False
result = {'path': safe_str(path)}
md5 = hashlib.md5()
sha1 = hashlib.sha1()
sha256 = hashlib.sha256()
size = 0
with open(path, 'rb') as f:
data = f.read(blocksize)
length = len(data)
if not size:
result.update(on_first_block(data, length))
while length > 0:
if calculate_entropy:
bc.update(data, length)
md5.update(data)
sha1.update(data)
sha256.update(data)
size += length
data = f.read(blocksize)
length = len(data)
if calculate_entropy:
result['entropy'] = bc.entropy()
else:
result['entropy'] = 0
result['md5'] = md5.hexdigest()
result['sha1'] = sha1.hexdigest()
result['sha256'] = sha256.hexdigest()
result['size'] = size
return result
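# Usage sketch (path and callback are assumed examples):
#
#   digests = get_digests_for_file('/tmp/sample.bin')
#   digests['md5'], digests['sha256'], digests['size'], digests['entropy']
#
# ``on_first_block`` receives the first block and its length; whatever dict it
# returns (e.g. a sniffed file type) is merged into the result.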
def get_md5_for_file(path, blocksize=DEFAULT_BLOCKSIZE):
md5 = hashlib.md5()
with open(path, 'rb') as f:
data = f.read(blocksize)
length = len(data)
while length > 0:
md5.update(data)
data = f.read(blocksize)
length = len(data)
return md5.hexdigest()
def get_sha256_for_file(path, blocksize=DEFAULT_BLOCKSIZE):
sha256 = hashlib.sha256()
with open(path, 'rb') as f:
data = f.read(blocksize)
length = len(data)
while length > 0:
sha256.update(data)
data = f.read(blocksize)
length = len(data)
return sha256.hexdigest()
|
154734
|
import py
from pypy.rpython.lltypesystem import lltype
from pypy.jit.timeshifter import rvalue
from pypy.jit.timeshifter import rcontainer
from pypy.jit.timeshifter.test.support import *
def test_create_int_redbox_var():
jitstate = FakeJITState()
gv = FakeGenVar()
box = rvalue.IntRedBox("dummy kind", gv)
assert not box.is_constant()
assert box.getgenvar(jitstate) is gv
gv2 = FakeGenVar()
    box.setgenvar(gv2)  # doesn't raise
assert box.getgenvar(jitstate) is gv2
def test_create_int_redbox_const():
jitstate = FakeJITState()
gv = FakeGenConst()
box = rvalue.IntRedBox("dummy kind", gv)
assert box.is_constant()
assert box.getgenvar(jitstate) is gv
gv2 = FakeGenVar()
py.test.raises(AssertionError, box.setgenvar, gv2)
def test_forcevar():
jitstate = FakeJITState()
gv = FakeGenVar()
intbox = rvalue.IntRedBox("dummy kind", gv)
assert intbox.forcevar(jitstate, rvalue.copy_memo(), False) is intbox
doublebox = rvalue.DoubleRedBox("dummy kind", FakeGenConst())
box2 = doublebox.forcevar(jitstate, rvalue.copy_memo(), False)
assert doublebox is not box2
assert not box2.is_constant()
assert doublebox.genvar is not box2.genvar
def test_learn_nonzeroness():
jitstate = FakeJITState()
gv = FakeGenVar()
box = rvalue.PtrRedBox("dummy pointer", gv)
assert not box.known_nonzero
assert box.learn_nonzeroness(jitstate, True)
assert box.known_nonzero
assert not box.learn_nonzeroness(jitstate, False)
assert box.learn_nonzeroness(jitstate, True)
box = rvalue.PtrRedBox("dummy pointer", gv)
assert box.learn_nonzeroness(jitstate, False)
assert box.is_constant()
assert box.genvar._value == "NULL"
assert box.learn_nonzeroness(jitstate, False)
assert not box.learn_nonzeroness(jitstate, True)
def test_box_get_set_field():
jitstate = FakeJITState()
V0 = FakeGenVar()
box = rvalue.PtrRedBox("dummy pointer", V0)
STRUCT = lltype.Struct("dummy", ("foo", lltype.Signed))
desc = rcontainer.StructFieldDesc(FakeHRTyper(), lltype.Ptr(STRUCT), "foo", 0)
box2 = box.op_getfield(jitstate, desc)
V1 = box2.genvar
assert box.known_nonzero
assert jitstate.curbuilder.ops == [('getfield', (('field', STRUCT, 'foo'), V0), V1)]
jitstate.curbuilder.ops = []
V42 = FakeGenVar(42)
valuebox = rvalue.IntRedBox("dummy kind", V42)
box.op_setfield(jitstate, desc, valuebox)
assert jitstate.curbuilder.ops == [('setfield', (('field', STRUCT, 'foo'), V0, V42), None)]
|
154745
|
from lxml import etree
import lxml
import pickle
with open('/Users/billchen/OneDrive/Workspace/LearningRepo/Python3/专利检索爬虫/20050101_20101231_B09B_PAGE1.pickle', 'rb') as f:
source = ''
p = pickle.load(f)
html = etree.HTML(p)
|
154757
|
import os
import torch
import create_data
from model import shape_net
import numpy as np
def align_bone_len(opt_, pre_):
opt = opt_.copy()
pre = pre_.copy()
opt_align = opt.copy()
for i in range(opt.shape[0]):
ratio = pre[i][6] / opt[i][6]
opt_align[i] = ratio * opt_align[i]
err = np.abs(opt_align - pre).mean(0)
return err
def fun(_shape, _label, data_loader):
    # Compute the relative bone lengths.
shape = _shape.clone().detach()
label = _label.detach().clone()
    # Derive the relative bone lengths from the shape parameters.
X = data_loader.new_cal_ref_bone(shape)
err = align_bone_len(X.cpu().numpy(), label.cpu().numpy())
return err.sum()
checkpoint = 'checkpoints'
model = shape_net.ShapeNet()
shape_net.load_checkpoint(
model, os.path.join(checkpoint, 'ckp_siknet_synth_41.pth.tar')
)
for params in model.parameters():
params.requires_grad = False
data_set = ['rhd', 'stb', 'do', 'eo']
temp_data = create_data.DataSet(_mano_root='mano/models')
for data in data_set:
print('*' * 20)
    print('Loading the ' + data + ' dataset')
    print('*' * 20)
    # Load the predicted joints.
pre_path = os.path.join('out_testset/', data + '_pre_joints.npy')
temp = np.load(pre_path)
temp = torch.Tensor(temp)
_x = temp_data.cal_ref_bone(temp)
    # Regress the shape parameters with the model.
Y = model(_x)
Y = Y['beta']
np.save('out_testset/' + data + '_dl.npy', Y.clone().detach().cpu().numpy())
dl_err = fun(Y, _x, temp_data)
    print('Regression error: {}'.format(dl_err))
|
154787
|
import info
class subinfo(info.infoclass):
def setTargets(self):
self.versionInfo.setDefaultValues()
self.description = "GUI to profilers such as Valgrind"
self.defaultTarget = 'master'
def setDependencies(self):
self.runtimeDependencies["libs/qt5/qtbase"] = None
self.runtimeDependencies["kde/frameworks/tier1/karchive"] = None
self.runtimeDependencies["kde/frameworks/tier1/kcoreaddons"] = None
self.runtimeDependencies["kde/frameworks/tier2/kdoctools"] = None
self.runtimeDependencies["kde/frameworks/tier1/kwidgetsaddons"] = None
self.runtimeDependencies["kde/frameworks/tier3/kxmlgui"] = None
self.runtimeDependencies["kde/frameworks/tier4/kdelibs4support"] = None
from Package.CMakePackageBase import *
class Package(CMakePackageBase):
def __init__(self):
CMakePackageBase.__init__(self)
|
154841
|
from __future__ import absolute_import
from __future__ import print_function
from loqui.client import LoquiClient
client = LoquiClient(('localhost', 4001))
print(len(client.send_request('hello world')))
|
154864
|
from PyQt5.QtWidgets import *
from PyQt5.QtCore import Qt
from PyQt5 import QtCore, QtGui
from .views import *
from .models import State, StateListener, KeyboardNotifier
from .styles import Theme
class MainWindow(QMainWindow):
def __init__(self):
super().__init__()
self.setWindowTitle("UltimateLabeler")
self.central_widget = CentralWidget()
self.central_widget.setFocusPolicy(Qt.StrongFocus)
self.setFocusProxy(self.central_widget)
self.central_widget.setFocus(True)
self.statusBar()
mainMenu = self.menuBar()
fileMenu = mainMenu.addMenu('&File')
helpMenu = mainMenu.addMenu('&Help')
close = QAction('Close window', self)
close.setShortcut('Ctrl+W')
close.triggered.connect(self.close)
fileMenu.addAction(close)
import_action = QAction('Import', self)
import_action.setShortcut('Ctrl+I')
import_action.triggered.connect(self.central_widget.io.on_import_click)
fileMenu.addAction(import_action)
export = QAction('Export', self)
export.setShortcut('Ctrl+E')
export.triggered.connect(self.central_widget.io.on_export_click)
fileMenu.addAction(export)
"""save = QAction('Save', self)
save.setShortcut('Ctrl+S')
save.triggered.connect()
fileMenu.addAction(save)"""
help = QAction('Documentation', self)
help.triggered.connect(self.open_url)
helpMenu.addAction(help)
self.setCentralWidget(self.central_widget)
self.show()
self.center()
def open_url(self):
url = QtCore.QUrl('https://github.com/alexandre01/UltimateLabeling')
if not QtGui.QDesktopServices.openUrl(url):
            QMessageBox.warning(self, 'Open Url', 'Could not open url')  # QMessageBox comes from QtWidgets, not QtGui
def center(self):
qr = self.frameGeometry()
cp = QDesktopWidget().availableGeometry().center()
qr.moveCenter(cp)
self.move(qr.topLeft())
def closeEvent(self, event):
print("exiting")
self.central_widget.ssh_login.closeServers()
self.central_widget.state.track_info.save_to_disk()
self.central_widget.state.save_state()
class CentralWidget(QWidget, StateListener):
def __init__(self):
super().__init__()
self.state = State()
self.state.load_state()
self.state.add_listener(self)
self.keyboard_notifier = KeyboardNotifier()
self.video_list_widget = VideoListWidget(self.state)
self.img_widget = ImageWidget(self.state)
self.slider = VideoSlider(self.state, self.keyboard_notifier)
self.player = PlayerWidget(self.state)
self.theme_picker = ThemePicker(self.state)
self.options = Options(self.state)
self.ssh_login = SSHLogin(self.state)
self.detection_manager = DetectionManager(self.state, self.ssh_login)
self.tracking_manager = TrackingManager(self.state)
self.hungarian_button = HungarianManager(self.state)
self.info_detection = InfoDetection(self.state)
self.io = IO(self, self.state)
self.keyPressEvent = self.keyboard_notifier.keyPressEvent
self.keyReleaseEvent = self.keyboard_notifier.keyReleaseEvent
self.keyboard_notifier.add_listeners(self.player, self.slider, self.img_widget, self.info_detection,
self.tracking_manager)
# Avoid keyboard not being triggered when focus on some widgets
self.video_list_widget.setFocusPolicy(Qt.NoFocus)
self.slider.setFocusPolicy(Qt.NoFocus)
self.setFocusPolicy(Qt.StrongFocus)
# Image widget thread signal, update function should always be called from main thread
self.img_widget.signal.connect(self.img_widget.update)
self.state.img_viewer = self.img_widget
self.make_layout()
self.on_theme_change()
def make_layout(self):
main_layout = QHBoxLayout()
navbar_box = QGroupBox("Videos")
navbar_layout = QVBoxLayout()
navbar_layout.addWidget(self.video_list_widget)
navbar_box.setLayout(navbar_layout)
main_layout.addWidget(navbar_box)
image_box = QGroupBox("Image")
image_layout = QVBoxLayout()
image_layout.addWidget(self.img_widget)
image_layout.addWidget(self.slider)
image_box.setLayout(image_layout)
main_layout.addWidget(image_box)
control_box = QGroupBox("Control")
control_layout = QVBoxLayout()
control_layout.addWidget(self.player)
control_layout.addWidget(self.ssh_login)
control_layout.addWidget(self.theme_picker)
control_layout.addWidget(self.options)
control_layout.addWidget(self.detection_manager)
control_layout.addWidget(self.hungarian_button)
control_layout.addWidget(self.tracking_manager)
control_layout.addWidget(self.info_detection)
control_layout.addStretch()
control_box.setLayout(control_layout)
main_layout.addWidget(control_box)
self.setLayout(main_layout)
def on_theme_change(self):
app.setStyle("Fusion")
app.setPalette(Theme.get_palette(self.state.theme))
if __name__ == '__main__':
app = QApplication([])
main_window = MainWindow()
app.exec()
|
154874
|
from ..mapper import PropertyMapper, ApiInterfaceBase
from ..mapper.types import Timestamp, AnyType
__all__ = ['RewriteRule', 'RewriteRuleInterface']
class RewriteRuleInterface(ApiInterfaceBase):
matcher: str
replacer: str
class RewriteRule(PropertyMapper, RewriteRuleInterface):
pass
|
154883
|
import os
import sys
import inspect
from unittest import TestCase
from chatterbot import corpus
from chatterbot import languages
from chatterbot.constants import STATEMENT_TEXT_MAX_LENGTH
from chatterbot_corpus.corpus import DATA_DIRECTORY
class CorpusUtilsTestCase(TestCase):
"""
This test case is designed to make sure that all
corpus data adheres to a few general rules.
"""
def test_character_count(self):
"""
Test that no line in the corpus exceeds the maximum number of characters.
"""
files = corpus.list_corpus_files('chatterbot_corpus')
for dialog_corpus, _categories, _file_path in corpus.load_corpus(*files):
for conversation in dialog_corpus:
for text in conversation:
if len(text) > STATEMENT_TEXT_MAX_LENGTH:
self.fail(
'"{}" cannot be longer than {} characters'.format(
text,
STATEMENT_TEXT_MAX_LENGTH
)
)
def test_conversation_format(self):
files = corpus.list_corpus_files('chatterbot_corpus')
for dialog_corpus, _categories, _file_path in corpus.load_corpus(*files):
for conversation in dialog_corpus:
for text in conversation:
if not isinstance(text, str):
self.fail('"{}" must be a string, not {}.'.format(
str(text),
type(text)
))
def test_language_names(self):
"""
        Each language directory should adhere to the same naming convention.
"""
valid_language_names = []
language_classes = inspect.getmembers(sys.modules[languages.__name__])
for _name, obj in language_classes:
if inspect.isclass(obj):
valid_language_names.append(obj.ENGLISH_NAME.lower())
for directory_name in os.listdir(DATA_DIRECTORY):
self.assertIn(directory_name, valid_language_names)
|
154899
|
from django import forms
from .models import Member, get_config
from .util import validate_country
class MemberForm(forms.ModelForm):
class Meta:
model = Member
fields = ('fullname', 'country', 'listed')
def clean_country(self):
if self.instance.country_exception:
# No country checking for this member
return self.cleaned_data['country']
validate_country(get_config().country_validator, self.cleaned_data['country'])
return self.cleaned_data['country']
class ProxyVoterForm(forms.Form):
name = forms.CharField(min_length=5, max_length=100, help_text="Name of proxy voter. Leave empty to cancel proxy voting.", required=False)
|
154928
|
from datetime import datetime
import os
from pathlib import Path
import warnings
import pickle
from typing import List, Dict, Any
from thunderbolt.client.local_cache import LocalCache
from tqdm import tqdm
class LocalDirectoryClient:
def __init__(self, workspace_directory: str = '', task_filters: List[str] = [], tqdm_disable: bool = False, use_cache: bool = True):
self.workspace_directory = os.path.abspath(workspace_directory)
self.task_filters = task_filters
self.tqdm_disable = tqdm_disable
self.local_cache = LocalCache(workspace_directory, use_cache)
self.use_cache = use_cache
def get_tasks(self) -> List[Dict[str, Any]]:
"""Load all task_log from workspace_directory."""
files = {str(path) for path in Path(os.path.join(self.workspace_directory, 'log/task_log')).rglob('*')}
tasks_list = list()
for x in tqdm(files, disable=self.tqdm_disable):
n = x.split('/')[-1]
if self.task_filters and not [x for x in self.task_filters if x in n]:
continue
n = n.split('_')
if self.use_cache:
cache = self.local_cache.get(x)
if cache:
tasks_list.append(cache)
continue
try:
modified = datetime.fromtimestamp(os.stat(x).st_mtime)
with open(x, 'rb') as f:
task_log = pickle.load(f)
with open(x.replace('task_log', 'task_params'), 'rb') as f:
task_params = pickle.load(f)
params = {
'task_name': '_'.join(n[:-1]),
'task_params': task_params,
'task_log': task_log,
'last_modified': modified,
'task_hash': n[-1].split('.')[0],
}
tasks_list.append(params)
if self.use_cache:
self.local_cache.dump(x, params)
except Exception:
continue
if len(tasks_list) != len(files):
warnings.warn(f'[NOT FOUND LOGS] target file: {len(files)}, found log file: {len(tasks_list)}')
return tasks_list
def to_absolute_path(self, x: str) -> str:
"""get file path"""
x = x.lstrip('.').lstrip('/')
if self.workspace_directory.rstrip('/').split('/')[-1] == x.split('/')[0]:
x = '/'.join(x.split('/')[1:])
x = os.path.join(self.workspace_directory, x)
return os.path.abspath(x)
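# Hedged usage sketch (the workspace layout is assumed to follow the
# <workspace>/log/task_log and <workspace>/log/task_params convention that
# get_tasks() reads from; the directory and filter names are examples):
#
#   client = LocalDirectoryClient('./workspace', task_filters=['MyTask'])
#   tasks = client.get_tasks()
#   tasks[0]['task_name'], tasks[0]['task_params'], tasks[0]['last_modified']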
|
154955
|
import os
if 'DJANGO_SETTINGS_MODULE' not in os.environ:
os.environ['DJANGO_SETTINGS_MODULE'] = 'tests.settings'
from drf_typescript_generator.utils import (
_get_method_return_value_type, _get_typescript_name, _get_typescript_type, export_serializer,
get_serializer_fields
)
from drf_typescript_generator.globals import DEFAULT_TYPE
from tests.utils import (
ChoiceFieldTestSerializer, ListFieldTestSerializer, MethodOutputTestSerializer,
ModelTestSerializer, NestedSerializersTestSerializer, TypescriptNameTestSerializer
)
class BaseTest:
def setup_method(self, test_method):
self.serializer = self.serializer_class()
self.fields = self.serializer.get_fields()
class TestRequired(BaseTest):
serializer_class = TypescriptNameTestSerializer
def test_typescript_required_name(self):
ts_name = _get_typescript_name(self.fields['required_field'], 'required_field')
assert ts_name == 'requiredField'
def test_typescript_not_required_name(self):
ts_name = _get_typescript_name(self.fields['not_required_field'], 'not_required_field')
assert ts_name == 'notRequiredField?'
class TestMethodField(BaseTest):
serializer_class = MethodOutputTestSerializer
def test_method_unknown_return_type(self):
ts_type, _ = _get_method_return_value_type(
self.fields['unknown_output_type'], 'unknown_output_type', self.serializer
)
assert ts_type == DEFAULT_TYPE
ts_type, _ = _get_method_return_value_type(
self.fields['known_output_type'], 'known_output_type', self.serializer
)
assert ts_type == "number"
def test_method_known_return_type(self):
ts_type, _ = _get_method_return_value_type(
self.fields['known_output_type'], 'known_output_type', self.serializer
)
assert ts_type == "number"
class TestChoiceField(BaseTest):
serializer_class = ChoiceFieldTestSerializer
def test_basic_choice_selection_fields(self):
ts_type = _get_typescript_type(self.fields['choice_field_int'], 'choice_field_int', self.serializer)
assert ts_type == '1 | 2 | 3'
ts_type = _get_typescript_type(self.fields['choice_field_float'], 'choice_field_float', self.serializer)
assert ts_type == '1.2 | 3.1'
ts_type = _get_typescript_type(self.fields['choice_field_bool'], 'choice_field_bool', self.serializer)
assert ts_type == 'true | false'
ts_type = _get_typescript_type(self.fields['choice_field_str'], 'choice_field_str', self.serializer)
assert ts_type == '"a" | "b"'
def test_choice_selection_fields_with_empty_values(self):
ts_type = _get_typescript_type(self.fields['choice_field_str_blank'], 'choice_field_str_blank', self.serializer)
assert ts_type == '"a" | "b" | ""'
ts_type = _get_typescript_type(self.fields['choice_field_int_null'], 'choice_field_int_null', self.serializer)
assert ts_type == '1 | 2 | null'
class TestListField(BaseTest):
serializer_class = ListFieldTestSerializer
def test_basic_list_field_type(self):
ts_type = _get_typescript_type(self.fields['lst'], 'lst', self.serializer)
assert ts_type == 'number[]'
def test_composite_list_field_type(self):
ts_type = _get_typescript_type(self.fields['composite_lst'], 'composite_lst', self.serializer)
assert ts_type == '(number | null)[]'
def test_multiple_choice_field(self):
ts_type = _get_typescript_type(self.fields['multichoice'], 'multichoice', self.serializer)
assert ts_type == '(1 | 2 | 3)[]'
def test_model_serializer():
fields = get_serializer_fields(ModelTestSerializer)
options = {
'format': 'type',
'semicolons': False,
'tabs': None,
'spaces': 2
}
ts_serializer = export_serializer('ModelTestSerializer', fields, options)
assert ' '.join(ts_serializer.split()).strip() == ' '.join(
"""
export type ModelTestSerializer = {
field1?: string
field2: number
field3: number
}
""".split()
).strip()
class TestNestedSerializers(BaseTest):
serializer_class = NestedSerializersTestSerializer
def test_single_object_nested_serializer(self):
ts_type = _get_typescript_type(self.fields['model'], 'model', self.serializer)
assert ts_type == 'ModelTestSerializer'
def test_many_objects_nested_serializer(self):
ts_type = _get_typescript_type(self.fields['models'], 'models', self.serializer)
assert ts_type == 'ModelTestSerializer[]'
def test_nullable_many_objects_nested_serializer(self):
ts_type = _get_typescript_type(self.fields['models_nullable'], 'models_nullable', self.serializer)
assert ts_type == '(ModelTestSerializer | null)[]'
|
155015
|
from scripts.simulation.SimulationWorld import SimulationWorld
from scripts.Robot.Robot import Robot
import numpy as np
import scripts.utils.yaml_paser as yaml
from scripts.utils.utils import Utils as utils
import logging
import os
from collections import OrderedDict
from scripts.DB.Mongo_driver import MongoDriver
import time
class TrajectoryOptimizationPlanner:
# initializing robot model, simulation environment and SQP solver
def __init__(self, **kwargs):
self.robot_config = None
self.default_config = None
self.config = None
self.sqp_yaml = None
self.sqp_config = None
self.robot_default_config_params = None
self.elapsed_time = 0
main_logger_name = utils.get_var_from_kwargs("logger_name", optional=True,
default="Trajectory_Optimization_Planner.", **kwargs)
verbose = utils.get_var_from_kwargs("verbose", optional=True, default=False, **kwargs)
log_file = utils.get_var_from_kwargs("log_file", optional=True, default=False, **kwargs)
robot_config = utils.get_var_from_kwargs("robot_config", optional=True, **kwargs)
self.load_configs(robot_config)
self.save_problem = utils.get_var_from_kwargs("save_problem", optional=True, **kwargs)
if self.save_problem is not None:
db_name = utils.get_var_from_kwargs("db_name", optional=True, default="Trajectory_planner_results", **kwargs)
self.db_driver = MongoDriver(db_name)
else:
self.db_driver = None
self.save_problem = None
self.if_plot_traj = utils.get_var_from_kwargs("plot_trajectory", optional=True, default=False, **kwargs)
self.robot = Robot(main_logger_name, verbose, log_file)
self.world = SimulationWorld(**kwargs)
self.logger = logging.getLogger(main_logger_name)
utils.setup_logger(self.logger, main_logger_name, verbose, log_file)
self.world.toggle_rendering(0)
self.load_robot_from_config()
self.world.toggle_rendering(1)
# to load default configuration from a file
def load_configs(self, config_file=None):
file_path_prefix = os.path.join(os.path.dirname(__file__), '../../config/')
self.default_config = yaml.ConfigParser(file_path_prefix + 'default_config.yaml')
self.config = self.default_config.get_by_key("config")
sqp_config_file = file_path_prefix + self.config["solver"]
self.sqp_yaml = yaml.ConfigParser(sqp_config_file)
self.sqp_config = self.sqp_yaml.get_by_key("sqp")
if config_file is not None:
robot_config_file = file_path_prefix + config_file
else:
robot_config_file = file_path_prefix + self.config["robot"]["config"]
self.robot_default_config_params = self.config["robot"]["default_paramaters"]
robot_yaml = yaml.ConfigParser(robot_config_file)
self.robot_config = robot_yaml.get_by_key("robot")
# to load a robot model from urdf file
def load_robot(self, urdf_file, position=[0, 0, 0], orientation=[0, 0, 0, 1], use_fixed_base=True):
self.robot.id = self.world.load_robot(urdf_file, position, orientation, use_fixed_base)
self.robot.load_robot_model(urdf_file)
return self.robot.id
# to load a robot configuration from srdf file
def load_robot_srdf(self, srdf_file):
self.robot.load_srdf(srdf_file)
self.world.ignored_collisions = self.robot.get_ignored_collsion()
# load robot from urdf file specified in config file
def load_robot_from_config(self):
urdf_file = utils.get_var_from_kwargs("urdf", optional=True, **self.robot_config)
srdf_file = utils.get_var_from_kwargs("srdf", optional=True, **self.robot_config)
if urdf_file is not None:
pos = self.robot_config["position"] if "position" in self.robot_config else [0, 0, 0]
orn = self.robot_config["orientation"] if "orientation" in self.robot_config else [0, 0, 0, 1]
self.load_robot(urdf_file, position=pos, orientation=orn)
if srdf_file is not None:
self.load_robot_srdf(srdf_file)
# load an object into the simulation environment from a urdf file
def load_from_urdf(self, name, urdf_file, position, orientation=None, use_fixed_base=False):
urdf_id = self.world.load_urdf(name, urdf_file, position, orientation, use_fixed_base)
return urdf_id
# load collision constraints into the simulation environment from a urdf file
def add_constraint_from_urdf(self, name, urdf_file, position, orientation=None, use_fixed_base=False):
urdf_id = self.world.load_urdf(name, urdf_file, position, orientation, use_fixed_base)
self.world.add_collision_constraints(urdf_id)
return urdf_id
# load primitive shape collision constraints into simulation environment
def add_constraint(self, name, shape, mass, position, size=None, radius=None, height=None, orientation=None):
shape_id = self.world.create_constraint(name, shape, mass, position, orientation, size, radius, height)
self.world.add_collision_constraints(shape_id)
return shape_id
# if an object is already loaded into the simulation environment, it can then be added as a collision constraint
def add_constraint_with_id(self, constraint_id):
self.world.add_collision_constraints(constraint_id)
# method to plan and get the optimized trajectory
def get_trajectory(self, **kwargs):
group = []
group_name = utils.get_var_from_kwargs("group", **kwargs)
if group_name is not None:
if type(group_name) is list:
group = group_name
if type(group_name) is str and group_name in self.robot_config["joints_groups"]:
group = self.robot_config["joints_groups"][group_name]
if not len(group):
group = self.robot.get_planning_group_from_srdf(group_name)
start_state = utils.get_var_from_kwargs("start_state", optional=True, **kwargs)
if start_state is not None and len(group):
if type(start_state) is dict or type(start_state) is OrderedDict:
start_state = start_state.values()
if not type(start_state) is list:
_, start_state = self.get_planning_group_and_corresponding_state("start_state", **kwargs)
self.reset_robot_to(start_state, group, key="start_state")
status, is_collision_free, trajectory = "start state in collision", False, -1
is_start_state_in_collision = self.world.is_given_state_in_collision(self.robot.id, start_state, group)
if is_start_state_in_collision:
print "is_start_state_in_collision", is_start_state_in_collision
status = "start state in collision"
return status, is_collision_free, trajectory
elif len(group):
start_state = self.world.get_current_states_for_given_joints(self.robot.id, group)
goal_state = utils.get_var_from_kwargs("goal_state", **kwargs)
if goal_state is not None and len(group):
if type(goal_state) is dict or type(goal_state) is OrderedDict:
goal_state = goal_state.values()
if not type(goal_state) is list:
_, goal_state = self.get_planning_group_and_corresponding_state("goal_state", **kwargs)
status, is_collision_free, trajectory = "goal state in collision", False, -1
is_goal_in_collision = self.world.is_given_state_in_collision(self.robot.id, goal_state, group)
if is_goal_in_collision:
print "is_goal_in_collision", is_goal_in_collision
status = "goal state in collision"
return status, is_collision_free, trajectory
samples = utils.get_var_from_kwargs("samples", optional=True, default=20, **kwargs)
duration = utils.get_var_from_kwargs("duration", optional=True, default=10, **kwargs)
collision_safe_distance = utils.get_var_from_kwargs("collision_safe_distance", optional=True,
default=0.05, **kwargs)
collision_check_distance = utils.get_var_from_kwargs("collision_check_distance", optional=True,
default=0.1, **kwargs)
ignore_goal_states = utils.get_var_from_kwargs("ignore_goal_states", optional=True, **kwargs)
self.robot.init_plan_trajectory(group=group, current_state=start_state,
goal_state=goal_state, samples=samples, duration=duration,
collision_safe_distance=collision_safe_distance,
collision_check_distance=collision_check_distance,
solver_class=self.sqp_config["solver_class"],
ignore_goal_states=ignore_goal_states
)
self.world.toggle_rendering_while_planning(False)
_, planning_time, _ = self.robot.calulate_trajecotory(self.callback_function_from_solver)
trajectory = self.robot.planner.get_trajectory()
is_collision_free = self.world.is_trajectory_collision_free(self.robot.id, self.robot.get_trajectory().final,
group,
0.02)
self.world.toggle_rendering_while_planning(True)
self.elapsed_time = self.robot.planner.sqp_solver.solving_time + \
self.world.collision_check_time + self.robot.planner.prob_model_time
status = "Optimal Trajectory has been found in " + str(self.elapsed_time) + " secs"
self.logger.info(status)
self.log_infos()
if self.if_plot_traj:
self.robot.planner.trajectory.plot_trajectories()
if self.save_problem:
self.save_to_db(samples, duration, start_state, goal_state, group, collision_safe_distance,
collision_check_distance, is_collision_free)
return status, is_collision_free, trajectory
# method to plan an optimized trajectory
def plan_trajectory(self, planning_group, goal_state, samples=20, duration=10,
collision_safe_distance=0.1,
collision_check_distance=0.05, solver_config=None):
status, is_collision_free, _ = self.get_trajectory(group=planning_group,
goal_state=goal_state, samples=samples, duration=duration,
collision_safe_distance=collision_safe_distance,
collision_check_distance=collision_check_distance,
solver_config=solver_config)
status += ", is trajectory collision free: " + str(is_collision_free)
return status, is_collision_free
# method to execute the planned trajectory
def execute_trajectory(self):
self.world.execute_trajectory(self.robot, self.robot.planner.get_trajectory())
return "Trajectory execution completed"
# method to extract planning group and corresponding joint values from a config file
def get_planning_group_and_corresponding_state(self, group_state, **kwargs):
group = []
joint_states = []
group_name = utils.get_var_from_kwargs("group", **kwargs)
if group_name is not None:
if type(group_name) is str:
if kwargs["group"] in self.robot_config["joints_groups"]:
group = self.robot_config["joints_groups"][kwargs["group"]]
if not len(group):
group = self.robot.get_planning_group_from_srdf(group_name)
if group_state in kwargs and len(group):
joint_states = kwargs[group_state]
if type(joint_states) is str and joint_states in self.robot_config["joint_configurations"]:
joint_states = self.robot_config["joint_configurations"][joint_states]
# if not len(joint_states):
else:
group, joint_states = self.robot.get_group_state_from_srdf(joint_states)
if type(joint_states) is dict or type(joint_states) is OrderedDict:
joint_states = joint_states.values()
return group, joint_states
# robots can be reset to a given state
def reset_robot_to(self, state, group, key="reset_state"):
group, joint_states = self.get_planning_group_and_corresponding_state(key, group=group, reset_state=state)
self.world.reset_joint_states(self.robot.id, joint_states, group)
# robots can be reset to a random state
def reset_robot_to_random_state(self, group_name):
group = []
if type(group_name) is str:
group = self.robot_config["joints_groups"][group_name]
if not len(group):
group = self.robot.get_planning_group_from_srdf(group_name)
if type(group) is dict or type(group) is OrderedDict:
group = group.values()
status = self.world.reset_joints_to_random_states(self.robot.id, group)
self.world.step_simulation_for(0.2)
return status
def get_group_names(self, group):
if type(group) is str:
group = self.robot_config["joints_groups"][group]
if type(group) is dict or type(group) is OrderedDict:
group = group.values()
return group
def callback_function_from_solver(self, new_trajectory, delta_trajectory=None):
constraints, lower_limit, upper_limit = None, None, None
new_trajectory = new_trajectory[:self.robot.planner.no_of_samples * self.robot.planner.num_of_joints]
trajectory = np.split(new_trajectory, self.robot.planner.no_of_samples)
self.robot.planner.trajectory.add_trajectory(trajectory)
start = time.time()
collision_infos = self.world.get_collision_infos(self.robot, trajectory, self.robot.planner.current_planning_joint_group,
distance=self.robot.planner.collision_check_distance)
end = time.time()
self.elapsed_time = (end - start) + self.robot.planner.sqp_solver.solving_time
# self.elapsed_time = self.world.collision_check_time + self.robot.planner.sqp_solver.solving_time
if len(collision_infos[2]) > 0:
constraints, lower_limit, upper_limit = \
self.robot.planner.problem_model.update_collision_infos(collision_infos, self.robot.planner.collision_safe_distance)
self.robot.planner.update_prob()
return constraints, lower_limit, upper_limit
def get_group_and_state(self, group, state, **kwargs):
if type(group) is str:
group = self.robot_config["joints_groups"][kwargs["group"]]
goal_state = kwargs["goal_state"]
if type(goal_state) is str:
goal_state = self.robot_config["joint_configurations"][goal_state]
if type(goal_state) is dict or type(goal_state) is OrderedDict:
goal_state = goal_state.values()
return goal_state
def save_to_db(self, samples, duration, current_robot_state, goal_state, group, d_safe, d_check, is_collision_free):
if self.save_problem and self.db_driver is not None:
planning_request = OrderedDict()
planning_request["samples"] = samples
planning_request["duration"] = duration
planning_request["start_state"] = current_robot_state
planning_request["goal_state"] = goal_state
planning_request["no of links"] = len(self.world.robot_info["joint_infos"])
planning_request["collision_safe_distance"] = d_safe
planning_request["collision_check_distance"] = d_check
planning_request["no_scene_items"] = len(self.world.scene_items)
result = OrderedDict()
# result["type"] = "donbot_random_state_and_obstacles"
result["type"] = "old_vs_new_solver"
# result["sub_type"] = "prob_" + str(len(self.world.scene_items))
# result["sub_type"] = "donbot_full_new_solver"
# result["sub_type"] = "donbot_arm_new_solver"
result["sub_type"] = "donbot_arm_old_solver"
result["num_qp_iterations"] = self.robot.planner.sqp_solver.num_qp_iterations
result["num_sqp_iterations"] = self.robot.planner.sqp_solver.num_sqp_iterations
result["actual_reductions"] = self.robot.planner.sqp_solver.actual_reductions
result["predicted_reductions"] = self.robot.planner.sqp_solver.predicted_reductions
result["actual_costs"] = self.robot.planner.sqp_solver.actual_costs
result["model_costs"] = self.robot.planner.sqp_solver.model_costs
result["cost_improvement"] = self.robot.planner.sqp_solver.actual_reduction_improve
result["collision_check_time"] = self.world.collision_check_time
result["solving_time"] = self.robot.planner.sqp_solver.solving_time
result["prob_model_time"] = self.robot.planner.prob_model_time
# result["total_time"] = elapsed_time
result["planning_time"] = self.elapsed_time
result["is_collision_free"] = is_collision_free
result["planning_request"] = planning_request
result["initial_trajectory"] = self.robot.planner.trajectory.initial.tolist()
result["final_trajectory"] = [x.tolist() for x in self.robot.planner.trajectory.trajectory_by_name.values()]
planning_request["group"] = self.robot.planner.trajectory.trajectory_by_name.keys()
result["solver_config"] = self.robot.planner.sqp_solver.solver_config
self.db_driver.insert(result)
def log_infos(self):
self.logger.debug("number of qp iterations: " + str(self.robot.planner.sqp_solver.num_qp_iterations))
self.logger.debug("number of sqp iterations: " + str(self.robot.planner.sqp_solver.num_sqp_iterations))
self.logger.debug("actual_reductions: " + str(self.robot.planner.sqp_solver.actual_reductions))
self.logger.debug("predicted_reductions: " + str(self.robot.planner.sqp_solver.predicted_reductions))
self.logger.debug("actual_costs: " + str(self.robot.planner.sqp_solver.actual_costs))
self.logger.debug("model_costs: "+ str(self.robot.planner.sqp_solver.model_costs))
self.logger.debug("number of qp iterations: " + str(self.robot.planner.sqp_solver.num_qp_iterations))
self.logger.debug("number of sqp iterations: " + str(self.robot.planner.sqp_solver.num_sqp_iterations))
self.logger.debug("actual reduction improvement: " + str(self.robot.planner.sqp_solver.actual_reduction_improve))
self.logger.debug("predicted reduction improvement: " + str(self.robot.planner.sqp_solver.predicted_reduction_improve))
self.logger.debug("actual cost improvement: " + str(self.robot.planner.sqp_solver.actual_cost_improve))
self.logger.debug("model cost improvement: " + str(self.robot.planner.sqp_solver.model_cost_improve))
self.logger.debug("collision check time: " + str(self.world.collision_check_time))
self.logger.debug("solving_time: " + str(self.robot.planner.sqp_solver.solving_time))
self.logger.debug("prob_model_time: " + str(self.robot.planner.prob_model_time))
self.logger.debug("total elapsed_time time: " + str(self.elapsed_time))
|
155030
|
from ploomber.clients import SQLAlchemyClient
def get_client():
return SQLAlchemyClient('sqlite:///data.db')
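# Note (assumption about the project layout, not part of the original file): Ploomber
# typically resolves this factory from pipeline.yaml, e.g.
#   clients:
#     SQLScript: clients.get_client
# so SQL tasks share the SQLAlchemy client returned above.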
|
155068
|
from django.shortcuts import render
from django.core import serializers
from django.http import HttpResponse
from django.contrib.auth.models import User
from django.contrib.auth.decorators import login_required
from django.conf import settings
import json
import datetime
from registrar.models import Course
from registrar.models import CourseFinalMark
from registrar.models import Student
from registrar.models import AssignmentSubmission
from registrar.models import QuizSubmission
from registrar.models import ExamSubmission
@login_required(login_url='/landpage')
def credit_page(request, course_id):
course = Course.objects.get(id=course_id)
student = Student.objects.get(user=request.user)
try:
final_mark = CourseFinalMark.objects.get(
course=course,
student=student,
)
except CourseFinalMark.DoesNotExist:
final_mark = None
return render(request, 'course/credit/list.html',{
'course' : course,
'final_mark': final_mark,
'user' : request.user,
'tab' : 'credit',
'HAS_ADVERTISMENT': settings.APPLICATION_HAS_ADVERTISMENT,
'local_css_urls' : settings.SB_ADMIN_2_CSS_LIBRARY_URLS,
'local_js_urls' : settings.SB_ADMIN_2_JS_LIBRARY_URLS,
})
# Function will verify that all criteria to pass the course are met and
# make a record of user completing this course. If user does not meet
# criteria an error will be returned.
@login_required()
def submit_credit_application(request, course_id):
if request.is_ajax():
if request.method == 'POST':
# Fetch from database
course = Course.objects.get(id=course_id)
student = Student.objects.get(user=request.user)
try:
a_submissions = AssignmentSubmission.objects.filter(
assignment__course=course,
student=student,
)
except AssignmentSubmission.DoesNotExist:
a_submissions = None
try:
q_submissions = QuizSubmission.objects.filter(
quiz__course=course,
student=student,
)
except QuizSubmission.DoesNotExist:
q_submissions = None
try:
e_submissions = ExamSubmission.objects.filter(
exam__course=course,
student=student,
)
except ExamSubmission.DoesNotExist:
e_submissions = None
# Calculate the final mark for the course.
has_completed_final = False
final_percent = 0
for a_submission in a_submissions:
percent = (a_submission.assignment.worth / 100)
percent *= (a_submission.percent / 100)
final_percent += percent
for q_submission in q_submissions:
percent = (q_submission.quiz.worth / 100)
percent *= (q_submission.percent / 100)
final_percent += percent
for e_submission in e_submissions:
percent = (e_submission.exam.worth / 100)
percent *= (e_submission.percent / 100)
final_percent += percent
if e_submission.exam.is_final:
if e_submission.percent >= 50:
has_completed_final = True
final_percent *= 100
# Validation
if final_percent < 50:
response_data = {'status' : 'failure', 'message' : 'you need to pass the course with at least 50%'}
return HttpResponse(json.dumps(response_data), content_type="application/json")
if not has_completed_final:
response_data = {'status' : 'failure', 'message' : 'you need to pass the final exam with at least 50%'}
return HttpResponse(json.dumps(response_data), content_type="application/json")
# Create or fetch our final mark for this course
try:
final_mark = CourseFinalMark.objects.get(
course=course,
student=student,
)
final_mark.percent = final_percent
final_mark.save()
except CourseFinalMark.DoesNotExist:
final_mark = CourseFinalMark.objects.create(
course=course,
student=student,
percent = final_percent,
)
final_mark.save()
response_data = {'status' : 'success', 'message' : 'credit granted'}
return HttpResponse(json.dumps(response_data), content_type="application/json")
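# Worked example (sketch): an assignment worth 40% scored at 80% contributes
# (40/100) * (80/100) = 0.32; a quiz worth 20% at 50% adds 0.10 and a final exam
# worth 40% at 75% adds 0.30, giving 0.72 -> 72% after the *100 scaling, which
# clears the 50% course threshold checked above.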
@login_required(login_url='/landpage')
def error_credits_modal(request, course_id, submission_id):
response_data = {}
if request.is_ajax():
if request.method == 'POST':
# Check to see if any fields were missing from the form.
return render(request, 'course/peer_review/review_modal.html',{
'user': request.user,
})
|
155141
|
import socket
if socket.gethostname() == 'Faramir': #for CNN_B
data_root = '/home/tencia/Documents/data/heart/'
data_kaggle = data_root + 'kaggle'
data_sunnybrook = data_root + 'sunnybrook'
local_root = '/home/tencia/Dropbox/heart/diagnose-heart/'
data_manual = local_root + 'manual_data'
data_intermediate = local_root + 'data_intermediate'
output_dir = local_root + 'CNN_A/output/'
tencia_output_dir = local_root + 'CNN_B/output/'
else: #for CNN_A
data_root = "/media/qiliu/share/heart/";
data_sunnybrook = data_root + '/sunnybrook';
data_kaggle = data_root;
local_dir = '/home/qiliu/Documents/Coding/heart/diagnose-heart/';
manual_data_root = local_dir + 'manual_data'
data_aug_contours = manual_data_root + '/manual_contours';
intermediate_dir = local_dir + 'CNN_A';
params_dir = intermediate_dir + '/params/'
output_dir = intermediate_dir + '/output/'
tencia_output_dir = local_dir + 'CNN_B/output'
|
155161
|
from __future__ import annotations
from e2cnn import gspaces
from e2cnn import kernels
from e2cnn import diffops
from .general_r2 import GeneralOnR2
from .utils import rotate_array
from e2cnn.group import Representation
from e2cnn.group import Group
from e2cnn.group import DihedralGroup
from e2cnn.group import O2
from e2cnn.group import dihedral_group
from e2cnn.group import o2_group
import numpy as np
from typing import Tuple, Union, Callable, List
__all__ = ["FlipRot2dOnR2"]
class FlipRot2dOnR2(GeneralOnR2):
def __init__(self, N: int = None, maximum_frequency: int = None, axis: float = np.pi / 2, fibergroup: Group = None):
r"""
Describes reflectional and rotational symmetries of the plane :math:`\R^2`.
Reflections are applied with respect to the line through the origin with an angle ``axis`` degrees with respect
to the *X*-axis.
If ``N > 1``, the class models reflections and *discrete* rotations by angles multiple of :math:`\frac{2\pi}{N}`
(:class:`~e2cnn.group.DihedralGroup`).
Otherwise, if ``N=-1``, the class models reflections and *continuous* planar rotations
(:class:`~e2cnn.group.O2`).
In that case the parameter ``maximum_frequency`` is required to specify the maximum frequency of the irreps of
:class:`~e2cnn.group.O2` (see its documentation for more details)
.. note ::
All axes obtained from the axis defined by ``axis`` with a rotation in the symmetry group are equivalent.
For instance, if ``N = 4``, an axis :math:`\beta` is equivalent to the axis :math:`\beta + \pi/2`.
It follows that for ``N = -1``, i.e. in case the symmetry group contains all continuous rotations, any
reflection axis is theoretically equivalent.
In practice, though, a basis for equivariant convolutional filters sampled on a grid is affected by the
specific choice of the axis. In general, choosing an axis aligned with the grid (a horizontal or a
vertical axis, i.e. :math:`0` or :math:`\pi/2`) is suggested.
Args:
N (int): number of discrete rotations (integer greater than 1) or -1 for continuous rotations
maximum_frequency (int): maximum frequency of :class:`~e2cnn.group.O2` 's irreps if ``N = -1``
axis (float, optional): the slope of the axis of the flip (in radians)
fibergroup (Group, optional): use an already existing instance of the symmetry group.
In that case only the parameter ``axis`` should be used.
Attributes:
~.axis (float): Angle with respect to the horizontal axis which defines the reflection axis.
"""
assert N is not None or fibergroup is not None, "Error! Either use the parameter `N` or the parameter `group`!"
if fibergroup is not None:
assert isinstance(fibergroup, DihedralGroup) or isinstance(fibergroup, O2)
assert maximum_frequency is None, "Maximum Frequency can't be set when the group is already provided in input"
N = fibergroup.rotation_order
assert isinstance(N, int)
self.axis = axis
if N > 1:
assert maximum_frequency is None, "Maximum Frequency can't be set for finite cyclic groups"
name = 'Flip_{}-Rotations(f={:.5f})'.format(N, self.axis)
elif N == -1:
name = 'Flip_Continuous-Rotations(f={:.5f})'.format(self.axis)
# self.axis = np.pi/2
else:
raise ValueError(f'Error! "N" has to be an integer greater than 1 or -1, but got {N}')
if fibergroup is None:
if N > 1:
fibergroup = dihedral_group(N)
elif N == -1:
fibergroup = o2_group(maximum_frequency)
super(FlipRot2dOnR2, self).__init__(fibergroup, name)
def restrict(self, id: Tuple[Union[None, float, int], int]) -> Tuple[gspaces.GSpace, Callable, Callable]:
r"""
Build the :class:`~e2cnn.group.GSpace` associated with the subgroup of the current fiber group identified by
the input ``id``, which is a tuple :math:`(k, M)`.
Here, :math:`M` is a positive integer indicating the number of discrete rotations in the subgroup while
:math:`k` is either ``None`` (no reflections) or an angle indicating the axis of reflection.
If the current fiber group is :math:`D_N` (:class:`~e2cnn.group.DihedralGroup`), then :math:`M` needs to divide
:math:`N` and :math:`k` needs to be an integer in :math:`\{0, \dots, \frac{N}{M}-1\}`.
Otherwise, :math:`M` can be any positive integer while :math:`k` needs to be a real number in
:math:`[0, \frac{2\pi}{M}]`.
Valid combinations are:
- (``None``, :math:`1`): restrict to no reflection and rotation symmetries
- (``None``, :math:`M`): restrict to only the :math:`M` rotations generated by :math:`r_{2\pi/M}`.
- (:math:`0`, :math:`1`): restrict to only reflections :math:`\langle f \rangle` around the same axis as in the current group
- (:math:`0`, :math:`M`): restrict to reflections and :math:`M` rotations generated by :math:`r_{2\pi/M}` and :math:`f`
If the current fiber group is :math:`D_N` (an instance of :class:`~e2cnn.group.DihedralGroup`):
- (:math:`k`, :math:`M`): restrict to reflections :math:`\langle r_{k\frac{2\pi}{N}} f \rangle` around the axis of the current G-space rotated by :math:`k\frac{\pi}{N}` and :math:`M` rotations generated by :math:`r_{2\pi/M}`
If the current fiber group is :math:`O(2)` (an instance of :class:`~e2cnn.group.O2`):
- (:math:`\theta`, :math:`M`): restrict to reflections :math:`\langle r_{\theta} f \rangle` around the axis of the current G-space rotated by :math:`\frac{\theta}{2}` and :math:`M` rotations generated by :math:`r_{2\pi/M}`
- (``None``, :math:`-1`): restrict to all (continuous) rotations
Args:
id (tuple): the id of the subgroup
Returns:
a tuple containing
- **gspace**: the restricted gspace
- **back_map**: a function mapping an element of the subgroup to itself in the fiber group of the original space
- **subgroup_map**: a function mapping an element of the fiber group of the original space to itself in the subgroup (returns ``None`` if the element is not in the subgroup)
"""
subgroup, mapping, child = self.fibergroup.subgroup(id)
if id[0] is not None:
# the new flip axis is the previous one rotated by the new chosen axis for the flip
# notice that the actual group element used to generate the subgroup does not correspond to the flip axis
# but to 2 times that angle
if self.fibergroup.order() > 1:
n = self.fibergroup.rotation_order
rotation = id[0] * 2.0 * np.pi / n
else:
rotation = id[0]
new_axis = divmod(self.axis + 0.5*rotation, 2*np.pi)[1]
if id[0] is None and id[1] == 1:
return gspaces.TrivialOnR2(fibergroup=subgroup), mapping, child
elif id[0] is None and (id[1] > 1 or id[1] == -1):
return gspaces.Rot2dOnR2(fibergroup=subgroup), mapping, child
elif id[0] is not None and id[1] == 1:
return gspaces.Flip2dOnR2(fibergroup=subgroup, axis=new_axis), mapping, child
elif id[0] is not None:
return gspaces.FlipRot2dOnR2(fibergroup=subgroup, axis=new_axis), mapping, child
else:
raise ValueError(f"id {id} not recognized!")
def _basis_generator(self,
in_repr: Representation,
out_repr: Representation,
rings: List[float],
sigma: List[float],
**kwargs,
) -> kernels.KernelBasis:
r"""
Method that builds the analytical basis that spans the space of equivariant filters which
are intertwiners between the representations induced from the representation ``in_repr`` and ``out_repr``.
If this :class:`~e2cnn.group.GSpace` includes only a discrete number of rotations (``n > 1``), either
``maximum_frequency`` or ``maximum_offset`` must be set in the keyword arguments.
Args:
in_repr: the input representation
out_repr: the output representation
rings: radii of the rings where to sample the bases
sigma: parameters controlling the width of each ring where the bases are sampled.
Keyword Args:
maximum_frequency (int): the maximum frequency allowed in the basis vectors
maximum_offset (int): the maximum frequencies offset for each basis vector with respect to its base ones (sum and difference of the frequencies of the input and the output representations)
Returns:
the basis built
"""
if self.fibergroup.order() > 0:
maximum_frequency = None
maximum_offset = None
if 'maximum_frequency' in kwargs and kwargs['maximum_frequency'] is not None:
maximum_frequency = kwargs['maximum_frequency']
assert isinstance(maximum_frequency, int) and maximum_frequency >= 0
if 'maximum_offset' in kwargs and kwargs['maximum_offset'] is not None:
maximum_offset = kwargs['maximum_offset']
assert isinstance(maximum_offset, int) and maximum_offset >= 0
assert (maximum_frequency is not None or maximum_offset is not None), \
'Error! Either the maximum frequency or the maximum offset for the frequencies must be set'
return kernels.kernels_DN_act_R2(in_repr, out_repr, rings, sigma,
axis=self.axis,
max_frequency=maximum_frequency,
max_offset=maximum_offset)
else:
return kernels.kernels_O2_act_R2(in_repr, out_repr, rings, sigma, axis=self.axis)
def _diffop_basis_generator(self,
in_repr: Representation,
out_repr: Representation,
max_power: int,
** kwargs,
) -> diffops.DiffopBasis:
r"""
Method that builds the analytical basis that spans the space of equivariant PDOs which
are intertwiners between the representations induced from the representation ``in_repr`` and ``out_repr``.
If this :class:`~e2cnn.group.GSpace` includes only a discrete number of rotations (``n > 1``), either
``maximum_frequency`` or ``maximum_offset`` must be set in the keyword arguments.
Args:
in_repr: the input representation
out_repr: the output representation
max_power (int): the maximum power of Laplacians to use
Keyword Args:
maximum_frequency (int): the maximum frequency allowed in the basis vectors
maximum_offset (int): the maximum frequencies offset for each basis vector with respect to its base ones
(sum and difference of the frequencies of the input and the output representations)
Returns:
the basis built
"""
if self.fibergroup.order() > 0:
maximum_frequency = None
maximum_offset = None
if 'maximum_frequency' in kwargs and kwargs['maximum_frequency'] is not None:
maximum_frequency = kwargs['maximum_frequency']
assert isinstance(maximum_frequency, int) and maximum_frequency >= 0
if 'maximum_offset' in kwargs and kwargs['maximum_offset'] is not None:
maximum_offset = kwargs['maximum_offset']
assert isinstance(maximum_offset, int) and maximum_offset >= 0
assert (maximum_frequency is not None or maximum_offset is not None), \
'Error! Either the maximum frequency or the maximum offset for the frequencies must be set'
return diffops.diffops_DN_act_R2(in_repr, out_repr, max_power,
axis=self.axis,
max_frequency=maximum_frequency,
max_offset=maximum_offset)
else:
return diffops.diffops_O2_act_R2(in_repr, out_repr, max_power, axis=self.axis)
def _basespace_action(self, input: np.ndarray, element: Tuple[int, Union[float, int]]) -> np.ndarray:
assert self.fibergroup.is_element(element)
if self.fibergroup.order() > 1:
n = self.fibergroup.rotation_order
rotation = element[1] * 2.0 * np.pi / n
else:
rotation = element[1]
output = input
if element[0]:
output = output[..., ::-1, :]
rotation += 2*self.axis
if rotation != 0.:
output = rotate_array(output, rotation)
else:
output = output.copy()
return output
def __eq__(self, other):
if isinstance(other, FlipRot2dOnR2):
return self.fibergroup == other.fibergroup
else:
return False
def __hash__(self):
return hash(self.name)
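# Usage sketch (illustrative only, based on the documented API above):
# from e2cnn import gspaces
# gs = gspaces.FlipRot2dOnR2(N=8)                             # D_8: reflections + 8 discrete rotations
# gs_cont = gspaces.FlipRot2dOnR2(N=-1, maximum_frequency=4)  # O(2): continuous rotations
# sub_gs, back_map, sub_map = gs.restrict((None, 4))          # keep only the 4 rotations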
|
155192
|
import netomaton as ntm
if __name__ == '__main__':
network = ntm.topology.cellular_automaton2d(60, 60, r=1, neighbourhood="Hex")
initial_conditions = ntm.init_simple2d(60, 60)
def activity_rule(ctx):
return 1 if sum(ctx.neighbourhood_activities) == 1 else ctx.current_activity
trajectory = ntm.evolve(initial_conditions=initial_conditions, network=network, timesteps=31,
activity_rule=activity_rule)
ntm.animate_hex(trajectory, shape=(60, 60), interval=150)
|
155198
|
from ..utils import load_pretrained
from .blocks import ResNet, ResNetBasicBlock, ResNetBlock
__all__ = ['resnet18', 'resnet34', 'resnet50', 'resnet101', 'resnet152']
META = {
'resnet18': ['resnet18.pth', 'https://drive.google.com/open?id=1d-AgSMO7HcKihDEXHqzDX2dAxYtH7N2m'],
'resnet34': ['resnet34.pth', 'https://drive.google.com/open?id=1HiUX27F3Luu27K3rfLc9dUXzwmwcBVpZ'],
'resnet50': ['resnet50.pth', 'https://drive.google.com/open?id=10aA1Qex-5CvzZMCmHaWMMfeFToN3DHkr'],
'resnet101': ['resnet101.pth', 'https://drive.google.com/open?id=1rbgoMnCHNGHHXbJhYiOsgAMGYkeQ7tvP'],
'resnet152': ['resnet152.pth', 'https://drive.google.com/open?id=15fAZ1PJ6ESiIkRG55TwKr_vMZ5xhHtGu']
}
def resnet18(pretrained=False, nc=1000, dest=None):
m = ResNet(ResNetBasicBlock, [2, 2, 2, 2], nc=nc)
return load_pretrained(m, META['resnet18'], dest, pretrained)
def resnet34(pretrained=False, nc=1000, dest=None):
m = ResNet(ResNetBasicBlock, [3, 4, 6, 3], nc=nc)
return load_pretrained(m, META['resnet34'], dest, pretrained)
def resnet50(pretrained=False, nc=1000, dest=None):
m = ResNet(ResNetBlock, [3, 4, 6, 3], nc=nc)
return load_pretrained(m, META['resnet50'], dest, pretrained)
def resnet101(pretrained=False, nc=1000, dest=None):
m = ResNet(ResNetBlock, [3, 4, 23, 3], nc=nc)
return load_pretrained(m, META['resnet101'], dest, pretrained)
def resnet152(pretrained=False, nc=1000, dest=None):
m = ResNet(ResNetBlock, [3, 8, 36, 3], nc=nc)
return load_pretrained(m, META['resnet152'], dest, pretrained)
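# Usage sketch (illustrative only; downloading weights depends on `load_pretrained`
# and the Google Drive links in META):
# model = resnet50(pretrained=True, nc=1000)   # ImageNet-sized head
# model = resnet18(pretrained=False, nc=10)    # fresh weights, 10-class head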
|
155248
|
from keras.models import *
from keras.callbacks import *
import keras.backend as K
from model import *
from data import *
import cv2
import argparse
import os
import numpy as np
import pydot, graphviz
from keras.utils import np_utils, plot_model
def visualize_class_activation_map(model_path, img_path, output_path, run_count, write_to_file, post_processing):
model = model_path
model_seq = model_path
original_img = cv2.imread(img_path, 1)
original_img = cv2.resize(original_img, (224, 224))
width, height, _ = original_img.shape
img = original_img.reshape(1, 3, width, height)
# Get the 512 input weights to the softmax.
model = Model(inputs=model.input, outputs=model.layers[32].output)
plot_model(model, to_file="vgg_original.png", show_shapes=True)
class_weights = model.layers[-1].get_weights()[0]
class_weights2 = model.layers[-1].get_weights()[1]
final_conv = model.layers[29]
get_output = K.function([model.layers[0].input], [final_conv.output, model.layers[-1].output])
[conv_outputs, predictions] = get_output([img])
predicted_class = np.argmax(predictions)
predicted_class_crosscheck = model_seq.predict_classes(img)
print(predicted_class)
print(predicted_class_crosscheck)
if predicted_class == 0:
predicted_class = "happiness"
elif predicted_class == 1:
predicted_class = "disgust"
elif predicted_class == 2:
predicted_class = "repression"
elif predicted_class == 3:
predicted_class = "surprise"
else:
predicted_class = "others"
conv_outputs = conv_outputs[0, :, :, :]
# Create the class activation map.
cam = np.zeros(dtype = np.float32, shape = conv_outputs.shape[1:3])
for i, w in enumerate(class_weights[:, 1]):
cam += w * conv_outputs[i, :, :]
cam /= np.max(cam)
cam = cv2.resize(cam, (height, width))
heatmap = cv2.applyColorMap(np.uint8(255*cam), cv2.COLORMAP_JET)
heatmap[np.where(cam < 0.2)] = 0 # tunable
if post_processing == False:
img = heatmap*0.5 + original_img
else:
heatmap_output = heatmap * 0.5
return heatmap_output, predicted_class, original_img
output_path = output_path + "_" + predicted_class + ".jpg"
if write_to_file:
cv2.imwrite(output_path, img)
def get_args():
parser = argparse.ArgumentParser()
parser.add_argument("--cam", type = bool, default = False, help = 'Train the network or visualize a CAM')
parser.add_argument("--image_path", type = str, help = "Path of an image to run the network on")
parser.add_argument("--output_path", type = str, default = "heatmap.jpg", help = "Path of an image to run the network on")
parser.add_argument("--model_path", type = str, help = "Path of the trained model")
parser.add_argument("--dataset_path", type = str, help = \
'Path to image dataset. Should have pos/neg folders, like in the inria person dataset. \
http://pascal.inrialpes.fr/data/human/')
parser.add_argument("--post_processing", type = bool, default = False, help = 'aggregate the output of cam which are heatmaps')
parser.add_argument("--write_to_file", type = bool, default = False, help = 'export the output?')
args = parser.parse_args()
return args
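# Example invocation (sketch; the flags are the ones defined in get_args above, while
# the script name and file paths are placeholders):
#   python cam.py --cam True --model_path vgg_spatial_cam.h5 --image_path some_frame.jpg --write_to_file True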
if __name__ == '__main__':
args = get_args()
if args.cam == True:
model_path = 'vgg_spatial_ID_12.h5'
img_path_array = []
img_path = '/media/ice/OS/Datasets/CASME2_TIM/CASME2_TIM/'
out_path = '/home/ice/Documents/Micro-Expression/External-Tools/keras-cam/CASME2_output/'
first_run = 1
for root, dirnames, filenames in os.walk(img_path):
if len(dirnames) > 0:
if first_run == 1:
first_run = 0
subject_path_array = dirnames
else:
img_path_array += [dirnames]
files = filenames
counter = 0
final_path = np.empty([0])
output_path = np.empty([0])
delete_path = np.empty([0])
IgnoredSamples = ['sub09/EP13_02/','sub09/EP02_02f/','sub10/EP13_01/','sub17/EP15_01/',
'sub17/EP15_03/','sub19/EP19_04/','sub24/EP10_03/','sub24/EP07_01/',
'sub24/EP07_04f/','sub24/EP02_07/','sub26/EP15_01/']
ignore_flag = 0
for subject in subject_path_array:
path_array = img_path_array[counter]
for item in path_array:
for file in files:
path_to_parse = img_path + str(subject) + '/' + str(item) + '/' + str(file)
delete_parse = out_path + str(subject) + '/' + str(item) + '/' + str(file)
file = file[0:3]
out_parse = out_path + str(subject) + '/' + str(item) + '/' + str(file)
for ignorance in IgnoredSamples:
if ignorance in path_to_parse:
ignore_flag = 1
if ignore_flag == 0:
final_path = np.append(final_path, path_to_parse)
output_path = np.append(output_path, out_parse)
delete_path = np.append(delete_path, delete_parse)
else:
ignore_flag = 0
counter += 1
# clear all the pictures inside each folder
# print(delete_path)
for item in delete_path:
os.remove(item)
heatmap_count = 0
run_count = 13
model = VGG_16('vgg_spatial_cam.h5')
for item in final_path:
heatmap_path = output_path[heatmap_count]
visualize_class_activation_map(model, item, heatmap_path, run_count, args.write_to_file, args.post_processing)
run_count += 13
heatmap_count += 1
print(str(heatmap_count) + "/2460 processed" + "\n")
elif args.post_processing == True:
img_path_array = []
img_path = '/media/ice/OS/Datasets/CASME2_TIM/CASME2_TIM/'
out_path = '/home/ice/Documents/Micro-Expression/External-Tools/keras-cam/CASME2_output/'
first_run = 1
for root, dirnames, filenames in os.walk(img_path):
if len(dirnames) > 0:
if first_run == 1:
first_run = 0
subject_path_array = dirnames
else:
img_path_array += [dirnames]
files = filenames
counter = 0
final_path = np.empty([0])
output_path = np.empty([0])
delete_path = np.empty([0])
IgnoredSamples = ['sub09/EP13_02/','sub09/EP02_02f/','sub10/EP13_01/','sub17/EP15_01/',
'sub17/EP15_03/','sub19/EP19_04/','sub24/EP10_03/','sub24/EP07_01/',
'sub24/EP07_04f/','sub24/EP02_07/','sub26/EP15_01/']
ignore_flag = 0
for subject in subject_path_array:
path_array = img_path_array[counter]
for item in path_array:
for file in files:
path_to_parse = img_path + str(subject) + '/' + str(item) + '/' + str(file)
delete_parse = out_path + str(subject) + '/' + str(item) + '/' + str(file)
file = file[0:3]
out_parse = out_path + str(subject) + '/' + str(item) + '/' + str(file)
for ignorance in IgnoredSamples:
if ignorance in path_to_parse:
ignore_flag = 1
if ignore_flag == 0:
final_path = np.append(final_path, path_to_parse)
output_path = np.append(output_path, out_parse)
delete_path = np.append(delete_path, delete_parse)
else:
ignore_flag = 0
counter += 1
heatmap_count = 0
run_count = 13
model = VGG_16('vgg_spatial_cam.h5')
is_heatmap_accumulator_empty = True
output_path = "/home/ice/Documents/Micro-Expression/External-Tools/keras-cam/postprocessed_heatmap/"
counter_for_heatmap_file = 1
# heatmap_accumulator = []
for item in final_path:
# heatmap_path = output_path[heatmap_count]
heatmap_path = output_path
heatmap, predicted_class, original_img = visualize_class_activation_map(model, item, heatmap_path, run_count, args.write_to_file, args.post_processing)
##### accumulate heatmap #####
# initial
if heatmap_count % 10 == 0 and is_heatmap_accumulator_empty:
heatmap_accumulator = heatmap
is_heatmap_accumulator_empty = False
heatmap_count += 1
# accumulation
elif heatmap_count % 10 != 0:
heatmap_accumulator += heatmap
heatmap_count += 1
# write to file
if heatmap_count % 10 == 0 and is_heatmap_accumulator_empty == False:
filename = output_path + str(counter_for_heatmap_file) + "_" + predicted_class + ".png"
# heatmap_accumulator = heatmap_accumulator + original_img
# overlay on top of a face
cv2.imwrite(filename, heatmap_accumulator)
counter_for_heatmap_file += 1
is_heatmap_accumulator_empty = True
run_count += 13
# heatmap_count += 1
print(str(heatmap_count) + "/2460 processed" + "\n")
|
155275
|
from scramjet.streams import Stream, StreamAlreadyConsumed
import asyncio
import pytest
@pytest.mark.asyncio
async def test_simple_stream_piping():
s1 = Stream.read_from(range(8)).map(lambda x: 2*x)
s2 = Stream().filter(lambda x: x > 5)
s1.pipe(s2)
assert await s2.to_list() == [6, 8, 10, 12, 14]
@pytest.mark.asyncio
async def test_piping_to_multiple_targets():
source = Stream.read_from(range(8), max_parallel=4).map(lambda x: x+1)
s1 = Stream(max_parallel=4).map(lambda x: x/10)
s2 = Stream(max_parallel=4).map(lambda x: x*10)
source.pipe(s1)
source.pipe(s2)
result1, result2 = await asyncio.gather(s1.to_list(), s2.to_list())
assert result1 == [0.1, 0.2, 0.3, 0.4, 0.5, 0.6, 0.7, 0.8]
assert result2 == [10, 20, 30, 40, 50, 60, 70, 80]
@pytest.mark.asyncio
async def test_piped_stream_cannot_be_transformed():
s1 = Stream.read_from(range(8)).map(lambda x: 2*x)
s2 = Stream()
s1.pipe(s2)
with pytest.raises(StreamAlreadyConsumed):
s1.map(lambda x: x+1)
await s2.to_list()
|
155328
|
from collections import namedtuple
from datetime import datetime
def parse_datetime(datetime_str):
if datetime_str:
return datetime.strptime(datetime_str, "%Y-%m-%dT%H:%M:%S.%fZ")
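# e.g. parse_datetime("2021-03-01T12:30:45.123456Z") -> datetime(2021, 3, 1, 12, 30, 45, 123456)
# (returns None for empty/None input, since the `if` above falls through)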
class Price(
namedtuple(
"price",
[
"quantity",
"vwap",
"price",
"fees",
"total",
"json",
],
)
):
@classmethod
def create_from_json(cls, price):
return cls(
quantity=float(price["quantity"]),
vwap=float(price["vwap"]),
price=float(price["price"]),
fees=float(price["fees"]),
total=float(price["total"]),
json=price,
)
class OrderBookEntry(
namedtuple(
"book_entry",
[
"price",
"amount",
"orders",
],
)
):
@classmethod
def create_from_json(cls, book_entry):
return cls(
price=book_entry["price"],
amount=book_entry["amount"],
orders=book_entry["orders"],
)
class OrderBook(
namedtuple(
"order_book",
[
"bids",
"asks",
"timestamp",
"last_price",
"json",
],
)
):
@classmethod
def create_from_json(cls, order_book):
return cls(
bids=[
OrderBookEntry.create_from_json(book_entry)
for book_entry in order_book["bids"]
],
asks=[
OrderBookEntry.create_from_json(book_entry)
for book_entry in order_book["asks"]
],
timestamp=order_book["timestamp"],
last_price=order_book["last_price"],
json=order_book,
)
class Trade(
namedtuple(
"trade",
[
"id",
"uuid",
"engine_id",
"pair",
"amount",
"price",
"created_at",
],
)
):
@classmethod
def create_from_json(cls, trade):
return cls(
id=trade["id"],
uuid=trade["uuid"],
engine_id=trade["engine_id"],
pair=trade["pair"],
amount=trade["amount"],
price=float(trade["price"]),
created_at=parse_datetime(trade["created_at"]),
)
class Trades(
namedtuple(
"trades",
[
"count",
"next",
"previous",
"results",
"json",
],
)
):
@classmethod
def create_from_json(cls, trades):
return cls(
count=trades["count"],
next=trades["next"],
previous=trades["previous"],
results=[Trade.create_from_json(trade) for trade in trades["results"]],
json=trades,
)
class Rates(
namedtuple(
"rates",
[
"base",
"rates",
"json",
],
)
):
@classmethod
def create_from_json(cls, rates):
return cls(
base=rates["base"],
rates=rates["rates"],
json=rates,
)
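# Example (sketch): building a Price from an API-style payload; numeric fields arrive
# as strings and are coerced to float, while the raw dict is kept on `.json`.
# p = Price.create_from_json({"quantity": "1.5", "vwap": "100.0", "price": "101.0",
#                             "fees": "0.1", "total": "151.6"})
# p.total  # -> 151.6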
|
155342
|
from flask_script import Manager
from app import application
manager = Manager(application)
# Not sure if I need a database yet
# db = SQLAlchemy(application)
# migrate = Migrate(application, db)
# manager.add_command('db', MigrateCommand)
if __name__ == '__main__':
manager.run()
|
155373
|
import os
from billy.utils.generic import get_git_rev
here = os.path.abspath(os.path.dirname(__file__))
VERSION = '0.0.0'
version_path = os.path.join(here, 'version.txt')
if os.path.exists(version_path):
with open(version_path, 'rt') as verfile:
VERSION = verfile.read().strip()
REVISION = None
revision_path = os.path.join(here, 'revision.txt')
if os.path.exists(revision_path):
with open(revision_path, 'rt') as rerfile:
REVISION = rerfile.read().strip()
# if the revision could not be read from a file, try to get it from the .git folder
if REVISION is None:
REVISION = get_git_rev()
|
155387
|
import glob,sys
import numpy as np
sys.path.append('../../flu/src')
import test_flu_prediction as test_flu
import matplotlib.pyplot as plt
import analysis_utils_toy_data as AU
file_formats = ['.svg', '.pdf']
plt.rcParams.update(test_flu.mpl_params)
line_styles = ['-', '--', '-.']
cols = ['b', 'r', 'g', 'c', 'm', 'k', 'y']
cols+=cols
figure_folder = '../figures/'
data_dir = '../data_new'
prefix= '/20140820_'
N_list = [20000] #,20000]
mu_list = [1e-6,2e-6, 4e-6, 8e-6, 16e-6, 32e-6, 64e-6, 128e-6]
#nflip_list = [0.02,0.04, 0.08, 0.16]
gamma_list = [1.0] #, 2.0,3.0, 5.0]
omega_list = [0.3]
nflip_list = [0.04, 0.08]
sdt_list = [1,100] #determines whether 2 genomes are sampled every generation, or 200 every 100 gen
pred, norm_pred, run_stats = AU.load_prediction_data(prefix, N_list, mu_list, nflip_list,
sdt_list, return_mean=True)
valdt = 200
ssize = 200
D = 0.2
L=2000
mean_fitness_true_fitness_spearman_i = -4
for gamma in gamma_list:
for omega in omega_list:
for sdt in [1,100]:
plt.figure(figsize= (10,6))
ax = plt.subplot(111)
#plt.title(r'\omega='+str(omega)+',\;dt='+str(sdt)+'$')
for di,D in enumerate([0.2, 0.5]):
pred_label = ssize, gamma, D, omega, valdt
### PLOT FITNESS CORRELATION VS PAIRWISE DIVERSITY ###
for ni, N in enumerate(N_list):
for fi, nflip in enumerate(nflip_list):
plt.errorbar([run_stats[(N,mu,nflip,sdt)][-1] for mu in mu_list],
[pred[(N,mu,nflip,sdt)+pred_label][0][mean_fitness_true_fitness_spearman_i] for mu in mu_list],
[pred[(N,mu,nflip,sdt)+pred_label][2][mean_fitness_true_fitness_spearman_i] for mu in mu_list],
c=cols[fi], ls=line_styles[di], label = '$n_A = '+str(nflip)+',\;\Gamma='+str(D)+'$', lw=2)
plt.ylabel("Spearman's correlation")
plt.xlabel('average pairwise distance')
plt.xscale('log')
plt.legend(loc=4)
#add panel label
plt.text(0.02,0.9,'Fig.~2-S1', transform = plt.gca().transAxes, fontsize = 20)
plt.xlim([0.5, 200])
for ff in file_formats:
plt.savefig(figure_folder+'Fig2_S1_pairwise_diversity_vs_predictability_sdt_'+str(sdt)+'_gamma_'
+str(gamma)+'_valdt_'+str(valdt)+ff)
### PLOT prediction_success VS PAIRWISE DIVERSITY ###
plt.figure(figsize= (10,6))
ax = plt.subplot(111)
#plt.title(r'$\gamma='+str(gamma)+',\;\omega='+str(omega)+',\;dt='+str(sdt)+'$')
for ni, N in enumerate(N_list):
for fi, nflip in enumerate(nflip_list):
for di,D in enumerate([0.2, 0.5]):
plt.errorbar([run_stats[(N,mu,nflip,sdt)][-1] for mu in mu_list],
[norm_pred[(N,mu,nflip,sdt)+pred_label][0][1] for mu in mu_list],
[norm_pred[(N,mu,nflip,sdt)+pred_label][2][1] for mu in mu_list],
c=cols[fi], ls=line_styles[di], label = '$n_A = '+str(nflip)+'$')
plt.ylabel(r'distance $\bar{d}$ to future populations')
plt.xlabel('average pairwise distance')
#add panel label
plt.text(0.02,0.9,'Fig.~2-S2', transform = plt.gca().transAxes, fontsize = 20)
plt.xscale('log')
plt.legend(loc=1)
plt.xlim([0.5, 200])
for ff in file_formats:
plt.savefig(figure_folder+'Fig2_S2_pairwise_diversity_vs_distance_sdt_'+str(sdt)+'_gamma_'+str(gamma)+'_valdt_'+str(valdt)+ff)
#plt.close()
## plot gamma versus the number of predictions that are worse than random
# reload the data without averaging over the different realizations.
pred, norm_pred, run_stats = AU.load_prediction_data(prefix, N_list, mu_list, nflip_list,
sdt_list, return_mean=False)
#for sdt in [100]:
if len(gamma_list)>1:
for omega in omega_list:
### PLOT FITNESS CORRELATION VS DSCALE ###
plt.figure(figsize= (10,6))
ax = plt.subplot(111)
#plt.title(r'$\omega='+str(omega)+',\;dt='+str(sdt)+'$')
for mi,mu in enumerate(mu_list[3:6]):
for ni, N in enumerate(N_list):
for fi, nflip in enumerate(nflip_list[:]):
if mi==0:
label_str = r'$n_A ='+str(nflip)+'$'
else:
label_str = None
plt.plot(gamma_list, [np.mean(pred[(N,mu,nflip,sdt)+(ssize, gamma, D, omega, valdt)][:,0]<
pred[(N,mu,nflip,sdt)+(ssize, gamma, D, omega, valdt)][:,3])
for gamma in gamma_list], lw=2, marker='o', markersize=10,
ls=line_styles[mi], c=cols[fi], label = label_str)
#plt.xscale('log')
#add panel label
plt.text(0.02,0.9,'Fig.~2-S3', transform = plt.gca().transAxes, fontsize = 20)
plt.xlim([0.0, 5.5])
plt.ylabel('worse than random (out of 100)')
plt.xlabel(r'time rescaling $\gamma$')
plt.legend(loc=1,numpoints=1)
for ff in file_formats:
plt.savefig(figure_folder+'Fig2_S3_gamma_vs_predictability_sdt_'+str(sdt)+'_D_'+str(D)+'_w_'+str(omega)+'_valdt_'+str(valdt)+ff)
|
155416
|
import collections
import datetime
import itertools
import os
import subprocess
from hyperparameters_config import (paraphrase, inverse_paraphrase)
class SafeDict(dict):
def __missing__(self, key):
return '{' + key + '}'
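# e.g. "{lr} {epochs}".format_map(SafeDict(lr=0.1)) -> "0.1 {epochs}"
# (unknown placeholders are kept verbatim so templates can be filled in stages;
#  note the script below substitutes placeholders with str.replace instead)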
def get_run_id():
filename = "style_paraphrase/logs/expts.txt"
if os.path.isfile(filename) is False:
with open(filename, 'w') as f:
f.write("")
return 0
else:
with open(filename, 'r') as f:
expts = f.readlines()
run_id = len(expts) / 5
return run_id
other_dependencies = {
"memory": lambda x: int(x["ngpus"]) * 50 if x["gpu"] in ["m40", "titanx"] else int(x["ngpus"]) * 45,
"cpus": lambda x: int(x["ngpus"]) * 3
}
top_details = "GPT2 model for formality."
hyperparameters = inverse_paraphrase
run_id = int(get_run_id())
key_hyperparameters = [x[0] for x in hyperparameters]
value_hyperparameters = [x[1] for x in hyperparameters]
combinations = list(itertools.product(*value_hyperparameters))
scripts = []
eval_scripts = []
for combo in combinations:
# Write the scheduler scripts
with open("style_paraphrase/run_finetune_gpt2_template.sh", 'r') as f:
schedule_script = f.read()
with open("style_paraphrase/run_generation_gpt2_template.sh", 'r') as f:
generation_script = f.read()
with open("style_paraphrase/run_evaluate_gpt2_template.sh", 'r') as f:
evaluate_script = f.read()
combo = {k[0]: v for (k, v) in zip(key_hyperparameters, combo)}
for k, v in other_dependencies.items():
combo[k] = v(combo)
od = collections.OrderedDict(sorted(combo.items()))
lower_details = ""
for k, v in od.items():
lower_details += "%s = %s, " % (k, str(v))
# removing last comma and space
lower_details = lower_details[:-2]
combo["top_details"] = top_details
combo["lower_details"] = lower_details
combo["job_id"] = run_id
print("Scheduling Job #%d" % run_id)
for k, v in combo.items():
if "{%s}" % k in schedule_script:
schedule_script = schedule_script.replace("{%s}" % k, str(v))
for k, v in combo.items():
if "{%s}" % k in generation_script:
generation_script = generation_script.replace("{%s}" % k, str(v))
for k, v in combo.items():
if "{%s}" % k in evaluate_script:
evaluate_script = evaluate_script.replace("{%s}" % k, str(v))
schedule_script += "\n"
generation_script += "\n"
evaluate_script += "\n"
# Write schedule script
script_name = 'style_paraphrase/slurm-schedulers/schedule_%d.sh' % run_id
with open(script_name, 'w') as f:
f.write(schedule_script)
generation_script_name = 'style_paraphrase/slurm-schedulers/generate_%d.sh' % run_id
with open(generation_script_name, 'w') as f:
f.write(generation_script)
evaluate_script_name = 'style_paraphrase/slurm-schedulers/evaluate_%d.sh' % run_id
with open(evaluate_script_name, 'w') as f:
f.write(evaluate_script)
scripts.append(script_name)
eval_scripts.append(evaluate_script_name)
# Making files executable
subprocess.check_output('chmod +x %s' % script_name, shell=True)
subprocess.check_output('chmod +x %s' % generation_script_name, shell=True)
subprocess.check_output('chmod +x %s' % evaluate_script_name, shell=True)
# Update experiment logs
output = "Script Name = " + script_name + "\n" + \
datetime.datetime.now().strftime("%Y-%m-%d %H:%M") + "\n" + \
top_details + "\n" + \
lower_details + "\n\n"
with open("style_paraphrase/logs/expts.txt", "a") as f:
f.write(output)
# For the next job
run_id += 1
# schedule jobs
for script in scripts:
command = "sbatch %s" % script
print(subprocess.check_output(command, shell=True))
for script in eval_scripts:
command = "sbatch %s" % script
print(subprocess.check_output(command, shell=True))
|
155451
|
import matplotlib as mpl
import matplotlib.pyplot as plt
import numpy as np
import pytest
from cycler import cycler
def test_colorcycle_basic():
fig, ax = plt.subplots()
ax.set_prop_cycle(cycler('color', ['r', 'g', 'y']))
for _ in range(4):
ax.plot(range(10), range(10))
assert [l.get_color() for l in ax.lines] == ['r', 'g', 'y', 'r']
def test_marker_cycle():
fig, ax = plt.subplots()
ax.set_prop_cycle(cycler('c', ['r', 'g', 'y']) +
cycler('marker', ['.', '*', 'x']))
for _ in range(4):
ax.plot(range(10), range(10))
assert [l.get_color() for l in ax.lines] == ['r', 'g', 'y', 'r']
assert [l.get_marker() for l in ax.lines] == ['.', '*', 'x', '.']
def test_marker_cycle_kwargs_arrays_iterators():
fig, ax = plt.subplots()
ax.set_prop_cycle(c=np.array(['r', 'g', 'y']),
marker=iter(['.', '*', 'x']))
for _ in range(4):
ax.plot(range(10), range(10))
assert [l.get_color() for l in ax.lines] == ['r', 'g', 'y', 'r']
assert [l.get_marker() for l in ax.lines] == ['.', '*', 'x', '.']
def test_linestylecycle_basic():
fig, ax = plt.subplots()
ax.set_prop_cycle(cycler('ls', ['-', '--', ':']))
for _ in range(4):
ax.plot(range(10), range(10))
assert [l.get_linestyle() for l in ax.lines] == ['-', '--', ':', '-']
def test_fillcycle_basic():
fig, ax = plt.subplots()
ax.set_prop_cycle(cycler('c', ['r', 'g', 'y']) +
cycler('hatch', ['xx', 'O', '|-']) +
cycler('linestyle', ['-', '--', ':']))
for _ in range(4):
ax.fill(range(10), range(10))
assert ([p.get_facecolor() for p in ax.patches]
== [mpl.colors.to_rgba(c) for c in ['r', 'g', 'y', 'r']])
assert [p.get_hatch() for p in ax.patches] == ['xx', 'O', '|-', 'xx']
assert [p.get_linestyle() for p in ax.patches] == ['-', '--', ':', '-']
def test_fillcycle_ignore():
fig, ax = plt.subplots()
ax.set_prop_cycle(cycler('color', ['r', 'g', 'y']) +
cycler('hatch', ['xx', 'O', '|-']) +
cycler('marker', ['.', '*', 'D']))
t = range(10)
# Should not advance the cycler, even though there is an
# unspecified property in the cycler "marker".
# "marker" is not a Polygon property, and should be ignored.
ax.fill(t, t, 'r', hatch='xx')
# Allow the cycler to advance, but specify some properties
ax.fill(t, t, hatch='O')
ax.fill(t, t)
ax.fill(t, t)
assert ([p.get_facecolor() for p in ax.patches]
== [mpl.colors.to_rgba(c) for c in ['r', 'r', 'g', 'y']])
assert [p.get_hatch() for p in ax.patches] == ['xx', 'O', 'O', '|-']
def test_property_collision_plot():
fig, ax = plt.subplots()
ax.set_prop_cycle('linewidth', [2, 4])
t = range(10)
for c in range(1, 4):
ax.plot(t, t, lw=0.1)
ax.plot(t, t)
ax.plot(t, t)
assert [l.get_linewidth() for l in ax.lines] == [0.1, 0.1, 0.1, 2, 4]
def test_property_collision_fill():
fig, ax = plt.subplots()
ax.set_prop_cycle(linewidth=[2, 3, 4, 5, 6], facecolor='bgcmy')
t = range(10)
for c in range(1, 4):
ax.fill(t, t, lw=0.1)
ax.fill(t, t)
ax.fill(t, t)
assert ([p.get_facecolor() for p in ax.patches]
== [mpl.colors.to_rgba(c) for c in 'bgcmy'])
assert [p.get_linewidth() for p in ax.patches] == [0.1, 0.1, 0.1, 5, 6]
def test_valid_input_forms():
fig, ax = plt.subplots()
# These should not raise an error.
ax.set_prop_cycle(None)
ax.set_prop_cycle(cycler('linewidth', [1, 2]))
ax.set_prop_cycle('color', 'rgywkbcm')
ax.set_prop_cycle('lw', (1, 2))
ax.set_prop_cycle('linewidth', [1, 2])
ax.set_prop_cycle('linewidth', iter([1, 2]))
ax.set_prop_cycle('linewidth', np.array([1, 2]))
ax.set_prop_cycle('color', np.array([[1, 0, 0],
[0, 1, 0],
[0, 0, 1]]))
ax.set_prop_cycle('dashes', [[], [13, 2], [8, 3, 1, 3]])
ax.set_prop_cycle(lw=[1, 2], color=['k', 'w'], ls=['-', '--'])
ax.set_prop_cycle(lw=np.array([1, 2]),
color=np.array(['k', 'w']),
ls=np.array(['-', '--']))
def test_cycle_reset():
fig, ax = plt.subplots()
# Can't really test a reset because only a cycle object is stored
# but we can test the first item of the cycle.
prop = next(ax._get_lines.prop_cycler)
ax.set_prop_cycle(linewidth=[10, 9, 4])
assert prop != next(ax._get_lines.prop_cycler)
ax.set_prop_cycle(None)
got = next(ax._get_lines.prop_cycler)
assert prop == got
def test_invalid_input_forms():
fig, ax = plt.subplots()
with pytest.raises((TypeError, ValueError)):
ax.set_prop_cycle(1)
with pytest.raises((TypeError, ValueError)):
ax.set_prop_cycle([1, 2])
with pytest.raises((TypeError, ValueError)):
ax.set_prop_cycle('color', 'fish')
with pytest.raises((TypeError, ValueError)):
ax.set_prop_cycle('linewidth', 1)
with pytest.raises((TypeError, ValueError)):
ax.set_prop_cycle('linewidth', {1, 2})
with pytest.raises((TypeError, ValueError)):
ax.set_prop_cycle(linewidth=1, color='r')
with pytest.raises((TypeError, ValueError)):
ax.set_prop_cycle('foobar', [1, 2])
with pytest.raises((TypeError, ValueError)):
ax.set_prop_cycle(foobar=[1, 2])
with pytest.raises((TypeError, ValueError)):
ax.set_prop_cycle(cycler(foobar=[1, 2]))
with pytest.raises(ValueError):
ax.set_prop_cycle(cycler(color='rgb', c='cmy'))
|
155482
|
from sympy.solvers import solve
from sympy.simplify import simplify
def singularities(expr, sym):
"""
Finds singularities for a function.
Currently supported functions are:
- univariate real rational functions
Examples
========
>>> from sympy.calculus.singularities import singularities
>>> from sympy import Symbol
>>> x = Symbol('x', real=True)
>>> singularities(x**2 + x + 1, x)
()
>>> singularities(1/(x + 1), x)
(-1,)
References
==========
.. [1] http://en.wikipedia.org/wiki/Mathematical_singularity
"""
if not expr.is_rational_function(sym):
raise NotImplementedError("Algorithms finding singularities for"
" non rational functions are not yet"
" implemented")
else:
return tuple(sorted(solve(simplify(1/expr), sym)))
|
155484
|
import torch
from cogdl import oagbert
tokenizer, bert_model = oagbert()
bert_model.eval()
sequence = ["CogDL is developed by KEG, Tsinghua.", "OAGBert is developed by KEG, Tsinghua."]
tokens = tokenizer(sequence, return_tensors="pt", padding=True)
with torch.no_grad():
outputs = bert_model(**tokens)
print(outputs[0])
|
155485
|
SAMPLE_YEAR = 1983
SAMPLE_YEAR_SHORT = 83
SAMPLE_MONTH = 1
SAMPLE_DAY = 2
SAMPLE_HOUR = 15
SAMPLE_UTC_HOUR = 20
SAMPLE_HOUR_12H = 3
SAMPLE_MINUTE = 4
SAMPLE_SECOND = 5
SAMPLE_PERIOD = 'PM'
SAMPLE_OFFSET = '-00'
SAMPLE_LONG_TZ = 'UTC'
def create_sample(template: str) -> str:
return (
template
.replace('YYYY', str(SAMPLE_YEAR))
.replace('YY', ('%02d' % SAMPLE_YEAR_SHORT))
.replace('MM', ('%02d' % SAMPLE_MONTH))
.replace('DD', ('%02d' % SAMPLE_DAY))
.replace('HH24', ('%02d' % SAMPLE_HOUR))
.replace('HH12', ('%02d' % SAMPLE_HOUR_12H))
.replace('HH', ('%02d' % SAMPLE_HOUR))
.replace('MI', ('%02d' % SAMPLE_MINUTE))
.replace('SS', ('%02d' % SAMPLE_SECOND))
.replace('OF', SAMPLE_OFFSET)
.replace('AM', SAMPLE_PERIOD)
)
DATE_CASES = [
'YYYY-MM-DD',
'MM-DD-YYYY',
'DD-MM-YYYY',
'MM/DD/YY',
'DD/MM/YY',
'DD-MM-YY',
]
TIMEONLY_CASES = [
"HH12:MI AM",
"HH:MI:SS",
"HH24:MI:SS",
]
DATETIMETZ_CASES = [
"YYYY-MM-DD HH:MI:SSOF",
"YYYY-MM-DD HH:MI:SS",
"YYYY-MM-DD HH24:MI:SSOF",
"MM/DD/YY HH24:MI",
]
DATETIME_CASES = [
"YYYY-MM-DD HH24:MI:SS",
"YYYY-MM-DD HH:MI:SS",
"YYYY-MM-DD HH12:MI AM",
"MM/DD/YY HH24:MI",
]
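# --- Minimal usage sketch (illustrative addition, not part of the original file) ---
# create_sample() substitutes the fixed SAMPLE_* values into a format template;
# the expected output below follows directly from the constants defined above.
if __name__ == "__main__":
    rendered = create_sample("YYYY-MM-DD HH24:MI:SS")
    assert rendered == "1983-01-02 15:04:05"
    print(rendered)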
|
155553
|
from sklearn.decomposition import PCA
import pandas as pd
from sklearn.preprocessing import StandardScaler
import matplotlib.pyplot as plt
df = pd.read_csv("Iris.csv")
labels = df['Species']
X = df.drop(['Id','Species'],axis=1)
X_std = StandardScaler().fit_transform(X)
pca = PCA(n_components=4)
X_transform = pca.fit_transform(X_std)
explained_var = pca.explained_variance_ratio_
for var in explained_var:
    print(var)
plt.bar([1,2,3,4],explained_var,label=var)
plt.xlabel("Component #")
plt.ylabel("% Variance Contribution")
plt.legend()
plt.show()
# pca1 = zip(*X_transform)[0]
# pca2 = zip(*X_transform)[1]
# color_dict = {}
# color_dict["Iris-setosa"] = "green"
# color_dict["Iris-versicolor"]='red'
# color_dict["Iris-virginica"] = 'blue'
# i=0
# for label in labels.values:
# plt.scatter(pca1[i],pca2[i],color=color_dict[label])
# i=i+1
# plt.show()
|
155563
|
import hmac
from urllib.parse import quote
import httpx
from fastapi import HTTPException, Query
from fastapi.responses import RedirectResponse
from idunn import settings
client = httpx.AsyncClient()
base_url = settings.get("BASE_URL")
secret = settings.get("SECRET").encode()
def resolve_url(url: str) -> str:
"""
Idunn's URL that can be provided to redirect to the same page as the input
URL would.
"""
return base_url + f"v1/redirect?url={quote(url, safe='')}&hash={hash_url(url)}"
def hash_url(url: str) -> str:
"""
Hash of the URL that the client must provide in order to avoid abusive use
of the endpoint.
"""
return hmac.HMAC(key=secret, msg=url.encode(), digestmod="sha256").hexdigest()
async def follow_redirection(
url: str = Query(..., description="An external URL that is expected to redirect."),
hash: str = Query(..., description="Value of the hash provided by Idunn."),
):
"""
Redirect to the same page as provided URL which must have been provided by
Idunn together with an hash value.
"""
if not hmac.compare_digest(hash_url(url), hash):
raise HTTPException(403, detail="provided hash does not match the URL")
response = await client.get(url, allow_redirects=False)
response.raise_for_status()
if response.status_code not in range(300, 400):
raise HTTPException(404, detail="provided URL does not redirect")
return RedirectResponse(response.headers["Location"])
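# --- Minimal usage sketch (illustrative addition; the example URL is hypothetical
# and this assumes BASE_URL and SECRET are configured in Idunn's settings) ---
if __name__ == "__main__":
    example = "https://example.com/somewhere"
    link = resolve_url(example)
    # The link embeds hash_url(example), which is exactly what follow_redirection()
    # verifies; a request carrying any other hash value is rejected with HTTP 403.
    assert hash_url(example) in link
    print(link)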
|
155688
|
import glob
import numpy as np
import pandas as pd
from collections import OrderedDict
#from . import metrics
import metrics
from .csv_reader import csv_node
__all__ = ['tune_threshold',
'assemble_node',
'assemble_dev_threshold',
'metric_reading',
'Ensemble']
def tune_threshold(y_true, y_prob, metric="f1_score"):
if isinstance(metric, str):
metric = getattr(metrics, metric)
thresholds = np.arange(0.01, 1, 0.01)
best_score = 0.0
best_threshold = 0.5
for threshold in thresholds:
y_pred = np.array([1 if p > threshold else 0 for p in y_prob])
cur_score = metric(y_true, y_pred)
if cur_score > best_score:
best_score = cur_score
best_threshold = threshold
print("Tuned threshold: {:.4f}".format(best_threshold))
return best_threshold
def assemble_node(nodes, key="Y_PROBA", method="median", PIDs=None):
if isinstance(method, str):
method = getattr(np, method)
if PIDs is None:
PIDs = nodes[0].PID
probas = []
for pid in PIDs:
proba = method([x.data[pid][key] for x in nodes])
probas.append(proba)
return np.array(probas)
def assemble_dev_threshold(nodes, method="median", metric="f1_score", PIDs=None):
y_prob = assemble_node(nodes, key="Y_PROBA", method=method, PIDs=PIDs)
y_true = nodes[0].extract("Y_TRUE", PIDs)
threshold = tune_threshold(y_true, y_prob, metric)
return threshold
def metric_reading(y_true, y_pred, y_proba):
if isinstance(y_true, list):
readings = [metric_reading(y_true_, y_pred_, y_proba_)
for y_true_,y_pred_,y_proba_ in zip(y_true, y_pred, y_proba)]
return readings
else:
scores = metrics.classification_summary(y_true, y_pred, [0,1], y_proba, verbose=False)
reading = OrderedDict([('Pos.Acc',scores['pos_acc']*100.0),
('Neg.Acc',scores['neg_acc']*100.0),
('Precision',scores['precision']*100.0),
('Recall',scores['recall']*100.0),
('F1',scores['f1']*100.0),
('ROC',scores['roc']*100.0),
('PRC',scores['prc']*100.0),
('NDCG',scores['ndcg']*100.0),
('TP',scores['tp']),
('FP',scores['fp']),
('TN',scores['tn']),
('FN',scores['fn'])])
return reading
class Ensemble(object):
def __init__(self, results_csvs, dev_csvs, pids=None):
self.results_csvs = results_csvs
self.dev_csvs = dev_csvs
self.build(pids)
@classmethod
def from_keyword(klass, test_keyword, dev_keyword, pids=None):
test_csvs = glob.glob(test_keyword, recursive=True)
dev_csvs = glob.glob(dev_keyword, recursive=True)
return klass(test_csvs, dev_csvs, pids)
@classmethod
def from_folder(klass, results_folder, dev_folder, pids=None):
results_csvs = glob.glob("{}/**/predictions*.csv".format(results_folder), recursive=True)
dev_csvs = glob.glob("{}/**/predictions*.csv".format(dev_folder), recursive=True)
return klass(results_csvs, dev_csvs, pids)
def build(self, pids=None):
self.results = [csv_node.from_csv(x) for x in self.results_csvs]
self.devs = [csv_node.from_csv(x) for x in self.dev_csvs]
self.results = sorted(self.results, key=lambda x: x.seed)
self.devs = sorted(self.devs, key=lambda x: x.seed)
if pids is None:
self.pids = list(self.results[0].PID)
else:
self.pids = pids
try:
self.score_list = self.get_seeds_score_list()
self.score = True
        except Exception:
self.score = False
self.proba_list = self.get_seeds_proba_list()
self.pred_list = self.get_seeds_pred_list()
@property
def score_dataframe(self):
return pd.DataFrame(OrderedDict(self.score_list_head+self.score_list))
@property
def proba_dataframe(self):
return pd.DataFrame(OrderedDict(self.proba_list_head+self.proba_list))
@property
def pred_dataframe(self):
return pd.DataFrame(OrderedDict(self.pred_list_head+self.pred_list))
def get_df_by_seed(self, key="Y_PROBA"):
seeds = [x.seed for x in self.results]
probas = [x.extract(key, self.pids) for x in self.results]
df_dict = OrderedDict([("PID", self.pids)] + \
[("SEED_{}".format(seed), proba) for seed, proba in zip(seeds, probas)])
df = pd.DataFrame(df_dict)
return df
def get_score_by_seed(self, seed=0):
idx = [x.seed for x in self.results].index(seed)
node = self.results[idx]
y_true = node.extract("Y_TRUE")
y_pred = node.extract("Y_PRED")
y_proba = node.extract("Y_PROBA")
score = metric_reading(y_true, y_pred, y_proba)
return score
def score2pair(self, key, score):
val = ["{:.2f}".format(score[key]) for key in self.score_keys]
return (key, val)
def get_seeds_score_list(self):
seeds = [x.seed for x in self.results]
scores = [self.get_score_by_seed(x) for x in seeds]
self.score_keys = list(scores[0].keys())
self.score_list_head = [("Experiment", self.score_keys)]
df_list = []
for seed, score in zip(seeds, scores):
pair = self.score2pair("SEED_{}".format(seed), score)
df_list.append(pair)
mean_score = OrderedDict([(key, np.mean([score[key] for score in scores])) for key in self.score_keys])
std_score = OrderedDict([(key, np.std([score[key] for score in scores])) for key in self.score_keys])
df_list.append(self.score2pair("AVERAGE", mean_score))
df_list.append(self.score2pair("STD", std_score))
return df_list
def get_seeds_proba_list(self):
seeds = [x.seed for x in self.results]
probas = [x.extract("Y_PROBA", self.pids) for x in self.results]
self.proba_list_head = [("PID", self.pids)]
proba_list = [("SEED_{}".format(seed), proba) for seed, proba in zip(seeds, probas)]
return proba_list
def get_seeds_pred_list(self):
seeds = [x.seed for x in self.results]
preds = [x.extract("Y_PRED", self.pids) for x in self.results]
self.pred_list_head = [("PID", self.pids)]
pred_list = [("SEED_{}".format(seed), pred) for seed, pred in zip(seeds, preds)]
return pred_list
def median_vote(self, metric="f1_score"):
dev_threshold = assemble_dev_threshold(self.devs, method="median",
metric=metric, PIDs=self.devs[0].PID)
voted_y_proba = assemble_node(self.results, key="Y_PROBA",
method="median", PIDs=self.pids)
voted_y_pred = np.array([1 if p > dev_threshold else 0 for p in voted_y_proba])
y_true = self.results[0].extract("Y_TRUE", self.pids)
#df_dict = OrderedDict([("PID", self.pids),
# ("Y_PROBA", voted_y_proba),
# ("Y_PRED", voted_y_pred)])
#df = pd.DataFrame(df_dict)
proba_pair = ("MEDIAN", voted_y_proba)
self.proba_list.append(proba_pair)
proba_df = pd.DataFrame(OrderedDict(self.proba_list_head+[proba_pair]))
pred_pair = ("MEDIAN", voted_y_pred)
self.pred_list.append(pred_pair)
pred_df = pd.DataFrame(OrderedDict(self.pred_list_head+[pred_pair]))
if self.score:
score = metric_reading(y_true, voted_y_pred, voted_y_proba)
score_pair = self.score2pair("MEDIAN", score)
self.score_list.append(score_pair)
score_df = pd.DataFrame(OrderedDict(self.score_list_head+[score_pair]))
else:
score_df = None
return proba_df, pred_df, score_df
def mv_vote(self):
voted_y_proba = assemble_node(self.results, key="Y_PRED",
method="mean", PIDs=self.pids)
voted_y_pred = np.round(voted_y_proba)
y_true = self.results[0].extract("Y_TRUE", self.pids)
proba_pair = ("MV", voted_y_proba)
self.proba_list.append(proba_pair)
proba_df = pd.DataFrame(OrderedDict(self.proba_list_head+[proba_pair]))
pred_pair = ("MV", voted_y_pred)
self.pred_list.append(pred_pair)
pred_df = pd.DataFrame(OrderedDict(self.pred_list_head+[pred_pair]))
if self.score:
score = metric_reading(y_true, voted_y_pred, voted_y_proba)
score_pair = self.score2pair("MV", score)
self.score_list.append(score_pair)
score_df = pd.DataFrame(OrderedDict(self.score_list_head+[score_pair]))
else:
score_df = None
return proba_df, pred_df, score_df
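# --- Minimal usage sketch (illustrative addition; folder paths are hypothetical).
# Run as a module (python -m ...) so the relative csv_reader import resolves. ---
if __name__ == "__main__":
    ensemble = Ensemble.from_folder("results/test", "results/dev")
    # Median vote with a dev-tuned threshold, then a simple majority vote.
    proba_df, pred_df, score_df = ensemble.median_vote(metric="f1_score")
    mv_proba_df, mv_pred_df, mv_score_df = ensemble.mv_vote()
    print(ensemble.pred_dataframe.head())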
|
155716
|
from __future__ import print_function
import subprocess
import tempfile
import numpy as np
import warnings
import astropy.units as u
_quantity = u.Quantity
from collections import defaultdict
import os
import sys
from . import utils
from . import synthspec
from .utils import QuantityOff,ImmutableDict,unitless,grouper
from .base_class import RadiativeTransferApproximator
from astropy import units as u
from astropy import constants
from astropy import log
import astropy.table
PYVERSION = 3 if sys.version_info >= (3,0) else 2
__all__ = ['pyradex', 'write_input', 'parse_outfile', 'call_radex', 'Radex',
'density_distribution']
def pyradex(executable='radex', minfreq=100, maxfreq=130,
collider_densities={'H2':1}, debug=False, delete_tempfile=True,
return_dict=False, **kwargs):
"""
Get the radex results for a set of input parameters
Parameters
----------
executable : str
Full path to the RADEX executable
minfreq : float
Lowest frequency line to store, in GHz
(note: any astropy.unit spectroscopic unit is also allowed)
maxfreq : float
Highest frequency line to store
collider_densities : dict
Collider names and their number densities
If the molecule specified has both o-H2 and p-H2, you will get a
WARNING if you specify 'H2'
An ortho/para example:
collider_densities = {'oH2':900, 'pH2':100}
which will yield H2 = 1000
See write_input for additional parameters
Returns
-------
An astropy table containing the RADEX returns
.. WARNING:: If RADEX spits out *******, it will be replaced with -999
"""
warnings.warn("pyradex is deprecated: Use pyradex.Radex instead if you can.")
infile,outfile = write_input(minfreq=minfreq, maxfreq=maxfreq,
delete_tempfile=delete_tempfile,
collider_densities=collider_densities, **kwargs)
logfile = call_radex(executable, infile.name, debug=debug,
delete_tempfile=delete_tempfile)
check_logfile(logfile.name)
data = parse_outfile(outfile.name, return_dict=return_dict)
if debug:
with open(infile.name,'r') as inf:
print("Input:")
print(inf.read())
with open(outfile.name,'r') as out:
print("Output:")
print(out.read())
infile.close()
outfile.close()
logfile.close()
return data
def check_logfile(logfilename):
with open(logfilename,'r') as f:
if "Warning: Assuming thermal o/p ratio" in f.read():
warnings.warn("Assumed thermal o/p ratio since only H2 was given but collider file has o- and p- H2")
def write_input(temperature=10, column=1e12, collider_densities={'H2':1},
bw=0.01, tbg=2.73, species='co', velocity_gradient=1.0, minfreq=1,
maxfreq=10, delete_tempfile=True):
"""
Write radex.inp file parameters
Parameters
----------
temperature : float
Kinetic temperature (K)
collider_densities : dict
Collider names and their number densities
column : float
column density of the molecule
species : str
Name of the molecule (specifically, the prefix for the file name, e.g.
for "co.dat", species='co'). Case sensitive!
tbg : float
Temperature of the background radiation (e.g. CMB)
velocity_gradient : float
Velocity gradient per pc in km/s
"""
if hasattr(minfreq, 'unit'):
minfreq = unitless(minfreq.to('GHz',u.spectral()))
if hasattr(maxfreq, 'unit'):
maxfreq = unitless(maxfreq.to('GHz',u.spectral()))
infile = tempfile.NamedTemporaryFile(mode='w', delete=delete_tempfile)
outfile = tempfile.NamedTemporaryFile(mode='w', delete=delete_tempfile)
infile.write(species+'.dat\n')
infile.write(outfile.name+'\n')
infile.write(str(minfreq)+' '+str(maxfreq)+'\n')
infile.write(str(temperature)+'\n')
# RADEX doesn't allow densities < 1e-3
    # iterate over a copy of the keys: popping from the dict while iterating
    # over it directly raises RuntimeError on Python 3
    for k in list(collider_densities.keys()):
        if collider_densities[k] < 1e-3:
            collider_densities.pop(k)
infile.write('%s\n' % len(collider_densities))
for name,dens in collider_densities.items():
infile.write('%s\n' % name)
infile.write(str(dens)+'\n')
infile.write(str(tbg)+'\n')
infile.write(str(column)+'\n')
infile.write(str(velocity_gradient)+'\n')
# end the input file
infile.write('0\n')
infile.flush()
return infile,outfile
def call_radex(executable, inpfilename, debug=False, delete_tempfile=True):
logfile = tempfile.NamedTemporaryFile(mode='w', delete=delete_tempfile)
cmd = '{radex} < {inpfile} > {logfile}'.format(
radex=executable,
inpfile=inpfilename,
logfile=logfile.name)
if debug:
print("Command:",cmd)
result = subprocess.call(cmd, shell=True)
if result != 0:
print("RADEX returned error code %i" % result)
with open(logfile.name,'r') as f:
print(f.read())
return logfile
header_names = ['J_up','J_low','E_UP','FREQ', 'WAVE', 'T_EX', 'TAU', 'T_R', 'POP_UP', 'POP_LOW', 'FLUX_Kkms', 'FLUX_Inu']
header_units = [None, None, u.K, u.GHz, u.um, u.K, None, u.K, None, None, u.K*u.km/u.s, u.erg/u.cm**2/u.s]
dtypes = [str, str, float, float, float, float, float, float, float, float, float, float]
def parse_outfile(filename, return_dict=False):
with open(filename,'r') as f:
alllines = f.readlines()
header = {L.split(":")[0][2:].strip():L.split(":")[1].strip()
for L in alllines
if L[0]=='*'}
lines = [L.replace("--"," ") for L in alllines
if (L[0] != '*'
and 'iterat' not in L
and 'GHz' not in L
and 'TAU' not in L)]
niter = [L.split(" ")[3]
for L in alllines
if 'iterat' in L]
data_list = [[x if '*' not in x else '-999' for x in L.split()] for L in lines]
if len(data_list) == 0:
raise ValueError("No lines included?")
data_in_columns = map(list,zip(*data_list))
if return_dict:
data = {name: C for C,name in zip(data_in_columns, header_names)}
data['niter']=niter
return data
columns = [astropy.table.Column(data=C, name=name.lower(), unit=unit, dtype=dtype)
for C,name,unit,dtype in zip(data_in_columns, header_names, header_units, dtypes)]
data = astropy.table.Table(columns, meta=header)
return data
class Radex(RadiativeTransferApproximator):
def __call__(self, return_table=True, **kwargs):
# reset the parameters appropriately
self.set_params(**kwargs)
# No need to re-validate: it is already done when self.temperature is
# set in __init__
niter = self.run_radex(reload_molfile=False, validate_colliders=False)
if return_table:
return self.get_table()
else:
return niter
def __init__(self,
collider_densities=None,
density=None,
total_density=None,
temperature=None,
species='co',
column=None,
column_per_bin=None,
tbackground=2.7315,
deltav=1.0,
abundance=None,
datapath=None,
escapeProbGeom='lvg',
outfile='radex.out',
logfile='radex.log',
debug=False,
mu=2.8,
source_area=None,
):
"""
Direct wrapper of the radex FORTRAN code
Parameters
----------
collider_densities: dict
Dictionary giving the volume densities of the collider(s) in units
of cm^-3. Valid entries are h2,oh2,ph2,e,He,H,H+. The keys are
case-insensitive.
density: float
total_density: float
(optional) Alternative to ``collider_densities``: can specify a
single number indicating the total density of H2. This should
not be used when electrons or H atoms are the intended collider.
These keywords are synonymous and therefore only one can be used.
temperature: float
Local gas temperature in K
species: str
A string specifying a valid chemical species. This is used to look
up the specified molecule
column: float
column_per_bin : float
The column density of the molecule of interest per bin, where
a bin is (deltav km/s * 1 pc). These keywords are synonymous and
therefore only one can be specified.
abundance: float
The molecule's abundance relative to the total collider density in
each velocity bin, i.e. column = abundance * density * length * dv.
If both abundance and column are specified, abundance is ignored.
tbackground: float
Background radiation temperature (e.g., CMB)
deltav: float
The FWHM line width (really, the single-zone velocity width to
scale the column density by: this is most sensibly interpreted as a
velocity gradient (dv/length))
datapath: str
Path to the molecular data files. If it is not specified, defaults
to the current directory, OR the shell variable RADEX_DATAPATH if
it is specified.
outfile: str
Output file name
logfile: str
Log file name
escapeProbGeom: 'lvg','sphere','slab'
Which escape probability method to use
mu: float
Mean mass per particle in AMU. Set to 2.8 for H2+Helium mix
source_area: float / unit
The emitting area of the source on the sky in steradians
"""
log.debug("Importing radex fortran module")
from pyradex.radex import radex
self.radex = radex
self.mu = mu
if os.getenv('RADEX_DATAPATH') and datapath is None:
datapath = os.getenv('RADEX_DATAPATH')
log.debug(f"Datapath={datapath}")
if datapath is not None:
self.datapath = datapath
if self.datapath != os.path.expanduser(datapath):
raise ValueError("Data path %s was not successfully stored;"
" instead %s was." % (datapath,self.datapath))
log.debug(f"Setting species to {species}")
self.species = species
if self.molpath == b'':
raise ValueError("Must set a species name.")
if not os.path.exists(self.molpath):
raise ValueError("Must specify a valid path to a molecular data file "
"else RADEX will crash."
" Current path is {0}".format(self.molpath))
if sum(x is not None for x in (collider_densities,density,total_density)) > 1:
raise ValueError("Can only specify one of density, total_density,"
" and collider_densities")
if sum(x is not None for x in (column,column_per_bin)) > 1:
raise ValueError("Can only specify one of column, column_per_bin.")
n_specifications = sum(x is not None for x in (column, column_per_bin,
collider_densities,
density, total_density,
abundance))
if (n_specifications > 2):
raise ValueError("Can only specify two of column, density, and abundance.")
if (n_specifications < 2):
raise ValueError("Must specify two of column, density, and abundance.")
self._locked_parameter = 'density'
self._is_locked = True
log.debug(f"Setting temperature to {temperature}")
# This MUST happen before density is set, otherwise OPR will be
# incorrectly set.
self.radex.cphys.tkin = unitless(temperature)
log.debug(f"Temperature = {self.radex.cphys.tkin}")
# density warnings will occur if a generic (number-based) density is
# used. It can be suppressed more directly by using a dictionary-style
# density
self._suppress_density_warning = False
log.debug("Setting up collider densities")
if collider_densities:
self.density = collider_densities
self._suppress_density_warning = True
self._is_locked = False
if total_density:
log.warn("`total_density` was specified, but `collider_densities` "
"was used instead. Set `collider_densities=None` if you "
"want to use `total_density`.")
elif total_density:
self.density = total_density
self._suppress_density_warning = True
self._is_locked = False
elif density:
self.density = density
self._suppress_density_warning = True
self._is_locked = False
else:
self._locked_parameter = 'column'
self._is_locked = True
log.debug("Completed collider densities; setting up outfile/logfile")
self.outfile = outfile
self.logfile = logfile
self.escapeProbGeom = escapeProbGeom
self.deltav = deltav
log.debug("Setting parameters for the first time")
self._set_parameters()
if column_per_bin is not None:
self.column_per_bin = column_per_bin
elif column is not None:
self.column_per_bin = column
else:
self._locked_parameter = 'density'
self._is_locked = False
if abundance:
self.abundance = abundance
self._validate_colliders()
# This has to happen here, because the colliders are read in at
# this point and rates interpolated
self.temperature = temperature
self.tbg = tbackground
self.debug = debug
self.source_area = source_area
self._suppress_density_warning = False
_u_brightness = (u.erg * u.s**-1 * u.cm**-2 * u.Hz**-1 * u.sr**-1)
_u_sc = u.cm**-2
_u_cc = u.cm**-3
_u_gradient = u.cm**-2 / (u.km/u.s) / u.pc
_u_kms = u.km/u.s
_u_cms = u.cm/u.s
def set_params(self, density=None, collider_densities=None,
column=None, column_per_bin=None, temperature=None,
abundance=None, species=None, deltav=None, tbg=None,
escapeProbGeom=None):
if species is not None:
self.species = species
if deltav is not None:
self.deltav = deltav
# This MUST happen before density is set, otherwise OPR will be
# incorrectly set.
if temperature is not None:
self.radex.cphys.tkin = unitless(temperature)
# if the density is a specified parameter, we only want to warn that it
# is being set once
self._suppress_density_warning = False
if collider_densities is not None:
self.density = collider_densities
self._suppress_density_warning = True
elif density is not None:
if collider_densities is not None:
raise ValueError('Can specify only one of density,'
' collider_densities')
self.density = density
self._suppress_density_warning = True
if column is not None:
self.column = column
elif column_per_bin is not None:
if column is not None:
raise ValueError("Can specify only one of column,"
"column per bin")
self.column_per_bin = column_per_bin
if temperature is not None:
self.temperature = temperature
if abundance is not None:
self.abundance = abundance
if tbg is not None:
self.tbg = tbg
if escapeProbGeom is not None:
self.escapeProbGeom = escapeProbGeom
# the density warning should occur for any other future settings
self._suppress_density_warning = False
@property
def locked_parameter(self):
return self._locked_parameter
def _lock_param(self, parname):
if not hasattr(self, '_previous_locked_parameter') or (hasattr(self, '_locked_parameter') and
self._previous_locked_parameter != self._locked_parameter):
self._previous_locked_parameter = self._locked_parameter
self._locked_parameter = parname
def _set_parameters(self):
#self.radex.cphys.cdmol = self.column
#self.radex.cphys.tkin = self.temperature
if hasattr(self.deltav, 'to'):
self.radex.cphys.deltav = unitless(self.deltav.to(self._u_cms))
else:
self.radex.cphys.deltav = self.deltav * (self._u_cms.to(self._u_kms))
# these parameters are only used for outputs and therefore can be ignored
self.radex.freq.fmin = 0
self.radex.freq.fmax = 1e10
if not hasattr(self, 'miniter'):
self.miniter = 10
if not hasattr(self, 'maxiter'):
self.maxiter = 200
_all_valid_colliders = {'H2':'H2',
'PH2':'pH2',
'OH2':'oH2',
'E':'e',
'H':'H',
'HE':'He',
'H+':'H+'}
@property
def density(self):
d = {'H2':self.radex.cphys.density[0],
'pH2':self.radex.cphys.density[1],
'oH2':self.radex.cphys.density[2],
'e':self.radex.cphys.density[3],
'H':self.radex.cphys.density[4],
'He':self.radex.cphys.density[5],
'H+':self.radex.cphys.density[6]}
for k in d:
d[k] = u.Quantity(d[k], self._u_cc)
return ImmutableDict(d)
@density.setter
def density(self, collider_density):
collider_ids = {'H2': 0,
'PH2': 1,
'OH2': 2,
'E': 3,
'H': 4,
'HE': 5,
'H+': 6}
self._use_thermal_opr = False
if isinstance(collider_density, (float,int,_quantity,np.ndarray)):
if not self._suppress_density_warning:
log.warn("Assuming the density is n(H_2).")
collider_density = {'H2': collider_density}
collider_densities = defaultdict(lambda: 0)
for k in collider_density:
collider_densities[k.upper()] = unitless(u.Quantity(collider_density[k], self._u_cc))
if k.upper() not in self._all_valid_colliders:
raise ValueError('Collider %s is not one of the valid colliders: %s' %
(k,self._all_valid_colliders))
if (('OH2' in collider_densities and collider_densities['OH2'] !=0) or
('PH2' in collider_densities and collider_densities['PH2'] !=0)):
# this is simply not true: NH3 has just ph2 as a collider
#if not 'PH2' in collider_densities or not 'OH2' in collider_densities:
# raise ValueError("If o-H2 density is specified, p-H2 must also be.")
# TODO: look up whether RADEX uses density[0] if density[1] and [2] are specified
# (it looks like the answer is "no" based on a quick test)
#self.radex.cphys.density[0] = 0 # collider_densities['OH2'] + collider_densities['PH2']
# PARA is [1], ORTHO is [2]
# See lines 91, 92 of io.f
if 'PH2' in collider_densities:
self.radex.cphys.density[1] = collider_densities['PH2']
if 'OH2' in collider_densities:
self.radex.cphys.density[2] = collider_densities['OH2']
self._use_thermal_opr = False
elif 'H2' in collider_densities:
warnings.warn("Using a default ortho-to-para ratio (which "
"will only affect species for which independent "
"ortho & para collision rates are given)")
self._use_thermal_opr = True
#self.radex.cphys.density[0] = collider_densities['H2']
T = unitless(self.temperature)
if T > 0:
# From Faure, private communication
opr = min(3.0,9.0*np.exp(-170.6/T))
else:
opr = 3.0
fortho = opr/(1+opr)
log.debug("Set OPR to {0} and fortho to {1}".format(opr,fortho))
self.radex.cphys.density[1] = collider_densities['H2']*(1-fortho)
self.radex.cphys.density[2] = collider_densities['H2']*(fortho)
# RADEX relies on n(H2) = n(oH2) + n(pH2)
# We have set n(oH2) and n(pH2) above
vc = [x.lower() for x in self.valid_colliders]
if 'h2' in vc:
self.radex.cphys.density[0] = self.radex.cphys.density[1:3].sum()
self.radex.cphys.density[1] = 0
self.radex.cphys.density[2] = 0
elif 'oh2' in vc or 'ph2' in vc:
self.radex.cphys.density[0] = 0
self.radex.cphys.density[3] = collider_densities['E']
self.radex.cphys.density[4] = collider_densities['H']
self.radex.cphys.density[5] = collider_densities['HE']
self.radex.cphys.density[6] = collider_densities['H+']
# skip H2 when computing by assuming OPR correctly distributes ortho & para
# It's not obvious that RADEX does this correctly in readdata.f
self.radex.cphys.totdens = self.radex.cphys.density.sum()
# Unfortunately,
# must re-read molecular file and re-interpolate to new density
log.debug("Validating colliders")
self._validate_colliders()
log.debug(f"Running 'readdata' from molfile={self.molpath}")
self.radex.readdata()
log.debug("Ran 'readdata'")
if not self._is_locked:
self._is_locked = True
assert self.locked_parameter in ('column', 'abundance', 'density')
if self.locked_parameter == 'density': # self is locked, still need to update
if hasattr(self, '_previous_locked_parameter'):
self._lock_param(self._previous_locked_parameter)
else:
self._lock_param('abundance') # choose arbitrarily
if self.locked_parameter == 'column':
self.abundance = self.column_per_bin /(self.total_density*self.length)
elif self.locked_parameter == 'abundance':
self.column_per_bin = self.total_density * self.length * self.abundance
else:
raise ValueError("Neither column nor abundance were updated")
self._lock_param('density')
self._is_locked = False
invab = (self.total_density / (self.column / self.length)).decompose().value
if not np.allclose(invab, 1/self.abundance):
raise ValueError("Can not set density to %s" % collider_density)
@property
def valid_colliders(self):
return self._valid_colliders
@property
def total_density(self):
"""
The total density *by number of particles*
The *mass density* can be dramatically different!
"""
return u.Quantity(self.radex.cphys.totdens, self._u_cc)
@property
def opr(self):
return self.radex.cphys.density[1]/self.radex.cphys.density[2]
@property
def molpath(self):
log.debug(f"Computing molpath from molfile = {self.radex.impex.molfile}")
try:
result = b"".join(self.radex.impex.molfile).strip()
except TypeError:
result = self.radex.impex.molfile.tostring().strip()
# this hack may be wrong; the underlying dtype appears to be corrupt
return result.lstrip(b"b'") # strip "bytes" junk that appears to be added by numpy
@molpath.setter
def molpath(self, molfile):
log.debug(f"Setting molpath to {molfile} (self.radex.impex.molfile={self.radex.impex.molfile})")
if "~" in molfile:
molfile = os.path.expanduser(molfile)
if PYVERSION == 3:
try:
self.radex.impex.molfile[:] = np.bytes_([""]*len(self.radex.impex.molfile))
except TypeError as ex:
self.radex.impex.molfile = " " * self.radex.impex.molfile.dtype.itemsize
else:
self.radex.impex.molfile[:] = ""
log.debug(f"Verifying collision rates for molfile={molfile} from impex.molfile={self.radex.impex.molfile}")
utils.verify_collisionratefile(molfile)
try:
self.radex.impex.molfile[:len(molfile)] = molfile
except IndexError:
self.radex.impex.molfile = molfile + " " * (self.radex.impex.molfile.dtype.itemsize - len(molfile))
@property
def outfile(self):
return self.radex.impex.outfile
@outfile.setter
def outfile(self, outfile):
if PYVERSION == 3:
try:
self.radex.impex.outfile[:] = np.bytes_([""]*len(self.radex.impex.outfile))
except TypeError as ex:
self.radex.impex.outfile = " " * self.radex.impex.outfile.dtype.itemsize
else:
self.radex.impex.outfile[:] = ""
try:
self.radex.impex.outfile[:len(outfile)] = outfile
except IndexError:
self.radex.impex.outfile = outfile + " " * (self.radex.impex.outfile.dtype.itemsize - len(outfile))
@property
def logfile(self):
return self.radex.setup.logfile
@logfile.setter
def logfile(self, logfile):
if PYVERSION == 3:
try:
self.radex.setup.logfile[:] = np.bytes_([""]*len(self.radex.setup.logfile))
except TypeError as ex:
self.radex.setup.logfile = " " * self.radex.setup.logfile.dtype.itemsize
else:
self.radex.setup.logfile[:] = ""
try:
self.radex.setup.logfile[:len(logfile)] = logfile
except IndexError:
self.radex.setup.logfile = logfile + " " * (self.radex.setup.logfile.dtype.itemsize - len(logfile))
@property
def datapath(self):
try:
return os.path.expanduser(b"".join(self.radex.setup.radat).strip()).decode('utf-8')
except TypeError:
# occurs if radat is S120 instead of array of S1
return os.path.expanduser((self.radex.setup.radat.tostring().decode('utf-8').strip()))
@datapath.setter
def datapath(self, radat):
# self.radex data path not needed if molecule given as full path
if PYVERSION == 3:
try:
self.radex.setup.radat[:] = np.bytes_([""] * len(self.radex.setup.radat))
except TypeError as ex:
# now radat gets treated as a single S120 instead of an array of S1s
self.radex.setup.radat = " " * self.radex.setup.radat.dtype.itemsize
else:
self.radex.setup.radat[:] = ""
# there is dangerous magic here: radat needs to be interpreted as an array,
# but you can't make it an array of characters easily...
try:
self.radex.setup.radat[:len(radat)] = radat
except IndexError:
# in python3, this might just work, where the above doesn't?
# (this works if RADAT is an S120)
# the added space is because the right and left side must have *exactly* the same size
self.radex.setup.radat = radat + " " * (self.radex.setup.radat.dtype.itemsize - len(radat))
@property
def escapeProbGeom(self):
mdict = {2:'lvg',1:'sphere',3:'slab'}
return mdict[int(self.radex.setup.method)]
@escapeProbGeom.setter
def escapeProbGeom(self, escapeProbGeom):
mdict = {'lvg':2,'sphere':1,'slab':3}
if escapeProbGeom not in mdict:
raise ValueError("Invalid escapeProbGeom, must be one of "+",".join(mdict))
self.radex.setup.method = mdict[escapeProbGeom]
@property
def level_population(self):
return self.radex.collie.xpop
@property
def tex(self):
return u.Quantity(self.radex.radi.tex[self._mask], u.K)
Tex = tex
@property
def tau(self):
# taul(iline) = cddv*(xpop(n)*gstat(m)/gstat(n)-xpop(m))
#$ /(fgaus*xt/aeinst(iline))
return self.radex.radi.taul[self._mask]
@property
def frequency(self):
return u.Quantity(self.radex.radi.spfreq[self._mask], u.GHz)
@property
def temperature(self):
return u.Quantity(self.radex.cphys.tkin, u.K)
@temperature.setter
def temperature(self, tkin):
if hasattr(tkin,'to'):
tkin = unitless(u.Quantity(tkin, u.K))
elif tkin is None:
raise TypeError("Must specify tkin")
if tkin <= 0 or tkin > 1e4:
raise ValueError('Must have kinetic temperature > 0 and < 10^4 K')
self.radex.cphys.tkin = tkin
if not os.path.exists(self.molpath):
raise IOError("File not found: %s" % self.molpath)
# must re-read molecular file and re-interpolate to new temperature
self._validate_colliders()
#log.info("before DENS:"+str(self.radex.cphys.density))
#log.info("before TOTDENS:"+str(self.radex.cphys.totdens))
self.radex.readdata()
#log.info("after DENS:"+str(self.radex.cphys.density))
#log.info("after TOTDENS:"+str(self.radex.cphys.totdens))
if self._use_thermal_opr:
# Reset the density to a thermal value
lp = self._locked_parameter
self.density = (unitless(self.density['H2']) or
unitless(self.density['oH2']+self.density['pH2']))
self._locked_parameter = lp
@property
def column(self):
return self.column_per_bin
@column.setter
def column(self, value):
self.column_per_bin = value
@property
def column_per_bin(self):
return u.Quantity(self.radex.cphys.cdmol, self._u_sc)
@column_per_bin.setter
def column_per_bin(self, col):
if hasattr(col, 'to'):
col = unitless(u.Quantity(col, self._u_sc))
if col < 1e5 or col > 1e25:
raise ValueError("Extremely low or extremely high column.")
self.radex.cphys.cdmol = col
col = u.Quantity(col, self._u_sc)
if not self._is_locked:
self._is_locked = True
assert self.locked_parameter in ('column', 'abundance', 'density')
if self.locked_parameter == 'column': # self is locked, still need to update
if hasattr(self, '_previous_locked_parameter'):
self._lock_param(self._previous_locked_parameter)
else:
self._lock_param('density') # choose arbitrarily
assert self.locked_parameter in ('density', 'abundance')
if self.locked_parameter == 'density':
ab = (col/(self.total_density * self.length))
if hasattr(ab, 'decompose'):
self.abundance = ab.decompose().value
else:
self.abundance = ab / (self._u_cc*u.pc).to(self._u_sc)
elif self.locked_parameter == 'abundance':
self.density = col / self.length / self.abundance
else:
raise ValueError("Neither density nor abundance were updated")
self._lock_param('column')
self._is_locked = False
invab = (self.total_density / (self.column / self.length)).decompose().value
if not np.allclose(invab, 1/self.abundance):
raise ValueError("Can not set column_per_bin to %s" % col)
@property
def column_per_kms_perpc(self):
return self.column_per_bin / self.deltav
@column_per_kms_perpc.setter
def column_per_kms_perpc(self, cddv):
cddv = u.Quantity(cddv, self._u_gradient)
self.column_per_bin = cddv * u.Quantity(self.deltav, self._u_kms) * self.length()
@property
def abundance(self):
return self._abundance
@abundance.setter
def abundance(self, abund):
self._abundance = abund
if not self._is_locked:
assert self.locked_parameter in ('column', 'abundance', 'density')
if self.locked_parameter == 'abundance': # self is locked, still need to update
if hasattr(self, '_previous_locked_parameter'):
self._lock_param(self._previous_locked_parameter)
else:
self._lock_param('density') # choose arbitrarily
self._is_locked = True
if self.locked_parameter == 'column':
dens = self.column_per_bin / self.length / abund
self.density = dens
elif self.locked_parameter == 'density':
col = self.total_density*self.length*abund
self.column_per_bin = u.Quantity(col, u.cm**-2)
else:
raise ValueError("Neither column nor density were updated")
self._lock_param('abundance')
self._is_locked = False
invab = (self.total_density / (self.column / self.length)).decompose().value
if not np.allclose(invab, 1/self.abundance):
raise ValueError("Can not set abundance to %s" % abund)
@property
def deltav(self):
return self._deltav
@deltav.setter
def deltav(self, dv):
self._deltav = u.Quantity(dv, self._u_kms)
@property
def length(self):
""" Hard-coded, assumed length-scale """
return u.Quantity(1, u.pc)
@property
def debug(self):
return self.radex.dbg.debug
@debug.setter
def debug(self, debug):
self.radex.dbg.debug = debug
@property
def tbg(self):
return u.Quantity(self.radex.cphys.tbg, u.K)
@tbg.setter
def tbg(self, tbg):
if tbg is None:
# allow tbg to be not-set so that backrad() isn't triggered
return
#print("Set TBG=%f" % tbg)
if hasattr(tbg, 'value'):
tbg = unitless(u.Quantity(tbg, u.K))
self.radex.cphys.tbg = tbg
self.radex.backrad()
def run_radex(self, silent=True, reuse_last=False, reload_molfile=True,
abs_convergence_threshold=1e-16, rel_convergence_threshold=1e-8,
validate_colliders=True):
"""
Run the iterative matrix solution using a python loop
Parameters
----------
silent: bool
Print a message when iteration is done?
reuse_last: bool
If this is True, the matrix iterator will start at iteration 1
rather than iteration 0, and it will therefore repopulate the rate
matrix based on the radiative background alone. In principle,
setting this to True should result in a significantly faster
convergence; in practice, it does not.
reload_molfile: bool
Re-read the molecular line file? This is needed if the collision
rates are different and have not been updated by, e.g., changing
the temperature (which automatically runs the `readdata` function)
validate_colliders: bool
Validate the colliders before running the code. This should always
be done unless running in a grid, in which case it can cause a
slowdown (~30%).
"""
if validate_colliders:
# 100 loops, best of 3: 7.48 ms per loop
self._validate_colliders()
if reload_molfile or self.radex.collie.ctot.sum()==0:
# 100 loops, best of 3: 15.3 ms per loop
self.radex.readdata()
#self.radex.backrad()
# Given the properties of *this* class, set the appropriate RADEX
# fortran function values
# 10000 loops, best of 3: 74 micros per loop
self._set_parameters()
self._iter_counter = 1 if reuse_last else 0
converged = np.array(False)
# 1000000 loops, best of 3: 1.79 micros per loop
last = self.level_population.copy()
while not converged:
if self._iter_counter >= self.maxiter:
if not silent:
print("Did not converge in %i iterations, stopping." % self.maxiter)
break
# 10000 loops, best of 3: 30.8 micros per loop
self.radex.matrix(self._iter_counter, converged)
level_diff = np.abs(last-self.level_population)
frac_level_diff = level_diff/self.level_population
if (((level_diff.sum() < abs_convergence_threshold) or
(frac_level_diff.sum() < rel_convergence_threshold)) and
self._iter_counter>self.miniter):
if not silent:
print("Stopped changing after %i iterations" % self._iter_counter)
break
last = self.level_population.copy()
self._iter_counter += 1
if converged and not silent:
print("Successfully converged after %i iterations" % self._iter_counter)
return self._iter_counter
@property
def quantum_number(self):
# more recent versions of numpy/python don't require any restructuring?
return self.radex.quant.qnum
#return np.array([(b"".join(x)).strip() for x in
# grouper(self.radex.quant.qnum.T.ravel().tolist(),6,fillvalue=b'')])
@property
def upperlevelnumber(self):
# wrong return self.radex.imolec.iupp[self._mask]
return self.quantum_number[self.upperlevelindex]
@property
def lowerlevelnumber(self):
# wrong return self.radex.imolec.ilow[self._mask]
return self.quantum_number[self.lowerlevelindex]
@property
def upperlevelindex(self):
return self.radex.imolec.iupp[self._mask]-1
@property
def upperlevelpop(self):
return self.level_population[self.upperlevelindex]
@property
def lowerlevelindex(self):
return self.radex.imolec.ilow[self._mask]-1
@property
def lowerlevelpop(self):
return self.level_population[self.lowerlevelindex]
@property
def upperstateenergy(self):
return self.radex.rmolec.eup[self._mask]
@property
def inds_frequencies_included(self):
"""
The indices of the line frequencies fitted by RADEX
(RADEX can hold up to 99999 frequencies, but usually uses ~100)
"""
return np.where(self._mask)[0]
@property
def background_brightness(self):
return u.Quantity(self.radex.radi.backi[self._mask], self._u_brightness)
@background_brightness.setter
def background_brightness(self, value):
self.radex.radi.backi[:value.size] = value.to(self._u_brightness)
self.radex.radi.totalb[:value.size] = value.to(self._u_brightness)
_thc = (2 * constants.h * constants.c).cgs / u.sr
_fk = (constants.h * constants.c / constants.k_B).cgs
_thc_value = _thc.value
_fk_value = _fk.value
@property
def source_brightness(self):
"""
RADEX compat? (check)
"""
fk = self._fk_value
thc = self._thc_value
with QuantityOff():
ftau = np.exp(-self.tau)
xt = self._xt
xnu = self._xnu
earg = fk*xnu/self.tex
bnutex = thc*xt/(np.exp(earg)-1.0)
toti = self.background_brightness*ftau+bnutex*(1.0-ftau)
return u.Quantity(toti, self._u_brightness)
@property
def source_brightness_beta(self):
fk = self._fk_value
thc = self._thc_value
with QuantityOff():
ftau = np.exp(-self.tau)
xt = self._xt
xnu = self._xnu
earg = fk*xnu/self.tex
bnutex = thc*xt/(np.exp(earg)-1.0)
toti = self.background_brightness*ftau+bnutex*(1-self.beta)
return u.Quantity(toti, self._u_brightness)
@property
def beta(self):
# this will probably be faster if vectorized (translated completely
# from fortran to python)
return np.array([self.radex.escprob(t) for t in self.tau])
@property
def _xnu(self):
"""
Line frequency in inverse cm
"""
return u.Quantity(self.radex.radi.xnu[self._mask], u.cm**-1)
@property
def _xt(self):
# xt = xnu**3 # cm^-1 -> cm^-3
return self._xnu**3
@property
def _cddv(self):
return self.column / self.deltav
@property
def _statistical_weight(self):
return self.radex.rmolec.gstat
@property
def upperlevel_statisticalweight(self):
return self._statistical_weight[self.upperlevelindex]
@property
def lowerlevel_statisticalweight(self):
return self._statistical_weight[self.lowerlevelindex]
@property
def _mask(self):
return self.radex.radi.spfreq != 0
def get_synthspec(self, fmin, fmax, npts=1000, **kwargs):
"""
Generate a synthetic spectrum of the selected molecule over the
specified frequency range. This task is good for quick-looks but has a
lot of overhead for generating models and should not be used for
fitting (unless you have a conveniently small amount of data)
Parameters
----------
fmin : `~astropy.units.Quantity`
fmax : `~astropy.units.Quantity`
Frequency-equivalent quantity
"""
wcs = synthspec.FrequencyArray(fmin, fmax, npts)
S = synthspec.SyntheticSpectrum.from_RADEX(wcs, self, **kwargs)
return S
def partition_function(self, temperature=None):
"""
Equation 46 of Mangum & Shirley 2015:
Q = Sum( g_i exp(-E_i / kT) )
"""
warnings.warn("The partition function may be very inaccurate using "
"LAMDA files because they include a small fraction of"
" the total available states.")
gi = self.upperlevel_statisticalweight
Ei = u.Quantity(self.upperstateenergy, unit=u.K)
if temperature is None:
temperature = self.temperature
if not hasattr(temperature, 'unit'):
temperature = u.Quantity(temperature, unit=u.K)
return (gi*np.exp(-Ei/(temperature))).sum()
def density_distribution(densarr, distr, moleculecolumn, tauthresh=0.8,
opr=None, line_ids=[], mincol=None, Radex=Radex,
**kwargs):
"""
Compute the LVG model for a single zone with an assumed density
*distribution* but other properties fixed.
Parameters
----------
    densarr : array
Array of densities corresponding to the distribution function
distr : array
The density distribution corresponding to the density array
moleculecolumn : quantity
The total column density of the molecule in question. It will be
redistributed across the appropriate densities. Units: cm^-2
[this is wrong - each density will assume a too-low optical depth]
"""
if not np.allclose(distr.sum(), 1):
raise ValueError("The distribution must be normalized.")
if not line_ids:
raise ValueError("Specify at least one line ID")
meandens = (densarr*distr).mean()
if opr is None:
collider_densities = {'H2': meandens}
else:
fortho = opr/(1+opr)
collider_densities = {'oH2':meandens*fortho,'pH2':meandens*(1-fortho)}
# Test whether the multi-slab model is reasonable by checking:
# if the column was all at the mean density, would any lines be
# optically thick?
R = Radex(collider_densities=collider_densities, column=moleculecolumn, **kwargs)
R.run_radex()
if np.any(R.tau > tauthresh):
warnings.warn(("At least one line optical depth is >{tauthresh}. "
"Smoothing may be invalid.").format(tauthresh=tauthresh))
# set the optical depth from the *mean* density assuming the *total* column
tau = R.tau
print("Mean density: {0} Optical Depth: {1}".format(meandens, tau[line_ids]))
_thc = (2 * constants.h * constants.c).cgs / u.sr
_fk = (constants.h * constants.c / constants.k_B).cgs
_thc_value = _thc.value
_fk_value = _fk.value
_u_brightness = (u.erg * u.s**-1 * u.cm**-2 * u.Hz**-1 * u.sr**-1)
xnu = R.frequency.to(u.cm**-1, u.spectral()).value
linestrengths = []
texs = []
for dens,prob in zip(densarr,distr):
if opr is None:
collider_densities = {'H2':dens}
else:
collider_densities = {'oH2':dens*fortho,'pH2':dens*(1-fortho)}
R.density = collider_densities
try:
R.column = moleculecolumn * prob
if mincol is not None and R.column < mincol:
R.column = mincol
R.run_radex()
except ValueError as ex:
if ex.args[0] == "Extremely low or extremely high column.":
if R.column > u.Quantity(1e20, u.cm**-2):
raise ex
else:
texs.append(np.zeros_like(line_ids)+2.73)
linestrengths.append(np.zeros_like(line_ids))
continue
else:
raise ex
if hasattr(R, 'radex'):
R.radex.radi.taul[:len(tau)] = tau
elif hasattr(R, '_data_dict'):
R._data_dict['tau'] = tau
fk = _fk_value
thc = _thc_value
with QuantityOff():
ftau = np.exp(-tau)
xt = xnu**3
earg = fk*xnu/R.tex
bnutex = thc*xt/(np.exp(earg)-1.0)
toti_nounit = R.background_brightness*ftau+bnutex*(1.0-ftau)
toti = u.Quantity(toti_nounit, _u_brightness)
totK = ((toti*u.sr).to(u.K, u.brightness_temperature(1*u.sr,
R.frequency)))
linestrengths.append(totK[line_ids])
texs.append(R.tex[line_ids])
linestrengths = np.array(linestrengths)
texs = np.array(texs)
return R, linestrengths, linestrengths.sum(axis=0), texs, tau[line_ids]
def grid():
pass
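# --- Minimal usage sketch (illustrative addition; requires the compiled RADEX
# fortran extension plus a LAMDA data file such as co.dat on RADEX_DATAPATH,
# and should be run as a module so the relative imports resolve) ---
if __name__ == "__main__":
    R = Radex(species='co', column=1e14, collider_densities={'H2': 1e3},
              temperature=20)
    R.run_radex()
    print(R.tex[:3], R.tau[:3])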
|
155742
|
import six
import multiprocessing
import pytest
from .run_functions import log_observation, upload_artifact
from operator import itemgetter
from functools import partial
class TestConcurrency:
def test_multiple_runs_log_obs(self, client):
client.set_project()
client.set_experiment()
pool = multiprocessing.Pool(36)
result = pool.map(partial(log_observation, client), range(10))
pool.close()
def extract_obs_value(obs):
return list(map(itemgetter(0), obs))
check_results = list(map(lambda res: extract_obs_value((res["run"].get_observation("obs"))) == res["obs"], result))
assert all(check_results)
def test_multiple_runs_upload_artifacts(self, client, in_tempdir):
client.set_project()
client.set_experiment()
pool = multiprocessing.Pool(36)
result = pool.map(partial(upload_artifact, client), range(5))
pool.close()
check_results = list(map(lambda res: res["run"].get_artifact("artifact").read() == res["artifact"], result))
assert all(check_results)
|
155755
|
import numpy as np
from collections import defaultdict
from .loss import compute_rre, compute_rte
class Logger:
def __init__(self):
self.store = defaultdict(list)
def reset(self):
self.store = defaultdict(list)
def add(self, key, value):
self.store[key].append(value)
def avg(self, key):
return np.mean(self.store[key])
def save_sacred(self, ex, it):
for k, v in self.store.items():
if "fail" in k:
ex.log_scalar(k, np.sum(v), it)
else:
ex.log_scalar(k, np.mean(v), it)
def show(self):
print("\n========================")
for k, v in self.store.items():
if "fail" in k:
print(k, np.sum(v))
else:
print(k, np.mean(v))
if "attn_acc" in k or "count" in k:
print("min " + k + ": ", np.min(v))
print("max " + k + ": ", np.max(v))
print("========================\n")
def save_metrics(logger, prefix, R, t, R_est, t_est, te_thres=0.6, re_thres=5):
bs = R.shape[0]
rot_error = compute_rre(R_est, R)
trans_error = compute_rte(t.reshape(bs, -1), t_est.reshape(bs, -1))
logger.add(prefix + ".rte_all", trans_error)
logger.add(prefix + ".rre_all", rot_error)
if rot_error < re_thres and trans_error < te_thres:
logger.add(prefix + ".recall", 1)
logger.add(prefix + ".rte", trans_error)
logger.add(prefix + ".rre", rot_error)
else:
if rot_error > re_thres and trans_error > te_thres:
logger.add(prefix + ".fail_both", 1)
        elif rot_error > re_thres:
logger.add(prefix + ".fail_rot", 1)
else:
logger.add(prefix + ".fail_trans", 1)
logger.add(prefix + ".recall", 0)
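# --- Minimal usage sketch for Logger (illustrative addition; run as a module so
# the relative .loss import resolves) ---
if __name__ == "__main__":
    logger = Logger()
    logger.add("val.rre_all", 2.5)
    logger.add("val.rre_all", 3.5)
    logger.add("val.recall", 1)
    print(logger.avg("val.rre_all"))  # mean of the two rotation errors: 3.0
    logger.show()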
|
155763
|
import unittest
from expand_region_handler import *
class UndoRedoTest(unittest.TestCase):
def test_dont_crash_with_blank_json (self):
settingsJson = ''
newSettingsJson = add_to_stack(settingsJson, "teststring", 2, 3, 1, 1);
newSettings = json.loads(newSettingsJson)
self.assertEqual(newSettings.get("hash"), "d67c5cbf5b01c9f91932e3b8def5e5f8")
self.assertEqual(newSettings.get("stack")[0].get("start"), 2)
self.assertEqual(newSettings.get("stack")[0].get("end"), 3)
def test_add_selection_as_only_item_in_stack_1 (self):
settingsJson = '{"stack": [], "hash": ""}'
newSettingsJson = add_to_stack(settingsJson, "teststring", 2, 3, 1, 1);
newSettings = json.loads(newSettingsJson)
self.assertEqual(newSettings.get("hash"), "d67c5cbf5b01c9f91932e3b8def5e5f8")
self.assertEqual(newSettings.get("stack")[0].get("start"), 2)
self.assertEqual(newSettings.get("stack")[0].get("end"), 3)
def test_add_selection_as_only_item_in_stack_2 (self):
settingsJson = '{"stack": [], "hash": "d67c5cbf5b01c9f91932e3b8def5e5f8"}'
newSettingsJson = add_to_stack(settingsJson, "teststring", 2, 3, 1, 1);
newSettings = json.loads(newSettingsJson)
self.assertEqual(newSettings.get("hash"), "d67c5cbf5b01c9f91932e3b8def5e5f8")
self.assertEqual(newSettings.get("stack")[0].get("start"), 2)
self.assertEqual(newSettings.get("stack")[0].get("end"), 3)
def test_add_selection_as_only_item_in_stack_3 (self):
settingsJson = '{"hash": "d67c5cbf5b01c9f91932e3b8def5e5f8", "stack": [{"start": 2, "end": 3}]}'
newSettingsJson = add_to_stack(settingsJson, "teststring1", 89, 99, 1, 1);
newSettings = json.loads(newSettingsJson)
self.assertEqual(newSettings.get("hash"), "790fabec68fa346d3444a3a2196b1741")
self.assertEqual(newSettings.get("stack")[0].get("start"), 89)
self.assertEqual(newSettings.get("stack")[0].get("end"), 99)
def test_add_selection_as_only_item_in_stack_4 (self):
settingsJson = '{"hash": "d67c5cbf5b01c9f91932e3b8def5e5f8", "stack": [{"start": 2, "end": 3}]}'
newSettingsJson = add_to_stack(settingsJson, "teststring", 89, 99, 1, 1);
newSettings = json.loads(newSettingsJson)
self.assertEqual(newSettings.get("hash"), "d67c5cbf5b01c9f91932e3b8def5e5f8")
self.assertEqual(newSettings.get("stack")[0].get("start"), 89)
self.assertEqual(newSettings.get("stack")[0].get("end"), 99)
def test_add_selection_to_existing_stack (self):
settingsJson = '{"hash": "d67c5cbf5b01c9f91932e3b8def5e5f8", "stack": [{"start": 2, "end": 3}]}'
newSettingsJson = add_to_stack(settingsJson, "teststring", 1, 4, 2, 3);
newSettings = json.loads(newSettingsJson)
self.assertEqual(newSettings.get("hash"), "d67c5cbf5b01c9f91932e3b8def5e5f8")
self.assertEqual(newSettings.get("stack")[0].get("start"), 2)
self.assertEqual(newSettings.get("stack")[0].get("end"), 3)
self.assertEqual(newSettings.get("stack")[1].get("start"), 1)
self.assertEqual(newSettings.get("stack")[1].get("end"), 4)
def test_should_not_crash_with_umlaute (self):
settingsJson = '{"hash": "", "stack": []}'
newSettingsJson = add_to_stack(settingsJson, "hinzufügen", 1, 4, 2, 3);
        ## note: this passes here even though the same call fails in the IDE
newSettings = json.loads(newSettingsJson)
self.assertEqual(newSettings.get("hash"), "7f633295a95fdba4a07dcbe70d0768a0")
self.assertEqual(newSettings.get("stack")[0].get("start"), 1)
self.assertEqual(newSettings.get("stack")[0].get("end"), 4)
## undo
def test_clear_stack_on_undo_because_string_is_different (self):
settingsJson = '{"hash": "d67c5cbf5b01c9f91932e3b8def5e5f8", "stack": [{"start": 2, "end": 3}]}'
result = get_last_selection(settingsJson, "teststring1", 2, 3);
newSettings = json.loads(result["newSettingsJson"])
self.assertEqual(newSettings.get("hash"), "790fabec68fa346d3444a3a2196b1741")
self.assertEqual(len(newSettings.get("stack")), 0)
def test_clear_stack_on_undo_because_selection_is_different (self):
settingsJson = '{"hash": "d67c5cbf5b01c9f91932e3b8def5e5f8", "stack": [{"start": 2, "end": 3}, {"start": 1, "end": 4}]}'
result = get_last_selection(settingsJson, "teststring", 89, 99);
newSettings = json.loads(result["newSettingsJson"])
self.assertEqual(newSettings.get("hash"), "d67c5cbf5b01c9f91932e3b8def5e5f8")
self.assertEqual(len(newSettings.get("stack")), 0)
def test_return_last_selection (self):
settingsJson = '{"hash": "d67c5cbf5b01c9f91932e3b8def5e5f8", "stack": [{"start": 2, "end": 3}, {"start": 1, "end": 4}]}'
result = get_last_selection(settingsJson, "teststring", 1, 4);
newSelection = result["newSelection"]
self.assertEqual(newSelection.get("startIndex"), 2)
self.assertEqual(newSelection.get("endIndex"), 3)
newSettings = json.loads(result["newSettingsJson"])
self.assertEqual(newSettings.get("hash"), "d67c5cbf5b01c9f91932e3b8def5e5f8")
self.assertEqual(len(newSettings.get("stack")), 1)
self.assertEqual(newSettings.get("stack")[0].get("start"), 2)
self.assertEqual(newSettings.get("stack")[0].get("end"), 3)
def test_should_not_crash_when_stack_empty_1 (self):
settingsJson = '{"stack": [], "hash": ""}'
result = get_last_selection(settingsJson, "teststring", 1, 4);
newSelection = result["newSelection"]
self.assertEqual(newSelection, None)
newSettings = json.loads(result["newSettingsJson"])
self.assertEqual(newSettings.get("hash"), "d67c5cbf5b01c9f91932e3b8def5e5f8")
self.assertEqual(len(newSettings.get("stack")), 0)
def test_should_not_crash_when_stack_empty_2 (self):
settingsJson = '{"stack": [], "hash": "d67c5cbf5b01c9f91932e3b8def5e5f8"}'
result = get_last_selection(settingsJson, "teststring", 1, 4);
newSelection = result["newSelection"]
self.assertEqual(newSelection, None)
newSettings = json.loads(result["newSettingsJson"])
self.assertEqual(newSettings.get("hash"), "d67c5cbf5b01c9f91932e3b8def5e5f8")
self.assertEqual(len(newSettings.get("stack")), 0)
def test_should_not_crash_when_last_selection (self):
settingsJson = '{"hash": "d67c5cbf5b01c9f91932e3b8def5e5f8", "stack": [{"start": 1, "end": 4}]}'
result = get_last_selection(settingsJson, "teststring", 1, 4);
newSelection = result["newSelection"]
self.assertEqual(newSelection, None)
newSettings = json.loads(result["newSettingsJson"])
self.assertEqual(newSettings.get("hash"), "d67c5cbf5b01c9f91932e3b8def5e5f8")
self.assertEqual(len(newSettings.get("stack")), 0)
if __name__ == "__main__":
unittest.main()
|
155769
|
import numpy as np
import torch
import math
def TLift(in_score, gal_cam_id, gal_time, prob_cam_id, prob_time, num_cams, tau=100, sigma=200, K=10, alpha=0.2):
"""Function for the Temporal Lifting (TLift) method
TLift is a model-free temporal cooccurrence based score weighting method proposed in
<NAME> and <NAME>, "Interpretable and Generalizable Person Re-Identification with Query-Adaptive
Convolution and Temporal Lifting." In The European Conference on Computer Vision (ECCV), 23-28 August, 2020.
Inputs:
in_score: the similarity score of size [num_probs, num_gals] between the gallery and probe sets.
gal_cam_id: camera index for samples in the gallery set, starting from 0 and continuously numbered.
gal_time: time stamps of samples in the gallery set.
prob_cam_id: camera index for samples in the probe set, starting from 0 and continuously numbered.
prob_time: time stamps of samples in the probe set.
num_cams: the number of cameras.
tau: the interval threshold to define nearby persons. Default: 100.
sigma: the sensitivity parameter of the time difference. Default: 200.
K: parameter of the top K retrievals used to define the pivot set P. Default: 10.
alpha: regularizer for the multiplication fusion. Default: 0.2.
All the cam_id and time inputs are 1-dim vectors, and they are in the same order corresponding to
the first axis (probe) or second axis (gallery) of the in_score.
Outputs:
out_score: the refined score by TLift, with the same size as the in_score.
Comments:
The default alpha value works for the sigmoid or re-ranking matching scores. Otherwise, it is
suggested that your input scores are distributed in [0, 1], with an average score around 0.01-0.1
considering many negative matching pairs. To apply TLift directly on QAConv scores, please use
score = torch.sigmoid(score) instead of the scaled scores in qaconv.py.
Author:
<NAME>, reimplemented by <NAME>
<EMAIL>
Version:
V1.1
July 12, 2020
"""
out_score = torch.tensor(np.zeros_like(in_score))
if torch.cuda.is_available():
out_score = out_score.cuda()
if len(prob_time.shape) == 1:
prob_time = prob_time[np.newaxis, :]
prob_time_diff = prob_time - np.transpose(prob_time)
cooccur_mask = (abs(prob_time_diff) < tau)
g_sam_index = []
score = []
gal_time_diff = []
for g_cam in range(num_cams):
g_sam_index.append(np.where(gal_cam_id == g_cam)[0]) # camera id starting with 0.
score.append(in_score[:, g_sam_index[g_cam]])
frame_id = gal_time[g_sam_index[g_cam]]
if len(frame_id.shape) == 1:
frame_id = frame_id[np.newaxis, :]
gal_time_diff.append(
torch.tensor(frame_id - np.transpose(frame_id), dtype=out_score.dtype).to(out_score.device))
for p_cam in range(num_cams):
p_sam_index = np.where(prob_cam_id == p_cam)[0]
c_mask = cooccur_mask[p_sam_index][:, p_sam_index]
num_prob = len(p_sam_index)
for g_cam in range(num_cams):
# if p_cam == g_cam: # in some public datasets they still evaluate negative pairs in the same camera
# continue
prob_score = score[g_cam][p_sam_index, :]
for i in range(num_prob):
cooccur_index = np.where(c_mask[:, i] == True)[0]
cooccur_score = prob_score[cooccur_index, :]
sorted_score = np.sort(cooccur_score, axis=None)
if sorted_score.shape[0] > K:
thr = sorted_score[-K]
else:
thr = sorted_score[0]
mask_in_gal = np.where(cooccur_score >= thr)[1]
dt = gal_time_diff[g_cam][:, mask_in_gal]
weight = torch.mean(torch.exp(-1 * torch.pow(dt, 2).to(dtype=out_score.dtype) / math.pow(sigma, 2)),
dim=1)
out_score[p_sam_index[i], g_sam_index[g_cam]] = weight
out_score = out_score.cpu().numpy()
out_score = (out_score + alpha) * in_score
return out_score
if __name__ == '__main__':
in_score = np.random.randn(50, 100)
gal_cam_id = np.random.randint(0, 5, (100))
gal_time = np.random.randint(0, 20, (100))
prob_cam_id = np.random.randint(0, 5, (50))
prob_time = np.random.randint(0, 20, (50))
num_cams = 5
TLift(in_score, gal_cam_id, gal_time, prob_cam_id, prob_time, num_cams)
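    # Hedged follow-up (per the docstring's Comments section): QAConv-style raw scores
    # would first be squashed into (0, 1) before the temporal weighting, e.g.:
    sig_score = torch.sigmoid(torch.tensor(in_score)).numpy()
    TLift(sig_score, gal_cam_id, gal_time, prob_cam_id, prob_time, num_cams)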
|
155836
|
import stackless
class MyChannel:
def __init__(self):
        self.queue = []     # waiting senders (stored as (tasklet, data) pairs) or waiting receiver tasklets
        self.balance = 0    # > 0: senders waiting, < 0: receivers waiting
        self.temp = None    # hand-off slot for data given to a receiver woken by send()
def send(self, data):
if self.balance < 0:
receiver = self.queue.pop(0)
self.temp = data
receiver.insert()
self.balance += 1
receiver.run()
else:
sender = stackless.current
self.queue.append((sender, data))
self.balance += 1
stackless.schedule_remove()
def receive(self):
if self.balance > 0:
sender, retval = self.queue.pop(0)
sender.insert()
self.balance -= 1
return retval
else:
receiver = stackless.current
self.queue.append(receiver)
self.balance -= 1
stackless.schedule_remove()
return self.temp
def f1(ch):
for i in range(5):
ch.send(i)
print("done sending")
def f2(ch):
while 1:
data = ch.receive()
if data is None:
print("done receiving")
return
print("received", data)
def test():
ch = MyChannel()
t2 = stackless.tasklet(f2)(ch)
t1 = stackless.tasklet(f1)(ch)
stackless.run()
return ch
if __name__ == "__main__":
test()
|
155844
|
from setuptools import setup
with open('README.md', encoding='utf-8') as f:
long_description = f.read()
setup(
name='pytago',
version='0.0.12',
packages=['pytago', 'pytago.go_ast'],
url='https://github.com/nottheswimmer/pytago',
license='',
author='<NAME>',
author_email='<EMAIL>',
description='Transpiles some Python into human-readable Golang.',
long_description=long_description,
long_description_content_type='text/markdown',
entry_points={
'console_scripts': ['pytago=pytago.cmd:main'],
},
python_requires=">=3.10",
classifiers=[
"Programming Language :: Python :: 3",
"License :: OSI Approved :: MIT License",
"Operating System :: OS Independent",
],
zip_safe=False,
include_package_data=True,
install_requires=["astroid==2.6.2", "dill==0.3.4"]
)
|
155848
|
import datetime
from airflow import DAG
from airflow.operators.python_operator import PythonOperator
from airflow.contrib.operators.aws_athena_operator import AWSAthenaOperator
import googleapiclient.discovery
from jinja2 import PackageLoader
from kite_airflow.plugins.google import GoogleSheetsRangeOperator
from kite_airflow.common import configs
from kite_airflow.common import utils as common_utils
from kite_airflow.youtube_dashboard import api
from kite_airflow.youtube_dashboard import files
from kite_airflow.youtube_dashboard import utils
from kite_airflow.slack_alerts import task_fail_slack_alert
BUCKET = 'kite-youtube-data' if common_utils.is_production() else 'kite-metrics-test'
SCRATCH_SPACE_LOC = 's3://{}/athena-scratch-space/'.format(BUCKET)
DATABASE = 'prod_kite_link_stats_youtube' if common_utils.is_production() else 'kite_link_stats_youtube'
TABLE_CHANNELS = {
'name': 'kite_link_stats_youtube_channels',
'data_location': 's3://{}/youtube-dashboard/channels/'.format(BUCKET),
}
TABLE_VIDEOS = {
'name': 'kite_link_stats_youtube_videos',
'data_location': 's3://{}/youtube-dashboard/videos/'.format(BUCKET),
}
default_args = {
'owner': 'airflow',
'depends_on_past': False,
'start_date': datetime.datetime(2020, 11, 21),
'email_on_failure': False,
'email_on_retry': False,
'retries': 0,
'retry_delay': datetime.timedelta(minutes=5),
'on_failure_callback': task_fail_slack_alert,
}
kite_link_stats_dag = DAG(
'youtube_dashboard',
description='Import links stats of sponsored videos for the YouTube dashboard.',
default_args=default_args,
schedule_interval='10 0 * * *',
jinja_environment_kwargs={
'loader': PackageLoader('kite_airflow', 'templates')
},
)
schema_operators = []
for table in [TABLE_CHANNELS, TABLE_VIDEOS]:
drop_op = AWSAthenaOperator(
aws_conn_id=configs.AWS_CONN_ID,
task_id='drop_table_{}'.format(table['name']),
query='DROP TABLE IF EXISTS {}'.format(table['name']),
output_location='s3://kite-metrics-test/athena-results/ddl',
database=DATABASE,
dag=kite_link_stats_dag,
params={'data_location': table['data_location']},
)
create_op = AWSAthenaOperator(
aws_conn_id=configs.AWS_CONN_ID,
task_id='create_table_{}'.format(table['name']),
query='athena/tables/{}.tmpl.sql'.format(table['name']),
output_location='s3://kite-metrics-test/athena-results/ddl',
database=DATABASE,
dag=kite_link_stats_dag,
params={'data_location': table['data_location']},
)
drop_op >> create_op
schema_operators.append(create_op)
get_channels_op = AWSAthenaOperator(
aws_conn_id=configs.AWS_CONN_ID,
task_id='get_channels',
query='SELECT * FROM {}'.format(TABLE_CHANNELS['name']),
output_location=SCRATCH_SPACE_LOC,
database=DATABASE,
dag=kite_link_stats_dag,
)
schema_operators >> get_channels_op
get_videos_op = AWSAthenaOperator(
aws_conn_id=configs.AWS_CONN_ID,
task_id='get_videos',
query='SELECT * FROM {}'.format(TABLE_VIDEOS['name']),
output_location=SCRATCH_SPACE_LOC,
database=DATABASE,
dag=kite_link_stats_dag,
)
schema_operators >> get_videos_op
get_channels_sheet_operator = GoogleSheetsRangeOperator(
gcp_conn_id='google_cloud_kite_dev',
spreadsheet_id='XXXXXXX-J0',
range="'List of Channels'!A:C",
task_id='get_channels_sheet',
dag=kite_link_stats_dag,
)
def update_videos_from_all_channels(ti, yt_client):
    '''
    Take all given channels and store the list of their videos.
    For a new channel we search all of its videos; for an existing channel we
    only search for new videos via YouTube activities.
    Returns:
        list:
            new video items which we will use while taking snapshots. We need this
            because Athena queries are evaluated at start, so we will not receive
            these new videos via the get-videos query.
    '''
channel_list = files.get_scratch_space_csv(ti, get_channels_op.task_id)
sheet_data = ti.xcom_pull(task_ids='get_channels_sheet')['values']
cid_field = sheet_data[0].index('Channel ID')
sheet_channels = {line[cid_field] for line in sheet_data[1:] if len(line) > cid_field and line[cid_field].strip()}
for new_c in sheet_channels - {c['id'] for c in channel_list}:
channel_list.append({'id': new_c, 'is_backfilled': 'false', 'last_backfill_until': '', 'last_updated': ''})
new_video_list = []
search_budget = 80
exception = None
for channel in channel_list:
channel_id = channel['id']
        # indicates a new channel or a channel whose backfill is yet to complete
        if channel['is_backfilled'] == 'false':
            # in case the backfill was incomplete, resume where it left off
            published_before = channel['last_backfill_until'] if channel['is_backfilled'] == 'false' else None
video_search_list, has_channel_search_remaining, no_of_searches, exception = api.get_all_video_search_list(
yt_client,
channel_id,
published_before,
search_budget,
)
for video_search_item in video_search_list:
new_video_list.append({
'id': utils.get_video_id_of_search_item(video_search_item),
'channel_id': channel_id,
})
                # only update channel attributes if videos are found (also handles YouTube out-of-quota cases)
                if video_search_list:
                    last_search_item = video_search_list[-1]
                    channel['last_backfill_until'] = utils.get_published_date_of_search_item(last_search_item)
                    channel['is_backfilled'] = not has_channel_search_remaining
                # update the last_updated of the channel, which will help us in limiting future searches
channel['last_updated'] = common_utils.get_date_time_in_ISO()
search_budget -= no_of_searches
if search_budget <= 0:
break
all_activity_list, exception = api.get_all_activity_list(
yt_client,
channel_id,
channel['last_updated'],
)
        if all_activity_list:
files.write_activities_on_file(all_activity_list)
video_activity_list = api.filter_video_activity_from_list(
all_activity_list,
)
for video_activity in video_activity_list:
new_video_list.append({
'id': utils.get_id_of_video_activity(video_activity),
'channel_id': channel_id,
})
        # update the last_updated of the channel, which will help us in limiting future searches
channel['last_updated'] = common_utils.get_date_time_in_ISO()
files.write_channels_on_file(channel_list)
if len(new_video_list) > 0:
files.write_videos_on_file(new_video_list)
if exception:
raise exception
return new_video_list
def take_snapshots_and_update_files(video_list_for_snapshots, cached_urls_dict):
snapshot_list = get_snapshots_list(video_list_for_snapshots, cached_urls_dict)
files.write_snapshots_on_file(snapshot_list)
files.write_cached_urls_on_file(cached_urls_dict)
def get_snapshots_list(video_list, cached_urls_dict):
if not video_list:
        return []  # return an empty list so the snapshot file writer always receives a list
snapshot_list = []
for video_item in video_list:
snapshot_list.append({
'video_id': utils.get_id_of_video_item(video_item),
'description': utils.get_description_of_video_item(video_item),
'is_link_present': utils.is_link_present_in_description(video_item, cached_urls_dict), # also updates the cache in case of shorten urls
'views': utils.get_views_of_video_item(video_item),
'timestamp': common_utils.get_date_time_in_ISO(),
})
return snapshot_list
def update_snapshots_of_all_videos(ti, yt_client, new_video_list):
'''
Take snapshots of all of the available videos and new videos
'''
video_list_for_snapshots = []
cached_urls_dict = files.get_cached_urls_from_file()
all_videos_list = files.get_scratch_space_csv(ti, get_videos_op.task_id)
all_videos_id_list = [video['id'] for video in all_videos_list]
no_of_batch_requests = 50 # to optimise YouTube quota
    # also append the new video ids because the get-videos query doesn't return
    # results that were added during the execution of this script
all_videos_id_list.extend(
list(map(lambda video: video['id'], new_video_list))
)
for start_index in range(0, len(all_videos_id_list), no_of_batch_requests):
try:
video_list = []
end_index = (start_index) + no_of_batch_requests
videos_id_batch_list = all_videos_id_list[start_index:end_index]
video_list = api.get_video_list(yt_client, videos_id_batch_list)
video_list_for_snapshots.extend(video_list)
except Exception:
# store data until now in case of any error or if quota exceeded
take_snapshots_and_update_files(video_list_for_snapshots, cached_urls_dict)
raise
take_snapshots_and_update_files(video_list_for_snapshots, cached_urls_dict)
def get_snaphots_of_videos(ti, **context):
api_service_name = 'youtube'
api_version = 'v3'
api_key = 'XXXXXXX'
yt_client = googleapiclient.discovery.build(
api_service_name, api_version, developerKey=api_key
)
new_video_list = update_videos_from_all_channels(ti, yt_client)
update_snapshots_of_all_videos(ti, yt_client, new_video_list)
get_snaphots_of_videos_operator = PythonOperator(
python_callable=get_snaphots_of_videos,
task_id=get_snaphots_of_videos.__name__,
dag=kite_link_stats_dag,
provide_context=True,
)
(
get_channels_op,
get_videos_op,
get_channels_sheet_operator,
) >> get_snaphots_of_videos_operator
|
155874
|
import sqlite3
from flask_restplus import Resource, reqparse
class User:
def __init__(self, _id, username, password):
self.id = _id
self.username = username
self.password = password
@classmethod
def find_by_username(cls, username):
connection = sqlite3.connect('data.db')
cursor = connection.cursor()
query = "SELECT * FROM users WHERE username = ?"
# Parameters MUST ALWAYS be in form of a TUPLE!
result = cursor.execute(query, (username, ))
# If the result set does not contain any values row = None
row = result.fetchone()
if row is not None:
# *row is like *args, cls in this example is class User
user = cls(*row)
else:
user = None
connection.close()
return user
@classmethod
def find_by_id(cls, id):
connection = sqlite3.connect('data.db')
cursor = connection.cursor()
query = "SELECT * FROM users WHERE id = ?"
# Parameters MUST ALWAYS be in form of a TUPLE!
result = cursor.execute(query, (id, ))
# If the result set does not contain any values row = None
row = result.fetchone()
if row is not None:
# *row is like *args, cls in this example is class User
user = cls(*row)
else:
user = None
connection.close()
return user
class UserRegister(Resource):
# Parameter parsing
parser = reqparse.RequestParser()
parser.add_argument('username',
type = str,
required = True,
help = "Username is required!" )
parser.add_argument('password',
type = str,
required = True,
help = "Password is required!" )
def post(self):
data = UserRegister.parser.parse_args()
# Preventing user duplication
if User.find_by_username(data['username']) is not None:
return {"message" : "User with that username already exists."}, 400
else:
# Connection
connection = sqlite3.connect('data.db')
cursor = connection.cursor()
# id is auto-incrementing so it needs to be setup to null
register_query = "INSERT INTO users VALUES (NULL, ?, ?)"
cursor.execute(register_query, (data['username'], data['password'],))
connection.commit()
connection.close()
return {"message": "User created successfully!"}, 201
|
155917
|
import pyfits
import numpy
import scipy
import math
import os
import toVac
import prueba
from scipy import optimize
import matplotlib.pylab as plt
import readcol
def gauss1(params,x):
C = params[0]
A = params[1]
med = params[2]
sig = params[3]
g = C+A*numpy.exp(-0.5*(x-med)*(x-med)/(sig*sig))
return g
def res_gauss1(params,g,x):
return g-gauss1(params,x)
def el_stl(wam,fl,SLi,SLf):
for i in range(len(SLi)):
if SLi[i]>wam[-1]:
break
I = numpy.where((wam >= SLi[i]) & (wam<=SLf[i]))[0]
fl[I]=1.0
return fl
SLi,SLf = readcol.readcol('lines2.dat',twod=False)
model_path = '/media/VERBATIM/COHELO_MODELS/RES_MOD/R_60000b/'
model_path = '/data/ajordan/COHELO_MODELS/R_60000b/'
non_rot = os.listdir(model_path+'vsini_0.0/')
rot = [0.0,2.5,5.0,7.5,10.0,15.0,20.0,25.0,30.0,35.0,40.0,45.0,50.0]
i=0
f = open('anchos.dat','w')
verdad = False
non_rot = non_rot[200:]
for fits in non_rot:
sc = pyfits.getdata(model_path+'vsini_0.0/'+fits)
hd = pyfits.getheader(model_path+'vsini_0.0/'+fits)
wa1 = numpy.arange(len(sc))*hd['CD1_1']+hd['CRVAL1']
sc = el_stl(wa1,sc,SLi,SLf)
wa = toVac.ToVacuum(wa1)
I = numpy.where((wa > 5500) & (wa < 6250))[0]
wa = wa[I]
fl = sc[I]
T = hd['TEFF']
G = hd['LOG_G']
Z = hd['FEH']
devs = ''
if T == 4500 and G == 2.5 and Z == -1.5:
verdad=True
#elif T == 4000 and G == 3.0:
# break
if verdad:
for v in rot:
if os.access(model_path+'vsini_'+str(v)+'/'+'R_'+str(v)+'_'+fits[-22:],os.F_OK):
sc2 = pyfits.getdata(model_path+'vsini_'+str(v)+'/'+'R_'+str(v)+'_'+fits[-22:])
sc2 = el_stl(wa1,sc2,SLi,SLf)
fl2 = sc2[I]
vv,cc = prueba.CCF(wa,fl,wa,fl2,-200.0,200.0)
cc2 = numpy.array(cc)
vv2 = numpy.array(vv)
B3 = 0.5*(cc2[0]+cc2[-1])
A3 = numpy.max(cc2)-B3
med3 = 0.0
sig3 = 20.0
guess1 = [B3,A3,med3,sig3]
ajustep=optimize.leastsq(res_gauss1,guess1,args=(cc2,numpy.array(vv2)))
cte3 = ajustep[0][0]
no3 = ajustep[0][1]
med3 = ajustep[0][2]
sig3 = ajustep[0][3]
devs = devs+str(sig3)+'\t'
else:
devs = devs+str(0.0)+'\t'
f.write(str(T)+'\t'+str(G)+'\t'+str(Z)+'\t'+devs+'\n')
    print(T, G, Z, devs)
|
155933
|
import pytest
# from snovault.schema_utils import load_schema
pytestmark = [pytest.mark.setone, pytest.mark.working, pytest.mark.schema]
@pytest.fixture
def biosample_cc_w_diff(testapp, de_term, lab, award):
item = {
"culture_start_date": "2018-01-01",
"differentiation_state": "Differentiated to definitive endoderm demonstrated by decreased Oct4 expression and increased Sox17 expression",
"tissue": de_term['@id'],
"in_vitro_differentiated": "Yes",
'award': award['@id'],
'lab': lab['@id']
}
return testapp.post_json('/biosample_cell_culture', item).json['@graph'][0]
@pytest.fixture
def biosample_1(testapp, human_biosource, lab, award):
item = {
'description': "GM12878 prepared for Hi-C",
'biosource': [human_biosource['@id'], ],
'award': award['@id'],
'lab': lab['@id'],
}
return testapp.post_json('/biosample', item).json['@graph'][0]
@pytest.fixture
def biosample_w_mod(testapp, biosample_1, mod_w_target):
return testapp.patch_json(biosample_1['@id'], {'modifications': [mod_w_target['@id']]}).json['@graph'][0]
@pytest.fixture
def biosample_w_treatment(testapp, biosample_1, rnai):
return testapp.patch_json(biosample_1['@id'], {'treatments': [rnai['@id']]}).json['@graph'][0]
def biosample_relation(derived_from):
return {"biosample_relation": [{"relationship_type": "derived from",
"biosample": derived_from['@id']}]}
def test_biosample_has_display_title(testapp, biosample_1):
# accession fallback used for display title here
assert biosample_1['display_title'] == biosample_1['accession']
# data from test/datafixtures
def test_update_biosample_relation(testapp, human_biosample, biosample_1):
patch_res = testapp.patch_json(human_biosample['@id'], biosample_relation(biosample_1))
res = testapp.get(biosample_1['@id'])
# expected relation: 'biosample': human_biosample['@id'],
# 'relationship_type': 'parent of'
assert res.json['biosample_relation'][0]['biosample']['@id'] == human_biosample['@id']
assert res.json['biosample_relation'][0]['relationship_type'] == 'parent of'
def test_biosample_calculated_properties(testapp, biosample_1, ):
"""
Test to ensure the calculated properties are in result returned from testapp
These have string 'None' returned if no value as they are used in Item page view
"""
res = testapp.get(biosample_1['@id']).json
assert 'modifications_summary' in res
assert 'modifications_summary_short' in res
assert 'treatments_summary' in res
assert 'biosource_summary' in res
def test_biosample_biosource_summary_one_biosource(testapp, biosample_1, human_biosource):
assert biosample_1['biosource_summary'] == human_biosource['biosource_name']
def test_biosample_biosource_summary_two_biosource(testapp, biosample_1, human_biosource, lung_biosource):
res = testapp.patch_json(biosample_1['@id'], {'biosource': [human_biosource['@id'], lung_biosource['@id']]}).json['@graph'][0]
assert human_biosource['biosource_name'] in res['biosource_summary']
assert lung_biosource['biosource_name'] in res['biosource_summary']
assert ' and ' in res['biosource_summary']
def test_biosample_biosource_summary_w_differentiation(testapp, biosample_1, human_biosource, biosample_cc_w_diff, de_term):
res = testapp.patch_json(biosample_1['@id'], {'cell_culture_details': [biosample_cc_w_diff['@id']]}).json['@graph'][0]
assert human_biosource['biosource_name'] in res['biosource_summary']
assert ' differentiated to ' in res['biosource_summary']
assert de_term['display_title'] in res['biosource_summary']
def test_biosample_sample_type_w_differentiation(testapp, biosample_1, biosample_cc_w_diff):
res = testapp.patch_json(biosample_1['@id'], {'cell_culture_details': [biosample_cc_w_diff['@id']]}).json['@graph'][0]
assert res['biosample_type'] == 'in vitro differentiated cells'
def test_biosample_sample_type_immortalized_wo_differentiation(testapp, biosample_1, biosample_cc_wo_diff):
res = testapp.patch_json(biosample_1['@id'], {'cell_culture_details': [biosample_cc_wo_diff['@id']]}).json['@graph'][0]
assert res['biosample_type'] == 'immortalized cells'
def test_biosample_sample_type_bs_stem_cell_line(testapp, biosample_1, human_biosource):
bsres = testapp.patch_json(human_biosource['@id'], {'biosource_type': 'stem cell derived cell line'}).json['@graph'][0]
res = testapp.patch_json(biosample_1['@id'], {'biosource': [bsres['@id']]}).json['@graph'][0]
assert res['biosample_type'] == 'stem cells'
def test_biosample_sample_type_bs_multicellular(testapp, biosample_1, human_biosource):
bsres = testapp.patch_json(human_biosource['@id'], {'biosource_type': 'multicellular organism'}).json['@graph'][0]
res = testapp.patch_json(biosample_1['@id'], {'biosource': [bsres['@id']]}).json['@graph'][0]
assert res['biosample_type'] == 'whole organisms'
def test_biosample_sample_type_bs_tissue(testapp, biosample_1, human_biosource):
bty = 'tissue'
bsres = testapp.patch_json(human_biosource['@id'], {'biosource_type': bty}).json['@graph'][0]
res = testapp.patch_json(biosample_1['@id'], {'biosource': [bsres['@id']]}).json['@graph'][0]
assert res['biosample_type'] == bty
def test_biosample_sample_type_bs_lines_and_to_pluralize(testapp, biosample_1, human_biosource):
types = {
"primary cell": "primary cells",
"primary cell line": "primary cells",
"immortalized cell line": "immortalized cells",
"stem cell": "stem cells",
"induced pluripotent stem cell": "induced pluripotent stem cells"
}
for bty, bsty in types.items():
bsres = testapp.patch_json(human_biosource['@id'], {'biosource_type': bty}).json['@graph'][0]
res = testapp.patch_json(biosample_1['@id'], {'biosource': [bsres['@id']]}).json['@graph'][0]
assert res['biosample_type'] == bsty
def test_biosample_sample_type_bs_multiple_same_type(testapp, biosample_1, human_biosource, GM12878_biosource):
res = testapp.patch_json(biosample_1['@id'], {'biosource': [human_biosource['@id'], GM12878_biosource['@id']]}).json['@graph'][0]
assert res['biosample_type'] == 'immortalized cells'
def test_biosample_sample_type_bs_multiple_diff_types(testapp, biosample_1, human_biosource, lung_biosource):
res = testapp.patch_json(biosample_1['@id'], {'biosource': [human_biosource['@id'], lung_biosource['@id']]}).json['@graph'][0]
assert res['biosample_type'] == 'mixed sample'
def test_biosample_modifications_summaries(biosample_w_mod):
assert biosample_w_mod['modifications_summary'] == 'Crispr for RAD21 gene'
assert biosample_w_mod['modifications_summary_short'] == 'RAD21 Crispr'
def test_biosample_modifications_summaries_no_mods(biosample_1):
assert biosample_1.get('modifications_summary') == 'None'
assert biosample_1.get('modifications_summary_short') == 'None'
def test_biosample_treatments_summary(biosample_w_treatment):
assert biosample_w_treatment.get('treatments_summary') == 'shRNA treatment'
def test_biosample_treatments_summary_no_treatment(biosample_1):
assert biosample_1.get('treatments_summary') == 'None'
def test_biosample_category_undifferentiated_stem_cells(testapp, biosample_1, human_biosource):
scl = testapp.patch_json(human_biosource['@id'], {'biosource_type': 'stem cell derived cell line'}).json['@graph'][0]
bios = testapp.patch_json(biosample_1['@id'], {'biosource': [scl['@id']]}).json['@graph'][0]
assert 'Human stem cell' in bios.get('biosample_category')
def test_biosample_category_differentiated_stem_cells(testapp, biosample_1, human_biosource, biosample_cc_w_diff):
scl = testapp.patch_json(human_biosource['@id'], {'biosource_type': 'stem cell derived cell line'}).json['@graph'][0]
bios = testapp.patch_json(biosample_1['@id'], {'biosource': [scl['@id']], 'cell_culture_details': [biosample_cc_w_diff['@id']]}).json['@graph'][0]
cats = bios.get('biosample_category')
assert 'Human stem cell' not in cats
assert 'In vitro Differentiation' in cats
def test_biosample_biosource_category_two_biosource(testapp, biosample_1, human_biosource, lung_biosource):
res = testapp.patch_json(biosample_1['@id'], {'biosource': [human_biosource['@id'], lung_biosource['@id']]}).json['@graph'][0]
cat = res.get('biosample_category')
assert len(cat) == 1
assert cat[0] == 'Mixed samples'
# setting up fixtures for testing tissue and organ calcprop
@pytest.fixture
def brain_term(testapp, uberon_ont, cns_term, ectoderm_term):
item = {
"is_slim_for": "organ",
"term_id": "brain_tid",
"term_name": "brain",
"source_ontologies": [uberon_ont['@id']],
"slim_terms": [cns_term['@id'], ectoderm_term['@id']]
}
return testapp.post_json('/ontology_term', item).json['@graph'][0]
@pytest.fixture
def cns_term(testapp, uberon_ont, ectoderm_term):
item = {
"is_slim_for": "system",
"term_id": "cns_tid",
"term_name": "central nervous system",
"source_ontologies": [uberon_ont['@id']],
"slim_terms": [ectoderm_term['@id']]
}
return testapp.post_json('/ontology_term', item).json['@graph'][0]
@pytest.fixture
def ectoderm_term(testapp, uberon_ont):
item = {
"is_slim_for": "developmental",
"term_id": "ectoderm_tid",
"term_name": "ectoderm",
"source_ontologies": [uberon_ont['@id']],
}
return testapp.post_json('/ontology_term', item).json['@graph'][0]
@pytest.fixture
def primary_cell_term(testapp, ontology):
item = {
"is_slim_for": "cell",
"term_id": "pcell_id",
"term_name": "primary cell",
"source_ontologies": [ontology['@id']],
}
return testapp.post_json('/ontology_term', item).json['@graph'][0]
@pytest.fixture
def cortical_neuron_term(testapp, uberon_ont, brain_term, cns_term,
ectoderm_term, primary_cell_term):
item = {
"term_id": "cort_neuron_id",
"term_name": "cortical neuron",
"source_ontologies": [uberon_ont['@id']],
"slim_terms": [brain_term['@id'], cns_term['@id'], ectoderm_term['@id'], primary_cell_term['@id']]
}
return testapp.post_json('/ontology_term', item).json['@graph'][0]
@pytest.fixture
def bcc_diff_to_cortical(testapp, lab, award, cortical_neuron_term):
item = {
"culture_start_date": "2018-01-01",
"differentiation_state": "Stem cell differentiated to cortical neuron",
"tissue": cortical_neuron_term['@id'],
"in_vitro_differentiated": "Yes",
'award': award['@id'],
'lab': lab['@id']
}
return testapp.post_json('/biosample_cell_culture', item).json['@graph'][0]
@pytest.fixture
def diff_cortical_neuron_bs(testapp, F123_biosource, bcc_diff_to_cortical, lab, award):
item = {
"description": "Differentiated cortical neuron",
"biosource": [F123_biosource['@id']],
"cell_culture_details": [bcc_diff_to_cortical['@id']],
"award": award['@id'],
"lab": lab['@id']
}
return testapp.post_json('/biosample', item).json['@graph'][0]
@pytest.fixture
def brain_biosource(testapp, brain_term, lab, award):
item = {
"description": "Brain tissue",
"biosource_type": "tissue",
"tissue": brain_term['@id'],
"lab": lab['@id'],
"award": award['@id']
}
return testapp.post_json('/biosource', item).json['@graph'][0]
@pytest.fixture
def brain_biosample(testapp, brain_biosource, lab, award):
item = {
"description": "Brain Tissue Biosample",
"biosource": [brain_biosource['@id']],
"award": award['@id'],
"lab": lab['@id']
}
return testapp.post_json('/biosample', item).json['@graph'][0]
@pytest.fixture
def mixed_biosample(testapp, brain_biosource, lung_biosource, lab, award):
item = {
"description": "Mixed Tissue Biosample",
"biosource": [brain_biosource['@id'], lung_biosource['@id']],
"award": award['@id'],
"lab": lab['@id']
}
return testapp.post_json('/biosample', item).json['@graph'][0]
def test_get_tissue_organ_info_none_present(biosample_1):
assert 'tissue_organ_info' not in biosample_1
def test_get_tissue_organ_info_tissue_in_cell_culture(diff_cortical_neuron_bs, cortical_neuron_term):
org_sys = sorted(['brain', 'central nervous system', 'ectoderm'])
assert 'tissue_organ_info' in diff_cortical_neuron_bs
assert diff_cortical_neuron_bs['tissue_organ_info']['tissue_source'] == cortical_neuron_term.get('display_title')
assert sorted(diff_cortical_neuron_bs['tissue_organ_info']['organ_system']) == org_sys
def test_get_tissue_organ_info_tissue_in_biosource(brain_biosample, brain_term):
org_sys = sorted(['central nervous system', 'ectoderm'])
assert 'tissue_organ_info' in brain_biosample
assert brain_biosample['tissue_organ_info']['tissue_source'] == brain_term.get('display_title')
assert sorted(brain_biosample['tissue_organ_info']['organ_system']) == org_sys
def test_get_tissue_organ_info_tissue_mixed_biosample(mixed_biosample):
org_sys = sorted(['central nervous system', 'ectoderm'])
assert 'tissue_organ_info' in mixed_biosample
assert mixed_biosample['tissue_organ_info']['tissue_source'] == 'mixed tissue'
assert sorted(mixed_biosample['tissue_organ_info']['organ_system']) == org_sys
def test_get_tissue_organ_info_none_if_only_cell_slim_terms(testapp, F123_biosource, lab, award):
item = {
"description": "F123 Biosample",
"biosource": [F123_biosource['@id']],
"award": award['@id'],
"lab": lab['@id']
}
f123_biosample = testapp.post_json('/biosample', item).json['@graph'][0]
assert 'tissue_organ_info' not in f123_biosample
|
156008
|
import re
from random import randint
from typing import Match
from typing import Optional
from retrying import retry
import apysc as ap
from apysc._expression import expression_data_util
from apysc._expression.event_handler_scope import HandlerScope
from apysc._type.copy_interface import CopyInterface
from apysc._type.variable_name_interface import VariableNameInterface
class TestCopyInterface:
@retry(stop_max_attempt_number=15, wait_fixed=randint(10, 3000))
def test__copy(self) -> None:
interface: CopyInterface = CopyInterface()
interface.variable_name = 'test_copy_interface'
interface._type_name = 'test_copy_interface'
result: CopyInterface = interface._copy()
assert result.variable_name.startswith('test_copy_interface_')
assert result.variable_name != interface.variable_name
@retry(stop_max_attempt_number=15, wait_fixed=randint(10, 3000))
def test__append_copy_expression(self) -> None:
expression_data_util.empty_expression()
int_1: ap.Int = ap.Int(10)
int_2: ap.Int = int_1._copy()
expression: str = expression_data_util.get_current_expression()
expected: str = (
f'{int_2.variable_name} = '
f'{int_1.variable_name};'
)
assert expected in expression
expression_data_util.empty_expression()
arr_1: ap.Array = ap.Array([10, 20, 30])
arr_2: ap.Array = arr_1._copy()
expression = expression_data_util.get_current_expression()
expected = (
f'{arr_2.variable_name} = '
f'cpy({arr_1.variable_name});'
)
assert expected in expression
@retry(stop_max_attempt_number=15, wait_fixed=randint(10, 3000))
def test__append_value_updating_cpy_exp_to_handler_scope(self) -> None:
expression_data_util.empty_expression()
instance: VariableNameInterface = VariableNameInterface()
instance.variable_name = 'test_instance'
int_1: ap.Int = ap.Int(10)
with HandlerScope(handler_name='test_handler_1', instance=instance):
int_2: ap.Int = int_1._copy()
expression: str = \
expression_data_util.get_current_event_handler_scope_expression()
pattern: str = (
rf'^{int_2.variable_name} = {int_1.variable_name};')
match: Optional[Match] = re.search(
pattern=pattern,
string=expression, flags=re.MULTILINE)
assert match is not None
expression_data_util.empty_expression()
arr_1: ap.Array = ap.Array([1, 2, 3])
with HandlerScope(handler_name='test_handler_1', instance=instance):
arr_2: ap.Array = arr_1._copy()
expression = \
expression_data_util.get_current_event_handler_scope_expression()
pattern = (
rf'^{arr_2.variable_name} = cpy\({arr_1.variable_name}\);'
)
match = re.search(
pattern=pattern,
string=expression, flags=re.MULTILINE)
assert match is not None
|
156058
|
import pandas as pd
from pytest import mark
from pytest import approx
@mark.inflation
@mark.usefixtures('_init_inflation')
class TestInflation:
def test_get_infl_rub_data(self):
assert self.infl_rub.first_date == pd.to_datetime('1991-01')
assert self.infl_rub.pl.years == 10
assert self.infl_rub.pl.months == 1
assert self.infl_rub.name == 'Russia Inflation Rate'
assert self.infl_rub.type == 'inflation'
assert self.infl_rub.cumulative_inflation[-1] == approx(19576.47386585591, rel=1e-4)
assert self.infl_rub.purchasing_power_1000 == approx(0.05107911300773333, rel=1e-4)
assert self.infl_rub.rolling_inflation[-1] == approx(0.2070533602100877, rel=1e-4)
def test_get_infl_usd_data(self):
assert self.infl_usd.first_date == pd.to_datetime('1913-02')
assert self.infl_usd.pl.years == 10
assert self.infl_usd.pl.months == 0
assert self.infl_usd.name == 'US Inflation Rate'
assert self.infl_usd.type == 'inflation'
assert self.infl_usd.cumulative_inflation[-1] == approx(0.7145424753209466, rel=1e-4)
assert self.infl_usd.purchasing_power_1000 == approx(583.2459763429362, rel=1e-4)
assert self.infl_usd.rolling_inflation[-1] == approx(-0.005813765681402461, rel=1e-4)
def test_get_infl_eur_data(self):
assert self.infl_eur.first_date == pd.to_datetime('1996-02')
        assert self.infl_eur.pl.years == 10
assert self.infl_eur.name == 'EU Inflation Rate'
assert self.infl_eur.type == 'inflation'
assert self.infl_eur.cumulative_inflation[-1] == approx(0.20267532488218776, rel=1e-4)
assert self.infl_eur.purchasing_power_1000 == approx(831.4796016106495, rel=1e-4)
assert self.infl_eur.rolling_inflation[-1] == approx(0.02317927930197139, rel=1e-4)
def test_describe(self):
description = self.infl_rub.describe(years=[5])
assert list(description.columns) == ['property', 'period', 'Russia Inflation Rate']
assert description.loc[3, 'Russia Inflation Rate'] == approx(3.0414434004010245, rel=1e-4)
assert description.loc[5, 'Russia Inflation Rate'] == approx(247.43634907784974, rel=1e-4)
def test_annual_inflation_ts(self):
assert self.infl_rub.annual_inflation_ts.iloc[-1] == approx(0.02760000000000007, rel=1e-4)
@mark.rates
@mark.usefixtures('_init_rates')
class TestRates:
def test_rates_rub(self):
assert self.rates_rub.name == 'Max deposit rates (RUB) in Russian banks'
assert self.rates_rub.first_date == pd.to_datetime('2015-01')
assert self.rates_rub.last_date == pd.to_datetime('2020-02')
def test_okid(self):
assert self.rates_rub.okid.sum() == approx(6376.2308059223915, rel=1e-4)
|
156060
|
from __future__ import print_function
from __future__ import division
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.nn.utils.rnn import pack_padded_sequence
from torch.nn.utils.rnn import pad_packed_sequence
def l2norm(inputs, dim=-1):
# inputs: (batch, dim_ft)
norm = torch.norm(inputs, p=2, dim=dim, keepdim=True)
    zeros = torch.zeros_like(norm)  # same device/dtype as the input, so this also works on CPU
zeros[norm == 0] = 1
inputs = inputs / (norm+zeros)
return inputs
def sequence_mask(lengths, max_len=None):
''' Creates a boolean mask from sequence lengths.
'''
# lengths: LongTensor, (batch, )
batch_size = lengths.size(0)
max_len = max_len or lengths.max()
return (torch.arange(0, max_len)
.type_as(lengths)
.repeat(batch_size, 1)
.lt(lengths.unsqueeze(1)))
def rnn_factory(rnn_type, **kwargs):
# Use pytorch version when available.
rnn = getattr(nn, rnn_type.upper())(**kwargs)
return rnn
def calc_rnn_outs_with_sort(rnn, inputs, seq_lens, init_states=None):
'''
inputs: FloatTensor, (batch, seq_len, dim_ft)
seq_lens: LongTensor, (batch,)
'''
seq_len = inputs.size(1)
# sort
sorted_seq_lens, seq_sort_idx = torch.sort(seq_lens, descending=True)
_, seq_unsort_idx = torch.sort(seq_sort_idx, descending=False)
# pack
inputs = torch.index_select(inputs, 0, seq_sort_idx)
packed_inputs = pack_padded_sequence(inputs, sorted_seq_lens, batch_first=True)
if init_states is not None:
if isinstance(init_states, tuple):
new_states = []
for i, state in enumerate(init_states):
new_states.append(torch.index_select(state, 1, seq_sort_idx))
init_states = tuple(new_states)
else:
init_states = torch.index_select(init_states, 1, seq_sort_idx)
# rnn
packed_outs, states = rnn(packed_inputs, init_states)
# unpack
outs, _ = pad_packed_sequence(packed_outs, batch_first=True,
total_length=seq_len, padding_value=0)
# unsort
# outs.size = (batch, seq_len, num_directions * hidden_size)
outs = torch.index_select(outs, 0, seq_unsort_idx)
if isinstance(states, tuple):
# states: (num_layers * num_directions, batch, hidden_size)
new_states = []
for i, state in enumerate(states):
new_states.append(torch.index_select(state, 1, seq_unsort_idx))
states = tuple(new_states)
else:
states = torch.index_select(states, 1, seq_unsort_idx)
return outs, states
class SigmoidCrossEntropyLoss(nn.Module):
'''
reference to:
https://www.tensorflow.org/api_docs/python/tf/nn/weighted_cross_entropy_with_logits
l = (1 + (q - 1) * z)
(1 - z) * x + l * (log(1 + exp(-abs(x))) + max(-x, 0))
'''
def __init__(self, pos_weights=None):
super(SigmoidCrossEntropyLoss, self).__init__()
if pos_weights is not None:
device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
self.pos_weights = torch.FloatTensor(pos_weights).to(device)
else:
self.pos_weights = None
def forward(self, logits, targets):
'''
Args:
logits: (batch, num_labels)
targets: (batch, num_labels)
pos_weights: (num_labels, )
Return:
losses: (batch, num_labels)
'''
if self.pos_weights is None:
weights = 1
else:
weights = 1 + torch.unsqueeze(self.pos_weights - 1, 0) * targets
losses = (1 - targets) * logits + \
weights * (torch.log(1 + torch.exp(-torch.abs(logits))) + torch.clamp(-logits, 0))
return losses
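# Hedged usage sketch: a small check of the helpers above. The loss comparison
# relies on the weighted formulation in the docstring matching PyTorch's built-in
# binary_cross_entropy_with_logits with pos_weight.
if __name__ == '__main__':
    torch.manual_seed(0)
    device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')

    # calc_rnn_outs_with_sort with a small GRU over a padded batch (kept on CPU)
    gru = rnn_factory('gru', input_size=8, hidden_size=16, batch_first=True)
    inputs = torch.randn(3, 5, 8)
    seq_lens = torch.tensor([5, 3, 2])
    outs, states = calc_rnn_outs_with_sort(gru, inputs, seq_lens)
    print(outs.shape, states.shape)  # torch.Size([3, 5, 16]) torch.Size([1, 3, 16])

    # SigmoidCrossEntropyLoss vs. the built-in weighted BCE-with-logits
    logits = torch.randn(4, 3, device=device)
    targets = torch.randint(0, 2, (4, 3), device=device).float()
    pos_weights = [1.0, 2.0, 0.5]
    ours = SigmoidCrossEntropyLoss(pos_weights=pos_weights)(logits, targets)
    ref = F.binary_cross_entropy_with_logits(
        logits, targets,
        pos_weight=torch.tensor(pos_weights, device=device),
        reduction='none')
    print(torch.allclose(ours, ref, atol=1e-6))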
|
156081
|
from __future__ import annotations
from jsonclasses import jsonclass, types
@jsonclass
class SuperFilter:
list1: list[int] | None = types.listof(int).filter(lambda i: i % 2 == 0)
list2: list[int] | None = types.listof(int).filter(types.mod(2).eq(0))
|
156302
|
import decimal
from protoactor.actor import PID
class AccountCredited:
pass
class AccountDebited:
pass
class ChangeBalance:
    def __init__(self, amount: decimal.Decimal, reply_to: PID):
self.amount = amount
self.reply_to = reply_to
class Credit(ChangeBalance):
    def __init__(self, amount: decimal.Decimal, reply_to: PID):
super().__init__(amount, reply_to)
class CreditRefused:
pass
class Debit(ChangeBalance):
    def __init__(self, amount: decimal.Decimal, reply_to: PID):
super().__init__(amount, reply_to)
class DebitRolledBack:
pass
class EscalateTransfer:
def __init__(self, message: str):
self._message = message
@property
def message(self):
return self._message
def __str__(self):
return f'{self.__class__.__module__}.{self.__class__.__name__}: {self._message}'
class Result():
def __init__(self, pid: PID):
self.pid = pid
class FailedAndInconsistent(Result):
def __init__(self, pid: PID):
super().__init__(pid)
class FailedButConsistentResult(Result):
def __init__(self, pid: PID):
super().__init__(pid)
class GetBalance:
pass
class InsufficientFunds:
pass
class InternalServerError:
pass
class OK:
pass
class Refused:
pass
class ServiceUnavailable:
pass
class StatusUnknown:
pass
class SuccessResult(Result):
def __init__(self, pid: PID):
super().__init__(pid)
class TransferCompleted:
    def __init__(self, from_id: PID, from_balance: decimal.Decimal, to: PID, to_balance: decimal.Decimal):
self.from_id = from_id
self.from_balance = from_balance
self.to = to
self.to_balance = to_balance
def __str__(self):
return f'{self.__class__.__module__}.{self.__class__.__name__}: {self.from_id.id} balance is ' \
f'{self.from_balance}, {self.to.id} balance is {self.to_balance}'
class TransferFailed():
def __init__(self, reason: str):
self.reason = reason
def __str__(self):
return f'{self.__class__.__module__}.{self.__class__.__name__}: {self.reason}'
class TransferStarted:
pass
class UnknownResult(Result):
def __init__(self, pid: PID):
super().__init__(pid)
|
156304
|
from winrm.protocol import Protocol
import argparse
parser = argparse.ArgumentParser(description='Run command on Windows host')
parser.add_argument("--host", required=True)
parser.add_argument("--port", default=5985)
parser.add_argument("--socksport", default=1234)
parser.add_argument("--username", required=True)
parser.add_argument("--password", required=True)
parser.add_argument("--protocol", default="http")
parser.add_argument("--transport", default="basic")
args = parser.parse_args()
# Run process with low-level API
p = Protocol(
endpoint=args.protocol+'://'+args.host+':'+str(args.port)+'/wsman',
transport=args.transport,
username=args.username,
    password=args.password,
server_cert_validation='ignore',
proxy='socks5h://localhost:'+str(args.socksport))
shell_id = p.open_shell()
command_id = p.run_command(shell_id, 'ipconfig', ['/all'])
std_out, std_err, status_code = p.get_command_output(shell_id, command_id)
p.cleanup_command(shell_id, command_id)
p.close_shell(shell_id)
print(status_code)
print(std_out)
print(std_err)
|
156318
|
import csv
import os
import numpy as np
import sentencepiece as spm
import torch
class DataLoader:
def __init__(self, directory, parts, cols, spm_filename):
"""Dataset loader.
Args:
directory (str): dataset directory.
parts (list[str]): dataset parts. [parts].tsv files must exists in dataset directory.
spm_filename (str): file name of the dump sentencepiece model.
"""
self.pad_idx, self.unk_idx, self.sos_idx, self.eos_idx = range(4)
self.cols = cols
self.directory = directory
self.parts = parts
self.spm_filename = spm_filename
# Load sentecepiece model:
self.sp = spm.SentencePieceProcessor()
self.sp.load(self.spm_filename)
# Load dataset parts:
self.data_parts = {part: list(self.from_tsv(part)) for part in parts}
self.part_lens = {part: len(self.data_parts[part]) for part in parts}
self.max_lens = {part: self.get_max_len(part) for part in parts}
self.max_len = max([self.max_lens[part] for part in parts])
def next_batch(self, batch_size, part, device):
"""Get next batch.
Args:
batch_size (int): batch size.
part (str): dataset part.
device (torch.device): torch device.
Returns:
Batch: batch wrapper.
"""
indexes = np.random.randint(0, self.part_lens[part], batch_size)
raw_batches = [[self.data_parts[part][i][col] for i in indexes] for col, name in enumerate(self.cols)]
return Batch(self, raw_batches, device)
def sequential(self, part, device):
"""Get all examples from dataset sequential.
Args:
part (str): part of the dataset.
device: (torch.Device): torch device.
Returns:
Batch: batch wrapper with size 1.
"""
for example in self.data_parts[part]:
            # wrap each column separately so Batch sees the same (num_cols, batch=1) layout as next_batch
            raw_batches = [[column] for column in example]
yield Batch(self, raw_batches, device)
def pad(self, data):
"""Add <sos>, <eos> tags and pad sequences from batch
Args:
data (list[list[int]]): token indexes
Returns:
list[list[int]]: padded list of sizes (batch, max_seq_len + 2)
"""
data = list(map(lambda x: [self.sos_idx] + x + [self.eos_idx], data))
lens = [len(s) for s in data]
max_len = max(lens)
for i, length in enumerate(lens):
to_add = max_len - length
data[i] += [self.pad_idx] * to_add
return data, lens
def from_tsv(self, part):
"""Read and tokenize data from TSV file.
Args:
part (str): the name of the part.
Yields:
(list[int], list[int]): pairs for each example in dataset.
"""
filename = os.path.join(self.directory, part + '.tsv')
with open(filename) as file:
reader = csv.reader(file, delimiter='\t')
for row in reader:
yield tuple(self.sp.EncodeAsIds(row[i]) for i, col in enumerate(self.cols))
def decode(self, data):
"""Decode encoded sentence tensor.
Args:
data (torch.Tensor): sentence tensor.
Returns:
list[str]: decoded sentences.
"""
return [self.sp.DecodeIds([token.item() for token in sentence]) for sentence in data]
def decode_raw(self, data):
"""Decode encoded sentence tensor without removing auxiliary symbols.
Args:
data (torch.Tensor): sentence tensor.
Returns:
list[str]: decoded sentences.
"""
return [''.join([self.sp.IdToPiece(token.item()) for token in sentence]) for sentence in data]
def get_max_len(self, part):
lens = []
for example in self.data_parts[part]:
for col in example:
lens.append(len(col))
return max(lens) + 2
class Batch:
def __init__(self, data_loader, raw_batches, device):
"""Simple batch wrapper.
Args:
data_loader (DataLoader): data loader object.
raw_batches (list[data]): raw data batches.
device (torch.device): torch device.
Variables:
- **cols_name_length** (list[int]): lengths of `cols_name` sequences.
- **cols_name** (torch.Tensor): long tensor of `cols_name` sequences.
"""
for i, col in enumerate(data_loader.cols):
tensor, length = data_loader.pad(raw_batches[i])
self.__setattr__(col, torch.tensor(tensor, dtype=torch.long, device=device))
self.__setattr__(col + '_length', length)
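# Hedged usage sketch (shown as a comment because it needs a trained sentencepiece
# model and TSV data on disk; 'data', 'train', ['src', 'tgt'] and 'm.model' are
# placeholder names, not part of this module):
#
#     loader = DataLoader('data', ['train'], ['src', 'tgt'], 'm.model')
#     batch = loader.next_batch(8, 'train', torch.device('cpu'))
#     print(batch.src.shape, batch.src_length)   # (8, max_len_in_batch + 2), list of lengths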
|