code
stringlengths 22
1.05M
| apis
listlengths 1
3.31k
| extract_api
stringlengths 75
3.25M
|
|---|---|---|
"""
This module implements methods for generating random attributes from nodes in a graph based on distribution and range.
Method generate() will create all the necessary attributes for the graph:
Collection: name.
Dataset collection: name.
System collection: name.
System: system criticality, environment, description, name, regex grouping.
Dataset: slo, environment, description, name, regex grouping.
Data integrity: reconstruction time, volatility, regeneration time, restoration time.
Dataset processing: impact, freshness.
"""
import random
class AttributeGenerator:
    """A class to generate random attributes for nodes based on distribution or range of values.

    Attributes:
        collection_params: Instance of CollectionParams.
        dataset_params: Instance of DatasetParams.
        system_params: Instance of SystemParams.
        data_integrity_params: Instance of DataIntegrityParams.
        processing_params: Instance of ProcessingParams.
        connection_params: Instance of ConnectionParams.
        collection_attributes: Dict with keys as attribute type, values as lists of generated attributes.
        dataset_collection_attributes: Dict with keys as attribute type, values as lists of generated attributes.
        system_collection_attributes: Dict with keys as attribute type, values as lists of generated attributes.
        dataset_attributes: Dict with keys as attribute type, values as lists of generated attributes.
        system_attributes: Dict with keys as attribute type, values as lists of generated attributes.
        dataset_processing_attributes: Dict with keys as attribute type, values as lists of generated attributes.
        data_integrity_attributes: Dict with keys as attribute type, values as lists of generated attributes.
    """

    # Inclusive (low, high) bounds per time unit; hoisted to a class constant
    # so the dict is not rebuilt on every _generate_time() call.
    _TIME_RANGES = {
        "d": (1, 30),
        "h": (1, 120),
        "m": (1, 720),
        "s": (1, 360),
    }

    def __init__(self, collection_params, dataset_params, system_params,
                 data_integrity_params, processing_params, connection_params):
        self.collection_params = collection_params
        self.dataset_params = dataset_params
        self.system_params = system_params
        self.data_integrity_params = data_integrity_params
        self.processing_params = processing_params
        self.connection_params = connection_params
        self.collection_attributes = {}
        self.dataset_collection_attributes = {}
        self.system_collection_attributes = {}
        self.dataset_attributes = {}
        self.system_attributes = {}
        self.dataset_processing_attributes = {}
        self.data_integrity_attributes = {}

    @staticmethod
    def _generate_time(n=1):
        """Generates n random time strings in format 1d / 25h / 121m / 46s."""
        generated_time = []
        for _ in range(n):  # loop index was unused in the original
            time_metric = random.choice(list(AttributeGenerator._TIME_RANGES))
            low, high = AttributeGenerator._TIME_RANGES[time_metric]
            generated_time.append(f"{random.randint(low, high)}{time_metric}")
        return generated_time

    @staticmethod
    def _generate_from_proba(proba_map, n=1):
        """Generates n random values with replacement from map using their probability.

        Values of proba_map are relative weights (e.g. view counts); they are
        normalised to probabilities before sampling.
        """
        population = list(proba_map.keys())
        weights = list(proba_map.values())
        # Fix: the original recomputed sum(...) inside the comprehension for
        # every element, making normalisation quadratic. Compute it once.
        total = sum(weights)
        probability = [w / total for w in weights]
        return random.choices(population, probability, k=n)

    @staticmethod
    def _generate_description(node_type, node_id):
        """Generates random description for a node (ex. Dataset number 1.)."""
        return f"{node_type.capitalize()} number {node_id}."

    @staticmethod
    def _generate_regex(node_type, node_id):
        """Generates random regex grouping."""
        return f"{node_type}.{node_id}.*"

    @staticmethod
    def _generate_name(node_type, node_id):
        """Generates random node name."""
        return f"{node_type}.{node_id}"

    def _generate_collection_attributes(self):
        """Generates name for collections."""
        collection_names = [self._generate_name("collection", i)
                            for i in range(self.collection_params.collection_count)]
        self.collection_attributes["names"] = collection_names

    def _generate_dataset_collection_attributes(self):
        """Generates name for dataset collections."""
        dataset_collection_names = [self._generate_name("dataset collection", i)
                                    for i in range(self.collection_params.dataset_collection_count)]
        self.dataset_collection_attributes["names"] = dataset_collection_names

    def _generate_system_collection_attributes(self):
        """Generates name for system collections."""
        system_collection_names = [self._generate_name("system collection", i)
                                   for i in range(self.collection_params.system_collection_count)]
        self.system_collection_attributes["names"] = system_collection_names

    def _generate_dataset_attributes(self):
        """Generates slo, environments, regex groupings and names for datasets."""
        count = self.dataset_params.dataset_count
        dataset_descriptions = [self._generate_description("dataset", i) for i in range(count)]
        dataset_regexs = [self._generate_regex("dataset", i) for i in range(count)]
        dataset_names = [self._generate_name("dataset", i) for i in range(count)]
        dataset_slos = self._generate_time(n=count)
        # View counts as probability of being picked
        dataset_environments = self._generate_from_proba(self.dataset_params.dataset_env_count_map,
                                                         n=count)
        self.dataset_attributes["descriptions"] = dataset_descriptions
        self.dataset_attributes["names"] = dataset_names
        self.dataset_attributes["regex_groupings"] = dataset_regexs
        self.dataset_attributes["dataset_slos"] = dataset_slos
        self.dataset_attributes["dataset_environments"] = dataset_environments

    def _generate_system_attributes(self):
        """Generates system criticality, system environments, regex groupings, names and descriptions for systems."""
        count = self.system_params.system_count
        system_descriptions = [self._generate_description("system", i) for i in range(count)]
        system_regexs = [self._generate_regex("system", i) for i in range(count)]
        system_names = [self._generate_name("system", i) for i in range(count)]
        system_criticalities = self._generate_from_proba(self.system_params.system_criticality_proba_map,
                                                         n=count)
        # View counts as probability of being picked
        system_environments = self._generate_from_proba(self.system_params.system_env_count_map,
                                                        n=count)
        self.system_attributes["regex_groupings"] = system_regexs
        self.system_attributes["names"] = system_names
        self.system_attributes["descriptions"] = system_descriptions
        self.system_attributes["system_criticalities"] = system_criticalities
        self.system_attributes["system_environments"] = system_environments

    def _generate_processing_attributes(self):
        """Generates dataset impacts and dataset freshness."""
        count = self.connection_params.dataset_system_connection_count
        dataset_impacts = self._generate_from_proba(self.processing_params.dataset_impact_proba_map,
                                                    n=count)
        dataset_freshness = self._generate_from_proba(self.processing_params.dataset_criticality_proba_map,
                                                      n=count)
        self.dataset_processing_attributes["dataset_impacts"] = dataset_impacts
        self.dataset_processing_attributes["dataset_freshness"] = dataset_freshness

    def _generate_data_integrity_attributes(self):
        """Generates restoration, regeneration, reconstruction times and volatility for each dataset collection."""
        count = self.collection_params.dataset_collection_count
        data_restoration_time = self._generate_time(n=count)
        data_regeneration_time = self._generate_time(n=count)
        data_reconstruction_time = self._generate_time(n=count)
        data_volatility = self._generate_from_proba(self.data_integrity_params.data_volatility_proba_map,
                                                    n=count)
        self.data_integrity_attributes["data_restoration_time"] = data_restoration_time
        self.data_integrity_attributes["data_regeneration_time"] = data_regeneration_time
        self.data_integrity_attributes["data_reconstruction_time"] = data_reconstruction_time
        self.data_integrity_attributes["data_volatility"] = data_volatility

    def generate(self):
        """Generates all needed attributes for the data dependency mapping graph."""
        self._generate_collection_attributes()
        self._generate_dataset_collection_attributes()
        self._generate_system_collection_attributes()
        self._generate_dataset_attributes()
        self._generate_system_attributes()
        self._generate_processing_attributes()
        self._generate_data_integrity_attributes()
|
[
"random.choices",
"random.randint"
] |
[((3899, 3943), 'random.choices', 'random.choices', (['population', 'probability'], {'k': 'n'}), '(population, probability, k=n)\n', (3913, 3943), False, 'import random\n'), ((3371, 3443), 'random.randint', 'random.randint', (['time_ranges[time_metric][0]', 'time_ranges[time_metric][1]'], {}), '(time_ranges[time_metric][0], time_ranges[time_metric][1])\n', (3385, 3443), False, 'import random\n')]
|
"""
Provide quantilized form of Adder2d, https://arxiv.org/pdf/1912.13200.pdf
"""
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.autograd import Function
import math
from . import extra as ex
from .number import qsigned
class Adder2d(ex.Adder2d):
    """Quantization-aware wrapper around ex.Adder2d (Adder2d, arXiv:1912.13200).

    Holds per-tensor log2 scale thresholds for weight, bias and activation and
    applies qsigned() quantization in the forward pass when ``quant`` is True;
    otherwise falls back to the plain float adder implementation.
    """

    def __init__(self,
                 input_channel,
                 output_channel,
                 kernel_size,
                 stride=1,
                 padding=0,
                 bias=False,
                 weight_bit_width=8,
                 bias_bit_width=16,
                 inter_bit_width=32,
                 acti_bit_width=8,
                 retrain=True,
                 quant=False):
        super().__init__(input_channel,
                         output_channel,
                         kernel_size,
                         stride=stride,
                         padding=padding,
                         bias=bias)
        # Bit widths used by qsigned() for each tensor in the quantized path.
        self.weight_bit_width = weight_bit_width
        self.bias_bit_width = bias_bit_width
        self.inter_bit_width = inter_bit_width
        self.acti_bit_width = acti_bit_width
        self.retrain = retrain  # when True, the log2 thresholds are trainable
        self.quant = quant  # toggles quantized vs. float forward pass
        if retrain is True:
            # Trainable scalar log2 thresholds (registered as parameters).
            self.weight_log2_t = nn.Parameter(torch.Tensor(1))
            self.acti_log2_t = nn.Parameter(torch.Tensor(1))
            if self.bias is not None:
                self.bias_log2_t = nn.Parameter(torch.Tensor(1))
        else:
            # Fixed (non-trainable) thresholds.
            self.weight_log2_t = torch.Tensor(1)
            self.acti_log2_t = torch.Tensor(1)
            if self.bias is not None:
                self.bias_log2_t = torch.Tensor(1)

    def static(self):
        """Freeze the log2 thresholds: stop gradient updates on any that are Parameters."""
        self.retrain = False
        if isinstance(self.bias_log2_t, nn.Parameter):
            self.bias_log2_t.requires_grad_(False)
        if isinstance(self.weight_log2_t, nn.Parameter):
            self.weight_log2_t.requires_grad_(False)
        if isinstance(self.acti_log2_t, nn.Parameter):
            self.acti_log2_t.requires_grad_(False)

    def quantilize(self):
        """Switch forward() to the quantized path."""
        self.quant = True

    def floatilize(self):
        """Switch forward() to the float (unquantized) path."""
        self.quant = False

    def adder_forward(self, input):
        """Quantized forward pass: quantize weight, intermediate sum and activation."""
        # Dynamic activation scale from the current input's max magnitude.
        input_log2_t = input.abs().max().log2()
        weight = qsigned(self.weight, self.weight_log2_t,
                         self.weight_bit_width)
        # Intermediate threshold combines weight and input scales plus
        # log2(#weights) headroom for the summation.
        inter = qsigned(
            ex.adder2d_function(input,
                                weight,
                                None,
                                stride=self.stride,
                                padding=self.padding),
            self.weight_log2_t + input_log2_t + math.log2(self.weight.numel()),
            self.inter_bit_width)
        if self.bias is not None:
            # Reshape quantized bias from (C,) to (1, C, 1, 1) so it
            # broadcasts over the (N, C, H, W) intermediate tensor.
            inter += qsigned(
                self.bias, self.bias_log2_t,
                self.bias_bit_width).unsqueeze(1).unsqueeze(2).unsqueeze(0)
        return qsigned(inter, self.acti_log2_t, self.acti_bit_width)

    def adder_forward_unquant(self, input):
        """Float forward pass, delegating directly to the reference adder implementation."""
        return ex.adder2d_function(input,
                                   self.weight,
                                   self.bias,
                                   stride=self.stride,
                                   padding=self.padding)

    def forward(self, input):
        """Dispatch to the quantized or float path based on ``self.quant``."""
        return self.adder_forward(
            input) if self.quant else self.adder_forward_unquant(input)
if __name__ == '__main__':
    # Smoke test: quant defaults to False, so this exercises the
    # unquantized adder path on a random (N, C, H, W) batch.
    add = Adder2d(3, 4, 3, bias=True)
    x = torch.rand(10, 3, 10, 10)
    print(add(x).shape)
|
[
"torch.Tensor",
"torch.rand"
] |
[((3422, 3447), 'torch.rand', 'torch.rand', (['(10)', '(3)', '(10)', '(10)'], {}), '(10, 3, 10, 10)\n', (3432, 3447), False, 'import torch\n'), ((1474, 1489), 'torch.Tensor', 'torch.Tensor', (['(1)'], {}), '(1)\n', (1486, 1489), False, 'import torch\n'), ((1521, 1536), 'torch.Tensor', 'torch.Tensor', (['(1)'], {}), '(1)\n', (1533, 1536), False, 'import torch\n'), ((1246, 1261), 'torch.Tensor', 'torch.Tensor', (['(1)'], {}), '(1)\n', (1258, 1261), False, 'import torch\n'), ((1307, 1322), 'torch.Tensor', 'torch.Tensor', (['(1)'], {}), '(1)\n', (1319, 1322), False, 'import torch\n'), ((1610, 1625), 'torch.Tensor', 'torch.Tensor', (['(1)'], {}), '(1)\n', (1622, 1625), False, 'import torch\n'), ((1410, 1425), 'torch.Tensor', 'torch.Tensor', (['(1)'], {}), '(1)\n', (1422, 1425), False, 'import torch\n')]
|
import sys
import asyncio
import uvloop
asyncio.set_event_loop_policy(uvloop.EventLoopPolicy())
from aiohttp import web
from aiohttp_session import get_session, setup
from aiohttp_session.cookie_storage import EncryptedCookieStorage
import aiohttp_jinja2
import jinja2
import user
import search
#import personal
import friend
import share
import comment
import notification
import playlists
import history
import export
import speech
if '-debug' in sys.argv[1:]:
print('WARNING: running in debug mode')
import debug
import secrets
from util import routes, get_user, add_globals, error_middleware
# TLS is optional: ssl_context stays None (plain HTTP) unless secrets.USE_SSL is set.
ssl_context = None
if secrets.USE_SSL:
    import ssl
    ssl_context = ssl.create_default_context(ssl.Purpose.CLIENT_AUTH)
    ssl_context.load_cert_chain(secrets.SSL_CRT, secrets.SSL_KEY)


async def update_certificate():
    """Periodically reload the TLS certificate chain from disk."""
    while True:
        # let's encrypt certificate is updated every 3 months, we need to reload it
        # TODO: only do it if certificate changed
        print('reloading SSL certificate')
        ssl_context.load_cert_chain(secrets.SSL_CRT, secrets.SSL_KEY)
        await asyncio.sleep(3600 * 24)  # once a day


async def run_web_app():
    """Build the aiohttp application: sessions, jinja2 templates and routes."""
    app = web.Application(middlewares=[error_middleware])
    setup(app, EncryptedCookieStorage(secrets.SERVER_COOKIE_KEY))
    aiohttp_jinja2.setup(app, loader=jinja2.FileSystemLoader('templates/'), context_processors=[add_globals])
    # warning from doc: in production, /static should be handled by apache/nginx
    routes.static('/static', 'static', append_version=True)
    routes.static('/pictures', 'data/pictures')
    routes.static('/qrcodes', 'data/qrcodes')
    routes.static('/export', 'data/export')
    routes.static('/', 'static/favicon')
    app.add_routes(routes)
    if secrets.USE_SSL:
        # Background task so long-running servers pick up renewed certificates.
        asyncio.get_event_loop().create_task(update_certificate())
    return app


app = asyncio.get_event_loop().run_until_complete(run_web_app())
print('Running app at http%s://%s:%d' % ('s' if secrets.USE_SSL else '', secrets.HOST, secrets.PORT))
web.run_app(app, ssl_context=ssl_context, host=secrets.HOST, port=secrets.PORT)
|
[
"asyncio.get_event_loop",
"asyncio.sleep",
"ssl.create_default_context",
"jinja2.FileSystemLoader",
"uvloop.EventLoopPolicy",
"aiohttp.web.run_app",
"aiohttp_session.cookie_storage.EncryptedCookieStorage",
"util.routes.static",
"aiohttp.web.Application"
] |
[((2034, 2113), 'aiohttp.web.run_app', 'web.run_app', (['app'], {'ssl_context': 'ssl_context', 'host': 'secrets.HOST', 'port': 'secrets.PORT'}), '(app, ssl_context=ssl_context, host=secrets.HOST, port=secrets.PORT)\n', (2045, 2113), False, 'from aiohttp import web\n'), ((70, 94), 'uvloop.EventLoopPolicy', 'uvloop.EventLoopPolicy', ([], {}), '()\n', (92, 94), False, 'import uvloop\n'), ((683, 734), 'ssl.create_default_context', 'ssl.create_default_context', (['ssl.Purpose.CLIENT_AUTH'], {}), '(ssl.Purpose.CLIENT_AUTH)\n', (709, 734), False, 'import ssl\n'), ((1185, 1232), 'aiohttp.web.Application', 'web.Application', ([], {'middlewares': '[error_middleware]'}), '(middlewares=[error_middleware])\n', (1200, 1232), False, 'from aiohttp import web\n'), ((1496, 1551), 'util.routes.static', 'routes.static', (['"""/static"""', '"""static"""'], {'append_version': '(True)'}), "('/static', 'static', append_version=True)\n", (1509, 1551), False, 'from util import routes, get_user, add_globals, error_middleware\n'), ((1556, 1599), 'util.routes.static', 'routes.static', (['"""/pictures"""', '"""data/pictures"""'], {}), "('/pictures', 'data/pictures')\n", (1569, 1599), False, 'from util import routes, get_user, add_globals, error_middleware\n'), ((1604, 1645), 'util.routes.static', 'routes.static', (['"""/qrcodes"""', '"""data/qrcodes"""'], {}), "('/qrcodes', 'data/qrcodes')\n", (1617, 1645), False, 'from util import routes, get_user, add_globals, error_middleware\n'), ((1650, 1689), 'util.routes.static', 'routes.static', (['"""/export"""', '"""data/export"""'], {}), "('/export', 'data/export')\n", (1663, 1689), False, 'from util import routes, get_user, add_globals, error_middleware\n'), ((1694, 1730), 'util.routes.static', 'routes.static', (['"""/"""', '"""static/favicon"""'], {}), "('/', 'static/favicon')\n", (1707, 1730), False, 'from util import routes, get_user, add_globals, error_middleware\n'), ((1249, 1298), 'aiohttp_session.cookie_storage.EncryptedCookieStorage', 
'EncryptedCookieStorage', (['secrets.SERVER_COOKIE_KEY'], {}), '(secrets.SERVER_COOKIE_KEY)\n', (1271, 1298), False, 'from aiohttp_session.cookie_storage import EncryptedCookieStorage\n'), ((1873, 1897), 'asyncio.get_event_loop', 'asyncio.get_event_loop', ([], {}), '()\n', (1895, 1897), False, 'import asyncio\n'), ((1111, 1135), 'asyncio.sleep', 'asyncio.sleep', (['(3600 * 24)'], {}), '(3600 * 24)\n', (1124, 1135), False, 'import asyncio\n'), ((1337, 1374), 'jinja2.FileSystemLoader', 'jinja2.FileSystemLoader', (['"""templates/"""'], {}), "('templates/')\n", (1360, 1374), False, 'import jinja2\n'), ((1792, 1816), 'asyncio.get_event_loop', 'asyncio.get_event_loop', ([], {}), '()\n', (1814, 1816), False, 'import asyncio\n')]
|
import logging
from airflow.models import BaseOperator
from airflow.operators.sensors import BaseSensorOperator
from airflow.plugins_manager import AirflowPlugin
from airflow.utils.decorators import apply_defaults
log = logging.getLogger(__name__)
#Test comment
class CustomOperator(BaseOperator):
    """Toy operator that logs a greeting plus the single parameter it was configured with."""

    @apply_defaults
    def __init__(self, my_operator_param, *args, **kwargs):
        super(CustomOperator, self).__init__(*args, **kwargs)
        self.operator_param = my_operator_param

    def execute(self, context):
        # Lazy %-style logging args, as recommended by the logging docs.
        log.info("Hello World!")
        log.info('operator_param: %s', self.operator_param)
class CustomSensor(BaseSensorOperator):
    """Example sensor that succeeds only when the current minute is divisible by 3.

    Fixes relative to the original:
    - derive from ``BaseSensorOperator`` (already imported at the top of this
      module but unused); with plain ``BaseOperator`` Airflow never calls
      ``poke``, so the sensor semantics documented below could not work.
    - ``datetime`` was referenced without being imported anywhere in this
      module, so ``poke`` raised ``NameError`` at runtime; imported locally
      here to keep the fix self-contained.
    """

    @apply_defaults
    def __init__(self, *args, **kwargs):
        super(CustomSensor, self).__init__(*args, **kwargs)

    def poke(self, context):
        """Determines whether the task is successful or not
        if True: continue with the dag
        if False: call poke again
        if Exception: call poke again until the max number of retries has been reached
        """
        from datetime import datetime  # missing from the original module

        current_minute = datetime.now().minute
        if current_minute % 3 != 0:
            log.info("Current minute (%s) not is divisible by 3, sensor will retry.", current_minute)
            return False
        log.info("Current minute (%s) is divisible by 3, sensor finishing.", current_minute)
        return True
class MyFirstPlugin(AirflowPlugin):
    # Registers the custom operator and sensor with Airflow under this plugin name.
    name = "my_first_plugin"
    operators = [CustomOperator, CustomSensor]
|
[
"logging.getLogger"
] |
[((222, 249), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (239, 249), False, 'import logging\n')]
|
from django.urls import path
from raids import views
# URL routes for the raids app; each route name (raids_*) allows reverse lookup
# from views/templates.
urlpatterns = [
    path('encounter', views.encounter, name='raids_encounter'),
    path('dispatch', views.dispatch_loot_system, name='raids_dispatch'),
    path('search', views.search, name='raids_search'),
    # Ajax
    path('ajax/autocomplete', views.ajax_autocomplete_search, name='raids_autocomplete_search'),
]
|
[
"django.urls.path"
] |
[((74, 132), 'django.urls.path', 'path', (['"""encounter"""', 'views.encounter'], {'name': '"""raids_encounter"""'}), "('encounter', views.encounter, name='raids_encounter')\n", (78, 132), False, 'from django.urls import path\n'), ((138, 205), 'django.urls.path', 'path', (['"""dispatch"""', 'views.dispatch_loot_system'], {'name': '"""raids_dispatch"""'}), "('dispatch', views.dispatch_loot_system, name='raids_dispatch')\n", (142, 205), False, 'from django.urls import path\n'), ((211, 260), 'django.urls.path', 'path', (['"""search"""', 'views.search'], {'name': '"""raids_search"""'}), "('search', views.search, name='raids_search')\n", (215, 260), False, 'from django.urls import path\n'), ((278, 374), 'django.urls.path', 'path', (['"""ajax/autocomplete"""', 'views.ajax_autocomplete_search'], {'name': '"""raids_autocomplete_search"""'}), "('ajax/autocomplete', views.ajax_autocomplete_search, name=\n 'raids_autocomplete_search')\n", (282, 374), False, 'from django.urls import path\n')]
|
""" Module to handle attributes related to the site location and details """
import re
import logging
from osg_configure.modules import utilities
from osg_configure.modules import configfile
from osg_configure.modules import validation
from osg_configure.modules.baseconfiguration import BaseConfiguration
__all__ = ['SiteInformation']
# convenience
MANDATORY = configfile.Option.MANDATORY
MANDATORY_ON_CE = configfile.Option.MANDATORY_ON_CE
OPTIONAL = configfile.Option.OPTIONAL
class SiteInformation(BaseConfiguration):
    """Class to handle attributes related to site information such as location and
    contact information
    """

    # The wlcg_* options are read by GIP directly
    IGNORE_OPTIONS = ['wlcg_tier', 'wlcg_parent', 'wlcg_name', 'wlcg_grid']

    def __init__(self, *args, **kwargs):
        # pylint: disable-msg=W0142
        super(SiteInformation, self).__init__(*args, **kwargs)
        self.logger = logging.getLogger(__name__)
        self.log('SiteInformation.__init__ started')
        # Option name -> configfile.Option: declares whether each setting is
        # required and which OSG_* attribute it maps to.
        self.options = {'group':
                            configfile.Option(name='group',
                                              required=MANDATORY,
                                              default_value='OSG',
                                              mapping='OSG_GROUP'),
                        'host_name':
                            configfile.Option(name='host_name',
                                              required=MANDATORY_ON_CE,
                                              default_value='',
                                              mapping='OSG_HOSTNAME'),
                        'site_name':
                            configfile.Option(name='site_name',
                                              required=OPTIONAL,
                                              default_value='',
                                              mapping='OSG_SITE_NAME'),
                        'sponsor':
                            configfile.Option(name='sponsor',
                                              required=MANDATORY_ON_CE,
                                              mapping='OSG_SPONSOR'),
                        'site_policy':
                            configfile.Option(name='site_policy',
                                              required=OPTIONAL,
                                              default_value='',
                                              mapping='OSG_SITE_INFO'),
                        'contact':
                            configfile.Option(name='contact',
                                              required=MANDATORY_ON_CE,
                                              mapping='OSG_CONTACT_NAME'),
                        'email':
                            configfile.Option(name='email',
                                              required=MANDATORY_ON_CE,
                                              mapping='OSG_CONTACT_EMAIL'),
                        'city':
                            configfile.Option(name='city',
                                              required=MANDATORY_ON_CE,
                                              mapping='OSG_SITE_CITY'),
                        'country':
                            configfile.Option(name='country',
                                              required=MANDATORY_ON_CE,
                                              mapping='OSG_SITE_COUNTRY'),
                        'longitude':
                            configfile.Option(name='longitude',
                                              opt_type=float,
                                              required=MANDATORY_ON_CE,
                                              mapping='OSG_SITE_LONGITUDE'),
                        'latitude':
                            configfile.Option(name='latitude',
                                              opt_type=float,
                                              required=MANDATORY_ON_CE,
                                              mapping='OSG_SITE_LATITUDE'),
                        'resource':
                            configfile.Option(name='resource',
                                              required=OPTIONAL,
                                              default_value='',
                                              mapping='OSG_SITE_NAME'),
                        'resource_group':
                            configfile.Option(name='resource_group',
                                              default_value='',
                                              required=OPTIONAL)}
        self.config_section = "Site Information"
        self.enabled = True
        self.log('SiteInformation.__init__ completed')

    def parse_configuration(self, configuration):
        """Try to get configuration information from ConfigParser or SafeConfigParser object given
        by configuration and write recognized settings to attributes dict
        """
        self.log('SiteInformation.parse_configuration started')
        self.check_config(configuration)
        if not configuration.has_section(self.config_section):
            # Section absent: module stays disabled and nothing is parsed.
            self.enabled = False
            self.log("%s section not in config file" % self.config_section)
            self.log('SiteInformation.parse_configuration completed')
            return
        self.get_options(configuration, ignore_options=self.IGNORE_OPTIONS)
        self.log('SiteInformation.parse_configuration completed')

    # pylint: disable-msg=W0613
    def check_attributes(self, attributes):
        """Check attributes currently stored and make sure that they are consistent"""
        self.log('SiteInformation.check_attributes started')
        attributes_ok = True
        if not self.enabled:
            self.log('Not enabled, returning True')
            self.log('SiteInformation.check_attributes completed')
            return attributes_ok
        # OSG_GROUP must be either OSG or OSG-ITB
        group = self.opt_val("group")
        if group not in ('OSG', 'OSG-ITB'):
            self.log("The group setting must be either OSG or OSG-ITB, got: %s" %
                     group,
                     option='group',
                     section=self.config_section,
                     level=logging.ERROR)
            attributes_ok = False
        host_name = self.opt_val("host_name")
        # host_name must be a valid dns name, check this by getting it's ip adddress
        if not utilities.blank(host_name) and not validation.valid_domain(host_name, True):
            self.log("hostname %s can't be resolved" % host_name,
                     option='host_name',
                     section=self.config_section,
                     level=logging.ERROR)
            attributes_ok = False
        if not utilities.blank(self.opt_val("site_name")):
            self.log("The site_name setting has been deprecated in favor of the"
                     " resource and resource_group settings and will be removed",
                     section=self.config_section,
                     option="site_name",
                     level=logging.WARNING)
        latitude = self.opt_val("latitude")
        # NOTE(review): the open interval excludes exactly +/-90 (and +/-180
        # below for longitude) — presumably intentional, confirm before changing.
        if not utilities.blank(latitude) and not -90 < latitude < 90:
            self.log("Latitude must be between -90 and 90, got %s" %
                     latitude,
                     section=self.config_section,
                     option='latitude',
                     level=logging.ERROR)
            attributes_ok = False
        longitude = self.opt_val("longitude")
        if not utilities.blank(longitude) and not -180 < longitude < 180:
            self.log("Longitude must be between -180 and 180, got %s" %
                     longitude,
                     section=self.config_section,
                     option='longitude',
                     level=logging.ERROR)
            attributes_ok = False
        email = self.opt_val("email")
        # make sure the email address has the correct format
        if not utilities.blank(email) and not validation.valid_email(email):
            self.log("Invalid email address in site information: %s" %
                     email,
                     section=self.config_section,
                     option='email',
                     level=logging.ERROR)
            attributes_ok = False
        sponsor = self.opt_val("sponsor")
        if not utilities.blank(sponsor):
            attributes_ok &= self.check_sponsor(sponsor)
        self.log('SiteInformation.check_attributes completed')
        return attributes_ok

    def check_sponsor(self, sponsor):
        """Validate the sponsor setting: known VO names with percentages summing to 100."""
        attributes_ok = True
        percentage = 0
        vo_names = utilities.get_vos(None)
        # An empty VO list means the user-vo-map file is missing/empty, in
        # which case unknown names only warn instead of erroring below.
        if vo_names == []:
            map_file_present = False
        else:
            map_file_present = True
        vo_names.append('usatlas')  # usatlas is a valid vo name
        vo_names.append('uscms')  # uscms is a valid vo name
        vo_names.append('local')  # local is a valid vo name
        cap_vo_names = [vo.upper() for vo in vo_names]
        # Sponsor entries are comma and/or whitespace separated "vo[:percentage]" tokens.
        for vo in re.split(r'\s*,?\s*', sponsor):
            vo_name = vo.split(':')[0]
            if vo_name not in vo_names:
                if vo_name.upper() in cap_vo_names:
                    self.log("VO name %s has the wrong capitialization" % vo_name,
                             section=self.config_section,
                             option='sponsor',
                             level=logging.WARNING)
                    vo_mesg = "Valid VO names are as follows:\n"
                    for name in vo_names:
                        vo_mesg += name + "\n"
                    self.log(vo_mesg, level=logging.WARNING)
                else:
                    if map_file_present:
                        self.log("In %s section, problem with sponsor setting" % \
                                 self.config_section)
                        self.log("VO name %s not found" % vo_name,
                                 section=self.config_section,
                                 option='sponsor',
                                 level=logging.ERROR)
                        vo_mesg = "Valid VO names are as follows:\n"
                        for name in vo_names:
                            vo_mesg += name + "\n"
                        self.log(vo_mesg, level=logging.ERROR)
                        attributes_ok = False
                    else:
                        self.log("Can't currently check VOs in sponsor setting because " +
                                 "the /var/lib/osg/user-vo-map is empty. If you are " +
                                 "configuring osg components, this may be resolved when " +
                                 "osg-configure runs the appropriate script to generate " +
                                 "this file later in the configuration process",
                                 section=self.config_section,
                                 option='sponsor',
                                 level=logging.WARNING)
            if len(vo.split(':')) == 1:
                # No explicit percentage: the VO implicitly sponsors 100%.
                percentage += 100
            elif len(vo.split(':')) == 2:
                vo_percentage = vo.split(':')[1]
                try:
                    percentage += int(vo_percentage)
                except ValueError:
                    self.log("VO percentage (%s) in sponsor field (%s) not an integer" \
                             % (vo_percentage, vo),
                             section=self.config_section,
                             option='sponsor',
                             level=logging.ERROR,
                             exception=True)
                    attributes_ok = False
            else:
                self.log("VO sponsor field is not formated correctly: %s" % vo,
                         section=self.config_section,
                         option='sponsor',
                         level=logging.ERROR)
                self.log("Sponsors should be given as sponsor:percentage "
                         "separated by a space or comma")
        if percentage != 100:
            self.log("VO percentages in sponsor field do not add up to 100, got %s" \
                     % percentage,
                     section=self.config_section,
                     option='sponsor',
                     level=logging.ERROR)
            attributes_ok = False
        return attributes_ok

    def module_name(self):
        """Return a string with the name of the module"""
        return "SiteInformation"

    def separately_configurable(self):
        """Return a boolean that indicates whether this module can be configured separately"""
        return True

    def get_attributes(self, converter=str):
        """
        Get attributes for the osg attributes file using the dict in self.options
        Returns a dictionary of ATTRIBUTE => value mappings
        Need to override parent class method since two options may map to OSG_SITE_NAME
        """
        self.log("%s.get_attributes started" % self.__class__)
        attributes = BaseConfiguration.get_attributes(self, converter)
        if attributes == {}:
            self.log("%s.get_attributes completed" % self.__class__)
            return attributes
        # A non-blank 'resource' takes precedence over the deprecated
        # 'site_name' for the OSG_SITE_NAME attribute.
        if ('OSG_SITE_NAME' in attributes and
                self.options['resource'].value is not None and
                not utilities.blank(self.options['resource'].value)):
            attributes['OSG_SITE_NAME'] = self.options['resource'].value
        self.log("%s.get_attributes completed" % self.__class__)
        return attributes
|
[
"re.split",
"osg_configure.modules.validation.valid_domain",
"osg_configure.modules.utilities.blank",
"osg_configure.modules.validation.valid_email",
"logging.getLogger",
"osg_configure.modules.utilities.get_vos",
"osg_configure.modules.configfile.Option",
"osg_configure.modules.baseconfiguration.BaseConfiguration.get_attributes"
] |
[((931, 958), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (948, 958), False, 'import logging\n'), ((8680, 8703), 'osg_configure.modules.utilities.get_vos', 'utilities.get_vos', (['None'], {}), '(None)\n', (8697, 8703), False, 'from osg_configure.modules import utilities\n'), ((9079, 9110), 're.split', 're.split', (['"""\\\\s*,?\\\\s*"""', 'sponsor'], {}), "('\\\\s*,?\\\\s*', sponsor)\n", (9087, 9110), False, 'import re\n'), ((13086, 13135), 'osg_configure.modules.baseconfiguration.BaseConfiguration.get_attributes', 'BaseConfiguration.get_attributes', (['self', 'converter'], {}), '(self, converter)\n', (13118, 13135), False, 'from osg_configure.modules.baseconfiguration import BaseConfiguration\n'), ((1073, 1170), 'osg_configure.modules.configfile.Option', 'configfile.Option', ([], {'name': '"""group"""', 'required': 'MANDATORY', 'default_value': '"""OSG"""', 'mapping': '"""OSG_GROUP"""'}), "(name='group', required=MANDATORY, default_value='OSG',\n mapping='OSG_GROUP')\n", (1090, 1170), False, 'from osg_configure.modules import configfile\n'), ((1371, 1479), 'osg_configure.modules.configfile.Option', 'configfile.Option', ([], {'name': '"""host_name"""', 'required': 'MANDATORY_ON_CE', 'default_value': '""""""', 'mapping': '"""OSG_HOSTNAME"""'}), "(name='host_name', required=MANDATORY_ON_CE, default_value\n ='', mapping='OSG_HOSTNAME')\n", (1388, 1479), False, 'from osg_configure.modules import configfile\n'), ((1679, 1780), 'osg_configure.modules.configfile.Option', 'configfile.Option', ([], {'name': '"""site_name"""', 'required': 'OPTIONAL', 'default_value': '""""""', 'mapping': '"""OSG_SITE_NAME"""'}), "(name='site_name', required=OPTIONAL, default_value='',\n mapping='OSG_SITE_NAME')\n", (1696, 1780), False, 'from osg_configure.modules import configfile\n'), ((1979, 2066), 'osg_configure.modules.configfile.Option', 'configfile.Option', ([], {'name': '"""sponsor"""', 'required': 'MANDATORY_ON_CE', 'mapping': '"""OSG_SPONSOR"""'}), 
"(name='sponsor', required=MANDATORY_ON_CE, mapping=\n 'OSG_SPONSOR')\n", (1996, 2066), False, 'from osg_configure.modules import configfile\n'), ((2222, 2325), 'osg_configure.modules.configfile.Option', 'configfile.Option', ([], {'name': '"""site_policy"""', 'required': 'OPTIONAL', 'default_value': '""""""', 'mapping': '"""OSG_SITE_INFO"""'}), "(name='site_policy', required=OPTIONAL, default_value='',\n mapping='OSG_SITE_INFO')\n", (2239, 2325), False, 'from osg_configure.modules import configfile\n'), ((2524, 2616), 'osg_configure.modules.configfile.Option', 'configfile.Option', ([], {'name': '"""contact"""', 'required': 'MANDATORY_ON_CE', 'mapping': '"""OSG_CONTACT_NAME"""'}), "(name='contact', required=MANDATORY_ON_CE, mapping=\n 'OSG_CONTACT_NAME')\n", (2541, 2616), False, 'from osg_configure.modules import configfile\n'), ((2766, 2857), 'osg_configure.modules.configfile.Option', 'configfile.Option', ([], {'name': '"""email"""', 'required': 'MANDATORY_ON_CE', 'mapping': '"""OSG_CONTACT_EMAIL"""'}), "(name='email', required=MANDATORY_ON_CE, mapping=\n 'OSG_CONTACT_EMAIL')\n", (2783, 2857), False, 'from osg_configure.modules import configfile\n'), ((3006, 3092), 'osg_configure.modules.configfile.Option', 'configfile.Option', ([], {'name': '"""city"""', 'required': 'MANDATORY_ON_CE', 'mapping': '"""OSG_SITE_CITY"""'}), "(name='city', required=MANDATORY_ON_CE, mapping=\n 'OSG_SITE_CITY')\n", (3023, 3092), False, 'from osg_configure.modules import configfile\n'), ((3244, 3336), 'osg_configure.modules.configfile.Option', 'configfile.Option', ([], {'name': '"""country"""', 'required': 'MANDATORY_ON_CE', 'mapping': '"""OSG_SITE_COUNTRY"""'}), "(name='country', required=MANDATORY_ON_CE, mapping=\n 'OSG_SITE_COUNTRY')\n", (3261, 3336), False, 'from osg_configure.modules import configfile\n'), ((3490, 3602), 'osg_configure.modules.configfile.Option', 'configfile.Option', ([], {'name': '"""longitude"""', 'opt_type': 'float', 'required': 'MANDATORY_ON_CE', 'mapping': 
'"""OSG_SITE_LONGITUDE"""'}), "(name='longitude', opt_type=float, required=\n MANDATORY_ON_CE, mapping='OSG_SITE_LONGITUDE')\n", (3507, 3602), False, 'from osg_configure.modules import configfile\n'), ((3801, 3910), 'osg_configure.modules.configfile.Option', 'configfile.Option', ([], {'name': '"""latitude"""', 'opt_type': 'float', 'required': 'MANDATORY_ON_CE', 'mapping': '"""OSG_SITE_LATITUDE"""'}), "(name='latitude', opt_type=float, required=MANDATORY_ON_CE,\n mapping='OSG_SITE_LATITUDE')\n", (3818, 3910), False, 'from osg_configure.modules import configfile\n'), ((4110, 4210), 'osg_configure.modules.configfile.Option', 'configfile.Option', ([], {'name': '"""resource"""', 'required': 'OPTIONAL', 'default_value': '""""""', 'mapping': '"""OSG_SITE_NAME"""'}), "(name='resource', required=OPTIONAL, default_value='',\n mapping='OSG_SITE_NAME')\n", (4127, 4210), False, 'from osg_configure.modules import configfile\n'), ((4416, 4493), 'osg_configure.modules.configfile.Option', 'configfile.Option', ([], {'name': '"""resource_group"""', 'default_value': '""""""', 'required': 'OPTIONAL'}), "(name='resource_group', default_value='', required=OPTIONAL)\n", (4433, 4493), False, 'from osg_configure.modules import configfile\n'), ((8394, 8418), 'osg_configure.modules.utilities.blank', 'utilities.blank', (['sponsor'], {}), '(sponsor)\n', (8409, 8418), False, 'from osg_configure.modules import utilities\n'), ((6456, 6482), 'osg_configure.modules.utilities.blank', 'utilities.blank', (['host_name'], {}), '(host_name)\n', (6471, 6482), False, 'from osg_configure.modules import utilities\n'), ((6491, 6531), 'osg_configure.modules.validation.valid_domain', 'validation.valid_domain', (['host_name', '(True)'], {}), '(host_name, True)\n', (6514, 6531), False, 'from osg_configure.modules import validation\n'), ((7184, 7209), 'osg_configure.modules.utilities.blank', 'utilities.blank', (['latitude'], {}), '(latitude)\n', (7199, 7209), False, 'from osg_configure.modules import utilities\n'), 
((7567, 7593), 'osg_configure.modules.utilities.blank', 'utilities.blank', (['longitude'], {}), '(longitude)\n', (7582, 7593), False, 'from osg_configure.modules import utilities\n'), ((8012, 8034), 'osg_configure.modules.utilities.blank', 'utilities.blank', (['email'], {}), '(email)\n', (8027, 8034), False, 'from osg_configure.modules import utilities\n'), ((8043, 8072), 'osg_configure.modules.validation.valid_email', 'validation.valid_email', (['email'], {}), '(email)\n', (8065, 8072), False, 'from osg_configure.modules import validation\n'), ((13398, 13445), 'osg_configure.modules.utilities.blank', 'utilities.blank', (["self.options['resource'].value"], {}), "(self.options['resource'].value)\n", (13413, 13445), False, 'from osg_configure.modules import utilities\n')]
|
import datetime
from typing import Dict, Optional
import dateparser
import regex
def get_element_text(el) -> str:
    """Join every string fragment of *el* and strip surrounding whitespace.

    *el* is expected to expose a ``strings`` iterable of text fragments
    (e.g. a BeautifulSoup element) -- TODO confirm against callers.
    """
    fragments = el.strings
    return ''.join(fragments).strip()
def parse_int(
    s: str, numbers_map: Dict[str, int], thousands_separator: str
) -> int:
    """Parse an integer out of *s*.

    First looks for a run of digits (after stripping whitespace and removing
    *thousands_separator*).  If no digits are present, falls back to an exact
    case-insensitive lookup in *numbers_map*, which maps spelled-out number
    words to their values.

    Raises:
        ValueError: if neither strategy yields a number.
    """
    m = regex.search(r'\d+', s.strip().replace(thousands_separator, ''))
    if m:
        return int(m[0])
    # No digits: try the word -> value mapping (direct lookup instead of the
    # original O(n) scan over .items()).
    value = numbers_map.get(s.lower())
    if value is not None:
        return value
    # ValueError is the conventional type here and is still caught by any
    # caller handling the original bare Exception.
    raise ValueError(f'Failed to parse number "{s}"')
def parse_int_or_none(
    s: str, regex_none: regex.Regex, *args, **kwargs
) -> Optional[int]:
    """Like :func:`parse_int`, but return ``None`` when *s* matches *regex_none*.

    Extra positional/keyword arguments are forwarded to :func:`parse_int`.
    """
    if regex_none.search(s) is not None:
        return None
    return parse_int(s, *args, **kwargs)
def parse_datetime(s: str, default_tz: datetime.tzinfo) -> datetime.datetime:
    """Parse *s* into a timezone-aware :class:`datetime.datetime`.

    Naive parse results are assumed to be in *default_tz*.

    Raises:
        ValueError: if *s* cannot be parsed at all (subclass of the
            original bare Exception, so existing handlers still work).
    """
    dt = dateparser.parse(s)
    # dateparser.parse returns None on failure; check identity explicitly
    # instead of truthiness.
    if dt is None:
        raise ValueError(f'Failed to parse datetime "{s}"')
    if dt.tzinfo is None:
        return dt.replace(tzinfo=default_tz)
    return dt
|
[
"dateparser.parse"
] |
[((832, 851), 'dateparser.parse', 'dateparser.parse', (['s'], {}), '(s)\n', (848, 851), False, 'import dateparser\n')]
|
# Generated by Django 3.2.7 on 2022-03-07 15:25
from django.db import migrations, models
class Migration(migrations.Migration):
    # Auto-generated Django migration (3.2.7): adds an optional "snippet"
    # CharField (max 500 chars, nullable/blank) to the blog Post model.

    dependencies = [
        ('blog', '0006_change_content_field_to_richtextuploading'),
    ]
    operations = [
        migrations.AddField(
            model_name='post',
            name='snippet',
            field=models.CharField(blank=True, max_length=500, null=True),
        ),
    ]
|
[
"django.db.models.CharField"
] |
[((353, 408), 'django.db.models.CharField', 'models.CharField', ([], {'blank': '(True)', 'max_length': '(500)', 'null': '(True)'}), '(blank=True, max_length=500, null=True)\n', (369, 408), False, 'from django.db import migrations, models\n')]
|
import threading
import time
from django_formset_vuejs.models import Book
def start_cleanup_job():
    """Start a background thread that deletes all Book rows once per hour.

    The thread is marked as a daemon so it cannot keep the interpreter
    alive at shutdown (the original non-daemon thread would block process
    exit forever inside its infinite loop).

    Returns:
        threading.Thread: the started cleanup thread (previously None;
        returning it is backward compatible and eases testing).
    """
    def cleanup_db():
        # Purpose: periodic full wipe of the Book table.
        while True:
            time.sleep(60 * 60)  # run once per hour
            # NOTE(review): placeholder output -- consider the logging module.
            print('hello')
            Book.objects.all().delete()

    cleanup_thread = threading.Thread(
        target=cleanup_db, name='book-cleanup', daemon=True
    )
    cleanup_thread.start()
    return cleanup_thread
|
[
"threading.Thread",
"django_formset_vuejs.models.Book.objects.all",
"time.sleep"
] |
[((256, 291), 'threading.Thread', 'threading.Thread', ([], {'target': 'cleanup_db'}), '(target=cleanup_db)\n', (272, 291), False, 'import threading\n'), ((156, 175), 'time.sleep', 'time.sleep', (['(60 * 60)'], {}), '(60 * 60)\n', (166, 175), False, 'import time\n'), ((213, 231), 'django_formset_vuejs.models.Book.objects.all', 'Book.objects.all', ([], {}), '()\n', (229, 231), False, 'from django_formset_vuejs.models import Book\n')]
|
#-----------------------------------------------------------------------------
# luna2d DeployTool
# This is part of luna2d engine
# Copyright 2014-2017 <NAME>
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to
# deal in the Software without restriction, including without limitation the
# rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
# sell copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
# IN THE SOFTWARE.
#-----------------------------------------------------------------------------
import argparse
import shutil
import os
import utils
import sdkmodule_android
def main(args):
    """Render an SDK-module project from a luna2d template.

    Clears or validates the target directory, builds the template constant
    map, applies platform-specific tweaks and writes the project files.
    """
    if args.debug_clear_project == "true":
        shutil.rmtree(args.project_path, ignore_errors=True)
    elif os.path.exists(args.project_path):
        print("Cannot create project in \"" + args.project_path + "\". Directory already exists.")
        exit(1)

    luna2d_path = utils.get_luna2d_path()
    template_path = luna2d_path + "/templates/" + args.template

    # Placeholders substituted into the template files.
    constants = {
        "LUNA_SDKMODULE_TYPE": args.module_type,
        "LUNA_SDKMODULE_NAME": args.name,
        "LUNA_PACKAGE_NAME": args.package_name,
        "LUNA_CLASS_NAME": args.class_name,
        "LUNA2D_PATH": luna2d_path,
    }

    skip_files = []
    if args.platform == "android":
        sdkmodule_android.apply_constants(args, constants)
        skip_files = sdkmodule_android.get_ignored_files(args, template_path)

    utils.make_from_template(template_path, args.project_path, constants, skip_files)
def parse_args():
    """Parse the deploy-tool command line into an argparse namespace."""
    parser = argparse.ArgumentParser()
    # Mandatory options (order preserved for --help output).
    for opt in ("project_path", "module_type", "template", "name", "platform"):
        parser.add_argument("--" + opt, required=True)
    # Optional options with defaults.
    parser.add_argument("--package_name", default="")
    parser.add_argument("--class_name", default="")
    parser.add_argument("--strip_git", default=False)
    parser.add_argument("--debug_clear_project", default=False)
    return parser.parse_args()
# Script entry point: guard so importing this module does not run the tool.
if __name__ == "__main__":
    main(parse_args())
|
[
"utils.get_luna2d_path",
"argparse.ArgumentParser",
"utils.make_from_template",
"os.path.exists",
"sdkmodule_android.get_ignored_files",
"sdkmodule_android.apply_constants",
"shutil.rmtree"
] |
[((1648, 1671), 'utils.get_luna2d_path', 'utils.get_luna2d_path', ([], {}), '()\n', (1669, 1671), False, 'import utils\n'), ((2129, 2217), 'utils.make_from_template', 'utils.make_from_template', (['template_path', 'args.project_path', 'constants', 'ignored_files'], {}), '(template_path, args.project_path, constants,\n ignored_files)\n', (2153, 2217), False, 'import utils\n'), ((2243, 2268), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {}), '()\n', (2266, 2268), False, 'import argparse\n'), ((1434, 1486), 'shutil.rmtree', 'shutil.rmtree', (['args.project_path'], {'ignore_errors': '(True)'}), '(args.project_path, ignore_errors=True)\n', (1447, 1486), False, 'import shutil\n'), ((1494, 1527), 'os.path.exists', 'os.path.exists', (['args.project_path'], {}), '(args.project_path)\n', (1508, 1527), False, 'import os\n'), ((2001, 2051), 'sdkmodule_android.apply_constants', 'sdkmodule_android.apply_constants', (['args', 'constants'], {}), '(args, constants)\n', (2034, 2051), False, 'import sdkmodule_android\n'), ((2070, 2126), 'sdkmodule_android.get_ignored_files', 'sdkmodule_android.get_ignored_files', (['args', 'template_path'], {}), '(args, template_path)\n', (2105, 2126), False, 'import sdkmodule_android\n')]
|
def process(self):
    """Build glyph names for the Latin Extended-B Unicode range.

    Applies edit/replace rules to the raw Unicode character names; the
    rules appear order-sensitive (more specific patterns run before the
    generic ones) -- preserve the sequence when editing.
    """
    self.edit("LATIN")
    # Digraph capitals: titlecase form vs. full-caps form.
    self.replace("CAPITAL LETTER D WITH SMALL LETTER Z", "Dz")
    self.replace("CAPITAL LETTER DZ", "DZ")
    self.edit("AFRICAN", "african")
    self.edit("WITH LONG RIGHT LEG", "long", "right", "leg")
    self.edit('LETTER YR', "yr")
    self.edit("CAPITAL LETTER O WITH MIDDLE TILDE", "Obar")
    self.edit("CAPITAL LETTER SMALL Q WITH HOOK TAIL", "Qsmallhooktail")
    self.edit("LETTER REVERSED ESH LOOP", "eshreversedloop")
    self.edit("CAPITAL LETTER L WITH SMALL LETTER J", "Lj")
    self.edit("CAPITAL LETTER N WITH SMALL LETTER J", "Nj")
    self.edit("LETTER INVERTED GLOTTAL STOP WITH STROKE", "glottalinvertedstroke")
    self.edit("LETTER TWO WITH STROKE", "twostroke")
    self.edit("CAPITAL LETTER LJ", "LJ")
    self.edit("CAPITAL LETTER NJ", "NJ")
    self.edit("CAPITAL LETTER AE WITH", "AE")
    self.edit("CAPITAL LETTER WYNN", "Wynn")
    self.edit("LETTER WYNN", "wynn")
    # Phonetic place-of-articulation terms become lowercase name parts.
    self.edit("WITH PALATAL", "palatal")
    self.edit("DENTAL", "dental")
    self.edit("LATERAL", "lateral")
    self.edit("ALVEOLAR", "alveolar")
    self.edit("RETROFLEX", "retroflex")
    self.replace("LETTER CLICK", "click")
    # Greek-looking capitals in the Latin block keep an explicit script prefix.
    self.forceScriptPrefix("latin", "CAPITAL LETTER GAMMA", "Gamma")
    self.forceScriptPrefix("latin", "CAPITAL LETTER IOTA", "Iota")
    self.forceScriptPrefix("latin", "CAPITAL LETTER UPSILON", "Upsilon")
    self.processAs("Helper Diacritics")
    self.processAs("Helper Shapes")
    self.handleCase()
    self.compress()
if __name__ == "__main__":
    # When run directly, print the generated names for this Unicode range.
    from glyphNameFormatter.exporters import printRange
    printRange("Latin Extended-B")
|
[
"glyphNameFormatter.exporters.printRange"
] |
[((1598, 1628), 'glyphNameFormatter.exporters.printRange', 'printRange', (['"""Latin Extended-B"""'], {}), "('Latin Extended-B')\n", (1608, 1628), False, 'from glyphNameFormatter.exporters import printRange\n')]
|
import os
from PlotterBokeh import PlotterBokeh
from BusInfo import BusInfo
def start_live_streaming(doc):
    """Stream live bus locations onto a map bound to the given Bokeh document.

    Raises:
        NotImplementedError: if no document is supplied.
    """
    if doc is None:
        raise NotImplementedError()
    # Initial map center: Yokohama station.
    initial_lat = 35.46591430126525
    initial_lng = 139.62125644093177
    # Google Maps API key; may be None if the env var is unset -- TODO confirm
    # PlotterBokeh tolerates that.
    gmap_token = os.getenv('GMAP_TOKEN')
    plotter = PlotterBokeh(initial_lat, initial_lng, gmap_token, doc)
    plotter.init_buslocation(BusInfo.update())
    plotter.loop()
|
[
"BusInfo.BusInfo.update",
"PlotterBokeh.PlotterBokeh",
"os.getenv"
] |
[((283, 306), 'os.getenv', 'os.getenv', (['"""GMAP_TOKEN"""'], {}), "('GMAP_TOKEN')\n", (292, 306), False, 'import os\n'), ((322, 357), 'PlotterBokeh.PlotterBokeh', 'PlotterBokeh', (['lat', 'lng', 'apiKey', 'doc'], {}), '(lat, lng, apiKey, doc)\n', (334, 357), False, 'from PlotterBokeh import PlotterBokeh\n'), ((373, 389), 'BusInfo.BusInfo.update', 'BusInfo.update', ([], {}), '()\n', (387, 389), False, 'from BusInfo import BusInfo\n')]
|
import torch as th
import torch.nn as nn
import torch.nn.functional as F
from typing import Dict, List, Optional, Tuple
from tpp.models.encoders.base.variable_history import VariableHistoryEncoder
from tpp.pytorch.models import MLP
from tpp.utils.events import Events
class RecurrentEncoder(VariableHistoryEncoder):
    """Abstract base for recurrent encoders over variable-size histories.

    An RNN consumes the embedded event history and an MLP projects the
    hidden states to the final representation.

    Args:
        name: The name of the encoder class.
        rnn: RNN encoder module.
        units_mlp: Hidden layer sizes for the MLP; the last entry defines
            the encoder's output size.
        activation_mlp: MLP activation function(s). Defaults to "relu".
        dropout_mlp: MLP dropout rate. Defaults to 0.
        constraint_mlp: Constraint on the MLP weights. Defaults to None.
        activation_final_mlp: Activation after the last MLP layer.
        emb_dim: Size of the embeddings. Defaults to 1.
        embedding_constraint: Constraint on the embedding weights. Either
            None, 'nonneg' or 'softplus'. Defaults to None.
        temporal_scaling: Scaling parameter for the temporal encoding.
        encoding: Event encoding: times_only, marks_only, concatenate or
            temporal_encoding. Defaults to times_only.
        time_encoding: Time encoding scheme. Defaults to "relative".
        marks: The distinct number of marks (classes). Defaults to 1.
    """
    def __init__(
            self,
            name: str,
            rnn: nn.Module,
            # MLP args
            units_mlp: List[int],
            activation_mlp: Optional[str] = "relu",
            dropout_mlp: Optional[float] = 0.,
            constraint_mlp: Optional[str] = None,
            activation_final_mlp: Optional[str] = None,
            # Other args
            emb_dim: Optional[int] = 1,
            embedding_constraint: Optional[str] = None,
            temporal_scaling: Optional[float] = 1.,
            encoding: Optional[str] = "times_only",
            time_encoding: Optional[str] = "relative",
            marks: Optional[int] = 1,
            **kwargs):
        super().__init__(
            name=name,
            output_size=units_mlp[-1],
            emb_dim=emb_dim,
            embedding_constraint=embedding_constraint,
            temporal_scaling=temporal_scaling,
            encoding=encoding,
            time_encoding=time_encoding,
            marks=marks,
            **kwargs)
        self.rnn = rnn
        # Head that projects RNN hidden states to the output size.
        self.mlp = MLP(
            units=units_mlp,
            activations=activation_mlp,
            constraint=constraint_mlp,
            dropout_rates=dropout_mlp,
            input_shape=self.rnn.hidden_size,
            activation_final=activation_final_mlp)

    def forward(self, events: Events) -> Tuple[th.Tensor, th.Tensor, Dict]:
        """Compute the (query-time independent) event representations.

        Args:
            events: [B,L] Times and labels of events.

        Returns:
            representations: [B,L+1,M+1] Representations of each event.
            representations_mask: [B,L+1] Mask indicating which
                representations are well-defined.
            artifacts: empty dict, kept for interface compatibility.
        """
        hidden, hidden_mask = self.get_events_representations(
            events=events)  # [B,L+1,D], [B,L+1]
        encoded, _ = self.rnn(hidden)
        # L2-normalise along the feature axis before the MLP head.
        encoded = F.normalize(encoded, dim=-1, p=2)
        outputs = self.mlp(encoded)  # [B,L+1,M+1]
        return outputs, hidden_mask, {}
|
[
"torch.nn.functional.normalize",
"tpp.pytorch.models.MLP"
] |
[((2288, 2471), 'tpp.pytorch.models.MLP', 'MLP', ([], {'units': 'units_mlp', 'activations': 'activation_mlp', 'constraint': 'constraint_mlp', 'dropout_rates': 'dropout_mlp', 'input_shape': 'self.rnn.hidden_size', 'activation_final': 'activation_final_mlp'}), '(units=units_mlp, activations=activation_mlp, constraint=constraint_mlp,\n dropout_rates=dropout_mlp, input_shape=self.rnn.hidden_size,\n activation_final=activation_final_mlp)\n', (2291, 2471), False, 'from tpp.pytorch.models import MLP\n'), ((3196, 3237), 'torch.nn.functional.normalize', 'F.normalize', (['representations'], {'dim': '(-1)', 'p': '(2)'}), '(representations, dim=-1, p=2)\n', (3207, 3237), True, 'import torch.nn.functional as F\n')]
|
import os
import glob
import numpy as np
from datetime import datetime
from scipy.io import loadmat
from PIL import Image
# Fix the global NumPy RNG so the shuffles below are reproducible.
np.random.seed(42)
def calc_age(taken, dob):
    """Return the age in whole years when the photo was taken.

    *dob* is a Matlab serial date number; Matlab's epoch is 366 days ahead
    of Python's proleptic ordinal, hence the -366 correction (clamped to
    the smallest valid ordinal, 1).
    """
    birth = datetime.fromordinal(max(int(dob) - 366, 1))
    # Photos are assumed to be taken mid-year: a birthday in the second
    # half of the year has not yet occurred.
    return taken - birth.year if birth.month < 7 else taken - birth.year - 1
def get_meta(mat_path, db):
    """Unpack the IMDB/WIKI-style metadata .mat file.

    Returns parallel arrays:
    (full_path, dob, gender, photo_taken, face_score, second_face_score, age).
    """
    meta = loadmat(mat_path)
    record = meta[db][0, 0]
    full_path = record["full_path"][0]
    dob = record["dob"][0]  # Matlab serial date number
    gender = record["gender"][0]
    photo_taken = record["photo_taken"][0]  # year
    face_score = record["face_score"][0]
    second_face_score = record["second_face_score"][0]
    age = [calc_age(photo_taken[i], d) for i, d in enumerate(dob)]
    return full_path, dob, gender, photo_taken, face_score, second_face_score, age
def load_data(data_dir, db='imdb', split=0.1):
    """Load face image paths with age/gender labels from one or more DBs.

    Args:
        data_dir: Root directory containing the database folders.
        db: Comma-separated database names (e.g. 'imdb', 'wiki', 'utk').
        split: Fraction of the shuffled samples held out for validation.

    Returns:
        ((train_paths, train_ages, train_genders),
         (val_paths, val_ages, val_genders)) as Python lists.
    """
    out_paths = []
    out_ages = []
    out_genders = []
    db_names = db.split(',')
    # Load utkface if need.
    if 'utk' in db_names:
        utk_dir = os.path.join(data_dir, 'utkface-new')
        utk_paths, utk_ages, utk_genders = load_utk(utk_dir)
        out_paths += utk_paths
        out_ages += utk_ages
        out_genders += utk_genders
    for d in db_names:
        # 'utk' has no .mat metadata and was already handled above;
        # the original would have tried to open utk_crop/utk.mat here.
        if d == 'utk':
            continue
        image_dir = os.path.join(data_dir, '{}_crop'.format(d))
        mat_path = os.path.join(image_dir, '{}.mat'.format(d))
        full_path, dob, gender, photo_taken, face_score, second_face_score, age = get_meta(mat_path, d)
        sample_num = len(face_score)
        min_score = 1.
        for i in range(sample_num):
            # Skip low-confidence face detections.
            if face_score[i] < min_score:
                continue
            # Skip images with a (confident) second face.
            if (not np.isnan(second_face_score[i])) and second_face_score[i] > 0.0:
                continue
            # Skip implausible ages.  NOTE: the original used `~(...)`,
            # which is only correct on numpy bools (`~True` is -2, i.e.
            # truthy, for plain Python bools); `not` is safe for both.
            if not (0 <= age[i] <= 100):
                continue
            if np.isnan(gender[i]):
                continue
            out_genders.append(int(gender[i]))
            out_ages.append(age[i])
            out_paths.append(os.path.join(image_dir, str(full_path[i][0])))
    # Shuffle once (global seeded RNG), then split into train/val.
    indices = np.arange(len(out_paths))
    np.random.shuffle(indices)
    out_paths = list(np.asarray(out_paths)[indices])
    out_ages = list(np.asarray(out_ages)[indices])
    out_genders = list(np.asarray(out_genders)[indices])
    num_train = int(len(out_paths) * (1 - split))
    train_paths, train_ages, train_genders = out_paths[:num_train], out_ages[:num_train], out_genders[:num_train]
    val_paths, val_ages, val_genders = out_paths[num_train:], out_ages[num_train:], out_genders[num_train:]
    return (train_paths, train_ages, train_genders), (val_paths, val_ages, val_genders)
def load_utk(data_dir):
    """Load UTKFace dataset.

    Filenames encode "<age>_<gender>_...".  UTK's gender code (0 = male)
    is flipped here so the returned labels match the IMDB/WIKI convention
    -- TODO confirm against consumers.
    """
    out_paths, out_ages, out_genders = [], [], []
    for path in glob.glob(os.path.join(data_dir, 'crop_part1', '*')):
        name = os.path.basename(path)
        out_paths.append(path)
        age_str, gender_str = name.split('_')[:2]
        out_ages.append(int(age_str))
        out_genders.append(1 if int(gender_str) == 0 else 0)
    return out_paths, out_ages, out_genders
def load_appa(data_dir, ignore_list_filename=None):
    """Load the APPA-real training split.

    Args:
        data_dir: Dataset root containing gt_avg_train.csv and train/.
        ignore_list_filename: Optional file (one image name per line)
            listing images to skip.

    Returns:
        (paths, ages) for the training images.
    """
    out_paths = []
    out_ages = []
    ignore_filenames = set()
    if ignore_list_filename is not None:
        ignore_list_path = os.path.join(data_dir, ignore_list_filename)
        # Context manager so the handle is closed (the original leaked it).
        with open(ignore_list_path) as ignore_file:
            ignore_filenames = set(x.strip() for x in ignore_file)
    data_file = os.path.join(data_dir, 'gt_avg_train.csv')
    image_dir = os.path.join(data_dir, 'train')
    with open(data_file) as f:
        lines = [x.strip() for x in f]
    for line in lines[1:]:  # skip the CSV header row
        filename, _, _, _, age = line.strip().split(',')
        if filename in ignore_filenames:
            continue
        out_paths.append(os.path.join(image_dir, filename + '_face.jpg'))
        out_ages.append(int(age))
    return out_paths, out_ages
def load_aligned_data(data_dir, split=0.1):
    """Load aligned face crops whose names end in "..._<age>_<gender>.<ext>".

    Shuffles with the (module-level seeded) NumPy RNG and splits into
    train/validation portions.

    Returns:
        ((train_paths, train_ages, train_genders),
         (val_paths, val_ages, val_genders)) as numpy arrays.
    """
    paths, ages, genders = [], [], []
    for path in glob.glob(os.path.join(data_dir, '*')):
        age_part, gender_part = os.path.basename(path).split('_')[-2:]
        gender_str = gender_part.split('.')[0]
        paths.append(path)
        ages.append(int(age_part))
        genders.append(int(gender_str))
    order = np.arange(len(paths))
    np.random.shuffle(order)
    paths = np.asarray(paths)[order]
    ages = np.asarray(ages)[order]
    genders = np.asarray(genders)[order]
    num_train = int(len(paths) * (1 - split))
    train = (paths[:num_train], ages[:num_train], genders[:num_train])
    val = (paths[num_train:], ages[num_train:], genders[num_train:])
    return train, val
|
[
"numpy.random.seed",
"os.path.basename",
"scipy.io.loadmat",
"numpy.asarray",
"numpy.isnan",
"os.path.join",
"numpy.random.shuffle"
] |
[((124, 142), 'numpy.random.seed', 'np.random.seed', (['(42)'], {}), '(42)\n', (138, 142), True, 'import numpy as np\n'), ((435, 452), 'scipy.io.loadmat', 'loadmat', (['mat_path'], {}), '(mat_path)\n', (442, 452), False, 'from scipy.io import loadmat\n'), ((2190, 2216), 'numpy.random.shuffle', 'np.random.shuffle', (['indices'], {}), '(indices)\n', (2207, 2216), True, 'import numpy as np\n'), ((3601, 3643), 'os.path.join', 'os.path.join', (['data_dir', '"""gt_avg_train.csv"""'], {}), "(data_dir, 'gt_avg_train.csv')\n", (3613, 3643), False, 'import os\n'), ((3660, 3691), 'os.path.join', 'os.path.join', (['data_dir', '"""train"""'], {}), "(data_dir, 'train')\n", (3672, 3691), False, 'import os\n'), ((4629, 4655), 'numpy.random.shuffle', 'np.random.shuffle', (['indices'], {}), '(indices)\n', (4646, 4655), True, 'import numpy as np\n'), ((1139, 1176), 'os.path.join', 'os.path.join', (['data_dir', '"""utkface-new"""'], {}), "(data_dir, 'utkface-new')\n", (1151, 1176), False, 'import os\n'), ((2879, 2920), 'os.path.join', 'os.path.join', (['data_dir', '"""crop_part1"""', '"""*"""'], {}), "(data_dir, 'crop_part1', '*')\n", (2891, 2920), False, 'import os\n'), ((2964, 2986), 'os.path.basename', 'os.path.basename', (['path'], {}), '(path)\n', (2980, 2986), False, 'import os\n'), ((3465, 3509), 'os.path.join', 'os.path.join', (['data_dir', 'ignore_list_filename'], {}), '(data_dir, ignore_list_filename)\n', (3477, 3509), False, 'import os\n'), ((4257, 4284), 'os.path.join', 'os.path.join', (['data_dir', '"""*"""'], {}), "(data_dir, '*')\n", (4269, 4284), False, 'import os\n'), ((4328, 4350), 'os.path.basename', 'os.path.basename', (['path'], {}), '(path)\n', (4344, 4350), False, 'import os\n'), ((4672, 4693), 'numpy.asarray', 'np.asarray', (['out_paths'], {}), '(out_paths)\n', (4682, 4693), True, 'import numpy as np\n'), ((4718, 4738), 'numpy.asarray', 'np.asarray', (['out_ages'], {}), '(out_ages)\n', (4728, 4738), True, 'import numpy as np\n'), ((4766, 4789), 'numpy.asarray', 
'np.asarray', (['out_genders'], {}), '(out_genders)\n', (4776, 4789), True, 'import numpy as np\n'), ((1939, 1958), 'numpy.isnan', 'np.isnan', (['gender[i]'], {}), '(gender[i])\n', (1947, 1958), True, 'import numpy as np\n'), ((2238, 2259), 'numpy.asarray', 'np.asarray', (['out_paths'], {}), '(out_paths)\n', (2248, 2259), True, 'import numpy as np\n'), ((2290, 2310), 'numpy.asarray', 'np.asarray', (['out_ages'], {}), '(out_ages)\n', (2300, 2310), True, 'import numpy as np\n'), ((2344, 2367), 'numpy.asarray', 'np.asarray', (['out_genders'], {}), '(out_genders)\n', (2354, 2367), True, 'import numpy as np\n'), ((3949, 3996), 'os.path.join', 'os.path.join', (['image_dir', "(filename + '_face.jpg')"], {}), "(image_dir, filename + '_face.jpg')\n", (3961, 3996), False, 'import os\n'), ((1770, 1800), 'numpy.isnan', 'np.isnan', (['second_face_score[i]'], {}), '(second_face_score[i])\n', (1778, 1800), True, 'import numpy as np\n')]
|
from simulation.dm_control_cur.utility_classes.simulator import Simulation
class ResidualSimulation(Simulation):
    """Simulation whose actions are delegated to an inner 'controller'
    Simulation sharing this instance's model/task configuration."""

    def __init__(
            self,
            controller_load_model=True,
            controller_num_episodes=50,
            **kwargs
    ):
        super().__init__(**kwargs)
        # The controller reuses this simulation's configuration constants.
        controller_config = dict(
            load_model=controller_load_model,
            label='controller',
            name_model=self.NAME_MODEL,
            task=self.TASK,
            num_episodes=controller_num_episodes,
            batch_size=self.BATCH_SIZE,
            duration=self.DURATION,
        )
        self.controller = Simulation(**controller_config)

    def train_controller(self):
        """Train the inner controller simulation."""
        self.controller.train()

    def show_controller_simulation(self):
        """Render the inner controller's simulation."""
        self.controller.show_simulation()

    def modify_action(self, action, state, t):
        """Replace the proposed action with the controller's action."""
        return self.controller.get_action(state, t)
|
[
"simulation.dm_control_cur.utility_classes.simulator.Simulation"
] |
[((320, 527), 'simulation.dm_control_cur.utility_classes.simulator.Simulation', 'Simulation', ([], {'load_model': 'controller_load_model', 'label': '"""controller"""', 'name_model': 'self.NAME_MODEL', 'task': 'self.TASK', 'num_episodes': 'controller_num_episodes', 'batch_size': 'self.BATCH_SIZE', 'duration': 'self.DURATION'}), "(load_model=controller_load_model, label='controller', name_model\n =self.NAME_MODEL, task=self.TASK, num_episodes=controller_num_episodes,\n batch_size=self.BATCH_SIZE, duration=self.DURATION)\n", (330, 527), False, 'from simulation.dm_control_cur.utility_classes.simulator import Simulation\n')]
|
"""
Copyright 2020 <NAME>. See LICENSE for details.
"""
import logging
import time
import farc
from . import phy_sx127x
class PhySX127xAhsm(farc.Ahsm):
"""The physical layer (PHY) state machine for a Semtech SX127x device.
Automates the behavior of the Semtech SX127x family of radio transceivers.
For now, all behavior and operations are for LoRa mode.
"""
# Special time values to use when posting an action
TM_NOW = 0 # Use normally for "do it now"
TM_IMMEDIATE = -1 # Use sparingly to jump the queue
def __init__(self, lstn_by_dflt):
"""Class intialization
Listen by default means the radio enters
continuous-rx mode when it is not doing anything else.
If lstn_by_dflt is False, the radio enters sleep mode
when it is not doing anything else.
"""
super().__init__()
self.sx127x = phy_sx127x.PhySX127x()
self._lstn_by_dflt = lstn_by_dflt
self._dflt_stngs = ()
self._dflt_rx_stngs = ()
def get_stngs(self,):
"""Returns the current settings"""
return self._dflt_stngs
def post_rx_action(self, rx_time, rx_stngs, rx_durxn, rx_clbk):
"""Posts the _PHY_RQST event to this state machine
with the container-ized arguments as the value.
"""
assert not self._lstn_by_dflt, \
"""post_rx_action() should not be used when the PHY is
listen-by-default. Use set_dflt_rx_clbk() once, instead."""
# Convert NOW to an actual time
if rx_time == PhySX127xAhsm.TM_NOW:
rx_time = farc.Framework._event_loop.time()
# The order MUST begin: (action, stngs, ...)
rx_action = ("rx", rx_stngs, rx_durxn, rx_clbk)
self.post_fifo(farc.Event(farc.Signal._PHY_RQST, (rx_time, rx_action)))
def post_tx_action(self, tx_time, tx_stngs, tx_bytes):
"""Posts the _PHY_RQST event to this state machine
with the container-ized arguments as the value.
"""
assert type(tx_bytes) is bytes
# Convert NOW to an actual time
if tx_time == PhySX127xAhsm.TM_NOW:
tx_time = farc.Framework._event_loop.time()
# The order MUST begin: (action, stngs, ...)
tx_action = ("tx", tx_stngs, tx_bytes)
self.post_fifo(farc.Event(farc.Signal._PHY_RQST, (tx_time, tx_action)))
def set_dflt_rx_clbk(self, rx_clbk):
"""Stores the default RX callback for the PHY.
The default RX callback is used when this state machine is
initialized with listen-by-default set to True.
This state machine calls the default RX callback
when a frame is received and there are no reception errors.
"""
assert self._lstn_by_dflt, \
"""set_dflt_rx_clbk() should not be used when the PHY is
sleep-by-default. Pass a callback in post_rx_action() instead.
"""
self._dflt_rx_clbk = rx_clbk
def set_dflt_stngs(self, dflt_stngs):
"""Stores the default settings for the PHY.
This must be called before start() so they
can be written to the device during initilizing.
"""
self._dflt_stngs = dflt_stngs
def start_stack(self, ahsm_prio):
"""PHY is the bottom of the protocol stack, so just start this Ahsm"""
self.start(ahsm_prio)
# State machine
@farc.Hsm.state
def _initial(self, event):
"""Pseudostate: _initial
State machine framework initialization
"""
# Self-signaling
farc.Signal.register("_ALWAYS")
farc.Signal.register("_PHY_RQST")
# DIO Signal table (DO NOT CHANGE ORDER)
# This table is dual maintenance with phy_sx127x.PhySX127x.DIO_*
self._dio_sig_lut = (
farc.Signal.register("_DIO_MODE_RDY"),
farc.Signal.register("_DIO_CAD_DETECTED"),
farc.Signal.register("_DIO_CAD_DONE"),
farc.Signal.register("_DIO_FHSS_CHG_CHNL"),
farc.Signal.register("_DIO_RX_TMOUT"),
farc.Signal.register("_DIO_RX_DONE"),
farc.Signal.register("_DIO_CLK_OUT"),
farc.Signal.register("_DIO_PLL_LOCK"),
farc.Signal.register("_DIO_VALID_HDR"),
farc.Signal.register("_DIO_TX_DONE"),
farc.Signal.register("_DIO_PAYLD_CRC_ERR"),
)
# Self-signaling events
self._evt_always = farc.Event(farc.Signal._ALWAYS, None)
# Time events
self.tmout_evt = farc.TimeEvent("_PHY_TMOUT")
self.prdc_evt = farc.TimeEvent("_PHY_PRDC")
return self.tran(self._initializing)
@farc.Hsm.state
def _initializing(self, event):
""""State: _initializing
Application initialization.
Opens and verifies the SPI driver.
Sets default application values.
Transitions to the _scheduling state if the SPI Comms
and SX127x are good; otherwise, remains in this state
and periodically retries opening the SX127x.
"""
sig = event.signal
if sig == farc.Signal.ENTRY:
logging.debug("PHY._initializing")
self.tmout_evt.post_in(self, 0.0)
# Init data
# We use two queues for a hybrid time-sorted queue:
# One for frames that sort by time.
# It's actually a dict object where the keys are the time value.
self._tm_queue = {}
# Another for frames that need to be sent immediately.
# This should be used sparingly.
self._im_queue = []
return self.handled(event)
elif sig == farc.Signal._PHY_TMOUT:
if self.sx127x.open(self._dio_isr_clbk):
assert len(self._dflt_stngs) > 0, \
"Default settings must be set before initializing"
self.sx127x.set_flds(self._dflt_stngs)
self.sx127x.write_stngs(False)
return self.tran(self._scheduling)
logging.warning("_initializing: no SX127x or SPI")
self.tmout_evt.post_in(self, 1.0)
return self.handled(event)
elif sig == farc.Signal.EXIT:
self.tmout_evt.disarm()
return self.handled(event)
return self.super(self.top)
@farc.Hsm.state
def _scheduling(self, event):
""""State: _scheduling
Writes any outstanding settings and always
transitions to _txing, _sleeping or _listening
"""
sig = event.signal
if sig == farc.Signal.ENTRY:
logging.debug("PHY._scheduling")
# TODO: remove unecessary read once sm design is proven
assert self.sx127x.OPMODE_STBY == self.sx127x.read_opmode()
self.post_fifo(farc.Event(farc.Signal._ALWAYS, None))
return self.handled(event)
elif sig == farc.Signal._ALWAYS:
# If the next action is soon, go to its state
next_action = self._top_soon_action()
self._default_action = not bool(next_action)
if next_action:
_, action = next_action
if action[0] == "rx":
st = self._listening
elif action[0] == "tx":
st = self._txing
else:
# Placeholder for CAD, sleep
assert True, "Got here by accident"
# Otherwise, go to the default
elif self._lstn_by_dflt:
st = self._listening
else:
st = self._sleeping
return self.tran(st)
elif sig == farc.Signal._PHY_RQST:
tm, action = event.value
self._enqueue_action(tm, action)
return self.handled(event)
return self.super(self.top)
@farc.Hsm.state
def _lingering(self, event):
""""State: _scheduling
This state is for shared behavior
between the _listening and _sleeping states.
On entry, optionally starts a timer for when
to exit to go handle the next action.
"""
sig = event.signal
if sig == farc.Signal.ENTRY:
logging.debug("PHY._lingering")
return self.handled(event)
elif sig == farc.Signal._PHY_RQST:
tm, action = event.value
self._enqueue_action(tm, action)
# If lingering because of default action
# transition to scheduling
if self._default_action:
return self.tran(self._scheduling)
# If lingering because of intentional action
# remain in current state
return self.handled(event)
elif sig == farc.Signal._PHY_TMOUT:
return self.tran(self._scheduling)
elif sig == farc.Signal.EXIT:
self.tmout_evt.disarm()
# Changing modes from rx or sleep to STBY is
# "near instantaneous" per SX127x datasheet
# so don't bother awaiting a _DIO_MODE_RDY
self.sx127x.write_opmode(self.sx127x.OPMODE_STBY, False)
return self.handled(event)
return self.super(self.top)
@farc.Hsm.state
def _listening(self, event):
""""State: _lingering:_listening
Puts the device into receive mode
either because of a receive action or listen-by-default.
Transitions to _rxing if a valid header is received.
"""
sig = event.signal
if sig == farc.Signal.ENTRY:
logging.debug("PHY._lingering._listening")
action = self._pop_soon_action()
if action:
rx_time, rx_action = action
(action_str, rx_stngs, rx_durxn, rx_clbk) = rx_action
assert action_str == "rx"
self._rx_clbk = rx_clbk
else:
rx_stngs = self._dflt_rx_stngs
self._rx_clbk = self._dflt_rx_clbk
# Convert given settings to a mutable list
if rx_stngs:
stngs = list(rx_stngs)
else:
# Accept "None" as an argument for rx_stngs
stngs = []
# Combine and write RX settings
stngs.extend((("FLD_RDO_DIO0", 0), # _DIO_RX_DONE
("FLD_RDO_DIO1", 0), # _DIO_RX_TMOUT
("FLD_RDO_DIO3", 1))) # _DIO_VALID_HDR
self.sx127x.set_flds(stngs)
self.sx127x.write_stngs(True)
# Prep interrupts for RX
self.sx127x.write_lora_irq_mask(
self.sx127x.IRQ_FLAGS_ALL,
self.sx127x.IRQ_FLAGS_RXDONE |
self.sx127x.IRQ_FLAGS_PAYLDCRCERROR |
self.sx127x.IRQ_FLAGS_VALIDHEADER
)
self.sx127x.write_lora_irq_flags(
self.sx127x.IRQ_FLAGS_RXDONE |
self.sx127x.IRQ_FLAGS_PAYLDCRCERROR |
self.sx127x.IRQ_FLAGS_VALIDHEADER
)
self.sx127x.write_fifo_ptr(0x00)
# Start periodic event for update_rng()
self.prdc_evt.post_every(self, 0.100) # 100ms
# No action means listen-by-default; receive-continuosly
if not action:
self.sx127x.write_opmode(self.sx127x.OPMODE_RXCONT, False)
# An explicit action means do a receive-once
else:
# Perform a short blocking sleep until rx_time
# to obtain more accurate rx execution time on Linux.
now = farc.Framework._event_loop.time()
tiny_sleep = rx_time - now
assert tiny_sleep > 0.0, \
"didn't beat action time, need to increase _TM_SVC_MARGIN"
if tiny_sleep > PhySX127xAhsm._TM_BLOCKING_MAX:
tiny_sleep = PhySX127xAhsm._TM_BLOCKING_MAX
if tiny_sleep > PhySX127xAhsm._TM_BLOCKING_MIN:
time.sleep(tiny_sleep)
self.sx127x.write_opmode(self.sx127x.OPMODE_RXONCE, False)
# Start the rx duration timer
if rx_durxn > 0:
self.tmout_evt.post_in(self, rx_durxn)
return self.handled(event)
elif sig == farc.Signal._PHY_PRDC:
self.sx127x.updt_rng()
return self.handled(event)
elif sig == farc.Signal._DIO_VALID_HDR:
self._rxd_hdr_time = event.value
return self.tran(self._rxing)
elif sig == farc.Signal._DIO_PAYLD_CRC_ERR:
logging.info("PHY:_listening@_DIO_PAYLD_CRC_ERR")
# TODO: incr phy_data stats crc err cnt
return self.tran(self._scheduling)
elif sig == farc.Signal._DIO_RX_TMOUT:
logging.info("PHY:_listening@_DIO_RX_TMOUT")
# TODO: incr phy_data stats rx tmout
return self.tran(self._scheduling)
elif sig == farc.Signal.EXIT:
self.prdc_evt.disarm()
return self.handled(event)
return self.super(self._lingering)
@farc.Hsm.state
def _rxing(self, event):
""""State: _lingering:_listening:_rxing
Continues a reception in progress.
Protects reception by NOT transitioning upon a _PHY_RQST event.
Transitions to _scheduling after reception ends.
"""
sig = event.signal
if sig == farc.Signal.ENTRY:
logging.debug("PHY._rxing")
return self.handled(event)
elif sig == farc.Signal._DIO_RX_DONE:
self._on_lora_rx_done()
return self.tran(self._scheduling)
elif sig == farc.Signal._PHY_RQST:
# Overrides _lingering's _PHY_RQST handler because we want to
# remain in this state even if we were listening-by-default
tm, action = event.value
self._enqueue_action(tm, action)
return self.handled(event)
return self.super(self._listening)
@farc.Hsm.state
def _sleeping(self, event):
""""State: _lingering:_sleeping
Puts the device into sleep mode.
Timer and timeout handling is performed
by the parent state, _lingering()
"""
sig = event.signal
if sig == farc.Signal.ENTRY:
logging.debug("PHY._lingering._sleeping")
self.sx127x.write_opmode(self.sx127x.OPMODE_SLEEP, False)
return self.handled(event)
return self.super(self._lingering)
@farc.Hsm.state
def _txing(self, event):
""""State: _txing
Prepares for transmission, transmits,
awaits DIO_TX_DONE event from radio,
then transitions to the _scheduling state.
"""
sig = event.signal
if sig == farc.Signal.ENTRY:
logging.debug("PHY._txing")
action = self._pop_soon_action()
assert action is not None, "Mutation between top() and pop()"
(tx_time, tx_action) = action
assert tx_action[0] == "tx", "Mutation between top() and pop()"
(_, tx_stngs, tx_bytes) = tx_action
# Convert given settings to a mutable list
if tx_stngs:
stngs = list(tx_stngs)
else:
# Accept "None" as an argument for tx_stngs
stngs = []
# Write TX settings from higher layer and
# one setting needed for this PHY operation
stngs.append(("FLD_RDO_DIO0", 1)) # _DIO_TX_DONE
self.sx127x.set_flds(stngs)
self.sx127x.write_stngs(False)
# Prep interrupts for TX
self.sx127x.write_lora_irq_mask(
self.sx127x.IRQ_FLAGS_ALL, # disable these
self.sx127x.IRQ_FLAGS_TXDONE # enable these
)
# Write payload into radio's FIFO
self.sx127x.write_fifo_ptr(0x00)
self.sx127x.write_fifo(tx_bytes)
self.sx127x.write_lora_payld_len(len(tx_bytes))
# Blocking sleep until tx_time (assuming a short amount)
now = farc.Framework._event_loop.time()
tiny_sleep = tx_time - now
if tiny_sleep > PhySX127xAhsm._TM_BLOCKING_MAX:
tiny_sleep = PhySX127xAhsm._TM_BLOCKING_MAX
if tiny_sleep > 0.001:
time.sleep(tiny_sleep)
# Start software timer for backstop
self.tmout_evt.post_in(self, 1.0) # TODO: calc soft timeout delta
# Start transmission and await DIO_TX_DONE
self.sx127x.write_opmode(self.sx127x.OPMODE_TX, False)
return self.handled(event)
elif sig == farc.Signal._DIO_TX_DONE:
# TODO: phy stats TX_DONE
return self.tran(self._scheduling)
elif sig == farc.Signal._PHY_RQST:
tm, action = event.value
self._enqueue_action(tm, action)
return self.handled(event)
elif sig == farc.Signal._PHY_TMOUT:
logging.warning("PHY._txing@_PHY_TMOUT")
if self.sx127x.in_sim_mode():
# Sim-radio will never emit DIO events
# so go straight to _scheduling
return self.tran(self._scheduling)
else:
# SX127x takes time to change modes from TX to STBY.
# Use DIO5/ModeReady here so we don't transition
# to _scheduling and try to do stuff before the
# chip is in STBY mode. Await _DIO_MODE_RDY.
self.sx127x.write_opmode(self.sx127x.OPMODE_STBY, True)
return self.handled(event)
elif sig == farc.Signal._DIO_MODE_RDY:
return self.tran(self._scheduling)
elif sig == farc.Signal.EXIT:
self.tmout_evt.disarm()
return self.handled(event)
return self.super(self.top)
# Private
# The margin within which the Ahsm will transition to
# the action's state if there is an entry in the action queue;
# otherwise, transitions to the default state, listening or sleeping.
_TM_SOON = 0.040
# The amount of time it takes to get from the _lingering state
# through _scheduling and to the next action's state.
# This is used so we can set a timer to exit _lingering
# and make it to the deisred state before the designated time.
_TM_SVC_MARGIN = 0.020
# assert _TM_SVC_MARGIN < _TM_SOON
# Blocking times are used around the time.sleep() operation
# to obtain more accurate tx/rx execution times on Linux.
_TM_BLOCKING_MAX = 0.100
_TM_BLOCKING_MIN = 0.001
def _dio_isr_clbk(self, dio):
"""A callback given to the PHY for when a DIO pin event occurs.
The Rpi.GPIO's thread calls this procedure (like an interrupt).
This procedure posts an Event to this state machine
corresponding to the DIO pin that transitioned.
The pin edge's arrival time is the value of the Event.
"""
now = farc.Framework._event_loop.time()
self.post_fifo(farc.Event(self._dio_sig_lut[dio], now))
def _enqueue_action(self, tm, action_args):
"""Enqueues the action at the given time"""
IOTA = 0.000_000_1 # a small amount of time
# IMMEDIATELY means this frame jumps to the front of the line
# put it in the immediate queue (which is serviced before the tm_queue)
if tm == PhySX127xAhsm.TM_IMMEDIATE:
self._im_queue.append(action_args)
else:
# Ensure this tx time doesn't overwrite an existing one
# by adding an iota of time if there is a duplicate.
# This results in FIFO for frames scheduled at the same time.
tm_orig = tm
while tm in self._tm_queue:
tm += IOTA
# Protect against infinite while-loop
if tm == tm_orig:
IOTA *= 10.0
self._tm_queue[tm] = action_args
def _on_lora_rx_done(self,):
"""Reads received bytes and meta data from the radio.
Checks and logs any errors.
Passes the rx_data to the next layer higher via callback.
"""
frame_bytes, rssi, snr, flags = self.sx127x.read_lora_rxd()
if flags == 0:
# TODO: incr phy_data stats rx done
self._rx_clbk(self._rxd_hdr_time, frame_bytes, rssi, snr)
elif flags & self.sx127x.IRQ_FLAGS_RXTIMEOUT:
logging.info("PHY._rxing@RXTMOUT")
# TODO: incr phy_data stats rx tmout
elif flags & self.sx127x.IRQ_FLAGS_PAYLDCRCERROR:
logging.info("PHY._rxing@CRCERR")
# TODO: incr phy_data stats rx payld crc err
def _pop_soon_action(self,):
"""Returns the next (time, action) pair from the queue and removes it.
Returns None if the queue is empty.
"""
if self._im_queue:
tm = farc.Framework._event_loop.time()
action = self._im_queue.pop()
return (tm, action)
elif self._tm_queue:
tm = min(self._tm_queue.keys())
now = farc.Framework._event_loop.time()
if tm < now + PhySX127xAhsm._TM_SOON:
action = self._tm_queue[tm]
del self._tm_queue[tm]
return (tm, action)
return None
def _top_soon_action(self,):
"""Returns the next (time, action) pair from the queue without removal.
Returns None if the queue is empty.
"""
if self._im_queue:
tm = PhySX127xAhsm.TM_IMMEDIATE
action = self._im_queue[-1]
return (tm, action)
elif self._tm_queue:
tm = min(self._tm_queue.keys())
now = farc.Framework._event_loop.time()
if tm < now + PhySX127xAhsm._TM_SOON:
action = self._tm_queue[tm]
return (tm, action)
return None
|
[
"farc.Signal.register",
"farc.Framework._event_loop.time",
"logging.debug",
"logging.warning",
"farc.Event",
"time.sleep",
"logging.info",
"farc.TimeEvent"
] |
[((3581, 3612), 'farc.Signal.register', 'farc.Signal.register', (['"""_ALWAYS"""'], {}), "('_ALWAYS')\n", (3601, 3612), False, 'import farc\n'), ((3621, 3654), 'farc.Signal.register', 'farc.Signal.register', (['"""_PHY_RQST"""'], {}), "('_PHY_RQST')\n", (3641, 3654), False, 'import farc\n'), ((4451, 4488), 'farc.Event', 'farc.Event', (['farc.Signal._ALWAYS', 'None'], {}), '(farc.Signal._ALWAYS, None)\n', (4461, 4488), False, 'import farc\n'), ((4537, 4565), 'farc.TimeEvent', 'farc.TimeEvent', (['"""_PHY_TMOUT"""'], {}), "('_PHY_TMOUT')\n", (4551, 4565), False, 'import farc\n'), ((4590, 4617), 'farc.TimeEvent', 'farc.TimeEvent', (['"""_PHY_PRDC"""'], {}), "('_PHY_PRDC')\n", (4604, 4617), False, 'import farc\n'), ((19060, 19093), 'farc.Framework._event_loop.time', 'farc.Framework._event_loop.time', ([], {}), '()\n', (19091, 19093), False, 'import farc\n'), ((1616, 1649), 'farc.Framework._event_loop.time', 'farc.Framework._event_loop.time', ([], {}), '()\n', (1647, 1649), False, 'import farc\n'), ((1782, 1837), 'farc.Event', 'farc.Event', (['farc.Signal._PHY_RQST', '(rx_time, rx_action)'], {}), '(farc.Signal._PHY_RQST, (rx_time, rx_action))\n', (1792, 1837), False, 'import farc\n'), ((2172, 2205), 'farc.Framework._event_loop.time', 'farc.Framework._event_loop.time', ([], {}), '()\n', (2203, 2205), False, 'import farc\n'), ((2329, 2384), 'farc.Event', 'farc.Event', (['farc.Signal._PHY_RQST', '(tx_time, tx_action)'], {}), '(farc.Signal._PHY_RQST, (tx_time, tx_action))\n', (2339, 2384), False, 'import farc\n'), ((3820, 3857), 'farc.Signal.register', 'farc.Signal.register', (['"""_DIO_MODE_RDY"""'], {}), "('_DIO_MODE_RDY')\n", (3840, 3857), False, 'import farc\n'), ((3871, 3912), 'farc.Signal.register', 'farc.Signal.register', (['"""_DIO_CAD_DETECTED"""'], {}), "('_DIO_CAD_DETECTED')\n", (3891, 3912), False, 'import farc\n'), ((3926, 3963), 'farc.Signal.register', 'farc.Signal.register', (['"""_DIO_CAD_DONE"""'], {}), "('_DIO_CAD_DONE')\n", (3946, 3963), False, 'import 
farc\n'), ((3977, 4019), 'farc.Signal.register', 'farc.Signal.register', (['"""_DIO_FHSS_CHG_CHNL"""'], {}), "('_DIO_FHSS_CHG_CHNL')\n", (3997, 4019), False, 'import farc\n'), ((4033, 4070), 'farc.Signal.register', 'farc.Signal.register', (['"""_DIO_RX_TMOUT"""'], {}), "('_DIO_RX_TMOUT')\n", (4053, 4070), False, 'import farc\n'), ((4084, 4120), 'farc.Signal.register', 'farc.Signal.register', (['"""_DIO_RX_DONE"""'], {}), "('_DIO_RX_DONE')\n", (4104, 4120), False, 'import farc\n'), ((4134, 4170), 'farc.Signal.register', 'farc.Signal.register', (['"""_DIO_CLK_OUT"""'], {}), "('_DIO_CLK_OUT')\n", (4154, 4170), False, 'import farc\n'), ((4184, 4221), 'farc.Signal.register', 'farc.Signal.register', (['"""_DIO_PLL_LOCK"""'], {}), "('_DIO_PLL_LOCK')\n", (4204, 4221), False, 'import farc\n'), ((4235, 4273), 'farc.Signal.register', 'farc.Signal.register', (['"""_DIO_VALID_HDR"""'], {}), "('_DIO_VALID_HDR')\n", (4255, 4273), False, 'import farc\n'), ((4287, 4323), 'farc.Signal.register', 'farc.Signal.register', (['"""_DIO_TX_DONE"""'], {}), "('_DIO_TX_DONE')\n", (4307, 4323), False, 'import farc\n'), ((4337, 4379), 'farc.Signal.register', 'farc.Signal.register', (['"""_DIO_PAYLD_CRC_ERR"""'], {}), "('_DIO_PAYLD_CRC_ERR')\n", (4357, 4379), False, 'import farc\n'), ((5141, 5175), 'logging.debug', 'logging.debug', (['"""PHY._initializing"""'], {}), "('PHY._initializing')\n", (5154, 5175), False, 'import logging\n'), ((6607, 6639), 'logging.debug', 'logging.debug', (['"""PHY._scheduling"""'], {}), "('PHY._scheduling')\n", (6620, 6639), False, 'import logging\n'), ((8220, 8251), 'logging.debug', 'logging.debug', (['"""PHY._lingering"""'], {}), "('PHY._lingering')\n", (8233, 8251), False, 'import logging\n'), ((9564, 9606), 'logging.debug', 'logging.debug', (['"""PHY._lingering._listening"""'], {}), "('PHY._lingering._listening')\n", (9577, 9606), False, 'import logging\n'), ((13477, 13504), 'logging.debug', 'logging.debug', (['"""PHY._rxing"""'], {}), "('PHY._rxing')\n", (13490, 
13504), False, 'import logging\n'), ((14343, 14384), 'logging.debug', 'logging.debug', (['"""PHY._lingering._sleeping"""'], {}), "('PHY._lingering._sleeping')\n", (14356, 14384), False, 'import logging\n'), ((14846, 14873), 'logging.debug', 'logging.debug', (['"""PHY._txing"""'], {}), "('PHY._txing')\n", (14859, 14873), False, 'import logging\n'), ((16148, 16181), 'farc.Framework._event_loop.time', 'farc.Framework._event_loop.time', ([], {}), '()\n', (16179, 16181), False, 'import farc\n'), ((19117, 19156), 'farc.Event', 'farc.Event', (['self._dio_sig_lut[dio]', 'now'], {}), '(self._dio_sig_lut[dio], now)\n', (19127, 19156), False, 'import farc\n'), ((20984, 21017), 'farc.Framework._event_loop.time', 'farc.Framework._event_loop.time', ([], {}), '()\n', (21015, 21017), False, 'import farc\n'), ((6038, 6088), 'logging.warning', 'logging.warning', (['"""_initializing: no SX127x or SPI"""'], {}), "('_initializing: no SX127x or SPI')\n", (6053, 6088), False, 'import logging\n'), ((6807, 6844), 'farc.Event', 'farc.Event', (['farc.Signal._ALWAYS', 'None'], {}), '(farc.Signal._ALWAYS, None)\n', (6817, 6844), False, 'import farc\n'), ((11605, 11638), 'farc.Framework._event_loop.time', 'farc.Framework._event_loop.time', ([], {}), '()\n', (11636, 11638), False, 'import farc\n'), ((16392, 16414), 'time.sleep', 'time.sleep', (['tiny_sleep'], {}), '(tiny_sleep)\n', (16402, 16414), False, 'import time\n'), ((20523, 20557), 'logging.info', 'logging.info', (['"""PHY._rxing@RXTMOUT"""'], {}), "('PHY._rxing@RXTMOUT')\n", (20535, 20557), False, 'import logging\n'), ((21184, 21217), 'farc.Framework._event_loop.time', 'farc.Framework._event_loop.time', ([], {}), '()\n', (21215, 21217), False, 'import farc\n'), ((21814, 21847), 'farc.Framework._event_loop.time', 'farc.Framework._event_loop.time', ([], {}), '()\n', (21845, 21847), False, 'import farc\n'), ((12016, 12038), 'time.sleep', 'time.sleep', (['tiny_sleep'], {}), '(tiny_sleep)\n', (12026, 12038), False, 'import time\n'), ((20678, 
20711), 'logging.info', 'logging.info', (['"""PHY._rxing@CRCERR"""'], {}), "('PHY._rxing@CRCERR')\n", (20690, 20711), False, 'import logging\n'), ((12610, 12659), 'logging.info', 'logging.info', (['"""PHY:_listening@_DIO_PAYLD_CRC_ERR"""'], {}), "('PHY:_listening@_DIO_PAYLD_CRC_ERR')\n", (12622, 12659), False, 'import logging\n'), ((17060, 17100), 'logging.warning', 'logging.warning', (['"""PHY._txing@_PHY_TMOUT"""'], {}), "('PHY._txing@_PHY_TMOUT')\n", (17075, 17100), False, 'import logging\n'), ((12819, 12863), 'logging.info', 'logging.info', (['"""PHY:_listening@_DIO_RX_TMOUT"""'], {}), "('PHY:_listening@_DIO_RX_TMOUT')\n", (12831, 12863), False, 'import logging\n')]
|
from common.http_response import json_response_builder as response
from common.jwt import get_user_id as get_id_from_request
from common.jwt import auth_require
from common.project_const import const
from icde.capture import icde_capture
from . import access as icde_access
@auth_require
@icde_capture(const.PAPER_SHARE)
def share_paper(request):
    """Records a PAPER_SHARE capture event for an authenticated user.

    All behavior lives in the decorators; the body only returns the
    standard success response (code 0).
    """
    return response(0)
@icde_capture(const.PAPER_SEARCH)
def search_paper(request):
    """Records a PAPER_SEARCH capture event; returns the success response."""
    return response(0)
@icde_capture(const.PAPER_ORIGIN_CLICK)
def go_paper_origin(request):
    """Records a PAPER_ORIGIN_CLICK capture event; returns the success response."""
    return response(0)
@icde_capture(const.PAPER_DETAIL_CLICK)
def go_paper_detail_page(request):
    """Records a PAPER_DETAIL_CLICK capture event; returns the success response."""
    return response(0)
def get_paper_share_count(request):
    """Returns the share count for the paper given by the 'paper_id' GET param."""
    paper_id = request.GET.dict().get('paper_id')
    count = icde_access.access_paper_share_count(paper_id)
    return response(0, body=count)
@auth_require
def get_paper_team_share_records(request):
    """Returns team share records for a paper, scoped to the requesting user."""
    user_id = get_id_from_request(request)
    paper_id = request.GET.dict().get('paper_id')
    records = icde_access.access_paper_team_share_records(user_id, paper_id)
    return response(0, body=records)
@auth_require
def get_user_activities(request):
    """Returns the activity records of the authenticated user."""
    user_id = get_id_from_request(request)
    activities = icde_access.access_user_activities(user_id)
    return response(0, body=activities)
@auth_require
def get_team_member_activities(request):
    """Returns activity records for the team given by the 'team_id' GET param."""
    team_id = request.GET.dict().get('team_id')
    activities = icde_access.access_team_member_activities(team_id)
    return response(0, body=activities)
def get_all_trending_list(request):
    """Returns every trending list (search terms, click rate, likes,
    dislikes, shares) bundled in one response.

    NOTE(review): the response key 'tranding_list' looks like a typo of
    'trending_list' but is kept as-is because clients depend on it.
    """
    trending_lists = [
        icde_access.access_search_term_trending_list(),
        icde_access.access_click_rate_trending_list(),
        icde_access.access_like_trending_list(),
        icde_access.access_dislike_trending_list(),
        icde_access.access_share_trending_list(),
    ]
    return response(0, body={'tranding_list': trending_lists})
|
[
"common.jwt.get_user_id",
"icde.capture.icde_capture",
"common.http_response.json_response_builder"
] |
[((293, 324), 'icde.capture.icde_capture', 'icde_capture', (['const.PAPER_SHARE'], {}), '(const.PAPER_SHARE)\n', (305, 324), False, 'from icde.capture import icde_capture\n'), ((376, 408), 'icde.capture.icde_capture', 'icde_capture', (['const.PAPER_SEARCH'], {}), '(const.PAPER_SEARCH)\n', (388, 408), False, 'from icde.capture import icde_capture\n'), ((461, 499), 'icde.capture.icde_capture', 'icde_capture', (['const.PAPER_ORIGIN_CLICK'], {}), '(const.PAPER_ORIGIN_CLICK)\n', (473, 499), False, 'from icde.capture import icde_capture\n'), ((555, 593), 'icde.capture.icde_capture', 'icde_capture', (['const.PAPER_DETAIL_CLICK'], {}), '(const.PAPER_DETAIL_CLICK)\n', (567, 593), False, 'from icde.capture import icde_capture\n'), ((362, 373), 'common.http_response.json_response_builder', 'response', (['(0)'], {}), '(0)\n', (370, 373), True, 'from common.http_response import json_response_builder as response\n'), ((447, 458), 'common.http_response.json_response_builder', 'response', (['(0)'], {}), '(0)\n', (455, 458), True, 'from common.http_response import json_response_builder as response\n'), ((541, 552), 'common.http_response.json_response_builder', 'response', (['(0)'], {}), '(0)\n', (549, 552), True, 'from common.http_response import json_response_builder as response\n'), ((640, 651), 'common.http_response.json_response_builder', 'response', (['(0)'], {}), '(0)\n', (648, 651), True, 'from common.http_response import json_response_builder as response\n'), ((949, 977), 'common.jwt.get_user_id', 'get_id_from_request', (['request'], {}), '(request)\n', (968, 977), True, 'from common.jwt import get_user_id as get_id_from_request\n'), ((1175, 1203), 'common.jwt.get_user_id', 'get_id_from_request', (['request'], {}), '(request)\n', (1194, 1203), True, 'from common.jwt import get_user_id as get_id_from_request\n'), ((1875, 2007), 'common.http_response.json_response_builder', 'response', (['(0)'], {'body': "{'tranding_list': [searh_term_trending, click_rate_trending, 
like_trending,\n dislike_trending, share_trending]}"}), "(0, body={'tranding_list': [searh_term_trending,\n click_rate_trending, like_trending, dislike_trending, share_trending]})\n", (1883, 2007), True, 'from common.http_response import json_response_builder as response\n')]
|
from trial_of_the_stones.models.page_model import PageModel
import selenium
import unittest
import time
def trial_of_the_stones_automation():
    """Solves https://techstepacademy.com/trial-of-the-stones end to end.

    Opens a Chrome browser, works through the three riddles, verifies
    the final completion banner, and closes the browser.
    """
    # open web page
    page = PageModel(selenium.webdriver.Chrome())
    try:
        page.open_page()
        password = solve_riddle_of_stone(page)
        solve_riddle_of_secrets(password, page)
        solve_the_two_merchants(page)
        final_check(page)
        # Brief pause so a human watcher can see the final page state.
        time.sleep(2)
    finally:
        # BUGFIX: always close the browser, even when a step fails;
        # previously a failed assertion leaked the Chrome process.
        page.close()
def solve_riddle_of_stone(page):
    """Answers the riddle of stone and returns the revealed password.

    Uses plain ``assert`` statements instead of instantiating a throwaway
    ``unittest.TestCase`` per check; both raise AssertionError on failure.
    """
    # type answer and click on answer button
    page.riddle_of_stone_field.send_keys('rock')
    assert not page.password.is_displayed(), 'password revealed too early'
    page.riddle_of_stone_button.click()
    # verify the password is displayed after a correct answer
    assert page.password.is_displayed(), 'password was not revealed'
    password = page.password.text
    assert password == '<PASSWORD>', 'unexpected password text'
    return password
def solve_riddle_of_secrets(password, page):
    """Enters the password from the riddle of stone and verifies success.

    Uses plain ``assert`` statements instead of instantiating a throwaway
    ``unittest.TestCase`` per check; both raise AssertionError on failure.
    """
    # type password and click on Answer button
    page.password_field.send_keys(password)
    assert not page.password_success.is_displayed(), 'success shown too early'
    page.password_answer_button.click()
    assert page.password_success.text == 'Success!'
    assert page.password_success.is_displayed()
def solve_the_two_merchants(page):
    """Names the richer of the two merchants and verifies success.

    Uses plain ``assert`` statements instead of instantiating a throwaway
    ``unittest.TestCase`` per check; both raise AssertionError on failure.
    """
    # compare wealth and type the richest merchant name
    richer_name = (page.bernard_name
                   if int(page.bernard_wealth) > int(page.jessica_wealth)
                   else page.jessica_name)
    page.richest_merchant_field.send_keys(richer_name)
    assert not page.merchant_success.is_displayed(), 'success shown too early'
    page.richest_merchant_button.click()
    assert page.merchant_success.is_displayed()
    assert page.merchant_success.text == 'Success!'
def final_check(page):
    """Submits the final answers check and verifies trial completion.

    Uses plain ``assert`` statements instead of instantiating a throwaway
    ``unittest.TestCase`` per check; both raise AssertionError on failure.
    """
    assert not page.trial_complete.is_displayed(), 'completion shown too early'
    page.check_answers_button.click()
    assert page.trial_complete.is_displayed()
    assert page.trial_complete.text == 'Trial Complete'
if __name__ == '__main__':
    # Run the full trial automation when executed as a script.
    trial_of_the_stones_automation()
|
[
"unittest.TestCase",
"selenium.webdriver.Chrome",
"time.sleep"
] |
[((504, 517), 'time.sleep', 'time.sleep', (['(2)'], {}), '(2)\n', (514, 517), False, 'import time\n'), ((284, 311), 'selenium.webdriver.Chrome', 'selenium.webdriver.Chrome', ([], {}), '()\n', (309, 311), False, 'import selenium\n'), ((669, 688), 'unittest.TestCase', 'unittest.TestCase', ([], {}), '()\n', (686, 688), False, 'import unittest\n'), ((811, 830), 'unittest.TestCase', 'unittest.TestCase', ([], {}), '()\n', (828, 830), False, 'import unittest\n'), ((910, 929), 'unittest.TestCase', 'unittest.TestCase', ([], {}), '()\n', (927, 929), False, 'import unittest\n'), ((1130, 1149), 'unittest.TestCase', 'unittest.TestCase', ([], {}), '()\n', (1147, 1149), False, 'import unittest\n'), ((1244, 1263), 'unittest.TestCase', 'unittest.TestCase', ([], {}), '()\n', (1261, 1263), False, 'import unittest\n'), ((1320, 1339), 'unittest.TestCase', 'unittest.TestCase', ([], {}), '()\n', (1337, 1339), False, 'import unittest\n'), ((1710, 1729), 'unittest.TestCase', 'unittest.TestCase', ([], {}), '()\n', (1727, 1729), False, 'import unittest\n'), ((1825, 1844), 'unittest.TestCase', 'unittest.TestCase', ([], {}), '()\n', (1842, 1844), False, 'import unittest\n'), ((1898, 1917), 'unittest.TestCase', 'unittest.TestCase', ([], {}), '()\n', (1915, 1917), False, 'import unittest\n'), ((2017, 2036), 'unittest.TestCase', 'unittest.TestCase', ([], {}), '()\n', (2034, 2036), False, 'import unittest\n'), ((2127, 2146), 'unittest.TestCase', 'unittest.TestCase', ([], {}), '()\n', (2144, 2146), False, 'import unittest\n'), ((2198, 2217), 'unittest.TestCase', 'unittest.TestCase', ([], {}), '()\n', (2215, 2217), False, 'import unittest\n')]
|
from random import randint
"""
For an array of size 10^6 the execution time of the randomized version was 10x faster.
I used an already sorted array, which is an example of a worst case scenario.
The algorithm by selection always the smallest element as the pivot makes n recursive calls
and because the partition step is O(n), it takes O(n^2) to execute.
Avg Quicksort: 47.597230195999146
Avg Quicksort Randomized: 4.145071268081665
"""
def quicksort_randomized(arr):
    """Sorts *arr* in place using quicksort with a randomized pivot.

    Picking the pivot uniformly at random gives expected O(n log n)
    running time even on adversarial (e.g. already sorted) inputs,
    where a fixed first-element pivot degrades to O(n^2).

    ROBUSTNESS: this version recurses only into the smaller partition
    and iterates over the larger one, bounding the recursion depth at
    O(log n) so large inputs cannot hit Python's recursion limit.

    Parameters
    ----------
    arr : list
        Mutable sequence of comparable items; sorted in place.
    """
    def _swap(a, i, j):
        a[i], a[j] = a[j], a[i]

    def _partition(a, left, right):
        # Lomuto partition: a[left] is the pivot; returns its final index.
        boundary = left
        for i in range(left + 1, right + 1):
            if a[i] <= a[left]:
                boundary += 1
                _swap(a, boundary, i)
        _swap(a, left, boundary)
        return boundary

    def _random_partition(a, left, right):
        # Move a uniformly random element into the pivot position.
        _swap(a, left, randint(left, right))
        return _partition(a, left, right)

    def _sort(a, left, right):
        while left < right:
            mid = _random_partition(a, left, right)
            # Recurse into the smaller side, loop on the larger side:
            # guarantees O(log n) stack depth.
            if mid - left < right - mid:
                _sort(a, left, mid - 1)
                left = mid + 1
            else:
                _sort(a, mid + 1, right)
                right = mid - 1

    _sort(arr, 0, len(arr) - 1)
if __name__ == "__main__":
arr = [int(i) for i in input().split()]
quicksort_randomized(arr)
print(arr)
|
[
"random.randint"
] |
[((909, 929), 'random.randint', 'randint', (['left', 'right'], {}), '(left, right)\n', (916, 929), False, 'from random import randint\n')]
|
# -*- coding: utf-8 -*-
"""BioImagePy dataset metadata definitions.
This module contains classes that allows to describe the
metadata of scientific dataset
Classes
-------
DataSet
RawDataSet
ProcessedDataSet
"""
import re
from bioimageit_core.config import ConfigAccess
from bioimageit_core.data import RawData, ProcessedData
from bioimageit_core.metadata.run import Run
from bioimageit_core.metadata.factory import metadataServices
from bioimageit_core.metadata.query import query_list_single
class RawDataSet:
    """Stores the metadata of a RawDataSet.

    Parameters
    ----------
    md_uri
        URI of the metadata in the database or file system
        depending on backend

    Attributes
    ----------
    md_uri
        URI of the dataset metadata
    metadata
        Dataset metadata container (holds the list of data URIs)
    service
        Metadata backend service used to read and write the dataset
    """

    def __init__(self, md_uri: str = ''):
        self.md_uri = md_uri
        self.metadata = None  # DataSetContainer()
        config = ConfigAccess.instance().config['metadata']
        self.service = metadataServices.get(config["service"], **config)
        self.read()

    def read(self):
        """Read the metadata from the database.

        The database connection is managed by the configuration object.
        """
        self.metadata = self.service.read_rawdataset(self.md_uri)

    def write(self):
        """Write the metadata to the database.

        The database connection is managed by the configuration object.
        """
        self.service.write_rawdataset(self.metadata, self.md_uri)

    def size(self):
        """Get the size of the dataset.

        Returns
        -------
        The number of data in the dataset
        """
        return len(self.metadata.uris)

    def get(self, i: int) -> RawData:
        """Get one data information.

        Parameters
        ----------
        i
            Index of the data in the dataset

        Returns
        -------
        RawData
            The data common information
        """
        return RawData(self.metadata.uris[i])

    def to_search_containers(self):
        """Convert the RawDataSet into a list of SearchContainer.

        Returns
        -------
        list
            List of data as list of SearchContainer
        """
        return [RawData(uri).to_search_container()
                for uri in self.metadata.uris]

    def get_data(self, query: str) -> list:
        """Query the RawDataSet on tags.

        In this version only AND queries are supported
        (ex: tag1=value1 AND tag2=value2).

        Parameters
        ----------
        query
            String query with the key=value format.

        Returns
        -------
        list
            List of selected data (md.json file URLs)
        """
        # initially all the raw data are selected
        selected_list = self.to_search_containers()
        # run all the AND sub-queries on the preselected dataset
        if query != '':
            for q in re.split(' AND ', query):
                selected_list = query_list_single(selected_list, q)
        # BUGFIX: the empty-query path used to return SearchContainer
        # objects while the documented contract (and the non-empty path)
        # returns URIs; URIs are now returned in both cases.
        return [d.uri() for d in selected_list]

    def add_data(self, data: RawData):
        """Add one data to the dataset.

        Parameters
        ----------
        data
            data to add
        """
        data.write()
        self.metadata.uris.append(data.md_uri)
        self.service.write_rawdataset(self.metadata, self.md_uri)

    def get_data_list(self) -> list:
        """Get the metadata information as a list.

        Returns
        -------
        list
            List of the data metadata stored in RawData objects
        """
        return [RawData(uri) for uri in self.metadata.uris]
class ProcessedDataSet:
    """Stores the metadata of a ProcessedDataSet.

    Parameters
    ----------
    md_uri
        URI of the metadata in the database or file system
        depending on backend

    Attributes
    ----------
    md_uri
        URI of the dataset metadata
    metadata
        Dataset metadata container (holds the list of data URIs)
    service
        Metadata backend service used to read and write the dataset
    """

    def __init__(self, md_uri: str = ''):
        self.md_uri = md_uri
        self.metadata = None  # DataSetContainer()
        config = ConfigAccess.instance().config['metadata']
        self.service = metadataServices.get(config["service"], **config)
        self.read()

    def read(self):
        """Read the metadata from the database.

        The database connection is managed by the configuration object.
        """
        self.metadata = self.service.read_processeddataset(self.md_uri)

    def write(self):
        """Write the metadata to the database.

        The database connection is managed by the configuration object.
        """
        self.service.write_processeddataset(self.metadata, self.md_uri)

    def add_run(self, run: Run):
        """Add a Run to the dataset.

        The input Run URI is created by this method.

        Parameters
        ----------
        run
            Run to add
        """
        run.md_uri = self.service.add_run_processeddataset(run.metadata,
                                                           self.md_uri)

    def create_data(self, data: ProcessedData):
        """Create a new data metadata in the dataset.

        The input data object must contain only the metadata (ie no
        uri and no md_uri).  This method generates the uri and the
        md_uri and saves all the metadata.

        Parameters
        ----------
        data
            metadata of the processed data to create
        """
        self.service.create_data_processeddataset(data.metadata, self.md_uri)

    def size(self):
        """Get the size of the dataset.

        Returns
        -------
        The number of data in the dataset
        """
        return len(self.metadata.uris)

    def get(self, i: int) -> ProcessedData:
        """Get one data information.

        Parameters
        ----------
        i
            Index of the data in the dataset

        Returns
        -------
        ProcessedData
            The data common information
        """
        return ProcessedData(self.metadata.uris[i])

    def to_search_containers(self):
        """Convert the ProcessedDataSet into a list of SearchContainer.

        Returns
        -------
        list
            List of data as list of SearchContainer
        """
        return [ProcessedData(uri).to_search_container()
                for uri in self.metadata.uris]

    def get_data(self, query: str, origin_output_name: str = '') -> list:
        """Run a query on the ProcessedDataSet.

        Parameters
        ----------
        query
            Query on tags (ex: 'Population'='population1')
        origin_output_name
            Filter only the process output with the given name;
            if origin_output_name is empty, all the processed data
            are considered.

        Returns
        -------
        list
            List of the data URIs
        """
        # get all the tags per data
        pre_list = self.to_search_containers()
        # keep only data whose process output matches the requested name
        if origin_output_name != '':
            selected_list = []
            for pdata in pre_list:
                data = ProcessedData(pdata.uri())
                if data.metadata.output["name"] == origin_output_name:
                    selected_list.append(pdata)
        else:
            selected_list = pre_list
        # run all the AND sub-queries on the preselected dataset
        if query != '':
            for q in re.split(' AND ', query):
                selected_list = query_list_single(selected_list, q)
        # BUGFIX: the empty-query path used to return SearchContainer
        # objects while the documented contract (and the non-empty path)
        # returns URIs; URIs are now returned in both cases.
        return [d.uri() for d in selected_list]

    def add_data(self, data: ProcessedData):
        """Add one data to the dataset.

        Parameters
        ----------
        data
            data to add
        """
        data.write()
        self.metadata.uris.append(data.md_uri)
        self.service.write_processeddataset(self.metadata, self.md_uri)

    def get_data_list(self) -> list:
        """Get the metadata information as a list.

        Returns
        -------
        list
            List of the data metadata stored in ProcessedData objects
        """
        return [ProcessedData(uri) for uri in self.metadata.uris]
|
[
"bioimageit_core.metadata.query.query_list_single",
"re.split",
"bioimageit_core.data.ProcessedData",
"bioimageit_core.data.RawData",
"bioimageit_core.metadata.factory.metadataServices.get",
"bioimageit_core.config.ConfigAccess.instance"
] |
[((1010, 1059), 'bioimageit_core.metadata.factory.metadataServices.get', 'metadataServices.get', (["config['service']"], {}), "(config['service'], **config)\n", (1030, 1059), False, 'from bioimageit_core.metadata.factory import metadataServices\n'), ((2004, 2034), 'bioimageit_core.data.RawData', 'RawData', (['self.metadata.uris[i]'], {}), '(self.metadata.uris[i])\n', (2011, 2034), False, 'from bioimageit_core.data import RawData, ProcessedData\n'), ((2901, 2925), 're.split', 're.split', (['""" AND """', 'query'], {}), "(' AND ', query)\n", (2909, 2925), False, 'import re\n'), ((4575, 4624), 'bioimageit_core.metadata.factory.metadataServices.get', 'metadataServices.get', (["config['service']"], {}), "(config['service'], **config)\n", (4595, 4624), False, 'from bioimageit_core.metadata.factory import metadataServices\n'), ((6421, 6457), 'bioimageit_core.data.ProcessedData', 'ProcessedData', (['self.metadata.uris[i]'], {}), '(self.metadata.uris[i])\n', (6434, 6457), False, 'from bioimageit_core.data import RawData, ProcessedData\n'), ((7947, 7971), 're.split', 're.split', (['""" AND """', 'query'], {}), "(' AND ', query)\n", (7955, 7971), False, 'import re\n'), ((2325, 2355), 'bioimageit_core.data.RawData', 'RawData', (['self.metadata.uris[i]'], {}), '(self.metadata.uris[i])\n', (2332, 2355), False, 'from bioimageit_core.data import RawData, ProcessedData\n'), ((3203, 3238), 'bioimageit_core.metadata.query.query_list_single', 'query_list_single', (['selected_list', 'q'], {}), '(selected_list, q)\n', (3220, 3238), False, 'from bioimageit_core.metadata.query import query_list_single\n'), ((6748, 6784), 'bioimageit_core.data.ProcessedData', 'ProcessedData', (['self.metadata.uris[i]'], {}), '(self.metadata.uris[i])\n', (6761, 6784), False, 'from bioimageit_core.data import RawData, ProcessedData\n'), ((8088, 8123), 'bioimageit_core.metadata.query.query_list_single', 'query_list_single', (['selected_list', 'q'], {}), '(selected_list, q)\n', (8105, 8123), False, 'from 
bioimageit_core.metadata.query import query_list_single\n'), ((944, 967), 'bioimageit_core.config.ConfigAccess.instance', 'ConfigAccess.instance', ([], {}), '()\n', (965, 967), False, 'from bioimageit_core.config import ConfigAccess\n'), ((3995, 4025), 'bioimageit_core.data.RawData', 'RawData', (['self.metadata.uris[i]'], {}), '(self.metadata.uris[i])\n', (4002, 4025), False, 'from bioimageit_core.data import RawData, ProcessedData\n'), ((4509, 4532), 'bioimageit_core.config.ConfigAccess.instance', 'ConfigAccess.instance', ([], {}), '()\n', (4530, 4532), False, 'from bioimageit_core.config import ConfigAccess\n'), ((8892, 8928), 'bioimageit_core.data.ProcessedData', 'ProcessedData', (['self.metadata.uris[i]'], {}), '(self.metadata.uris[i])\n', (8905, 8928), False, 'from bioimageit_core.data import RawData, ProcessedData\n')]
|
from flask import send_from_directory
from appserver import app
@app.server.route('/static/<path>')
def serve_static(path):
    """Serve the file named *path* from the local ``assets`` directory.

    NOTE(review): the default ``<path>`` string converter does not match
    ``/``, so nested asset paths are not reachable through this route —
    confirm that is intended (``<path:path>`` would allow subdirectories).
    """
    return send_from_directory('assets', path)
|
[
"flask.send_from_directory",
"appserver.app.server.route"
] |
[((68, 102), 'appserver.app.server.route', 'app.server.route', (['"""/static/<path>"""'], {}), "('/static/<path>')\n", (84, 102), False, 'from appserver import app\n'), ((138, 173), 'flask.send_from_directory', 'send_from_directory', (['"""assets"""', 'path'], {}), "('assets', path)\n", (157, 173), False, 'from flask import send_from_directory\n')]
|
from django.utils.translation import ugettext_lazy as _

# UI status messages keyed by event name. Most values append a UIkit
# spinner snippet to a lazily-translated string.
# NOTE(review): concatenating a ugettext_lazy proxy with a plain str via
# ``+`` forces the translation at import time (fixing the language to the
# one active at import) — confirm this is acceptable, or consider
# ``format_lazy``.
MESSAGES = {
    'VacancyChange': _('Vacancy status change now pending...') + ' <span data-uk-spinner="ratio: 0.5"></span>',
    'Not_VacancyChange':
        '<span class="red-text" data-uk-icon="ban"></span> To add new pipeline action you have to disable vacancy.',
    'ActionDeleted': _('Action delete now pending...') + ' <span data-uk-spinner="ratio: 0.5"></span>',
    'NewAction': _(
        'Transaction for add new action now pending...') + ' <span data-uk-spinner="ratio: 0.5"></span>',
    'Subscribe': _(
        'Transaction for subscribe to vacancy now pending...') + ' <span data-uk-spinner="ratio: 0.5"></span>',
    'tokenApprove': _('Approving tokens for platform now pending') + ' <span data-uk-spinner="ratio: 0.5"></span>',
    'ChangeStatus': _('Changing status now pending...') + ' <span data-uk-spinner="ratio: 0.5"></span>',
    'NewMember': _('Your new contract now creating...') + ' <span data-uk-spinner="ratio: 0.5"></span>',
    'NewVacancy': _('Your new vacancy now creating...') + ' <span data-uk-spinner="ratio: 0.5"></span>',
    'NewCompany': _('New company contract now creating...') + ' <span data-uk-spinner="ratio: 0.5"></span>',
    'MemberSubscribe': _('Vacancy subscribing now pending...') + ' <span data-uk-spinner="ratio: 0.5"></span>',
}
|
[
"django.utils.translation.ugettext_lazy"
] |
[((91, 132), 'django.utils.translation.ugettext_lazy', '_', (['"""Vacancy status change now pending..."""'], {}), "('Vacancy status change now pending...')\n", (92, 132), True, 'from django.utils.translation import ugettext_lazy as _\n'), ((355, 388), 'django.utils.translation.ugettext_lazy', '_', (['"""Action delete now pending..."""'], {}), "('Action delete now pending...')\n", (356, 388), True, 'from django.utils.translation import ugettext_lazy as _\n'), ((460, 510), 'django.utils.translation.ugettext_lazy', '_', (['"""Transaction for add new action now pending..."""'], {}), "('Transaction for add new action now pending...')\n", (461, 510), True, 'from django.utils.translation import ugettext_lazy as _\n'), ((591, 647), 'django.utils.translation.ugettext_lazy', '_', (['"""Transaction for subscribe to vacancy now pending..."""'], {}), "('Transaction for subscribe to vacancy now pending...')\n", (592, 647), True, 'from django.utils.translation import ugettext_lazy as _\n'), ((731, 777), 'django.utils.translation.ugettext_lazy', '_', (['"""Approving tokens for platform now pending"""'], {}), "('Approving tokens for platform now pending')\n", (732, 777), True, 'from django.utils.translation import ugettext_lazy as _\n'), ((852, 887), 'django.utils.translation.ugettext_lazy', '_', (['"""Changing status now pending..."""'], {}), "('Changing status now pending...')\n", (853, 887), True, 'from django.utils.translation import ugettext_lazy as _\n'), ((959, 997), 'django.utils.translation.ugettext_lazy', '_', (['"""Your new contract now creating..."""'], {}), "('Your new contract now creating...')\n", (960, 997), True, 'from django.utils.translation import ugettext_lazy as _\n'), ((1070, 1107), 'django.utils.translation.ugettext_lazy', '_', (['"""Your new vacancy now creating..."""'], {}), "('Your new vacancy now creating...')\n", (1071, 1107), True, 'from django.utils.translation import ugettext_lazy as _\n'), ((1180, 1221), 'django.utils.translation.ugettext_lazy', 
'_', (['"""New company contract now creating..."""'], {}), "('New company contract now creating...')\n", (1181, 1221), True, 'from django.utils.translation import ugettext_lazy as _\n'), ((1299, 1338), 'django.utils.translation.ugettext_lazy', '_', (['"""Vacancy subscribing now pending..."""'], {}), "('Vacancy subscribing now pending...')\n", (1300, 1338), True, 'from django.utils.translation import ugettext_lazy as _\n')]
|
import io
from typing import Optional
import boto3
import botocore
from adventure_anywhere.definitions import SavesGateway
s3 = boto3.resource("s3")
class S3BucketSavesGateway(SavesGateway):
    """SavesGateway implementation that stores saves as S3 objects.

    Each player's save is a single object in ``bucket_name`` keyed by the
    player id.
    """

    # Name of the S3 bucket holding the save objects.
    bucket_name: str

    def __init__(self, bucket_name: str) -> None:
        self.bucket_name = bucket_name

    def fetch_save(self, player_id: str) -> Optional[io.BytesIO]:
        """Return the stored save for *player_id*, or ``None`` if absent.

        Any S3 error other than a missing key is propagated.
        """
        s3_object = s3.Object(self.bucket_name, player_id)
        try:
            content = s3_object.get()
        except botocore.exceptions.ClientError as e:
            if e.response["Error"]["Code"] == "NoSuchKey":
                return None
            # Bare raise (not ``raise e``) keeps the original traceback intact.
            raise
        return io.BytesIO(content["Body"].read())

    def update_save(self, player_id: str, save: io.BytesIO) -> None:
        """Overwrite (or create) the save object for *player_id*."""
        s3_object = s3.Object(self.bucket_name, player_id)
        s3_object.put(Body=save.getvalue())
|
[
"boto3.resource"
] |
[((131, 151), 'boto3.resource', 'boto3.resource', (['"""s3"""'], {}), "('s3')\n", (145, 151), False, 'import boto3\n')]
|
import numpy as np
from fym.core import BaseEnv, BaseSystem
from fym.utils import rot
def hat(v):
    """Return the 3x3 skew-symmetric (hat) matrix of the 3-vector ``v``.

    ``hat(v) @ w`` equals the cross product ``v x w``.
    """
    x, y, z = np.ravel(v)
    return np.array([
        [0, -z, y],
        [z, 0, -x],
        [-y, x, 0],
    ])
class Quadrotor(BaseEnv):
    """
    Prof. <NAME>'s model for quadrotor UAV is used.
    - https://www.math.ucsd.edu/~mleok/pdf/LeLeMc2010_quadrotor.pdf
    Description:
        - an NED frame is used for the inertia and body fixed frame.
        Hence, `+z` direction is downward.
        - ``pos`` and ``vel`` are resolved in the inertial frame,
        whereas ``R`` and ``omega`` are resolved in the body frame
        - ``fis`` is a vector of thrusts generated by the rotors.
    Variables:
        R: SO(3)
            The rotation matrix from the body-fixed frame to the inertial frame
            R = C_{i/b} = C_{b/i}^T
    """
    g = 9.81  # m/s^2
    e3 = np.vstack((0, 0, 1))  # inertial "down" unit vector (NED)
    J = np.diag([0.0820, 0.0845, 0.1377])  # inertia matrix
    m = 4.34  # Mass
    d = 0.315  # The distance from the center of mass to the center of each rotor
    ctf = 8.004e-4  # The torque coefficient. ``torque_i = (-1)^i ctf f_i``
    # Mixer matrix: maps the four rotor thrusts to (total thrust, M1, M2, M3).
    B = np.array(
        [[1, 1, 1, 1],
         [0, -d, 0, d],
         [d, 0, -d, 0],
         [-ctf, ctf, -ctf, ctf]]
    )
    Binv = np.linalg.pinv(B)  # pseudo-inverse used by fM2fis

    name = "quadrotor"

    def __init__(self, pos=None, vel=None, R=None, omega=None,
                 config="Quadrotor"):
        """Initialize the quadrotor state sub-systems.

        ``None`` defaults are replaced with fresh arrays on every call.
        (The previous ndarray defaults were evaluated once at class
        definition time, so all instances silently shared the same
        mutable arrays — the classic mutable-default-argument pitfall.)
        """
        super().__init__()
        if pos is None:
            pos = np.zeros((3, 1))
        if vel is None:
            vel = np.zeros((3, 1))
        if R is None:
            R = np.eye(3)
        if omega is None:
            omega = np.zeros((3, 1))
        self.pos = BaseSystem(pos)
        self.vel = BaseSystem(vel)
        self.R = BaseSystem(R)
        self.omega = BaseSystem(omega)

    def deriv(self, pos, vel, R, omega, fis):
        """Return the state derivatives ``(dpos, dvel, dR, domega)``."""
        m, g, J, e3 = self.m, self.g, self.J, self.e3
        f, *M = self.fis2fM(fis)
        M = np.vstack(M)
        dpos = vel
        # NED frame: gravity acts along +e3; thrust opposes the body z axis.
        dvel = g * e3 - f * R @ e3 / m
        dR = R @ hat(omega)
        # Euler's rotation equation: J*domega = M - omega x (J*omega)
        domega = np.linalg.inv(J).dot(M - np.cross(omega, J.dot(omega), axis=0))
        return dpos, dvel, dR, domega

    def set_dot(self, t, fis):
        """Assign the time derivatives of all sub-systems at time ``t``."""
        pos, vel, R, omega = self.observe_list()
        dots = self.deriv(pos, vel, R, omega, fis)
        self.pos.dot, self.vel.dot, self.R.dot, self.omega.dot = dots

    def fis2fM(self, fis):
        """Convert f_i's to force and moments
        Parameters:
            fis: (4, 1) array
        Return:
            f, M1, M2, M3: (4,) array of force and moments
        """
        return (self.B @ fis).ravel()

    def fM2fis(self, f, M1, M2, M3):
        """Convert force and moments to f_i's
        Parameters:
            f: scalar, the total thrust
            M1, M2, M3: scalars, the moments
        Return:
            fis: (4, 1) array of f_i's
        """
        return self.Binv @ np.vstack((f, M1, M2, M3))

    def angle2R(self, angle):
        """angle: phi, theta, psi in radian"""
        return rot.angle2dcm(*np.ravel(angle)[::-1]).T

    def R2angle(self, R):
        """angle: phi, theta, psi in radian"""
        return rot.dcm2angle(R.T)[::-1]
|
[
"numpy.eye",
"fym.core.BaseSystem",
"numpy.ravel",
"numpy.zeros",
"fym.utils.rot.dcm2angle",
"numpy.array",
"numpy.linalg.inv",
"numpy.diag",
"numpy.linalg.pinv",
"numpy.vstack"
] |
[((141, 193), 'numpy.array', 'np.array', (['[[0, -v3, v2], [v3, 0, -v1], [-v2, v1, 0]]'], {}), '([[0, -v3, v2], [v3, 0, -v1], [-v2, v1, 0]])\n', (149, 193), True, 'import numpy as np\n'), ((897, 917), 'numpy.vstack', 'np.vstack', (['(0, 0, 1)'], {}), '((0, 0, 1))\n', (906, 917), True, 'import numpy as np\n'), ((926, 958), 'numpy.diag', 'np.diag', (['[0.082, 0.0845, 0.1377]'], {}), '([0.082, 0.0845, 0.1377])\n', (933, 958), True, 'import numpy as np\n'), ((1147, 1225), 'numpy.array', 'np.array', (['[[1, 1, 1, 1], [0, -d, 0, d], [d, 0, -d, 0], [-ctf, ctf, -ctf, ctf]]'], {}), '([[1, 1, 1, 1], [0, -d, 0, d], [d, 0, -d, 0], [-ctf, ctf, -ctf, ctf]])\n', (1155, 1225), True, 'import numpy as np\n'), ((1278, 1295), 'numpy.linalg.pinv', 'np.linalg.pinv', (['B'], {}), '(B)\n', (1292, 1295), True, 'import numpy as np\n'), ((1365, 1381), 'numpy.zeros', 'np.zeros', (['(3, 1)'], {}), '((3, 1))\n', (1373, 1381), True, 'import numpy as np\n'), ((1404, 1420), 'numpy.zeros', 'np.zeros', (['(3, 1)'], {}), '((3, 1))\n', (1412, 1420), True, 'import numpy as np\n'), ((1441, 1450), 'numpy.eye', 'np.eye', (['(3)'], {}), '(3)\n', (1447, 1450), True, 'import numpy as np\n'), ((1475, 1491), 'numpy.zeros', 'np.zeros', (['(3, 1)'], {}), '((3, 1))\n', (1483, 1491), True, 'import numpy as np\n'), ((1577, 1592), 'fym.core.BaseSystem', 'BaseSystem', (['pos'], {}), '(pos)\n', (1587, 1592), False, 'from fym.core import BaseEnv, BaseSystem\n'), ((1612, 1627), 'fym.core.BaseSystem', 'BaseSystem', (['vel'], {}), '(vel)\n', (1622, 1627), False, 'from fym.core import BaseEnv, BaseSystem\n'), ((1645, 1658), 'fym.core.BaseSystem', 'BaseSystem', (['R'], {}), '(R)\n', (1655, 1658), False, 'from fym.core import BaseEnv, BaseSystem\n'), ((1680, 1697), 'fym.core.BaseSystem', 'BaseSystem', (['omega'], {}), '(omega)\n', (1690, 1697), False, 'from fym.core import BaseEnv, BaseSystem\n'), ((1845, 1857), 'numpy.vstack', 'np.vstack', (['M'], {}), '(M)\n', (1854, 1857), True, 'import numpy as np\n'), ((2798, 2824), 
'numpy.vstack', 'np.vstack', (['(f, M1, M2, M3)'], {}), '((f, M1, M2, M3))\n', (2807, 2824), True, 'import numpy as np\n'), ((3047, 3065), 'fym.utils.rot.dcm2angle', 'rot.dcm2angle', (['R.T'], {}), '(R.T)\n', (3060, 3065), False, 'from fym.utils import rot\n'), ((1962, 1978), 'numpy.linalg.inv', 'np.linalg.inv', (['J'], {}), '(J)\n', (1975, 1978), True, 'import numpy as np\n'), ((2933, 2948), 'numpy.ravel', 'np.ravel', (['angle'], {}), '(angle)\n', (2941, 2948), True, 'import numpy as np\n')]
|
import os
import sys
import logging
import json
import requests
import datetime
from msal import ConfidentialClientApplication
# Reusable function to create a logging mechanism
def create_logger(logfile=None):
    """Configure root logging to stdout and, optionally, to *logfile*.

    If the log file cannot be opened, logging falls back to stdout only
    and the failure is reported.
    """
    # Create a logging handler that will write to stdout and optionally to a log file
    stdout_handler = logging.StreamHandler(sys.stdout)
    try:
        if logfile is not None:  # identity check instead of '!= None'
            file_handler = logging.FileHandler(filename=logfile)
            handlers = [file_handler, stdout_handler]
        else:
            handlers = [stdout_handler]
    except OSError:  # narrowed from a bare 'except:' that hid real bugs
        handlers = [stdout_handler]
        logging.error('Log file could not be created. Error: ', exc_info=True)
    # Configure logging mechanism
    logging.basicConfig(
        level=logging.INFO,
        format='%(asctime)s - %(name)s - %(levelname)s - %(message)s',
        handlers=handlers
    )
# Reusable function to obtain an access token
def get_token(resource):
    """Obtain an OAuth access token for *resource* via MSAL client credentials.

    Reads CLIENT_ID, CLIENT_SECRET and TENANT_NAME from the environment.
    Returns the access token string, or ``None`` after logging the error.
    """
    authority_url = 'https://login.microsoftonline.com/' + os.getenv('TENANT_NAME')
    client = ConfidentialClientApplication(
        client_id=os.getenv('CLIENT_ID'),
        client_credential=os.getenv('CLIENT_SECRET'),
        authority=authority_url
    )
    logging.info('Issuing request to obtain access token...')
    response = client.acquire_token_for_client(resource)
    # Guard clause: a missing token_type means the acquisition failed.
    if "token_type" not in response:
        logging.error('Error obtaining access token')
        logging.error(response['error'] + ': ' + response['error_description'])
        return None
    logging.info('Access token obtained successfully.')
    return response['access_token']
# Query Azure REST API
def rest_api_request(url, token, query_params=None):
    """Issue an authenticated GET against an Azure REST API endpoint.

    Returns the parsed JSON body. On a non-200 response the error details
    are logged but the parsed body is still returned (matching the
    original fallback behavior, where the except handler returned the
    parsed body).
    """
    # Create authorization header
    headers = {'Content-Type': 'application/json',
               'Authorization': 'Bearer {0}'.format(token)}
    # Issue request to Azure API
    logging.info(f"Issuing request to {url}")
    response = requests.get(
        headers=headers,
        url=url,
        params=query_params
    )
    # Parse once up front. Previously, if requests.get() itself raised,
    # the except handler referenced 'response' before assignment
    # (UnboundLocalError masking the real failure).
    body = json.loads(response.text)
    if response.status_code != 200:
        logging.error('Error encountered querying Azure API')
        # .get() avoids a KeyError when the error payload is not the
        # standard {code, message} shape.
        logging.error(f"Error code was: {body.get('code')}")
        logging.error(f"Error message was: {body.get('message')}")
    return body
def main():
    """Fetch Azure tenant activity logs (with paging) into ``logs.json``.

    Output is a single JSON array; entries are appended comma-separated
    and the trailing comma is trimmed before the array is closed.
    """
    # Create logging mechanism
    create_logger()
    # Obtain an access token to Azure REST API
    token = get_token(
        resource="https://management.core.windows.net//.default"
    )
    # Create date/time stamps and filter
    todaydate = (datetime.datetime.now() +
                 datetime.timedelta(days=int(2))).strftime("%Y-%m-%d")
    startdate = (datetime.datetime.today() -
                 datetime.timedelta(days=int(os.getenv('DAYS')))).strftime("%Y-%m-%d")
    # Renamed from 'filter' so the builtin of the same name is not shadowed.
    event_filter = "eventTimestamp ge " + startdate + " and eventTimestamp le " + \
        todaydate + " and eventChannels eq 'Admin,Operation'" #and resourceProvider eq 'Microsoft.Authorization'"
    # Get first set of tenant activity logs and write to a file
    response = rest_api_request(
        url="https://management.azure.com/providers/Microsoft.Insights/eventtypes/management/values",
        token=token,
        query_params={
            'api-version': '2015-04-01',
            '$filter': event_filter
        }
    )
    # Create a new file and get it formatted for an array of json objects
    logging.info('Creating output file...')
    try:
        with open('logs.json', 'w') as log_file:
            log_file.write('[')
    except Exception:
        logging.error('Output file could not be created. Error: ', exc_info=True)
    # Iterate through each returned log and write it the file
    logging.info('Adding entries to output file...')
    try:
        with open('logs.json', 'a') as log_file:
            for log_entry in response['value']:
                log_file.write(json.dumps(log_entry) + ',')
    except Exception:
        logging.error('Unable to append to log file. Error: ', exc_info=True)
    # If paged results are returned, retreive them and write to a file
    while 'nextLink' in response:
        logging.info(
            f"Paged results returned. Retrieving from {response['nextLink']}")
        response = rest_api_request(
            url=response['nextLink'],
            token=token,
            query_params={
            }
        )
        try:
            with open('logs.json', 'a') as log_file:
                for log_entry in response['value']:
                    log_file.write(json.dumps(log_entry) + ',')
        except Exception:
            logging.error('Unable to append to output file. Error: ', exc_info=True)
    # Remove the trailing comma from the file
    try:
        logging.info('Formatting output file...')
        with open('logs.json', 'rb+') as log_file:
            log_file.seek(-1, os.SEEK_END)
            log_file.truncate()
        # Close out the array
        with open('logs.json', 'a') as log_file:
            log_file.write(']')
        logging.info('Output file created successfully.')
    except Exception:
        logging.error('Unable to format output file. Error: ', exc_info=True)


if __name__ == "__main__":
    main()
|
[
"logging.error",
"logging.FileHandler",
"logging.basicConfig",
"json.loads",
"datetime.datetime.today",
"logging.StreamHandler",
"json.dumps",
"logging.info",
"requests.get",
"datetime.datetime.now",
"os.getenv"
] |
[((319, 352), 'logging.StreamHandler', 'logging.StreamHandler', (['sys.stdout'], {}), '(sys.stdout)\n', (340, 352), False, 'import logging\n'), ((729, 855), 'logging.basicConfig', 'logging.basicConfig', ([], {'level': 'logging.INFO', 'format': '"""%(asctime)s - %(name)s - %(levelname)s - %(message)s"""', 'handlers': 'handlers'}), "(level=logging.INFO, format=\n '%(asctime)s - %(name)s - %(levelname)s - %(message)s', handlers=handlers)\n", (748, 855), False, 'import logging\n'), ((1193, 1250), 'logging.info', 'logging.info', (['"""Issuing request to obtain access token..."""'], {}), "('Issuing request to obtain access token...')\n", (1205, 1250), False, 'import logging\n'), ((3643, 3682), 'logging.info', 'logging.info', (['"""Creating output file..."""'], {}), "('Creating output file...')\n", (3655, 3682), False, 'import logging\n'), ((3945, 3993), 'logging.info', 'logging.info', (['"""Adding entries to output file..."""'], {}), "('Adding entries to output file...')\n", (3957, 3993), False, 'import logging\n'), ((1349, 1400), 'logging.info', 'logging.info', (['"""Access token obtained successfully."""'], {}), "('Access token obtained successfully.')\n", (1361, 1400), False, 'import logging\n'), ((1459, 1504), 'logging.error', 'logging.error', (['"""Error obtaining access token"""'], {}), "('Error obtaining access token')\n", (1472, 1504), False, 'import logging\n'), ((1513, 1584), 'logging.error', 'logging.error', (["(response['error'] + ': ' + response['error_description'])"], {}), "(response['error'] + ': ' + response['error_description'])\n", (1526, 1584), False, 'import logging\n'), ((1874, 1915), 'logging.info', 'logging.info', (['f"""Issuing request to {url}"""'], {}), "(f'Issuing request to {url}')\n", (1886, 1915), False, 'import logging\n'), ((1935, 1994), 'requests.get', 'requests.get', ([], {'headers': 'headers', 'url': 'url', 'params': 'query_params'}), '(headers=headers, url=url, params=query_params)\n', (1947, 1994), False, 'import requests\n'), 
((4374, 4453), 'logging.info', 'logging.info', (['f"""Paged results returned. Retrieving from {response[\'nextLink\']}"""'], {}), '(f"Paged results returned. Retrieving from {response[\'nextLink\']}")\n', (4386, 4453), False, 'import logging\n'), ((4975, 5016), 'logging.info', 'logging.info', (['"""Formatting output file..."""'], {}), "('Formatting output file...')\n", (4987, 5016), False, 'import logging\n'), ((5263, 5312), 'logging.info', 'logging.info', (['"""Output file created successfully."""'], {}), "('Output file created successfully.')\n", (5275, 5312), False, 'import logging\n'), ((417, 454), 'logging.FileHandler', 'logging.FileHandler', ([], {'filename': 'logfile'}), '(filename=logfile)\n', (436, 454), False, 'import logging\n'), ((619, 689), 'logging.error', 'logging.error', (['"""Log file could not be created. Error: """'], {'exc_info': '(True)'}), "('Log file could not be created. Error: ', exc_info=True)\n", (632, 689), False, 'import logging\n'), ((1015, 1037), 'os.getenv', 'os.getenv', (['"""CLIENT_ID"""'], {}), "('CLIENT_ID')\n", (1024, 1037), False, 'import os\n'), ((1065, 1091), 'os.getenv', 'os.getenv', (['"""CLIENT_SECRET"""'], {}), "('CLIENT_SECRET')\n", (1074, 1091), False, 'import os\n'), ((2141, 2166), 'json.loads', 'json.loads', (['response.text'], {}), '(response.text)\n', (2151, 2166), False, 'import json\n'), ((2193, 2246), 'logging.error', 'logging.error', (['"""Error encountered querying Azure API"""'], {}), "('Error encountered querying Azure API')\n", (2206, 2246), False, 'import logging\n'), ((2520, 2545), 'json.loads', 'json.loads', (['response.text'], {}), '(response.text)\n', (2530, 2545), False, 'import json\n'), ((3803, 3876), 'logging.error', 'logging.error', (['"""Output file could not be created. Error: """'], {'exc_info': '(True)'}), "('Output file could not be created. 
Error: ', exc_info=True)\n", (3816, 3876), False, 'import logging\n'), ((4190, 4259), 'logging.error', 'logging.error', (['"""Unable to append to log file. Error: """'], {'exc_info': '(True)'}), "('Unable to append to log file. Error: ', exc_info=True)\n", (4203, 4259), False, 'import logging\n'), ((5348, 5417), 'logging.error', 'logging.error', (['"""Unable to format output file. Error: """'], {'exc_info': '(True)'}), "('Unable to format output file. Error: ', exc_info=True)\n", (5361, 5417), False, 'import logging\n'), ((1158, 1182), 'os.getenv', 'os.getenv', (['"""TENANT_NAME"""'], {}), "('TENANT_NAME')\n", (1167, 1182), False, 'import os\n'), ((2812, 2835), 'datetime.datetime.now', 'datetime.datetime.now', ([], {}), '()\n', (2833, 2835), False, 'import datetime\n'), ((2926, 2951), 'datetime.datetime.today', 'datetime.datetime.today', ([], {}), '()\n', (2949, 2951), False, 'import datetime\n'), ((4838, 4910), 'logging.error', 'logging.error', (['"""Unable to append to output file. Error: """'], {'exc_info': '(True)'}), "('Unable to append to output file. Error: ', exc_info=True)\n", (4851, 4910), False, 'import logging\n'), ((4131, 4152), 'json.dumps', 'json.dumps', (['log_entry'], {}), '(log_entry)\n', (4141, 4152), False, 'import json\n'), ((2310, 2335), 'json.loads', 'json.loads', (['response.text'], {}), '(response.text)\n', (2320, 2335), False, 'import json\n'), ((2414, 2439), 'json.loads', 'json.loads', (['response.text'], {}), '(response.text)\n', (2424, 2439), False, 'import json\n'), ((2999, 3016), 'os.getenv', 'os.getenv', (['"""DAYS"""'], {}), "('DAYS')\n", (3008, 3016), False, 'import os\n'), ((4771, 4792), 'json.dumps', 'json.dumps', (['log_entry'], {}), '(log_entry)\n', (4781, 4792), False, 'import json\n')]
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import time
import math
import numpy as np
import torch
import torch.nn as nn
import torch.nn.parallel
import torch.backends.cudnn as cudnn
import torch.optim as optim
import torch.utils.data
from torch.utils.data import DataLoader
import torchvision.transforms as transforms
import torchvision.utils as vutils
from torch.autograd import Variable
from skimage.measure import compare_psnr as ski_psnr
from skimage.measure import compare_ssim as ski_ssim
import os
import csv
import logging
from model import Network
import torch.nn.functional as F
from data_load_own import get_training_set, get_test_set
from data_load_mix import get_dataset_deform
import utils
class CNN_train():
    """Training/validation harness for the image-restoration ``Network``.

    ``__init__`` builds the data loaders for the chosen dataset and
    ``__call__`` runs the training loop, evaluating PSNR/SSIM on the
    validation set every ``test_interval`` epochs and checkpointing the
    model at each of those epochs.
    """

    def __init__(self, dataset_name, imgSize=63, batchsize=32):
        """Build the data loaders for *dataset_name* ('mix' or 'yourdata')."""
        self.imgSize = imgSize
        self.batchsize = batchsize
        self.dataset_name = dataset_name

        # load dataset
        if dataset_name == 'mix' or dataset_name == 'yourdata':
            if dataset_name == 'mix':
                self.num_work = 8
                train_dir = '/dataset/train/'
                val_dir = '/dataset/val/'
                test_dir = '/dataset/test/'
                train_set = get_dataset_deform(train_dir, val_dir, test_dir, 0)
                val_set = get_dataset_deform(train_dir, val_dir, test_dir, 1)
                # test_set = get_dataset_deform(train_dir, val_dir, test_dir, 2)
                self.dataloader = DataLoader(dataset=train_set, num_workers=self.num_work, batch_size=self.batchsize, shuffle=True, pin_memory=True)
                self.val_loader = DataLoader(dataset=val_set, num_workers=self.num_work, batch_size=1, shuffle=False, pin_memory=False)
                # self.test_dataloader = DataLoader(dataset=test_set, num_workers=self.num_work, batch_size=1, shuffle=False, pin_memory=False)
            elif dataset_name == 'yourdata':
                self.num_work = 8
                # Specify the path of your data
                train_input_dir = '/dataset/yourdata_train/input/'
                train_target_dir = '/dataset/yourdata_train/target/'
                test_input_dir = '/dataset/yourdata_test/input/'
                test_target_dir = '/dataset/yourdata_test/target/'
                train_set = get_training_set(train_input_dir, train_target_dir, True)
                test_set = get_training_set(test_input_dir, test_target_dir, False)
                self.dataloader = DataLoader(dataset=train_set, num_workers=self.num_work, batch_size=self.batchsize, shuffle=True, drop_last=True)
                self.test_dataloader = DataLoader(dataset=test_set, num_workers=self.num_work, batch_size=1, shuffle=False)
        else:
            print('\tInvalid input dataset name at CNN_train()')
            exit(1)

    def __call__(self, cgp, gpuID, epoch_num=150, gpu_num=1):
        """Train for *epoch_num* epochs on GPU *gpuID*; return the final epoch's
        accumulated L1 training loss."""
        print('GPUID    :', gpuID)
        print('epoch_num:', epoch_num)

        # define model
        torch.manual_seed(2018)
        torch.cuda.manual_seed(2018)
        torch.backends.cudnn.benchmark = True
        torch.backends.cudnn.enabled = True
        L1_loss = nn.L1Loss()
        L1_loss = L1_loss.cuda(gpuID)
        model = Network(16, 10, L1_loss, gpuID=gpuID)
        if gpu_num > 1:
            device_ids = [i for i in range(gpu_num)]
            model = torch.nn.DataParallel(model, device_ids=device_ids)
        model = model.cuda(gpuID)
        logging.info("param size = %fMB", utils.count_parameters_in_MB(model))
        print('Param:', utils.count_parameters_in_MB(model))
        optimizer = optim.Adam(model.parameters(), lr=0.001, betas=(0.9, 0.999))
        scheduler = optim.lr_scheduler.CosineAnnealingLR(optimizer, T_max=epoch_num)
        test_interval = 5
        # for output images
        if not os.path.exists('./results'):
            os.makedirs('./results/Inputs')
            os.makedirs('./results/Outputs')
            os.makedirs('./results/Targets')
        # Train loop
        for epoch in range(1, epoch_num+1):
            scheduler.step()
            start_time = time.time()
            print('epoch', epoch)
            train_loss = 0
            for module in model.children():
                module.train(True)
            for ite, (input, target) in enumerate(self.dataloader):
                lr_patch = Variable(input, requires_grad=False).cuda(gpuID)
                hr_patch = Variable(target, requires_grad=False).cuda(gpuID)
                optimizer.zero_grad()
                output = model(lr_patch)
                l1_loss = L1_loss(output, hr_patch)
                l1_loss.backward()
                optimizer.step()
                train_loss += l1_loss.item()
                if ite % 500 == 0:
                    vutils.save_image(lr_patch.data, './input_sample%d.png' % gpuID, normalize=False)
                    vutils.save_image(hr_patch.data, './target_sample%d.png' % gpuID, normalize=False)
                    vutils.save_image(output.data, './output_sample%d.png' % gpuID, normalize=False)
            print('Train set : Average loss: {:.4f}'.format(train_loss))
            print('time ', time.time()-start_time)
            # check val/test performance
            if epoch % test_interval == 0:
                with torch.no_grad():
                    print('------------------------')
                    for module in model.children():
                        module.train(False)
                    test_psnr = 0
                    test_ssim = 0
                    eps = 1e-10
                    test_ite = 0
                    # FIX: the loop index was discarded ('for _, ...') while the
                    # save_image calls below referenced an undefined name 'i',
                    # raising a NameError on the first validation epoch.
                    for i, (input, target) in enumerate(self.val_loader):
                        lr_patch = Variable(input, requires_grad=False).cuda(gpuID)
                        hr_patch = Variable(target, requires_grad=False).cuda(gpuID)
                        output = model(lr_patch)
                        # save images
                        vutils.save_image(output.data, './results/Outputs/%05d.png' % (int(i)), padding=0, normalize=False)
                        vutils.save_image(lr_patch.data, './results/Inputs/%05d.png' % (int(i)), padding=0, normalize=False)
                        vutils.save_image(hr_patch.data, './results/Targets/%05d.png' % (int(i)), padding=0, normalize=False)
                        # Calculation of SSIM and PSNR values
                        output = output.data.cpu().numpy()[0]
                        output[output>1] = 1
                        output[output<0] = 0
                        output = output.transpose((1,2,0))
                        hr_patch = hr_patch.data.cpu().numpy()[0]
                        hr_patch[hr_patch>1] = 1
                        hr_patch[hr_patch<0] = 0
                        hr_patch = hr_patch.transpose((1,2,0))
                        # SSIM
                        test_ssim+= ski_ssim(output, hr_patch, data_range=1, multichannel=True)
                        # PSNR (eps guards against log10(0) on identical images)
                        imdf = (output - hr_patch) ** 2
                        mse = np.mean(imdf) + eps
                        test_psnr+= 10 * math.log10(1.0/mse)
                        test_ite += 1
                    test_psnr /= (test_ite)
                    test_ssim /= (test_ite)
                    print('Valid PSNR: {:.4f}'.format(test_psnr))
                    print('Valid SSIM: {:.4f}'.format(test_ssim))
                    # Context manager guarantees the file is closed even if
                    # the write fails (was an unclosed open()/close() pair).
                    with open('PSNR.txt', 'a') as f:
                        writer = csv.writer(f, lineterminator='\n')
                        writer.writerow([epoch, test_psnr, test_ssim])
                    print('------------------------')
                torch.save(model.state_dict(), './model_%d.pth' % int(epoch))
        return train_loss
|
[
"numpy.mean",
"torch.no_grad",
"data_load_mix.get_dataset_deform",
"torch.utils.data.DataLoader",
"os.path.exists",
"torch.optim.lr_scheduler.CosineAnnealingLR",
"math.log10",
"skimage.measure.compare_ssim",
"utils.count_parameters_in_MB",
"csv.writer",
"torch.manual_seed",
"torch.autograd.Variable",
"torch.cuda.manual_seed",
"torchvision.utils.save_image",
"data_load_own.get_training_set",
"os.makedirs",
"torch.nn.L1Loss",
"time.time",
"model.Network",
"torch.nn.DataParallel"
] |
[((2975, 2998), 'torch.manual_seed', 'torch.manual_seed', (['(2018)'], {}), '(2018)\n', (2992, 2998), False, 'import torch\n'), ((3007, 3035), 'torch.cuda.manual_seed', 'torch.cuda.manual_seed', (['(2018)'], {}), '(2018)\n', (3029, 3035), False, 'import torch\n'), ((3144, 3155), 'torch.nn.L1Loss', 'nn.L1Loss', ([], {}), '()\n', (3153, 3155), True, 'import torch.nn as nn\n'), ((3210, 3247), 'model.Network', 'Network', (['(16)', '(10)', 'L1_loss'], {'gpuID': 'gpuID'}), '(16, 10, L1_loss, gpuID=gpuID)\n', (3217, 3247), False, 'from model import Network\n'), ((3672, 3736), 'torch.optim.lr_scheduler.CosineAnnealingLR', 'optim.lr_scheduler.CosineAnnealingLR', (['optimizer'], {'T_max': 'epoch_num'}), '(optimizer, T_max=epoch_num)\n', (3708, 3736), True, 'import torch.optim as optim\n'), ((3345, 3396), 'torch.nn.DataParallel', 'torch.nn.DataParallel', (['model'], {'device_ids': 'device_ids'}), '(model, device_ids=device_ids)\n', (3366, 3396), False, 'import torch\n'), ((3473, 3508), 'utils.count_parameters_in_MB', 'utils.count_parameters_in_MB', (['model'], {}), '(model)\n', (3501, 3508), False, 'import utils\n'), ((3534, 3569), 'utils.count_parameters_in_MB', 'utils.count_parameters_in_MB', (['model'], {}), '(model)\n', (3562, 3569), False, 'import utils\n'), ((3806, 3833), 'os.path.exists', 'os.path.exists', (['"""./results"""'], {}), "('./results')\n", (3820, 3833), False, 'import os\n'), ((3847, 3878), 'os.makedirs', 'os.makedirs', (['"""./results/Inputs"""'], {}), "('./results/Inputs')\n", (3858, 3878), False, 'import os\n'), ((3891, 3923), 'os.makedirs', 'os.makedirs', (['"""./results/Outputs"""'], {}), "('./results/Outputs')\n", (3902, 3923), False, 'import os\n'), ((3936, 3968), 'os.makedirs', 'os.makedirs', (['"""./results/Targets"""'], {}), "('./results/Targets')\n", (3947, 3968), False, 'import os\n'), ((4089, 4100), 'time.time', 'time.time', ([], {}), '()\n', (4098, 4100), False, 'import time\n'), ((1222, 1273), 'data_load_mix.get_dataset_deform', 
'get_dataset_deform', (['train_dir', 'val_dir', 'test_dir', '(0)'], {}), '(train_dir, val_dir, test_dir, 0)\n', (1240, 1273), False, 'from data_load_mix import get_dataset_deform\n'), ((1300, 1351), 'data_load_mix.get_dataset_deform', 'get_dataset_deform', (['train_dir', 'val_dir', 'test_dir', '(1)'], {}), '(train_dir, val_dir, test_dir, 1)\n', (1318, 1351), False, 'from data_load_mix import get_dataset_deform\n'), ((1467, 1586), 'torch.utils.data.DataLoader', 'DataLoader', ([], {'dataset': 'train_set', 'num_workers': 'self.num_work', 'batch_size': 'self.batchsize', 'shuffle': '(True)', 'pin_memory': '(True)'}), '(dataset=train_set, num_workers=self.num_work, batch_size=self.\n batchsize, shuffle=True, pin_memory=True)\n', (1477, 1586), False, 'from torch.utils.data import DataLoader\n'), ((1616, 1721), 'torch.utils.data.DataLoader', 'DataLoader', ([], {'dataset': 'val_set', 'num_workers': 'self.num_work', 'batch_size': '(1)', 'shuffle': '(False)', 'pin_memory': '(False)'}), '(dataset=val_set, num_workers=self.num_work, batch_size=1,\n shuffle=False, pin_memory=False)\n', (1626, 1721), False, 'from torch.utils.data import DataLoader\n'), ((2285, 2342), 'data_load_own.get_training_set', 'get_training_set', (['train_input_dir', 'train_target_dir', '(True)'], {}), '(train_input_dir, train_target_dir, True)\n', (2301, 2342), False, 'from data_load_own import get_training_set, get_test_set\n'), ((2370, 2426), 'data_load_own.get_training_set', 'get_training_set', (['test_input_dir', 'test_target_dir', '(False)'], {}), '(test_input_dir, test_target_dir, False)\n', (2386, 2426), False, 'from data_load_own import get_training_set, get_test_set\n'), ((2461, 2579), 'torch.utils.data.DataLoader', 'DataLoader', ([], {'dataset': 'train_set', 'num_workers': 'self.num_work', 'batch_size': 'self.batchsize', 'shuffle': '(True)', 'drop_last': '(True)'}), '(dataset=train_set, num_workers=self.num_work, batch_size=self.\n batchsize, shuffle=True, drop_last=True)\n', (2471, 2579), 
False, 'from torch.utils.data import DataLoader\n'), ((2614, 2702), 'torch.utils.data.DataLoader', 'DataLoader', ([], {'dataset': 'test_set', 'num_workers': 'self.num_work', 'batch_size': '(1)', 'shuffle': '(False)'}), '(dataset=test_set, num_workers=self.num_work, batch_size=1,\n shuffle=False)\n', (2624, 2702), False, 'from torch.utils.data import DataLoader\n'), ((4761, 4847), 'torchvision.utils.save_image', 'vutils.save_image', (['lr_patch.data', "('./input_sample%d.png' % gpuID)"], {'normalize': '(False)'}), "(lr_patch.data, './input_sample%d.png' % gpuID, normalize=\n False)\n", (4778, 4847), True, 'import torchvision.utils as vutils\n'), ((4863, 4950), 'torchvision.utils.save_image', 'vutils.save_image', (['hr_patch.data', "('./target_sample%d.png' % gpuID)"], {'normalize': '(False)'}), "(hr_patch.data, './target_sample%d.png' % gpuID, normalize\n =False)\n", (4880, 4950), True, 'import torchvision.utils as vutils\n'), ((4966, 5051), 'torchvision.utils.save_image', 'vutils.save_image', (['output.data', "('./output_sample%d.png' % gpuID)"], {'normalize': '(False)'}), "(output.data, './output_sample%d.png' % gpuID, normalize=False\n )\n", (4983, 5051), True, 'import torchvision.utils as vutils\n'), ((5147, 5158), 'time.time', 'time.time', ([], {}), '()\n', (5156, 5158), False, 'import time\n'), ((5289, 5304), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (5302, 5304), False, 'import torch\n'), ((7452, 7486), 'csv.writer', 'csv.writer', (['f'], {'lineterminator': '"""\n"""'}), "(f, lineterminator='\\n')\n", (7462, 7486), False, 'import csv\n'), ((4336, 4372), 'torch.autograd.Variable', 'Variable', (['input'], {'requires_grad': '(False)'}), '(input, requires_grad=False)\n', (4344, 4372), False, 'from torch.autograd import Variable\n'), ((4412, 4449), 'torch.autograd.Variable', 'Variable', (['target'], {'requires_grad': '(False)'}), '(target, requires_grad=False)\n', (4420, 4449), False, 'from torch.autograd import Variable\n'), ((6861, 6920), 
'skimage.measure.compare_ssim', 'ski_ssim', (['output', 'hr_patch'], {'data_range': '(1)', 'multichannel': '(True)'}), '(output, hr_patch, data_range=1, multichannel=True)\n', (6869, 6920), True, 'from skimage.measure import compare_ssim as ski_ssim\n'), ((7038, 7051), 'numpy.mean', 'np.mean', (['imdf'], {}), '(imdf)\n', (7045, 7051), True, 'import numpy as np\n'), ((7099, 7120), 'math.log10', 'math.log10', (['(1.0 / mse)'], {}), '(1.0 / mse)\n', (7109, 7120), False, 'import math\n'), ((5698, 5734), 'torch.autograd.Variable', 'Variable', (['input'], {'requires_grad': '(False)'}), '(input, requires_grad=False)\n', (5706, 5734), False, 'from torch.autograd import Variable\n'), ((5782, 5819), 'torch.autograd.Variable', 'Variable', (['target'], {'requires_grad': '(False)'}), '(target, requires_grad=False)\n', (5790, 5819), False, 'from torch.autograd import Variable\n')]
|
import random
import numpy as np
import matplotlib.pyplot as plt
import copy
from mpl_toolkits.mplot3d import Axes3D
from matplotlib import cm
from matplotlib import colors
##
# Running tally of dealt cards, indexed by card value (2-11; index 10 covers
# tens/faces, 11 is the ace). refresh_counting resets it once 52 cards are seen.
counting = [0 for i in range(12)]
# Snapshot of ``counting`` (taken by copy_counting) that the
# calculate_counting* true-count estimators read.
counting_temp = [0 for i in range(12)]
def refresh_counting(num):
    """Tally one dealt card of value ``num``; restart once a full deck (52 cards) is seen."""
    global counting
    counting[num] += 1
    if sum(counting) >= 52:
        counting = [0] * 12
def refresh_counting_temp(num):
    """Tally one card of value ``num`` in the snapshot count; restart after 52 cards."""
    global counting_temp
    counting_temp[num] += 1
    if sum(counting_temp) >= 52:
        counting_temp = [0] * 12
def get_counting():
    """Expose the module-level running count list (no ``global`` needed for a read)."""
    return counting
def get_counting_temp():
    """Expose the module-level snapshot count list (read-only access)."""
    return counting_temp
def copy_counting():
    """Snapshot the running tally into ``counting_temp`` as an independent copy."""
    global counting_temp
    counting_temp = copy.deepcopy(counting)
def calculate_counting1():
    """Hi-Lo style "true count" from the snapshot tally.

    Low cards (2-6) add one per card seen, tens and aces (index >= 10)
    subtract one; the running count is then normalised by the cards still
    undealt, in 13-card units.

    Bug fix: the original divided by ``52 - len(counting_temp)``, but the
    tally list always has 12 slots, so the divisor was a constant.  The
    number of cards dealt is ``sum(counting_temp)``.
    """
    running = 0
    for value, cnt in enumerate(counting_temp):
        if 2 <= value <= 6:
            running += cnt
        elif value >= 10:
            running -= cnt
    remaining = 52 - sum(counting_temp)
    if remaining <= 0:  # fully dealt deck: avoid division by zero
        return 0
    return round(running / (remaining / 13.0))
def calculate_counting2():
    """True-count estimate for a weighted system: 2-7 positive (4s and 5s
    double weight), tens double negative.

    Bug fix: normalise by the cards actually seen (``sum(counting_temp)``)
    instead of the constant list length, so the running count scales with
    the cards left undealt (in 13-card units).
    """
    running = 0
    for value, cnt in enumerate(counting_temp):
        if 2 <= value <= 7:
            running += cnt * (2 if value in (4, 5) else 1)
        elif value == 10:
            running -= cnt * 2
    remaining = 52 - sum(counting_temp)
    if remaining <= 0:  # fully dealt deck: avoid division by zero
        return 0
    return round(running / (remaining / 13.0))
def calculate_counting3():
    """True-count estimate for a multi-level scheme (2,3,6:+2; 4:+3; 5:+4;
    7:+1; 9:-2; 10:-3), normalised by cards left undealt in 13-card units.

    Bug fix: the divisor used ``len(counting_temp)`` (always 12, i.e. a
    constant) where the cards seen, ``sum(counting_temp)``, was intended.
    """
    weights = {2: 2, 3: 2, 4: 3, 5: 4, 6: 2, 7: 1, 9: -2, 10: -3}
    running = sum(cnt * weights.get(value, 0)
                  for value, cnt in enumerate(counting_temp))
    remaining = 52 - sum(counting_temp)
    if remaining <= 0:  # fully dealt deck: avoid division by zero
        return 0
    return round(running / (remaining / 13.0))
##
class Deck(object):
    """A single 52-card blackjack deck.

    Face cards are stored as 10 and aces as 11.  The deck can be shuffled,
    drawn from, and reset for a fresh game.
    """

    _CARD_VALUES = [2, 3, 4, 5, 6, 7, 8, 9, 10, 10, 10, 10, 11]

    def __init__(self):
        self.card_deck = self._CARD_VALUES * 4
        self.shuffle()

    def shuffle(self):
        """Shuffle the remaining cards in place."""
        random.shuffle(self.card_deck)

    def draw(self):
        """Remove and return the top card."""
        return self.card_deck.pop()

    def reset(self):
        """Rebuild a fresh shuffled deck and zero the global card counts."""
        self.card_deck = self._CARD_VALUES * 4
        self.shuffle()
        global counting, counting_temp
        counting = [0] * 12
        counting_temp = [0] * 12
class Dealer(object):
    """Blackjack dealer.

    The dealer is dealt two cards, reveals one of them at random, and on
    its own turn hits on 16 or less and sticks on 17 or more.
    """

    def __init__(self):
        # hands: cards held; usable_ace: indices of aces still worth 11
        self.hands = list()
        self.usable_ace = list()

    def hit(self, deck: "Deck"):
        """Draw one card from ``deck``, tracking aces and the global count."""
        card = deck.draw()
        if card == 11:
            self.usable_ace.append(len(self.hands))
        self.hands.append(card)
        refresh_counting(card)

    def show(self):
        """Reveal one of the dealer's cards at random (an ace shows as 1)."""
        card = random.choice(self.hands)
        refresh_counting_temp(card)
        return 1 if card == 11 else card

    def calculate_sum(self):
        """Hand total; demotes one usable ace (11 -> 1) when over 21."""
        total = sum(self.hands)
        if total > 21 and self.usable_ace:
            self.hands[self.usable_ace.pop()] = 1
            total = sum(self.hands)
        return total

    def action(self, deck: "Deck"):
        """Play the dealer's turn: hit below 17, stick at 17 or more."""
        while True:
            total = self.calculate_sum()
            if total >= 17:
                return total
            self.hit(deck)

    def observation(self, action, agent, deck):
        """Apply the player's action and return (done, reward).

        A hit that busts the player ends the episode with reward -1; a
        stick ends the episode and settles the hand against the dealer.
        """
        if action:  # player hits
            agent.hit(deck)
            if agent.calculate_sum() > 21:  # player bust ends the episode
                return True, -1
            return False, 0
        # player sticks: episode over, compare hands
        return True, self.calcuate_reward(agent, deck)

    def calcuate_reward(self, agent, deck):
        """Settle a finished hand: +1 win, -1 loss, 0 push.

        (Method name kept as in the original code base, typo included,
        since callers use it.)
        """
        player_total = agent.calculate_sum()
        if player_total > 21:  # player busts -> loss
            return -1
        dealer_total = self.action(deck)
        if dealer_total > 21:  # dealer busts -> win
            return 1
        if player_total > dealer_total:
            return 1
        if player_total < dealer_total:
            return -1
        return 0  # equal totals -> push

    def reset(self):
        """Clear the hand for a new episode."""
        self.hands = list()
        self.usable_ace = list()
class Agent(object):
    """Monte-Carlo blackjack player.

    Attributes:
        hands: cards currently held.
        usable_ace: indices in ``hands`` of aces still counted as 11.
        Q_table: maps (state, action) -> [mean return, visit count].
    """

    def __init__(self):
        self.hands = list()
        self.usable_ace = list()
        self.Q_table = dict()

    def hit(self, deck: "Deck"):
        """Draw a card from ``deck`` and update the global running counts."""
        new_card = deck.draw()
        refresh_counting(new_card)
        refresh_counting_temp(new_card)
        if new_card == 11:
            self.usable_ace.append(len(self.hands))
        self.hands.append(new_card)

    def calculate_sum(self):
        """Hand total; demotes one usable ace (11 -> 1) when over 21."""
        sums = sum(self.hands)
        if sums > 21 and len(self.usable_ace) > 0:
            self.hands[self.usable_ace.pop()] = 1
            sums = sum(self.hands)
        return sums

    def random_action(self):
        """Uniformly random action: True = hit, False = stick."""
        return random.choice([True, False])

    def policy(self, state):
        """Greedy policy over the Q table.

        Returns the action (True = hit, False = stick) with the larger
        estimated mean return, breaking exact ties at random.

        Bug fix: the original compared the whole ``[mean, count]`` list
        entries, so equal means were deterministically tie-broken by visit
        count instead of randomly (contradicting the documented contract).
        Only the mean return (index 0) is compared now.
        """
        # make sure both state-action entries exist
        for action in (True, False):
            if (state, action) not in self.Q_table:
                self.Q_table[(state, action)] = [0, 0]  # (mean return, visit count)
        q_hit = self.Q_table[(state, True)][0]
        q_stick = self.Q_table[(state, False)][0]
        if q_hit > q_stick:
            return True  # hit
        if q_hit < q_stick:
            return False  # stick
        return self.random_action()  # exact tie: choose at random

    def reset(self):
        """Clear the hand for a new episode (the Q table is kept)."""
        self.hands = list()
        self.usable_ace = list()

    def update_qval(self, episode):
        """Incremental-mean update of the Q table from one finished episode.

        Walks the episode backwards accumulating the return Gt, creating
        new (state, action) entries or folding Gt into the running mean.

        :param episode: list of [state, action, reward] triples.
        """
        total_return = 0
        for state, action, reward in episode[::-1]:  # iterate newest-first
            total_return += reward  # accumulate the return Gt
            if (state, action) not in self.Q_table:
                self.Q_table[(state, action)] = [total_return, 1]  # new entry (Gt, count)
            else:
                prev_mean, prev_count = self.Q_table[(state, action)]
                count = prev_count + 1
                mean = prev_mean + (total_return - prev_mean) / count  # incremental mean
                self.Q_table[(state, action)] = [mean, count]
class MonteCarlo(object):
    """Monte-Carlo control loop: plays blackjack episodes and trains the agent."""
    def generate_episode(self, dealer: "Dealer", agent: "Agent", deck: "Deck"):
        """Play one game and return it as a list of [state, action, reward].

        The state is (player sum, usable-ace flag, dealer's shown card,
        card-count feature from calculate_counting3()).

        :param dealer: Dealer instance.
        :param agent: Agent (player) instance.
        :param deck: shared Deck, topped up when it runs low.
        :return: the episode, oldest step first.
        """
        global counting, counting_temp  # declared but never reassigned here — harmless leftover
        ##
        # top the deck up with a fresh shuffled deck when fewer than 15 cards remain
        if len(deck.card_deck) < 15:
            deck2 = Deck()
            deck.card_deck = deck2.card_deck + deck.card_deck
        ##
        # reset dealer and agent, then deal two cards to each
        dealer.reset()
        agent.reset()
        agent.hit(deck)
        agent.hit(deck)
        dealer.hit(deck)
        dealer.hit(deck)
        done = False  # episode-finished flag
        episode = list()  # collected [state, action, reward] steps
        showed = dealer.show()  # dealer's face-up card
        while not done:
            # generate State, Action, Reward triples until the episode ends
            sums = agent.calculate_sum()
            if sums < 12:
                # below 12 a hit can never bust, so always hit (no decision to learn)
                agent.hit(deck)
                continue
            ##
            # state includes the card-counting feature (counting method 3)
            state = (sums, bool(agent.usable_ace), showed, calculate_counting3())
            ##
            ######## Exploring Start ~!!!!!!!!! :
            if len(episode) == 0:  # first state: pick a random action (exploring start)
                action = agent.random_action()
            else:  # otherwise act greedily w.r.t. the Q table
                action = agent.policy(state)
            done, reward = dealer.observation(action, agent, deck)  # episode-end flag and reward
            # append the generated State, Action, Reward to the episode
            episode.append([state, action, reward])
        ##
        copy_counting()
        ##
        return episode
    def train(self, dealer: "Dealer", agent: "Agent", deck: "Deck", it=10000, verbose=True):
        """Run ``it`` episodes of Monte-Carlo control, updating the agent's Q table.

        :param it: number of episodes to play.
        :param verbose: print recent/cumulative win statistics every 1000 games.
        """
        count = 0
        win = 0
        loss = 0
        draw = 0
        total_win = 0
        total_loss = 0
        total_draw = 0
        result = str()  # unused — kept as-is
        for i in range(it):
            count += 1
            episode = self.generate_episode(dealer, agent, deck)
            agent.update_qval(episode)
            # the last step's reward decides the game outcome
            if episode[-1][-1] == 1:
                win += 1
            elif episode[-1][-1] == 0:
                draw += 1
            else:
                loss += 1
            if count % 1000 == 0 and verbose == True:
                total_win += win
                total_loss += loss
                total_draw += draw
                print("========== Training : Episode ", count, " ===========")
                print("Recent 1000 games win rate :{:.3f}%".format(win / (win + loss) * 100))
                print(" -- 1000 Games WIN :", win, "DRAW :", draw, "LOSS :", loss)
                print("Total win rate : {:.3f}%".format(total_win / (total_win + total_loss) * 100))
                print(" -- TOTAL Games WIN :", total_win, "DRAW :", total_draw, "LOSS :", total_loss)
                win = 0
                loss = 0
                draw = 0
|
[
"random.shuffle",
"copy.deepcopy",
"random.choice"
] |
[((771, 794), 'copy.deepcopy', 'copy.deepcopy', (['counting'], {}), '(counting)\n', (784, 794), False, 'import copy\n'), ((2251, 2281), 'random.shuffle', 'random.shuffle', (['self.card_deck'], {}), '(self.card_deck)\n', (2265, 2281), False, 'import random\n'), ((3487, 3512), 'random.choice', 'random.choice', (['self.hands'], {}), '(self.hands)\n', (3500, 3512), False, 'import random\n'), ((6932, 6960), 'random.choice', 'random.choice', (['[True, False]'], {}), '([True, False])\n', (6945, 6960), False, 'import random\n')]
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
__author__ = "<NAME>"
'''
async web application
'''
import logging;logging.basicConfig(level=logging.INFO)
import asyncio,os,json,time
from datetime import datetime
from aiohttp import web
from jinja2 import Environment,FileSystemLoader
from config import configs
import orm
from coreweb import add_routes,add_static
from handlers import cookie2user,COOKIE_NAME
def init_jinja2(app, **kw):
    """Create the jinja2 Environment and attach it as ``app['__templating__']``.

    Options (autoescape, block/variable delimiters, auto_reload) can be
    overridden via **kw.  ``path`` defaults to the package's ``templates``
    directory; ``filters`` is an optional mapping of template filters.
    """
    logging.info('init jinja2...')
    options = dict(
        autoescape=kw.get('autoescape', True),
        block_start_string=kw.get('block_start_string', '{%'),
        block_end_string=kw.get('block_end_string', '%}'),
        variable_start_string=kw.get('variable_start_string', '{{'),
        variable_end_string=kw.get('variable_end_string', '}}'),
        auto_reload=kw.get('auto_reload', True),
    )
    template_path = kw.get('path')
    if template_path is None:
        template_path = os.path.join(os.path.dirname(os.path.abspath(__file__)), 'templates')
    logging.info('set jinja2 template path:%s' % template_path)
    env = Environment(loader=FileSystemLoader(template_path), **options)
    custom_filters = kw.get('filters')
    if custom_filters is not None:
        for filter_name, filter_func in custom_filters.items():
            env.filters[filter_name] = filter_func
    app['__templating__'] = env
async def logger_factory(app, handler):
    """Middleware factory: log every request's method and path, then delegate."""
    async def logger(request):
        logging.info('Request:%s %s' % (request.method, request.path))
        return await handler(request)
    return logger
async def auth_factory(app, handler):
    """Middleware factory: resolve the signed-in user from the session cookie.

    The resolved user (or None) is stored on ``request.__user__``; any
    unauthenticated or non-admin access to '/manage/*' is redirected to
    the sign-in page.
    """
    async def auth(request):
        logging.info('check user:%s %s' % (request.method, request.path))
        request.__user__ = None
        cookie_str = request.cookies.get(COOKIE_NAME)
        if cookie_str:
            user = await cookie2user(cookie_str)
            if user:
                logging.info('set current user: %s' % user.email)
                request.__user__ = user
        manage_area = request.path.startswith('/manage/')
        if manage_area and (request.__user__ is None or not request.__user__.admin):
            return web.HTTPFound('/signin')
        return await handler(request)
    return auth
async def data_factory(app, handler):
    """Middleware factory: parse a POST body (JSON or form) onto ``request.__data__``."""
    async def parse_data(request):
        if request.method == 'POST':
            content_type = request.content_type
            if content_type.startswith('application/json'):
                request.__data__ = await request.json()
                logging.info('request json:%s' % str(request.__data__))
            elif content_type.startswith('application/x-www-form-urlencode'):
                request.__data__ = await request.post()
                logging.info('request form: %s' % str(request.__data__))
        return await handler(request)
    return parse_data
async def response_factory(app, handler):
    """Middleware factory: convert whatever a handler returns into a web.Response.

    Supported return values, checked in order:
      StreamResponse -> passed through unchanged;
      bytes -> application/octet-stream body;
      str -> 'redirect:...' becomes an HTTP redirect, anything else HTML;
      dict -> rendered through the jinja2 template named by '__template__'
              (or dumped as JSON when no template key is present);
      int / (int, message) in [100, 600) -> status response;
      everything else -> its str() as text/plain.
    """
    async def response(request):
        logging.info('Response handler...')
        r = await handler(request)
        if isinstance(r, web.StreamResponse):
            return r
        if isinstance(r, bytes):
            resp = web.Response(body=r)
            resp.content_type = 'application/octet-stream'
            return resp
        if isinstance(r, str):
            if r.startswith('redirect:'):
                return web.HTTPFound(r[9:])
            resp = web.Response(body=r.encode('utf-8'))
            resp.content_type = 'text/html;charset=utf-8'
            return resp
        if isinstance(r, dict):
            template = r.get('__template__')
            if template is None:
                body = json.dumps(r, ensure_ascii=False, default=lambda o: o.__dict__)
            else:
                body = app['__templating__'].get_template(template).render(**r)
            resp = web.Response(body=body.encode('utf-8'))
            resp.content_type = 'text/html;charset=utf-8'
            return resp
        if isinstance(r, int) and 100 <= r < 600:
            return web.Response(r)
        if isinstance(r, tuple) and len(r) == 2:
            status, message = r
            if isinstance(status, int) and 100 <= status < 600:
                return web.Response(status, str(message))
        # default: plain-text rendering of the value
        resp = web.Response(body=str(r).encode('utf-8'))
        resp.content_type = 'text/plain;charset=utf-8'
        return resp
    return response
def datetime_filter(t):
    """Jinja2 filter: render timestamp ``t`` as a relative Chinese time string.

    Returns '1分钟前' (just now), 'N分钟前', 'N小时前' or 'N天前' for recent
    times, and a full 'Y年M月D日' date for anything a week old or more.

    Bug fix: the days branch used the malformed format string u'%天前'
    (missing the 's' conversion), which raised ValueError for any
    timestamp between one day and one week old.
    """
    delta = int(time.time() - t)
    if delta < 60:
        return u'1分钟前'
    if delta < 3600:
        return u'%s分钟前' % (delta // 60)
    if delta < 86400:
        return u'%s小时前' % (delta // 3600)
    if delta < 604800:
        return u'%s天前' % (delta // 86400)
    dt = datetime.fromtimestamp(t)
    return u'%s年%s月%s日' % (dt.year, dt.month, dt.day)
async def init(loop):
    """Bootstrap the application: DB pool, middlewares, templates, routes, server.

    :param loop: asyncio event loop the server is bound to.
    :return: the listening server object (127.0.0.1:9000).
    """
    await orm.create_pool(loop=loop,host='127.0.0.1',port=3306,user='root',password='<PASSWORD>',db='py_blog')
    app = web.Application(loop = loop,middlewares=[
        logger_factory,auth_factory,response_factory])
    # register the template engine, URL handlers and static files
    init_jinja2(app,filters=dict(datetime=datetime_filter))
    add_routes(app,'handlers')
    add_static(app)
    srv = await loop.create_server(app.make_handler(),'127.0.0.1',9000)
    logging.info('server started at http://127.0.0.1:9000...')
    return srv
# Entry point: create the event loop, start the server via init(), serve forever.
loop = asyncio.get_event_loop();
loop.run_until_complete(init(loop))
loop.run_forever()
|
[
"os.path.abspath",
"aiohttp.web.Response",
"asyncio.get_event_loop",
"handlers.cookie2user",
"logging.basicConfig",
"coreweb.add_static",
"coreweb.add_routes",
"aiohttp.web.HTTPFound",
"time.time",
"json.dumps",
"logging.info",
"jinja2.FileSystemLoader",
"datetime.datetime.fromtimestamp",
"aiohttp.web.Application",
"orm.create_pool"
] |
[((117, 156), 'logging.basicConfig', 'logging.basicConfig', ([], {'level': 'logging.INFO'}), '(level=logging.INFO)\n', (136, 156), False, 'import logging\n'), ((4654, 4678), 'asyncio.get_event_loop', 'asyncio.get_event_loop', ([], {}), '()\n', (4676, 4678), False, 'import asyncio, os, json, time\n'), ((444, 474), 'logging.info', 'logging.info', (['"""init jinja2..."""'], {}), "('init jinja2...')\n", (456, 474), False, 'import logging\n'), ((943, 993), 'logging.info', 'logging.info', (["('set jinja2 template path:%s' % path)"], {}), "('set jinja2 template path:%s' % path)\n", (955, 993), False, 'import logging\n'), ((4095, 4120), 'datetime.datetime.fromtimestamp', 'datetime.fromtimestamp', (['t'], {}), '(t)\n', (4117, 4120), False, 'from datetime import datetime\n'), ((4310, 4402), 'aiohttp.web.Application', 'web.Application', ([], {'loop': 'loop', 'middlewares': '[logger_factory, auth_factory, response_factory]'}), '(loop=loop, middlewares=[logger_factory, auth_factory,\n response_factory])\n', (4325, 4402), False, 'from aiohttp import web\n'), ((4459, 4486), 'coreweb.add_routes', 'add_routes', (['app', '"""handlers"""'], {}), "(app, 'handlers')\n", (4469, 4486), False, 'from coreweb import add_routes, add_static\n'), ((4487, 4502), 'coreweb.add_static', 'add_static', (['app'], {}), '(app)\n', (4497, 4502), False, 'from coreweb import add_routes, add_static\n'), ((4575, 4633), 'logging.info', 'logging.info', (['"""server started at http://127.0.0.1:9000..."""'], {}), "('server started at http://127.0.0.1:9000...')\n", (4587, 4633), False, 'import logging\n'), ((1270, 1332), 'logging.info', 'logging.info', (["('Request:%s %s' % (request.method, request.path))"], {}), "('Request:%s %s' % (request.method, request.path))\n", (1282, 1332), False, 'import logging\n'), ((1483, 1548), 'logging.info', 'logging.info', (["('check user:%s %s' % (request.method, request.path))"], {}), "('check user:%s %s' % (request.method, request.path))\n", (1495, 1548), False, 'import 
logging\n'), ((2615, 2650), 'logging.info', 'logging.info', (['"""Response handler..."""'], {}), "('Response handler...')\n", (2627, 2650), False, 'import logging\n'), ((4202, 4311), 'orm.create_pool', 'orm.create_pool', ([], {'loop': 'loop', 'host': '"""127.0.0.1"""', 'port': '(3306)', 'user': '"""root"""', 'password': '"""<PASSWORD>"""', 'db': '"""py_blog"""'}), "(loop=loop, host='127.0.0.1', port=3306, user='root',\n password='<PASSWORD>', db='py_blog')\n", (4217, 4311), False, 'import orm\n'), ((1020, 1042), 'jinja2.FileSystemLoader', 'FileSystemLoader', (['path'], {}), '(path)\n', (1036, 1042), False, 'from jinja2 import Environment, FileSystemLoader\n'), ((1961, 1985), 'aiohttp.web.HTTPFound', 'web.HTTPFound', (['"""/signin"""'], {}), "('/signin')\n", (1974, 1985), False, 'from aiohttp import web\n'), ((2767, 2787), 'aiohttp.web.Response', 'web.Response', ([], {'body': 'r'}), '(body=r)\n', (2779, 2787), False, 'from aiohttp import web\n'), ((3548, 3563), 'aiohttp.web.Response', 'web.Response', (['r'], {}), '(r)\n', (3560, 3563), False, 'from aiohttp import web\n'), ((3884, 3895), 'time.time', 'time.time', ([], {}), '()\n', (3893, 3895), False, 'import asyncio, os, json, time\n'), ((902, 927), 'os.path.abspath', 'os.path.abspath', (['__file__'], {}), '(__file__)\n', (917, 927), False, 'import asyncio, os, json, time\n'), ((1682, 1705), 'handlers.cookie2user', 'cookie2user', (['cookie_str'], {}), '(cookie_str)\n', (1693, 1705), False, 'from handlers import cookie2user, COOKIE_NAME\n'), ((1743, 1792), 'logging.info', 'logging.info', (["('set current user: %s' % user.email)"], {}), "('set current user: %s' % user.email)\n", (1755, 1792), False, 'import logging\n'), ((2921, 2941), 'aiohttp.web.HTTPFound', 'web.HTTPFound', (['r[9:]'], {}), '(r[9:])\n', (2934, 2941), False, 'from aiohttp import web\n'), ((3167, 3230), 'json.dumps', 'json.dumps', (['r'], {'ensure_ascii': '(False)', 'default': '(lambda o: o.__dict__)'}), '(r, ensure_ascii=False, default=lambda o: 
o.__dict__)\n', (3177, 3230), False, 'import asyncio, os, json, time\n')]
|
#!/usr/bin/python
#!/usr/bin/python3
import sys
import subprocess
import struct
# Dump the file named on the command line as 64-bit hex words, one per line
# (native byte order per struct's default format); a trailing 4-byte word is
# left-padded with zeros, and any other trailing length is an error.
with open(sys.argv[1], "rb") as f:
    while True:
        word = f.read(8)
        if len(word) == 8:
            value, = struct.unpack('Q', word)
            print("%016x" % value)
        elif len(word) == 4:
            value, = struct.unpack('I', word)
            print("00000000%08x" % value)
        elif len(word) == 0:
            exit(0)
        else:
            raise Exception("Bad length")
|
[
"struct.unpack"
] |
[((229, 253), 'struct.unpack', 'struct.unpack', (['"""Q"""', 'word'], {}), "('Q', word)\n", (242, 253), False, 'import struct\n'), ((328, 352), 'struct.unpack', 'struct.unpack', (['"""I"""', 'word'], {}), "('I', word)\n", (341, 352), False, 'import struct\n')]
|
from config import db, ma
class Folder(db.Model):
    """SQLAlchemy model for one row of the ``eagle_db.folder`` table."""
    __tablename__ = "folder"
    __table_args__ = {"schema": "eagle_db"}
    # surrogate primary key
    id = db.Column(db.Integer, primary_key=True)
    # NOTE(review): parent/folder ids are stored as plain strings, presumably
    # external identifiers rather than DB foreign keys — confirm with callers.
    parentFolderId = db.Column(db.String(45))
    folderId = db.Column(db.String(45))
    folderName = db.Column(db.String(100))
    status = db.Column(db.String(50))
    taskId = db.Column(db.String(50))
    def __repr__(self):
        return "<Folder(id={self.id!r}, name={self.folderName!r})>".format(self=self)
class FolderSchema(ma.SQLAlchemyAutoSchema):
    """Marshmallow auto-generated (de)serialization schema for Folder.

    ``include_fk`` keeps foreign-key columns in the schema and
    ``load_instance`` makes ``load()`` return a Folder model instance.
    """
    class Meta:
        model = Folder
        include_fk = True
        load_instance = True
|
[
"config.db.String",
"config.db.Column"
] |
[((140, 179), 'config.db.Column', 'db.Column', (['db.Integer'], {'primary_key': '(True)'}), '(db.Integer, primary_key=True)\n', (149, 179), False, 'from config import db, ma\n'), ((212, 225), 'config.db.String', 'db.String', (['(45)'], {}), '(45)\n', (221, 225), False, 'from config import db, ma\n'), ((253, 266), 'config.db.String', 'db.String', (['(45)'], {}), '(45)\n', (262, 266), False, 'from config import db, ma\n'), ((296, 310), 'config.db.String', 'db.String', (['(100)'], {}), '(100)\n', (305, 310), False, 'from config import db, ma\n'), ((336, 349), 'config.db.String', 'db.String', (['(50)'], {}), '(50)\n', (345, 349), False, 'from config import db, ma\n'), ((375, 388), 'config.db.String', 'db.String', (['(50)'], {}), '(50)\n', (384, 388), False, 'from config import db, ma\n')]
|
import sqlite3
# import win32api
banco = sqlite3.connect('pixClientes.db')
cursor = banco.cursor()
cursor.execute('''
CREATE TABLE IF NOT EXISTS registros (
data_pagamento_pix DATE,
valor_pix NUMERIC (10,2)
);''')
# Function that records one PIX payment.
def inserirPix():
    """Persist (data_pagamento_input, valor_pix_input) into ``registros``.

    Uses a parameterized query instead of the original f-string SQL, so
    user-typed values can no longer break or inject into the statement.
    """
    cursor.execute(
        'INSERT INTO registros (data_pagamento_pix, valor_pix) VALUES (?, ?)',
        (data_pagamento_input, valor_pix_input),
    )
    banco.commit()
##############################################################################################
# Report generator: select the payments recorded on a given date.
def selecionaRegistro():
    """Write a small text report of every PIX payment made on ``data_input``.

    The date is bound as a query parameter (the original interpolated it
    into the SQL string, which allowed SQL injection).  The report header
    is (re)written to rel.txt, then one line is appended per matching row.
    Note: ``arquivo.write`` returns the number of characters written, so
    the ``print(relatorio)`` calls echo character counts — behavior kept
    from the original.
    """
    cursor.execute(
        'SELECT valor_pix FROM registros WHERE data_pagamento_pix = ?;',
        (data_input,),
    )
    with open('rel.txt', 'w') as arquivo:
        relatorio = arquivo.write(f' Novo relatorio ')
        print(relatorio)
    for data_pagamento_pix in cursor.fetchall():
        print("Valor de pagamento pix: R$", data_pagamento_pix)
        with open('rel.txt', 'a') as arquivo:
            relatorio = arquivo.write(f'\n Data selecionada: {data_input}, Valor do pagamento PIX: R${data_pagamento_pix}')
            print(relatorio)
# Simple text menu: 1 = record a payment, 2 = generate a date report.
print (" SELECIONE A OPÇÃO DESEJADA 1 PARA PAGAMENTO E 2 PARA RELATORIO")
op = int (input("O que deseja fazer: "))
if op == 1:
    data_pagamento_input = input ("Qual a data do recebimento? ")
    valor_pix_input = input ("Qual o valor do pagamento? ")
    inserirPix()
    # win32api.MessageBox(0, 'Cadastrado com Sucesso', 'Sucesso')
if op == 2:
    print ('#####################################################################')
    print ('################### DIGITE COMO NO EXMPLO A BAIXO ###################')
    print ('################### EX: 30/09/2021 ##################################')
    print ('#####################################################################')
    # The date must match the stored format exactly, e.g. 30/09/2021.
    data_input = input ("qual data deseja selecionar: ")
    selecionaRegistro()
    # win32api.MessageBox(0, 'O Relatorio Da Data Seleciona Foi Gerado Procure Pelo Arquivo "rel"', 'Relatorio Gerado Com Sucesso')
|
[
"sqlite3.connect"
] |
[((50, 83), 'sqlite3.connect', 'sqlite3.connect', (['"""pixClientes.db"""'], {}), "('pixClientes.db')\n", (65, 83), False, 'import sqlite3\n')]
|
'''
File: attention_cell_sequence.py
Project: component
File Created: Friday, 28th December 2018 6:05:05 pm
Author: xiaofeng (<EMAIL>)
-----
Last Modified: Friday, 28th December 2018 6:50:40 pm
Modified By: xiaofeng (<EMAIL>>)
-----
Copyright 2018.06 - 2018 onion Math, onion Math
'''
import collections
import numpy as np
import tensorflow as tf
from tensorflow.contrib.rnn import RNNCell
# AttentionState = {"att_weight": [], "decoder_out": [], "logits": [], "decoder_state": []}
# Per-step decoder state: the inner RNN cell state plus the previous output vector.
AttentionState = collections.namedtuple("AttentionState", ("cell_state", "output"))
# Filled at run time by the tf.py_func hook in AttCell.step with the latest attention map.
Attention_weight = list()
class AttCell(RNNCell):
    """Bahdanau (additive) attention decoder cell for the errorchecker model.

    Wraps an inner RNN cell; each step attends over the encoder output
    sequence, mixes the resulting context vector into the cell output, and
    projects it to vocabulary logits.
    """
    def __init__(self, name, attention_in, decoder_cell, n_hid, dim_att, dim_o, dropuout,
                 vacab_size, tiles=1, dtype=tf.float32):
        """
        :param name: scope name for the cell.
        :param attention_in: encoder outputs (B, L, E); a tuple (Bi-RNN
            forward/backward outputs) is concatenated on the last axis.
        :param decoder_cell: inner decoder RNN cell.
        :param n_hid: decoder hidden size (D_DIM).
        :param dim_att: attention projection size; an intermediate dimension,
            usually chosen equal to the encoder output dimension.
        :param dim_o: output vector size; the caller passes the same value as
            n_hid (step() relies on this — see the o_W_c/o_W_h projections).
        :param dropuout: dropout argument forwarded to tf.nn.dropout
            (keep probability under the TF1 signature). [sic: parameter name]
        :param vacab_size: decoder vocabulary size, as in a machine
            translation model. [sic: parameter name]
        :param tiles: beam width; > 1 tiles the encoder states per beam
            hypothesis, the default 1 corresponds to greedy decoding.
        :param dtype: output dtype, default tf.float32.
        """
        self._scope_name = name
        # In case of Bi-RNN, concatenate the forward and the backward RNN outputs.
        self._encoder_sequence = attention_in
        if isinstance(attention_in, tuple):
            self._encoder_sequence = tf.concat(attention_in, 2)
        self._cell = decoder_cell  # decoder rnn cell
        self._n_hid = n_hid  # decoder num_unit D_DIM
        self._dim_att = dim_att  # attention size (intermediate projection dim)
        self._dim_o = dim_o  # the dim of output, same as the param n_hid
        self._dropout = dropuout  # dropout rate (keep prob for tf.nn.dropout)
        self._vacab_size = vacab_size  # decoder vocabulary size
        # in the decoding stage beam search needs tiles > 1; the default 1
        # corresponds to the greedy strategy
        self._tiles = tiles
        self._dtype = dtype  # default is tf.float32
        self._length = tf.shape(self._encoder_sequence)[1]  # length of the input sequence
        self._en_dim = self._encoder_sequence.shape[2].value  # dims of the encoder
        self._state_size = AttentionState(self._n_hid, self._dim_o)
        # precomputed encoder-side projection W_a * enc (Bahdanau score term)
        self._att_seq = tf.layers.dense(
            inputs=self._encoder_sequence, units=self._dim_att, use_bias=False, name="att_img")  # B,L,dim_att
    @property
    def state_size(self):
        return self._state_size
    @property
    def output_size(self):
        # because step() returns logits, the output size is the vocabulary size
        return self._vacab_size
    @property
    def output_dtype(self):
        return self._dtype
    def _CalStateBasedSeq(self, name, dim):
        """Returns initial state of dimension specified by dim.

        Computes tanh(mean_t(encoder) @ W + b): a learned projection of the
        time-averaged encoder outputs.
        """
        scope = tf.get_variable_scope()
        with tf.variable_scope(scope):
            # (B*T,L,E_DIM) -->(B*T,E_DIM)
            img_mean = tf.reduce_mean(self._encoder_sequence, axis=1)
            W = tf.get_variable("W_{}_0".format(name), shape=[self._en_dim, dim])
            b = tf.get_variable("b_{}_0".format(name), shape=[1, dim])
            h = tf.tanh(tf.matmul(img_mean, W) + b)
        return h
    def initial_state(self):
        """Initial (cell state, output) pair derived from the encoder outputs."""
        initial_states = self._CalStateBasedSeq('init_state', self._n_hid)  # (B,HID)
        initial_out = self._CalStateBasedSeq('init_out', self._dim_o)  # (B,DIM_O)
        return AttentionState(initial_states, initial_out)
    def _cal_att(self, hid_cur):
        """Bahdanau attention over the encoder sequence for the current hidden state.

        :param hid_cur: current decoder hidden state, shape (B, num_units).
        :return: (attention weights (B, L), context vector (B, E_DIM)).
        """
        with tf.variable_scope('att_cal'):
            if self._tiles > 1:
                # beam search: replicate encoder tensors once per hypothesis
                _encoder_sequence = tf.expand_dims(self._encoder_sequence, axis=1)  # (B,1,L,E_DIM)
                _encoder_sequence = tf.tile(_encoder_sequence, multiples=[
                    1, self._tiles, 1, 1])  # (B,T,L,E_DIM)
                _encoder_sequence = tf.reshape(
                    _encoder_sequence, shape=[-1, self._length, self._en_dim])  # (B*T,L,E_DIM)
                _att_seq = tf.expand_dims(self._att_seq, axis=1)  # B,1,L,dim_att
                _att_seq = tf.tile(_att_seq, multiples=[1, self._tiles, 1, 1])
                _att_seq = tf.reshape(
                    _att_seq, shape=[-1, self._length, self._dim_att])  # (B*T,L,dim_att)
            else:
                _att_seq = self._att_seq
                _encoder_sequence = self._encoder_sequence
            # computes attention over the hidden vector
            # hid_cur shape is [B, num_units]
            # att_h [B, dim_att]
            att_h = tf.layers.dense(inputs=hid_cur, units=self._dim_att, use_bias=False)
            # sums the two contributions
            # att_h --> [B,1,dim_att]
            att_h = tf.expand_dims(att_h, axis=1)
            # Computes the score for the Bahdanau style
            # _att_seq contains the full encoder projection, shape [B, L, dim_att]
            # att_h contains the current decoder hidden state, shape [B, 1, dim_att]
            att = tf.tanh(_att_seq + att_h)  # shape [B,L,dim_att]
            # computes scalar product with beta vector
            # works faster with a matmul than with a * and a tf.reduce_sum
            # For each timestamp its vector of size A from `att` is reduced with `att_beta`
            att_beta = tf.get_variable("att_beta", shape=[self._dim_att, 1], dtype=tf.float32)
            # att_flat shape is [B*L,dim_att]
            att_flat = tf.reshape(att, shape=[-1, self._dim_att])
            # computes score
            e = tf.matmul(att_flat, att_beta)  # shape is [B*L,1]
            e = tf.reshape(e, shape=[-1, self._length])  # shape is [B,L]
            # computes attention weights
            attention = tf.nn.softmax(e)  # shape is (B,L)
            _att = tf.expand_dims(attention, axis=-1)  # (B,L,1)
            # computes the context vector from the attention and encoder_sequence
            contex = tf.reduce_sum(_att * _encoder_sequence, axis=1)  # [B,L,1]*[B,L,E]=(B,E)
        return attention, contex
    def step(self, embeding, attention_cell_state):
        """One decoding step: run the inner cell, attend, and emit logits.

        Args:
            embeding: previous-word embedding, shape (B, EMBEDING_DIM).
            attention_cell_state: AttentionState from the previous step.
        Returns:
            (logits over the vocabulary, next AttentionState).
        """
        _initial_state, output_tm1 = attention_cell_state
        scope = tf.get_variable_scope()
        with tf.variable_scope(scope, initializer=tf.orthogonal_initializer()):
            # input feeding: concatenate the embedding with the previous output
            x = tf.concat([embeding, output_tm1], axis=-1)
            # compute current hidden and cell states
            new_hid, new_cell_state = self._cell.__call__(inputs=x, state=_initial_state)
            _attention, contex = self._cal_att(new_hid)
            def _debug_att(val):
                # side-effect hook: export the attention weights to the
                # module-level Attention_weight list at run time
                global Attention_weight
                Attention_weight = []
                Attention_weight += [val]
                return False
            print_func = tf.py_func(_debug_att, [_attention], [tf.bool])
            # force the hook to run before the attention tensor is consumed
            with tf.control_dependencies(print_func):
                _attention = tf.identity(_attention, name='Attention_weight')
            # output: new_o = tanh(h . W_h + context . W_c); the addition is
            # only shape-consistent because dim_o == n_hid (see __init__)
            o_W_c = tf.get_variable("o_W_c", dtype=tf.float32,
                                    shape=(self._en_dim, self._n_hid))
            o_W_h = tf.get_variable("o_W_h", dtype=tf.float32,
                                    shape=(self._n_hid, self._dim_o))
            new_o = tf.tanh(tf.matmul(new_hid, o_W_h) + tf.matmul(contex, o_W_c))
            new_o = tf.nn.dropout(new_o, self._dropout)
            y_W_o = tf.get_variable("y_W_o", dtype=tf.float32,
                                    shape=(self._dim_o, self._vacab_size))
            # logits for the current step, shape [B, vocab_size]
            logits = tf.matmul(new_o, y_W_o)
            new_state = AttentionState(new_cell_state, new_o)
            return logits, new_state
    def __call__(self, _inputs, _state):
        """
        The dynamic rnn function will use this call function to calculate step by step
        Args:
            _inputs: the embedding of the previous word (training only), decoder sequence
            _state: (AttentionState) (h, o) where h is the hidden state and
                o is the vector used to make the prediction of the previous word
        """
        logits, state = self.step(_inputs, _state)
        return (logits, state)
|
[
"tensorflow.reduce_sum",
"tensorflow.identity",
"tensorflow.get_variable_scope",
"tensorflow.reshape",
"tensorflow.matmul",
"tensorflow.get_variable",
"tensorflow.nn.softmax",
"tensorflow.concat",
"tensorflow.variable_scope",
"tensorflow.orthogonal_initializer",
"tensorflow.control_dependencies",
"tensorflow.reduce_mean",
"tensorflow.tile",
"tensorflow.expand_dims",
"tensorflow.py_func",
"tensorflow.layers.dense",
"tensorflow.shape",
"collections.namedtuple",
"tensorflow.tanh",
"tensorflow.nn.dropout"
] |
[((503, 569), 'collections.namedtuple', 'collections.namedtuple', (['"""AttentionState"""', "('cell_state', 'output')"], {}), "('AttentionState', ('cell_state', 'output'))\n", (525, 569), False, 'import collections\n'), ((2033, 2136), 'tensorflow.layers.dense', 'tf.layers.dense', ([], {'inputs': 'self._encoder_sequence', 'units': 'self._dim_att', 'use_bias': '(False)', 'name': '"""att_img"""'}), "(inputs=self._encoder_sequence, units=self._dim_att,\n use_bias=False, name='att_img')\n", (2048, 2136), True, 'import tensorflow as tf\n'), ((2587, 2610), 'tensorflow.get_variable_scope', 'tf.get_variable_scope', ([], {}), '()\n', (2608, 2610), True, 'import tensorflow as tf\n'), ((6135, 6158), 'tensorflow.get_variable_scope', 'tf.get_variable_scope', ([], {}), '()\n', (6156, 6158), True, 'import tensorflow as tf\n'), ((1087, 1113), 'tensorflow.concat', 'tf.concat', (['attention_in', '(2)'], {}), '(attention_in, 2)\n', (1096, 1113), True, 'import tensorflow as tf\n'), ((1787, 1819), 'tensorflow.shape', 'tf.shape', (['self._encoder_sequence'], {}), '(self._encoder_sequence)\n', (1795, 1819), True, 'import tensorflow as tf\n'), ((2624, 2648), 'tensorflow.variable_scope', 'tf.variable_scope', (['scope'], {}), '(scope)\n', (2641, 2648), True, 'import tensorflow as tf\n'), ((2716, 2762), 'tensorflow.reduce_mean', 'tf.reduce_mean', (['self._encoder_sequence'], {'axis': '(1)'}), '(self._encoder_sequence, axis=1)\n', (2730, 2762), True, 'import tensorflow as tf\n'), ((3345, 3373), 'tensorflow.variable_scope', 'tf.variable_scope', (['"""att_cal"""'], {}), "('att_cal')\n", (3362, 3373), True, 'import tensorflow as tf\n'), ((4350, 4418), 'tensorflow.layers.dense', 'tf.layers.dense', ([], {'inputs': 'hid_cur', 'units': 'self._dim_att', 'use_bias': '(False)'}), '(inputs=hid_cur, units=self._dim_att, use_bias=False)\n', (4365, 4418), True, 'import tensorflow as tf\n'), ((4518, 4547), 'tensorflow.expand_dims', 'tf.expand_dims', (['att_h'], {'axis': '(1)'}), '(att_h, axis=1)\n', (4532, 
4547), True, 'import tensorflow as tf\n'), ((4796, 4821), 'tensorflow.tanh', 'tf.tanh', (['(_att_seq + att_h)'], {}), '(_att_seq + att_h)\n', (4803, 4821), True, 'import tensorflow as tf\n'), ((5105, 5176), 'tensorflow.get_variable', 'tf.get_variable', (['"""att_beta"""'], {'shape': '[self._dim_att, 1]', 'dtype': 'tf.float32'}), "('att_beta', shape=[self._dim_att, 1], dtype=tf.float32)\n", (5120, 5176), True, 'import tensorflow as tf\n'), ((5246, 5288), 'tensorflow.reshape', 'tf.reshape', (['att'], {'shape': '[-1, self._dim_att]'}), '(att, shape=[-1, self._dim_att])\n', (5256, 5288), True, 'import tensorflow as tf\n'), ((5334, 5363), 'tensorflow.matmul', 'tf.matmul', (['att_flat', 'att_beta'], {}), '(att_flat, att_beta)\n', (5343, 5363), True, 'import tensorflow as tf\n'), ((5400, 5439), 'tensorflow.reshape', 'tf.reshape', (['e'], {'shape': '[-1, self._length]'}), '(e, shape=[-1, self._length])\n', (5410, 5439), True, 'import tensorflow as tf\n'), ((5523, 5539), 'tensorflow.nn.softmax', 'tf.nn.softmax', (['e'], {}), '(e)\n', (5536, 5539), True, 'import tensorflow as tf\n'), ((5577, 5611), 'tensorflow.expand_dims', 'tf.expand_dims', (['attention'], {'axis': '(-1)'}), '(attention, axis=-1)\n', (5591, 5611), True, 'import tensorflow as tf\n'), ((5725, 5772), 'tensorflow.reduce_sum', 'tf.reduce_sum', (['(_att * _encoder_sequence)'], {'axis': '(1)'}), '(_att * _encoder_sequence, axis=1)\n', (5738, 5772), True, 'import tensorflow as tf\n'), ((6255, 6297), 'tensorflow.concat', 'tf.concat', (['[embeding, output_tm1]'], {'axis': '(-1)'}), '([embeding, output_tm1], axis=-1)\n', (6264, 6297), True, 'import tensorflow as tf\n'), ((6706, 6753), 'tensorflow.py_func', 'tf.py_func', (['_debug_att', '[_attention]', '[tf.bool]'], {}), '(_debug_att, [_attention], [tf.bool])\n', (6716, 6753), True, 'import tensorflow as tf\n'), ((6906, 6983), 'tensorflow.get_variable', 'tf.get_variable', (['"""o_W_c"""'], {'dtype': 'tf.float32', 'shape': '(self._en_dim, self._n_hid)'}), "('o_W_c', 
dtype=tf.float32, shape=(self._en_dim, self._n_hid))\n", (6921, 6983), True, 'import tensorflow as tf\n'), ((7040, 7116), 'tensorflow.get_variable', 'tf.get_variable', (['"""o_W_h"""'], {'dtype': 'tf.float32', 'shape': '(self._n_hid, self._dim_o)'}), "('o_W_h', dtype=tf.float32, shape=(self._n_hid, self._dim_o))\n", (7055, 7116), True, 'import tensorflow as tf\n'), ((7255, 7290), 'tensorflow.nn.dropout', 'tf.nn.dropout', (['new_o', 'self._dropout'], {}), '(new_o, self._dropout)\n', (7268, 7290), True, 'import tensorflow as tf\n'), ((7311, 7397), 'tensorflow.get_variable', 'tf.get_variable', (['"""y_W_o"""'], {'dtype': 'tf.float32', 'shape': '(self._dim_o, self._vacab_size)'}), "('y_W_o', dtype=tf.float32, shape=(self._dim_o, self.\n _vacab_size))\n", (7326, 7397), True, 'import tensorflow as tf\n'), ((7539, 7562), 'tensorflow.matmul', 'tf.matmul', (['new_o', 'y_W_o'], {}), '(new_o, y_W_o)\n', (7548, 7562), True, 'import tensorflow as tf\n'), ((3443, 3489), 'tensorflow.expand_dims', 'tf.expand_dims', (['self._encoder_sequence'], {'axis': '(1)'}), '(self._encoder_sequence, axis=1)\n', (3457, 3489), True, 'import tensorflow as tf\n'), ((3543, 3603), 'tensorflow.tile', 'tf.tile', (['_encoder_sequence'], {'multiples': '[1, self._tiles, 1, 1]'}), '(_encoder_sequence, multiples=[1, self._tiles, 1, 1])\n', (3550, 3603), True, 'import tensorflow as tf\n'), ((3678, 3747), 'tensorflow.reshape', 'tf.reshape', (['_encoder_sequence'], {'shape': '[-1, self._length, self._en_dim]'}), '(_encoder_sequence, shape=[-1, self._length, self._en_dim])\n', (3688, 3747), True, 'import tensorflow as tf\n'), ((3814, 3851), 'tensorflow.expand_dims', 'tf.expand_dims', (['self._att_seq'], {'axis': '(1)'}), '(self._att_seq, axis=1)\n', (3828, 3851), True, 'import tensorflow as tf\n'), ((3896, 3947), 'tensorflow.tile', 'tf.tile', (['_att_seq'], {'multiples': '[1, self._tiles, 1, 1]'}), '(_att_seq, multiples=[1, self._tiles, 1, 1])\n', (3903, 3947), True, 'import tensorflow as tf\n'), ((3975, 
4036), 'tensorflow.reshape', 'tf.reshape', (['_att_seq'], {'shape': '[-1, self._length, self._dim_att]'}), '(_att_seq, shape=[-1, self._length, self._dim_att])\n', (3985, 4036), True, 'import tensorflow as tf\n'), ((6771, 6806), 'tensorflow.control_dependencies', 'tf.control_dependencies', (['print_func'], {}), '(print_func)\n', (6794, 6806), True, 'import tensorflow as tf\n'), ((6837, 6885), 'tensorflow.identity', 'tf.identity', (['_attention'], {'name': '"""Attention_weight"""'}), "(_attention, name='Attention_weight')\n", (6848, 6885), True, 'import tensorflow as tf\n'), ((2940, 2962), 'tensorflow.matmul', 'tf.matmul', (['img_mean', 'W'], {}), '(img_mean, W)\n', (2949, 2962), True, 'import tensorflow as tf\n'), ((6209, 6236), 'tensorflow.orthogonal_initializer', 'tf.orthogonal_initializer', ([], {}), '()\n', (6234, 6236), True, 'import tensorflow as tf\n'), ((7181, 7206), 'tensorflow.matmul', 'tf.matmul', (['new_hid', 'o_W_h'], {}), '(new_hid, o_W_h)\n', (7190, 7206), True, 'import tensorflow as tf\n'), ((7209, 7233), 'tensorflow.matmul', 'tf.matmul', (['contex', 'o_W_c'], {}), '(contex, o_W_c)\n', (7218, 7233), True, 'import tensorflow as tf\n')]
|
import matplotlib.pyplot as plt
def add_cuts(ax, cuts, N):
    """Outline each consecutive cut interval as a black square on *ax*.

    For every pair of consecutive boundaries ``(c_last, c)`` the four sides of
    the square with corners ``(c_last, c_last)`` and ``(c, c)`` are drawn.

    Args:
        ax: matplotlib-like axes exposing ``plot(xs, ys, color)``.
        cuts: increasing list of cut positions; MUTATED in place — ``N`` is
            appended when it is not already the final boundary.
        N: matrix size, used as the closing boundary.
    """
    # Guard against an empty list (the original indexed cuts[-1] blindly) and
    # make sure the final boundary closes the last square.
    if not cuts or cuts[-1] != N:
        cuts.append(N)
    c_last = 0
    for c in cuts:
        color = 'k'
        # Four sides of the square spanning [c_last, c] on both axes.
        ax.plot([c, c], [c, c_last], color)
        ax.plot([c, c_last], [c, c], color)
        ax.plot([c, c_last], [c_last, c_last], color)
        ax.plot([c_last, c_last], [c, c_last], color)
        c_last = c
def mplot(a, title=None, boxes=None):
    """Render matrix *a* as a heat map with a fixed color scale (0.1–0.8).

    Args:
        a: square 2-D array-like to display.
        title: optional title drawn above the axes.
        boxes: optional list of cut positions; when given, black square
            outlines are drawn via ``add_cuts`` (a copy is passed because
            ``add_cuts`` mutates its list).
    """
    # BUGFIX: the 'spectral' colormap was removed in matplotlib 2.2 and now
    # raises ValueError; 'nipy_spectral' is the renamed equivalent.
    cmap = 'nipy_spectral'
    fig, ax = plt.subplots(figsize=(4, 4))
    ax.pcolor(a, vmin=0.1, vmax=0.8, cmap=cmap)
    N = len(a)
    if boxes is not None:
        add_cuts(ax, cuts=boxes.copy(), N=N)
    ax.set_xlim([0, N])
    ax.set_ylim([0, N])
    if title is not None:
        ax.set_title(title, size=20)
    plt.show()
def mplot2(a, title=None, boxes=None, cmap='nipy_spectral'):
    """Render matrix *a* as a heat map with an auto-scaled color range.

    Like ``mplot`` but without the fixed vmin/vmax and with a selectable
    colormap.

    Args:
        a: square 2-D array-like to display.
        title: optional title drawn above the axes.
        boxes: optional list of cut positions, outlined via ``add_cuts``.
        cmap: matplotlib colormap name. BUGFIX: the old default 'spectral'
            was removed in matplotlib 2.2; 'nipy_spectral' is its rename.
    """
    fig, ax = plt.subplots(figsize=(4, 4))
    ax.pcolor(a, cmap=cmap)
    N = len(a)
    if boxes is not None:
        add_cuts(ax, cuts=boxes.copy(), N=N)
    ax.set_xlim([0, N])
    ax.set_ylim([0, N])
    if title is not None:
        ax.set_title(title, size=20)
    plt.show()
def plot_modules(modules, G):
    """Draw graph *G* with each node colored by its module assignment.

    Args:
        modules: mapping from node id to module id.
        G: a networkx graph.
    """
    import networkx as nx
    node_colors = [modules[node] for node in G.nodes()]
    nx.draw(G, node_color=node_colors)
    plt.show()
def get_graph(filename):
    """Return an undirected graph read from a whitespace-separated edge list.

    Each non-blank line of *filename* is parsed as a tuple of integer node
    ids and added as an edge.

    Args:
        filename: path to the edge-list file.
    Returns:
        networkx.Graph built from the file's edges.
    """
    # BUGFIX: networkx was only imported inside plot_modules(), so this
    # function raised NameError at module scope.
    import networkx as nx
    G = nx.Graph()
    edges = []
    # Context manager guarantees the file is closed even on a parse error.
    with open(filename) as f:
        for line in f:
            fields = line.split()
            # BUGFIX: the original tested `if map(int, ...)`, which is always
            # truthy under Python 3, so blank lines produced empty tuples.
            if fields:
                edges.append(tuple(int(tok) for tok in fields))
    G.add_edges_from(edges)
    return G
|
[
"networkx.Graph",
"matplotlib.pyplot.subplots",
"networkx.draw",
"matplotlib.pyplot.show"
] |
[((472, 500), 'matplotlib.pyplot.subplots', 'plt.subplots', ([], {'figsize': '(4, 4)'}), '(figsize=(4, 4))\n', (484, 500), True, 'import matplotlib.pyplot as plt\n'), ((750, 760), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (758, 760), True, 'import matplotlib.pyplot as plt\n'), ((833, 861), 'matplotlib.pyplot.subplots', 'plt.subplots', ([], {'figsize': '(4, 4)'}), '(figsize=(4, 4))\n', (845, 861), True, 'import matplotlib.pyplot as plt\n'), ((1091, 1101), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (1099, 1101), True, 'import matplotlib.pyplot as plt\n'), ((1248, 1277), 'networkx.draw', 'nx.draw', (['G'], {'node_color': 'values'}), '(G, node_color=values)\n', (1255, 1277), True, 'import networkx as nx\n'), ((1282, 1292), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (1290, 1292), True, 'import matplotlib.pyplot as plt\n'), ((1372, 1382), 'networkx.Graph', 'nx.Graph', ([], {}), '()\n', (1380, 1382), True, 'import networkx as nx\n')]
|
#! -*- encoding:utf-8 -*-
"""
@File : run_dapt_task.py
@Author : <NAME>
@Contact : <EMAIL>
@Dscpt :
"""
import argparse
import logging
import os
import time
from pprint import pprint
from transformers import AlbertTokenizer, BertTokenizer
from dapt_task.data import *
from dapt_task.controller import DomainAdaptivePreTrain
from model.DAPTModels import BertForPreTraining, BertForMaskedLM
from utils.common import mkdir_if_notexist, result_dump, set_seed
# Module-level logger that echoes INFO-and-above records to the console.
logger = logging.getLogger("run_task")
console = logging.StreamHandler()
console.setLevel(logging.INFO)
console.setFormatter(
    logging.Formatter('%(asctime)s %(name)s - %(message)s', datefmt=r"%y/%m/%d %H:%M")
)
logger.addHandler(console)
def select_tokenizer(args):
    """Pick a pretrained tokenizer class based on the model/vocab path.

    "albert" is checked first so albert paths are not caught by the broader
    "bert" substring match. Returns None (after logging an error) when
    neither substring is present.
    """
    path = args.PTM_model_vocab_dir
    if "albert" in path:
        return AlbertTokenizer.from_pretrained(path)
    if "bert" in path:
        return BertTokenizer.from_pretrained(path)
    logger.error("No Tokenizer Matched")
def select_task(args):
    """Resolve model class, processor class and model kwargs from the task name.

    ``args.task_name`` is expected to look like "<Processor>_<Model>", e.g.
    "Webster_BertMLM". The per-model ``args_list`` names which parsed CLI
    arguments get forwarded to the model as keyword arguments.
    """
    processors = {
        "Webster": Webster_Processor,
        "OMCS": OMCS_Processor,
    }
    models = {
        "BertPT": (BertForPreTraining, []),
        "BertMLM": (BertForMaskedLM, []),
    }
    processor_name, model_name = args.task_name.split('_', maxsplit=1)
    ModelClass, args_list = models[model_name]
    ProcessorClass = processors[processor_name]
    model_kwargs = {name: vars(args)[name] for name in args_list}
    return ModelClass, ProcessorClass, model_kwargs
def set_result(args):
    '''
    Set the result directory according to the task, then configure file
    logging and dump the parsed arguments to JSON.

    Training missions get a timestamped sub-directory under args.result_dir;
    eval/predict reuse args.saved_model_dir.
    '''
    if args.mission in ('train', 'conti-train'):
        task_str = time.strftime(r'%H%M-%b%d') + f'_seed{args.seed}'
        # BUGFIX: task names are capitalised (e.g. "Webster_Bert"), so the
        # old case-sensitive `'webster' in args.task_name` never matched.
        if 'webster' in args.task_name.lower():
            # BUGFIX: the parser defines --Webster_version; args.DAPT_version
            # (the old flag name) does not exist and raised AttributeError.
            version = getattr(args, 'Webster_version', None)
            if version is None:
                version = getattr(args, 'DAPT_version', None)
            task_str += f'_websterv{version}'
        args.result_dir = os.path.join(
            args.result_dir,
            os.path.basename(args.PTM_model_vocab_dir),
            args.task_name,
            task_str, ''
        )
        args.task_str = task_str
    else:
        args.task_str = 'predict or dev'
        args.result_dir = args.saved_model_dir
    mkdir_if_notexist(args.result_dir)
    # set logging: route the module logger to a file inside the result dir too
    log_file_dir = os.path.join(args.result_dir, 'task_log.txt')
    logging.basicConfig(
        filename = log_file_dir,
        filemode = 'a',
        level = logging.INFO,
        format = '%(asctime)s %(name)s - %(message)s',
        datefmt = r"%y/%m/%d %H:%M"
    )
    # Persist the full argument namespace for reproducibility.
    result_dump(args, args.__dict__, 'task_args.json')
    pprint(args.__dict__)
def main(args):
    """End-to-end driver: prepare outputs, build the tokenizer/model pair,
    then run the requested mission (train / conti-train / eval / predict)."""
    start = time.time()
    logger.info(f"start in {start}")
    # Resolve the result directory, file logging and random seed first.
    set_result(args)
    set_seed(args)
    # load data and preprocess
    logger.info(f"select tokenizer and model for task {args.task_name}")
    tokenizer = select_tokenizer(args)
    model, Processor, model_kwargs = select_task(args)
    # Build the controller, hand it the model, then the processed data.
    controller = DomainAdaptivePreTrain(args, model_kwargs)
    controller.load_model(model)
    controller.load_data(Processor, tokenizer)
    # Dispatch on the requested mission.
    if args.mission == 'eval':
        controller.run_dev()
    elif args.mission == 'predict':
        controller.predict_test()
    elif args.mission in ('train', 'conti-train'):
        controller.train()
    end = time.time()
    logger.info(f"task total run time {end-start:.2f} second")
if __name__ == "__main__":
parser = argparse.ArgumentParser()
# other param
parser.add_argument('--task_name', type=str, help="model & processor will be selected according to task")
parser.add_argument('--mission', type=str, choices=['train', 'eval', 'predict', 'conti-train'])
parser.add_argument('--fp16', type=int, default=0)
parser.add_argument('--gpu_ids', type=str, default='-1')
parser.add_argument('--seed', type=int, default=42)
parser.add_argument('--save_mode', type=str, choices=['epoch', 'step', 'end'], default='epoch')
parser.add_argument('--print_step', type=int, default=250)
parser.add_argument('--evltest_batch_size', type=int, default=8)
parser.add_argument('--eval_after_tacc', type=float, default=0)
parser.add_argument('--clip_batch_off', action='store_true', default=False, help="clip batch to shortest case")
# task-specific hyper param
parser.add_argument('--Webster_version', type=str, default=None)
parser.add_argument('--nsp', action='store_true', default=False)
parser.add_argument('--mask_pct', type=float, default=0.15)
parser.add_argument('--max_seq_len', type=int, default=40)
parser.add_argument('--mask_method', type=str, choices=['random'])
# train hyper param
parser.add_argument('--train_batch_size', type=int, default=4)
parser.add_argument('--gradient_accumulation_steps', type=int, default=1)
parser.add_argument('--num_train_epochs', type=int, default=5)
parser.add_argument('--learning_rate', type=float, default=2e-5)
parser.add_argument('--warmup_proportion', type=float, default=0.1)
parser.add_argument('--weight_decay', type=float, default=0.1)
# data param
parser.add_argument('--dataset_dir', type=str, default='../DATA')
parser.add_argument('--result_dir', type=str, default=None)
parser.add_argument('--saved_model_dir', type=str, default=None)
parser.add_argument('--PTM_model_vocab_dir', type=str, default=None)
args_str = r"""
--task_name Webster_Bert
--mission train
--fp16 0
--gpu_ids 0
--seed 42
--save_mode epoch
--print_step 50
--evltest_batch_size 12
--eval_after_tacc 0.8
--DAPT_version 1.0
--mask_pct 0.20
--max_seq_len 40
--mask_method random
--train_batch_size 2
--gradient_accumulation_steps 8
--num_train_epochs 2
--learning_rate 2e-5
--warmup_proportion 0.1
--weight_decay 0.1
--dataset_dir ..\DATA
--result_dir ..\DATA\result
--saved_model_dir D:\CODE\Python\Transformers-Models\bert-base-cased
--PTM_model_vocab_dir D:\CODE\Python\Transformers-Models\bert-base-cased
"""
args = parser.parse_args()
# args = parser.parse_args(args_str.split())
main(args)
|
[
"argparse.ArgumentParser",
"logging.basicConfig",
"dapt_task.controller.DomainAdaptivePreTrain",
"transformers.AlbertTokenizer.from_pretrained",
"os.path.basename",
"logging.StreamHandler",
"time.strftime",
"time.time",
"logging.Formatter",
"utils.common.result_dump",
"utils.common.mkdir_if_notexist",
"transformers.BertTokenizer.from_pretrained",
"pprint.pprint",
"utils.common.set_seed",
"os.path.join",
"logging.getLogger"
] |
[((486, 515), 'logging.getLogger', 'logging.getLogger', (['"""run_task"""'], {}), "('run_task')\n", (503, 515), False, 'import logging\n'), ((526, 549), 'logging.StreamHandler', 'logging.StreamHandler', ([], {}), '()\n', (547, 549), False, 'import logging\n'), ((593, 679), 'logging.Formatter', 'logging.Formatter', (['"""%(asctime)s %(name)s - %(message)s"""'], {'datefmt': '"""%y/%m/%d %H:%M"""'}), "('%(asctime)s %(name)s - %(message)s', datefmt=\n '%y/%m/%d %H:%M')\n", (610, 679), False, 'import logging\n'), ((2217, 2251), 'utils.common.mkdir_if_notexist', 'mkdir_if_notexist', (['args.result_dir'], {}), '(args.result_dir)\n', (2234, 2251), False, 'from utils.common import mkdir_if_notexist, result_dump, set_seed\n'), ((2290, 2335), 'os.path.join', 'os.path.join', (['args.result_dir', '"""task_log.txt"""'], {}), "(args.result_dir, 'task_log.txt')\n", (2302, 2335), False, 'import os\n'), ((2340, 2491), 'logging.basicConfig', 'logging.basicConfig', ([], {'filename': 'log_file_dir', 'filemode': '"""a"""', 'level': 'logging.INFO', 'format': '"""%(asctime)s %(name)s - %(message)s"""', 'datefmt': '"""%y/%m/%d %H:%M"""'}), "(filename=log_file_dir, filemode='a', level=logging.INFO,\n format='%(asctime)s %(name)s - %(message)s', datefmt='%y/%m/%d %H:%M')\n", (2359, 2491), False, 'import logging\n'), ((2555, 2605), 'utils.common.result_dump', 'result_dump', (['args', 'args.__dict__', '"""task_args.json"""'], {}), "(args, args.__dict__, 'task_args.json')\n", (2566, 2605), False, 'from utils.common import mkdir_if_notexist, result_dump, set_seed\n'), ((2610, 2631), 'pprint.pprint', 'pprint', (['args.__dict__'], {}), '(args.__dict__)\n', (2616, 2631), False, 'from pprint import pprint\n'), ((2661, 2672), 'time.time', 'time.time', ([], {}), '()\n', (2670, 2672), False, 'import time\n'), ((2735, 2749), 'utils.common.set_seed', 'set_seed', (['args'], {}), '(args)\n', (2743, 2749), False, 'from utils.common import mkdir_if_notexist, result_dump, set_seed\n'), ((3003, 3045), 
'dapt_task.controller.DomainAdaptivePreTrain', 'DomainAdaptivePreTrain', (['args', 'model_kwargs'], {}), '(args, model_kwargs)\n', (3025, 3045), False, 'from dapt_task.controller import DomainAdaptivePreTrain\n'), ((3383, 3394), 'time.time', 'time.time', ([], {}), '()\n', (3392, 3394), False, 'import time\n'), ((3500, 3525), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {}), '()\n', (3523, 3525), False, 'import argparse\n'), ((827, 884), 'transformers.AlbertTokenizer.from_pretrained', 'AlbertTokenizer.from_pretrained', (['args.PTM_model_vocab_dir'], {}), '(args.PTM_model_vocab_dir)\n', (858, 884), False, 'from transformers import AlbertTokenizer, BertTokenizer\n'), ((945, 1000), 'transformers.BertTokenizer.from_pretrained', 'BertTokenizer.from_pretrained', (['args.PTM_model_vocab_dir'], {}), '(args.PTM_model_vocab_dir)\n', (974, 1000), False, 'from transformers import AlbertTokenizer, BertTokenizer\n'), ((1740, 1766), 'time.strftime', 'time.strftime', (['"""%H%M-%b%d"""'], {}), "('%H%M-%b%d')\n", (1753, 1766), False, 'import time\n'), ((1970, 2012), 'os.path.basename', 'os.path.basename', (['args.PTM_model_vocab_dir'], {}), '(args.PTM_model_vocab_dir)\n', (1986, 2012), False, 'import os\n')]
|
"""
Script goal,
Open land cover data and build a simple cover map
"""
#==============================================================================
__title__ = "LandCover"
__author__ = "<NAME>"
__version__ = "v1.0(12.03.2021)"
__email__ = "<EMAIL>"
#==============================================================================
# +++++ Check the paths and set ex path to fireflies folder +++++
import os
import sys
# Make sure the working directory is the "fireflies" project root so the
# relative data paths used below resolve correctly.
if not os.getcwd().endswith("fireflies"):
	if "fireflies" in os.getcwd():
		head, marker, _ = os.getcwd().partition("fireflies")
		os.chdir(head + marker)
	else:
		raise OSError(
			"This script was called from an unknown path. CWD can not be set"
		)
sys.path.append(os.getcwd())
#==============================================================================
# Import packages
import numpy as np
import pandas as pd
# import geopandas as gpd
import argparse
import datetime as dt
import warnings as warn
import xarray as xr
import bottleneck as bn
import scipy as sp
import glob
import shutil
import time
from dask.diagnostics import ProgressBar
import rasterio
from collections import OrderedDict
# from scipy import stats
# from numba import jit
# from netCDF4 import Dataset, num2date, date2num
# from scipy import stats
# import statsmodels.stats.multitest as smsM
# Import plotting and colorpackages
import matplotlib.pyplot as plt
import matplotlib.colors as mpc
import matplotlib as mpl
import palettable
# import seaborn as sns
import matplotlib as mpl
import cartopy.crs as ccrs
import cartopy.feature as cpf
import matplotlib.ticker as mticker
from cartopy.mpl.gridliner import LONGITUDE_FORMATTER, LATITUDE_FORMATTER
from mpl_toolkits.axes_grid1 import make_axes_locatable
import socket
# ========== Import my dunctions ==========
import myfunctions.corefunctions as cf
import myfunctions.PlotFunctions as pf
# import cartopy.feature as cpf
# from cartopy.mpl.gridliner import LONGITUDE_FORMATTER, LATITUDE_FORMATTER
# # Import debugging packages
# import pdb as ipdb
# import ipdb
print("numpy version : ", np.__version__)
print("pandas version : ", pd.__version__)
print("xarray version : ", xr.__version__)
#==============================================================================
def main():
	"""Build per-resolution "forest zone" mask netcdf files.

	For each target resolution, rasterised land-cover / ecological-zone
	layers are opened, coarsened onto that resolution's Hansen mask grid,
	staged as temporary netcdfs, then merged and written to
	``./data/LandCover/Regridded_forestzone_<res>.nc``. MODIS output is
	additionally regridded onto the esacci and COPERN_BA grids.
	NOTE(review): contains interactive ``breakpoint()`` calls — this script
	is intended to be run by hand, not unattended.
	"""
	# ========== Setup the broad infomation
	region = "SIBERIA"
	# box[0:2] are longitudes, box[2:4] latitudes (see the .sel calls below).
	box = [-10.0, 180, 40, 70]
	# ========== Load in the different data from glc ==========
	path = "./data/LandCover/"
	# years = [2000, 2010]
	legendfn = [f"{path}glc2000_v1_1/Tiff/Global_Legend.csv", f"{path}gez2010/Lookup.csv", f"{path}Dinerstein_Aggregated/Lookup.csv", f"{path}Dinerstein_Aggregated/Lookup.csv"]
	# geotiffn = [f"{path}glc2000_v1_1/Tiff/glc2000_v1_1.tif", f"{path}gez2010/OUTPUT.tif", f"{path}gez2010/IsBorealV3.tif"]
	# Datasets in Down share the MODIS-resolution source masks.
	Down = ["MODIS", "esacci", "COPERN_BA"]
	res = ["MODIS", "GFED", "TerraClimate", ] #"COPERN_BA", "esacci",
	force = False
	for dsres in res:
		fnout = f"{path}Regridded_forestzone_{dsres}.nc"
		# Skip resolutions that already have an output unless force is set.
		if os.path.isfile(fnout) and not force:
			print(f"{dsres} has an existing file")
			continue
		else:
			print(dsres)
		dataname = ["LandCover", "GlobalEcologicalZones", "DinersteinRegions", "BorealMask"]
		if dsres in Down:
			datares = "MODIS"
		else:
			datares = dsres
		geotiffn = [f"{path}glc2000_v1_1/Tiff/glc2000_v1_1.tif", f"{path}Dinerstein_Aggregated/Masks/Boreal_climatic_{datares}.tif", f"{path}Dinerstein_Aggregated/Masks/BorealEco_2017_{datares}.tif", f"{path}Dinerstein_Aggregated/Masks/Boreal_buf_{datares}.tif"]
		mskfn = "./data/masks/broad/Hansen_GFC-2018-v1.6_%s_ProcessedTo%s.nc" % (region, dsres)
		# Hansen mask defines the target grid for this resolution.
		ds_msk = xr.open_dataset(mskfn).sel(dict(latitude=slice(box[3], box[2]), longitude=slice(box[0], box[1]))).chunk()
		mask = ds_msk.datamask
		# out_dic = OrderedDict()
		outlist = []
		key_dic = OrderedDict()
		for dsnx, legfn, tiffn in zip(dataname, legendfn, geotiffn):
			print(dsnx)
			# +++++ open the dataarray +++++
			key_dic[dsnx] = pd.read_csv(legfn)
			da = xr.open_rasterio(tiffn).transpose("y", "x", "band").rename({"x":"longitude", "y":"latitude", "band":"time"}).sel(dict(latitude=slice(box[3], box[2]), longitude=slice(box[0], box[1]))).chunk()
			da["time"] = [pd.Timestamp("2018-12-31")]
			# Coarsen only when the raster is finer than the target mask grid.
			if da.longitude.shape > ds_msk.longitude.shape:
				print(da.latitude.shape[0], ds_msk.latitude.shape[0])
				print ("Coarsnening data started at: ", pd.Timestamp.now())
				# breakpoint()
				# Coarsen/ downscale
				latscale = int(da.latitude.shape[0] / ds_msk.latitude.shape[0])
				lonscale = int(da.longitude.shape[0] / ds_msk.longitude.shape[0])
				da = da.coarsen(latitude=latscale, longitude=lonscale, boundary ="pad").median()
				da = da.round()
			da = da.reindex_like(mask, method="nearest")
			# Stage each layer as a temporary netcdf; computed lazily below.
			delay = xr.Dataset({dsnx:da}).to_netcdf(f"/tmp/{dsres}_{dsnx}.nc", format = 'NETCDF4', unlimited_dims = ["time"], compute=False)
			print(f"Creating temp netcdf for {dsres} {dsnx} at: {pd.Timestamp.now()}")
			with ProgressBar():
				delay.compute()
			# out_dic[dsnx]
			outlist.append(f"/tmp/{dsres}_{dsnx}.nc")
			da = None
		# ========== get the FAO climate zones ==========
		# ds = xr.Dataset(out_dic)
		# Merge the staged layers into one dataset for this resolution.
		ds = xr.open_mfdataset(outlist).transpose('time', 'latitude', 'longitude')
		# breakpoint()
		GlobalAttributes(ds, dsres, fnameout=fnout)
		delayed_obj = ds.to_netcdf(fnout, format = 'NETCDF4', unlimited_dims = ["time"], compute=False)
		print(f"Starting write of {dsres} data at: {pd.Timestamp.now()}")
		with ProgressBar():
			results = delayed_obj.compute()
		print(f"{dsres} completed at: {pd.Timestamp.now()}")
		# MODIS output is reused as the source for the finer-grid datasets.
		if dsres == "MODIS":
			for dsin in ["esacci", "COPERN_BA"]:
				print(dsin)
				mskfn = "./data/masks/broad/Hansen_GFC-2018-v1.6_%s_ProcessedTo%s.nc" % (region, dsin)
				ds_msk = xr.open_dataset(mskfn).sel(dict(latitude=slice(box[3], box[2]), longitude=slice(box[0], box[1]))).chunk()
				mask = ds_msk.datamask
				ds_out = ds.reindex_like(mask, method="nearest")
				fnout = f"{path}Regridded_forestzone_{dsin}.nc"
				delayed_obj = ds_out.to_netcdf(fnout, format = 'NETCDF4', unlimited_dims = ["time"], compute=False)
				print(f"Starting write of {dsin} data at: {pd.Timestamp.now()}")
				with ProgressBar():
					results = delayed_obj.compute()
		# breakpoint()
	# Interactive inspection of the produced files below.
	breakpoint()
	for dsn in ["TerraClimate","GFED", "MODIS", "esacci", "COPERN_BA"]:
		print(dsn)
		mskfn = "./data/masks/broad/Hansen_GFC-2018-v1.6_%s_ProcessedTo%s.nc" % (region, dsn)
		ds_msk = xr.open_dataset(mskfn).sel(dict(latitude=slice(box[3], box[2]), longitude=slice(box[0], box[1])))
		# ds_mod = ds.reindex_like(ds_msk, method="nearest")
		# mask = ds_msk.datamask
		# # mask = ds_msk.datamask.reindex_like(ds, method="nearest")
		# # boreal mask
		# title = "FAO Boreal Zone"
		# plotmaker(ds_mod.Boreal, title, mask)
		# # Tree cover mask
		# title = "Needle Leaf Tree species"
		# plotmaker(((ds_mod.LandCover == 4)+(ds_mod.LandCover == 5)), title, mask)
		# title = "Needle Leaf and mixed fores"
		# plotmaker(((ds_mod.LandCover == 6)+(ds_mod.LandCover == 4)+(ds_mod.LandCover == 5)), title, mask)
		# title = "Broadleaf forest"
		# plotmaker(((ds_mod.LandCover == 1)+(ds_mod.LandCover == 2)+(ds_mod.LandCover == 3)), title, mask)
		breakpoint()
	breakpoint()
#==============================================================================
# def _lookupkeys():
# dataname = ["LandCover", "GlobalEcologicalZones", "DinersteinRegions", "BorealMask"]
# legendfn = ([f"{path}glc2000_v1_1/Tiff/Global_Legend.csv", f"{path}gez2010/Lookup.csv", f"{path}Dinerstein_Aggregated/Lookup.csv", f"{path}Dinerstein_Aggregated/Lookup.csv"])
# for nm, lfn in zip(dataname, legendfn)
def GlobalAttributes(ds, dsn, fnameout=""):
"""
Creates the global attributes for the netcdf file that is being written
these attributes come from :
https://www.unidata.ucar.edu/software/thredds/current/netcdf-java/metadata/DataDiscoveryAttConvention.html
args
ds: xarray ds
Dataset containing the infomation im intepereting
fnout: str
filename out
returns:
attributes Ordered Dictionary cantaining the attribute infomation
"""
# ========== Create the ordered dictionary ==========
if ds is None:
attr = OrderedDict()
else:
attr = ds.attrs
# fetch the references for my publications
# pubs = puplications()
# ========== Fill the Dictionary ==========
# ++++++++++ Highly recomended ++++++++++
attr["FileName"] = fnameout
attr["title"] = "Datamasks"
attr["summary"] = "BorealForestCovermaks_%sData" % (dsn)
attr["Conventions"] = "CF-1.7"
# ++++++++++ Data Provinance ++++++++++
attr["history"] = "%s: Netcdf file created using %s (%s):%s by %s. FRI caluculated using %s data" % (
str(pd.Timestamp.now()), __title__, __file__, __version__, __author__, dsn)
if not ds is None:
attr["history"] += ds.history
attr["creator_name"] = __author__
attr["creator_url"] = "ardenburrell.com"
attr["creator_email"] = __email__
attr["Institution"] = "Woodwell"
attr["date_created"] = str(pd.Timestamp.now())
ds.longitude.attrs['units'] = 'degrees_east'
ds.latitude.attrs['units'] = 'degrees_north'
# ++++++++++ Netcdf Summary infomation ++++++++++
# attr["time_coverage_start"] = str(dt.datetime(ds['time.year'].min(), 1, 1))
# attr["time_coverage_end"] = str(dt.datetime(ds['time.year'].max() , 12, 31))
return attr
def _mode(da):
vals = sp.stats.mode(da, axis=None, nan_policy="omit")
return vals[0][0]
def plotmaker(ds_in, title, mask):
	"""Plot *ds_in* (where mask == 1) on an orthographic map of the study box.

	Args:
		ds_in: xarray DataArray with latitude/longitude coords to display.
		title: string drawn as the axes title.
		mask: array aligned with ds_in; cells where mask != 1 are blanked.
	"""
	# breakpoint()
	# Centre the orthographic projection on the middle of the study box
	# (lat 40-70, lon -10-180, matching `box` in main()).
	latiMid=np.mean([70.0, 40.0])
	longMid=np.mean([-10.0, 180.0])
	fig, ax = plt.subplots(1, 1, figsize=(20,12), subplot_kw={'projection': ccrs.Orthographic(longMid, latiMid)})
	ds_in.where(mask==1).plot(transform=ccrs.PlateCarree(), ax=ax)
	coast = cpf.GSHHSFeature(scale="intermediate")
	# Feature stacking is controlled by zorder: land below the data, coast and
	# ocean drawn on top to hide off-shore artefacts.
	ax.add_feature(cpf.BORDERS, linestyle='--', zorder=102)
	ax.add_feature(cpf.LAND, facecolor='dimgrey', alpha=1, zorder=0)
	ax.add_feature(coast, zorder=101, alpha=0.5)
	# coast_50m = cpf.GSHHSFeature(scale="high")
	ax.add_feature(cpf.OCEAN, facecolor="w", alpha=1, zorder=100)
	ax.set_title(f"{title}")
	plt.show()
#==============================================================================
# Script entry point: run the land-cover regridding workflow.
if __name__ == '__main__':
	main()
|
[
"pandas.Timestamp",
"matplotlib.pyplot.show",
"scipy.stats.mode",
"os.getcwd",
"pandas.read_csv",
"xarray.open_rasterio",
"cartopy.feature.GSHHSFeature",
"cartopy.crs.PlateCarree",
"xarray.open_dataset",
"dask.diagnostics.ProgressBar",
"xarray.Dataset",
"os.path.isfile",
"numpy.mean",
"pandas.Timestamp.now",
"collections.OrderedDict",
"cartopy.crs.Orthographic",
"xarray.open_mfdataset",
"os.chdir"
] |
[((705, 716), 'os.getcwd', 'os.getcwd', ([], {}), '()\n', (714, 716), False, 'import os\n'), ((9710, 9757), 'scipy.stats.mode', 'sp.stats.mode', (['da'], {'axis': 'None', 'nan_policy': '"""omit"""'}), "(da, axis=None, nan_policy='omit')\n", (9723, 9757), True, 'import scipy as sp\n'), ((9855, 9876), 'numpy.mean', 'np.mean', (['[70.0, 40.0]'], {}), '([70.0, 40.0])\n', (9862, 9876), True, 'import numpy as np\n'), ((9887, 9910), 'numpy.mean', 'np.mean', (['[-10.0, 180.0]'], {}), '([-10.0, 180.0])\n', (9894, 9910), True, 'import numpy as np\n'), ((10100, 10138), 'cartopy.feature.GSHHSFeature', 'cpf.GSHHSFeature', ([], {'scale': '"""intermediate"""'}), "(scale='intermediate')\n", (10116, 10138), True, 'import cartopy.feature as cpf\n'), ((10451, 10461), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (10459, 10461), True, 'import matplotlib.pyplot as plt\n'), ((503, 514), 'os.getcwd', 'os.getcwd', ([], {}), '()\n', (512, 514), False, 'import os\n'), ((570, 587), 'os.chdir', 'os.chdir', (['(p1 + p2)'], {}), '(p1 + p2)\n', (578, 587), False, 'import os\n'), ((3897, 3910), 'collections.OrderedDict', 'OrderedDict', ([], {}), '()\n', (3908, 3910), False, 'from collections import OrderedDict\n'), ((8401, 8414), 'collections.OrderedDict', 'OrderedDict', ([], {}), '()\n', (8412, 8414), False, 'from collections import OrderedDict\n'), ((9334, 9352), 'pandas.Timestamp.now', 'pd.Timestamp.now', ([], {}), '()\n', (9350, 9352), True, 'import pandas as pd\n'), ((448, 459), 'os.getcwd', 'os.getcwd', ([], {}), '()\n', (457, 459), False, 'import os\n'), ((3061, 3082), 'os.path.isfile', 'os.path.isfile', (['fnout'], {}), '(fnout)\n', (3075, 3082), False, 'import os\n'), ((4048, 4066), 'pandas.read_csv', 'pd.read_csv', (['legfn'], {}), '(legfn)\n', (4059, 4066), True, 'import pandas as pd\n'), ((5602, 5615), 'dask.diagnostics.ProgressBar', 'ProgressBar', ([], {}), '()\n', (5613, 5615), False, 'from dask.diagnostics import ProgressBar\n'), ((10063, 10081), 
'cartopy.crs.PlateCarree', 'ccrs.PlateCarree', ([], {}), '()\n', (10079, 10081), True, 'import cartopy.crs as ccrs\n'), ((532, 543), 'os.getcwd', 'os.getcwd', ([], {}), '()\n', (541, 543), False, 'import os\n'), ((4298, 4324), 'pandas.Timestamp', 'pd.Timestamp', (['"""2018-12-31"""'], {}), "('2018-12-31')\n", (4310, 4324), True, 'import pandas as pd\n'), ((5071, 5084), 'dask.diagnostics.ProgressBar', 'ProgressBar', ([], {}), '()\n', (5082, 5084), False, 'from dask.diagnostics import ProgressBar\n'), ((5287, 5313), 'xarray.open_mfdataset', 'xr.open_mfdataset', (['outlist'], {}), '(outlist)\n', (5304, 5313), True, 'import xarray as xr\n'), ((6618, 6640), 'xarray.open_dataset', 'xr.open_dataset', (['mskfn'], {}), '(mskfn)\n', (6633, 6640), True, 'import xarray as xr\n'), ((8977, 8995), 'pandas.Timestamp.now', 'pd.Timestamp.now', ([], {}), '()\n', (8993, 8995), True, 'import pandas as pd\n'), ((9987, 10022), 'cartopy.crs.Orthographic', 'ccrs.Orthographic', (['longMid', 'latiMid'], {}), '(longMid, latiMid)\n', (10004, 10022), True, 'import cartopy.crs as ccrs\n'), ((4482, 4500), 'pandas.Timestamp.now', 'pd.Timestamp.now', ([], {}), '()\n', (4498, 4500), True, 'import pandas as pd\n'), ((4862, 4884), 'xarray.Dataset', 'xr.Dataset', (['{dsnx: da}'], {}), '({dsnx: da})\n', (4872, 4884), True, 'import xarray as xr\n'), ((5572, 5590), 'pandas.Timestamp.now', 'pd.Timestamp.now', ([], {}), '()\n', (5588, 5590), True, 'import pandas as pd\n'), ((5690, 5708), 'pandas.Timestamp.now', 'pd.Timestamp.now', ([], {}), '()\n', (5706, 5708), True, 'import pandas as pd\n'), ((6332, 6345), 'dask.diagnostics.ProgressBar', 'ProgressBar', ([], {}), '()\n', (6343, 6345), False, 'from dask.diagnostics import ProgressBar\n'), ((3703, 3725), 'xarray.open_dataset', 'xr.open_dataset', (['mskfn'], {}), '(mskfn)\n', (3718, 3725), True, 'import xarray as xr\n'), ((5040, 5058), 'pandas.Timestamp.now', 'pd.Timestamp.now', ([], {}), '()\n', (5056, 5058), True, 'import pandas as pd\n'), ((6300, 6318), 
'pandas.Timestamp.now', 'pd.Timestamp.now', ([], {}), '()\n', (6316, 6318), True, 'import pandas as pd\n'), ((5904, 5926), 'xarray.open_dataset', 'xr.open_dataset', (['mskfn'], {}), '(mskfn)\n', (5919, 5926), True, 'import xarray as xr\n'), ((4086, 4109), 'xarray.open_rasterio', 'xr.open_rasterio', (['tiffn'], {}), '(tiffn)\n', (4102, 4109), True, 'import xarray as xr\n')]
|
import pygame
import anime
import random
# Demo script: press "=" to spawn a randomly coloured spring-animated square,
# "-" to remove the oldest one, and close the window to quit.
pygame.init()
screen = pygame.display.set_mode((800, 600))

squares = []
# Off-screen points that squares animate from / to when (un)mounting.
entrance = {
    'x' : -50,
    'y' : 300
}
# Renamed from `exit`, which shadowed the builtin.
exit_point = {
    'x' : 850,
    'y' : 300
}
episode = anime.Episode(entrance, exit_point)

playing = True
while playing:
    # NOTE: the original also polled pygame.mouse.get_pos() into unused
    # locals (mx, my); that dead code has been removed.
    for e in pygame.event.get():
        if e.type == pygame.QUIT:
            playing = False
        elif e.type == pygame.KEYDOWN:
            if e.key == pygame.K_EQUALS:
                tmp_surf = pygame.Surface((100, 100))
                tmp_surf.fill((random.randint(0, 255),
                               random.randint(0, 255),
                               random.randint(0, 255)))
                tmp_anime = anime.AnimeBase(tmp_surf, random.randint(200, 600),
                                            random.randint(50, 550))
                tmp_anime.set_filter('x', anime.filter.Spring(0.1, 0.5))
                tmp_anime.set_filter('y', anime.filter.Spring(0.1, 0.5))
                squares.append(tmp_anime)
            elif e.key == pygame.K_MINUS:
                if squares:
                    squares.pop(0)
    screen.fill((255, 255, 255))
    episode.update(squares)
    # Draw squares animating out first, then the live ones on top.
    episode.render(episode.get_unmounting(), screen)
    episode.render(squares, screen)
    pygame.display.flip()
    pygame.time.wait(10)
pygame.quit()
|
[
"pygame.quit",
"anime.filter.Spring",
"pygame.Surface",
"random.randint",
"pygame.event.get",
"pygame.display.set_mode",
"pygame.init",
"pygame.display.flip",
"pygame.time.wait",
"pygame.mouse.get_pos",
"anime.Episode"
] |
[((42, 55), 'pygame.init', 'pygame.init', ([], {}), '()\n', (53, 55), False, 'import pygame\n'), ((66, 101), 'pygame.display.set_mode', 'pygame.display.set_mode', (['(800, 600)'], {}), '((800, 600))\n', (89, 101), False, 'import pygame\n'), ((209, 238), 'anime.Episode', 'anime.Episode', (['entrance', 'exit'], {}), '(entrance, exit)\n', (222, 238), False, 'import anime\n'), ((1342, 1355), 'pygame.quit', 'pygame.quit', ([], {}), '()\n', (1353, 1355), False, 'import pygame\n'), ((283, 305), 'pygame.mouse.get_pos', 'pygame.mouse.get_pos', ([], {}), '()\n', (303, 305), False, 'import pygame\n'), ((319, 337), 'pygame.event.get', 'pygame.event.get', ([], {}), '()\n', (335, 337), False, 'import pygame\n'), ((1294, 1315), 'pygame.display.flip', 'pygame.display.flip', ([], {}), '()\n', (1313, 1315), False, 'import pygame\n'), ((1320, 1340), 'pygame.time.wait', 'pygame.time.wait', (['(10)'], {}), '(10)\n', (1336, 1340), False, 'import pygame\n'), ((508, 534), 'pygame.Surface', 'pygame.Surface', (['(100, 100)'], {}), '((100, 100))\n', (522, 534), False, 'import pygame\n'), ((755, 779), 'random.randint', 'random.randint', (['(200)', '(600)'], {}), '(200, 600)\n', (769, 779), False, 'import random\n'), ((821, 844), 'random.randint', 'random.randint', (['(50)', '(550)'], {}), '(50, 550)\n', (835, 844), False, 'import random\n'), ((888, 917), 'anime.filter.Spring', 'anime.filter.Spring', (['(0.1)', '(0.5)'], {}), '(0.1, 0.5)\n', (907, 917), False, 'import anime\n'), ((961, 990), 'anime.filter.Spring', 'anime.filter.Spring', (['(0.1)', '(0.5)'], {}), '(0.1, 0.5)\n', (980, 990), False, 'import anime\n'), ((566, 588), 'random.randint', 'random.randint', (['(0)', '(255)'], {}), '(0, 255)\n', (580, 588), False, 'import random\n'), ((621, 643), 'random.randint', 'random.randint', (['(0)', '(255)'], {}), '(0, 255)\n', (635, 643), False, 'import random\n'), ((676, 698), 'random.randint', 'random.randint', (['(0)', '(255)'], {}), '(0, 255)\n', (690, 698), False, 'import random\n')]
|
import torch
import matplotlib as mpl
mpl.use('agg')
import numpy as np
import os
import scipy.integrate as integrate
import matplotlib.pyplot as plt
import matplotlib.cm as cm
from matplotlib.lines import Line2D
from matplotlib import rc
def plotPred(args, t, xT, uPred, uTarget, epoch, bidx=0):
    '''
    Plots a single prediction contour figure with three stacked panels:
    target field (top), model prediction (middle) and absolute error
    (bottom). The first two panels share one color scale; the error panel
    gets its own.

    Args:
        args: namespace; only ``args.pred_dir`` (output directory) is used.
        t: 1D array of time coordinates (horizontal plot extent).
        xT: 1D array of spatial coordinates (vertical plot extent).
        uPred: 2D array of predictions; plotted transposed with time on the
            horizontal axis, so presumably [time x space] -- confirm with caller.
        uTarget: 2D array of targets, same layout as uPred.
        epoch (int): current training epoch, used in the output file name.
        bidx (int): batch index, used in the output file name.
    '''
    plt.close("all")
    # Create figure
    mpl.rcParams['font.family'] = ['serif'] # default is sans-serif
    rc('text', usetex=False)
    fig = plt.figure(figsize=(15, 8), dpi=150)
    ax = []
    # Three rows; 14 of 15 grid columns per panel, leaving a strip for colorbars.
    ax.append(plt.subplot2grid((3, 15), (0, 0), colspan=14))
    ax.append(plt.subplot2grid((3, 15), (1, 0), colspan=14))
    ax.append(plt.subplot2grid((3, 15), (2, 0), colspan=14))
    cmap = "inferno"
    c0 = ax[1].imshow(uPred.T, interpolation='nearest', cmap=cmap, aspect='auto', extent=[t[0],t[-1],xT[0],xT[-1]])
    # Color limits come from the prediction and are reused for the target
    # panel so both share one scale.
    c_max = np.max(uPred.T)
    c_min = np.min(uPred.T)
    c0.set_clim(vmin=c_min, vmax=c_max)
    c0 = ax[0].imshow(uTarget.T, interpolation='nearest', cmap=cmap, aspect='auto', extent=[t[0],t[-1],xT[0],xT[-1]])
    c0.set_clim(vmin=c_min, vmax=c_max)
    # Place a shared colorbar just right of the top two panels, spanning both.
    p0 = ax[0].get_position().get_points().flatten()
    p1 = ax[1].get_position().get_points().flatten()
    ax_cbar = fig.add_axes([p1[2]+0.015, p1[1], 0.020, p0[3]-p1[1]])
    ticks = np.linspace(0, 1, 5)
    tickLabels = np.linspace(c_min, c_max, 5)
    tickLabels = ["{:02.2f}".format(t0) for t0 in tickLabels]
    cbar = mpl.colorbar.ColorbarBase(ax_cbar, cmap=cmap, orientation='vertical', ticks=ticks)
    cbar.set_ticklabels(tickLabels)
    # Absolute-error panel with its own (viridis) color scale.
    cmap = "viridis"
    c0 = ax[2].imshow(np.abs(uPred.T - uTarget.T), interpolation='nearest', cmap=cmap, aspect='auto', extent=[t[0],t[-1],xT[0],xT[-1]])
    p0 = ax[2].get_position().get_points().flatten()
    ax_cbar = fig.add_axes([p0[2]+0.015, p0[1], 0.020, p0[3]-p0[1]])
    ticks = np.linspace(0, 1, 5)
    tickLabels = np.linspace(c0.norm.vmin, c0.norm.vmax, 5)
    tickLabels = ["{:.2e}".format(t0) for t0 in tickLabels]
    cbar = mpl.colorbar.ColorbarBase(ax_cbar, cmap=cmap, orientation='vertical', ticks=ticks)
    cbar.set_ticklabels(tickLabels)
    ax[0].set_ylabel('x', fontsize=14)
    ax[1].set_ylabel('x', fontsize=14)
    ax[2].set_ylabel('x', fontsize=14)
    ax[2].set_xlabel('t', fontsize=14)
    # File name encodes epoch and batch index, e.g. burgerPred-epoch005-0.png.
    file_name = args.pred_dir+"/burgerPred-epoch{0:03d}-{1:01d}.png".format(epoch, bidx)
    plt.savefig(file_name, bbox_inches='tight')
def plotSamples(args, t, xT, uPred, uTarget, epoch=0):
    '''
    Plots prediction contours of Bayesian model samples on a square grid of
    panels, with the target field in the top-left panel and one panel per
    model sample after it. All panels share one color scale.

    Args:
        args: namespace; only ``args.pred_dir`` (output directory) is used.
        t: 1D array of time coordinates (horizontal plot extent).
        xT: 1D array of spatial coordinates (vertical plot extent).
        uPred: array of samples, indexed [sample, ...]; each sample is
            plotted transposed like in plotPred.
        uTarget: target field; truncated to uPred.shape[1] rows before plotting.
        epoch (int): current training epoch, used in the output file name.
    '''
    plt.close("all")
    # Create figure
    mpl.rcParams['font.family'] = ['serif'] # default is sans-serif
    # rc('text', usetex=True)
    # One panel per sample plus one for the target, laid out nrow x nrow.
    # NOTE(review): this assumes n_sample is a perfect square -- extra
    # samples beyond nrow*nrow - 1 are silently dropped.
    n_sample = uPred.shape[0] + 1
    nrow = int(np.sqrt(n_sample))
    ncol = 6*nrow + 1
    fig = plt.figure(figsize=(20, 10), dpi=150)
    ax = []
    for i in range(nrow):
        for j in range(nrow):
            ax.append(plt.subplot2grid((nrow, ncol), (i, 6*j), colspan=5))
    cmap = "inferno"
    # Target in top left
    uTarget = uTarget[:uPred.shape[1]]
    c0 = ax[0].imshow(uTarget.T, interpolation='nearest', cmap=cmap, aspect='auto', extent=[t[0],t[-1],xT[0],xT[-1]])
    # Color limits from the prediction samples are shared by every panel.
    c_max = np.max(uPred.T)
    c_min = np.min(uPred.T)
    c0.set_clim(vmin=c_min, vmax=c_max)
    # Prediction samples
    for i in range(1, len(ax)):
        c0 = ax[i].imshow(uPred[i-1].T, interpolation='nearest', cmap=cmap, aspect='auto', extent=[t[0],t[-1],xT[0],xT[-1]])
        c0.set_clim(vmin=c_min, vmax=c_max)
    # Single colorbar to the right of the last panel column, spanning rows.
    p0 = ax[nrow-1].get_position().get_points().flatten()
    p1 = ax[-1].get_position().get_points().flatten()
    ax_cbar = fig.add_axes([p1[2]+0.01, p1[1], 0.020, p0[3]-p1[1]])
    ticks = np.linspace(0, 1, 5)
    tickLabels = np.linspace(c_min, c_max, 5)
    tickLabels = ["{:02.2f}".format(t0) for t0 in tickLabels]
    cbar = mpl.colorbar.ColorbarBase(ax_cbar, cmap=cmap, orientation='vertical', ticks=ticks)
    cbar.set_ticklabels(tickLabels)
    # Axis labels
    for i in range(len(ax)-nrow, len(ax)):
        ax[i].set_xlabel('t')
    for i in range(nrow):
        ax[int(i*nrow)].set_ylabel('x')
    file_name = args.pred_dir+"/burgerSamples_epoch{:03d}.png".format(epoch)
    plt.savefig(file_name, bbox_inches='tight')
def calcR2score(uPred, uTarget, epoch=0, save=True):
    '''
    Calculates the total and the time dependent batch-averaged R2 score
    (coefficient of determination) between predictions and targets.

    Following:
    https://en.wikipedia.org/wiki/Coefficient_of_determination

    Args:
        uPred (torch.Tensor): [b x t x d] tensor of model predictions
        uTarget (torch.Tensor): [b x t x d] tensor of corresponding target values
        epoch (int): current training epoch (for logging)
        save (bool): if True, append the scores as CSV rows (prefixed with
            the epoch) to 'r2score.dat' and 'r2score_time.dat' in the
            working directory

    Returns:
        tuple: (r2_avg, r2_time) where r2_avg is a 0-d numpy array holding
        the batch-averaged total R2 score and r2_time is a [t] numpy array
        with the batch-averaged R2 score per time-step.
    '''
    # Total average: flatten each batch element and compare it against its
    # own mean value.
    ybar = torch.mean(uTarget.view(uTarget.size(0), -1), dim=-1)
    ss_tot = torch.sum(torch.pow(uTarget - ybar.unsqueeze(-1).unsqueeze(-1), 2).view(uTarget.size(0), -1), dim=-1)
    ss_res = torch.sum(torch.pow(uTarget - uPred, 2).view(uTarget.size(0), -1), dim=-1)
    r2_avg = torch.mean(1 - ss_res / ss_tot).cpu().numpy()
    # Time dependent: reduce over the feature dimension only, then average
    # the per-time-step scores over the batch.
    ybar = torch.mean(uTarget, dim=-1)
    ss_tot = torch.sum(torch.pow(uTarget - ybar.unsqueeze(-1), 2), dim=-1)
    ss_res = torch.sum(torch.pow(uTarget - uPred, 2), dim=-1)
    r2_time = torch.mean(1 - ss_res / ss_tot, dim=0).cpu().numpy()
    if save:
        # Context managers guarantee the files are closed even if savetxt
        # raises (the original used open/close pairs with no finally).
        with open('r2score_time.dat', 'ab') as f:
            np.savetxt(f, np.insert(r2_time, 0, epoch)[np.newaxis, :], delimiter=',')
        with open('r2score.dat', 'ab') as f:
            np.savetxt(f, np.insert(r2_avg, 0, epoch)[np.newaxis, :], delimiter=',')
    return r2_avg, r2_time
|
[
"torch.mean",
"matplotlib.rc",
"numpy.abs",
"matplotlib.pyplot.close",
"matplotlib.pyplot.subplot2grid",
"numpy.insert",
"numpy.max",
"matplotlib.use",
"matplotlib.pyplot.figure",
"numpy.min",
"numpy.linspace",
"torch.pow",
"matplotlib.colorbar.ColorbarBase",
"matplotlib.pyplot.savefig",
"numpy.sqrt"
] |
[((38, 52), 'matplotlib.use', 'mpl.use', (['"""agg"""'], {}), "('agg')\n", (45, 52), True, 'import matplotlib as mpl\n'), ((357, 373), 'matplotlib.pyplot.close', 'plt.close', (['"""all"""'], {}), "('all')\n", (366, 373), True, 'import matplotlib.pyplot as plt\n'), ((467, 491), 'matplotlib.rc', 'rc', (['"""text"""'], {'usetex': '(False)'}), "('text', usetex=False)\n", (469, 491), False, 'from matplotlib import rc\n'), ((503, 539), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(15, 8)', 'dpi': '(150)'}), '(figsize=(15, 8), dpi=150)\n', (513, 539), True, 'import matplotlib.pyplot as plt\n'), ((885, 900), 'numpy.max', 'np.max', (['uPred.T'], {}), '(uPred.T)\n', (891, 900), True, 'import numpy as np\n'), ((913, 928), 'numpy.min', 'np.min', (['uPred.T'], {}), '(uPred.T)\n', (919, 928), True, 'import numpy as np\n'), ((1316, 1336), 'numpy.linspace', 'np.linspace', (['(0)', '(1)', '(5)'], {}), '(0, 1, 5)\n', (1327, 1336), True, 'import numpy as np\n'), ((1354, 1382), 'numpy.linspace', 'np.linspace', (['c_min', 'c_max', '(5)'], {}), '(c_min, c_max, 5)\n', (1365, 1382), True, 'import numpy as np\n'), ((1456, 1543), 'matplotlib.colorbar.ColorbarBase', 'mpl.colorbar.ColorbarBase', (['ax_cbar'], {'cmap': 'cmap', 'orientation': '"""vertical"""', 'ticks': 'ticks'}), "(ax_cbar, cmap=cmap, orientation='vertical', ticks\n =ticks)\n", (1481, 1543), True, 'import matplotlib as mpl\n'), ((1867, 1887), 'numpy.linspace', 'np.linspace', (['(0)', '(1)', '(5)'], {}), '(0, 1, 5)\n', (1878, 1887), True, 'import numpy as np\n'), ((1905, 1947), 'numpy.linspace', 'np.linspace', (['c0.norm.vmin', 'c0.norm.vmax', '(5)'], {}), '(c0.norm.vmin, c0.norm.vmax, 5)\n', (1916, 1947), True, 'import numpy as np\n'), ((2019, 2106), 'matplotlib.colorbar.ColorbarBase', 'mpl.colorbar.ColorbarBase', (['ax_cbar'], {'cmap': 'cmap', 'orientation': '"""vertical"""', 'ticks': 'ticks'}), "(ax_cbar, cmap=cmap, orientation='vertical', ticks\n =ticks)\n", (2044, 2106), True, 'import matplotlib as mpl\n'), 
((2389, 2432), 'matplotlib.pyplot.savefig', 'plt.savefig', (['file_name'], {'bbox_inches': '"""tight"""'}), "(file_name, bbox_inches='tight')\n", (2400, 2432), True, 'import matplotlib.pyplot as plt\n'), ((2563, 2579), 'matplotlib.pyplot.close', 'plt.close', (['"""all"""'], {}), "('all')\n", (2572, 2579), True, 'import matplotlib.pyplot as plt\n'), ((2799, 2836), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(20, 10)', 'dpi': '(150)'}), '(figsize=(20, 10), dpi=150)\n', (2809, 2836), True, 'import matplotlib.pyplot as plt\n'), ((3197, 3212), 'numpy.max', 'np.max', (['uPred.T'], {}), '(uPred.T)\n', (3203, 3212), True, 'import numpy as np\n'), ((3225, 3240), 'numpy.min', 'np.min', (['uPred.T'], {}), '(uPred.T)\n', (3231, 3240), True, 'import numpy as np\n'), ((3701, 3721), 'numpy.linspace', 'np.linspace', (['(0)', '(1)', '(5)'], {}), '(0, 1, 5)\n', (3712, 3721), True, 'import numpy as np\n'), ((3739, 3767), 'numpy.linspace', 'np.linspace', (['c_min', 'c_max', '(5)'], {}), '(c_min, c_max, 5)\n', (3750, 3767), True, 'import numpy as np\n'), ((3841, 3928), 'matplotlib.colorbar.ColorbarBase', 'mpl.colorbar.ColorbarBase', (['ax_cbar'], {'cmap': 'cmap', 'orientation': '"""vertical"""', 'ticks': 'ticks'}), "(ax_cbar, cmap=cmap, orientation='vertical', ticks\n =ticks)\n", (3866, 3928), True, 'import matplotlib as mpl\n'), ((4200, 4243), 'matplotlib.pyplot.savefig', 'plt.savefig', (['file_name'], {'bbox_inches': '"""tight"""'}), "(file_name, bbox_inches='tight')\n", (4211, 4243), True, 'import matplotlib.pyplot as plt\n'), ((5065, 5092), 'torch.mean', 'torch.mean', (['uTarget'], {'dim': '(-1)'}), '(uTarget, dim=-1)\n', (5075, 5092), False, 'import torch\n'), ((566, 611), 'matplotlib.pyplot.subplot2grid', 'plt.subplot2grid', (['(3, 15)', '(0, 0)'], {'colspan': '(14)'}), '((3, 15), (0, 0), colspan=14)\n', (582, 611), True, 'import matplotlib.pyplot as plt\n'), ((627, 672), 'matplotlib.pyplot.subplot2grid', 'plt.subplot2grid', (['(3, 15)', '(1, 0)'], {'colspan': 
'(14)'}), '((3, 15), (1, 0), colspan=14)\n', (643, 672), True, 'import matplotlib.pyplot as plt\n'), ((688, 733), 'matplotlib.pyplot.subplot2grid', 'plt.subplot2grid', (['(3, 15)', '(2, 0)'], {'colspan': '(14)'}), '((3, 15), (2, 0), colspan=14)\n', (704, 733), True, 'import matplotlib.pyplot as plt\n'), ((1619, 1646), 'numpy.abs', 'np.abs', (['(uPred.T - uTarget.T)'], {}), '(uPred.T - uTarget.T)\n', (1625, 1646), True, 'import numpy as np\n'), ((2748, 2765), 'numpy.sqrt', 'np.sqrt', (['n_sample'], {}), '(n_sample)\n', (2755, 2765), True, 'import numpy as np\n'), ((5191, 5220), 'torch.pow', 'torch.pow', (['(uTarget - uPred)', '(2)'], {}), '(uTarget - uPred, 2)\n', (5200, 5220), False, 'import torch\n'), ((2927, 2980), 'matplotlib.pyplot.subplot2grid', 'plt.subplot2grid', (['(nrow, ncol)', '(i, 6 * j)'], {'colspan': '(5)'}), '((nrow, ncol), (i, 6 * j), colspan=5)\n', (2943, 2980), True, 'import matplotlib.pyplot as plt\n'), ((4905, 4934), 'torch.pow', 'torch.pow', (['(uTarget - uPred)', '(2)'], {}), '(uTarget - uPred, 2)\n', (4914, 4934), False, 'import torch\n'), ((5373, 5401), 'numpy.insert', 'np.insert', (['r2_time', '(0)', 'epoch'], {}), '(r2_time, 0, epoch)\n', (5382, 5401), True, 'import numpy as np\n'), ((5508, 5535), 'numpy.insert', 'np.insert', (['r2_avg', '(0)', 'epoch'], {}), '(r2_avg, 0, epoch)\n', (5517, 5535), True, 'import numpy as np\n'), ((4984, 5015), 'torch.mean', 'torch.mean', (['(1 - ss_res / ss_tot)'], {}), '(1 - ss_res / ss_tot)\n', (4994, 5015), False, 'import torch\n'), ((5245, 5283), 'torch.mean', 'torch.mean', (['(1 - ss_res / ss_tot)'], {'dim': '(0)'}), '(1 - ss_res / ss_tot, dim=0)\n', (5255, 5283), False, 'import torch\n')]
|
#!/usr/bin/env python3
import argparse
import requests
from bioschemas_indexer import indexer
# MAIN: fire one suggest query at the configured Solr core and print the terms.
parser = argparse.ArgumentParser('Run a test query against the Solr instance')
parser.add_argument('query')
args = parser.parse_args()

_, solr = indexer.read_conf()
# Build the suggester endpoint from the Solr connection settings.
solrSuggester = (
    f"http://{solr['SOLR_SERVER']}:{solr['SOLR_PORT']}/solr/{solr['SOLR_CORE']}"
    '/suggest?suggest.dictionary=mySuggester&suggest=true&suggest.build=true&suggest.q='
)
r = requests.get(solrSuggester + args.query)
resp = r.json()
# Each suggestion entry carries the matched text under the 'term' key.
suggestions = resp["suggest"]["mySuggester"][args.query]["suggestions"]
for word in suggestions:
    print(word["term"])
|
[
"bioschemas_indexer.indexer.read_conf",
"argparse.ArgumentParser",
"requests.get"
] |
[((113, 182), 'argparse.ArgumentParser', 'argparse.ArgumentParser', (['"""Run a test query against the Solr instance"""'], {}), "('Run a test query against the Solr instance')\n", (136, 182), False, 'import argparse\n'), ((250, 269), 'bioschemas_indexer.indexer.read_conf', 'indexer.read_conf', ([], {}), '()\n', (267, 269), False, 'from bioschemas_indexer import indexer\n'), ((509, 549), 'requests.get', 'requests.get', (['(solrSuggester + args.query)'], {}), '(solrSuggester + args.query)\n', (521, 549), False, 'import requests\n')]
|
#!/usr/bin/env python3
from itertools import permutations
from pathlib import Path
class Figure(frozenset):
    """A seven-segment figure: the frozenset of segment indices (0-6) that are lit."""
    @classmethod
    def parse(cls, text):
        # Parse an ASCII-art strip of ten 3-character-wide digits and yield
        # one Figure per digit, left to right.
        lines = [l for l in text.splitlines() if l]
        for digit in range(10):
            # Concatenate this digit's 3-char column from every row; with 5
            # rows that gives 15 chars, and segment i sits at odd offset
            # 1 + 2*i of that string.
            digittext = ''.join(l[3 * digit:3 * (digit + 1)] for l in lines)
            # Every even offset in the template must be padding.
            assert {' '} == set(digittext[::2])
            yield cls(i for i in range(7) if digittext[1 + 2 * i] != ' ')
class Patch:
    """
    A (possibly partial) assignment of pattern characters to segment indices.

    Used to search for the wiring that maps scrambled signal letters onto
    canonical seven-segment positions.
    """
    def __init__(self, chartosegment):
        # chartosegment: dict mapping a signal character to a segment index (0-6).
        self.chartosegment = chartosegment
    def _patches(self, pattern, figure):
        # Yield every extension of this patch that maps `pattern`'s
        # characters exactly onto `figure`'s segments.
        if len(figure) != len(pattern):
            return
        # Segments already forced by characters this patch has assigned.
        knownsegments = {i for c in pattern for i in [self.chartosegment.get(c)] if i is not None}
        if not knownsegments <= figure:
            return
        # Try every pairing of the still-unassigned characters with the
        # still-unclaimed segments.
        unknownsegments = list(figure - knownsegments)
        unknownchars = [c for c in pattern if c not in self.chartosegment]
        for chars in permutations(unknownchars):
            yield type(self)(dict(zip(chars, unknownsegments), **self.chartosegment))
    def search(self, patterns, figures):
        # Depth-first search: match each pattern to one unused figure and
        # recurse; yields every fully consistent patch.
        if patterns:
            for f in figures:
                for q in self._patches(patterns[0], f):
                    yield from q.search(patterns[1:], figures - {f})
        else:
            yield self
    def _decodeone(self, pattern):
        # Translate one pattern to its digit via the module-level `figures` map.
        return figures[Figure(self.chartosegment[c] for c in pattern)]
    def decode(self, patterns):
        # Interpret the patterns as decimal digits, most significant first.
        return sum(10 ** i * self._decodeone(p) for i, p in enumerate(reversed(patterns)))
emptypatch = Patch({})
figures = {f: digit for digit, f in enumerate(Figure.parse('''
- - - - - - - - $
| | | | || || | || || |$
- - - - - - - $
| | || | | || | || | |$
- - - - - - - $
'''))}
def main():
    # Seven-segment search puzzle (looks like Advent of Code day 8): sum the
    # decoded four-digit outputs over every input line.
    n = 0
    with Path('input', '8').open() as f:
        for line in f:
            # Each line: ten signal patterns, a '|', then four output patterns.
            patterns, digits = (s.split() for s in line.split('|'))
            # Sorting patterns by length prunes the search: the short,
            # unique-length figures pin down segments first.  The single-name
            # unpacking asserts exactly one consistent patch exists.
            patch, = emptypatch.search(sorted(patterns, key = len), figures.keys())
            n += patch.decode(digits)
    print(n)
if '__main__' == __name__:
    main()
|
[
"itertools.permutations",
"pathlib.Path"
] |
[((944, 970), 'itertools.permutations', 'permutations', (['unknownchars'], {}), '(unknownchars)\n', (956, 970), False, 'from itertools import permutations\n'), ((1830, 1848), 'pathlib.Path', 'Path', (['"""input"""', '"""8"""'], {}), "('input', '8')\n", (1834, 1848), False, 'from pathlib import Path\n')]
|
# -*- coding: utf-8 -*-
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from builtins import range
import utils
import argparse
import time
import os
import sys
import random
import math
import json
import codecs
import numpy as np
import utils
from utils import check_cuda_for_var, check_directory
# Command-line interface for the Dialog2Vec generator script.
parser = argparse.ArgumentParser(description="Dialog2Vec Generator")
for flag, options in (
        ('--data', dict(type=str, help='location of the data corpus(json file)')),
        ('--validation_p', dict(type=float, default=0.2, help='percentage of validation data / all data')),
        ('--seed', dict(type=int, default=55665566, help='random seed')),
        ('--only_stat', dict(type=bool, default=False, help='only do statistic or not'))):
    parser.add_argument(flag, **options)
args = parser.parse_args()
random.seed(args.seed)
my_lang, document_list = utils.build_lang(args.data, dump_torch_variable=False)
# Corpus statistics: dialog-length histogram, sentence/word totals and
# per-word frequencies.
dialog_len_count = {}
sentence_count = 0
total_word_count = 0
word_count = {}
for dialog in document_list:
    n_sentences = len(dialog)
    sentence_count += n_sentences
    dialog_len_count[n_sentences] = dialog_len_count.get(n_sentences, 0) + 1
    for sentence in dialog:
        total_word_count += len(sentence)
        for index in sentence:
            token = my_lang.index2word[index]
            word_count[token] = word_count.get(token, 0) + 1
print("total_word_count ", total_word_count)
print("sentence_count ", sentence_count)
print("dialog_len_count ", dialog_len_count)
print("word_count ", word_count)
if args.only_stat:
    sys.exit(0)
#
# Shuffle the corpus and carve off the validation partition.
random.shuffle(document_list)
cut = int(len(document_list) * args.validation_p)
training_data = document_list[cut:]
validation_data = document_list[:cut]
# Training data for doc2vec: each dialog minus its final sentence, flattened
# into a single sequence of word indices.
print("Training data for doc2vec")
gensim_train = []
for train_dialog in training_data:
    flat = [index for sentence in train_dialog[:-1] for index in sentence]
    gensim_train.append(flat)
np.save("label/gensim_train.npy", gensim_train)
print("Label data for training")
label = []
dialog2vec = []
doc2vec = []
for train_dialog in training_data:
    # Full dialog context (all sentences except the last), labelled 1 --
    # presumably the "complete dialog" class; confirm with the downstream
    # classifier.
    doc = []
    dialog = []
    for sentence in train_dialog:
        if not sentence == train_dialog[-1]:
            doc += sentence
        if len(sentence) > 1:
            dialog.append(my_lang.index2sentence(sentence[:-1]))
    dialog2vec.append(dialog[:-1])
    doc2vec.append(doc)
    label.append(1)
    # Randomly truncated prefix of the same dialog, labelled 0.
    doc = []
    dialog = []
    for sentence in train_dialog[:random.randint(1, len(train_dialog)-2)]:
        doc += sentence
        if len(sentence) > 1:
            dialog.append(my_lang.index2sentence(sentence[:-1]))
    dialog2vec.append(dialog[:-1])
    doc2vec.append(doc)
    label.append(0)
np.save("label/gensim_train_test.npy", doc2vec)
np.save("label/train_label.npy", label)
with codecs.open("label/dialog2vec_train.json", "w+", encoding="utf-8") as outfile:
    json.dump(dialog2vec, outfile, indent=4, ensure_ascii=False)
print("Label data for testing")
label = []
dialog2vec = []
doc2vec = []
for validate_dialog in validation_data:
    # Full dialog context (all sentences except the last), labelled 1.
    doc = []
    dialog = []
    for sentence in validate_dialog:
        # BUGFIX: the original compared against train_dialog[-1] -- a stale
        # loop variable left over from the training loop above -- instead of
        # this dialog's own last sentence.  Fixed to mirror the training loop.
        if not sentence == validate_dialog[-1]:
            doc += sentence
        if len(sentence) > 1:
            dialog.append(my_lang.index2sentence(sentence[:-1]))
    dialog2vec.append(dialog[:-1])
    doc2vec.append(doc)
    label.append(1)
    # Randomly truncated prefix of the same dialog, labelled 0.
    doc = []
    dialog = []
    for sentence in validate_dialog[:random.randint(1, len(validate_dialog)-2)]:
        doc += sentence
        if len(sentence) > 1:
            dialog.append(my_lang.index2sentence(sentence[:-1]))
    dialog2vec.append(dialog[:-1])
    doc2vec.append(doc)
    label.append(0)
np.save("label/gensim_test_test.npy", doc2vec)
np.save("label/test_label.npy", label)
with codecs.open("label/dialog2vec_test.json", "w+", encoding="utf-8") as outfile:
    json.dump(dialog2vec, outfile, indent=4, ensure_ascii=False)
|
[
"json.dump",
"numpy.save",
"argparse.ArgumentParser",
"utils.build_lang",
"codecs.open",
"random.shuffle",
"random.seed",
"sys.exit"
] |
[((376, 435), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'description': '"""Dialog2Vec Generator"""'}), "(description='Dialog2Vec Generator')\n", (399, 435), False, 'import argparse\n'), ((878, 900), 'random.seed', 'random.seed', (['args.seed'], {}), '(args.seed)\n', (889, 900), False, 'import random\n'), ((927, 981), 'utils.build_lang', 'utils.build_lang', (['args.data'], {'dump_torch_variable': '(False)'}), '(args.data, dump_torch_variable=False)\n', (943, 981), False, 'import utils\n'), ((1658, 1687), 'random.shuffle', 'random.shuffle', (['document_list'], {}), '(document_list)\n', (1672, 1687), False, 'import random\n'), ((2043, 2090), 'numpy.save', 'np.save', (['"""label/gensim_train.npy"""', 'gensim_train'], {}), "('label/gensim_train.npy', gensim_train)\n", (2050, 2090), True, 'import numpy as np\n'), ((2813, 2860), 'numpy.save', 'np.save', (['"""label/gensim_train_test.npy"""', 'doc2vec'], {}), "('label/gensim_train_test.npy', doc2vec)\n", (2820, 2860), True, 'import numpy as np\n'), ((2861, 2900), 'numpy.save', 'np.save', (['"""label/train_label.npy"""', 'label'], {}), "('label/train_label.npy', label)\n", (2868, 2900), True, 'import numpy as np\n'), ((3785, 3831), 'numpy.save', 'np.save', (['"""label/gensim_test_test.npy"""', 'doc2vec'], {}), "('label/gensim_test_test.npy', doc2vec)\n", (3792, 3831), True, 'import numpy as np\n'), ((3832, 3870), 'numpy.save', 'np.save', (['"""label/test_label.npy"""', 'label'], {}), "('label/test_label.npy', label)\n", (3839, 3870), True, 'import numpy as np\n'), ((1644, 1655), 'sys.exit', 'sys.exit', (['(0)'], {}), '(0)\n', (1652, 1655), False, 'import sys\n'), ((2906, 2972), 'codecs.open', 'codecs.open', (['"""label/dialog2vec_train.json"""', '"""w+"""'], {'encoding': '"""utf-8"""'}), "('label/dialog2vec_train.json', 'w+', encoding='utf-8')\n", (2917, 2972), False, 'import codecs\n'), ((2989, 3049), 'json.dump', 'json.dump', (['dialog2vec', 'outfile'], {'indent': '(4)', 'ensure_ascii': '(False)'}), 
'(dialog2vec, outfile, indent=4, ensure_ascii=False)\n', (2998, 3049), False, 'import json\n'), ((3876, 3941), 'codecs.open', 'codecs.open', (['"""label/dialog2vec_test.json"""', '"""w+"""'], {'encoding': '"""utf-8"""'}), "('label/dialog2vec_test.json', 'w+', encoding='utf-8')\n", (3887, 3941), False, 'import codecs\n'), ((3958, 4018), 'json.dump', 'json.dump', (['dialog2vec', 'outfile'], {'indent': '(4)', 'ensure_ascii': '(False)'}), '(dialog2vec, outfile, indent=4, ensure_ascii=False)\n', (3967, 4018), False, 'import json\n')]
|
class Solution:
    def wallsAndGates(self, rooms: 'List[List[int]]') -> None:
        """
        Fill each empty room with the distance to its nearest gate using a
        multi-source BFS seeded from every gate simultaneously.

        Grid encoding: 0 = gate, -1 = wall, 2**31 - 1 (INF) = empty room.

        Do not return anything, modify rooms in-place instead.
        """
        if not rooms:
            return
        INF = 2 ** 31 - 1  # "empty room" sentinel
        m, n = len(rooms), len(rooms[0])
        from collections import deque
        que = deque()
        # Seed the queue with every gate at distance 0.
        for i in range(m):
            for j in range(n):
                if rooms[i][j] == 0:
                    que.append((i, j, 0))
        # BFS.  A cell is enqueued only while it still holds INF and is
        # overwritten before being enqueued, so each cell is processed at
        # most once -- the separate visited-set of the original was redundant.
        while que:
            x, y, cnt = que.popleft()
            for dx, dy in ((1, 0), (-1, 0), (0, 1), (0, -1)):
                xx, yy = x + dx, y + dy
                if 0 <= xx < m and 0 <= yy < n and rooms[xx][yy] == INF:
                    rooms[xx][yy] = cnt + 1
                    que.append((xx, yy, cnt + 1))
|
[
"collections.deque"
] |
[((324, 331), 'collections.deque', 'deque', ([], {}), '()\n', (329, 331), False, 'from collections import deque\n')]
|
import more_itertools as mit
import functools as ftl
from recipes.testing import Expect
from astropy.io.fits.hdu.base import _BaseHDU
from pathlib import Path
from pySHOC import shocCampaign, shocHDU, shocNewHDU, shocBiasHDU, shocFlatHDU
import pytest
import numpy as np
import os
import tempfile as tmp
# TODO: old + new data all modes!!!
# TODO: all combinations of science, bias, dark, flats (+ masters)
# TODO:
# pylint: disable=C0111 # Missing %s docstring
# pylint: disable=R0201 # Method could be a function
# pretty sample images here:
DATA = Path(__file__).parent / 'data'
EX1 = DATA / 'AT2020hat'
CAL = DATA / 'calibration'
#
np.random.seed(12345)
# ---------------------------------- Helpers --------------------------------- #
def list_of_files():
# create text file with list of filenames for test load
fp, filename = tmp.mkstemp('.txt')
for name in EX1.glob('*.fits'):
os.write(fp, f'{name}{os.linesep}'.encode())
os.close(fp)
return filename
# --------------------------------- Fixtures --------------------------------- #
@pytest.fixture
def run():
return shocCampaign.load(EX1)
# run = shocCampaign.load(EX1)
# ----------------------------------- Tests ---------------------------------- #
class TestCampaign:
@pytest.mark.parametrize(
'pointer',
( # single file as a str
f'{EX1}/SHA_20200731.0001.fits',
# single file as a Path object
EX1 / 'SHA_20200731.0001.fits',
# file list
[f'{EX1}/SHA_20200731.0001.fits',
f'{EX1}/SHA_20200731.0002.fits'],
# globbing patterns
f'{EX1}/SHA_20200731.000[12].fits',
f'{EX1}/SHA_20200731.000*.fits',
# directory
EX1, str(EX1),
# pointer to text file with list of filenames
f'@{list_of_files()}'
)
)
def test_load(self, pointer):
run = shocCampaign.load(pointer)
def test_file_helper(self, run):
run.files
run.files.names
run.files.stems
run.files.nrs
@pytest.mark.parametrize(
'index',
( # simple indexing
0,
-1,
# by filename
'SHA_20200731.0007.fits',
'SHA_20200731.0007', # both should work
)
)
def test_single_index(self, run, index):
print(run[index].file.name)
assert isinstance(run[index], shocHDU)
@pytest.mark.parametrize(
'index,expected',
[ # slice
(slice(0, 4, 2),
['SHA_20200731.0001.fits', 'SHA_20200731.0003.fits']),
# sequences of ints
([0, 1, 3, -1],
['SHA_20200731.0001.fits', 'SHA_20200731.0002.fits',
'SHA_20200731.0004.fits', 'SHA_20200731.0022.fits']),
# array of ints
(np.arange(3),
['SHA_20200731.0001.fits', 'SHA_20200731.0002.fits',
'SHA_20200731.0003.fits']),
# boolean array
(np.random.randint(0, 2, 22).astype(bool),
['SHA_20200731.0002.fits', 'SHA_20200731.0003.fits',
'SHA_20200731.0004.fits', 'SHA_20200731.0006.fits',
'SHA_20200731.0009.fits', 'SHA_20200731.0011.fits',
'SHA_20200731.0012.fits', 'SHA_20200731.0014.fits',
'SHA_20200731.0015.fits', 'SHA_20200731.0017.fits',
'SHA_20200731.0018.fits', 'SHA_20200731.0019.fits']),
# by list of filenames
(('SHA_20200731.0007.fits', 'SHA_20200731.0008.fits'),
['SHA_20200731.0007.fits', 'SHA_20200731.0008.fits']),
# by globbing pattern
('SHA*[78].fits',
['SHA_20200731.0007.fits', 'SHA_20200731.0008.fits',
'SHA_20200731.0017.fits', 'SHA_20200731.0018.fits']),
# by brace expansion
('SHA*{7,8}.fits',
['SHA_20200731.0007.fits', 'SHA_20200731.0008.fits',
'SHA_20200731.0017.fits', 'SHA_20200731.0018.fits']),
# by filename sequence slice
('*0731.00[10:22].*',
['SHA_20200731.0010.fits', 'SHA_20200731.0011.fits',
'SHA_20200731.0012.fits', 'SHA_20200731.0013.fits',
'SHA_20200731.0014.fits', 'SHA_20200731.0015.fits',
'SHA_20200731.0016.fits', 'SHA_20200731.0017.fits',
'SHA_20200731.0018.fits', 'SHA_20200731.0019.fits',
'SHA_20200731.0020.fits', 'SHA_20200731.0021.fits'])
]
)
def test_multi_index(self, run, index, expected):
sub = run[index]
assert isinstance(sub, shocCampaign)
assert sub.files.names == expected
def test_pprint(self, run):
print(run, run.table(run), sep='\n\n')
# @pytest.mark.parametrize(
# 'filename,expected',
# [(CAL/'SHA_20200822.0005.fits', shocBiasHDU),
# (CAL/'SHA_20200801.0001.fits', shocFlatHDU),
# (EX1/'SHA_20200731.0022.fits', shocNewHDU)]
# )
# def test_hdu_type(filename, expected):
# obj = _BaseHDU.readfr
# @expected(
# (CAL/'SHA_20200822.0005.fits', shocBiasHDU,
# CAL/'SHA_20200801.0001.fits', shocFlatHDU,
# EX1/'SHA_20200731.0022.fits', shocNewHDU)
# )
def hdu_type(filename):
return _BaseHDU.readfrom(filename).__class__
# print('....', filename)
# print(obj)
# return obj
Expect(hdu_type)(
{CAL/'SHA_20200822.0005.fits': shocBiasHDU,
CAL/'SHA_20200801.0001.fits': shocFlatHDU,
EX1/'SHA_20200731.0022.fits': shocNewHDU},
globals())
# TODO: shocOldHDU, shocMasterBias, shocMasterFlat
# TODO
# def test_select
|
[
"numpy.random.seed",
"recipes.testing.Expect",
"tempfile.mkstemp",
"astropy.io.fits.hdu.base._BaseHDU.readfrom",
"pathlib.Path",
"numpy.random.randint",
"os.close",
"numpy.arange",
"pytest.mark.parametrize",
"pySHOC.shocCampaign.load"
] |
[((648, 669), 'numpy.random.seed', 'np.random.seed', (['(12345)'], {}), '(12345)\n', (662, 669), True, 'import numpy as np\n'), ((854, 873), 'tempfile.mkstemp', 'tmp.mkstemp', (['""".txt"""'], {}), "('.txt')\n", (865, 873), True, 'import tempfile as tmp\n'), ((967, 979), 'os.close', 'os.close', (['fp'], {}), '(fp)\n', (975, 979), False, 'import os\n'), ((1122, 1144), 'pySHOC.shocCampaign.load', 'shocCampaign.load', (['EX1'], {}), '(EX1)\n', (1139, 1144), False, 'from pySHOC import shocCampaign, shocHDU, shocNewHDU, shocBiasHDU, shocFlatHDU\n'), ((2103, 2195), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""index"""', "(0, -1, 'SHA_20200731.0007.fits', 'SHA_20200731.0007')"], {}), "('index', (0, -1, 'SHA_20200731.0007.fits',\n 'SHA_20200731.0007'))\n", (2126, 2195), False, 'import pytest\n'), ((5383, 5399), 'recipes.testing.Expect', 'Expect', (['hdu_type'], {}), '(hdu_type)\n', (5389, 5399), False, 'from recipes.testing import Expect\n'), ((562, 576), 'pathlib.Path', 'Path', (['__file__'], {}), '(__file__)\n', (566, 576), False, 'from pathlib import Path\n'), ((1944, 1970), 'pySHOC.shocCampaign.load', 'shocCampaign.load', (['pointer'], {}), '(pointer)\n', (1961, 1970), False, 'from pySHOC import shocCampaign, shocHDU, shocNewHDU, shocBiasHDU, shocFlatHDU\n'), ((5279, 5306), 'astropy.io.fits.hdu.base._BaseHDU.readfrom', '_BaseHDU.readfrom', (['filename'], {}), '(filename)\n', (5296, 5306), False, 'from astropy.io.fits.hdu.base import _BaseHDU\n'), ((2882, 2894), 'numpy.arange', 'np.arange', (['(3)'], {}), '(3)\n', (2891, 2894), True, 'import numpy as np\n'), ((3046, 3073), 'numpy.random.randint', 'np.random.randint', (['(0)', '(2)', '(22)'], {}), '(0, 2, 22)\n', (3063, 3073), True, 'import numpy as np\n')]
|
import sys
import os
from tests.test_single_file import PhysicalData
from tests.test_single_file import SingleFile
import pytest
class Rootpath:
    """
    @overview: absolute path of a root directory, with recursive file listing.
    """
    def __init__(self, opts):
        # opts is an argv-like sequence; the path sits at index 1.
        self.rootpath = opts[1] #TODO study how to resolve the constants in Python
    def data(self):
        """Return the configured root path as a string."""
        return str(self.rootpath)
    def exists(self):
        """Return True when the root directory exists on disk."""
        return os.path.exists(self.data())
    def files(self):
        """
        Read the root directory recursively and return the collected files.
        """
        readfiles = []
        try:
            self.subdir(self.data(), readfiles)
        except Exception:
            # Best effort: report the failure but still return what was read.
            print(sys.exc_info())
        print("The total number of the read files is {0}".format(str(len(readfiles))))
        return readfiles
    def subdir(self, root_path, readfiles):
        """
        Traverse root_path recursively, appending one SingleFile per file.
        ----------
        root_path: string   root of the path
        readfiles: list     list of read files inside path
        """
        # os.walk already descends into every sub-directory, so no manual
        # recursion is needed.  (The original additionally re-called
        # subdir() with each bare directory *name*, which either silently
        # walked nothing or duplicated subtrees relative to the cwd.)
        for root, dirs, files in os.walk(root_path):
            path = root.split(os.sep)
            for fileTmp in files:
                readfiles.append(SingleFile(PhysicalData(fileTmp, os.sep.join(path))))
    def __repr__(self):
        return "Rootpath.repr:{0}".format(str(self.rootpath))
    def __str__(self):
        return "Rootpath:{0}".format(str(self.rootpath))
class OnlyVisible(Rootpath):
    """
    Decorator around a Rootpath that skips hidden (dot-prefixed) directories.
    """
    def __init__(self, new_rootpath):
        # new_rootpath is the wrapped Rootpath instance (decorator pattern).
        self.rootpath = new_rootpath
    def data(self):
        return self.rootpath.data()
    def exists(self):
        return super().exists()
    def files(self):
        """Return the wrapped Rootpath's files, or [] when the root is missing."""
        readfiles = []
        try:
            if self.exists():
                readfiles = self.rootpath.files()
            else:
                print("The directory [{0}] doesn't exist".format(self.data()))
        except Exception:
            print(sys.exc_info())
        return readfiles
    def subdir(self, root_path, readfiles):
        if "\\." in root_path:
            # BUGFIX: the original formatted an undefined name `directory`
            # here, raising NameError whenever a hidden path was reported.
            print("Directory with dot (.), then it's hidden: {0}".format(root_path))
        else:
            return self.rootpath.subdir(root_path, readfiles)
    def __repr__(self):
        return "OnlyVisible.repr:{0}".format(str(self.rootpath))
    def __str__(self):
        return "OnlyVisible:{0}".format(str(self.rootpath))
def test_dot_in_path():
    # A Windows path containing hidden (dot-prefixed) components must be
    # detected by a plain '.' membership test.
    hidden_path = "C:\\Users\\apuzielli\\Documents\\personale\\mio-github\\.metadata\\.plugins\\org.jboss.tools.central\\proxyWizards\\1596464026525\\.rcache\\.orphans"
    assert "." in hidden_path
|
[
"os.walk",
"os.sep.join",
"sys.exc_info"
] |
[((1175, 1193), 'os.walk', 'os.walk', (['root_path'], {}), '(root_path)\n', (1182, 1193), False, 'import os\n'), ((696, 710), 'sys.exc_info', 'sys.exc_info', ([], {}), '()\n', (708, 710), False, 'import sys\n'), ((2270, 2284), 'sys.exc_info', 'sys.exc_info', ([], {}), '()\n', (2282, 2284), False, 'import sys\n'), ((1341, 1358), 'os.sep.join', 'os.sep.join', (['path'], {}), '(path)\n', (1352, 1358), False, 'import os\n')]
|
import logging
def set_custom_log_info(file):
    """Route INFO-and-above log records of the root logger to the given file path."""
    logging.basicConfig(level=logging.INFO, filename=file)
def report(e: Exception):
    """Log the given exception's message, with traceback info, at ERROR level."""
    message = str(e)
    logging.exception(message)
|
[
"logging.basicConfig"
] |
[((49, 103), 'logging.basicConfig', 'logging.basicConfig', ([], {'filename': 'file', 'level': 'logging.INFO'}), '(filename=file, level=logging.INFO)\n', (68, 103), False, 'import logging\n')]
|
import numpy as np
import open3d as o3d
import os
from argparse import ArgumentParser
# CLI: per-channel color thresholds, input directory and optional rendering.
parser = ArgumentParser()
for channel, threshold in (("--red", 0.5), ("--blue", 0.4), ("--green", 0.4)):
    parser.add_argument(channel, type=float, default=threshold)
parser.add_argument("--source_dir", type=str, default="./scatters")
parser.add_argument("--render", action="store_true", default=False)
args = parser.parse_args()
# Need to consider that some cases disturbance may exist
def segment_cloth(pcd):
color = np.array(pcd.colors)
mask = (color[:,0] > args.red) * (color[:, 1] < args.green) * (color[:,2] < args.blue)
points = np.asarray(pcd.points)
truncated_pcd = o3d.geometry.PointCloud()
truncated_pcd.points = o3d.utility.Vector3dVector(points[mask])
truncated_pcd.colors = o3d.utility.Vector3dVector(color[mask])
truncated_pcd.remove_statistical_outlier(nb_neighbors = 20, std_ratio = 0.04)
return truncated_pcd
# Source direcrtory is identical to target directory
files = os.listdir(f"./pointcloud_transformed/{args.source_dir}/")
for f in files:
filename = f"./pointcloud_transformed/{args.source_dir}/{f}"
pcd = o3d.io.read_point_cloud(filename)
cloth_pcd = segment_cloth(pcd)
o3d.io.write_point_cloud(f"./pointcloud_cloth/{args.source_dir}/{f}", cloth_pcd)
if args.render:
o3d.visualization.draw_geometries([cloth_pcd])
|
[
"argparse.ArgumentParser",
"numpy.asarray",
"open3d.geometry.PointCloud",
"open3d.io.read_point_cloud",
"open3d.io.write_point_cloud",
"open3d.visualization.draw_geometries",
"numpy.array",
"open3d.utility.Vector3dVector",
"os.listdir"
] |
[((96, 112), 'argparse.ArgumentParser', 'ArgumentParser', ([], {}), '()\n', (110, 112), False, 'from argparse import ArgumentParser\n'), ((1053, 1111), 'os.listdir', 'os.listdir', (['f"""./pointcloud_transformed/{args.source_dir}/"""'], {}), "(f'./pointcloud_transformed/{args.source_dir}/')\n", (1063, 1111), False, 'import os\n'), ((555, 575), 'numpy.array', 'np.array', (['pcd.colors'], {}), '(pcd.colors)\n', (563, 575), True, 'import numpy as np\n'), ((680, 702), 'numpy.asarray', 'np.asarray', (['pcd.points'], {}), '(pcd.points)\n', (690, 702), True, 'import numpy as np\n'), ((723, 748), 'open3d.geometry.PointCloud', 'o3d.geometry.PointCloud', ([], {}), '()\n', (746, 748), True, 'import open3d as o3d\n'), ((776, 816), 'open3d.utility.Vector3dVector', 'o3d.utility.Vector3dVector', (['points[mask]'], {}), '(points[mask])\n', (802, 816), True, 'import open3d as o3d\n'), ((844, 883), 'open3d.utility.Vector3dVector', 'o3d.utility.Vector3dVector', (['color[mask]'], {}), '(color[mask])\n', (870, 883), True, 'import open3d as o3d\n'), ((1203, 1236), 'open3d.io.read_point_cloud', 'o3d.io.read_point_cloud', (['filename'], {}), '(filename)\n', (1226, 1236), True, 'import open3d as o3d\n'), ((1276, 1361), 'open3d.io.write_point_cloud', 'o3d.io.write_point_cloud', (['f"""./pointcloud_cloth/{args.source_dir}/{f}"""', 'cloth_pcd'], {}), "(f'./pointcloud_cloth/{args.source_dir}/{f}', cloth_pcd\n )\n", (1300, 1361), True, 'import open3d as o3d\n'), ((1385, 1431), 'open3d.visualization.draw_geometries', 'o3d.visualization.draw_geometries', (['[cloth_pcd]'], {}), '([cloth_pcd])\n', (1418, 1431), True, 'import open3d as o3d\n')]
|
from db import cursor
from db import db as mongodb
from pymongo import ASCENDING
import bson
import datetime
mongo_user = mongodb['user']
mongo_video = mongodb['video']
mongo_author = mongodb['author']
# 用户相关
INSERT_USER_SQL = """
INSERT INTO `user` (`name`, `password`, `credit`, `exp`, `gmt_create`, `role`)
VALUES (%(name)s, %(password)s, %(credit)s, %(exp)s, %(gen_time)s, %(role)s)
ON DUPLICATE KEY UPDATE `name` = VALUES(`name`), `exp` = VALUES(`exp`), `credit` = VALUES(`credit`), `password` = VALUES(`password`), `role` = VALUES(`role`);
"""
GET_USER_ID_SQL = """
SELECT `user_id` FROM `user` WHERE `name` = %s
"""
DELETE_USER_FOCUS_VIDEO_SQL = """
DELETE FROM biliob.user_focus_video
WHERE
`user_id` = %s;
"""
DELETE_USER_FOCUS_AUTHOR_SQL = """
DELETE FROM biliob.user_focus_author
WHERE
`user_id` = %s;
"""
INSERT_USER_FOCUS_VIDEO_SQL = """
INSERT INTO `user_focus_video` (`user_id`, `video_id`)
VALUES (%(user_id)s, %(video_id)s);
"""
INSERT_USER_FOCUS_AUTHOR_SQL = """
INSERT INTO `user_focus_author` (`user_id`, `author_id`)
VALUES (%(user_id)s, %(author_id)s)
"""
def translate_int64(item):
for each_key in item:
if type(item[each_key]) is bson.int64.Int64:
item[each_key] = int(item[each_key])
def move_user():
for each_doc in mongo_user.find().sort('_id', direction=ASCENDING):
item = dict()
item['gen_time'] = each_doc.pop('_id').generation_time
item['name'] = each_doc['name']
item['credit'] = each_doc['credit'] if 'credit' in each_doc else 0
item['password'] = each_doc['password'] if 'password' in each_doc else 0
item['exp'] = each_doc['exp'] if 'exp' in each_doc else 0
item['role'] = each_doc['role'] if 'role' in each_doc else 0
if len(item['name']) > 45:
print(item['name'])
continue
cursor.execute(INSERT_USER_SQL, item)
cursor.execute(GET_USER_ID_SQL, (each_doc['name']))
user_id = cursor.fetchone()['user_id']
cursor.execute(DELETE_USER_FOCUS_VIDEO_SQL, (user_id))
cursor.execute(DELETE_USER_FOCUS_AUTHOR_SQL, (user_id))
if 'favoriteAid' in each_doc:
for each_aid in each_doc['favoriteAid']:
if each_aid == None or each_aid > 4294967295:
continue
item = {}
item['user_id'] = int(user_id)
item['video_id'] = int(each_aid)
cursor.execute(INSERT_USER_FOCUS_VIDEO_SQL, item)
if 'favoriteMid' in each_doc:
for each_mid in each_doc['favoriteMid']:
if each_mid == None or each_mid > 4294967295:
continue
item = {}
item['user_id'] = int(user_id)
item['author_id'] = int(each_mid)
cursor.execute(INSERT_USER_FOCUS_AUTHOR_SQL, item)
# 视频相关
INSERT_VIDEO_SQL = """
INSERT INTO `video` (`video_id`, `author_id`, `title`, `pic`, `is_observe`, `gmt_create`, `channel`, `subchannel`, `pub_datetime`)
VALUES (%(video_id)s, %(author_id)s, %(title)s, %(pic)s, %(is_observe)s, %(gen_time)s, %(channel)s, %(subchannel)s, %(pub_datetime)s)
ON DUPLICATE KEY UPDATE `title` = VALUES(`title`), `pic` = VALUES(`pic`), `is_observe` = VALUES(`is_observe`), `channel` = VALUES(`channel`), `subchannel` = VALUES(`subchannel`), `pub_datetime` = VALUES(`pub_datetime`);
"""
INSERT_VIDEO_RECORD_SQL = """
INSERT INTO `video_record` (`video_id`, `view`, `danmaku`, `favorite`, `coin`, `share`, `like`, `dislike`, `gmt_create`)
VALUES (%(video_id)s, %(view)s, %(danmaku)s, %(favorite)s, %(coin)s, %(share)s, %(like)s, %(dislike)s, %(gmt_create)s)
ON DUPLICATE KEY UPDATE
`video_id` = VALUES(`video_id`),
`view` = VALUES(`view`),
`danmaku` = VALUES(`danmaku`),
`favorite` = VALUES(`favorite`),
`coin` = VALUES(`coin`),
`share` = VALUES(`share`);
`like` = VALUES(`like`);
`dislike` = VALUES(`dislike`);
"""
def move_video():
for each_doc in mongo_video.find().batch_size(8):
translate_int64(each_doc)
item = {}
item['video_id'] = each_doc['aid'] if 'aid' in each_doc else None
print(item['video_id'])
item['author_id'] = each_doc['mid'] if 'mid' in each_doc else None
item['title'] = each_doc['title'] if 'title' in each_doc else None
item['pic'] = each_doc['pic'] if 'pic' in each_doc else None
item['is_observe'] = each_doc['focus'] if 'focus' in each_doc else 1
item['channel'] = each_doc['channel'] if 'channel' in each_doc else None
item['subchannel'] = each_doc['subChannel'] if 'subChannel' in each_doc else None
item['gen_time'] = each_doc.pop('_id').generation_time
item['pub_datetime'] = each_doc['datetime'] if 'datetime' in each_doc else None
cursor.execute(INSERT_VIDEO_SQL, item)
if 'data' in each_doc:
item_list = []
for each_record in each_doc['data']:
translate_int64(each_record)
item = {}
item['video_id'] = each_doc['aid'] if 'aid' in each_doc else None
item['view'] = each_record['view'] if 'view' in each_record else None
item['danmaku'] = each_record['danmaku'] if 'danmaku' in each_record else None
item['favorite'] = each_record['favorite'] if 'favorite' in each_record else None
item['coin'] = each_record['coin'] if 'coin' in each_record else None
item['share'] = each_record['share'] if 'share' in each_record else None
item['like'] = each_record['like'] if 'like' in each_record else None
item['dislike'] = each_record['dislike'] if 'dislike' in each_record else None
item['gmt_create'] = each_record['datetime'] if 'datetime' in each_record else None
item_list.append(item)
cursor.executemany(INSERT_VIDEO_RECORD_SQL, item_list)
|
[
"db.cursor.fetchone",
"db.cursor.executemany",
"db.cursor.execute"
] |
[((1853, 1890), 'db.cursor.execute', 'cursor.execute', (['INSERT_USER_SQL', 'item'], {}), '(INSERT_USER_SQL, item)\n', (1867, 1890), False, 'from db import cursor\n'), ((1899, 1948), 'db.cursor.execute', 'cursor.execute', (['GET_USER_ID_SQL', "each_doc['name']"], {}), "(GET_USER_ID_SQL, each_doc['name'])\n", (1913, 1948), False, 'from db import cursor\n'), ((2007, 2059), 'db.cursor.execute', 'cursor.execute', (['DELETE_USER_FOCUS_VIDEO_SQL', 'user_id'], {}), '(DELETE_USER_FOCUS_VIDEO_SQL, user_id)\n', (2021, 2059), False, 'from db import cursor\n'), ((2070, 2123), 'db.cursor.execute', 'cursor.execute', (['DELETE_USER_FOCUS_AUTHOR_SQL', 'user_id'], {}), '(DELETE_USER_FOCUS_AUTHOR_SQL, user_id)\n', (2084, 2123), False, 'from db import cursor\n'), ((4783, 4821), 'db.cursor.execute', 'cursor.execute', (['INSERT_VIDEO_SQL', 'item'], {}), '(INSERT_VIDEO_SQL, item)\n', (4797, 4821), False, 'from db import cursor\n'), ((1970, 1987), 'db.cursor.fetchone', 'cursor.fetchone', ([], {}), '()\n', (1985, 1987), False, 'from db import cursor\n'), ((5868, 5922), 'db.cursor.executemany', 'cursor.executemany', (['INSERT_VIDEO_RECORD_SQL', 'item_list'], {}), '(INSERT_VIDEO_RECORD_SQL, item_list)\n', (5886, 5922), False, 'from db import cursor\n'), ((2446, 2495), 'db.cursor.execute', 'cursor.execute', (['INSERT_USER_FOCUS_VIDEO_SQL', 'item'], {}), '(INSERT_USER_FOCUS_VIDEO_SQL, item)\n', (2460, 2495), False, 'from db import cursor\n'), ((2817, 2867), 'db.cursor.execute', 'cursor.execute', (['INSERT_USER_FOCUS_AUTHOR_SQL', 'item'], {}), '(INSERT_USER_FOCUS_AUTHOR_SQL, item)\n', (2831, 2867), False, 'from db import cursor\n')]
|
"""Rainbow HAT GPIO Touch Driver."""
try:
import RPi.GPIO as GPIO
except ImportError:
raise ImportError("""This library requires the RPi.GPIO module.
Install with: sudo pip install RPi.GPIO""")
PIN_A = 21
PIN_B = 20
PIN_C = 16
GPIO.setmode(GPIO.BCM)
GPIO.setwarnings(False)
class Button(object):
"""Represent GPIO Button."""
def __init__(self, index, gpio_pin):
"""Initialise GPIO Button."""
object.__init__(self)
self.pressed = False
self._on_press_handler = None
self._on_release_handler = None
self._gpio_pin = gpio_pin
self._index = index
self._is_setup = False
def setup(self):
"""Set up the GPIO button."""
if self._is_setup:
return
GPIO.setup(self._gpio_pin, GPIO.IN, pull_up_down=GPIO.PUD_UP)
GPIO.add_event_detect(self._gpio_pin, GPIO.BOTH, bouncetime=1, callback=self._handle_button)
self._is_setup = True
def _handle_button(self, pin):
self.pressed = GPIO.input(pin) != GPIO.HIGH
if self.pressed and callable(self._on_press_handler):
try:
self._on_press_handler(self._index, self._gpio_pin)
except TypeError:
self._on_press_handler(self._index)
elif callable(self._on_release_handler):
try:
self._on_release_handler(self._index, self._gpio_pin)
except TypeError:
self._on_release_handler(self._index)
def press(self, handler=None):
"""Bind a function to handle touch press."""
self.setup()
if handler is None:
def decorate(handler):
self._on_press_handler = handler
return decorate
self._on_press_handler = handler
def release(self, handler=None):
"""Bind a funciton to handle touch release."""
self.setup()
if handler is None:
def decorate(handler):
self._on_release_handler = handler
return decorate
self._on_release_handler = handler
class Buttons(object):
"""Represent A, B and C GPIO Buttons."""
A = Button(0, PIN_A)
B = Button(1, PIN_B)
C = Button(2, PIN_C)
_all = [A, B, C]
def __getitem__(self, key):
return self._all[key]
def press(self, handler=None):
"""Bind a function to handle touch press."""
if handler is None:
def decorate(handler):
self.A.press(handler)
self.B.press(handler)
self.C.press(handler)
return decorate
self.A.press(handler)
self.B.press(handler)
self.C.press(handler)
def release(self, handler=None):
"""Bind a function to handle touch release."""
if handler is None:
def decorate(handler):
self.A.release(handler)
self.B.release(handler)
self.C.release(handler)
return decorate
self.A.release(handler)
self.B.release(handler)
self.C.release(handler)
Buttons = Buttons()
|
[
"RPi.GPIO.setmode",
"RPi.GPIO.setup",
"RPi.GPIO.input",
"RPi.GPIO.setwarnings",
"RPi.GPIO.add_event_detect"
] |
[((238, 260), 'RPi.GPIO.setmode', 'GPIO.setmode', (['GPIO.BCM'], {}), '(GPIO.BCM)\n', (250, 260), True, 'import RPi.GPIO as GPIO\n'), ((261, 284), 'RPi.GPIO.setwarnings', 'GPIO.setwarnings', (['(False)'], {}), '(False)\n', (277, 284), True, 'import RPi.GPIO as GPIO\n'), ((767, 828), 'RPi.GPIO.setup', 'GPIO.setup', (['self._gpio_pin', 'GPIO.IN'], {'pull_up_down': 'GPIO.PUD_UP'}), '(self._gpio_pin, GPIO.IN, pull_up_down=GPIO.PUD_UP)\n', (777, 828), True, 'import RPi.GPIO as GPIO\n'), ((837, 934), 'RPi.GPIO.add_event_detect', 'GPIO.add_event_detect', (['self._gpio_pin', 'GPIO.BOTH'], {'bouncetime': '(1)', 'callback': 'self._handle_button'}), '(self._gpio_pin, GPIO.BOTH, bouncetime=1, callback=\n self._handle_button)\n', (858, 934), True, 'import RPi.GPIO as GPIO\n'), ((1019, 1034), 'RPi.GPIO.input', 'GPIO.input', (['pin'], {}), '(pin)\n', (1029, 1034), True, 'import RPi.GPIO as GPIO\n')]
|
#!/usr/bin/python
import xml.sax
import sys
import os
import nltk
from nltk import sent_tokenize
from nltk.corpus import stopwords
from nltk.tokenize import RegexpTokenizer
from nltk.tokenize import TreebankWordTokenizer,ToktokTokenizer
from nltk.stem import PorterStemmer
from nltk.corpus import stopwords
from nltk.stem.snowball import SnowballStemmer
import re
import json
import time
import threading
import Stemmer
# GLOBAL VARIABLES
total_tokens = 0
indexed_tokens = 0
start_time = time.time()
threads = []
end_time = 0
CHUNK = 1000
stem_words = {}
all_stopwords = stopwords.words('english')
# ss = SnowballStemmer("english")
stemmer = Stemmer.Stemmer('english')
output_folder = ""
stat_path = ""
STAT_FILE = ""
INDEX_FILE_PATH = ""
'''
Function to create new directories
'''
#function to create directories to store results
def create_directory(folder_path):
my_path = os.getcwd()
my_path = my_path + '/' +folder_path
if not os.path.exists(my_path):
os.makedirs(my_path)
return my_path
def dummy(n):
print("thread ", n)
time.sleep(10)
print("slept 10 for",n)
'''
Class handler to manage and parse
the XML wiki data accordingly.
'''
class WikiHandler(xml.sax.ContentHandler):
def __init__(self):
self.CurrentData = ""
self.data = ""
self.page_count = 0
self.all_titles = []
self.title = ''
self.text = ''
self.index = {}
self.id = ''
self.id_capture = False
self.page_titles = []
self.page_texts = []
self.page_nos = []
# Call when an element starts
def startElement(self, tag, attributes):
self.CurrentData = tag
if tag == "page":
self.data = ''
if tag == "text":
self.data = ''
if tag == 'id':
self.data = ''
# Call when an elements ends
def endElement(self, tag):
if tag == "page":
self.page_titles.append(self.title)
self.page_texts.append(self.text)
self.page_nos.append(self.id)
self.page_count+=1
self.id_capture = False
#create a new thread for every CHUNK pages
if(self.page_count%CHUNK == 0):
print("new thread for ", self.page_count, "...")
t = threading.Thread(target=process_chunk_pages, args=(self.page_titles, self.page_texts, self.page_nos, self.index,self.page_count,))
threads.append(t)
t.start()
#reset 1000 page arrays
self.page_titles = []
self.page_texts = []
self.page_nos = []
elif tag == "title":
self.title = self.data
self.all_titles.append(self.title)
self.data = ''
elif tag == "text":
self.text = self.data
self.data = ''
elif tag == 'id':
if not self.id_capture:
self.id = self.data
self.data = ''
self.id_capture = True
elif tag == 'mediawiki':
print("new thread for ", self.page_count, "...")
t = threading.Thread(target=process_chunk_pages, args=(self.page_titles, self.page_texts, self.page_nos, self.index,self.page_count,))
threads.append(t)
t.start()
#reset 1000 page arrays
self.page_titles = []
self.page_texts = []
self.page_nos = []
#collect all threads
for t in threads:
t.join()
print("Time to index = ", time.time() - start_time)
write_to_file(self.index, self.all_titles)
self.index = {}
self.all_titles = []
print("Done")
print("Total required Time = ", time.time() - start_time)
# Call when a character is read
def characters(self, content):
self.data += content
'''
Function to process CHUNK sized pages at a time
Each CHUNK will be processed by an individual thread.
'''
def process_chunk_pages(title, text, number, index,num):
t0 = time.time()
for i in range(len(title)):
create_index(title[i],text[i],number[i], index)
print("Finished processing for ---", num, "in : ", time.time()-t0)
'''
Function to process text for further use
Includes : case folding, tokenization, stop
words removal, and stemming.
'''
def process_text(text,count_tokens=False):
processed = []
#case folding : conver to lower case
text = text.lower()
# tokenize by splitting text
tokenized_text = re.split(r'[^A-Za-z0-9]+', text)
tokenized_text = ' '.join(tokenized_text).split()
#stop words removal
tokens_without_sw = [token for token in tokenized_text if not token in all_stopwords]
#stemming : check if the word already exists
# in the stem_words set. if does, then use, else stem
for token in tokens_without_sw:
if token in stem_words:
stemmed = stem_words[token]
else:
# stemmed = ss.stem(token)
stemmed = stemmer.stemWord(token)
stem_words[token]=stemmed
processed.append(stemmed)
#add to total tokens in the corpus
if count_tokens:
global total_tokens
total_tokens+=len(tokenized_text)
return(processed)
'''
Function to extract the infobox from the
pages of the wikipedia dump
'''
def get_infobox(text):
ind = [m.start() for m in re.finditer(r'{{Infobox|{{infobox|{{ Infobox| {{ infobox', text)]
ans = []
for i in ind:
close = False
counter = 0
end = -1
for j in range(i, len(text)-1):
if text[j]=='}' and text[j+1] =='}':
counter-=1
elif text[j]=='{' and text[j+1] =='{':
counter+=1
if counter == 0:
end=j+1
break
ans+= process_text(text[i:end+1])
return ans
'''
Function to extract the categoris, external links,
and the references from the body of the page and
process them individually as well.
'''
def split_components(text):
lis = re.split(r"\[\[Category|\[\[ Category", text,1)
#storing the value for cateogories
if len(lis)==1:
category=''
else:
category = lis[1]
lis = re.split(r"==External links==|== External links ==", lis[0],1)
#storing the value for external links
if len(lis)==1:
links = ''
else:
links = lis[1]
lis = re.split(r"==References==|== References ==|== references ==|==references==", lis[0],1)
#storing the value of references
if len(lis)==1:
references = ''
else:
references = lis[1]
return category, links, references
'''
Function to create the inverted index
'''
def create_index(title, text, doc_no, index):
c,r,l = split_components(text)
processed_components = []
processed_components.append(process_text(title,True))
try:
processed_components.append(process_text(text,True))
except:
pass
processed_components.append(process_text(c))
processed_components.append(get_infobox(text))
processed_components.append(process_text(r))
processed_components.append(process_text(l))
add_to_index(doc_no,processed_components, index)
'''
Function to append an entry to the index object.
'''
def add_to_index(doc_no,processed_components,index):
for i in range(len(processed_components)):
processed_tokens = processed_components[i]
field = i+1
for token in processed_tokens:
if(token == ""):
continue
freq_values = [0, 0, 0, 0, 0, 0, 0]
if token not in index:
freq_values[field] += 1
freq_values[0] += 1
index[token] = {}
index[token][doc_no] = freq_values
else:
if doc_no not in index[token]:
freq_values[field] += 1
freq_values[0] += 1
index[token][doc_no] = freq_values
else:
index[token][doc_no][field]+=1
index[token][doc_no][0]+=1
def write_to_file(index, titles):
#write statistics into file
statistics = str(total_tokens)+"\n"+str(len(index))
with open(STAT_FILE, "w") as file:
file.write(statistics)
#write inverted index into file
print("writing to file ...")
ftype = ['f','t', 'b', 'c', 'i', 'r', 'e']
with open(INDEX_FILE_PATH,'w') as f:
data = ""
for key, docs in sorted(index.items()):
#to reduce index size
if (len(key))>27 or len(index[key])<=1:
continue
data += str(key)+":"
for doc,values in index[key].items():
data+="d"+str(doc)
for i in range(len(values)):
if values[i]>0:
data+=str(ftype[i]) + str(values[i])
data+="\n"
f.write(data)
if ( __name__ == "__main__"):
xml_file = sys.argv[1]
output_folder = sys.argv[2]
STAT_FILE = sys.argv[3]
#create stat directory
stat_dir = stat_path.rsplit('/',1)
if len(stat_dir)>1:
create_directory(stat_dir[0])
INDEX_FILE_PATH = output_folder+'index.txt'
# create an XMLReader
parser = xml.sax.make_parser()
# turn off namepsaces
parser.setFeature(xml.sax.handler.feature_namespaces, 0)
# override the default ContextHandler
Handler = WikiHandler()
parser.setContentHandler( Handler )
parser.parse(xml_file)
|
[
"threading.Thread",
"re.split",
"os.makedirs",
"os.getcwd",
"re.finditer",
"os.path.exists",
"time.sleep",
"time.time",
"nltk.corpus.stopwords.words",
"Stemmer.Stemmer"
] |
[((492, 503), 'time.time', 'time.time', ([], {}), '()\n', (501, 503), False, 'import time\n'), ((575, 601), 'nltk.corpus.stopwords.words', 'stopwords.words', (['"""english"""'], {}), "('english')\n", (590, 601), False, 'from nltk.corpus import stopwords\n'), ((646, 672), 'Stemmer.Stemmer', 'Stemmer.Stemmer', (['"""english"""'], {}), "('english')\n", (661, 672), False, 'import Stemmer\n'), ((886, 897), 'os.getcwd', 'os.getcwd', ([], {}), '()\n', (895, 897), False, 'import os\n'), ((1067, 1081), 'time.sleep', 'time.sleep', (['(10)'], {}), '(10)\n', (1077, 1081), False, 'import time\n'), ((4189, 4200), 'time.time', 'time.time', ([], {}), '()\n', (4198, 4200), False, 'import time\n'), ((4673, 4704), 're.split', 're.split', (['"""[^A-Za-z0-9]+"""', 'text'], {}), "('[^A-Za-z0-9]+', text)\n", (4681, 4704), False, 'import re\n'), ((6255, 6306), 're.split', 're.split', (['"""\\\\[\\\\[Category|\\\\[\\\\[ Category"""', 'text', '(1)'], {}), "('\\\\[\\\\[Category|\\\\[\\\\[ Category', text, 1)\n", (6263, 6306), False, 'import re\n'), ((6429, 6491), 're.split', 're.split', (['"""==External links==|== External links =="""', 'lis[0]', '(1)'], {}), "('==External links==|== External links ==', lis[0], 1)\n", (6437, 6491), False, 'import re\n'), ((6621, 6711), 're.split', 're.split', (['"""==References==|== References ==|== references ==|==references=="""', 'lis[0]', '(1)'], {}), "('==References==|== References ==|== references ==|==references==',\n lis[0], 1)\n", (6629, 6711), False, 'import re\n'), ((950, 973), 'os.path.exists', 'os.path.exists', (['my_path'], {}), '(my_path)\n', (964, 973), False, 'import os\n'), ((983, 1003), 'os.makedirs', 'os.makedirs', (['my_path'], {}), '(my_path)\n', (994, 1003), False, 'import os\n'), ((4345, 4356), 'time.time', 'time.time', ([], {}), '()\n', (4354, 4356), False, 'import time\n'), ((5569, 5632), 're.finditer', 're.finditer', (['"""{{Infobox|{{infobox|{{ Infobox| {{ infobox"""', 'text'], {}), "('{{Infobox|{{infobox|{{ Infobox| {{ infobox', 
text)\n", (5580, 5632), False, 'import re\n'), ((2330, 2465), 'threading.Thread', 'threading.Thread', ([], {'target': 'process_chunk_pages', 'args': '(self.page_titles, self.page_texts, self.page_nos, self.index, self.page_count)'}), '(target=process_chunk_pages, args=(self.page_titles, self.\n page_texts, self.page_nos, self.index, self.page_count))\n', (2346, 2465), False, 'import threading\n'), ((3217, 3352), 'threading.Thread', 'threading.Thread', ([], {'target': 'process_chunk_pages', 'args': '(self.page_titles, self.page_texts, self.page_nos, self.index, self.page_count)'}), '(target=process_chunk_pages, args=(self.page_titles, self.\n page_texts, self.page_nos, self.index, self.page_count))\n', (3233, 3352), False, 'import threading\n'), ((3666, 3677), 'time.time', 'time.time', ([], {}), '()\n', (3675, 3677), False, 'import time\n'), ((3878, 3889), 'time.time', 'time.time', ([], {}), '()\n', (3887, 3889), False, 'import time\n')]
|
from django.contrib import admin
from .models import Category, Movie, Comment
class CategoryAdmin(admin.ModelAdmin):
list_display = ['name', 'slug']
prepopulated_fields = {'slug': ('name',)}
admin.site.register(Category, CategoryAdmin)
class FilmAdmin(admin.ModelAdmin):
list_display = ['name', 'slug', 'year', 'film_director', 'trailer', 'after_premiere', 'created_at', 'updated_at']
list_filter = ['after_premiere', 'created_at', 'updated_at']
list_editable = ['year', 'film_director', 'after_premiere']
prepopulated_fields = {'slug': ('name',)}
admin.site.register(Movie, FilmAdmin)
class CommentAdmin(admin.ModelAdmin):
list_display = ('name', 'comment')
admin.site.register(Comment, CommentAdmin)
|
[
"django.contrib.admin.site.register"
] |
[((202, 246), 'django.contrib.admin.site.register', 'admin.site.register', (['Category', 'CategoryAdmin'], {}), '(Category, CategoryAdmin)\n', (221, 246), False, 'from django.contrib import admin\n'), ((578, 615), 'django.contrib.admin.site.register', 'admin.site.register', (['Movie', 'FilmAdmin'], {}), '(Movie, FilmAdmin)\n', (597, 615), False, 'from django.contrib import admin\n'), ((695, 737), 'django.contrib.admin.site.register', 'admin.site.register', (['Comment', 'CommentAdmin'], {}), '(Comment, CommentAdmin)\n', (714, 737), False, 'from django.contrib import admin\n')]
|
# Initial setup following http://docs.chainer.org/en/stable/tutorial/basic.html
import numpy as np
import chainer
from chainer import cuda, Function, gradient_check, report, training, utils, Variable
from chainer import datasets, iterators, optimizers, serializers
from chainer import Link, Chain, ChainList
import chainer.functions as F
import chainer.links as L
from chainer.training import extensions
import matplotlib.pyplot as plt
# Defining your own neural networks using `Chain` class
class MyChain(Chain):
def __init__(self):
super(MyChain, self).__init__(
# 第一个参数设为None,可以根据第一次输入的变量来确定他的大小
l1=L.Linear(None, 30),
l2=L.Linear(None, 30),
l3=L.Linear(None, 1)
)
def __call__(self, x):
h = self.l1(x)
h = self.l2(F.sigmoid(h))
return self.l3(F.sigmoid(h))
# Setup a model
model = MyChain()
model_save_path = 'mlp.model'
print('Loading model')
# --- use NPZ format ---
serializers.load_npz(model_save_path, model)
# --- use HDF5 format (need h5py library) ---
# %timeit serializers.load_hdf5(model_save_path, model)
# define target function
def target_func(x):
"""Target function to be predicted"""
return x ** 3 - x ** 2 + x ** -1 + x
# create efficient function to calculate target_func of numpy array in element wise
target_func_elementwise = np.frompyfunc(target_func, 1, 1)
# define data domain [xmin, xmax]
xmin = -3
xmax = 3
# number of training data
sample_num = 20
# calculate new data from model (predict value)
x_test_data = np.array(np.random.rand(sample_num) * (xmax - xmin) + xmin) # create 20
x_test = Variable(x_test_data.reshape(-1, 1).astype(np.float32))
y_test_data = model(x_test).data # this is predicted value
# calculate target function (true value)
x_detail_data = np.array(np.arange(xmin, xmax, 0.1))
y_detail_data = target_func_elementwise(x_detail_data)
plt.clf()
# plot model predict data
plt.scatter(x_test_data, y_test_data, color='k', label='Model predict value')
# plot target function
plt.plot(x_detail_data, y_detail_data, label='True value')
plt.legend(loc='lower right')
plt.show()
|
[
"matplotlib.pyplot.show",
"matplotlib.pyplot.plot",
"chainer.serializers.load_npz",
"matplotlib.pyplot.clf",
"matplotlib.pyplot.scatter",
"matplotlib.pyplot.legend",
"numpy.frompyfunc",
"numpy.arange",
"chainer.functions.sigmoid",
"numpy.random.rand",
"chainer.links.Linear"
] |
[((976, 1020), 'chainer.serializers.load_npz', 'serializers.load_npz', (['model_save_path', 'model'], {}), '(model_save_path, model)\n', (996, 1020), False, 'from chainer import datasets, iterators, optimizers, serializers\n'), ((1366, 1398), 'numpy.frompyfunc', 'np.frompyfunc', (['target_func', '(1)', '(1)'], {}), '(target_func, 1, 1)\n', (1379, 1398), True, 'import numpy as np\n'), ((1908, 1917), 'matplotlib.pyplot.clf', 'plt.clf', ([], {}), '()\n', (1915, 1917), True, 'import matplotlib.pyplot as plt\n'), ((1944, 2021), 'matplotlib.pyplot.scatter', 'plt.scatter', (['x_test_data', 'y_test_data'], {'color': '"""k"""', 'label': '"""Model predict value"""'}), "(x_test_data, y_test_data, color='k', label='Model predict value')\n", (1955, 2021), True, 'import matplotlib.pyplot as plt\n'), ((2045, 2103), 'matplotlib.pyplot.plot', 'plt.plot', (['x_detail_data', 'y_detail_data'], {'label': '"""True value"""'}), "(x_detail_data, y_detail_data, label='True value')\n", (2053, 2103), True, 'import matplotlib.pyplot as plt\n'), ((2104, 2133), 'matplotlib.pyplot.legend', 'plt.legend', ([], {'loc': '"""lower right"""'}), "(loc='lower right')\n", (2114, 2133), True, 'import matplotlib.pyplot as plt\n'), ((2134, 2144), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (2142, 2144), True, 'import matplotlib.pyplot as plt\n'), ((1824, 1850), 'numpy.arange', 'np.arange', (['xmin', 'xmax', '(0.1)'], {}), '(xmin, xmax, 0.1)\n', (1833, 1850), True, 'import numpy as np\n'), ((810, 822), 'chainer.functions.sigmoid', 'F.sigmoid', (['h'], {}), '(h)\n', (819, 822), True, 'import chainer.functions as F\n'), ((847, 859), 'chainer.functions.sigmoid', 'F.sigmoid', (['h'], {}), '(h)\n', (856, 859), True, 'import chainer.functions as F\n'), ((1568, 1594), 'numpy.random.rand', 'np.random.rand', (['sample_num'], {}), '(sample_num)\n', (1582, 1594), True, 'import numpy as np\n'), ((641, 659), 'chainer.links.Linear', 'L.Linear', (['None', '(30)'], {}), '(None, 30)\n', (649, 659), True, 'import 
chainer.links as L\n'), ((676, 694), 'chainer.links.Linear', 'L.Linear', (['None', '(30)'], {}), '(None, 30)\n', (684, 694), True, 'import chainer.links as L\n'), ((711, 728), 'chainer.links.Linear', 'L.Linear', (['None', '(1)'], {}), '(None, 1)\n', (719, 728), True, 'import chainer.links as L\n')]
|
from django import forms
from contacts.models import Contact
from common.models import Comment
class ContactForm(forms.ModelForm):
def __init__(self, *args, **kwargs):
assigned_users = kwargs.pop('assigned_to', [])
contact_org = kwargs.pop('organization', [])
super(ContactForm, self).__init__(*args, **kwargs)
for field in self.fields.values():
field.widget.attrs = {"class": "form-control"}
self.fields['description'].widget.attrs.update({
'rows': '6'})
self.fields['assigned_to'].queryset = assigned_users
self.fields['organization'].queryset = contact_org
self.fields['organization'].required = False
self.fields['assigned_to'].required = False
self.fields['teams'].required = False
self.fields['title'].required = False
class Meta:
model = Contact
fields = (
'assigned_to', 'organization', 'title','teams', 'first_name', 'last_name', 'email', 'phone', 'address', 'description'
)
def format_phone(phone):
phone_length = len(phone)
if phone_length == 11:
new_phone = phone[:1] + ' (' + phone[1:4] + ') ' + phone[4:7] + '-' + phone[7:]
elif phone_length == 12:
new_phone = phone[:2] + ' (' + phone[2:5] + ') ' + phone[5:8] + '-' + phone[8:]
elif phone_length == 13:
new_phone = phone[:3] + ' (' + phone[3:6] + ') ' + phone[6:9] + '-' + phone[9:]
else:
new_phone = '(' + phone[0:3] + ') ' + phone[3:6] + '-' + phone[6:]
return phone
def clean_phone(self):
client_phone = self.cleaned_data.get('phone', None)
try:
if int(client_phone) and not client_phone.isalpha():
ph_length = str(client_phone)
if len(ph_length) < 10 or len(ph_length) > 13:
raise forms.ValidationError('Phone number must be minimum 10 Digits and maximum of 13 Digits')
except (ValueError):
raise forms.ValidationError('Phone Number should contain only Numbers')
# COMMENTED OUT BECAUSE FILTER WON'T FIND NUMBERS I.E. 7035011932 -> (703) 501-1932, FILTER WON'T FIND IF USER ENTERS 703501...
# phone_length = len(client_phone)
# if phone_length == 11:
# new_phone = client_phone[:1] + ' (' + client_phone[1:4] + ') ' + client_phone[4:7] + '-' + client_phone[7:]
# elif phone_length == 12:
# new_phone = client_phone[:2] + ' (' + client_phone[2:5] + ') ' + client_phone[5:8] + '-' + client_phone[8:]
# elif phone_length == 13:
# new_phone = client_phone[:3] + ' (' + client_phone[3:6] + ') ' + client_phone[6:9] + '-' + client_phone[9:]
# else:
# new_phone = '(' + client_phone[0:3] + ') ' + client_phone[3:6] + '-' + client_phone[6:]
#
# client_phone = new_phone
return client_phone
class ContactCommentForm(forms.ModelForm):
comment = forms.CharField(max_length=64, required=True)
class Meta:
model = Comment
fields = ('comment', 'contact', 'commented_by')
|
[
"django.forms.ValidationError",
"django.forms.CharField"
] |
[((3063, 3108), 'django.forms.CharField', 'forms.CharField', ([], {'max_length': '(64)', 'required': '(True)'}), '(max_length=64, required=True)\n', (3078, 3108), False, 'from django import forms\n'), ((2080, 2145), 'django.forms.ValidationError', 'forms.ValidationError', (['"""Phone Number should contain only Numbers"""'], {}), "('Phone Number should contain only Numbers')\n", (2101, 2145), False, 'from django import forms\n'), ((1942, 2035), 'django.forms.ValidationError', 'forms.ValidationError', (['"""Phone number must be minimum 10 Digits and maximum of 13 Digits"""'], {}), "(\n 'Phone number must be minimum 10 Digits and maximum of 13 Digits')\n", (1963, 2035), False, 'from django import forms\n')]
|
import nibabel as nib
import glob
import os
import numpy as np
import tensorlayer as tl
'''
Before normalization, run N4 bias correction (https://www.ncbi.nlm.nih.gov/pubmed/20378467),
then save the data under folder ./CamCAN_unbiased/CamCAN
'''
modalities = ['T1w', 'T2w']
BraTS_modalities = ['T1w']
folders = ['HGG', 'LGG']
wd = './Data/CamCAN_unbiased/CamCAN'
thumbnail_idx = [60, 70, 80, 90]
for mod in modalities:
wd_mod = os.path.join(wd, str(mod))
os.chdir(wd_mod)
img_files = [i for i in glob.glob("*") if "_unbiased" in i]
for img in img_files:
print(img)
img_data = nib.load(img)
img_data = img_data.get_data()
mask = img.split("_unbiased")[0] + "_brain_mask.nii.gz"
mask_data = nib.load(mask).get_data()
img_data = np.transpose(img_data, [2, 0, 1])
mask_data = np.transpose(mask_data, [2, 0, 1])
idx = [s for s in range(img_data.shape[0]) if mask_data[s].sum() > 1]
img_data = img_data[idx, :, 17:215]
mask_data = mask_data[idx, :, 17:215]
img_data = np.pad(img_data, ((0, 0), (1, 2), (1, 1)), mode='edge')
mask_data = np.pad(mask_data, ((0, 0), (1, 2), (1, 1)), mode='edge')
img_data = np.rot90(img_data, 1, (2, 1))
mask_data = np.rot90(mask_data, 1, (2, 1))
ref_mean = np.mean(img_data[mask_data == 1])
ref_std = np.std(img_data[mask_data == 1])
normed_img = (img_data - ref_mean) / ref_std
normed_img[normed_img == normed_img.min()] = -3.5
x_nif = nib.Nifti1Image(normed_img, np.eye(4))
nib.save(x_nif, os.path.join(img.split("_unbiased")[0] + "_normalized_cropped_mask.nii.gz"))
x_nif = nib.Nifti1Image(mask_data, np.eye(4))
nib.save(x_nif, os.path.join(img.split("_unbiased")[0] + "_mask_cropped_mask.nii.gz"))
tl.visualize.save_images(normed_img[thumbnail_idx, :, :, np.newaxis], [2, 2],
"/scratch_net/bmicdl01/Data/CamCAN_unbiased/preview/" + str(mod)
+ "/" + img.split("_unbiased")[0] + "_normed_img.png")
print("---")
|
[
"numpy.pad",
"nibabel.load",
"numpy.std",
"numpy.transpose",
"numpy.rot90",
"numpy.mean",
"glob.glob",
"numpy.eye",
"os.chdir"
] |
[((468, 484), 'os.chdir', 'os.chdir', (['wd_mod'], {}), '(wd_mod)\n', (476, 484), False, 'import os\n'), ((614, 627), 'nibabel.load', 'nib.load', (['img'], {}), '(img)\n', (622, 627), True, 'import nibabel as nib\n'), ((797, 830), 'numpy.transpose', 'np.transpose', (['img_data', '[2, 0, 1]'], {}), '(img_data, [2, 0, 1])\n', (809, 830), True, 'import numpy as np\n'), ((851, 885), 'numpy.transpose', 'np.transpose', (['mask_data', '[2, 0, 1]'], {}), '(mask_data, [2, 0, 1])\n', (863, 885), True, 'import numpy as np\n'), ((1075, 1130), 'numpy.pad', 'np.pad', (['img_data', '((0, 0), (1, 2), (1, 1))'], {'mode': '"""edge"""'}), "(img_data, ((0, 0), (1, 2), (1, 1)), mode='edge')\n", (1081, 1130), True, 'import numpy as np\n'), ((1151, 1207), 'numpy.pad', 'np.pad', (['mask_data', '((0, 0), (1, 2), (1, 1))'], {'mode': '"""edge"""'}), "(mask_data, ((0, 0), (1, 2), (1, 1)), mode='edge')\n", (1157, 1207), True, 'import numpy as np\n'), ((1227, 1256), 'numpy.rot90', 'np.rot90', (['img_data', '(1)', '(2, 1)'], {}), '(img_data, 1, (2, 1))\n', (1235, 1256), True, 'import numpy as np\n'), ((1277, 1307), 'numpy.rot90', 'np.rot90', (['mask_data', '(1)', '(2, 1)'], {}), '(mask_data, 1, (2, 1))\n', (1285, 1307), True, 'import numpy as np\n'), ((1328, 1361), 'numpy.mean', 'np.mean', (['img_data[mask_data == 1]'], {}), '(img_data[mask_data == 1])\n', (1335, 1361), True, 'import numpy as np\n'), ((1380, 1412), 'numpy.std', 'np.std', (['img_data[mask_data == 1]'], {}), '(img_data[mask_data == 1])\n', (1386, 1412), True, 'import numpy as np\n'), ((513, 527), 'glob.glob', 'glob.glob', (['"""*"""'], {}), "('*')\n", (522, 527), False, 'import glob\n'), ((1570, 1579), 'numpy.eye', 'np.eye', (['(4)'], {}), '(4)\n', (1576, 1579), True, 'import numpy as np\n'), ((1726, 1735), 'numpy.eye', 'np.eye', (['(4)'], {}), '(4)\n', (1732, 1735), True, 'import numpy as np\n'), ((751, 765), 'nibabel.load', 'nib.load', (['mask'], {}), '(mask)\n', (759, 765), True, 'import nibabel as nib\n')]
|
# -*- coding: utf-8 -*-
# @Author: yulidong
# @Date: 2018-08-30 16:47:51
# @Last Modified by: yulidong
# @Last Modified time: 2018-08-30 21:13:04
import torch
import torch.multiprocessing as mp
import time
def add(a,b,c):
start=time.time()
d=a+b
c+=d
print(time.time()-start)
def selfadd(a):
print('a')
a+=2
print(a)
if __name__=='__main__':
mp.set_start_method('forkserver')
c=torch.zeros(1,1,5).float().cuda().share_memory_()
a=torch.arange(5).float().cuda().view_as(c).share_memory_()
b=torch.arange(5).float().cuda().view_as(c).share_memory_()
#c=torch.ones(1).share_memory_()
process=[]
start=time.time()
for i in range(5):
p=mp.Process(target=add,args=[a[:,:,i],b[:,:,i],c[:,:,i],])
#p=mp.Process(target=selfadd,args=(c))
p.daemon=True
p.start()
process.append(p)
for p in process:
p.join()
# p=mp.Process(target=add,args=[a,b,c,])
# p.start()
# p.join()
print('running')
print(a,b,c)
print(time.time()-start)
|
[
"time.time",
"torch.multiprocessing.set_start_method",
"torch.multiprocessing.Process",
"torch.arange",
"torch.zeros"
] |
[((236, 247), 'time.time', 'time.time', ([], {}), '()\n', (245, 247), False, 'import time\n'), ((378, 411), 'torch.multiprocessing.set_start_method', 'mp.set_start_method', (['"""forkserver"""'], {}), "('forkserver')\n", (397, 411), True, 'import torch.multiprocessing as mp\n'), ((658, 669), 'time.time', 'time.time', ([], {}), '()\n', (667, 669), False, 'import time\n'), ((703, 768), 'torch.multiprocessing.Process', 'mp.Process', ([], {'target': 'add', 'args': '[a[:, :, i], b[:, :, i], c[:, :, i]]'}), '(target=add, args=[a[:, :, i], b[:, :, i], c[:, :, i]])\n', (713, 768), True, 'import torch.multiprocessing as mp\n'), ((277, 288), 'time.time', 'time.time', ([], {}), '()\n', (286, 288), False, 'import time\n'), ((1037, 1048), 'time.time', 'time.time', ([], {}), '()\n', (1046, 1048), False, 'import time\n'), ((418, 438), 'torch.zeros', 'torch.zeros', (['(1)', '(1)', '(5)'], {}), '(1, 1, 5)\n', (429, 438), False, 'import torch\n'), ((474, 489), 'torch.arange', 'torch.arange', (['(5)'], {}), '(5)\n', (486, 489), False, 'import torch\n'), ((538, 553), 'torch.arange', 'torch.arange', (['(5)'], {}), '(5)\n', (550, 553), False, 'import torch\n')]
|
from django.db import models
from django.utils import timezone
class NflConference(models.Model):
name = models.TextField()
abbreviation = models.TextField(max_length=5)
def __unicode__(self):
return self.name
class NflDivision(models.Model):
name = models.TextField()
conference = models.ForeignKey(NflConference)
def __unicode__(self):
return self.name
class NflTeam(models.Model):
"""NFL Team"""
name = models.TextField()
abbreviation = models.TextField(max_length=5)
city = models.TextField()
division = models.ForeignKey(NflDivision)
def __unicode__(self):
return u"{a} - {c} {n}" \
.format(c=self.city, n=self.name, a=self.abbreviation)
class NflPosition(models.Model):
"""Football position e.g. RB, QB, S"""
description = models.TextField()
abbreviation = models.TextField(max_length=4)
def __unicode__(self):
return self.description
class FantasyPosition(models.Model):
"""Fantasy position - a simple subset of NflPositions"""
position = models.ForeignKey(NflPosition)
def __unicode__(self):
return unicode(self.position)
class College(models.Model):
"""A NCAA College"""
name = models.TextField(max_length=30)
def __unicode__(self):
return self.name
class NflPlayer(models.Model):
"""Draft-eligible NFL player"""
first_name = models.TextField()
last_name = models.TextField()
draft_year = models.PositiveIntegerField(default=1)
team = models.ForeignKey(NflTeam)
school = models.ForeignKey(College)
position = models.ForeignKey(NflPosition)
fantasy_position = models.ForeignKey(FantasyPosition)
def __unicode__(self):
return u"{f} {l}".format(f=self.first_name, l=self.last_name)
class ExternalDatabase(models.Model):
"""An external player DB ie ESPN or Yahoo"""
name = models.TextField(max_length=20)
description = models.TextField(max_length=200)
homepage = models.URLField()
def __unicode__(self):
return self.name
class ExternalNflPlayer(models.Model):
"""Link to an external database's player info"""
player = models.ForeignKey(NflPlayer)
db = models.ForeignKey(ExternalDatabase)
external_id = models.IntegerField()
url = models.URLField()
picture = models.URLField()
class ExternalNflTeam(models.Model):
"""Link to an external database's team info"""
team = models.ForeignKey(NflTeam)
db = models.ForeignKey(ExternalDatabase)
external_id = models.IntegerField()
url = models.URLField()
class FantasyRoster(models.Model):
description = models.TextField() ## TODO: this should be more than a text field?
slots = models.PositiveIntegerField()
def __unicode__(self):
return self.description
class FantasyDraft(models.Model):
name = models.TextField(max_length=20)
admin = models.EmailField()
draft_start = models.DateTimeField()
time_per_pick = models.PositiveIntegerField()
team_limit = models.PositiveIntegerField()
roster = models.ForeignKey(FantasyRoster)
password = models.TextField(max_length=32, null=True, blank=True)
def __unicode__(self):
return self.name
def picks(self):
return FantasyPick.objects.filter(fantasy_team__draft=self)
def is_active(self, time):
"""A draft is active if any picks are active"""
for p in self.picks():
if p.is_active(time):
return True
return False
class FantasyTeam(models.Model):
draft = models.ForeignKey(FantasyDraft)
name = models.TextField(max_length=80)
email = models.EmailField()
auth_key = models.TextField(max_length=40) # len(uuid.uuid4) == 36
def __unicode__(self):
return self.name
def picks(self):
return FantasyPick.objects.filter(fantasy_team=self)
def remove_picks(self):
self.picks().delete()
class FantasySeason(models.Model):
year = models.TextField(max_length=10) # 2013-2014
def __unicode__(self):
return u"{} Fantasy Season".format(self.year)
class MockDraft(models.Model):
"""Ties together an existing fantasy team & a separate draft"""
owner = models.ForeignKey(FantasyTeam)
draft = models.ForeignKey(FantasyDraft)
class MockDraftBot(models.Model):
season = models.ForeignKey(FantasySeason)
draft = models.ForeignKey(FantasyDraft)
team = models.ForeignKey(FantasyTeam)
brain = models.TextField(max_length=12)
def __unicode__(self):
return u"{} bot for the {}".format(self.brain, self.season)
class FantasyPick(models.Model):
"""An upcoming pick"""
fantasy_team = models.ForeignKey(FantasyTeam)
starts = models.DateTimeField('starts at')
expires = models.DateTimeField('expires at')
pick_number = models.PositiveIntegerField()
class Meta:
ordering = ('pick_number',)
def __unicode__(self):
return u"{d} - Pick {n} - {t}" \
.format(d=self.fantasy_team.draft.name, n=self.pick_number,
t=self.fantasy_team.name)
def is_active(self, time):
"""Returns whether the time is between start & expire"""
return self.starts <= time and \
self.expires >= time
class FantasySelection(models.Model):
"""A pick that's been made"""
when = models.DateTimeField(default=timezone.now())
draft_pick = models.ForeignKey(FantasyPick)
player = models.ForeignKey(NflPlayer)
|
[
"django.db.models.TextField",
"django.db.models.URLField",
"django.db.models.ForeignKey",
"django.utils.timezone.now",
"django.db.models.PositiveIntegerField",
"django.db.models.EmailField",
"django.db.models.IntegerField",
"django.db.models.DateTimeField"
] |
[((110, 128), 'django.db.models.TextField', 'models.TextField', ([], {}), '()\n', (126, 128), False, 'from django.db import models\n'), ((148, 178), 'django.db.models.TextField', 'models.TextField', ([], {'max_length': '(5)'}), '(max_length=5)\n', (164, 178), False, 'from django.db import models\n'), ((278, 296), 'django.db.models.TextField', 'models.TextField', ([], {}), '()\n', (294, 296), False, 'from django.db import models\n'), ((314, 346), 'django.db.models.ForeignKey', 'models.ForeignKey', (['NflConference'], {}), '(NflConference)\n', (331, 346), False, 'from django.db import models\n'), ((461, 479), 'django.db.models.TextField', 'models.TextField', ([], {}), '()\n', (477, 479), False, 'from django.db import models\n'), ((499, 529), 'django.db.models.TextField', 'models.TextField', ([], {'max_length': '(5)'}), '(max_length=5)\n', (515, 529), False, 'from django.db import models\n'), ((541, 559), 'django.db.models.TextField', 'models.TextField', ([], {}), '()\n', (557, 559), False, 'from django.db import models\n'), ((575, 605), 'django.db.models.ForeignKey', 'models.ForeignKey', (['NflDivision'], {}), '(NflDivision)\n', (592, 605), False, 'from django.db import models\n'), ((831, 849), 'django.db.models.TextField', 'models.TextField', ([], {}), '()\n', (847, 849), False, 'from django.db import models\n'), ((869, 899), 'django.db.models.TextField', 'models.TextField', ([], {'max_length': '(4)'}), '(max_length=4)\n', (885, 899), False, 'from django.db import models\n'), ((1075, 1105), 'django.db.models.ForeignKey', 'models.ForeignKey', (['NflPosition'], {}), '(NflPosition)\n', (1092, 1105), False, 'from django.db import models\n'), ((1238, 1269), 'django.db.models.TextField', 'models.TextField', ([], {'max_length': '(30)'}), '(max_length=30)\n', (1254, 1269), False, 'from django.db import models\n'), ((1408, 1426), 'django.db.models.TextField', 'models.TextField', ([], {}), '()\n', (1424, 1426), False, 'from django.db import models\n'), ((1443, 1461), 
'django.db.models.TextField', 'models.TextField', ([], {}), '()\n', (1459, 1461), False, 'from django.db import models\n'), ((1479, 1517), 'django.db.models.PositiveIntegerField', 'models.PositiveIntegerField', ([], {'default': '(1)'}), '(default=1)\n', (1506, 1517), False, 'from django.db import models\n'), ((1529, 1555), 'django.db.models.ForeignKey', 'models.ForeignKey', (['NflTeam'], {}), '(NflTeam)\n', (1546, 1555), False, 'from django.db import models\n'), ((1569, 1595), 'django.db.models.ForeignKey', 'models.ForeignKey', (['College'], {}), '(College)\n', (1586, 1595), False, 'from django.db import models\n'), ((1611, 1641), 'django.db.models.ForeignKey', 'models.ForeignKey', (['NflPosition'], {}), '(NflPosition)\n', (1628, 1641), False, 'from django.db import models\n'), ((1665, 1699), 'django.db.models.ForeignKey', 'models.ForeignKey', (['FantasyPosition'], {}), '(FantasyPosition)\n', (1682, 1699), False, 'from django.db import models\n'), ((1898, 1929), 'django.db.models.TextField', 'models.TextField', ([], {'max_length': '(20)'}), '(max_length=20)\n', (1914, 1929), False, 'from django.db import models\n'), ((1948, 1980), 'django.db.models.TextField', 'models.TextField', ([], {'max_length': '(200)'}), '(max_length=200)\n', (1964, 1980), False, 'from django.db import models\n'), ((1996, 2013), 'django.db.models.URLField', 'models.URLField', ([], {}), '()\n', (2011, 2013), False, 'from django.db import models\n'), ((2174, 2202), 'django.db.models.ForeignKey', 'models.ForeignKey', (['NflPlayer'], {}), '(NflPlayer)\n', (2191, 2202), False, 'from django.db import models\n'), ((2212, 2247), 'django.db.models.ForeignKey', 'models.ForeignKey', (['ExternalDatabase'], {}), '(ExternalDatabase)\n', (2229, 2247), False, 'from django.db import models\n'), ((2266, 2287), 'django.db.models.IntegerField', 'models.IntegerField', ([], {}), '()\n', (2285, 2287), False, 'from django.db import models\n'), ((2298, 2315), 'django.db.models.URLField', 'models.URLField', ([], {}), 
'()\n', (2313, 2315), False, 'from django.db import models\n'), ((2330, 2347), 'django.db.models.URLField', 'models.URLField', ([], {}), '()\n', (2345, 2347), False, 'from django.db import models\n'), ((2449, 2475), 'django.db.models.ForeignKey', 'models.ForeignKey', (['NflTeam'], {}), '(NflTeam)\n', (2466, 2475), False, 'from django.db import models\n'), ((2485, 2520), 'django.db.models.ForeignKey', 'models.ForeignKey', (['ExternalDatabase'], {}), '(ExternalDatabase)\n', (2502, 2520), False, 'from django.db import models\n'), ((2539, 2560), 'django.db.models.IntegerField', 'models.IntegerField', ([], {}), '()\n', (2558, 2560), False, 'from django.db import models\n'), ((2571, 2588), 'django.db.models.URLField', 'models.URLField', ([], {}), '()\n', (2586, 2588), False, 'from django.db import models\n'), ((2644, 2662), 'django.db.models.TextField', 'models.TextField', ([], {}), '()\n', (2660, 2662), False, 'from django.db import models\n'), ((2723, 2752), 'django.db.models.PositiveIntegerField', 'models.PositiveIntegerField', ([], {}), '()\n', (2750, 2752), False, 'from django.db import models\n'), ((2860, 2891), 'django.db.models.TextField', 'models.TextField', ([], {'max_length': '(20)'}), '(max_length=20)\n', (2876, 2891), False, 'from django.db import models\n'), ((2904, 2923), 'django.db.models.EmailField', 'models.EmailField', ([], {}), '()\n', (2921, 2923), False, 'from django.db import models\n'), ((2942, 2964), 'django.db.models.DateTimeField', 'models.DateTimeField', ([], {}), '()\n', (2962, 2964), False, 'from django.db import models\n'), ((2985, 3014), 'django.db.models.PositiveIntegerField', 'models.PositiveIntegerField', ([], {}), '()\n', (3012, 3014), False, 'from django.db import models\n'), ((3032, 3061), 'django.db.models.PositiveIntegerField', 'models.PositiveIntegerField', ([], {}), '()\n', (3059, 3061), False, 'from django.db import models\n'), ((3075, 3107), 'django.db.models.ForeignKey', 'models.ForeignKey', (['FantasyRoster'], {}), 
'(FantasyRoster)\n', (3092, 3107), False, 'from django.db import models\n'), ((3123, 3177), 'django.db.models.TextField', 'models.TextField', ([], {'max_length': '(32)', 'null': '(True)', 'blank': '(True)'}), '(max_length=32, null=True, blank=True)\n', (3139, 3177), False, 'from django.db import models\n'), ((3570, 3601), 'django.db.models.ForeignKey', 'models.ForeignKey', (['FantasyDraft'], {}), '(FantasyDraft)\n', (3587, 3601), False, 'from django.db import models\n'), ((3613, 3644), 'django.db.models.TextField', 'models.TextField', ([], {'max_length': '(80)'}), '(max_length=80)\n', (3629, 3644), False, 'from django.db import models\n'), ((3657, 3676), 'django.db.models.EmailField', 'models.EmailField', ([], {}), '()\n', (3674, 3676), False, 'from django.db import models\n'), ((3692, 3723), 'django.db.models.TextField', 'models.TextField', ([], {'max_length': '(40)'}), '(max_length=40)\n', (3708, 3723), False, 'from django.db import models\n'), ((3991, 4022), 'django.db.models.TextField', 'models.TextField', ([], {'max_length': '(10)'}), '(max_length=10)\n', (4007, 4022), False, 'from django.db import models\n'), ((4230, 4260), 'django.db.models.ForeignKey', 'models.ForeignKey', (['FantasyTeam'], {}), '(FantasyTeam)\n', (4247, 4260), False, 'from django.db import models\n'), ((4273, 4304), 'django.db.models.ForeignKey', 'models.ForeignKey', (['FantasyDraft'], {}), '(FantasyDraft)\n', (4290, 4304), False, 'from django.db import models\n'), ((4354, 4386), 'django.db.models.ForeignKey', 'models.ForeignKey', (['FantasySeason'], {}), '(FantasySeason)\n', (4371, 4386), False, 'from django.db import models\n'), ((4399, 4430), 'django.db.models.ForeignKey', 'models.ForeignKey', (['FantasyDraft'], {}), '(FantasyDraft)\n', (4416, 4430), False, 'from django.db import models\n'), ((4442, 4472), 'django.db.models.ForeignKey', 'models.ForeignKey', (['FantasyTeam'], {}), '(FantasyTeam)\n', (4459, 4472), False, 'from django.db import models\n'), ((4485, 4516), 
'django.db.models.TextField', 'models.TextField', ([], {'max_length': '(12)'}), '(max_length=12)\n', (4501, 4516), False, 'from django.db import models\n'), ((4694, 4724), 'django.db.models.ForeignKey', 'models.ForeignKey', (['FantasyTeam'], {}), '(FantasyTeam)\n', (4711, 4724), False, 'from django.db import models\n'), ((4738, 4771), 'django.db.models.DateTimeField', 'models.DateTimeField', (['"""starts at"""'], {}), "('starts at')\n", (4758, 4771), False, 'from django.db import models\n'), ((4786, 4820), 'django.db.models.DateTimeField', 'models.DateTimeField', (['"""expires at"""'], {}), "('expires at')\n", (4806, 4820), False, 'from django.db import models\n'), ((4839, 4868), 'django.db.models.PositiveIntegerField', 'models.PositiveIntegerField', ([], {}), '()\n', (4866, 4868), False, 'from django.db import models\n'), ((5421, 5451), 'django.db.models.ForeignKey', 'models.ForeignKey', (['FantasyPick'], {}), '(FantasyPick)\n', (5438, 5451), False, 'from django.db import models\n'), ((5465, 5493), 'django.db.models.ForeignKey', 'models.ForeignKey', (['NflPlayer'], {}), '(NflPlayer)\n', (5482, 5493), False, 'from django.db import models\n'), ((5388, 5402), 'django.utils.timezone.now', 'timezone.now', ([], {}), '()\n', (5400, 5402), False, 'from django.utils import timezone\n')]
|
import sys
def cnt(n):
m = str(n)
l = len(m)
if l == 1:
return n + 1
tot = 0
tot += pow(10, (l - 1) // 2) * (int(m[0]) - 1)
tot += pow(10, l // 2) - 1 - pow(10, l // 2 - 1) * (l & 1 ^ 1)
while l >= 2:
l -= 2
if l == 0:
tot += m[1] >= m[0]
elif l == 1:
tot += int(m[1]) + 1 - (m[-1] < m[0])
else:
m = str(int(m[1:-1]) - (m[-1] < m[0]))
m = "0" * (l - len(m)) + m
tot += int(m[0]) * pow(10, (l - 1) // 2)
return tot
a, b = map(int, sys.stdin.readline().split())
def main():
print(cnt(b) - cnt(a - 1))
if __name__ == "__main__":
main()
|
[
"sys.stdin.readline"
] |
[((590, 610), 'sys.stdin.readline', 'sys.stdin.readline', ([], {}), '()\n', (608, 610), False, 'import sys\n')]
|
from DLFrameWork.forward import NetWork
from DLFrameWork.dataset import FashionMNIST,DataLoader
if __name__ == '__main__':
FMNIST = FashionMNIST(path='MNIST_Data',download=False,train=True)
dLoader = DataLoader(FMNIST,batchsize=100,shuffling=False,normalization={'Transform':True})
# (784,256),(256,128),(128,64),(64,10)
net = NetWork((784,256,128,64,10),('ReLU','ReLU','ReLU','SoftMax'),optimType={'Momeuntum':True})
print(net)
costs = []
print_cost = True
epochs = 10
for i in range(epochs):
cost = 0.0
for j,(images,labels) in enumerate(dLoader):
ourimages = images.T
ourlabel = labels.T
innercost = net.fit(ourimages,ourlabel,learning_rate =0.02)
cost += innercost
# print('iteration num {},inner cost is {}'.format(j, innercost))
if print_cost:# and i % 100 == 0:
print("Cost after iteration {}: {}".format(i, cost/600))
print('-'*10)
images, labels = next(dLoader)
net.Prediction(images.T,labels.T,net.Parameters())
|
[
"DLFrameWork.dataset.FashionMNIST",
"DLFrameWork.forward.NetWork",
"DLFrameWork.dataset.DataLoader"
] |
[((138, 197), 'DLFrameWork.dataset.FashionMNIST', 'FashionMNIST', ([], {'path': '"""MNIST_Data"""', 'download': '(False)', 'train': '(True)'}), "(path='MNIST_Data', download=False, train=True)\n", (150, 197), False, 'from DLFrameWork.dataset import FashionMNIST, DataLoader\n'), ((215, 305), 'DLFrameWork.dataset.DataLoader', 'DataLoader', (['FMNIST'], {'batchsize': '(100)', 'shuffling': '(False)', 'normalization': "{'Transform': True}"}), "(FMNIST, batchsize=100, shuffling=False, normalization={\n 'Transform': True})\n", (225, 305), False, 'from DLFrameWork.dataset import FashionMNIST, DataLoader\n'), ((350, 454), 'DLFrameWork.forward.NetWork', 'NetWork', (['(784, 256, 128, 64, 10)', "('ReLU', 'ReLU', 'ReLU', 'SoftMax')"], {'optimType': "{'Momeuntum': True}"}), "((784, 256, 128, 64, 10), ('ReLU', 'ReLU', 'ReLU', 'SoftMax'),\n optimType={'Momeuntum': True})\n", (357, 454), False, 'from DLFrameWork.forward import NetWork\n')]
|
# This is a generated file! Please edit source .ksy file and use kaitai-struct-compiler to rebuild
from pkg_resources import parse_version
from .kaitaistruct import __version__ as ks_version, KaitaiStruct, KaitaiStream, BytesIO
import collections
from enum import Enum
if parse_version(ks_version) < parse_version('0.7'):
raise Exception("Incompatible Kaitai Struct Python API: 0.7 or later is required, but you have %s" % (ks_version))
class Icc4(KaitaiStruct):
SEQ_FIELDS = ["header", "tag_table"]
def __init__(self, _io, _parent=None, _root=None):
self._io = _io
self._parent = _parent
self._root = _root if _root else self
self._debug = collections.defaultdict(dict)
def _read(self):
self._debug['header']['start'] = self._io.pos()
self.header = self._root.ProfileHeader(self._io, self, self._root)
self.header._read()
self._debug['header']['end'] = self._io.pos()
self._debug['tag_table']['start'] = self._io.pos()
self.tag_table = self._root.TagTable(self._io, self, self._root)
self.tag_table._read()
self._debug['tag_table']['end'] = self._io.pos()
class U8Fixed8Number(KaitaiStruct):
SEQ_FIELDS = ["number"]
def __init__(self, _io, _parent=None, _root=None):
self._io = _io
self._parent = _parent
self._root = _root if _root else self
self._debug = collections.defaultdict(dict)
def _read(self):
self._debug['number']['start'] = self._io.pos()
self.number = self._io.read_bytes(2)
self._debug['number']['end'] = self._io.pos()
class U16Fixed16Number(KaitaiStruct):
SEQ_FIELDS = ["number"]
def __init__(self, _io, _parent=None, _root=None):
self._io = _io
self._parent = _parent
self._root = _root if _root else self
self._debug = collections.defaultdict(dict)
def _read(self):
self._debug['number']['start'] = self._io.pos()
self.number = self._io.read_bytes(4)
self._debug['number']['end'] = self._io.pos()
class StandardIlluminantEncoding(KaitaiStruct):
class StandardIlluminantEncodings(Enum):
unknown = 0
d50 = 1
d65 = 2
d93 = 3
f2 = 4
d55 = 5
a = 6
equi_power = 7
f8 = 8
SEQ_FIELDS = ["standard_illuminant_encoding"]
def __init__(self, _io, _parent=None, _root=None):
self._io = _io
self._parent = _parent
self._root = _root if _root else self
self._debug = collections.defaultdict(dict)
def _read(self):
self._debug['standard_illuminant_encoding']['start'] = self._io.pos()
self.standard_illuminant_encoding = KaitaiStream.resolve_enum(self._root.StandardIlluminantEncoding.StandardIlluminantEncodings, self._io.read_u4be())
self._debug['standard_illuminant_encoding']['end'] = self._io.pos()
class ProfileHeader(KaitaiStruct):
class CmmSignatures(Enum):
the_imaging_factory_cmm = 858931796
agfa_cmm = 1094929747
adobe_cmm = 1094992453
color_gear_cmm = 1128484179
logosync_cmm = 1147629395
efi_cmm = 1162234144
exact_scan_cmm = 1163411779
fuji_film_cmm = 1179000864
harlequin_rip_cmm = 1212370253
heidelberg_cmm = 1212435744
kodak_cmm = 1262701907
konica_minolta_cmm = 1296256324
device_link_cmm = 1380404563
sample_icc_cmm = 1397310275
mutoh_cmm = 1397311310
toshiba_cmm = 1413696845
color_gear_cmm_lite = 1430471501
color_gear_cmm_c = 1430474067
windows_color_system_cmm = 1464029984
ware_to_go_cmm = 1465141024
apple_cmm = 1634758764
argyll_cms_cmm = 1634887532
little_cms_cmm = 1818455411
zoran_cmm = 2053320752
class PrimaryPlatforms(Enum):
apple_computer_inc = 1095782476
microsoft_corporation = 1297303124
silicon_graphics_inc = 1397180704
sun_microsystems = 1398099543
class ProfileClasses(Enum):
abstract_profile = 1633842036
device_link_profile = 1818848875
display_device_profile = 1835955314
named_color_profile = 1852662636
output_device_profile = 1886549106
input_device_profile = 1935896178
color_space_profile = 1936744803
class RenderingIntents(Enum):
perceptual = 0
media_relative_colorimetric = 1
saturation = 2
icc_absolute_colorimetric = 3
class DataColourSpaces(Enum):
two_colour = 843271250
three_colour = 860048466
four_colour = 876825682
five_colour = 893602898
six_colour = 910380114
seven_colour = 927157330
eight_colour = 943934546
nine_colour = 960711762
ten_colour = 1094929490
eleven_colour = 1111706706
twelve_colour = 1128483922
cmy = 1129142560
cmyk = 1129142603
thirteen_colour = 1145261138
fourteen_colour = 1162038354
fifteen_colour = 1178815570
gray = 1196573017
hls = 1212961568
hsv = 1213421088
cielab_or_pcslab = 1281450528
cieluv = 1282766368
rgb = 1380401696
nciexyz_or_pcsxyz = 1482250784
ycbcr = 1497588338
cieyxy = 1501067552
SEQ_FIELDS = ["size", "preferred_cmm_type", "version", "device_class", "color_space", "pcs", "creation_date_time", "file_signature", "primary_platform", "profile_flags", "device_manufacturer", "device_model", "device_attributes", "rendering_intent", "nciexyz_values_of_illuminant_of_pcs", "creator", "identifier", "reserved_data"]
def __init__(self, _io, _parent=None, _root=None):
self._io = _io
self._parent = _parent
self._root = _root if _root else self
self._debug = collections.defaultdict(dict)
def _read(self):
self._debug['size']['start'] = self._io.pos()
self.size = self._io.read_u4be()
self._debug['size']['end'] = self._io.pos()
self._debug['preferred_cmm_type']['start'] = self._io.pos()
self.preferred_cmm_type = KaitaiStream.resolve_enum(self._root.ProfileHeader.CmmSignatures, self._io.read_u4be())
self._debug['preferred_cmm_type']['end'] = self._io.pos()
self._debug['version']['start'] = self._io.pos()
self.version = self._root.ProfileHeader.VersionField(self._io, self, self._root)
self.version._read()
self._debug['version']['end'] = self._io.pos()
self._debug['device_class']['start'] = self._io.pos()
self.device_class = KaitaiStream.resolve_enum(self._root.ProfileHeader.ProfileClasses, self._io.read_u4be())
self._debug['device_class']['end'] = self._io.pos()
self._debug['color_space']['start'] = self._io.pos()
self.color_space = KaitaiStream.resolve_enum(self._root.ProfileHeader.DataColourSpaces, self._io.read_u4be())
self._debug['color_space']['end'] = self._io.pos()
self._debug['pcs']['start'] = self._io.pos()
self.pcs = (self._io.read_bytes(4)).decode(u"ASCII")
self._debug['pcs']['end'] = self._io.pos()
self._debug['creation_date_time']['start'] = self._io.pos()
self.creation_date_time = self._root.DateTimeNumber(self._io, self, self._root)
self.creation_date_time._read()
self._debug['creation_date_time']['end'] = self._io.pos()
self._debug['file_signature']['start'] = self._io.pos()
self.file_signature = self._io.ensure_fixed_contents(b"\x61\x63\x73\x70")
self._debug['file_signature']['end'] = self._io.pos()
self._debug['primary_platform']['start'] = self._io.pos()
self.primary_platform = KaitaiStream.resolve_enum(self._root.ProfileHeader.PrimaryPlatforms, self._io.read_u4be())
self._debug['primary_platform']['end'] = self._io.pos()
self._debug['profile_flags']['start'] = self._io.pos()
self.profile_flags = self._root.ProfileHeader.ProfileFlags(self._io, self, self._root)
self.profile_flags._read()
self._debug['profile_flags']['end'] = self._io.pos()
self._debug['device_manufacturer']['start'] = self._io.pos()
self.device_manufacturer = self._root.DeviceManufacturer(self._io, self, self._root)
self.device_manufacturer._read()
self._debug['device_manufacturer']['end'] = self._io.pos()
self._debug['device_model']['start'] = self._io.pos()
self.device_model = (self._io.read_bytes(4)).decode(u"ASCII")
self._debug['device_model']['end'] = self._io.pos()
self._debug['device_attributes']['start'] = self._io.pos()
self.device_attributes = self._root.DeviceAttributes(self._io, self, self._root)
self.device_attributes._read()
self._debug['device_attributes']['end'] = self._io.pos()
self._debug['rendering_intent']['start'] = self._io.pos()
self.rendering_intent = KaitaiStream.resolve_enum(self._root.ProfileHeader.RenderingIntents, self._io.read_u4be())
self._debug['rendering_intent']['end'] = self._io.pos()
self._debug['nciexyz_values_of_illuminant_of_pcs']['start'] = self._io.pos()
self.nciexyz_values_of_illuminant_of_pcs = self._root.XyzNumber(self._io, self, self._root)
self.nciexyz_values_of_illuminant_of_pcs._read()
self._debug['nciexyz_values_of_illuminant_of_pcs']['end'] = self._io.pos()
self._debug['creator']['start'] = self._io.pos()
self.creator = self._root.DeviceManufacturer(self._io, self, self._root)
self.creator._read()
self._debug['creator']['end'] = self._io.pos()
self._debug['identifier']['start'] = self._io.pos()
self.identifier = self._io.read_bytes(16)
self._debug['identifier']['end'] = self._io.pos()
self._debug['reserved_data']['start'] = self._io.pos()
self.reserved_data = self._io.read_bytes(28)
self._debug['reserved_data']['end'] = self._io.pos()
    class VersionField(KaitaiStruct):
        """Profile version: a fixed major byte, 4-bit minor and bug-fix
        levels packed into one byte, then two reserved zero bytes."""
        SEQ_FIELDS = ["major", "minor", "bug_fix_level", "reserved"]
        def __init__(self, _io, _parent=None, _root=None):
            self._io = _io
            self._parent = _parent
            self._root = _root if _root else self
            # Per-field {'start': ..., 'end': ...} stream offsets for debugging.
            self._debug = collections.defaultdict(dict)
        def _read(self):
            self._debug['major']['start'] = self._io.pos()
            # Major version byte is required to be exactly 0x04.
            self.major = self._io.ensure_fixed_contents(b"\x04")
            self._debug['major']['end'] = self._io.pos()
            self._debug['minor']['start'] = self._io.pos()
            # Minor version and bug-fix level share one byte, 4 bits each.
            self.minor = self._io.read_bits_int(4)
            self._debug['minor']['end'] = self._io.pos()
            self._debug['bug_fix_level']['start'] = self._io.pos()
            self.bug_fix_level = self._io.read_bits_int(4)
            self._debug['bug_fix_level']['end'] = self._io.pos()
            # Leave bit-level mode before the byte-aligned reserved field.
            self._io.align_to_byte()
            self._debug['reserved']['start'] = self._io.pos()
            self.reserved = self._io.ensure_fixed_contents(b"\x00\x00")
            self._debug['reserved']['end'] = self._io.pos()
    class ProfileFlags(KaitaiStruct):
        """Profile flags field: two single-bit booleans followed by 30 bits
        of other/vendor flags, parsed with bit-level reads."""
        SEQ_FIELDS = ["embedded_profile", "profile_can_be_used_independently_of_embedded_colour_data", "other_flags"]
        def __init__(self, _io, _parent=None, _root=None):
            self._io = _io
            self._parent = _parent
            self._root = _root if _root else self
            # Per-field {'start': ..., 'end': ...} stream offsets for debugging.
            self._debug = collections.defaultdict(dict)
        def _read(self):
            self._debug['embedded_profile']['start'] = self._io.pos()
            # 1-bit flags are converted to bool via the != 0 comparison.
            self.embedded_profile = self._io.read_bits_int(1) != 0
            self._debug['embedded_profile']['end'] = self._io.pos()
            self._debug['profile_can_be_used_independently_of_embedded_colour_data']['start'] = self._io.pos()
            self.profile_can_be_used_independently_of_embedded_colour_data = self._io.read_bits_int(1) != 0
            self._debug['profile_can_be_used_independently_of_embedded_colour_data']['end'] = self._io.pos()
            self._debug['other_flags']['start'] = self._io.pos()
            # Remaining 30 bits of the 32-bit flags word, kept as an int.
            self.other_flags = self._io.read_bits_int(30)
            self._debug['other_flags']['end'] = self._io.pos()
    class XyzNumber(KaitaiStruct):
        """An XYZ triple: three 4-byte fields kept as raw bytes
        (not decoded into numbers here)."""
        SEQ_FIELDS = ["x", "y", "z"]
        def __init__(self, _io, _parent=None, _root=None):
            self._io = _io
            self._parent = _parent
            self._root = _root if _root else self
            # Per-field {'start': ..., 'end': ...} stream offsets for debugging.
            self._debug = collections.defaultdict(dict)
        def _read(self):
            self._debug['x']['start'] = self._io.pos()
            self.x = self._io.read_bytes(4)
            self._debug['x']['end'] = self._io.pos()
            self._debug['y']['start'] = self._io.pos()
            self.y = self._io.read_bytes(4)
            self._debug['y']['end'] = self._io.pos()
            self._debug['z']['start'] = self._io.pos()
            self.z = self._io.read_bytes(4)
            self._debug['z']['end'] = self._io.pos()
    class DateTimeNumber(KaitaiStruct):
        """Date/time stamp: six consecutive big-endian u2 fields
        (year, month, day, hour, minute, second)."""
        SEQ_FIELDS = ["year", "month", "day", "hour", "minute", "second"]
        def __init__(self, _io, _parent=None, _root=None):
            self._io = _io
            self._parent = _parent
            self._root = _root if _root else self
            # Per-field {'start': ..., 'end': ...} stream offsets for debugging.
            self._debug = collections.defaultdict(dict)
        def _read(self):
            self._debug['year']['start'] = self._io.pos()
            self.year = self._io.read_u2be()
            self._debug['year']['end'] = self._io.pos()
            self._debug['month']['start'] = self._io.pos()
            self.month = self._io.read_u2be()
            self._debug['month']['end'] = self._io.pos()
            self._debug['day']['start'] = self._io.pos()
            self.day = self._io.read_u2be()
            self._debug['day']['end'] = self._io.pos()
            self._debug['hour']['start'] = self._io.pos()
            self.hour = self._io.read_u2be()
            self._debug['hour']['end'] = self._io.pos()
            self._debug['minute']['start'] = self._io.pos()
            self.minute = self._io.read_u2be()
            self._debug['minute']['end'] = self._io.pos()
            self._debug['second']['start'] = self._io.pos()
            self.second = self._io.read_u2be()
            self._debug['second']['end'] = self._io.pos()
    class Response16Number(KaitaiStruct):
        """Response value pair: a u4 device number, two reserved zero
        bytes, then an s15Fixed16 measurement value."""
        SEQ_FIELDS = ["number", "reserved", "measurement_value"]
        def __init__(self, _io, _parent=None, _root=None):
            self._io = _io
            self._parent = _parent
            self._root = _root if _root else self
            # Per-field {'start': ..., 'end': ...} stream offsets for debugging.
            self._debug = collections.defaultdict(dict)
        def _read(self):
            self._debug['number']['start'] = self._io.pos()
            self.number = self._io.read_u4be()
            self._debug['number']['end'] = self._io.pos()
            self._debug['reserved']['start'] = self._io.pos()
            self.reserved = self._io.ensure_fixed_contents(b"\x00\x00")
            self._debug['reserved']['end'] = self._io.pos()
            self._debug['measurement_value']['start'] = self._io.pos()
            # Delegate to the S15Fixed16Number subparser (defined elsewhere
            # in this generated module).
            self.measurement_value = self._root.S15Fixed16Number(self._io, self, self._root)
            self.measurement_value._read()
            self._debug['measurement_value']['end'] = self._io.pos()
    class U1Fixed15Number(KaitaiStruct):
        """u1Fixed15 fixed-point value, kept as its 2 raw bytes
        (no numeric conversion is performed here)."""
        SEQ_FIELDS = ["number"]
        def __init__(self, _io, _parent=None, _root=None):
            self._io = _io
            self._parent = _parent
            self._root = _root if _root else self
            # Per-field {'start': ..., 'end': ...} stream offsets for debugging.
            self._debug = collections.defaultdict(dict)
        def _read(self):
            self._debug['number']['start'] = self._io.pos()
            self.number = self._io.read_bytes(2)
            self._debug['number']['end'] = self._io.pos()
class TagTable(KaitaiStruct):
SEQ_FIELDS = ["tag_count", "tags"]
        def __init__(self, _io, _parent=None, _root=None):
            # Standard generated constructor: keep stream/parent/root handles;
            # _root falls back to self when this struct is parsed standalone.
            self._io = _io
            self._parent = _parent
            self._root = _root if _root else self
            # Per-field {'start': ..., 'end': ...} stream offsets for debugging.
            self._debug = collections.defaultdict(dict)
        def _read(self):
            """Read the u4 tag count, then that many TagDefinition records,
            recording per-element stream offsets in self._debug."""
            self._debug['tag_count']['start'] = self._io.pos()
            self.tag_count = self._io.read_u4be()
            self._debug['tag_count']['end'] = self._io.pos()
            self._debug['tags']['start'] = self._io.pos()
            # Pre-size the list; elements are filled in stream order below.
            self.tags = [None] * (self.tag_count)
            for i in range(self.tag_count):
                if not 'arr' in self._debug['tags']:
                    self._debug['tags']['arr'] = []
                self._debug['tags']['arr'].append({'start': self._io.pos()})
                _t_tags = self._root.TagTable.TagDefinition(self._io, self, self._root)
                _t_tags._read()
                self.tags[i] = _t_tags
                self._debug['tags']['arr'][i]['end'] = self._io.pos()
            self._debug['tags']['end'] = self._io.pos()
class TagDefinition(KaitaiStruct):
        class TagSignatures(Enum):
            # Known tag signatures; each value is the big-endian u4 of the
            # tag's 4-character ASCII signature (e.g. 1093812784 = 'A2B0').
            a_to_b_0 = 1093812784
            a_to_b_1 = 1093812785
            a_to_b_2 = 1093812786
            b_to_a_0 = 1110589744
            b_to_a_1 = 1110589745
            b_to_a_2 = 1110589746
            b_to_d_0 = 1110590512
            b_to_d_1 = 1110590513
            b_to_d_2 = 1110590514
            b_to_d_3 = 1110590515
            d_to_b_0 = 1144144432
            d_to_b_1 = 1144144433
            d_to_b_2 = 1144144434
            d_to_b_3 = 1144144435
            blue_trc = 1649693251
            blue_matrix_column = 1649957210
            calibration_date_time = 1667329140
            chromatic_adaptation = 1667785060
            chromaticity = 1667789421
            colorimetric_intent_image_state = 1667852659
            colorant_table_out = 1668050804
            colorant_order = 1668051567
            colorant_table = 1668051572
            copyright = 1668313716
            profile_description = 1684370275
            device_model_desc = 1684890724
            device_mfg_desc = 1684893284
            green_trc = 1733579331
            green_matrix_column = 1733843290
            gamut = 1734438260
            gray_trc = 1800688195
            luminance = 1819635049
            measurement = 1835360627
            named_color_2 = 1852009522
            preview_0 = 1886545200
            preview_1 = 1886545201
            preview_2 = 1886545202
            profile_sequence = 1886610801
            profile_sequence_identifier = 1886611812
            red_trc = 1918128707
            red_matrix_column = 1918392666
            output_response = 1919251312
            perceptual_rendering_intent_gamut = 1919510320
            saturation_rendering_intent_gamut = 1919510322
            char_target = 1952543335
            technology = 1952801640
            viewing_conditions = 1986618743
            viewing_cond_desc = 1987405156
            media_white_point = 2004119668
        class TagTypeSignatures(Enum):
            # Tag *type* signatures (the first u4 of each tag's data
            # element); values are big-endian u4 of the 4-char ASCII type
            # name (e.g. 1482250784 = 'XYZ ').
            xyz_type = 1482250784
            colorant_table_type = 1668051572
            curve_type = 1668641398
            data_type = 1684108385
            date_time_type = 1685350765
            multi_function_a_to_b_table_type = 1832993312
            multi_function_b_to_a_table_type = 1833058592
            measurement_type = 1835360627
            multi_function_table_with_one_byte_precision_type = 1835430961
            multi_function_table_with_two_byte_precision_type = 1835430962
            multi_localized_unicode_type = 1835824483
            multi_process_elements_type = 1836082548
            named_color_2_type = 1852009522
            parametric_curve_type = 1885434465
            profile_sequence_desc_type = 1886610801
            profile_sequence_identifier_type = 1886611812
            response_curve_set_16_type = 1919120178
            s_15_fixed_16_array_type = 1936077618
            signature_type = 1936287520
            text_type = 1952807028
            u_16_fixed_16_array_type = 1969632050
            u_int_8_array_type = 1969827896
            u_int_16_array_type = 1969828150
            u_int_32_array_type = 1969828658
            u_int_64_array_type = 1969829428
            viewing_conditions_type = 1986618743
        class MultiProcessElementsTypes(Enum):
            # Element-type signatures used inside multiProcessElementsType
            # data; values are big-endian u4 of 4-char ASCII signatures.
            bacs_element_type = 1648444243
            clut_element_type = 1668052340
            one_dimensional_curves_type = 1668641382
            eacs_element_type = 1698775891
            matrix_element_type = 1835103334
            curve_set_element_table_type = 1835428980
            formula_curve_segments_type = 1885434470
            sampled_curve_segment_type = 1935764838
SEQ_FIELDS = ["tag_signature", "offset_to_data_element", "size_of_data_element"]
        def __init__(self, _io, _parent=None, _root=None):
            # Standard generated constructor: keep stream/parent/root handles;
            # _root falls back to self when this struct is parsed standalone.
            self._io = _io
            self._parent = _parent
            self._root = _root if _root else self
            # Per-field {'start': ..., 'end': ...} stream offsets for debugging.
            self._debug = collections.defaultdict(dict)
        def _read(self):
            """Read one tag-table entry: tag signature plus the offset and
            size of its data element."""
            self._debug['tag_signature']['start'] = self._io.pos()
            # Map the raw u4 onto TagSignatures; unknown values are passed
            # through unchanged by resolve_enum.
            self.tag_signature = KaitaiStream.resolve_enum(self._root.TagTable.TagDefinition.TagSignatures, self._io.read_u4be())
            self._debug['tag_signature']['end'] = self._io.pos()
            self._debug['offset_to_data_element']['start'] = self._io.pos()
            self.offset_to_data_element = self._io.read_u4be()
            self._debug['offset_to_data_element']['end'] = self._io.pos()
            self._debug['size_of_data_element']['start'] = self._io.pos()
            self.size_of_data_element = self._io.read_u4be()
            self._debug['size_of_data_element']['end'] = self._io.pos()
    class BlueMatrixColumnTag(KaitaiStruct):
        """Blue matrix column tag: a type signature followed by its typed
        payload; only xyz_type payloads are parsed."""
        SEQ_FIELDS = ["tag_type", "tag_data"]
        def __init__(self, _io, _parent=None, _root=None):
            self._io = _io
            self._parent = _parent
            self._root = _root if _root else self
            # Per-field {'start': ..., 'end': ...} stream offsets for debugging.
            self._debug = collections.defaultdict(dict)
        def _read(self):
            self._debug['tag_type']['start'] = self._io.pos()
            self.tag_type = KaitaiStream.resolve_enum(self._root.TagTable.TagDefinition.TagTypeSignatures, self._io.read_u4be())
            self._debug['tag_type']['end'] = self._io.pos()
            self._debug['tag_data']['start'] = self._io.pos()
            _on = self.tag_type
            # Unexpected type signatures leave tag_data unset.
            if _on == self._root.TagTable.TagDefinition.TagTypeSignatures.xyz_type:
                self.tag_data = self._root.TagTable.TagDefinition.XyzType(self._io, self, self._root)
                self.tag_data._read()
            self._debug['tag_data']['end'] = self._io.pos()
    class DeviceMfgDescTag(KaitaiStruct):
        """Device manufacturer description tag: type signature followed by
        its payload; only multi_localized_unicode_type is parsed."""
        SEQ_FIELDS = ["tag_type", "tag_data"]
        def __init__(self, _io, _parent=None, _root=None):
            self._io = _io
            self._parent = _parent
            self._root = _root if _root else self
            # Per-field {'start': ..., 'end': ...} stream offsets for debugging.
            self._debug = collections.defaultdict(dict)
        def _read(self):
            self._debug['tag_type']['start'] = self._io.pos()
            self.tag_type = KaitaiStream.resolve_enum(self._root.TagTable.TagDefinition.TagTypeSignatures, self._io.read_u4be())
            self._debug['tag_type']['end'] = self._io.pos()
            self._debug['tag_data']['start'] = self._io.pos()
            _on = self.tag_type
            # Unexpected type signatures leave tag_data unset.
            if _on == self._root.TagTable.TagDefinition.TagTypeSignatures.multi_localized_unicode_type:
                self.tag_data = self._root.TagTable.TagDefinition.MultiLocalizedUnicodeType(self._io, self, self._root)
                self.tag_data._read()
            self._debug['tag_data']['end'] = self._io.pos()
class NamedColor2Type(KaitaiStruct):
SEQ_FIELDS = ["reserved", "vendor_specific_flag", "count_of_named_colours", "number_of_device_coordinates_for_each_named_colour", "prefix_for_each_colour_name", "prefix_for_each_colour_name_padding", "suffix_for_each_colour_name", "suffix_for_each_colour_name_padding", "named_colour_definitions"]
def __init__(self, _io, _parent=None, _root=None):
self._io = _io
self._parent = _parent
self._root = _root if _root else self
self._debug = collections.defaultdict(dict)
def _read(self):
self._debug['reserved']['start'] = self._io.pos()
self.reserved = self._io.ensure_fixed_contents(b"\x00\x00\x00\x00")
self._debug['reserved']['end'] = self._io.pos()
self._debug['vendor_specific_flag']['start'] = self._io.pos()
self.vendor_specific_flag = self._io.read_u4be()
self._debug['vendor_specific_flag']['end'] = self._io.pos()
self._debug['count_of_named_colours']['start'] = self._io.pos()
self.count_of_named_colours = self._io.read_u4be()
self._debug['count_of_named_colours']['end'] = self._io.pos()
self._debug['number_of_device_coordinates_for_each_named_colour']['start'] = self._io.pos()
self.number_of_device_coordinates_for_each_named_colour = self._io.read_u4be()
self._debug['number_of_device_coordinates_for_each_named_colour']['end'] = self._io.pos()
self._debug['prefix_for_each_colour_name']['start'] = self._io.pos()
self.prefix_for_each_colour_name = (self._io.read_bytes_term(0, False, True, True)).decode(u"ASCII")
self._debug['prefix_for_each_colour_name']['end'] = self._io.pos()
self._debug['prefix_for_each_colour_name_padding']['start'] = self._io.pos()
self.prefix_for_each_colour_name_padding = [None] * ((32 - len(self.prefix_for_each_colour_name)))
for i in range((32 - len(self.prefix_for_each_colour_name))):
if not 'arr' in self._debug['prefix_for_each_colour_name_padding']:
self._debug['prefix_for_each_colour_name_padding']['arr'] = []
self._debug['prefix_for_each_colour_name_padding']['arr'].append({'start': self._io.pos()})
self.prefix_for_each_colour_name_padding = self._io.ensure_fixed_contents(b"\x00")
self._debug['prefix_for_each_colour_name_padding']['arr'][i]['end'] = self._io.pos()
self._debug['prefix_for_each_colour_name_padding']['end'] = self._io.pos()
self._debug['suffix_for_each_colour_name']['start'] = self._io.pos()
self.suffix_for_each_colour_name = (self._io.read_bytes_term(0, False, True, True)).decode(u"ASCII")
self._debug['suffix_for_each_colour_name']['end'] = self._io.pos()
self._debug['suffix_for_each_colour_name_padding']['start'] = self._io.pos()
self.suffix_for_each_colour_name_padding = [None] * ((32 - len(self.suffix_for_each_colour_name)))
for i in range((32 - len(self.suffix_for_each_colour_name))):
if not 'arr' in self._debug['suffix_for_each_colour_name_padding']:
self._debug['suffix_for_each_colour_name_padding']['arr'] = []
self._debug['suffix_for_each_colour_name_padding']['arr'].append({'start': self._io.pos()})
self.suffix_for_each_colour_name_padding = self._io.ensure_fixed_contents(b"\x00")
self._debug['suffix_for_each_colour_name_padding']['arr'][i]['end'] = self._io.pos()
self._debug['suffix_for_each_colour_name_padding']['end'] = self._io.pos()
self._debug['named_colour_definitions']['start'] = self._io.pos()
self.named_colour_definitions = [None] * (self.count_of_named_colours)
for i in range(self.count_of_named_colours):
if not 'arr' in self._debug['named_colour_definitions']:
self._debug['named_colour_definitions']['arr'] = []
self._debug['named_colour_definitions']['arr'].append({'start': self._io.pos()})
_t_named_colour_definitions = self._root.TagTable.TagDefinition.NamedColor2Type.NamedColourDefinition(self._io, self, self._root)
_t_named_colour_definitions._read()
self.named_colour_definitions[i] = _t_named_colour_definitions
self._debug['named_colour_definitions']['arr'][i]['end'] = self._io.pos()
self._debug['named_colour_definitions']['end'] = self._io.pos()
class NamedColourDefinition(KaitaiStruct):
SEQ_FIELDS = ["root_name", "root_name_padding", "pcs_coordinates", "device_coordinates"]
def __init__(self, _io, _parent=None, _root=None):
self._io = _io
self._parent = _parent
self._root = _root if _root else self
self._debug = collections.defaultdict(dict)
def _read(self):
self._debug['root_name']['start'] = self._io.pos()
self.root_name = (self._io.read_bytes_term(0, False, True, True)).decode(u"ASCII")
self._debug['root_name']['end'] = self._io.pos()
self._debug['root_name_padding']['start'] = self._io.pos()
self.root_name_padding = [None] * ((32 - len(self.root_name)))
for i in range((32 - len(self.root_name))):
if not 'arr' in self._debug['root_name_padding']:
self._debug['root_name_padding']['arr'] = []
self._debug['root_name_padding']['arr'].append({'start': self._io.pos()})
self.root_name_padding = self._io.ensure_fixed_contents(b"\x00")
self._debug['root_name_padding']['arr'][i]['end'] = self._io.pos()
self._debug['root_name_padding']['end'] = self._io.pos()
self._debug['pcs_coordinates']['start'] = self._io.pos()
self.pcs_coordinates = self._io.read_bytes(6)
self._debug['pcs_coordinates']['end'] = self._io.pos()
if self._parent.number_of_device_coordinates_for_each_named_colour > 0:
self._debug['device_coordinates']['start'] = self._io.pos()
self.device_coordinates = [None] * (self._parent.count_of_named_colours)
for i in range(self._parent.count_of_named_colours):
if not 'arr' in self._debug['device_coordinates']:
self._debug['device_coordinates']['arr'] = []
self._debug['device_coordinates']['arr'].append({'start': self._io.pos()})
self.device_coordinates[i] = self._io.read_u2be()
self._debug['device_coordinates']['arr'][i]['end'] = self._io.pos()
self._debug['device_coordinates']['end'] = self._io.pos()
    class ViewingConditionsTag(KaitaiStruct):
        """Viewing conditions tag: type signature followed by its payload;
        only viewing_conditions_type is parsed."""
        SEQ_FIELDS = ["tag_type", "tag_data"]
        def __init__(self, _io, _parent=None, _root=None):
            self._io = _io
            self._parent = _parent
            self._root = _root if _root else self
            # Per-field {'start': ..., 'end': ...} stream offsets for debugging.
            self._debug = collections.defaultdict(dict)
        def _read(self):
            self._debug['tag_type']['start'] = self._io.pos()
            self.tag_type = KaitaiStream.resolve_enum(self._root.TagTable.TagDefinition.TagTypeSignatures, self._io.read_u4be())
            self._debug['tag_type']['end'] = self._io.pos()
            self._debug['tag_data']['start'] = self._io.pos()
            _on = self.tag_type
            # Unexpected type signatures leave tag_data unset.
            if _on == self._root.TagTable.TagDefinition.TagTypeSignatures.viewing_conditions_type:
                self.tag_data = self._root.TagTable.TagDefinition.ViewingConditionsType(self._io, self, self._root)
                self.tag_data._read()
            self._debug['tag_data']['end'] = self._io.pos()
    class BlueTrcTag(KaitaiStruct):
        """Blue tone reproduction curve tag: type signature followed by a
        curve_type or parametric_curve_type payload."""
        SEQ_FIELDS = ["tag_type", "tag_data"]
        def __init__(self, _io, _parent=None, _root=None):
            self._io = _io
            self._parent = _parent
            self._root = _root if _root else self
            # Per-field {'start': ..., 'end': ...} stream offsets for debugging.
            self._debug = collections.defaultdict(dict)
        def _read(self):
            self._debug['tag_type']['start'] = self._io.pos()
            self.tag_type = KaitaiStream.resolve_enum(self._root.TagTable.TagDefinition.TagTypeSignatures, self._io.read_u4be())
            self._debug['tag_type']['end'] = self._io.pos()
            self._debug['tag_data']['start'] = self._io.pos()
            _on = self.tag_type
            # Dispatch on the type signature; other types leave tag_data unset.
            if _on == self._root.TagTable.TagDefinition.TagTypeSignatures.curve_type:
                self.tag_data = self._root.TagTable.TagDefinition.CurveType(self._io, self, self._root)
                self.tag_data._read()
            elif _on == self._root.TagTable.TagDefinition.TagTypeSignatures.parametric_curve_type:
                self.tag_data = self._root.TagTable.TagDefinition.ParametricCurveType(self._io, self, self._root)
                self.tag_data._read()
            self._debug['tag_data']['end'] = self._io.pos()
    class ResponseCurveSet16Type(KaitaiStruct):
        """responseCurveSet16Type payload: channel/measurement counts, a
        table of structure offsets, then the remaining bytes kept raw
        (individual response-curve structures are not parsed here)."""
        SEQ_FIELDS = ["reserved", "number_of_channels", "count_of_measurement_types", "response_curve_structure_offsets", "response_curve_structures"]
        def __init__(self, _io, _parent=None, _root=None):
            self._io = _io
            self._parent = _parent
            self._root = _root if _root else self
            # Per-field {'start': ..., 'end': ...} stream offsets for debugging.
            self._debug = collections.defaultdict(dict)
        def _read(self):
            self._debug['reserved']['start'] = self._io.pos()
            self.reserved = self._io.ensure_fixed_contents(b"\x00\x00\x00\x00")
            self._debug['reserved']['end'] = self._io.pos()
            self._debug['number_of_channels']['start'] = self._io.pos()
            self.number_of_channels = self._io.read_u2be()
            self._debug['number_of_channels']['end'] = self._io.pos()
            self._debug['count_of_measurement_types']['start'] = self._io.pos()
            self.count_of_measurement_types = self._io.read_u2be()
            self._debug['count_of_measurement_types']['end'] = self._io.pos()
            self._debug['response_curve_structure_offsets']['start'] = self._io.pos()
            # One u4 offset per measurement type.
            self.response_curve_structure_offsets = [None] * (self.count_of_measurement_types)
            for i in range(self.count_of_measurement_types):
                if not 'arr' in self._debug['response_curve_structure_offsets']:
                    self._debug['response_curve_structure_offsets']['arr'] = []
                self._debug['response_curve_structure_offsets']['arr'].append({'start': self._io.pos()})
                self.response_curve_structure_offsets[i] = self._io.read_u4be()
                self._debug['response_curve_structure_offsets']['arr'][i]['end'] = self._io.pos()
            self._debug['response_curve_structure_offsets']['end'] = self._io.pos()
            self._debug['response_curve_structures']['start'] = self._io.pos()
            # Consumes everything remaining in the (sub)stream.
            self.response_curve_structures = self._io.read_bytes_full()
            self._debug['response_curve_structures']['end'] = self._io.pos()
    class CurveType(KaitaiStruct):
        """curveType payload: entry count, then either a list of values
        (count > 1) or a single value (count == 1). A count of 0 sets
        neither curve_values nor curve_value.

        NOTE(review): entries are read as u4be and the single-value case
        as u1; ICC.1 curveType specifies uInt16 entries and a u8Fixed8
        single value — verify against the .ksy source before relying on
        these widths.
        """
        SEQ_FIELDS = ["reserved", "number_of_entries", "curve_values", "curve_value"]
        def __init__(self, _io, _parent=None, _root=None):
            self._io = _io
            self._parent = _parent
            self._root = _root if _root else self
            # Per-field {'start': ..., 'end': ...} stream offsets for debugging.
            self._debug = collections.defaultdict(dict)
        def _read(self):
            self._debug['reserved']['start'] = self._io.pos()
            self.reserved = self._io.ensure_fixed_contents(b"\x00\x00\x00\x00")
            self._debug['reserved']['end'] = self._io.pos()
            self._debug['number_of_entries']['start'] = self._io.pos()
            self.number_of_entries = self._io.read_u4be()
            self._debug['number_of_entries']['end'] = self._io.pos()
            if self.number_of_entries > 1:
                self._debug['curve_values']['start'] = self._io.pos()
                self.curve_values = [None] * (self.number_of_entries)
                for i in range(self.number_of_entries):
                    if not 'arr' in self._debug['curve_values']:
                        self._debug['curve_values']['arr'] = []
                    self._debug['curve_values']['arr'].append({'start': self._io.pos()})
                    self.curve_values[i] = self._io.read_u4be()
                    self._debug['curve_values']['arr'][i]['end'] = self._io.pos()
                self._debug['curve_values']['end'] = self._io.pos()
            if self.number_of_entries == 1:
                self._debug['curve_value']['start'] = self._io.pos()
                self.curve_value = self._io.read_u1()
                self._debug['curve_value']['end'] = self._io.pos()
    class SaturationRenderingIntentGamutTag(KaitaiStruct):
        """Saturation rendering intent gamut tag: type signature followed
        by its payload; only signature_type is parsed."""
        SEQ_FIELDS = ["tag_type", "tag_data"]
        def __init__(self, _io, _parent=None, _root=None):
            self._io = _io
            self._parent = _parent
            self._root = _root if _root else self
            # Per-field {'start': ..., 'end': ...} stream offsets for debugging.
            self._debug = collections.defaultdict(dict)
        def _read(self):
            self._debug['tag_type']['start'] = self._io.pos()
            self.tag_type = KaitaiStream.resolve_enum(self._root.TagTable.TagDefinition.TagTypeSignatures, self._io.read_u4be())
            self._debug['tag_type']['end'] = self._io.pos()
            self._debug['tag_data']['start'] = self._io.pos()
            _on = self.tag_type
            # Unexpected type signatures leave tag_data unset.
            if _on == self._root.TagTable.TagDefinition.TagTypeSignatures.signature_type:
                self.tag_data = self._root.TagTable.TagDefinition.SignatureType(self._io, self, self._root)
                self.tag_data._read()
            self._debug['tag_data']['end'] = self._io.pos()
    class XyzType(KaitaiStruct):
        """XYZType payload: reserved word, then XyzNumber records repeated
        until the end of the (sub)stream."""
        SEQ_FIELDS = ["reserved", "values"]
        def __init__(self, _io, _parent=None, _root=None):
            self._io = _io
            self._parent = _parent
            self._root = _root if _root else self
            # Per-field {'start': ..., 'end': ...} stream offsets for debugging.
            self._debug = collections.defaultdict(dict)
        def _read(self):
            self._debug['reserved']['start'] = self._io.pos()
            self.reserved = self._io.ensure_fixed_contents(b"\x00\x00\x00\x00")
            self._debug['reserved']['end'] = self._io.pos()
            self._debug['values']['start'] = self._io.pos()
            self.values = []
            i = 0
            # repeat-until-EOF: one XyzNumber per iteration.
            while not self._io.is_eof():
                if not 'arr' in self._debug['values']:
                    self._debug['values']['arr'] = []
                self._debug['values']['arr'].append({'start': self._io.pos()})
                _t_values = self._root.XyzNumber(self._io, self, self._root)
                _t_values._read()
                self.values.append(_t_values)
                self._debug['values']['arr'][len(self.values) - 1]['end'] = self._io.pos()
                i += 1
            self._debug['values']['end'] = self._io.pos()
class Lut8Type(KaitaiStruct):
SEQ_FIELDS = ["reserved", "number_of_input_channels", "number_of_output_channels", "number_of_clut_grid_points", "padding", "encoded_e_parameters", "number_of_input_table_entries", "number_of_output_table_entries", "input_tables", "clut_values", "output_tables"]
def __init__(self, _io, _parent=None, _root=None):
self._io = _io
self._parent = _parent
self._root = _root if _root else self
self._debug = collections.defaultdict(dict)
def _read(self):
self._debug['reserved']['start'] = self._io.pos()
self.reserved = self._io.ensure_fixed_contents(b"\x00\x00\x00\x00")
self._debug['reserved']['end'] = self._io.pos()
self._debug['number_of_input_channels']['start'] = self._io.pos()
self.number_of_input_channels = self._io.read_u1()
self._debug['number_of_input_channels']['end'] = self._io.pos()
self._debug['number_of_output_channels']['start'] = self._io.pos()
self.number_of_output_channels = self._io.read_u1()
self._debug['number_of_output_channels']['end'] = self._io.pos()
self._debug['number_of_clut_grid_points']['start'] = self._io.pos()
self.number_of_clut_grid_points = self._io.read_u1()
self._debug['number_of_clut_grid_points']['end'] = self._io.pos()
self._debug['padding']['start'] = self._io.pos()
self.padding = self._io.ensure_fixed_contents(b"\x00")
self._debug['padding']['end'] = self._io.pos()
self._debug['encoded_e_parameters']['start'] = self._io.pos()
self.encoded_e_parameters = [None] * (9)
for i in range(9):
if not 'arr' in self._debug['encoded_e_parameters']:
self._debug['encoded_e_parameters']['arr'] = []
self._debug['encoded_e_parameters']['arr'].append({'start': self._io.pos()})
self.encoded_e_parameters[i] = self._io.read_s4be()
self._debug['encoded_e_parameters']['arr'][i]['end'] = self._io.pos()
self._debug['encoded_e_parameters']['end'] = self._io.pos()
self._debug['number_of_input_table_entries']['start'] = self._io.pos()
self.number_of_input_table_entries = self._io.read_u4be()
self._debug['number_of_input_table_entries']['end'] = self._io.pos()
self._debug['number_of_output_table_entries']['start'] = self._io.pos()
self.number_of_output_table_entries = self._io.read_u4be()
self._debug['number_of_output_table_entries']['end'] = self._io.pos()
self._debug['input_tables']['start'] = self._io.pos()
self.input_tables = self._io.read_bytes((256 * self.number_of_input_channels))
self._debug['input_tables']['end'] = self._io.pos()
self._debug['clut_values']['start'] = self._io.pos()
self.clut_values = self._io.read_bytes(((self.number_of_clut_grid_points ^ self.number_of_input_channels) * self.number_of_output_channels))
self._debug['clut_values']['end'] = self._io.pos()
self._debug['output_tables']['start'] = self._io.pos()
self.output_tables = self._io.read_bytes((256 * self.number_of_output_channels))
self._debug['output_tables']['end'] = self._io.pos()
    class BToA2Tag(KaitaiStruct):
        """BToA2 tag: type signature followed by a lut8, lut16 or lutBToA
        payload, dispatched on the signature."""
        SEQ_FIELDS = ["tag_type", "tag_data"]
        def __init__(self, _io, _parent=None, _root=None):
            self._io = _io
            self._parent = _parent
            self._root = _root if _root else self
            # Per-field {'start': ..., 'end': ...} stream offsets for debugging.
            self._debug = collections.defaultdict(dict)
        def _read(self):
            self._debug['tag_type']['start'] = self._io.pos()
            self.tag_type = KaitaiStream.resolve_enum(self._root.TagTable.TagDefinition.TagTypeSignatures, self._io.read_u4be())
            self._debug['tag_type']['end'] = self._io.pos()
            self._debug['tag_data']['start'] = self._io.pos()
            _on = self.tag_type
            # Dispatch on the type signature; other types leave tag_data unset.
            if _on == self._root.TagTable.TagDefinition.TagTypeSignatures.multi_function_table_with_one_byte_precision_type:
                self.tag_data = self._root.TagTable.TagDefinition.Lut8Type(self._io, self, self._root)
                self.tag_data._read()
            elif _on == self._root.TagTable.TagDefinition.TagTypeSignatures.multi_function_table_with_two_byte_precision_type:
                self.tag_data = self._root.TagTable.TagDefinition.Lut16Type(self._io, self, self._root)
                self.tag_data._read()
            elif _on == self._root.TagTable.TagDefinition.TagTypeSignatures.multi_function_b_to_a_table_type:
                self.tag_data = self._root.TagTable.TagDefinition.LutBToAType(self._io, self, self._root)
                self.tag_data._read()
            self._debug['tag_data']['end'] = self._io.pos()
    class LutAToBType(KaitaiStruct):
        """lutAToBType header: channel counts and the five element offsets
        (B curves, matrix, M curves, CLUT, A curves); the elements
        themselves are kept as the remaining raw bytes."""
        SEQ_FIELDS = ["reserved", "number_of_input_channels", "number_of_output_channels", "padding", "offset_to_first_b_curve", "offset_to_matrix", "offset_to_first_m_curve", "offset_to_clut", "offset_to_first_a_curve", "data"]
        def __init__(self, _io, _parent=None, _root=None):
            self._io = _io
            self._parent = _parent
            self._root = _root if _root else self
            # Per-field {'start': ..., 'end': ...} stream offsets for debugging.
            self._debug = collections.defaultdict(dict)
        def _read(self):
            self._debug['reserved']['start'] = self._io.pos()
            self.reserved = self._io.ensure_fixed_contents(b"\x00\x00\x00\x00")
            self._debug['reserved']['end'] = self._io.pos()
            self._debug['number_of_input_channels']['start'] = self._io.pos()
            self.number_of_input_channels = self._io.read_u1()
            self._debug['number_of_input_channels']['end'] = self._io.pos()
            self._debug['number_of_output_channels']['start'] = self._io.pos()
            self.number_of_output_channels = self._io.read_u1()
            self._debug['number_of_output_channels']['end'] = self._io.pos()
            self._debug['padding']['start'] = self._io.pos()
            self.padding = self._io.ensure_fixed_contents(b"\x00\x00")
            self._debug['padding']['end'] = self._io.pos()
            self._debug['offset_to_first_b_curve']['start'] = self._io.pos()
            self.offset_to_first_b_curve = self._io.read_u4be()
            self._debug['offset_to_first_b_curve']['end'] = self._io.pos()
            self._debug['offset_to_matrix']['start'] = self._io.pos()
            self.offset_to_matrix = self._io.read_u4be()
            self._debug['offset_to_matrix']['end'] = self._io.pos()
            self._debug['offset_to_first_m_curve']['start'] = self._io.pos()
            self.offset_to_first_m_curve = self._io.read_u4be()
            self._debug['offset_to_first_m_curve']['end'] = self._io.pos()
            self._debug['offset_to_clut']['start'] = self._io.pos()
            self.offset_to_clut = self._io.read_u4be()
            self._debug['offset_to_clut']['end'] = self._io.pos()
            self._debug['offset_to_first_a_curve']['start'] = self._io.pos()
            self.offset_to_first_a_curve = self._io.read_u4be()
            self._debug['offset_to_first_a_curve']['end'] = self._io.pos()
            self._debug['data']['start'] = self._io.pos()
            # Remaining bytes of the (sub)stream: the curve/matrix/CLUT data.
            self.data = self._io.read_bytes_full()
            self._debug['data']['end'] = self._io.pos()
    class BToA0Tag(KaitaiStruct):
        """BToA0 tag: type signature followed by a lut8, lut16 or lutBToA
        payload, dispatched on the signature."""
        SEQ_FIELDS = ["tag_type", "tag_data"]
        def __init__(self, _io, _parent=None, _root=None):
            self._io = _io
            self._parent = _parent
            self._root = _root if _root else self
            # Per-field {'start': ..., 'end': ...} stream offsets for debugging.
            self._debug = collections.defaultdict(dict)
        def _read(self):
            self._debug['tag_type']['start'] = self._io.pos()
            self.tag_type = KaitaiStream.resolve_enum(self._root.TagTable.TagDefinition.TagTypeSignatures, self._io.read_u4be())
            self._debug['tag_type']['end'] = self._io.pos()
            self._debug['tag_data']['start'] = self._io.pos()
            _on = self.tag_type
            # Dispatch on the type signature; other types leave tag_data unset.
            if _on == self._root.TagTable.TagDefinition.TagTypeSignatures.multi_function_table_with_one_byte_precision_type:
                self.tag_data = self._root.TagTable.TagDefinition.Lut8Type(self._io, self, self._root)
                self.tag_data._read()
            elif _on == self._root.TagTable.TagDefinition.TagTypeSignatures.multi_function_table_with_two_byte_precision_type:
                self.tag_data = self._root.TagTable.TagDefinition.Lut16Type(self._io, self, self._root)
                self.tag_data._read()
            elif _on == self._root.TagTable.TagDefinition.TagTypeSignatures.multi_function_b_to_a_table_type:
                self.tag_data = self._root.TagTable.TagDefinition.LutBToAType(self._io, self, self._root)
                self.tag_data._read()
            self._debug['tag_data']['end'] = self._io.pos()
    class MediaWhitePointTag(KaitaiStruct):
        """Media white point tag: type signature followed by its payload;
        only xyz_type is parsed."""
        SEQ_FIELDS = ["tag_type", "tag_data"]
        def __init__(self, _io, _parent=None, _root=None):
            self._io = _io
            self._parent = _parent
            self._root = _root if _root else self
            # Per-field {'start': ..., 'end': ...} stream offsets for debugging.
            self._debug = collections.defaultdict(dict)
        def _read(self):
            self._debug['tag_type']['start'] = self._io.pos()
            self.tag_type = KaitaiStream.resolve_enum(self._root.TagTable.TagDefinition.TagTypeSignatures, self._io.read_u4be())
            self._debug['tag_type']['end'] = self._io.pos()
            self._debug['tag_data']['start'] = self._io.pos()
            _on = self.tag_type
            # Unexpected type signatures leave tag_data unset.
            if _on == self._root.TagTable.TagDefinition.TagTypeSignatures.xyz_type:
                self.tag_data = self._root.TagTable.TagDefinition.XyzType(self._io, self, self._root)
                self.tag_data._read()
            self._debug['tag_data']['end'] = self._io.pos()
class Lut16Type(KaitaiStruct):
SEQ_FIELDS = ["reserved", "number_of_input_channels", "number_of_output_channels", "number_of_clut_grid_points", "padding", "encoded_e_parameters", "number_of_input_table_entries", "number_of_output_table_entries", "input_tables", "clut_values", "output_tables"]
        def __init__(self, _io, _parent=None, _root=None):
            # Standard generated constructor: keep stream/parent/root handles;
            # _root falls back to self when this struct is parsed standalone.
            self._io = _io
            self._parent = _parent
            self._root = _root if _root else self
            # Per-field {'start': ..., 'end': ...} stream offsets for debugging.
            self._debug = collections.defaultdict(dict)
def _read(self):
self._debug['reserved']['start'] = self._io.pos()
self.reserved = self._io.ensure_fixed_contents(b"\x00\x00\x00\x00")
self._debug['reserved']['end'] = self._io.pos()
self._debug['number_of_input_channels']['start'] = self._io.pos()
self.number_of_input_channels = self._io.read_u1()
self._debug['number_of_input_channels']['end'] = self._io.pos()
self._debug['number_of_output_channels']['start'] = self._io.pos()
self.number_of_output_channels = self._io.read_u1()
self._debug['number_of_output_channels']['end'] = self._io.pos()
self._debug['number_of_clut_grid_points']['start'] = self._io.pos()
self.number_of_clut_grid_points = self._io.read_u1()
self._debug['number_of_clut_grid_points']['end'] = self._io.pos()
self._debug['padding']['start'] = self._io.pos()
self.padding = self._io.ensure_fixed_contents(b"\x00")
self._debug['padding']['end'] = self._io.pos()
self._debug['encoded_e_parameters']['start'] = self._io.pos()
self.encoded_e_parameters = [None] * (9)
for i in range(9):
if not 'arr' in self._debug['encoded_e_parameters']:
self._debug['encoded_e_parameters']['arr'] = []
self._debug['encoded_e_parameters']['arr'].append({'start': self._io.pos()})
self.encoded_e_parameters[i] = self._io.read_s4be()
self._debug['encoded_e_parameters']['arr'][i]['end'] = self._io.pos()
self._debug['encoded_e_parameters']['end'] = self._io.pos()
self._debug['number_of_input_table_entries']['start'] = self._io.pos()
self.number_of_input_table_entries = self._io.read_u4be()
self._debug['number_of_input_table_entries']['end'] = self._io.pos()
self._debug['number_of_output_table_entries']['start'] = self._io.pos()
self.number_of_output_table_entries = self._io.read_u4be()
self._debug['number_of_output_table_entries']['end'] = self._io.pos()
self._debug['input_tables']['start'] = self._io.pos()
self.input_tables = self._io.read_bytes(((2 * self.number_of_input_channels) * self.number_of_input_table_entries))
self._debug['input_tables']['end'] = self._io.pos()
self._debug['clut_values']['start'] = self._io.pos()
self.clut_values = self._io.read_bytes(((2 * (self.number_of_clut_grid_points ^ self.number_of_input_channels)) * self.number_of_output_channels))
self._debug['clut_values']['end'] = self._io.pos()
self._debug['output_tables']['start'] = self._io.pos()
self.output_tables = self._io.read_bytes(((2 * self.number_of_output_channels) * self.number_of_output_table_entries))
self._debug['output_tables']['end'] = self._io.pos()
    class PerceptualRenderingIntentGamutTag(KaitaiStruct):
        """Perceptual rendering intent gamut tag: a 4-byte type signature
        followed by the payload, parsed as SignatureType when the signature is
        signature_type; otherwise ``tag_data`` is left unset.  ``_debug``
        records start/end stream offsets (Kaitai Struct --debug mode output).
        """
        SEQ_FIELDS = ["tag_type", "tag_data"]
        def __init__(self, _io, _parent=None, _root=None):
            self._io = _io
            self._parent = _parent
            # When parsed standalone, this struct acts as its own root.
            self._root = _root if _root else self
            self._debug = collections.defaultdict(dict)
        def _read(self):
            self._debug['tag_type']['start'] = self._io.pos()
            self.tag_type = KaitaiStream.resolve_enum(self._root.TagTable.TagDefinition.TagTypeSignatures, self._io.read_u4be())
            self._debug['tag_type']['end'] = self._io.pos()
            self._debug['tag_data']['start'] = self._io.pos()
            # Payload is only parsed when the expected type signature is seen.
            _on = self.tag_type
            if _on == self._root.TagTable.TagDefinition.TagTypeSignatures.signature_type:
                self.tag_data = self._root.TagTable.TagDefinition.SignatureType(self._io, self, self._root)
                self.tag_data._read()
            self._debug['tag_data']['end'] = self._io.pos()
    class U16Fixed16ArrayType(KaitaiStruct):
        """u16Fixed16ArrayType: 4 reserved zero bytes, then U16Fixed16Number
        values repeated until the end of the substream.  ``_debug`` records
        start/end stream offsets (Kaitai Struct --debug mode output).
        """
        SEQ_FIELDS = ["reserved", "values"]
        def __init__(self, _io, _parent=None, _root=None):
            self._io = _io
            self._parent = _parent
            # When parsed standalone, this struct acts as its own root.
            self._root = _root if _root else self
            self._debug = collections.defaultdict(dict)
        def _read(self):
            self._debug['reserved']['start'] = self._io.pos()
            self.reserved = self._io.ensure_fixed_contents(b"\x00\x00\x00\x00")
            self._debug['reserved']['end'] = self._io.pos()
            self._debug['values']['start'] = self._io.pos()
            # Repeat-until-EOF: element count is unknown up front, so entries
            # are appended rather than pre-allocated.
            self.values = []
            i = 0
            while not self._io.is_eof():
                if not 'arr' in self._debug['values']:
                    self._debug['values']['arr'] = []
                self._debug['values']['arr'].append({'start': self._io.pos()})
                _t_values = self._root.U16Fixed16Number(self._io, self, self._root)
                _t_values._read()
                self.values.append(_t_values)
                self._debug['values']['arr'][len(self.values) - 1]['end'] = self._io.pos()
                i += 1
            self._debug['values']['end'] = self._io.pos()
    class ColorantTableOutTag(KaitaiStruct):
        """Colorant table out tag: a 4-byte type signature followed by the
        payload, parsed as ColorantTableType when the signature is
        colorant_table_type; otherwise ``tag_data`` is left unset.  ``_debug``
        records start/end stream offsets (Kaitai Struct --debug mode output).
        """
        SEQ_FIELDS = ["tag_type", "tag_data"]
        def __init__(self, _io, _parent=None, _root=None):
            self._io = _io
            self._parent = _parent
            # When parsed standalone, this struct acts as its own root.
            self._root = _root if _root else self
            self._debug = collections.defaultdict(dict)
        def _read(self):
            self._debug['tag_type']['start'] = self._io.pos()
            self.tag_type = KaitaiStream.resolve_enum(self._root.TagTable.TagDefinition.TagTypeSignatures, self._io.read_u4be())
            self._debug['tag_type']['end'] = self._io.pos()
            self._debug['tag_data']['start'] = self._io.pos()
            # Payload is only parsed when the expected type signature is seen.
            _on = self.tag_type
            if _on == self._root.TagTable.TagDefinition.TagTypeSignatures.colorant_table_type:
                self.tag_data = self._root.TagTable.TagDefinition.ColorantTableType(self._io, self, self._root)
                self.tag_data._read()
            self._debug['tag_data']['end'] = self._io.pos()
    class MeasurementTag(KaitaiStruct):
        """Measurement tag: a 4-byte type signature followed by the payload,
        parsed as MeasurementType when the signature is measurement_type;
        otherwise ``tag_data`` is left unset.  ``_debug`` records start/end
        stream offsets (Kaitai Struct --debug mode output).
        """
        SEQ_FIELDS = ["tag_type", "tag_data"]
        def __init__(self, _io, _parent=None, _root=None):
            self._io = _io
            self._parent = _parent
            # When parsed standalone, this struct acts as its own root.
            self._root = _root if _root else self
            self._debug = collections.defaultdict(dict)
        def _read(self):
            self._debug['tag_type']['start'] = self._io.pos()
            self.tag_type = KaitaiStream.resolve_enum(self._root.TagTable.TagDefinition.TagTypeSignatures, self._io.read_u4be())
            self._debug['tag_type']['end'] = self._io.pos()
            self._debug['tag_data']['start'] = self._io.pos()
            # Payload is only parsed when the expected type signature is seen.
            _on = self.tag_type
            if _on == self._root.TagTable.TagDefinition.TagTypeSignatures.measurement_type:
                self.tag_data = self._root.TagTable.TagDefinition.MeasurementType(self._io, self, self._root)
                self.tag_data._read()
            self._debug['tag_data']['end'] = self._io.pos()
    class ProfileSequenceTag(KaitaiStruct):
        """Profile sequence tag: a 4-byte type signature followed by the
        payload, parsed as ProfileSequenceDescType when the signature is
        profile_sequence_desc_type; otherwise ``tag_data`` is left unset.
        ``_debug`` records start/end stream offsets (--debug mode output).
        """
        SEQ_FIELDS = ["tag_type", "tag_data"]
        def __init__(self, _io, _parent=None, _root=None):
            self._io = _io
            self._parent = _parent
            # When parsed standalone, this struct acts as its own root.
            self._root = _root if _root else self
            self._debug = collections.defaultdict(dict)
        def _read(self):
            self._debug['tag_type']['start'] = self._io.pos()
            self.tag_type = KaitaiStream.resolve_enum(self._root.TagTable.TagDefinition.TagTypeSignatures, self._io.read_u4be())
            self._debug['tag_type']['end'] = self._io.pos()
            self._debug['tag_data']['start'] = self._io.pos()
            # Payload is only parsed when the expected type signature is seen.
            _on = self.tag_type
            if _on == self._root.TagTable.TagDefinition.TagTypeSignatures.profile_sequence_desc_type:
                self.tag_data = self._root.TagTable.TagDefinition.ProfileSequenceDescType(self._io, self, self._root)
                self.tag_data._read()
            self._debug['tag_data']['end'] = self._io.pos()
    class TechnologyTag(KaitaiStruct):
        """Technology tag: a 4-byte type signature followed by the payload,
        parsed as SignatureType when the signature is signature_type;
        otherwise ``tag_data`` is left unset.  ``_debug`` records start/end
        stream offsets (Kaitai Struct --debug mode output).
        """
        SEQ_FIELDS = ["tag_type", "tag_data"]
        def __init__(self, _io, _parent=None, _root=None):
            self._io = _io
            self._parent = _parent
            # When parsed standalone, this struct acts as its own root.
            self._root = _root if _root else self
            self._debug = collections.defaultdict(dict)
        def _read(self):
            self._debug['tag_type']['start'] = self._io.pos()
            self.tag_type = KaitaiStream.resolve_enum(self._root.TagTable.TagDefinition.TagTypeSignatures, self._io.read_u4be())
            self._debug['tag_type']['end'] = self._io.pos()
            self._debug['tag_data']['start'] = self._io.pos()
            # Payload is only parsed when the expected type signature is seen.
            _on = self.tag_type
            if _on == self._root.TagTable.TagDefinition.TagTypeSignatures.signature_type:
                self.tag_data = self._root.TagTable.TagDefinition.SignatureType(self._io, self, self._root)
                self.tag_data._read()
            self._debug['tag_data']['end'] = self._io.pos()
    class AToB0Tag(KaitaiStruct):
        """A-to-B (device to PCS) transform tag, slot 0: a 4-byte type
        signature followed by the payload, parsed as Lut8Type, Lut16Type or
        LutAToBType depending on the signature; for any other signature
        ``tag_data`` is left unset.  ``_debug`` records start/end stream
        offsets (Kaitai Struct --debug mode output).
        """
        SEQ_FIELDS = ["tag_type", "tag_data"]
        def __init__(self, _io, _parent=None, _root=None):
            self._io = _io
            self._parent = _parent
            # When parsed standalone, this struct acts as its own root.
            self._root = _root if _root else self
            self._debug = collections.defaultdict(dict)
        def _read(self):
            self._debug['tag_type']['start'] = self._io.pos()
            self.tag_type = KaitaiStream.resolve_enum(self._root.TagTable.TagDefinition.TagTypeSignatures, self._io.read_u4be())
            self._debug['tag_type']['end'] = self._io.pos()
            self._debug['tag_data']['start'] = self._io.pos()
            # Three table encodings are accepted for this tag.
            _on = self.tag_type
            if _on == self._root.TagTable.TagDefinition.TagTypeSignatures.multi_function_table_with_one_byte_precision_type:
                self.tag_data = self._root.TagTable.TagDefinition.Lut8Type(self._io, self, self._root)
                self.tag_data._read()
            elif _on == self._root.TagTable.TagDefinition.TagTypeSignatures.multi_function_table_with_two_byte_precision_type:
                self.tag_data = self._root.TagTable.TagDefinition.Lut16Type(self._io, self, self._root)
                self.tag_data._read()
            elif _on == self._root.TagTable.TagDefinition.TagTypeSignatures.multi_function_a_to_b_table_type:
                self.tag_data = self._root.TagTable.TagDefinition.LutAToBType(self._io, self, self._root)
                self.tag_data._read()
            self._debug['tag_data']['end'] = self._io.pos()
    class DToB0Tag(KaitaiStruct):
        """D-to-B transform tag, slot 0: a 4-byte type signature followed by
        the payload, parsed as MultiProcessElementsType when the signature is
        multi_process_elements_type; otherwise ``tag_data`` is left unset.
        ``_debug`` records start/end stream offsets (--debug mode output).
        """
        SEQ_FIELDS = ["tag_type", "tag_data"]
        def __init__(self, _io, _parent=None, _root=None):
            self._io = _io
            self._parent = _parent
            # When parsed standalone, this struct acts as its own root.
            self._root = _root if _root else self
            self._debug = collections.defaultdict(dict)
        def _read(self):
            self._debug['tag_type']['start'] = self._io.pos()
            self.tag_type = KaitaiStream.resolve_enum(self._root.TagTable.TagDefinition.TagTypeSignatures, self._io.read_u4be())
            self._debug['tag_type']['end'] = self._io.pos()
            self._debug['tag_data']['start'] = self._io.pos()
            # Payload is only parsed when the expected type signature is seen.
            _on = self.tag_type
            if _on == self._root.TagTable.TagDefinition.TagTypeSignatures.multi_process_elements_type:
                self.tag_data = self._root.TagTable.TagDefinition.MultiProcessElementsType(self._io, self, self._root)
                self.tag_data._read()
            self._debug['tag_data']['end'] = self._io.pos()
    class OutputResponseTag(KaitaiStruct):
        """Output response tag: a 4-byte type signature followed by the
        payload, parsed as ResponseCurveSet16Type when the signature is
        response_curve_set_16_type; otherwise ``tag_data`` is left unset.
        ``_debug`` records start/end stream offsets (--debug mode output).
        """
        SEQ_FIELDS = ["tag_type", "tag_data"]
        def __init__(self, _io, _parent=None, _root=None):
            self._io = _io
            self._parent = _parent
            # When parsed standalone, this struct acts as its own root.
            self._root = _root if _root else self
            self._debug = collections.defaultdict(dict)
        def _read(self):
            self._debug['tag_type']['start'] = self._io.pos()
            self.tag_type = KaitaiStream.resolve_enum(self._root.TagTable.TagDefinition.TagTypeSignatures, self._io.read_u4be())
            self._debug['tag_type']['end'] = self._io.pos()
            self._debug['tag_data']['start'] = self._io.pos()
            # Payload is only parsed when the expected type signature is seen.
            _on = self.tag_type
            if _on == self._root.TagTable.TagDefinition.TagTypeSignatures.response_curve_set_16_type:
                self.tag_data = self._root.TagTable.TagDefinition.ResponseCurveSet16Type(self._io, self, self._root)
                self.tag_data._read()
            self._debug['tag_data']['end'] = self._io.pos()
    class GreenMatrixColumnTag(KaitaiStruct):
        """Green matrix column tag: a 4-byte type signature followed by the
        payload, parsed as XyzType when the signature is xyz_type; otherwise
        ``tag_data`` is left unset.  ``_debug`` records start/end stream
        offsets (Kaitai Struct --debug mode output).
        """
        SEQ_FIELDS = ["tag_type", "tag_data"]
        def __init__(self, _io, _parent=None, _root=None):
            self._io = _io
            self._parent = _parent
            # When parsed standalone, this struct acts as its own root.
            self._root = _root if _root else self
            self._debug = collections.defaultdict(dict)
        def _read(self):
            self._debug['tag_type']['start'] = self._io.pos()
            self.tag_type = KaitaiStream.resolve_enum(self._root.TagTable.TagDefinition.TagTypeSignatures, self._io.read_u4be())
            self._debug['tag_type']['end'] = self._io.pos()
            self._debug['tag_data']['start'] = self._io.pos()
            # Payload is only parsed when the expected type signature is seen.
            _on = self.tag_type
            if _on == self._root.TagTable.TagDefinition.TagTypeSignatures.xyz_type:
                self.tag_data = self._root.TagTable.TagDefinition.XyzType(self._io, self, self._root)
                self.tag_data._read()
            self._debug['tag_data']['end'] = self._io.pos()
    class ProfileDescriptionTag(KaitaiStruct):
        """Profile description tag: a 4-byte type signature followed by the
        payload, parsed as MultiLocalizedUnicodeType when the signature is
        multi_localized_unicode_type; otherwise ``tag_data`` is left unset.
        ``_debug`` records start/end stream offsets (--debug mode output).
        """
        SEQ_FIELDS = ["tag_type", "tag_data"]
        def __init__(self, _io, _parent=None, _root=None):
            self._io = _io
            self._parent = _parent
            # When parsed standalone, this struct acts as its own root.
            self._root = _root if _root else self
            self._debug = collections.defaultdict(dict)
        def _read(self):
            self._debug['tag_type']['start'] = self._io.pos()
            self.tag_type = KaitaiStream.resolve_enum(self._root.TagTable.TagDefinition.TagTypeSignatures, self._io.read_u4be())
            self._debug['tag_type']['end'] = self._io.pos()
            self._debug['tag_data']['start'] = self._io.pos()
            # Payload is only parsed when the expected type signature is seen.
            _on = self.tag_type
            if _on == self._root.TagTable.TagDefinition.TagTypeSignatures.multi_localized_unicode_type:
                self.tag_data = self._root.TagTable.TagDefinition.MultiLocalizedUnicodeType(self._io, self, self._root)
                self.tag_data._read()
            self._debug['tag_data']['end'] = self._io.pos()
    class Preview1Tag(KaitaiStruct):
        """Preview transform tag, slot 1: a 4-byte type signature followed by
        the payload, parsed as Lut8Type, Lut16Type or LutBToAType depending on
        the signature; for any other signature ``tag_data`` is left unset.
        ``_debug`` records start/end stream offsets (--debug mode output).
        """
        SEQ_FIELDS = ["tag_type", "tag_data"]
        def __init__(self, _io, _parent=None, _root=None):
            self._io = _io
            self._parent = _parent
            # When parsed standalone, this struct acts as its own root.
            self._root = _root if _root else self
            self._debug = collections.defaultdict(dict)
        def _read(self):
            self._debug['tag_type']['start'] = self._io.pos()
            self.tag_type = KaitaiStream.resolve_enum(self._root.TagTable.TagDefinition.TagTypeSignatures, self._io.read_u4be())
            self._debug['tag_type']['end'] = self._io.pos()
            self._debug['tag_data']['start'] = self._io.pos()
            # Three table encodings are accepted for this tag.
            _on = self.tag_type
            if _on == self._root.TagTable.TagDefinition.TagTypeSignatures.multi_function_table_with_one_byte_precision_type:
                self.tag_data = self._root.TagTable.TagDefinition.Lut8Type(self._io, self, self._root)
                self.tag_data._read()
            elif _on == self._root.TagTable.TagDefinition.TagTypeSignatures.multi_function_table_with_two_byte_precision_type:
                self.tag_data = self._root.TagTable.TagDefinition.Lut16Type(self._io, self, self._root)
                self.tag_data._read()
            elif _on == self._root.TagTable.TagDefinition.TagTypeSignatures.multi_function_b_to_a_table_type:
                self.tag_data = self._root.TagTable.TagDefinition.LutBToAType(self._io, self, self._root)
                self.tag_data._read()
            self._debug['tag_data']['end'] = self._io.pos()
    class RedTrcTag(KaitaiStruct):
        """Red tone reproduction curve tag: a 4-byte type signature followed
        by the payload, parsed as CurveType or ParametricCurveType depending
        on the signature; for any other signature ``tag_data`` is left unset.
        ``_debug`` records start/end stream offsets (--debug mode output).
        """
        SEQ_FIELDS = ["tag_type", "tag_data"]
        def __init__(self, _io, _parent=None, _root=None):
            self._io = _io
            self._parent = _parent
            # When parsed standalone, this struct acts as its own root.
            self._root = _root if _root else self
            self._debug = collections.defaultdict(dict)
        def _read(self):
            self._debug['tag_type']['start'] = self._io.pos()
            self.tag_type = KaitaiStream.resolve_enum(self._root.TagTable.TagDefinition.TagTypeSignatures, self._io.read_u4be())
            self._debug['tag_type']['end'] = self._io.pos()
            self._debug['tag_data']['start'] = self._io.pos()
            # Two curve encodings are accepted for this tag.
            _on = self.tag_type
            if _on == self._root.TagTable.TagDefinition.TagTypeSignatures.curve_type:
                self.tag_data = self._root.TagTable.TagDefinition.CurveType(self._io, self, self._root)
                self.tag_data._read()
            elif _on == self._root.TagTable.TagDefinition.TagTypeSignatures.parametric_curve_type:
                self.tag_data = self._root.TagTable.TagDefinition.ParametricCurveType(self._io, self, self._root)
                self.tag_data._read()
            self._debug['tag_data']['end'] = self._io.pos()
    class BToD0Tag(KaitaiStruct):
        """B-to-D transform tag, slot 0: a 4-byte type signature followed by
        the payload, parsed as MultiProcessElementsType when the signature is
        multi_process_elements_type; otherwise ``tag_data`` is left unset.
        ``_debug`` records start/end stream offsets (--debug mode output).
        """
        SEQ_FIELDS = ["tag_type", "tag_data"]
        def __init__(self, _io, _parent=None, _root=None):
            self._io = _io
            self._parent = _parent
            # When parsed standalone, this struct acts as its own root.
            self._root = _root if _root else self
            self._debug = collections.defaultdict(dict)
        def _read(self):
            self._debug['tag_type']['start'] = self._io.pos()
            self.tag_type = KaitaiStream.resolve_enum(self._root.TagTable.TagDefinition.TagTypeSignatures, self._io.read_u4be())
            self._debug['tag_type']['end'] = self._io.pos()
            self._debug['tag_data']['start'] = self._io.pos()
            # Payload is only parsed when the expected type signature is seen.
            _on = self.tag_type
            if _on == self._root.TagTable.TagDefinition.TagTypeSignatures.multi_process_elements_type:
                self.tag_data = self._root.TagTable.TagDefinition.MultiProcessElementsType(self._io, self, self._root)
                self.tag_data._read()
            self._debug['tag_data']['end'] = self._io.pos()
    class DToB1Tag(KaitaiStruct):
        """D-to-B transform tag, slot 1: a 4-byte type signature followed by
        the payload, parsed as MultiProcessElementsType when the signature is
        multi_process_elements_type; otherwise ``tag_data`` is left unset.
        ``_debug`` records start/end stream offsets (--debug mode output).
        """
        SEQ_FIELDS = ["tag_type", "tag_data"]
        def __init__(self, _io, _parent=None, _root=None):
            self._io = _io
            self._parent = _parent
            # When parsed standalone, this struct acts as its own root.
            self._root = _root if _root else self
            self._debug = collections.defaultdict(dict)
        def _read(self):
            self._debug['tag_type']['start'] = self._io.pos()
            self.tag_type = KaitaiStream.resolve_enum(self._root.TagTable.TagDefinition.TagTypeSignatures, self._io.read_u4be())
            self._debug['tag_type']['end'] = self._io.pos()
            self._debug['tag_data']['start'] = self._io.pos()
            # Payload is only parsed when the expected type signature is seen.
            _on = self.tag_type
            if _on == self._root.TagTable.TagDefinition.TagTypeSignatures.multi_process_elements_type:
                self.tag_data = self._root.TagTable.TagDefinition.MultiProcessElementsType(self._io, self, self._root)
                self.tag_data._read()
            self._debug['tag_data']['end'] = self._io.pos()
    class BToA1Tag(KaitaiStruct):
        """B-to-A (PCS to device) transform tag, slot 1: a 4-byte type
        signature followed by the payload, parsed as Lut8Type, Lut16Type or
        LutBToAType depending on the signature; for any other signature
        ``tag_data`` is left unset.  ``_debug`` records start/end stream
        offsets (Kaitai Struct --debug mode output).
        """
        SEQ_FIELDS = ["tag_type", "tag_data"]
        def __init__(self, _io, _parent=None, _root=None):
            self._io = _io
            self._parent = _parent
            # When parsed standalone, this struct acts as its own root.
            self._root = _root if _root else self
            self._debug = collections.defaultdict(dict)
        def _read(self):
            self._debug['tag_type']['start'] = self._io.pos()
            self.tag_type = KaitaiStream.resolve_enum(self._root.TagTable.TagDefinition.TagTypeSignatures, self._io.read_u4be())
            self._debug['tag_type']['end'] = self._io.pos()
            self._debug['tag_data']['start'] = self._io.pos()
            # Three table encodings are accepted for this tag.
            _on = self.tag_type
            if _on == self._root.TagTable.TagDefinition.TagTypeSignatures.multi_function_table_with_one_byte_precision_type:
                self.tag_data = self._root.TagTable.TagDefinition.Lut8Type(self._io, self, self._root)
                self.tag_data._read()
            elif _on == self._root.TagTable.TagDefinition.TagTypeSignatures.multi_function_table_with_two_byte_precision_type:
                self.tag_data = self._root.TagTable.TagDefinition.Lut16Type(self._io, self, self._root)
                self.tag_data._read()
            elif _on == self._root.TagTable.TagDefinition.TagTypeSignatures.multi_function_b_to_a_table_type:
                self.tag_data = self._root.TagTable.TagDefinition.LutBToAType(self._io, self, self._root)
                self.tag_data._read()
            self._debug['tag_data']['end'] = self._io.pos()
    class ParametricCurveType(KaitaiStruct):
        """parametricCurveType: one of five predefined curve function
        families, selected by the ``function_type`` enum; ``parameters``
        holds the coefficient set matching the selected family.  All
        coefficients are read as raw signed 4-byte big-endian values (no
        fixed-point scaling is applied here).  If ``function_type`` resolves
        to an unknown value, ``parameters`` is left unset.  ``_debug``
        records start/end stream offsets (Kaitai Struct --debug mode output).
        """
        class ParametricCurveTypeFunctions(Enum):
            y_equals_x_to_power_of_g = 0
            cie_122_1996 = 1
            iec_61966_3 = 2
            iec_61966_2_1 = 3
            y_equals_ob_ax_plus_b_cb_to_power_of_g_plus_c = 4
        SEQ_FIELDS = ["reserved", "function_type", "reserved_2", "parameters"]
        def __init__(self, _io, _parent=None, _root=None):
            self._io = _io
            self._parent = _parent
            # When parsed standalone, this struct acts as its own root.
            self._root = _root if _root else self
            self._debug = collections.defaultdict(dict)
        def _read(self):
            self._debug['reserved']['start'] = self._io.pos()
            self.reserved = self._io.ensure_fixed_contents(b"\x00\x00\x00\x00")
            self._debug['reserved']['end'] = self._io.pos()
            self._debug['function_type']['start'] = self._io.pos()
            self.function_type = KaitaiStream.resolve_enum(self._root.TagTable.TagDefinition.ParametricCurveType.ParametricCurveTypeFunctions, self._io.read_u2be())
            self._debug['function_type']['end'] = self._io.pos()
            self._debug['reserved_2']['start'] = self._io.pos()
            self.reserved_2 = self._io.ensure_fixed_contents(b"\x00\x00")
            self._debug['reserved_2']['end'] = self._io.pos()
            self._debug['parameters']['start'] = self._io.pos()
            # The coefficient record layout depends on the function family.
            _on = self.function_type
            if _on == self._root.TagTable.TagDefinition.ParametricCurveType.ParametricCurveTypeFunctions.cie_122_1996:
                self.parameters = self._root.TagTable.TagDefinition.ParametricCurveType.ParamsCie1221996(self._io, self, self._root)
                self.parameters._read()
            elif _on == self._root.TagTable.TagDefinition.ParametricCurveType.ParametricCurveTypeFunctions.iec_61966_3:
                self.parameters = self._root.TagTable.TagDefinition.ParametricCurveType.ParamsIec619663(self._io, self, self._root)
                self.parameters._read()
            elif _on == self._root.TagTable.TagDefinition.ParametricCurveType.ParametricCurveTypeFunctions.iec_61966_2_1:
                self.parameters = self._root.TagTable.TagDefinition.ParametricCurveType.ParamsIec6196621(self._io, self, self._root)
                self.parameters._read()
            elif _on == self._root.TagTable.TagDefinition.ParametricCurveType.ParametricCurveTypeFunctions.y_equals_ob_ax_plus_b_cb_to_power_of_g_plus_c:
                self.parameters = self._root.TagTable.TagDefinition.ParametricCurveType.ParamsYEqualsObAxPlusBCbToPowerOfGPlusC(self._io, self, self._root)
                self.parameters._read()
            elif _on == self._root.TagTable.TagDefinition.ParametricCurveType.ParametricCurveTypeFunctions.y_equals_x_to_power_of_g:
                self.parameters = self._root.TagTable.TagDefinition.ParametricCurveType.ParamsYEqualsXToPowerOfG(self._io, self, self._root)
                self.parameters._read()
            self._debug['parameters']['end'] = self._io.pos()
        class ParamsIec619663(KaitaiStruct):
            """Coefficients g, a, b, c for the IEC 61966-3 curve family."""
            SEQ_FIELDS = ["g", "a", "b", "c"]
            def __init__(self, _io, _parent=None, _root=None):
                self._io = _io
                self._parent = _parent
                self._root = _root if _root else self
                self._debug = collections.defaultdict(dict)
            def _read(self):
                self._debug['g']['start'] = self._io.pos()
                self.g = self._io.read_s4be()
                self._debug['g']['end'] = self._io.pos()
                self._debug['a']['start'] = self._io.pos()
                self.a = self._io.read_s4be()
                self._debug['a']['end'] = self._io.pos()
                self._debug['b']['start'] = self._io.pos()
                self.b = self._io.read_s4be()
                self._debug['b']['end'] = self._io.pos()
                self._debug['c']['start'] = self._io.pos()
                self.c = self._io.read_s4be()
                self._debug['c']['end'] = self._io.pos()
        class ParamsIec6196621(KaitaiStruct):
            """Coefficients g, a, b, c, d for the IEC 61966-2-1 curve family."""
            SEQ_FIELDS = ["g", "a", "b", "c", "d"]
            def __init__(self, _io, _parent=None, _root=None):
                self._io = _io
                self._parent = _parent
                self._root = _root if _root else self
                self._debug = collections.defaultdict(dict)
            def _read(self):
                self._debug['g']['start'] = self._io.pos()
                self.g = self._io.read_s4be()
                self._debug['g']['end'] = self._io.pos()
                self._debug['a']['start'] = self._io.pos()
                self.a = self._io.read_s4be()
                self._debug['a']['end'] = self._io.pos()
                self._debug['b']['start'] = self._io.pos()
                self.b = self._io.read_s4be()
                self._debug['b']['end'] = self._io.pos()
                self._debug['c']['start'] = self._io.pos()
                self.c = self._io.read_s4be()
                self._debug['c']['end'] = self._io.pos()
                self._debug['d']['start'] = self._io.pos()
                self.d = self._io.read_s4be()
                self._debug['d']['end'] = self._io.pos()
        class ParamsYEqualsXToPowerOfG(KaitaiStruct):
            """Single exponent g for the Y = X**g curve family."""
            SEQ_FIELDS = ["g"]
            def __init__(self, _io, _parent=None, _root=None):
                self._io = _io
                self._parent = _parent
                self._root = _root if _root else self
                self._debug = collections.defaultdict(dict)
            def _read(self):
                self._debug['g']['start'] = self._io.pos()
                self.g = self._io.read_s4be()
                self._debug['g']['end'] = self._io.pos()
        class ParamsYEqualsObAxPlusBCbToPowerOfGPlusC(KaitaiStruct):
            """Coefficients g, a, b, c, d, e, f for the Y = (aX+b)**g + c
            curve family."""
            SEQ_FIELDS = ["g", "a", "b", "c", "d", "e", "f"]
            def __init__(self, _io, _parent=None, _root=None):
                self._io = _io
                self._parent = _parent
                self._root = _root if _root else self
                self._debug = collections.defaultdict(dict)
            def _read(self):
                self._debug['g']['start'] = self._io.pos()
                self.g = self._io.read_s4be()
                self._debug['g']['end'] = self._io.pos()
                self._debug['a']['start'] = self._io.pos()
                self.a = self._io.read_s4be()
                self._debug['a']['end'] = self._io.pos()
                self._debug['b']['start'] = self._io.pos()
                self.b = self._io.read_s4be()
                self._debug['b']['end'] = self._io.pos()
                self._debug['c']['start'] = self._io.pos()
                self.c = self._io.read_s4be()
                self._debug['c']['end'] = self._io.pos()
                self._debug['d']['start'] = self._io.pos()
                self.d = self._io.read_s4be()
                self._debug['d']['end'] = self._io.pos()
                self._debug['e']['start'] = self._io.pos()
                self.e = self._io.read_s4be()
                self._debug['e']['end'] = self._io.pos()
                self._debug['f']['start'] = self._io.pos()
                self.f = self._io.read_s4be()
                self._debug['f']['end'] = self._io.pos()
        class ParamsCie1221996(KaitaiStruct):
            """Coefficients g, a, b for the CIE 122-1996 curve family."""
            SEQ_FIELDS = ["g", "a", "b"]
            def __init__(self, _io, _parent=None, _root=None):
                self._io = _io
                self._parent = _parent
                self._root = _root if _root else self
                self._debug = collections.defaultdict(dict)
            def _read(self):
                self._debug['g']['start'] = self._io.pos()
                self.g = self._io.read_s4be()
                self._debug['g']['end'] = self._io.pos()
                self._debug['a']['start'] = self._io.pos()
                self.a = self._io.read_s4be()
                self._debug['a']['end'] = self._io.pos()
                self._debug['b']['start'] = self._io.pos()
                self.b = self._io.read_s4be()
                self._debug['b']['end'] = self._io.pos()
    class ChromaticityTag(KaitaiStruct):
        """Chromaticity tag: a 4-byte type signature followed by the payload,
        parsed as ChromaticityType when the signature is chromaticity_type;
        otherwise ``tag_data`` is left unset.  ``_debug`` records start/end
        stream offsets (Kaitai Struct --debug mode output).
        """
        SEQ_FIELDS = ["tag_type", "tag_data"]
        def __init__(self, _io, _parent=None, _root=None):
            self._io = _io
            self._parent = _parent
            # When parsed standalone, this struct acts as its own root.
            self._root = _root if _root else self
            self._debug = collections.defaultdict(dict)
        def _read(self):
            self._debug['tag_type']['start'] = self._io.pos()
            self.tag_type = KaitaiStream.resolve_enum(self._root.TagTable.TagDefinition.TagTypeSignatures, self._io.read_u4be())
            self._debug['tag_type']['end'] = self._io.pos()
            self._debug['tag_data']['start'] = self._io.pos()
            # Payload is only parsed when the expected type signature is seen.
            _on = self.tag_type
            if _on == self._root.TagTable.TagDefinition.TagTypeSignatures.chromaticity_type:
                self.tag_data = self._root.TagTable.TagDefinition.ChromaticityType(self._io, self, self._root)
                self.tag_data._read()
            self._debug['tag_data']['end'] = self._io.pos()
    class ChromaticAdaptationTag(KaitaiStruct):
        """Chromatic adaptation tag: a 4-byte type signature followed by the
        payload, parsed as S15Fixed16ArrayType when the signature is
        s_15_fixed_16_array_type; otherwise ``tag_data`` is left unset.
        ``_debug`` records start/end stream offsets (--debug mode output).
        """
        SEQ_FIELDS = ["tag_type", "tag_data"]
        def __init__(self, _io, _parent=None, _root=None):
            self._io = _io
            self._parent = _parent
            # When parsed standalone, this struct acts as its own root.
            self._root = _root if _root else self
            self._debug = collections.defaultdict(dict)
        def _read(self):
            self._debug['tag_type']['start'] = self._io.pos()
            self.tag_type = KaitaiStream.resolve_enum(self._root.TagTable.TagDefinition.TagTypeSignatures, self._io.read_u4be())
            self._debug['tag_type']['end'] = self._io.pos()
            self._debug['tag_data']['start'] = self._io.pos()
            # Payload is only parsed when the expected type signature is seen.
            _on = self.tag_type
            if _on == self._root.TagTable.TagDefinition.TagTypeSignatures.s_15_fixed_16_array_type:
                self.tag_data = self._root.TagTable.TagDefinition.S15Fixed16ArrayType(self._io, self, self._root)
                self.tag_data._read()
            self._debug['tag_data']['end'] = self._io.pos()
    class MeasurementType(KaitaiStruct):
        """measurementType: measurement conditions — standard observer,
        backing tristimulus values, measurement geometry, flare and standard
        illuminant.  Enum fields are resolved from u4be values.  ``_debug``
        records start/end stream offsets (Kaitai Struct --debug mode output).
        """
        class StandardObserverEncodings(Enum):
            unknown = 0
            cie_1931_standard_colorimetric_observer = 1
            cie_1964_standard_colorimetric_observer = 2
        class MeasurementGeometryEncodings(Enum):
            unknown = 0
            zero_degrees_to_45_degrees_or_45_degrees_to_zero_degrees = 1
            zero_degrees_to_d_degrees_or_d_degrees_to_zero_degrees = 2
        class MeasurementFlareEncodings(Enum):
            zero_percent = 0
            one_hundred_percent = 65536
        SEQ_FIELDS = ["reserved", "standard_observer_encoding", "nciexyz_tristimulus_values_for_measurement_backing", "measurement_geometry_encoding", "measurement_flare_encoding", "standard_illuminant_encoding"]
        def __init__(self, _io, _parent=None, _root=None):
            self._io = _io
            self._parent = _parent
            # When parsed standalone, this struct acts as its own root.
            self._root = _root if _root else self
            self._debug = collections.defaultdict(dict)
        def _read(self):
            self._debug['reserved']['start'] = self._io.pos()
            self.reserved = self._io.ensure_fixed_contents(b"\x00\x00\x00\x00")
            self._debug['reserved']['end'] = self._io.pos()
            self._debug['standard_observer_encoding']['start'] = self._io.pos()
            self.standard_observer_encoding = KaitaiStream.resolve_enum(self._root.TagTable.TagDefinition.MeasurementType.StandardObserverEncodings, self._io.read_u4be())
            self._debug['standard_observer_encoding']['end'] = self._io.pos()
            self._debug['nciexyz_tristimulus_values_for_measurement_backing']['start'] = self._io.pos()
            self.nciexyz_tristimulus_values_for_measurement_backing = self._root.XyzNumber(self._io, self, self._root)
            self.nciexyz_tristimulus_values_for_measurement_backing._read()
            self._debug['nciexyz_tristimulus_values_for_measurement_backing']['end'] = self._io.pos()
            self._debug['measurement_geometry_encoding']['start'] = self._io.pos()
            self.measurement_geometry_encoding = KaitaiStream.resolve_enum(self._root.TagTable.TagDefinition.MeasurementType.MeasurementGeometryEncodings, self._io.read_u4be())
            self._debug['measurement_geometry_encoding']['end'] = self._io.pos()
            self._debug['measurement_flare_encoding']['start'] = self._io.pos()
            self.measurement_flare_encoding = KaitaiStream.resolve_enum(self._root.TagTable.TagDefinition.MeasurementType.MeasurementFlareEncodings, self._io.read_u4be())
            self._debug['measurement_flare_encoding']['end'] = self._io.pos()
            self._debug['standard_illuminant_encoding']['start'] = self._io.pos()
            self.standard_illuminant_encoding = self._root.StandardIlluminantEncoding(self._io, self, self._root)
            self.standard_illuminant_encoding._read()
            self._debug['standard_illuminant_encoding']['end'] = self._io.pos()
    class TextType(KaitaiStruct):
        """textType: 4 reserved zero bytes, then the remainder of the
        substream decoded as ASCII, truncated at the first NUL byte.
        ``_debug`` records start/end stream offsets (--debug mode output).
        """
        SEQ_FIELDS = ["reserved", "value"]
        def __init__(self, _io, _parent=None, _root=None):
            self._io = _io
            self._parent = _parent
            # When parsed standalone, this struct acts as its own root.
            self._root = _root if _root else self
            self._debug = collections.defaultdict(dict)
        def _read(self):
            self._debug['reserved']['start'] = self._io.pos()
            self.reserved = self._io.ensure_fixed_contents(b"\x00\x00\x00\x00")
            self._debug['reserved']['end'] = self._io.pos()
            self._debug['value']['start'] = self._io.pos()
            # Reads all remaining bytes; bytes_terminate drops the NUL
            # terminator and anything after it before decoding.
            self.value = (KaitaiStream.bytes_terminate(self._io.read_bytes_full(), 0, False)).decode(u"ASCII")
            self._debug['value']['end'] = self._io.pos()
    class ProfileSequenceIdentifierType(KaitaiStruct):
        """profileSequenceIdentifierType: a count of structures, a positions
        table (offset/size pairs) and then the profile identifier records
        themselves.  Both tables have ``number_of_structures`` entries.
        ``_debug`` records start/end stream offsets (--debug mode output).
        """
        SEQ_FIELDS = ["reserved", "number_of_structures", "positions_table", "profile_identifiers"]
        def __init__(self, _io, _parent=None, _root=None):
            self._io = _io
            self._parent = _parent
            # When parsed standalone, this struct acts as its own root.
            self._root = _root if _root else self
            self._debug = collections.defaultdict(dict)
        def _read(self):
            self._debug['reserved']['start'] = self._io.pos()
            self.reserved = self._io.ensure_fixed_contents(b"\x00\x00\x00\x00")
            self._debug['reserved']['end'] = self._io.pos()
            self._debug['number_of_structures']['start'] = self._io.pos()
            self.number_of_structures = self._io.read_u4be()
            self._debug['number_of_structures']['end'] = self._io.pos()
            self._debug['positions_table']['start'] = self._io.pos()
            self.positions_table = [None] * (self.number_of_structures)
            for i in range(self.number_of_structures):
                if not 'arr' in self._debug['positions_table']:
                    self._debug['positions_table']['arr'] = []
                self._debug['positions_table']['arr'].append({'start': self._io.pos()})
                _t_positions_table = self._root.PositionNumber(self._io, self, self._root)
                _t_positions_table._read()
                self.positions_table[i] = _t_positions_table
                self._debug['positions_table']['arr'][i]['end'] = self._io.pos()
            self._debug['positions_table']['end'] = self._io.pos()
            self._debug['profile_identifiers']['start'] = self._io.pos()
            self.profile_identifiers = [None] * (self.number_of_structures)
            for i in range(self.number_of_structures):
                if not 'arr' in self._debug['profile_identifiers']:
                    self._debug['profile_identifiers']['arr'] = []
                self._debug['profile_identifiers']['arr'].append({'start': self._io.pos()})
                _t_profile_identifiers = self._root.TagTable.TagDefinition.ProfileSequenceIdentifierType.ProfileIdentifier(self._io, self, self._root)
                _t_profile_identifiers._read()
                self.profile_identifiers[i] = _t_profile_identifiers
                self._debug['profile_identifiers']['arr'][i]['end'] = self._io.pos()
            self._debug['profile_identifiers']['end'] = self._io.pos()
        class ProfileIdentifier(KaitaiStruct):
            """One sequence entry: a 16-byte profile ID followed by a
            multi-localized Unicode profile description."""
            SEQ_FIELDS = ["profile_id", "profile_description"]
            def __init__(self, _io, _parent=None, _root=None):
                self._io = _io
                self._parent = _parent
                self._root = _root if _root else self
                self._debug = collections.defaultdict(dict)
            def _read(self):
                self._debug['profile_id']['start'] = self._io.pos()
                self.profile_id = self._io.read_bytes(16)
                self._debug['profile_id']['end'] = self._io.pos()
                self._debug['profile_description']['start'] = self._io.pos()
                self.profile_description = self._root.TagTable.TagDefinition.MultiLocalizedUnicodeType(self._io, self, self._root)
                self.profile_description._read()
                self._debug['profile_description']['end'] = self._io.pos()
class ColorantTableType(KaitaiStruct):
    """Colorant table payload: 4 reserved zero bytes, a u4 colorant count,
    then that many Colorant entries."""
    SEQ_FIELDS = ["reserved", "count_of_colorants", "colorants"]
    def __init__(self, _io, _parent=None, _root=None):
        self._io = _io
        self._parent = _parent
        self._root = _root if _root else self
        # field name -> {'start': offset, 'end': offset}; repeated fields add 'arr'
        self._debug = collections.defaultdict(dict)
    def _read(self):
        self._debug['reserved']['start'] = self._io.pos()
        self.reserved = self._io.ensure_fixed_contents(b"\x00\x00\x00\x00")
        self._debug['reserved']['end'] = self._io.pos()
        self._debug['count_of_colorants']['start'] = self._io.pos()
        self.count_of_colorants = self._io.read_u4be()
        self._debug['count_of_colorants']['end'] = self._io.pos()
        self._debug['colorants']['start'] = self._io.pos()
        self.colorants = [None] * (self.count_of_colorants)
        for i in range(self.count_of_colorants):
            if not 'arr' in self._debug['colorants']:
                self._debug['colorants']['arr'] = []
            self._debug['colorants']['arr'].append({'start': self._io.pos()})
            _t_colorants = self._root.TagTable.TagDefinition.ColorantTableType.Colorant(self._io, self, self._root)
            _t_colorants._read()
            self.colorants[i] = _t_colorants
            self._debug['colorants']['arr'][i]['end'] = self._io.pos()
        self._debug['colorants']['end'] = self._io.pos()
    class Colorant(KaitaiStruct):
        """One colorant entry: NUL-terminated ASCII name, zero padding up to
        the 32-byte name field, then 6 bytes of PCS values."""
        SEQ_FIELDS = ["name", "padding", "pcs_values"]
        def __init__(self, _io, _parent=None, _root=None):
            self._io = _io
            self._parent = _parent
            self._root = _root if _root else self
            self._debug = collections.defaultdict(dict)
        def _read(self):
            self._debug['name']['start'] = self._io.pos()
            self.name = (self._io.read_bytes_term(0, False, True, True)).decode(u"ASCII")
            self._debug['name']['end'] = self._io.pos()
            self._debug['padding']['start'] = self._io.pos()
            self.padding = [None] * ((32 - len(self.name)))
            for i in range((32 - len(self.name))):
                if not 'arr' in self._debug['padding']:
                    self._debug['padding']['arr'] = []
                self._debug['padding']['arr'].append({'start': self._io.pos()})
                # BUG FIX: store each validated padding byte in its slot.
                # The original rebound self.padding to the last byte read,
                # discarding the pre-sized list built above.
                self.padding[i] = self._io.ensure_fixed_contents(b"\x00")
                self._debug['padding']['arr'][i]['end'] = self._io.pos()
            self._debug['padding']['end'] = self._io.pos()
            self._debug['pcs_values']['start'] = self._io.pos()
            self.pcs_values = self._io.read_bytes(6)
            self._debug['pcs_values']['end'] = self._io.pos()
class SignatureType(KaitaiStruct):
    """Signature payload: 4 reserved zero bytes followed by a 4-character
    ASCII signature; stream offsets are logged to _debug."""
    SEQ_FIELDS = ["reserved", "signature"]
    def __init__(self, _io, _parent=None, _root=None):
        self._io = _io
        self._parent = _parent
        self._root = _root or self
        self._debug = collections.defaultdict(dict)
    def _read(self):
        # Local aliases keep the offset bookkeeping terse.
        pos = self._io.pos
        dbg = self._debug
        dbg['reserved']['start'] = pos()
        self.reserved = self._io.ensure_fixed_contents(b"\x00\x00\x00\x00")
        dbg['reserved']['end'] = pos()
        dbg['signature']['start'] = pos()
        raw_signature = self._io.read_bytes(4)
        self.signature = raw_signature.decode(u"ASCII")
        dbg['signature']['end'] = pos()
class CopyrightTag(KaitaiStruct):
    """Copyright tag wrapper: reads a u4 type signature, then parses the body
    as multi-localized unicode when the signature matches.
    NOTE(review): for any other signature, tag_data is never assigned."""
    SEQ_FIELDS = ["tag_type", "tag_data"]
    def __init__(self, _io, _parent=None, _root=None):
        self._io = _io
        self._parent = _parent
        self._root = _root if _root else self
        # field name -> {'start': offset, 'end': offset}
        self._debug = collections.defaultdict(dict)
    def _read(self):
        self._debug['tag_type']['start'] = self._io.pos()
        self.tag_type = KaitaiStream.resolve_enum(self._root.TagTable.TagDefinition.TagTypeSignatures, self._io.read_u4be())
        self._debug['tag_type']['end'] = self._io.pos()
        self._debug['tag_data']['start'] = self._io.pos()
        _on = self.tag_type
        if _on == self._root.TagTable.TagDefinition.TagTypeSignatures.multi_localized_unicode_type:
            self.tag_data = self._root.TagTable.TagDefinition.MultiLocalizedUnicodeType(self._io, self, self._root)
            self.tag_data._read()
        self._debug['tag_data']['end'] = self._io.pos()
class Preview0Tag(KaitaiStruct):
    """Preview-0 tag wrapper: dispatches the body to the LUT parser matching
    the u4 type signature (lut8 / lut16 / lutAtoB / lutBtoA).
    NOTE(review): for any other signature, tag_data is never assigned."""
    SEQ_FIELDS = ["tag_type", "tag_data"]
    def __init__(self, _io, _parent=None, _root=None):
        self._io = _io
        self._parent = _parent
        self._root = _root if _root else self
        # field name -> {'start': offset, 'end': offset}
        self._debug = collections.defaultdict(dict)
    def _read(self):
        self._debug['tag_type']['start'] = self._io.pos()
        self.tag_type = KaitaiStream.resolve_enum(self._root.TagTable.TagDefinition.TagTypeSignatures, self._io.read_u4be())
        self._debug['tag_type']['end'] = self._io.pos()
        self._debug['tag_data']['start'] = self._io.pos()
        _on = self.tag_type
        if _on == self._root.TagTable.TagDefinition.TagTypeSignatures.multi_function_table_with_one_byte_precision_type:
            self.tag_data = self._root.TagTable.TagDefinition.Lut8Type(self._io, self, self._root)
            self.tag_data._read()
        elif _on == self._root.TagTable.TagDefinition.TagTypeSignatures.multi_function_table_with_two_byte_precision_type:
            self.tag_data = self._root.TagTable.TagDefinition.Lut16Type(self._io, self, self._root)
            self.tag_data._read()
        elif _on == self._root.TagTable.TagDefinition.TagTypeSignatures.multi_function_a_to_b_table_type:
            self.tag_data = self._root.TagTable.TagDefinition.LutAToBType(self._io, self, self._root)
            self.tag_data._read()
        elif _on == self._root.TagTable.TagDefinition.TagTypeSignatures.multi_function_b_to_a_table_type:
            self.tag_data = self._root.TagTable.TagDefinition.LutBToAType(self._io, self, self._root)
            self.tag_data._read()
        self._debug['tag_data']['end'] = self._io.pos()
class DateTimeType(KaitaiStruct):
    """Date/time payload: 4 reserved zero bytes followed by a DateTimeNumber
    structure (parsed by the root-level DateTimeNumber class)."""
    SEQ_FIELDS = ["reserved", "date_and_time"]
    def __init__(self, _io, _parent=None, _root=None):
        self._io = _io
        self._parent = _parent
        self._root = _root if _root else self
        # field name -> {'start': offset, 'end': offset}
        self._debug = collections.defaultdict(dict)
    def _read(self):
        self._debug['reserved']['start'] = self._io.pos()
        self.reserved = self._io.ensure_fixed_contents(b"\x00\x00\x00\x00")
        self._debug['reserved']['end'] = self._io.pos()
        self._debug['date_and_time']['start'] = self._io.pos()
        self.date_and_time = self._root.DateTimeNumber(self._io, self, self._root)
        self.date_and_time._read()
        self._debug['date_and_time']['end'] = self._io.pos()
class DToB3Tag(KaitaiStruct):
    """D-to-B3 tag wrapper: parses the body as multi-process elements when the
    u4 type signature matches.
    NOTE(review): for any other signature, tag_data is never assigned."""
    SEQ_FIELDS = ["tag_type", "tag_data"]
    def __init__(self, _io, _parent=None, _root=None):
        self._io = _io
        self._parent = _parent
        self._root = _root if _root else self
        # field name -> {'start': offset, 'end': offset}
        self._debug = collections.defaultdict(dict)
    def _read(self):
        self._debug['tag_type']['start'] = self._io.pos()
        self.tag_type = KaitaiStream.resolve_enum(self._root.TagTable.TagDefinition.TagTypeSignatures, self._io.read_u4be())
        self._debug['tag_type']['end'] = self._io.pos()
        self._debug['tag_data']['start'] = self._io.pos()
        _on = self.tag_type
        if _on == self._root.TagTable.TagDefinition.TagTypeSignatures.multi_process_elements_type:
            self.tag_data = self._root.TagTable.TagDefinition.MultiProcessElementsType(self._io, self, self._root)
            self.tag_data._read()
        self._debug['tag_data']['end'] = self._io.pos()
class Preview2Tag(KaitaiStruct):
    """Preview-2 tag wrapper: dispatches the body to lut8 / lut16 / lutBtoA
    by the u4 type signature (no lutAtoB branch, unlike Preview0Tag).
    NOTE(review): for any other signature, tag_data is never assigned."""
    SEQ_FIELDS = ["tag_type", "tag_data"]
    def __init__(self, _io, _parent=None, _root=None):
        self._io = _io
        self._parent = _parent
        self._root = _root if _root else self
        # field name -> {'start': offset, 'end': offset}
        self._debug = collections.defaultdict(dict)
    def _read(self):
        self._debug['tag_type']['start'] = self._io.pos()
        self.tag_type = KaitaiStream.resolve_enum(self._root.TagTable.TagDefinition.TagTypeSignatures, self._io.read_u4be())
        self._debug['tag_type']['end'] = self._io.pos()
        self._debug['tag_data']['start'] = self._io.pos()
        _on = self.tag_type
        if _on == self._root.TagTable.TagDefinition.TagTypeSignatures.multi_function_table_with_one_byte_precision_type:
            self.tag_data = self._root.TagTable.TagDefinition.Lut8Type(self._io, self, self._root)
            self.tag_data._read()
        elif _on == self._root.TagTable.TagDefinition.TagTypeSignatures.multi_function_table_with_two_byte_precision_type:
            self.tag_data = self._root.TagTable.TagDefinition.Lut16Type(self._io, self, self._root)
            self.tag_data._read()
        elif _on == self._root.TagTable.TagDefinition.TagTypeSignatures.multi_function_b_to_a_table_type:
            self.tag_data = self._root.TagTable.TagDefinition.LutBToAType(self._io, self, self._root)
            self.tag_data._read()
        self._debug['tag_data']['end'] = self._io.pos()
class DeviceModelDescTag(KaitaiStruct):
    """Device-model-description tag wrapper: parses the body as
    multi-localized unicode when the u4 type signature matches.
    NOTE(review): for any other signature, tag_data is never assigned."""
    SEQ_FIELDS = ["tag_type", "tag_data"]
    def __init__(self, _io, _parent=None, _root=None):
        self._io = _io
        self._parent = _parent
        self._root = _root if _root else self
        # field name -> {'start': offset, 'end': offset}
        self._debug = collections.defaultdict(dict)
    def _read(self):
        self._debug['tag_type']['start'] = self._io.pos()
        self.tag_type = KaitaiStream.resolve_enum(self._root.TagTable.TagDefinition.TagTypeSignatures, self._io.read_u4be())
        self._debug['tag_type']['end'] = self._io.pos()
        self._debug['tag_data']['start'] = self._io.pos()
        _on = self.tag_type
        if _on == self._root.TagTable.TagDefinition.TagTypeSignatures.multi_localized_unicode_type:
            self.tag_data = self._root.TagTable.TagDefinition.MultiLocalizedUnicodeType(self._io, self, self._root)
            self.tag_data._read()
        self._debug['tag_data']['end'] = self._io.pos()
class MultiProcessElementsType(KaitaiStruct):
    """Multi-process-elements payload: reserved zeros, u2 input/output channel
    counts, a u4 element count, that many PositionNumber entries, then the
    remainder of the substream as raw element data."""
    SEQ_FIELDS = ["reserved", "number_of_input_channels", "number_of_output_channels", "number_of_processing_elements", "process_element_positions_table", "data"]
    def __init__(self, _io, _parent=None, _root=None):
        self._io = _io
        self._parent = _parent
        self._root = _root if _root else self
        # field name -> {'start': offset, 'end': offset}; repeated fields add 'arr'
        self._debug = collections.defaultdict(dict)
    def _read(self):
        self._debug['reserved']['start'] = self._io.pos()
        self.reserved = self._io.ensure_fixed_contents(b"\x00\x00\x00\x00")
        self._debug['reserved']['end'] = self._io.pos()
        self._debug['number_of_input_channels']['start'] = self._io.pos()
        self.number_of_input_channels = self._io.read_u2be()
        self._debug['number_of_input_channels']['end'] = self._io.pos()
        self._debug['number_of_output_channels']['start'] = self._io.pos()
        self.number_of_output_channels = self._io.read_u2be()
        self._debug['number_of_output_channels']['end'] = self._io.pos()
        self._debug['number_of_processing_elements']['start'] = self._io.pos()
        self.number_of_processing_elements = self._io.read_u4be()
        self._debug['number_of_processing_elements']['end'] = self._io.pos()
        self._debug['process_element_positions_table']['start'] = self._io.pos()
        self.process_element_positions_table = [None] * (self.number_of_processing_elements)
        for i in range(self.number_of_processing_elements):
            if not 'arr' in self._debug['process_element_positions_table']:
                self._debug['process_element_positions_table']['arr'] = []
            self._debug['process_element_positions_table']['arr'].append({'start': self._io.pos()})
            _t_process_element_positions_table = self._root.PositionNumber(self._io, self, self._root)
            _t_process_element_positions_table._read()
            self.process_element_positions_table[i] = _t_process_element_positions_table
            self._debug['process_element_positions_table']['arr'][i]['end'] = self._io.pos()
        self._debug['process_element_positions_table']['end'] = self._io.pos()
        self._debug['data']['start'] = self._io.pos()
        # Elements themselves are not decoded here; everything left in the
        # substream is kept as raw bytes.
        self.data = self._io.read_bytes_full()
        self._debug['data']['end'] = self._io.pos()
class UInt16ArrayType(KaitaiStruct):
    """u16-array payload: 4 reserved zero bytes, then big-endian u16 values
    until the end of the substream."""
    SEQ_FIELDS = ["reserved", "values"]
    def __init__(self, _io, _parent=None, _root=None):
        self._io = _io
        self._parent = _parent
        self._root = _root if _root else self
        # field name -> {'start': offset, 'end': offset}; repeated fields add 'arr'
        self._debug = collections.defaultdict(dict)
    def _read(self):
        self._debug['reserved']['start'] = self._io.pos()
        self.reserved = self._io.ensure_fixed_contents(b"\x00\x00\x00\x00")
        self._debug['reserved']['end'] = self._io.pos()
        self._debug['values']['start'] = self._io.pos()
        self.values = []
        # Debug entries are indexed by len(self.values) - 1, so the dead
        # loop counter the generator emitted has been removed.
        while not self._io.is_eof():
            if not 'arr' in self._debug['values']:
                self._debug['values']['arr'] = []
            self._debug['values']['arr'].append({'start': self._io.pos()})
            self.values.append(self._io.read_u2be())
            self._debug['values']['arr'][len(self.values) - 1]['end'] = self._io.pos()
        self._debug['values']['end'] = self._io.pos()
class ColorantOrderTag(KaitaiStruct):
    """Colorant-order tag wrapper: parses the body as a colorant-order table
    when the u4 type signature matches.
    NOTE(review): for any other signature, tag_data is never assigned."""
    SEQ_FIELDS = ["tag_type", "tag_data"]
    def __init__(self, _io, _parent=None, _root=None):
        self._io = _io
        self._parent = _parent
        self._root = _root if _root else self
        # field name -> {'start': offset, 'end': offset}
        self._debug = collections.defaultdict(dict)
    def _read(self):
        self._debug['tag_type']['start'] = self._io.pos()
        self.tag_type = KaitaiStream.resolve_enum(self._root.TagTable.TagDefinition.TagTypeSignatures, self._io.read_u4be())
        self._debug['tag_type']['end'] = self._io.pos()
        self._debug['tag_data']['start'] = self._io.pos()
        _on = self.tag_type
        if _on == self._root.TagTable.TagDefinition.TagTypeSignatures.colorant_order_type:
            self.tag_data = self._root.TagTable.TagDefinition.ColorantOrderType(self._io, self, self._root)
            self.tag_data._read()
        self._debug['tag_data']['end'] = self._io.pos()
class DataType(KaitaiStruct):
    """Data payload: a single u4 flag word marking the content as ASCII (0)
    or binary (1); stream offsets are logged to _debug."""
    class DataTypes(Enum):
        ascii_data = 0
        binary_data = 1
    SEQ_FIELDS = ["data_flag"]
    def __init__(self, _io, _parent=None, _root=None):
        self._io = _io
        self._parent = _parent
        self._root = _root or self
        self._debug = collections.defaultdict(dict)
    def _read(self):
        stream = self._io
        marks = self._debug['data_flag']
        marks['start'] = stream.pos()
        raw_flag = stream.read_u4be()
        self.data_flag = KaitaiStream.resolve_enum(self._root.TagTable.TagDefinition.DataType.DataTypes, raw_flag)
        marks['end'] = stream.pos()
class ChromaticityType(KaitaiStruct):
    """Chromaticity payload: reserved zeros, u2 device-channel count, a u2
    colorant/phosphor encoding enum, then one CIE xy coordinate pair per
    channel."""
    class ColorantAndPhosphorEncodings(Enum):
        unknown = 0
        itu_r_bt_709_2 = 1
        smpte_rp145 = 2
        ebu_tech_3213_e = 3
        p22 = 4
    SEQ_FIELDS = ["reserved", "number_of_device_channels", "colorant_and_phosphor_encoding", "ciexy_coordinates_per_channel"]
    def __init__(self, _io, _parent=None, _root=None):
        self._io = _io
        self._parent = _parent
        self._root = _root if _root else self
        # field name -> {'start': offset, 'end': offset}; repeated fields add 'arr'
        self._debug = collections.defaultdict(dict)
    def _read(self):
        self._debug['reserved']['start'] = self._io.pos()
        self.reserved = self._io.ensure_fixed_contents(b"\x00\x00\x00\x00")
        self._debug['reserved']['end'] = self._io.pos()
        self._debug['number_of_device_channels']['start'] = self._io.pos()
        self.number_of_device_channels = self._io.read_u2be()
        self._debug['number_of_device_channels']['end'] = self._io.pos()
        self._debug['colorant_and_phosphor_encoding']['start'] = self._io.pos()
        self.colorant_and_phosphor_encoding = KaitaiStream.resolve_enum(self._root.TagTable.TagDefinition.ChromaticityType.ColorantAndPhosphorEncodings, self._io.read_u2be())
        self._debug['colorant_and_phosphor_encoding']['end'] = self._io.pos()
        self._debug['ciexy_coordinates_per_channel']['start'] = self._io.pos()
        self.ciexy_coordinates_per_channel = [None] * (self.number_of_device_channels)
        for i in range(self.number_of_device_channels):
            if not 'arr' in self._debug['ciexy_coordinates_per_channel']:
                self._debug['ciexy_coordinates_per_channel']['arr'] = []
            self._debug['ciexy_coordinates_per_channel']['arr'].append({'start': self._io.pos()})
            _t_ciexy_coordinates_per_channel = self._root.TagTable.TagDefinition.ChromaticityType.CiexyCoordinateValues(self._io, self, self._root)
            _t_ciexy_coordinates_per_channel._read()
            self.ciexy_coordinates_per_channel[i] = _t_ciexy_coordinates_per_channel
            self._debug['ciexy_coordinates_per_channel']['arr'][i]['end'] = self._io.pos()
        self._debug['ciexy_coordinates_per_channel']['end'] = self._io.pos()
    class CiexyCoordinateValues(KaitaiStruct):
        """One CIE xy coordinate pair, each stored as a big-endian u16."""
        SEQ_FIELDS = ["x_coordinate", "y_coordinate"]
        def __init__(self, _io, _parent=None, _root=None):
            self._io = _io
            self._parent = _parent
            self._root = _root if _root else self
            self._debug = collections.defaultdict(dict)
        def _read(self):
            self._debug['x_coordinate']['start'] = self._io.pos()
            self.x_coordinate = self._io.read_u2be()
            self._debug['x_coordinate']['end'] = self._io.pos()
            self._debug['y_coordinate']['start'] = self._io.pos()
            self.y_coordinate = self._io.read_u2be()
            self._debug['y_coordinate']['end'] = self._io.pos()
class LuminanceTag(KaitaiStruct):
    """Luminance tag wrapper: parses the body as an XYZ value when the u4
    type signature matches.
    NOTE(review): for any other signature, tag_data is never assigned."""
    SEQ_FIELDS = ["tag_type", "tag_data"]
    def __init__(self, _io, _parent=None, _root=None):
        self._io = _io
        self._parent = _parent
        self._root = _root if _root else self
        # field name -> {'start': offset, 'end': offset}
        self._debug = collections.defaultdict(dict)
    def _read(self):
        self._debug['tag_type']['start'] = self._io.pos()
        self.tag_type = KaitaiStream.resolve_enum(self._root.TagTable.TagDefinition.TagTypeSignatures, self._io.read_u4be())
        self._debug['tag_type']['end'] = self._io.pos()
        self._debug['tag_data']['start'] = self._io.pos()
        _on = self.tag_type
        if _on == self._root.TagTable.TagDefinition.TagTypeSignatures.xyz_type:
            self.tag_data = self._root.TagTable.TagDefinition.XyzType(self._io, self, self._root)
            self.tag_data._read()
        self._debug['tag_data']['end'] = self._io.pos()
class S15Fixed16ArrayType(KaitaiStruct):
    """s15Fixed16-array payload: 4 reserved zero bytes, then S15Fixed16Number
    values until the end of the substream."""
    SEQ_FIELDS = ["reserved", "values"]
    def __init__(self, _io, _parent=None, _root=None):
        self._io = _io
        self._parent = _parent
        self._root = _root if _root else self
        # field name -> {'start': offset, 'end': offset}; repeated fields add 'arr'
        self._debug = collections.defaultdict(dict)
    def _read(self):
        self._debug['reserved']['start'] = self._io.pos()
        self.reserved = self._io.ensure_fixed_contents(b"\x00\x00\x00\x00")
        self._debug['reserved']['end'] = self._io.pos()
        self._debug['values']['start'] = self._io.pos()
        self.values = []
        # Debug entries are indexed by len(self.values) - 1, so the dead
        # loop counter the generator emitted has been removed.
        while not self._io.is_eof():
            if not 'arr' in self._debug['values']:
                self._debug['values']['arr'] = []
            self._debug['values']['arr'].append({'start': self._io.pos()})
            _t_values = self._root.S15Fixed16Number(self._io, self, self._root)
            _t_values._read()
            self.values.append(_t_values)
            self._debug['values']['arr'][len(self.values) - 1]['end'] = self._io.pos()
        self._debug['values']['end'] = self._io.pos()
class MultiLocalizedUnicodeType(KaitaiStruct):
    """Multi-localized-unicode payload: reserved zeros, u4 record count, u4
    record size, then that many Record headers; the actual strings are read
    lazily via Record.string_data."""
    SEQ_FIELDS = ["reserved", "number_of_records", "record_size", "records"]
    def __init__(self, _io, _parent=None, _root=None):
        self._io = _io
        self._parent = _parent
        self._root = _root if _root else self
        # field name -> {'start': offset, 'end': offset}; repeated fields add 'arr'
        self._debug = collections.defaultdict(dict)
    def _read(self):
        self._debug['reserved']['start'] = self._io.pos()
        self.reserved = self._io.ensure_fixed_contents(b"\x00\x00\x00\x00")
        self._debug['reserved']['end'] = self._io.pos()
        self._debug['number_of_records']['start'] = self._io.pos()
        self.number_of_records = self._io.read_u4be()
        self._debug['number_of_records']['end'] = self._io.pos()
        self._debug['record_size']['start'] = self._io.pos()
        self.record_size = self._io.read_u4be()
        self._debug['record_size']['end'] = self._io.pos()
        self._debug['records']['start'] = self._io.pos()
        self.records = [None] * (self.number_of_records)
        for i in range(self.number_of_records):
            if not 'arr' in self._debug['records']:
                self._debug['records']['arr'] = []
            self._debug['records']['arr'].append({'start': self._io.pos()})
            _t_records = self._root.TagTable.TagDefinition.MultiLocalizedUnicodeType.Record(self._io, self, self._root)
            _t_records._read()
            self.records[i] = _t_records
            self._debug['records']['arr'][i]['end'] = self._io.pos()
        self._debug['records']['end'] = self._io.pos()
    class Record(KaitaiStruct):
        """One localization record: u2 language code, u2 country code, u4
        string length (bytes) and u4 string offset into the substream."""
        SEQ_FIELDS = ["language_code", "country_code", "string_length", "string_offset"]
        def __init__(self, _io, _parent=None, _root=None):
            self._io = _io
            self._parent = _parent
            self._root = _root if _root else self
            self._debug = collections.defaultdict(dict)
        def _read(self):
            self._debug['language_code']['start'] = self._io.pos()
            self.language_code = self._io.read_u2be()
            self._debug['language_code']['end'] = self._io.pos()
            self._debug['country_code']['start'] = self._io.pos()
            self.country_code = self._io.read_u2be()
            self._debug['country_code']['end'] = self._io.pos()
            self._debug['string_length']['start'] = self._io.pos()
            self.string_length = self._io.read_u4be()
            self._debug['string_length']['end'] = self._io.pos()
            self._debug['string_offset']['start'] = self._io.pos()
            self.string_offset = self._io.read_u4be()
            self._debug['string_offset']['end'] = self._io.pos()
        @property
        def string_data(self):
            # Lazy instance: seeks to string_offset, reads string_length bytes
            # as UTF-16BE, caches in _m_string_data, and restores the stream
            # position before returning.
            if hasattr(self, '_m_string_data'):
                return self._m_string_data if hasattr(self, '_m_string_data') else None
            _pos = self._io.pos()
            self._io.seek(self.string_offset)
            self._debug['_m_string_data']['start'] = self._io.pos()
            self._m_string_data = (self._io.read_bytes(self.string_length)).decode(u"UTF-16BE")
            self._debug['_m_string_data']['end'] = self._io.pos()
            self._io.seek(_pos)
            return self._m_string_data if hasattr(self, '_m_string_data') else None
class AToB2Tag(KaitaiStruct):
    """A-to-B2 tag wrapper: dispatches the body to lut8 / lut16 / lutAtoB by
    the u4 type signature.
    NOTE(review): for any other signature, tag_data is never assigned."""
    SEQ_FIELDS = ["tag_type", "tag_data"]
    def __init__(self, _io, _parent=None, _root=None):
        self._io = _io
        self._parent = _parent
        self._root = _root if _root else self
        # field name -> {'start': offset, 'end': offset}
        self._debug = collections.defaultdict(dict)
    def _read(self):
        self._debug['tag_type']['start'] = self._io.pos()
        self.tag_type = KaitaiStream.resolve_enum(self._root.TagTable.TagDefinition.TagTypeSignatures, self._io.read_u4be())
        self._debug['tag_type']['end'] = self._io.pos()
        self._debug['tag_data']['start'] = self._io.pos()
        _on = self.tag_type
        if _on == self._root.TagTable.TagDefinition.TagTypeSignatures.multi_function_table_with_one_byte_precision_type:
            self.tag_data = self._root.TagTable.TagDefinition.Lut8Type(self._io, self, self._root)
            self.tag_data._read()
        elif _on == self._root.TagTable.TagDefinition.TagTypeSignatures.multi_function_table_with_two_byte_precision_type:
            self.tag_data = self._root.TagTable.TagDefinition.Lut16Type(self._io, self, self._root)
            self.tag_data._read()
        elif _on == self._root.TagTable.TagDefinition.TagTypeSignatures.multi_function_a_to_b_table_type:
            self.tag_data = self._root.TagTable.TagDefinition.LutAToBType(self._io, self, self._root)
            self.tag_data._read()
        self._debug['tag_data']['end'] = self._io.pos()
class AToB1Tag(KaitaiStruct):
    """A-to-B1 tag wrapper: dispatches the body to lut8 / lut16 / lutAtoB by
    the u4 type signature.
    NOTE(review): for any other signature, tag_data is never assigned."""
    SEQ_FIELDS = ["tag_type", "tag_data"]
    def __init__(self, _io, _parent=None, _root=None):
        self._io = _io
        self._parent = _parent
        self._root = _root if _root else self
        # field name -> {'start': offset, 'end': offset}
        self._debug = collections.defaultdict(dict)
    def _read(self):
        self._debug['tag_type']['start'] = self._io.pos()
        self.tag_type = KaitaiStream.resolve_enum(self._root.TagTable.TagDefinition.TagTypeSignatures, self._io.read_u4be())
        self._debug['tag_type']['end'] = self._io.pos()
        self._debug['tag_data']['start'] = self._io.pos()
        _on = self.tag_type
        if _on == self._root.TagTable.TagDefinition.TagTypeSignatures.multi_function_table_with_one_byte_precision_type:
            self.tag_data = self._root.TagTable.TagDefinition.Lut8Type(self._io, self, self._root)
            self.tag_data._read()
        elif _on == self._root.TagTable.TagDefinition.TagTypeSignatures.multi_function_table_with_two_byte_precision_type:
            self.tag_data = self._root.TagTable.TagDefinition.Lut16Type(self._io, self, self._root)
            self.tag_data._read()
        elif _on == self._root.TagTable.TagDefinition.TagTypeSignatures.multi_function_a_to_b_table_type:
            self.tag_data = self._root.TagTable.TagDefinition.LutAToBType(self._io, self, self._root)
            self.tag_data._read()
        self._debug['tag_data']['end'] = self._io.pos()
class ColorimetricIntentImageStateTag(KaitaiStruct):
    """Colorimetric-intent-image-state tag wrapper: parses the body as a
    signature value when the u4 type signature matches.
    NOTE(review): for any other signature, tag_data is never assigned."""
    SEQ_FIELDS = ["tag_type", "tag_data"]
    def __init__(self, _io, _parent=None, _root=None):
        self._io = _io
        self._parent = _parent
        self._root = _root if _root else self
        # field name -> {'start': offset, 'end': offset}
        self._debug = collections.defaultdict(dict)
    def _read(self):
        self._debug['tag_type']['start'] = self._io.pos()
        self.tag_type = KaitaiStream.resolve_enum(self._root.TagTable.TagDefinition.TagTypeSignatures, self._io.read_u4be())
        self._debug['tag_type']['end'] = self._io.pos()
        self._debug['tag_data']['start'] = self._io.pos()
        _on = self.tag_type
        if _on == self._root.TagTable.TagDefinition.TagTypeSignatures.signature_type:
            self.tag_data = self._root.TagTable.TagDefinition.SignatureType(self._io, self, self._root)
            self.tag_data._read()
        self._debug['tag_data']['end'] = self._io.pos()
class CharTargetTag(KaitaiStruct):
    """Characterization-target tag wrapper: parses the body as text when the
    u4 type signature matches.
    NOTE(review): for any other signature, tag_data is never assigned."""
    SEQ_FIELDS = ["tag_type", "tag_data"]
    def __init__(self, _io, _parent=None, _root=None):
        self._io = _io
        self._parent = _parent
        self._root = _root if _root else self
        # field name -> {'start': offset, 'end': offset}
        self._debug = collections.defaultdict(dict)
    def _read(self):
        self._debug['tag_type']['start'] = self._io.pos()
        self.tag_type = KaitaiStream.resolve_enum(self._root.TagTable.TagDefinition.TagTypeSignatures, self._io.read_u4be())
        self._debug['tag_type']['end'] = self._io.pos()
        self._debug['tag_data']['start'] = self._io.pos()
        _on = self.tag_type
        if _on == self._root.TagTable.TagDefinition.TagTypeSignatures.text_type:
            self.tag_data = self._root.TagTable.TagDefinition.TextType(self._io, self, self._root)
            self.tag_data._read()
        self._debug['tag_data']['end'] = self._io.pos()
class ColorantTableTag(KaitaiStruct):
    """Colorant-table tag wrapper: parses the body as a colorant table when
    the u4 type signature matches.
    NOTE(review): for any other signature, tag_data is never assigned."""
    SEQ_FIELDS = ["tag_type", "tag_data"]
    def __init__(self, _io, _parent=None, _root=None):
        self._io = _io
        self._parent = _parent
        self._root = _root if _root else self
        # field name -> {'start': offset, 'end': offset}
        self._debug = collections.defaultdict(dict)
    def _read(self):
        self._debug['tag_type']['start'] = self._io.pos()
        self.tag_type = KaitaiStream.resolve_enum(self._root.TagTable.TagDefinition.TagTypeSignatures, self._io.read_u4be())
        self._debug['tag_type']['end'] = self._io.pos()
        self._debug['tag_data']['start'] = self._io.pos()
        _on = self.tag_type
        if _on == self._root.TagTable.TagDefinition.TagTypeSignatures.colorant_table_type:
            self.tag_data = self._root.TagTable.TagDefinition.ColorantTableType(self._io, self, self._root)
            self.tag_data._read()
        self._debug['tag_data']['end'] = self._io.pos()
class CalibrationDateTimeTag(KaitaiStruct):
    """Calibration-date-time tag wrapper: parses the body as a date/time value
    when the u4 type signature matches.
    NOTE(review): for any other signature, tag_data is never assigned."""
    SEQ_FIELDS = ["tag_type", "tag_data"]
    def __init__(self, _io, _parent=None, _root=None):
        self._io = _io
        self._parent = _parent
        self._root = _root if _root else self
        # field name -> {'start': offset, 'end': offset}
        self._debug = collections.defaultdict(dict)
    def _read(self):
        self._debug['tag_type']['start'] = self._io.pos()
        self.tag_type = KaitaiStream.resolve_enum(self._root.TagTable.TagDefinition.TagTypeSignatures, self._io.read_u4be())
        self._debug['tag_type']['end'] = self._io.pos()
        self._debug['tag_data']['start'] = self._io.pos()
        _on = self.tag_type
        if _on == self._root.TagTable.TagDefinition.TagTypeSignatures.date_time_type:
            self.tag_data = self._root.TagTable.TagDefinition.DateTimeType(self._io, self, self._root)
            self.tag_data._read()
        self._debug['tag_data']['end'] = self._io.pos()
class NamedColor2Tag(KaitaiStruct):
    """Named-color-2 tag wrapper: parses the body as a named-color-2 table
    when the u4 type signature matches.
    NOTE(review): for any other signature, tag_data is never assigned."""
    SEQ_FIELDS = ["tag_type", "tag_data"]
    def __init__(self, _io, _parent=None, _root=None):
        self._io = _io
        self._parent = _parent
        self._root = _root if _root else self
        # field name -> {'start': offset, 'end': offset}
        self._debug = collections.defaultdict(dict)
    def _read(self):
        self._debug['tag_type']['start'] = self._io.pos()
        self.tag_type = KaitaiStream.resolve_enum(self._root.TagTable.TagDefinition.TagTypeSignatures, self._io.read_u4be())
        self._debug['tag_type']['end'] = self._io.pos()
        self._debug['tag_data']['start'] = self._io.pos()
        _on = self.tag_type
        if _on == self._root.TagTable.TagDefinition.TagTypeSignatures.named_color_2_type:
            self.tag_data = self._root.TagTable.TagDefinition.NamedColor2Type(self._io, self, self._root)
            self.tag_data._read()
        self._debug['tag_data']['end'] = self._io.pos()
class ViewingCondDescTag(KaitaiStruct):
    """Viewing-conditions-description tag wrapper: parses the body as
    multi-localized unicode when the u4 type signature matches.
    NOTE(review): for any other signature, tag_data is never assigned."""
    SEQ_FIELDS = ["tag_type", "tag_data"]
    def __init__(self, _io, _parent=None, _root=None):
        self._io = _io
        self._parent = _parent
        self._root = _root if _root else self
        # field name -> {'start': offset, 'end': offset}
        self._debug = collections.defaultdict(dict)
    def _read(self):
        self._debug['tag_type']['start'] = self._io.pos()
        self.tag_type = KaitaiStream.resolve_enum(self._root.TagTable.TagDefinition.TagTypeSignatures, self._io.read_u4be())
        self._debug['tag_type']['end'] = self._io.pos()
        self._debug['tag_data']['start'] = self._io.pos()
        _on = self.tag_type
        if _on == self._root.TagTable.TagDefinition.TagTypeSignatures.multi_localized_unicode_type:
            self.tag_data = self._root.TagTable.TagDefinition.MultiLocalizedUnicodeType(self._io, self, self._root)
            self.tag_data._read()
        self._debug['tag_data']['end'] = self._io.pos()
class BToD3Tag(KaitaiStruct):
    """B-to-D3 tag wrapper: parses the body as multi-process elements when the
    u4 type signature matches.
    NOTE(review): for any other signature, tag_data is never assigned."""
    SEQ_FIELDS = ["tag_type", "tag_data"]
    def __init__(self, _io, _parent=None, _root=None):
        self._io = _io
        self._parent = _parent
        self._root = _root if _root else self
        # field name -> {'start': offset, 'end': offset}
        self._debug = collections.defaultdict(dict)
    def _read(self):
        self._debug['tag_type']['start'] = self._io.pos()
        self.tag_type = KaitaiStream.resolve_enum(self._root.TagTable.TagDefinition.TagTypeSignatures, self._io.read_u4be())
        self._debug['tag_type']['end'] = self._io.pos()
        self._debug['tag_data']['start'] = self._io.pos()
        _on = self.tag_type
        if _on == self._root.TagTable.TagDefinition.TagTypeSignatures.multi_process_elements_type:
            self.tag_data = self._root.TagTable.TagDefinition.MultiProcessElementsType(self._io, self, self._root)
            self.tag_data._read()
        self._debug['tag_data']['end'] = self._io.pos()
class ProfileSequenceDescType(KaitaiStruct):
    """Profile-sequence-description payload: reserved zeros, a u4 structure
    count, then that many ProfileDescription entries."""
    SEQ_FIELDS = ["reserved", "number_of_description_structures", "profile_descriptions"]
    def __init__(self, _io, _parent=None, _root=None):
        self._io = _io
        self._parent = _parent
        self._root = _root if _root else self
        # field name -> {'start': offset, 'end': offset}; repeated fields add 'arr'
        self._debug = collections.defaultdict(dict)
    def _read(self):
        self._debug['reserved']['start'] = self._io.pos()
        self.reserved = self._io.ensure_fixed_contents(b"\x00\x00\x00\x00")
        self._debug['reserved']['end'] = self._io.pos()
        self._debug['number_of_description_structures']['start'] = self._io.pos()
        self.number_of_description_structures = self._io.read_u4be()
        self._debug['number_of_description_structures']['end'] = self._io.pos()
        self._debug['profile_descriptions']['start'] = self._io.pos()
        self.profile_descriptions = [None] * (self.number_of_description_structures)
        for i in range(self.number_of_description_structures):
            if not 'arr' in self._debug['profile_descriptions']:
                self._debug['profile_descriptions']['arr'] = []
            self._debug['profile_descriptions']['arr'].append({'start': self._io.pos()})
            _t_profile_descriptions = self._root.TagTable.TagDefinition.ProfileSequenceDescType.ProfileDescription(self._io, self, self._root)
            _t_profile_descriptions._read()
            self.profile_descriptions[i] = _t_profile_descriptions
            self._debug['profile_descriptions']['arr'][i]['end'] = self._io.pos()
        self._debug['profile_descriptions']['end'] = self._io.pos()
    class ProfileDescription(KaitaiStruct):
        """One profile description: device manufacturer, 4-char ASCII device
        model, device attributes, technology tag, and two description tags
        (manufacturer and model)."""
        SEQ_FIELDS = ["device_manufacturer", "device_model", "device_attributes", "device_technology", "description_of_device_manufacturer", "description_of_device_model"]
        def __init__(self, _io, _parent=None, _root=None):
            self._io = _io
            self._parent = _parent
            self._root = _root if _root else self
            self._debug = collections.defaultdict(dict)
        def _read(self):
            self._debug['device_manufacturer']['start'] = self._io.pos()
            self.device_manufacturer = self._root.DeviceManufacturer(self._io, self, self._root)
            self.device_manufacturer._read()
            self._debug['device_manufacturer']['end'] = self._io.pos()
            self._debug['device_model']['start'] = self._io.pos()
            self.device_model = (self._io.read_bytes(4)).decode(u"ASCII")
            self._debug['device_model']['end'] = self._io.pos()
            self._debug['device_attributes']['start'] = self._io.pos()
            self.device_attributes = self._root.DeviceAttributes(self._io, self, self._root)
            self.device_attributes._read()
            self._debug['device_attributes']['end'] = self._io.pos()
            self._debug['device_technology']['start'] = self._io.pos()
            self.device_technology = self._root.TagTable.TagDefinition.TechnologyTag(self._io, self, self._root)
            self.device_technology._read()
            self._debug['device_technology']['end'] = self._io.pos()
            self._debug['description_of_device_manufacturer']['start'] = self._io.pos()
            self.description_of_device_manufacturer = self._root.TagTable.TagDefinition.DeviceMfgDescTag(self._io, self, self._root)
            self.description_of_device_manufacturer._read()
            self._debug['description_of_device_manufacturer']['end'] = self._io.pos()
            self._debug['description_of_device_model']['start'] = self._io.pos()
            self.description_of_device_model = self._root.TagTable.TagDefinition.DeviceModelDescTag(self._io, self, self._root)
            self.description_of_device_model._read()
            self._debug['description_of_device_model']['end'] = self._io.pos()
class ProfileSequenceIdentifierTag(KaitaiStruct):
    """Profile-sequence-identifier tag wrapper: parses the body as a
    profile-sequence-identifier structure when the u4 type signature matches.
    NOTE(review): for any other signature, tag_data is never assigned."""
    SEQ_FIELDS = ["tag_type", "tag_data"]
    def __init__(self, _io, _parent=None, _root=None):
        self._io = _io
        self._parent = _parent
        self._root = _root if _root else self
        # field name -> {'start': offset, 'end': offset}
        self._debug = collections.defaultdict(dict)
    def _read(self):
        self._debug['tag_type']['start'] = self._io.pos()
        self.tag_type = KaitaiStream.resolve_enum(self._root.TagTable.TagDefinition.TagTypeSignatures, self._io.read_u4be())
        self._debug['tag_type']['end'] = self._io.pos()
        self._debug['tag_data']['start'] = self._io.pos()
        _on = self.tag_type
        if _on == self._root.TagTable.TagDefinition.TagTypeSignatures.profile_sequence_identifier_type:
            self.tag_data = self._root.TagTable.TagDefinition.ProfileSequenceIdentifierType(self._io, self, self._root)
            self.tag_data._read()
        self._debug['tag_data']['end'] = self._io.pos()
class BToD1Tag(KaitaiStruct):
    """B-to-D1 tag wrapper: parses the body as multi-process elements when the
    u4 type signature matches.
    NOTE(review): for any other signature, tag_data is never assigned."""
    SEQ_FIELDS = ["tag_type", "tag_data"]
    def __init__(self, _io, _parent=None, _root=None):
        self._io = _io
        self._parent = _parent
        self._root = _root if _root else self
        # field name -> {'start': offset, 'end': offset}
        self._debug = collections.defaultdict(dict)
    def _read(self):
        self._debug['tag_type']['start'] = self._io.pos()
        self.tag_type = KaitaiStream.resolve_enum(self._root.TagTable.TagDefinition.TagTypeSignatures, self._io.read_u4be())
        self._debug['tag_type']['end'] = self._io.pos()
        self._debug['tag_data']['start'] = self._io.pos()
        _on = self.tag_type
        if _on == self._root.TagTable.TagDefinition.TagTypeSignatures.multi_process_elements_type:
            self.tag_data = self._root.TagTable.TagDefinition.MultiProcessElementsType(self._io, self, self._root)
            self.tag_data._read()
        self._debug['tag_data']['end'] = self._io.pos()
class ColorantOrderType(KaitaiStruct):
    """Colorant-order payload: reserved zeros, a u4 colorant count, then one
    u1 per colorant giving the printing order."""
    SEQ_FIELDS = ["reserved", "count_of_colorants", "numbers_of_colorants_in_order_of_printing"]
    def __init__(self, _io, _parent=None, _root=None):
        self._io = _io
        self._parent = _parent
        self._root = _root if _root else self
        # field name -> {'start': offset, 'end': offset}; repeated fields add 'arr'
        self._debug = collections.defaultdict(dict)
    def _read(self):
        self._debug['reserved']['start'] = self._io.pos()
        self.reserved = self._io.ensure_fixed_contents(b"\x00\x00\x00\x00")
        self._debug['reserved']['end'] = self._io.pos()
        self._debug['count_of_colorants']['start'] = self._io.pos()
        self.count_of_colorants = self._io.read_u4be()
        self._debug['count_of_colorants']['end'] = self._io.pos()
        self._debug['numbers_of_colorants_in_order_of_printing']['start'] = self._io.pos()
        self.numbers_of_colorants_in_order_of_printing = [None] * (self.count_of_colorants)
        for i in range(self.count_of_colorants):
            if not 'arr' in self._debug['numbers_of_colorants_in_order_of_printing']:
                self._debug['numbers_of_colorants_in_order_of_printing']['arr'] = []
            self._debug['numbers_of_colorants_in_order_of_printing']['arr'].append({'start': self._io.pos()})
            self.numbers_of_colorants_in_order_of_printing[i] = self._io.read_u1()
            self._debug['numbers_of_colorants_in_order_of_printing']['arr'][i]['end'] = self._io.pos()
        self._debug['numbers_of_colorants_in_order_of_printing']['end'] = self._io.pos()
    class DToB2Tag(KaitaiStruct):
        """Device-to-B table 2 tag: a type signature followed by its payload
        (only the multi-process-elements type is parsed)."""
        SEQ_FIELDS = ["tag_type", "tag_data"]
        def __init__(self, _io, _parent=None, _root=None):
            self._io = _io
            self._parent = _parent
            self._root = _root if _root else self
            # Per-field start/end stream offsets, recorded for debugging tools.
            self._debug = collections.defaultdict(dict)
        def _read(self):
            self._debug['tag_type']['start'] = self._io.pos()
            self.tag_type = KaitaiStream.resolve_enum(self._root.TagTable.TagDefinition.TagTypeSignatures, self._io.read_u4be())
            self._debug['tag_type']['end'] = self._io.pos()
            self._debug['tag_data']['start'] = self._io.pos()
            _on = self.tag_type
            # tag_data stays unset for any other type signature.
            if _on == self._root.TagTable.TagDefinition.TagTypeSignatures.multi_process_elements_type:
                self.tag_data = self._root.TagTable.TagDefinition.MultiProcessElementsType(self._io, self, self._root)
                self.tag_data._read()
            self._debug['tag_data']['end'] = self._io.pos()
    class GrayTrcTag(KaitaiStruct):
        """Gray tone-reproduction-curve tag: payload is either a curve or a
        parametric curve, selected by the type signature."""
        SEQ_FIELDS = ["tag_type", "tag_data"]
        def __init__(self, _io, _parent=None, _root=None):
            self._io = _io
            self._parent = _parent
            self._root = _root if _root else self
            # Per-field start/end stream offsets, recorded for debugging tools.
            self._debug = collections.defaultdict(dict)
        def _read(self):
            self._debug['tag_type']['start'] = self._io.pos()
            self.tag_type = KaitaiStream.resolve_enum(self._root.TagTable.TagDefinition.TagTypeSignatures, self._io.read_u4be())
            self._debug['tag_type']['end'] = self._io.pos()
            self._debug['tag_data']['start'] = self._io.pos()
            _on = self.tag_type
            if _on == self._root.TagTable.TagDefinition.TagTypeSignatures.curve_type:
                self.tag_data = self._root.TagTable.TagDefinition.CurveType(self._io, self, self._root)
                self.tag_data._read()
            elif _on == self._root.TagTable.TagDefinition.TagTypeSignatures.parametric_curve_type:
                self.tag_data = self._root.TagTable.TagDefinition.ParametricCurveType(self._io, self, self._root)
                self.tag_data._read()
            self._debug['tag_data']['end'] = self._io.pos()
    class ViewingConditionsType(KaitaiStruct):
        """Viewing-conditions payload: un-normalized CIEXYZ values for the
        illuminant and surround, plus the standard illuminant type."""
        SEQ_FIELDS = ["reserved", "un_normalized_ciexyz_values_for_illuminant", "un_normalized_ciexyz_values_for_surround", "illuminant_type"]
        def __init__(self, _io, _parent=None, _root=None):
            self._io = _io
            self._parent = _parent
            self._root = _root if _root else self
            # Per-field start/end stream offsets, recorded for debugging tools.
            self._debug = collections.defaultdict(dict)
        def _read(self):
            self._debug['reserved']['start'] = self._io.pos()
            self.reserved = self._io.ensure_fixed_contents(b"\x00\x00\x00\x00")
            self._debug['reserved']['end'] = self._io.pos()
            self._debug['un_normalized_ciexyz_values_for_illuminant']['start'] = self._io.pos()
            self.un_normalized_ciexyz_values_for_illuminant = self._root.XyzNumber(self._io, self, self._root)
            self.un_normalized_ciexyz_values_for_illuminant._read()
            self._debug['un_normalized_ciexyz_values_for_illuminant']['end'] = self._io.pos()
            self._debug['un_normalized_ciexyz_values_for_surround']['start'] = self._io.pos()
            self.un_normalized_ciexyz_values_for_surround = self._root.XyzNumber(self._io, self, self._root)
            self.un_normalized_ciexyz_values_for_surround._read()
            self._debug['un_normalized_ciexyz_values_for_surround']['end'] = self._io.pos()
            self._debug['illuminant_type']['start'] = self._io.pos()
            self.illuminant_type = self._root.StandardIlluminantEncoding(self._io, self, self._root)
            self.illuminant_type._read()
            self._debug['illuminant_type']['end'] = self._io.pos()
    class LutBToAType(KaitaiStruct):
        """lutBToA payload header: channel counts and byte offsets to the B/M/A
        curves, matrix and CLUT. The curve/matrix/CLUT data itself is kept as
        raw bytes in `data`; the offsets index into it (not parsed here)."""
        SEQ_FIELDS = ["reserved", "number_of_input_channels", "number_of_output_channels", "padding", "offset_to_first_b_curve", "offset_to_matrix", "offset_to_first_m_curve", "offset_to_clut", "offset_to_first_a_curve", "data"]
        def __init__(self, _io, _parent=None, _root=None):
            self._io = _io
            self._parent = _parent
            self._root = _root if _root else self
            # Per-field start/end stream offsets, recorded for debugging tools.
            self._debug = collections.defaultdict(dict)
        def _read(self):
            self._debug['reserved']['start'] = self._io.pos()
            self.reserved = self._io.ensure_fixed_contents(b"\x00\x00\x00\x00")
            self._debug['reserved']['end'] = self._io.pos()
            self._debug['number_of_input_channels']['start'] = self._io.pos()
            self.number_of_input_channels = self._io.read_u1()
            self._debug['number_of_input_channels']['end'] = self._io.pos()
            self._debug['number_of_output_channels']['start'] = self._io.pos()
            self.number_of_output_channels = self._io.read_u1()
            self._debug['number_of_output_channels']['end'] = self._io.pos()
            self._debug['padding']['start'] = self._io.pos()
            self.padding = self._io.ensure_fixed_contents(b"\x00\x00")
            self._debug['padding']['end'] = self._io.pos()
            self._debug['offset_to_first_b_curve']['start'] = self._io.pos()
            self.offset_to_first_b_curve = self._io.read_u4be()
            self._debug['offset_to_first_b_curve']['end'] = self._io.pos()
            self._debug['offset_to_matrix']['start'] = self._io.pos()
            self.offset_to_matrix = self._io.read_u4be()
            self._debug['offset_to_matrix']['end'] = self._io.pos()
            self._debug['offset_to_first_m_curve']['start'] = self._io.pos()
            self.offset_to_first_m_curve = self._io.read_u4be()
            self._debug['offset_to_first_m_curve']['end'] = self._io.pos()
            self._debug['offset_to_clut']['start'] = self._io.pos()
            self.offset_to_clut = self._io.read_u4be()
            self._debug['offset_to_clut']['end'] = self._io.pos()
            self._debug['offset_to_first_a_curve']['start'] = self._io.pos()
            self.offset_to_first_a_curve = self._io.read_u4be()
            self._debug['offset_to_first_a_curve']['end'] = self._io.pos()
            self._debug['data']['start'] = self._io.pos()
            # Remainder of the substream: raw curve/matrix/CLUT bytes.
            self.data = self._io.read_bytes_full()
            self._debug['data']['end'] = self._io.pos()
    class GreenTrcTag(KaitaiStruct):
        """Green tone-reproduction-curve tag: payload is either a curve or a
        parametric curve, selected by the type signature."""
        SEQ_FIELDS = ["tag_type", "tag_data"]
        def __init__(self, _io, _parent=None, _root=None):
            self._io = _io
            self._parent = _parent
            self._root = _root if _root else self
            # Per-field start/end stream offsets, recorded for debugging tools.
            self._debug = collections.defaultdict(dict)
        def _read(self):
            self._debug['tag_type']['start'] = self._io.pos()
            self.tag_type = KaitaiStream.resolve_enum(self._root.TagTable.TagDefinition.TagTypeSignatures, self._io.read_u4be())
            self._debug['tag_type']['end'] = self._io.pos()
            self._debug['tag_data']['start'] = self._io.pos()
            _on = self.tag_type
            if _on == self._root.TagTable.TagDefinition.TagTypeSignatures.curve_type:
                self.tag_data = self._root.TagTable.TagDefinition.CurveType(self._io, self, self._root)
                self.tag_data._read()
            elif _on == self._root.TagTable.TagDefinition.TagTypeSignatures.parametric_curve_type:
                self.tag_data = self._root.TagTable.TagDefinition.ParametricCurveType(self._io, self, self._root)
                self.tag_data._read()
            self._debug['tag_data']['end'] = self._io.pos()
    class UInt32ArrayType(KaitaiStruct):
        """Array payload: a reserved word, then big-endian u4 values until EOF
        of the enclosing substream."""
        SEQ_FIELDS = ["reserved", "values"]
        def __init__(self, _io, _parent=None, _root=None):
            self._io = _io
            self._parent = _parent
            self._root = _root if _root else self
            # Per-field start/end stream offsets, recorded for debugging tools.
            self._debug = collections.defaultdict(dict)
        def _read(self):
            self._debug['reserved']['start'] = self._io.pos()
            self.reserved = self._io.ensure_fixed_contents(b"\x00\x00\x00\x00")
            self._debug['reserved']['end'] = self._io.pos()
            self._debug['values']['start'] = self._io.pos()
            self.values = []
            i = 0
            while not self._io.is_eof():
                if not 'arr' in self._debug['values']:
                    self._debug['values']['arr'] = []
                self._debug['values']['arr'].append({'start': self._io.pos()})
                self.values.append(self._io.read_u4be())
                self._debug['values']['arr'][len(self.values) - 1]['end'] = self._io.pos()
                i += 1
            self._debug['values']['end'] = self._io.pos()
    class GamutTag(KaitaiStruct):
        """Gamut tag: payload is an 8-bit LUT, 16-bit LUT, or lutBToA table,
        selected by the type signature."""
        SEQ_FIELDS = ["tag_type", "tag_data"]
        def __init__(self, _io, _parent=None, _root=None):
            self._io = _io
            self._parent = _parent
            self._root = _root if _root else self
            # Per-field start/end stream offsets, recorded for debugging tools.
            self._debug = collections.defaultdict(dict)
        def _read(self):
            self._debug['tag_type']['start'] = self._io.pos()
            self.tag_type = KaitaiStream.resolve_enum(self._root.TagTable.TagDefinition.TagTypeSignatures, self._io.read_u4be())
            self._debug['tag_type']['end'] = self._io.pos()
            self._debug['tag_data']['start'] = self._io.pos()
            _on = self.tag_type
            if _on == self._root.TagTable.TagDefinition.TagTypeSignatures.multi_function_table_with_one_byte_precision_type:
                self.tag_data = self._root.TagTable.TagDefinition.Lut8Type(self._io, self, self._root)
                self.tag_data._read()
            elif _on == self._root.TagTable.TagDefinition.TagTypeSignatures.multi_function_table_with_two_byte_precision_type:
                self.tag_data = self._root.TagTable.TagDefinition.Lut16Type(self._io, self, self._root)
                self.tag_data._read()
            elif _on == self._root.TagTable.TagDefinition.TagTypeSignatures.multi_function_b_to_a_table_type:
                self.tag_data = self._root.TagTable.TagDefinition.LutBToAType(self._io, self, self._root)
                self.tag_data._read()
            self._debug['tag_data']['end'] = self._io.pos()
    class UInt8ArrayType(KaitaiStruct):
        """Array payload: a reserved word, then u1 values until EOF of the
        enclosing substream."""
        SEQ_FIELDS = ["reserved", "values"]
        def __init__(self, _io, _parent=None, _root=None):
            self._io = _io
            self._parent = _parent
            self._root = _root if _root else self
            # Per-field start/end stream offsets, recorded for debugging tools.
            self._debug = collections.defaultdict(dict)
        def _read(self):
            self._debug['reserved']['start'] = self._io.pos()
            self.reserved = self._io.ensure_fixed_contents(b"\x00\x00\x00\x00")
            self._debug['reserved']['end'] = self._io.pos()
            self._debug['values']['start'] = self._io.pos()
            self.values = []
            i = 0
            while not self._io.is_eof():
                if not 'arr' in self._debug['values']:
                    self._debug['values']['arr'] = []
                self._debug['values']['arr'].append({'start': self._io.pos()})
                self.values.append(self._io.read_u1())
                self._debug['values']['arr'][len(self.values) - 1]['end'] = self._io.pos()
                i += 1
            self._debug['values']['end'] = self._io.pos()
    class RedMatrixColumnTag(KaitaiStruct):
        """Red matrix-column tag: payload is an XYZ value when the type
        signature matches; otherwise tag_data stays unset."""
        SEQ_FIELDS = ["tag_type", "tag_data"]
        def __init__(self, _io, _parent=None, _root=None):
            self._io = _io
            self._parent = _parent
            self._root = _root if _root else self
            # Per-field start/end stream offsets, recorded for debugging tools.
            self._debug = collections.defaultdict(dict)
        def _read(self):
            self._debug['tag_type']['start'] = self._io.pos()
            self.tag_type = KaitaiStream.resolve_enum(self._root.TagTable.TagDefinition.TagTypeSignatures, self._io.read_u4be())
            self._debug['tag_type']['end'] = self._io.pos()
            self._debug['tag_data']['start'] = self._io.pos()
            _on = self.tag_type
            if _on == self._root.TagTable.TagDefinition.TagTypeSignatures.xyz_type:
                self.tag_data = self._root.TagTable.TagDefinition.XyzType(self._io, self, self._root)
                self.tag_data._read()
            self._debug['tag_data']['end'] = self._io.pos()
    class UInt64ArrayType(KaitaiStruct):
        """Array payload: a reserved word, then big-endian u8 values until EOF
        of the enclosing substream."""
        SEQ_FIELDS = ["reserved", "values"]
        def __init__(self, _io, _parent=None, _root=None):
            self._io = _io
            self._parent = _parent
            self._root = _root if _root else self
            # Per-field start/end stream offsets, recorded for debugging tools.
            self._debug = collections.defaultdict(dict)
        def _read(self):
            self._debug['reserved']['start'] = self._io.pos()
            self.reserved = self._io.ensure_fixed_contents(b"\x00\x00\x00\x00")
            self._debug['reserved']['end'] = self._io.pos()
            self._debug['values']['start'] = self._io.pos()
            self.values = []
            i = 0
            while not self._io.is_eof():
                if not 'arr' in self._debug['values']:
                    self._debug['values']['arr'] = []
                self._debug['values']['arr'].append({'start': self._io.pos()})
                self.values.append(self._io.read_u8be())
                self._debug['values']['arr'][len(self.values) - 1]['end'] = self._io.pos()
                i += 1
            self._debug['values']['end'] = self._io.pos()
    class BToD2Tag(KaitaiStruct):
        """B-to-device table 2 tag: a type signature followed by its payload
        (only the multi-process-elements type is parsed)."""
        SEQ_FIELDS = ["tag_type", "tag_data"]
        def __init__(self, _io, _parent=None, _root=None):
            self._io = _io
            self._parent = _parent
            self._root = _root if _root else self
            # Per-field start/end stream offsets, recorded for debugging tools.
            self._debug = collections.defaultdict(dict)
        def _read(self):
            self._debug['tag_type']['start'] = self._io.pos()
            self.tag_type = KaitaiStream.resolve_enum(self._root.TagTable.TagDefinition.TagTypeSignatures, self._io.read_u4be())
            self._debug['tag_type']['end'] = self._io.pos()
            self._debug['tag_data']['start'] = self._io.pos()
            _on = self.tag_type
            # tag_data stays unset for any other type signature.
            if _on == self._root.TagTable.TagDefinition.TagTypeSignatures.multi_process_elements_type:
                self.tag_data = self._root.TagTable.TagDefinition.MultiProcessElementsType(self._io, self, self._root)
                self.tag_data._read()
            self._debug['tag_data']['end'] = self._io.pos()
@property
def tag_data_element(self):
if hasattr(self, '_m_tag_data_element'):
return self._m_tag_data_element if hasattr(self, '_m_tag_data_element') else None
_pos = self._io.pos()
self._io.seek(self.offset_to_data_element)
self._debug['_m_tag_data_element']['start'] = self._io.pos()
_on = self.tag_signature
if _on == self._root.TagTable.TagDefinition.TagSignatures.colorant_order:
self._raw__m_tag_data_element = self._io.read_bytes(self.size_of_data_element)
io = KaitaiStream(BytesIO(self._raw__m_tag_data_element))
self._m_tag_data_element = self._root.TagTable.TagDefinition.ColorantOrderTag(io, self, self._root)
self._m_tag_data_element._read()
elif _on == self._root.TagTable.TagDefinition.TagSignatures.b_to_a_2:
self._raw__m_tag_data_element = self._io.read_bytes(self.size_of_data_element)
io = KaitaiStream(BytesIO(self._raw__m_tag_data_element))
self._m_tag_data_element = self._root.TagTable.TagDefinition.BToA2Tag(io, self, self._root)
self._m_tag_data_element._read()
elif _on == self._root.TagTable.TagDefinition.TagSignatures.media_white_point:
self._raw__m_tag_data_element = self._io.read_bytes(self.size_of_data_element)
io = KaitaiStream(BytesIO(self._raw__m_tag_data_element))
self._m_tag_data_element = self._root.TagTable.TagDefinition.MediaWhitePointTag(io, self, self._root)
self._m_tag_data_element._read()
elif _on == self._root.TagTable.TagDefinition.TagSignatures.b_to_d_3:
self._raw__m_tag_data_element = self._io.read_bytes(self.size_of_data_element)
io = KaitaiStream(BytesIO(self._raw__m_tag_data_element))
self._m_tag_data_element = self._root.TagTable.TagDefinition.BToD3Tag(io, self, self._root)
self._m_tag_data_element._read()
elif _on == self._root.TagTable.TagDefinition.TagSignatures.colorimetric_intent_image_state:
self._raw__m_tag_data_element = self._io.read_bytes(self.size_of_data_element)
io = KaitaiStream(BytesIO(self._raw__m_tag_data_element))
self._m_tag_data_element = self._root.TagTable.TagDefinition.ColorimetricIntentImageStateTag(io, self, self._root)
self._m_tag_data_element._read()
elif _on == self._root.TagTable.TagDefinition.TagSignatures.viewing_cond_desc:
self._raw__m_tag_data_element = self._io.read_bytes(self.size_of_data_element)
io = KaitaiStream(BytesIO(self._raw__m_tag_data_element))
self._m_tag_data_element = self._root.TagTable.TagDefinition.ViewingCondDescTag(io, self, self._root)
self._m_tag_data_element._read()
elif _on == self._root.TagTable.TagDefinition.TagSignatures.preview_1:
self._raw__m_tag_data_element = self._io.read_bytes(self.size_of_data_element)
io = KaitaiStream(BytesIO(self._raw__m_tag_data_element))
self._m_tag_data_element = self._root.TagTable.TagDefinition.Preview1Tag(io, self, self._root)
self._m_tag_data_element._read()
elif _on == self._root.TagTable.TagDefinition.TagSignatures.device_model_desc:
self._raw__m_tag_data_element = self._io.read_bytes(self.size_of_data_element)
io = KaitaiStream(BytesIO(self._raw__m_tag_data_element))
self._m_tag_data_element = self._root.TagTable.TagDefinition.DeviceModelDescTag(io, self, self._root)
self._m_tag_data_element._read()
elif _on == self._root.TagTable.TagDefinition.TagSignatures.chromaticity:
self._raw__m_tag_data_element = self._io.read_bytes(self.size_of_data_element)
io = KaitaiStream(BytesIO(self._raw__m_tag_data_element))
self._m_tag_data_element = self._root.TagTable.TagDefinition.ChromaticityTag(io, self, self._root)
self._m_tag_data_element._read()
elif _on == self._root.TagTable.TagDefinition.TagSignatures.preview_0:
self._raw__m_tag_data_element = self._io.read_bytes(self.size_of_data_element)
io = KaitaiStream(BytesIO(self._raw__m_tag_data_element))
self._m_tag_data_element = self._root.TagTable.TagDefinition.Preview0Tag(io, self, self._root)
self._m_tag_data_element._read()
elif _on == self._root.TagTable.TagDefinition.TagSignatures.d_to_b_1:
self._raw__m_tag_data_element = self._io.read_bytes(self.size_of_data_element)
io = KaitaiStream(BytesIO(self._raw__m_tag_data_element))
self._m_tag_data_element = self._root.TagTable.TagDefinition.DToB1Tag(io, self, self._root)
self._m_tag_data_element._read()
elif _on == self._root.TagTable.TagDefinition.TagSignatures.saturation_rendering_intent_gamut:
self._raw__m_tag_data_element = self._io.read_bytes(self.size_of_data_element)
io = KaitaiStream(BytesIO(self._raw__m_tag_data_element))
self._m_tag_data_element = self._root.TagTable.TagDefinition.SaturationRenderingIntentGamutTag(io, self, self._root)
self._m_tag_data_element._read()
elif _on == self._root.TagTable.TagDefinition.TagSignatures.b_to_a_0:
self._raw__m_tag_data_element = self._io.read_bytes(self.size_of_data_element)
io = KaitaiStream(BytesIO(self._raw__m_tag_data_element))
self._m_tag_data_element = self._root.TagTable.TagDefinition.BToA0Tag(io, self, self._root)
self._m_tag_data_element._read()
elif _on == self._root.TagTable.TagDefinition.TagSignatures.green_matrix_column:
self._raw__m_tag_data_element = self._io.read_bytes(self.size_of_data_element)
io = KaitaiStream(BytesIO(self._raw__m_tag_data_element))
self._m_tag_data_element = self._root.TagTable.TagDefinition.GreenMatrixColumnTag(io, self, self._root)
self._m_tag_data_element._read()
elif _on == self._root.TagTable.TagDefinition.TagSignatures.copyright:
self._raw__m_tag_data_element = self._io.read_bytes(self.size_of_data_element)
io = KaitaiStream(BytesIO(self._raw__m_tag_data_element))
self._m_tag_data_element = self._root.TagTable.TagDefinition.CopyrightTag(io, self, self._root)
self._m_tag_data_element._read()
elif _on == self._root.TagTable.TagDefinition.TagSignatures.blue_matrix_column:
self._raw__m_tag_data_element = self._io.read_bytes(self.size_of_data_element)
io = KaitaiStream(BytesIO(self._raw__m_tag_data_element))
self._m_tag_data_element = self._root.TagTable.TagDefinition.BlueMatrixColumnTag(io, self, self._root)
self._m_tag_data_element._read()
elif _on == self._root.TagTable.TagDefinition.TagSignatures.chromatic_adaptation:
self._raw__m_tag_data_element = self._io.read_bytes(self.size_of_data_element)
io = KaitaiStream(BytesIO(self._raw__m_tag_data_element))
self._m_tag_data_element = self._root.TagTable.TagDefinition.ChromaticAdaptationTag(io, self, self._root)
self._m_tag_data_element._read()
elif _on == self._root.TagTable.TagDefinition.TagSignatures.a_to_b_1:
self._raw__m_tag_data_element = self._io.read_bytes(self.size_of_data_element)
io = KaitaiStream(BytesIO(self._raw__m_tag_data_element))
self._m_tag_data_element = self._root.TagTable.TagDefinition.AToB1Tag(io, self, self._root)
self._m_tag_data_element._read()
elif _on == self._root.TagTable.TagDefinition.TagSignatures.output_response:
self._raw__m_tag_data_element = self._io.read_bytes(self.size_of_data_element)
io = KaitaiStream(BytesIO(self._raw__m_tag_data_element))
self._m_tag_data_element = self._root.TagTable.TagDefinition.OutputResponseTag(io, self, self._root)
self._m_tag_data_element._read()
elif _on == self._root.TagTable.TagDefinition.TagSignatures.profile_sequence:
self._raw__m_tag_data_element = self._io.read_bytes(self.size_of_data_element)
io = KaitaiStream(BytesIO(self._raw__m_tag_data_element))
self._m_tag_data_element = self._root.TagTable.TagDefinition.ProfileSequenceTag(io, self, self._root)
self._m_tag_data_element._read()
elif _on == self._root.TagTable.TagDefinition.TagSignatures.char_target:
self._raw__m_tag_data_element = self._io.read_bytes(self.size_of_data_element)
io = KaitaiStream(BytesIO(self._raw__m_tag_data_element))
self._m_tag_data_element = self._root.TagTable.TagDefinition.CharTargetTag(io, self, self._root)
self._m_tag_data_element._read()
elif _on == self._root.TagTable.TagDefinition.TagSignatures.red_trc:
self._raw__m_tag_data_element = self._io.read_bytes(self.size_of_data_element)
io = KaitaiStream(BytesIO(self._raw__m_tag_data_element))
self._m_tag_data_element = self._root.TagTable.TagDefinition.RedTrcTag(io, self, self._root)
self._m_tag_data_element._read()
elif _on == self._root.TagTable.TagDefinition.TagSignatures.gamut:
self._raw__m_tag_data_element = self._io.read_bytes(self.size_of_data_element)
io = KaitaiStream(BytesIO(self._raw__m_tag_data_element))
self._m_tag_data_element = self._root.TagTable.TagDefinition.GamutTag(io, self, self._root)
self._m_tag_data_element._read()
elif _on == self._root.TagTable.TagDefinition.TagSignatures.device_mfg_desc:
self._raw__m_tag_data_element = self._io.read_bytes(self.size_of_data_element)
io = KaitaiStream(BytesIO(self._raw__m_tag_data_element))
self._m_tag_data_element = self._root.TagTable.TagDefinition.DeviceMfgDescTag(io, self, self._root)
self._m_tag_data_element._read()
elif _on == self._root.TagTable.TagDefinition.TagSignatures.measurement:
self._raw__m_tag_data_element = self._io.read_bytes(self.size_of_data_element)
io = KaitaiStream(BytesIO(self._raw__m_tag_data_element))
self._m_tag_data_element = self._root.TagTable.TagDefinition.MeasurementTag(io, self, self._root)
self._m_tag_data_element._read()
elif _on == self._root.TagTable.TagDefinition.TagSignatures.green_trc:
self._raw__m_tag_data_element = self._io.read_bytes(self.size_of_data_element)
io = KaitaiStream(BytesIO(self._raw__m_tag_data_element))
self._m_tag_data_element = self._root.TagTable.TagDefinition.GreenTrcTag(io, self, self._root)
self._m_tag_data_element._read()
elif _on == self._root.TagTable.TagDefinition.TagSignatures.d_to_b_3:
self._raw__m_tag_data_element = self._io.read_bytes(self.size_of_data_element)
io = KaitaiStream(BytesIO(self._raw__m_tag_data_element))
self._m_tag_data_element = self._root.TagTable.TagDefinition.DToB3Tag(io, self, self._root)
self._m_tag_data_element._read()
elif _on == self._root.TagTable.TagDefinition.TagSignatures.colorant_table:
self._raw__m_tag_data_element = self._io.read_bytes(self.size_of_data_element)
io = KaitaiStream(BytesIO(self._raw__m_tag_data_element))
self._m_tag_data_element = self._root.TagTable.TagDefinition.ColorantTableTag(io, self, self._root)
self._m_tag_data_element._read()
elif _on == self._root.TagTable.TagDefinition.TagSignatures.d_to_b_2:
self._raw__m_tag_data_element = self._io.read_bytes(self.size_of_data_element)
io = KaitaiStream(BytesIO(self._raw__m_tag_data_element))
self._m_tag_data_element = self._root.TagTable.TagDefinition.DToB2Tag(io, self, self._root)
self._m_tag_data_element._read()
elif _on == self._root.TagTable.TagDefinition.TagSignatures.profile_description:
self._raw__m_tag_data_element = self._io.read_bytes(self.size_of_data_element)
io = KaitaiStream(BytesIO(self._raw__m_tag_data_element))
self._m_tag_data_element = self._root.TagTable.TagDefinition.ProfileDescriptionTag(io, self, self._root)
self._m_tag_data_element._read()
elif _on == self._root.TagTable.TagDefinition.TagSignatures.profile_sequence_identifier:
self._raw__m_tag_data_element = self._io.read_bytes(self.size_of_data_element)
io = KaitaiStream(BytesIO(self._raw__m_tag_data_element))
self._m_tag_data_element = self._root.TagTable.TagDefinition.ProfileSequenceIdentifierTag(io, self, self._root)
self._m_tag_data_element._read()
elif _on == self._root.TagTable.TagDefinition.TagSignatures.gray_trc:
self._raw__m_tag_data_element = self._io.read_bytes(self.size_of_data_element)
io = KaitaiStream(BytesIO(self._raw__m_tag_data_element))
self._m_tag_data_element = self._root.TagTable.TagDefinition.GrayTrcTag(io, self, self._root)
self._m_tag_data_element._read()
elif _on == self._root.TagTable.TagDefinition.TagSignatures.perceptual_rendering_intent_gamut:
self._raw__m_tag_data_element = self._io.read_bytes(self.size_of_data_element)
io = KaitaiStream(BytesIO(self._raw__m_tag_data_element))
self._m_tag_data_element = self._root.TagTable.TagDefinition.PerceptualRenderingIntentGamutTag(io, self, self._root)
self._m_tag_data_element._read()
elif _on == self._root.TagTable.TagDefinition.TagSignatures.blue_trc:
self._raw__m_tag_data_element = self._io.read_bytes(self.size_of_data_element)
io = KaitaiStream(BytesIO(self._raw__m_tag_data_element))
self._m_tag_data_element = self._root.TagTable.TagDefinition.BlueTrcTag(io, self, self._root)
self._m_tag_data_element._read()
elif _on == self._root.TagTable.TagDefinition.TagSignatures.d_to_b_0:
self._raw__m_tag_data_element = self._io.read_bytes(self.size_of_data_element)
io = KaitaiStream(BytesIO(self._raw__m_tag_data_element))
self._m_tag_data_element = self._root.TagTable.TagDefinition.DToB0Tag(io, self, self._root)
self._m_tag_data_element._read()
elif _on == self._root.TagTable.TagDefinition.TagSignatures.a_to_b_2:
self._raw__m_tag_data_element = self._io.read_bytes(self.size_of_data_element)
io = KaitaiStream(BytesIO(self._raw__m_tag_data_element))
self._m_tag_data_element = self._root.TagTable.TagDefinition.AToB2Tag(io, self, self._root)
self._m_tag_data_element._read()
elif _on == self._root.TagTable.TagDefinition.TagSignatures.calibration_date_time:
self._raw__m_tag_data_element = self._io.read_bytes(self.size_of_data_element)
io = KaitaiStream(BytesIO(self._raw__m_tag_data_element))
self._m_tag_data_element = self._root.TagTable.TagDefinition.CalibrationDateTimeTag(io, self, self._root)
self._m_tag_data_element._read()
elif _on == self._root.TagTable.TagDefinition.TagSignatures.colorant_table_out:
self._raw__m_tag_data_element = self._io.read_bytes(self.size_of_data_element)
io = KaitaiStream(BytesIO(self._raw__m_tag_data_element))
self._m_tag_data_element = self._root.TagTable.TagDefinition.ColorantTableOutTag(io, self, self._root)
self._m_tag_data_element._read()
elif _on == self._root.TagTable.TagDefinition.TagSignatures.red_matrix_column:
self._raw__m_tag_data_element = self._io.read_bytes(self.size_of_data_element)
io = KaitaiStream(BytesIO(self._raw__m_tag_data_element))
self._m_tag_data_element = self._root.TagTable.TagDefinition.RedMatrixColumnTag(io, self, self._root)
self._m_tag_data_element._read()
elif _on == self._root.TagTable.TagDefinition.TagSignatures.preview_2:
self._raw__m_tag_data_element = self._io.read_bytes(self.size_of_data_element)
io = KaitaiStream(BytesIO(self._raw__m_tag_data_element))
self._m_tag_data_element = self._root.TagTable.TagDefinition.Preview2Tag(io, self, self._root)
self._m_tag_data_element._read()
elif _on == self._root.TagTable.TagDefinition.TagSignatures.a_to_b_0:
self._raw__m_tag_data_element = self._io.read_bytes(self.size_of_data_element)
io = KaitaiStream(BytesIO(self._raw__m_tag_data_element))
self._m_tag_data_element = self._root.TagTable.TagDefinition.AToB0Tag(io, self, self._root)
self._m_tag_data_element._read()
elif _on == self._root.TagTable.TagDefinition.TagSignatures.luminance:
self._raw__m_tag_data_element = self._io.read_bytes(self.size_of_data_element)
io = KaitaiStream(BytesIO(self._raw__m_tag_data_element))
self._m_tag_data_element = self._root.TagTable.TagDefinition.LuminanceTag(io, self, self._root)
self._m_tag_data_element._read()
elif _on == self._root.TagTable.TagDefinition.TagSignatures.named_color_2:
self._raw__m_tag_data_element = self._io.read_bytes(self.size_of_data_element)
io = KaitaiStream(BytesIO(self._raw__m_tag_data_element))
self._m_tag_data_element = self._root.TagTable.TagDefinition.NamedColor2Tag(io, self, self._root)
self._m_tag_data_element._read()
elif _on == self._root.TagTable.TagDefinition.TagSignatures.b_to_d_2:
self._raw__m_tag_data_element = self._io.read_bytes(self.size_of_data_element)
io = KaitaiStream(BytesIO(self._raw__m_tag_data_element))
self._m_tag_data_element = self._root.TagTable.TagDefinition.BToD2Tag(io, self, self._root)
self._m_tag_data_element._read()
elif _on == self._root.TagTable.TagDefinition.TagSignatures.b_to_d_0:
self._raw__m_tag_data_element = self._io.read_bytes(self.size_of_data_element)
io = KaitaiStream(BytesIO(self._raw__m_tag_data_element))
self._m_tag_data_element = self._root.TagTable.TagDefinition.BToD0Tag(io, self, self._root)
self._m_tag_data_element._read()
elif _on == self._root.TagTable.TagDefinition.TagSignatures.b_to_a_1:
self._raw__m_tag_data_element = self._io.read_bytes(self.size_of_data_element)
io = KaitaiStream(BytesIO(self._raw__m_tag_data_element))
self._m_tag_data_element = self._root.TagTable.TagDefinition.BToA1Tag(io, self, self._root)
self._m_tag_data_element._read()
elif _on == self._root.TagTable.TagDefinition.TagSignatures.b_to_d_1:
self._raw__m_tag_data_element = self._io.read_bytes(self.size_of_data_element)
io = KaitaiStream(BytesIO(self._raw__m_tag_data_element))
self._m_tag_data_element = self._root.TagTable.TagDefinition.BToD1Tag(io, self, self._root)
self._m_tag_data_element._read()
elif _on == self._root.TagTable.TagDefinition.TagSignatures.viewing_conditions:
self._raw__m_tag_data_element = self._io.read_bytes(self.size_of_data_element)
io = KaitaiStream(BytesIO(self._raw__m_tag_data_element))
self._m_tag_data_element = self._root.TagTable.TagDefinition.ViewingConditionsTag(io, self, self._root)
self._m_tag_data_element._read()
elif _on == self._root.TagTable.TagDefinition.TagSignatures.technology:
self._raw__m_tag_data_element = self._io.read_bytes(self.size_of_data_element)
io = KaitaiStream(BytesIO(self._raw__m_tag_data_element))
self._m_tag_data_element = self._root.TagTable.TagDefinition.TechnologyTag(io, self, self._root)
self._m_tag_data_element._read()
else:
self._m_tag_data_element = self._io.read_bytes(self.size_of_data_element)
self._debug['_m_tag_data_element']['end'] = self._io.pos()
self._io.seek(_pos)
return self._m_tag_data_element if hasattr(self, '_m_tag_data_element') else None
    class DeviceAttributes(KaitaiStruct):
        """Device-attributes bit field: four 1-bit media flags, 28 reserved
        bits, and 32 vendor-specific bits, read most-significant bit first."""
        class DeviceAttributesReflectiveOrTransparency(Enum):
            reflective = 0
            transparency = 1
        class DeviceAttributesGlossyOrMatte(Enum):
            glossy = 0
            matte = 1
        class DeviceAttributesPositiveOrNegativeMediaPolarity(Enum):
            positive_media_polarity = 0
            negative_media_polarity = 1
        class DeviceAttributesColourOrBlackAndWhiteMedia(Enum):
            colour_media = 0
            black_and_white_media = 1
        SEQ_FIELDS = ["reflective_or_transparency", "glossy_or_matte", "positive_or_negative_media_polarity", "colour_or_black_and_white_media", "reserved", "vendor_specific"]
        def __init__(self, _io, _parent=None, _root=None):
            self._io = _io
            self._parent = _parent
            self._root = _root if _root else self
            # Per-field start/end stream offsets, recorded for debugging tools.
            self._debug = collections.defaultdict(dict)
        def _read(self):
            self._debug['reflective_or_transparency']['start'] = self._io.pos()
            self.reflective_or_transparency = KaitaiStream.resolve_enum(self._root.DeviceAttributes.DeviceAttributesReflectiveOrTransparency, self._io.read_bits_int(1))
            self._debug['reflective_or_transparency']['end'] = self._io.pos()
            self._debug['glossy_or_matte']['start'] = self._io.pos()
            self.glossy_or_matte = KaitaiStream.resolve_enum(self._root.DeviceAttributes.DeviceAttributesGlossyOrMatte, self._io.read_bits_int(1))
            self._debug['glossy_or_matte']['end'] = self._io.pos()
            self._debug['positive_or_negative_media_polarity']['start'] = self._io.pos()
            self.positive_or_negative_media_polarity = KaitaiStream.resolve_enum(self._root.DeviceAttributes.DeviceAttributesPositiveOrNegativeMediaPolarity, self._io.read_bits_int(1))
            self._debug['positive_or_negative_media_polarity']['end'] = self._io.pos()
            self._debug['colour_or_black_and_white_media']['start'] = self._io.pos()
            self.colour_or_black_and_white_media = KaitaiStream.resolve_enum(self._root.DeviceAttributes.DeviceAttributesColourOrBlackAndWhiteMedia, self._io.read_bits_int(1))
            self._debug['colour_or_black_and_white_media']['end'] = self._io.pos()
            self._debug['reserved']['start'] = self._io.pos()
            self.reserved = self._io.read_bits_int(28)
            self._debug['reserved']['end'] = self._io.pos()
            self._debug['vendor_specific']['start'] = self._io.pos()
            self.vendor_specific = self._io.read_bits_int(32)
            self._debug['vendor_specific']['end'] = self._io.pos()
class DeviceManufacturer(KaitaiStruct):
class DeviceManufacturers(Enum):
erdt_systems_gmbh_and_co_kg = 878981744
aamazing_technologies_inc = 1094798657
acer_peripherals = 1094927698
acolyte_color_research = 1094929492
actix_sytems_inc = 1094931529
adara_technology_inc = 1094992210
adobe_systems_incorporated = 1094992453
adi_systems_inc = 1094994208
agfa_graphics_nv = 1095190081
alps_electric_usa_inc = 1095519556
alps_electric_usa_inc_2 = 1095520339
alwan_color_expertise = 1095522126
amiable_technologies_inc = 1095586889
aoc_international_usa_ltd = 1095713568
apago = 1095778631
apple_computer_inc = 1095782476
ast = 1095980064
atandt_computer_systems = 1096033876
barbieri_electronic = 1111573836
barco_nv = 1112687439
breakpoint_pty_limited = 1112689488
brother_industries_ltd = 1112690516
bull = 1112886348
bus_computer_systems = 1112888096
c_itoh = 1127041364
intel_corporation = 1128353106
canon_inc_canon_development_americas_inc = 1128353359
carroll_touch = 1128354386
casio_computer_co_ltd = 1128354633
colorbus_pl = 1128420691
crossfield = 1128614944
crossfield_2 = 1128615032
cgs_publishing_technologies_international_gmbh = 1128747808
rochester_robotics = 1128811808
colour_imaging_group_london = 1128875852
citizen = 1128879177
candela_ltd = 1129066544
color_iq = 1129072977
chromaco_inc = 1129136975
chromix = 1129146712
colorgraphic_communications_corporation = 1129270351
compaq_computer_corporation = 1129270608
compeq_usa_focus_technology = 1129270640
conrac_display_products = 1129270866
cordata_technologies_inc = 1129271876
compaq_computer_corporation_2 = 1129337120
colorpro = 1129337423
cornerstone = 1129467424
ctx_international_inc = 1129601056
colorvision = 1129728339
fujitsu_laboratories_ltd = 1129792288
darius_technology_ltd = 1145131593
dataproducts = 1145132097
dry_creek_photo = 1145262112
digital_contents_resource_center_chung_ang_university = 1145262659
dell_computer_corporation = 1145392204
dainippon_ink_and_chemicals = 1145652000
diconix = 1145652047
digital = 1145653065
digital_light_and_color = 1145841219
doppelganger_llc = 1146113095
dainippon_screen = 1146298400
doosol = 1146310476
dupont = 1146441806
epson = 1162892111
esko_graphics = 1163086671
electronics_and_telecommunications_research_institute = 1163153993
everex_systems_inc = 1163281746
exactcode_gmbh = 1163411779
eizo_nanao_corporation = 1164540527
falco_data_products_inc = 1178684483
fuji_photo_film_coltd = 1179000864
fujifilm_electronic_imaging_ltd = 1179010377
fnord_software = 1179537988
fora_inc = 1179603521
forefront_technology_corporation = 1179603525
fujitsu = 1179658794
waytech_development_inc = 1179664672
fujitsu_2 = 1179994697
fuji_xerox_co_ltd = 1180180512
gcc_technologies_inc = 1195590432
global_graphics_software_limited = 1195856716
gretagmacbeth = 1196245536
gmg_gmbh_and_co_kg = 1196246816
goldstar_technology_inc = 1196379204
giantprint_pty_ltd = 1196446292
gretagmacbeth_2 = 1196707138
waytech_development_inc_2 = 1196835616
sony_corporation = 1196896843
hci = 1212369184
heidelberger_druckmaschinen_ag = 1212435744
hermes = 1212502605
hitachi_america_ltd = 1212765249
hewlett_packard = 1213210656
hitachi_ltd = 1213481760
hiti_digital_inc = 1214862441
ibm_corporation = 1229081888
scitex_corporation_ltd = 1229213268
hewlett_packard_2 = 1229275936
iiyama_north_america_inc = 1229543745
ikegami_electronics_inc = 1229669703
image_systems_corporation = 1229799751
ingram_micro_inc = 1229801760
intel_corporation_2 = 1229870147
intl = 1229870156
intra_electronics_usa_inc = 1229870162
iocomm_international_technology_corporation = 1229931343
infoprint_solutions_company = 1230000928
scitex_corporation_ltd_3 = 1230129491
ichikawa_soft_laboratory = 1230195744
itnl = 1230261836
ivm = 1230392608
iwatsu_electric_co_ltd = 1230455124
scitex_corporation_ltd_2 = 1231318644
inca_digital_printers_ltd = 1231971169
scitex_corporation_ltd_4 = 1232234867
jetsoft_development = 1246971476
jvc_information_products_co = 1247167264
scitex_corporation_ltd_6 = 1262572116
kfc_computek_components_corporation = 1262895904
klh_computers = 1263290400
konica_minolta_holdings_inc = 1263355972
konica_corporation = 1263420225
kodak = 1263486017
kyocera = 1264144195
scitex_corporation_ltd_7 = 1264677492
leica_camera_ag = 1279476039
leeds_colour = 1279476548
left_dakota = 1279541579
leading_technology_inc = 1279607108
lexmark_international_inc = 1279613005
link_computer_inc = 1279872587
linotronic = 1279872591
lite_on_inc = 1279874117
mag_computronic_usa_inc = 1296123715
mag_innovision_inc = 1296123721
mannesmann = 1296125518
micron_technology_inc = 1296646990
microtek = 1296646994
microvitec_inc = 1296646998
minolta = 1296649807
mitsubishi_electronics_america_inc = 1296651347
mitsuba_corporation = 1296651379
minolta_2 = 1296976980
modgraph_inc = 1297040455
monitronix_inc = 1297043017
monaco_systems_inc = 1297043027
morse_technology_inc = 1297044051
motive_systems = 1297044553
microsoft_corporation = 1297303124
mutoh_industries_ltd = 1297437775
mitsubishi_electric_corporation_kyoto_works = 1298756723
nanao_usa_corporation = 1312902721
nec_corporation = 1313162016
nexpress_solutions_llc = 1313167440
nissei_sangyo_america_ltd = 1313428307
nikon_corporation = 1313558350
oce_technologies_bv = 1329808672
ocecolor = 1329808707
oki = 1330333984
okidata = 1330334020
okidata_2 = 1330334032
olivetti = 1330399574
olympus_optical_co_ltd = 1330403661
onyx_graphics = 1330534744
optiquest = 1330664521
packard_bell = 1346454347
matsushita_electric_industrial_co_ltd = 1346457153
pantone_inc = 1346457172
packard_bell_2 = 1346522656
pfu_limited = 1346786592
philips_consumer_electronics_co = 1346914636
hoya_corporation_pentax_imaging_systems_division = 1347310680
phase_one_a_s = 1347382885
premier_computer_innovations = 1347568973
princeton_graphic_systems = 1347569998
princeton_publishing_labs = 1347570000
qlux = 1363957080
qms_inc = 1364022048
qpcard_ab = 1364214596
quadlaser = 1364541764
qume_corporation = 1364544837
radius_inc = 1380009033
integrated_color_solutions_inc_2 = 1380205688
roland_dg_corporation = 1380206368
redms_group_inc = 1380271181
relisys = 1380273225
rolf_gierling_multitools = 1380404563
ricoh_corporation = 1380533071
edmund_ronald = 1380863044
royal = 1380931905
ricoh_printing_systemsltd = 1380991776
royal_information_electronics_co_ltd = 1381256224
sampo_corporation_of_america = 1396788560
samsung_inc = 1396788563
jaime_santana_pomares = 1396788820
scitex_corporation_ltd_9 = 1396918612
dainippon_screen_3 = 1396920910
scitex_corporation_ltd_12 = 1396985888
samsung_electronics_coltd = 1397048096
seiko_instruments_usa_inc = 1397049675
seikosha = 1397049707
scanguycom = 1397183833
sharp_laboratories = 1397244242
international_color_consortium = 1397310275
sony_corporation_2 = 1397706329
spectracal = 1397769036
star = 1398030674
sampo_technology_corporation = 1398031136
scitex_corporation_ltd_10 = 1399023988
scitex_corporation_ltd_13 = 1399091232
sony_corporation_3 = 1399811705
talon_technology_corporation = 1413565519
tandy = 1413566020
tatung_co_of_america_inc = 1413567573
taxan_america_inc = 1413568577
tokyo_denshi_sekei_kk = 1413763872
teco_information_systems_inc = 1413825359
tegra = 1413826386
tektronix_inc = 1413827412
texas_instruments = 1414078496
typemaker_ltd = 1414351698
toshiba_corp = 1414484802
toshiba_inc = 1414484808
totoku_electric_co_ltd = 1414485067
triumph = 1414678869
toshiba_tec_corporation = 1414742612
ttx_computer_products_inc = 1414813728
tvm_professional_monitor_corporation = 1414941984
tw_casper_corporation = 1414996000
ulead_systems = 1431065432
unisys = 1431193939
utz_fehlau_and_sohn = 1431591494
varityper = 1447121481
viewsonic = 1447642455
visual_communication = 1447646028
wang = 1463897671
wilbur_imaging = 1464615506
ware_to_go = 1465141042
wyse_technology = 1465471813
xerox_corporation = 1480938072
x_rite = 1481787732
lavanyas_test_company = 1513173555
zoran_corporation = 1515340110
zebra_technologies_inc = 1516593778
basiccolor_gmbh = 1648968515
bergdesign_incorporated = 1650815591
integrated_color_solutions_inc = 1667594596
macdermid_colorspan_inc = 1668051824
dainippon_screen_2 = 1685266464
dupont_2 = 1685418094
fujifilm_electronic_imaging_ltd_2 = 1717986665
fluxdata_corporation = 1718383992
scitex_corporation_ltd_5 = 1769105779
scitex_corporation_ltd_8 = 1801548404
erdt_systems_gmbh_and_co_kg_2 = 1868706916
medigraph_gmbh = 1868720483
qubyx_sarl = 1903518329
scitex_corporation_ltd_11 = 1935894900
dainippon_screen_4 = 1935897198
scitex_corporation_ltd_14 = 1935962144
siwi_grafika_corporation = 1936291689
yxymaster_gmbh = 2037938541
SEQ_FIELDS = ["device_manufacturer"]
def __init__(self, _io, _parent=None, _root=None):
self._io = _io
self._parent = _parent
self._root = _root if _root else self
self._debug = collections.defaultdict(dict)
def _read(self):
self._debug['device_manufacturer']['start'] = self._io.pos()
self.device_manufacturer = KaitaiStream.resolve_enum(self._root.DeviceManufacturer.DeviceManufacturers, self._io.read_u4be())
self._debug['device_manufacturer']['end'] = self._io.pos()
class S15Fixed16Number(KaitaiStruct):
SEQ_FIELDS = ["number"]
def __init__(self, _io, _parent=None, _root=None):
self._io = _io
self._parent = _parent
self._root = _root if _root else self
self._debug = collections.defaultdict(dict)
def _read(self):
self._debug['number']['start'] = self._io.pos()
self.number = self._io.read_bytes(4)
self._debug['number']['end'] = self._io.pos()
class PositionNumber(KaitaiStruct):
SEQ_FIELDS = ["offset_to_data_element", "size_of_data_element"]
def __init__(self, _io, _parent=None, _root=None):
self._io = _io
self._parent = _parent
self._root = _root if _root else self
self._debug = collections.defaultdict(dict)
def _read(self):
self._debug['offset_to_data_element']['start'] = self._io.pos()
self.offset_to_data_element = self._io.read_u4be()
self._debug['offset_to_data_element']['end'] = self._io.pos()
self._debug['size_of_data_element']['start'] = self._io.pos()
self.size_of_data_element = self._io.read_u4be()
self._debug['size_of_data_element']['end'] = self._io.pos()
|
[
"collections.defaultdict",
"pkg_resources.parse_version"
] |
[((275, 300), 'pkg_resources.parse_version', 'parse_version', (['ks_version'], {}), '(ks_version)\n', (288, 300), False, 'from pkg_resources import parse_version\n'), ((303, 323), 'pkg_resources.parse_version', 'parse_version', (['"""0.7"""'], {}), "('0.7')\n", (316, 323), False, 'from pkg_resources import parse_version\n'), ((689, 718), 'collections.defaultdict', 'collections.defaultdict', (['dict'], {}), '(dict)\n', (712, 718), False, 'import collections\n'), ((1444, 1473), 'collections.defaultdict', 'collections.defaultdict', (['dict'], {}), '(dict)\n', (1467, 1473), False, 'import collections\n'), ((1940, 1969), 'collections.defaultdict', 'collections.defaultdict', (['dict'], {}), '(dict)\n', (1963, 1969), False, 'import collections\n'), ((2705, 2734), 'collections.defaultdict', 'collections.defaultdict', (['dict'], {}), '(dict)\n', (2728, 2734), False, 'import collections\n'), ((6320, 6349), 'collections.defaultdict', 'collections.defaultdict', (['dict'], {}), '(dict)\n', (6343, 6349), False, 'import collections\n'), ((13424, 13453), 'collections.defaultdict', 'collections.defaultdict', (['dict'], {}), '(dict)\n', (13447, 13453), False, 'import collections\n'), ((14249, 14278), 'collections.defaultdict', 'collections.defaultdict', (['dict'], {}), '(dict)\n', (14272, 14278), False, 'import collections\n'), ((15577, 15606), 'collections.defaultdict', 'collections.defaultdict', (['dict'], {}), '(dict)\n', (15600, 15606), False, 'import collections\n'), ((16540, 16569), 'collections.defaultdict', 'collections.defaultdict', (['dict'], {}), '(dict)\n', (16563, 16569), False, 'import collections\n'), ((17039, 17068), 'collections.defaultdict', 'collections.defaultdict', (['dict'], {}), '(dict)\n', (17062, 17068), False, 'import collections\n'), ((184954, 184983), 'collections.defaultdict', 'collections.defaultdict', (['dict'], {}), '(dict)\n', (184977, 184983), False, 'import collections\n'), ((198680, 198709), 'collections.defaultdict', 'collections.defaultdict', 
(['dict'], {}), '(dict)\n', (198703, 198709), False, 'import collections\n'), ((199291, 199320), 'collections.defaultdict', 'collections.defaultdict', (['dict'], {}), '(dict)\n', (199314, 199320), False, 'import collections\n'), ((199825, 199854), 'collections.defaultdict', 'collections.defaultdict', (['dict'], {}), '(dict)\n', (199848, 199854), False, 'import collections\n'), ((11072, 11101), 'collections.defaultdict', 'collections.defaultdict', (['dict'], {}), '(dict)\n', (11095, 11101), False, 'import collections\n'), ((12337, 12366), 'collections.defaultdict', 'collections.defaultdict', (['dict'], {}), '(dict)\n', (12360, 12366), False, 'import collections\n'), ((22282, 22311), 'collections.defaultdict', 'collections.defaultdict', (['dict'], {}), '(dict)\n', (22305, 22311), False, 'import collections\n'), ((23405, 23434), 'collections.defaultdict', 'collections.defaultdict', (['dict'], {}), '(dict)\n', (23428, 23434), False, 'import collections\n'), ((24513, 24542), 'collections.defaultdict', 'collections.defaultdict', (['dict'], {}), '(dict)\n', (24536, 24542), False, 'import collections\n'), ((25918, 25947), 'collections.defaultdict', 'collections.defaultdict', (['dict'], {}), '(dict)\n', (25941, 25947), False, 'import collections\n'), ((33401, 33430), 'collections.defaultdict', 'collections.defaultdict', (['dict'], {}), '(dict)\n', (33424, 33430), False, 'import collections\n'), ((34532, 34561), 'collections.defaultdict', 'collections.defaultdict', (['dict'], {}), '(dict)\n', (34555, 34561), False, 'import collections\n'), ((36030, 36059), 'collections.defaultdict', 'collections.defaultdict', (['dict'], {}), '(dict)\n', (36053, 36059), False, 'import collections\n'), ((38265, 38294), 'collections.defaultdict', 'collections.defaultdict', (['dict'], {}), '(dict)\n', (38288, 38294), False, 'import collections\n'), ((40157, 40186), 'collections.defaultdict', 'collections.defaultdict', (['dict'], {}), '(dict)\n', (40180, 40186), False, 'import collections\n'), 
((41266, 41295), 'collections.defaultdict', 'collections.defaultdict', (['dict'], {}), '(dict)\n', (41289, 41295), False, 'import collections\n'), ((42886, 42915), 'collections.defaultdict', 'collections.defaultdict', (['dict'], {}), '(dict)\n', (42909, 42915), False, 'import collections\n'), ((46459, 46488), 'collections.defaultdict', 'collections.defaultdict', (['dict'], {}), '(dict)\n', (46482, 46488), False, 'import collections\n'), ((48358, 48387), 'collections.defaultdict', 'collections.defaultdict', (['dict'], {}), '(dict)\n', (48381, 48387), False, 'import collections\n'), ((51024, 51053), 'collections.defaultdict', 'collections.defaultdict', (['dict'], {}), '(dict)\n', (51047, 51053), False, 'import collections\n'), ((52747, 52776), 'collections.defaultdict', 'collections.defaultdict', (['dict'], {}), '(dict)\n', (52770, 52776), False, 'import collections\n'), ((54073, 54102), 'collections.defaultdict', 'collections.defaultdict', (['dict'], {}), '(dict)\n', (54096, 54102), False, 'import collections\n'), ((57752, 57781), 'collections.defaultdict', 'collections.defaultdict', (['dict'], {}), '(dict)\n', (57775, 57781), False, 'import collections\n'), ((58873, 58902), 'collections.defaultdict', 'collections.defaultdict', (['dict'], {}), '(dict)\n', (58896, 58902), False, 'import collections\n'), ((60286, 60315), 'collections.defaultdict', 'collections.defaultdict', (['dict'], {}), '(dict)\n', (60309, 60315), False, 'import collections\n'), ((61413, 61442), 'collections.defaultdict', 'collections.defaultdict', (['dict'], {}), '(dict)\n', (61436, 61442), False, 'import collections\n'), ((62539, 62568), 'collections.defaultdict', 'collections.defaultdict', (['dict'], {}), '(dict)\n', (62562, 62568), False, 'import collections\n'), ((63678, 63707), 'collections.defaultdict', 'collections.defaultdict', (['dict'], {}), '(dict)\n', (63701, 63707), False, 'import collections\n'), ((64790, 64819), 'collections.defaultdict', 'collections.defaultdict', (['dict'], {}), 
'(dict)\n', (64813, 64819), False, 'import collections\n'), ((66503, 66532), 'collections.defaultdict', 'collections.defaultdict', (['dict'], {}), '(dict)\n', (66526, 66532), False, 'import collections\n'), ((67648, 67677), 'collections.defaultdict', 'collections.defaultdict', (['dict'], {}), '(dict)\n', (67671, 67677), False, 'import collections\n'), ((68793, 68822), 'collections.defaultdict', 'collections.defaultdict', (['dict'], {}), '(dict)\n', (68816, 68822), False, 'import collections\n'), ((69906, 69935), 'collections.defaultdict', 'collections.defaultdict', (['dict'], {}), '(dict)\n', (69929, 69935), False, 'import collections\n'), ((71047, 71076), 'collections.defaultdict', 'collections.defaultdict', (['dict'], {}), '(dict)\n', (71070, 71076), False, 'import collections\n'), ((72761, 72790), 'collections.defaultdict', 'collections.defaultdict', (['dict'], {}), '(dict)\n', (72784, 72790), False, 'import collections\n'), ((74140, 74169), 'collections.defaultdict', 'collections.defaultdict', (['dict'], {}), '(dict)\n', (74163, 74169), False, 'import collections\n'), ((75276, 75305), 'collections.defaultdict', 'collections.defaultdict', (['dict'], {}), '(dict)\n', (75299, 75305), False, 'import collections\n'), ((76412, 76441), 'collections.defaultdict', 'collections.defaultdict', (['dict'], {}), '(dict)\n', (76435, 76441), False, 'import collections\n'), ((78458, 78487), 'collections.defaultdict', 'collections.defaultdict', (['dict'], {}), '(dict)\n', (78481, 78487), False, 'import collections\n'), ((87410, 87439), 'collections.defaultdict', 'collections.defaultdict', (['dict'], {}), '(dict)\n', (87433, 87439), False, 'import collections\n'), ((88542, 88571), 'collections.defaultdict', 'collections.defaultdict', (['dict'], {}), '(dict)\n', (88565, 88571), False, 'import collections\n'), ((90452, 90481), 'collections.defaultdict', 'collections.defaultdict', (['dict'], {}), '(dict)\n', (90475, 90481), False, 'import collections\n'), ((92940, 92969), 
'collections.defaultdict', 'collections.defaultdict', (['dict'], {}), '(dict)\n', (92963, 92969), False, 'import collections\n'), ((93891, 93920), 'collections.defaultdict', 'collections.defaultdict', (['dict'], {}), '(dict)\n', (93914, 93920), False, 'import collections\n'), ((97598, 97627), 'collections.defaultdict', 'collections.defaultdict', (['dict'], {}), '(dict)\n', (97621, 97627), False, 'import collections\n'), ((100832, 100861), 'collections.defaultdict', 'collections.defaultdict', (['dict'], {}), '(dict)\n', (100855, 100861), False, 'import collections\n'), ((101680, 101709), 'collections.defaultdict', 'collections.defaultdict', (['dict'], {}), '(dict)\n', (101703, 101709), False, 'import collections\n'), ((102821, 102850), 'collections.defaultdict', 'collections.defaultdict', (['dict'], {}), '(dict)\n', (102844, 102850), False, 'import collections\n'), ((104821, 104850), 'collections.defaultdict', 'collections.defaultdict', (['dict'], {}), '(dict)\n', (104844, 104850), False, 'import collections\n'), ((105736, 105765), 'collections.defaultdict', 'collections.defaultdict', (['dict'], {}), '(dict)\n', (105759, 105765), False, 'import collections\n'), ((106875, 106904), 'collections.defaultdict', 'collections.defaultdict', (['dict'], {}), '(dict)\n', (106898, 106904), False, 'import collections\n'), ((108598, 108627), 'collections.defaultdict', 'collections.defaultdict', (['dict'], {}), '(dict)\n', (108621, 108627), False, 'import collections\n'), ((109873, 109902), 'collections.defaultdict', 'collections.defaultdict', (['dict'], {}), '(dict)\n', (109896, 109902), False, 'import collections\n'), ((112476, 112505), 'collections.defaultdict', 'collections.defaultdict', (['dict'], {}), '(dict)\n', (112499, 112505), False, 'import collections\n'), ((113763, 113792), 'collections.defaultdict', 'collections.defaultdict', (['dict'], {}), '(dict)\n', (113786, 113792), False, 'import collections\n'), ((114984, 115013), 'collections.defaultdict', 
'collections.defaultdict', (['dict'], {}), '(dict)\n', (115007, 115013), False, 'import collections\n'), ((115988, 116017), 'collections.defaultdict', 'collections.defaultdict', (['dict'], {}), '(dict)\n', (116011, 116017), False, 'import collections\n'), ((119176, 119205), 'collections.defaultdict', 'collections.defaultdict', (['dict'], {}), '(dict)\n', (119199, 119205), False, 'import collections\n'), ((120285, 120314), 'collections.defaultdict', 'collections.defaultdict', (['dict'], {}), '(dict)\n', (120308, 120314), False, 'import collections\n'), ((121739, 121768), 'collections.defaultdict', 'collections.defaultdict', (['dict'], {}), '(dict)\n', (121762, 121768), False, 'import collections\n'), ((125682, 125711), 'collections.defaultdict', 'collections.defaultdict', (['dict'], {}), '(dict)\n', (125705, 125711), False, 'import collections\n'), ((127395, 127424), 'collections.defaultdict', 'collections.defaultdict', (['dict'], {}), '(dict)\n', (127418, 127424), False, 'import collections\n'), ((129131, 129160), 'collections.defaultdict', 'collections.defaultdict', (['dict'], {}), '(dict)\n', (129154, 129160), False, 'import collections\n'), ((130248, 130277), 'collections.defaultdict', 'collections.defaultdict', (['dict'], {}), '(dict)\n', (130271, 130277), False, 'import collections\n'), ((131358, 131387), 'collections.defaultdict', 'collections.defaultdict', (['dict'], {}), '(dict)\n', (131381, 131387), False, 'import collections\n'), ((132493, 132522), 'collections.defaultdict', 'collections.defaultdict', (['dict'], {}), '(dict)\n', (132516, 132522), False, 'import collections\n'), ((133610, 133639), 'collections.defaultdict', 'collections.defaultdict', (['dict'], {}), '(dict)\n', (133633, 133639), False, 'import collections\n'), ((134738, 134767), 'collections.defaultdict', 'collections.defaultdict', (['dict'], {}), '(dict)\n', (134761, 134767), False, 'import collections\n'), ((135876, 135905), 'collections.defaultdict', 'collections.defaultdict', 
(['dict'], {}), '(dict)\n', (135899, 135905), False, 'import collections\n'), ((137075, 137104), 'collections.defaultdict', 'collections.defaultdict', (['dict'], {}), '(dict)\n', (137098, 137104), False, 'import collections\n'), ((141573, 141602), 'collections.defaultdict', 'collections.defaultdict', (['dict'], {}), '(dict)\n', (141596, 141602), False, 'import collections\n'), ((142719, 142748), 'collections.defaultdict', 'collections.defaultdict', (['dict'], {}), '(dict)\n', (142742, 142748), False, 'import collections\n'), ((143919, 143948), 'collections.defaultdict', 'collections.defaultdict', (['dict'], {}), '(dict)\n', (143942, 143948), False, 'import collections\n'), ((145666, 145695), 'collections.defaultdict', 'collections.defaultdict', (['dict'], {}), '(dict)\n', (145689, 145695), False, 'import collections\n'), ((146804, 146833), 'collections.defaultdict', 'collections.defaultdict', (['dict'], {}), '(dict)\n', (146827, 146833), False, 'import collections\n'), ((148293, 148322), 'collections.defaultdict', 'collections.defaultdict', (['dict'], {}), '(dict)\n', (148316, 148322), False, 'import collections\n'), ((150208, 150237), 'collections.defaultdict', 'collections.defaultdict', (['dict'], {}), '(dict)\n', (150231, 150237), False, 'import collections\n'), ((152877, 152906), 'collections.defaultdict', 'collections.defaultdict', (['dict'], {}), '(dict)\n', (152900, 152906), False, 'import collections\n'), ((154261, 154290), 'collections.defaultdict', 'collections.defaultdict', (['dict'], {}), '(dict)\n', (154284, 154290), False, 'import collections\n'), ((155540, 155569), 'collections.defaultdict', 'collections.defaultdict', (['dict'], {}), '(dict)\n', (155563, 155569), False, 'import collections\n'), ((157257, 157286), 'collections.defaultdict', 'collections.defaultdict', (['dict'], {}), '(dict)\n', (157280, 157286), False, 'import collections\n'), ((158544, 158573), 'collections.defaultdict', 'collections.defaultdict', (['dict'], {}), '(dict)\n', (158567, 
158573), False, 'import collections\n'), ((159649, 159678), 'collections.defaultdict', 'collections.defaultdict', (['dict'], {}), '(dict)\n', (159672, 159678), False, 'import collections\n'), ((160928, 160957), 'collections.defaultdict', 'collections.defaultdict', (['dict'], {}), '(dict)\n', (160951, 160957), False, 'import collections\n'), ((30827, 30856), 'collections.defaultdict', 'collections.defaultdict', (['dict'], {}), '(dict)\n', (30850, 30856), False, 'import collections\n'), ((81508, 81537), 'collections.defaultdict', 'collections.defaultdict', (['dict'], {}), '(dict)\n', (81531, 81537), False, 'import collections\n'), ((82692, 82721), 'collections.defaultdict', 'collections.defaultdict', (['dict'], {}), '(dict)\n', (82715, 82721), False, 'import collections\n'), ((84050, 84079), 'collections.defaultdict', 'collections.defaultdict', (['dict'], {}), '(dict)\n', (84073, 84079), False, 'import collections\n'), ((84709, 84738), 'collections.defaultdict', 'collections.defaultdict', (['dict'], {}), '(dict)\n', (84732, 84738), False, 'import collections\n'), ((86441, 86470), 'collections.defaultdict', 'collections.defaultdict', (['dict'], {}), '(dict)\n', (86464, 86470), False, 'import collections\n'), ((96582, 96611), 'collections.defaultdict', 'collections.defaultdict', (['dict'], {}), '(dict)\n', (96605, 96611), False, 'import collections\n'), ((99292, 99321), 'collections.defaultdict', 'collections.defaultdict', (['dict'], {}), '(dict)\n', (99315, 99321), False, 'import collections\n'), ((118330, 118359), 'collections.defaultdict', 'collections.defaultdict', (['dict'], {}), '(dict)\n', (118353, 118359), False, 'import collections\n'), ((123648, 123677), 'collections.defaultdict', 'collections.defaultdict', (['dict'], {}), '(dict)\n', (123671, 123677), False, 'import collections\n'), ((139103, 139132), 'collections.defaultdict', 'collections.defaultdict', (['dict'], {}), '(dict)\n', (139126, 139132), False, 'import collections\n')]
|
"""
JobMon Launcher
===============
Launches the JobMon supervisor as a daemon - generally, the usage pattern for
this module will be something like the following::
>>> from jobmon import config
>>> config_handler = config.ConfigHandler
>>> config_handler.load(SOME_FILE)
>>> run(config_handler)
"""
import logging
import os
import sys
from jobmon import (
daemon, service, command_server, event_server, status_server, ticker, util
)
# Make sure that we get console logging before the supervisor becomes a
# daemon, so if any errors occur before that, they can be seen
logging.basicConfig(level=logging.INFO,
format='%(asctime)s %(message)s')
LOGGER = logging.getLogger('jobmon.launcher')
def run_daemon(config_handler, as_daemon=True):
"""
Starts the supervisor daemon, passing to it the appropriate
configuration.
:param config.ConfigHandler config_handler: The configuration to run the \
daemon with.
:param bool as_daemon: If ``True``, then this will launch a daemon and the \
parent process will exit. If ``False``, then this will launch a daemon but \
the parent process will continue.
"""
supervisor_wrapper = SupervisorDaemon(
home_dir=config_handler.working_dir,
kill_parent=as_daemon,
stderr=config_handler.log_file)
logging.info('Sending log messages[%s] to %s',
config_handler.log_level,
config_handler.log_file)
supervisor_wrapper.start(config_handler)
def run_fork(config_handler):
"""
Starts the supervisor as a direct child process, passing to it the appropriate
configuration. This is meant for use during tests, when the child process needs
to be monitored (and possibly killed if it crashes) instead of allowed to
roam free as in the daemon case.
:param config.ConfigHandler config_handler: The configuration to run the \
supervisor with.
:return int: The PID of the child process that was launched.
"""
logging.info('Sending log messages[%s] to %s',
config_handler.log_level,
config_handler.log_file)
pid = os.fork()
if pid == 0:
LOGGER.info('In child: starting processing')
execute_supervisor(config_handler)
else:
return pid
def execute_supervisor(config_handler):
"""
Runs the supervisor according to the given configuration.
:param config.ConfigHandler config_handler: The configuration.
"""
# Read the jobs and start up the supervisor, and then make sure to
# die if we exit
try:
util.reset_loggers()
logging.basicConfig(filename=config_handler.log_file,
level=config_handler.log_level,
format='%(name)s %(asctime)s %(message)s')
supervisor_shim = service.SupervisorShim()
events = event_server.EventServer(config_handler.event_port)
restart_svr = ticker.Ticker(supervisor_shim.on_job_timer_expire)
commands = command_server.CommandServer(
config_handler.control_port, supervisor_shim)
status = status_server.StatusServer(supervisor_shim)
supervisor = service.SupervisorService(
config_handler, events, status, restart_svr)
events.start()
commands.start()
status.start()
restart_svr.start()
supervisor.start()
# This has to be done last, since it starts up the autostart
# jobs and gets the ball rolling
supervisor_shim.set_service(supervisor)
# The event server should be the last to terminate, since it
# has to tell the outside world that we're gone
LOGGER.info('Waiting for events to exit')
events.wait_for_exit()
except Exception as ex:
LOGGER.error('DEAD SUPERVISOR', exc_info=True)
finally:
LOGGER.info('Peace out!')
os._exit(0)
class SupervisorDaemon(daemon.Daemon):
def run(self, config_handler):
"""
Runs the supervisor according to the given configuration.
:param config.ConfigHandler config_handler: The configuration.
"""
LOGGER.info('Done daemonizing, launching supervisor')
execute_supervisor(config_handler)
|
[
"jobmon.service.SupervisorShim",
"logging.basicConfig",
"jobmon.service.SupervisorService",
"logging.getLogger",
"jobmon.event_server.EventServer",
"logging.info",
"jobmon.command_server.CommandServer",
"os._exit",
"jobmon.status_server.StatusServer",
"os.fork",
"jobmon.ticker.Ticker",
"jobmon.util.reset_loggers"
] |
[((593, 666), 'logging.basicConfig', 'logging.basicConfig', ([], {'level': 'logging.INFO', 'format': '"""%(asctime)s %(message)s"""'}), "(level=logging.INFO, format='%(asctime)s %(message)s')\n", (612, 666), False, 'import logging\n'), ((697, 733), 'logging.getLogger', 'logging.getLogger', (['"""jobmon.launcher"""'], {}), "('jobmon.launcher')\n", (714, 733), False, 'import logging\n'), ((1344, 1445), 'logging.info', 'logging.info', (['"""Sending log messages[%s] to %s"""', 'config_handler.log_level', 'config_handler.log_file'], {}), "('Sending log messages[%s] to %s', config_handler.log_level,\n config_handler.log_file)\n", (1356, 1445), False, 'import logging\n'), ((2014, 2115), 'logging.info', 'logging.info', (['"""Sending log messages[%s] to %s"""', 'config_handler.log_level', 'config_handler.log_file'], {}), "('Sending log messages[%s] to %s', config_handler.log_level,\n config_handler.log_file)\n", (2026, 2115), False, 'import logging\n'), ((2148, 2157), 'os.fork', 'os.fork', ([], {}), '()\n', (2155, 2157), False, 'import os\n'), ((2596, 2616), 'jobmon.util.reset_loggers', 'util.reset_loggers', ([], {}), '()\n', (2614, 2616), False, 'from jobmon import daemon, service, command_server, event_server, status_server, ticker, util\n'), ((2625, 2758), 'logging.basicConfig', 'logging.basicConfig', ([], {'filename': 'config_handler.log_file', 'level': 'config_handler.log_level', 'format': '"""%(name)s %(asctime)s %(message)s"""'}), "(filename=config_handler.log_file, level=config_handler.\n log_level, format='%(name)s %(asctime)s %(message)s')\n", (2644, 2758), False, 'import logging\n'), ((2839, 2863), 'jobmon.service.SupervisorShim', 'service.SupervisorShim', ([], {}), '()\n', (2861, 2863), False, 'from jobmon import daemon, service, command_server, event_server, status_server, ticker, util\n'), ((2881, 2932), 'jobmon.event_server.EventServer', 'event_server.EventServer', (['config_handler.event_port'], {}), '(config_handler.event_port)\n', (2905, 2932), False, 
'from jobmon import daemon, service, command_server, event_server, status_server, ticker, util\n'), ((2956, 3006), 'jobmon.ticker.Ticker', 'ticker.Ticker', (['supervisor_shim.on_job_timer_expire'], {}), '(supervisor_shim.on_job_timer_expire)\n', (2969, 3006), False, 'from jobmon import daemon, service, command_server, event_server, status_server, ticker, util\n'), ((3026, 3100), 'jobmon.command_server.CommandServer', 'command_server.CommandServer', (['config_handler.control_port', 'supervisor_shim'], {}), '(config_handler.control_port, supervisor_shim)\n', (3054, 3100), False, 'from jobmon import daemon, service, command_server, event_server, status_server, ticker, util\n'), ((3132, 3175), 'jobmon.status_server.StatusServer', 'status_server.StatusServer', (['supervisor_shim'], {}), '(supervisor_shim)\n', (3158, 3175), False, 'from jobmon import daemon, service, command_server, event_server, status_server, ticker, util\n'), ((3198, 3268), 'jobmon.service.SupervisorService', 'service.SupervisorService', (['config_handler', 'events', 'status', 'restart_svr'], {}), '(config_handler, events, status, restart_svr)\n', (3223, 3268), False, 'from jobmon import daemon, service, command_server, event_server, status_server, ticker, util\n'), ((3917, 3928), 'os._exit', 'os._exit', (['(0)'], {}), '(0)\n', (3925, 3928), False, 'import os\n')]
|
import matplotlib.pyplot as plt
import matplotlib as mpl
import numpy as np
from pylab import cm
mpl.rcParams['font.family'] = 'STIXGeneral'
plt.rcParams['xtick.labelsize'] = 16
plt.rcParams['ytick.labelsize'] = 16
plt.rcParams['font.size'] = 16
plt.rcParams['figure.figsize'] = [5.6, 4]
plt.rcParams['axes.titlesize'] = 16
plt.rcParams['axes.labelsize'] = 16
plt.rcParams['lines.linewidth'] = 2
plt.rcParams['lines.markersize'] = 6
plt.rcParams['legend.fontsize'] = 13
plt.rcParams['mathtext.fontset'] = 'stix'
plt.rcParams['axes.linewidth'] = 1
colors = cm.get_cmap('Set1', 9)
fig = plt.figure()
ax = fig.add_subplot(1, 1, 1)
ax.xaxis.set_tick_params(which='major', size=5, width=1,
direction='in', top='on')
ax.xaxis.set_tick_params(which='minor', size=3, width=1,
direction='in', top='on')
ax.yaxis.set_tick_params(which='major', size=5, width=1,
direction='in', right='on')
ax.yaxis.set_tick_params(which='minor', size=3, width=1,
direction='in', right='on')
e = 1.6e-19
x = np.loadtxt('out.dat', unpack=True)
ax.hist(x, color=colors(0), bins=500, histtype='step', density=True)
x = np.loadtxt('out2.dat', unpack=True)
ax.hist(x, color=colors(1), bins=500, histtype='step', density=True)
x = np.loadtxt('out3.dat', unpack=True)
ax.hist(x, color=colors(2), bins=500, histtype='step', density=True)
plt.tight_layout()
# plt.savefig('../figure/1a.pdf')
plt.show()
|
[
"matplotlib.pyplot.show",
"matplotlib.pyplot.figure",
"numpy.loadtxt",
"pylab.cm.get_cmap",
"matplotlib.pyplot.tight_layout"
] |
[((558, 580), 'pylab.cm.get_cmap', 'cm.get_cmap', (['"""Set1"""', '(9)'], {}), "('Set1', 9)\n", (569, 580), False, 'from pylab import cm\n'), ((588, 600), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (598, 600), True, 'import matplotlib.pyplot as plt\n'), ((1086, 1120), 'numpy.loadtxt', 'np.loadtxt', (['"""out.dat"""'], {'unpack': '(True)'}), "('out.dat', unpack=True)\n", (1096, 1120), True, 'import numpy as np\n'), ((1194, 1229), 'numpy.loadtxt', 'np.loadtxt', (['"""out2.dat"""'], {'unpack': '(True)'}), "('out2.dat', unpack=True)\n", (1204, 1229), True, 'import numpy as np\n'), ((1303, 1338), 'numpy.loadtxt', 'np.loadtxt', (['"""out3.dat"""'], {'unpack': '(True)'}), "('out3.dat', unpack=True)\n", (1313, 1338), True, 'import numpy as np\n'), ((1409, 1427), 'matplotlib.pyplot.tight_layout', 'plt.tight_layout', ([], {}), '()\n', (1425, 1427), True, 'import matplotlib.pyplot as plt\n'), ((1462, 1472), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (1470, 1472), True, 'import matplotlib.pyplot as plt\n')]
|
# coding=utf-8
"""
EXAMPLE
Example file, timer clock with in-menu options.
Copyright (C) 2017 <NAME> @ppizarror
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
"""
# Import pygame and libraries
from random import randrange
import datetime
import os
import pygame
from pygame.locals import *
# Import pygameMenu
import pygameMenu
from pygameMenu.locals import *
# Constants and global variables
ABOUT = ['PygameMenu {0}'.format(pygameMenu.__version__),
'Author: {0}'.format(pygameMenu.__author__),
TEXT_NEWLINE,
'Email: {0}'.format(pygameMenu.__email__)]
COLOR_BLUE = (12, 12, 200)
COLOR_BACKGROUND = [128, 0, 128]
COLOR_WHITE = (255, 255, 255)
FPS = 60
H_SIZE = 600 # Height of window size
HELP = ['Press ESC to enable/disable Menu',
'Press ENTER to access a Sub-Menu or use an option',
'Press UP/DOWN to move through Menu',
'Press LEFT/RIGHT to move through Selectors']
W_SIZE = 800 # Width of window size
# Init pygame
pygame.init()
os.environ['SDL_VIDEO_CENTERED'] = '1'
# Write help message on console
for m in HELP:
print(m)
# Create window
surface = pygame.display.set_mode((W_SIZE, H_SIZE))
pygame.display.set_caption('PygameMenu example')
# Main timer and game clock
clock = pygame.time.Clock()
timer = [0.0]
dt = 1.0 / FPS
timer_font = pygame.font.Font(pygameMenu.fonts.FONT_NEVIS, 100)
# Functions
def mainmenu_background():
"""
Background color of the main menu, on this function user can plot
images, play sounds, etc.
"""
surface.fill((40, 0, 40))
def reset_timer():
"""
Reset timer
"""
timer[0] = 0
def change_color_bg(c, **kwargs):
"""
Change background color
:param c: Color tuple
"""
if c == (-1, -1, -1): # If random color
c = (randrange(0, 255), randrange(0, 255), randrange(0, 255))
if kwargs['write_on_console']:
print('New background color: ({0},{1},{2})'.format(*c))
COLOR_BACKGROUND[0] = c[0]
COLOR_BACKGROUND[1] = c[1]
COLOR_BACKGROUND[2] = c[2]
# Timer menu
timer_menu = pygameMenu.Menu(surface,
window_width=W_SIZE,
window_height=H_SIZE,
font=pygameMenu.fonts.FONT_NEVIS,
title='Timer Menu',
# Adds 5px to title vertical position
title_offsety=5,
menu_alpha=85,
menu_width=600,
menu_height=int(H_SIZE / 2),
# If this menu closes (press ESC) back to main
onclose=PYGAME_MENU_RESET,
dopause=False)
timer_menu.add_option('Reset timer', reset_timer)
# Adds a selector (element that can handle functions)
timer_menu.add_selector('Change bgcolor',
# Values of selector, call to change_color_bg
[('Random', (-1, -1, -1)), # Random color
('Default', (128, 0, 128)),
('Black', (0, 0, 0)),
('Blue', COLOR_BLUE)],
# Action when changing element with left/right
onchange=None,
# Action when pressing return on a element
onreturn=change_color_bg,
# Kwargs, optional parametrs to change_color_bg function
write_on_console=True)
timer_menu.add_option('Return to Menu', PYGAME_MENU_BACK)
timer_menu.add_option('Close Menu', PYGAME_MENU_CLOSE)
# Help menu
help_menu = pygameMenu.TextMenu(surface,
window_width=W_SIZE,
window_height=H_SIZE,
font=pygameMenu.fonts.FONT_FRANCHISE,
title='Help',
# Pressing ESC button does nothing on this menu
onclose=PYGAME_MENU_DISABLE_CLOSE,
menu_color_title=(120, 45, 30),
# Background color
menu_color=(30, 50, 107),
dopause=False)
help_menu.add_option('Return to Menu', PYGAME_MENU_BACK)
for m in HELP:
help_menu.add_line(m)
# About menu
about_menu = pygameMenu.TextMenu(surface,
window_width=W_SIZE,
window_height=H_SIZE,
font=pygameMenu.fonts.FONT_NEVIS,
font_title=pygameMenu.fonts.FONT_8BIT,
title='About',
# Disable menu close (ESC button)
onclose=PYGAME_MENU_DISABLE_CLOSE,
text_fontsize=20,
font_size_title=30,
menu_color_title=COLOR_BLUE,
dopause=False)
about_menu.add_option('Return to Menu', PYGAME_MENU_BACK)
for m in ABOUT:
about_menu.add_line(m)
about_menu.add_line(TEXT_NEWLINE)
# Main menu, pauses execution of the application
menu = pygameMenu.Menu(surface,
window_width=W_SIZE,
window_height=H_SIZE,
font=pygameMenu.fonts.FONT_NEVIS,
title='Main Menu',
title_offsety=5,
menu_alpha=90,
enabled=False,
bgfun=mainmenu_background,
onclose=PYGAME_MENU_CLOSE)
menu.add_option(timer_menu.get_title(), timer_menu) # Add timer submenu
menu.add_option(help_menu.get_title(), help_menu) # Add help submenu
menu.add_option(about_menu.get_title(), about_menu) # Add about submenu
menu.add_option('Exit', PYGAME_MENU_EXIT) # Add exit function
# Main loop
while True:
# Tick
clock.tick(60)
timer[0] += dt
# Paint background
surface.fill(COLOR_BACKGROUND)
# Application events
events = pygame.event.get()
for event in events:
if event.type == QUIT:
exit()
elif event.type == KEYDOWN:
if event.key == K_ESCAPE:
if menu.is_disabled():
menu.enable()
# Draw timer
time_string = str(datetime.timedelta(seconds=int(timer[0])))
time_blit = timer_font.render(time_string, 1, COLOR_WHITE)
time_blit_size = time_blit.get_size()
surface.blit(time_blit, (
W_SIZE / 2 - time_blit_size[0] / 2, H_SIZE / 2 - time_blit_size[1] / 2))
# Execute main from principal menu if is enabled
menu.mainloop(events)
# Flip surface
pygame.display.flip()
|
[
"pygame.event.get",
"pygame.display.set_mode",
"pygameMenu.Menu",
"pygame.init",
"pygame.display.flip",
"random.randrange",
"pygame.font.Font",
"pygameMenu.TextMenu",
"pygame.display.set_caption",
"pygame.time.Clock"
] |
[((1447, 1460), 'pygame.init', 'pygame.init', ([], {}), '()\n', (1458, 1460), False, 'import pygame\n'), ((1596, 1637), 'pygame.display.set_mode', 'pygame.display.set_mode', (['(W_SIZE, H_SIZE)'], {}), '((W_SIZE, H_SIZE))\n', (1619, 1637), False, 'import pygame\n'), ((1639, 1687), 'pygame.display.set_caption', 'pygame.display.set_caption', (['"""PygameMenu example"""'], {}), "('PygameMenu example')\n", (1665, 1687), False, 'import pygame\n'), ((1728, 1747), 'pygame.time.Clock', 'pygame.time.Clock', ([], {}), '()\n', (1745, 1747), False, 'import pygame\n'), ((1793, 1843), 'pygame.font.Font', 'pygame.font.Font', (['pygameMenu.fonts.FONT_NEVIS', '(100)'], {}), '(pygameMenu.fonts.FONT_NEVIS, 100)\n', (1809, 1843), False, 'import pygame\n'), ((4213, 4458), 'pygameMenu.TextMenu', 'pygameMenu.TextMenu', (['surface'], {'window_width': 'W_SIZE', 'window_height': 'H_SIZE', 'font': 'pygameMenu.fonts.FONT_FRANCHISE', 'title': '"""Help"""', 'onclose': 'PYGAME_MENU_DISABLE_CLOSE', 'menu_color_title': '(120, 45, 30)', 'menu_color': '(30, 50, 107)', 'dopause': '(False)'}), "(surface, window_width=W_SIZE, window_height=H_SIZE,\n font=pygameMenu.fonts.FONT_FRANCHISE, title='Help', onclose=\n PYGAME_MENU_DISABLE_CLOSE, menu_color_title=(120, 45, 30), menu_color=(\n 30, 50, 107), dopause=False)\n", (4232, 4458), False, 'import pygameMenu\n'), ((4973, 5261), 'pygameMenu.TextMenu', 'pygameMenu.TextMenu', (['surface'], {'window_width': 'W_SIZE', 'window_height': 'H_SIZE', 'font': 'pygameMenu.fonts.FONT_NEVIS', 'font_title': 'pygameMenu.fonts.FONT_8BIT', 'title': '"""About"""', 'onclose': 'PYGAME_MENU_DISABLE_CLOSE', 'text_fontsize': '(20)', 'font_size_title': '(30)', 'menu_color_title': 'COLOR_BLUE', 'dopause': '(False)'}), "(surface, window_width=W_SIZE, window_height=H_SIZE,\n font=pygameMenu.fonts.FONT_NEVIS, font_title=pygameMenu.fonts.FONT_8BIT,\n title='About', onclose=PYGAME_MENU_DISABLE_CLOSE, text_fontsize=20,\n font_size_title=30, menu_color_title=COLOR_BLUE, dopause=False)\n", 
(4992, 5261), False, 'import pygameMenu\n'), ((5857, 6092), 'pygameMenu.Menu', 'pygameMenu.Menu', (['surface'], {'window_width': 'W_SIZE', 'window_height': 'H_SIZE', 'font': 'pygameMenu.fonts.FONT_NEVIS', 'title': '"""Main Menu"""', 'title_offsety': '(5)', 'menu_alpha': '(90)', 'enabled': '(False)', 'bgfun': 'mainmenu_background', 'onclose': 'PYGAME_MENU_CLOSE'}), "(surface, window_width=W_SIZE, window_height=H_SIZE, font=\n pygameMenu.fonts.FONT_NEVIS, title='Main Menu', title_offsety=5,\n menu_alpha=90, enabled=False, bgfun=mainmenu_background, onclose=\n PYGAME_MENU_CLOSE)\n", (5872, 6092), False, 'import pygameMenu\n'), ((6764, 6782), 'pygame.event.get', 'pygame.event.get', ([], {}), '()\n', (6780, 6782), False, 'import pygame\n'), ((7428, 7449), 'pygame.display.flip', 'pygame.display.flip', ([], {}), '()\n', (7447, 7449), False, 'import pygame\n'), ((2296, 2313), 'random.randrange', 'randrange', (['(0)', '(255)'], {}), '(0, 255)\n', (2305, 2313), False, 'from random import randrange\n'), ((2315, 2332), 'random.randrange', 'randrange', (['(0)', '(255)'], {}), '(0, 255)\n', (2324, 2332), False, 'from random import randrange\n'), ((2334, 2351), 'random.randrange', 'randrange', (['(0)', '(255)'], {}), '(0, 255)\n', (2343, 2351), False, 'from random import randrange\n')]
|
from tracemalloc import start
from matplotlib.pyplot import contour
from parcv.Models import Models
from datetime import datetime
from dateutil import parser
import re
from string import punctuation
from collections import Counter
import math
class ResumeParser:
def __init__(self, ner, ner_dates, zero_shot_classifier, tagger, qa_squad):
self.models = Models()
self.ner, self.ner_dates, self.zero_shot_classifier, self.tagger, self.qa_squad = ner, ner_dates, zero_shot_classifier, tagger, qa_squad
self.parsed_cv = {}
def parse(self, resume_segments):
for segment_name in resume_segments:
resume_segment = resume_segments[segment_name]
if segment_name == "contact_info":
self.new_parse_contact_info(resume_segment)
elif segment_name == "work_and_employment":
self.new_parse_job_history(resume_segment)
elif segment_name == "education_and_training":
self.parse_education_history(resume_segment)
elif segment_name == "skills":
self.parse_skills(resume_segment)
return self.parsed_cv
def parse_skills(self, resume_segment):
splitter = re.compile(r'[{}]+'.format(re.escape(punctuation)))
labels = ['technical skill', 'title', 'other']
skills = []
for item in resume_segment:
for elem in splitter.split(item):
elem_splitted = [i for i in elem.strip().split() if i and not i.isdigit() and i.isalpha()]
capitalized = all([True if i[0].isupper() else False for i in elem_splitted])
if capitalized and elem_splitted and len(elem_splitted) < 4:
candidate_skill = ' '.join(elem_splitted)
if self.belongs_to_label(candidate_skill, 'technical skill', labels):
skills.append(candidate_skill)
self.parsed_cv['Skills'] = skills
def parse_education_history(self, resume_segment):
self.parsed_cv["Education"] = []
education_info = []
questions = ["what is the university's or the school's name?", "what is the field of study?", "what is the qualification?"]
school_names = self.ask_till_stopping(resume_segment, questions[0], 'school name', 10)
school_names = sorted(school_names, key=lambda x: x[1][0])
majors = self.ask_till_stopping(resume_segment, questions[1], 'field of study', len(school_names))
qualifications = self.ask_till_stopping(resume_segment, questions[2], 'qualification', len(school_names))
major_on_right = True
qualification_on_right = True
for idx, school in enumerate(school_names):
education_item = {}
school_name, (idx1, idx2) = school
major, major_on_right = self.get_closest_item_to_school(majors, major_on_right, idx, idx1, idx2)
qualification, qualification_on_right = self.get_closest_item_to_school(qualifications, qualification_on_right, idx, idx1, idx2)
majors.remove(major)
qualifications.remove(qualification)
if major:
major = major[0]
if qualification:
qualification = qualification[0]
if "high school" in school_name.lower():
major, qualification = "", ""
education_item['School Name'] = school_name
education_item['Field of Study'] = major
education_item['Qualification'] = qualification
education_info.append(education_item)
self.parsed_cv["Education"] = education_info
def get_closest_item_to_school(self, items, right_position, idx, idx1, idx2):
closest_left = math.inf
closest_left_item = None
closest_right = math.inf
closest_right_item = None
for item in items:
st_idx, end_idx = item[1]
if end_idx <= idx1:
if idx1 - end_idx < closest_left:
closest_left = idx1 - end_idx
closest_left_item = item
elif st_idx >= idx2:
if st_idx - idx2 < closest_right:
closest_right = st_idx - idx2
closest_right_item = item
if idx == 0:
if closest_right < closest_left: right_position = True
else: right_position = False
if right_position:
if closest_right_item:
return closest_right_item, right_position
elif closest_left_item:
return closest_left_item, right_position
else:
if closest_left_item:
return closest_left_item, right_position
elif closest_right_item:
return closest_right_item, right_position
return "", right_position
def ask_till_stopping(self, resume_segment, question, category, limit):
labels = ['school name', 'field of study', 'degree', "location", "other"]
context = ' , '.join(resume_segment)
answer_idxs = []
if not context.strip(): return answer_idxs
while True:
qa_input = {'question': question, 'context': context}
out = self.qa_squad(qa_input)
start_idx, end_idx, answer = out['start'], out['end'], out['answer']
if not answer:
break
context = context.replace(context[start_idx:end_idx], "")
if not context.strip(): return answer_idxs
splitter = re.compile(r'[\s{}]+'.format(re.escape(punctuation)))
answer_splitted = splitter.split(answer)
answer_splitted = [i for i in answer_splitted if i and not i.isdigit() and i.isalpha() ]
capitalized = all([True if i[0].isupper() else False for i in answer_splitted])
if len(answer_splitted) > 2:
num_of_1 = sum([True if i[0].isupper() else False for i in answer_splitted])
capitalized = num_of_1 > len(answer_splitted)//2
if not capitalized:
break
else:
if category == 'school name':
if self.belongs_to_label(answer, category, labels):
answer_idxs.append([answer, (start_idx, end_idx)])
else:
answer_idxs.append([answer, (start_idx, end_idx)])
if len(answer_idxs) > limit:
break
return answer_idxs
def new_find_person_name(self, contact_info):
context = ' , '.join(contact_info)
qa_input = {'question': "What is the person's name?", 'context': context}
out = self.qa_squad(qa_input)
return out['answer']
def find_school_names(self, resume_segment):
labels = ["institution", "degree", "field of study"]
idx_line = []
for idx, line in enumerate(resume_segment):
splitter = re.compile(r'[\s{}]+'.format(re.escape(punctuation)))
answer_splitted = splitter.split(line)
answer_splitted = [i for i in answer_splitted if i and not i.isdigit() and i.isalpha() ]
capitalized = all([True if i[0].isupper() else False for i in answer_splitted])
if len(answer_splitted) > 2:
num_of_1 = sum([True if i[0].isupper() else False for i in answer_splitted])
capitalized = num_of_1 > len(answer_splitted)//2
if not capitalized: continue
qa_input = {'question': "What is the school's name?", 'context': line}
out = self.qa_squad(qa_input)
answer = out['answer']
if self.belongs_to_label(line, "school", labels):
if answer:
idx_line.append((idx, answer))
return idx_line
def find_job_titles(self, resume_segment):
labels = ["company", "institution", "job title", "details"]
idx_line = []
for idx, line in enumerate(resume_segment):
splitter = re.compile(r'[\s{}]+'.format(re.escape(punctuation)))
answer_splitted = splitter.split(line)
answer_splitted = [i for i in answer_splitted if i and not i.isdigit() and i.isalpha() ]
capitalized = all([True if i[0].isupper() else False for i in answer_splitted])
if len(answer_splitted) > 2:
num_of_1 = sum([True if i[0].isupper() else False for i in answer_splitted])
capitalized = num_of_1 > len(answer_splitted)//2
if not capitalized: continue
qa_input = {'question': "What is the job name?", 'context': line}
out = self.qa_squad(qa_input)
answer = out['answer']
if self.belongs_to_label(line, "job title", labels):
if answer:
idx_line.append((idx, answer))
return idx_line
def belongs_to_label(self, sequence, label, labels):
res = self.zero_shot_classifier(sequence, labels)
class_score = zip(res["labels"], res["scores"])
highest = sorted(class_score, key=lambda x: x[1])[-1]
if highest[0] == label:
return True
return False
def new_parse_contact_info(self, contact_info):
contact_info_dict = {}
name = self.new_find_person_name(contact_info)
email = self.find_contact_email(contact_info)
phone1, phone2 = self.find_phone_numbers(contact_info)
address = self.find_address(contact_info)
contact_info_dict["Email"] = email
contact_info_dict["phone1"] = phone1
contact_info_dict["phone2"] = phone2
contact_info_dict['address'] = address
self.parsed_cv['Name'] = name
self.parsed_cv['Contact Info'] = contact_info_dict
def find_phone_numbers(self, contact_info):
context = ' , '.join(contact_info)
qa_input = {'question': "What is the phone number?", 'context': context}
out = self.qa_squad(qa_input)
answer1 = out['answer']
context = context.replace(answer1, "")
qa_input = {'question': "What is the phone number?", 'context': context}
answer2 = self.qa_squad(qa_input)['answer']
count_nums = lambda x: len([i for i in x if i and i.isdigit()])
if count_nums(answer1) < 7:
answer1 = ""
if count_nums(answer2) < 7:
answer2 = ""
return answer1, answer2
def find_address(self, contact_info):
context = ' , '.join(contact_info)
qa_input = {'question': "What is the address?", 'context': context}
address = self.qa_squad(qa_input)['answer']
labels = ['address', 'email', 'phone number', 'other']
if self.belongs_to_label(address, "address",labels):
return address
else:
return ""
def parse_contact_info(self, contact_info):
contact_info_dict = {}
name = self.find_person_name(contact_info)
email = self.find_contact_email(contact_info)
self.parsed_cv['Name'] = name
contact_info_dict["Email"] = email
self.parsed_cv['Contact Info'] = contact_info_dict
def find_person_name(self, items):
class_score = []
splitter = re.compile(r'[{}]+'.format(re.escape(punctuation.replace("&", "") )))
classes = ["person name", "address", "email", "title"]
for item in items:
elements = splitter.split(item)
for element in elements:
element = ''.join(i for i in element.strip() if not i.isdigit())
if not len(element.strip().split()) > 1: continue
out = self.zero_shot_classifier(element, classes)
highest = sorted(zip(out["labels"], out["scores"]), key=lambda x: x[1])[-1]
if highest[0] == "person name":
class_score.append((element, highest[1]))
if len(class_score):
return sorted(class_score, key=lambda x: x[1], reverse=True)[0][0]
return ""
def find_contact_email(self, items):
for item in items:
match = re.search(r'[\w.+-]+@[\w-]+\.[\w.-]+', item)
if match:
return match.group(0)
return ""
def new_get_job_company(self, line1, line2, resume_segment):
context = resume_segment[line1]
if line2 <= len(resume_segment)-1:
context = context + " , " + resume_segment[line2]
qa_input = {'question': "What is the company's name?", 'context': context}
out = self.qa_squad(qa_input)
return out['answer']
def new_parse_job_history(self, resume_segment):
idx_job_title = self.find_job_titles(resume_segment)
current_and_below = False
if not len(idx_job_title):
self.parsed_cv["Job History"] = []
return
if idx_job_title[0][0] == 0: current_and_below = True
job_history = []
for ls_idx, (idx, job_title) in enumerate(idx_job_title):
job_info = {}
job_info["Job Title"] = job_title
# company
if current_and_below: line1, line2 = idx, idx+1
else: line1, line2 = idx, idx-1
job_info["Company"] = self.new_get_job_company(line1, line2, resume_segment)
if current_and_below: st_span = idx
else: st_span = idx-1
# Dates
if ls_idx == len(idx_job_title) - 1: end_span = len(resume_segment)
else: end_span = idx_job_title[ls_idx+1][0]
start, end = self.get_job_dates(st_span, end_span, resume_segment)
job_info["Start Date"] = start
job_info["End Date"] = end
job_history.append(job_info)
self.parsed_cv["Job History"] = job_history
def parse_job_history(self, resume_segment):
idx_job_title = self.get_job_titles(resume_segment)
current_and_below = False
if not len(idx_job_title):
self.parsed_cv["Job History"] = []
return
if idx_job_title[0][0] == 0: current_and_below = True
job_history = []
for ls_idx, (idx, job_title) in enumerate(idx_job_title):
job_info = {}
job_info["Job Title"] = self.filter_job_title(job_title)
# company
if current_and_below: line1, line2 = idx, idx+1
else: line1, line2 = idx, idx-1
job_info["Company"] = self.get_job_company(line1, line2, resume_segment)
if current_and_below: st_span = idx
else: st_span = idx-1
# Dates
if ls_idx == len(idx_job_title) - 1: end_span = len(resume_segment)
else: end_span = idx_job_title[ls_idx+1][0]
start, end = self.get_job_dates(st_span, end_span, resume_segment)
job_info["Start Date"] = start
job_info["End Date"] = end
job_history.append(job_info)
self.parsed_cv["Job History"] = job_history
def get_job_titles(self, resume_segment):
classes = ["organization", "institution", "company", "job title", "work details"]
idx_line = []
for idx, line in enumerate(resume_segment):
has_verb = False
line_modifed = ''.join(i for i in line if not i.isdigit())
sentence = self.models.get_flair_sentence(line_modifed)
self.tagger.predict(sentence)
tags = []
for entity in sentence.get_spans('pos'):
tags.append(entity.tag)
if entity.tag.startswith("V"):
has_verb = True
most_common_tag = max(set(tags), key=tags.count)
if most_common_tag == "NNP":
if not has_verb:
out = self.zero_shot_classifier(line, classes)
class_score = zip(out["labels"], out["scores"])
highest = sorted(class_score, key=lambda x: x[1])[-1]
if highest[0] == "job title":
idx_line.append((idx, line))
return idx_line
def get_job_dates(self, st, end, resume_segment):
search_span = resume_segment[st:end]
dates = []
for line in search_span:
for dt in self.get_ner_in_line(line, "DATE"):
if self.isvalidyear(dt.strip()):
dates.append(dt)
if len(dates): first = dates[0]
exists_second = False
if len(dates) > 1:
exists_second = True
second = dates[1]
if len(dates) > 0:
if self.has_two_dates(first):
d1, d2 = self.get_two_dates(first)
return self.format_date(d1), self.format_date(d2)
elif exists_second and self.has_two_dates(second):
d1, d2 = self.get_two_dates(second)
return self.format_date(d1), self.format_date(d2)
else:
if exists_second:
st = self.format_date(first)
end = self.format_date(second)
return st, end
else:
return (self.format_date(first), "")
else: return ("", "")
def filter_job_title(self, job_title):
job_title_splitter = re.compile(r'[{}]+'.format(re.escape(punctuation.replace("&", "") )))
job_title = ''.join(i for i in job_title if not i.isdigit())
tokens = job_title_splitter.split(job_title)
tokens = [''.join([i for i in tok.strip() if (i.isalpha() or i.strip()=="")]) for tok in tokens if tok.strip()]
classes = ["company", "organization", "institution", "job title", "responsibility", "details"]
new_title = []
for token in tokens:
if not token: continue
res = self.zero_shot_classifier(token, classes)
class_score = zip(res["labels"], res["scores"])
highest = sorted(class_score, key=lambda x: x[1])[-1]
if highest[0] == "job title":
new_title.append(token.strip())
if len(new_title):
return ', '.join(new_title)
else: return ', '.join(tokens)
def has_two_dates(self, date):
years = self.get_valid_years()
count = 0
for year in years:
if year in str(date):
count+=1
return count == 2
def get_two_dates(self, date):
years = self.get_valid_years()
idxs = []
for year in years:
if year in date:
idxs.append(date.index(year))
min_idx = min(idxs)
first = date[:min_idx+4]
second = date[min_idx+4:]
return first, second
def get_valid_years(self):
current_year = datetime.today().year
years = [str(i) for i in range(current_year-100, current_year)]
return years
def format_date(self, date):
out = self.parse_date(date)
if out:
return out
else:
date = self.clean_date(date)
out = self.parse_date(date)
if out:
return out
else:
return date
def clean_date(self, date):
try:
date = ''.join(i for i in date if i.isalnum() or i =='-' or i == '/')
return date
except:
return date
def parse_date(self, date):
try:
date = parser.parse(date)
return date.strftime("%m-%Y")
except:
try:
date = datetime(date)
return date.strftime("%m-%Y")
except:
return 0
def isvalidyear(self, date):
current_year = datetime.today().year
years = [str(i) for i in range(current_year-100, current_year)]
for year in years:
if year in str(date):
return True
return False
def get_ner_in_line(self, line, entity_type):
if entity_type == "DATE": ner = self.ner_dates
else: ner = self.ner
return [i['word'] for i in ner(line) if i['entity_group'] == entity_type]
def get_job_company(self, idx, idx1, resume_segment):
job_title = resume_segment[idx]
if not idx1 <= len(resume_segment)-1: context = ""
else:context = resume_segment[idx1]
candidate_companies = self.get_ner_in_line(job_title, "ORG") + self.get_ner_in_line(context, "ORG")
classes = ["organization", "company", "institution", "not organization", "not company", "not institution"]
scores = []
for comp in candidate_companies:
res = self.zero_shot_classifier(comp, classes)['scores']
scores.append(max(res[:3]))
sorted_cmps = sorted(zip(candidate_companies, scores), key=lambda x: x[1], reverse=True)
if len(sorted_cmps): return sorted_cmps[0][0]
return context
|
[
"dateutil.parser.parse",
"datetime.datetime.today",
"string.punctuation.replace",
"re.escape",
"datetime.datetime",
"parcv.Models.Models",
"re.search"
] |
[((367, 375), 'parcv.Models.Models', 'Models', ([], {}), '()\n', (373, 375), False, 'from parcv.Models import Models\n'), ((12113, 12160), 're.search', 're.search', (['"""[\\\\w.+-]+@[\\\\w-]+\\\\.[\\\\w.-]+"""', 'item'], {}), "('[\\\\w.+-]+@[\\\\w-]+\\\\.[\\\\w.-]+', item)\n", (12122, 12160), False, 'import re\n'), ((18782, 18798), 'datetime.datetime.today', 'datetime.today', ([], {}), '()\n', (18796, 18798), False, 'from datetime import datetime\n'), ((19456, 19474), 'dateutil.parser.parse', 'parser.parse', (['date'], {}), '(date)\n', (19468, 19474), False, 'from dateutil import parser\n'), ((19740, 19756), 'datetime.datetime.today', 'datetime.today', ([], {}), '()\n', (19754, 19756), False, 'from datetime import datetime\n'), ((1252, 1274), 're.escape', 're.escape', (['punctuation'], {}), '(punctuation)\n', (1261, 1274), False, 'import re\n'), ((5570, 5592), 're.escape', 're.escape', (['punctuation'], {}), '(punctuation)\n', (5579, 5592), False, 'import re\n'), ((6968, 6990), 're.escape', 're.escape', (['punctuation'], {}), '(punctuation)\n', (6977, 6990), False, 'import re\n'), ((8044, 8066), 're.escape', 're.escape', (['punctuation'], {}), '(punctuation)\n', (8053, 8066), False, 'import re\n'), ((11273, 11301), 'string.punctuation.replace', 'punctuation.replace', (['"""&"""', '""""""'], {}), "('&', '')\n", (11292, 11301), False, 'from string import punctuation\n'), ((17348, 17376), 'string.punctuation.replace', 'punctuation.replace', (['"""&"""', '""""""'], {}), "('&', '')\n", (17367, 17376), False, 'from string import punctuation\n'), ((19574, 19588), 'datetime.datetime', 'datetime', (['date'], {}), '(date)\n', (19582, 19588), False, 'from datetime import datetime\n')]
|
# quick scripts to generate example images for figures
import matplotlib.pyplot as plt
import seaborn as sns
import torch
import numpy as np
from functions import *
from data import *
from copy import deepcopy
import pickle
def plot_threshold_value_examples(savename):
with open(savename, 'rb') as handle:
data = pickle.load(handle)
print("LOADED")
images = data["images"]
reconstructions = data["reconstructions"]
sqdiffs = data["sqdiffs"]
print(images.shape)
print(reconstructions.shape)
print(sqdiffs.shape)
# get buckets
bucket_edges = [1,10,25,50,75,100,200,500]
bucket_elements = [[] for i in range(len(bucket_edges))]
for i,sqdiff in enumerate(sqdiffs):
# first 0th bucket
if sqdiff >=bucket_edges[0] and sqdiff <= bucket_edges[1]:
bucket_elements[0].append(i)
for j in range(len(bucket_edges)-2):
j = j+1
if sqdiff >= bucket_edges[j] and sqdiff <= bucket_edges[j+1]:
bucket_elements[j].append(i)
# final bucket
if sqdiff > bucket_edges[-1]:
bucket_elements[-1].append(i)
for b in bucket_elements:
print(len(b))
first_indices = [bucket_elements[i][0] for i in range(len(bucket_elements))]
#print(first_indices)
#setup figure
nrow = 2
ncol = len(bucket_elements)
fig, ax_array = plt.subplots(nrow, ncol, figsize=(ncol+1,nrow+1), gridspec_kw = {'wspace':0, 'hspace':0, 'top':1.-0.5/(nrow+1), 'bottom': 0.5/(nrow+1), 'left': 0.5/(ncol+1), 'right' :1-0.5/(ncol+1)})
for i,ax_row in enumerate(ax_array):
for j,axes in enumerate(ax_row):
idx = first_indices[j]
if i == 0:
axes.imshow(images[idx].transpose(1,2,0))
if i == 1:
axes.imshow(reconstructions[idx])
#axes.set_aspect("auto")
axes.set_yticklabels([])
axes.set_xticklabels([])
axes.set_xticks([])
axes.set_yticks([])
#fig.suptitle("Cifar10 Fraction Masked")
fig.subplots_adjust(wspace=0, hspace=0)
plt.subplots_adjust(wspace=0, hspace=0)
#plt.tight_layout()
plt.savefig("example_images/threshold_examples_cifar10.jpg", format="jpeg",bbox_inches = "tight", pad_inches = 0)
plt.show()
if __name__ == '__main__':
trainset_cifar, testset_cifar = get_cifar10(10000)
imgs = trainset_cifar[0][0]
print(imgs.shape)
for i in range(5):
fig = plt.figure()
plt.imshow(imgs[i].reshape(3,32,32).permute(1,2,0))
plt.xticks([])
plt.yticks([])
plt.tight_layout()
plt.savefig("example_images/img_3_" + str(i) + ".jpg")
plt.show()
# query img
img = imgs[0]
print(img.shape)
img = img.reshape(32 * 32 * 3)
halved = halve_continuous_img(img)
print(halved.shape)
fig = plt.figure()
plt.imshow(halved.permute(1,2,0))
plt.xticks([])
plt.yticks([])
plt.tight_layout()
plt.savefig("example_images/query_img_3" + str(i) + ".jpg")
plt.show()
plot_threshold_value_examples("example_reconstructions_thresholds_saved_3")
|
[
"matplotlib.pyplot.tight_layout",
"matplotlib.pyplot.show",
"matplotlib.pyplot.yticks",
"matplotlib.pyplot.figure",
"pickle.load",
"matplotlib.pyplot.subplots_adjust",
"matplotlib.pyplot.xticks",
"matplotlib.pyplot.subplots",
"matplotlib.pyplot.savefig"
] |
[((1382, 1601), 'matplotlib.pyplot.subplots', 'plt.subplots', (['nrow', 'ncol'], {'figsize': '(ncol + 1, nrow + 1)', 'gridspec_kw': "{'wspace': 0, 'hspace': 0, 'top': 1.0 - 0.5 / (nrow + 1), 'bottom': 0.5 / (\n nrow + 1), 'left': 0.5 / (ncol + 1), 'right': 1 - 0.5 / (ncol + 1)}"}), "(nrow, ncol, figsize=(ncol + 1, nrow + 1), gridspec_kw={\n 'wspace': 0, 'hspace': 0, 'top': 1.0 - 0.5 / (nrow + 1), 'bottom': 0.5 /\n (nrow + 1), 'left': 0.5 / (ncol + 1), 'right': 1 - 0.5 / (ncol + 1)})\n", (1394, 1601), True, 'import matplotlib.pyplot as plt\n'), ((2105, 2144), 'matplotlib.pyplot.subplots_adjust', 'plt.subplots_adjust', ([], {'wspace': '(0)', 'hspace': '(0)'}), '(wspace=0, hspace=0)\n', (2124, 2144), True, 'import matplotlib.pyplot as plt\n'), ((2174, 2288), 'matplotlib.pyplot.savefig', 'plt.savefig', (['"""example_images/threshold_examples_cifar10.jpg"""'], {'format': '"""jpeg"""', 'bbox_inches': '"""tight"""', 'pad_inches': '(0)'}), "('example_images/threshold_examples_cifar10.jpg', format='jpeg',\n bbox_inches='tight', pad_inches=0)\n", (2185, 2288), True, 'import matplotlib.pyplot as plt\n'), ((2292, 2302), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (2300, 2302), True, 'import matplotlib.pyplot as plt\n'), ((2878, 2890), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (2888, 2890), True, 'import matplotlib.pyplot as plt\n'), ((2933, 2947), 'matplotlib.pyplot.xticks', 'plt.xticks', (['[]'], {}), '([])\n', (2943, 2947), True, 'import matplotlib.pyplot as plt\n'), ((2952, 2966), 'matplotlib.pyplot.yticks', 'plt.yticks', (['[]'], {}), '([])\n', (2962, 2966), True, 'import matplotlib.pyplot as plt\n'), ((2971, 2989), 'matplotlib.pyplot.tight_layout', 'plt.tight_layout', ([], {}), '()\n', (2987, 2989), True, 'import matplotlib.pyplot as plt\n'), ((3058, 3068), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (3066, 3068), True, 'import matplotlib.pyplot as plt\n'), ((326, 345), 'pickle.load', 'pickle.load', (['handle'], {}), '(handle)\n', 
(337, 345), False, 'import pickle\n'), ((2487, 2499), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (2497, 2499), True, 'import matplotlib.pyplot as plt\n'), ((2568, 2582), 'matplotlib.pyplot.xticks', 'plt.xticks', (['[]'], {}), '([])\n', (2578, 2582), True, 'import matplotlib.pyplot as plt\n'), ((2591, 2605), 'matplotlib.pyplot.yticks', 'plt.yticks', (['[]'], {}), '([])\n', (2601, 2605), True, 'import matplotlib.pyplot as plt\n'), ((2614, 2632), 'matplotlib.pyplot.tight_layout', 'plt.tight_layout', ([], {}), '()\n', (2630, 2632), True, 'import matplotlib.pyplot as plt\n'), ((2704, 2714), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (2712, 2714), True, 'import matplotlib.pyplot as plt\n')]
|
'''A utility for backing up and restoring saved games.
Usage:
savman list [--backups]
savman scan [--nocache]
savman update
savman load <directory>
savman backup <directory> [<game>] [options]
savman restore <game> [<directory>] [options]
savman -h | --help
Commands:
list Show a list of games or backups
scan Perform a scan for games
update Check for a database update
load Load backups from directory
backup Backup all games to directory, or single game if specified
restore Restore game to either save location or specified directory
Options:
-h --help Display this screen
--scan Perform a scan for games
--nocache Scan without cache, slower but can find games the
regular scan missed
--update Check for database update
--max <count> Maximum number of versions to keep (default: 10)
--min <count> Number of versions to trim to when max is exceeded (default: 5)
--source <num> Game location to restore or backup from
--target <num> Game location to restore to
'''
from savman import databaseman, gameman, datapath, __version__
import sys
import os
import logging
import time
from docopt import docopt
from threading import Thread
def run():
print('savman', __version__)
if '--debug' in sys.argv:
sys.argv.remove('--debug')
log = logging.getLogger()
log.setLevel(logging.DEBUG)
args = docopt(__doc__, version='savman {}'.format(__version__))
if args['backup'] and args['<directory>'] and not os.path.isdir(args['<directory>']):
try:
os.mkdir(args['<directory>'])
except FileNotFoundError:
path = os.path.normpath(args['<directory>'])
parser.error("Could not create '{}' as directory '{}' does not exist".format(
path, os.path.dirname(path)
))
sys.exit(1)
dbman = databaseman.Manager()
dbman.latesturl = 'http://strata.me/latestdb.json'
dbname = datapath('gamedata')
#dbname = datapath('dummydataz')
if not os.path.isfile(dbname) and hasattr(sys, 'frozen'):
shutil.copy(os.path.join(sys._MEIPASS, 'gamedata'), dbname)
dbman.load(dbname)
if args['update'] or args['--update']: dbman.check_update()
if dbman.update: dbman.download(dbname)
gman = gameman.GameMan(dbman.db)
gman.cachefile = datapath('cache')
gman.customfile = datapath('custom.txt')
gman.load_custom()
gman.load_cache(dircache=not args['--nocache'], cleargames=dbman.update)
# Clear cache and rescan when database updated
if args['scan'] or args['--scan'] or dbman.update: gman.find_games()
if args['load']:
gman.load_backups(args['<directory>'])
if args['restore']:
try:
gman.restore_game(args['<game>'], args['<directory>'], args['--source'],
args['--target'])
except gameman.InvalidIdError as e:
logging.error("Could not restore '{}': {}".format(args['<game>'], e))
sys.exit(1)
gman.save_cache()
if args['list'] and gman.games:
maxname = max([len(game.name) for game in gman.games.values()])
maxid = max([len(game.id) for game in gman.games.values()])
print('\nName', ' '*(maxname-4), 'ID', ' '*(maxid-2), 'Locations')
print('-'*(maxname)+' ', '-'*(maxid)+' ', '-'*(maxid))
for item, data in sorted(gman.games.items()):
locnum = len(data.locations)
for index, location in enumerate(data.locations):
#bak = Backup()
#bak.build(location.path, location.include, location.exclude)
#size = bak.curver.size/1000
#if size < 1000: sizet = ' ({} KB)'.format(round(size))
#else: sizet = ' ({} MB)'.format(round((size/1000), 1))
namelen = len(data.name)
idlen = len(data.id)
if locnum > 1: prefix = '[{}] '.format(index+1)
else: prefix = ''
if index == 0:
print(data.name, ' '*((maxname-namelen)+2), data.id,
' '*((maxid-idlen)+2), prefix, location.path, sep='')
else: print(' '*(maxname+2), ' '*(maxid+2), prefix, location.path, sep='')
#print('*', location.path, sizet)
print('\n{} games in total.\n'.format(len(gman.games)))
if args['backup'] and args['<directory>']:
if args['<game>']: game = [args['<game>']]
else: game = None
minver = 5
maxver = 10
try:
if args['--min']: minver = int(args['--min'])
if args['--max']: maxver = int(args['--max'])
except ValueError:
logging.error("Argument for '--max' and '--min' must be a number")
sys.exit(1)
if minver >= maxver:
logging.error("Value for '--min' must be under '--max' (min: {}, max: {})".format(
minver, maxver
))
sys.exit(1)
gman.backup_games(args['<directory>'], games=game, trim_min=minver, trim_max=maxver)
logging.info('Finished!')
|
[
"os.mkdir",
"logging.error",
"os.path.isdir",
"os.path.dirname",
"sys.argv.remove",
"savman.gameman.GameMan",
"logging.info",
"os.path.isfile",
"savman.databaseman.Manager",
"os.path.normpath",
"savman.datapath",
"sys.exit",
"os.path.join",
"logging.getLogger"
] |
[((2000, 2021), 'savman.databaseman.Manager', 'databaseman.Manager', ([], {}), '()\n', (2019, 2021), False, 'from savman import databaseman, gameman, datapath, __version__\n'), ((2090, 2110), 'savman.datapath', 'datapath', (['"""gamedata"""'], {}), "('gamedata')\n", (2098, 2110), False, 'from savman import databaseman, gameman, datapath, __version__\n'), ((2423, 2448), 'savman.gameman.GameMan', 'gameman.GameMan', (['dbman.db'], {}), '(dbman.db)\n', (2438, 2448), False, 'from savman import databaseman, gameman, datapath, __version__\n'), ((2470, 2487), 'savman.datapath', 'datapath', (['"""cache"""'], {}), "('cache')\n", (2478, 2487), False, 'from savman import databaseman, gameman, datapath, __version__\n'), ((2510, 2532), 'savman.datapath', 'datapath', (['"""custom.txt"""'], {}), "('custom.txt')\n", (2518, 2532), False, 'from savman import databaseman, gameman, datapath, __version__\n'), ((5256, 5281), 'logging.info', 'logging.info', (['"""Finished!"""'], {}), "('Finished!')\n", (5268, 5281), False, 'import logging\n'), ((1407, 1433), 'sys.argv.remove', 'sys.argv.remove', (['"""--debug"""'], {}), "('--debug')\n", (1422, 1433), False, 'import sys\n'), ((1448, 1467), 'logging.getLogger', 'logging.getLogger', ([], {}), '()\n', (1465, 1467), False, 'import logging\n'), ((1628, 1662), 'os.path.isdir', 'os.path.isdir', (["args['<directory>']"], {}), "(args['<directory>'])\n", (1641, 1662), False, 'import os\n'), ((1689, 1718), 'os.mkdir', 'os.mkdir', (["args['<directory>']"], {}), "(args['<directory>'])\n", (1697, 1718), False, 'import os\n'), ((2159, 2181), 'os.path.isfile', 'os.path.isfile', (['dbname'], {}), '(dbname)\n', (2173, 2181), False, 'import os\n'), ((2230, 2268), 'os.path.join', 'os.path.join', (['sys._MEIPASS', '"""gamedata"""'], {}), "(sys._MEIPASS, 'gamedata')\n", (2242, 2268), False, 'import os\n'), ((5138, 5149), 'sys.exit', 'sys.exit', (['(1)'], {}), '(1)\n', (5146, 5149), False, 'import sys\n'), ((1772, 1809), 'os.path.normpath', 'os.path.normpath', 
(["args['<directory>']"], {}), "(args['<directory>'])\n", (1788, 1809), False, 'import os\n'), ((1971, 1982), 'sys.exit', 'sys.exit', (['(1)'], {}), '(1)\n', (1979, 1982), False, 'import sys\n'), ((3123, 3134), 'sys.exit', 'sys.exit', (['(1)'], {}), '(1)\n', (3131, 3134), False, 'import sys\n'), ((4864, 4930), 'logging.error', 'logging.error', (['"""Argument for \'--max\' and \'--min\' must be a number"""'], {}), '("Argument for \'--max\' and \'--min\' must be a number")\n', (4877, 4930), False, 'import logging\n'), ((4943, 4954), 'sys.exit', 'sys.exit', (['(1)'], {}), '(1)\n', (4951, 4954), False, 'import sys\n'), ((1922, 1943), 'os.path.dirname', 'os.path.dirname', (['path'], {}), '(path)\n', (1937, 1943), False, 'import os\n')]
|
from bsm.config.util import detect_package
from bsm.operation import Base
class DetectPackage(Base):
def execute(self, directory):
return detect_package(directory, self._config['package_runtime'])
|
[
"bsm.config.util.detect_package"
] |
[((152, 210), 'bsm.config.util.detect_package', 'detect_package', (['directory', "self._config['package_runtime']"], {}), "(directory, self._config['package_runtime'])\n", (166, 210), False, 'from bsm.config.util import detect_package\n')]
|
# Generated by Django 3.1.12 on 2021-07-28 18:28
from django.db import migrations, models
import django.db.models.deletion
import uuid
class Migration(migrations.Migration):
initial = True
dependencies = [
]
operations = [
migrations.CreateModel(
name='TermsOfAccess',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('created', models.DateTimeField(auto_now_add=True)),
('updated', models.DateTimeField(auto_now=True)),
('object_id', models.UUIDField(default=uuid.uuid4, editable=False)),
('name', models.CharField(max_length=256)),
('active', models.BooleanField(default=True)),
('description', models.TextField()),
('version', models.FloatField()),
('notes', models.TextField(blank=True)),
],
options={
'verbose_name': 'Terms of Access',
'verbose_name_plural': 'Terms of Access',
'ordering': ('active', 'name'),
},
),
migrations.CreateModel(
name='TermsOfAccessLog',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('created', models.DateTimeField(auto_now_add=True)),
('updated', models.DateTimeField(auto_now=True)),
('object_id', models.UUIDField(default=uuid.uuid4, editable=False)),
('terms_of_access', models.ForeignKey(on_delete=django.db.models.deletion.PROTECT, to='terms_of_access.termsofaccess')),
],
options={
'abstract': False,
},
),
]
|
[
"django.db.models.TextField",
"django.db.models.UUIDField",
"django.db.models.CharField",
"django.db.models.ForeignKey",
"django.db.models.FloatField",
"django.db.models.BooleanField",
"django.db.models.AutoField",
"django.db.models.DateTimeField"
] |
[((355, 448), 'django.db.models.AutoField', 'models.AutoField', ([], {'auto_created': '(True)', 'primary_key': '(True)', 'serialize': '(False)', 'verbose_name': '"""ID"""'}), "(auto_created=True, primary_key=True, serialize=False,\n verbose_name='ID')\n", (371, 448), False, 'from django.db import migrations, models\n'), ((475, 514), 'django.db.models.DateTimeField', 'models.DateTimeField', ([], {'auto_now_add': '(True)'}), '(auto_now_add=True)\n', (495, 514), False, 'from django.db import migrations, models\n'), ((545, 580), 'django.db.models.DateTimeField', 'models.DateTimeField', ([], {'auto_now': '(True)'}), '(auto_now=True)\n', (565, 580), False, 'from django.db import migrations, models\n'), ((613, 665), 'django.db.models.UUIDField', 'models.UUIDField', ([], {'default': 'uuid.uuid4', 'editable': '(False)'}), '(default=uuid.uuid4, editable=False)\n', (629, 665), False, 'from django.db import migrations, models\n'), ((693, 725), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(256)'}), '(max_length=256)\n', (709, 725), False, 'from django.db import migrations, models\n'), ((755, 788), 'django.db.models.BooleanField', 'models.BooleanField', ([], {'default': '(True)'}), '(default=True)\n', (774, 788), False, 'from django.db import migrations, models\n'), ((823, 841), 'django.db.models.TextField', 'models.TextField', ([], {}), '()\n', (839, 841), False, 'from django.db import migrations, models\n'), ((872, 891), 'django.db.models.FloatField', 'models.FloatField', ([], {}), '()\n', (889, 891), False, 'from django.db import migrations, models\n'), ((920, 948), 'django.db.models.TextField', 'models.TextField', ([], {'blank': '(True)'}), '(blank=True)\n', (936, 948), False, 'from django.db import migrations, models\n'), ((1284, 1377), 'django.db.models.AutoField', 'models.AutoField', ([], {'auto_created': '(True)', 'primary_key': '(True)', 'serialize': '(False)', 'verbose_name': '"""ID"""'}), "(auto_created=True, primary_key=True, 
serialize=False,\n verbose_name='ID')\n", (1300, 1377), False, 'from django.db import migrations, models\n'), ((1404, 1443), 'django.db.models.DateTimeField', 'models.DateTimeField', ([], {'auto_now_add': '(True)'}), '(auto_now_add=True)\n', (1424, 1443), False, 'from django.db import migrations, models\n'), ((1474, 1509), 'django.db.models.DateTimeField', 'models.DateTimeField', ([], {'auto_now': '(True)'}), '(auto_now=True)\n', (1494, 1509), False, 'from django.db import migrations, models\n'), ((1542, 1594), 'django.db.models.UUIDField', 'models.UUIDField', ([], {'default': 'uuid.uuid4', 'editable': '(False)'}), '(default=uuid.uuid4, editable=False)\n', (1558, 1594), False, 'from django.db import migrations, models\n'), ((1633, 1736), 'django.db.models.ForeignKey', 'models.ForeignKey', ([], {'on_delete': 'django.db.models.deletion.PROTECT', 'to': '"""terms_of_access.termsofaccess"""'}), "(on_delete=django.db.models.deletion.PROTECT, to=\n 'terms_of_access.termsofaccess')\n", (1650, 1736), False, 'from django.db import migrations, models\n')]
|
import gym
env_name = "CartPole-v0"
env_name = "Ant-v2"
env = gym.make(env_name)
class Agent:
def __init__(self, env):
self.action_space = env.action_space
def get_action(self, obs):
return self.action_space.sample()
env.reset()
agent = Agent(env)
for i_episode in range(10):
state = env.reset()
for t in range(100):
env.render()
action = agent.get_action(state)
next_state, reward, done, info = env.step(action)
if done:
print("Episode finished after {} timesteps".format(t+1))
break
env.close()
|
[
"gym.make"
] |
[((62, 80), 'gym.make', 'gym.make', (['env_name'], {}), '(env_name)\n', (70, 80), False, 'import gym\n')]
|
import curses
import os
import sys
import time
class Board:
column_width = 6
blank_column_line = "{}|".format(" " * column_width)
column_divider = "{}+".format("-" * column_width)
def __init__(self, boardSupplier):
self.boardSupplier = boardSupplier
board = boardSupplier()
self.divider = "\r+{0}".format(self.column_divider * len(board))
self.column_separators = "\r|{0}".format(self.blank_column_line * len(board))
self.width = len(self.column_separators)
curses.initscr()
self.window = curses.newwin(20, self.width, 0, 0)
self.window.keypad(1)
curses.noecho()
def draw_board(self):
""" It will (re)print the string representation of the board """
x = 1
for _, columns in enumerate(self.boardSupplier()):
self.window.addstr(x, 0, self.divider)
self.window.addstr(x + 1, 0, self.column_separators)
self.draw_board_line_with_value(x + 2, columns)
self.window.addstr(x + 3, 0, self.column_separators)
x += 4
self.window.addstr(x, 0, self.divider)
self.window.addstr(x + 2, 0, "Q - exit; Arrows for movement")
def draw_board_line_with_value(self, x, columns):
line = "|"
for num in columns:
if num == 0:
line += self.blank_column_line
else:
space_remainder = self.column_width - len(str(num))
line += "{0}{1}{2}|".format(" " * (space_remainder//2 + space_remainder % 2), num, " " * (space_remainder//2))
self.window.addstr(x, 0, line)
|
[
"curses.noecho",
"curses.initscr",
"curses.newwin"
] |
[((534, 550), 'curses.initscr', 'curses.initscr', ([], {}), '()\n', (548, 550), False, 'import curses\n'), ((573, 608), 'curses.newwin', 'curses.newwin', (['(20)', 'self.width', '(0)', '(0)'], {}), '(20, self.width, 0, 0)\n', (586, 608), False, 'import curses\n'), ((647, 662), 'curses.noecho', 'curses.noecho', ([], {}), '()\n', (660, 662), False, 'import curses\n')]
|
"""
Utilities for solving different problems in `eo-grow` package structure, which are mostly a pure Python magic.
"""
from __future__ import annotations
import importlib
import inspect
from typing import TYPE_CHECKING, Any, Dict, Type
if TYPE_CHECKING:
from ..core.pipeline import Pipeline
from ..core.schemas import BaseSchema
_PIPELINE_PARAM_NAME = "pipeline"
def load_pipeline_class(config: dict) -> Type[Pipeline]:
"""Given a config object it loads the pipeline class referenced in the config"""
pipeline_class_name = config.get(_PIPELINE_PARAM_NAME)
if pipeline_class_name is None:
raise ValueError(f"Config file is missing '{_PIPELINE_PARAM_NAME}' parameter, don't know which pipeline to use")
pipeline_class = import_object(pipeline_class_name)
return pipeline_class
def collect_schema(object_with_schema: Any) -> Type[BaseSchema]:
"""A utility that collects a schema from the given object.
The object is expected to hold a unique internal class which inherits from `BaseSchema`. Example:
class MyObject:
class Schema(BaseSchema):
...
This utility would provide `MySchema`. It works also if `MyObject` inherits from a class that holds the schema.
"""
class_with_schema = object_with_schema if inspect.isclass(object_with_schema) else object_with_schema.__class__
try:
return class_with_schema.Schema
except AttributeError as exception:
raise SyntaxError(
f"Class {class_with_schema} is missing a schema. Each EOGrowObject class needs to contain a pydantic "
"model named `Schema`."
) from exception
def import_object(import_path: str) -> Any:
"""Imports an object from a given import path"""
if "." not in import_path:
raise ValueError(f"Import path {import_path} doesn't reference an object in a module.")
module_name, object_name = import_path.rsplit(".", 1)
try:
module = importlib.import_module(module_name)
except ModuleNotFoundError as exception:
raise ModuleNotFoundError(f"{exception}. Given import path '{import_path}' is invalid.") from exception
if hasattr(module, object_name):
return getattr(module, object_name)
raise ImportError(
f"Cannot import name '{object_name}' from {module_name} ({module.__file__}). Given import path "
f"'{import_path}' is invalid."
)
def get_os_import_path(import_path: str) -> str:
"""For a Python import path it provides OS import path.
E.g. `eogrow.utils.meta` -> `/home/ubuntu/.../eogrow/utils/meta.py`
"""
module_spec = importlib.util.find_spec(import_path)
if module_spec is not None and module_spec.origin is not None:
return module_spec.origin
raise ValueError(f"Given import path {import_path} not found")
def get_package_versions() -> Dict[str, str]:
"""A utility function that provides dependency package versions
At the moment it is and experimental utility. Everything is under try-catch in case something goes wrong
:return: A dictionary with versions
"""
try:
import pkg_resources
dependency_packages = ["eogrow"] + [
requirement.name for requirement in pkg_resources.working_set.by_key["eogrow"].requires() # type: ignore
]
return {name: pkg_resources.get_distribution(name).version for name in dependency_packages}
except BaseException as ex:
return {"error": repr(ex)}
|
[
"pkg_resources.get_distribution",
"importlib.util.find_spec",
"inspect.isclass",
"importlib.import_module"
] |
[((2626, 2663), 'importlib.util.find_spec', 'importlib.util.find_spec', (['import_path'], {}), '(import_path)\n', (2650, 2663), False, 'import importlib\n'), ((1292, 1327), 'inspect.isclass', 'inspect.isclass', (['object_with_schema'], {}), '(object_with_schema)\n', (1307, 1327), False, 'import inspect\n'), ((1966, 2002), 'importlib.import_module', 'importlib.import_module', (['module_name'], {}), '(module_name)\n', (1989, 2002), False, 'import importlib\n'), ((3342, 3378), 'pkg_resources.get_distribution', 'pkg_resources.get_distribution', (['name'], {}), '(name)\n', (3372, 3378), False, 'import pkg_resources\n')]
|
from enum import Enum
from mapperpy.one_way_mapper import OneWayMapper
__author__ = 'lgrech'
class MappingDirection(Enum):
left_to_right = 1
right_to_left = 2
class ObjectMapper(object):
def __init__(self, from_left_mapper, from_right_mapper):
"""
:param from_left_mapper:
:type from_left_mapper: OneWayMapper
:param from_right_mapper:
:type from_right_mapper: OneWayMapper
"""
self.__from_left_mapper = from_left_mapper
self.__from_right_mapper = from_right_mapper
@classmethod
def from_class(cls, left_class, right_class):
return ObjectMapper(
OneWayMapper.for_target_class(right_class),
OneWayMapper.for_target_class(left_class))
@classmethod
def from_prototype(cls, left_proto_obj, right_proto_obj):
return ObjectMapper(
OneWayMapper.for_target_prototype(right_proto_obj),
OneWayMapper.for_target_prototype(left_proto_obj))
@classmethod
def for_dict(cls, left_proto_obj):
return ObjectMapper(
OneWayMapper.for_target_prototype(left_proto_obj.__dict__),
OneWayMapper.for_target_prototype(left_proto_obj))
def map(self, obj):
if isinstance(obj, self.__from_right_mapper.target_class):
return self.__from_left_mapper.map(obj)
elif isinstance(obj, self.__from_left_mapper.target_class):
return self.__from_right_mapper.map(obj)
raise ValueError("This mapper does not support {} class".format(obj.__class__.__name__))
def map_attr_name(self, attr_name):
"""
:type attr_name: basestring
:rtype: basestring
"""
mapped_name = self.__get_mapped_name(self.__from_left_mapper, attr_name)
if mapped_name and self.__get_mapped_name(self.__from_right_mapper, mapped_name) == attr_name:
return mapped_name
mapped_name = self.__get_mapped_name(self.__from_right_mapper, attr_name)
if mapped_name and self.__get_mapped_name(self.__from_left_mapper, mapped_name) == attr_name:
return mapped_name
raise ValueError("Can't find mapping for attribute name: {}".format(attr_name))
def map_attr_value(self, attr_name, attr_value, mapping_direction=None, target_class=None):
"""
:type attr_name: basestring
:type attr_value: object
:type mapping_direction: MappingDirection
:type target_class: type
:rtype: object
"""
if mapping_direction is not None and target_class is not None\
or mapping_direction is None and target_class is None:
raise ValueError("Either mapping direction or target class has to be set (not both)")
if mapping_direction and mapping_direction == MappingDirection.left_to_right \
or target_class and target_class == self.__from_left_mapper.target_class:
mapped_name = self.__get_mapped_name(self.__from_left_mapper, attr_name)
if mapped_name and self.__get_mapped_name(self.__from_right_mapper, mapped_name) == attr_name:
return self.__from_left_mapper.map_attr_value(attr_name, attr_value)
elif mapping_direction and mapping_direction == MappingDirection.right_to_left \
or target_class and target_class == self.__from_right_mapper.target_class:
mapped_name = self.__get_mapped_name(self.__from_right_mapper, attr_name)
if mapped_name and self.__get_mapped_name(self.__from_left_mapper, mapped_name) == attr_name:
return self.__from_right_mapper.map_attr_value(attr_name, attr_value)
raise ValueError(
"Can't find mapping for attribute name: {}, direction: {}, target class: {}".format(
attr_name, mapping_direction, target_class.__name__ if target_class else None))
def custom_mappings(self, mapping_dict):
mapping, rev_mapping = self.__get_explicit_mapping(mapping_dict)
self.__from_left_mapper.custom_mappings(mapping)
self.__from_right_mapper.custom_mappings(rev_mapping)
return self
def nested_mapper(self, mapper):
if not isinstance(mapper, ObjectMapper):
raise ValueError("Nested mapper has to be an instance of {}, {} found".format(
ObjectMapper.__name__, mapper.__class__.__name__))
left_type = mapper.__from_right_mapper.target_class
self.__from_left_mapper.nested_mapper(mapper.__from_left_mapper, left_type)
right_type = mapper.__from_left_mapper.target_class
self.__from_right_mapper.nested_mapper(mapper.__from_right_mapper, right_type)
return self
def left_initializers(self, initializers_dict):
self.__from_right_mapper.target_initializers(initializers_dict)
return self
def right_initializers(self, initializers_dict):
self.__from_left_mapper.target_initializers(initializers_dict)
return self
def value_converters(self, converters_dict):
to_right_converters, to_left_converters = self.__split_converters(converters_dict)
self.__from_left_mapper.target_value_converters(to_right_converters)
self.__from_right_mapper.target_value_converters(to_left_converters)
return self
def options(self, option):
self.__from_left_mapper.options(option)
self.__from_right_mapper.options(option)
return self
def __repr__(self):
return "{}->{}".format(self.__from_right_mapper.target_class, self.__from_left_mapper.target_class)
@classmethod
def __get_mapped_name(cls, one_way_mapper, attr_name):
try:
return one_way_mapper.map_attr_name(attr_name)
except ValueError:
return None
@classmethod
def __get_explicit_mapping(cls, input_mapping):
mapping = {}
rev_mapping = {}
for left, right in input_mapping.items():
if right is None:
# user requested to suppress implicit mapping for k
mapping[left] = rev_mapping[left] = None
else:
mapping[left] = right
rev_mapping[right] = left
return mapping, rev_mapping
def __split_converters(self, converters_dict):
to_right_converters = {}
to_left_converters = {}
for left_attr_name, converters_tuple in converters_dict.iteritems():
if not isinstance(converters_tuple, tuple) or len(converters_tuple) != 2:
raise ValueError("Converters for {} should be provided in a 2-element tuple".format(left_attr_name))
to_right_converters[left_attr_name] = converters_tuple[0]
to_left_converters[self.__from_left_mapper.map_attr_name(left_attr_name)] = converters_tuple[1]
return to_right_converters, to_left_converters
|
[
"mapperpy.one_way_mapper.OneWayMapper.for_target_class",
"mapperpy.one_way_mapper.OneWayMapper.for_target_prototype"
] |
[((657, 699), 'mapperpy.one_way_mapper.OneWayMapper.for_target_class', 'OneWayMapper.for_target_class', (['right_class'], {}), '(right_class)\n', (686, 699), False, 'from mapperpy.one_way_mapper import OneWayMapper\n'), ((713, 754), 'mapperpy.one_way_mapper.OneWayMapper.for_target_class', 'OneWayMapper.for_target_class', (['left_class'], {}), '(left_class)\n', (742, 754), False, 'from mapperpy.one_way_mapper import OneWayMapper\n'), ((877, 927), 'mapperpy.one_way_mapper.OneWayMapper.for_target_prototype', 'OneWayMapper.for_target_prototype', (['right_proto_obj'], {}), '(right_proto_obj)\n', (910, 927), False, 'from mapperpy.one_way_mapper import OneWayMapper\n'), ((941, 990), 'mapperpy.one_way_mapper.OneWayMapper.for_target_prototype', 'OneWayMapper.for_target_prototype', (['left_proto_obj'], {}), '(left_proto_obj)\n', (974, 990), False, 'from mapperpy.one_way_mapper import OneWayMapper\n'), ((1090, 1148), 'mapperpy.one_way_mapper.OneWayMapper.for_target_prototype', 'OneWayMapper.for_target_prototype', (['left_proto_obj.__dict__'], {}), '(left_proto_obj.__dict__)\n', (1123, 1148), False, 'from mapperpy.one_way_mapper import OneWayMapper\n'), ((1162, 1211), 'mapperpy.one_way_mapper.OneWayMapper.for_target_prototype', 'OneWayMapper.for_target_prototype', (['left_proto_obj'], {}), '(left_proto_obj)\n', (1195, 1211), False, 'from mapperpy.one_way_mapper import OneWayMapper\n')]
|
""" Configurable recurrent cell. """
import copy
from pydoc import locate
import torch
from torch.autograd import Variable
import torch.nn as nn
from torch.nn.utils.rnn import PackedSequence, pack_padded_sequence, pad_packed_sequence
from benri.configurable import Configurable
class RNN(nn.Module, Configurable):
def __init__(self, rnn=None, params={}):
nn.Module.__init__(self)
Configurable.__init__(self, params=params)
# Check for unimplemented conditions.
if self.params["bidirectional"]:
raise ValueError("Bidirectional not implemented.")
if self.params["n_layers"] != 1:
raise ValueError("More than 1 layer not implemented.")
if rnn:
rnn = self.rnn
else:
# Locate and build the cell.
cell_ctor = locate("torch.nn.{}".format(self.params["cell_type"]))
if cell_ctor is None:
raise ValueError("Unknown RNN cell: {}".format(self.params["cell_type"]))
self.rnn = cell_ctor(
input_size=self.params["input_size"],
hidden_size=self.params["hidden_size"],
num_layers=self.params["n_layers"],
batch_first=True)
def forward(self, x, state):
""" Wraps the RNN's forward call.
:param x: PackedSequence, or [B, S, E].
:param state: [B, H]
:return: Tuple
- Outputs:
- State:
"""
assert isinstance(x, PackedSequence) or x.shape[0] == state.shape[0]
# Add the sequence dimension to the hidden state. [B, E] -> [S, B, E].
state = state.unsqueeze(0)
if self.params["cell_type"] == "LSTM":
state = torch.split(state, self.params["hidden_size"], dim=2)
y, state = self.rnn(x, state)
if self.params["cell_type"] == "LSTM":
state = torch.cat(state, dim=2)
# Remove the N-layers/bidirectional dimension from the hidden state.
state = state.squeeze(0)
return y, state
def init_state(self, batch_size):
""" Get a an initial zero state.
:param batch_size: Number of examples in the batch.
:return: Initial RNN state of zeros.
"""
if self.params["cell_type"] == "LSTM":
state_shape = [batch_size, self.params["hidden_size"] * 2]
else:
state_shape = [batch_size, self.params["hidden_size"]]
state = Variable(torch.zeros(state_shape), requires_grad=False).float()
return state
@property
def hidden_size(self):
if self.params["cell_type"] == "LSTM":
return 2 * self.params["hidden_size"]
else:
return self.params["hidden_size"]
@property
def output_size(self):
return self.params["hidden_size"]
@staticmethod
def default_params():
return {
"cell_type": "LSTM",
"input_size": 100,
"hidden_size": 100,
"n_layers": 1,
"bidirectional": False}
|
[
"torch.split",
"torch.cat",
"torch.nn.Module.__init__",
"torch.zeros",
"benri.configurable.Configurable.__init__"
] |
[((372, 396), 'torch.nn.Module.__init__', 'nn.Module.__init__', (['self'], {}), '(self)\n', (390, 396), True, 'import torch.nn as nn\n'), ((405, 447), 'benri.configurable.Configurable.__init__', 'Configurable.__init__', (['self'], {'params': 'params'}), '(self, params=params)\n', (426, 447), False, 'from benri.configurable import Configurable\n'), ((1755, 1808), 'torch.split', 'torch.split', (['state', "self.params['hidden_size']"], {'dim': '(2)'}), "(state, self.params['hidden_size'], dim=2)\n", (1766, 1808), False, 'import torch\n'), ((1916, 1939), 'torch.cat', 'torch.cat', (['state'], {'dim': '(2)'}), '(state, dim=2)\n', (1925, 1939), False, 'import torch\n'), ((2507, 2531), 'torch.zeros', 'torch.zeros', (['state_shape'], {}), '(state_shape)\n', (2518, 2531), False, 'import torch\n')]
|
import bpy
import os
# pwd = os.getcwd()
pwd = os.path.dirname(os.path.realpath(__file__))
def loadShader(shaderName, mesh):
# switch to different shader names
if shaderName is "EeveeToon":
bpy.context.scene.render.engine = 'BLENDER_EEVEE'
bpy.context.scene.render.alpha_mode = 'TRANSPARENT'
matName = "ToonShade_EV"
blenderFileName = 'EeveeToon.blend'
elif shaderName is "ColoredSteel":
matName = "Blued_Steel"
blenderFileName = 'ColoredSteel.blend'
elif shaderName is "Wax":
matName = "Wax_PBR_SSS"
blenderFileName = 'Wax.blend'
elif shaderName is "Wood":
matName = "UCP wood-v-1-1"
blenderFileName = 'UCPWood.blend' # createy by Elbriga
# load shaders to the mesh
path = pwd + '/../../shaders/' + blenderFileName + "\\Material\\"
bpy.ops.wm.append(filename=matName, directory=path)
mat = bpy.data.materials.get(matName)
mesh.data.materials.append(mat)
mesh.active_material = mat
tree = mat.node_tree
matNode = tree.nodes[-1]
return matNode
|
[
"bpy.data.materials.get",
"os.path.realpath",
"bpy.ops.wm.append"
] |
[((63, 89), 'os.path.realpath', 'os.path.realpath', (['__file__'], {}), '(__file__)\n', (79, 89), False, 'import os\n'), ((847, 898), 'bpy.ops.wm.append', 'bpy.ops.wm.append', ([], {'filename': 'matName', 'directory': 'path'}), '(filename=matName, directory=path)\n', (864, 898), False, 'import bpy\n'), ((909, 940), 'bpy.data.materials.get', 'bpy.data.materials.get', (['matName'], {}), '(matName)\n', (931, 940), False, 'import bpy\n')]
|
from flakon import JsonBlueprint
from cb_news.news_extractor.database import *
from flask import request
import logging
report_handler = JsonBlueprint('report_handler', __name__)
logging.basicConfig(format='%(asctime)s - %(name)s - %(levelname)s - %(message)s',
level=logging.INFO)
logger = logging.getLogger(__name__)
@report_handler.route('/report', methods=["POST"])
def report_view():
"""Home view.
This view will return an empty JSON mapping.
"""
data = request.get_json()
logger.info("Appending new report %s", data)
new_report = Report()
message = ""
if "id" in data:
new_report.id = data["id"]
if "description" in data:
new_report.description = data["description"]
message += "\nDescription updated"
if "author" in data:
new_report.author = data["author"]
message += "\nAuthor updated"
if "attachment" in data:
att_list = []
for att in data["attachment"]:
att_list.append(Attachment(url=att["url"]))
print(att_list)
report = get_or_create_report(new_report)
return {"id": report.id, "message": message}
@report_handler.route('/report/all', methods=["GET"])
def all_reports():
return {"reports": get_all_saved_reports()}
|
[
"flask.request.get_json",
"logging.getLogger",
"flakon.JsonBlueprint",
"logging.basicConfig"
] |
[((138, 179), 'flakon.JsonBlueprint', 'JsonBlueprint', (['"""report_handler"""', '__name__'], {}), "('report_handler', __name__)\n", (151, 179), False, 'from flakon import JsonBlueprint\n'), ((181, 288), 'logging.basicConfig', 'logging.basicConfig', ([], {'format': '"""%(asctime)s - %(name)s - %(levelname)s - %(message)s"""', 'level': 'logging.INFO'}), "(format=\n '%(asctime)s - %(name)s - %(levelname)s - %(message)s', level=logging.INFO)\n", (200, 288), False, 'import logging\n'), ((314, 341), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (331, 341), False, 'import logging\n'), ((501, 519), 'flask.request.get_json', 'request.get_json', ([], {}), '()\n', (517, 519), False, 'from flask import request\n')]
|
from django.contrib import admin
from .models import Team, Kpi, KpiValue, Organization
# Register your models here.
@admin.register(Organization)
class OrganizationAdmin(admin.ModelAdmin):
pass
@admin.register(Team)
class TeamAdmin(admin.ModelAdmin):
pass
@admin.register(Kpi)
class KpiAdmin(admin.ModelAdmin):
pass
@admin.register(KpiValue)
class KpiAdmin(admin.ModelAdmin):
pass
|
[
"django.contrib.admin.register"
] |
[((120, 148), 'django.contrib.admin.register', 'admin.register', (['Organization'], {}), '(Organization)\n', (134, 148), False, 'from django.contrib import admin\n'), ((203, 223), 'django.contrib.admin.register', 'admin.register', (['Team'], {}), '(Team)\n', (217, 223), False, 'from django.contrib import admin\n'), ((270, 289), 'django.contrib.admin.register', 'admin.register', (['Kpi'], {}), '(Kpi)\n', (284, 289), False, 'from django.contrib import admin\n'), ((335, 359), 'django.contrib.admin.register', 'admin.register', (['KpiValue'], {}), '(KpiValue)\n', (349, 359), False, 'from django.contrib import admin\n')]
|
from pathlib import Path
from os import path as pt
def file_exist_query(filename):
path = Path(filename)
if path.is_file():
res = None
while res not in ['y', 'Y', 'n', 'N']:
res = input("\nThe file in '{}' already exists, do you really wish to re-write its contents? [y/n]".format(filename))
if res not in ['y', 'Y', 'n', 'N']:
print("Please reply with 'y' or 'n'")
if res in ['n', 'N']:
return False
return True
def file_exists(filename):
path = Path(filename)
if path.is_file():
return True
return False
def folder_exists(folder_name):
return pt.isdir(folder_name)
def folder_create(folder_name, exist_ok=False, parents=True):
path = Path(folder_name)
try:
path.mkdir(parents=parents, exist_ok=exist_ok)
except:
raise OSError("Trying to create an already existing folder!!")
return True
|
[
"os.path.isdir",
"pathlib.Path"
] |
[((96, 110), 'pathlib.Path', 'Path', (['filename'], {}), '(filename)\n', (100, 110), False, 'from pathlib import Path\n'), ((543, 557), 'pathlib.Path', 'Path', (['filename'], {}), '(filename)\n', (547, 557), False, 'from pathlib import Path\n'), ((662, 683), 'os.path.isdir', 'pt.isdir', (['folder_name'], {}), '(folder_name)\n', (670, 683), True, 'from os import path as pt\n'), ((759, 776), 'pathlib.Path', 'Path', (['folder_name'], {}), '(folder_name)\n', (763, 776), False, 'from pathlib import Path\n')]
|
# -*- coding: utf-8 -*-
#
# JSON osu! map analysis
#
import numpy as np;
def get_map_timing_array(map_json, length=-1, divisor=4):
if length == -1:
length = map_json["obj"][-1]["time"] + 1000; # it has an extra time interval after the last note
if map_json["obj"][-1]["type"] & 8: # spinner end
length = map_json["obj"][-1]["spinnerEndTime"] + 1000;
uts_a = map_json["timing"]["uts"];
out = [];
for i, uts in enumerate(uts_a):
begin_time = uts["beginTime"];
mspb = uts["tickLength"];
if i < len(uts_a)-1:
end_time = uts_a[i+1]["beginTime"];
else:
end_time = length;
arr = np.floor(np.arange(begin_time, end_time, mspb / divisor));
out = out + list(map(lambda f: int(f), arr));
return out;
def get_tick_len(map_json, tick):
uts_a = map_json["timing"]["uts"];
if tick < uts_a[0]["beginTime"]:
return uts_a[0]["tickLength"];
_out = 600;
for uts in uts_a:
if tick >= uts["beginTime"]:
_out = uts["tickLength"];
else:
return _out;
return _out;
def get_slider_len(map_json, tick):
ts_a = map_json["timing"]["ts"];
if tick < ts_a[0]["beginTime"]:
return ts_a[0]["sliderLength"];
_out = 100;
for ts in ts_a:
if tick >= ts["beginTime"]:
_out = ts["sliderLength"];
else:
return _out;
return _out;
def get_slider_len_ts(ts_a, tick):
if tick < ts_a[0]["beginTime"]:
return ts_a[0]["sliderLength"];
_out = 100;
for ts in ts_a:
if tick >= ts["beginTime"]:
_out = ts["sliderLength"];
else:
return _out;
return _out;
def get_end_time(note):
if note["type"] & 8:
return note["spinnerEndTime"];
elif note["type"] & 2:
return note["sliderData"]["endTime"];
#elif note["type"] & 128:
# return note["holdEndTime"];
else:
return note["time"];
# edited from uts to ts
def get_all_ticks_and_lengths_from_ts(uts_array, ts_array, end_time, divisor=4):
# Returns array of all timestamps, ticklens and sliderlens.
endtimes = ([uts["beginTime"] for uts in uts_array] + [end_time])[1:];
timestamps = [np.arange(uts["beginTime"], endtimes[i], uts["tickLength"] / divisor) for i, uts in enumerate(uts_array)];
ticks_from_uts = [list(range(len(timestamp_group))) for timestamp_group in timestamps];
tick_len = [[uts["tickLength"]] * len(np.arange(uts["beginTime"], endtimes[i], uts["tickLength"] / divisor)) for i, uts in enumerate(uts_array)];
# slider_len = [[ts["sliderLength"]] * len(np.arange(ts["beginTime"], endtimes[i], ts["tickLength"] / divisor)) for i, ts in enumerate(ts_array)];
slider_len = [get_slider_len_ts(ts_array, timestamp) for timestamp in np.concatenate(timestamps)];
return np.concatenate(ticks_from_uts), np.round(np.concatenate(timestamps)).astype(int), np.concatenate(tick_len), np.array(slider_len);
def get_end_point(note):
if note["type"] & 8:
return np.array([256, 192]);
elif note["type"] & 2:
return np.array(note["sliderData"]["endpoint"]);
else:
return np.array([note["x"], note["y"]]);
def get_input_vector(note, prev_note):
if note["type"] & 8:
return None;
#elif note["type"] & 2:
# return np.array(note["sliderData"]["dIn"]);
else:
vec = np.array([note["x"], note["y"]]) - get_end_point(prev_note);
return vec / max(0.001, np.sqrt(vec.dot(vec)));
def get_output_vector(note, prev_note):
if note["type"] & 8:
return None;
elif note["type"] & 2:
return np.array(note["sliderData"]["dOut"]);
else:
vec = np.array([note["x"], note["y"]]) - get_end_point(prev_note);
return vec / max(0.001, np.sqrt(vec.dot(vec)));
def get_momentum(note, prev_note, slider_len):
"""
momentum = distance snap (distance / slider length).
for sliders, takes small value between from slider end or slider start to next note.
"""
v1 = np.array([note["x"], note["y"]]);
v0 = get_end_point(prev_note);
v = v1 - v0;
if note["time"] - get_end_time(prev_note) == 0 or note["time"] - prev_note["time"] == 0:
# it has the same time the previous note ends. either a bugged sliderend or a double note
return 0;
end_type_momentum = np.sqrt(v.dot(v)) / (note["time"] - get_end_time(prev_note)) / slider_len;
# Since slider jumps in maps cause parameters to be learned too high
# we try to deal with slider leniency by using the beginning of slider
v2 = np.array([prev_note["x"], prev_note["y"]]);
v3 = v1 - v2;
start_type_momentum = np.sqrt(v3.dot(v3)) / (note["time"] - prev_note["time"]) / slider_len;
return np.min([end_type_momentum, start_type_momentum]);
def is_uts_begin(map_json, tick):
uts_a = map_json["timing"]["uts"];
begin_times = [uts["beginTime"] for uts in uts_a];
for t in begin_times:
if tick > t - 1 and tick < t + 5:
return True
return False
def get_map_notes(map_json, **kwargs):
"""
Reads JSON map data and creates a list for every tick
Returns:
data = list of data array: [TICK, TIME, NOTE, NOTE_TYPE, SLIDING, SPINNING, MOMENTUM, Ex1, Ex2, Ex3]
flow_data = list of data array: [i, tick, note_type, x, y, vec_in_x, vec_in_y, vec_out_x, vec_out_y, end_x, end_y]
Ex1, Ex2, Ex3 = tickLength/500, BPM/120, sliderLength/150
"""
length = kwargs.get("length", -1);
divisor = kwargs.get("divisor", 4);
tick_times = get_map_timing_array(map_json, length = length, divisor = divisor);
objs = map_json["obj"];
obj_times = list(map(lambda obj: obj["time"], objs));
# 1 for circle, 2 for slider, 3 for spinner
def get_note_type(obj):
if not obj:
return 0;
if obj["type"] & 2:
return 2;
elif obj["type"] & 8:
return 3;
return 1;
po = 0;
note_max_wait_time = kwargs.get("note_max_wait_time", 1000);
start_time = obj_times[0] - note_max_wait_time;
last_obj_time = start_time;
sliding = 0;
slider_end_time = 0;
spinning = 0;
spinner_end_time = 0;
data = [];
flow_data = [];
# constant multipliers and subtractions
tlen_mp = 1/500;
tlen_s = 1;
bpm_mp = 1/120;
bpm_s = 1;
slen_mp = 1/150;
slen_s = 1;
# tick count from start of uninherited timing section
uts_i = 0;
# tick is timestamp here
for i, tick in enumerate(tick_times):
if is_uts_begin(map_json, tick):
uts_i = 0;
else:
uts_i += 1;
# Attach extra vars at the end of each note data row
tlen = get_tick_len(map_json, tick);
bpm = 60000 / tlen;
slen = get_slider_len(map_json, tick);
ex1 = tlen * tlen_mp - tlen_s;
ex2 = bpm * bpm_mp - bpm_s;
ex3 = slen * slen_mp - slen_s;
while obj_times[po] < tick - 5 and po < len(obj_times) - 1:
po += 1;
if obj_times[po] >= tick - 5 and obj_times[po] <= tick + 5: # found note
last_obj_time = tick;
note_type = get_note_type(objs[po]);
# calculate momentum
if po >= 1:
momentum = get_momentum(objs[po], objs[po-1], slen/tlen);
else:
momentum = 0;
# flow data
if po >= 1:
input_vector = get_input_vector(objs[po], objs[po-1]);
output_vector = get_output_vector(objs[po], objs[po-1]);
else:
input_vector = [0, 0];
output_vector = [0, 0];
if input_vector is None or input_vector[0] is None or input_vector[1] is None:
input_vector = [0, 0];
if output_vector is None or output_vector[0] is None or output_vector[1] is None:
output_vector = [0, 0];
# end point
endpoint = get_end_point(objs[po]);
flow_data.append([uts_i, tick, note_type, objs[po]["x"], objs[po]["y"], input_vector[0], input_vector[1], output_vector[0], output_vector[1], endpoint[0], endpoint[1]]);
# put data
if note_type == 1:
spinning = 0;
sliding = 0;
elif note_type == 2:
sliding = 1;
slider_end_time = objs[po]["sliderData"]["endTime"];
elif note_type == 3:
spinning = 1;
spinner_end_time = objs[po]["spinnerEndTime"];
# because the spinner sometimes get over 3 secs
last_obj_time = spinner_end_time;
# TICK, TIME, NOTE, NOTE_TYPE, SLIDING, SPINNING, MOMENTUM, Ex1, Ex2, Ex3
data.append([uts_i, tick, 1, note_type, sliding, spinning, momentum, ex1, ex2, ex3]);
elif spinning == 1:
if tick >= spinner_end_time - 5:
spinning = 0;
data.append([uts_i, tick, 1, 5, 0, 0, 0, ex1, ex2, ex3]);
else:
data.append([uts_i, tick, 0, 0, 0, 1, 0, ex1, ex2, ex3]);
elif sliding == 1:
if tick >= slider_end_time - 5:
sliding = 0;
data.append([uts_i, tick, 1, 4, 0, 0, 0, ex1, ex2, ex3]);
else:
data.append([uts_i, tick, 0, 0, 1, 0, 0, ex1, ex2, ex3]);
else: # not found
if tick - last_obj_time < note_max_wait_time and tick >= start_time:
data.append([uts_i, tick, 0, 0, 0, 0, 0, ex1, ex2, ex3]);
return data, flow_data;
|
[
"numpy.min",
"numpy.array",
"numpy.arange",
"numpy.concatenate"
] |
[((4069, 4101), 'numpy.array', 'np.array', (["[note['x'], note['y']]"], {}), "([note['x'], note['y']])\n", (4077, 4101), True, 'import numpy as np\n'), ((4621, 4663), 'numpy.array', 'np.array', (["[prev_note['x'], prev_note['y']]"], {}), "([prev_note['x'], prev_note['y']])\n", (4629, 4663), True, 'import numpy as np\n'), ((4791, 4839), 'numpy.min', 'np.min', (['[end_type_momentum, start_type_momentum]'], {}), '([end_type_momentum, start_type_momentum])\n', (4797, 4839), True, 'import numpy as np\n'), ((2259, 2328), 'numpy.arange', 'np.arange', (["uts['beginTime']", 'endtimes[i]', "(uts['tickLength'] / divisor)"], {}), "(uts['beginTime'], endtimes[i], uts['tickLength'] / divisor)\n", (2268, 2328), True, 'import numpy as np\n'), ((2873, 2903), 'numpy.concatenate', 'np.concatenate', (['ticks_from_uts'], {}), '(ticks_from_uts)\n', (2887, 2903), True, 'import numpy as np\n'), ((2955, 2979), 'numpy.concatenate', 'np.concatenate', (['tick_len'], {}), '(tick_len)\n', (2969, 2979), True, 'import numpy as np\n'), ((2981, 3001), 'numpy.array', 'np.array', (['slider_len'], {}), '(slider_len)\n', (2989, 3001), True, 'import numpy as np\n'), ((3069, 3089), 'numpy.array', 'np.array', (['[256, 192]'], {}), '([256, 192])\n', (3077, 3089), True, 'import numpy as np\n'), ((693, 740), 'numpy.arange', 'np.arange', (['begin_time', 'end_time', '(mspb / divisor)'], {}), '(begin_time, end_time, mspb / divisor)\n', (702, 740), True, 'import numpy as np\n'), ((2833, 2859), 'numpy.concatenate', 'np.concatenate', (['timestamps'], {}), '(timestamps)\n', (2847, 2859), True, 'import numpy as np\n'), ((3133, 3173), 'numpy.array', 'np.array', (["note['sliderData']['endpoint']"], {}), "(note['sliderData']['endpoint'])\n", (3141, 3173), True, 'import numpy as np\n'), ((3200, 3232), 'numpy.array', 'np.array', (["[note['x'], note['y']]"], {}), "([note['x'], note['y']])\n", (3208, 3232), True, 'import numpy as np\n'), ((3425, 3457), 'numpy.array', 'np.array', (["[note['x'], note['y']]"], {}), 
"([note['x'], note['y']])\n", (3433, 3457), True, 'import numpy as np\n'), ((3671, 3707), 'numpy.array', 'np.array', (["note['sliderData']['dOut']"], {}), "(note['sliderData']['dOut'])\n", (3679, 3707), True, 'import numpy as np\n'), ((2500, 2569), 'numpy.arange', 'np.arange', (["uts['beginTime']", 'endtimes[i]', "(uts['tickLength'] / divisor)"], {}), "(uts['beginTime'], endtimes[i], uts['tickLength'] / divisor)\n", (2509, 2569), True, 'import numpy as np\n'), ((3733, 3765), 'numpy.array', 'np.array', (["[note['x'], note['y']]"], {}), "([note['x'], note['y']])\n", (3741, 3765), True, 'import numpy as np\n'), ((2914, 2940), 'numpy.concatenate', 'np.concatenate', (['timestamps'], {}), '(timestamps)\n', (2928, 2940), True, 'import numpy as np\n')]
|
from flask import Flask, request, make_response, jsonify, Response
from flask_restx import Resource, Api, abort, reqparse
from flask_jwt_extended import JWTManager
from flask_jwt_extended import (create_access_token, create_refresh_token, jwt_required,
jwt_refresh_token_required, get_jwt_identity, get_raw_jwt)
from datetime import timedelta
import random
app = Flask(__name__)
app.secret_key = 'mysupersecretkey'
api = Api(app, version='1.0', title='My API Boilerplate',
description='My API Boilerplate',
)
ns = api.namespace('api/v1', description='Example.')
app.config['JWT_SECRET_KEY'] = 'jwt-secret-string'
app.config['JWT_TOKEN_LOCATION'] = 'headers'
app.config['JWT_HEADER_NAME'] = 'X-Example-access-token'
app.config['JWT_HEADER_TYPE'] = ''
app.config['JWT_ACCESS_TOKEN_EXPIRES'] = timedelta(minutes=15)
app.config['JWT_REFRESH_TOKEN_EXPIRES'] = timedelta(minutes=15)
app.config['JWT_BLACKLIST_ENABLED'] = True
app.config['JWT_BLACKLIST_TOKEN_CHECKS'] = ['access', 'refresh']
jwt = JWTManager(app)
blacklist = set()
devices = {
'routers': {
12345: {
'name': 'RT1',
'ip': '192.168.1.101'
},
123456: {
'name': 'RT2',
'ip': '192.168.1.102'
},
123457: {
'name': 'RT3',
'ip': '192.168.1.103'
},
12345712: {
'name': 'RT4',
'ip': '192.168.1.104'
},
12345752: {
'name': 'RT5',
'ip': '192.168.1.105'
}
}
}
def generate_device_id():
return random.randint(10000, 20000)
@jwt.token_in_blacklist_loader
def check_if_token_in_blacklist(decrypted_token):
jti = decrypted_token['jti']
return jti in blacklist
@ns.route('/generatetoken')
class GenerateToken(Resource):
@staticmethod
def post():
if request.authorization.username == 'admin' and request.authorization.password == '<PASSWORD>':
access_token = create_access_token(identity=request.authorization.username, fresh=True)
refresh_token = create_refresh_token(identity=request.authorization.username)
return Response(headers={'X-Example-access-token': access_token,
'X-Example-refresh-token': refresh_token},
status=204, mimetype='application/json')
else:
return make_response(jsonify({'message': 'Bad username or password'}), 401)
@ns.route('/refreshtoken')
class RefreshToken(Resource):
@jwt_refresh_token_required
def post(self):
current_user = get_jwt_identity()
access_token = create_access_token(identity=current_user)
return Response(headers={'X-Example-access-token': access_token,
'X-Example-refresh-token': request.headers.get('X-Example-access-token')},
status=204, mimetype='application/json')
@ns.route('/revoketoken')
class RevokeToken(Resource):
@jwt_required
def post(self):
jti = get_raw_jwt()['jti']
blacklist.add(jti)
return '', 204
@ns.route('/lets_get_all_routers')
class TestRouters(Resource):
@jwt_required
def get(self):
parser = reqparse.RequestParser()
parser.add_argument('PageSize', type=int, location='args')
parser.add_argument('Offset', type=int, location='args')
args = parser.parse_args()
if not args.PageSize:
page_size = 10
else:
page_size = args.PageSize
if page_size > 100:
raise reqparse.exceptions.RequestEntityTooLarge('PageSize cannot exceed 100 items!')
if not args.Offset:
offset = 0
elif args.Offset > page_size:
offset = 0
else:
offset = args.Offset
items = []
for k, v in devices['routers'].items():
v.update({'id': str(k)})
items.append({k: v})
data = {'url': request.url,
'items': items[offset:page_size],
'PageSize': page_size,
'Offset': offset,
'count': len(items[offset:page_size])}
return make_response(jsonify(data), 200)
@ns.route('/routers')
class ListRouters(Resource):
@jwt_required
def get(self):
parser = reqparse.RequestParser()
parser.add_argument('PageSize', type=int, location='args')
parser.add_argument('Offset', type=int, location='args')
args = parser.parse_args()
if not args.PageSize:
page_size = 10
else:
page_size = args.PageSize
if page_size > 100:
raise reqparse.exceptions.RequestEntityTooLarge('PageSize cannot exceed 100 items!')
if not args.Offset:
offset = 0
elif args.Offset > page_size:
offset = 0
else:
offset = args.Offset
items = []
for k, v in devices['routers'].items():
v.update({'id': str(k)})
items.append({k: v})
data = {'url': request.url,
'items': items[offset:page_size],
'PageSize': page_size,
'Offset': offset,
'count': len(items[offset:page_size])}
return make_response(jsonify(data), 200)
@jwt_required
def post(self):
data = request.get_json()
while True:
device_id = generate_device_id()
if device_id not in devices['routers']:
break
devices['routers'][device_id] = data
data.update({'id': device_id})
return make_response(jsonify(data), 200)
@ns.route('/routers/<int:device_id>')
class Routers(Resource):
@jwt_required
def get(self, device_id):
try:
device = devices['routers'][device_id]
except KeyError:
abort(404)
else:
return make_response(jsonify(device), 200)
@jwt_required
def put(self, device_id):
data = request.get_json()
devices['routers'][device_id].update(data)
return make_response(jsonify(devices['routers'][device_id]), 200)
@jwt_required
def delete(self, device_id):
devices['routers'].pop(device_id, None)
return make_response(jsonify({'msg': f'Device ID: {device_id} has been deleted!'}), 200)
@jwt.token_in_blacklist_loader
def check_if_token_in_blacklist(decrypted_token):
jti = decrypted_token['jti']
return jti in blacklist
app.run(debug=True)
|
[
"flask_jwt_extended.JWTManager",
"random.randint",
"flask_jwt_extended.get_jwt_identity",
"flask_restx.reqparse.exceptions.RequestEntityTooLarge",
"flask.request.headers.get",
"flask.Flask",
"flask_restx.Api",
"flask_restx.reqparse.RequestParser",
"flask_jwt_extended.create_access_token",
"flask_jwt_extended.create_refresh_token",
"flask_jwt_extended.get_raw_jwt",
"flask_restx.abort",
"flask.jsonify",
"datetime.timedelta",
"flask.Response",
"flask.request.get_json"
] |
[((396, 411), 'flask.Flask', 'Flask', (['__name__'], {}), '(__name__)\n', (401, 411), False, 'from flask import Flask, request, make_response, jsonify, Response\n'), ((454, 544), 'flask_restx.Api', 'Api', (['app'], {'version': '"""1.0"""', 'title': '"""My API Boilerplate"""', 'description': '"""My API Boilerplate"""'}), "(app, version='1.0', title='My API Boilerplate', description=\n 'My API Boilerplate')\n", (457, 544), False, 'from flask_restx import Resource, Api, abort, reqparse\n'), ((845, 866), 'datetime.timedelta', 'timedelta', ([], {'minutes': '(15)'}), '(minutes=15)\n', (854, 866), False, 'from datetime import timedelta\n'), ((909, 930), 'datetime.timedelta', 'timedelta', ([], {'minutes': '(15)'}), '(minutes=15)\n', (918, 930), False, 'from datetime import timedelta\n'), ((1045, 1060), 'flask_jwt_extended.JWTManager', 'JWTManager', (['app'], {}), '(app)\n', (1055, 1060), False, 'from flask_jwt_extended import JWTManager\n'), ((1609, 1637), 'random.randint', 'random.randint', (['(10000)', '(20000)'], {}), '(10000, 20000)\n', (1623, 1637), False, 'import random\n'), ((2638, 2656), 'flask_jwt_extended.get_jwt_identity', 'get_jwt_identity', ([], {}), '()\n', (2654, 2656), False, 'from flask_jwt_extended import create_access_token, create_refresh_token, jwt_required, jwt_refresh_token_required, get_jwt_identity, get_raw_jwt\n'), ((2680, 2722), 'flask_jwt_extended.create_access_token', 'create_access_token', ([], {'identity': 'current_user'}), '(identity=current_user)\n', (2699, 2722), False, 'from flask_jwt_extended import create_access_token, create_refresh_token, jwt_required, jwt_refresh_token_required, get_jwt_identity, get_raw_jwt\n'), ((3273, 3297), 'flask_restx.reqparse.RequestParser', 'reqparse.RequestParser', ([], {}), '()\n', (3295, 3297), False, 'from flask_restx import Resource, Api, abort, reqparse\n'), ((4367, 4391), 'flask_restx.reqparse.RequestParser', 'reqparse.RequestParser', ([], {}), '()\n', (4389, 4391), False, 'from flask_restx import 
Resource, Api, abort, reqparse\n'), ((5408, 5426), 'flask.request.get_json', 'request.get_json', ([], {}), '()\n', (5424, 5426), False, 'from flask import Flask, request, make_response, jsonify, Response\n'), ((6057, 6075), 'flask.request.get_json', 'request.get_json', ([], {}), '()\n', (6073, 6075), False, 'from flask import Flask, request, make_response, jsonify, Response\n'), ((2013, 2085), 'flask_jwt_extended.create_access_token', 'create_access_token', ([], {'identity': 'request.authorization.username', 'fresh': '(True)'}), '(identity=request.authorization.username, fresh=True)\n', (2032, 2085), False, 'from flask_jwt_extended import create_access_token, create_refresh_token, jwt_required, jwt_refresh_token_required, get_jwt_identity, get_raw_jwt\n'), ((2114, 2175), 'flask_jwt_extended.create_refresh_token', 'create_refresh_token', ([], {'identity': 'request.authorization.username'}), '(identity=request.authorization.username)\n', (2134, 2175), False, 'from flask_jwt_extended import create_access_token, create_refresh_token, jwt_required, jwt_refresh_token_required, get_jwt_identity, get_raw_jwt\n'), ((2195, 2345), 'flask.Response', 'Response', ([], {'headers': "{'X-Example-access-token': access_token, 'X-Example-refresh-token':\n refresh_token}", 'status': '(204)', 'mimetype': '"""application/json"""'}), "(headers={'X-Example-access-token': access_token,\n 'X-Example-refresh-token': refresh_token}, status=204, mimetype=\n 'application/json')\n", (2203, 2345), False, 'from flask import Flask, request, make_response, jsonify, Response\n'), ((3082, 3095), 'flask_jwt_extended.get_raw_jwt', 'get_raw_jwt', ([], {}), '()\n', (3093, 3095), False, 'from flask_jwt_extended import create_access_token, create_refresh_token, jwt_required, jwt_refresh_token_required, get_jwt_identity, get_raw_jwt\n'), ((3620, 3698), 'flask_restx.reqparse.exceptions.RequestEntityTooLarge', 'reqparse.exceptions.RequestEntityTooLarge', (['"""PageSize cannot exceed 100 items!"""'], {}), 
"('PageSize cannot exceed 100 items!')\n", (3661, 3698), False, 'from flask_restx import Resource, Api, abort, reqparse\n'), ((4240, 4253), 'flask.jsonify', 'jsonify', (['data'], {}), '(data)\n', (4247, 4253), False, 'from flask import Flask, request, make_response, jsonify, Response\n'), ((4714, 4792), 'flask_restx.reqparse.exceptions.RequestEntityTooLarge', 'reqparse.exceptions.RequestEntityTooLarge', (['"""PageSize cannot exceed 100 items!"""'], {}), "('PageSize cannot exceed 100 items!')\n", (4755, 4792), False, 'from flask_restx import Resource, Api, abort, reqparse\n'), ((5334, 5347), 'flask.jsonify', 'jsonify', (['data'], {}), '(data)\n', (5341, 5347), False, 'from flask import Flask, request, make_response, jsonify, Response\n'), ((5679, 5692), 'flask.jsonify', 'jsonify', (['data'], {}), '(data)\n', (5686, 5692), False, 'from flask import Flask, request, make_response, jsonify, Response\n'), ((6156, 6194), 'flask.jsonify', 'jsonify', (["devices['routers'][device_id]"], {}), "(devices['routers'][device_id])\n", (6163, 6194), False, 'from flask import Flask, request, make_response, jsonify, Response\n'), ((6330, 6391), 'flask.jsonify', 'jsonify', (["{'msg': f'Device ID: {device_id} has been deleted!'}"], {}), "({'msg': f'Device ID: {device_id} has been deleted!'})\n", (6337, 6391), False, 'from flask import Flask, request, make_response, jsonify, Response\n'), ((2449, 2497), 'flask.jsonify', 'jsonify', (["{'message': 'Bad username or password'}"], {}), "({'message': 'Bad username or password'})\n", (2456, 2497), False, 'from flask import Flask, request, make_response, jsonify, Response\n'), ((5913, 5923), 'flask_restx.abort', 'abort', (['(404)'], {}), '(404)\n', (5918, 5923), False, 'from flask_restx import Resource, Api, abort, reqparse\n'), ((5971, 5986), 'flask.jsonify', 'jsonify', (['device'], {}), '(device)\n', (5978, 5986), False, 'from flask import Flask, request, make_response, jsonify, Response\n'), ((2856, 2901), 'flask.request.headers.get', 
'request.headers.get', (['"""X-Example-access-token"""'], {}), "('X-Example-access-token')\n", (2875, 2901), False, 'from flask import Flask, request, make_response, jsonify, Response\n')]
|
### IMPORTS
from __future__ import print_function
import os
import fnmatch
import numpy as np
import skimage.data
import cv2
import sys
import matplotlib.pyplot as plt
import matplotlib.patches as mpatches
from PIL import Image
from keras import applications
from keras.preprocessing.image import ImageDataGenerator
from keras import optimizers
from keras.optimizers import RMSprop, Adagrad
from keras.models import Sequential, Model
from keras.layers import Dropout, Flatten, Dense, Input
from keras.callbacks import ModelCheckpoint, CSVLogger, TensorBoard, EarlyStopping
import logging
FORMAT = "[%(lineno)4s : %(funcName)-30s ] %(message)s"
logging.basicConfig(level=logging.DEBUG, format=FORMAT)
from selective_search import selective_search_bbox
### GLOBALS
# dimensions of our images.
# img_width = 150
# img_height = 150
img_width = 224
img_height = 224
# dataset_path = 'dataset_dogs_cats'
dataset_path = 'dataset'
dataset_train_path=os.path.join(dataset_path, 'train')
dataset_val_path=os.path.join(dataset_path, 'validation')
dataset_test_path=os.path.join(dataset_path, 'test')
# path to the model weights files.
weights_path = 'weights/vgg16_weights.h5'
#top_model_weights_path = 'output/bottleneck_fc_model.h5'
#top_model_weights_path = 'output_6_categ/best-weights-015-0.5636-0.7923.hdf5'
#finetune_model_weights_path = 'output/finetune_bottleneck_fc_model.h5'
#finetune_model_weights_path = 'output_6_categ/best-weights-finetune-000-0.2325-0.9062.hdf5'
#finetune_model_weights_path = 'output_6_categ_crop/best-weights-finetune-008-0.3453-0.8774.hdf5'
#finetune_model_weights_path = 'output/best-weights-finetune-000-1.5646-0.5217.hdf5'
#finetune_model_weights_path = 'results_36categ/best-weights-finetune-000-1.5646-0.5217.hdf5'
finetune_model_weights_path = 'output/finetune_bottleneck_fc_model.h5'
#epochs = 50
epochs = 5
#batch_size = 16
#batch_size = 32
batch_size = 1
# Count no. of images(.jpg) in a directory
def get_images_count_recursive(path):
matches = []
logging.debug('path {}'.format(path))
for root, dirnames, filenames in os.walk(path):
for filename in fnmatch.filter(filenames, '*.jpg'):
matches.append(os.path.join(root, filename))
# logging.debug('matches {}'.format(matches))
images_count = len(matches)
return images_count
nb_test_samples = get_images_count_recursive(dataset_test_path)
logging.debug('nb_test_samples {}'.format(nb_test_samples))
if not os.path.exists('output'):
os.makedirs('output')
if not os.path.exists('logs'):
os.makedirs('logs')
# TODO: HARDCODING - Should be same as used during training VGG; Else error (None, None, 512)
input_shape = (img_width, img_height, 3)
# Sorted subdirectories list
def get_subdir_list(path):
names=[]
for name in sorted(os.listdir(path)):
if os.path.isdir(os.path.join(path, name)):
names.append(name)
logging.debug('names {}'.format(names))
return names
class_names = get_subdir_list(dataset_train_path)
logging.debug('class_names {}'.format(class_names))
# build the VGG16 network
base_model = applications.VGG16(weights='imagenet', include_top=False, input_shape=input_shape)
logging.debug('Model loaded.')
logging.debug('{}'.format(base_model.output_shape)) # (None, None, None, 512) if input_shape not given in applications.VGG16
logging.debug('{}'.format(base_model.output_shape[1:])) # (None, None, 512)
### MODEL 1
# build a classifier model to put on top of the convolutional model
# top_model = Sequential()
# top_model.add(Flatten(input_shape=base_model.output_shape[1:]))
# top_model.add(Dense(256, activation='relu'))
# top_model.add(Dropout(0.5))
# top_model.add(Dense(len(class_names), activation='softmax')) # Binary to Multi classification changes
# #top_model.add(Dense(1, activation='sigmoid'))
# # note that it is necessary to start with a fully-trained
# # classifier, including the top classifier,
# # in order to successfully do fine-tuning
# # top_model.load_weights(top_model_weights_path)
# # add the model on top of the convolutional base
# # base_model.add(top_model) # Not working; AttributeError: 'Model' object has no attribute 'add'
# model = Model(inputs=base_model.input, outputs=top_model(base_model.output))
# logging.debug('{}'.format(model.summary()))
# model.compile(loss='sparse_categorical_crossentropy',
# optimizer=optimizers.SGD(lr=1e-4, momentum=0.9),
# metrics=['accuracy'])
### MODEL2
inputs = Input(shape=(base_model.output_shape[1:]))
x_common = Dense(256, activation='relu')(inputs)
## Model Classification
x = Flatten()(x_common)
#x = Dropout(dropout_rate)(x)
predictions_class = Dense(len(class_names), activation='softmax', name='predictions_class')(x)
## Model (Regression) IOU score
x = Flatten()(x_common)
# x = Dense(256, activation='relu')(x)
# x = Dropout(dropout_rate)(x)
predictions_iou = Dense(1, activation='sigmoid', name='predictions_iou')(x)
# This creates a model that includes the Input layer and three Dense layers
#model = Model(inputs=inputs, outputs=[predictions_class(base_model.output), predictions_iou(base_model.output)])
model = Model(inputs=inputs, outputs=[predictions_class(base_model.output), predictions_iou])
logging.debug('model summary {}'.format(model.summary()))
model.compile(optimizer=optimizers.SGD(lr=1e-4, momentum=0.9),
loss={'predictions_class': 'sparse_categorical_crossentropy', 'predictions_iou': 'mean_squared_error'},
metrics=['accuracy'])
model.load_weights(finetune_model_weights_path)
logging.debug('weights loaded: {}'.format(finetune_model_weights_path))
def evaluate_test_dataset():
## Test
test_datagen = ImageDataGenerator(rescale=1. / 255)
test_generator = test_datagen.flow_from_directory(
dataset_test_path,
target_size=(img_height, img_width),
batch_size=batch_size,
class_mode='sparse', # Binary to Multi classification changes
save_to_dir=None,
shuffle=False)
scores = model.evaluate_generator(test_generator, nb_test_samples // batch_size)
logging.debug('model.metrics_names {}'.format(model.metrics_names))
logging.debug('scores {}'.format(scores))
def predict_image_dir():
# Predict
# TODO: Hardcoding
# Put all images in sample_images/test folder
dataset_predict_path='sample_images'
#dataset_predict_path='temp'
logging.debug('dataset_predict_path {}'.format(dataset_predict_path))
predict_datagen = ImageDataGenerator(rescale=1. / 255)
predict_generator = predict_datagen.flow_from_directory(
dataset_predict_path,
target_size=(img_height, img_width),
batch_size=batch_size,
class_mode='sparse', # Binary to Multi classification changes
save_to_dir=None,
shuffle=False)
nb_predict_samples = get_images_count_recursive(dataset_predict_path)
logging.debug('nb_predict_samples {}'.format(nb_predict_samples))
prediction = model.predict_generator(predict_generator, nb_predict_samples // batch_size, verbose=1)
logging.debug('\n\nprediction \n{}'.format(prediction))
# Display predictions
matches=[]
for root, dirnames, filenames in os.walk(os.path.join(dataset_predict_path,'test')):
for filename in fnmatch.filter(filenames, '*.jpg'):
matches.append(os.path.join(root, filename))
for index,preds in enumerate(prediction):
logging.debug('\n{}'.format((matches[index])))
for index2, pred in enumerate(preds):
logging.debug('class_names {}'.format(class_names[index2]))
logging.debug('pred {0:6f}'.format(float(pred)))
def pad_and_crop_image(old_im, new_width, new_height):
# old_im = Image.open('someimage.jpg')
old_size = old_im.size
new_size = (new_width, new_height)
new_im = Image.new("RGB", new_size) # this is already black!
new_im.paste(old_im, ((new_size[0]-old_size[0])/2,
(new_size[1]-old_size[1])/2))
# new_im.show()
# new_im.save('someimage.jpg')
return new_im
def predict_image_name(image_path_name):
logging.debug('image_path_name {}'.format(image_path_name))
candidates = selective_search_bbox(image_path_name)
logging.debug('candidates {}'.format(candidates))
image_name = image_path_name.split('/')[-1].split('.')[0]
logging.debug('image_name {}'.format(image_name))
# img = Image.open(image_path_name)
# logging.debug('{} {} {}'.format(img.format, img.size, img.mode))
#img2 = img.crop((0, 0, 100, 100))
# img2.save("img2.jpg")
# img2.show()
#crop_img = img[200:400, 100:300] # Crop from x, y, w, h -> 100, 200, 300, 400
# NOTE: its img[y: y + h, x: x + w] and *not* img[x: x + w, y: y + h]
# img = cv2.imread(image_path_name)
# fig, ax = plt.subplots(ncols=1, nrows=1, figsize=(6, 6))
img_read = Image.open(image_path_name)
logging.debug('{} {} {}'.format(img_read.format, img_read.size, img_read.mode))
# img_read.show()
i=0
for x, y, w, h in (candidates):
# left, upper, right, and lower pixel; The cropped section includes the left column and
# the upper row of pixels and goes up to (but doesn't include) the right column and bottom row of pixels
img_crop = img_read.crop((y, x, y+w, x+h))
img_crop.save('temp/test/'+ image_name + '_' + str(i) + '_cropped_' + '.jpg')
logging.debug('img_crop {} {} {}'.format(img_crop.format, img_crop.size, img_crop.mode))
img_crop_resize = img_crop.resize((img_width, img_height))
img_crop_resize.save('temp/test/'+ image_name + '_' + str(i) + '_cropped_resize' + '.jpg')
logging.debug('img_crop_resize {} {} {}'.format(img_crop_resize.format, img_crop_resize.size, img_crop_resize.mode))
i=i+1
# crop_img = img[x:y, w:h] # Crop from x, y, w, h -> 100, 200, 300, 400
# logging.debug('crop_img {}'.format(crop_img.shape))
# ax.imshow(crop_img)
# # cv2.imshow('cropped', crop_img)
# # cv2.waitKey(0)
# plt.show()
# # Convert Image to array
# img = PIL.Image.open("foo.jpg").convert("L")
# arr = numpy.array(img)
# # Convert array to Image
# img = PIL.Image.fromarray(arr)
# img = cv2.resize(cv2.imread(image_path_name), (224, 224)).astype(np.float32)
# img2.save('temp/test/img_'+str(i)+'.jpg')
# img3 = img2.thumbnail((img_width, img_height))
# logging.debug('img3 {}'.format(type(img3)))
# # img3.save('temp/test/img_'+str(i)+'_resized.jpg')
# logging.debug('{} {} {}'.format(img3.format, img3.size, img3.mode))
# img4 = pad_and_crop_image(img3, img_width, img_height)
# logging.debug('{} {} {}'.format(img4.format, img4.size, img4.mode))
# img4.save('temp/test/img_'+str(i)+'_resized1.jpg')
img=np.array(img_crop_resize).astype(np.float32)
img[:,:,0] -= 103.939
img[:,:,1] -= 116.779
img[:,:,2] -= 123.68
#img = img.transpose((2,0,1))
img = np.expand_dims(img, axis=0)
prediction = model.predict(img, batch_size, verbose=1)
logging.debug('\n\nprediction \n{}'.format(prediction))
for index,preds in enumerate(prediction):
for pred in preds:
logging.debug('pred {0:6f}'.format(float(pred)))
### MAIN ###
#evaluate_test_dataset()
#predict_image_dir()
# #image='dataset/test/Jeans/img_Distressed_Skinny_Jeans_img_00000004.jpg'
# #image='sample_images/test/img_Distressed_Denim_Jeans_img_00000001.jpg'
# image='sample_images/test/img_Acid_Wash_Denim_Romper_img_00000070.jpg'
image='sample_images/test/img_Acid_Wash_-_Skinny_Jeans_img_00000005.jpg'
#image='sample_images/test/img_Boxy_Faux_Fur_Jacket_img_00000001.jpg'
#image='sample_images/test/img_Athletic_Marled_Knit_Joggers_img_00000009.jpg'
predict_image_name(image)
|
[
"keras.preprocessing.image.ImageDataGenerator",
"PIL.Image.new",
"os.walk",
"keras.applications.VGG16",
"keras.layers.Input",
"os.path.join",
"keras.optimizers.SGD",
"os.path.exists",
"keras.layers.Flatten",
"os.listdir",
"selective_search.selective_search_bbox",
"fnmatch.filter",
"logging.debug",
"os.makedirs",
"logging.basicConfig",
"numpy.expand_dims",
"PIL.Image.open",
"keras.layers.Dense",
"numpy.array"
] |
[((647, 702), 'logging.basicConfig', 'logging.basicConfig', ([], {'level': 'logging.DEBUG', 'format': 'FORMAT'}), '(level=logging.DEBUG, format=FORMAT)\n', (666, 702), False, 'import logging\n'), ((948, 983), 'os.path.join', 'os.path.join', (['dataset_path', '"""train"""'], {}), "(dataset_path, 'train')\n", (960, 983), False, 'import os\n'), ((1001, 1041), 'os.path.join', 'os.path.join', (['dataset_path', '"""validation"""'], {}), "(dataset_path, 'validation')\n", (1013, 1041), False, 'import os\n'), ((1060, 1094), 'os.path.join', 'os.path.join', (['dataset_path', '"""test"""'], {}), "(dataset_path, 'test')\n", (1072, 1094), False, 'import os\n'), ((3095, 3182), 'keras.applications.VGG16', 'applications.VGG16', ([], {'weights': '"""imagenet"""', 'include_top': '(False)', 'input_shape': 'input_shape'}), "(weights='imagenet', include_top=False, input_shape=\n input_shape)\n", (3113, 3182), False, 'from keras import applications\n'), ((3178, 3208), 'logging.debug', 'logging.debug', (['"""Model loaded."""'], {}), "('Model loaded.')\n", (3191, 3208), False, 'import logging\n'), ((4743, 4783), 'keras.layers.Input', 'Input', ([], {'shape': 'base_model.output_shape[1:]'}), '(shape=base_model.output_shape[1:])\n', (4748, 4783), False, 'from keras.layers import Dropout, Flatten, Dense, Input\n'), ((2078, 2091), 'os.walk', 'os.walk', (['path'], {}), '(path)\n', (2085, 2091), False, 'import os\n'), ((2451, 2475), 'os.path.exists', 'os.path.exists', (['"""output"""'], {}), "('output')\n", (2465, 2475), False, 'import os\n'), ((2481, 2502), 'os.makedirs', 'os.makedirs', (['"""output"""'], {}), "('output')\n", (2492, 2502), False, 'import os\n'), ((2511, 2533), 'os.path.exists', 'os.path.exists', (['"""logs"""'], {}), "('logs')\n", (2525, 2533), False, 'import os\n'), ((2539, 2558), 'os.makedirs', 'os.makedirs', (['"""logs"""'], {}), "('logs')\n", (2550, 2558), False, 'import os\n'), ((4797, 4826), 'keras.layers.Dense', 'Dense', (['(256)'], {'activation': '"""relu"""'}), "(256, 
activation='relu')\n", (4802, 4826), False, 'from keras.layers import Dropout, Flatten, Dense, Input\n'), ((4864, 4873), 'keras.layers.Flatten', 'Flatten', ([], {}), '()\n', (4871, 4873), False, 'from keras.layers import Dropout, Flatten, Dense, Input\n'), ((5046, 5055), 'keras.layers.Flatten', 'Flatten', ([], {}), '()\n', (5053, 5055), False, 'from keras.layers import Dropout, Flatten, Dense, Input\n'), ((5154, 5208), 'keras.layers.Dense', 'Dense', (['(1)'], {'activation': '"""sigmoid"""', 'name': '"""predictions_iou"""'}), "(1, activation='sigmoid', name='predictions_iou')\n", (5159, 5208), False, 'from keras.layers import Dropout, Flatten, Dense, Input\n'), ((5969, 6006), 'keras.preprocessing.image.ImageDataGenerator', 'ImageDataGenerator', ([], {'rescale': '(1.0 / 255)'}), '(rescale=1.0 / 255)\n', (5987, 6006), False, 'from keras.preprocessing.image import ImageDataGenerator\n'), ((6836, 6873), 'keras.preprocessing.image.ImageDataGenerator', 'ImageDataGenerator', ([], {'rescale': '(1.0 / 255)'}), '(rescale=1.0 / 255)\n', (6854, 6873), False, 'from keras.preprocessing.image import ImageDataGenerator\n'), ((8256, 8282), 'PIL.Image.new', 'Image.new', (['"""RGB"""', 'new_size'], {}), "('RGB', new_size)\n", (8265, 8282), False, 'from PIL import Image\n'), ((8646, 8684), 'selective_search.selective_search_bbox', 'selective_search_bbox', (['image_path_name'], {}), '(image_path_name)\n', (8667, 8684), False, 'from selective_search import selective_search_bbox\n'), ((9333, 9360), 'PIL.Image.open', 'Image.open', (['image_path_name'], {}), '(image_path_name)\n', (9343, 9360), False, 'from PIL import Image\n'), ((2117, 2151), 'fnmatch.filter', 'fnmatch.filter', (['filenames', '"""*.jpg"""'], {}), "(filenames, '*.jpg')\n", (2131, 2151), False, 'import fnmatch\n'), ((2789, 2805), 'os.listdir', 'os.listdir', (['path'], {}), '(path)\n', (2799, 2805), False, 'import os\n'), ((5582, 5621), 'keras.optimizers.SGD', 'optimizers.SGD', ([], {'lr': '(0.0001)', 'momentum': '(0.9)'}), 
'(lr=0.0001, momentum=0.9)\n', (5596, 5621), False, 'from keras import optimizers\n'), ((7633, 7675), 'os.path.join', 'os.path.join', (['dataset_predict_path', '"""test"""'], {}), "(dataset_predict_path, 'test')\n", (7645, 7675), False, 'import os\n'), ((7701, 7735), 'fnmatch.filter', 'fnmatch.filter', (['filenames', '"""*.jpg"""'], {}), "(filenames, '*.jpg')\n", (7715, 7735), False, 'import fnmatch\n'), ((11488, 11515), 'numpy.expand_dims', 'np.expand_dims', (['img'], {'axis': '(0)'}), '(img, axis=0)\n', (11502, 11515), True, 'import numpy as np\n'), ((2833, 2857), 'os.path.join', 'os.path.join', (['path', 'name'], {}), '(path, name)\n', (2845, 2857), False, 'import os\n'), ((2180, 2208), 'os.path.join', 'os.path.join', (['root', 'filename'], {}), '(root, filename)\n', (2192, 2208), False, 'import os\n'), ((7764, 7792), 'os.path.join', 'os.path.join', (['root', 'filename'], {}), '(root, filename)\n', (7776, 7792), False, 'import os\n'), ((11299, 11324), 'numpy.array', 'np.array', (['img_crop_resize'], {}), '(img_crop_resize)\n', (11307, 11324), True, 'import numpy as np\n')]
|
import os
import glob
import shutil
def del_dummydirs(rootpath, list):
for root, subdirs, files in os.walk(rootpath):
"""
walk through given rootpath, delete dirs in list
"""
for s in subdirs:
if s in list:
shutil.rmtree(os.path.join(root, s))
print("deleted - ", os.path.join(root, s))
"""
walk through given rootpath, delete files in list
"""
for f in files:
if f in list:
os.remove(os.path.join(root, f))
print("deleted - ", os.path.join(root, f))
if __name__ == '__main__':
# del_dummydirs('/home/nas/DB/DB_video-nonlocal-light/400_val', ['@eaDir', 'Thumbs.db'])
del_dummydirs('/home/sangbuem/MARS/dataset/Kinetics', ['@e', 'Thumb'])
|
[
"os.walk",
"os.path.join"
] |
[((105, 122), 'os.walk', 'os.walk', (['rootpath'], {}), '(rootpath)\n', (112, 122), False, 'import os\n'), ((287, 308), 'os.path.join', 'os.path.join', (['root', 's'], {}), '(root, s)\n', (299, 308), False, 'import os\n'), ((346, 367), 'os.path.join', 'os.path.join', (['root', 's'], {}), '(root, s)\n', (358, 367), False, 'import os\n'), ((528, 549), 'os.path.join', 'os.path.join', (['root', 'f'], {}), '(root, f)\n', (540, 549), False, 'import os\n'), ((587, 608), 'os.path.join', 'os.path.join', (['root', 'f'], {}), '(root, f)\n', (599, 608), False, 'import os\n')]
|
'''
This script illustrates training of an inflammation classifier for patches along SI joints
'''
import argparse
import os
import shutil
import pytorch_lightning as pl
from torch.utils.data import DataLoader
from neuralnets.util.io import print_frm
from neuralnets.util.tools import set_seed
from neuralnets.util.augmentation import *
from pytorch_lightning.callbacks import ModelCheckpoint
from data.datasets import SPARCCDataset
from models.sparcc_cnn import Inflammation_CNN
from util.constants import *
factor = {INFLAMMATION_MODULE: 64, DEEP_INFLAMMATION_MODULE: 12, SPARCC_MODULE: 1, JOINT: 1}
def _train_module(net, train_data, val_data, args):
train_data.mode = INFLAMMATION_MODULE
val_data.mode = INFLAMMATION_MODULE
train_loader = DataLoader(train_data, batch_size=factor[INFLAMMATION_MODULE]*args.train_batch_size,
num_workers=args.num_workers, pin_memory=True, shuffle=True)
val_loader = DataLoader(val_data, batch_size=factor[INFLAMMATION_MODULE]*args.test_batch_size,
num_workers=args.num_workers, pin_memory=True)
checkpoint_callback = ModelCheckpoint(save_top_k=5, verbose=True, monitor='val/roc-auc', mode='max')
trainer = pl.Trainer(max_epochs=args.epochs, gpus=args.gpus, accelerator=args.accelerator,
default_root_dir=args.log_dir, flush_logs_every_n_steps=args.log_freq,
log_every_n_steps=args.log_freq, callbacks=[checkpoint_callback],
progress_bar_refresh_rate=args.log_refresh_rate, num_sanity_val_steps=0, deterministic=True)
trainer.fit(net, train_loader, val_loader)
return trainer
def _test_module(trainer, net, test_data, args):
test_data.mode = INFLAMMATION_MODULE
net.load_state_dict(torch.load(trainer.checkpoint_callback.best_model_path)['state_dict'])
test_loader = DataLoader(test_data, batch_size=factor[INFLAMMATION_MODULE]*args.test_batch_size,
num_workers=args.num_workers, pin_memory=True)
trainer.test(net, test_loader)
return trainer
if __name__ == '__main__':
# parse all the arguments
parser = argparse.ArgumentParser()
parser.add_argument("--data-dir", help="Path to the directory that contains a preprocessed dataset", type=str,
required=True)
parser.add_argument("--si-joint-model", help="Path to the SI joint detection checkpoint", type=str, required=True)
parser.add_argument("--model-checkpoint-illium", help="Path to the illium U-Net checkpoint", type=str,
required=True)
parser.add_argument("--model-checkpoint-sacrum", help="Path to the sacrum U-Net checkpoint", type=str,
required=True)
parser.add_argument("--repetitions", help="Number of repetitions", type=int, default=1)
parser.add_argument("--folds", help="Number of folds (overrides repetitions parameter if provided)", type=int,
default=None)
# network parameters
parser.add_argument("--train_val_test_split", help="Train/validation/test split", type=str, default="0.50,0.75")
parser.add_argument("--backbone", help="Backbone feature extractor of the model", type=str, default='ResNet18')
parser.add_argument("--omit_t1_input", help="Boolean flag that omits usage of T1 slices", action='store_true',
default=False)
parser.add_argument("--omit_t2_input", help="Boolean flag that omits usage of T1 slices", action='store_true',
default=False)
parser.add_argument("--omit_weighting", help="Boolean flag that specifies ROI masking", action='store_true',
default=False)
# optimization parameters
parser.add_argument("--epochs", help="Number of training epochs", type=int, default=400)
parser.add_argument("--lr", help="Learning rate for the optimization", type=float, default=1e-3)
# compute parameters
parser.add_argument("--train_batch_size", help="Batch size during training", type=int, default=1)
parser.add_argument("--test_batch_size", help="Batch size during testing", type=int, default=1)
parser.add_argument("--num_workers", help="Amount of workers", type=int, default=12)
parser.add_argument("--gpus", help="Devices available for computing", type=str, default='0')
parser.add_argument("--accelerator", help="Acceleration engine for computations", type=str, default='dp')
# logging parameters
parser.add_argument("--log_dir", help="Logging directory", type=str, default='logs')
parser.add_argument("--log_freq", help="Frequency to log results", type=int, default=50)
parser.add_argument("--log_refresh_rate", help="Refresh rate for logging", type=int, default=1)
parser.add_argument("--seed", help="Seed for reproducibility", type=int, default=0)
parser.add_argument("--clean-up", help="Boolean flag that specifies ROI masking", action='store_true', default=False)
args = parser.parse_args()
args.train_val_test_split = [float(item) for item in args.train_val_test_split.split(',')]
metrics = []
if args.folds is not None:
reps = args.folds
range_split = ((0, 1), (0, 1))
else:
reps = args.repetitions
f = None
split = args.train_val_test_split
range_split = ((0, split[1]), (0, split[1]), (split[1], 1))
for i in range(reps):
rep_str = 'fold' if args.folds is not None else 'repetition'
print_frm('')
print_frm('Start processing %s %d/%d ...' % (rep_str, i+1, reps))
print_frm('')
"""
Fix seed (in case of cross validation), or increment if repetitive training
"""
if args.folds is not None:
set_seed(args.seed)
else:
args.seed = args.seed + 1
set_seed(args.seed)
"""
Load the data
"""
print_frm('Loading data')
transform = Compose([Rotate90(), Flip(prob=0.5, dim=0), Flip(prob=0.5, dim=1), RandomDeformation(),
AddNoise(sigma_max=0.05)])
train = SPARCCDataset(args.data_dir, args.si_joint_model, args.model_checkpoint_illium,
args.model_checkpoint_sacrum, range_split=range_split[0], folds=args.folds, f=i,
train=True, transform=transform, seed=args.seed, mode=INFLAMMATION_MODULE,
use_t1_input=not args.omit_t1_input, use_t2_input=not args.omit_t2_input,
apply_weighting=not args.omit_weighting)
val = SPARCCDataset(args.data_dir, args.si_joint_model, args.model_checkpoint_illium,
args.model_checkpoint_sacrum, range_split=range_split[1], folds=args.folds, f=i,
train=False, seed=args.seed, mode=INFLAMMATION_MODULE, use_t1_input=not args.omit_t1_input,
use_t2_input=not args.omit_t2_input, apply_weighting=not args.omit_weighting)
print_frm('Train data distribution: Infl: %.2f - Non-infl: %.2f' % (100*np.mean(train.q_scores),
100*np.mean(1-train.q_scores)))
print_frm('Val data distribution: Infl: %.2f - Non-infl: %.2f' % (100*np.mean(val.q_scores),
100*np.mean(1-val.q_scores)))
if args.folds is None:
test = SPARCCDataset(args.data_dir, args.si_joint_model, args.model_checkpoint_illium,
args.model_checkpoint_sacrum, range_split=range_split[2], seed=args.seed,
mode=INFLAMMATION_MODULE, use_t1_input=not args.omit_t1_input,
use_t2_input=not args.omit_t2_input, apply_weighting=not args.omit_weighting)
print_frm('Test data distribution: Infl: %.2f - Non-infl: %.2f' % (100*np.mean(test.q_scores),
100*np.mean(1-test.q_scores)))
"""
Build the network
"""
print_frm('Building the network')
weights = train.score_weights[0]
net = Inflammation_CNN(backbone=args.backbone, lr=args.lr, use_t1_input=not args.omit_t1_input,
use_t2_input=not args.omit_t2_input, weights=weights)
print_frm('Balancing weights for loss function: %s' % (weights))
"""
Train the inflammation network
"""
print_frm('Starting training of the inflammation network')
trainer = _train_module(net, train, val, args)
print_frm('Testing network')
_test_module(trainer, net, val if args.folds is not None else test, args)
metrics.append([float(trainer.logged_metrics['test/' + m].cpu()) for m in METRICS])
"""
Save the final model
"""
print_frm('Saving final model')
shutil.copyfile(trainer.checkpoint_callback.best_model_path, os.path.join(trainer.log_dir, OPTIMAL_CKPT))
"""
Clean up
"""
print_frm('Cleaning up')
if args.clean_up:
os.system('rm -r ' + os.path.join(trainer.log_dir, 'checkpoints'))
"""
Report final performance results
"""
metrics = np.asarray(metrics)
metrics_avg = np.mean(metrics, axis=0)
print_frm('Final performance report:')
print_frm('=========================')
for i, m in enumerate(METRICS):
print_frm(' %s: %f' % (m, metrics_avg[i]))
|
[
"pytorch_lightning.callbacks.ModelCheckpoint",
"pytorch_lightning.Trainer",
"argparse.ArgumentParser",
"torch.utils.data.DataLoader",
"models.sparcc_cnn.Inflammation_CNN",
"neuralnets.util.tools.set_seed",
"neuralnets.util.io.print_frm",
"os.path.join",
"data.datasets.SPARCCDataset"
] |
[((762, 918), 'torch.utils.data.DataLoader', 'DataLoader', (['train_data'], {'batch_size': '(factor[INFLAMMATION_MODULE] * args.train_batch_size)', 'num_workers': 'args.num_workers', 'pin_memory': '(True)', 'shuffle': '(True)'}), '(train_data, batch_size=factor[INFLAMMATION_MODULE] * args.\n train_batch_size, num_workers=args.num_workers, pin_memory=True,\n shuffle=True)\n', (772, 918), False, 'from torch.utils.data import DataLoader\n'), ((955, 1090), 'torch.utils.data.DataLoader', 'DataLoader', (['val_data'], {'batch_size': '(factor[INFLAMMATION_MODULE] * args.test_batch_size)', 'num_workers': 'args.num_workers', 'pin_memory': '(True)'}), '(val_data, batch_size=factor[INFLAMMATION_MODULE] * args.\n test_batch_size, num_workers=args.num_workers, pin_memory=True)\n', (965, 1090), False, 'from torch.utils.data import DataLoader\n'), ((1138, 1216), 'pytorch_lightning.callbacks.ModelCheckpoint', 'ModelCheckpoint', ([], {'save_top_k': '(5)', 'verbose': '(True)', 'monitor': '"""val/roc-auc"""', 'mode': '"""max"""'}), "(save_top_k=5, verbose=True, monitor='val/roc-auc', mode='max')\n", (1153, 1216), False, 'from pytorch_lightning.callbacks import ModelCheckpoint\n'), ((1231, 1560), 'pytorch_lightning.Trainer', 'pl.Trainer', ([], {'max_epochs': 'args.epochs', 'gpus': 'args.gpus', 'accelerator': 'args.accelerator', 'default_root_dir': 'args.log_dir', 'flush_logs_every_n_steps': 'args.log_freq', 'log_every_n_steps': 'args.log_freq', 'callbacks': '[checkpoint_callback]', 'progress_bar_refresh_rate': 'args.log_refresh_rate', 'num_sanity_val_steps': '(0)', 'deterministic': '(True)'}), '(max_epochs=args.epochs, gpus=args.gpus, accelerator=args.\n accelerator, default_root_dir=args.log_dir, flush_logs_every_n_steps=\n args.log_freq, log_every_n_steps=args.log_freq, callbacks=[\n checkpoint_callback], progress_bar_refresh_rate=args.log_refresh_rate,\n num_sanity_val_steps=0, deterministic=True)\n', (1241, 1560), True, 'import pytorch_lightning as pl\n'), ((1890, 2026), 
'torch.utils.data.DataLoader', 'DataLoader', (['test_data'], {'batch_size': '(factor[INFLAMMATION_MODULE] * args.test_batch_size)', 'num_workers': 'args.num_workers', 'pin_memory': '(True)'}), '(test_data, batch_size=factor[INFLAMMATION_MODULE] * args.\n test_batch_size, num_workers=args.num_workers, pin_memory=True)\n', (1900, 2026), False, 'from torch.utils.data import DataLoader\n'), ((2177, 2202), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {}), '()\n', (2200, 2202), False, 'import argparse\n'), ((9464, 9502), 'neuralnets.util.io.print_frm', 'print_frm', (['"""Final performance report:"""'], {}), "('Final performance report:')\n", (9473, 9502), False, 'from neuralnets.util.io import print_frm\n'), ((9507, 9545), 'neuralnets.util.io.print_frm', 'print_frm', (['"""========================="""'], {}), "('=========================')\n", (9516, 9545), False, 'from neuralnets.util.io import print_frm\n'), ((5513, 5526), 'neuralnets.util.io.print_frm', 'print_frm', (['""""""'], {}), "('')\n", (5522, 5526), False, 'from neuralnets.util.io import print_frm\n'), ((5535, 5602), 'neuralnets.util.io.print_frm', 'print_frm', (["('Start processing %s %d/%d ...' % (rep_str, i + 1, reps))"], {}), "('Start processing %s %d/%d ...' 
% (rep_str, i + 1, reps))\n", (5544, 5602), False, 'from neuralnets.util.io import print_frm\n'), ((5609, 5622), 'neuralnets.util.io.print_frm', 'print_frm', (['""""""'], {}), "('')\n", (5618, 5622), False, 'from neuralnets.util.io import print_frm\n'), ((5942, 5967), 'neuralnets.util.io.print_frm', 'print_frm', (['"""Loading data"""'], {}), "('Loading data')\n", (5951, 5967), False, 'from neuralnets.util.io import print_frm\n'), ((6148, 6521), 'data.datasets.SPARCCDataset', 'SPARCCDataset', (['args.data_dir', 'args.si_joint_model', 'args.model_checkpoint_illium', 'args.model_checkpoint_sacrum'], {'range_split': 'range_split[0]', 'folds': 'args.folds', 'f': 'i', 'train': '(True)', 'transform': 'transform', 'seed': 'args.seed', 'mode': 'INFLAMMATION_MODULE', 'use_t1_input': '(not args.omit_t1_input)', 'use_t2_input': '(not args.omit_t2_input)', 'apply_weighting': '(not args.omit_weighting)'}), '(args.data_dir, args.si_joint_model, args.\n model_checkpoint_illium, args.model_checkpoint_sacrum, range_split=\n range_split[0], folds=args.folds, f=i, train=True, transform=transform,\n seed=args.seed, mode=INFLAMMATION_MODULE, use_t1_input=not args.\n omit_t1_input, use_t2_input=not args.omit_t2_input, apply_weighting=not\n args.omit_weighting)\n', (6161, 6521), False, 'from data.datasets import SPARCCDataset\n'), ((6633, 6986), 'data.datasets.SPARCCDataset', 'SPARCCDataset', (['args.data_dir', 'args.si_joint_model', 'args.model_checkpoint_illium', 'args.model_checkpoint_sacrum'], {'range_split': 'range_split[1]', 'folds': 'args.folds', 'f': 'i', 'train': '(False)', 'seed': 'args.seed', 'mode': 'INFLAMMATION_MODULE', 'use_t1_input': '(not args.omit_t1_input)', 'use_t2_input': '(not args.omit_t2_input)', 'apply_weighting': '(not args.omit_weighting)'}), '(args.data_dir, args.si_joint_model, args.\n model_checkpoint_illium, args.model_checkpoint_sacrum, range_split=\n range_split[1], folds=args.folds, f=i, train=False, seed=args.seed,\n mode=INFLAMMATION_MODULE, 
use_t1_input=not args.omit_t1_input,\n use_t2_input=not args.omit_t2_input, apply_weighting=not args.\n omit_weighting)\n', (6646, 6986), False, 'from data.datasets import SPARCCDataset\n'), ((8190, 8223), 'neuralnets.util.io.print_frm', 'print_frm', (['"""Building the network"""'], {}), "('Building the network')\n", (8199, 8223), False, 'from neuralnets.util.io import print_frm\n'), ((8279, 8427), 'models.sparcc_cnn.Inflammation_CNN', 'Inflammation_CNN', ([], {'backbone': 'args.backbone', 'lr': 'args.lr', 'use_t1_input': '(not args.omit_t1_input)', 'use_t2_input': '(not args.omit_t2_input)', 'weights': 'weights'}), '(backbone=args.backbone, lr=args.lr, use_t1_input=not args.\n omit_t1_input, use_t2_input=not args.omit_t2_input, weights=weights)\n', (8295, 8427), False, 'from models.sparcc_cnn import Inflammation_CNN\n'), ((8462, 8524), 'neuralnets.util.io.print_frm', 'print_frm', (["('Balancing weights for loss function: %s' % weights)"], {}), "('Balancing weights for loss function: %s' % weights)\n", (8471, 8524), False, 'from neuralnets.util.io import print_frm\n'), ((8603, 8661), 'neuralnets.util.io.print_frm', 'print_frm', (['"""Starting training of the inflammation network"""'], {}), "('Starting training of the inflammation network')\n", (8612, 8661), False, 'from neuralnets.util.io import print_frm\n'), ((8725, 8753), 'neuralnets.util.io.print_frm', 'print_frm', (['"""Testing network"""'], {}), "('Testing network')\n", (8734, 8753), False, 'from neuralnets.util.io import print_frm\n'), ((8994, 9025), 'neuralnets.util.io.print_frm', 'print_frm', (['"""Saving final model"""'], {}), "('Saving final model')\n", (9003, 9025), False, 'from neuralnets.util.io import print_frm\n'), ((9194, 9218), 'neuralnets.util.io.print_frm', 'print_frm', (['"""Cleaning up"""'], {}), "('Cleaning up')\n", (9203, 9218), False, 'from neuralnets.util.io import print_frm\n'), ((9590, 9635), 'neuralnets.util.io.print_frm', 'print_frm', (["(' %s: %f' % (m, metrics_avg[i]))"], {}), "(' 
%s: %f' % (m, metrics_avg[i]))\n", (9599, 9635), False, 'from neuralnets.util.io import print_frm\n'), ((5779, 5798), 'neuralnets.util.tools.set_seed', 'set_seed', (['args.seed'], {}), '(args.seed)\n', (5787, 5798), False, 'from neuralnets.util.tools import set_seed\n'), ((5863, 5882), 'neuralnets.util.tools.set_seed', 'set_seed', (['args.seed'], {}), '(args.seed)\n', (5871, 5882), False, 'from neuralnets.util.tools import set_seed\n'), ((7516, 7829), 'data.datasets.SPARCCDataset', 'SPARCCDataset', (['args.data_dir', 'args.si_joint_model', 'args.model_checkpoint_illium', 'args.model_checkpoint_sacrum'], {'range_split': 'range_split[2]', 'seed': 'args.seed', 'mode': 'INFLAMMATION_MODULE', 'use_t1_input': '(not args.omit_t1_input)', 'use_t2_input': '(not args.omit_t2_input)', 'apply_weighting': '(not args.omit_weighting)'}), '(args.data_dir, args.si_joint_model, args.\n model_checkpoint_illium, args.model_checkpoint_sacrum, range_split=\n range_split[2], seed=args.seed, mode=INFLAMMATION_MODULE, use_t1_input=\n not args.omit_t1_input, use_t2_input=not args.omit_t2_input,\n apply_weighting=not args.omit_weighting)\n', (7529, 7829), False, 'from data.datasets import SPARCCDataset\n'), ((9095, 9138), 'os.path.join', 'os.path.join', (['trainer.log_dir', 'OPTIMAL_CKPT'], {}), '(trainer.log_dir, OPTIMAL_CKPT)\n', (9107, 9138), False, 'import os\n'), ((9278, 9322), 'os.path.join', 'os.path.join', (['trainer.log_dir', '"""checkpoints"""'], {}), "(trainer.log_dir, 'checkpoints')\n", (9290, 9322), False, 'import os\n')]
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# middleware.py
#
# Authors:
# - <NAME> <<EMAIL>>
#
import logging
from django.contrib import auth
from django.core.exceptions import ImproperlyConfigured
from django.shortcuts import redirect, get_object_or_404
from django.urls import resolve, reverse
from django.utils.deprecation import MiddlewareMixin
from lti_app.models import ActivityOutcome
from activity.models import Activity
logger = logging.getLogger(__name__)
class LTIAuthMiddleware(MiddlewareMixin):
    """
    Middleware for authenticating users via an LTI launch URL.
    If the request is an LTI launch request, then this middleware attempts to
    authenticate the username and signature passed in the POST data.
    If authentication is successful, the user is automatically logged in to
    persist the user in the session.
    The LTI launch parameter dict is stored in the session keyed with the
    resource_link_id to uniquely identify LTI launches of the LTI producer.
    The LTI launch parameter dict is also set as the 'LTI' attribute on the
    current request object to simplify access to the parameters.
    The current request object is set as a thread local attribute so that the
    monkey-patching of django's reverse() function (see ./__init__.py) can access
    it in order to retrieve the current resource_link_id.
    """
    def process_request(self, request):
        """Authenticate LTI launch POSTs and log the user in.

        Always sets two booleans on the request:
        ``lti_initial_request`` (True only for a basic-lti-launch-request POST)
        and ``lti_authentication_successful``.  On a successful launch the
        LTI parameters are collected into ``lti_launch``, the user's role is
        synced, and an "activity:play" target triggers Activity /
        ActivityOutcome synchronisation plus a redirect to the activity.
        Non-launch requests pass through untouched (implicit ``None`` return).
        """
        # AuthenticationMiddleware is required so that request.user exists.
        if not hasattr(request, 'user'): # pragma: no cover
            logger.debug('improperly configured: request has no user attr')
            raise ImproperlyConfigured(
                "The Django LTI auth middleware requires the"
                " authentication middleware to be installed. Edit your"
                " MIDDLEWARE_CLASSES setting to insert"
                " 'django.contrib.auth.middleware.AuthenticationMiddleware'"
                " before the LTIAuthMiddleware class.")
        # These parameters should exist outside of session
        request.lti_initial_request = False
        request.lti_authentication_successful = False
        if request.method == 'POST' \
                and request.POST.get('lti_message_type') == 'basic-lti-launch-request':
            request.lti_initial_request = True
            # authenticate and log the user in
            user = auth.authenticate(request=request)
            if user is not None:
                # User is valid.  Set request.user and persist user in the session
                # by logging the user in.
                request.lti_authentication_successful = True
                request.user = user
                auth.login(request, user)
                resource_link_id = request.POST.get('resource_link_id')
                # Collect every standard/Canvas LTI launch parameter from the
                # POST body; absent keys become None (or [''] for the two
                # comma-split role lists).
                lti_launch = {
                    'context_id': request.POST.get('context_id'),
                    'context_label': request.POST.get('context_label'),
                    'context_title': request.POST.get('context_title'),
                    'context_type': request.POST.get('context_type'),
                    'custom_canvas_account_id': request.POST.get(
                        'custom_canvas_account_id'),
                    'custom_canvas_account_sis_id': request.POST.get(
                        'custom_canvas_account_sis_id'),
                    'custom_canvas_api_domain': request.POST.get(
                        'custom_canvas_api_domain'),
                    'custom_canvas_course_id': request.POST.get(
                        'custom_canvas_course_id'),
                    'custom_canvas_enrollment_state': request.POST.get(
                        'custom_canvas_enrollment_state'),
                    'custom_canvas_membership_roles': request.POST.get(
                        'custom_canvas_membership_roles', '').split(','),
                    'custom_canvas_user_id': request.POST.get(
                        'custom_canvas_user_id'),
                    'custom_canvas_user_login_id': request.POST.get(
                        'custom_canvas_user_login_id'),
                    'launch_presentation_css_url': request.POST.get(
                        'launch_presentation_css_url'),
                    'launch_presentation_document_target': request.POST.get(
                        'launch_presentation_document_target'),
                    'launch_presentation_height': request.POST.get(
                        'launch_presentation_height'),
                    'launch_presentation_locale': request.POST.get(
                        'launch_presentation_locale'),
                    'launch_presentation_return_url': request.POST.get(
                        'launch_presentation_return_url'),
                    'launch_presentation_width': request.POST.get(
                        'launch_presentation_width'),
                    'lis_course_offering_sourcedid': request.POST.get(
                        'lis_course_offering_sourcedid'),
                    'lis_outcome_service_url': request.POST.get(
                        'lis_outcome_service_url'),
                    'lis_result_sourcedid': request.POST.get(
                        'lis_result_sourcedid'),
                    'lis_person_contact_email_primary': request.POST.get(
                        'lis_person_contact_email_primary'),
                    'lis_person_name_family': request.POST.get(
                        'lis_person_name_family'),
                    'lis_person_name_full': request.POST.get(
                        'lis_person_name_full'),
                    'lis_person_name_given': request.POST.get(
                        'lis_person_name_given'),
                    'lis_person_sourcedid': request.POST.get(
                        'lis_person_sourcedid'),
                    'lti_message_type': request.POST.get('lti_message_type'),
                    'oauth_consumer_key': request.POST.get(
                        'oauth_consumer_key'),
                    'resource_link_description': request.POST.get(
                        'resource_link_description'),
                    'resource_link_id': resource_link_id,
                    'resource_link_title': request.POST.get(
                        'resource_link_title'),
                    'roles': request.POST.get('roles', '').split(
                        ','),
                    'selection_directive': request.POST.get(
                        'selection_directive'),
                    'tool_consumer_info_product_family_code': request.POST.get(
                        'tool_consumer_info_product_family_code'),
                    'tool_consumer_info_version': request.POST.get(
                        'tool_consumer_info_version'),
                    'tool_consumer_instance_contact_email': request.POST.get(
                        'tool_consumer_instance_contact_email'),
                    'tool_consumer_instance_description': request.POST.get(
                        'tool_consumer_instance_description'),
                    'tool_consumer_instance_guid': request.POST.get(
                        'tool_consumer_instance_guid'),
                    'tool_consumer_instance_name': request.POST.get(
                        'tool_consumer_instance_name'),
                    'tool_consumer_instance_url': request.POST.get(
                        'tool_consumer_instance_url'),
                    'user_id': request.POST.get('user_id'),
                    'user_image': request.POST.get('user_image'),
                }
                # Creating and updating data according to lti_launch
                user.profile.set_role_lti(lti_launch)
                urlmatch = resolve(request.path)
                if not urlmatch.app_name or not urlmatch.url_name:
                    # Match lacks an app/url name: treat as a non-activity URL.
                    urlmatch = None
                if urlmatch and urlmatch.app_name + ":" + urlmatch.url_name == "activity:play":
                    activity = get_object_or_404(Activity, id=urlmatch.kwargs['activity_id'])
                    is_course = activity.activity_type == "course"
                    # For a non-course activity, ensure its enclosing course
                    # exists before syncing the activity itself.
                    if not is_course:
                        Activity.get_or_create_course_from_lti(user, lti_launch)
                    activity, _ = Activity.get_or_update_from_lti(request, lti_launch)
                    if not is_course:
                        ActivityOutcome.get_or_create_from_lti(user, lti_launch)
                    return redirect(reverse('activity:play', args=[activity.id]))
            else:
                # User could not be authenticated!
                # Fall through (None): the request continues unauthenticated.
                logger.warning('LTI authentication failed')
|
[
"django.core.exceptions.ImproperlyConfigured",
"activity.models.Activity.get_or_update_from_lti",
"activity.models.Activity.get_or_create_course_from_lti",
"django.urls.reverse",
"django.shortcuts.get_object_or_404",
"django.urls.resolve",
"django.contrib.auth.authenticate",
"lti_app.models.ActivityOutcome.get_or_create_from_lti",
"django.contrib.auth.login",
"logging.getLogger"
] |
[((456, 483), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (473, 483), False, 'import logging\n'), ((1661, 1923), 'django.core.exceptions.ImproperlyConfigured', 'ImproperlyConfigured', (['"""The Django LTI auth middleware requires the authentication middleware to be installed. Edit your MIDDLEWARE_CLASSES setting to insert \'django.contrib.auth.middleware.AuthenticationMiddleware\' before the LTIAuthMiddleware class."""'], {}), '(\n "The Django LTI auth middleware requires the authentication middleware to be installed. Edit your MIDDLEWARE_CLASSES setting to insert \'django.contrib.auth.middleware.AuthenticationMiddleware\' before the LTIAuthMiddleware class."\n )\n', (1681, 1923), False, 'from django.core.exceptions import ImproperlyConfigured\n'), ((2408, 2442), 'django.contrib.auth.authenticate', 'auth.authenticate', ([], {'request': 'request'}), '(request=request)\n', (2425, 2442), False, 'from django.contrib import auth\n'), ((2731, 2756), 'django.contrib.auth.login', 'auth.login', (['request', 'user'], {}), '(request, user)\n', (2741, 2756), False, 'from django.contrib import auth\n'), ((8410, 8431), 'django.urls.resolve', 'resolve', (['request.path'], {}), '(request.path)\n', (8417, 8431), False, 'from django.urls import resolve, reverse\n'), ((8662, 8724), 'django.shortcuts.get_object_or_404', 'get_object_or_404', (['Activity'], {'id': "urlmatch.kwargs['activity_id']"}), "(Activity, id=urlmatch.kwargs['activity_id'])\n", (8679, 8724), False, 'from django.shortcuts import redirect, get_object_or_404\n'), ((8945, 8997), 'activity.models.Activity.get_or_update_from_lti', 'Activity.get_or_update_from_lti', (['request', 'lti_launch'], {}), '(request, lti_launch)\n', (8976, 8997), False, 'from activity.models import Activity\n'), ((8854, 8910), 'activity.models.Activity.get_or_create_course_from_lti', 'Activity.get_or_create_course_from_lti', (['user', 'lti_launch'], {}), '(user, lti_launch)\n', (8892, 8910), False, 'from 
activity.models import Activity\n'), ((9060, 9116), 'lti_app.models.ActivityOutcome.get_or_create_from_lti', 'ActivityOutcome.get_or_create_from_lti', (['user', 'lti_launch'], {}), '(user, lti_launch)\n', (9098, 9116), False, 'from lti_app.models import ActivityOutcome\n'), ((9153, 9197), 'django.urls.reverse', 'reverse', (['"""activity:play"""'], {'args': '[activity.id]'}), "('activity:play', args=[activity.id])\n", (9160, 9197), False, 'from django.urls import resolve, reverse\n')]
|
# Copyright 2020 Graphcore Ltd.
import argparse
import os
import time as time
import numpy as np
import tensorflow as tf
from tensorflow.python.ipu import ipu_compiler, ipu_infeed_queue, loops, utils
from tensorflow.python.ipu.scopes import ipu_scope, ipu_shard
import tensorflow_probability as tfp
# Model and sampling parameters
# Note: increasing model size, number of steps, or dataset size may cause out of memory errors
first_layer_size = 40
num_burnin_steps = 100
num_ipus = 2
num_results = 400
num_leapfrog_steps = 1000
useful_features = 22
num_skip_columns = 2
output_file = "output_samples.txt"
parser = argparse.ArgumentParser()
parser.add_argument(
    "--dataset-dir",
    type=str,
    default=".",
    help="Path to datasets"
)
args = parser.parse_args()
input_file = os.path.join(
    args.dataset_dir, "returns_and_features_for_mcmc.txt"
)
# Print the about message
print("\nMCMC sampling example with TensorFlow Probability\n"
      " Single precision\n"
      f" Number of IPUs {num_ipus} (one MCMC chain per IPU)\n"
      f" Number of results per IPU {num_results}\n"
      f" Number of burn-in steps {num_burnin_steps}\n"
      f" Number of leapfrog steps {num_leapfrog_steps}\n"
      f" First layer size {first_layer_size}")
# Load data
# Tab-separated file with one header row; first num_skip_columns columns
# are identifiers, then one return column, then the feature columns.
raw_data = np.genfromtxt(input_file, skip_header=1,
                      delimiter="\t", dtype='float32')
# Pre-process data
observed_return_ = raw_data[:, num_skip_columns]
observed_features_ = raw_data[:, num_skip_columns+1:]
num_features = raw_data.shape[1] - num_skip_columns - 1
# Optionally keep only the first useful_features feature columns.
if useful_features < num_features:
    num_features = useful_features
    observed_features_ = observed_features_[:, :num_features]
# Model is an MLP with num_features input dims and layer sizes: first_layer_size, 1, 1
num_model_parameters = num_features * first_layer_size + \
    first_layer_size + first_layer_size + 3
# Print dataset parameters
print(" Number of data items {}\n"
      " Number of features per data item {}\n"
      " Number of model parameters {}\n"
      .format(raw_data.shape[0],
              num_features,
              num_model_parameters
              ))
# Import TensorFlow modules
tfd = tfp.distributions
# Suppress warnings
tf.compat.v1.logging.set_verbosity(tf.compat.v1.logging.ERROR)
# Initialize TensorFlow graph and session
# TF1-style graph mode: one global default graph and one session.
tf.reset_default_graph()
config = tf.ConfigProto()
sess = tf.Session(config=config)
# Build the neural network
def bdnn(x, p):
    """Evaluate the Bayesian MLP for inputs ``x`` with flat parameters ``p``.

    ``p`` is a 1-D vector packing, in order: output scale w3 (1), output
    weights w2 (first_layer_size), hidden weights w1
    (num_features * first_layer_size), output bias b3 (1), hidden-output
    bias b2 (1), hidden biases b1 (first_layer_size).
    Returns the squeezed scalar prediction per data row.
    """
    in_dim = num_features
    hidden = first_layer_size
    # Index just past the last hidden-layer weight in the flat vector.
    boundary = hidden + in_dim * hidden
    # Slice the flat parameter vector into weights and biases.
    hidden_w = tf.reshape(p[hidden + 1:boundary + 1], [in_dim, hidden])
    out_w = tf.reshape(p[1:hidden + 1], [hidden, 1])
    scale_w = p[0]
    hidden_b = p[boundary + 3:]
    out_b = tf.expand_dims(p[boundary + 2], 0)
    scale_b = p[boundary + 1]
    # Forward pass: tanh hidden layer, linear output unit, affine rescale.
    activations = tf.tanh(tf.nn.xw_plus_b(x, hidden_w, hidden_b))
    out = tf.nn.xw_plus_b(activations, out_w, out_b)
    out = out * scale_w + scale_b
    return tf.squeeze(out)
# Model posterior log probability
def model_log_prob(ret, feat, p):
    """Posterior log-probability of parameters ``p`` given data.

    Combines an independent Normal(0, 200) prior over all model parameters
    with a Student-T(df=2.2, scale=100) likelihood centred on the network
    prediction for the (0.001-scaled) features.
    """
    # Parameters of distributions
    prior_scale = 200
    studentT_scale = 100
    feature_scale = 0.001
    ones = tf.ones(shape=[num_model_parameters], dtype=tf.float32)
    # Independent Normal prior over every entry of the flat parameter vector.
    prior = tfd.Independent(
        tfd.Normal(loc=0. * ones, scale=prior_scale * ones),
        reinterpreted_batch_ndims=1)
    # Heavy-tailed likelihood centred on the network's alpha/bp estimate.
    alpha_bp_estimate = bdnn(feature_scale * feat, p)
    likelihood = tfd.StudentT(
        df=2.2, loc=alpha_bp_estimate, scale=studentT_scale)
    # Log prior plus summed log likelihood over all data items.
    return prior.log_prob(p) + tf.reduce_sum(likelihood.log_prob(ret))
def build_graph(scope_id):
    """Build one HMC sampling graph under variable scope 'scope<scope_id>'.

    Each call builds an independent chain (one per IPU): the chain state,
    an adaptive step-size variable, and an IPU-compiled
    ``tfp.mcmc.sample_chain`` run.  Returns ``(samples, kernel_results)``.
    """
    with tf.variable_scope('scope'+scope_id, use_resource=True, reuse=tf.AUTO_REUSE):
        # Data items
        observed_return = tf.cast(observed_return_, 'float32')
        observed_features = tf.cast(observed_features_, 'float32')
        # Initial chain state: all model parameters start at zero.
        initial_chain_state = [
            0.0 * tf.ones(shape=[num_model_parameters], dtype=tf.float32)
        ]
        # Bijectors (identity: parameters are sampled unconstrained).
        unconstraining_bijectors = [
            tfp.bijectors.Identity()
        ]
        # Initialize the step_size variable; updated during burn-in by the
        # step-size adaptation policy below.
        with tf.variable_scope(tf.get_variable_scope(), reuse=tf.AUTO_REUSE):
            step_size = tf.get_variable(
                name='step_size',
                initializer=tf.constant(.01, dtype=tf.float32),
                trainable=False,
                use_resource=True
            )
        # Put the graph into a function so it can be compiled for running on IPU
        def hmc_graph():
            # Target log probability function
            def target_log_prob_fn(*args):
                return model_log_prob(observed_return, observed_features, *args)
            # Hamiltonian Monte Carlo kernel with simple step-size adaptation
            # during the burn-in phase (target accept rate 0.2).
            hmc_kernel = tfp.mcmc.TransformedTransitionKernel(
                inner_kernel=tfp.mcmc.HamiltonianMonteCarlo(
                    target_log_prob_fn=target_log_prob_fn,
                    num_leapfrog_steps=num_leapfrog_steps,
                    step_size=step_size,
                    step_size_update_fn=tfp.mcmc.make_simple_step_size_update_policy(
                        target_rate=0.2,
                        num_adaptation_steps=num_burnin_steps,
                        decrement_multiplier=0.1),
                    state_gradients_are_stopped=False),
                bijector=unconstraining_bijectors)
            # Graph to sample from the chain
            return tfp.mcmc.sample_chain(
                num_results=num_results,
                num_burnin_steps=num_burnin_steps,
                current_state=initial_chain_state,
                kernel=hmc_kernel)
        # Compile the graph for the IPU; single-element state list unpacked.
        [p], kernel_results = ipu_compiler.compile(hmc_graph, [])
    return (p, kernel_results)
# Place the graphs on IPUs: one independent chain graph per IPU device.
ops = []
for i in range(num_ipus):
    with ipu_scope('/device:IPU:'+str(i)):
        ops.append(build_graph(scope_id=str(i)))
# Configure IPU
config = utils.create_ipu_config()
# Create num_chips TF devices, with 1 IPU per device
config = utils.auto_select_ipus(config, [1]*num_ipus)
utils.configure_ipu_system(config)
utils.move_variable_initialization_to_cpu()
# Initialize variables
init_g = tf.global_variables_initializer()
sess.run(init_g)
# Warm up (first run triggers IPU compilation; excluded from timing)
print("\nWarming up...")
sess.run(ops)
print("Done\n")
# Sample
print("Sampling...")
start_time = time.time()
results = sess.run(ops)
end_time = time.time()
print("Done\n")
# Concatenate samples from separate MCMC chains
samples = np.concatenate(list(map(lambda x: x[0], results)), axis=0)
# Write samples to file
np.savetxt(output_file, samples, delimiter='\t')
print("Written {} samples to {}".format(samples.shape[0], output_file))
# Print run time
print("Completed in {0:.2f} seconds\n".format(end_time - start_time))
|
[
"argparse.ArgumentParser",
"tensorflow.reset_default_graph",
"tensorflow.reshape",
"tensorflow.get_variable_scope",
"tensorflow.ConfigProto",
"tensorflow_probability.mcmc.sample_chain",
"os.path.join",
"numpy.savetxt",
"numpy.genfromtxt",
"tensorflow.variable_scope",
"tensorflow.python.ipu.ipu_compiler.compile",
"tensorflow.cast",
"tensorflow_probability.bijectors.Identity",
"tensorflow.squeeze",
"tensorflow.ones",
"tensorflow.python.ipu.utils.configure_ipu_system",
"tensorflow.global_variables_initializer",
"tensorflow.Session",
"tensorflow.constant",
"tensorflow_probability.mcmc.make_simple_step_size_update_policy",
"tensorflow.python.ipu.utils.auto_select_ipus",
"tensorflow.expand_dims",
"time.time",
"tensorflow.compat.v1.logging.set_verbosity",
"tensorflow.nn.xw_plus_b",
"tensorflow.python.ipu.utils.move_variable_initialization_to_cpu",
"tensorflow.python.ipu.utils.create_ipu_config"
] |
[((645, 670), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {}), '()\n', (668, 670), False, 'import argparse\n'), ((824, 891), 'os.path.join', 'os.path.join', (['args.dataset_dir', '"""returns_and_features_for_mcmc.txt"""'], {}), "(args.dataset_dir, 'returns_and_features_for_mcmc.txt')\n", (836, 891), False, 'import os\n'), ((1330, 1403), 'numpy.genfromtxt', 'np.genfromtxt', (['input_file'], {'skip_header': '(1)', 'delimiter': '"""\t"""', 'dtype': '"""float32"""'}), "(input_file, skip_header=1, delimiter='\\t', dtype='float32')\n", (1343, 1403), True, 'import numpy as np\n'), ((2299, 2361), 'tensorflow.compat.v1.logging.set_verbosity', 'tf.compat.v1.logging.set_verbosity', (['tf.compat.v1.logging.ERROR'], {}), '(tf.compat.v1.logging.ERROR)\n', (2333, 2361), True, 'import tensorflow as tf\n'), ((2408, 2432), 'tensorflow.reset_default_graph', 'tf.reset_default_graph', ([], {}), '()\n', (2430, 2432), True, 'import tensorflow as tf\n'), ((2443, 2459), 'tensorflow.ConfigProto', 'tf.ConfigProto', ([], {}), '()\n', (2457, 2459), True, 'import tensorflow as tf\n'), ((2468, 2493), 'tensorflow.Session', 'tf.Session', ([], {'config': 'config'}), '(config=config)\n', (2478, 2493), True, 'import tensorflow as tf\n'), ((6293, 6318), 'tensorflow.python.ipu.utils.create_ipu_config', 'utils.create_ipu_config', ([], {}), '()\n', (6316, 6318), False, 'from tensorflow.python.ipu import ipu_compiler, ipu_infeed_queue, loops, utils\n'), ((6383, 6429), 'tensorflow.python.ipu.utils.auto_select_ipus', 'utils.auto_select_ipus', (['config', '([1] * num_ipus)'], {}), '(config, [1] * num_ipus)\n', (6405, 6429), False, 'from tensorflow.python.ipu import ipu_compiler, ipu_infeed_queue, loops, utils\n'), ((6429, 6463), 'tensorflow.python.ipu.utils.configure_ipu_system', 'utils.configure_ipu_system', (['config'], {}), '(config)\n', (6455, 6463), False, 'from tensorflow.python.ipu import ipu_compiler, ipu_infeed_queue, loops, utils\n'), ((6465, 6508), 
'tensorflow.python.ipu.utils.move_variable_initialization_to_cpu', 'utils.move_variable_initialization_to_cpu', ([], {}), '()\n', (6506, 6508), False, 'from tensorflow.python.ipu import ipu_compiler, ipu_infeed_queue, loops, utils\n'), ((6545, 6578), 'tensorflow.global_variables_initializer', 'tf.global_variables_initializer', ([], {}), '()\n', (6576, 6578), True, 'import tensorflow as tf\n'), ((6716, 6727), 'time.time', 'time.time', ([], {}), '()\n', (6725, 6727), True, 'import time as time\n'), ((6765, 6776), 'time.time', 'time.time', ([], {}), '()\n', (6774, 6776), True, 'import time as time\n'), ((6945, 6993), 'numpy.savetxt', 'np.savetxt', (['output_file', 'samples'], {'delimiter': '"""\t"""'}), "(output_file, samples, delimiter='\\t')\n", (6955, 6993), True, 'import numpy as np\n'), ((2636, 2684), 'tensorflow.reshape', 'tf.reshape', (['p[nt + 1:nt + nf * nt + 1]', '[nf, nt]'], {}), '(p[nt + 1:nt + nf * nt + 1], [nf, nt])\n', (2646, 2684), True, 'import tensorflow as tf\n'), ((2687, 2719), 'tensorflow.reshape', 'tf.reshape', (['p[1:nt + 1]', '[nt, 1]'], {}), '(p[1:nt + 1], [nt, 1])\n', (2697, 2719), True, 'import tensorflow as tf\n'), ((2768, 2806), 'tensorflow.expand_dims', 'tf.expand_dims', (['p[nt + nf * nt + 2]', '(0)'], {}), '(p[nt + nf * nt + 2], 0)\n', (2782, 2806), True, 'import tensorflow as tf\n'), ((2901, 2927), 'tensorflow.nn.xw_plus_b', 'tf.nn.xw_plus_b', (['x', 'w2', 'b2'], {}), '(x, w2, b2)\n', (2916, 2927), True, 'import tensorflow as tf\n'), ((2961, 2974), 'tensorflow.squeeze', 'tf.squeeze', (['x'], {}), '(x)\n', (2971, 2974), True, 'import tensorflow as tf\n'), ((2864, 2890), 'tensorflow.nn.xw_plus_b', 'tf.nn.xw_plus_b', (['x', 'w1', 'b1'], {}), '(x, w1, b1)\n', (2879, 2890), True, 'import tensorflow as tf\n'), ((3887, 3964), 'tensorflow.variable_scope', 'tf.variable_scope', (["('scope' + scope_id)"], {'use_resource': '(True)', 'reuse': 'tf.AUTO_REUSE'}), "('scope' + scope_id, use_resource=True, reuse=tf.AUTO_REUSE)\n", (3904, 3964), True, 
'import tensorflow as tf\n'), ((4015, 4051), 'tensorflow.cast', 'tf.cast', (['observed_return_', '"""float32"""'], {}), "(observed_return_, 'float32')\n", (4022, 4051), True, 'import tensorflow as tf\n'), ((4081, 4119), 'tensorflow.cast', 'tf.cast', (['observed_features_', '"""float32"""'], {}), "(observed_features_, 'float32')\n", (4088, 4119), True, 'import tensorflow as tf\n'), ((6033, 6068), 'tensorflow.python.ipu.ipu_compiler.compile', 'ipu_compiler.compile', (['hmc_graph', '[]'], {}), '(hmc_graph, [])\n', (6053, 6068), False, 'from tensorflow.python.ipu import ipu_compiler, ipu_infeed_queue, loops, utils\n'), ((4346, 4370), 'tensorflow_probability.bijectors.Identity', 'tfp.bijectors.Identity', ([], {}), '()\n', (4368, 4370), True, 'import tensorflow_probability as tfp\n'), ((5766, 5906), 'tensorflow_probability.mcmc.sample_chain', 'tfp.mcmc.sample_chain', ([], {'num_results': 'num_results', 'num_burnin_steps': 'num_burnin_steps', 'current_state': 'initial_chain_state', 'kernel': 'hmc_kernel'}), '(num_results=num_results, num_burnin_steps=\n num_burnin_steps, current_state=initial_chain_state, kernel=hmc_kernel)\n', (5787, 5906), True, 'import tensorflow_probability as tfp\n'), ((4205, 4260), 'tensorflow.ones', 'tf.ones', ([], {'shape': '[num_model_parameters]', 'dtype': 'tf.float32'}), '(shape=[num_model_parameters], dtype=tf.float32)\n', (4212, 4260), True, 'import tensorflow as tf\n'), ((4452, 4475), 'tensorflow.get_variable_scope', 'tf.get_variable_scope', ([], {}), '()\n', (4473, 4475), True, 'import tensorflow as tf\n'), ((3332, 3387), 'tensorflow.ones', 'tf.ones', ([], {'shape': '[num_model_parameters]', 'dtype': 'tf.float32'}), '(shape=[num_model_parameters], dtype=tf.float32)\n', (3339, 3387), True, 'import tensorflow as tf\n'), ((3448, 3503), 'tensorflow.ones', 'tf.ones', ([], {'shape': '[num_model_parameters]', 'dtype': 'tf.float32'}), '(shape=[num_model_parameters], dtype=tf.float32)\n', (3455, 3503), True, 'import tensorflow as tf\n'), ((4605, 
4640), 'tensorflow.constant', 'tf.constant', (['(0.01)'], {'dtype': 'tf.float32'}), '(0.01, dtype=tf.float32)\n', (4616, 4640), True, 'import tensorflow as tf\n'), ((5385, 5515), 'tensorflow_probability.mcmc.make_simple_step_size_update_policy', 'tfp.mcmc.make_simple_step_size_update_policy', ([], {'target_rate': '(0.2)', 'num_adaptation_steps': 'num_burnin_steps', 'decrement_multiplier': '(0.1)'}), '(target_rate=0.2,\n num_adaptation_steps=num_burnin_steps, decrement_multiplier=0.1)\n', (5429, 5515), True, 'import tensorflow_probability as tfp\n')]
|
import hashlib
import os
from flask import current_app, url_for
cache_busting_values = {}
class CachebustStaticAssets(object):
    """Flask extension that overrides Jinja's ``url_for`` with a
    cachebusting variant (``hashed_url_for``)."""

    def __init__(self, app=None):
        """Remember the app; register immediately when one is supplied."""
        self.app = app
        if app is not None:
            self.init_app(app)

    def init_app(self, app):
        """Install a template context processor exposing the hashed url_for."""
        def override_url_for():
            return {"url_for": hashed_url_for}
        app.context_processor(override_url_for)
def hashed_url_for(endpoint, **values):
    """Cachebusting drop-in for Flask's ``url_for``.

    For static files, appends a ``cache`` query parameter carrying the md5
    of the file on disk, forcing browsers to re-download changed assets.
    Hashes are memoized in ``cache_busting_values`` so each file is only
    hashed once per process.
    """
    if endpoint == "static":
        filename = values.get("filename")
        if filename:
            file_path = os.path.join(
                current_app.root_path, current_app.static_folder, filename
            )
            if os.path.isfile(file_path):
                # Memoize per-path hashes so we md5 each file at most once.
                file_hash = cache_busting_values.get(file_path)
                if file_hash is None:
                    file_hash = md5_for_file(file_path, hexdigest=True)
                    cache_busting_values[file_path] = file_hash
                values["cache"] = file_hash
    return url_for(endpoint, **values)
def md5_for_file(path, block_size=256 * 128, hexdigest=False):
    """Compute the MD5 digest of the file at ``path``.

    Reads the file in ``block_size`` chunks so large files are never held
    in memory all at once (block size should be a multiple of the
    filesystem block size for best throughput).

    Returns the raw digest bytes, or the hex string when ``hexdigest`` is
    true.
    """
    digest = hashlib.md5()
    with open(path, "rb") as stream:
        while True:
            chunk = stream.read(block_size)
            if not chunk:
                break
            digest.update(chunk)
    return digest.hexdigest() if hexdigest else digest.digest()
|
[
"os.path.isfile",
"flask.url_for",
"hashlib.md5",
"os.path.join"
] |
[((1351, 1378), 'flask.url_for', 'url_for', (['endpoint'], {}), '(endpoint, **values)\n', (1358, 1378), False, 'from flask import current_app, url_for\n'), ((1659, 1672), 'hashlib.md5', 'hashlib.md5', ([], {}), '()\n', (1670, 1672), False, 'import hashlib\n'), ((733, 805), 'os.path.join', 'os.path.join', (['current_app.root_path', 'current_app.static_folder', 'filename'], {}), '(current_app.root_path, current_app.static_folder, filename)\n', (745, 805), False, 'import os\n'), ((822, 847), 'os.path.isfile', 'os.path.isfile', (['file_path'], {}), '(file_path)\n', (836, 847), False, 'import os\n')]
|
import os
os.environ['TF_CPP_MIN_LOG_LEVEL'] = '2'
# os.environ['CUDA_DEVICE_ORDER'] = 'PCI_BUS_ID'
# os.environ['CUDA_VISIBLE_DEVICES'] = '0'
import json
import time
import argparse
from pathlib import Path
import random
import numpy as np
import tensorflow as tf
tf.autograph.set_verbosity(3) # 0: debug, 1: info, 2: warning, 3: error
from src.models.encoder import Encoder
from src.models.RACL import RACL
from src.utils import (
load_config,
split_documents, read_data, reverse_unk,
decode_results, format_results, dict2html
)
def load_basic_arguments(parser):
    """Register the RACL model CLI arguments, parse them, attach derived
    inference-mode settings, and seed all RNGs (random / numpy / tf).

    Returns the parsed argparse.Namespace (``opt``).
    """
    # Define arguments
    parser.add_argument('--model', default='racl', type=str, help='model name')
    parser.add_argument('--max_sentence_len', default=156, type=int, help='maximum number of words in sentence')
    parser.add_argument('--embedding_dim', default=768, type=int, help='embedding dimension')
    parser.add_argument('--n_interactions', default=6, type=int, help='number of RACL blocks to interact')
    parser.add_argument('--n_filters', default=96, type=int, help='number of filters in convolution')
    parser.add_argument('--kernel_size', default=11, type=int, help='kernel size in convolution')
    parser.add_argument('--random_seed', default=4_10_20, type=int, help='random seed')
    # NOTE(review): argparse `type=bool` is a known pitfall — bool('False')
    # is True, so any non-empty CLI value enables this flag; confirm intent.
    parser.add_argument('--include_opinion', default=True, type=bool, help='whether to use opinion for model')
    parser.add_argument('--random_type', default='normal', type=str, help='random type: uniform or normal (default)')
    parser.add_argument('--ckpt', default=798, type=int, help='checkpoint id to load weights')
    opt = parser.parse_args()
    # Fixed inference-mode settings (not exposed on the CLI).
    opt.n_classes = 3
    opt.is_training = False
    opt.is_evaluating = False
    opt.label_smoothing = False
    opt.keep_prob_1, opt.keep_prob_2 = 1., 1.
    # Seed every RNG source for reproducible predictions.
    random.seed(opt.random_seed)
    np.random.seed(opt.random_seed)
    tf.random.set_seed(opt.random_seed)
    return opt
# Samples for prediction
documents = [
# 'dessert was also to die for',
# 'sushi so fresh that it crunches in your mouth',
# 'in fact , this was not a nicoise salad and was barely eatable',
# "the two waitress 's looked like they had been sucking on lemons",
"the absence of halal food - not even for room service",
"la foresto de halalaj manĝaĵoj - eĉ ne por ĉambroservo",
"عدم وجود الطعام الحلال - ولا حتى لخدمة الغرف",
"អវត្ដមាននៃអាហារហាឡាល់ - មិនសូម្បីតែសម្រាប់សេវាកម្មបន្ទប់",
"ການຂາດອາຫານຮາລານ - ບໍ່ແມ່ນແຕ່ ສຳ ລັບການບໍລິການຫ້ອງ",
"халал тағамның болмауы - тіпті бөлме қызметтері үшін де емес",
"отсутствие халяльной еды - даже для обслуживания номеров",
"die afwesigheid van halal-kos - nie eens vir kamerdiens nie",
"l'assenza di cibo halal - nemmeno per il servizio in camera",
"ハラルフードがない-ルームサービスでもない",
"할랄 음식의 부재-룸 서비스조차도",
"la ausencia de comida halal, ni siquiera para el servicio de habitaciones",
"sự vắng mặt của thức ăn halal - thậm chí không có dịch vụ ăn uống tại phòng",
# "Have to travel out in order to get food",
# "Smell of the pillows... smelt like someone odour",
# " Very noisy outside the room, found a cockroaches in bathroom, the condition did not works whole nights, very hot can't sleep",
# "I had to stay here due to holiday inn transferring me here because they were closed for renovations. First I am pist because this hotel stinks of weed, my room was not very clean and due to Covid you would think the room would be super clean but nope wrappers all over the place towels had stains, to top it off I even found bugs in my room. I am disgusted. The service is horrible. “There was never a manager on duty” I even reached out to them in email and still no reply from them so they clearly don’t care. Avoid this hotel there are so many other options by the airport that this one poor excuse for cleanliness and bugs they do not deserve a dime. They don’t fix their problems and a manager is never reachable",
# "First impression is the hotel seem to be in need of an upgrade. The grounds did not feel welcoming on the exterior. The interior had carpet coming up in the hallway, I was on the third floor. It had a bad smell that hits you in the face as soon as you get off the elevator. The rooms was decent with a nice size television, desk and a refrigerator but lacked cleanliness. We couldn't shower because the tubes were GROSS. It looked as if it hadn't been properly cleaned for months! You can see the filth buildup YUCK! This is very concerning considering the month I traveled was during the covid-19 pandemic. If this hotel is not properly cleaning guest rooms than are they really practicing safe measures during a global coronavirus pandemic?",
# "Small rooms, restaurant offers the best of microwaved food and wifi is poor. Staff set engaged, but this establishment needs investment and attention to the the customer experience. Plenty of examples where the site could use a goos cleaning - including the restaurant.",
# "I had a horrible check-in experience at this crown plaza. The manager at night shift was exceptionally rude. Just because it was night and I was tired, I stayed there. I checked out next day and went to The Renaissance across the street.",
# "DIRTY FILTHY DISGUSTING!!! Hair and mold in the bathroom, DIRTY carpeting, smells of cigarette smoke and my daughter woke up with bug bites all over her legs!!! Front desk was an absolute joke! Unprofessional rude and lazy!! Travelers BEWARE!!",
# "Called to say my flight is cancelled because of weather ,can you change to next day or refund.before I could complete the sentence they cancelled my reservation and hung up.i know the hotel room was given to somebody else.i cannot believe the service was from very reputable company like yours",
# "The value for the room and the service was very good but the Furnishings in the room is very outdated and more out. The carpet has been replaced and the linen and the bathtub was spotless. Restaurant bar",
# "The Crowne Plaza is located near the newark airport. The hotel offers a transfer ( i got it on my way back). The rooms are small but the bed is very comfortable. Bathroom regular. Also offers a transfer to the outlet nearby but only in 2 specific times a day.",
# "We stayed one night (thankfully) as there was a lot of noise from airplanes taking off and landing and from traffic on the road nearby. The room was very nice with comfortable bed. The shower was over the bath",
# "I visited this hotel with 6 family members in jan 2020. we reached jetlagged early in the morning to be greeted by an extremely rude lady whose name started with Q. I saw her even mocking a few clients. Rooms were clean. Sleep quality was nice Not many eating options around hotel for breakfast, except the hotel itself. In evening one can walk out towards quay and be delighted with so many restaurants. over all a an average hotel BUT the RUDEST STAFF i have ever seen. STAY AWAY IF YOU ANYOTHER OPTION.",
# "Hotel was very crowded and so called club lounge was so crowded that we couldn't use 20 minute wait for breakfast in main restaurant Hotel room small and basic - not luxury Pool good and hotel location excellent",
# "The hotel is actually <NAME> not <NAME> as the name claims. I had booked a room with a king size bed but they could only give me twin beds on the first night so I had to move rooms on the second day. All of the rooms I saw were tired with very bland decor and badly in need of a refresh. I also experienced a lot of noise from neighbouring rooms",
# "I do no understand why you are charging me USD 100 (66% of original room charge) because I have Netherlands nationality but booked my room stating my residential address in Thailand, where I have lived for the last 13 years",
# "Check in was appalling ! Checked into a deluxe room but was given two single beds!! Went downstairs to speak to reception and they told me only room they have is a smoking room which was not practical!!! Then had to sleep there and next day await a room change!!! Which was chased by us as no one remembered the next day!!",
# "I would not recommend this hotel, it is seriously understaffed the restaurant is small for the size of the hotel which results in the tables being too close together. The restaurant staff tried their best but there just weren't enough of them",
# "nice bar and front desk staff members happy faces they made me feel like a vip. update! hotel is dark and old. bathroom was tiny, dark and poor design. elevator was slow. hotel facilities and staff were excellent",
]
def predict(parser, args):
    """Predict from command line and write the response output as html + json.

    Reads the model configuration, tokenizes the module-level ``documents``,
    runs the RACL model and dumps the decoded results to
    ``<predictions>/case_study_<task>.json`` and ``.html``.

    Parameters
    ----------
    parser :
        argparse parser holding the remaining CLI options consumed by
        ``load_basic_arguments``.
    args :
        args.config_path : str
            path to config yml e.g. /production/model_config.yml
        args.log_level : str
            'debug', 'info', or 'warning' level for root logger and all handlers
    """
    config = load_config(Path(args.config_path))
    opt = load_basic_arguments(parser)

    # Apply every model_params entry from the config onto the options object.
    # BUG FIX: the original `opt.key = value` always assigned an attribute
    # literally named "key"; setattr applies each configured key (e.g. `ckpt`,
    # which is read below) as intended.
    for key, value in config["model_params"].items():
        print(f"Key: {key} - Value: {value}")
        setattr(opt, key, value)

    # Define useful directories
    predicts_dir = config["paths"]["predictions"]
    artefacts_dir = config["paths"]["artefacts"]
    checkpoint_dir = config["paths"]["checkpoint"]
    opt.ckpt_path = os.path.join(checkpoint_dir, f"RACL-epoch={opt.ckpt:03d}.h5")

    # Split documents (module-level input list) into sentences
    sentences, sent2doc = split_documents(documents)
    opt.batch_size = len(sentences)

    # Load Tokenizer and Encoder
    print("\n\n\nLoading Encoder ...")
    sbert_version = 'distilUSE'
    sbert_dir = os.path.join(artefacts_dir, sbert_version)
    encoder = Encoder(sbert_dir)

    # Tokenize and embed each sentence; reshape to the model's input layout
    start_time = time.time()
    embeddings, sentences_mask, position_matrices, tokens_in_doc = read_data(sentences, opt, encoder)
    embeddings = np.reshape(embeddings, (opt.batch_size, opt.max_sentence_len, opt.embedding_dim))
    tokens_in_doc = reverse_unk(tokens_in_doc, sentences)
    end_time = time.time()
    time_running = end_time - start_time
    run_time = f'\n\n\nTokenize {len(sentences)} samples in {time_running:.2f}s'
    print(run_time)

    # Load model weights from the configured checkpoint
    model = RACL(opt)
    model.load_weights(opt.ckpt_path)

    # Predict
    start_time = time.time()
    aspect_probs, opinion_probs, sentiment_probs = model.predict(
        sentence=embeddings,
        word_mask=sentences_mask.reshape((opt.batch_size, opt.max_sentence_len)),
        position_att=position_matrices.reshape((opt.batch_size, opt.max_sentence_len, opt.max_sentence_len))
    )
    end_time = time.time()
    time_running = end_time - start_time
    run_time = f'\n\n\nPredict {len(sentences)} samples in {time_running:.2f}s'
    print(run_time)

    # Feed results into DataFrame
    results_df = decode_results(tokens_in_doc, sent2doc,
                                aspect_probs, opinion_probs, sentiment_probs)

    # Write results to disk as JSON and HTML
    output_file = os.path.join(predicts_dir, f'case_study_{opt.task}')
    print(f'\n\nWriting result to \n\t{output_file}.json\n\t{output_file}.html ...')
    doc_results = format_results(results_df)
    with open(output_file + '.json', 'w') as f_writer:
        json.dump(doc_results, f_writer, indent=4)
    dict2html(doc_results, output_file + '.html')
if __name__ == '__main__':
    # Build the CLI, keep unknown options for load_basic_arguments downstream.
    arg_parser = argparse.ArgumentParser(description='Model Prediction')
    arg_parser.add_argument('-c', '--config-path',
                            default='production/model_config.yml',
                            type=str, help='Config path')
    cli_args, _unknown = arg_parser.parse_known_args()
    predict(arg_parser, cli_args)
##########################################
# Executive Time on Local Machine: #
# Tokenize 13 samples in 0.22s #
# Predict 13 samples in 2.27s #
##########################################
|
[
"tensorflow.random.set_seed",
"json.dump",
"numpy.random.seed",
"argparse.ArgumentParser",
"src.models.RACL.RACL",
"src.utils.dict2html",
"time.time",
"pathlib.Path",
"src.utils.split_documents",
"random.seed",
"src.models.encoder.Encoder",
"numpy.reshape",
"src.utils.reverse_unk",
"src.utils.decode_results",
"src.utils.format_results",
"os.path.join",
"src.utils.read_data",
"tensorflow.autograph.set_verbosity"
] |
[((282, 311), 'tensorflow.autograph.set_verbosity', 'tf.autograph.set_verbosity', (['(3)'], {}), '(3)\n', (308, 311), True, 'import tensorflow as tf\n'), ((1851, 1879), 'random.seed', 'random.seed', (['opt.random_seed'], {}), '(opt.random_seed)\n', (1862, 1879), False, 'import random\n'), ((1885, 1916), 'numpy.random.seed', 'np.random.seed', (['opt.random_seed'], {}), '(opt.random_seed)\n', (1899, 1916), True, 'import numpy as np\n'), ((1922, 1957), 'tensorflow.random.set_seed', 'tf.random.set_seed', (['opt.random_seed'], {}), '(opt.random_seed)\n', (1940, 1957), True, 'import tensorflow as tf\n'), ((9518, 9579), 'os.path.join', 'os.path.join', (['checkpoint_dir', 'f"""RACL-epoch={opt.ckpt:03d}.h5"""'], {}), "(checkpoint_dir, f'RACL-epoch={opt.ckpt:03d}.h5')\n", (9530, 9579), False, 'import os\n'), ((9646, 9672), 'src.utils.split_documents', 'split_documents', (['documents'], {}), '(documents)\n', (9661, 9672), False, 'from src.utils import load_config, split_documents, read_data, reverse_unk, decode_results, format_results, dict2html\n'), ((9837, 9879), 'os.path.join', 'os.path.join', (['artefacts_dir', 'sbert_version'], {}), '(artefacts_dir, sbert_version)\n', (9849, 9879), False, 'import os\n'), ((9895, 9913), 'src.models.encoder.Encoder', 'Encoder', (['sbert_dir'], {}), '(sbert_dir)\n', (9902, 9913), False, 'from src.models.encoder import Encoder\n'), ((9950, 9961), 'time.time', 'time.time', ([], {}), '()\n', (9959, 9961), False, 'import time\n'), ((10030, 10064), 'src.utils.read_data', 'read_data', (['sentences', 'opt', 'encoder'], {}), '(sentences, opt, encoder)\n', (10039, 10064), False, 'from src.utils import load_config, split_documents, read_data, reverse_unk, decode_results, format_results, dict2html\n'), ((10083, 10169), 'numpy.reshape', 'np.reshape', (['embeddings', '(opt.batch_size, opt.max_sentence_len, opt.embedding_dim)'], {}), '(embeddings, (opt.batch_size, opt.max_sentence_len, opt.\n embedding_dim))\n', (10093, 10169), True, 'import numpy as 
np\n'), ((10186, 10223), 'src.utils.reverse_unk', 'reverse_unk', (['tokens_in_doc', 'sentences'], {}), '(tokens_in_doc, sentences)\n', (10197, 10223), False, 'from src.utils import load_config, split_documents, read_data, reverse_unk, decode_results, format_results, dict2html\n'), ((10240, 10251), 'time.time', 'time.time', ([], {}), '()\n', (10249, 10251), False, 'import time\n'), ((10430, 10439), 'src.models.RACL.RACL', 'RACL', (['opt'], {}), '(opt)\n', (10434, 10439), False, 'from src.models.RACL import RACL\n'), ((10514, 10525), 'time.time', 'time.time', ([], {}), '()\n', (10523, 10525), False, 'import time\n'), ((10839, 10850), 'time.time', 'time.time', ([], {}), '()\n', (10848, 10850), False, 'import time\n'), ((11050, 11139), 'src.utils.decode_results', 'decode_results', (['tokens_in_doc', 'sent2doc', 'aspect_probs', 'opinion_probs', 'sentiment_probs'], {}), '(tokens_in_doc, sent2doc, aspect_probs, opinion_probs,\n sentiment_probs)\n', (11064, 11139), False, 'from src.utils import load_config, split_documents, read_data, reverse_unk, decode_results, format_results, dict2html\n'), ((11208, 11260), 'os.path.join', 'os.path.join', (['predicts_dir', 'f"""case_study_{opt.task}"""'], {}), "(predicts_dir, f'case_study_{opt.task}')\n", (11220, 11260), False, 'import os\n'), ((11366, 11392), 'src.utils.format_results', 'format_results', (['results_df'], {}), '(results_df)\n', (11380, 11392), False, 'from src.utils import load_config, split_documents, read_data, reverse_unk, decode_results, format_results, dict2html\n'), ((11504, 11549), 'src.utils.dict2html', 'dict2html', (['doc_results', "(output_file + '.html')"], {}), "(doc_results, output_file + '.html')\n", (11513, 11549), False, 'from src.utils import load_config, split_documents, read_data, reverse_unk, decode_results, format_results, dict2html\n'), ((11594, 11649), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'description': '"""Model Prediction"""'}), "(description='Model Prediction')\n", 
(11617, 11649), False, 'import argparse\n'), ((9114, 9136), 'pathlib.Path', 'Path', (['args.config_path'], {}), '(args.config_path)\n', (9118, 9136), False, 'from pathlib import Path\n'), ((11456, 11498), 'json.dump', 'json.dump', (['doc_results', 'f_writer'], {'indent': '(4)'}), '(doc_results, f_writer, indent=4)\n', (11465, 11498), False, 'import json\n')]
|
from queue import Empty, SimpleQueue
from typing import Any, Callable, Optional, cast
from PyQt5.QtCore import QEvent, QObject
from PyQt5.QtWidgets import QApplication
from ..log import get_logger
# Type alias: a no-argument callable (a "task") scheduled on an executor.
Function = Callable[[], None]
class IoThreadExecutor:
    """Collects callables from any thread and runs them on the I/O thread.

    ``schedule`` is safe to call from any thread (SimpleQueue is
    thread-safe); ``execute_all`` drains the queue and is meant to be
    invoked from the I/O thread itself.
    """

    def __init__(self) -> None:
        self._queue: SimpleQueue[Function] = SimpleQueue()
        self._logger: Any = get_logger("IoThreadExecutor")

    def schedule(self, fun: Function) -> None:
        """Enqueue *fun* for later execution on the I/O thread."""
        self._queue.put(fun)
        self._logger.debug("Schedule task", qsize=self._queue.qsize())

    def execute_all(self) -> None:
        """Run queued callables until the queue is empty."""
        try:
            while True:
                task = self._queue.get_nowait()
                self._logger.debug("Execute task", qsize=self._queue.qsize())
                task()
        except Empty:
            # Queue drained — nothing left to execute.
            return
class _FunctionEvent(QEvent):
    """A Qt event that carries a callable to be run on the UI thread."""

    # First user-defined Qt event type; used by UiThreadExecutor.event() to
    # recognise events posted by this module.
    EVENT_TYPE: QEvent.Type = QEvent.Type.User

    def __init__(self, fun: Function) -> None:
        super().__init__(self.EVENT_TYPE)
        # The callable to execute when the event is delivered.
        self.fun = fun
class UiThreadExecutor(QObject):
    """Runs scheduled callables on the Qt UI thread.

    ``schedule`` posts a ``_FunctionEvent`` through the QApplication event
    queue; Qt delivers it on the UI thread, where ``event`` executes the
    wrapped callable.
    """

    def __init__(self, app: QApplication) -> None:
        super().__init__(parent=None)
        self._app = app
        self._logger: Any = get_logger("UiThreadExecutor")

    def schedule(self, fun: Function) -> None:
        """Post *fun* to the UI thread via the Qt event loop."""
        self._logger.debug("Schedule task")
        self._app.postEvent(self, _FunctionEvent(fun))

    def event(self, e: QEvent) -> bool:
        """Handle delivered events; execute our function events, defer the rest."""
        if e.type() == _FunctionEvent.EVENT_TYPE:
            self._logger.debug("Execute task")
            # cast is a typing no-op: e really is a _FunctionEvent here.
            cast(_FunctionEvent, e).fun()
            return True
        return super().event(e)
|
[
"typing.cast",
"queue.SimpleQueue"
] |
[((333, 346), 'queue.SimpleQueue', 'SimpleQueue', ([], {}), '()\n', (344, 346), False, 'from queue import Empty, SimpleQueue\n'), ((1568, 1591), 'typing.cast', 'cast', (['_FunctionEvent', 'e'], {}), '(_FunctionEvent, e)\n', (1572, 1591), False, 'from typing import Any, Callable, Optional, cast\n')]
|
# -*- coding: utf-8 -*-
"""
Created on Tue Sep 29 17:41:44 2020
@author: salman
"""
from PIL import Image
import pandas as pd
import numpy as np
import cv2
import os
# Build a lookup table mapping original ADE20K class ids to the new ids
# listed in new.txt (tab separated; first column is the original id, the
# last column the remapped value).
data = pd.read_csv('E:\\fyp data\\ADEK-20\\new_se_new\\new.txt', sep="\t")
arr = np.zeros(151)  # 150 ADE20K classes + background id 0
for point in data.values:
    (key, name, val) = point[0], point[-2], point[-1]
    arr[key] = val
print(arr)

# Remap every validation annotation image through the lookup table and save
# the result alongside the new label set.
train_file = pd.read_csv('E:\\fyp data\\ADEK-20\\validation_images.txt', sep="\t")
train_lst = list(train_file["images"])
path = "E:\\fyp data\\ADEK-20\\ADEChallengeData2016\\ADEChallengeData2016\\annotations\\validation\\"
saved = "E:\\fyp data\\ADEK-20\\new_se_new\\adk_annotations\\validation\\"
for img in train_lst:
    imgPath = path + img + '.png'
    image = np.array(cv2.imread(imgPath, 0))  # grayscale: pixel value == class id
    image = arr[image]  # vectorised id remapping via fancy indexing
    uniques = np.unique(image)
    # BUG FIX: the original condition was `if len(uniques > 0):`, which takes
    # the length of a *boolean array* and is truthy for any non-empty image.
    # Test the number of unique labels instead.
    if len(uniques) > 0:
        cv2.imwrite(saved + img + '.png', image)
print("Done")
|
[
"pandas.read_csv",
"cv2.imwrite",
"numpy.zeros",
"cv2.imread",
"numpy.unique"
] |
[((182, 249), 'pandas.read_csv', 'pd.read_csv', (['"""E:\\\\fyp data\\\\ADEK-20\\\\new_se_new\\\\new.txt"""'], {'sep': '"""\t"""'}), "('E:\\\\fyp data\\\\ADEK-20\\\\new_se_new\\\\new.txt', sep='\\t')\n", (193, 249), True, 'import pandas as pd\n'), ((255, 268), 'numpy.zeros', 'np.zeros', (['(151)'], {}), '(151)\n', (263, 268), True, 'import numpy as np\n'), ((408, 477), 'pandas.read_csv', 'pd.read_csv', (['"""E:\\\\fyp data\\\\ADEK-20\\\\validation_images.txt"""'], {'sep': '"""\t"""'}), "('E:\\\\fyp data\\\\ADEK-20\\\\validation_images.txt', sep='\\t')\n", (419, 477), True, 'import pandas as pd\n'), ((818, 834), 'numpy.unique', 'np.unique', (['image'], {}), '(image)\n', (827, 834), True, 'import numpy as np\n'), ((762, 784), 'cv2.imread', 'cv2.imread', (['imgPath', '(0)'], {}), '(imgPath, 0)\n', (772, 784), False, 'import cv2\n'), ((871, 911), 'cv2.imwrite', 'cv2.imwrite', (["(saved + img + '.png')", 'image'], {}), "(saved + img + '.png', image)\n", (882, 911), False, 'import cv2\n')]
|
"""Tests for the marion application views"""
import json
import tempfile
from pathlib import Path
from django.urls import reverse
import pytest
from pytest_django import asserts as django_assertions
from rest_framework import exceptions as drf_exceptions
from rest_framework import status
from rest_framework.test import APIClient
from marion import defaults, models
from marion.issuers import DummyDocument
# Shared DRF test client reused by every test in this module.
client = APIClient()
def count_documents(root):
    """Return the number of generated PDF files in the root directory"""
    # Count lazily instead of materialising the glob results into a list.
    return sum(1 for _ in root.glob("*.pdf"))
@pytest.mark.django_db
def test_document_request_viewset_post(monkeypatch):
    """Test the DocumentRequestViewSet create view.

    Covers three scenarios in sequence: missing required fields, an unknown
    issuer, and a valid request.  After each failing POST we check that no
    DocumentRequest row was created and no PDF was generated.
    """
    # Generate documents into a throw-away directory so the PDF-count
    # assertions start from a clean slate.
    monkeypatch.setattr(defaults, "DOCUMENTS_ROOT", Path(tempfile.mkdtemp()))
    url = reverse("documentrequest-list")
    assert count_documents(defaults.DOCUMENTS_ROOT) == 0
    # Request payload required parameters: an empty payload must report both
    # `context_query` and `issuer` as required.
    data = {}
    response = client.post(url, data, format="json")
    assert response.status_code == status.HTTP_400_BAD_REQUEST
    assert isinstance(response.data.get("context_query")[0], drf_exceptions.ErrorDetail)
    assert response.data.get("context_query")[0].code == "required"
    assert isinstance(response.data.get("issuer")[0], drf_exceptions.ErrorDetail)
    assert response.data.get("issuer")[0].code == "required"
    assert models.DocumentRequest.objects.count() == 0
    assert count_documents(defaults.DOCUMENTS_ROOT) == 0
    # Invalid issuer ("DumberDocument" is not a registered issuer choice)
    data = {
        "issuer": "marion.issuers.DumberDocument",
        "context_query": json.dumps({"fullname": "<NAME>"}),
    }
    response = client.post(url, data, format="json")
    assert response.status_code == status.HTTP_400_BAD_REQUEST
    assert response.data.get("issuer")[0].code == "invalid_choice"
    assert models.DocumentRequest.objects.count() == 0
    assert count_documents(defaults.DOCUMENTS_ROOT) == 0
    # Perform standard request: persists one DocumentRequest and renders
    # exactly one PDF.
    data = {
        "issuer": "marion.issuers.DummyDocument",
        "context_query": json.dumps({"fullname": "<NAME>"}),
    }
    response = client.post(url, data, format="json")
    assert response.status_code == status.HTTP_201_CREATED
    assert models.DocumentRequest.objects.count() == 1
    assert (
        models.DocumentRequest.objects.get().context.get("fullname")
        == "<NAME>"
    )
    assert count_documents(defaults.DOCUMENTS_ROOT) == 1
@pytest.mark.django_db
def test_document_request_viewset_post_context_query_pydantic_model_validation(
    monkeypatch,
):
    """Test the DocumentRequestViewSet create view context_query pydantic model
    validation.

    Each scenario posts an invalid `context_query` and checks that the
    pydantic error message is surfaced, nothing is persisted, and no PDF is
    generated.
    """
    # Generate documents into a throw-away directory so the PDF-count
    # assertions start from a clean slate.
    monkeypatch.setattr(defaults, "DOCUMENTS_ROOT", Path(tempfile.mkdtemp()))
    url = reverse("documentrequest-list")
    assert count_documents(defaults.DOCUMENTS_ROOT) == 0
    # Refuse extra fields in context query
    data = {
        "issuer": "marion.issuers.DummyDocument",
        "context_query": json.dumps({"fullname": "<NAME>", "friends": 2}),
    }
    response = client.post(url, data, format="json")
    assert response.status_code == status.HTTP_400_BAD_REQUEST
    assert "extra fields not permitted" in str(response.data.get("error"))
    assert models.DocumentRequest.objects.count() == 0
    assert count_documents(defaults.DOCUMENTS_ROOT) == 0
    # Input types checking (fullname may not be null)
    data = {
        "issuer": "marion.issuers.DummyDocument",
        "context_query": json.dumps({"fullname": None}),
    }
    response = client.post(url, data, format="json")
    assert response.status_code == status.HTTP_400_BAD_REQUEST
    assert "none is not an allowed value" in str(response.data.get("error"))
    assert models.DocumentRequest.objects.count() == 0
    assert count_documents(defaults.DOCUMENTS_ROOT) == 0
    # Input constraints checking (short fullname, below the 2-char minimum)
    data = {
        "issuer": "marion.issuers.DummyDocument",
        "context_query": json.dumps({"fullname": "D"}),
    }
    response = client.post(url, data, format="json")
    assert response.status_code == status.HTTP_400_BAD_REQUEST
    assert "ensure this value has at least 2 characters" in str(
        response.data.get("error")
    )
    assert models.DocumentRequest.objects.count() == 0
    assert count_documents(defaults.DOCUMENTS_ROOT) == 0
    # Input constraints checking (too long fullname, above the 255-char maximum)
    data = {
        "issuer": "marion.issuers.DummyDocument",
        "context_query": json.dumps({"fullname": "F" * 256}),
    }
    response = client.post(url, data, format="json")
    assert response.status_code == status.HTTP_400_BAD_REQUEST
    assert "ensure this value has at most 255 characters" in str(
        response.data.get("error")
    )
    assert models.DocumentRequest.objects.count() == 0
    assert count_documents(defaults.DOCUMENTS_ROOT) == 0
@pytest.mark.django_db
def test_document_request_viewset_post_context_pydantic_model_validation(
    monkeypatch,
):
    """Test the DocumentRequestViewSet create view context pydantic model
    validation.

    The POST payload is valid every time; each scenario instead redefines
    ``mock_fetch_context`` and re-patches ``DummyDocument.fetch_context`` so
    the issuer itself returns an invalid context.  The redefinition +
    re-patching order is significant — do not reorder these scenarios.
    """
    # pylint: disable=unused-argument,function-redefined
    monkeypatch.setattr(defaults, "DOCUMENTS_ROOT", Path(tempfile.mkdtemp()))
    url = reverse("documentrequest-list")
    data = {
        "issuer": "marion.issuers.DummyDocument",
        "context_query": json.dumps({"fullname": "<NAME>"}),
    }
    # Refuse extra fields in context
    def mock_fetch_context(*args, **kwargs):
        """A mock that returns invalid context"""
        return {
            "fullname": "<NAME>",
            "identifier": "0a1c3ccf-c67d-4071-ab1f-3b27628db9b1",
            "friends": 2,
        }
    monkeypatch.setattr(DummyDocument, "fetch_context", mock_fetch_context)
    response = client.post(url, data, format="json")
    assert response.status_code == status.HTTP_400_BAD_REQUEST
    assert "extra fields not permitted" in response.data.get("error")
    assert models.DocumentRequest.objects.count() == 0
    assert count_documents(defaults.DOCUMENTS_ROOT) == 0
    # Types checking (fullname may not be null)
    def mock_fetch_context(*args, **kwargs):
        """A mock that returns invalid context"""
        return {"fullname": None, "identifier": "0a1c3ccf-c67d-4071-ab1f-3b27628db9b1"}
    monkeypatch.setattr(DummyDocument, "fetch_context", mock_fetch_context)
    response = client.post(url, data, format="json")
    assert response.status_code == status.HTTP_400_BAD_REQUEST
    assert "none is not an allowed value" in response.data.get("error")
    assert models.DocumentRequest.objects.count() == 0
    assert count_documents(defaults.DOCUMENTS_ROOT) == 0
    # Missing identifier
    def mock_fetch_context(*args, **kwargs):
        """A mock that returns invalid context"""
        return {"fullname": "<NAME>"}
    monkeypatch.setattr(DummyDocument, "fetch_context", mock_fetch_context)
    response = client.post(url, data, format="json")
    assert response.status_code == status.HTTP_400_BAD_REQUEST
    assert "identifier\n  field required" in response.data.get("error")
    assert models.DocumentRequest.objects.count() == 0
    assert count_documents(defaults.DOCUMENTS_ROOT) == 0
    # Constraints checking (short fullname, below the 2-char minimum)
    def mock_fetch_context(*args, **kwargs):
        """A mock that returns invalid context"""
        return {"fullname": "D", "identifier": "0a1c3ccf-c67d-4071-ab1f-3b27628db9b1"}
    monkeypatch.setattr(DummyDocument, "fetch_context", mock_fetch_context)
    response = client.post(url, data, format="json")
    assert response.status_code == status.HTTP_400_BAD_REQUEST
    assert "ensure this value has at least 2 characters" in response.data.get("error")
    assert models.DocumentRequest.objects.count() == 0
    assert count_documents(defaults.DOCUMENTS_ROOT) == 0
    # Constraints checking (too long fullname, above the 255-char maximum)
    def mock_fetch_context(*args, **kwargs):
        """A mock that returns invalid context"""
        return {
            "fullname": "F" * 256,
            "identifier": "0a1c3ccf-c67d-4071-ab1f-3b27628db9b1",
        }
    monkeypatch.setattr(DummyDocument, "fetch_context", mock_fetch_context)
    response = client.post(url, data, format="json")
    assert response.status_code == status.HTTP_400_BAD_REQUEST
    assert "ensure this value has at most 255 characters" in response.data.get("error")
    assert models.DocumentRequest.objects.count() == 0
    assert count_documents(defaults.DOCUMENTS_ROOT) == 0
def test_document_template_debug_view_is_only_active_in_debug_mode(settings):
    """Test if the document_template_debug view is active when not in debug mode"""
    settings.DEBUG = False
    debug_url = reverse("documents-template-debug")
    # Outside DEBUG the view must refuse to serve anything.
    assert client.get(debug_url).status_code == 403
def test_document_template_debug_view(settings):
    """Test the document_template_debug view"""
    settings.DEBUG = True
    settings.MARION_DOCUMENT_ISSUER_CHOICES_CLASS = (
        "marion.default.DocumentIssuerChoices"
    )
    debug_url = reverse("documents-template-debug")

    # No issuer supplied -> explicit error message
    no_issuer = client.get(debug_url)
    assert no_issuer.status_code == status.HTTP_400_BAD_REQUEST
    assert b"You should provide an issuer." in no_issuer.content

    # Unknown issuer path -> explicit error message
    unknown = client.get(debug_url, {"issuer": "foo.bar.baz"})
    assert unknown.status_code == status.HTTP_400_BAD_REQUEST
    assert b"Unknown issuer foo.bar.baz" in unknown.content

    # Valid issuer -> the template is rendered
    valid = client.get(debug_url, {"issuer": "marion.issuers.DummyDocument"})
    assert valid.status_code == 200
    # pylint: disable=no-member
    django_assertions.assertContains(valid, "<h1>Dummy document</h1>")
|
[
"marion.models.DocumentRequest.objects.count",
"pytest_django.asserts.assertContains",
"json.dumps",
"django.urls.reverse",
"tempfile.mkdtemp",
"marion.models.DocumentRequest.objects.get",
"rest_framework.test.APIClient"
] |
[((422, 433), 'rest_framework.test.APIClient', 'APIClient', ([], {}), '()\n', (431, 433), False, 'from rest_framework.test import APIClient\n'), ((800, 831), 'django.urls.reverse', 'reverse', (['"""documentrequest-list"""'], {}), "('documentrequest-list')\n", (807, 831), False, 'from django.urls import reverse\n'), ((2737, 2768), 'django.urls.reverse', 'reverse', (['"""documentrequest-list"""'], {}), "('documentrequest-list')\n", (2744, 2768), False, 'from django.urls import reverse\n'), ((5170, 5201), 'django.urls.reverse', 'reverse', (['"""documentrequest-list"""'], {}), "('documentrequest-list')\n", (5177, 5201), False, 'from django.urls import reverse\n'), ((8597, 8632), 'django.urls.reverse', 'reverse', (['"""documents-template-debug"""'], {}), "('documents-template-debug')\n", (8604, 8632), False, 'from django.urls import reverse\n'), ((8947, 8982), 'django.urls.reverse', 'reverse', (['"""documents-template-debug"""'], {}), "('documents-template-debug')\n", (8954, 8982), False, 'from django.urls import reverse\n'), ((9476, 9545), 'pytest_django.asserts.assertContains', 'django_assertions.assertContains', (['response', '"""<h1>Dummy document</h1>"""'], {}), "(response, '<h1>Dummy document</h1>')\n", (9508, 9545), True, 'from pytest_django import asserts as django_assertions\n'), ((1374, 1412), 'marion.models.DocumentRequest.objects.count', 'models.DocumentRequest.objects.count', ([], {}), '()\n', (1410, 1412), False, 'from marion import defaults, models\n'), ((1586, 1620), 'json.dumps', 'json.dumps', (["{'fullname': '<NAME>'}"], {}), "({'fullname': '<NAME>'})\n", (1596, 1620), False, 'import json\n'), ((1822, 1860), 'marion.models.DocumentRequest.objects.count', 'models.DocumentRequest.objects.count', ([], {}), '()\n', (1858, 1860), False, 'from marion import defaults, models\n'), ((2043, 2077), 'json.dumps', 'json.dumps', (["{'fullname': '<NAME>'}"], {}), "({'fullname': '<NAME>'})\n", (2053, 2077), False, 'import json\n'), ((2208, 2246), 
'marion.models.DocumentRequest.objects.count', 'models.DocumentRequest.objects.count', ([], {}), '()\n', (2244, 2246), False, 'from marion import defaults, models\n'), ((2959, 3007), 'json.dumps', 'json.dumps', (["{'fullname': '<NAME>', 'friends': 2}"], {}), "({'fullname': '<NAME>', 'friends': 2})\n", (2969, 3007), False, 'import json\n'), ((3217, 3255), 'marion.models.DocumentRequest.objects.count', 'models.DocumentRequest.objects.count', ([], {}), '()\n', (3253, 3255), False, 'from marion import defaults, models\n'), ((3434, 3464), 'json.dumps', 'json.dumps', (["{'fullname': None}"], {}), "({'fullname': None})\n", (3444, 3464), False, 'import json\n'), ((3676, 3714), 'marion.models.DocumentRequest.objects.count', 'models.DocumentRequest.objects.count', ([], {}), '()\n', (3712, 3714), False, 'from marion import defaults, models\n'), ((3915, 3944), 'json.dumps', 'json.dumps', (["{'fullname': 'D'}"], {}), "({'fullname': 'D'})\n", (3925, 3944), False, 'import json\n'), ((4185, 4223), 'marion.models.DocumentRequest.objects.count', 'models.DocumentRequest.objects.count', ([], {}), '()\n', (4221, 4223), False, 'from marion import defaults, models\n'), ((4427, 4462), 'json.dumps', 'json.dumps', (["{'fullname': 'F' * 256}"], {}), "({'fullname': 'F' * 256})\n", (4437, 4462), False, 'import json\n'), ((4704, 4742), 'marion.models.DocumentRequest.objects.count', 'models.DocumentRequest.objects.count', ([], {}), '()\n', (4740, 4742), False, 'from marion import defaults, models\n'), ((5291, 5325), 'json.dumps', 'json.dumps', (["{'fullname': '<NAME>'}"], {}), "({'fullname': '<NAME>'})\n", (5301, 5325), False, 'import json\n'), ((5893, 5931), 'marion.models.DocumentRequest.objects.count', 'models.DocumentRequest.objects.count', ([], {}), '()\n', (5929, 5931), False, 'from marion import defaults, models\n'), ((6475, 6513), 'marion.models.DocumentRequest.objects.count', 'models.DocumentRequest.objects.count', ([], {}), '()\n', (6511, 6513), False, 'from marion import defaults, 
models\n'), ((7011, 7049), 'marion.models.DocumentRequest.objects.count', 'models.DocumentRequest.objects.count', ([], {}), '()\n', (7047, 7049), False, 'from marion import defaults, models\n'), ((7630, 7668), 'marion.models.DocumentRequest.objects.count', 'models.DocumentRequest.objects.count', ([], {}), '()\n', (7666, 7668), False, 'from marion import defaults, models\n'), ((8294, 8332), 'marion.models.DocumentRequest.objects.count', 'models.DocumentRequest.objects.count', ([], {}), '()\n', (8330, 8332), False, 'from marion import defaults, models\n'), ((768, 786), 'tempfile.mkdtemp', 'tempfile.mkdtemp', ([], {}), '()\n', (784, 786), False, 'import tempfile\n'), ((2705, 2723), 'tempfile.mkdtemp', 'tempfile.mkdtemp', ([], {}), '()\n', (2721, 2723), False, 'import tempfile\n'), ((5138, 5156), 'tempfile.mkdtemp', 'tempfile.mkdtemp', ([], {}), '()\n', (5154, 5156), False, 'import tempfile\n'), ((2273, 2309), 'marion.models.DocumentRequest.objects.get', 'models.DocumentRequest.objects.get', ([], {}), '()\n', (2307, 2309), False, 'from marion import defaults, models\n')]
|
import os
import numpy as np
from glob import glob
from textwrap import wrap
from tabulate import tabulate
from collections import defaultdict
from typing import List, Union, Iterator, Iterable, Tuple, Dict
from typing import TypeVar, Generic
from .applicator import Applicator
# Type variable for the document type carried by a batch.
T = TypeVar('T', bound='BiText')


class BatchDocs(Generic[T]):
    """A (possibly lazy) batch of ``(prefix, document)`` pairs.

    Parameters
    ----------
    docs:
        Iterable of ``(path prefix, document)`` tuples.  May be a generator;
        in that case ``num_docs`` should be supplied by the caller.
    num_docs:
        Number of documents; derived automatically when ``docs`` is a list.
    """

    def __init__(self,
                 docs: Iterable[Tuple[str, T]],
                 num_docs: int = None):
        self.docs = docs
        # A concrete list knows its own length; for generators we trust the
        # caller-provided count (which may be None).
        if isinstance(docs, list):
            num_docs = len(docs)
        self.num_docs = num_docs

    def __iter__(self):
        return iter(self.docs)

    def __len__(self):
        return self.num_docs

    def __str__(self):
        # NOTE: iterating here consumes self.docs when it is a generator.
        str_repr = ""
        prefix: str
        doc: T
        for prefix, doc in self.docs:
            str_repr += f"{prefix}\n{doc}\n\n"
        return str_repr

    def to_files(self,
                 output_dir: str,
                 suffix: str = ''):
        """Write every document into *output_dir*, keyed by its basename.

        Only the basename of each stored prefix is kept, so all files land
        directly inside *output_dir*.  (An unused ``os.path.dirname`` local
        was removed from this method.)
        """
        prefix: str
        doc: T
        for prefix, doc in self.docs:
            basename = os.path.basename(prefix)
            path_prefix = os.path.join(output_dir, basename)
            doc.to_files(path_prefix, suffix=suffix)

    def apply(self,
              applicator_type: Union['Applicator', str],
              applicator: Union['Applicator', str] = None,
              *args,
              only_src: bool = False,
              only_tgt: bool = False,
              progress: str = None,
              **kwargs) -> T:
        """Apply an Applicator to the whole batch and return the result.

        Either pass registry names (``applicator_type``/``applicator``) plus
        constructor ``*args``/``**kwargs``, or an already-constructed
        Applicator instance as the first positional argument.
        """
        if not isinstance(applicator_type, str):
            assert applicator is None
            fn = applicator_type
        else:
            applicator_cls = Applicator.by_name(applicator_type).by_name(applicator)
            fn = applicator_cls(*args, **kwargs)
        return fn.batch_apply(self,
                              only_src=only_src,
                              only_tgt=only_tgt,
                              progress=progress)

    def split(self,
              mapping_path: str):
        """Regroup documents into subsets according to a mapping file.

        *mapping_path* is a TSV of ``basename<TAB>subset`` lines.  All
        documents must share one type and one language pair; their lines are
        concatenated per subset, and a new BatchDocs of merged documents with
        ``{subset}.{src}-{tgt}.`` prefixes is returned.
        """
        subsets: Dict[str, str] = dict()
        with open(mapping_path, 'r') as f:
            for line in f:
                line = line.rstrip('\n')
                prefix, _, subset = line.partition('\t')
                subsets[prefix] = subset
        docs = list(self.docs)
        # Unpacking a 1-element set asserts that every doc agrees on these.
        cls, = set(type(doc) for _, doc in docs)
        src_lang, = set(doc.src_lang for _, doc in docs)
        tgt_lang, = set(doc.tgt_lang for _, doc in docs)
        src_lines: Dict[str, List[str]] = defaultdict(list)
        tgt_lines: Dict[str, List[str]] = defaultdict(list)
        for prefix, doc in docs:
            basename = os.path.basename(prefix)
            subset = subsets[basename]
            src_lines[subset] += doc.src_lines
            tgt_lines[subset] += doc.tgt_lines
        merged_docs: List[Tuple[str, T]] = []
        for subset in src_lines.keys() | tgt_lines.keys():
            doc = cls(src=src_lines[subset],
                      tgt=tgt_lines[subset],
                      src_lang=src_lang,
                      tgt_lang=tgt_lang)
            prefix = f'{subset}.{src_lang}-{tgt_lang}.'
            merged_docs.append((prefix, doc))
        return BatchDocs(merged_docs)
class BiText:
    """A parallel source/target text pair (one language on each side).

    Lines may be given either as a single newline-joined string or as a list
    of strings.  File naming convention throughout:
    ``{prefix}{lang}{suffix}``.
    """

    __slots__ = ('src_lang', 'tgt_lang',
                 'src_lines', 'tgt_lines')

    def __init__(self,
                 src: Union[str, List[str]],
                 tgt: Union[str, List[str]],
                 *,
                 src_lang: str,
                 tgt_lang: str):
        self.src_lang = src_lang
        self.tgt_lang = tgt_lang
        self.src_lines: List[str]
        self.tgt_lines: List[str]
        # Accept either a newline-joined string or an already-split list.
        if isinstance(src, str):
            self.src_lines = src.split('\n')
        else:
            self.src_lines = src
        if isinstance(tgt, str):
            self.tgt_lines = tgt.split('\n')
        else:
            self.tgt_lines = tgt

    @classmethod
    def read_lines(cls,
                   file_path: str) -> Iterator[str]:
        """Yield non-empty lines from *file_path*, stripped of newlines."""
        with open(file_path, 'r') as f:
            for line in f:
                line = line.rstrip('\n')
                if not line:
                    continue
                yield line

    @classmethod
    def write_lines(cls,
                    lines: Iterable[str],
                    file_path: str):
        """Write *lines* to *file_path*, one per line."""
        with open(file_path, 'w') as f:
            for line in lines:
                f.write(f'{line}\n')

    @classmethod
    def batch_from_files(cls,
                         *prefixes: str,
                         src_lang: str,
                         tgt_lang: str,
                         suffix: str = ''):
        """Load a BatchDocs lazily from glob-style path prefixes.

        Each prefix is completed to the source-side file pattern and
        glob-expanded; the language/suffix tail is then stripped back off to
        recover the per-document prefix.
        """
        resolved_prefixes: List[str] = []
        for prefix in prefixes:
            if not prefix.endswith(f"{src_lang}{suffix}"):
                prefix = f"{prefix}{src_lang}{suffix}"
            for path in glob(prefix):
                suffix_offset = len(src_lang) + len(suffix)
                resolved_prefix = path[:-suffix_offset]
                resolved_prefixes.append(resolved_prefix)
        generator = (
            (prefix, cls.from_files(prefix,
                                    src_lang=src_lang,
                                    tgt_lang=tgt_lang,
                                    suffix=suffix))
            for prefix in resolved_prefixes
        )
        return BatchDocs(
            generator,
            # BUG FIX: the count must reflect the glob-resolved prefixes, not
            # the raw patterns (one pattern may match several files, or none).
            num_docs=len(resolved_prefixes)
        )

    @classmethod
    def from_files(cls,
                   prefix: str,
                   *,
                   src_lang: str,
                   tgt_lang: str,
                   suffix: str = ''):
        """Read the ``{prefix}{lang}{suffix}`` pair of files into a BiText."""
        src_path = f'{prefix}{src_lang}{suffix}'
        tgt_path = f'{prefix}{tgt_lang}{suffix}'
        src_lines = list(cls.read_lines(src_path))
        tgt_lines = list(cls.read_lines(tgt_path))
        return cls(src=src_lines,
                   tgt=tgt_lines,
                   src_lang=src_lang,
                   tgt_lang=tgt_lang)

    def to_files(self,
                 prefix: str,
                 suffix: str = ''):
        """Write both sides to ``{prefix}{lang}{suffix}``, creating dirs."""
        src_path = f'{prefix}{self.src_lang}{suffix}'
        os.makedirs(os.path.dirname(src_path), exist_ok=True)
        tgt_path = f'{prefix}{self.tgt_lang}{suffix}'
        os.makedirs(os.path.dirname(tgt_path), exist_ok=True)
        self.write_lines(self.src_lines, src_path)
        self.write_lines(self.tgt_lines, tgt_path)

    def segment(self, segmenter: Union['Applicator', str],
                *args,
                only_src: bool = False,
                only_tgt: bool = False,
                **kwargs) -> 'BiText':
        """Run a segmenter (registry name or instance) over this document."""
        if isinstance(segmenter, str):
            applicator_cls = Applicator.by_name('segmenter').by_name(segmenter)
            applicator = applicator_cls(**kwargs)
        else:
            assert not kwargs
            applicator = segmenter
        return applicator(self,
                          only_src=only_src,
                          only_tgt=only_tgt)

    def encode(self, encoder: Union['Applicator', str],
               *args,
               **kwargs):
        """Run an encoder (registry name or instance) over this document."""
        if isinstance(encoder, str):
            applicator_cls = Applicator.by_name('encoder').by_name(encoder)
            applicator = applicator_cls(**kwargs)
        else:
            assert not kwargs
            applicator = encoder
        return applicator(self)

    @classmethod
    def wrap_row(cls, *cols: str, **kwargs) -> List[str]:
        """Hard-wrap every column text; kwargs are passed to textwrap.wrap."""
        return [
            '\n'.join(wrap(col, **kwargs))
            for col in cols
        ]

    def __str__(self):
        # Render the two sides as a table, top-aligned at the bottom: the
        # shorter side is padded with empty rows at the top.
        src_lines = self.src_lines
        tgt_lines = self.tgt_lines
        num_src_lines = len(src_lines)
        num_tgt_lines = len(tgt_lines)
        num_rows = max(num_src_lines, num_tgt_lines)
        src_lines = [''] * (num_rows - num_src_lines) + src_lines
        tgt_lines = [''] * (num_rows - num_tgt_lines) + tgt_lines
        rows = [
            self.wrap_row(src_line, tgt_line)
            for src_line, tgt_line
            in zip(src_lines, tgt_lines)
        ]
        return tabulate(
            rows,
            headers=[self.src_lang, self.tgt_lang],
            tablefmt='grid',
            showindex='always'
        )
class AlignedBiText(BiText):
    """A BiText whose source and target sides are sentence-aligned.

    Identical to BiText except that construction requires the two sides to
    have the same number of lines.
    """

    def __init__(self,
                 src: Union[str, List[str]],
                 tgt: Union[str, List[str]],
                 *,
                 src_lang: str,
                 tgt_lang: str):
        super().__init__(src, tgt, src_lang=src_lang, tgt_lang=tgt_lang)
        # One-to-one alignment invariant.  NOTE(review): this check is
        # stripped when Python runs with -O.
        assert len(self.src_lines) == len(self.tgt_lines)
class EncodedBiText(BiText):
    """BiText augmented with per-side embeddings.

    The ``src``/``tgt`` sequences passed in may contain extra trailing lines
    beyond the first ``num_src_lines``/``num_tgt_lines`` entries; the
    base-class ``src_lines``/``tgt_lines`` expose only the leading real
    lines, while the ``*_windows_*`` accessors expose the full sequences
    (presumably sliding-window context added by an encoder — confirm against
    the Applicator implementations).
    """

    __slots__ = ('src_lang', 'tgt_lang',
                 'src_lines', 'tgt_lines',
                 '_src_embeddings', '_tgt_embeddings',
                 '_src_lines', '_tgt_lines',
                 'num_src_lines', 'num_tgt_lines',
                 'num_src_tokens', 'num_tgt_tokens')

    def __init__(self,
                 src: Union[str, List[str]],
                 tgt: Union[str, List[str]],
                 *,
                 src_lang: str,
                 tgt_lang: str,
                 # num_src_tokens: int,
                 # num_tgt_tokens: int,
                 src_embeddings: np.ndarray,
                 tgt_embeddings: np.ndarray,
                 num_src_lines: int = None,
                 num_tgt_lines: int = None):
        # Default the "real" line counts to the full sequence lengths.
        if num_src_lines is None:
            num_src_lines = len(src)
        self.num_src_lines = num_src_lines
        if num_tgt_lines is None:
            num_tgt_lines = len(tgt)
        self.num_tgt_lines = num_tgt_lines
        # Base class only sees the leading real lines ...
        super().__init__(src[:self.num_src_lines],
                         tgt[:self.num_tgt_lines],
                         src_lang=src_lang, tgt_lang=tgt_lang)
        self._src_embeddings = src_embeddings
        self._tgt_embeddings = tgt_embeddings
        # ... while the full (possibly longer) sequences are kept privately.
        self._src_lines = src
        self._tgt_lines = tgt
        # self.num_src_tokens = num_src_tokens
        # self.num_tgt_tokens = num_tgt_tokens

    @property
    def src_embeddings(self):
        # Embeddings of the real source lines only.
        return self._src_embeddings[:self.num_src_lines]

    @property
    def tgt_embeddings(self):
        # Embeddings of the real target lines only.
        return self._tgt_embeddings[:self.num_tgt_lines]

    # NOTE(review): unlike src_windows_lines below, these two are plain
    # methods (no @property) — callers must call them.  Possibly an
    # oversight, but changing it would break existing call sites.
    def src_windows_embeddings(self):
        return self._src_embeddings

    def tgt_windows_embeddings(self):
        return self._tgt_embeddings

    @property
    def src_windows_lines(self):
        # Full source sequence, including any trailing window lines.
        return self._src_lines

    @property
    def tgt_windows_lines(self):
        # Full target sequence, including any trailing window lines.
        return self._tgt_lines
|
[
"os.path.basename",
"textwrap.wrap",
"os.path.dirname",
"collections.defaultdict",
"tabulate.tabulate",
"glob.glob",
"typing.TypeVar",
"os.path.join"
] |
[((289, 317), 'typing.TypeVar', 'TypeVar', (['"""T"""'], {'bound': '"""BiText"""'}), "('T', bound='BiText')\n", (296, 317), False, 'from typing import TypeVar, Generic\n'), ((2556, 2573), 'collections.defaultdict', 'defaultdict', (['list'], {}), '(list)\n', (2567, 2573), False, 'from collections import defaultdict\n'), ((2616, 2633), 'collections.defaultdict', 'defaultdict', (['list'], {}), '(list)\n', (2627, 2633), False, 'from collections import defaultdict\n'), ((8085, 8180), 'tabulate.tabulate', 'tabulate', (['rows'], {'headers': '[self.src_lang, self.tgt_lang]', 'tablefmt': '"""grid"""', 'showindex': '"""always"""'}), "(rows, headers=[self.src_lang, self.tgt_lang], tablefmt='grid',\n showindex='always')\n", (8093, 8180), False, 'from tabulate import tabulate\n'), ((1074, 1097), 'os.path.dirname', 'os.path.dirname', (['prefix'], {}), '(prefix)\n', (1089, 1097), False, 'import os\n'), ((1121, 1145), 'os.path.basename', 'os.path.basename', (['prefix'], {}), '(prefix)\n', (1137, 1145), False, 'import os\n'), ((1172, 1206), 'os.path.join', 'os.path.join', (['output_dir', 'basename'], {}), '(output_dir, basename)\n', (1184, 1206), False, 'import os\n'), ((2691, 2715), 'os.path.basename', 'os.path.basename', (['prefix'], {}), '(prefix)\n', (2707, 2715), False, 'import os\n'), ((4898, 4910), 'glob.glob', 'glob', (['prefix'], {}), '(prefix)\n', (4902, 4910), False, 'from glob import glob\n'), ((6175, 6200), 'os.path.dirname', 'os.path.dirname', (['src_path'], {}), '(src_path)\n', (6190, 6200), False, 'import os\n'), ((6291, 6316), 'os.path.dirname', 'os.path.dirname', (['tgt_path'], {}), '(tgt_path)\n', (6306, 6316), False, 'import os\n'), ((7501, 7520), 'textwrap.wrap', 'wrap', (['col'], {}), '(col, **kwargs)\n', (7505, 7520), False, 'from textwrap import wrap\n')]
|
# -*- coding: utf-8 -*-
"""
@author: bartulem
Run Kilosort2 through Python.
As it stands (spring/summer 2020), to use Kilosort2 one still requires Matlab. To ensure it works,
one needs a specific combination of Matlab, the GPU driver version and CUDA compiler files.
On the lab computer, I set it up to work on Matlab R2019b, driver version 10.2. (GeForce RTX 2080 Ti)
and v10.1 CUDA. !!! NB: a different Matlab or driver version would require different CUDA files !!!
Additionally, since I don't change the config file from session to session, I wrote the script
below to run Matlab code through Python, such that the whole processing pipeline would remain Pythonic.
Apart from Matlab, in order for this to run, you need to install the "matlab engine"; further instructions
can be found here: https://www.mathworks.com/help/matlab/matlab_external/install-the-matlab-engine-for-python.html
However, if you need to modify the config file, you either need to change the code below accordingly
or go to Matlab and run Kilosort2 the old school way.
"""
import time
import matlab.engine
import os
import sys
def run_kilosort(file_dir, kilosort2_dir):
# check that the data directory is there
if not os.path.exists(file_dir):
print('Could not find data directory {}, try again.'.format(file_dir))
sys.exit()
# check that the Kilosort2 directory is there
if not os.path.exists(kilosort2_dir):
print('Could not find Kilosort directory {}, try again.'.format(kilosort2_dir))
sys.exit()
print('Kilosort2 to be run on file: {}.'.format(file_dir))
# run Kilosort2
print('Running Kilosort2, please be patient - this could take >1 hour.')
start_time = time.time()
eng = matlab.engine.start_matlab()
eng.cd(kilosort2_dir, nargout=0)
eng.ls(nargout=0)
eng.master_kilosort(file_dir, nargout=0)
eng.quit()
print('Finished! Running Kilosort2 took {:.2f} minutes.\n'.format((time.time() - start_time) / 60))
|
[
"os.path.exists",
"sys.exit",
"time.time"
] |
[((1726, 1737), 'time.time', 'time.time', ([], {}), '()\n', (1735, 1737), False, 'import time\n'), ((1215, 1239), 'os.path.exists', 'os.path.exists', (['file_dir'], {}), '(file_dir)\n', (1229, 1239), False, 'import os\n'), ((1328, 1338), 'sys.exit', 'sys.exit', ([], {}), '()\n', (1336, 1338), False, 'import sys\n'), ((1401, 1430), 'os.path.exists', 'os.path.exists', (['kilosort2_dir'], {}), '(kilosort2_dir)\n', (1415, 1430), False, 'import os\n'), ((1528, 1538), 'sys.exit', 'sys.exit', ([], {}), '()\n', (1536, 1538), False, 'import sys\n'), ((1969, 1980), 'time.time', 'time.time', ([], {}), '()\n', (1978, 1980), False, 'import time\n')]
|
import os
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
from sklearn.ensemble import RandomForestClassifier, RandomForestRegressor, GradientBoostingClassifier
from sklearn.metrics import mean_absolute_error, accuracy_score, roc_curve, roc_auc_score
from src.utils import calc_annual_return_vec, print_test_results
from config import basedir
if __name__ == "__main__":
# Read the datasets
train = pd.read_csv(os.path.join(basedir, 'data', 'processed', 'PD_train_continuous.csv'), sep=";")
test = pd.read_csv(os.path.join(basedir, 'data', 'processed', 'PD_test_continuous.csv'), sep=";")
X_train = np.array(train.drop(columns="good_bad"))
y_train = np.array(train["good_bad"])
X_test = np.array(test.drop(columns="good_bad"))
y_test = np.array(test["good_bad"])
print('Length of training set:', len(y_train))
print('Length of testing set: ', len(y_test))
####################################################################################################################
###################################### Random Forest Classification ######################################
####################################################################################################################
reg = GradientBoostingClassifier()
reg.fit(X_train, y_train)
y_train_predict = np.round(reg.predict(X_train), 2)
y_test_predict = np.round(reg.predict(X_test), 2)
y_hat_test = reg.predict(X_test)
print("Accuracy:", accuracy_score(y_test, y_hat_test))
y_hat_test_proba = reg.predict_proba(X_test)[:][:, 1]
predictions = pd.concat([pd.DataFrame(y_test), pd.DataFrame(y_hat_test_proba)], axis=1)
predictions.columns = ["y_test", "y_hat_test_proba"]
fpr, tpr, thresholds = roc_curve(y_test, y_hat_test_proba)
auc = roc_auc_score(y_test, y_hat_test_proba)
plt.figure()
plt.plot(fpr, tpr)
plt.plot(fpr, fpr, linestyle="--", color="k")
plt.xlabel("False positive rate")
plt.ylabel("True positive rate")
plt.title(f"ROC curve (AUC = {np.round(auc, 2)})")
plt.savefig('../results/PD_GradientBoosting_model_auc.png')
plt.savefig(os.path.join(basedir, 'results', 'roc', 'PD_GradientBoosting.png'))
plt.show()
scores = mean_absolute_error(y_test_predict, y_test)
print('Mean Abs Error: {:.2f}'.format(scores))
####################################################################################################################
########################################### Feature Importance ###########################################
####################################################################################################################
print_FeatureImportance = False
if print_FeatureImportance:
importances = reg.feature_importances_
std = np.std([tree.feature_importances_ for tree in reg.estimators_], axis=0)
indices = np.flip(np.argsort(importances), axis=0)
xaxis = np.linspace(0, len(indices) - 1, len(indices))
names = []
for idx in indices:
names.append(train.columns[idx])
ax = plt.figure()
plt.title("Feature Importance")
plt.bar(xaxis, importances[indices] * 100, color="r", yerr=std[indices] * 100, align="center")
plt.xticks(xaxis, names, rotation=90)
plt.ylabel('%')
plt.tight_layout()
plt.savefig(os.path.join(basedir, 'results', 'roc', 'PD_GradientBoosting_FeatureImportance.png'))
####################################################################################################################
####################################### Evaluating Output Results ########################################
####################################################################################################################
print_results = False
if print_results:
idx = y_test_predict > 15.0
print_test_results(f"Yield (15% < predict):", test[idx])
idx = np.logical_and(y_test_predict > 10.0, y_test_predict < 15.0)
print_test_results(f"Yield (10% < predict < 15%):", test[idx])
idx = np.logical_and(y_test_predict > 5.0, y_test_predict < 10.0)
print_test_results(f"Yield (5% < predict < 10%):", test[idx])
idx = np.logical_and(y_test_predict > 0.0, y_test_predict < 5.0)
print_test_results(f"Yield (0% < predict < 5%):", test[idx])
idx = np.logical_and(y_test_predict > -10.0, y_test_predict < 0.0)
print_test_results(f"Yield (-10% < predict < 0%):", test[idx])
idx = np.logical_and(y_test_predict > -20.0, y_test_predict < -10.0)
print_test_results(f"Yield (-20% < predict < -10%):", test[idx])
idx = y_test_predict < -20.0
print_test_results(f"Yield (-20% > predict):", test[idx])
plt.show(block=True)
|
[
"matplotlib.pyplot.title",
"sklearn.metrics.accuracy_score",
"matplotlib.pyplot.bar",
"sklearn.metrics.mean_absolute_error",
"numpy.argsort",
"matplotlib.pyplot.figure",
"matplotlib.pyplot.tight_layout",
"os.path.join",
"numpy.round",
"pandas.DataFrame",
"numpy.std",
"matplotlib.pyplot.xticks",
"matplotlib.pyplot.show",
"src.utils.print_test_results",
"sklearn.metrics.roc_auc_score",
"matplotlib.pyplot.ylabel",
"sklearn.metrics.roc_curve",
"matplotlib.pyplot.plot",
"numpy.logical_and",
"sklearn.ensemble.GradientBoostingClassifier",
"numpy.array",
"matplotlib.pyplot.xlabel",
"matplotlib.pyplot.savefig"
] |
[((696, 723), 'numpy.array', 'np.array', (["train['good_bad']"], {}), "(train['good_bad'])\n", (704, 723), True, 'import numpy as np\n'), ((790, 816), 'numpy.array', 'np.array', (["test['good_bad']"], {}), "(test['good_bad'])\n", (798, 816), True, 'import numpy as np\n'), ((1294, 1322), 'sklearn.ensemble.GradientBoostingClassifier', 'GradientBoostingClassifier', ([], {}), '()\n', (1320, 1322), False, 'from sklearn.ensemble import RandomForestClassifier, RandomForestRegressor, GradientBoostingClassifier\n'), ((1797, 1832), 'sklearn.metrics.roc_curve', 'roc_curve', (['y_test', 'y_hat_test_proba'], {}), '(y_test, y_hat_test_proba)\n', (1806, 1832), False, 'from sklearn.metrics import mean_absolute_error, accuracy_score, roc_curve, roc_auc_score\n'), ((1843, 1882), 'sklearn.metrics.roc_auc_score', 'roc_auc_score', (['y_test', 'y_hat_test_proba'], {}), '(y_test, y_hat_test_proba)\n', (1856, 1882), False, 'from sklearn.metrics import mean_absolute_error, accuracy_score, roc_curve, roc_auc_score\n'), ((1888, 1900), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (1898, 1900), True, 'import matplotlib.pyplot as plt\n'), ((1905, 1923), 'matplotlib.pyplot.plot', 'plt.plot', (['fpr', 'tpr'], {}), '(fpr, tpr)\n', (1913, 1923), True, 'import matplotlib.pyplot as plt\n'), ((1928, 1973), 'matplotlib.pyplot.plot', 'plt.plot', (['fpr', 'fpr'], {'linestyle': '"""--"""', 'color': '"""k"""'}), "(fpr, fpr, linestyle='--', color='k')\n", (1936, 1973), True, 'import matplotlib.pyplot as plt\n'), ((1978, 2011), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""False positive rate"""'], {}), "('False positive rate')\n", (1988, 2011), True, 'import matplotlib.pyplot as plt\n'), ((2016, 2048), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""True positive rate"""'], {}), "('True positive rate')\n", (2026, 2048), True, 'import matplotlib.pyplot as plt\n'), ((2108, 2167), 'matplotlib.pyplot.savefig', 'plt.savefig', (['"""../results/PD_GradientBoosting_model_auc.png"""'], {}), 
"('../results/PD_GradientBoosting_model_auc.png')\n", (2119, 2167), True, 'import matplotlib.pyplot as plt\n'), ((2256, 2266), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (2264, 2266), True, 'import matplotlib.pyplot as plt\n'), ((2281, 2324), 'sklearn.metrics.mean_absolute_error', 'mean_absolute_error', (['y_test_predict', 'y_test'], {}), '(y_test_predict, y_test)\n', (2300, 2324), False, 'from sklearn.metrics import mean_absolute_error, accuracy_score, roc_curve, roc_auc_score\n'), ((4891, 4911), 'matplotlib.pyplot.show', 'plt.show', ([], {'block': '(True)'}), '(block=True)\n', (4899, 4911), True, 'import matplotlib.pyplot as plt\n'), ((444, 513), 'os.path.join', 'os.path.join', (['basedir', '"""data"""', '"""processed"""', '"""PD_train_continuous.csv"""'], {}), "(basedir, 'data', 'processed', 'PD_train_continuous.csv')\n", (456, 513), False, 'import os\n'), ((547, 615), 'os.path.join', 'os.path.join', (['basedir', '"""data"""', '"""processed"""', '"""PD_test_continuous.csv"""'], {}), "(basedir, 'data', 'processed', 'PD_test_continuous.csv')\n", (559, 615), False, 'import os\n'), ((1525, 1559), 'sklearn.metrics.accuracy_score', 'accuracy_score', (['y_test', 'y_hat_test'], {}), '(y_test, y_hat_test)\n', (1539, 1559), False, 'from sklearn.metrics import mean_absolute_error, accuracy_score, roc_curve, roc_auc_score\n'), ((2184, 2250), 'os.path.join', 'os.path.join', (['basedir', '"""results"""', '"""roc"""', '"""PD_GradientBoosting.png"""'], {}), "(basedir, 'results', 'roc', 'PD_GradientBoosting.png')\n", (2196, 2250), False, 'import os\n'), ((2870, 2941), 'numpy.std', 'np.std', (['[tree.feature_importances_ for tree in reg.estimators_]'], {'axis': '(0)'}), '([tree.feature_importances_ for tree in reg.estimators_], axis=0)\n', (2876, 2941), True, 'import numpy as np\n'), ((3170, 3182), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (3180, 3182), True, 'import matplotlib.pyplot as plt\n'), ((3191, 3222), 'matplotlib.pyplot.title', 
'plt.title', (['"""Feature Importance"""'], {}), "('Feature Importance')\n", (3200, 3222), True, 'import matplotlib.pyplot as plt\n'), ((3231, 3330), 'matplotlib.pyplot.bar', 'plt.bar', (['xaxis', '(importances[indices] * 100)'], {'color': '"""r"""', 'yerr': '(std[indices] * 100)', 'align': '"""center"""'}), "(xaxis, importances[indices] * 100, color='r', yerr=std[indices] * \n 100, align='center')\n", (3238, 3330), True, 'import matplotlib.pyplot as plt\n'), ((3334, 3371), 'matplotlib.pyplot.xticks', 'plt.xticks', (['xaxis', 'names'], {'rotation': '(90)'}), '(xaxis, names, rotation=90)\n', (3344, 3371), True, 'import matplotlib.pyplot as plt\n'), ((3380, 3395), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""%"""'], {}), "('%')\n", (3390, 3395), True, 'import matplotlib.pyplot as plt\n'), ((3404, 3422), 'matplotlib.pyplot.tight_layout', 'plt.tight_layout', ([], {}), '()\n', (3420, 3422), True, 'import matplotlib.pyplot as plt\n'), ((3986, 4043), 'src.utils.print_test_results', 'print_test_results', (['f"""Yield (15% < predict):"""', 'test[idx]'], {}), "(f'Yield (15% < predict):', test[idx])\n", (4004, 4043), False, 'from src.utils import calc_annual_return_vec, print_test_results\n'), ((4059, 4119), 'numpy.logical_and', 'np.logical_and', (['(y_test_predict > 10.0)', '(y_test_predict < 15.0)'], {}), '(y_test_predict > 10.0, y_test_predict < 15.0)\n', (4073, 4119), True, 'import numpy as np\n'), ((4128, 4191), 'src.utils.print_test_results', 'print_test_results', (['f"""Yield (10% < predict < 15%):"""', 'test[idx]'], {}), "(f'Yield (10% < predict < 15%):', test[idx])\n", (4146, 4191), False, 'from src.utils import calc_annual_return_vec, print_test_results\n'), ((4207, 4266), 'numpy.logical_and', 'np.logical_and', (['(y_test_predict > 5.0)', '(y_test_predict < 10.0)'], {}), '(y_test_predict > 5.0, y_test_predict < 10.0)\n', (4221, 4266), True, 'import numpy as np\n'), ((4275, 4338), 'src.utils.print_test_results', 'print_test_results', (['f"""Yield (5% < predict < 
10%):"""', 'test[idx]'], {}), "(f'Yield (5% < predict < 10%):', test[idx])\n", (4293, 4338), False, 'from src.utils import calc_annual_return_vec, print_test_results\n'), ((4354, 4412), 'numpy.logical_and', 'np.logical_and', (['(y_test_predict > 0.0)', '(y_test_predict < 5.0)'], {}), '(y_test_predict > 0.0, y_test_predict < 5.0)\n', (4368, 4412), True, 'import numpy as np\n'), ((4421, 4483), 'src.utils.print_test_results', 'print_test_results', (['f"""Yield (0% < predict < 5%):"""', 'test[idx]'], {}), "(f'Yield (0% < predict < 5%):', test[idx])\n", (4439, 4483), False, 'from src.utils import calc_annual_return_vec, print_test_results\n'), ((4499, 4559), 'numpy.logical_and', 'np.logical_and', (['(y_test_predict > -10.0)', '(y_test_predict < 0.0)'], {}), '(y_test_predict > -10.0, y_test_predict < 0.0)\n', (4513, 4559), True, 'import numpy as np\n'), ((4568, 4630), 'src.utils.print_test_results', 'print_test_results', (['f"""Yield (-10% < predict < 0%):"""', 'test[idx]'], {}), "(f'Yield (-10% < predict < 0%):', test[idx])\n", (4586, 4630), False, 'from src.utils import calc_annual_return_vec, print_test_results\n'), ((4646, 4708), 'numpy.logical_and', 'np.logical_and', (['(y_test_predict > -20.0)', '(y_test_predict < -10.0)'], {}), '(y_test_predict > -20.0, y_test_predict < -10.0)\n', (4660, 4708), True, 'import numpy as np\n'), ((4717, 4781), 'src.utils.print_test_results', 'print_test_results', (['f"""Yield (-20% < predict < -10%):"""', 'test[idx]'], {}), "(f'Yield (-20% < predict < -10%):', test[idx])\n", (4735, 4781), False, 'from src.utils import calc_annual_return_vec, print_test_results\n'), ((4828, 4885), 'src.utils.print_test_results', 'print_test_results', (['f"""Yield (-20% > predict):"""', 'test[idx]'], {}), "(f'Yield (-20% > predict):', test[idx])\n", (4846, 4885), False, 'from src.utils import calc_annual_return_vec, print_test_results\n'), ((1649, 1669), 'pandas.DataFrame', 'pd.DataFrame', (['y_test'], {}), '(y_test)\n', (1661, 1669), True, 'import 
pandas as pd\n'), ((1671, 1701), 'pandas.DataFrame', 'pd.DataFrame', (['y_hat_test_proba'], {}), '(y_hat_test_proba)\n', (1683, 1701), True, 'import pandas as pd\n'), ((2968, 2991), 'numpy.argsort', 'np.argsort', (['importances'], {}), '(importances)\n', (2978, 2991), True, 'import numpy as np\n'), ((3443, 3531), 'os.path.join', 'os.path.join', (['basedir', '"""results"""', '"""roc"""', '"""PD_GradientBoosting_FeatureImportance.png"""'], {}), "(basedir, 'results', 'roc',\n 'PD_GradientBoosting_FeatureImportance.png')\n", (3455, 3531), False, 'import os\n'), ((2083, 2099), 'numpy.round', 'np.round', (['auc', '(2)'], {}), '(auc, 2)\n', (2091, 2099), True, 'import numpy as np\n')]
|
import numpy as np
import torch
import torch.nn as nn
def soft_update(target: nn.Module, source: nn.Module, tau):
with torch.no_grad():
for target_param, param in zip(target.parameters(), source.parameters()):
target_param.data.copy_(target_param.data * (1.0 - tau) + param.data * tau)
def hard_update(target: nn.Module, source: nn.Module):
with torch.no_grad():
for target_param, param in zip(target.parameters(), source.parameters()):
target_param.data.copy_(param.data)
def compute_target_value(reward, gamma, done, next_q):
q_target = reward + gamma * (1.0 - done) * next_q
return q_target
def to_numpy_or_python_type(tensors):
"""Converts a structure of `Tensor`s to `NumPy` arrays or Python scalar types.
For each tensor, it calls `tensor.numpy()`. If the result is a scalar value,
it converts it to a Python type, such as a float or int, by calling
`result.item()`.
Numpy scalars are converted, as Python types are often more convenient to deal
with. This is especially useful for bfloat16 Numpy scalars, which don't
support as many operations as other Numpy values.
Args:
tensors: A structure of tensors.
Returns:
`tensors`, but scalar tensors are converted to Python types and non-scalar
tensors are converted to Numpy arrays.
"""
def _to_single_numpy_or_python_type(t):
if isinstance(t, torch.Tensor):
x = t.detach().cpu().numpy()
return x.item() if np.ndim(x) == 0 else x
return t # Don't turn ragged or sparse tensors to NumPy.
import tensorflow as tf
return tf.nest.map_structure(_to_single_numpy_or_python_type, tensors)
|
[
"tensorflow.nest.map_structure",
"torch.no_grad",
"numpy.ndim"
] |
[((1652, 1715), 'tensorflow.nest.map_structure', 'tf.nest.map_structure', (['_to_single_numpy_or_python_type', 'tensors'], {}), '(_to_single_numpy_or_python_type, tensors)\n', (1673, 1715), True, 'import tensorflow as tf\n'), ((125, 140), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (138, 140), False, 'import torch\n'), ((378, 393), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (391, 393), False, 'import torch\n'), ((1523, 1533), 'numpy.ndim', 'np.ndim', (['x'], {}), '(x)\n', (1530, 1533), True, 'import numpy as np\n')]
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
# @CreateTime: Jun 18, 2017 1:13 PM
# @Author: <NAME>
# @Contact: <EMAIL>
# @Last Modified By: <NAME>
# @Last Modified Time: Jun 18, 2017 3:45 PM
# @Description: Modify Here, Please
from __future__ import print_function, division
import re
import json
import csv
from datetime import datetime
import ast
import argparse
import logging
import requests
from requests.exceptions import HTTPError, ConnectionError
from urllib3.exceptions import InsecureRequestWarning
import yaml
# to disable warning
# InsecureRequestWarning: Unverified HTTPS request is being made.
# Adding certificate verification is strongly advised. See:
# https://urllib3.readthedocs.io/en/latest/advanced-usage.html#ssl-warnings
requests.urllib3.disable_warnings(InsecureRequestWarning)
START = datetime.now()
ZFSURL = "" # API URL (https://example:215/api)
ZAUTH = () # API Authentication tuple (username, password)
HEADER = {"Content-Type": "application/json"}
LOGFILE = "projects_output.log"
def create_parser():
"""Get Arguments"""
parser = argparse.ArgumentParser(
description="Script to handle projects in ZFS Storage Appliance")
parser.add_argument(
"-s", "--server", type=str, help="Server config file (YAML)", required=True)
parser.add_argument(
"-f", "--file", type=str, help="projects file (CSV)", required=True)
parser.add_argument(
"-p", "--progress", action="store_true", help="progress bar and logging to file",
required=False)
group = parser.add_mutually_exclusive_group()
group.add_argument("-c", "--create", action="store_true",
help="Create projects specified in csv file")
group.add_argument("-d", "--delete", action="store_true",
help="Delete projects specified in csv file")
group.add_argument("-l", "--list", action="store_true",
help="List/Check projects specified in csv file")
return parser
def read_project_file(filename):
"""Read projects csv file and return the list."""
projectlist = []
with open(filename, 'r') as cvsfile:
filereader = csv.reader(cvsfile, delimiter=',')
for row in filereader:
projectlist.append(row)
del projectlist[0]
return projectlist
def read_yaml_file(configfile):
"""Read config file and return credentials in json."""
config = {}
with open(configfile, 'r') as configuration:
try:
config = yaml.load(configuration)
except yaml.YAMLError as error:
print("Error in configuration file: {}").format(error)
return config
suffixes = ['B', 'KB', 'MB', 'GB', 'TB', 'PB']
def response_size(nbytes):
i = 0
while nbytes >= 1024 and i < len(suffixes)-1:
nbytes /= 1024.
i += 1
f = ('%.2f' % nbytes).rstrip('0').rstrip('.')
return '%s %s' % (f, suffixes[i])
def create_project(fileline):
"""Create Project from csv file. (err, msg)"""
if len(fileline) != 20:
return True, "CREATE - FAIL - Error in line {} It needs to be 20 columns long"\
.format(fileline)
pool, project, mountpoint, quota, reservation, compression, dedup, logbias, nodestroy,\
recordsize, readonly, atime, default_sparse, default_user, default_group, default_permissions,\
default_volblocksize, default_volsize, sharenfs, sharesmb = fileline
fullurl = ZFSURL + "/storage/v1/pools/{}/projects"\
.format(pool)
# converted_size = get_real_size(size, size_unit)
# real_blocksize = get_real_blocksize(blocksize)
try:
data = {"name": project,
"mountpoint": mountpoint,
"quota": quota,
"reservation": reservation,
"compression": compression,
"dedup": dedup,
"logbias": logbias,
"nodestroy": nodestroy,
"recordsize": recordsize,
"readonly": readonly,
"atime": atime,
"default_sparse": default_sparse,
"default_user": default_user,
"default_group": default_group,
"default_permissions": default_permissions,
"default_volblocksize": default_volblocksize,
"default_volsize": default_volsize,
"sharenfs": sharenfs,
"sharesmb": sharesmb}
if quota == 'None' and reservation == 'None':
del data["quota"]
del data["reservation"]
elif quota == 'None':
del data["quota"]
elif reservation == 'None':
del data["reservation"]
req = requests.post(fullurl, data=json.dumps(data),
auth=ZAUTH, verify=False, headers=HEADER)
j = json.loads(req.text)
if 'fault' in j:
if 'message' in j['fault']:
return True, "CREATE - FAIL - project '{}' pool '{}' - Error {}"\
.format(project, pool, j['fault']['message'])
req.close()
req.raise_for_status()
return False, "CREATE - SUCCESS - project '{}' pool '{}'".format(project, pool)
except HTTPError as error:
if error.response.status_code == 401:
exit("CREATE - FAIL - project '{}' pool '{}' - Error {}".format(project, pool,
error.message))
else:
return True, "CREATE - FAIL - project '{}' pool '{}' - Error {}"\
.format(project, pool, error.message)
except ConnectionError as error:
return True, "CREATE - FAIL - project '{}' pool '{}' - Error {}"\
.format(project, pool, error.message)
def delete_project(fileline):
"""Delete project specified in csv file (err, msg)"""
if len(fileline) != 2:
return True, "DELETE - FAIL - Error in line {} It needs to be 2 columns long"\
.format(fileline)
pool, project = fileline
fullurl = ZFSURL + "/storage/v1/pools/{}/projects/{}".format(pool, project)
try:
req = requests.delete(fullurl, auth=ZAUTH, verify=False, headers=HEADER)
req.close()
req.raise_for_status()
return False, "DELETE - SUCCESS - project '{}' pool '{}'".format(project, pool)
except HTTPError as error:
if error.response.status_code == 401:
exit("DELETE - FAIL - project '{}' pool '{}' - Error {}".format(project, pool,
error.message))
else:
return True, "DELETE - FAIL - project '{}' pool '{}' - Error {}"\
.format(project, pool, error.message)
except ConnectionError as error:
return True, "DELETE - FAIL - project '{}' pool '{}' - Error {}"\
.format(project, pool, error.message)
def list_projects(fileline):
"""List/Show projects specified in csv file (err, msg)"""
pool = project = None
if len(fileline) == 2:
pool, project = fileline
elif len(fileline) == 20:
pool, project, _, _, _, _, _, _, _, _, _, _, _, _, _, _, _, _, _, _ = fileline
else:
return True, "LIST - FAIL - Error in line {} It needs to be 2 or 20 columns long"\
.format(fileline)
fullurl = ZFSURL + "/storage/v1/pools/{}/projects/{}".format(pool, project)
try:
req = requests.get(fullurl, auth=ZAUTH, verify=False, headers=HEADER)
j = json.loads(req.text)
req.close()
req.raise_for_status()
return False, "LIST - PRESENT - project '{}' pool '{}' mountpoint '{}' quota '{}' "\
"reservation '{}' compression '{}' dedup '{}' logbias '{}' nodestroy '{}' "\
"recordsize '{}' readonly '{}' atime '{}' def_sparse '{}' def_user '{}' "\
"def_group '{}' def_perms '{}' def_volblocksize '{}' def_volsize '{}' "\
"sharenfs '{}' sharesmb '{}'"\
.format(j["project"]["name"],
j["project"]["pool"],
j["project"]["mountpoint"],
response_size(j["project"]["quota"]),
response_size(j["project"]["reservation"]),
j["project"]["compression"],
j["project"]["dedup"],
j["project"]["logbias"],
j["project"]["nodestroy"],
response_size(j["project"]["recordsize"]),
j["project"]["readonly"],
j["project"]["atime"],
j["project"]["default_sparse"],
j["project"]["default_user"],
j["project"]["default_group"],
j["project"]["default_permissions"],
response_size(j["project"]["default_volblocksize"]),
response_size(j["project"]["default_volsize"]),
j["project"]["sharenfs"],
j["project"]["sharesmb"])
except HTTPError as error:
if error.response.status_code == 401:
exit("LIST - FAIL - project '{}', pool '{}' - Error {}".format(project, pool,
error.message))
else:
return True, "LIST - FAIL - project '{}' pool '{}' - Error {}"\
.format(project, pool, error.message)
except ConnectionError as error:
return True, "LIST - FAIL - project '{}' pool '{}' - Error {}"\
.format(project, pool, error.message)
def createprogress(count):
"""Return Bar class with max size specified"""
progressbar = Bar(message='Processing',
suffix='%(index)d/%(max)d - remain: %(remaining)d'
' - %(percent).1f%% - %(eta)ds',
max=count)
return progressbar
def createlogger():
"""Return logger"""
# create logger with 'progress bar'
logger = logging.getLogger('projects')
logger.setLevel(logging.DEBUG)
# create file handler which logs even debug messages
fh = logging.FileHandler(LOGFILE)
# create formatter and add it to the handler
formatter = logging.Formatter('%(asctime)s - %(name)s - %(levelname)s - %(message)s')
fh.setFormatter(formatter)
# add the handler to logger
logger.addHandler(fh)
return logger
def main(args):
"""Run all projects actions"""
csvfile = args.file
listprojects = args.list
createproject = args.create
deleteproject = args.delete
projectlistfromfile = read_project_file(csvfile)
configfile = args.server
config = read_yaml_file(configfile)
global ZFSURL, ZAUTH
ZFSURL = "https://{}:215/api".format(config['ip'])
ZAUTH = (config['username'], config['password'])
if createproject:
if args.progress:
progbar = createprogress(len(projectlistfromfile))
logger = createlogger()
for entry in projectlistfromfile:
err, msg = create_project(entry)
if err:
logger.warn(msg)
else:
logger.info(msg)
progbar.next()
progbar.finish()
else:
print("#" * 79)
print("Creating projects")
print("#" * 79)
for entry in projectlistfromfile:
print(create_project(entry)[1])
print("=" * 79)
elif deleteproject:
if args.progress:
progbar = createprogress(len(projectlistfromfile))
logger = createlogger()
for entry in projectlistfromfile:
err, msg = delete_project(entry)
if err:
logger.warn(msg)
else:
logger.info(msg)
progbar.next()
progbar.finish()
else:
print("#" * 79)
print("Deleting projects")
print("#" * 79)
for entry in projectlistfromfile:
print(delete_project(entry)[1])
print("=" * 79)
elif listprojects:
if args.progress:
progbar = createprogress(len(projectlistfromfile))
logger = createlogger()
for entry in projectlistfromfile:
err, msg = list_projects(entry)
if err:
logger.warn(msg)
else:
logger.info(msg)
progbar.next()
progbar.finish()
else:
print("#" * 79)
print("Listing projects")
print("#" * 79)
for entry in projectlistfromfile:
print(list_projects(entry)[1])
print("=" * 79)
else:
print("#" * 79)
print("You need to specify an option (--list, --create, --delete)")
print("#" * 79)
delta = datetime.now() - START
print("Finished in {} seconds".format(delta.seconds))
if __name__ == "__main__":
parser = create_parser()
args = parser.parse_args()
if args.progress:
try:
from progress.bar import Bar
except ImportError as err:
print("You need to install progress: pip install progress - Error: {}".format(err))
exit(1)
main(args)
|
[
"yaml.load",
"csv.reader",
"argparse.ArgumentParser",
"logging.FileHandler",
"json.loads",
"progress.bar.Bar",
"json.dumps",
"requests.urllib3.disable_warnings",
"logging.Formatter",
"requests.delete",
"requests.get",
"datetime.datetime.now",
"logging.getLogger"
] |
[((743, 800), 'requests.urllib3.disable_warnings', 'requests.urllib3.disable_warnings', (['InsecureRequestWarning'], {}), '(InsecureRequestWarning)\n', (776, 800), False, 'import requests\n'), ((810, 824), 'datetime.datetime.now', 'datetime.now', ([], {}), '()\n', (822, 824), False, 'from datetime import datetime\n'), ((1073, 1167), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'description': '"""Script to handle projects in ZFS Storage Appliance"""'}), "(description=\n 'Script to handle projects in ZFS Storage Appliance')\n", (1096, 1167), False, 'import argparse\n'), ((9998, 10124), 'progress.bar.Bar', 'Bar', ([], {'message': '"""Processing"""', 'suffix': '"""%(index)d/%(max)d - remain: %(remaining)d - %(percent).1f%% - %(eta)ds"""', 'max': 'count'}), "(message='Processing', suffix=\n '%(index)d/%(max)d - remain: %(remaining)d - %(percent).1f%% - %(eta)ds',\n max=count)\n", (10001, 10124), False, 'from progress.bar import Bar\n'), ((10307, 10336), 'logging.getLogger', 'logging.getLogger', (['"""projects"""'], {}), "('projects')\n", (10324, 10336), False, 'import logging\n'), ((10438, 10466), 'logging.FileHandler', 'logging.FileHandler', (['LOGFILE'], {}), '(LOGFILE)\n', (10457, 10466), False, 'import logging\n'), ((10532, 10605), 'logging.Formatter', 'logging.Formatter', (['"""%(asctime)s - %(name)s - %(levelname)s - %(message)s"""'], {}), "('%(asctime)s - %(name)s - %(levelname)s - %(message)s')\n", (10549, 10605), False, 'import logging\n'), ((2158, 2192), 'csv.reader', 'csv.reader', (['cvsfile'], {'delimiter': '""","""'}), "(cvsfile, delimiter=',')\n", (2168, 2192), False, 'import csv\n'), ((4827, 4847), 'json.loads', 'json.loads', (['req.text'], {}), '(req.text)\n', (4837, 4847), False, 'import json\n'), ((6169, 6235), 'requests.delete', 'requests.delete', (['fullurl'], {'auth': 'ZAUTH', 'verify': '(False)', 'headers': 'HEADER'}), '(fullurl, auth=ZAUTH, verify=False, headers=HEADER)\n', (6184, 6235), False, 'import requests\n'), ((7500, 7563), 
'requests.get', 'requests.get', (['fullurl'], {'auth': 'ZAUTH', 'verify': '(False)', 'headers': 'HEADER'}), '(fullurl, auth=ZAUTH, verify=False, headers=HEADER)\n', (7512, 7563), False, 'import requests\n'), ((7576, 7596), 'json.loads', 'json.loads', (['req.text'], {}), '(req.text)\n', (7586, 7596), False, 'import json\n'), ((13255, 13269), 'datetime.datetime.now', 'datetime.now', ([], {}), '()\n', (13267, 13269), False, 'from datetime import datetime\n'), ((2498, 2522), 'yaml.load', 'yaml.load', (['configuration'], {}), '(configuration)\n', (2507, 2522), False, 'import yaml\n'), ((4727, 4743), 'json.dumps', 'json.dumps', (['data'], {}), '(data)\n', (4737, 4743), False, 'import json\n')]
|
from pelican import signals
from . import count
def add_filter(pelican):
    """Expose the sort_by_article_count filter in Pelican's Jinja environment."""
    new_filters = {'sort_by_article_count': count.sort_by_article_count}
    pelican.env.filters.update(new_filters)
def register():
    """Plugin registration.

    Hooks add_filter into Pelican's generator_init signal so the Jinja
    filter is installed before generators run.
    """
    signals.generator_init.connect(add_filter)
|
[
"pelican.signals.generator_init.connect"
] |
[((272, 314), 'pelican.signals.generator_init.connect', 'signals.generator_init.connect', (['add_filter'], {}), '(add_filter)\n', (302, 314), False, 'from pelican import signals\n')]
|
#!/usr/bin/env python
'''
Takes in the usgs neic event object, then determines if it
is relevant above the input filter criteria. If it passes
this filter, an aoi type for the event is created and submitted
to create_aoi
'''
from __future__ import division
from builtins import range
from past.utils import old_div
import os
import re
import json
import argparse
import datetime
import dateutil.parser
import requests
import submit_create_aoi
import submit_slack_notification
import track_displacement_evaluator
import pytz
import math
from shapely import wkt
import geojson
from shapely.geometry import shape, Point, Polygon
from shapely.ops import nearest_points
import constants
def main(event_path, depth_filter=None, mag_filter=None, alertlevel_filter=None, polygon_filter=None, slack_notification=None, water_filter=False, dynamic_threshold=False, create_aoi_version='master', days_pre_event=30, days_post_event=30, distance_from_land=50):
    '''Run the configured filters on the input event and, if it passes,
    evaluate displacement tracks and submit one create-AOI job per track.

    event_path: directory containing <name>.dataset.json / <name>.met.json.
    Filter arguments are forwarded to pass_filters; falsy values disable
    the corresponding filter.  days_post_event is overridden below for
    track AOIs.  Returns None; side effect is job submission.
    '''
    event = get_event(event_path)
    print('found event: {0}'.format(event))
    # calculate relevant event information such as mag, extent, etc
    event_info = calculate_event_info(event)
    # determine if the event passes the requisite filters
    #if not pass_filters(event_info, depth_filter, mag_filter, alertlevel_filter, polygon_filter, water_filter, dynamic_threshold, distance_from_land):
    if not pass_filters(event_info, depth_filter, mag_filter, alertlevel_filter, polygon_filter, water_filter, dynamic_threshold):
        print("Event failed to pass filters....not generating AOI.")
        return
    # call displacement code; each track appears to be
    # (track_number, geojson str, orbit_direction, water-masked geojson str)
    # as consumed by build_params -- TODO confirm against track_displacement_evaluator
    event_tracks = track_displacement_evaluator.main(event['location']['coordinates'], event_info['location']['coordinates'])
    # submit job for event AOI
    #params = build_params(event, event_info, days_pre_event, days_post_event, aoi, False)
    # submit the aoi
    #submit_create_aoi.main(params, create_aoi_version, 'factotum-job_worker-small', '8', 'create_neic_event_aoi')
    for event_track in event_tracks:
        # set the end time for the AOITRACKs 5 years into the future so they remain active for long-term analysis
        days_post_event = 365 * 5
        # process the aoi params
        params = build_params(event, event_info, days_pre_event, days_post_event, event_track, True)
        print(params)
        # submit the aoi
        submit_create_aoi.main(params, create_aoi_version, 'factotum-job_worker-small', '8', 'create_neic_event_aoi')
    # run slack notification
    # mlucas if slack_notification:
    # mlucas     run_slack_notification(event, slack_notification)
#def pass_filters(event_info, depth_filter, mag_filter, alertlevel_filter, polygon_filter, water_filter, dynamic_threshold, distance_from_land):
def pass_filters(event_info, depth_filter, mag_filter, alertlevel_filter, polygon_filter, water_filter, dynamic_threshold):
    '''Run all configured filters over the event.

    Filters are applied in order: polygon, depth, water mask, magnitude,
    alert level.  A falsy filter argument disables that filter.  Returns
    True when the event should be processed, False otherwise.

    Note: the distance and dynamic-threshold filters are intentionally
    disabled (see run_distance_filter / run_dynamic_threshold).
    '''
    # test events always pass
    if event_info['id'] == 'USGS_NEIC_us1000test':
        return True
    # run polygon filter
    if polygon_filter:
        if not run_polygon_filter(event_info, polygon_filter):
            print("Event failed polygon filter.")
            return False
    # run depth filter
    if depth_filter:
        if not run_depth_filter(event_info, float(depth_filter)):
            print('Event failed depth filter.')
            return False
    # run water filter
    if water_filter:
        if not run_water_filter(event_info, float(water_filter)):
            print('Event failed water mask filter.')
            return False
        print('Event passed water mask filter.')
    if mag_filter:  # run magnitude filter
        # BUG FIX: mag_filter arrives from argparse as a string; the original
        # compared float >= str which raises TypeError in Python 3
        if event_info['mag'] >= float(mag_filter):
            print('Event passed magnitude filter, processing')
            return True
        else:
            print('Event failed magnitude filter, not processing')
            return False
    if alertlevel_filter:  # run alertlevel filter
        # BUG FIX: calculate_event_info stores the PAGER level under
        # 'alertlevel'; the original read the missing key 'alert' (KeyError)
        if alertlevel_reaches(event_info['alertlevel'], alertlevel_filter):
            print('Event passes alertlevel filter, processing')
            return True
        else:
            print('Event fails alertlevel filter, not processing.')
            return False
    print('Event has not been excluded by filters, processing.')
    return True
def calculate_event_info(event):
    '''Collect the event fields needed by the filters into a flat dict.

    Keys: id, mag, depth, alertlevel, location (extent geojson), lat, lon.
    '''
    epicenter = get_met(event, 'epicenter')
    lon = epicenter['coordinates'][0]
    lat = epicenter['coordinates'][1]
    magnitude = float(get_met(event, 'mag'))
    # depth is the third coordinate of the raw geometry (km)
    depth = float(event['metadata']['geometry']['coordinates'][2])
    return {
        'id': get_met(event, 'id'),
        'mag': magnitude,
        'depth': depth,
        'alertlevel': get_met(event, 'alert'),
        'location': determine_extent(lat, lon, magnitude),
        'lat': lat,
        'lon': lon,
    }
def run_water_filter(event_info, amount):
    '''True when the event footprint covers more than `amount` km^2 of land.

    Fails open: any error while importing or running the mask returns True.
    '''
    try:
        # lazy import: optional heavy dependency
        import lightweight_water_mask
        print("Geojson being processed: {}".format(event_info['location']))
        land_area = lightweight_water_mask.get_land_area(event_info['location'])
        print("Land area is: {}".format(land_area))
        if land_area > amount:
            print("Land area of event is {}".format(land_area))
            print("Threshold: {}".format(amount))
            return True
        print("Land area less than {}".format(amount))
        return False
    except Exception as err:
        print('Failed on water masking: {}'.format(err))
        return True
def run_depth_filter(event_info, depth_filter):
    '''Return True when the event is strictly shallower than the cutoff depth (km).'''
    return float(event_info['depth']) < depth_filter
def run_distance_filter(event_info, distance_from_land):
    '''Return True if the event epicenter is within the allowed distance (km)
    of any configured region; False otherwise.

    Regions are read from the on-disk config; each region may override the
    default distance_from_land threshold.  Currently unused (disabled in
    pass_filters).
    '''
    print("Running distance filter...")
    # Read config file that defines region geojson and region-specific params
    try:
        with open('/home/ops/verdi/ops/usgs_neic_evaluator/config/regions.json') as f:
            data = json.load(f)
    except Exception as e:
        # BUG FIX: a failed open previously left `f` and `data` undefined,
        # crashing with NameError further down; also the handle leaked on
        # every early return.  The with-statement closes it deterministically.
        print(e)
        return False
    # Event epicenter as a point
    point = Point(event_info["lon"], event_info["lat"])
    for region in data:
        # BUG FIX: the format placeholder was missing, so the region name was dropped
        print("Evaluating region: {}".format(region['region_name']))
        # If a distance_from_land parameter is specified in the region, pull it; if not, use default
        region_distance_from_land = region.get('distance_from_land')
        if isValid(region_distance_from_land):
            print("Distance from land parameter specified within region config; overwriting default value to {}".format(region_distance_from_land))
            tmp_distance_from_land = int(region_distance_from_land)
        else:
            print("Distance from land parameter NOT specified within region config; using default value of {}".format(distance_from_land))
            tmp_distance_from_land = distance_from_land
        try:
            # Create shape objects from region geojson defined in config
            s = json.dumps(region['region_geojson'])
            polygon = wkt.loads(str(shape(geojson.loads(s))))
            # If event overlaps with region, no need to calculate distance
            if point.within(polygon):
                print("Event epicenter is within a defined region. Processing event.")
                return True
            np1, np2 = nearest_points(polygon, point)
            nearest_distance = haversine(np1.y, np1.x, point.y, point.x)
        except Exception as e:
            # BUG FIX: skip the distance comparison when the region could not
            # be evaluated -- nearest_distance would be None/stale and the
            # original then raised TypeError comparing None to an int
            print(e)
            continue
        if nearest_distance <= tmp_distance_from_land:
            print("Event will be processed. The distance between the two closest is: {}".format(nearest_distance))
            return True
        else:
            print("Event distance from this region is too great. The distance between the two closest is: {}".format(nearest_distance))
    return False
def isValid(region_distance_from_land):
    '''Return True when the value is a usable, non-negative distance.

    Accepts ints and numeric strings; None, empty string, negatives and
    non-numeric junk are invalid.
    '''
    if region_distance_from_land is None or region_distance_from_land == "":
        return False
    try:
        # BUG FIX: the original used int(x, 10), which raises TypeError when
        # the JSON config stores the threshold as a number rather than a
        # string, silently rejecting valid per-region overrides
        return int(region_distance_from_land) >= 0
    except (TypeError, ValueError):
        return False
def haversine(lat1, lon1, lat2, lon2):
    '''
    Great-circle distance (km) between two (lat, lon) points given in degrees.
    '''
    phi1 = math.radians(lat1)
    lam1 = math.radians(lon1)
    phi2 = math.radians(lat2)
    lam2 = math.radians(lon2)
    # haversine formula
    half_chord = (math.sin((phi2 - phi1) / 2) ** 2
                  + math.cos(phi1) * math.cos(phi2) * math.sin((lam2 - lam1) / 2) ** 2)
    return constants.EARTH_RADIUS * (2 * math.asin(math.sqrt(half_chord)))
def get_coord(lat, lng):
    '''Project a (lat, lng) degree pair to Cartesian (x, y) on the sphere surface.'''
    lat_r = math.radians(lat)
    lng_r = math.radians(lng)
    radius_at_lat = constants.EARTH_RADIUS * math.cos(lat_r)
    return radius_at_lat * math.cos(lng_r), radius_at_lat * math.sin(lng_r)
def run_polygon_filter(event_info, polygon_filter):
    '''Spatial filter: True when the event extent overlaps the configured polygon.'''
    if isinstance(polygon_filter, str):
        polygon_filter = json.loads(polygon_filter)
    event_coords = event_info['location']['coordinates']
    return bool(is_overlap(polygon_filter, event_coords))
def is_overlap(geojson1, geojson2):
    '''True if the two coordinate rings intersect.

    Each argument is a list of coordinate tuples forming a polygon ring.
    '''
    return Polygon(geojson1).intersects(Polygon(geojson2))
def run_dynamic_threshold(event_info):
    '''Heuristic significance filter combining magnitude and PAGER alert level.

    Passes events that are M>=7.0, M>=6.0 with at least a yellow alert,
    or any event with a red alert.
    '''
    mag = event_info['mag']
    level = event_info['alertlevel']
    return (mag >= 7.0
            or (mag >= 6.0 and alertlevel_reaches(level, 'yellow'))
            or alertlevel_reaches(level, 'red'))
def alertlevel_reaches(event_level, comparison_level):
    '''True when event_level is at or above comparison_level, False otherwise.

    Levels order: green < yellow < orange < red.  A None event level never
    reaches any threshold.
    '''
    if event_level is None:
        return False
    ranking = {'green': 1, 'yellow': 2, 'orange': 3, 'red': 4}
    return ranking[event_level] >= ranking[comparison_level]
def get_event(event_path):
    '''Load the event dataset and metadata JSON files into a single dict.

    event_path is a directory expected to contain <name>.dataset.json and
    <name>.met.json, where <name> is the directory basename.  The metadata
    document is attached under the 'metadata' key of the dataset document.
    '''
    event_filename = os.path.basename(event_path)
    event_ds_path = os.path.join(event_path, event_filename + '.dataset.json')
    event_met_path = os.path.join(event_path, event_filename + '.met.json')
    # (removed unused `cwd = os.getcwd()` dead local)
    with open(event_ds_path) as f:
        event_object = json.load(f)
    with open(event_met_path) as f:
        event_object['metadata'] = json.load(f)
    return event_object
def get_met(product, key):
    '''Look up `key` in a product document, trying the common nesting spots.

    Search order: top level, _source, _source.metadata, metadata,
    metadata.properties, properties.  Returns False when not found.
    '''
    source = product.get('_source', {})
    metadata = product.get('metadata', {})
    candidates = (
        product,
        source,
        source.get('metadata', {}),
        metadata,
        metadata.get('properties', {}),
        product.get('properties', {}),
    )
    for container in candidates:
        if key in container:
            return container[key]
    return False
def shift(lat, lon, bearing, distance):
    '''Return [lon, lat] of the point `distance` km from (lat, lon) along
    `bearing` degrees (great-circle destination formula, spherical Earth).

    Note: past.utils.old_div removed -- every operand here is a float, so
    plain true division is byte-identical; this drops the py2 compat shim.
    '''
    R = 6378.1  # Radius of the Earth (km)
    bearing_r = math.radians(bearing)
    lat1 = math.radians(lat)  # Current lat point converted to radians
    lon1 = math.radians(lon)  # Current long point converted to radians
    angular = distance / R  # angular distance in radians
    lat2 = math.asin(math.sin(lat1) * math.cos(angular) +
                      math.cos(lat1) * math.sin(angular) * math.cos(bearing_r))
    lon2 = lon1 + math.atan2(math.sin(bearing_r) * math.sin(angular) * math.cos(lat1),
                             math.cos(angular) - math.sin(lat1) * math.sin(lat2))
    return [math.degrees(lon2), math.degrees(lat2)]
def determine_extent(lat, lon, mag):
    '''Approximate the event extent as a geojson Polygon around the epicenter.

    The radius scales with magnitude: (mag - 5.0) / 2 * 150 km.  Points are
    placed every 20 degrees of bearing, 0 through 360 inclusive, so the ring
    closes on itself.
    '''
    lat = float(lat)
    lon = float(lon)
    radius_km = (float(mag) - 5.0) / 2.0 * 150
    ring = [shift(lat, lon, bearing, radius_km) for bearing in range(0, 361, 20)]
    return {"coordinates": [ring], "type": "Polygon"}
def build_params(event, event_info, days_pre_event, days_post_event, event_track, isTrack):
    '''builds parameters for a job submission from the event, which creates the aoi,
    and returns those parameters

    Starts from the template in config/aoi_params.json next to this script,
    then fills in the AOI name, time window, browse image and display
    metadata.  When isTrack is true, event_track is indexed as
    (track_number, geojson str, orbit_direction, water-masked geojson str);
    otherwise event_track is used directly as the geojson polygon.
    '''
    # loads the config json
    current_dir = os.path.dirname(os.path.realpath(__file__))
    params_path = os.path.join(current_dir, 'config', 'aoi_params.json')
    # NOTE(review): this file handle is never closed explicitly
    params = json.load(open(params_path, 'r'))
    aoi_name = build_aoi_name(event, event_info, isTrack)
    # geojson_polygon = event_info['location']
    aoi_event_time = get_met(event, 'starttime')
    # AOI window brackets the event time by the configured day offsets
    starttime = determine_time(aoi_event_time, -1 * float(days_pre_event))
    eventtime = get_met(event, 'starttime')
    endtime = determine_time(aoi_event_time, float(days_post_event))
    aoi_image_url = parse_browse_url(event)
    event_metadata = build_event_metadata(event, event_info)  # builds additional metadata to be displayed
    params['starttime'] = starttime
    params['eventtime'] = eventtime
    params['endtime'] = endtime
    params['additional_metadata']['image_url'] = aoi_image_url
    params['additional_metadata']['event_metadata'] = event_metadata
    if isTrack:
        # per-track AOI: name carries the track number, polygons come from the track
        params['name'] = aoi_name + "_" + str(event_track[0])
        params['geojson_polygon'] = json.loads(event_track[1])
        params['additional_metadata']['event_metadata']['track_number'] = event_track[0]
        params['additional_metadata']['event_metadata']['orbit_direction'] = event_track[2]
        params['track_number'] = event_track[0]
        params['orbit_direction'] = event_track[2]
        #params['water_masked_geojson_polygon'] = event_track[3]
        params['additional_metadata']['event_metadata']['water_masked_geojson_polygon'] = json.loads(event_track[3])
    else:
        params['name'] = aoi_name
        params['geojson_polygon'] = event_track
        params['track_number'] = ""
        params['orbit_direction'] = ""
    # load account and username from context
    context = load_json('_context.json')
    params['account'] = context['account']
    params['username'] = context['username']
    return params
def load_json(file_path):
    '''Load a JSON file and return its parsed contents.'''
    # the with-statement closes the file; the original's explicit close()
    # inside the with-block was redundant
    with open(file_path, 'r') as json_data:
        return json.load(json_data)
def build_event_metadata(event, event_info):
    '''Assemble the display metadata stored in the AOI's event_metadata field.'''
    meta = {
        'event id': event_info['id'],
        'magnitude': event_info['mag'],
        'depth': event_info['depth'],
        'location': get_met(event, 'place'),
        'latitude': event_info['lat'],
        'longitude': event_info['lon'],
        'label': get_met(event, 'title'),
    }
    try:
        meta['time'] = convert_epoch_time_to_utc(get_met(event, 'time'))
    except:
        # best-effort: leave 'time' out if the timestamp is missing/unparseable
        pass
    meta['pager_status'] = event_info['alertlevel']
    meta['tsunami warning'] = get_met(event, 'tsunami')
    meta['usgs information'] = 'https://earthquake.usgs.gov/earthquakes/eventpage/{0}'.format(event_info['id'])
    return meta
def build_aoi_name(event, event_info, isTrack):
    '''Build a readable AOI name for the event.

    Track AOIs are prefixed AOITRACK_eq_usgs_neic_pdl, plain AOIs
    AOI_monitoring.  Falls back to <prefix>_<event id> when the place
    string cannot be parsed.  (The two original branches duplicated the
    entire parse logic; only the prefix differed.)
    '''
    prefix = 'AOITRACK_eq_usgs_neic_pdl' if isTrack else 'AOI_monitoring'
    try:
        id_str = get_met(event, 'id')
        place = get_met(event, 'place')
        # e.g. "69km WSW of Kirakira, Solomon Islands" -> "Kirakira Solomon Islands"
        match = re.search(re.compile(' of (.*)[,]? (.*)'), place)
        location_str = '{0}_{1}'.format(match.group(1), match.group(2)).replace(',', '')
        mag_str = "{0:0.1f}".format(float(get_met(event, 'mag')))
        return '{0}_{1}_{2}_{3}'.format(prefix, id_str, mag_str, location_str)
    except:
        # any parse failure (e.g. no regex match -> AttributeError) falls back to the id
        return '{0}_{1}'.format(prefix, event_info['id'])
def convert_epoch_time_to_utc(epoch_timestring):
    '''Convert a POSIX timestamp to an ISO-8601 UTC string with millisecond precision.'''
    # fromtimestamp(..., tz=...) replaces datetime.utcfromtimestamp, which is
    # deprecated since Python 3.12; stdlib timezone.utc replaces pytz.UTC.
    # Both changes produce an identical string.
    dt = datetime.datetime.fromtimestamp(epoch_timestring, tz=datetime.timezone.utc)
    return dt.strftime('%Y-%m-%dT%H:%M:%S.%f')[:-3]  # microseconds trimmed to milli
def determine_time(time_string, offset):
    '''Shift an ISO time string by `offset` days; return a millisecond-precision UTC string.'''
    base = dateutil.parser.parse(time_string).replace(tzinfo=pytz.UTC)
    shifted = base + datetime.timedelta(days=offset)
    return shifted.strftime('%Y-%m-%dT%H:%M:%S.%f')[:-3]
def parse_browse_url(event):
    '''Fetch the event detail feed and extract the shakemap browse image URL.

    Returns None when the feed is unreachable or has no shakemap product.
    '''
    try:
        detail_url = event['properties']['detail']
        response = requests.session().get(detail_url)
        detail = json.loads(response.text)
        return detail['properties']['products']['shakemap'][0]['contents']['download/tvmap.jpg']['url']
    except:
        print('Failed to parse browse url')
        return None
def build_longlabel(event):
    '''Build a filename-safe label from the event's place string.'''
    estr = get_met(event, 'place')  # ex: "69km WSW of Kirakira, Solomon Islands"
    match = re.search(re.compile(' of (.*)[,]? (.*)'), estr)
    product_name = '%s %s' % (match.group(1), match.group(2)) if match else estr
    # spaces become underscores, commas are dropped
    return product_name.replace(' ', '_').replace(',', '')
def run_slack_notification(event, slack_notification):
    '''Post a Slack webhook notification for the event (requires the webhook key).'''
    metadata = get_met(event, 'metadata')
    submit_slack_notification.slack_notify(metadata, slack_notification)
def parser():
    '''
    Build the command-line argument parser for the evaluator.
    @return configured argparse parser
    '''
    arg_parser = argparse.ArgumentParser(description="Run PAGER query with given parameters")
    arg_parser.add_argument("-e", "--event_path", required=True,
                            help="path to the event file", dest="event_path")
    arg_parser.add_argument("-t", "--depth_filter", required=False, default=None,
                            help="Maximum depth filter in km", dest="depth_filter")
    arg_parser.add_argument("-m", "--mag_filter", required=False, default=None,
                            help="Minimum magnitude filter", dest="mag_filter")
    arg_parser.add_argument("-a", "--alertlevel_filter", required=False, default=None,
                            help="Minium pager alert level filter",
                            choices=['green', 'yellow', 'orange', 'red'], dest="alertlevel_filter")
    arg_parser.add_argument("-p", "--polygon_filter", required=False, default=None,
                            help="Geojson polygon filter", dest="polygon_filter")
    arg_parser.add_argument("-s", "--slack_notification", required=False, default=False,
                            help="Key for slack notification, will notify via slack if provided.",
                            dest="slack_notification")
    arg_parser.add_argument("-w", "--water_filter", required=False, default=False,
                            help="Water filter. If provided, use minimum number of square kilometers in the aoi required to pass the filter.",
                            dest="water_filter")
    arg_parser.add_argument("-d", "--dynamic_threshold", required=False, default=False, action='store_true',
                            help="Flag for whether a dynamic threshold is used. Takes priority over pager & mag filters.",
                            dest="dynamic_threshold")
    arg_parser.add_argument("-r", "--create_aoi_version", required=False, default='master',
                            help="Version of create_aoi to submit", dest="create_aoi_version")
    arg_parser.add_argument("--days_pre_event", required=False, default=30,
                            help="Days for the AOI to span pre-event", dest="days_pre_event")
    arg_parser.add_argument("--days_post_event", required=False, default=30,
                            help="Days for the AOI to span post-event", dest="days_post_event")
    arg_parser.add_argument("--distance_from_land", required=False, default=50,
                            help="Distance from land (km)", dest="distance_from_land")
    return arg_parser
if __name__ == '__main__':
    # CLI entry point: parse arguments and run the evaluator end-to-end
    args = parser().parse_args()
    main(event_path=args.event_path, depth_filter=args.depth_filter, mag_filter=args.mag_filter, alertlevel_filter=args.alertlevel_filter, polygon_filter=args.polygon_filter, slack_notification=args.slack_notification, water_filter=args.water_filter, dynamic_threshold=args.dynamic_threshold,
         create_aoi_version=args.create_aoi_version, days_pre_event=args.days_pre_event, days_post_event=args.days_post_event, distance_from_land=args.distance_from_land)
|
[
"argparse.ArgumentParser",
"past.utils.old_div",
"json.dumps",
"submit_slack_notification.slack_notify",
"builtins.range",
"os.path.join",
"lightweight_water_mask.get_land_area",
"shapely.geometry.Point",
"track_displacement_evaluator.main",
"shapely.geometry.Polygon",
"json.loads",
"math.radians",
"datetime.datetime.utcfromtimestamp",
"datetime.timedelta",
"submit_create_aoi.main",
"math.cos",
"re.search",
"requests.session",
"geojson.loads",
"math.sqrt",
"os.path.basename",
"os.path.realpath",
"math.sin",
"math.degrees",
"re.compile",
"json.load",
"shapely.ops.nearest_points",
"os.getcwd"
] |
[((1730, 1840), 'track_displacement_evaluator.main', 'track_displacement_evaluator.main', (["event['location']['coordinates']", "event_info['location']['coordinates']"], {}), "(event['location']['coordinates'],\n event_info['location']['coordinates'])\n", (1763, 1840), False, 'import track_displacement_evaluator\n'), ((7258, 7270), 'json.load', 'json.load', (['f'], {}), '(f)\n', (7267, 7270), False, 'import json\n'), ((9780, 9798), 'math.radians', 'math.radians', (['lat1'], {}), '(lat1)\n', (9792, 9798), False, 'import math\n'), ((9810, 9828), 'math.radians', 'math.radians', (['lon1'], {}), '(lon1)\n', (9822, 9828), False, 'import math\n'), ((9840, 9858), 'math.radians', 'math.radians', (['lat2'], {}), '(lat2)\n', (9852, 9858), False, 'import math\n'), ((9870, 9888), 'math.radians', 'math.radians', (['lon2'], {}), '(lon2)\n', (9882, 9888), False, 'import math\n'), ((10141, 10158), 'math.radians', 'math.radians', (['lat'], {}), '(lat)\n', (10153, 10158), False, 'import math\n'), ((10171, 10188), 'math.radians', 'math.radians', (['lng'], {}), '(lng)\n', (10183, 10188), False, 'import math\n'), ((10895, 10912), 'shapely.geometry.Polygon', 'Polygon', (['geojson1'], {}), '(geojson1)\n', (10902, 10912), False, 'from shapely.geometry import shape, Point, Polygon\n'), ((10922, 10939), 'shapely.geometry.Polygon', 'Polygon', (['geojson2'], {}), '(geojson2)\n', (10929, 10939), False, 'from shapely.geometry import shape, Point, Polygon\n'), ((11884, 11912), 'os.path.basename', 'os.path.basename', (['event_path'], {}), '(event_path)\n', (11900, 11912), False, 'import os\n'), ((11933, 11991), 'os.path.join', 'os.path.join', (['event_path', "(event_filename + '.dataset.json')"], {}), "(event_path, event_filename + '.dataset.json')\n", (11945, 11991), False, 'import os\n'), ((12013, 12067), 'os.path.join', 'os.path.join', (['event_path', "(event_filename + '.met.json')"], {}), "(event_path, event_filename + '.met.json')\n", (12025, 12067), False, 'import os\n'), ((12078, 12089), 
'os.getcwd', 'os.getcwd', ([], {}), '()\n', (12087, 12089), False, 'import os\n'), ((13272, 13303), 'past.utils.old_div', 'old_div', (['(math.pi * bearing)', '(180)'], {}), '(math.pi * bearing, 180)\n', (13279, 13303), False, 'from past.utils import old_div\n'), ((13345, 13362), 'math.radians', 'math.radians', (['lat'], {}), '(lat)\n', (13357, 13362), False, 'import math\n'), ((13416, 13433), 'math.radians', 'math.radians', (['lon'], {}), '(lon)\n', (13428, 13433), False, 'import math\n'), ((13842, 13860), 'math.degrees', 'math.degrees', (['lat2'], {}), '(lat2)\n', (13854, 13860), False, 'import math\n'), ((13872, 13890), 'math.degrees', 'math.degrees', (['lon2'], {}), '(lon2)\n', (13884, 13890), False, 'import math\n'), ((14590, 14644), 'os.path.join', 'os.path.join', (['current_dir', '"""config"""', '"""aoi_params.json"""'], {}), "(current_dir, 'config', 'aoi_params.json')\n", (14602, 14644), False, 'import os\n'), ((19914, 19945), 're.compile', 're.compile', (['""" of (.*)[,]? (.*)"""'], {}), "(' of (.*)[,]? 
(.*)')\n", (19924, 19945), False, 'import re\n'), ((19958, 19980), 're.search', 're.search', (['regex', 'estr'], {}), '(regex, estr)\n', (19967, 19980), False, 'import re\n'), ((20359, 20429), 'submit_slack_notification.slack_notify', 'submit_slack_notification.slack_notify', (['event_vals', 'slack_notification'], {}), '(event_vals, slack_notification)\n', (20397, 20429), False, 'import submit_slack_notification\n'), ((20544, 20620), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'description': '"""Run PAGER query with given parameters"""'}), "(description='Run PAGER query with given parameters')\n", (20567, 20620), False, 'import argparse\n'), ((2471, 2584), 'submit_create_aoi.main', 'submit_create_aoi.main', (['params', 'create_aoi_version', '"""factotum-job_worker-small"""', '"""8"""', '"""create_neic_event_aoi"""'], {}), "(params, create_aoi_version,\n 'factotum-job_worker-small', '8', 'create_neic_event_aoi')\n", (2493, 2584), False, 'import submit_create_aoi\n'), ((6072, 6132), 'lightweight_water_mask.get_land_area', 'lightweight_water_mask.get_land_area', (["event_info['location']"], {}), "(event_info['location'])\n", (6108, 6132), False, 'import lightweight_water_mask\n'), ((10240, 10255), 'math.cos', 'math.cos', (['lng_r'], {}), '(lng_r)\n', (10248, 10255), False, 'import math\n'), ((10307, 10322), 'math.sin', 'math.sin', (['lng_r'], {}), '(lng_r)\n', (10315, 10322), False, 'import math\n'), ((10549, 10575), 'json.loads', 'json.loads', (['polygon_filter'], {}), '(polygon_filter)\n', (10559, 10575), False, 'import json\n'), ((12170, 12182), 'json.load', 'json.load', (['f'], {}), '(f)\n', (12179, 12182), False, 'import json\n'), ((12254, 12266), 'json.load', 'json.load', (['f'], {}), '(f)\n', (12263, 12266), False, 'import json\n'), ((14069, 14086), 'builtins.range', 'range', (['(0)', '(361)', '(20)'], {}), '(0, 361, 20)\n', (14074, 14086), False, 'from builtins import range\n'), ((14544, 14570), 'os.path.realpath', 'os.path.realpath', 
(['__file__'], {}), '(__file__)\n', (14560, 14570), False, 'import os\n'), ((15535, 15561), 'json.loads', 'json.loads', (['event_track[1]'], {}), '(event_track[1])\n', (15545, 15561), False, 'import json\n'), ((15997, 16023), 'json.loads', 'json.loads', (['event_track[3]'], {}), '(event_track[3])\n', (16007, 16023), False, 'import json\n'), ((16540, 16560), 'json.load', 'json.load', (['json_data'], {}), '(json_data)\n', (16549, 16560), False, 'import json\n'), ((19170, 19201), 'datetime.timedelta', 'datetime.timedelta', ([], {'days': 'offset'}), '(days=offset)\n', (19188, 19201), False, 'import datetime\n'), ((19474, 19492), 'requests.session', 'requests.session', ([], {}), '()\n', (19490, 19492), False, 'import requests\n'), ((19549, 19574), 'json.loads', 'json.loads', (['response.text'], {}), '(response.text)\n', (19559, 19574), False, 'import json\n'), ((8107, 8143), 'json.dumps', 'json.dumps', (["region['region_geojson']"], {}), "(region['region_geojson'])\n", (8117, 8143), False, 'import json\n'), ((8504, 8519), 'shapely.geometry.Point', 'Point', (['lng', 'lat'], {}), '(lng, lat)\n', (8509, 8519), False, 'from shapely.geometry import shape, Point, Polygon\n'), ((8798, 8828), 'shapely.ops.nearest_points', 'nearest_points', (['polygon', 'point'], {}), '(polygon, point)\n', (8812, 8828), False, 'from shapely.ops import nearest_points\n'), ((9944, 9962), 'math.sin', 'math.sin', (['(dlat / 2)'], {}), '(dlat / 2)\n', (9952, 9962), False, 'import math\n'), ((10050, 10062), 'math.sqrt', 'math.sqrt', (['a'], {}), '(a)\n', (10059, 10062), False, 'import math\n'), ((10222, 10237), 'math.cos', 'math.cos', (['lat_r'], {}), '(lat_r)\n', (10230, 10237), False, 'import math\n'), ((10289, 10304), 'math.cos', 'math.cos', (['lat_r'], {}), '(lat_r)\n', (10297, 10304), False, 'import math\n'), ((17698, 17729), 're.compile', 're.compile', (['""" of (.*)[,]? (.*)"""'], {}), "(' of (.*)[,]? 
(.*)')\n", (17708, 17729), False, 'import re\n'), ((17750, 17773), 're.search', 're.search', (['regex', 'place'], {}), '(regex, place)\n', (17759, 17773), False, 'import re\n'), ((18317, 18348), 're.compile', 're.compile', (['""" of (.*)[,]? (.*)"""'], {}), "(' of (.*)[,]? (.*)')\n", (18327, 18348), False, 'import re\n'), ((18369, 18392), 're.search', 're.search', (['regex', 'place'], {}), '(regex, place)\n', (18378, 18392), False, 'import re\n'), ((18845, 18897), 'datetime.datetime.utcfromtimestamp', 'datetime.datetime.utcfromtimestamp', (['epoch_timestring'], {}), '(epoch_timestring)\n', (18879, 18897), False, 'import datetime\n'), ((8166, 8182), 'geojson.loads', 'geojson.loads', (['s'], {}), '(s)\n', (8179, 8182), False, 'import geojson\n'), ((9970, 9984), 'math.cos', 'math.cos', (['lat1'], {}), '(lat1)\n', (9978, 9984), False, 'import math\n'), ((9987, 10001), 'math.cos', 'math.cos', (['lat2'], {}), '(lat2)\n', (9995, 10001), False, 'import math\n'), ((10004, 10022), 'math.sin', 'math.sin', (['(dlon / 2)'], {}), '(dlon / 2)\n', (10012, 10022), False, 'import math\n'), ((13498, 13512), 'math.sin', 'math.sin', (['lat1'], {}), '(lat1)\n', (13506, 13512), False, 'import math\n'), ((13619, 13636), 'math.cos', 'math.cos', (['bearing'], {}), '(bearing)\n', (13627, 13636), False, 'import math\n'), ((13720, 13734), 'math.cos', 'math.cos', (['lat1'], {}), '(lat1)\n', (13728, 13734), False, 'import math\n'), ((13524, 13544), 'past.utils.old_div', 'old_div', (['distance', 'R'], {}), '(distance, R)\n', (13531, 13544), False, 'from past.utils import old_div\n'), ((13569, 13583), 'math.cos', 'math.cos', (['lat1'], {}), '(lat1)\n', (13577, 13583), False, 'import math\n'), ((13667, 13684), 'math.sin', 'math.sin', (['bearing'], {}), '(bearing)\n', (13675, 13684), False, 'import math\n'), ((13774, 13794), 'past.utils.old_div', 'old_div', (['distance', 'R'], {}), '(distance, R)\n', (13781, 13794), False, 'from past.utils import old_div\n'), ((13798, 13812), 'math.sin', 'math.sin', 
(['lat1'], {}), '(lat1)\n', (13806, 13812), False, 'import math\n'), ((13815, 13829), 'math.sin', 'math.sin', (['lat2'], {}), '(lat2)\n', (13823, 13829), False, 'import math\n'), ((13595, 13615), 'past.utils.old_div', 'old_div', (['distance', 'R'], {}), '(distance, R)\n', (13602, 13615), False, 'from past.utils import old_div\n'), ((13696, 13716), 'past.utils.old_div', 'old_div', (['distance', 'R'], {}), '(distance, R)\n', (13703, 13716), False, 'from past.utils import old_div\n')]
|
from contextlib import suppress

# mxnet is an optional dependency: silently skip the detector export when
# the mxnet-backed module cannot be imported
with suppress(ImportError):
    from .mxnet_object_detector import MxnetObjectDetector
|
[
"contextlib.suppress"
] |
[((38, 59), 'contextlib.suppress', 'suppress', (['ImportError'], {}), '(ImportError)\n', (46, 59), False, 'from contextlib import suppress\n')]
|
# -*- coding: utf-8 -*-
# snapshottest: v1 - https://goo.gl/zC4yUc
from __future__ import unicode_literals
from snapshottest import GenericRepr, Snapshot
snapshots = Snapshot()
snapshots['TestCreate.test[True-uvloop-None-True] history'] = {
'_id': '9pfsom1b.0',
'created_at': GenericRepr('datetime.datetime(2015, 10, 6, 20, 0)'),
'description': 'Created Tobacco mosaic virus',
'diff': {
'_id': '9pfsom1b',
'abbreviation': '',
'isolates': [
],
'last_indexed_version': None,
'lower_name': 'tobacco mosaic virus',
'name': 'Tobacco mosaic virus',
'reference': {
'id': 'foo'
},
'schema': [
],
'verified': False,
'version': 0
},
'index': {
'id': 'unbuilt',
'version': 'unbuilt'
},
'method_name': 'create',
'otu': {
'id': '9pfsom1b',
'name': 'Tobacco mosaic virus',
'version': 0
},
'reference': {
'id': 'foo'
},
'user': {
'id': 'test'
}
}
snapshots['TestCreate.test[True-uvloop-None-True] otu'] = {
'_id': '9pfsom1b',
'abbreviation': '',
'isolates': [
],
'last_indexed_version': None,
'lower_name': 'tobacco mosaic virus',
'name': 'Tobacco mosaic virus',
'reference': {
'id': 'foo'
},
'schema': [
],
'verified': False,
'version': 0
}
snapshots['TestCreate.test[True-uvloop-TMV-True] history'] = {
'_id': '9pfsom1b.0',
'created_at': GenericRepr('datetime.datetime(2015, 10, 6, 20, 0)'),
'description': 'Created Tobacco mosaic virus (TMV)',
'diff': {
'_id': '9pfsom1b',
'abbreviation': 'TMV',
'isolates': [
],
'last_indexed_version': None,
'lower_name': 'tobacco mosaic virus',
'name': 'Tobacco mosaic virus',
'reference': {
'id': 'foo'
},
'schema': [
],
'verified': False,
'version': 0
},
'index': {
'id': 'unbuilt',
'version': 'unbuilt'
},
'method_name': 'create',
'otu': {
'id': '9pfsom1b',
'name': 'Tobacco mosaic virus',
'version': 0
},
'reference': {
'id': 'foo'
},
'user': {
'id': 'test'
}
}
snapshots['TestCreate.test[True-uvloop-TMV-True] otu'] = {
'_id': '9pfsom1b',
'abbreviation': 'TMV',
'isolates': [
],
'last_indexed_version': None,
'lower_name': 'tobacco mosaic virus',
'name': 'Tobacco mosaic virus',
'reference': {
'id': 'foo'
},
'schema': [
],
'verified': False,
'version': 0
}
snapshots['TestCreate.test[True-uvloop-True] history'] = {
'_id': '9pfsom1b.0',
'created_at': GenericRepr('datetime.datetime(2015, 10, 6, 20, 0)'),
'description': 'Created Tobacco mosaic virus',
'diff': {
'_id': '9pfsom1b',
'abbreviation': '',
'isolates': [
],
'last_indexed_version': None,
'lower_name': 'tobacco mosaic virus',
'name': 'Tobacco mosaic virus',
'reference': {
'id': 'foo'
},
'schema': [
],
'verified': False,
'version': 0
},
'index': {
'id': 'unbuilt',
'version': 'unbuilt'
},
'method_name': 'create',
'otu': {
'id': '9pfsom1b',
'name': 'Tobacco mosaic virus',
'version': 0
},
'reference': {
'id': 'foo'
},
'user': {
'id': 'test'
}
}
snapshots['TestCreate.test[True-uvloop-True] otu'] = {
'_id': '9pfsom1b',
'abbreviation': '',
'isolates': [
],
'last_indexed_version': None,
'lower_name': 'tobacco mosaic virus',
'name': 'Tobacco mosaic virus',
'reference': {
'id': 'foo'
},
'schema': [
],
'verified': False,
'version': 0
}
snapshots['TestSetAsDefault.test[True-uvloop] history'] = {
'_id': '6116cba1.1',
'created_at': GenericRepr('datetime.datetime(2015, 10, 6, 20, 0)'),
'description': 'Set Isolate b as default',
'diff': [
[
'change',
'version',
[
0,
1
]
],
[
'change',
[
'isolates',
0,
'default'
],
[
True,
False
]
],
[
'change',
[
'isolates',
1,
'default'
],
[
False,
True
]
]
],
'index': {
'id': 'unbuilt',
'version': 'unbuilt'
},
'method_name': 'set_as_default',
'otu': {
'id': '6116cba1',
'name': 'Prunus virus F',
'version': 1
},
'reference': {
'id': 'hxn167'
},
'user': {
'id': 'test'
}
}
snapshots['TestSetAsDefault.test[True-uvloop] joined'] = {
'_id': '6116cba1',
'abbreviation': 'PVF',
'imported': True,
'isolates': [
{
'default': False,
'id': 'cab8b360',
'sequences': [
],
'source_name': '8816-v2',
'source_type': 'isolate'
},
{
'default': True,
'id': 'test',
'sequences': [
],
'source_name': 'b',
'source_type': 'isolate'
}
],
'last_indexed_version': 0,
'lower_name': 'prunus virus f',
'name': 'Prunus virus F',
'reference': {
'id': 'hxn167'
},
'schema': [
],
'verified': False,
'version': 1
}
snapshots['TestSetAsDefault.test_no_change[True-uvloop] joined'] = {
'_id': '6116cba1',
'abbreviation': 'PVF',
'imported': True,
'isolates': [
{
'default': True,
'id': 'cab8b360',
'sequences': [
],
'source_name': '8816-v2',
'source_type': 'isolate'
},
{
'default': False,
'id': 'test',
'sequences': [
],
'source_name': 'b',
'source_type': 'isolate'
}
],
'last_indexed_version': 0,
'lower_name': 'prunus virus f',
'name': 'Prunus virus F',
'reference': {
'id': 'hxn167'
},
'schema': [
],
'verified': False,
'version': 0
}
snapshots['TestSetAsDefault.test_no_change[True-uvloop] response'] = {
'default': True,
'id': 'cab8b360',
'sequences': [
],
'source_name': '8816-v2',
'source_type': 'isolate'
}
snapshots['test_get[uvloop-None] 1'] = {
'abbreviation': 'PVF',
'id': '6116cba1',
'imported': True,
'isolates': [
{
'default': True,
'id': 'cab8b360',
'sequences': [
{
'definition': 'Prunus virus F isolate 8816-s2 segment RNA2 polyprotein 2 gene, complete cds.',
'host': 'sweet cherry',
'id': 'KX269872',
'segment': None,
'sequence': 'TGTTTAAGAGATTAAACAACCGCTTTC'
}
],
'source_name': '8816-v2',
'source_type': 'isolate'
}
],
'issues': None,
'last_indexed_version': 0,
'most_recent_change': None,
'name': 'Prunus virus F',
'reference': {
'id': 'hxn167'
},
'schema': [
],
'verified': False,
'version': 0
}
snapshots['TestEdit.test[True-uvloop-data0-TMV-Changed name to Tobacco mosaic otu] history'] = {
'_id': '6116cba1.1',
'created_at': GenericRepr('datetime.datetime(2015, 10, 6, 20, 0)'),
'description': 'Changed name to Tobacco mosaic otu',
'diff': [
[
'change',
'version',
[
0,
1
]
],
[
'change',
'lower_name',
[
'prunus virus f',
'tobacco mosaic otu'
]
],
[
'change',
'name',
[
'Prunus virus F',
'Tobacco mosaic otu'
]
]
],
'index': {
'id': 'unbuilt',
'version': 'unbuilt'
},
'method_name': 'edit',
'otu': {
'id': '<KEY>',
'name': 'Prunus virus F',
'version': 1
},
'reference': {
'id': 'hxn167'
},
'user': {
'id': 'test'
}
}
snapshots['TestEdit.test[True-uvloop-data1-PVF-Changed name to Tobacco mosaic otu and changed abbreviation to TMV] history'] = {
'_id': '6116cba1.1',
'created_at': GenericRepr('datetime.datetime(2015, 10, 6, 20, 0)'),
'description': 'Changed name to Tobacco mosaic otu and changed abbreviation to TMV',
'diff': [
[
'change',
'version',
[
0,
1
]
],
[
'change',
'abbreviation',
[
'PVF',
'TMV'
]
],
[
'change',
'lower_name',
[
'prunus virus f',
'tobacco mosaic otu'
]
],
[
'change',
'name',
[
'Prunus virus F',
'Tobacco mosaic otu'
]
]
],
'index': {
'id': 'unbuilt',
'version': 'unbuilt'
},
'method_name': 'edit',
'otu': {
'id': '6116cba1',
'name': 'Prunus virus F',
'version': 1
},
'reference': {
'id': 'hxn167'
},
'user': {
'id': 'test'
}
}
snapshots['TestEdit.test[True-uvloop-data2-PVF-Changed abbreviation to TMV] history'] = {
'_id': '6116cba1.1',
'created_at': GenericRepr('datetime.datetime(2015, 10, 6, 20, 0)'),
'description': 'Changed abbreviation to TMV',
'diff': [
[
'change',
'version',
[
0,
1
]
],
[
'change',
'abbreviation',
[
'PVF',
'TMV'
]
]
],
'index': {
'id': 'unbuilt',
'version': 'unbuilt'
},
'method_name': 'edit',
'otu': {
'id': '6116cba1',
'name': 'Prunus virus F',
'version': 1
},
'reference': {
'id': 'hxn167'
},
'user': {
'id': 'test'
}
}
snapshots['TestEdit.test[True-uvloop-data3-TMV-Changed name to Tobacco mosaic otu] history'] = {
'_id': '6116cba1.1',
'created_at': GenericRepr('datetime.datetime(2015, 10, 6, 20, 0)'),
'description': 'Changed name to Tobacco mosaic otu',
'diff': [
[
'change',
'version',
[
0,
1
]
],
[
'change',
'lower_name',
[
'prunus virus f',
'tobacco mosaic otu'
]
],
[
'change',
'name',
[
'Prunus virus F',
'Tobacco mosaic otu'
]
]
],
'index': {
'id': 'unbuilt',
'version': 'unbuilt'
},
'method_name': 'edit',
'otu': {
'id': '6116cba1',
'name': 'Prunus virus F',
'version': 1
},
'reference': {
'id': 'hxn167'
},
'user': {
'id': 'test'
}
}
snapshots['TestEdit.test[True-uvloop-data4-TMV-Changed name to Tobacco mosaic otu and removed abbreviation TMV] history'] = {
'_id': '6116cba1.1',
'created_at': GenericRepr('datetime.datetime(2015, 10, 6, 20, 0)'),
'description': 'Changed name to Tobacco mosaic otu and removed abbreviation TMV',
'diff': [
[
'change',
'version',
[
0,
1
]
],
[
'change',
'abbreviation',
[
'TMV',
''
]
],
[
'change',
'lower_name',
[
'prunus virus f',
'tobacco mosaic otu'
]
],
[
'change',
'name',
[
'Prunus virus F',
'Tobacco mosaic otu'
]
]
],
'index': {
'id': 'unbuilt',
'version': 'unbuilt'
},
'method_name': 'edit',
'otu': {
'id': '6116cba1',
'name': 'Prunus virus F',
'version': 1
},
'reference': {
'id': 'hxn167'
},
'user': {
'id': 'test'
}
}
snapshots['TestEdit.test[True-uvloop-data5--Changed name to Tobacco mosaic otu and added abbreviation TMV] history'] = {
'_id': '6116cba1.1',
'created_at': GenericRepr('datetime.datetime(2015, 10, 6, 20, 0)'),
'description': 'Changed name to Tobacco mosaic otu and added abbreviation TMV',
'diff': [
[
'change',
'version',
[
0,
1
]
],
[
'change',
'abbreviation',
[
'',
'TMV'
]
],
[
'change',
'lower_name',
[
'prunus virus f',
'tobacco mosaic otu'
]
],
[
'change',
'name',
[
'Prunus virus F',
'Tobacco mosaic otu'
]
]
],
'index': {
'id': 'unbuilt',
'version': 'unbuilt'
},
'method_name': 'edit',
'otu': {
'id': '6116cba1',
'name': 'Prunus virus F',
'version': 1
},
'reference': {
'id': 'hxn167'
},
'user': {
'id': 'test'
}
}
snapshots['TestEdit.test[True-uvloop-data6-PVF-Changed abbreviation to TMV] history'] = {
'_id': '6116cba1.1',
'created_at': GenericRepr('datetime.datetime(2015, 10, 6, 20, 0)'),
'description': 'Changed abbreviation to TMV',
'diff': [
[
'change',
'version',
[
0,
1
]
],
[
'change',
'abbreviation',
[
'PVF',
'TMV'
]
]
],
'index': {
'id': 'unbuilt',
'version': 'unbuilt'
},
'method_name': 'edit',
'otu': {
'id': '6116cba1',
'name': 'Prunus virus F',
'version': 1
},
'reference': {
'id': 'hxn167'
},
'user': {
'id': 'test'
}
}
snapshots['TestEdit.test[True-uvloop-data7-PVF-Changed abbreviation to TMV] history'] = {
'_id': '6116cba1.1',
'created_at': GenericRepr('datetime.datetime(2015, 10, 6, 20, 0)'),
'description': 'Changed abbreviation to TMV',
'diff': [
[
'change',
'version',
[
0,
1
]
],
[
'change',
'abbreviation',
[
'PVF',
'TMV'
]
]
],
'index': {
'id': 'unbuilt',
'version': 'unbuilt'
},
'method_name': 'edit',
'otu': {
'id': '6116cba1',
'name': '<NAME>',
'version': 1
},
'reference': {
'id': 'hxn167'
},
'user': {
'id': 'test'
}
}
snapshots['TestEdit.test[True-uvloop-data8--Added abbreviation TMV] history'] = {
'_id': '6116cba1.1',
'created_at': GenericRepr('datetime.datetime(2015, 10, 6, 20, 0)'),
'description': 'Added abbreviation TMV',
'diff': [
[
'change',
'version',
[
0,
1
]
],
[
'change',
'abbreviation',
[
'',
'TMV'
]
]
],
'index': {
'id': 'unbuilt',
'version': 'unbuilt'
},
'method_name': 'edit',
'otu': {
'id': '6116cba1',
'name': '<NAME>',
'version': 1
},
'reference': {
'id': 'hxn167'
},
'user': {
'id': 'test'
}
}
snapshots['TestEdit.test[True-uvloop-data9-TMV-Removed abbreviation TMV] history'] = {
'_id': '6116cba1.1',
'created_at': GenericRepr('datetime.datetime(2015, 10, 6, 20, 0)'),
'description': 'Removed abbreviation TMV',
'diff': [
[
'change',
'version',
[
0,
1
]
],
[
'change',
'abbreviation',
[
'TMV',
''
]
]
],
'index': {
'id': 'unbuilt',
'version': 'unbuilt'
},
'method_name': 'edit',
'otu': {
'id': '6116cba1',
'name': 'Prunus virus F',
'version': 1
},
'reference': {
'id': 'hxn167'
},
'user': {
'id': 'test'
}
}
snapshots['TestEdit.test_no_change[True-uvloop-Tobacco mosaic otu-TMV-data0] 1'] = {
'abbreviation': 'TMV',
'id': 'test',
'isolates': [
],
'issues': {
'empty_isolate': False,
'empty_otu': True,
'empty_sequence': False,
'isolate_inconsistency': False
},
'most_recent_change': None,
'name': 'Tobacco mosaic otu',
'reference': {
'id': 'foo'
}
}
snapshots['TestEdit.test_no_change[True-uvloop-Tobacco mosaic otu-TMV-data1] 1'] = {
'abbreviation': 'TMV',
'id': 'test',
'isolates': [
],
'issues': {
'empty_isolate': False,
'empty_otu': True,
'empty_sequence': False,
'isolate_inconsistency': False
},
'most_recent_change': None,
'name': 'Tobacco mosaic otu',
'reference': {
'id': 'foo'
}
}
snapshots['TestEdit.test_no_change[True-uvloop-Tobacco mosaic otu-TMV-data2] 1'] = {
'abbreviation': 'TMV',
'id': 'test',
'isolates': [
],
'issues': {
'empty_isolate': False,
'empty_otu': True,
'empty_sequence': False,
'isolate_inconsistency': False
},
'most_recent_change': None,
'name': 'Tobacco mosaic otu',
'reference': {
'id': 'foo'
}
}
snapshots['test_remove[True-uvloop--True] history'] = {
'_id': '6116cba1.removed',
'created_at': GenericRepr('datetime.datetime(2015, 10, 6, 20, 0)'),
'description': 'Removed Prunus virus F',
'diff': {
'_id': '6116cba1',
'abbreviation': '',
'imported': True,
'isolates': [
{
'default': True,
'id': 'cab8b360',
'sequences': [
],
'source_name': '8816-v2',
'source_type': 'isolate'
}
],
'last_indexed_version': 0,
'lower_name': 'prunus virus f',
'name': 'Prunus virus F',
'reference': {
'id': 'hxn167'
},
'schema': [
],
'verified': False,
'version': 0
},
'index': {
'id': 'unbuilt',
'version': 'unbuilt'
},
'method_name': 'remove',
'otu': {
'id': '6116cba1',
'name': 'Prunus virus F',
'version': 'removed'
},
'reference': {
'id': 'hxn167'
},
'user': {
'id': 'test'
}
}
snapshots['test_remove[True-uvloop-PVF-True] history'] = {
'_id': '6116cba1.removed',
'created_at': GenericRepr('datetime.datetime(2015, 10, 6, 20, 0)'),
'description': 'Removed Prunus virus F (PVF)',
'diff': {
'_id': '6116cba1',
'abbreviation': 'PVF',
'imported': True,
'isolates': [
{
'default': True,
'id': 'cab8b360',
'sequences': [
],
'source_name': '8816-v2',
'source_type': 'isolate'
}
],
'last_indexed_version': 0,
'lower_name': 'prunus virus f',
'name': 'Prunus virus F',
'reference': {
'id': 'hxn167'
},
'schema': [
],
'verified': False,
'version': 0
},
'index': {
'id': 'unbuilt',
'version': 'unbuilt'
},
'method_name': 'remove',
'otu': {
'id': '6116cba1',
'name': 'Prunus virus F',
'version': 'removed'
},
'reference': {
'id': 'hxn167'
},
'user': {
'id': 'test'
}
}
snapshots['test_get_isolate[uvloop-None] 1'] = {
'default': True,
'id': 'cab8b360',
'sequences': [
{
'definition': 'Prunus virus F isolate 8816-s2 segment RNA2 polyprotein 2 gene, complete cds.',
'host': 'sweet cherry',
'id': 'KX269872',
'segment': None,
'sequence': 'TGTTTAAGAGATTAAACAACCGCTTTC'
}
],
'source_name': '8816-v2',
'source_type': 'isolate'
}
snapshots['TestEditIsolate.test[True-uvloop-data0-Renamed Isolate b to Variant b] json'] = {
'default': False,
'id': 'test',
'sequences': [
],
'source_name': 'b',
'source_type': 'variant'
}
snapshots['TestEditIsolate.test[True-uvloop-data0-Renamed Isolate b to Variant b] history'] = {
'_id': '6116cba1.1',
'created_at': GenericRepr('datetime.datetime(2015, 10, 6, 20, 0)'),
'description': 'Renamed Isolate b to Variant b',
'diff': [
[
'change',
'version',
[
0,
1
]
],
[
'change',
[
'isolates',
1,
'source_type'
],
[
'isolate',
'variant'
]
]
],
'index': {
'id': 'unbuilt',
'version': 'unbuilt'
},
'method_name': 'edit_isolate',
'otu': {
'id': '6116cba1',
'name': 'Prunus virus F',
'version': 1
},
'reference': {
'id': 'hxn167'
},
'user': {
'id': 'test'
}
}
snapshots['TestEditIsolate.test[True-uvloop-data1-Renamed Isolate b to Variant b] json'] = {
'default': False,
'id': 'test',
'sequences': [
],
'source_name': 'b',
'source_type': 'variant'
}
snapshots['TestEditIsolate.test[True-uvloop-data1-Renamed Isolate b to Variant b] history'] = {
'_id': '6116cba1.1',
'created_at': GenericRepr('datetime.datetime(2015, 10, 6, 20, 0)'),
'description': 'Renamed Isolate b to Variant b',
'diff': [
[
'change',
'version',
[
0,
1
]
],
[
'change',
[
'isolates',
1,
'source_type'
],
[
'isolate',
'variant'
]
]
],
'index': {
'id': 'unbuilt',
'version': 'unbuilt'
},
'method_name': 'edit_isolate',
'otu': {
'id': '6116cba1',
'name': 'Prunus virus F',
'version': 1
},
'reference': {
'id': 'hxn167'
},
'user': {
'id': 'test'
}
}
snapshots['TestEditIsolate.test[True-uvloop-data2-Renamed Isolate b to Variant A] json'] = {
'default': False,
'id': 'test',
'sequences': [
],
'source_name': 'A',
'source_type': 'variant'
}
snapshots['TestEditIsolate.test[True-uvloop-data2-Renamed Isolate b to Variant A] history'] = {
'_id': '6116cba1.1',
'created_at': GenericRepr('datetime.datetime(2015, 10, 6, 20, 0)'),
'description': 'Renamed Isolate b to Variant A',
'diff': [
[
'change',
'version',
[
0,
1
]
],
[
'change',
[
'isolates',
1,
'source_name'
],
[
'b',
'A'
]
],
[
'change',
[
'isolates',
1,
'source_type'
],
[
'isolate',
'variant'
]
]
],
'index': {
'id': 'unbuilt',
'version': 'unbuilt'
},
'method_name': 'edit_isolate',
'otu': {
'id': '6116cba1',
'name': 'Prunus virus F',
'version': 1
},
'reference': {
'id': 'hxn167'
},
'user': {
'id': 'test'
}
}
snapshots['TestEditIsolate.test[True-uvloop-data3-Renamed Isolate b to Isolate A] json'] = {
'default': False,
'id': 'test',
'sequences': [
],
'source_name': 'A',
'source_type': 'isolate'
}
snapshots['TestEditIsolate.test[True-uvloop-data3-Renamed Isolate b to Isolate A] history'] = {
'_id': '6116cba1.1',
'created_at': GenericRepr('datetime.datetime(2015, 10, 6, 20, 0)'),
'description': 'Renamed Isolate b to Isolate A',
'diff': [
[
'change',
'version',
[
0,
1
]
],
[
'change',
[
'isolates',
1,
'source_name'
],
[
'b',
'A'
]
]
],
'index': {
'id': 'unbuilt',
'version': 'unbuilt'
},
'method_name': 'edit_isolate',
'otu': {
'id': '6116cba1',
'name': 'Prunus virus F',
'version': 1
},
'reference': {
'id': 'hxn167'
},
'user': {
'id': 'test'
}
}
snapshots['TestEditIsolate.test_force_case[True-uvloop] json'] = {
'default': True,
'id': 'cab8b360',
'sequences': [
],
'source_name': '8816-v2',
'source_type': 'variant'
}
snapshots['TestEditIsolate.test_force_case[True-uvloop] history'] = {
'_id': '6116cba1.1',
'created_at': GenericRepr('datetime.datetime(2015, 10, 6, 20, 0)'),
'description': 'Renamed Isolate 8816-v2 to Variant 8816-v2',
'diff': [
[
'change',
'version',
[
0,
1
]
],
[
'change',
[
'isolates',
0,
'source_type'
],
[
'isolate',
'variant'
]
]
],
'index': {
'id': 'unbuilt',
'version': 'unbuilt'
},
'method_name': 'edit_isolate',
'otu': {
'id': '6116cba1',
'name': 'Prunus virus F',
'version': 1
},
'reference': {
'id': 'hxn167'
},
'user': {
'id': 'test'
}
}
snapshots['TestCreate.test[True-uvloop-None-True] json'] = {
'abbreviation': '',
'id': '9pfsom1b',
'isolates': [
],
'issues': {
'empty_isolate': False,
'empty_otu': True,
'empty_sequence': False,
'isolate_inconsistency': False
},
'last_indexed_version': None,
'most_recent_change': {
'created_at': '2015-10-06T20:00:00Z',
'description': 'Created Tobacco mosaic virus',
'diff': {
'_id': '9pfsom1b',
'abbreviation': '',
'isolates': [
],
'last_indexed_version': None,
'lower_name': 'tobacco mosaic virus',
'name': 'Tobacco mosaic virus',
'reference': {
'id': 'foo'
},
'schema': [
],
'verified': False,
'version': 0
},
'id': '9pfsom1b.0',
'index': {
'id': 'unbuilt',
'version': 'unbuilt'
},
'method_name': 'create',
'otu': {
'id': '9pfsom1b',
'name': 'Tobacco mosaic virus',
'version': 0
},
'reference': {
'id': 'foo'
},
'user': {
'id': 'test'
}
},
'name': 'Tobacco mosaic virus',
'reference': {
'id': 'foo'
},
'schema': [
],
'verified': False,
'version': 0
}
snapshots['TestCreate.test[True-uvloop-True] json'] = {
'abbreviation': '',
'id': '9pfsom1b',
'isolates': [
],
'issues': {
'empty_isolate': False,
'empty_otu': True,
'empty_sequence': False,
'isolate_inconsistency': False
},
'last_indexed_version': None,
'most_recent_change': {
'created_at': '2015-10-06T20:00:00Z',
'description': 'Created Tobacco mosaic virus',
'diff': {
'_id': '9pfsom1b',
'abbreviation': '',
'isolates': [
],
'last_indexed_version': None,
'lower_name': 'tobacco mosaic virus',
'name': 'Tobacco mosaic virus',
'reference': {
'id': 'foo'
},
'schema': [
],
'verified': False,
'version': 0
},
'id': '9pfsom1b.0',
'index': {
'id': 'unbuilt',
'version': 'unbuilt'
},
'method_name': 'create',
'otu': {
'id': '9pfsom1b',
'name': 'Tobacco mosaic virus',
'version': 0
},
'reference': {
'id': 'foo'
},
'user': {
'id': 'test'
}
},
'name': 'Tobacco mosaic virus',
'reference': {
'id': 'foo'
},
'schema': [
],
'verified': False,
'version': 0
}
snapshots['TestCreate.test[True-uvloop-TMV-True] json'] = {
'abbreviation': 'TMV',
'id': '9pfsom1b',
'isolates': [
],
'issues': {
'empty_isolate': False,
'empty_otu': True,
'empty_sequence': False,
'isolate_inconsistency': False
},
'last_indexed_version': None,
'most_recent_change': {
'created_at': '2015-10-06T20:00:00Z',
'description': 'Created Tobacco mosaic virus (TMV)',
'diff': {
'_id': '9pfsom1b',
'abbreviation': 'TMV',
'isolates': [
],
'last_indexed_version': None,
'lower_name': 'tobacco mosaic virus',
'name': 'Tobacco mosaic virus',
'reference': {
'id': 'foo'
},
'schema': [
],
'verified': False,
'version': 0
},
'id': '9pfsom1b.0',
'index': {
'id': 'unbuilt',
'version': 'unbuilt'
},
'method_name': 'create',
'otu': {
'id': '9pfsom1b',
'name': 'Tobacco mosaic virus',
'version': 0
},
'reference': {
'id': 'foo'
},
'user': {
'id': 'test'
}
},
'name': 'Tobacco mosaic virus',
'reference': {
'id': 'foo'
},
'schema': [
],
'verified': False,
'version': 0
}
snapshots['TestEdit.test[True-uvloop-data0-TMV-Changed name to Tobacco mosaic otu] json'] = {
'abbreviation': 'TMV',
'id': '6116cba1',
'imported': True,
'isolates': [
{
'default': True,
'id': 'cab8b360',
'sequences': [
],
'source_name': '8816-v2',
'source_type': 'isolate'
}
],
'issues': {
'empty_isolate': [
'cab8b360'
],
'empty_otu': False,
'empty_sequence': False,
'isolate_inconsistency': False
},
'last_indexed_version': 0,
'most_recent_change': {
'created_at': '2015-10-06T20:00:00Z',
'description': 'Changed name to Tobacco mosaic otu',
'id': '6116cba1.1',
'method_name': 'edit',
'otu': {
'id': '6116cba1',
'name': 'Prunus virus F',
'version': 1
},
'user': {
'id': 'test'
}
},
'name': 'Tobacco mosaic otu',
'reference': {
'id': 'hxn167'
},
'schema': [
],
'verified': False,
'version': 1
}
snapshots['TestEdit.test[True-uvloop-data0-TMV-Changed name to Tobacco mosaic otu] otu'] = {
'_id': '6116cba1',
'abbreviation': 'TMV',
'imported': True,
'isolates': [
{
'default': True,
'id': 'cab8b360',
'source_name': '8816-v2',
'source_type': 'isolate'
}
],
'last_indexed_version': 0,
'lower_name': 'tobacco mosaic otu',
'name': 'Tobacco mosaic otu',
'reference': {
'id': 'hxn167'
},
'schema': [
],
'verified': False,
'version': 1
}
snapshots['TestEdit.test[True-uvloop-data1-PVF-Changed name to Tobacco mosaic otu and changed abbreviation to TMV] json'] = {
'abbreviation': 'TMV',
'id': '6116cba1',
'imported': True,
'isolates': [
{
'default': True,
'id': 'cab8b360',
'sequences': [
],
'source_name': '8816-v2',
'source_type': 'isolate'
}
],
'issues': {
'empty_isolate': [
'cab8b360'
],
'empty_otu': False,
'empty_sequence': False,
'isolate_inconsistency': False
},
'last_indexed_version': 0,
'most_recent_change': {
'created_at': '2015-10-06T20:00:00Z',
'description': 'Changed name to Tobacco mosaic otu and changed abbreviation to TMV',
'id': '6116cba1.1',
'method_name': 'edit',
'otu': {
'id': '6116cba1',
'name': 'Prunus virus F',
'version': 1
},
'user': {
'id': 'test'
}
},
'name': 'Tobacco mosaic otu',
'reference': {
'id': 'hxn167'
},
'schema': [
],
'verified': False,
'version': 1
}
snapshots['TestEdit.test[True-uvloop-data1-PVF-Changed name to Tobacco mosaic otu and changed abbreviation to TMV] otu'] = {
'_id': '6116cba1',
'abbreviation': 'TMV',
'imported': True,
'isolates': [
{
'default': True,
'id': 'cab8b360',
'source_name': '8816-v2',
'source_type': 'isolate'
}
],
'last_indexed_version': 0,
'lower_name': 'tobacco mosaic otu',
'name': 'Tobacco mosaic otu',
'reference': {
'id': 'hxn167'
},
'schema': [
],
'verified': False,
'version': 1
}
snapshots['TestEdit.test[True-uvloop-data2-PVF-Changed abbreviation to TMV] json'] = {
'abbreviation': 'TMV',
'id': '6116cba1',
'imported': True,
'isolates': [
{
'default': True,
'id': 'cab8b360',
'sequences': [
],
'source_name': '8816-v2',
'source_type': 'isolate'
}
],
'issues': {
'empty_isolate': [
'cab8b360'
],
'empty_otu': False,
'empty_sequence': False,
'isolate_inconsistency': False
},
'last_indexed_version': 0,
'most_recent_change': {
'created_at': '2015-10-06T20:00:00Z',
'description': 'Changed abbreviation to TMV',
'id': '6116cba1.1',
'method_name': 'edit',
'otu': {
'id': '6116cba1',
'name': 'Prunus virus F',
'version': 1
},
'user': {
'id': 'test'
}
},
'name': 'Prunus virus F',
'reference': {
'id': 'hxn167'
},
'schema': [
],
'verified': False,
'version': 1
}
snapshots['TestEdit.test[True-uvloop-data2-PVF-Changed abbreviation to TMV] otu'] = {
'_id': '6116cba1',
'abbreviation': 'TMV',
'imported': True,
'isolates': [
{
'default': True,
'id': 'cab8b360',
'source_name': '8816-v2',
'source_type': 'isolate'
}
],
'last_indexed_version': 0,
'lower_name': 'prunus virus f',
'name': 'Prunus virus F',
'reference': {
'id': 'hxn167'
},
'schema': [
],
'verified': False,
'version': 1
}
snapshots['TestEdit.test[True-uvloop-data3-TMV-Changed name to Tobacco mosaic otu] json'] = {
'abbreviation': 'TMV',
'id': '6116cba1',
'imported': True,
'isolates': [
{
'default': True,
'id': 'cab8b360',
'sequences': [
],
'source_name': '8816-v2',
'source_type': 'isolate'
}
],
'issues': {
'empty_isolate': [
'cab8b360'
],
'empty_otu': False,
'empty_sequence': False,
'isolate_inconsistency': False
},
'last_indexed_version': 0,
'most_recent_change': {
'created_at': '2015-10-06T20:00:00Z',
'description': 'Changed name to Tobacco mosaic otu',
'id': '6116cba1.1',
'method_name': 'edit',
'otu': {
'id': '6116cba1',
'name': 'Prunus virus F',
'version': 1
},
'user': {
'id': 'test'
}
},
'name': 'Tobacco mosaic otu',
'reference': {
'id': 'hxn167'
},
'schema': [
],
'verified': False,
'version': 1
}
snapshots['TestEdit.test[True-uvloop-data3-TMV-Changed name to Tobacco mosaic otu] otu'] = {
'_id': '6116cba1',
'abbreviation': 'TMV',
'imported': True,
'isolates': [
{
'default': True,
'id': 'cab8b360',
'source_name': '8816-v2',
'source_type': 'isolate'
}
],
'last_indexed_version': 0,
'lower_name': 'tobacco mosaic otu',
'name': 'Tobacco mosaic otu',
'reference': {
'id': 'hxn167'
},
'schema': [
],
'verified': False,
'version': 1
}
snapshots['TestEdit.test[True-uvloop-data4-TMV-Changed name to Tobacco mosaic otu and removed abbreviation TMV] json'] = {
'abbreviation': '',
'id': '6116cba1',
'imported': True,
'isolates': [
{
'default': True,
'id': 'cab8b360',
'sequences': [
],
'source_name': '8816-v2',
'source_type': 'isolate'
}
],
'issues': {
'empty_isolate': [
'cab8b360'
],
'empty_otu': False,
'empty_sequence': False,
'isolate_inconsistency': False
},
'last_indexed_version': 0,
'most_recent_change': {
'created_at': '2015-10-06T20:00:00Z',
'description': 'Changed name to Tobacco mosaic otu and removed abbreviation TMV',
'id': '6116cba1.1',
'method_name': 'edit',
'otu': {
'id': '6116cba1',
'name': 'Prunus virus F',
'version': 1
},
'user': {
'id': 'test'
}
},
'name': 'Tobacco mosaic otu',
'reference': {
'id': 'hxn167'
},
'schema': [
],
'verified': False,
'version': 1
}
snapshots['TestEdit.test[True-uvloop-data4-TMV-Changed name to Tobacco mosaic otu and removed abbreviation TMV] otu'] = {
'_id': '6116cba1',
'abbreviation': '',
'imported': True,
'isolates': [
{
'default': True,
'id': 'cab8b360',
'source_name': '8816-v2',
'source_type': 'isolate'
}
],
'last_indexed_version': 0,
'lower_name': 'tobacco mosaic otu',
'name': 'Tobacco mosaic otu',
'reference': {
'id': 'hxn167'
},
'schema': [
],
'verified': False,
'version': 1
}
snapshots['TestEdit.test[True-uvloop-data5--Changed name to Tobacco mosaic otu and added abbreviation TMV] json'] = {
'abbreviation': 'TMV',
'id': '6116cba1',
'imported': True,
'isolates': [
{
'default': True,
'id': 'cab8b360',
'sequences': [
],
'source_name': '8816-v2',
'source_type': 'isolate'
}
],
'issues': {
'empty_isolate': [
'cab8b360'
],
'empty_otu': False,
'empty_sequence': False,
'isolate_inconsistency': False
},
'last_indexed_version': 0,
'most_recent_change': {
'created_at': '2015-10-06T20:00:00Z',
'description': 'Changed name to Tobacco mosaic otu and added abbreviation TMV',
'id': '6116cba1.1',
'method_name': 'edit',
'otu': {
'id': '6116cba1',
'name': 'Prunus virus F',
'version': 1
},
'user': {
'id': 'test'
}
},
'name': 'Tobacco mosaic otu',
'reference': {
'id': 'hxn167'
},
'schema': [
],
'verified': False,
'version': 1
}
snapshots['TestEdit.test[True-uvloop-data5--Changed name to Tobacco mosaic otu and added abbreviation TMV] otu'] = {
'_id': '6116cba1',
'abbreviation': 'TMV',
'imported': True,
'isolates': [
{
'default': True,
'id': 'cab8b360',
'source_name': '8816-v2',
'source_type': 'isolate'
}
],
'last_indexed_version': 0,
'lower_name': 'tobacco mosaic otu',
'name': 'Tobacco mosaic otu',
'reference': {
'id': 'hxn167'
},
'schema': [
],
'verified': False,
'version': 1
}
snapshots['TestEdit.test[True-uvloop-data6-PVF-Changed abbreviation to TMV] json'] = {
'abbreviation': 'TMV',
'id': '6116cba1',
'imported': True,
'isolates': [
{
'default': True,
'id': 'cab8b360',
'sequences': [
],
'source_name': '8816-v2',
'source_type': 'isolate'
}
],
'issues': {
'empty_isolate': [
'cab8b360'
],
'empty_otu': False,
'empty_sequence': False,
'isolate_inconsistency': False
},
'last_indexed_version': 0,
'most_recent_change': {
'created_at': '2015-10-06T20:00:00Z',
'description': 'Changed abbreviation to TMV',
'id': '6116cba1.1',
'method_name': 'edit',
'otu': {
'id': '6116cba1',
'name': 'Prunus virus F',
'version': 1
},
'user': {
'id': 'test'
}
},
'name': 'Prunus virus F',
'reference': {
'id': 'hxn167'
},
'schema': [
],
'verified': False,
'version': 1
}
snapshots['TestEdit.test[True-uvloop-data6-PVF-Changed abbreviation to TMV] otu'] = {
'_id': '6116cba1',
'abbreviation': 'TMV',
'imported': True,
'isolates': [
{
'default': True,
'id': 'cab8b360',
'source_name': '8816-v2',
'source_type': 'isolate'
}
],
'last_indexed_version': 0,
'lower_name': 'prunus virus f',
'name': 'Prunus virus F',
'reference': {
'id': 'hxn167'
},
'schema': [
],
'verified': False,
'version': 1
}
snapshots['TestEdit.test[True-uvloop-data7-PVF-Changed abbreviation to TMV] json'] = {
'abbreviation': 'TMV',
'id': '6116cba1',
'imported': True,
'isolates': [
{
'default': True,
'id': 'cab8b360',
'sequences': [
],
'source_name': '8816-v2',
'source_type': 'isolate'
}
],
'issues': {
'empty_isolate': [
'cab8b360'
],
'empty_otu': False,
'empty_sequence': False,
'isolate_inconsistency': False
},
'last_indexed_version': 0,
'most_recent_change': {
'created_at': '2015-10-06T20:00:00Z',
'description': 'Changed abbreviation to TMV',
'id': '6116cba1.1',
'method_name': 'edit',
'otu': {
'id': '6116cba1',
'name': 'Prunus virus F',
'version': 1
},
'user': {
'id': 'test'
}
},
'name': 'Prunus virus F',
'reference': {
'id': 'hxn167'
},
'schema': [
],
'verified': False,
'version': 1
}
snapshots['TestEdit.test[True-uvloop-data7-PVF-Changed abbreviation to TMV] otu'] = {
'_id': '6116cba1',
'abbreviation': 'TMV',
'imported': True,
'isolates': [
{
'default': True,
'id': 'cab8b360',
'source_name': '8816-v2',
'source_type': 'isolate'
}
],
'last_indexed_version': 0,
'lower_name': 'prunus virus f',
'name': 'Prunus virus F',
'reference': {
'id': 'hxn167'
},
'schema': [
],
'verified': False,
'version': 1
}
snapshots['TestEdit.test[True-uvloop-data8--Added abbreviation TMV] json'] = {
'abbreviation': 'TMV',
'id': '6116cba1',
'imported': True,
'isolates': [
{
'default': True,
'id': 'cab8b360',
'sequences': [
],
'source_name': '8816-v2',
'source_type': 'isolate'
}
],
'issues': {
'empty_isolate': [
'cab8b360'
],
'empty_otu': False,
'empty_sequence': False,
'isolate_inconsistency': False
},
'last_indexed_version': 0,
'most_recent_change': {
'created_at': '2015-10-06T20:00:00Z',
'description': 'Added abbreviation TMV',
'id': '6116cba1.1',
'method_name': 'edit',
'otu': {
'id': '6116cba1',
'name': 'Prunus virus F',
'version': 1
},
'user': {
'id': 'test'
}
},
'name': 'Prunus virus F',
'reference': {
'id': 'hxn167'
},
'schema': [
],
'verified': False,
'version': 1
}
snapshots['TestEdit.test[True-uvloop-data8--Added abbreviation TMV] otu'] = {
'_id': '6116cba1',
'abbreviation': 'TMV',
'imported': True,
'isolates': [
{
'default': True,
'id': 'cab8b360',
'source_name': '8816-v2',
'source_type': 'isolate'
}
],
'last_indexed_version': 0,
'lower_name': 'prunus virus f',
'name': 'Prunus virus F',
'reference': {
'id': 'hxn167'
},
'schema': [
],
'verified': False,
'version': 1
}
snapshots['TestEdit.test[True-uvloop-data9-TMV-Removed abbreviation TMV] json'] = {
'abbreviation': '',
'id': '6116cba1',
'imported': True,
'isolates': [
{
'default': True,
'id': 'cab8b360',
'sequences': [
],
'source_name': '8816-v2',
'source_type': 'isolate'
}
],
'issues': {
'empty_isolate': [
'cab8b360'
],
'empty_otu': False,
'empty_sequence': False,
'isolate_inconsistency': False
},
'last_indexed_version': 0,
'most_recent_change': {
'created_at': '2015-10-06T20:00:00Z',
'description': 'Removed abbreviation TMV',
'id': '6116cba1.1',
'method_name': 'edit',
'otu': {
'id': '6116cba1',
'name': 'Prunus virus F',
'version': 1
},
'user': {
'id': 'test'
}
},
'name': 'Prunus virus F',
'reference': {
'id': 'hxn167'
},
'schema': [
],
'verified': False,
'version': 1
}
snapshots['TestEdit.test[True-uvloop-data9-TMV-Removed abbreviation TMV] otu'] = {
'_id': '6116cba1',
'abbreviation': '',
'imported': True,
'isolates': [
{
'default': True,
'id': 'cab8b360',
'source_name': '8816-v2',
'source_type': 'isolate'
}
],
'last_indexed_version': 0,
'lower_name': 'prunus virus f',
'name': 'Prunus virus F',
'reference': {
'id': 'hxn167'
},
'schema': [
],
'verified': False,
'version': 1
}
snapshots['test_list_isolates[uvloop-None] json'] = [
{
'default': True,
'id': 'cab8b360',
'sequences': [
],
'source_name': '8816-v2',
'source_type': 'isolate'
},
{
'default': False,
'id': 'bcb9b352',
'sequences': [
],
'source_name': '7865',
'source_type': 'isolate'
}
]
snapshots['TestAddIsolate.test_first[True-uvloop] json'] = {
'default': True,
'id': '9pf',
'sequences': [
],
'source_name': 'b',
'source_type': 'isolate'
}
snapshots['TestAddIsolate.test_first[True-uvloop] otu'] = {
'_id': '6116cba1',
'abbreviation': 'PVF',
'imported': True,
'isolates': [
{
'default': True,
'id': '9pf',
'source_name': 'b',
'source_type': 'isolate'
}
],
'last_indexed_version': 0,
'lower_name': 'prunus virus f',
'name': 'Prunus virus F',
'reference': {
'id': 'hxn167'
},
'schema': [
],
'verified': False,
'version': 1
}
snapshots['TestAddIsolate.test_first[True-uvloop] history'] = {
'_id': '6116cba1.1',
'created_at': GenericRepr('datetime.datetime(2015, 10, 6, 20, 0)'),
'description': 'Added Isolate b as default',
'diff': [
[
'change',
'version',
[
0,
1
]
],
[
'add',
'isolates',
[
[
0,
{
'default': True,
'id': '9pf',
'sequences': [
],
'source_name': 'b',
'source_type': 'isolate'
}
]
]
]
],
'index': {
'id': 'unbuilt',
'version': 'unbuilt'
},
'method_name': 'add_isolate',
'otu': {
'id': '6116cba1',
'name': 'Prunus virus F',
'version': 1
},
'reference': {
'id': 'hxn167'
},
'user': {
'id': 'test'
}
}
snapshots['TestAddIsolate.test_force_case[True-uvloop] json'] = {
'default': False,
'id': '9pf',
'sequences': [
],
'source_name': 'Beta',
'source_type': 'isolate'
}
snapshots['TestAddIsolate.test_force_case[True-uvloop] otu'] = {
'_id': '6116cba1',
'abbreviation': 'PVF',
'imported': True,
'isolates': [
{
'default': True,
'id': 'cab8b360',
'source_name': '8816-v2',
'source_type': 'isolate'
},
{
'default': False,
'id': '9pf',
'source_name': 'Beta',
'source_type': 'isolate'
}
],
'last_indexed_version': 0,
'lower_name': 'prunus virus f',
'name': 'Prunus virus F',
'reference': {
'id': 'hxn167'
},
'schema': [
],
'verified': False,
'version': 1
}
snapshots['TestAddIsolate.test_force_case[True-uvloop] history'] = {
'_id': '6116cba1.1',
'created_at': GenericRepr('datetime.datetime(2015, 10, 6, 20, 0)'),
'description': 'Added Isolate Beta',
'diff': [
[
'change',
'version',
[
0,
1
]
],
[
'add',
'isolates',
[
[
1,
{
'default': False,
'id': '9pf',
'sequences': [
],
'source_name': 'Beta',
'source_type': 'isolate'
}
]
]
]
],
'index': {
'id': 'unbuilt',
'version': 'unbuilt'
},
'method_name': 'add_isolate',
'otu': {
'id': '6116cba1',
'name': 'Prunus virus F',
'version': 1
},
'reference': {
'id': 'hxn167'
},
'user': {
'id': 'test'
}
}
snapshots['TestAddIsolate.test_empty[True-uvloop] json'] = {
'default': False,
'id': '9pf',
'sequences': [
],
'source_name': '',
'source_type': ''
}
snapshots['TestAddIsolate.test_empty[True-uvloop] otu'] = {
'_id': '6116cba1',
'abbreviation': 'PVF',
'imported': True,
'isolates': [
{
'default': True,
'id': 'cab8b360',
'source_name': '8816-v2',
'source_type': 'isolate'
},
{
'default': False,
'id': '9pf',
'source_name': '',
'source_type': ''
}
],
'last_indexed_version': 0,
'lower_name': 'prunus virus f',
'name': 'Prunus virus F',
'reference': {
'id': 'hxn167'
},
'schema': [
],
'verified': False,
'version': 1
}
snapshots['TestAddIsolate.test_empty[True-uvloop] history'] = {
'_id': '6116cba1.1',
'created_at': GenericRepr('datetime.datetime(2015, 10, 6, 20, 0)'),
'description': 'Added Unnamed Isolate',
'diff': [
[
'change',
'version',
[
0,
1
]
],
[
'add',
'isolates',
[
[
1,
{
'default': False,
'id': '9pf',
'sequences': [
],
'source_name': '',
'source_type': ''
}
]
]
]
],
'index': {
'id': 'unbuilt',
'version': 'unbuilt'
},
'method_name': 'add_isolate',
'otu': {
'id': '6116cba1',
'name': 'Prunus virus F',
'version': 1
},
'reference': {
'id': 'hxn167'
},
'user': {
'id': 'test'
}
}
snapshots['TestEditIsolate.test[True-uvloop-data0-Renamed Isolate b to Variant b] otu'] = {
'_id': '6116cba1',
'abbreviation': 'PVF',
'imported': True,
'isolates': [
{
'default': True,
'id': 'cab8b360',
'source_name': '8816-v2',
'source_type': 'isolate'
},
{
'default': False,
'id': 'test',
'source_name': 'b',
'source_type': 'variant'
}
],
'last_indexed_version': 0,
'lower_name': 'prunus virus f',
'name': 'Prunus virus F',
'reference': {
'id': 'hxn167'
},
'schema': [
],
'verified': False,
'version': 1
}
snapshots['TestEditIsolate.test[True-uvloop-data1-Renamed Isolate b to Variant b] otu'] = {
'_id': '6116cba1',
'abbreviation': 'PVF',
'imported': True,
'isolates': [
{
'default': True,
'id': 'cab8b360',
'source_name': '8816-v2',
'source_type': 'isolate'
},
{
'default': False,
'id': 'test',
'source_name': 'b',
'source_type': 'variant'
}
],
'last_indexed_version': 0,
'lower_name': 'prunus virus f',
'name': 'Prunus virus F',
'reference': {
'id': 'hxn167'
},
'schema': [
],
'verified': False,
'version': 1
}
snapshots['TestEditIsolate.test[True-uvloop-data2-Renamed Isolate b to Variant A] otu'] = {
'_id': '6116cba1',
'abbreviation': 'PVF',
'imported': True,
'isolates': [
{
'default': True,
'id': 'cab8b360',
'source_name': '8816-v2',
'source_type': 'isolate'
},
{
'default': False,
'id': 'test',
'source_name': 'A',
'source_type': 'variant'
}
],
'last_indexed_version': 0,
'lower_name': 'prunus virus f',
'name': 'Prunus virus F',
'reference': {
'id': 'hxn167'
},
'schema': [
],
'verified': False,
'version': 1
}
snapshots['TestEditIsolate.test[True-uvloop-data3-Renamed Isolate b to Isolate A] otu'] = {
'_id': '6116cba1',
'abbreviation': 'PVF',
'imported': True,
'isolates': [
{
'default': True,
'id': 'cab8b360',
'source_name': '8816-v2',
'source_type': 'isolate'
},
{
'default': False,
'id': 'test',
'source_name': 'A',
'source_type': 'isolate'
}
],
'last_indexed_version': 0,
'lower_name': 'prunus virus f',
'name': 'Prunus virus F',
'reference': {
'id': 'hxn167'
},
'schema': [
],
'verified': False,
'version': 1
}
snapshots['TestEditIsolate.test_force_case[True-uvloop] otu'] = {
'_id': '6116cba1',
'abbreviation': 'PVF',
'imported': True,
'isolates': [
{
'default': True,
'id': 'cab8b360',
'source_name': '8816-v2',
'source_type': 'variant'
}
],
'last_indexed_version': 0,
'lower_name': 'prunus virus f',
'name': 'Prunus virus F',
'reference': {
'id': 'hxn167'
},
'schema': [
],
'verified': False,
'version': 1
}
snapshots['TestSetAsDefault.test[True-uvloop] json'] = {
'default': True,
'id': 'test',
'sequences': [
],
'source_name': 'b',
'source_type': 'isolate'
}
snapshots['TestRemoveIsolate.test_change_default[True-uvloop] otu'] = {
'_id': '6116cba1',
'abbreviation': 'PVF',
'imported': True,
'isolates': [
{
'default': True,
'id': 'bcb9b352',
'source_name': '7865',
'source_type': 'isolate'
}
],
'last_indexed_version': 0,
'lower_name': 'prunus virus f',
'name': 'Prunus virus F',
'reference': {
'id': 'hxn167'
},
'schema': [
],
'verified': False,
'version': 1
}
snapshots['TestRemoveIsolate.test_change_default[True-uvloop] history'] = {
'_id': '6116cba1.1',
'created_at': GenericRepr('datetime.datetime(2015, 10, 6, 20, 0)'),
'description': 'Removed Isolate 8816-v2 and set Isolate 7865 as default',
'diff': [
[
'change',
'version',
[
0,
1
]
],
[
'change',
[
'isolates',
0,
'id'
],
[
'cab8b360',
'bcb9b352'
]
],
[
'change',
[
'isolates',
0,
'source_name'
],
[
'8816-v2',
'7865'
]
],
[
'remove',
[
'isolates',
0,
'sequences'
],
[
[
0,
{
'_id': 'KX269872',
'definition': 'Prunus virus F isolate 8816-s2 segment RNA2 polyprotein 2 gene, complete cds.',
'host': 'sweet cherry',
'isolate_id': 'cab8b360',
'otu_id': '6116cba1',
'segment': None,
'sequence': 'TGTTTAAGAGATTAAACAACCGCTTTC'
}
]
]
],
[
'remove',
'isolates',
[
[
1,
{
'default': False,
'id': 'bcb9b352',
'sequences': [
],
'source_name': '7865',
'source_type': 'isolate'
}
]
]
]
],
'index': {
'id': 'unbuilt',
'version': 'unbuilt'
},
'method_name': 'remove_isolate',
'otu': {
'id': '6116cba1',
'name': 'Prunus virus F',
'version': 1
},
'reference': {
'id': 'hxn167'
},
'user': {
'id': 'test'
}
}
snapshots['test_list_sequences[uvloop-None] json'] = [
{
'definition': 'Prunus virus F isolate 8816-s2 segment RNA2 polyprotein 2 gene, complete cds.',
'host': 'sweet cherry',
'id': 'KX269872',
'segment': None,
'sequence': 'TGTTTAAGAGATTAAACAACCGCTTTC'
}
]
snapshots['test_get_sequence[uvloop-None] json'] = {
'definition': 'Prunus virus F isolate 8816-s2 segment RNA2 polyprotein 2 gene, complete cds.',
'host': 'sweet cherry',
'id': 'KX269872',
'isolate_id': 'cab8b360',
'otu_id': '6116cba1',
'segment': None,
'sequence': 'TGTTTAAGAGATTAAACAACCGCTTTC'
}
snapshots['test_create_sequence[True-uvloop-None] json'] = {
'accession': 'foobar',
'definition': 'A made up sequence',
'host': 'Plant',
'id': '9pfsom1b',
'isolate_id': 'cab8b360',
'otu_id': '6116cba1',
'reference': {
'id': 'hxn167'
},
'segment': None,
'sequence': 'ATGCGTGTACTG'
}
snapshots['test_create_sequence[True-uvloop-None] otu'] = {
'_id': '6116cba1',
'abbreviation': 'PVF',
'imported': True,
'isolates': [
{
'default': True,
'id': 'cab8b360',
'source_name': '8816-v2',
'source_type': 'isolate'
}
],
'last_indexed_version': 0,
'lower_name': 'prunus virus f',
'name': 'Prunus virus F',
'reference': {
'id': 'hxn167'
},
'schema': [
],
'verified': True,
'version': 1
}
snapshots['test_create_sequence[True-uvloop-None] sequence'] = {
'_id': '9pfsom1b',
'accession': 'foobar',
'definition': 'A made up sequence',
'host': 'Plant',
'isolate_id': 'cab8b360',
'otu_id': '6116cba1',
'reference': {
'id': 'hxn167'
},
'segment': None,
'sequence': 'ATGCGTGTACTG'
}
snapshots['test_create_sequence[True-uvloop-None] history'] = {
'_id': '6116cba1.1',
'created_at': GenericRepr('datetime.datetime(2015, 10, 6, 20, 0)'),
'description': 'Created new sequence foobar in Isolate 8816-v2',
'diff': [
[
'change',
'version',
[
0,
1
]
],
[
'add',
[
'isolates',
0,
'sequences'
],
[
[
0,
{
'_id': '9pfsom1b',
'accession': 'foobar',
'definition': 'A made up sequence',
'host': 'Plant',
'isolate_id': 'cab8b360',
'otu_id': '6116cba1',
'reference': {
'id': 'hxn167'
},
'segment': None,
'sequence': 'ATGCGTGTACTG'
}
]
]
],
[
'change',
'verified',
[
False,
True
]
]
],
'index': {
'id': 'unbuilt',
'version': 'unbuilt'
},
'method_name': 'create_sequence',
'otu': {
'id': '6116cba1',
'name': 'Prunus virus F',
'version': 1
},
'reference': {
'id': 'hxn167'
},
'user': {
'id': 'test'
}
}
snapshots['test_edit_sequence[True-uvloop-None] history'] = {
'_id': '6116cba1.1',
'created_at': GenericRepr('datetime.datetime(2015, 10, 6, 20, 0)'),
'description': 'Edited sequence KX269872 in Isolate 8816-v2',
'diff': [
[
'change',
'version',
[
0,
1
]
],
[
'change',
[
'isolates',
0,
'sequences',
0,
'definition'
],
[
'Prunus virus F isolate 8816-s2 segment RNA2 polyprotein 2 gene, complete cds.',
'A made up sequence'
]
],
[
'change',
[
'isolates',
0,
'sequences',
0,
'host'
],
[
'sweet cherry',
'Grapevine'
]
],
[
'change',
[
'isolates',
0,
'sequences',
0,
'sequence'
],
[
'TGTTTAAGAGATTAAACAACCGCTTTC',
'ATGCGTGTACTG'
]
],
[
'change',
'verified',
[
False,
True
]
]
],
'index': {
'id': 'unbuilt',
'version': 'unbuilt'
},
'method_name': 'edit_sequence',
'otu': {
'id': '6116cba1',
'name': 'Prunus virus F',
'version': 1
},
'reference': {
'id': 'hxn167'
},
'user': {
'id': 'test'
}
}
snapshots['test_edit_sequence[True-uvloop-None] otu'] = {
'_id': '6116cba1',
'abbreviation': 'PVF',
'imported': True,
'isolates': [
{
'default': True,
'id': 'cab8b360',
'source_name': '8816-v2',
'source_type': 'isolate'
}
],
'last_indexed_version': 0,
'lower_name': 'prunus virus f',
'name': 'Prunus virus F',
'reference': {
'id': 'hxn167'
},
'schema': [
],
'verified': True,
'version': 1
}
snapshots['test_edit_sequence[True-uvloop-None] sequence'] = {
'_id': 'KX269872',
'definition': 'A made up sequence',
'host': 'Grapevine',
'isolate_id': 'cab8b360',
'otu_id': '6116cba1',
'segment': None,
'sequence': 'ATGCGTGTACTG'
}
snapshots['test_remove_sequence[True-uvloop-None] otu'] = {
'_id': '6116cba1',
'abbreviation': 'PVF',
'imported': True,
'isolates': [
{
'default': True,
'id': 'cab8b360',
'source_name': '8816-v2',
'source_type': 'isolate'
}
],
'last_indexed_version': 0,
'lower_name': 'prunus virus f',
'name': 'Prunus virus F',
'reference': {
'id': 'hxn167'
},
'schema': [
],
'verified': False,
'version': 1
}
snapshots['test_remove_sequence[True-uvloop-None] history'] = {
'_id': '6116cba1.1',
'created_at': GenericRepr('datetime.datetime(2015, 10, 6, 20, 0)'),
'description': 'Removed sequence KX269872 from Isolate 8816-v2',
'diff': [
[
'change',
'version',
[
0,
1
]
],
[
'remove',
[
'isolates',
0,
'sequences'
],
[
[
0,
{
'_id': 'KX269872',
'definition': 'Prunus virus F isolate 8816-s2 segment RNA2 polyprotein 2 gene, complete cds.',
'host': 'sweet cherry',
'isolate_id': 'cab8b360',
'otu_id': '6116cba1',
'segment': None,
'sequence': 'TGTTTAAGAGATTAAACAACCGCTTTC'
}
]
]
]
],
'index': {
'id': 'unbuilt',
'version': 'unbuilt'
},
'method_name': 'remove_sequence',
'otu': {
'id': '6116cba1',
'name': 'Prunus virus F',
'version': 1
},
'reference': {
'id': 'hxn167'
},
'user': {
'id': 'test'
}
}
snapshots['test_edit_sequence[True-uvloop-None] json'] = {
'definition': 'A made up sequence',
'host': 'Grapevine',
'id': 'KX269872',
'isolate_id': 'cab8b360',
'otu_id': '6116cba1',
'segment': None,
'sequence': 'ATGCGTGTACTG'
}
snapshots['TestAddIsolate.test_default[True-uvloop-True] json'] = {
'default': True,
'id': '9pf',
'sequences': [
],
'source_name': 'b',
'source_type': 'isolate'
}
snapshots['TestAddIsolate.test_default[True-uvloop-True] otu'] = {
'_id': '6116cba1',
'abbreviation': 'PVF',
'imported': True,
'isolates': [
{
'default': False,
'id': 'cab8b360',
'source_name': '8816-v2',
'source_type': 'isolate'
},
{
'default': True,
'id': '9pf',
'source_name': 'b',
'source_type': 'isolate'
}
],
'last_indexed_version': 0,
'lower_name': 'prunus virus f',
'name': 'Prunus virus F',
'reference': {
'id': 'hxn167'
},
'schema': [
],
'verified': False,
'version': 1
}
snapshots['TestAddIsolate.test_default[True-uvloop-True] history'] = {
'_id': '6116cba1.1',
'created_at': GenericRepr('datetime.datetime(2015, 10, 6, 20, 0)'),
'description': 'Added Isolate b as default',
'diff': [
[
'change',
'version',
[
0,
1
]
],
[
'change',
[
'isolates',
0,
'default'
],
[
True,
False
]
],
[
'add',
'isolates',
[
[
1,
{
'default': True,
'id': '9pf',
'sequences': [
],
'source_name': 'b',
'source_type': 'isolate'
}
]
]
]
],
'index': {
'id': 'unbuilt',
'version': 'unbuilt'
},
'method_name': 'add_isolate',
'otu': {
'id': '6116cba1',
'name': 'Prunus virus F',
'version': 1
},
'reference': {
'id': 'hxn167'
},
'user': {
'id': 'test'
}
}
snapshots['TestAddIsolate.test_default[True-uvloop-False] json'] = {
'default': False,
'id': '9pf',
'sequences': [
],
'source_name': 'b',
'source_type': 'isolate'
}
snapshots['TestAddIsolate.test_default[True-uvloop-False] otu'] = {
'_id': '6116cba1',
'abbreviation': 'PVF',
'imported': True,
'isolates': [
{
'default': True,
'id': 'cab8b360',
'source_name': '8816-v2',
'source_type': 'isolate'
},
{
'default': False,
'id': '9pf',
'source_name': 'b',
'source_type': 'isolate'
}
],
'last_indexed_version': 0,
'lower_name': 'prunus virus f',
'name': 'Prunus virus F',
'reference': {
'id': 'hxn167'
},
'schema': [
],
'verified': False,
'version': 1
}
snapshots['TestAddIsolate.test_default[True-uvloop-False] history'] = {
'_id': '6116cba1.1',
'created_at': GenericRepr('datetime.datetime(2015, 10, 6, 20, 0)'),
'description': 'Added Isolate b',
'diff': [
[
'change',
'version',
[
0,
1
]
],
[
'add',
'isolates',
[
[
1,
{
'default': False,
'id': '9pf',
'sequences': [
],
'source_name': 'b',
'source_type': 'isolate'
}
]
]
]
],
'index': {
'id': 'unbuilt',
'version': 'unbuilt'
},
'method_name': 'add_isolate',
'otu': {
'id': '6116cba1',
'name': 'Prunus virus F',
'version': 1
},
'reference': {
'id': 'hxn167'
},
'user': {
'id': 'test'
}
}
snapshots['TestRemoveIsolate.test[True-uvloop] history'] = {
'_id': '6116cba1.1',
'created_at': GenericRepr('datetime.datetime(2015, 10, 6, 20, 0)'),
'description': 'Removed Isolate 8816-v2',
'diff': [
[
'change',
'version',
[
0,
1
]
],
[
'remove',
'isolates',
[
[
0,
{
'default': True,
'id': 'cab8b360',
'sequences': [
{
'_id': 'KX269872',
'definition': 'Prunus virus F isolate 8816-s2 segment RNA2 polyprotein 2 gene, complete cds.',
'host': 'sweet cherry',
'isolate_id': 'cab8b360',
'otu_id': '6116cba1',
'segment': None,
'sequence': 'TGTTTAAGAGATTAAACAACCGCTTTC'
}
],
'source_name': '8816-v2',
'source_type': 'isolate'
}
]
]
]
],
'index': {
'id': 'unbuilt',
'version': 'unbuilt'
},
'method_name': 'remove_isolate',
'otu': {
'id': '6116cba1',
'name': 'Prunus virus F',
'version': 1
},
'reference': {
'id': 'hxn167'
},
'user': {
'id': 'test'
}
}
snapshots['TestCreate.test[True-uvloop--True] json'] = {
'abbreviation': '',
'id': '9pfsom1b',
'isolates': [
],
'issues': {
'empty_isolate': False,
'empty_otu': True,
'empty_sequence': False,
'isolate_inconsistency': False
},
'last_indexed_version': None,
'most_recent_change': {
'created_at': '2015-10-06T20:00:00Z',
'description': 'Created Tobacco mosaic virus',
'diff': {
'_id': '9pfsom1b',
'abbreviation': '',
'isolates': [
],
'last_indexed_version': None,
'lower_name': 'tobacco mosaic virus',
'name': 'Tobacco mosaic virus',
'reference': {
'id': 'foo'
},
'schema': [
],
'verified': False,
'version': 0
},
'id': '9pfsom1b.0',
'index': {
'id': 'unbuilt',
'version': 'unbuilt'
},
'method_name': 'create',
'otu': {
'id': '9pfsom1b',
'name': 'Tobacco mosaic virus',
'version': 0
},
'reference': {
'id': 'foo'
},
'user': {
'id': 'test'
}
},
'name': 'Tobacco mosaic virus',
'reference': {
'id': 'foo'
},
'schema': [
],
'verified': False,
'version': 0
}
snapshots['TestCreate.test[True-uvloop--True] otu'] = {
'_id': '9pfsom1b',
'abbreviation': '',
'isolates': [
],
'last_indexed_version': None,
'lower_name': 'tobacco mosaic virus',
'name': 'Tobacco mosaic virus',
'reference': {
'id': 'foo'
},
'schema': [
],
'verified': False,
'version': 0
}
snapshots['TestCreate.test[True-uvloop--True] history'] = {
'_id': '9pfsom1b.0',
'created_at': GenericRepr('datetime.datetime(2015, 10, 6, 20, 0)'),
'description': 'Created Tobacco mosaic virus',
'diff': {
'_id': '9pfsom1b',
'abbreviation': '',
'isolates': [
],
'last_indexed_version': None,
'lower_name': 'tobacco mosaic virus',
'name': 'Tobacco mosaic virus',
'reference': {
'id': 'foo'
},
'schema': [
],
'verified': False,
'version': 0
},
'index': {
'id': 'unbuilt',
'version': 'unbuilt'
},
'method_name': 'create',
'otu': {
'id': '9pfsom1b',
'name': 'Tobacco mosaic virus',
'version': 0
},
'reference': {
'id': 'foo'
},
'user': {
'id': 'test'
}
}
|
[
"snapshottest.GenericRepr",
"snapshottest.Snapshot"
] |
[((169, 179), 'snapshottest.Snapshot', 'Snapshot', ([], {}), '()\n', (177, 179), False, 'from snapshottest import GenericRepr, Snapshot\n'), ((288, 340), 'snapshottest.GenericRepr', 'GenericRepr', (['"""datetime.datetime(2015, 10, 6, 20, 0)"""'], {}), "('datetime.datetime(2015, 10, 6, 20, 0)')\n", (299, 340), False, 'from snapshottest import GenericRepr, Snapshot\n'), ((1527, 1579), 'snapshottest.GenericRepr', 'GenericRepr', (['"""datetime.datetime(2015, 10, 6, 20, 0)"""'], {}), "('datetime.datetime(2015, 10, 6, 20, 0)')\n", (1538, 1579), False, 'from snapshottest import GenericRepr, Snapshot\n'), ((2773, 2825), 'snapshottest.GenericRepr', 'GenericRepr', (['"""datetime.datetime(2015, 10, 6, 20, 0)"""'], {}), "('datetime.datetime(2015, 10, 6, 20, 0)')\n", (2784, 2825), False, 'from snapshottest import GenericRepr, Snapshot\n'), ((4004, 4056), 'snapshottest.GenericRepr', 'GenericRepr', (['"""datetime.datetime(2015, 10, 6, 20, 0)"""'], {}), "('datetime.datetime(2015, 10, 6, 20, 0)')\n", (4015, 4056), False, 'from snapshottest import GenericRepr, Snapshot\n'), ((7746, 7798), 'snapshottest.GenericRepr', 'GenericRepr', (['"""datetime.datetime(2015, 10, 6, 20, 0)"""'], {}), "('datetime.datetime(2015, 10, 6, 20, 0)')\n", (7757, 7798), False, 'from snapshottest import GenericRepr, Snapshot\n'), ((8804, 8856), 'snapshottest.GenericRepr', 'GenericRepr', (['"""datetime.datetime(2015, 10, 6, 20, 0)"""'], {}), "('datetime.datetime(2015, 10, 6, 20, 0)')\n", (8815, 8856), False, 'from snapshottest import GenericRepr, Snapshot\n'), ((10002, 10054), 'snapshottest.GenericRepr', 'GenericRepr', (['"""datetime.datetime(2015, 10, 6, 20, 0)"""'], {}), "('datetime.datetime(2015, 10, 6, 20, 0)')\n", (10013, 10054), False, 'from snapshottest import GenericRepr, Snapshot\n'), ((10838, 10890), 'snapshottest.GenericRepr', 'GenericRepr', (['"""datetime.datetime(2015, 10, 6, 20, 0)"""'], {}), "('datetime.datetime(2015, 10, 6, 20, 0)')\n", (10849, 10890), False, 'from snapshottest import 
GenericRepr, Snapshot\n'), ((11896, 11948), 'snapshottest.GenericRepr', 'GenericRepr', (['"""datetime.datetime(2015, 10, 6, 20, 0)"""'], {}), "('datetime.datetime(2015, 10, 6, 20, 0)')\n", (11907, 11948), False, 'from snapshottest import GenericRepr, Snapshot\n'), ((13119, 13171), 'snapshottest.GenericRepr', 'GenericRepr', (['"""datetime.datetime(2015, 10, 6, 20, 0)"""'], {}), "('datetime.datetime(2015, 10, 6, 20, 0)')\n", (13130, 13171), False, 'from snapshottest import GenericRepr, Snapshot\n'), ((14309, 14361), 'snapshottest.GenericRepr', 'GenericRepr', (['"""datetime.datetime(2015, 10, 6, 20, 0)"""'], {}), "('datetime.datetime(2015, 10, 6, 20, 0)')\n", (14320, 14361), False, 'from snapshottest import GenericRepr, Snapshot\n'), ((15138, 15190), 'snapshottest.GenericRepr', 'GenericRepr', (['"""datetime.datetime(2015, 10, 6, 20, 0)"""'], {}), "('datetime.datetime(2015, 10, 6, 20, 0)')\n", (15149, 15190), False, 'from snapshottest import GenericRepr, Snapshot\n'), ((15951, 16003), 'snapshottest.GenericRepr', 'GenericRepr', (['"""datetime.datetime(2015, 10, 6, 20, 0)"""'], {}), "('datetime.datetime(2015, 10, 6, 20, 0)')\n", (15962, 16003), False, 'from snapshottest import GenericRepr, Snapshot\n'), ((16761, 16813), 'snapshottest.GenericRepr', 'GenericRepr', (['"""datetime.datetime(2015, 10, 6, 20, 0)"""'], {}), "('datetime.datetime(2015, 10, 6, 20, 0)')\n", (16772, 16813), False, 'from snapshottest import GenericRepr, Snapshot\n'), ((18825, 18877), 'snapshottest.GenericRepr', 'GenericRepr', (['"""datetime.datetime(2015, 10, 6, 20, 0)"""'], {}), "('datetime.datetime(2015, 10, 6, 20, 0)')\n", (18836, 18877), False, 'from snapshottest import GenericRepr, Snapshot\n'), ((19951, 20003), 'snapshottest.GenericRepr', 'GenericRepr', (['"""datetime.datetime(2015, 10, 6, 20, 0)"""'], {}), "('datetime.datetime(2015, 10, 6, 20, 0)')\n", (19962, 20003), False, 'from snapshottest import GenericRepr, Snapshot\n'), ((21788, 21840), 'snapshottest.GenericRepr', 'GenericRepr', 
(['"""datetime.datetime(2015, 10, 6, 20, 0)"""'], {}), "('datetime.datetime(2015, 10, 6, 20, 0)')\n", (21799, 21840), False, 'from snapshottest import GenericRepr, Snapshot\n'), ((22935, 22987), 'snapshottest.GenericRepr', 'GenericRepr', (['"""datetime.datetime(2015, 10, 6, 20, 0)"""'], {}), "('datetime.datetime(2015, 10, 6, 20, 0)')\n", (22946, 22987), False, 'from snapshottest import GenericRepr, Snapshot\n'), ((24082, 24134), 'snapshottest.GenericRepr', 'GenericRepr', (['"""datetime.datetime(2015, 10, 6, 20, 0)"""'], {}), "('datetime.datetime(2015, 10, 6, 20, 0)')\n", (24093, 24134), False, 'from snapshottest import GenericRepr, Snapshot\n'), ((25447, 25499), 'snapshottest.GenericRepr', 'GenericRepr', (['"""datetime.datetime(2015, 10, 6, 20, 0)"""'], {}), "('datetime.datetime(2015, 10, 6, 20, 0)')\n", (25458, 25499), False, 'from snapshottest import GenericRepr, Snapshot\n'), ((26539, 26591), 'snapshottest.GenericRepr', 'GenericRepr', (['"""datetime.datetime(2015, 10, 6, 20, 0)"""'], {}), "('datetime.datetime(2015, 10, 6, 20, 0)')\n", (26550, 26591), False, 'from snapshottest import GenericRepr, Snapshot\n'), ((49762, 49814), 'snapshottest.GenericRepr', 'GenericRepr', (['"""datetime.datetime(2015, 10, 6, 20, 0)"""'], {}), "('datetime.datetime(2015, 10, 6, 20, 0)')\n", (49773, 49814), False, 'from snapshottest import GenericRepr, Snapshot\n'), ((51730, 51782), 'snapshottest.GenericRepr', 'GenericRepr', (['"""datetime.datetime(2015, 10, 6, 20, 0)"""'], {}), "('datetime.datetime(2015, 10, 6, 20, 0)')\n", (51741, 51782), False, 'from snapshottest import GenericRepr, Snapshot\n'), ((53657, 53709), 'snapshottest.GenericRepr', 'GenericRepr', (['"""datetime.datetime(2015, 10, 6, 20, 0)"""'], {}), "('datetime.datetime(2015, 10, 6, 20, 0)')\n", (53668, 53709), False, 'from snapshottest import GenericRepr, Snapshot\n'), ((58797, 58849), 'snapshottest.GenericRepr', 'GenericRepr', (['"""datetime.datetime(2015, 10, 6, 20, 0)"""'], {}), "('datetime.datetime(2015, 10, 6, 20, 
0)')\n", (58808, 58849), False, 'from snapshottest import GenericRepr, Snapshot\n'), ((62902, 62954), 'snapshottest.GenericRepr', 'GenericRepr', (['"""datetime.datetime(2015, 10, 6, 20, 0)"""'], {}), "('datetime.datetime(2015, 10, 6, 20, 0)')\n", (62913, 62954), False, 'from snapshottest import GenericRepr, Snapshot\n'), ((64491, 64543), 'snapshottest.GenericRepr', 'GenericRepr', (['"""datetime.datetime(2015, 10, 6, 20, 0)"""'], {}), "('datetime.datetime(2015, 10, 6, 20, 0)')\n", (64502, 64543), False, 'from snapshottest import GenericRepr, Snapshot\n'), ((67555, 67607), 'snapshottest.GenericRepr', 'GenericRepr', (['"""datetime.datetime(2015, 10, 6, 20, 0)"""'], {}), "('datetime.datetime(2015, 10, 6, 20, 0)')\n", (67566, 67607), False, 'from snapshottest import GenericRepr, Snapshot\n'), ((70062, 70114), 'snapshottest.GenericRepr', 'GenericRepr', (['"""datetime.datetime(2015, 10, 6, 20, 0)"""'], {}), "('datetime.datetime(2015, 10, 6, 20, 0)')\n", (70073, 70114), False, 'from snapshottest import GenericRepr, Snapshot\n'), ((72250, 72302), 'snapshottest.GenericRepr', 'GenericRepr', (['"""datetime.datetime(2015, 10, 6, 20, 0)"""'], {}), "('datetime.datetime(2015, 10, 6, 20, 0)')\n", (72261, 72302), False, 'from snapshottest import GenericRepr, Snapshot\n'), ((73334, 73386), 'snapshottest.GenericRepr', 'GenericRepr', (['"""datetime.datetime(2015, 10, 6, 20, 0)"""'], {}), "('datetime.datetime(2015, 10, 6, 20, 0)')\n", (73345, 73386), False, 'from snapshottest import GenericRepr, Snapshot\n'), ((76747, 76799), 'snapshottest.GenericRepr', 'GenericRepr', (['"""datetime.datetime(2015, 10, 6, 20, 0)"""'], {}), "('datetime.datetime(2015, 10, 6, 20, 0)')\n", (76758, 76799), False, 'from snapshottest import GenericRepr, Snapshot\n')]
|
import argparse
import inspect
from copy import deepcopy
from functools import partial, update_wrapper
from typing import List, Mapping, Sequence, _GenericAlias
import yaml
class Registrable:
    """Marker base class for objects that can be stored in the RLHive Registry.

    Subclasses can be configured directly from the command line and built
    recursively from a config dictionary, provided their constructors carry
    type annotations.
    """
    @classmethod
    def type_name(cls):
        """Return a short string naming the category of class being created,
        e.g. "logger", "agent", or "env". Subclasses must override this;
        the base implementation raises ``ValueError``.
        """
        raise ValueError
class CallableType(Registrable):
    """Wrapper that lets an arbitrary callable be registered in the RLHive
    Registry.

    Via ``update_wrapper``, the wrapper takes on the wrapped function's
    metadata, so the underlying argument names and type annotations remain
    visible on the outer object. Calling the wrapper does not execute the
    function; instead it returns a ``functools.partial`` with the supplied
    arguments bound.

    The type_name defaults to "callable"; to create more specific callable
    categories, subclass and override :py:meth:`type_name`
    (see :py:class:`hive.utils.utils.OptimizerFn`).
    """
    def __init__(self, fn):
        """
        Args:
            fn: callable to be wrapped.
        """
        self._fn = fn
        # Copy the wrapped function's metadata (name, docstring, annotations)
        # onto this wrapper so introspection sees the underlying signature.
        update_wrapper(self, self._fn)

    def __call__(self, *args, **kwargs):
        # Defer execution: bind the given arguments into a partial instead of
        # invoking the wrapped callable directly.
        bound = partial(self._fn, *args, **kwargs)
        return bound

    @classmethod
    def type_name(cls):
        return "callable"

    def __repr__(self):
        return f"<{type(self).__name__} {repr(self._fn)}>"
class Registry:
    """This is the Registry class for RLHive. It allows you to register different types
    of :py:class:`Registrable` classes and objects and generates constructors for those
    classes in the form of `get_{type_name}`.
    These constructors allow you to construct objects from dictionary configs. These
    configs should have two fields: `name`, which corresponds to the name used when
    registering a class in the registry, and `kwargs`, which corresponds to the keyword
    arguments that will be passed to the constructor of the object. These constructors
    can also build objects recursively, i.e. if a config contains the config for
    another `Registrable` object, this will be automatically created before being
    passed to the constructor of the original object. These constructors also allow you
    to directly specify/override arguments for object constructors directly from the
    command line. These parameters are specified in dot notation. They also are able
    to handle lists and dictionaries of Registrable objects.
    For example, let's consider the following scenario:
    Your agent class has an argument `arg1` which is annotated to be `List[Class1]`,
    `Class1` is `Registrable`, and the `Class1` constructor takes an argument `arg2`.
    In the passed yml config, there are two different Class1 object configs listed.
    the constructor will check to see if both `--agent.arg1.0.arg2` and
    `--agent.arg1.1.arg2` have been passed.
    The parameters passed in the command line will be parsed according to the type
    annotation of the corresponding low level constructor. If it is not one of
    `int`, `float`, `str`, or `bool`, it simply loads the string into python using a
    yaml loader.
    Each constructor returns the object, as well as a dictionary config with all the
    parameters used to create the object and any Registrable objects created in the
    process of creating this object.
    """
    def __init__(self) -> None:
        # Nested mapping: {type_name: {registered name: constructor}}.
        self._registry = {}
    def register(self, name, constructor, type):
        """Register a Registrable class/object with RLHive.
        Args:
            name (str): Name of the class/object being registered.
            constructor (callable): Callable that will be passed all kwargs from
                configs and be analyzed to get type annotations.
            type (type): Type of class/object being registered. Should be subclass of
                Registrable. NOTE: this parameter shadows the builtin ``type``
                inside this method.
        """
        if not issubclass(type, Registrable):
            raise ValueError(f"{type} is not Registrable")
        # Lazily create the sub-registry for this type_name on first use.
        if type.type_name() not in self._registry:
            self._registry[type.type_name()] = {}
        # ``getter`` is installed below (via setattr) as a method named
        # ``get_{type_name}`` on the Registry class, so its ``self`` is a
        # Registry instance. It closes over ``type`` (the registered base
        # class) to know which sub-registry to look in.
        def getter(self, object_or_config, prefix=None):
            # Nothing configured: nothing to build.
            if object_or_config is None:
                return None, {}
            # An already-constructed instance of the registered base class is
            # passed through unchanged with an empty expanded config.
            elif isinstance(object_or_config, type):
                return object_or_config, {}
            # Otherwise it must be a dict config with "name" and optional "kwargs".
            name = object_or_config["name"]
            kwargs = object_or_config.get("kwargs", {})
            # Deep copy so the returned expanded config can be filled in
            # without mutating the caller's config.
            expanded_config = deepcopy(object_or_config)
            if name in self._registry[type.type_name()]:
                object_class = self._registry[type.type_name()][name]
                # Merge in command-line overrides for this object's arguments
                # (helper presumably parses dot-notation flags; defined
                # elsewhere in this module -- not visible here).
                parsed_args = get_callable_parsed_args(object_class, prefix=prefix)
                kwargs.update(parsed_args)
                # Recursively build any nested Registrable objects in kwargs.
                kwargs, kwargs_config = construct_objects(
                    object_class, kwargs, prefix
                )
                expanded_config["kwargs"] = kwargs_config
                return object_class(**kwargs), expanded_config
            else:
                raise ValueError(f"{name} class not found")
        # NOTE: set on the class, not the instance, so every Registry object
        # gains the generated ``get_{type_name}`` constructor.
        setattr(self.__class__, f"get_{type.type_name()}", getter)
        self._registry[type.type_name()][name] = constructor
    def register_all(self, base_class, class_dict):
        """Bulk register function.
        Args:
            base_class (type): Corresponds to the `type` of the register function.
            class_dict (dict[str, callable]): A dictionary mapping from name to
                constructor.
        """
        for cls in class_dict:
            self.register(cls, class_dict[cls], base_class)
    def __repr__(self):
        return str(self._registry)
def construct_objects(object_constructor, config, prefix=None):
    """Helper function that constructs any objects specified in the config that
    are registrable.

    Returns the object, as well a dictionary config with all the parameters used to
    create the object and any Registrable objects created in the process of creating
    this object. Note that ``config`` is mutated in place: each Registrable entry
    is replaced by its constructed object.

    Args:
        object_constructor (callable): constructor of object that corresponds to
            config. The signature of this function will be analyzed to see if there
            are any :py:class:`Registrable` objects that might be specified in the
            config.
        config (dict): The kwargs for the object being created. May contain configs for
            other `Registrable` objects that need to be recursively created.
        prefix (str): Prefix that is attached to the argument names when looking for
            command line arguments.

    Returns:
        tuple: ``(config, expanded_config)``; ``config`` with Registrable entries
        replaced by constructed objects, and ``expanded_config`` recording every
        parameter used during construction.
    """
    signature = inspect.signature(object_constructor)
    prefix = "" if prefix is None else f"{prefix}."
    expanded_config = deepcopy(config)
    for argument in signature.parameters:
        if argument not in config:
            continue
        expected_type = signature.parameters[argument].annotation
        # Plain Registrable annotation: construct the single object via the
        # dynamically attached `get_<type_name>` accessor on the registry.
        if isinstance(expected_type, type) and issubclass(expected_type, Registrable):
            config[argument], expanded_config[argument] = registry.__getattribute__(
                f"get_{expected_type.type_name()}"
            )(config[argument], f"{prefix}{argument}")
        # Generic annotations: handle List[Registrable] and Dict[..., Registrable].
        if isinstance(expected_type, _GenericAlias):
            origin = expected_type.__origin__
            args = expected_type.__args__
            if (
                (origin == List or origin == list)
                and len(args) == 1
                and isinstance(args[0], type)
                and issubclass(args[0], Registrable)
                and isinstance(config[argument], Sequence)
            ):
                # Construct each list element, suffixing the prefix with its index.
                objs = []
                expanded_config[argument] = []
                for idx, item in enumerate(config[argument]):
                    obj, obj_config = registry.__getattribute__(
                        f"get_{args[0].type_name()}"
                    )(item, f"{prefix}{argument}.{idx}")
                    objs.append(obj)
                    expanded_config[argument].append(obj_config)
                config[argument] = objs
            elif (
                origin == dict
                and len(args) == 2
                and isinstance(args[1], type)
                and issubclass(args[1], Registrable)
                and isinstance(config[argument], Mapping)
            ):
                # Construct each dict value, suffixing the prefix with its key.
                objs = {}
                expanded_config[argument] = {}
                for key, val in config[argument].items():
                    obj, obj_config = registry.__getattribute__(
                        f"get_{args[1].type_name()}"
                    )(val, f"{prefix}{argument}.{key}")
                    objs[key] = obj
                    expanded_config[argument][key] = obj_config
                config[argument] = objs
    return config, expanded_config
def get_callable_parsed_args(callable, prefix=None):
    """Extract command line overrides for the parameters of *callable*.

    The signature of *callable* is inspected and every parameter except
    ``self`` is looked up on the command line (prefixed by *prefix*).

    Args:
        callable (callable): function whose arguments will be inspected to extract
            arguments from the command line.
        prefix (str): Prefix that is attached to the argument names when looking for
            command line arguments.
    """
    parameters = inspect.signature(callable).parameters
    arguments = {
        param_name: parameter
        for param_name, parameter in parameters.items()
        if param_name != "self"
    }
    return get_parsed_args(arguments, prefix)
def get_parsed_args(arguments, prefix=None):
    """Extract and type-cast command line values for the given argument names.

    Given a mapping from argument names to types (or ``inspect.Parameter``
    objects), looks for a command line flag ``--<prefix>.<name>`` for each
    entry. Present values are cast with the expected type when it is one of
    ``int``, ``float`` or ``str``; ``bool`` values are parsed leniently
    (any prefix of ``"false"`` or the string ``"0"`` means ``False``); any
    other type is parsed by loading the string with a yaml loader.

    Args:
        arguments (dict[str, type]): dictionary mapping argument names to types
        prefix (str): prefix that is attached to each argument name before searching
            for command line arguments.
    """
    full_prefix = "" if prefix is None else f"{prefix}."
    parser = argparse.ArgumentParser()
    for arg_name in arguments:
        parser.add_argument(f"--{full_prefix}{arg_name}")
    known, _ = parser.parse_known_args()
    raw_values = vars(known)
    # Keep only the arguments that were actually passed, stripping the prefix.
    present = {}
    for key, value in raw_values.items():
        if value is None:
            continue
        short = key[len(full_prefix):] if key.startswith(full_prefix) else key
        present[short] = value
    # Cast each present value according to its expected type annotation.
    parsed = {}
    for arg_name, raw_value in present.items():
        expected = arguments[arg_name]
        if isinstance(expected, inspect.Parameter):
            expected = expected.annotation
        if expected in [int, str, float]:
            parsed[arg_name] = expected(raw_value)
        elif expected is bool:
            lowered = str(raw_value).lower()
            parsed[arg_name] = not ("false".startswith(lowered) or lowered == "0")
        else:
            parsed[arg_name] = yaml.safe_load(raw_value)
    return parsed
# Module-level Registry instance shared by the helpers above; `register`
# dynamically attaches `get_<type_name>` accessors to it.
registry = Registry()
|
[
"functools.partial",
"copy.deepcopy",
"argparse.ArgumentParser",
"functools.update_wrapper",
"inspect.signature",
"yaml.safe_load"
] |
[((7047, 7084), 'inspect.signature', 'inspect.signature', (['object_constructor'], {}), '(object_constructor)\n', (7064, 7084), False, 'import inspect\n'), ((7159, 7175), 'copy.deepcopy', 'deepcopy', (['config'], {}), '(config)\n', (7167, 7175), False, 'from copy import deepcopy\n'), ((9651, 9678), 'inspect.signature', 'inspect.signature', (['callable'], {}), '(callable)\n', (9668, 9678), False, 'import inspect\n'), ((10706, 10731), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {}), '()\n', (10729, 10731), False, 'import argparse\n'), ((1494, 1524), 'functools.update_wrapper', 'update_wrapper', (['self', 'self._fn'], {}), '(self, self._fn)\n', (1508, 1524), False, 'from functools import partial, update_wrapper\n'), ((1582, 1616), 'functools.partial', 'partial', (['self._fn', '*args'], {}), '(self._fn, *args, **kwargs)\n', (1589, 1616), False, 'from functools import partial, update_wrapper\n'), ((4864, 4890), 'copy.deepcopy', 'deepcopy', (['object_or_config'], {}), '(object_or_config)\n', (4872, 4890), False, 'from copy import deepcopy\n'), ((11694, 11731), 'yaml.safe_load', 'yaml.safe_load', (['parsed_args[argument]'], {}), '(parsed_args[argument])\n', (11708, 11731), False, 'import yaml\n')]
|
from django.dispatch import receiver
from django.db.models.signals import post_delete
from linuxmachinebeta.review.models import ServiceReview
@receiver(post_delete, sender=ServiceReview)
def update_rating_after_delete(sender, instance, **kwargs):
    """Recompute the parent service's rating whenever a review is deleted."""
    reviewed_service = instance.service
    reviewed_service.update_rating()
|
[
"django.dispatch.receiver"
] |
[((147, 190), 'django.dispatch.receiver', 'receiver', (['post_delete'], {'sender': 'ServiceReview'}), '(post_delete, sender=ServiceReview)\n', (155, 190), False, 'from django.dispatch import receiver\n')]
|
# users/admin.py
from django.contrib import admin
from django.contrib.auth.admin import UserAdmin
from .forms import ReviewsUserCreationForm, ReviewsUserChangeForm
from .models import ReviewsUser
class ReviewUserAdmin(UserAdmin):
    """Admin options for the custom ``ReviewsUser`` model.

    Reuses Django's stock ``UserAdmin`` behaviour, swapping in the custom
    creation/change forms and a reduced changelist column set.
    """
    # Form used on the "add user" admin page.
    add_form = ReviewsUserCreationForm
    # Form used on the "change user" admin page.
    form = ReviewsUserChangeForm
    # Columns shown in the admin changelist.
    # NOTE(review): assumes ReviewsUser exposes `created_reviews` — confirm.
    list_display = ['email', 'username', 'created_reviews']
    model = ReviewsUser
# Expose the custom user model in the default admin site with the options above.
admin.site.register(ReviewsUser, ReviewUserAdmin)
|
[
"django.contrib.admin.site.register"
] |
[((391, 440), 'django.contrib.admin.site.register', 'admin.site.register', (['ReviewsUser', 'ReviewUserAdmin'], {}), '(ReviewsUser, ReviewUserAdmin)\n', (410, 440), False, 'from django.contrib import admin\n')]
|
import os
import itertools
from itertools import product
# get_files(loadrules,["[F.3]","[A.1]"],[".yaml"])
def get_files(_path, _startwith=None, _endwith=None):
    '''
    Recursively collect files under ``_path`` matching prefix/suffix filters.

    :param _path: root directory that is walked recursively.
    :param _startwith: ``None``/"" (no filter), a single prefix string, or a
        list of prefixes like ``["str1","str2"]``. If a string is passed that
        is the path of an existing file, that single file is returned directly
        (the suffix filter is then ignored, preserving historical behaviour).
    :param _endwith: ``None``/"" (no filter), a single suffix string, or a
        list of suffixes like ``[".sol",".py"]``.
    :return: list of matching file paths. Each file appears at most once,
        even when it matches several (prefix, suffix) combinations (the
        previous implementation appended a duplicate per matching pair).
    '''
    if not _startwith:
        _startwith = [""]
    if isinstance(_startwith, str):
        # A plain string is either an existing file (returned as-is) or a
        # single prefix filter.
        if os.path.isfile(_startwith):
            return [_startwith]
        _startwith = [_startwith]
    if not _endwith:
        _endwith = [""]
    if isinstance(_endwith, str):
        _endwith = [_endwith]
    # Pre-compute every (prefix, suffix) pair once instead of per visited file.
    filters = list(product(_startwith, _endwith))
    all_files = []
    for fpath, _dirnames, fnames in os.walk(_path):
        for filename in fnames:
            if any(
                filename.startswith(pre) and filename.endswith(suf)
                for pre, suf in filters
            ):
                all_files.append(os.path.join(fpath, filename))
    return all_files
def filestartwith(_file):
    """Build ``[...]``-wrapped prefix filters from a comma separated spec.

    Each comma separated token ``t`` of a dotted spec is wrapped as ``"[t]"``;
    a falsy or dot-less spec yields ``[""]`` (match anything).

    :param _file: e.g. ``"F.3,A.1"`` -> ``["[F.3]", "[A.1]"]``; ``None``, ""
        or a string without a dot -> ``[""]``.
    """
    # Guard against falsy input (previously `'.' in None` raised TypeError);
    # mirrors the guard used by filenamewith.
    if _file and '.' in _file:
        return [f"[{token}]" for token in _file.split(",")]
    return [""]
def filenamewith(_file):
    """Split a comma separated string into a list of names.

    A falsy value yields ``[""]`` so the result is always usable as a
    prefix-filter list.
    """
    return _file.split(",") if _file else [""]
|
[
"os.path.isfile",
"os.walk",
"os.path.join",
"itertools.product"
] |
[((882, 896), 'os.walk', 'os.walk', (['_path'], {}), '(_path)\n', (889, 896), False, 'import os\n'), ((377, 403), 'os.path.isfile', 'os.path.isfile', (['_startwith'], {}), '(_startwith)\n', (391, 403), False, 'import os\n'), ((662, 691), 'itertools.product', 'product', (['_startwith', '_endwith'], {}), '(_startwith, _endwith)\n', (669, 691), False, 'from itertools import product\n'), ((778, 800), 'os.path.join', 'os.path.join', (['_fp', '_fn'], {}), '(_fp, _fn)\n', (790, 800), False, 'import os\n')]
|
"""Submodule containing frequency-based models."""
from freqtools.freq_data import OscillatorNoise
import numpy as np
import matplotlib.pyplot as plt
class FreqModel:
    """
    Base class for models defined over frequency: subclasses map frequencies
    (x axis) to model values (y axis).

    Positional arguments are accepted but discarded; subclasses define their
    own meaning for them. Every keyword argument is attached to the instance
    as an attribute (through ``setattr``, so property setters defined by
    subclasses are honoured).

    Parameters
    ----------
    *args :
        Ignored placeholder; subclasses implement positional behaviour.
    **kwargs :
        Stored on the instance as attributes.
    """

    def __init__(self, *args, **kwargs):
        del args
        for attr_name in kwargs:
            setattr(self, attr_name, kwargs[attr_name])

    def values(self, freqs):
        raise NotImplementedError("Subclasses have to implement this method.")

    def plot(self, freqs, ax=None, xscale="log", yscale="log", ylabel=""):
        """
        Plot the model values over *freqs*.

        Parameters
        ----------
        ax : Axis (optional)
            Existing axis to draw on; a new figure is created when omitted.
        xscale : {"log" or "linear"}
            Scaling of the x axis.
        yscale : {"log" or "linear"}
            Scaling for the y axis.
        ylabel : str
            Label for the y axis.

        Returns
        -------
        fig, ax : Figure, Axis
            The Figure and Axis handles of the plot that was used.
        """
        if ax is None:
            fig, ax = plt.subplots()
        else:
            fig = ax.figure
        ax.plot(freqs, self.values(freqs), label=self.label)
        ax.set_xscale(xscale)
        ax.set_yscale(yscale)
        ax.set_ylabel(ylabel)
        ax.set_xlabel("Frequency / Hz")
        plt.grid(True, which="both", ls="-")
        return fig, ax
class OscillatorNoiseModel(FreqModel):
    """
    A base class holding models of spectral densities of oscillator noise, i.e.
    frequency or phase noise. Its main purpose is to make it easy to convert between
    ASD(f), PSD(f) and L(f) in terms of both frequency and phase noise. The data is
    provided in one of these representations and makes all other representations
    available. Subclasses implement one representation method (e.g. ``script_L``
    or ``psd_phase``); the conversion methods below derive the rest from it.
    Parameters
    ----------
    *args :
        Placeholder, not used. The respective subclasses have to implement behaviour of
        positional arguments
    n_sided : 1 (optional)
        placeholder, for now only one-sided distributions are supported.
    label : str
        Optional label used for plotting.
    representation : str
        One of "asd_freq", "asd_phase", "psd_freq", "psd_phase", "script_L".
        NOTE(review): the default ``None`` does not pass the validation in the
        `representation` setter, so callers have to supply a valid value.
    **kwargs :
        All keyworded arguments are added as attribues.
    Attributes
    ----------
    n_sided
    label : str
        Optional label used for plotting
    representation
    unit
    ylabel
    """
    def __init__(self, n_sided=1, label="", representation=None, **kwargs):
        _allowed_representations = [
            "asd_freq",
            "asd_phase",
            "psd_freq",
            "psd_phase",
            "script_L",
        ]
        # Attributes are assigned by FreqModel's setattr loop; the keyword
        # order below ensures `_allowed_representations` is set before the
        # `representation` property setter validates its value.
        super().__init__(
            label=label,
            n_sided=n_sided,
            _allowed_representations=list(_allowed_representations),
            representation=representation,
            **kwargs
        )
        # Unit string per representation, used to build the y axis label.
        self._unit_dict = {
            "asd_freq": "Hz/$\\sqrt{\\mathrm{Hz}}$",
            "asd_phase": "$\\mathrm{rad}/\\sqrt{\\mathrm{Hz}}$",
            "psd_freq": "Hz${}^2$/Hz",
            "psd_phase": "rad${}^2$/Hz",
            "script_L": "dBc/Hz",
        }
        # y axis label template per representation ({} receives n_sided).
        self._ylabel_dict = {
            "asd_freq": "{}-sided ASD",
            "asd_phase": "{}-sided ASD",
            "psd_freq": "{}-sided PSD",
            "psd_phase": "{}-sided PSD",
            "script_L": "L(f)",
        }
    @property
    def ylabel(self):
        """y axis label used for plotting; doesn't contain the unit."""  # noqa: D403
        return self._ylabel_dict[self.representation].format(self.n_sided)
    @property
    def unit(self):
        """String containing the unit of `values`"""
        return self._unit_dict[self.representation]
    @property
    def representation(self):
        """The representation of `values`."""
        return self._representation
    @representation.setter
    def representation(self, representation):
        # Reject anything outside the five supported representations.
        assert (
            representation in self._allowed_representations
        ), "representation must be one of {}".format(self._allowed_representations)
        self._representation = representation
    @property
    def n_sided(self):
        """Currently only one-sided distribtuions are supported."""
        return self._n_sided
    @n_sided.setter
    def n_sided(self, new_n):
        # FIXME: support for two-sided distributions.
        assert new_n == 1, "Only 1-sided distributions are supported as of yet."
        self._n_sided = new_n
    def values(self, freqs):
        """
        Array containing the values of the spectral density model. Maps to one
        representation, depending on `representation` attribute.
        """
        # Dispatch to the method named after the current representation.
        method = getattr(self, self.representation)
        return method(freqs)
    def asd_freq(self, freqs):
        """
        Amplitude spectral density of the frequency noise.
        Parameters
        ----------
        freqs : list_like
            Frequencies where the model is evaluated
        Returns
        -------
        1darray
        """
        # ASD_freq(f) = f * ASD_phase(f)
        return np.array(freqs) * self.asd_phase(freqs)
    def asd_phase(self, freqs):
        """
        Amplitude spectral density of the phase noise.
        Parameters
        ----------
        freqs : list_like
            Frequencies where the model is evaluated
        Returns
        -------
        1darray
        """
        # ASD is the square root of the PSD.
        return np.sqrt(self.psd_phase(freqs))
    def psd_freq(self, freqs):
        """
        Power spectral density of the frequency noise.
        Parameters
        ----------
        freqs : list_like
            Frequencies where the model is evaluated
        Returns
        -------
        1darray
        """
        return self.asd_freq(freqs) ** 2
    def psd_phase(self, freqs):
        """
        Power spectral density of the phase noise.
        Parameters
        ----------
        freqs : list_like
            Frequencies where the model is evaluated
        Returns
        -------
        1darray
        """
        # psd_phase can either be derived from psd_freq or script_L
        try:
            # convert to linear scale, factor 1/10 in exponent because dBc are used
            psd_phase = 10 ** (self.script_L(freqs) / 10)
            if self.n_sided == 1:
                # one-sided distributions have a factor 2, see Table A1 in [1]
                psd_phase *= 2
        except AttributeError:
            # fall back to the frequency-noise PSD: S_phi(f) = S_nu(f) / f^2
            psd_phase = self.psd_freq(freqs) / np.array(freqs) ** 2
        return psd_phase
    def script_L(self, freqs):
        """
        The phase noise L(f) (pronounced "script ell of f").
        Parameters
        ----------
        freqs : list_like
            Frequencies where the model is evaluated
        Returns
        -------
        1darray
        """
        # see Table A.1 in [1] for the conversion from S_phi(f) and L(f)
        L = self.psd_phase(freqs)
        if self.n_sided == 1:
            L /= 2
        L = 10 * np.log10(L)  # convert to dBc/Hz
        return L
    def plot(self, freqs, ax=None, xscale="log", yscale="log", ylabel=""):
        """
        Plot the spectral density model.
        Parameters
        ----------
        freqs : list_like
            Frequencies where the model is evaluated.
        ax : matplotlib.axes.Axes (optional)
            The axes to plot on. If not given, a new figure is created.
        xscale : str {"log", "linear"} (optional)
            The scale of the x-axis.
        yscale : str {"log", "linear"} (optional)
            The scale of the y-axis.
        ylabel : str (optional)
            The label of the y-axis.
        """
        if not ylabel:
            # automatically create ylabel
            ylabel = self.ylabel + " / " + self.unit
        fig, ax = super().plot(
            freqs, ax=ax, xscale=xscale, yscale=yscale, ylabel=ylabel
        )
        # script_L is already logarithmic (dBc/Hz), so keep a linear y axis.
        if not self.representation == "script_L":
            ax.set_yscale("log")
        return fig, ax
    def to_oscillator_noise(self, freqs):
        """
        Convert the noise model to a `OscillatorNoise` object.
        Parameters
        ----------
        freqs : 1d-array
            The Fourier frequencies in Hz.
        Returns
        -------
        oscillator_noise : OscillatorNoise
            The model represented as an `OscillatorNoise` object.
        """
        oscillator_noise = OscillatorNoise(
            freqs,
            self.values(freqs),
            representation=self.representation,
            n_sided=self.n_sided,
            divide_by=1,
        )
        return oscillator_noise
class PowerLawNoise(OscillatorNoiseModel):
    r"""
    Power law phase and frequency noise models [1] for common noise types:
    .. math:: S_\phi = b_{i} \cdot f^{i}
    or
    .. math:: S_y = d_{i} \cdot f^{i}
    Parameters
    ----------
    coeff : float or list of floats
        Coefficient b_i (for phase noise) or d_i (for frequency noise), cp. [1]. Has to
        b a list if `edge_freqs` is set.
    exponent : int or list of ints
        The coefficient of the power law noise. The noise type depends on the `base`
        for a given exponent, cp. [1]. Has to be a list if `edge_freqs` is set.
    edge_freqs : list of floats (optional)
        Allows to construct composite models that have different noise types for
        different frequency ranges. In this case, `coeff` and `exponent` have to be
        lists of length `len(edge_freqs) + 1`. The edge frequencies are the frequencies
        where the noise type changes.
    Allowed coefficients for phase noise:
        - -4 : random walk frequency
        - -3 : flicker frequency
        - -2 : white frequency
        - -1 : flicker phase
        - 0 : white phase
    Allowed coefficients for frequency noise:
        - -2 : random walk frequency
        - -1 : flicker frequency
        - 0 : white frequency
        - 1 : flicker phase
        - 2 : white phase
    base : {'phase', 'freq'}:
        determines whether the exponent and coefficient is given in terms of phase or
        frequency.
    References
    ----------
    [1] <NAME> - Enrico's Chart of Phase Noise and Two-Sample Variances
        (http://rubiola.org/pdf-static/Enrico%27s-chart-EFTS.pdf)
    """
    def __init__(
        self,
        coeff=1,
        exponent=0,
        base="phase",
        representation="psd_phase",
        edge_freqs=None,
    ):
        assert base in ["phase", "freq"]
        if base == "freq":
            # express everything in terms of psd_phase
            # (shifting the exponent by -2 converts S_y exponents to S_phi ones)
            if type(exponent) == list:
                exponent = np.array(exponent)
            exponent = exponent - 2
        _label_dict = {
            -4: "random walk frequency",
            -3: "flicker frequency",
            -2: "white frequency",
            -1: "flicker phase",
            0: "white phase",
        }
        try:
            label = _label_dict[exponent] + " noise"
        except (KeyError, TypeError):
            # TypeError occurs for list/array exponents (composite models).
            label = "noise model"
        super().__init__(
            coeff=coeff, exponent=exponent, label=label, representation=representation
        )
        if edge_freqs:
            # Append +inf so the last segment extends to arbitrary frequency.
            self.edge_freqs = list(edge_freqs)
            self.edge_freqs.append(np.inf)
    def psd_phase(self, freqs):
        """
        Power spectral density of the phase noise.
        Parameters
        ----------
        freqs : list_like
            Frequencies where the model is evaluated.
        Returns
        -------
        1darray :
            The power spectral density of the phase noise.
        """
        # Implement PSD of phase, all other representations can be calculated by virtue
        # of subclassing OscillatorNoiseModel.
        # FIXME: Improve the cases
        if type(self.coeff) == list:
            # Composite model: apply each (coeff, exponent) pair to its
            # frequency segment (previous edge, current edge].
            previous_f_edge = 0
            freqs = np.array(freqs)
            values = []
            for f_edge, coeff, exp in zip(self.edge_freqs, self.coeff, self.exponent):
                idx = np.where(np.logical_and(freqs > previous_f_edge, freqs <= f_edge))
                new_vals = coeff * freqs[idx] ** exp
                values.append(new_vals)
                previous_f_edge = f_edge
            # flatten the list of lists
            values = [item for sublist in values for item in sublist]
            if len(values) < len(freqs):
                # add the last value
                # NOTE(review): this patches a length mismatch (e.g. a
                # frequency equal to 0 falls outside every segment) by
                # appending a value computed from freqs[-1] — confirm intent.
                values.append(coeff * freqs[-1] ** exp)
            values = np.array(values)
        else:
            values = self.coeff * freqs**self.exponent
        return values
class JohnsonNoise(OscillatorNoiseModel):
    """
    Johnson Noise model: a flat (white) noise floor relative to the carrier.

    Parameters
    ----------
    signal_power : float
        Carrier signal power in dBm / Hz
    temperature : float (default 300.)
        Temperature in kelvin
    label : str
        Label used for plotting.
    representation : str
        Representation of the noise, one of the representations supported by
        :class:`OscillatorNoiseModel` (e.g. "script_L").

    Attributes
    ----------
    signal_power : float
    temperature : float

    References
    ----------
    [1] Wikipedia: Johnson–Nyquist noise
        (https://en.wikipedia.org/wiki/Johnson%E2%80%93Nyquist_noise)
    """

    def __init__(
        self,
        signal_power,
        temperature=300.0,
        label="Johnson Noise",
        representation=None,
    ):
        # BUG FIX: `representation` was accepted but never forwarded to the
        # superclass, so it was silently ignored and the superclass default of
        # None failed the representation setter's validation on construction.
        super().__init__(
            temperature=temperature,
            label=label,
            n_sided=1,
            representation=representation,
        )
        self.signal_power = signal_power

    def script_L(self, freqs):
        """
        Calculate the script_L representation of the Johnson noise.

        Parameters
        ----------
        freqs : list_like
            Frequencies where the model is evaluated.

        Returns
        -------
        1darray :
            The script_L representation of the Johnson noise.
        """
        # Implement L(f); the other representations are derived by
        # OscillatorNoiseModel.
        kb = 1.380649e-23  # Boltzmann constant in J/K
        freqs = np.ones(len(freqs))  # flat noise floor, independent of f
        # 1e-3 because normalized to mW, normalized to signal power, length of freqds
        noise = (
            10 * np.log10(4 * kb * self.temperature / 1e-3) * freqs - self.signal_power
        )
        # subtract 3 dB since above quantity is defined as one-sided according to [1]
        noise -= 3
        return noise
class PhotonShotNoise(OscillatorNoiseModel):
    """
    Shot noise of an optical beatnote.

    Parameters
    ----------
    signal_power : float
        Signal power in dBm / Hz
    optical_power : float (default 1e-3)
        optical power in W
    radiant_sensitivity : float (default 0.3)
        Radiant sensitivity of the photodiode in A/W. Default taken for Hamamatsu G4176.
    representation : str
        Representation of the noise, one of the representations supported by
        :class:`OscillatorNoiseModel` (e.g. "script_L").
    resistivity : float (default 50)
        resistivity in Ohm.
    label : str
        Label used for plotting.
    """

    def __init__(
        self,
        signal_power,
        optical_power=1e-3,
        radiant_sensitivity=0.3,
        representation=None,
        resistivity=50,
        label="Photon shot noise",
    ):
        # BUG FIX: `representation` was accepted but never forwarded to the
        # superclass, so it was silently ignored and the superclass default of
        # None failed the representation setter's validation on construction.
        super().__init__(
            radiant_sensitivity=radiant_sensitivity,
            resistivity=resistivity,
            label=label,
            optical_power=optical_power,
            n_sided=1,
            representation=representation,
        )
        self.signal_power = signal_power

    def script_L(self, freqs):
        """
        Calculate the script_L representation of the photon shot noise.

        Parameters
        ----------
        freqs : list_like
            Frequencies where the model is evaluated.

        Returns
        -------
        1darray :
            The script_L representation of the photon shot noise.
        """
        e = 1.6e-19  # electron charge in C
        freqs = np.ones(len(freqs))  # flat noise floor, independent of f
        noise = (
            10
            * np.log10(
                2
                * e
                * self.radiant_sensitivity
                * self.optical_power
                * self.resistivity
                / 1e-3
            )
            * freqs
            - self.signal_power
        )
        # FIXME: Assume above expression is a one-sided distribution, but didn't check.
        noise -= 3
        return noise
class NoiseFloor(OscillatorNoiseModel):
    """
    Used for converting a spectrum analyzer measurement to oscilaltor noise model of the
    noise floor by dividing the detection noise by the carrier signal ampliude.

    Parameters
    ----------
    signal_power : float
        Signal power in dBm / Hz
    noise_floor : float
        measured noise floor in dBm / Hz
    representation : str
        Representation of the noise, one of the representations supported by
        :class:`OscillatorNoiseModel` (e.g. "script_L").
    divide_by : int (optional)
        dividy-by factor if prescaler was used for the measurements

    Attributes
    ----------
    signal_power : float
        Signal power in dBm / Hz
    noise_floor : float
        measured noise floor in dBm / Hz
    divide_by : int
        dividy-by factor if prescaler was used for the measurements
    """

    def __init__(
        self,
        signal_power,
        noise_floor,
        representation=None,
        divide_by=1,
        label="Detection noise",
    ):
        # BUG FIX: `representation` was accepted but never forwarded to the
        # superclass, so it was silently ignored and the superclass default of
        # None failed the representation setter's validation on construction.
        super().__init__(
            label=label, divide_by=divide_by, n_sided=1, representation=representation
        )
        self.signal_power = signal_power
        self.noise_floor = noise_floor

    def script_L(self, freqs):
        """
        Calculate the script_L representation of the noise floor.

        Parameters
        ----------
        freqs : list_like
            Frequencies where the model is evaluated.

        Returns
        -------
        1darray :
            The script_L representation of the noise floor.
        """
        freqs = np.ones(len(freqs))  # flat noise floor, independent of f
        # 20*log10(divide_by) undoes the prescaler's power scaling.
        noise = (
            freqs * self.noise_floor + 20 * np.log10(self.divide_by) - self.signal_power
        )
        noise -= 3  # is measured as one-sided distribution
        return noise
class BetaLine(OscillatorNoiseModel):
    """
    Model of the beta separation line as a function of frequency. It is
    originally defined for the single-sided spectral density of frequency
    noise (in Hz²/Hz).

    References
    ----------
    [1] <NAME>., <NAME>., & <NAME>. (2010). Simple approach to the
        relation between laser frequency noise and laser line shape.
        Applied Optics, 49(25), 4801.
        https://doi.org/10.1364/AO.49.004801
    """

    def __init__(self, representation="psd_freq", **kwargs):
        super().__init__(
            representation=representation, label=r"$\beta$ separation line", **kwargs
        )

    def psd_freq(self, freqs):
        """
        Values of the beta separation line in Hz²/Hz at the given frequencies.

        Parameters
        ----------
        freqs : float or list_like
            Frequency in Hz

        Returns
        -------
        1d array :
            The values of the beta separation line.
        """
        return 8 * np.log(2) * np.array(freqs) / np.pi**2

    def intersection(self, density, which="first"):
        """
        Frequency at which the PSD and the beta separation line intersect.

        Parameters
        ----------
        density : OscillatorNoise
            A OscillatorNoise object. The correct representation (PSD of
            frequency) is used automatically.
        which : {'first', 'last'}
            When the PSD crosses the beta separation line more than once,
            selects the lowest ('first', default) or highest ('last')
            intersection frequency.

        Returns
        -------
        float :
            the frequency where the two lines intersect in Hz
            (np.inf if they never cross)
        """
        beta_vals = self.values(density.freqs)
        # Crossings are where the sign of (PSD - beta line) flips.
        sign_flips = np.diff(np.sign(density.psd_freq - beta_vals))
        crossings = np.argwhere(sign_flips).flatten()
        if crossings.size == 0:
            return np.inf
        selector = {"first": 0, "last": -1}[which]
        return density.freqs[crossings][selector]

    def linewidth(self, density, f_min=1e3, which="first"):
        """
        The FWHM linewidth according to equation (10) in [1].

        Parameters
        ----------
        density : OscillatorNoise
            A PhaseFreqNoise object. The correct scaling and base (PSD of
            frequency) is used automatically.
        f_min : float
            minimum frequency considered, in Hz. The default of 1e3 Hz
            corresponds to a 1 ms timescale.
        which : {'first', 'last'}
            When there are several intersections with the beta separation
            line, selects the lowest ('first', default) or highest ('last')
            intersection frequency as the upper integration bound.
        """
        f_max = self.intersection(density, which=which)
        in_band = np.where(
            np.logical_and(density.freqs <= f_max, density.freqs >= f_min)
        )
        band_freqs = density.freqs[in_band]
        band_psd = density.values[in_band]
        area = np.trapz(band_psd, x=band_freqs)  # equation (10) in [1]
        return np.sqrt(8 * np.log(2) * area)  # equation (9) in [1]
class AtomShotNoise(FreqModel):
    """
    Atomic (quantum projection) shot noise of an atom interferometer gravimeter.

    Parameters
    ----------
    n_atoms : float
        Number of atoms.
    contrast : float
        Peak-to-peak contrast of the fringe.
    T : float
        Interferometer time in seconds.
    keff : float
        Effective wavevector of the atom interferometer in 1/m.
    """

    def __init__(self, n_atoms, contrast, T, keff, **kwargs):
        # BUG FIX: the interferometer time was previously stored negated
        # (`T=-T`), leaving self.T with the wrong sign for any consumer.
        # values() only uses T**2, so the returned noise was unaffected.
        super().__init__(n_atoms=n_atoms, contrast=contrast, T=T, keff=keff, **kwargs)

    def values(self, freqs):
        """Shot noise limit in m/s² (flat; *freqs* is not used)."""
        sigma_p = 1 / np.sqrt(self.n_atoms)  # atomic shot noise
        sigma_g = 2 * sigma_p / (self.contrast * self.keff * self.T**2)  # in m/s**2
        return sigma_g
|
[
"numpy.trapz",
"numpy.log",
"numpy.logical_and",
"matplotlib.pyplot.subplots",
"numpy.array",
"numpy.sign",
"numpy.log10",
"matplotlib.pyplot.grid",
"numpy.sqrt"
] |
[((1841, 1877), 'matplotlib.pyplot.grid', 'plt.grid', (['(True)'], {'which': '"""both"""', 'ls': '"""-"""'}), "(True, which='both', ls='-')\n", (1849, 1877), True, 'import matplotlib.pyplot as plt\n'), ((21402, 21439), 'numpy.trapz', 'np.trapz', (['psd_vals_over_line'], {'x': 'freqs'}), '(psd_vals_over_line, x=freqs)\n', (21410, 21439), True, 'import numpy as np\n'), ((1585, 1599), 'matplotlib.pyplot.subplots', 'plt.subplots', ([], {}), '()\n', (1597, 1599), True, 'import matplotlib.pyplot as plt\n'), ((5482, 5497), 'numpy.array', 'np.array', (['freqs'], {}), '(freqs)\n', (5490, 5497), True, 'import numpy as np\n'), ((7389, 7400), 'numpy.log10', 'np.log10', (['L'], {}), '(L)\n', (7397, 7400), True, 'import numpy as np\n'), ((12325, 12340), 'numpy.array', 'np.array', (['freqs'], {}), '(freqs)\n', (12333, 12340), True, 'import numpy as np\n'), ((12942, 12958), 'numpy.array', 'np.array', (['values'], {}), '(values)\n', (12950, 12958), True, 'import numpy as np\n'), ((21208, 21270), 'numpy.logical_and', 'np.logical_and', (['(density.freqs <= f_max)', '(density.freqs >= f_min)'], {}), '(density.freqs <= f_max, density.freqs >= f_min)\n', (21222, 21270), True, 'import numpy as np\n'), ((22158, 22179), 'numpy.sqrt', 'np.sqrt', (['self.n_atoms'], {}), '(self.n_atoms)\n', (22165, 22179), True, 'import numpy as np\n'), ((11089, 11107), 'numpy.array', 'np.array', (['exponent'], {}), '(exponent)\n', (11097, 11107), True, 'import numpy as np\n'), ((19128, 19143), 'numpy.array', 'np.array', (['freqs'], {}), '(freqs)\n', (19136, 19143), True, 'import numpy as np\n'), ((12483, 12539), 'numpy.logical_and', 'np.logical_and', (['(freqs > previous_f_edge)', '(freqs <= f_edge)'], {}), '(freqs > previous_f_edge, freqs <= f_edge)\n', (12497, 12539), True, 'import numpy as np\n'), ((14483, 14526), 'numpy.log10', 'np.log10', (['(4 * kb * self.temperature / 0.001)'], {}), '(4 * kb * self.temperature / 0.001)\n', (14491, 14526), True, 'import numpy as np\n'), ((16105, 16200), 'numpy.log10', 
'np.log10', (['(2 * e * self.radiant_sensitivity * self.optical_power * self.resistivity /\n 0.001)'], {}), '(2 * e * self.radiant_sensitivity * self.optical_power * self.\n resistivity / 0.001)\n', (16113, 16200), True, 'import numpy as np\n'), ((17988, 18012), 'numpy.log10', 'np.log10', (['self.divide_by'], {}), '(self.divide_by)\n', (17996, 18012), True, 'import numpy as np\n'), ((19116, 19125), 'numpy.log', 'np.log', (['(2)'], {}), '(2)\n', (19122, 19125), True, 'import numpy as np\n'), ((21467, 21476), 'numpy.log', 'np.log', (['(2)'], {}), '(2)\n', (21473, 21476), True, 'import numpy as np\n'), ((6886, 6901), 'numpy.array', 'np.array', (['freqs'], {}), '(freqs)\n', (6894, 6901), True, 'import numpy as np\n'), ((20142, 20171), 'numpy.sign', 'np.sign', (['(psd_vals - beta_vals)'], {}), '(psd_vals - beta_vals)\n', (20149, 20171), True, 'import numpy as np\n')]
|