code stringlengths 22 1.05M | apis listlengths 1 3.31k | extract_api stringlengths 75 3.25M |
|---|---|---|
"""Adds a unique constraint to name and url_name on bets.
Revision ID: 7e15c6b3d73b
Revises: <PASSWORD>
Create Date: 2016-08-27 18:15:32.180825
"""
# revision identifiers, used by Alembic.
revision = '<KEY>'
down_revision = '<KEY>7'
from alembic import op
import sqlalchemy as sa
def upgrade():
### commands auto generated by Alembic - please adjust! ###
op.create_unique_constraint(None, 'bet', ['url_name'])
op.create_unique_constraint(None, 'bet', ['name'])
### end Alembic commands ###
def downgrade():
### commands auto generated by Alembic - please adjust! ###
op.drop_constraint(None, 'bet', type_='unique')
op.drop_constraint(None, 'bet', type_='unique')
### end Alembic commands ###
| [
"alembic.op.drop_constraint",
"alembic.op.create_unique_constraint"
] | [((369, 423), 'alembic.op.create_unique_constraint', 'op.create_unique_constraint', (['None', '"""bet"""', "['url_name']"], {}), "(None, 'bet', ['url_name'])\n", (396, 423), False, 'from alembic import op\n'), ((428, 478), 'alembic.op.create_unique_constraint', 'op.create_unique_constraint', (['None', '"""bet"""', "['name']"], {}), "(None, 'bet', ['name'])\n", (455, 478), False, 'from alembic import op\n'), ((599, 646), 'alembic.op.drop_constraint', 'op.drop_constraint', (['None', '"""bet"""'], {'type_': '"""unique"""'}), "(None, 'bet', type_='unique')\n", (617, 646), False, 'from alembic import op\n'), ((651, 698), 'alembic.op.drop_constraint', 'op.drop_constraint', (['None', '"""bet"""'], {'type_': '"""unique"""'}), "(None, 'bet', type_='unique')\n", (669, 698), False, 'from alembic import op\n')] |
import click
def error(msg, logger=None):
    """Print an error message in red to stderr and optionally log it.

    Args:
        msg: Text to display.
        logger: Optional ``logging.Logger``; when provided, the message
            is also recorded via ``logger.error``.
    """
    # err=True routes the output to stderr instead of stdout.
    click.secho(msg, fg='red', err=True)
    if logger:
        logger.error(msg)
def warn(msg, logger=None):
    """Print a warning message in yellow to stderr and optionally log it.

    Args:
        msg: Text to display.
        logger: Optional ``logging.Logger``; when provided, the message
            is also recorded via ``logger.warning``.
    """
    # FIX: the original docstring promised stderr but omitted err=True,
    # so the warning went to stdout — now consistent with error().
    click.secho(msg, fg='yellow', err=True)
    if logger:
        logger.warning(msg)
def info(msg, logger=None):
    """Print an informational message in green to stdout and optionally log it.

    Args:
        msg: Text to display.
        logger: Optional ``logging.Logger``; when provided, the message
            is also recorded via ``logger.info``.
    """
    click.secho(msg, fg='green')
    if logger:
        logger.info(msg)
"click.secho"
] | [((102, 138), 'click.secho', 'click.secho', (['msg'], {'fg': '"""red"""', 'err': '(True)'}), "(msg, fg='red', err=True)\n", (113, 138), False, 'import click\n'), ((261, 290), 'click.secho', 'click.secho', (['msg'], {'fg': '"""yellow"""'}), "(msg, fg='yellow')\n", (272, 290), False, 'import click\n'), ((374, 402), 'click.secho', 'click.secho', (['msg'], {'fg': '"""green"""'}), "(msg, fg='green')\n", (385, 402), False, 'import click\n')] |
# -*- coding: utf-8 -*-
# Entry point: starts the PySpark Plaso WebAPI behind a CherryPy server.
if __name__ == "__main__":
    # Spark Session and Spark Context
    from pyspark.sql import SparkSession
    spark = SparkSession.builder \
        .appName("PySpark Plaso WebAPI Application") \
        .getOrCreate()
    sc = spark.sparkContext
    from os import getenv
    from plaso.tarzan.app.pyspark_plaso_webapp import configure_app
    # Build the WSGI application; the HDFS data location can be
    # overridden via the PP_HDFS_URI environment variable.
    app = configure_app(sc, getenv("PP_HDFS_URI", "hdfs://hadoop@namenode:8020/test_data"))
    # Enable WSGI access logging via Paste
    from paste.translogger import TransLogger
    app_logged = TransLogger(app)
    # Mount the WSGI callable object (app) on the root directory
    import cherrypy
    cherrypy.tree.graft(app_logged, '/')
    # Set the configuration of the web server
    # (port/host are overridable via PP_PORT / PP_HOST).
    cherrypy.config.update({
        'engine.autoreload.on': True,
        'log.screen': True,
        'server.socket_port': int(getenv("PP_PORT", 54380)),
        'server.socket_host': getenv("PP_HOST", '0.0.0.0'),
        # remove size-limit for file uploads
        'server.max_request_body_size': 0,
    })
    # Start the CherryPy WSGI web server and block until shutdown.
    cherrypy.engine.start()
    cherrypy.engine.block()
| [
"cherrypy.engine.block",
"os.getenv",
"cherrypy.tree.graft",
"cherrypy.engine.start",
"paste.translogger.TransLogger",
"pyspark.sql.SparkSession.builder.appName"
] | [((568, 584), 'paste.translogger.TransLogger', 'TransLogger', (['app'], {}), '(app)\n', (579, 584), False, 'from paste.translogger import TransLogger\n'), ((675, 711), 'cherrypy.tree.graft', 'cherrypy.tree.graft', (['app_logged', '"""/"""'], {}), "(app_logged, '/')\n", (694, 711), False, 'import cherrypy\n'), ((1116, 1139), 'cherrypy.engine.start', 'cherrypy.engine.start', ([], {}), '()\n', (1137, 1139), False, 'import cherrypy\n'), ((1144, 1167), 'cherrypy.engine.block', 'cherrypy.engine.block', ([], {}), '()\n', (1165, 1167), False, 'import cherrypy\n'), ((397, 459), 'os.getenv', 'getenv', (['"""PP_HDFS_URI"""', '"""hdfs://hadoop@namenode:8020/test_data"""'], {}), "('PP_HDFS_URI', 'hdfs://hadoop@namenode:8020/test_data')\n", (403, 459), False, 'from os import getenv\n'), ((144, 208), 'pyspark.sql.SparkSession.builder.appName', 'SparkSession.builder.appName', (['"""PySpark Plaso WebAPI Application"""'], {}), "('PySpark Plaso WebAPI Application')\n", (172, 208), False, 'from pyspark.sql import SparkSession\n'), ((945, 973), 'os.getenv', 'getenv', (['"""PP_HOST"""', '"""0.0.0.0"""'], {}), "('PP_HOST', '0.0.0.0')\n", (951, 973), False, 'from os import getenv\n'), ((888, 912), 'os.getenv', 'getenv', (['"""PP_PORT"""', '(54380)'], {}), "('PP_PORT', 54380)\n", (894, 912), False, 'from os import getenv\n')] |
# Copyright 2021 Huawei Technologies Co., Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""
dataset classes
"""
import cv2
import numpy as np
import src.utils.img
from src.dataset.MPIIDataLoader import flipped_parts
class GenerateHeatmap:
    """Render ground-truth joint heatmaps for training.

    For every visible keypoint a pre-rendered Gaussian blob is stamped
    onto that joint's channel of a (num_parts, output_res, output_res)
    map, clipped at the map borders and merged with ``np.maximum``.
    """

    def __init__(self, output_res, num_parts):
        self.output_res = output_res
        self.num_parts = num_parts
        self.sigma = self.output_res / 64
        # Pre-render a single Gaussian patch of side 6*sigma+3; __call__
        # crops and pastes it at each keypoint location.
        side = 6 * self.sigma + 3
        xs = np.arange(0, side, 1, float)
        ys = xs[:, np.newaxis]
        cx = cy = 3 * self.sigma + 1
        self.g = np.exp(-((xs - cx) ** 2 + (ys - cy) ** 2) / (2 * self.sigma ** 2))

    def __call__(self, keypoints):
        res = self.output_res
        sigma = self.sigma
        hms = np.zeros(shape=(self.num_parts, res, res), dtype=np.float32)
        for person in keypoints:
            for joint, pt in enumerate(person):
                # Non-positive x marks an invisible/absent keypoint.
                if not pt[0] > 0:
                    continue
                x, y = int(pt[0]), int(pt[1])
                if x < 0 or y < 0 or x >= res or y >= res:
                    continue
                # Patch bounding box in map coordinates (may overhang).
                ul = int(x - 3 * sigma - 1), int(y - 3 * sigma - 1)
                br = int(x + 3 * sigma + 2), int(y + 3 * sigma + 2)
                # Clip source patch (a:b, c:d) and destination window
                # (aa:bb, cc:dd) against the map borders.
                c, d = max(0, -ul[0]), min(br[0], res) - ul[0]
                a, b = max(0, -ul[1]), min(br[1], res) - ul[1]
                cc, dd = max(0, ul[0]), min(br[0], res)
                aa, bb = max(0, ul[1]), min(br[1], res)
                hms[joint, aa:bb, cc:dd] = np.maximum(hms[joint, aa:bb, cc:dd], self.g[a:b, c:d])
        return hms
class DatasetGenerator:
    """
    mindspore general dataset generator

    Wraps an MPII-style data source (``ds``) and a list of sample
    indices, producing (image, heatmaps) float32 pairs with random
    scale/rotation, random horizontal flip and color augmentation.
    """
    def __init__(self, input_res, output_res, ds, index):
        # input_res: side length of the network input crop.
        # output_res: side length of the target heatmaps.
        self.input_res = input_res
        self.output_res = output_res
        # 16 joints — MPII keypoint layout (see flipped_parts["mpii"]).
        self.generateHeatmap = GenerateHeatmap(self.output_res, 16)
        self.ds = ds
        self.index = index
    def __len__(self):
        return len(self.index)
    def __getitem__(self, idx):
        # print(f"loading...{idx}")
        return self.loadImage(self.index[idx])
    def loadImage(self, idx):
        """
        load and preprocess image

        Returns a tuple (input image of shape (input_res, input_res, 3)
        scaled to [0, 1], target heatmaps from GenerateHeatmap), both
        float32.
        """
        ds = self.ds
        # Load + Crop
        orig_img = ds.get_img(idx)
        orig_keypoints = ds.get_kps(idx)
        # Keep an untouched copy to detect originally-invisible joints
        # ((0, 0) coordinates) after augmentation.
        kptmp = orig_keypoints.copy()
        c = ds.get_center(idx)
        s = ds.get_scale(idx)
        cropped = src.utils.img.crop(orig_img, c, s, (self.input_res, self.input_res))
        # Map keypoints into the cropped input frame (only visible ones,
        # i.e. x > 0). NOTE(review): keypoints appear to be laid out as
        # (person, joint, xy) with a single person at index 0 — confirm.
        for i in range(np.shape(orig_keypoints)[1]):
            if orig_keypoints[0, i, 0] > 0:
                orig_keypoints[0, i, :2] = src.utils.img.transform(
                    orig_keypoints[0, i, :2], c, s, (self.input_res, self.input_res)
                )
        keypoints = np.copy(orig_keypoints)
        # Random Crop
        height, width = cropped.shape[0:2]
        center = np.array((width / 2, height / 2))
        scale = max(height, width) / 200
        aug_rot = 0
        # Random rotation in [-30, 30] degrees and scale in [0.75, 1.25].
        aug_rot = (np.random.random() * 2 - 1) * 30.0
        aug_scale = np.random.random() * (1.25 - 0.75) + 0.75
        scale *= aug_scale
        # Separate affine matrices for heatmap-resolution keypoints
        # (mat_mask) and input-resolution pixels (mat).
        mat_mask = src.utils.img.get_transform(center, scale, (self.output_res, self.output_res), aug_rot)[:2]
        mat = src.utils.img.get_transform(center, scale, (self.input_res, self.input_res), aug_rot)[:2]
        inp = cv2.warpAffine(cropped, mat, (self.input_res, self.input_res)).astype(np.float32) / 255
        keypoints[:, :, 0:2] = src.utils.img.kpt_affine(keypoints[:, :, 0:2], mat_mask)
        # With probability 1/2: apply color augmentation and mirror the
        # image horizontally, remapping joints via flipped_parts.
        if np.random.randint(2) == 0:
            inp = self.preprocess(inp)
            inp = inp[:, ::-1]
            keypoints = keypoints[:, flipped_parts["mpii"]]
            keypoints[:, :, 0] = self.output_res - keypoints[:, :, 0]
            orig_keypoints = orig_keypoints[:, flipped_parts["mpii"]]
            orig_keypoints[:, :, 0] = self.input_res - orig_keypoints[:, :, 0]
        # If keypoint is invisible, set to 0
        for i in range(np.shape(orig_keypoints)[1]):
            if kptmp[0, i, 0] == 0 and kptmp[0, i, 1] == 0:
                keypoints[0, i, 0] = 0
                keypoints[0, i, 1] = 0
                orig_keypoints[0, i, 0] = 0
                orig_keypoints[0, i, 1] = 0
        # Generate target heatmap
        heatmaps = self.generateHeatmap(keypoints)
        return inp.astype(np.float32), heatmaps.astype(np.float32)
    def preprocess(self, data):
        """
        preprocess images

        Applies random hue/saturation jitter (in HSV space), random
        brightness and random contrast to an RGB float image; the
        result is clamped to [0, 1].
        """
        # Random hue and saturation
        data = cv2.cvtColor(data, cv2.COLOR_RGB2HSV)
        delta = (np.random.random() * 2 - 1) * 0.2
        # Shift hue (degrees) with wrap-around; +360 keeps the operand
        # non-negative before the modulo.
        data[:, :, 0] = np.mod(data[:, :, 0] + (delta * 360 + 360.0), 360.0)
        delta_sature = np.random.random() + 0.5
        data[:, :, 1] *= delta_sature
        data[:, :, 1] = np.maximum(np.minimum(data[:, :, 1], 1), 0)
        data = cv2.cvtColor(data, cv2.COLOR_HSV2RGB)
        # Random brightness
        delta = (np.random.random() * 2 - 1) * 0.3
        data += delta
        # Random contrast
        mean = data.mean(axis=2, keepdims=True)
        data = (data - mean) * (np.random.random() + 0.5) + mean
        data = np.minimum(np.maximum(data, 0), 1)
        return data
| [
"numpy.copy",
"numpy.shape",
"cv2.warpAffine",
"numpy.minimum",
"numpy.random.random",
"numpy.exp",
"numpy.array",
"numpy.zeros",
"numpy.random.randint",
"cv2.cvtColor",
"numpy.maximum",
"numpy.mod",
"numpy.arange"
] | [((1091, 1119), 'numpy.arange', 'np.arange', (['(0)', 'size', '(1)', 'float'], {}), '(0, size, 1, float)\n', (1100, 1119), True, 'import numpy as np\n'), ((1212, 1271), 'numpy.exp', 'np.exp', (['(-((x - x0) ** 2 + (y - y0) ** 2) / (2 * sigma ** 2))'], {}), '(-((x - x0) ** 2 + (y - y0) ** 2) / (2 * sigma ** 2))\n', (1218, 1271), True, 'import numpy as np\n'), ((1322, 1411), 'numpy.zeros', 'np.zeros', ([], {'shape': '(self.num_parts, self.output_res, self.output_res)', 'dtype': 'np.float32'}), '(shape=(self.num_parts, self.output_res, self.output_res), dtype=np\n .float32)\n', (1330, 1411), True, 'import numpy as np\n'), ((3452, 3475), 'numpy.copy', 'np.copy', (['orig_keypoints'], {}), '(orig_keypoints)\n', (3459, 3475), True, 'import numpy as np\n'), ((3559, 3592), 'numpy.array', 'np.array', (['(width / 2, height / 2)'], {}), '((width / 2, height / 2))\n', (3567, 3592), True, 'import numpy as np\n'), ((5206, 5243), 'cv2.cvtColor', 'cv2.cvtColor', (['data', 'cv2.COLOR_RGB2HSV'], {}), '(data, cv2.COLOR_RGB2HSV)\n', (5218, 5243), False, 'import cv2\n'), ((5319, 5371), 'numpy.mod', 'np.mod', (['(data[:, :, 0] + (delta * 360 + 360.0))', '(360.0)'], {}), '(data[:, :, 0] + (delta * 360 + 360.0), 360.0)\n', (5325, 5371), True, 'import numpy as np\n'), ((5542, 5579), 'cv2.cvtColor', 'cv2.cvtColor', (['data', 'cv2.COLOR_HSV2RGB'], {}), '(data, cv2.COLOR_HSV2RGB)\n', (5554, 5579), False, 'import cv2\n'), ((4217, 4237), 'numpy.random.randint', 'np.random.randint', (['(2)'], {}), '(2)\n', (4234, 4237), True, 'import numpy as np\n'), ((5396, 5414), 'numpy.random.random', 'np.random.random', ([], {}), '()\n', (5412, 5414), True, 'import numpy as np\n'), ((5494, 5522), 'numpy.minimum', 'np.minimum', (['data[:, :, 1]', '(1)'], {}), '(data[:, :, 1], 1)\n', (5504, 5522), True, 'import numpy as np\n'), ((5848, 5867), 'numpy.maximum', 'np.maximum', (['data', '(0)'], {}), '(data, 0)\n', (5858, 5867), True, 'import numpy as np\n'), ((3187, 3211), 'numpy.shape', 'np.shape', 
(['orig_keypoints'], {}), '(orig_keypoints)\n', (3195, 3211), True, 'import numpy as np\n'), ((3730, 3748), 'numpy.random.random', 'np.random.random', ([], {}), '()\n', (3746, 3748), True, 'import numpy as np\n'), ((4662, 4686), 'numpy.shape', 'np.shape', (['orig_keypoints'], {}), '(orig_keypoints)\n', (4670, 4686), True, 'import numpy as np\n'), ((2196, 2248), 'numpy.maximum', 'np.maximum', (['hms[idx, aa:bb, cc:dd]', 'self.g[a:b, c:d]'], {}), '(hms[idx, aa:bb, cc:dd], self.g[a:b, c:d])\n', (2206, 2248), True, 'import numpy as np\n'), ((3675, 3693), 'numpy.random.random', 'np.random.random', ([], {}), '()\n', (3691, 3693), True, 'import numpy as np\n'), ((4030, 4092), 'cv2.warpAffine', 'cv2.warpAffine', (['cropped', 'mat', '(self.input_res, self.input_res)'], {}), '(cropped, mat, (self.input_res, self.input_res))\n', (4044, 4092), False, 'import cv2\n'), ((5261, 5279), 'numpy.random.random', 'np.random.random', ([], {}), '()\n', (5277, 5279), True, 'import numpy as np\n'), ((5626, 5644), 'numpy.random.random', 'np.random.random', ([], {}), '()\n', (5642, 5644), True, 'import numpy as np\n'), ((5789, 5807), 'numpy.random.random', 'np.random.random', ([], {}), '()\n', (5805, 5807), True, 'import numpy as np\n')] |
from aqt import mw
from .config import get, set
def prepare_deck_to_ease_range():
    """Normalize the stored ease configuration.

    Migrates the legacy ``deck_to_ease`` mapping (one ease value per
    deck) into ``deck_to_ease_range`` (an ``(ease, ease)`` tuple per
    deck), drops entries for decks that no longer exist in Anki, and
    coerces deck ids to ``int`` before writing the cleaned mapping back.
    """
    deck_to_ease_range = get('deck_to_ease_range') or {}
    # For backwards compatibility: fold the old single-value mapping in
    # and clear it from the config afterwards.
    deck_to_ease = get('deck_to_ease') or {}
    deck_to_ease_range.update(**_to_deck_to_ease_range(deck_to_ease))
    set('deck_to_ease', None)
    # Remove entries of decks that do not exist in anki and ensure the
    # deck ids are of type int. The set of existing deck ids is built
    # once instead of querying Anki for every entry.
    existing_ids = set(mw.col.decks.allIds())
    cleaned = {
        int(deck_id): ease_range
        for deck_id, ease_range in deck_to_ease_range.items()
        if str(deck_id) in existing_ids
    }
    set('deck_to_ease_range', cleaned)
def _to_deck_to_ease_range(deck_to_ease):
converted = {
deck_id : (ease, ease)
for deck_id, ease in deck_to_ease.items()
}
return converted
| [
"aqt.mw.col.decks.allIds"
] | [((592, 613), 'aqt.mw.col.decks.allIds', 'mw.col.decks.allIds', ([], {}), '()\n', (611, 613), False, 'from aqt import mw\n')] |
import os
import nextcord as discord
from nextcord.ext import commands
import pytube
class Youtube(commands.Cog):
    """Cog that downloads the audio track of a YouTube video on demand."""

    def __init__(self, client):
        self.client = client

    @commands.command(name = 'youtube', aliases = ['yt'])
    async def youtube(self, context, url):
        """Download the audio of ``url`` and reply with it as music.mp3."""
        music_dir = 'data/music'
        # Check if the 20 internal files limit has been exceeded; if so,
        # wipe the cache directory before downloading more.
        if len(os.listdir(music_dir)) > 20:
            for name in os.listdir(music_dir):
                # BUG FIX: os.listdir() returns bare file names, so
                # os.remove() needs the directory joined back on.
                os.remove(os.path.join(music_dir, name))
        # Pytube things
        downloader = pytube.YouTube(url)
        music = downloader.streams.filter(only_audio = True).first()
        out_file = music.download(output_path = music_dir)
        # Rename the download so it carries an .mp3 extension
        base, ext = os.path.splitext(out_file)
        new_file = base + '.mp3'
        os.rename(out_file, new_file)
        # Send the file to Discord
        music_file = discord.File(new_file, filename = 'music.mp3')
        await context.reply(file = music_file)
def setup(client):
    """Entry point used by nextcord to register this extension's cog."""
    cog = Youtube(client)
    client.add_cog(cog)
"nextcord.File",
"os.listdir",
"nextcord.ext.commands.command",
"os.rename",
"pytube.YouTube",
"os.path.splitext",
"os.remove"
] | [((187, 235), 'nextcord.ext.commands.command', 'commands.command', ([], {'name': '"""youtube"""', 'aliases': "['yt']"}), "(name='youtube', aliases=['yt'])\n", (203, 235), False, 'from nextcord.ext import commands\n'), ((391, 415), 'os.listdir', 'os.listdir', (['"""data/music"""'], {}), "('data/music')\n", (401, 415), False, 'import os\n'), ((600, 619), 'pytube.YouTube', 'pytube.YouTube', (['url'], {}), '(url)\n', (614, 619), False, 'import pytube\n'), ((809, 835), 'os.path.splitext', 'os.path.splitext', (['out_file'], {}), '(out_file)\n', (825, 835), False, 'import os\n'), ((877, 906), 'os.rename', 'os.rename', (['out_file', 'new_file'], {}), '(out_file, new_file)\n', (886, 906), False, 'import os\n'), ((964, 1008), 'nextcord.File', 'discord.File', (['new_file'], {'filename': '"""music.mp3"""'}), "(new_file, filename='music.mp3')\n", (976, 1008), True, 'import nextcord as discord\n'), ((496, 520), 'os.listdir', 'os.listdir', (['"""data/music"""'], {}), "('data/music')\n", (506, 520), False, 'import os\n'), ((538, 553), 'os.remove', 'os.remove', (['file'], {}), '(file)\n', (547, 553), False, 'import os\n')] |
"""
"""
import re
from collections import namedtuple
from functools import lru_cache
from lexref.model import Value
__all__ = ['ListItemsAndPatterns']

# Base pattern matching roman numerals, taken from the shared Value tag
# patterns; strip() removes the anchoring/boundary characters so it can
# be embedded inside the bracketed item patterns below.
romans_pattern = Value.tag_2_pattern('EN')['ROM_L'].pattern.strip('b\\()')
_eur_lex_item_patterns_en = {  # key: (itemization-character-pattern, ordered [bool], first two items, decorations)
    # TODO: Amendments could cause Itemizations of the type "5a. ". Keep that in mind and see if / how the code
    # TODO: can cope with that.
    'nump': (re.compile(r'^[1-9][0-9]{,3}\.' + chr(160) + '{,3}', flags=re.UNICODE), True,
             ('1', '2'), '(). ' + chr(160)),  # chr(160) is a non-breaking space
    'numpt': (re.compile(r'^[0-9]{1,3}\.?(?!([0-9/();]| of))'), True,  # TODO: This pattern does not belong here!
              ('1', '2'), '.'),  # TODO: => get rid of it!
    'numbr': (re.compile(r'^\([0-9]{1,3}\)'), True,
              ('1', '2'), '()'),  # 2
    'alpha': (re.compile(r'^\([a-z]\)'), True,
              ('a', 'b'), '()'),  # 3
    'roman': (re.compile(r'^\((' + romans_pattern + r')\)'), True,
              ('i', 'ii'), '()'),
    'dash': (re.compile(u'^(—|' + chr(8212) + ')', flags=re.UNICODE), False, None, None)
}
# Spanish variant: identical to EN except the 'numpt' negative lookahead
# excludes " de" instead of " of".
_eur_lex_item_patterns_es = {  # key: (itemization-character-pattern, ordered [bool], first two items, decorations)
    'nump': (re.compile(r'^[1-9][0-9]{,3}\.' + chr(160) + '{,3}', flags=re.UNICODE), True,
             ('1', '2'), '(). ' + chr(160)),
    'numpt': (re.compile(r'^[0-9]{1,3}\.?(?!([0-9/();]| de))'), True,
              # TODO: This pattern does not belong here!
              ('1', '2'), '.'),  # TODO: => get rid of it!
    'numbr': (re.compile(r'^\([0-9]{1,3}\)'), True,
              ('1', '2'), '()'),  # 2
    'alpha': (re.compile(r'^\([a-z]\)'), True,
              ('a', 'b'), '()'),  # 3
    'roman': (re.compile(r'^\((' + romans_pattern + r')\)'), True,
              ('i', 'ii'), '()'),
    'dash': (re.compile(u'^(—|' + chr(8212) + ')', flags=re.UNICODE),
             False, None, None)
}
# German variant: no plain "1."/"1" numbering styles; numbered points
# use the bracketed "(1)" form.
_eur_lex_item_patterns_de = {  # key: (itemization-character-pattern, ordered [bool], first two items, decorations)
    'nump': (re.compile(r'^\([0-9]{1,3}\)'), True, ('1', '2'), '()'),  # 2
    'alpha': (re.compile(r'^\([a-z]\)'), True, ('a', 'b'), '()'),  # 3
    'roman': (re.compile(r'^\((' + romans_pattern + r')\)'), True, ('i', 'ii'), '()'),
    'dash': (re.compile(u'^(—|' + chr(8212) + ')', flags=re.UNICODE),
             False, None, None)
}
# Itemization nesting precedence, outermost first; used to break ties
# when a label matches more than one pattern.
_eur_lex_item_patterns_hierarchy = ['nump', 'numpt', 'numbr', 'alpha', 'roman', 'dash']
class ListItemPattern:
    """Description of one list itemization style.

    Bundles the compiled label pattern, whether the itemization is
    ordered, the first two expected labels and the decoration
    characters surrounding a label.
    """

    # Named pair for the first two expected item labels, e.g. ('a', 'b').
    FirstSecond = namedtuple('FirstSecond', ['first', 'second'])

    def __init__(self, tag, item_pattern, ordered, first_two_items, decoration):
        # tag is used as CSS class on the surface
        self.tag = tag
        self.item_pattern = item_pattern
        self.ordered = ordered
        if first_two_items is None:
            self.first_two_items = None
        else:
            self.first_two_items = self.FirstSecond(*first_two_items)
        self.decoration = decoration

    @classmethod
    @lru_cache()
    def create(cls, tag, item_pattern, ordered, first_two_items, decoration):
        """Memoized constructor: identical arguments yield the same instance."""
        return cls(tag, item_pattern, ordered, first_two_items, decoration)
@lru_cache()
class ListItemsAndPatterns:
    """Per-(language, domain) registry of itemization patterns.

    The class itself is lru_cache-decorated, so constructing it with the
    same arguments returns the same (memoized) instance.
    """
    # Result of classifying one item label: the candidate tag set and the
    # bare label ("inner") with decorations stripped.
    TagProposal = namedtuple('TagProposal', ['tags', 'inner'])
    def __init__(self, language, document_domain, known_firsts=False):
        if document_domain.lower() == 'eu':
            try:
                # NOTE(review): eval() on a module-level name lookup —
                # works, but a dict keyed by language would be safer.
                _eur_lex_item_patterns = eval(
                    f'_eur_lex_item_patterns_{language.lower()}')
            except NameError:
                raise NotImplementedError(
                    f'It seems that the time has come to implement '
                    f'language {language} for domain eu.'
                )
            else:
                self.list_item_patterns = {
                    key: ListItemPattern.create(key, *value)
                    for key, value in _eur_lex_item_patterns.items()
                }
                self.known_firsts = known_firsts
                # Alternation over all item patterns (anchors stripped
                # and re-applied once at the front).
                self.list_label_generic = re.compile('^(' + '|'.join(
                    ['(' + x.item_pattern.pattern.strip('^') + ')'
                     for x in self.list_item_patterns.values()]) + ')')
                self.tag_hierarchy = _eur_lex_item_patterns_hierarchy
        else:
            raise NotImplementedError(f'It seems that the time has come to '
                                      f'implement domain {document_domain}')
    def get_list_item_tag(self, arg, force_ambivalence_resolution=True):
        """Classify one label (str) or a sequence of labels (list).

        Returns a single TagProposal for a str argument, or a list of
        TagProposals (with ambivalences resolved in context) for a list.
        """
        if type(arg) is str:
            if force_ambivalence_resolution:
                # Wrap in a list so the list branch resolves ambiguity.
                return self.get_list_item_tag([arg])[0]
            else:
                # Collect every pattern that matches; all matches must
                # agree on the stripped inner label.
                tag_candidates = set()
                inner = None
                for list_item_pattern in self.list_item_patterns.values():
                    m = list_item_pattern.item_pattern.match(arg)
                    if m is not None:
                        if inner is None:
                            inner = m.group(0).strip(list_item_pattern.decoration)
                        elif inner != m.group(0).strip(list_item_pattern.decoration):
                            raise RuntimeError("Unexpected ambivalence (type 0) "
                                               "within ListItemsHandler")
                        tag_candidates |= {list_item_pattern.tag}
                return self.TagProposal(tag_candidates, inner)
        elif type(arg) is list:
            tags_list = [
                self.get_list_item_tag(it, force_ambivalence_resolution=False)
                for it in arg
            ]
            self._resolve_ambivalences(tags_list)
            return tags_list
    def __getitem__(self, item):
        # Look up a ListItemPattern by its tag name.
        return self.list_item_patterns[item]
    def _resolve_ambivalences(self, tag_candidates_list):
        """Shrink multi-tag TagProposals in place using their neighbors.

        :param tag_candidates_list: list of TagProposal, mutated in place.
        :return: None
        TODO: This routine works more or less fine. However, it does not
        really take into account all the context sensitivity that may arise.
        Furthermore, at least two of the test cases have no unique solution,
        but this routine simply chooses one possible solution. That is more
        than questionable. Furthermore, this routine does not take into
        account the full nested structure of itemization, which would
        clearly help to make the outcome of this task more correct for
        all possible input cases.
        """
        def ambivalence_resolvable(tag_list):
            # True if the candidate set contains at least two tags whose
            # patterns actually differ (ambiguity worth resolving).
            for tag_l in tag_list:
                for tag_r in tag_list:
                    if tag_r > tag_l:
                        if self[tag_l] != self[tag_r]:
                            return True
            return False
        # TODO: distinction between two types of ambivalence:
        ambivalent_cases = [k for k, (tags, inner) in enumerate(tag_candidates_list)
                            if ambivalence_resolvable(tags)]
        # TODO: Not resolvable cases must be handled via the hierarchy
        for k in ambivalent_cases:
            case = tag_candidates_list[k]
            if k < len(tag_candidates_list) - 1:
                subsequent = tag_candidates_list[k + 1]
                if k + 1 not in ambivalent_cases:
                    # If the adjacent item is not ambivalent. The tag of the subsequent is it
                    if subsequent.tags.issubset(case.tags) \
                            and self[subsequent.tags.copy().pop()].first_two_items.first != subsequent.inner:
                        tag_candidates_list[k] = self.TagProposal(subsequent.tags, case.inner)
                        continue
            if k > 0:  # No successor of case but a precedent (of course)
                preceding = tag_candidates_list[k - 1]
                if k - 1 not in ambivalent_cases:
                    if preceding.tags.issubset(case.tags):  # and case is not first with respect to preceding tag
                        if self[preceding.tags.copy().pop()].first_two_items.first != case.inner:
                            tag_candidates_list[k] = self.TagProposal(preceding.tags, case.inner)
                            continue
                        else:
                            case.tags.remove(preceding.tags.copy().pop())
                            continue
            for tag in self.tag_hierarchy:  # map to hierarchy and take the first one.
                if tag in case.tags:
                    tag_candidates_list[k] = self.TagProposal({tag}, case.inner)
                    continue
        # Recurse while any proposal still carries more than one tag.
        if len([_ for _ in tag_candidates_list if len(_.tags) > 1]) > 0:
            self._resolve_ambivalences(tag_candidates_list)
| [
"functools.lru_cache",
"lexref.model.Value.tag_2_pattern",
"collections.namedtuple",
"re.compile"
] | [((3319, 3330), 'functools.lru_cache', 'lru_cache', ([], {}), '()\n', (3328, 3330), False, 'from functools import lru_cache\n'), ((2611, 2657), 'collections.namedtuple', 'namedtuple', (['"""FirstSecond"""', "['first', 'second']"], {}), "('FirstSecond', ['first', 'second'])\n", (2621, 2657), False, 'from collections import namedtuple\n'), ((3092, 3103), 'functools.lru_cache', 'lru_cache', ([], {}), '()\n', (3101, 3103), False, 'from functools import lru_cache\n'), ((3378, 3422), 'collections.namedtuple', 'namedtuple', (['"""TagProposal"""', "['tags', 'inner']"], {}), "('TagProposal', ['tags', 'inner'])\n", (3388, 3422), False, 'from collections import namedtuple\n'), ((645, 693), 're.compile', 're.compile', (['"""^[0-9]{1,3}\\\\.?(?!([0-9/();]| of))"""'], {}), "('^[0-9]{1,3}\\\\.?(?!([0-9/();]| of))')\n", (655, 693), False, 'import re\n'), ((826, 857), 're.compile', 're.compile', (['"""^\\\\([0-9]{1,3}\\\\)"""'], {}), "('^\\\\([0-9]{1,3}\\\\)')\n", (836, 857), False, 'import re\n'), ((916, 942), 're.compile', 're.compile', (['"""^\\\\([a-z]\\\\)"""'], {}), "('^\\\\([a-z]\\\\)')\n", (926, 942), False, 'import re\n'), ((1001, 1046), 're.compile', 're.compile', (["('^\\\\((' + romans_pattern + ')\\\\)')"], {}), "('^\\\\((' + romans_pattern + ')\\\\)')\n", (1011, 1046), False, 'import re\n'), ((1452, 1500), 're.compile', 're.compile', (['"""^[0-9]{1,3}\\\\.?(?!([0-9/();]| de))"""'], {}), "('^[0-9]{1,3}\\\\.?(?!([0-9/();]| de))')\n", (1462, 1500), False, 'import re\n'), ((1646, 1677), 're.compile', 're.compile', (['"""^\\\\([0-9]{1,3}\\\\)"""'], {}), "('^\\\\([0-9]{1,3}\\\\)')\n", (1656, 1677), False, 'import re\n'), ((1736, 1762), 're.compile', 're.compile', (['"""^\\\\([a-z]\\\\)"""'], {}), "('^\\\\([a-z]\\\\)')\n", (1746, 1762), False, 'import re\n'), ((1821, 1866), 're.compile', 're.compile', (["('^\\\\((' + romans_pattern + ')\\\\)')"], {}), "('^\\\\((' + romans_pattern + ')\\\\)')\n", (1831, 1866), False, 'import re\n'), ((2148, 2179), 're.compile', 
're.compile', (['"""^\\\\([0-9]{1,3}\\\\)"""'], {}), "('^\\\\([0-9]{1,3}\\\\)')\n", (2158, 2179), False, 'import re\n'), ((2224, 2250), 're.compile', 're.compile', (['"""^\\\\([a-z]\\\\)"""'], {}), "('^\\\\([a-z]\\\\)')\n", (2234, 2250), False, 'import re\n'), ((2295, 2340), 're.compile', 're.compile', (["('^\\\\((' + romans_pattern + ')\\\\)')"], {}), "('^\\\\((' + romans_pattern + ')\\\\)')\n", (2305, 2340), False, 'import re\n'), ((172, 197), 'lexref.model.Value.tag_2_pattern', 'Value.tag_2_pattern', (['"""EN"""'], {}), "('EN')\n", (191, 197), False, 'from lexref.model import Value\n')] |
# Copyright (c) 2018-2019 <NAME>
# Copyright (c) 2021 RACOM s.r.o.
# SPDX-License-Identifier: MIT
from contextlib import suppress
from typing import IO, Any, Dict, Iterator, Optional, Tuple, Union
from _libyang import ffi, lib
from .util import IOType, c2str, init_output, ly_array_iter, str2c
# -------------------------------------------------------------------------------------
def schema_in_format(fmt_string: str) -> int:
    """Translate a schema input format name into its libyang constant.

    :raises ValueError: for any name other than "yang" or "yin".
    """
    mapping = {"yang": lib.LYS_IN_YANG, "yin": lib.LYS_IN_YIN}
    if fmt_string not in mapping:
        raise ValueError("unknown schema input format: %r" % fmt_string)
    return mapping[fmt_string]
# -------------------------------------------------------------------------------------
def schema_out_format(fmt_string: str) -> int:
    """Translate a schema output format name into its libyang constant.

    :raises ValueError: for any name other than "yang", "yin" or "tree".
    """
    mapping = {
        "yang": lib.LYS_OUT_YANG,
        "yin": lib.LYS_OUT_YIN,
        "tree": lib.LYS_OUT_TREE,
    }
    if fmt_string not in mapping:
        raise ValueError("unknown schema output format: %r" % fmt_string)
    return mapping[fmt_string]
# -------------------------------------------------------------------------------------
def printer_flags(
    no_substmt: bool = False,
    shrink: bool = False,
) -> int:
    """Combine schema printer options into a libyang flags bitmask."""
    result = 0
    for enabled, bit in (
        (no_substmt, lib.LYS_PRINT_NO_SUBSTMT),
        (shrink, lib.LYS_PRINT_SHRINK),
    ):
        if enabled:
            result |= bit
    return result
# -------------------------------------------------------------------------------------
class Module:
    """Wrapper around a compiled libyang schema module."""

    __slots__ = ("context", "cdata")

    def __init__(self, context: "libyang.Context", cdata):
        self.context = context
        self.cdata = cdata  # C type: "struct lys_module *"

    def name(self) -> str:
        """Return the module name."""
        return c2str(self.cdata.name)

    def prefix(self) -> str:
        """Return the module prefix."""
        return c2str(self.cdata.prefix)

    def description(self) -> Optional[str]:
        """Return the module description, or None if absent."""
        return c2str(self.cdata.dsc)

    def filepath(self) -> Optional[str]:
        """Return the path the module was loaded from, or None."""
        return c2str(self.cdata.filepath)

    def implemented(self) -> bool:
        """Return True if the module is implemented (not only imported)."""
        return bool(self.cdata.implemented)

    def feature_enable(self, name: str) -> None:
        """Enable the given feature on this module.

        :raises LibyangError: if the feature does not exist.
        """
        p = str2c(name)
        # lys_set_implemented expects a NULL-terminated array of names.
        q = ffi.new("char *[2]", [p, ffi.NULL])
        ret = lib.lys_set_implemented(self.cdata, q)
        if ret != lib.LY_SUCCESS:
            raise self.context.error("no such feature: %r" % name)

    def feature_enable_all(self) -> None:
        """Enable all features of this module ("*" wildcard)."""
        self.feature_enable("*")

    def feature_disable_all(self) -> None:
        """Disable all features of this module (empty feature list)."""
        val = ffi.new("char **", ffi.NULL)
        ret = lib.lys_set_implemented(self.cdata, val)
        if ret != lib.LY_SUCCESS:
            raise self.context.error("cannot disable all features")

    def feature_state(self, name: str) -> bool:
        """Return True if the named feature is enabled, False if disabled.

        :raises LibyangError: if the feature does not exist.
        """
        ret = lib.lys_feature_value(self.cdata, str2c(name))
        if ret == lib.LY_SUCCESS:
            return True
        if ret == lib.LY_ENOT:
            return False
        raise self.context.error("no such feature: %r" % name)

    def features(self) -> Iterator["Feature"]:
        """Yield all features declared by this module."""
        # Collect first, then yield: lysp_feature_next keeps iteration
        # state in `idx`, so the walk is done eagerly.
        features_list = []
        f = ffi.NULL
        idx = ffi.new("uint32_t *")
        while True:
            f = lib.lysp_feature_next(f, self.cdata.parsed, idx)
            if f == ffi.NULL:
                break
            features_list.append(f)
        for i in features_list:
            yield Feature(self.context, i)

    def get_feature(self, name: str) -> "Feature":
        """Return the feature with the given name.

        :raises LibyangError: if no such feature exists.
        """
        for f in self.features():
            if f.name() == name:
                return f
        raise self.context.error("no such feature: %r" % name)

    def revisions(self) -> Iterator["Revision"]:
        """Yield the module's revision statements."""
        for revision in ly_array_iter(self.cdata.parsed.revs):
            yield Revision(self.context, revision, self)

    def __iter__(self) -> Iterator["SNode"]:
        return self.children()

    def children(self, types: Optional[Tuple[int, ...]] = None) -> Iterator["SNode"]:
        """Yield the module's top-level schema nodes (optionally filtered by type)."""
        return iter_children(self.context, self.cdata, types=types)

    def __str__(self) -> str:
        return self.name()

    def print(
        self,
        fmt: str,
        out_type: IOType,
        out_target: Union[IO, str, None] = None,
        printer_no_substmt: bool = False,
        printer_shrink: bool = False,
    ) -> Union[str, bytes, None]:
        """Print the module schema to the given output target.

        :arg fmt: "yang", "yin" or "tree".
        :arg out_type: destination kind (memory, fd, ...).
        :arg out_target: file object or path, unused for memory output.
        :arg printer_no_substmt: omit substatements.
        :arg printer_shrink: minimize the output.
        :returns: the printed text for memory output, None otherwise.
        """
        fmt = schema_out_format(fmt)
        flags = printer_flags(no_substmt=printer_no_substmt, shrink=printer_shrink)
        out_data = ffi.new("struct ly_out **")
        ret, output = init_output(out_type, out_target, out_data)
        if ret != lib.LY_SUCCESS:
            raise self.context.error("failed to initialize output target")
        ret = lib.lys_print_module(out_data[0], self.cdata, fmt, 0, flags)
        if output is not None:
            # Memory output: copy the C buffer into a Python string and
            # release the buffer allocated by libyang.
            tmp = output[0]
            output = c2str(tmp)
            lib.free(tmp)
        # The ly_out handle is always freed, even on print failure.
        lib.ly_out_free(out_data[0], ffi.NULL, False)
        if ret != lib.LY_SUCCESS:
            raise self.context.error("failed to write data")
        return output

    def print_mem(
        self,
        fmt: str = "tree",
        printer_no_substmt: bool = False,
        printer_shrink: bool = False,
    ) -> Union[str, bytes]:
        """Print the module schema and return it as a string."""
        return self.print(
            fmt,
            IOType.MEMORY,
            None,
            printer_no_substmt=printer_no_substmt,
            printer_shrink=printer_shrink,
        )

    def print_file(
        self,
        fileobj: IO,
        fmt: str = "tree",
        printer_no_substmt: bool = False,
        printer_shrink: bool = False,
    ) -> None:
        """Print the module schema into an open file object."""
        return self.print(
            fmt,
            IOType.FD,
            fileobj,
            printer_no_substmt=printer_no_substmt,
            printer_shrink=printer_shrink,
        )

    def parse_data_dict(
        self,
        dic: Dict[str, Any],
        no_state: bool = False,
        validate_present: bool = False,
        validate: bool = True,
        strict: bool = False,
        rpc: bool = False,
        rpcreply: bool = False,
        notification: bool = False,
    ) -> "libyang.data.DNode":
        """
        Convert a python dictionary to a DNode object following the schema of this
        module. The returned value is always a top-level data node (i.e.: without
        parent).
        :arg dic:
            The python dictionary to convert.
        :arg no_state:
            Consider state data not allowed and raise an error during validation if they are found.
        :arg validate_present:
            Validate result of the operation against schema.
        :arg validate:
            Run validation on result of the operation.
        :arg strict:
            Instead of ignoring data without schema definition, raise an error.
        :arg rpc:
            Data represents RPC or action input parameters.
        :arg rpcreply:
            Data represents RPC or action output parameters.
        :arg notification:
            Data represents a NETCONF notification.
        """
        from .data import dict_to_dnode  # circular import

        return dict_to_dnode(
            dic,
            self,
            no_state=no_state,
            validate_present=validate_present,
            validate=validate,
            strict=strict,
            rpc=rpc,
            rpcreply=rpcreply,
            notification=notification,
        )
# -------------------------------------------------------------------------------------
class Revision:
    """Wrapper around one parsed revision statement of a module."""

    __slots__ = ("context", "cdata", "module")

    def __init__(self, context: "libyang.Context", cdata, module):
        self.context = context
        self.cdata = cdata  # C type: "struct lysp_revision *"
        self.module = module

    def date(self) -> str:
        """Return the revision date string."""
        return c2str(self.cdata.date)

    def description(self) -> Optional[str]:
        """Return the revision description, or None."""
        return c2str(self.cdata.dsc)

    def reference(self) -> Optional[str]:
        """Return the revision reference, or None."""
        return c2str(self.cdata.ref)

    def extensions(self) -> Iterator["ExtensionParsed"]:
        """Yield extension instances attached to this revision."""
        for ext in ly_array_iter(self.cdata.exts):
            yield ExtensionParsed(self.context, ext, self.module)

    def get_extension(
        self, name: str, prefix: Optional[str] = None, arg_value: Optional[str] = None
    ) -> Optional["ExtensionParsed"]:
        """Return the first extension matching name (and optionally
        module prefix and argument value), or None."""
        for ext in self.extensions():
            if ext.name() != name:
                continue
            if prefix is not None and ext.module().name() != prefix:
                continue
            if arg_value is not None and ext.argument() != arg_value:
                continue
            return ext
        return None

    def __repr__(self):
        cls = self.__class__
        return "<%s.%s: %s>" % (cls.__module__, cls.__name__, str(self))

    def __str__(self):
        return self.date()
# -------------------------------------------------------------------------------------
class Extension:
    """Base wrapper for a YANG extension instance."""

    __slots__ = ("context", "cdata")

    def __init__(self, context: "libyang.Context", cdata, module_parent: Module = None):
        # module_parent is accepted for signature compatibility with
        # subclasses; this base class does not use it
        self.context = context
        self.cdata = cdata

    def argument(self) -> Optional[str]:
        """Return the extension instance argument, if any."""
        return c2str(self.cdata.argument)

    def name(self) -> str:
        """Return the extension name."""
        return str(self.cdata)

    def __repr__(self):
        cls = type(self)
        return "<%s.%s: %s>" % (cls.__module__, cls.__name__, str(self))

    def __str__(self):
        return self.name()
# -------------------------------------------------------------------------------------
class ExtensionParsed(Extension):
    """Extension instance attached to a parsed (uncompiled) schema tree."""

    __slots__ = ("module_parent",)

    def __init__(self, context: "libyang.Context", cdata, module_parent: Module = None):
        super().__init__(context, cdata)
        self.module_parent = module_parent

    def _module_from_parsed(self) -> Module:
        # the extension name is "prefix:name"; resolve the prefix against
        # the parent module's import list
        prefix = c2str(self.cdata.name).partition(":")[0]
        imports = self.module_parent.cdata.parsed.imports
        for imp in ly_array_iter(imports):
            if ffi.string(imp.prefix).decode() == prefix:
                return Module(self.context, imp.module)
        raise self.context.error("cannot get module")

    def name(self) -> str:
        """Return the extension name without its module prefix."""
        return c2str(self.cdata.name).split(":")[1]

    def module(self) -> Module:
        """Return the module that defines this extension."""
        return self._module_from_parsed()
# -------------------------------------------------------------------------------------
class ExtensionCompiled(Extension):
    """Extension instance attached to a compiled schema tree."""

    __slots__ = ("cdata_def",)

    def __init__(self, context: "libyang.Context", cdata):
        super().__init__(context, cdata)
        # the C field is named "def", a python keyword, hence getattr()
        self.cdata_def = getattr(cdata, "def", None)

    def name(self) -> str:
        """Return the extension definition name."""
        return c2str(self.cdata_def.name)

    def module(self) -> Module:
        """Return the module that defines this extension."""
        mod = self.cdata_def.module
        if not mod:
            raise self.context.error("cannot get module")
        return Module(self.context, mod)
# -------------------------------------------------------------------------------------
class _EnumBit:
    """Common wrapper for "enum" and "bit" members of a type."""

    __slots__ = ("context", "cdata")

    def __init__(self, context: "libyang.Context", cdata):
        self.context = context
        self.cdata = cdata  # C type: "struct lys_type_bit" or "struct lys_type_enum"

    def position(self) -> int:
        """Return the member position."""
        return self.cdata.position

    def value(self) -> int:
        """Return the member value."""
        return self.cdata.value

    def name(self) -> str:
        """Return the member name."""
        return c2str(self.cdata.name)

    def description(self) -> str:
        """Return the "description" argument."""
        return c2str(self.cdata.dsc)

    def deprecated(self) -> bool:
        return bool(self.cdata.flags & lib.LYS_STATUS_DEPRC)

    def obsolete(self) -> bool:
        return bool(self.cdata.flags & lib.LYS_STATUS_OBSLT)

    def status(self) -> str:
        """Return "current", "deprecated" or "obsolete"."""
        flags = self.cdata.flags
        if flags & lib.LYS_STATUS_OBSLT:
            return "obsolete"
        return "deprecated" if flags & lib.LYS_STATUS_DEPRC else "current"

    def __repr__(self):
        cls = type(self)
        return "<%s.%s: %s>" % (cls.__module__, cls.__name__, self)

    def __str__(self):
        return self.name()
# -------------------------------------------------------------------------------------
class Enum(_EnumBit):
    """A single "enum" member of an enumeration type."""

    pass
# -------------------------------------------------------------------------------------
class Bit(_EnumBit):
    """A single "bit" member of a bits type."""

    pass
# -------------------------------------------------------------------------------------
class Type:
    """Wrapper around a compiled YANG type ("struct lysc_type *").

    ``cdata_parsed`` ("struct lysp_type *"), when available, gives access to
    the original statement arguments (type name, range, length, patterns).
    It may be None for types derived internally (leafref targets, union
    members).
    """

    __slots__ = ("context", "cdata", "cdata_parsed")

    UNKNOWN = lib.LY_TYPE_UNKNOWN
    BINARY = lib.LY_TYPE_BINARY
    UINT8 = lib.LY_TYPE_UINT8
    UINT16 = lib.LY_TYPE_UINT16
    UINT32 = lib.LY_TYPE_UINT32
    UINT64 = lib.LY_TYPE_UINT64
    STRING = lib.LY_TYPE_STRING
    BITS = lib.LY_TYPE_BITS
    BOOL = lib.LY_TYPE_BOOL
    DEC64 = lib.LY_TYPE_DEC64
    EMPTY = lib.LY_TYPE_EMPTY
    ENUM = lib.LY_TYPE_ENUM
    IDENT = lib.LY_TYPE_IDENT
    INST = lib.LY_TYPE_INST
    LEAFREF = lib.LY_TYPE_LEAFREF
    UNION = lib.LY_TYPE_UNION
    INT8 = lib.LY_TYPE_INT8
    INT16 = lib.LY_TYPE_INT16
    INT32 = lib.LY_TYPE_INT32
    INT64 = lib.LY_TYPE_INT64
    BASENAMES = {
        UNKNOWN: "unknown",
        BINARY: "binary",
        UINT8: "uint8",
        UINT16: "uint16",
        UINT32: "uint32",
        UINT64: "uint64",
        STRING: "string",
        BITS: "bits",
        BOOL: "boolean",
        DEC64: "decimal64",
        EMPTY: "empty",
        ENUM: "enumeration",
        IDENT: "identityref",
        INST: "instance-id",
        LEAFREF: "leafref",
        UNION: "union",
        INT8: "int8",
        INT16: "int16",
        INT32: "int32",
        INT64: "int64",
    }

    def __init__(self, context: "libyang.Context", cdata, cdata_parsed):
        self.context = context
        self.cdata = cdata  # C type: "struct lysc_type*"
        self.cdata_parsed = cdata_parsed  # C type: "struct lysp_type*"

    def get_bases(self) -> Iterator["Type"]:
        """Recursively yield the builtin types this type derives from.

        Leafrefs are resolved to their target type; unions are expanded.
        """
        if self.cdata.basetype == lib.LY_TYPE_LEAFREF:
            yield from self.leafref_type().get_bases()
        elif self.cdata.basetype == lib.LY_TYPE_UNION:
            for t in self.union_types():
                yield from t.get_bases()
        else:  # builtin type
            yield self

    def name(self) -> str:
        """Return the type name as written in the module, or the basename."""
        if self.cdata_parsed is not None and self.cdata_parsed.name:
            return c2str(self.cdata_parsed.name)
        return self.basename()

    def description(self) -> Optional[str]:
        """Compiled types do not carry a description; always None."""
        return None

    def base(self) -> int:
        """Return the LY_TYPE_* constant of this type."""
        return self.cdata.basetype

    def bases(self) -> Iterator[int]:
        """Yield the LY_TYPE_* constants of all builtin base types."""
        for b in self.get_bases():
            yield b.base()

    def basename(self) -> str:
        """Return the YANG name of the builtin base type."""
        return self.BASENAMES.get(self.cdata.basetype, "unknown")

    def basenames(self) -> Iterator[str]:
        """Yield the YANG names of all builtin base types."""
        for b in self.get_bases():
            yield b.basename()

    def leafref_type(self) -> Optional["Type"]:
        """Return the resolved target type of a leafref, or None."""
        if self.cdata.basetype != self.LEAFREF:
            return None
        lr = ffi.cast("struct lysc_type_leafref *", self.cdata)
        return Type(self.context, lr.realtype, None)

    def leafref_path(self) -> Optional["str"]:
        """Return the "path" argument of a leafref, or None."""
        if self.cdata.basetype != self.LEAFREF:
            return None
        lr = ffi.cast("struct lysc_type_leafref *", self.cdata)
        return c2str(lib.lyxp_get_expr(lr.path))

    def union_types(self) -> Iterator["Type"]:
        """Yield the member types of a union type."""
        if self.cdata.basetype != self.UNION:
            return
        t = ffi.cast("struct lysc_type_union *", self.cdata)
        for union_type in ly_array_iter(t.types):
            yield Type(self.context, union_type, None)

    def enums(self) -> Iterator[Enum]:
        """Yield the "enum" members of an enumeration type."""
        if self.cdata.basetype != self.ENUM:
            return
        t = ffi.cast("struct lysc_type_enum *", self.cdata)
        for enum in ly_array_iter(t.enums):
            yield Enum(self.context, enum)

    def all_enums(self) -> Iterator[Enum]:
        """Yield "enum" members of all base types (through leafrefs/unions)."""
        for b in self.get_bases():
            yield from b.enums()

    def bits(self) -> Iterator[Bit]:
        """Yield the "bit" members of a bits type."""
        if self.cdata.basetype != self.BITS:
            return
        t = ffi.cast("struct lysc_type_bits *", self.cdata)
        for bit in ly_array_iter(t.bits):
            # fixed: this used to yield Enum objects, contradicting the
            # declared Iterator[Bit] return type
            yield Bit(self.context, bit)

    def all_bits(self) -> Iterator[Bit]:
        """Yield "bit" members of all base types (through leafrefs/unions)."""
        for b in self.get_bases():
            yield from b.bits()

    NUM_TYPES = frozenset((INT8, INT16, INT32, INT64, UINT8, UINT16, UINT32, UINT64))

    def range(self) -> Optional[str]:
        """Return the "range" statement argument, or None."""
        if not self.cdata_parsed:
            # fixed: internally-derived types (leafref targets, union
            # members) have no parsed counterpart; previously this raised
            # AttributeError on None
            return None
        if (
            self.cdata.basetype in self.NUM_TYPES or self.cdata.basetype == self.DEC64
        ) and self.cdata_parsed.range != ffi.NULL:
            return c2str(self.cdata_parsed.range.arg.str)
        return None

    def all_ranges(self) -> Iterator[str]:
        """Yield "range" arguments of this type and, for unions, its members."""
        if self.cdata.basetype == lib.LY_TYPE_UNION:
            for t in self.union_types():
                yield from t.all_ranges()
        else:
            rng = self.range()
            if rng is not None:
                yield rng

    STR_TYPES = frozenset((STRING, BINARY, ENUM, IDENT, BITS))

    def length(self) -> Optional[str]:
        """Return the "length" statement argument, or None."""
        if not self.cdata_parsed:
            return None
        if (
            self.cdata.basetype in (self.STRING, self.BINARY)
            and self.cdata_parsed.length != ffi.NULL
        ):
            return c2str(self.cdata_parsed.length.arg.str)
        return None

    def all_lengths(self) -> Iterator[str]:
        """Yield "length" arguments of this type and, for unions, its members."""
        if self.cdata.basetype == lib.LY_TYPE_UNION:
            for t in self.union_types():
                yield from t.all_lengths()
        else:
            length = self.length()
            if length is not None:
                yield length

    def patterns(self) -> Iterator[str]:
        """Yield the "pattern" statement arguments of a string type."""
        # annotation fixed: the method yields plain strings, not
        # (str, bool) tuples
        if not self.cdata_parsed or self.cdata.basetype != self.STRING:
            return
        if self.cdata_parsed.patterns == ffi.NULL:
            return
        # libyang sized arrays store their length in the uint64_t word just
        # before the first element
        arr_length = ffi.cast("uint64_t *", self.cdata_parsed.patterns)[-1]
        for i in range(arr_length):
            yield c2str(self.cdata_parsed.patterns[i].arg.str)

    def all_patterns(self) -> Iterator[str]:
        """Yield "pattern" arguments of this type and, for unions, its members."""
        if self.cdata.basetype == lib.LY_TYPE_UNION:
            for t in self.union_types():
                yield from t.all_patterns()
        else:
            yield from self.patterns()

    def module(self) -> Module:
        """Return the module where the type's derivation is defined, or None."""
        # TODO: a pointer to the parsed module where the type is defined is
        # in self.cdata_parsed.pmod, however there is no way to get the name
        # of the module from lysp_module
        if not self.cdata.der.module:
            return None
        return Module(self.context, self.cdata.der.module)

    def extensions(self) -> Iterator[ExtensionCompiled]:
        """Yield extensions on this type and on its parent statement."""
        for i in range(self.cdata.ext_size):
            yield ExtensionCompiled(self.context, self.cdata.ext[i])
        if self.cdata.parent:
            for i in range(self.cdata.parent.ext_size):
                yield ExtensionCompiled(self.context, self.cdata.parent.ext[i])

    def get_extension(
        self, name: str, prefix: Optional[str] = None, arg_value: Optional[str] = None
    ) -> Optional[ExtensionCompiled]:
        """Return the first extension matching name/prefix/argument, or None."""
        for ext in self.extensions():
            if ext.name() != name:
                continue
            if prefix is not None and ext.module().name() != prefix:
                continue
            if arg_value is not None and ext.argument() != arg_value:
                continue
            return ext
        return None

    def __repr__(self):
        cls = self.__class__
        return "<%s.%s: %s>" % (cls.__module__, cls.__name__, str(self))

    def __str__(self):
        return self.name()
# -------------------------------------------------------------------------------------
class Feature:
    """Wrapper around a YANG "feature" statement ("struct lysp_feature *")."""

    __slots__ = ("context", "cdata")

    def __init__(self, context: "libyang.Context", cdata):
        self.context = context
        self.cdata = cdata  # C type: "struct lysp_feature *"

    def name(self) -> str:
        """Return the feature name."""
        return c2str(self.cdata.name)

    def description(self) -> Optional[str]:
        """Return the "description" argument, if any."""
        return c2str(self.cdata.dsc)

    def reference(self) -> Optional[str]:
        """Return the "reference" argument, if any."""
        return c2str(self.cdata.ref)

    def state(self) -> bool:
        """Return True if the feature is enabled."""
        return bool(self.cdata.flags & lib.LYS_FENABLED)

    def deprecated(self) -> bool:
        return bool(self.cdata.flags & lib.LYS_STATUS_DEPRC)

    def obsolete(self) -> bool:
        return bool(self.cdata.flags & lib.LYS_STATUS_OBSLT)

    def if_features(self) -> Iterator["IfFeatureExpr"]:
        """Yield the if-feature expressions guarding this feature."""
        if self.cdata.iffeatures == ffi.NULL:
            # fixed: guard against a NULL array before dereferencing its
            # length word, as the other sized-array accessors do
            return
        # libyang sized arrays store their length in the uint64_t word just
        # before the first element
        arr_length = ffi.cast("uint64_t *", self.cdata.iffeatures)[-1]
        for i in range(arr_length):
            yield IfFeatureExpr(self.context, self.cdata.iffeatures[i])

    def test_all_if_features(self) -> Iterator["Feature"]:
        """Yield every feature referenced by the compiled if-feature expressions."""
        # annotation fixed: this yields Feature objects, not IfFeatureExpr
        for cdata_lysc_iffeature in ly_array_iter(self.cdata.iffeatures_c):
            for cdata_feature in ly_array_iter(cdata_lysc_iffeature.features):
                yield Feature(self.context, cdata_feature)

    def module(self) -> Module:
        """Return the module that defines this feature."""
        return Module(self.context, self.cdata.module)

    def __str__(self):
        return self.name()
# -------------------------------------------------------------------------------------
class IfFeatureExpr:
    """An "if-feature" expression attached to a schema node or feature.

    Wraps either a compiled expression ("struct lysc_iffeature *") or a
    parsed one ("struct lysp_qname *"); in the parsed case the expression
    string is re-parsed in python against ``module_features``.
    """

    __slots__ = ("context", "cdata", "module_features", "compiled")

    def __init__(self, context: "libyang.Context", cdata, module_features=None):
        """
        If module_features is not None, it means we are wrapping a parsed
        (not compiled) if-feature expression.
        """
        self.context = context
        # Can be "struct lysc_iffeature *" if comes from module feature
        # Can be "struct lysp_qname *" if comes from lysp_node
        self.cdata = cdata
        self.module_features = module_features
        self.compiled = not module_features

    def _get_operator(self, position: int) -> int:
        # the ->exp field is a 2bit array of operator values stored under a uint8_t C
        # array.
        mask = 0x3  # 2bits mask
        shift = 2 * (position % 4)  # bit offset inside the byte
        item = self.cdata.expr[position // 4]  # byte holding this operator
        result = item & (mask << shift)
        return result >> shift

    def _get_operands_parsed(self):
        # Re-parse the textual if-feature expression into prefix (Polish)
        # notation: a list of operator codes and a parallel list of feature
        # cdata, matching the layout of the compiled representation.
        qname = ffi.string(self.cdata.str).decode()
        tokens = qname.split()
        operators = []
        features = []
        operators_map = {
            "or": lib.LYS_IFF_OR,
            "and": lib.LYS_IFF_AND,
            "not": lib.LYS_IFF_NOT,
            "f": lib.LYS_IFF_F,
        }

        def get_feature(name):
            # resolve a feature name against the module's feature list
            for feature in self.module_features:
                if feature.name() == name:
                    return feature.cdata
            raise Exception("No feature %s in module" % name)

        def parse_iffeature(tokens):
            def oper2(op):
                # binary operator: split around its first occurrence and
                # recurse on both halves
                op_index = tokens.index(op)
                operators.append(operators_map[op])
                left, right = tokens[:op_index], tokens[op_index + 1 :]
                parse_iffeature(left)
                parse_iffeature(right)

            def oper1(op):
                # unary operator: the operand is the following token
                op_index = tokens.index(op)
                feature_name = tokens[op_index + 1]
                operators.append(operators_map[op])
                operators.append(operators_map["f"])
                features.append(get_feature(feature_name))

            oper_map = {"or": oper2, "and": oper2, "not": oper1}
            for op, fun in oper_map.items():
                # tokens.index() raises ValueError when the operator is
                # absent: suppress it and try the next operator
                with suppress(ValueError):
                    fun(op)
                    return

            # Token is a feature
            operators.append(operators_map["f"])
            features.append(get_feature(tokens[0]))

        parse_iffeature(tokens)
        return operators, features

    def _operands(self) -> Iterator[Union["IfFeature", type]]:
        # Walk the prefix-notation expression, yielding IfFeature instances
        # for operands and the tree node *classes* themselves for operators.
        if self.compiled:

            def get_operator(op_index):
                return self._get_operator(op_index)

            def get_feature(ft_index):
                return self.cdata.features[ft_index]

        else:
            operators, features = self._get_operands_parsed()

            def get_operator(op_index):
                return operators[op_index]

            def get_feature(ft_index):
                return features[ft_index]

        op_index = 0
        ft_index = 0
        # number of operands still required to complete the expression
        expected = 1
        while expected > 0:
            operator = get_operator(op_index)
            op_index += 1
            if operator == lib.LYS_IFF_F:
                yield IfFeature(self.context, get_feature(ft_index))
                ft_index += 1
                expected -= 1
            elif operator == lib.LYS_IFF_NOT:
                yield IfNotFeature
            elif operator == lib.LYS_IFF_AND:
                yield IfAndFeatures
                expected += 1
            elif operator == lib.LYS_IFF_OR:
                yield IfOrFeatures
                expected += 1

    def tree(self) -> "IfFeatureExprTree":
        """Return the expression as a tree of IfFeatureExprTree nodes."""

        def _tree(operands):
            # recursive descent over the prefix-notation operand stream
            op = next(operands)
            if op is IfNotFeature:
                return op(self.context, _tree(operands))
            if op in (IfAndFeatures, IfOrFeatures):
                return op(self.context, _tree(operands), _tree(operands))
            return op

        return _tree(self._operands())

    def dump(self) -> str:
        """Return an indented, multi-line representation of the expression."""
        return self.tree().dump()

    def __str__(self):
        return str(self.tree()).strip("()")
# -------------------------------------------------------------------------------------
class IfFeatureExprTree:
    """Abstract node of an if-feature expression tree."""

    def dump(self, indent: int = 0) -> str:
        """Return an indented, multi-line representation of this subtree."""
        raise NotImplementedError()

    def __str__(self):
        raise NotImplementedError()
# -------------------------------------------------------------------------------------
class IfFeature(IfFeatureExprTree):
    """Leaf of an if-feature expression tree: a single feature reference."""

    __slots__ = ("context", "cdata")

    def __init__(self, context: "libyang.Context", cdata):
        self.context = context
        self.cdata = cdata  # C type: "struct lys_feature *"

    def feature(self) -> Feature:
        """Return the referenced feature."""
        return Feature(self.context, self.cdata)

    def state(self) -> bool:
        """Return True if the referenced feature is enabled."""
        return self.feature().state()

    def dump(self, indent: int = 0) -> str:
        f = self.feature()
        prefix = " " * indent
        return "%s%s [%s]\n" % (prefix, f.name(), f.description())

    def __str__(self):
        return self.feature().name()
# -------------------------------------------------------------------------------------
class IfNotFeature(IfFeatureExprTree):
    """Negation node of an if-feature expression tree."""

    __slots__ = ("context", "child")

    def __init__(self, context: "libyang.Context", child: IfFeatureExprTree):
        self.context = context
        self.child = child

    def state(self) -> bool:
        """Return the negation of the child expression state."""
        return not self.child.state()

    def dump(self, indent: int = 0) -> str:
        head = " " * indent + "NOT\n"
        return head + self.child.dump(indent + 1)

    def __str__(self):
        return "NOT %s" % (self.child,)
# -------------------------------------------------------------------------------------
class IfAndFeatures(IfFeatureExprTree):
    """Logical AND of two if-feature sub-expressions."""

    __slots__ = ("context", "a", "b")

    def __init__(
        self, context: "libyang.Context", a: IfFeatureExprTree, b: IfFeatureExprTree
    ):
        self.context = context
        self.a = a
        self.b = b

    def state(self) -> bool:
        """Return True only if both operands are enabled."""
        return self.a.state() and self.b.state()

    def dump(self, indent: int = 0) -> str:
        parts = [" " * indent + "AND\n"]
        parts.append(self.a.dump(indent + 1))
        parts.append(self.b.dump(indent + 1))
        return "".join(parts)

    def __str__(self):
        return "%s AND %s" % (self.a, self.b)
# -------------------------------------------------------------------------------------
class IfOrFeatures(IfFeatureExprTree):
    """Logical OR of two if-feature sub-expressions."""

    __slots__ = ("context", "a", "b")

    def __init__(
        self, context: "libyang.Context", a: IfFeatureExprTree, b: IfFeatureExprTree
    ):
        self.context = context
        self.a = a
        self.b = b

    def state(self) -> bool:
        """Return True if at least one operand is enabled."""
        return self.a.state() or self.b.state()

    def dump(self, indent: int = 0) -> str:
        parts = [" " * indent + "OR\n"]
        parts.append(self.a.dump(indent + 1))
        parts.append(self.b.dump(indent + 1))
        return "".join(parts)

    def __str__(self):
        return "(%s OR %s)" % (self.a, self.b)
# -------------------------------------------------------------------------------------
class SNode:
    """Base wrapper around a compiled schema node ("struct lysc_node *").

    Concrete statement kinds are handled by registered subclasses
    (SContainer, SLeaf, ...); use :meth:`new` to instantiate the right one.
    """

    __slots__ = ("context", "cdata", "cdata_parsed")

    CONTAINER = lib.LYS_CONTAINER
    LEAF = lib.LYS_LEAF
    LEAFLIST = lib.LYS_LEAFLIST
    LIST = lib.LYS_LIST
    RPC = lib.LYS_RPC
    ACTION = lib.LYS_ACTION
    INPUT = lib.LYS_INPUT
    OUTPUT = lib.LYS_OUTPUT
    NOTIF = lib.LYS_NOTIF
    ANYXML = lib.LYS_ANYXML
    ANYDATA = lib.LYS_ANYDATA
    KEYWORDS = {
        CONTAINER: "container",
        LEAF: "leaf",
        LEAFLIST: "leaf-list",
        LIST: "list",
        RPC: "rpc",
        ACTION: "action",
        INPUT: "input",
        OUTPUT: "output",
        NOTIF: "notification",
        ANYXML: "anyxml",
        ANYDATA: "anydata",
    }

    def __init__(self, context: "libyang.Context", cdata):
        self.context = context
        self.cdata = cdata  # C type: "struct lysc_node *"
        self.cdata_parsed = ffi.cast("struct lysp_node *", self.cdata.priv)

    def nodetype(self) -> int:
        """Return the LYS_* node type constant."""
        return self.cdata.nodetype

    def keyword(self) -> str:
        """Return the YANG keyword of this node type."""
        return self.KEYWORDS.get(self.cdata.nodetype, "???")

    def name(self) -> str:
        """Return the node name."""
        return c2str(self.cdata.name)

    def fullname(self) -> str:
        """Return the node name prefixed by its module name."""
        return "%s:%s" % (self.module().name(), self.name())

    def description(self) -> Optional[str]:
        """Return the "description" argument, if any."""
        return c2str(self.cdata.dsc)

    def config_set(self) -> bool:
        """Return True if a "config" statement is explicitly set."""
        return bool(self.cdata.flags & lib.LYS_SET_CONFIG)

    def config_false(self) -> bool:
        """Return True if this node is "config false" (state data)."""
        return bool(self.cdata.flags & lib.LYS_CONFIG_R)

    def mandatory(self) -> bool:
        return bool(self.cdata.flags & lib.LYS_MAND_TRUE)

    def deprecated(self) -> bool:
        return bool(self.cdata.flags & lib.LYS_STATUS_DEPRC)

    def obsolete(self) -> bool:
        return bool(self.cdata.flags & lib.LYS_STATUS_OBSLT)

    def status(self) -> str:
        """Return "current", "deprecated" or "obsolete"."""
        if self.cdata.flags & lib.LYS_STATUS_OBSLT:
            return "obsolete"
        if self.cdata.flags & lib.LYS_STATUS_DEPRC:
            return "deprecated"
        return "current"

    def module(self) -> Module:
        """Return the module that defines this node."""
        return Module(self.context, self.cdata.module)

    def schema_path(self) -> str:
        """Return the logical schema path of this node."""
        # call moved out of the try block: if lysc_path() failed inside the
        # try, the finally clause raised NameError on the unbound "s"
        s = lib.lysc_path(self.cdata, lib.LYSC_PATH_LOG, ffi.NULL, 0)
        try:
            return c2str(s)
        finally:
            lib.free(s)  # lysc_path() allocates the returned string

    def data_path(self, key_placeholder: str = "'%s'") -> str:
        """Return the data path pattern of this node.

        :arg key_placeholder:
            Replacement for the default "'%s'" list-key placeholder.
        """
        s = lib.lysc_path(self.cdata, lib.LYSC_PATH_DATA_PATTERN, ffi.NULL, 0)
        try:
            val = c2str(s)
            if key_placeholder != "'%s'":
                val = val.replace("'%s'", key_placeholder)
            return val
        finally:
            lib.free(s)  # lysc_path() allocates the returned string

    def extensions(self) -> Iterator[ExtensionCompiled]:
        """Yield the extension instances attached to this node."""
        ext = ffi.cast("struct lysc_ext_instance *", self.cdata.exts)
        if ext == ffi.NULL:
            return
        for extension in ly_array_iter(ext):
            yield ExtensionCompiled(self.context, extension)

    def must_conditions(self) -> Iterator[str]:
        """Yield "must" XPath expressions; overridden by statement subclasses."""
        return iter(())

    def get_extension(
        self, name: str, prefix: Optional[str] = None, arg_value: Optional[str] = None
    ) -> Optional[ExtensionCompiled]:
        """Return the first extension matching name/prefix/argument, or None."""
        for ext in self.extensions():
            if ext.name() != name:
                continue
            if prefix is not None and ext.module().name() != prefix:
                continue
            if arg_value is not None and ext.argument() != arg_value:
                continue
            return ext
        return None

    def if_features(self) -> Iterator[IfFeatureExpr]:
        """Yield the if-feature expressions guarding this node."""
        iff = ffi.cast("struct lysp_qname *", self.cdata_parsed.iffeatures)
        # fixed: materialize the feature list once; it was previously
        # consumed by list() inside the loop, leaving every expression after
        # the first with an empty feature list
        module_features = list(self.module().features())
        for if_feature in ly_array_iter(iff):
            yield IfFeatureExpr(self.context, if_feature, module_features)

    def parent(self) -> Optional["SNode"]:
        """Return the closest ancestor wrapped by a known subclass, or None."""
        parent_p = self.cdata.parent
        while parent_p and parent_p.nodetype not in SNode.NODETYPE_CLASS:
            parent_p = parent_p.parent
        if parent_p:
            return SNode.new(self.context, parent_p)
        return None

    def when_conditions(self):
        """Yield the "when" XPath expressions attached to this node."""
        # (removed a dead ffi.new() assignment that was immediately
        # overwritten by the lysc_node_when() result)
        wh = lib.lysc_node_when(self.cdata)
        if wh == ffi.NULL:
            return
        for cond in ly_array_iter(wh):
            yield c2str(lib.lyxp_get_expr(cond.cond))

    def __repr__(self):
        cls = self.__class__
        return "<%s.%s: %s>" % (cls.__module__, cls.__name__, str(self))

    def __str__(self):
        return self.name()

    # maps LYS_* node type constants to their wrapper class
    NODETYPE_CLASS = {}

    @staticmethod
    def register(nodetype):
        """Class decorator registering a wrapper class for a LYS_* node type."""

        def _decorator(nodeclass):
            SNode.NODETYPE_CLASS[nodetype] = nodeclass
            return nodeclass

        return _decorator

    @staticmethod
    def new(context: "libyang.Context", cdata) -> "SNode":
        """Instantiate the SNode subclass registered for the node's type."""
        cdata = ffi.cast("struct lysc_node *", cdata)
        nodecls = SNode.NODETYPE_CLASS.get(cdata.nodetype, None)
        if nodecls is None:
            raise TypeError("node type %s not implemented" % cdata.nodetype)
        return nodecls(context, cdata)
# -------------------------------------------------------------------------------------
@SNode.register(SNode.LEAF)
class SLeaf(SNode):
    """Schema node for a "leaf" statement."""

    __slots__ = ("cdata_leaf", "cdata_leaf_parsed")

    def __init__(self, context: "libyang.Context", cdata):
        super().__init__(context, cdata)
        self.cdata_leaf = ffi.cast("struct lysc_node_leaf *", cdata)
        self.cdata_leaf_parsed = ffi.cast("struct lysp_node_leaf *", self.cdata_parsed)

    def default(self) -> Optional[Union[str, bool, int]]:
        """Return the leaf default value converted to a python type, or None."""
        if not self.cdata_leaf.dflt:
            return None
        val = lib.lyd_value_get_canonical(self.context.cdata, self.cdata_leaf.dflt)
        if not val:
            return None
        val = c2str(val)
        # NOTE(review): realtype is a "struct lysc_type *"; comparing it to
        # the integer Type.* constants looks suspicious — possibly
        # realtype.basetype was intended. Confirm against libyang.
        val_type = self.cdata_leaf.dflt.realtype
        if val_type == Type.BOOL:
            return val == "true"
        if val_type in Type.NUM_TYPES:
            return int(val)
        return val

    def units(self) -> Optional[str]:
        """Return the "units" argument, if any."""
        return c2str(self.cdata_leaf.units)

    def type(self) -> Type:
        """Return the type of this leaf."""
        return Type(self.context, self.cdata_leaf.type, self.cdata_leaf_parsed.type)

    def is_key(self) -> bool:
        """Return True if this leaf is a list key."""
        if self.cdata_leaf.flags & lib.LYS_KEY:
            return True
        return False

    def must_conditions(self) -> Iterator[str]:
        """Yield the "must" XPath expressions attached to this leaf."""
        pdata = self.cdata_leaf_parsed
        if pdata.musts == ffi.NULL:
            return
        for must in ly_array_iter(pdata.musts):
            yield c2str(must.arg.str)

    def __str__(self):
        return "%s %s" % (self.name(), self.type().name())
# -------------------------------------------------------------------------------------
@SNode.register(SNode.LEAFLIST)
class SLeafList(SNode):
    """Schema node for a "leaf-list" statement."""

    __slots__ = ("cdata_leaflist", "cdata_leaflist_parsed")

    def __init__(self, context: "libyang.Context", cdata):
        super().__init__(context, cdata)
        self.cdata_leaflist = ffi.cast("struct lysc_node_leaflist *", cdata)
        self.cdata_leaflist_parsed = ffi.cast(
            "struct lysp_node_leaflist *", self.cdata_parsed
        )

    def ordered(self) -> bool:
        """Return True if the leaf-list is "ordered-by user"."""
        return bool(self.cdata.flags & lib.LYS_ORDBY_USER)

    def units(self) -> Optional[str]:
        """Return the "units" argument, if any."""
        return c2str(self.cdata_leaflist.units)

    def type(self) -> Type:
        """Return the type of the leaf-list entries."""
        return Type(
            self.context, self.cdata_leaflist.type, self.cdata_leaflist_parsed.type
        )

    def defaults(self) -> Iterator[Union[str, bool, int, None]]:
        """Yield the default values converted to python types.

        None is yielded for an entry without a canonical value.
        """
        if self.cdata_leaflist.dflts == ffi.NULL:
            return
        arr_length = ffi.cast("uint64_t *", self.cdata_leaflist.dflts)[-1]
        for i in range(arr_length):
            val = lib.lyd_value_get_canonical(
                self.context.cdata, self.cdata_leaflist.dflts[i]
            )
            if not val:
                # fixed: previously fell through after yielding None,
                # emitting a second bogus value for the same entry
                yield None
                continue
            ret = c2str(val)
            # NOTE(review): realtype is a "struct lysc_type *"; comparing it
            # to the integer Type.* constants looks suspicious — possibly
            # realtype.basetype was intended. Confirm against libyang.
            val_type = self.cdata_leaflist.dflts[i].realtype
            if val_type == Type.BOOL:
                # fixed: convert from the decoded string, not the raw cdata
                # (comparing cdata to a str is always False, int(cdata)
                # raises TypeError)
                ret = ret == "true"
            elif val_type in Type.NUM_TYPES:
                ret = int(ret)
            yield ret

    def must_conditions(self) -> Iterator[str]:
        """Yield the "must" XPath expressions attached to this leaf-list."""
        pdata = self.cdata_leaflist_parsed
        if pdata.musts == ffi.NULL:
            return
        for must in ly_array_iter(pdata.musts):
            yield c2str(must.arg.str)

    def __str__(self):
        return "%s %s" % (self.name(), self.type().name())
# -------------------------------------------------------------------------------------
@SNode.register(SNode.CONTAINER)
class SContainer(SNode):
    """Schema node for a "container" statement."""

    __slots__ = ("cdata_container", "cdata_container_parsed")

    def __init__(self, context: "libyang.Context", cdata):
        super().__init__(context, cdata)
        self.cdata_container = ffi.cast("struct lysc_node_container *", cdata)
        self.cdata_container_parsed = ffi.cast(
            "struct lysp_node_container *", self.cdata_parsed
        )

    def presence(self) -> Optional[str]:
        """Return the "presence" argument, or None for a non-presence container."""
        if self.cdata_container.flags & lib.LYS_PRESENCE:
            return c2str(self.cdata_container_parsed.presence)
        return None

    def must_conditions(self) -> Iterator[str]:
        """Yield the "must" XPath expressions attached to this container."""
        musts = self.cdata_container_parsed.musts
        if musts == ffi.NULL:
            return
        for must in ly_array_iter(musts):
            yield c2str(must.arg.str)

    def __iter__(self) -> Iterator[SNode]:
        return self.children()

    def children(self, types: Optional[Tuple[int, ...]] = None) -> Iterator[SNode]:
        """Iterate over the direct children of this container."""
        return iter_children(self.context, self.cdata, types=types)
# -------------------------------------------------------------------------------------
@SNode.register(SNode.LIST)
class SList(SNode):
    """Schema node for a "list" statement."""

    __slots__ = ("cdata_list", "cdata_list_parsed")

    def __init__(self, context: "libyang.Context", cdata):
        super().__init__(context, cdata)
        self.cdata_list = ffi.cast("struct lysc_node_list *", cdata)
        self.cdata_list_parsed = ffi.cast("struct lysp_node_list *", self.cdata_parsed)

    def ordered(self) -> bool:
        """Return True if the list is "ordered-by user"."""
        return bool(self.cdata.flags & lib.LYS_ORDBY_USER)

    def __iter__(self) -> Iterator[SNode]:
        return self.children()

    def children(
        self, skip_keys: bool = False, types: Optional[Tuple[int, ...]] = None
    ) -> Iterator[SNode]:
        """Iterate over the direct children, optionally skipping key leaves."""
        return iter_children(self.context, self.cdata, skip_keys=skip_keys, types=types)

    def keys(self) -> Iterator[SNode]:
        """Yield the key leaves of this list."""
        child = lib.lysc_node_child(self.cdata)
        while child:
            if child.flags & lib.LYS_KEY:
                yield SLeaf(self.context, child)
            child = child.next

    def must_conditions(self) -> Iterator[str]:
        """Yield the "must" XPath expressions attached to this list."""
        musts = self.cdata_list_parsed.musts
        if musts == ffi.NULL:
            return
        for must in ly_array_iter(musts):
            yield c2str(must.arg.str)

    def __str__(self):
        key_names = ", ".join(key.name() for key in self.keys())
        return "%s [%s]" % (self.name(), key_names)
# -------------------------------------------------------------------------------------
@SNode.register(SNode.INPUT)
@SNode.register(SNode.OUTPUT)
class SRpcInOut(SNode):
    """Schema node for the implicit "input"/"output" of an RPC or action."""

    def __iter__(self) -> Iterator[SNode]:
        return self.children()

    def children(self, types: Optional[Tuple[int, ...]] = None) -> Iterator[SNode]:
        """Iterate over the direct children of this input/output node."""
        return iter_children(self.context, self.cdata, types=types)
# -------------------------------------------------------------------------------------
@SNode.register(SNode.RPC)
@SNode.register(SNode.ACTION)
class SRpc(SNode):
    """Schema node for an "rpc" or "action" statement."""

    def _find_inout(self, nodetype: int) -> Optional["SRpcInOut"]:
        # walk the child list for the implicit input/output node (the
        # search was previously duplicated in input() and output())
        node = lib.lysc_node_child(self.cdata)
        while node:
            if node.nodetype == nodetype:
                return SNode.new(self.context, node)
            node = node.next
        return None

    def input(self) -> Optional[SRpcInOut]:
        """Return the implicit input node, or None."""
        return self._find_inout(self.INPUT)

    def output(self) -> Optional[SRpcInOut]:
        """Return the implicit output node, or None."""
        return self._find_inout(self.OUTPUT)

    def __iter__(self) -> Iterator[SNode]:
        return self.children()

    def children(self, types: Optional[Tuple[int, ...]] = None) -> Iterator[SNode]:
        """Iterate over both input and output children of this RPC/action."""
        yield from iter_children(self.context, self.cdata, types=types)
        # With libyang2, lys_getnext() returns only input or only output
        # children; iterate a second time with the OUTPUT option to keep the
        # previous behavior of yielding both
        yield from iter_children(
            self.context, self.cdata, types=types, options=lib.LYS_GETNEXT_OUTPUT
        )
# -------------------------------------------------------------------------------------
@SNode.register(SNode.NOTIF)
class SNotif(SNode):
    """Schema node for a "notification" statement."""

    def __iter__(self) -> Iterator[SNode]:
        return self.children()

    def children(self, types: Optional[Tuple[int, ...]] = None) -> Iterator[SNode]:
        """Iterate over the direct children of this notification."""
        return iter_children(self.context, self.cdata, types=types)
# -------------------------------------------------------------------------------------
@SNode.register(SNode.ANYXML)
class SAnyxml(SNode):
    """Schema node for an "anyxml" statement (no statement-specific accessors)."""

    pass
# -------------------------------------------------------------------------------------
@SNode.register(SNode.ANYDATA)
class SAnydata(SNode):
    """Schema node for an "anydata" statement (no statement-specific accessors)."""

    pass
# -------------------------------------------------------------------------------------
def iter_children(
    context: "libyang.Context",
    parent,  # C type: Union["struct lys_module *", "struct lys_node *"]
    skip_keys: bool = False,
    types: Optional[Tuple[int, ...]] = None,
    options: int = 0,
) -> Iterator[SNode]:
    """Iterate over the children of a module or schema node.

    :arg parent:
        Either a module (its top-level nodes are iterated) or a schema node.
    :arg skip_keys:
        Do not yield leaves that are list keys.
    :arg types:
        Restrict iteration to these LYS_* node types (defaults to all data,
        RPC and notification statements).
    :arg options:
        Extra LYS_GETNEXT_* flags passed to lys_getnext().
    """
    if types is None:
        types = (
            lib.LYS_ACTION,
            lib.LYS_CONTAINER,
            lib.LYS_LIST,
            lib.LYS_RPC,
            lib.LYS_LEAF,
            lib.LYS_LEAFLIST,
            lib.LYS_NOTIF,
        )

    def _accept(node) -> bool:
        # keep only the requested node types
        if node.nodetype not in types:
            return False
        if skip_keys and node.nodetype == lib.LYS_LEAF:
            leaf = ffi.cast("struct lysc_node_leaf *", node)
            if leaf.flags & lib.LYS_KEY:
                return False
        return True

    if ffi.typeof(parent) == ffi.typeof("struct lys_module *"):
        module = parent.compiled
        parent = ffi.NULL
    else:
        module = ffi.NULL

    child = lib.lys_getnext(ffi.NULL, parent, module, options)
    while child:
        if _accept(child):
            yield SNode.new(context, child)
        child = lib.lys_getnext(child, parent, module, options)
# -------------------------------------------------------------------------------------
# compat: aliases kept for backward compatibility with the old, un-prefixed
# class names; prefer the S*-prefixed names in new code.
Container = SContainer
Leaf = SLeaf
LeafList = SLeafList
List = SList
Node = SNode
Rpc = SRpc
RpcInOut = SRpcInOut
Anyxml = SAnyxml
| [
"_libyang.lib.lyxp_get_expr",
"_libyang.ffi.new",
"_libyang.lib.lyd_value_get_canonical",
"_libyang.ffi.string",
"_libyang.ffi.typeof",
"_libyang.lib.free",
"_libyang.lib.lysc_node_when",
"_libyang.lib.lys_print_module",
"_libyang.lib.lys_set_implemented",
"_libyang.lib.ly_out_free",
"_libyang.f... | [((42292, 42342), '_libyang.lib.lys_getnext', 'lib.lys_getnext', (['ffi.NULL', 'parent', 'module', 'options'], {}), '(ffi.NULL, parent, module, options)\n', (42307, 42342), False, 'from _libyang import ffi, lib\n'), ((2088, 2123), '_libyang.ffi.new', 'ffi.new', (['"""char *[2]"""', '[p, ffi.NULL]'], {}), "('char *[2]', [p, ffi.NULL])\n", (2095, 2123), False, 'from _libyang import ffi, lib\n'), ((2138, 2176), '_libyang.lib.lys_set_implemented', 'lib.lys_set_implemented', (['self.cdata', 'q'], {}), '(self.cdata, q)\n', (2161, 2176), False, 'from _libyang import ffi, lib\n'), ((2412, 2440), '_libyang.ffi.new', 'ffi.new', (['"""char **"""', 'ffi.NULL'], {}), "('char **', ffi.NULL)\n", (2419, 2440), False, 'from _libyang import ffi, lib\n'), ((2455, 2495), '_libyang.lib.lys_set_implemented', 'lib.lys_set_implemented', (['self.cdata', 'val'], {}), '(self.cdata, val)\n', (2478, 2495), False, 'from _libyang import ffi, lib\n'), ((2995, 3016), '_libyang.ffi.new', 'ffi.new', (['"""uint32_t *"""'], {}), "('uint32_t *')\n", (3002, 3016), False, 'from _libyang import ffi, lib\n'), ((4310, 4337), '_libyang.ffi.new', 'ffi.new', (['"""struct ly_out **"""'], {}), "('struct ly_out **')\n", (4317, 4337), False, 'from _libyang import ffi, lib\n'), ((4528, 4588), '_libyang.lib.lys_print_module', 'lib.lys_print_module', (['out_data[0]', 'self.cdata', 'fmt', '(0)', 'flags'], {}), '(out_data[0], self.cdata, fmt, 0, flags)\n', (4548, 4588), False, 'from _libyang import ffi, lib\n'), ((4714, 4759), '_libyang.lib.ly_out_free', 'lib.ly_out_free', (['out_data[0]', 'ffi.NULL', '(False)'], {}), '(out_data[0], ffi.NULL, False)\n', (4729, 4759), False, 'from _libyang import ffi, lib\n'), ((14702, 14752), '_libyang.ffi.cast', 'ffi.cast', (['"""struct lysc_type_leafref *"""', 'self.cdata'], {}), "('struct lysc_type_leafref *', self.cdata)\n", (14710, 14752), False, 'from _libyang import ffi, lib\n'), ((14939, 14989), '_libyang.ffi.cast', 'ffi.cast', (['"""struct lysc_type_leafref 
*"""', 'self.cdata'], {}), "('struct lysc_type_leafref *', self.cdata)\n", (14947, 14989), False, 'from _libyang import ffi, lib\n'), ((15164, 15212), '_libyang.ffi.cast', 'ffi.cast', (['"""struct lysc_type_union *"""', 'self.cdata'], {}), "('struct lysc_type_union *', self.cdata)\n", (15172, 15212), False, 'from _libyang import ffi, lib\n'), ((15434, 15481), '_libyang.ffi.cast', 'ffi.cast', (['"""struct lysc_type_enum *"""', 'self.cdata'], {}), "('struct lysc_type_enum *', self.cdata)\n", (15442, 15481), False, 'from _libyang import ffi, lib\n'), ((15795, 15842), '_libyang.ffi.cast', 'ffi.cast', (['"""struct lysc_type_bits *"""', 'self.cdata'], {}), "('struct lysc_type_bits *', self.cdata)\n", (15803, 15842), False, 'from _libyang import ffi, lib\n'), ((28778, 28825), '_libyang.ffi.cast', 'ffi.cast', (['"""struct lysp_node *"""', 'self.cdata.priv'], {}), "('struct lysp_node *', self.cdata.priv)\n", (28786, 28825), False, 'from _libyang import ffi, lib\n'), ((30620, 30675), '_libyang.ffi.cast', 'ffi.cast', (['"""struct lysc_ext_instance *"""', 'self.cdata.exts'], {}), "('struct lysc_ext_instance *', self.cdata.exts)\n", (30628, 30675), False, 'from _libyang import ffi, lib\n'), ((31450, 31512), '_libyang.ffi.cast', 'ffi.cast', (['"""struct lysp_qname *"""', 'self.cdata_parsed.iffeatures'], {}), "('struct lysp_qname *', self.cdata_parsed.iffeatures)\n", (31458, 31512), False, 'from _libyang import ffi, lib\n'), ((32024, 32054), '_libyang.ffi.new', 'ffi.new', (['"""struct lysc_when **"""'], {}), "('struct lysc_when **')\n", (32031, 32054), False, 'from _libyang import ffi, lib\n'), ((32068, 32098), '_libyang.lib.lysc_node_when', 'lib.lysc_node_when', (['self.cdata'], {}), '(self.cdata)\n', (32086, 32098), False, 'from _libyang import ffi, lib\n'), ((32728, 32765), '_libyang.ffi.cast', 'ffi.cast', (['"""struct lysc_node *"""', 'cdata'], {}), "('struct lysc_node *', cdata)\n", (32736, 32765), False, 'from _libyang import ffi, lib\n'), ((33293, 33335), 
'_libyang.ffi.cast', 'ffi.cast', (['"""struct lysc_node_leaf *"""', 'cdata'], {}), "('struct lysc_node_leaf *', cdata)\n", (33301, 33335), False, 'from _libyang import ffi, lib\n'), ((33369, 33423), '_libyang.ffi.cast', 'ffi.cast', (['"""struct lysp_node_leaf *"""', 'self.cdata_parsed'], {}), "('struct lysp_node_leaf *', self.cdata_parsed)\n", (33377, 33423), False, 'from _libyang import ffi, lib\n'), ((33540, 33609), '_libyang.lib.lyd_value_get_canonical', 'lib.lyd_value_get_canonical', (['self.context.cdata', 'self.cdata_leaf.dflt'], {}), '(self.context.cdata, self.cdata_leaf.dflt)\n', (33567, 33609), False, 'from _libyang import ffi, lib\n'), ((34852, 34898), '_libyang.ffi.cast', 'ffi.cast', (['"""struct lysc_node_leaflist *"""', 'cdata'], {}), "('struct lysc_node_leaflist *', cdata)\n", (34860, 34898), False, 'from _libyang import ffi, lib\n'), ((34936, 34994), '_libyang.ffi.cast', 'ffi.cast', (['"""struct lysp_node_leaflist *"""', 'self.cdata_parsed'], {}), "('struct lysp_node_leaflist *', self.cdata_parsed)\n", (34944, 34994), False, 'from _libyang import ffi, lib\n'), ((36659, 36706), '_libyang.ffi.cast', 'ffi.cast', (['"""struct lysc_node_container *"""', 'cdata'], {}), "('struct lysc_node_container *', cdata)\n", (36667, 36706), False, 'from _libyang import ffi, lib\n'), ((36745, 36804), '_libyang.ffi.cast', 'ffi.cast', (['"""struct lysp_node_container *"""', 'self.cdata_parsed'], {}), "('struct lysp_node_container *', self.cdata_parsed)\n", (36753, 36804), False, 'from _libyang import ffi, lib\n'), ((37795, 37837), '_libyang.ffi.cast', 'ffi.cast', (['"""struct lysc_node_list *"""', 'cdata'], {}), "('struct lysc_node_list *', cdata)\n", (37803, 37837), False, 'from _libyang import ffi, lib\n'), ((37871, 37925), '_libyang.ffi.cast', 'ffi.cast', (['"""struct lysp_node_list *"""', 'self.cdata_parsed'], {}), "('struct lysp_node_list *', self.cdata_parsed)\n", (37879, 37925), False, 'from _libyang import ffi, lib\n'), ((38360, 38391), 
'_libyang.lib.lysc_node_child', 'lib.lysc_node_child', (['self.cdata'], {}), '(self.cdata)\n', (38379, 38391), False, 'from _libyang import ffi, lib\n'), ((39491, 39522), '_libyang.lib.lysc_node_child', 'lib.lysc_node_child', (['self.cdata'], {}), '(self.cdata)\n', (39510, 39522), False, 'from _libyang import ffi, lib\n'), ((39797, 39828), '_libyang.lib.lysc_node_child', 'lib.lysc_node_child', (['self.cdata'], {}), '(self.cdata)\n', (39816, 39828), False, 'from _libyang import ffi, lib\n'), ((41995, 42036), '_libyang.ffi.cast', 'ffi.cast', (['"""struct lysc_node_leaf *"""', 'node'], {}), "('struct lysc_node_leaf *', node)\n", (42003, 42036), False, 'from _libyang import ffi, lib\n'), ((42127, 42145), '_libyang.ffi.typeof', 'ffi.typeof', (['parent'], {}), '(parent)\n', (42137, 42145), False, 'from _libyang import ffi, lib\n'), ((42149, 42182), '_libyang.ffi.typeof', 'ffi.typeof', (['"""struct lys_module *"""'], {}), "('struct lys_module *')\n", (42159, 42182), False, 'from _libyang import ffi, lib\n'), ((42449, 42496), '_libyang.lib.lys_getnext', 'lib.lys_getnext', (['child', 'parent', 'module', 'options'], {}), '(child, parent, module, options)\n', (42464, 42496), False, 'from _libyang import ffi, lib\n'), ((3053, 3101), '_libyang.lib.lysp_feature_next', 'lib.lysp_feature_next', (['f', 'self.cdata.parsed', 'idx'], {}), '(f, self.cdata.parsed, idx)\n', (3074, 3101), False, 'from _libyang import ffi, lib\n'), ((4692, 4705), '_libyang.lib.free', 'lib.free', (['tmp'], {}), '(tmp)\n', (4700, 4705), False, 'from _libyang import ffi, lib\n'), ((15011, 15037), '_libyang.lib.lyxp_get_expr', 'lib.lyxp_get_expr', (['lr.path'], {}), '(lr.path)\n', (15028, 15037), False, 'from _libyang import ffi, lib\n'), ((17574, 17624), '_libyang.ffi.cast', 'ffi.cast', (['"""uint64_t *"""', 'self.cdata_parsed.patterns'], {}), "('uint64_t *', self.cdata_parsed.patterns)\n", (17582, 17624), False, 'from _libyang import ffi, lib\n'), ((20188, 20233), '_libyang.ffi.cast', 'ffi.cast', 
(['"""uint64_t *"""', 'self.cdata.iffeatures'], {}), "('uint64_t *', self.cdata.iffeatures)\n", (20196, 20233), False, 'from _libyang import ffi, lib\n'), ((30069, 30126), '_libyang.lib.lysc_path', 'lib.lysc_path', (['self.cdata', 'lib.LYSC_PATH_LOG', 'ffi.NULL', '(0)'], {}), '(self.cdata, lib.LYSC_PATH_LOG, ffi.NULL, 0)\n', (30082, 30126), False, 'from _libyang import ffi, lib\n'), ((30184, 30195), '_libyang.lib.free', 'lib.free', (['s'], {}), '(s)\n', (30192, 30195), False, 'from _libyang import ffi, lib\n'), ((30289, 30355), '_libyang.lib.lysc_path', 'lib.lysc_path', (['self.cdata', 'lib.LYSC_PATH_DATA_PATTERN', 'ffi.NULL', '(0)'], {}), '(self.cdata, lib.LYSC_PATH_DATA_PATTERN, ffi.NULL, 0)\n', (30302, 30355), False, 'from _libyang import ffi, lib\n'), ((30536, 30547), '_libyang.lib.free', 'lib.free', (['s'], {}), '(s)\n', (30544, 30547), False, 'from _libyang import ffi, lib\n'), ((35471, 35520), '_libyang.ffi.cast', 'ffi.cast', (['"""uint64_t *"""', 'self.cdata_leaflist.dflts'], {}), "('uint64_t *', self.cdata_leaflist.dflts)\n", (35479, 35520), False, 'from _libyang import ffi, lib\n'), ((35579, 35656), '_libyang.lib.lyd_value_get_canonical', 'lib.lyd_value_get_canonical', (['self.context.cdata', 'self.cdata_leaflist.dflts[i]'], {}), '(self.context.cdata, self.cdata_leaflist.dflts[i])\n', (35606, 35656), False, 'from _libyang import ffi, lib\n'), ((21813, 21839), '_libyang.ffi.string', 'ffi.string', (['self.cdata.str'], {}), '(self.cdata.str)\n', (21823, 21839), False, 'from _libyang import ffi, lib\n'), ((23056, 23076), 'contextlib.suppress', 'suppress', (['ValueError'], {}), '(ValueError)\n', (23064, 23076), False, 'from contextlib import suppress\n'), ((32208, 32236), '_libyang.lib.lyxp_get_expr', 'lib.lyxp_get_expr', (['cond.cond'], {}), '(cond.cond)\n', (32225, 32236), False, 'from _libyang import ffi, lib\n'), ((9760, 9792), '_libyang.ffi.string', 'ffi.string', (['cdata_imp_mod.prefix'], {}), '(cdata_imp_mod.prefix)\n', (9770, 9792), False, 'from 
_libyang import ffi, lib\n')] |
"""Main vcf2maf logic for spec gdc-1.0.0-aliquot"""
import urllib.parse
from operator import itemgetter
import pysam
from maflib.header import MafHeader, MafHeaderRecord
from maflib.sort_order import BarcodesAndCoordinate
from maflib.sorter import MafSorter
from maflib.validation import ValidationStringency
from maflib.writer import MafWriter
import aliquotmaf.annotators as Annotators
import aliquotmaf.filters as Filters
import aliquotmaf.subcommands.vcf_to_aliquot.extractors as Extractors
from aliquotmaf.converters.builder import get_builder
from aliquotmaf.converters.collection import InputCollection
from aliquotmaf.converters.formatters import (
format_all_effects,
format_alleles,
format_depths,
format_vcf_columns,
)
from aliquotmaf.converters.utils import get_columns_from_header, init_empty_maf_record
from aliquotmaf.subcommands.utils import (
assert_sample_in_header,
extract_annotation_from_header,
load_enst,
load_json,
)
from aliquotmaf.subcommands.vcf_to_aliquot.runners import BaseRunner
class GDC_1_0_0_Aliquot(BaseRunner):
def __init__(self, options=dict()):
super(GDC_1_0_0_Aliquot, self).__init__(options)
# Load the resource files
self.logger.info("Loading priority files")
self.biotype_priority = load_json(self.options["biotype_priority_file"])
self.effect_priority = load_json(self.options["effect_priority_file"])
self.custom_enst = (
load_enst(self.options["custom_enst"])
if self.options["custom_enst"]
else None
)
# Schema
self.options["version"] = "gdc-1.0.0"
self.options["annotation"] = "gdc-1.0.0-aliquot"
# Annotators
self.annotators = {
"dbsnp_priority_db": None,
"reference_context": None,
"cosmic_id": None,
"mutation_status": None,
"non_tcga_exac": None,
"hotspots": None,
}
# Filters
self.filters = {
"common_in_exac": None,
"gdc_blacklist": None,
"normal_depth": None,
"gdc_pon": None,
"multiallelic": None,
"nonexonic": None,
"offtarget": None,
}
@classmethod
def __validate_options__(cls, options):
"""Validates the tumor only stuff"""
if options.tumor_only:
options.normal_vcf_id = None
else:
if options.normal_aliquot_uuid is None:
raise ValueError("--normal_aliquot_uuid is required")
if options.normal_submitter_id is None:
raise ValueError("--normal_submitter_id is required")
if options.normal_bam_uuid is None:
raise ValueError("--normal_bam_uuid is required")
    @classmethod
    def __add_arguments__(cls, parser):
        """Add the arguments to the parser.

        Registers four argument groups: VCF options, sample metadata,
        annotation resources, and filtering options.
        """
        # VCF options: input sample names and caller/file provenance.
        vcf = parser.add_argument_group(title="VCF options")
        vcf.add_argument(
            "--tumor_only", action="store_true", help="Is this a tumor-only VCF?"
        )
        vcf.add_argument(
            "-t",
            "--tumor_vcf_id",
            default="TUMOR",
            help="Name of the tumor sample in the VCF",
        )
        vcf.add_argument(
            "-n",
            "--normal_vcf_id",
            default="NORMAL",
            help="Name of the normal sample in the VCF",
        )
        vcf.add_argument(
            "--caller_id",
            required=True,
            help="Name of the caller used to detect mutations",
        )
        vcf.add_argument(
            "--src_vcf_uuid", required=True, help="The UUID of the src VCF file"
        )

        # Sample metadata: aliquot/bam UUIDs and submitter IDs written into
        # the MAF output.
        sample = parser.add_argument_group(title="Sample Metadata")
        sample.add_argument("--case_uuid", required=True, help="Sample case UUID")
        sample.add_argument(
            "--tumor_submitter_id",
            required=True,
            help="Tumor sample aliquot submitter ID",
        )
        sample.add_argument(
            "--tumor_aliquot_uuid", required=True, help="Tumor sample aliquot UUID"
        )
        sample.add_argument(
            "--tumor_bam_uuid", required=True, help="Tumor sample bam UUID"
        )
        # Normal-sample identifiers are optional here; __validate_options__
        # enforces them when the run is not tumor-only.
        sample.add_argument(
            "--normal_submitter_id", help="Normal sample aliquot submitter ID"
        )
        sample.add_argument("--normal_aliquot_uuid", help="Normal sample aliquot UUID")
        sample.add_argument("--normal_bam_uuid", help="Normal sample bam UUID")
        # action="append" options may be given multiple times.
        sample.add_argument("--sequencer", action="append", help="The sequencer used")
        sample.add_argument(
            "--maf_center", action="append", required=True, help="The sequencing center"
        )

        # Annotation resources: reference files consumed by setup_annotators.
        anno = parser.add_argument_group(title="Annotation Resources")
        anno.add_argument(
            "--biotype_priority_file", required=True, help="Biotype priority JSON"
        )
        anno.add_argument(
            "--effect_priority_file", required=True, help="Effect priority JSON"
        )
        anno.add_argument(
            "--custom_enst", default=None, help="Optional custom ENST overrides"
        )
        anno.add_argument(
            "--dbsnp_priority_db", default=None, help="DBSNP priority sqlite database"
        )
        anno.add_argument(
            "--reference_fasta", required=True, help="Reference fasta file"
        )
        anno.add_argument(
            "--reference_fasta_index", required=True, help="Reference fasta fai file"
        )
        anno.add_argument(
            "--reference_context_size",
            type=int,
            default=5,
            help="Number of BP to add both upstream and "
            + "downstream from variant for reference context",
        )
        anno.add_argument(
            "--cosmic_vcf", default=None, help="Optional COSMIC VCF for annotating"
        )
        anno.add_argument(
            "--non_tcga_exac_vcf",
            default=None,
            help="Optional non-TCGA ExAC VCF for annotating and filtering",
        )
        anno.add_argument("--hotspot_tsv", default=None, help="Optional hotspot TSV")

        # Filtering options: thresholds and resources consumed by
        # setup_filters; each optional resource enables its filter.
        filt = parser.add_argument_group(title="Filtering Options")
        filt.add_argument(
            "--exac_freq_cutoff",
            default=0.001,
            type=float,
            help="Flag variants where the allele frequency in any ExAC population "
            + "is great than this value as common_in_exac [0.001]",
        )
        filt.add_argument(
            "--gdc_blacklist",
            type=str,
            default=None,
            help="The file containing the blacklist tags and tumor aliquot uuids to "
            + "apply them to.",
        )
        filt.add_argument(
            "--min_n_depth",
            default=7,
            type=int,
            help="Flag variants where normal depth is <= INT as ndp [7].",
        )
        filt.add_argument(
            "--gdc_pon_vcf",
            type=str,
            default=None,
            help="The tabix-indexed panel of normals VCF for applying the gdc "
            + "pon filter",
        )
        filt.add_argument(
            "--nonexonic_intervals",
            type=str,
            default=None,
            help="Flag variants outside of this tabix-indexed bed file "
            + "as NonExonic",
        )
        filt.add_argument(
            "--target_intervals",
            action="append",
            help="Flag variants outside of these tabix-indexed bed files "
            + "as off_target. Use one or more times.",
        )
def setup_maf_header(self):
"""
Sets up the maf header.
"""
self.maf_header = MafHeader.from_defaults(
version=self.options["version"],
annotation=self.options["annotation"],
sort_order=BarcodesAndCoordinate(),
fasta_index=self.options["reference_fasta_index"],
)
header_date = BaseRunner.get_header_date()
self.maf_header[header_date.key] = header_date
if not self.options["tumor_only"]:
normal_aliquot = MafHeaderRecord(
key="normal.aliquot",
value=self.options["normal_aliquot_uuid"]
if not self.options["tumor_only"]
else "",
)
self.maf_header[normal_aliquot.key] = normal_aliquot
tumor_aliquot = MafHeaderRecord(
key="tumor.aliquot", value=self.options["tumor_aliquot_uuid"]
)
self.maf_header[tumor_aliquot.key] = tumor_aliquot
    def do_work(self):
        """Main wrapper function for running vcf2maf.

        Pipeline: build the MAF header, stream the input VCF record by
        record through extract() -> transform() into a disk-backed sorter,
        then write the sorted records to the output MAF. Resources (VCF
        handle, sorter, writer, annotators) are released in the finally
        block even on failure.
        """
        self.logger.info(
            "Processing input vcf {0}...".format(self.options["input_vcf"])
        )

        # Initialize the maf file
        self.setup_maf_header()

        # Sorter buffers records and sorts by barcode + coordinate before
        # the final write.
        sorter = MafSorter(
            max_objects_in_ram=100000,
            sort_order_name=BarcodesAndCoordinate.name(),
            scheme=self.maf_header.scheme(),
            fasta_index=self.options["reference_fasta_index"],
        )

        # Cache the scheme/column metadata used by transform().
        self._scheme = self.maf_header.scheme()
        self._columns = get_columns_from_header(self.maf_header)
        self._colset = set(self._columns)

        # Initialize vcf reader
        vcf_object = pysam.VariantFile(self.options["input_vcf"])

        tumor_sample_id = self.options["tumor_vcf_id"]
        normal_sample_id = self.options["normal_vcf_id"]
        is_tumor_only = self.options["tumor_only"]

        try:
            # Validate samples; the normal sample is allowed to be absent
            # only in tumor-only mode.
            tumor_idx = assert_sample_in_header(
                vcf_object, self.options["tumor_vcf_id"]
            )
            normal_idx = assert_sample_in_header(
                vcf_object, self.options["normal_vcf_id"], can_fail=is_tumor_only
            )

            # Extract the VEP annotation columns from the VCF header.
            ann_cols_format, vep_key = extract_annotation_from_header(
                vcf_object, vep_key="CSQ"
            )

            # Initialize annotators
            self.setup_annotators()

            # Initialize filters
            self.setup_filters()

            # Convert each VCF record to a MAF record.
            line = 0
            for vcf_record in vcf_object.fetch():
                line += 1
                if line % 1000 == 0:
                    self.logger.info("Processed {0} records...".format(line))

                # Extract data
                data = self.extract(
                    tumor_sample_id,
                    normal_sample_id,
                    tumor_idx,
                    normal_idx,
                    ann_cols_format,
                    vep_key,
                    vcf_record,
                    is_tumor_only,
                )

                # Skip rare occasions where VEP doesn't provide IMPACT or the consequence is ?
                if (
                    not data["selected_effect"]["IMPACT"]
                    or data["selected_effect"]["One_Consequence"] == "?"
                ):
                    self.logger.warn(
                        "Skipping record with unknown impact or consequence: {0} - {1}".format(
                            data["selected_effect"]["IMPACT"],
                            data["selected_effect"]["One_Consequence"],
                        )
                    )
                    continue

                # Transform
                maf_record = self.transform(
                    vcf_record, data, is_tumor_only, line_number=line
                )

                # Add to sorter
                sorter += maf_record

            # Write the sorted records.
            self.logger.info("Writing {0} sorted records...".format(line))

            self.maf_writer = MafWriter.from_path(
                path=self.options["output_maf"],
                header=self.maf_header,
                validation_stringency=ValidationStringency.Strict,
            )

            counter = 0
            for record in sorter:
                counter += 1
                if counter % 1000 == 0:
                    self.logger.info("Wrote {0} records...".format(counter))
                self.maf_writer += record
            self.logger.info("Finished writing {0} records".format(counter))

        finally:
            # Always release I/O resources, even on error.
            vcf_object.close()
            sorter.close()
            if self.maf_writer:
                self.maf_writer.close()

            for anno in self.annotators:
                if self.annotators[anno]:
                    self.annotators[anno].shutdown()

        self.logger.info("Finished")
    def extract(
        self,
        tumor_sample_id,
        normal_sample_id,
        tumor_idx,
        normal_idx,
        ann_cols,
        vep_key,
        record,
        is_tumor_only,
    ):
        """
        Extract the VCF information needed to transform into MAF.

        Runs the extractor pipeline over a single VCF record: variant
        allele index -> genotypes/depths -> location data -> VEP effects
        (all, then one selected) -> population frequencies -> variant
        class.

        :param tumor_sample_id: tumor sample name in the VCF
        :param normal_sample_id: normal sample name in the VCF (may be
            absent in tumor-only mode)
        :param tumor_idx: tumor sample column index in the VCF
        :param normal_idx: normal sample column index in the VCF
        :param ann_cols: VEP annotation column names from the VCF header
        :param vep_key: INFO key holding the VEP annotation (e.g. "CSQ")
        :param record: the pysam VCF record
        :param is_tumor_only: True when there is no matched normal
        :return: dict of extracted values consumed by :meth:`transform`
        """
        dic = {
            "var_allele_idx": None,
            "tumor_gt": None,
            "tumor_depths": None,
            "normal_gt": None,
            "normal_depths": None,
            "location_data": None,
            "effects": None,
            "selected_effect": None,
            "variant_class": None,
        }

        # Genotypes
        var_allele_idx = Extractors.VariantAlleleIndexExtractor.extract(
            tumor_genotype=record.samples[tumor_sample_id]
        )
        tumor_gt, tumor_depths = Extractors.GenotypeAndDepthsExtractor.extract(
            var_allele_idx=var_allele_idx,
            genotype=record.samples[tumor_sample_id],
            alleles=record.alleles,
        )

        # Normal genotype/depths only exist for tumor/normal pairs.
        if not is_tumor_only:
            normal_gt, normal_depths = Extractors.GenotypeAndDepthsExtractor.extract(
                var_allele_idx=var_allele_idx,
                genotype=record.samples[normal_sample_id],
                alleles=record.alleles,
            )
        else:
            normal_gt, normal_depths = None, None

        # Locations
        location_data = Extractors.LocationDataExtractor.extract(
            ref_allele=record.ref,
            var_allele=record.alleles[var_allele_idx],
            position=record.pos,
            alleles=record.alleles,
        )

        # Handle effects: VEP fields are percent-encoded and pipe-delimited.
        effects = Extractors.EffectsExtractor.extract(
            effect_priority=self.effect_priority,
            biotype_priority=self.biotype_priority,
            effect_keys=ann_cols,
            effect_list=[
                urllib.parse.unquote(i).split("|") for i in record.info[vep_key]
            ],
            var_idx=var_allele_idx,
        )

        # Pick the single representative effect by priority / custom ENST.
        effects, selected_effect = Extractors.SelectOneEffectExtractor.extract(
            all_effects=effects,
            effect_priority=self.effect_priority,
            biotype_priority=self.biotype_priority,
            custom_enst=self.custom_enst,
        )

        selected_effect = Extractors.PopulationFrequencyExtractor.extract(
            effect=selected_effect, var_allele=location_data["var_allele"]
        )

        # Handle variant class
        variant_class = Extractors.VariantClassExtractor.extract(
            cons=selected_effect["One_Consequence"],
            var_type=location_data["var_type"],
            inframe=location_data["inframe"],
        )

        # Make return dictionary
        dic["var_allele_idx"] = var_allele_idx
        dic["tumor_gt"] = tumor_gt
        dic["tumor_depths"] = tumor_depths
        dic["normal_gt"] = normal_gt
        dic["normal_depths"] = normal_depths
        dic["location_data"] = location_data
        dic["effects"] = format_all_effects(effects)
        dic["selected_effect"] = selected_effect
        dic["variant_class"] = variant_class
        dic["vcf_columns"] = format_vcf_columns(
            vcf_record=record,
            vep_key=vep_key,
            tumor_idx=tumor_idx,
            normal_idx=normal_idx,
        )
        return dic
def transform(self, vcf_record, data, is_tumor_only, line_number=None):
"""
Transform into maf record.
"""
# Generic data
collection = InputCollection()
keys = itemgetter("selected_effect", itemgetter("Hugo_Symbol"))
collection.add(
column="Hugo_Symbol",
value=data["selected_effect"].get("Hugo_Symbol"),
default="Unknown",
)
collection.add(
column="Entrez_Gene_Id", value=data["selected_effect"]["Entrez_Gene_Id"]
)
collection.add(column="Center", value=self.options["maf_center"])
collection.add(column="NCBI_Build", value="GRCh38")
collection.add(column="Chromosome", value=vcf_record.chrom)
collection.add(column="Start_Position", value=data["location_data"]["start"])
collection.add(column="End_Position", value=data["location_data"]["stop"])
collection.add(column="Strand", value="+")
collection.add(column="Variant_Classification", value=data["variant_class"])
collection.add(column="Variant_Type", value=data["location_data"]["var_type"])
collection.add(
column="Reference_Allele", value=data["location_data"]["ref_allele"]
)
for k, v in zip(
["Tumor_Seq_Allele1", "Tumor_Seq_Allele2"],
format_alleles(
genotype=data["tumor_gt"],
alleles=data["location_data"]["alleles"],
defaults=[
data["location_data"]["ref_allele"],
data["location_data"]["var_allele"],
],
),
):
collection.add(column=k, value=v)
if not is_tumor_only:
for k, v in zip(
["Match_Norm_Seq_Allele1", "Match_Norm_Seq_Allele2"],
format_alleles(
genotype=data["normal_gt"],
alleles=data["location_data"]["alleles"],
defaults=[
data["location_data"]["ref_allele"],
data["location_data"]["ref_allele"],
],
),
):
collection.add(column=k, value=v)
else:
for k in ["Match_Norm_Seq_Allele1", "Match_Norm_Seq_Allele2"]:
collection.add(column=k, value="")
collection.add(column="dbSNP_RS", value=data["selected_effect"]["dbSNP_RS"])
collection.add(
column="Tumor_Sample_Barcode", value=self.options["tumor_submitter_id"]
)
collection.add(
column="Matched_Norm_Sample_Barcode",
value=self.options["normal_submitter_id"],
default="",
)
collection.add(column="Sequencer", value=self.options["sequencer"], default="")
collection.add(
column="Tumor_Sample_UUID", value=self.options["tumor_aliquot_uuid"]
)
collection.add(
column="Matched_Norm_Sample_UUID",
value=self.options["normal_aliquot_uuid"],
default="",
)
collection.add(column="all_effects", value=";".join(data["effects"]))
for k, v in zip(
["t_depth", "t_ref_count", "t_alt_count"],
format_depths(
genotype=data["tumor_gt"],
depths=data["tumor_depths"],
var_allele_idx=data["var_allele_idx"],
default_total_dp=0,
),
):
collection.add(column=k, value=v)
if not is_tumor_only:
for k, v in zip(
["n_depth", "n_ref_count", "n_alt_count"],
format_depths(
genotype=data["normal_gt"],
depths=data["normal_depths"],
var_allele_idx=data["var_allele_idx"],
),
):
collection.add(column=k, value=v)
else:
for k in ["n_depth", "n_ref_count", "n_alt_count"]:
collection.add(column=k, value=None)
for k in data["selected_effect"]:
if k in self._colset and k not in collection._colset:
collection.add(column=k, value=data["selected_effect"][k])
# Set other uuids
collection.add(column="src_vcf_id", value=self.options["src_vcf_uuid"])
collection.add(column="tumor_bam_uuid", value=self.options["tumor_bam_uuid"])
collection.add(column="normal_bam_uuid", value=self.options["normal_bam_uuid"])
collection.add(column="case_id", value=self.options["case_uuid"])
# VCF columns
collection.add(column="FILTER", value=";".join(sorted(list(vcf_record.filter))))
collection.add(column="vcf_region", value=data["vcf_columns"]["vcf_region"])
collection.add(column="vcf_info", value=data["vcf_columns"]["vcf_info"])
collection.add(column="vcf_format", value=data["vcf_columns"]["vcf_format"])
collection.add(column="vcf_tumor_gt", value=data["vcf_columns"]["vcf_tumor_gt"])
collection.add(
column="vcf_normal_gt", value=data["vcf_columns"].get("vcf_normal_gt")
)
# Set the other columns to none
collection.add(column="Score", value="")
collection.add(column="BAM_File", value="")
collection.add(column="Sequencing_Phase", value="")
anno_set = ("dbSNP_Val_Status", "COSMIC", "CONTEXT", "Mutation_Status")
for i in self._colset - set(collection.columns()):
if i not in anno_set:
collection.add(column=i, value=None)
collection.transform(self._scheme)
# Generate maf record
maf_record = init_empty_maf_record(line_number=line_number)
for i in collection:
maf_record += i.transformed
# Annotations
if self.annotators["dbsnp_priority_db"]:
maf_record = self.annotators["dbsnp_priority_db"].annotate(maf_record)
else:
maf_record["dbSNP_Val_Status"] = get_builder(
"dbSNP_Val_Status", self._scheme, value=None
)
if self.annotators["cosmic_id"]:
maf_record = self.annotators["cosmic_id"].annotate(maf_record, vcf_record)
else:
maf_record["COSMIC"] = get_builder("COSMIC", self._scheme, value=None)
if self.annotators["non_tcga_exac"]:
maf_record = self.annotators["non_tcga_exac"].annotate(
maf_record, vcf_record, var_allele_idx=data["var_allele_idx"]
)
if self.annotators["hotspots"]:
maf_record = self.annotators["hotspots"].annotate(maf_record)
else:
maf_record["hotspot"] = get_builder("hotspot", self._scheme, value=None)
maf_record = self.annotators["reference_context"].annotate(
maf_record, vcf_record
)
maf_record = self.annotators["mutation_status"].annotate(
maf_record, vcf_record, self.options["tumor_vcf_id"]
)
# Filters
gdc_filters = []
for filt_key in self.filters:
filt_obj = self.filters[filt_key]
if filt_obj and filt_obj.filter(maf_record):
gdc_filters.extend(filt_obj.tags)
maf_record["GDC_FILTER"] = get_builder(
"GDC_FILTER", self._scheme, value=";".join(sorted(gdc_filters))
)
return maf_record
def setup_annotators(self):
"""
Sets up all annotator classes.
"""
self.annotators["mutation_status"] = Annotators.MutationStatus.setup(
self._scheme, self.options["caller_id"]
)
self.annotators["reference_context"] = Annotators.ReferenceContext.setup(
self._scheme,
self.options["reference_fasta"],
self.options["reference_context_size"],
)
if self.options["dbsnp_priority_db"]:
self.annotators["dbsnp_priority_db"] = Annotators.DbSnpValidation.setup(
self._scheme, self.options["dbsnp_priority_db"]
)
if self.options["cosmic_vcf"]:
self.annotators["cosmic_id"] = Annotators.CosmicID.setup(
self._scheme, self.options["cosmic_vcf"]
)
if self.options["non_tcga_exac_vcf"]:
self.annotators["non_tcga_exac"] = Annotators.NonTcgaExac.setup(
self._scheme, self.options["non_tcga_exac_vcf"]
)
if self.options["hotspot_tsv"]:
self.annotators["hotspots"] = Annotators.Hotspot.setup(
self._scheme, self.options["hotspot_tsv"]
)
def setup_filters(self):
"""
Sets up all filter classes.
"""
self.filters["common_in_exac"] = Filters.ExAC.setup(
self.options["exac_freq_cutoff"]
)
self.filters["multiallelic"] = Filters.Multiallelic.setup()
if self.options["gdc_blacklist"]:
self.filters["gdc_blacklist"] = Filters.GdcBlacklist.setup(
self.options["gdc_blacklist"]
)
if not self.options["tumor_only"]:
self.filters["normal_depth"] = Filters.NormalDepth.setup(
self.options["min_n_depth"]
)
if self.options["gdc_pon_vcf"]:
self.filters["gdc_pon"] = Filters.GdcPon.setup(self.options["gdc_pon_vcf"])
if self.options["nonexonic_intervals"]:
self.filters["nonexonic"] = Filters.NonExonic.setup(
self.options["nonexonic_intervals"]
)
if self.options["target_intervals"]:
self.filters["off_target"] = Filters.OffTarget.setup(
self.options["target_intervals"]
)
    @classmethod
    def __tool_name__(cls):
        """Return the canonical tool name/version identifier for this runner."""
        return "gdc-1.0.0-aliquot"
| [
"aliquotmaf.subcommands.vcf_to_aliquot.extractors.VariantAlleleIndexExtractor.extract",
"aliquotmaf.converters.formatters.format_vcf_columns",
"aliquotmaf.filters.ExAC.setup",
"maflib.sort_order.BarcodesAndCoordinate",
"aliquotmaf.converters.formatters.format_depths",
"aliquotmaf.converters.utils.get_colu... | [((1298, 1346), 'aliquotmaf.subcommands.utils.load_json', 'load_json', (["self.options['biotype_priority_file']"], {}), "(self.options['biotype_priority_file'])\n", (1307, 1346), False, 'from aliquotmaf.subcommands.utils import assert_sample_in_header, extract_annotation_from_header, load_enst, load_json\n'), ((1378, 1425), 'aliquotmaf.subcommands.utils.load_json', 'load_json', (["self.options['effect_priority_file']"], {}), "(self.options['effect_priority_file'])\n", (1387, 1425), False, 'from aliquotmaf.subcommands.utils import assert_sample_in_header, extract_annotation_from_header, load_enst, load_json\n'), ((7984, 8012), 'aliquotmaf.subcommands.vcf_to_aliquot.runners.BaseRunner.get_header_date', 'BaseRunner.get_header_date', ([], {}), '()\n', (8010, 8012), False, 'from aliquotmaf.subcommands.vcf_to_aliquot.runners import BaseRunner\n'), ((8433, 8511), 'maflib.header.MafHeaderRecord', 'MafHeaderRecord', ([], {'key': '"""tumor.aliquot"""', 'value': "self.options['tumor_aliquot_uuid']"}), "(key='tumor.aliquot', value=self.options['tumor_aliquot_uuid'])\n", (8448, 8511), False, 'from maflib.header import MafHeader, MafHeaderRecord\n'), ((9169, 9209), 'aliquotmaf.converters.utils.get_columns_from_header', 'get_columns_from_header', (['self.maf_header'], {}), '(self.maf_header)\n', (9192, 9209), False, 'from aliquotmaf.converters.utils import get_columns_from_header, init_empty_maf_record\n'), ((9306, 9350), 'pysam.VariantFile', 'pysam.VariantFile', (["self.options['input_vcf']"], {}), "(self.options['input_vcf'])\n", (9323, 9350), False, 'import pysam\n'), ((13212, 13311), 'aliquotmaf.subcommands.vcf_to_aliquot.extractors.VariantAlleleIndexExtractor.extract', 'Extractors.VariantAlleleIndexExtractor.extract', ([], {'tumor_genotype': 'record.samples[tumor_sample_id]'}), '(tumor_genotype=record.\n samples[tumor_sample_id])\n', (13258, 13311), True, 'import aliquotmaf.subcommands.vcf_to_aliquot.extractors as Extractors\n'), 
((13362, 13508), 'aliquotmaf.subcommands.vcf_to_aliquot.extractors.GenotypeAndDepthsExtractor.extract', 'Extractors.GenotypeAndDepthsExtractor.extract', ([], {'var_allele_idx': 'var_allele_idx', 'genotype': 'record.samples[tumor_sample_id]', 'alleles': 'record.alleles'}), '(var_allele_idx=var_allele_idx,\n genotype=record.samples[tumor_sample_id], alleles=record.alleles)\n', (13407, 13508), True, 'import aliquotmaf.subcommands.vcf_to_aliquot.extractors as Extractors\n'), ((13938, 14099), 'aliquotmaf.subcommands.vcf_to_aliquot.extractors.LocationDataExtractor.extract', 'Extractors.LocationDataExtractor.extract', ([], {'ref_allele': 'record.ref', 'var_allele': 'record.alleles[var_allele_idx]', 'position': 'record.pos', 'alleles': 'record.alleles'}), '(ref_allele=record.ref, var_allele=\n record.alleles[var_allele_idx], position=record.pos, alleles=record.alleles\n )\n', (13978, 14099), True, 'import aliquotmaf.subcommands.vcf_to_aliquot.extractors as Extractors\n'), ((14570, 14751), 'aliquotmaf.subcommands.vcf_to_aliquot.extractors.SelectOneEffectExtractor.extract', 'Extractors.SelectOneEffectExtractor.extract', ([], {'all_effects': 'effects', 'effect_priority': 'self.effect_priority', 'biotype_priority': 'self.biotype_priority', 'custom_enst': 'self.custom_enst'}), '(all_effects=effects,\n effect_priority=self.effect_priority, biotype_priority=self.\n biotype_priority, custom_enst=self.custom_enst)\n', (14613, 14751), True, 'import aliquotmaf.subcommands.vcf_to_aliquot.extractors as Extractors\n'), ((14829, 14944), 'aliquotmaf.subcommands.vcf_to_aliquot.extractors.PopulationFrequencyExtractor.extract', 'Extractors.PopulationFrequencyExtractor.extract', ([], {'effect': 'selected_effect', 'var_allele': "location_data['var_allele']"}), "(effect=selected_effect,\n var_allele=location_data['var_allele'])\n", (14876, 14944), True, 'import aliquotmaf.subcommands.vcf_to_aliquot.extractors as Extractors\n'), ((15019, 15180), 
'aliquotmaf.subcommands.vcf_to_aliquot.extractors.VariantClassExtractor.extract', 'Extractors.VariantClassExtractor.extract', ([], {'cons': "selected_effect['One_Consequence']", 'var_type': "location_data['var_type']", 'inframe': "location_data['inframe']"}), "(cons=selected_effect[\n 'One_Consequence'], var_type=location_data['var_type'], inframe=\n location_data['inframe'])\n", (15059, 15180), True, 'import aliquotmaf.subcommands.vcf_to_aliquot.extractors as Extractors\n'), ((15529, 15556), 'aliquotmaf.converters.formatters.format_all_effects', 'format_all_effects', (['effects'], {}), '(effects)\n', (15547, 15556), False, 'from aliquotmaf.converters.formatters import format_all_effects, format_alleles, format_depths, format_vcf_columns\n'), ((15680, 15782), 'aliquotmaf.converters.formatters.format_vcf_columns', 'format_vcf_columns', ([], {'vcf_record': 'record', 'vep_key': 'vep_key', 'tumor_idx': 'tumor_idx', 'normal_idx': 'normal_idx'}), '(vcf_record=record, vep_key=vep_key, tumor_idx=tumor_idx,\n normal_idx=normal_idx)\n', (15698, 15782), False, 'from aliquotmaf.converters.formatters import format_all_effects, format_alleles, format_depths, format_vcf_columns\n'), ((16038, 16055), 'aliquotmaf.converters.collection.InputCollection', 'InputCollection', ([], {}), '()\n', (16053, 16055), False, 'from aliquotmaf.converters.collection import InputCollection\n'), ((21554, 21600), 'aliquotmaf.converters.utils.init_empty_maf_record', 'init_empty_maf_record', ([], {'line_number': 'line_number'}), '(line_number=line_number)\n', (21575, 21600), False, 'from aliquotmaf.converters.utils import get_columns_from_header, init_empty_maf_record\n'), ((23411, 23483), 'aliquotmaf.annotators.MutationStatus.setup', 'Annotators.MutationStatus.setup', (['self._scheme', "self.options['caller_id']"], {}), "(self._scheme, self.options['caller_id'])\n", (23442, 23483), True, 'import aliquotmaf.annotators as Annotators\n'), ((23554, 23679), 'aliquotmaf.annotators.ReferenceContext.setup', 
'Annotators.ReferenceContext.setup', (['self._scheme', "self.options['reference_fasta']", "self.options['reference_context_size']"], {}), "(self._scheme, self.options[\n 'reference_fasta'], self.options['reference_context_size'])\n", (23587, 23679), True, 'import aliquotmaf.annotators as Annotators\n'), ((24627, 24679), 'aliquotmaf.filters.ExAC.setup', 'Filters.ExAC.setup', (["self.options['exac_freq_cutoff']"], {}), "(self.options['exac_freq_cutoff'])\n", (24645, 24679), True, 'import aliquotmaf.filters as Filters\n'), ((24742, 24770), 'aliquotmaf.filters.Multiallelic.setup', 'Filters.Multiallelic.setup', ([], {}), '()\n', (24768, 24770), True, 'import aliquotmaf.filters as Filters\n'), ((1467, 1505), 'aliquotmaf.subcommands.utils.load_enst', 'load_enst', (["self.options['custom_enst']"], {}), "(self.options['custom_enst'])\n", (1476, 1505), False, 'from aliquotmaf.subcommands.utils import assert_sample_in_header, extract_annotation_from_header, load_enst, load_json\n'), ((8141, 8268), 'maflib.header.MafHeaderRecord', 'MafHeaderRecord', ([], {'key': '"""normal.aliquot"""', 'value': "(self.options['normal_aliquot_uuid'] if not self.options['tumor_only'] else '')"}), "(key='normal.aliquot', value=self.options[\n 'normal_aliquot_uuid'] if not self.options['tumor_only'] else '')\n", (8156, 8268), False, 'from maflib.header import MafHeader, MafHeaderRecord\n'), ((9583, 9648), 'aliquotmaf.subcommands.utils.assert_sample_in_header', 'assert_sample_in_header', (['vcf_object', "self.options['tumor_vcf_id']"], {}), "(vcf_object, self.options['tumor_vcf_id'])\n", (9606, 9648), False, 'from aliquotmaf.subcommands.utils import assert_sample_in_header, extract_annotation_from_header, load_enst, load_json\n'), ((9704, 9799), 'aliquotmaf.subcommands.utils.assert_sample_in_header', 'assert_sample_in_header', (['vcf_object', "self.options['normal_vcf_id']"], {'can_fail': 'is_tumor_only'}), "(vcf_object, self.options['normal_vcf_id'], can_fail\n =is_tumor_only)\n", (9727, 9799), 
False, 'from aliquotmaf.subcommands.utils import assert_sample_in_header, extract_annotation_from_header, load_enst, load_json\n'), ((9910, 9967), 'aliquotmaf.subcommands.utils.extract_annotation_from_header', 'extract_annotation_from_header', (['vcf_object'], {'vep_key': '"""CSQ"""'}), "(vcf_object, vep_key='CSQ')\n", (9940, 9967), False, 'from aliquotmaf.subcommands.utils import assert_sample_in_header, extract_annotation_from_header, load_enst, load_json\n'), ((11704, 11835), 'maflib.writer.MafWriter.from_path', 'MafWriter.from_path', ([], {'path': "self.options['output_maf']", 'header': 'self.maf_header', 'validation_stringency': 'ValidationStringency.Strict'}), "(path=self.options['output_maf'], header=self.maf_header,\n validation_stringency=ValidationStringency.Strict)\n", (11723, 11835), False, 'from maflib.writer import MafWriter\n'), ((13622, 13769), 'aliquotmaf.subcommands.vcf_to_aliquot.extractors.GenotypeAndDepthsExtractor.extract', 'Extractors.GenotypeAndDepthsExtractor.extract', ([], {'var_allele_idx': 'var_allele_idx', 'genotype': 'record.samples[normal_sample_id]', 'alleles': 'record.alleles'}), '(var_allele_idx=var_allele_idx,\n genotype=record.samples[normal_sample_id], alleles=record.alleles)\n', (13667, 13769), True, 'import aliquotmaf.subcommands.vcf_to_aliquot.extractors as Extractors\n'), ((16101, 16126), 'operator.itemgetter', 'itemgetter', (['"""Hugo_Symbol"""'], {}), "('Hugo_Symbol')\n", (16111, 16126), False, 'from operator import itemgetter\n'), ((17211, 17389), 'aliquotmaf.converters.formatters.format_alleles', 'format_alleles', ([], {'genotype': "data['tumor_gt']", 'alleles': "data['location_data']['alleles']", 'defaults': "[data['location_data']['ref_allele'], data['location_data']['var_allele']]"}), "(genotype=data['tumor_gt'], alleles=data['location_data'][\n 'alleles'], defaults=[data['location_data']['ref_allele'], data[\n 'location_data']['var_allele']])\n", (17225, 17389), False, 'from aliquotmaf.converters.formatters import 
format_all_effects, format_alleles, format_depths, format_vcf_columns\n'), ((19134, 19266), 'aliquotmaf.converters.formatters.format_depths', 'format_depths', ([], {'genotype': "data['tumor_gt']", 'depths': "data['tumor_depths']", 'var_allele_idx': "data['var_allele_idx']", 'default_total_dp': '(0)'}), "(genotype=data['tumor_gt'], depths=data['tumor_depths'],\n var_allele_idx=data['var_allele_idx'], default_total_dp=0)\n", (19147, 19266), False, 'from aliquotmaf.converters.formatters import format_all_effects, format_alleles, format_depths, format_vcf_columns\n'), ((21884, 21941), 'aliquotmaf.converters.builder.get_builder', 'get_builder', (['"""dbSNP_Val_Status"""', 'self._scheme'], {'value': 'None'}), "('dbSNP_Val_Status', self._scheme, value=None)\n", (21895, 21941), False, 'from aliquotmaf.converters.builder import get_builder\n'), ((22150, 22197), 'aliquotmaf.converters.builder.get_builder', 'get_builder', (['"""COSMIC"""', 'self._scheme'], {'value': 'None'}), "('COSMIC', self._scheme, value=None)\n", (22161, 22197), False, 'from aliquotmaf.converters.builder import get_builder\n'), ((22569, 22617), 'aliquotmaf.converters.builder.get_builder', 'get_builder', (['"""hotspot"""', 'self._scheme'], {'value': 'None'}), "('hotspot', self._scheme, value=None)\n", (22580, 22617), False, 'from aliquotmaf.converters.builder import get_builder\n'), ((23820, 23906), 'aliquotmaf.annotators.DbSnpValidation.setup', 'Annotators.DbSnpValidation.setup', (['self._scheme', "self.options['dbsnp_priority_db']"], {}), "(self._scheme, self.options[\n 'dbsnp_priority_db'])\n", (23852, 23906), True, 'import aliquotmaf.annotators as Annotators\n'), ((24015, 24082), 'aliquotmaf.annotators.CosmicID.setup', 'Annotators.CosmicID.setup', (['self._scheme', "self.options['cosmic_vcf']"], {}), "(self._scheme, self.options['cosmic_vcf'])\n", (24040, 24082), True, 'import aliquotmaf.annotators as Annotators\n'), ((24207, 24284), 'aliquotmaf.annotators.NonTcgaExac.setup', 
'Annotators.NonTcgaExac.setup', (['self._scheme', "self.options['non_tcga_exac_vcf']"], {}), "(self._scheme, self.options['non_tcga_exac_vcf'])\n", (24235, 24284), True, 'import aliquotmaf.annotators as Annotators\n'), ((24398, 24465), 'aliquotmaf.annotators.Hotspot.setup', 'Annotators.Hotspot.setup', (['self._scheme', "self.options['hotspot_tsv']"], {}), "(self._scheme, self.options['hotspot_tsv'])\n", (24422, 24465), True, 'import aliquotmaf.annotators as Annotators\n'), ((24858, 24915), 'aliquotmaf.filters.GdcBlacklist.setup', 'Filters.GdcBlacklist.setup', (["self.options['gdc_blacklist']"], {}), "(self.options['gdc_blacklist'])\n", (24884, 24915), True, 'import aliquotmaf.filters as Filters\n'), ((25033, 25087), 'aliquotmaf.filters.NormalDepth.setup', 'Filters.NormalDepth.setup', (["self.options['min_n_depth']"], {}), "(self.options['min_n_depth'])\n", (25058, 25087), True, 'import aliquotmaf.filters as Filters\n'), ((25197, 25246), 'aliquotmaf.filters.GdcPon.setup', 'Filters.GdcPon.setup', (["self.options['gdc_pon_vcf']"], {}), "(self.options['gdc_pon_vcf'])\n", (25217, 25246), True, 'import aliquotmaf.filters as Filters\n'), ((25336, 25396), 'aliquotmaf.filters.NonExonic.setup', 'Filters.NonExonic.setup', (["self.options['nonexonic_intervals']"], {}), "(self.options['nonexonic_intervals'])\n", (25359, 25396), True, 'import aliquotmaf.filters as Filters\n'), ((25514, 25571), 'aliquotmaf.filters.OffTarget.setup', 'Filters.OffTarget.setup', (["self.options['target_intervals']"], {}), "(self.options['target_intervals'])\n", (25537, 25571), True, 'import aliquotmaf.filters as Filters\n'), ((7863, 7886), 'maflib.sort_order.BarcodesAndCoordinate', 'BarcodesAndCoordinate', ([], {}), '()\n', (7884, 7886), False, 'from maflib.sort_order import BarcodesAndCoordinate\n'), ((8948, 8976), 'maflib.sort_order.BarcodesAndCoordinate.name', 'BarcodesAndCoordinate.name', ([], {}), '()\n', (8974, 8976), False, 'from maflib.sort_order import BarcodesAndCoordinate\n'), ((17706, 
17885), 'aliquotmaf.converters.formatters.format_alleles', 'format_alleles', ([], {'genotype': "data['normal_gt']", 'alleles': "data['location_data']['alleles']", 'defaults': "[data['location_data']['ref_allele'], data['location_data']['ref_allele']]"}), "(genotype=data['normal_gt'], alleles=data['location_data'][\n 'alleles'], defaults=[data['location_data']['ref_allele'], data[\n 'location_data']['ref_allele']])\n", (17720, 17885), False, 'from aliquotmaf.converters.formatters import format_all_effects, format_alleles, format_depths, format_vcf_columns\n'), ((19535, 19649), 'aliquotmaf.converters.formatters.format_depths', 'format_depths', ([], {'genotype': "data['normal_gt']", 'depths': "data['normal_depths']", 'var_allele_idx': "data['var_allele_idx']"}), "(genotype=data['normal_gt'], depths=data['normal_depths'],\n var_allele_idx=data['var_allele_idx'])\n", (19548, 19649), False, 'from aliquotmaf.converters.formatters import format_all_effects, format_alleles, format_depths, format_vcf_columns\n')] |
# -*- coding: utf-8 -*-
"""
shortly.settings
~~~~~~~~~~~~~~~~
Shortly config.
:copyright: (c) 2014 by fsp.
:license: BSD.
"""
import os

# Debug mode is toggled by dropping a file named "debug" next to this module;
# its presence (rather than an env var) selects the environment.
DEBUG = os.path.exists(os.path.join(os.path.dirname(__file__), 'debug'))

# Redis connection settings. Both environments currently point at the same
# local instance; keep the branch so they can diverge later.
if DEBUG:
    REDIS_HOST = 'localhost'
    REDIS_PORT = 6379
    REDIS_DB = 0
else:
    REDIS_HOST = 'localhost'
    REDIS_PORT = 6379
    REDIS_DB = 0
| [
"os.path.dirname"
] | [((269, 294), 'os.path.dirname', 'os.path.dirname', (['__file__'], {}), '(__file__)\n', (284, 294), False, 'import os\n')] |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
"""
import sys
import matplotlib.pyplot as plt
import numpy as np
# from tomo_encoders.misc_utils.feature_maps_vis import view_midplanes
import cupy as cp
import time
import h5py
#from recon_subvol import fbp_filter, recon_patch
# from tomo_encoders import DataFile
import os
fpath = '/data02/MyArchive/AM_part_Xuan/data/mli_L206_HT_650_L3_rec_1x1_uint16.hdf5'
binning = 1
def _rescale_data(data, min_val, max_val):
'''
Recales data to values into range [min_val, max_val]. Data can be any numpy or cupy array of any shape.
'''
xp = cp.get_array_module(data) # 'xp' is a standard usage in the community
eps = 1e-12
data = (data - min_val) / (max_val - min_val + eps)
return data
def _find_min_max(vol, sampling_factor):
    '''
    Estimate the global (max, min) of a 3D volume from a strided subsample.

    Every ``sampling_factor``-th element along each axis is inspected, so
    the result is exact only when sampling_factor == 1.
    '''
    sub = vol[::sampling_factor, ::sampling_factor, ::sampling_factor]
    xp = cp.get_array_module(sub)  # numpy or cupy, matching the input
    return xp.max(sub), xp.min(sub)
def normalize_volume_gpu(vol, chunk_size = 64, normalize_sampling_factor = 1):
    '''
    Normalizes volume to values into range [0,1].

    The CPU-resident 3D array ``vol`` is updated in place (and also
    returned), one slab of ``chunk_size`` z-slices at a time: each slab is
    copied to the GPU on a dedicated CUDA stream, rescaled there, and
    copied back. Per-chunk copy/compute timings are printed at the end.

    NOTE(review): the GPU buffer holds exactly ``chunk_size`` slices, so a
    final partial chunk (when ``vol.shape[0]`` is not a multiple of
    ``chunk_size``) looks like it would mismatch on ``set``/assignment --
    confirm callers always pass a compatible shape.
    '''
    tot_len = vol.shape[0]
    nchunks = int(np.ceil(tot_len/chunk_size))
    # Global min/max estimated once (optionally on a subsample) so every
    # chunk is rescaled with the same bounds.
    max_val, min_val = _find_min_max(vol, normalize_sampling_factor)
    proc_times = []
    copy_to_times = []
    copy_from_times = []
    stream1 = cp.cuda.Stream()
    t0 = time.time()
    # Reusable device-side staging buffer for one chunk.
    vol_gpu = cp.zeros((chunk_size, vol.shape[1], vol.shape[2]), dtype = cp.float32)
    for jj in range(nchunks):
        t01 = time.time()
        sz = slice(jj*chunk_size, min((jj+1)*chunk_size, tot_len))
        ## copy to gpu from cpu
        with stream1:
            vol_gpu.set(vol[sz,...])
        stream1.synchronize()
        t02 = time.time()
        copy_to_times.append(t02-t01)
        ## process (note: _rescale_data rebinds vol_gpu to a new array)
        with stream1:
            vol_gpu = _rescale_data(vol_gpu, min_val, max_val)
        stream1.synchronize()
        t03 = time.time()
        proc_times.append(t03-t02)
        ## copy from gpu to cpu, overwriting the input slab in place
        with stream1:
            vol[sz,...] = vol_gpu.get()
        stream1.synchronize()
        t04 = time.time()
        copy_from_times.append(t04 - t03)
    print("copy to gpu time per %i size chunk: %.2f ms"%(chunk_size,np.mean(copy_to_times)*1000.0))
    print("processing time per %i size chunk: %.2f ms"%(chunk_size,np.mean(proc_times)*1000.0))
    print("copy from gpu time per %i size chunk: %.2f ms"%(chunk_size,np.mean(copy_from_times)*1000.0))
    print("total time: ", time.time() - t0)
    return vol
# Optional CLI override for the GPU chunk size (defaults to 64 slices).
chunk_size = int(sys.argv[1]) if len(sys.argv) > 1 else 64

if __name__ == "__main__":
    # Smoke test: normalize a random single-precision volume on the GPU.
    vol_shape = (512, 1224, 1224)
    vol = np.random.normal(0.0, 1.0, vol_shape).astype(np.float32)
    print("input volume: ", vol.shape)
    vol = normalize_volume_gpu(vol, chunk_size=chunk_size, normalize_sampling_factor=4)
| [
"numpy.random.normal",
"numpy.mean",
"numpy.ceil",
"cupy.cuda.Stream",
"cupy.get_array_module",
"time.time",
"cupy.zeros"
] | [((621, 646), 'cupy.get_array_module', 'cp.get_array_module', (['data'], {}), '(data)\n', (640, 646), True, 'import cupy as cp\n'), ((877, 913), 'cupy.get_array_module', 'cp.get_array_module', (['vol[ss, ss, ss]'], {}), '(vol[ss, ss, ss])\n', (896, 913), True, 'import cupy as cp\n'), ((1436, 1452), 'cupy.cuda.Stream', 'cp.cuda.Stream', ([], {}), '()\n', (1450, 1452), True, 'import cupy as cp\n'), ((1462, 1473), 'time.time', 'time.time', ([], {}), '()\n', (1471, 1473), False, 'import time\n'), ((1493, 1561), 'cupy.zeros', 'cp.zeros', (['(chunk_size, vol.shape[1], vol.shape[2])'], {'dtype': 'cp.float32'}), '((chunk_size, vol.shape[1], vol.shape[2]), dtype=cp.float32)\n', (1501, 1561), True, 'import cupy as cp\n'), ((1251, 1280), 'numpy.ceil', 'np.ceil', (['(tot_len / chunk_size)'], {}), '(tot_len / chunk_size)\n', (1258, 1280), True, 'import numpy as np\n'), ((1608, 1619), 'time.time', 'time.time', ([], {}), '()\n', (1617, 1619), False, 'import time\n'), ((1848, 1859), 'time.time', 'time.time', ([], {}), '()\n', (1857, 1859), False, 'import time\n'), ((2056, 2067), 'time.time', 'time.time', ([], {}), '()\n', (2065, 2067), False, 'import time\n'), ((2266, 2277), 'time.time', 'time.time', ([], {}), '()\n', (2275, 2277), False, 'import time\n'), ((2651, 2662), 'time.time', 'time.time', ([], {}), '()\n', (2660, 2662), False, 'import time\n'), ((2843, 2880), 'numpy.random.normal', 'np.random.normal', (['(0.0)', '(1.0)', 'vol_shape'], {}), '(0.0, 1.0, vol_shape)\n', (2859, 2880), True, 'import numpy as np\n'), ((2393, 2415), 'numpy.mean', 'np.mean', (['copy_to_times'], {}), '(copy_to_times)\n', (2400, 2415), True, 'import numpy as np\n'), ((2492, 2511), 'numpy.mean', 'np.mean', (['proc_times'], {}), '(proc_times)\n', (2499, 2511), True, 'import numpy as np\n'), ((2591, 2615), 'numpy.mean', 'np.mean', (['copy_from_times'], {}), '(copy_from_times)\n', (2598, 2615), True, 'import numpy as np\n')] |
import random
from PIL import Image
from captcha.image import ImageCaptcha
from utils.dataset import CaptchaDataset
from utils.img_util import display_images
from torchvision import transforms
import numpy as np
# Captcha geometry and the alphabet the random string is drawn from.
img_width = 160
img_height = 60
n_chars = 7
chars = list('1234567890abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ')
gen = ImageCaptcha(img_width, img_height)
#img_trans = transforms.Compose([
#    transforms.Grayscale(num_output_channels=1)
#    ,transforms.ToTensor()
#    ,transforms.Normalize(mean=[0.5], std=[0.5])
##])
# Preprocessing pipeline: grayscale replicated to 3 channels, then
# normalized to roughly [-1, 1] per channel.
img_trans = transforms.Compose([
    transforms.Grayscale(num_output_channels=3)
    ,transforms.ToTensor()
    ,transforms.Normalize(mean=[0.5, 0.5, 0.5], std=(0.5, 0.5, 0.5))
])
# Draw n_chars random indices, render the resulting string as a captcha,
# and run it through the transform for inspection.
content = [random.randrange(0, len(chars)) for _ in range(n_chars)]
s = ''.join([chars[i] for i in content])
d = gen.generate(s)
d = Image.open(d)
t = img_trans(d)
print(f'\ntensor shape{t.shape}')
display_images(t.numpy(), 1, 3)
"PIL.Image.open",
"torchvision.transforms.Grayscale",
"captcha.image.ImageCaptcha",
"torchvision.transforms.Normalize",
"torchvision.transforms.ToTensor"
] | [((346, 381), 'captcha.image.ImageCaptcha', 'ImageCaptcha', (['img_width', 'img_height'], {}), '(img_width, img_height)\n', (358, 381), False, 'from captcha.image import ImageCaptcha\n'), ((865, 878), 'PIL.Image.open', 'Image.open', (['d'], {}), '(d)\n', (875, 878), False, 'from PIL import Image\n'), ((588, 631), 'torchvision.transforms.Grayscale', 'transforms.Grayscale', ([], {'num_output_channels': '(3)'}), '(num_output_channels=3)\n', (608, 631), False, 'from torchvision import transforms\n'), ((637, 658), 'torchvision.transforms.ToTensor', 'transforms.ToTensor', ([], {}), '()\n', (656, 658), False, 'from torchvision import transforms\n'), ((664, 727), 'torchvision.transforms.Normalize', 'transforms.Normalize', ([], {'mean': '[0.5, 0.5, 0.5]', 'std': '(0.5, 0.5, 0.5)'}), '(mean=[0.5, 0.5, 0.5], std=(0.5, 0.5, 0.5))\n', (684, 727), False, 'from torchvision import transforms\n')] |
#!/usr/bin/env python
"""
Extract MFCC and filterbank features for the Buckeye dataset.
Author: <NAME>
Contact: <EMAIL>
Date: 2019, 2021
"""
from datetime import datetime
from os import path
from tqdm import tqdm
import argparse
import numpy as np
import os
import sys
sys.path.append("..")
from paths import buckeye_datadir
import features
import utils
def extract_features_for_subset(subset, feat_type, output_fn):
    """
    Extract specified features for a subset and write a compressed archive.

    The `feat_type` parameter can be "mfcc" or "fbank".
    """
    # Load the speaker list for this subset.
    speaker_fn = path.join(
        "..", "data", "buckeye_" + subset + "_speakers.list"
        )
    print("Reading:", speaker_fn)
    with open(speaker_fn) as f:
        speakers = {line.strip() for line in f}
    print("Speakers:", ", ".join(sorted(speakers)))
    # Extract raw features speaker by speaker; each utterance is re-keyed as
    # "<speaker>_<wav_key with its first three characters dropped>".
    feat_dict = {}
    print("Extracting features per speaker:")
    for speaker in sorted(speakers):
        speaker_dir = path.join(buckeye_datadir, speaker)
        if feat_type == "mfcc":
            speaker_feats = features.extract_mfcc_dir(speaker_dir)
        elif feat_type == "fbank":
            speaker_feats = features.extract_fbank_dir(speaker_dir)
        else:
            assert False, "invalid feature type"
        for wav_key, feats in speaker_feats.items():
            feat_dict[speaker + "_" + wav_key[3:]] = feats
    # Keep only voice-active regions read from the forced alignments.
    fa_fn = path.join("..", "data", "buckeye_english.wrd")
    print("Reading:", fa_fn)
    vad_dict = utils.read_vad_from_fa(fa_fn)
    print("Extracting VAD regions:")
    feat_dict = features.extract_vad(feat_dict, vad_dict)
    # Per-speaker mean/variance normalisation, then write the archive.
    print("Per speaker mean and variance normalisation:")
    feat_dict = features.speaker_mvn(feat_dict)
    print("Writing:", output_fn)
    np.savez_compressed(output_fn, **feat_dict)
def main():
    """Extract Buckeye MFCC archives and same-different word-token archives.

    Each output is skipped if its file already exists, so the script can be
    re-run incrementally.
    """
    print(datetime.now())
    # RAW FEATURES
    # Extract MFCCs for the different sets
    mfcc_dir = path.join("mfcc", "buckeye")
    for subset in ["devpart1", "devpart2", "zs"]:
        if not path.isdir(mfcc_dir):
            os.makedirs(mfcc_dir)
        output_fn = path.join(mfcc_dir, subset + ".dd.npz")
        if not path.isfile(output_fn):
            print("Extracting MFCCs:", subset)
            extract_features_for_subset(subset, "mfcc", output_fn)
        else:
            print("Using existing file:", output_fn)
    # # Extract filterbanks for the different sets
    # fbank_dir = path.join("fbank", "buckeye")
    # for subset in ["devpart1", "devpart2", "zs"]:
    #     if not path.isdir(fbank_dir):
    #         os.makedirs(fbank_dir)
    #     output_fn = path.join(fbank_dir, subset + ".npz")
    #     if not path.isfile(output_fn):
    #         print("Extracting filterbanks:", subset)
    #         extract_features_for_subset(subset, "fbank", output_fn)
    #     else:
    #         print("Using existing file:", output_fn)
    # GROUND TRUTH WORD SEGMENTS
    # Create a ground truth word list of at least 50 frames and 5 characters
    fa_fn = path.join("..", "data", "buckeye_english.wrd")
    list_dir = "lists"
    if not path.isdir(list_dir):
        os.makedirs(list_dir)
    list_fn = path.join(list_dir, "buckeye.samediff.list")
    if not path.isfile(list_fn):
        utils.write_samediff_words(fa_fn, list_fn)
    else:
        print("Using existing file:", list_fn)
    # Extract word segments from the MFCC NumPy archives
    for subset in ["devpart1", "devpart2", "zs"]:
        input_npz_fn = path.join(mfcc_dir, subset + ".dd.npz")
        output_npz_fn = path.join(mfcc_dir, subset + ".samediff.dd.npz")
        if not path.isfile(output_npz_fn):
            print("Extracting MFCCs for same-different word tokens:", subset)
            utils.segments_from_npz(input_npz_fn, list_fn, output_npz_fn)
        else:
            print("Using existing file:", output_npz_fn)
    print(datetime.now())
if __name__ == "__main__":
main()
| [
"features.extract_vad",
"features.speaker_mvn",
"os.makedirs",
"os.path.join",
"utils.segments_from_npz",
"utils.read_vad_from_fa",
"datetime.datetime.now",
"os.path.isfile",
"os.path.isdir",
"utils.write_samediff_words",
"numpy.savez_compressed",
"sys.path.append"
] | [((273, 294), 'sys.path.append', 'sys.path.append', (['""".."""'], {}), "('..')\n", (288, 294), False, 'import sys\n'), ((586, 649), 'os.path.join', 'path.join', (['""".."""', '"""data"""', "('buckeye_' + subset + '_speakers.list')"], {}), "('..', 'data', 'buckeye_' + subset + '_speakers.list')\n", (595, 649), False, 'from os import path\n'), ((1549, 1595), 'os.path.join', 'path.join', (['""".."""', '"""data"""', '"""buckeye_english.wrd"""'], {}), "('..', 'data', 'buckeye_english.wrd')\n", (1558, 1595), False, 'from os import path\n'), ((1640, 1669), 'utils.read_vad_from_fa', 'utils.read_vad_from_fa', (['fa_fn'], {}), '(fa_fn)\n', (1662, 1669), False, 'import utils\n'), ((1761, 1802), 'features.extract_vad', 'features.extract_vad', (['feat_dict', 'vad_dict'], {}), '(feat_dict, vad_dict)\n', (1781, 1802), False, 'import features\n'), ((1936, 1967), 'features.speaker_mvn', 'features.speaker_mvn', (['feat_dict'], {}), '(feat_dict)\n', (1956, 1967), False, 'import features\n'), ((2025, 2068), 'numpy.savez_compressed', 'np.savez_compressed', (['output_fn'], {}), '(output_fn, **feat_dict)\n', (2044, 2068), True, 'import numpy as np\n'), ((2189, 2217), 'os.path.join', 'path.join', (['"""mfcc"""', '"""buckeye"""'], {}), "('mfcc', 'buckeye')\n", (2198, 2217), False, 'from os import path\n'), ((3270, 3316), 'os.path.join', 'path.join', (['""".."""', '"""data"""', '"""buckeye_english.wrd"""'], {}), "('..', 'data', 'buckeye_english.wrd')\n", (3279, 3316), False, 'from os import path\n'), ((3417, 3461), 'os.path.join', 'path.join', (['list_dir', '"""buckeye.samediff.list"""'], {}), "(list_dir, 'buckeye.samediff.list')\n", (3426, 3461), False, 'from os import path\n'), ((2094, 2108), 'datetime.datetime.now', 'datetime.now', ([], {}), '()\n', (2106, 2108), False, 'from datetime import datetime\n'), ((2359, 2398), 'os.path.join', 'path.join', (['mfcc_dir', "(subset + '.dd.npz')"], {}), "(mfcc_dir, subset + '.dd.npz')\n", (2368, 2398), False, 'from os import path\n'), ((3351, 
3371), 'os.path.isdir', 'path.isdir', (['list_dir'], {}), '(list_dir)\n', (3361, 3371), False, 'from os import path\n'), ((3381, 3402), 'os.makedirs', 'os.makedirs', (['list_dir'], {}), '(list_dir)\n', (3392, 3402), False, 'import os\n'), ((3473, 3493), 'os.path.isfile', 'path.isfile', (['list_fn'], {}), '(list_fn)\n', (3484, 3493), False, 'from os import path\n'), ((3503, 3545), 'utils.write_samediff_words', 'utils.write_samediff_words', (['fa_fn', 'list_fn'], {}), '(fa_fn, list_fn)\n', (3529, 3545), False, 'import utils\n'), ((3734, 3773), 'os.path.join', 'path.join', (['mfcc_dir', "(subset + '.dd.npz')"], {}), "(mfcc_dir, subset + '.dd.npz')\n", (3743, 3773), False, 'from os import path\n'), ((3798, 3846), 'os.path.join', 'path.join', (['mfcc_dir', "(subset + '.samediff.dd.npz')"], {}), "(mfcc_dir, subset + '.samediff.dd.npz')\n", (3807, 3846), False, 'from os import path\n'), ((4124, 4138), 'datetime.datetime.now', 'datetime.now', ([], {}), '()\n', (4136, 4138), False, 'from datetime import datetime\n'), ((2283, 2303), 'os.path.isdir', 'path.isdir', (['mfcc_dir'], {}), '(mfcc_dir)\n', (2293, 2303), False, 'from os import path\n'), ((2317, 2338), 'os.makedirs', 'os.makedirs', (['mfcc_dir'], {}), '(mfcc_dir)\n', (2328, 2338), False, 'import os\n'), ((2414, 2436), 'os.path.isfile', 'path.isfile', (['output_fn'], {}), '(output_fn)\n', (2425, 2436), False, 'from os import path\n'), ((3862, 3888), 'os.path.isfile', 'path.isfile', (['output_npz_fn'], {}), '(output_npz_fn)\n', (3873, 3888), False, 'from os import path\n'), ((3980, 4041), 'utils.segments_from_npz', 'utils.segments_from_npz', (['input_npz_fn', 'list_fn', 'output_npz_fn'], {}), '(input_npz_fn, list_fn, output_npz_fn)\n', (4003, 4041), False, 'import utils\n'), ((1098, 1133), 'os.path.join', 'path.join', (['buckeye_datadir', 'speaker'], {}), '(buckeye_datadir, speaker)\n', (1107, 1133), False, 'from os import path\n'), ((1263, 1298), 'os.path.join', 'path.join', (['buckeye_datadir', 'speaker'], {}), 
'(buckeye_datadir, speaker)\n', (1272, 1298), False, 'from os import path\n')] |
import os, json, subprocess
class Console:
    """Bridge to the PHP helper script (``console.php``) next to this module.

    All helpers are best-effort: on any failure they log to stdout and fall
    back to an empty default rather than raising.
    """

    @staticmethod
    def get_interface_methods(namespace):
        """Return the methods declared by interface *namespace*, or {} on error."""
        try:
            output = Console.run_command('interface-methods', [namespace])
            return json.loads(output)
        except Exception:
            # Deliberate best-effort: run_command already printed the error.
            return {}

    @staticmethod
    def get_class_methods(namespace):
        """Return the methods of class *namespace*, or {} on error."""
        try:
            output = Console.run_command('class-methods', [namespace])
            return json.loads(output)
        except Exception:
            return {}

    @staticmethod
    def get_classes(symbol):
        """Return the classes matching *symbol*, or [] on error."""
        try:
            output = Console.run_command('classes', [symbol])
            return json.loads(output)
        except Exception:
            return []

    @staticmethod
    def git_config(config):
        """Return the value of the git config key *config*, or None on error."""
        try:
            return subprocess.check_output(['git', 'config', '--get', config]).decode('utf-8')
        except Exception as e:
            print('[Phpme]', 'error: ' + str(e))

    @staticmethod
    def run_command(command, args):
        """Run ``console.php`` with *command* and *args*; return its stdout or None."""
        try:
            # console.php lives alongside this module.
            console = os.path.join(
                os.path.dirname(os.path.abspath(__file__)), 'console.php'
            )
            output = subprocess.check_output(['php', '-f', console, command] + args).decode('utf-8')
            if output.startswith('error'):
                print('[Phpme]', output)
            else:
                return output
        except Exception as e:
            print('[Phpme]', 'error: ' + str(e))
| [
"subprocess.check_output",
"json.loads",
"os.path.abspath"
] | [((220, 238), 'json.loads', 'json.loads', (['output'], {}), '(output)\n', (230, 238), False, 'import os, json, subprocess\n'), ((435, 453), 'json.loads', 'json.loads', (['output'], {}), '(output)\n', (445, 453), False, 'import os, json, subprocess\n'), ((632, 650), 'json.loads', 'json.loads', (['output'], {}), '(output)\n', (642, 650), False, 'import os, json, subprocess\n'), ((765, 824), 'subprocess.check_output', 'subprocess.check_output', (["['git', 'config', '--get', config]"], {}), "(['git', 'config', '--get', config])\n", (788, 824), False, 'import os, json, subprocess\n'), ((1082, 1145), 'subprocess.check_output', 'subprocess.check_output', (["(['php', '-f', console, command] + args)"], {}), "(['php', '-f', console, command] + args)\n", (1105, 1145), False, 'import os, json, subprocess\n'), ((1009, 1034), 'os.path.abspath', 'os.path.abspath', (['__file__'], {}), '(__file__)\n', (1024, 1034), False, 'import os, json, subprocess\n')] |
from django.test import TestCase
from django.contrib.auth import get_user_model
class ModelTests(TestCase):
    """Unit tests for the project's custom user model."""

    def test_create_user_with_email(self):
        """Creating a user with an email and password succeeds."""
        addr = '<EMAIL>'
        secret = '<PASSWORD>'
        new_user = get_user_model().objects.create_user(
            email=addr,
            password=secret,
        )
        self.assertEqual(new_user.email, addr)
        self.assertTrue(new_user.check_password(secret))

    def test_user_email_is_normalize(self):
        """A new user's email address is normalized to lower case."""
        addr = '<EMAIL>'
        new_user = get_user_model().objects.create_user(addr, 'test123')
        self.assertEqual(new_user.email, addr.lower())

    def test_email_field_not_empty(self):
        """Creating a user without an email raises ValueError."""
        with self.assertRaises(ValueError):
            get_user_model().objects.create_user(None, 'test123')

    def test_create_super_user(self):
        """Creating a superuser sets both the superuser and staff flags."""
        new_user = get_user_model().objects.create_super_user(
            'vj"dev.com',
            'tst123',
        )
        self.assertTrue(new_user.is_superuser)
        self.assertTrue(new_user.is_staff)
| [
"django.contrib.auth.get_user_model"
] | [((292, 308), 'django.contrib.auth.get_user_model', 'get_user_model', ([], {}), '()\n', (306, 308), False, 'from django.contrib.auth import get_user_model\n'), ((583, 599), 'django.contrib.auth.get_user_model', 'get_user_model', ([], {}), '()\n', (597, 599), False, 'from django.contrib.auth import get_user_model\n'), ((996, 1012), 'django.contrib.auth.get_user_model', 'get_user_model', ([], {}), '()\n', (1010, 1012), False, 'from django.contrib.auth import get_user_model\n'), ((842, 858), 'django.contrib.auth.get_user_model', 'get_user_model', ([], {}), '()\n', (856, 858), False, 'from django.contrib.auth import get_user_model\n')] |
#!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved.
"""Add custom configs and default values"""
from fvcore.common.config import CfgNode
def add_custom_config(_C):
    """Register project-specific config nodes and their defaults on *_C*.

    Adds a knowledge-distillation (KD) section and a TSM section, and sets
    the default test class list.
    """
    # Knowledge distillation
    _C.KD = CfgNode()
    # If True enable KD, else skip KD.
    _C.KD.ENABLE = False
    # Alpha: weight of the distillation loss term.
    _C.KD.ALPHA = 0.95
    # Softmax temperature used for distillation.
    _C.KD.TEMPERATURE = 6
    # Teacher's config. Bug fix: this key was previously assigned twice
    # (first to "" and then to the yaml path); the dead first assignment
    # has been removed and only the effective value is kept.
    _C.KD.CONFIG = "configs/Kinetics/SLOWFAST_8x8_R50.yaml"
    # Path to the checkpoint to load the initial weight.
    _C.KD.CHECKPOINT_FILE_PATH = ""
    # Checkpoint types include `caffe2` or `pytorch`.
    _C.KD.CHECKPOINT_TYPE = "pytorch"
    # Transform function used by the teacher network.
    _C.KD.TEACHER_TRANS_FUNC = 'bottleneck_transform'
    # TSM
    _C.TSM = CfgNode()
    # n_div for TSM, per stage.
    _C.TSM.N_DIV = [[8, 8], [8, 8], [8, 8], [8, 8]]
    # fusion n_div
    _C.TSM.FUSION_N_DIV = [8, 8, 8, 8]
    # Default class list used at test time.
    _C.TEST.CLASS_LIST = 'filenames/kinetics-40'
| [
"fvcore.common.config.CfgNode"
] | [((251, 260), 'fvcore.common.config.CfgNode', 'CfgNode', ([], {}), '()\n', (258, 260), False, 'from fvcore.common.config import CfgNode\n'), ((815, 824), 'fvcore.common.config.CfgNode', 'CfgNode', ([], {}), '()\n', (822, 824), False, 'from fvcore.common.config import CfgNode\n')] |
import dash
import dash_bio as dashbio
import dash_html_components as html
import dash_core_components as dcc
# Shared CSS stylesheet for the Dash demo app.
external_stylesheets = ['https://codepen.io/chriddyp/pen/bWLwgP.css']
app = dash.Dash(__name__, external_stylesheets=external_stylesheets)
# Page layout: instruction text, a multi-select chromosome dropdown,
# the ideogram component, and a status line updated by callbacks.
app.layout = html.Div([
    'Select which chromosomes to display on the ideogram below:',
    dcc.Dropdown(
        id='displayed-chromosomes',
        # Chromosomes 1..22, exposed as string labels/values.
        options=[{'label': str(i), 'value': str(i)} for i in range(1, 23)],
        multi=True,
        # All chromosomes selected by default.
        value=[str(i) for i in range(1, 23)]
    ),
    dashbio.Ideogram(
        id='my-dashbio-ideogram'
    ),
    html.Div(id='ideogram-rotated')
])
])
@app.callback(
    dash.dependencies.Output('my-dashbio-ideogram', 'chromosomes'),
    [dash.dependencies.Input('displayed-chromosomes', 'value')]
)
def update_ideogram(value):
    """Show exactly the chromosomes currently selected in the dropdown."""
    return value
@app.callback(
    dash.dependencies.Output('ideogram-rotated', 'children'),
    [dash.dependencies.Input('my-dashbio-ideogram', 'rotated')]
)
def update_ideogram_rotated(rot):
    """Report whether a chromosome has been selected (rotated) by the user."""
    # Bug fix: the placeholder previously sat between two literal spaces,
    # producing "You have  selected ..." (double space) when rot is truthy.
    return 'You have {}selected a chromosome.'.format(
        '' if rot else 'not ')


if __name__ == '__main__':
    app.run_server(debug=True)
| [
"dash.dependencies.Output",
"dash.dependencies.Input",
"dash.Dash",
"dash_bio.Ideogram",
"dash_html_components.Div"
] | [((188, 250), 'dash.Dash', 'dash.Dash', (['__name__'], {'external_stylesheets': 'external_stylesheets'}), '(__name__, external_stylesheets=external_stylesheets)\n', (197, 250), False, 'import dash\n'), ((666, 728), 'dash.dependencies.Output', 'dash.dependencies.Output', (['"""my-dashbio-ideogram"""', '"""chromosomes"""'], {}), "('my-dashbio-ideogram', 'chromosomes')\n", (690, 728), False, 'import dash\n'), ((862, 918), 'dash.dependencies.Output', 'dash.dependencies.Output', (['"""ideogram-rotated"""', '"""children"""'], {}), "('ideogram-rotated', 'children')\n", (886, 918), False, 'import dash\n'), ((548, 590), 'dash_bio.Ideogram', 'dashbio.Ideogram', ([], {'id': '"""my-dashbio-ideogram"""'}), "(id='my-dashbio-ideogram')\n", (564, 590), True, 'import dash_bio as dashbio\n'), ((610, 641), 'dash_html_components.Div', 'html.Div', ([], {'id': '"""ideogram-rotated"""'}), "(id='ideogram-rotated')\n", (618, 641), True, 'import dash_html_components as html\n'), ((735, 792), 'dash.dependencies.Input', 'dash.dependencies.Input', (['"""displayed-chromosomes"""', '"""value"""'], {}), "('displayed-chromosomes', 'value')\n", (758, 792), False, 'import dash\n'), ((925, 982), 'dash.dependencies.Input', 'dash.dependencies.Input', (['"""my-dashbio-ideogram"""', '"""rotated"""'], {}), "('my-dashbio-ideogram', 'rotated')\n", (948, 982), False, 'import dash\n')] |
from blockchain import Blockchain, Transaction
from nacl.signing import SigningKey
from hashlib import sha256
from time import sleep
from threading import Thread
import random
class Node:
    """Represent a Node."""

    def __init__(self, neighbours, unverified_transactions_pool):
        """
        Initialize the Node.
        :param neighbours: Other nodes that take part in the network.
        :param unverified_transactions_pool: Pool of unverified transactions
        """
        # Fresh ed25519 key pair; the node id is the hex SHA-256 of the
        # encoded public key and doubles as the default display name.
        self.private_key = SigningKey.generate()
        self.public_key = self.private_key.verify_key
        self.id = sha256(self.public_key.encode()).hexdigest()
        self.name = self.id
        self.blockchain = Blockchain()
        self.neighbours = neighbours
        # NOTE: the pool is shared with other nodes — see mine().
        self.unverified_transactions_pool = unverified_transactions_pool

    def log(self, message):
        """Log a message to stdout, adding this node's identifier"""
        print("[{id}]: {msg}".format(id=self.name, msg=message))

    def mine(self):
        """Mine a new block.

        Pops one transaction from the shared pool, syncs the ledger,
        validates the transaction and, if valid, forges a new block.
        Returns True when a block was forged, False otherwise.
        NOTE(review): a transaction found invalid is dropped, not
        returned to the pool — confirm that is intentional.
        """
        try:
            transaction = self.unverified_transactions_pool.pop()
        except IndexError:
            self.log("No transaction new transaction found")
            return False
        self.consensus()  # ensure consensus
        if not transaction.is_valid(self.blockchain.ledger):
            self.log("Transaction invalid")
            return False
        # Get proof of last block
        last_block = self.blockchain.last_block
        last_proof = last_block.dict["proof"]
        proof = self.blockchain.proof_of_work(last_proof)  # compute new proof
        block = self.blockchain.new_block(proof, transaction)  # Add new block to ledger
        self.log("New block forged: {}".format(block.hash))
        return True

    def consensus(self):
        """Replace the blockchain with the longest valid in the network."""
        for node in self.neighbours:
            # Re-read our own length each iteration, since the ledger may
            # have been replaced by a previous neighbour in this loop.
            min_length = len(self.blockchain.ledger)
            current_neighbour_chain = node.blockchain
            # Only replace ledger if the neighbours chain is longer and valid
            if len(current_neighbour_chain.ledger) > min_length and current_neighbour_chain.is_valid():
                self.blockchain.ledger = current_neighbour_chain.ledger
class MiningNode(Node, Thread):
    """A daemon thread that continuously mines new blocks."""

    def __init__(self, neighbours, unverified_transactions_pool):
        Thread.__init__(self)
        super().__init__(neighbours, unverified_transactions_pool)
        self.daemon = True

    def run(self):
        """Mine forever, backing off for five seconds when there is no work."""
        while True:
            mined = self.mine()
            if not mined:
                sleep(5)
class WalletNode(Node, Thread):
    """Represent a Person using a simple wallet."""

    def __init__(self, neighbours, unverified_transactions_pool, name):
        Thread.__init__(self)
        super().__init__(neighbours, unverified_transactions_pool)
        self.daemon = True
        # Human-readable name overriding the default hash-based id.
        self.name = name
        # Known peers this wallet may send coins to.
        self.friends = []

    def add_friends(self, *friend_nodes):
        """Register one or more nodes as known recipients."""
        for node in friend_nodes:
            self.friends.append(node)

    def new_transaction(self, recipient, amount):
        """Send an amount of coins to a recipient.

        Looks up the recipient by name among friends, collects this
        wallet's unspent outputs, and appends a new transaction (inputs
        spending those outputs, plus change back to self) to the shared
        unverified pool. Returns False when the recipient is unknown or
        the balance is insufficient.
        """
        self.consensus()
        if recipient not in [x.name for x in self.friends]:
            self.log("I don't know {}".format(recipient))
            return False
        if amount > self.balance:
            self.log("I don't have enough money to send {} {} Coins.".format(recipient, amount))
            return False
        self.log("I'm sending {} {} Coins.".format(recipient, amount))
        outputs = []
        spent_outputs = []
        # Walk the whole ledger: collect (tx hash, output index) pairs that
        # pay this wallet, and the pairs it has already spent as inputs.
        for block in self.blockchain.ledger:
            for output in block.transaction.outputs:  # Sum all earnings
                if output["public_key"] == self.public_key:
                    outputs.append((block.transaction.hash, block.transaction.outputs.index(output)))
            for input in block.transaction.inputs:  # Detect outgoings
                if input["public_key"] == self.public_key:
                    spent_outputs.append((input["hash"], input["output_index"]))
        # Keep only outputs that have not been spent yet (the UTXO set).
        outputs_for_t_input = []
        for output in outputs:
            if output not in spent_outputs:
                outputs_for_t_input.append(output)
        outputs = outputs_for_t_input
        # Total value of the unspent outputs being consumed.
        output_amount = 0
        for b in self.blockchain.ledger:
            for output in outputs:
                if b.transaction.hash == output[0]:
                    output_amount += b.transaction.outputs[output[1]]["amount"]
        # Resolve the recipient's name to their public key.
        for friend in self.friends:
            if friend.name == recipient:
                recipient = friend.public_key
        inputs = []
        for output in outputs:  # Generate inputs
            # Each input signs the hash of the transaction it spends.
            sig = self.private_key.sign(output[0].encode())
            inputs.append({"hash": output[0], "output_index": output[1],
                           "signature": sig, "public_key": self.public_key})
        outputs = [{"public_key": recipient, "amount": amount}]
        # Return the difference to ourselves as change.
        if amount < output_amount:
            outputs.append({"public_key": self.public_key, "amount": output_amount - amount})
        transaction = Transaction(inputs=inputs.copy(), outputs=outputs.copy())
        self.unverified_transactions_pool.append(transaction)

    def go_to_work(self):
        """Add a new generating transaction for 50 coins"""
        self.consensus()
        # Coinbase-style transaction: no inputs, a single 50-coin output.
        transaction = Transaction([], [{"public_key": self.public_key, "amount": 50}])
        self.unverified_transactions_pool.append(transaction)

    @property
    def balance(self):
        """Return the Node's balance"""
        self.consensus()  # update
        balance = 0
        outgoings = []
        for block in self.blockchain.ledger:
            for output in block.transaction.outputs:  # Sum all earnings
                if output["public_key"] == self.public_key:
                    balance += output["amount"]
            for input in block.transaction.inputs:  # Detect outgoings
                if input["public_key"] == self.public_key:
                    outgoings.append((input["hash"], input["output_index"]))
        # Sub outgoings
        for block in self.blockchain.ledger:
            for outgoing in outgoings:
                if block.transaction.hash == outgoing[0]:
                    balance -= block.transaction.outputs[outgoing[1]]["amount"]
        return balance

    def run(self):
        """Earn 50 coins, then send a random amount to a random friend, forever."""
        while True:
            self.go_to_work()
            self.log("Balance {}".format(self.balance))
            sleep(5)
            recipient = random.choice(self.friends).name
            amount = random.randint(1, 100)
            self.new_transaction(recipient, amount)
| [
"nacl.signing.SigningKey.generate",
"threading.Thread.__init__",
"random.choice",
"blockchain.Transaction",
"time.sleep",
"blockchain.Blockchain",
"random.randint"
] | [((513, 534), 'nacl.signing.SigningKey.generate', 'SigningKey.generate', ([], {}), '()\n', (532, 534), False, 'from nacl.signing import SigningKey\n'), ((706, 718), 'blockchain.Blockchain', 'Blockchain', ([], {}), '()\n', (716, 718), False, 'from blockchain import Blockchain, Transaction\n'), ((2454, 2475), 'threading.Thread.__init__', 'Thread.__init__', (['self'], {}), '(self)\n', (2469, 2475), False, 'from threading import Thread\n'), ((2937, 2958), 'threading.Thread.__init__', 'Thread.__init__', (['self'], {}), '(self)\n', (2952, 2958), False, 'from threading import Thread\n'), ((5570, 5634), 'blockchain.Transaction', 'Transaction', (['[]', "[{'public_key': self.public_key, 'amount': 50}]"], {}), "([], [{'public_key': self.public_key, 'amount': 50}])\n", (5581, 5634), False, 'from blockchain import Blockchain, Transaction\n'), ((6698, 6706), 'time.sleep', 'sleep', (['(5)'], {}), '(5)\n', (6703, 6706), False, 'from time import sleep\n'), ((6785, 6807), 'random.randint', 'random.randint', (['(1)', '(100)'], {}), '(1, 100)\n', (6799, 6807), False, 'import random\n'), ((2761, 2769), 'time.sleep', 'sleep', (['(5)'], {}), '(5)\n', (2766, 2769), False, 'from time import sleep\n'), ((6731, 6758), 'random.choice', 'random.choice', (['self.friends'], {}), '(self.friends)\n', (6744, 6758), False, 'import random\n')] |
# -*- coding: utf-8 -*-
"""
Created on Sun Jan 29 16:13:48 2017
@author: laide
"""
"""prints a list of tuples (station name, town, distance) for the 10 closest
and the 10 furthest stations from the Cambridge city centre, (52.2053, 0.1218)."""
from floodsystem.geo import stations_by_distance
from floodsystem.stationdata import build_station_list
def run():
    """Print the 10 closest and 10 furthest stations from Cambridge centre."""
    # Reference point: coordinates of the Cambridge city centre.
    cambridge_centre = (52.2053, 0.1218)
    # Stations paired with their distance from the reference, sorted by it.
    ranked = stations_by_distance(build_station_list(), cambridge_centre)
    # Reduce each (station, distance) pair to (name, town, distance).
    closest = [(station.name, station.town, dist) for station, dist in ranked[:10]]
    furthest = [(station.name, station.town, dist) for station, dist in ranked[-10:]]
    print("The closest 10 stations are:")
    print(closest)
    print("The furthest 10 stations are:")
    print(furthest)


if __name__ == "__main__":
    run()
"floodsystem.stationdata.build_station_list"
] | [((543, 563), 'floodsystem.stationdata.build_station_list', 'build_station_list', ([], {}), '()\n', (561, 563), False, 'from floodsystem.stationdata import build_station_list\n')] |
#!/usr/bin/env python3
# Copyright 2018-2019 <NAME>
# Copyright 2020-2021 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
import difflib
import json
import os
import re
import subprocess
import sys
CI = os.getenv('CI') == 'true'
DISAMBIGUATION_SUFFIX_PATTERN = re.compile(r'\._[0-9A-F]+$')
GLYPH_POSITION_PATTERN = re.compile(r'@-?[0-9]+,-?[0-9]+')
NOTDEF_PATTERN = re.compile(r'[\[|]\.notdef@')
SPACE_NAME_COMPONENT_PATTERN = re.compile('(?<=[\[|])(?:uni00A0|uni200[0-9A]|uni202F|uni205F|uni3000)(?![0-9A-Za-z_])')
FULL_FONT_CODE_POINTS = [0x034F]
NAME_PREFIX = r'(?:(?:dupl|u(?:ni(?:[0-9A-F]{4})+|[0-9A-F]{4,6})(?:_[^.]*)?)\.)'
UNSTABLE_NAME_COMPONENT_PATTERN = re.compile(fr'(?<=[\[|])(?:{NAME_PREFIX}[0-9A-Za-z_]+|(?!{NAME_PREFIX})[0-9A-Za-z_]+)')
def parse_color(color):
    """Interpret the --color option value as a boolean.

    'auto' enables colour when running on CI or when stdout is a TTY;
    'yes' and 'no' force it on or off. Any other value raises ValueError.
    """
    if color == 'auto':
        return CI or sys.stdout.isatty()
    if color in ('yes', 'no'):
        return color == 'yes'
    raise ValueError(f'Invalid --color value: {color}')
def parse_json(s):
    """Yield 'name@x,y' strings from hb-shape's JSON glyph output.

    Positions accumulate the advance widths; glyphs whose name starts
    with '_' are suppressed. A final '_@x,y' marker carries the total
    advance of the whole run.
    """
    x = 0
    y = 0
    for glyph in json.loads(s):
        name = glyph['g']
        if not name.startswith('_'):
            base = DISAMBIGUATION_SUFFIX_PATTERN.sub('', name)
            yield f'{base}@{x + glyph["dx"]},{y + glyph["dy"]}'
        x += int(glyph['ax'])
        y += int(glyph['ay'])
    yield f'_@{x},{y}'
def munge(output, regular, incomplete):
    """Normalize a shaper output string before comparison.

    When *incomplete*, glyph-name components are collapsed to 'dupl';
    for non-regular fonts, glyph positions are stripped entirely.
    """
    result = output
    if incomplete:
        result = UNSTABLE_NAME_COMPONENT_PATTERN.sub('dupl', result)
    if not regular:
        result = GLYPH_POSITION_PATTERN.sub('', result)
    return result
def print_diff(code_points, options, actual_output, expected_output, color):
    """Print a labelled actual/expected comparison for one failing test case.

    When *color* is true, differing spans are wrapped in ANSI escapes:
    bright cyan for text only in the actual output, bright yellow for
    text only in the expected output.
    """
    if color:
        highlighted_actual_output = []
        highlighted_expected_output = []
        matcher = difflib.SequenceMatcher(None, actual_output, expected_output, False)
        for tag, i1, i2, j1, j2 in matcher.get_opcodes():
            if tag == 'equal':
                highlighted_actual_output.append(actual_output[i1:i2])
                highlighted_expected_output.append(expected_output[j1:j2])
            elif tag == 'delete':
                highlighted_actual_output.append('\x1B[1;96m')
                highlighted_actual_output.append(actual_output[i1:i2])
                highlighted_actual_output.append('\x1B[0m')
            elif tag == 'insert':
                highlighted_expected_output.append('\x1B[1;93m')
                highlighted_expected_output.append(expected_output[j1:j2])
                highlighted_expected_output.append('\x1B[0m')
            elif tag == 'replace':
                highlighted_actual_output.append('\x1B[1;96m')
                highlighted_actual_output.append(actual_output[i1:i2])
                highlighted_actual_output.append('\x1B[0m')
                highlighted_expected_output.append('\x1B[1;93m')
                highlighted_expected_output.append(expected_output[j1:j2])
                highlighted_expected_output.append('\x1B[0m')
            else:
                # Bug fix: this was `assert False, ...`, which is silently
                # stripped under `python -O`; raise explicitly so an
                # unexpected opcode can never be ignored.
                raise ValueError(f'Unknown tag: {tag}')
        actual_output = ''.join(highlighted_actual_output)
        expected_output = ''.join(highlighted_expected_output)
    print()
    print(f'Input: {code_points}:{options}')
    print('Actual: ' + actual_output)
    print('Expected: ' + expected_output)
def run_test(font, line, png_file, color, incomplete, view_all):
    """Shape one test line with hb-shape and compare against the expectation.

    *line* has the form 'code_points:options:expected_output'. Returns a
    (passed, actualized_line) tuple where the second element is the input
    line with the expected output replaced by the actual one. On failure
    (or always, with *view_all*) a PNG is rendered via hb-view unless
    running on CI.
    """
    code_points, options, expected_output = line.split(':')
    p = subprocess.Popen(
        [
            'hb-shape',
            font,
            '-u',
            code_points,
            '-O',
            'json',
            '--remove-default-ignorables',
            *options.split(),
        ],
        stdout=subprocess.PIPE,
        stderr=subprocess.PIPE,
    )
    stdout_data, stderr_data = p.communicate()
    print(stderr_data.decode('utf-8'), end='', file=sys.stderr)
    # Re-serialize hb-shape's JSON into the compact bracketed format used
    # by the expectation strings.
    actual_output = f'[{"|".join(parse_json(stdout_data.decode("utf-8")))}]'
    regular = font.endswith('-Regular.otf')
    # A test passes when the munged outputs match, or — for an incomplete
    # font — when the discrepancy is attributable to missing coverage
    # (.notdef glyphs, space characters, or full-font-only code points).
    passed = (munge(actual_output, regular, incomplete) == munge(expected_output, regular, incomplete)
        or incomplete and (
            NOTDEF_PATTERN.search(actual_output)
            or SPACE_NAME_COMPONENT_PATTERN.search(expected_output)
            or any(int(cp, 16) in FULL_FONT_CODE_POINTS for cp in code_points.split())
        )
    )
    if not passed or view_all:
        if not passed:
            print_diff(code_points, options, actual_output, expected_output, color)
        if not CI:
            os.makedirs(os.path.dirname(png_file), exist_ok=True)
            png_file = '{}-{}.png'.format(png_file, code_points.replace(' ', '-'))
            # Render with U+E000 padding on both sides of the test string.
            p = subprocess.Popen(
                [
                    'hb-view',
                    '--font-file',
                    font,
                    '--font-size',
                    'upem',
                    '-u',
                    f'E000 {code_points} E000',
                    '--remove-default-ignorables',
                    '-o',
                    png_file,
                    '-O',
                    'png',
                    '--margin',
                    '800 0',
                    *options.split(),
                ],
                stderr=subprocess.PIPE,
                stdout=subprocess.PIPE)
            p.wait()
            print(p.stderr.read().decode('utf-8'), end='', file=sys.stderr)
    return (passed, ':'.join([code_points, options, actual_output]))
if __name__ == '__main__':
    parser = argparse.ArgumentParser(description='Run shaping tests.')
    parser.add_argument('--color', default='auto', help='Whether to print diffs in color: "yes", "no", or "auto".')
    parser.add_argument('--incomplete', action='store_true', help='Whether the font is less than the complete font. Do not fail a test if the actual result contains `.notdef`. Ignore the parts of glyph names that indicate code points.')
    parser.add_argument('--view', action='store_true', help='Render all test cases, not just the failures.')
    parser.add_argument('font', help='The path to a font.')
    parser.add_argument('tests', nargs='*', help='The paths to test files.')
    args = parser.parse_args()
    color = parse_color(args.color.lower())
    passed_all = True
    # Failures are collected under failed/<font-name>/ next to this script.
    failed_dir = os.path.join(os.path.dirname(sys.argv[0]), 'failed', os.path.basename(args.font))
    os.makedirs(failed_dir, exist_ok=True)
    for fn in args.tests:
        result_lines = []
        passed_file = True
        with open(fn) as f:
            for line_number, line in enumerate(f, start=1):
                line = line.rstrip()
                # Blank lines and '#' comments are copied through verbatim.
                if line and line[0] != '#':
                    passed_line, result_line = run_test(
                        args.font,
                        line,
                        os.path.join(failed_dir, 'png', os.path.basename(fn), '{:03}'.format(line_number)),
                        color,
                        args.incomplete,
                        args.view,
                    )
                    passed_file = passed_file and passed_line
                    result_lines.append(result_line + '\n')
                else:
                    result_lines.append(line + '\n')
        # Write an "actualized" copy of any failing test file so the
        # expectations can be diffed or updated by hand.
        if not passed_file:
            with open(os.path.join(failed_dir, os.path.basename(fn)), 'w') as f:
                f.writelines(result_lines)
        passed_all = passed_all and passed_file
    if not passed_all:
        sys.exit(1)
| [
"json.loads",
"os.makedirs",
"argparse.ArgumentParser",
"os.getenv",
"re.compile",
"difflib.SequenceMatcher",
"os.path.dirname",
"sys.stdout.isatty",
"os.path.basename",
"sys.exit"
] | [((789, 817), 're.compile', 're.compile', (['"""\\\\._[0-9A-F]+$"""'], {}), "('\\\\._[0-9A-F]+$')\n", (799, 817), False, 'import re\n'), ((843, 875), 're.compile', 're.compile', (['"""@-?[0-9]+,-?[0-9]+"""'], {}), "('@-?[0-9]+,-?[0-9]+')\n", (853, 875), False, 'import re\n'), ((894, 924), 're.compile', 're.compile', (['"""[\\\\[|]\\\\.notdef@"""'], {}), "('[\\\\[|]\\\\.notdef@')\n", (904, 924), False, 'import re\n'), ((955, 1054), 're.compile', 're.compile', (['"""(?<=[\\\\[|])(?:uni00A0|uni200[0-9A]|uni202F|uni205F|uni3000)(?![0-9A-Za-z_])"""'], {}), "(\n '(?<=[\\\\[|])(?:uni00A0|uni200[0-9A]|uni202F|uni205F|uni3000)(?![0-9A-Za-z_])'\n )\n", (965, 1054), False, 'import re\n'), ((1192, 1289), 're.compile', 're.compile', (['f"""(?<=[\\\\[|])(?:{NAME_PREFIX}[0-9A-Za-z_]+|(?!{NAME_PREFIX})[0-9A-Za-z_]+)"""'], {}), "(\n f'(?<=[\\\\[|])(?:{NAME_PREFIX}[0-9A-Za-z_]+|(?!{NAME_PREFIX})[0-9A-Za-z_]+)'\n )\n", (1202, 1289), False, 'import re\n'), ((731, 746), 'os.getenv', 'os.getenv', (['"""CI"""'], {}), "('CI')\n", (740, 746), False, 'import os\n'), ((1569, 1582), 'json.loads', 'json.loads', (['s'], {}), '(s)\n', (1579, 1582), False, 'import json\n'), ((6037, 6094), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'description': '"""Run shaping tests."""'}), "(description='Run shaping tests.')\n", (6060, 6094), False, 'import argparse\n'), ((6894, 6932), 'os.makedirs', 'os.makedirs', (['failed_dir'], {'exist_ok': '(True)'}), '(failed_dir, exist_ok=True)\n', (6905, 6932), False, 'import os\n'), ((2330, 2398), 'difflib.SequenceMatcher', 'difflib.SequenceMatcher', (['None', 'actual_output', 'expected_output', '(False)'], {}), '(None, actual_output, expected_output, False)\n', (2353, 2398), False, 'import difflib\n'), ((6821, 6849), 'os.path.dirname', 'os.path.dirname', (['sys.argv[0]'], {}), '(sys.argv[0])\n', (6836, 6849), False, 'import os\n'), ((6861, 6888), 'os.path.basename', 'os.path.basename', (['args.font'], {}), '(args.font)\n', (6877, 6888), False, 
'import os\n'), ((7968, 7979), 'sys.exit', 'sys.exit', (['(1)'], {}), '(1)\n', (7976, 7979), False, 'import sys\n'), ((1350, 1369), 'sys.stdout.isatty', 'sys.stdout.isatty', ([], {}), '()\n', (1367, 1369), False, 'import sys\n'), ((5066, 5091), 'os.path.dirname', 'os.path.dirname', (['png_file'], {}), '(png_file)\n', (5081, 5091), False, 'import os\n'), ((7812, 7832), 'os.path.basename', 'os.path.basename', (['fn'], {}), '(fn)\n', (7828, 7832), False, 'import os\n'), ((7359, 7379), 'os.path.basename', 'os.path.basename', (['fn'], {}), '(fn)\n', (7375, 7379), False, 'import os\n')] |
import re
import os
from prob import trans_P, emit_P, start_P
from preprocess import preprocess, recov, UNK
DATAROOT = '/home/luod/class/nlp/HanTokenization/datasets'
RESULTROOT = '/home/luod/class/nlp/HanTokenization/results'
VOCAB_FILE = os.path.join(DATAROOT, 'training_vocab.txt')
VOCAB_FREQ = os.path.join(RESULTROOT, 'vocab-freq.txt')
TRAIN_FILE = os.path.join(DATAROOT, 'training.txt')
TEST_FILE = os.path.join(DATAROOT, 'test.txt')
MIN_FLOAT = -3.14e100
PrevStatus = {
'B': 'ES',
'M': 'MB',
'S': 'SE',
'E': 'BM'
}
# Words that `cut` must always split into single characters.
Force_Split_Words = set()  # idiom fix: set() instead of set([])


def add_force_split(word):
    """Register *word* so that `cut` always emits it character by character."""
    global Force_Split_Words
    Force_Split_Words.add(word)
def viterbi(obs, states, start_p, trans_p, emit_p):
    """Viterbi decoding of the most likely BMES state sequence for *obs*.

    All probabilities are log-probabilities, so scores are summed and
    missing entries fall back to MIN_FLOAT (effectively log 0).

    :param obs: observation sequence (the sentence's characters).
    :param states: iterable of hidden states, e.g. 'BMES'.
    :param start_p: state -> initial log-probability.
    :param trans_p: state -> {state: transition log-probability}.
    :param emit_p: state -> {observation: emission log-probability}.
    :return: (best log-probability, best state path as a list).
    """
    V = [{}]  # tabular
    path = {}
    for y in states:  # init
        V[0][y] = start_p[y] + emit_p[y].get(obs[0], MIN_FLOAT)
        path[y] = [y]
    for t in range(1, len(obs)):
        V.append({})
        newpath = {}
        for y in states:
            em_p = emit_p[y].get(obs[t], MIN_FLOAT)
            # Only predecessors allowed by PrevStatus are considered
            # (e.g. a B can only follow an E or an S).
            (prob, state) = max(
                [(V[t - 1][y0] + trans_p[y0].get(y, MIN_FLOAT) + em_p, y0) for y0 in PrevStatus[y]])
            V[t][y] = prob
            newpath[y] = path[state] + [y]
        path = newpath
    # A valid segmentation must end on E (word end) or S (single char).
    (prob, state) = max((V[len(obs) - 1][y], y) for y in 'ES')
    return (prob, path[state])
def hmm_cut(sentence):
    """Segment *sentence* into words via HMM (Viterbi) decoding.

    Decodes a BMES tag per character and yields the B..E spans and the
    S singletons; any trailing characters left open by the tagging are
    flushed as one final piece.
    """
    # Fix: the original declared `global emit_P`, but emit_P is an imported
    # module-level name that is never rebound here, so the statement was a
    # misleading no-op and has been removed.
    prob, pos_list = viterbi(sentence, 'BMES', start_P, trans_P, emit_P)
    begin, nexti = 0, 0
    for i, char in enumerate(sentence):
        pos = pos_list[i]
        if pos == 'B':
            begin = i
        elif pos == 'E':
            yield sentence[begin:i + 1]
            nexti = i + 1
        elif pos == 'S':
            yield char
            nexti = i + 1
    # Flush a trailing span not terminated by an E/S tag.
    if nexti < len(sentence):
        yield sentence[nexti:]
# Runs of CJK ideographs are segmented by the HMM; everything else is
# tokenized with a simple latin/number pattern.
re_han = re.compile("([\u4E00-\u9FD5]+)")
re_skip = re.compile(r"([a-zA-Z0-9]+(?:\.\d+)?%?)")


def cut(sentence):
    """Tokenize *sentence*: HMM-segment CJK runs, pass other tokens through.

    Words registered in Force_Split_Words are emitted character by
    character. Falsy input yields a single None.
    """
    if not sentence:
        yield None
        # Bug fix: without this return, a None *sentence* fell through to
        # re_han.split(None) and raised TypeError on the second next().
        return
    blocks = re_han.split(sentence)
    for blk in blocks:
        if re_han.match(blk):
            for word in hmm_cut(blk):
                if word not in Force_Split_Words:
                    yield word
                else:
                    for c in word:
                        yield c
        else:
            tmp = re_skip.split(blk)
            for x in tmp:
                if x:
                    yield x
# Load the raw training/test corpora (one pre-segmented sentence per line).
with open(TRAIN_FILE, 'r', encoding='utf-8') as f:
    train_set = list(map(str.strip, f.readlines()))
with open(TEST_FILE, 'r', encoding='utf-8') as f:
    test_set = list(map(str.strip, f.readlines()))
# Token lists per line: the corpus separates words with single spaces.
train_set_split = [line.split(' ') for line in train_set]
test_set_split = [line.split(' ') for line in test_set]
# Unsegmented sentences, reconstructed by removing the word separators.
train_raw = [''.join(line) for line in train_set_split]
test_raw = [''.join(line) for line in test_set_split]
def eval(file_path, train=False):
    """Score the segmentation in *file_path* with the external perl scorer.

    Scores against the training corpus when *train* is true, otherwise
    against the test corpus.

    NOTE(review): this function shadows the builtin `eval`; renaming it
    would break the existing callers (pre_make_cut / make_cut), so the
    shadowing is only flagged here.
    """
    if not train:
        os.system('perl /home/luod/class/nlp/HanTokenization/scripts/score /home/luod/class/nlp/HanTokenization/datasets/training_vocab.txt /home/luod/class/nlp/HanTokenization/datasets/test.txt %s ' % file_path)
    else:
        os.system('perl /home/luod/class/nlp/HanTokenization/scripts/score /home/luod/class/nlp/HanTokenization/datasets/training_vocab.txt /home/luod/class/nlp/HanTokenization/datasets/training.txt %s' % file_path)
def pre_make_cut(cut_func, result_file):
    """Segment the test corpus with *cut_func* after preprocessing, then score.

    Each line is run through `preprocess`, which replaces certain spans with
    the UNK sentinel and returns their original text in `rec`; the segments
    between sentinels are tokenized with *cut_func* and the recovered spans
    are re-inserted in order. The space-joined result is written to
    RESULTROOT/result_file and scored with `eval`.
    """
    file_path = os.path.join(RESULTROOT, result_file)
    with open(os.path.join(RESULTROOT, result_file), 'w+', encoding='utf-8') as f:
        for line in test_raw:
            if not line:
                f.write('\n')
                continue
            sens, rec = preprocess(line)
            res, idx = [], 0
            le, ri = 0, 0
            while ri < len(sens):
                if sens[ri] == UNK:
                    if sens[le: ri]:
                        # Bug fix: the segmenter passed as `cut_func` was
                        # previously ignored in favour of a hard-coded
                        # hmm_cut, so pre_make_cut(cut, ...) never used `cut`.
                        res += cut_func(sens[le: ri])
                    le = ri + 1
                    if idx < len(rec):
                        res += [rec[idx]]
                        idx += 1
                ri += 1
                if ri == len(sens) and sens[-1] != UNK:
                    res += cut_func(sens[le:])
            res = ' '.join(res)
            f.write(res)
            f.write('\n')
    eval(file_path)
def make_cut(cut_func, result_file, train=False):
    """Segment every line of the chosen corpus with *cut_func* and score it.

    Writes the space-joined segmentation of each sentence to
    RESULTROOT/result_file (training corpus when *train*, otherwise the
    test corpus), then invokes the scorer via `eval`.
    """
    file_path = os.path.join(RESULTROOT, result_file)
    source_lines = train_raw if train else test_raw
    with open(os.path.join(RESULTROOT, result_file), 'w+', encoding='utf-8') as f:
        for line in source_lines:
            if not line:
                f.write('\n')
                continue
            segmented = ' '.join(cut_func(line))
            f.write(segmented)
            f.write('\n')
    eval(file_path, train)
def get_result():
    """Write and score both HMM variants (without and with chunk splitting)."""
    pre_make_cut(hmm_cut, 'pre_test_hmm_no_chunk.txt')
    pre_make_cut(cut, 'pre_test_hmm_chunk.txt')
def make_test_file():
    """Dump the unsegmented test sentences to ../datasets/raw_test.txt."""
    with open('../datasets/raw_test.txt', 'w', encoding='utf8') as out:
        out.writelines(line + '\n' for line in test_raw)
if __name__ == '__main__':
    # Only regenerate the raw test file by default; scoring is opt-in.
    make_test_file()
    # get_result()
| [
"os.system",
"preprocess.preprocess",
"os.path.join",
"re.compile"
] | [((242, 286), 'os.path.join', 'os.path.join', (['DATAROOT', '"""training_vocab.txt"""'], {}), "(DATAROOT, 'training_vocab.txt')\n", (254, 286), False, 'import os\n'), ((300, 342), 'os.path.join', 'os.path.join', (['RESULTROOT', '"""vocab-freq.txt"""'], {}), "(RESULTROOT, 'vocab-freq.txt')\n", (312, 342), False, 'import os\n'), ((356, 394), 'os.path.join', 'os.path.join', (['DATAROOT', '"""training.txt"""'], {}), "(DATAROOT, 'training.txt')\n", (368, 394), False, 'import os\n'), ((407, 441), 'os.path.join', 'os.path.join', (['DATAROOT', '"""test.txt"""'], {}), "(DATAROOT, 'test.txt')\n", (419, 441), False, 'import os\n'), ((1861, 1883), 're.compile', 're.compile', (['"""([一-鿕]+)"""'], {}), "('([一-鿕]+)')\n", (1871, 1883), False, 'import re\n'), ((1904, 1946), 're.compile', 're.compile', (['"""([a-zA-Z0-9]+(?:\\\\.\\\\d+)?%?)"""'], {}), "('([a-zA-Z0-9]+(?:\\\\.\\\\d+)?%?)')\n", (1914, 1946), False, 'import re\n'), ((3418, 3455), 'os.path.join', 'os.path.join', (['RESULTROOT', 'result_file'], {}), '(RESULTROOT, result_file)\n', (3430, 3455), False, 'import os\n'), ((4341, 4378), 'os.path.join', 'os.path.join', (['RESULTROOT', 'result_file'], {}), '(RESULTROOT, result_file)\n', (4353, 4378), False, 'import os\n'), ((2928, 3142), 'os.system', 'os.system', (["('perl /home/luod/class/nlp/HanTokenization/scripts/score /home/luod/class/nlp/HanTokenization/datasets/training_vocab.txt /home/luod/class/nlp/HanTokenization/datasets/test.txt %s '\n % file_path)"], {}), "(\n 'perl /home/luod/class/nlp/HanTokenization/scripts/score /home/luod/class/nlp/HanTokenization/datasets/training_vocab.txt /home/luod/class/nlp/HanTokenization/datasets/test.txt %s '\n % file_path)\n", (2937, 3142), False, 'import os\n'), ((3151, 3368), 'os.system', 'os.system', (["('perl /home/luod/class/nlp/HanTokenization/scripts/score /home/luod/class/nlp/HanTokenization/datasets/training_vocab.txt /home/luod/class/nlp/HanTokenization/datasets/training.txt %s'\n % file_path)"], {}), "(\n 'perl 
/home/luod/class/nlp/HanTokenization/scripts/score /home/luod/class/nlp/HanTokenization/datasets/training_vocab.txt /home/luod/class/nlp/HanTokenization/datasets/training.txt %s'\n % file_path)\n", (3160, 3368), False, 'import os\n'), ((3470, 3507), 'os.path.join', 'os.path.join', (['RESULTROOT', 'result_file'], {}), '(RESULTROOT, result_file)\n', (3482, 3507), False, 'import os\n'), ((3673, 3689), 'preprocess.preprocess', 'preprocess', (['line'], {}), '(line)\n', (3683, 3689), False, 'from preprocess import preprocess, recov, UNK\n'), ((4462, 4499), 'os.path.join', 'os.path.join', (['RESULTROOT', 'result_file'], {}), '(RESULTROOT, result_file)\n', (4474, 4499), False, 'import os\n')] |
#!/usr/bin/env python
import pymongo
conn_string="mongodb://dbUser19:LSVyKnHW@cluster<EMAIL>-00-0<EMAIL>.mongodb.<EMAIL>:27017,cluster0-shard-00-01-nadgn.mongodb.net:27017,cluster0-shard-00-02-nadgn.mongodb.net:27017/test?ssl=true&replicaSet=Cluster0-shard-0&authSource=admin&retryWrites=true"
client=pymongo.MongoClient(conn_string)
db=client.test
| [
"pymongo.MongoClient"
] | [((304, 336), 'pymongo.MongoClient', 'pymongo.MongoClient', (['conn_string'], {}), '(conn_string)\n', (323, 336), False, 'import pymongo\n')] |
import pytest
from test_migrations import constants
from .fixtures import migrator # pylint: disable=W0611
pytest_plugins = ['pytest_django'] # pylint: disable=C0103
def pytest_load_initial_conftests(early_config):
    """Register the migration-test marker so pytest knows about it.

    Hook: runs before initial conftest files are collected.
    """
    # Register the marks
    early_config.addinivalue_line(
        'markers',
        (
            # Bug fix: a space was missing between the adjacent string
            # literals, producing "as aDjango" in `pytest --markers` output.
            "{marker}: Mark the test as a "
            "Django migration test. Dynamically add `transactional_db` "
            "fixture to marked item. Migration tests are run only when "
            "`--test-migrations` pytest's CLI option passed."
        ).format(marker=constants.MIGRATIONS_TEST_MARKER),
    )
def pytest_addoption(parser):
    """Add option for running migration tests.

    Registers `--test-migrations` (stored as `test_migrations`, default
    False) under the `django_test_migrations` option group.
    """
    group = parser.getgroup('django_test_migrations')
    # NOTE(review): `_addoption` is pytest-private API (hence the pylint
    # suppression); the public `group.addoption` may work here — confirm it
    # preserves the same registration behaviour before switching.
    group._addoption( # pylint: disable=W0212
        '--test-migrations',
        action='store_true',
        dest='test_migrations',
        default=False,
        help=(
            "Run Django migration tests. This does the following: "
            " ensure migrations are enabled, skip all test not marked with "
            "`{marker}` marker."
        ).format(marker=constants.MIGRATIONS_TEST_MARKER)
    )
def pytest_sessionstart(session):
    """Force Django migrations back on when `--test-migrations` was passed."""
    if session.config.getoption('test_migrations', False):
        # TODO: consider raising AssertionError when `nomigration` falsy
        session.config.option.nomigrations = False
def pytest_collection_modifyitems(session, items):
    """Tag migration tests and skip everything else under `--test-migrations`."""
    migration_test_skip_marker = pytest.mark.skip(
        reason=(
            # Bug fix: the reason previously named a non-existent
            # `--test-migration` flag and lacked a space after "because".
            'Migration tests not skipped, because `--test-migrations` '
            'option passed.'
        ),
    )
    for item in items:
        # mark all tests using `migrator` fixture with `MIGRATION_TEST_MARKER`
        if 'migrator' in getattr(item, 'fixturenames', []):
            item.add_marker(constants.MIGRATIONS_TEST_MARKER)
        # skip all no migration tests when option `--test-migrations` passed
        if (
            session.config.getoption('test_migrations', False)
            and not item.get_closest_marker(constants.MIGRATIONS_TEST_MARKER)
        ):
            item.add_marker(migration_test_skip_marker)
@pytest.fixture(autouse=True, scope='function')
def _django_migration_marker(request):
    """Implement the migration marker, internal to `django_test_migrations`.

    This will dynamically request the `transactional_db` fixture
    and skip tests marked with migration marker if not
    explicitly requested by passing `--test-migrations` option.
    """
    marker = request.node.get_closest_marker(constants.MIGRATIONS_TEST_MARKER)
    if marker:
        if request.config.getoption('test_migrations', False):
            # Marked test and migrations requested: pull in the
            # transactional database so migrations can actually run.
            request.getfixturevalue('transactional_db')
        else:
            # NOTE(review): `pytest.skip(msg=...)` is deprecated in favour of
            # `reason=` on recent pytest versions — confirm the pinned pytest
            # still accepts the `msg` keyword.
            pytest.skip(
                msg=(
                    'Migration tests can require `migrations` enabled and can '
                    'be slow hence they should be ran separetly with pytest '
                    '`--test-migrations` option.'
                ),
            )
| [
"pytest.fixture",
"pytest.mark.skip",
"pytest.skip"
] | [((2178, 2224), 'pytest.fixture', 'pytest.fixture', ([], {'autouse': '(True)', 'scope': '"""function"""'}), "(autouse=True, scope='function')\n", (2192, 2224), False, 'import pytest\n'), ((1490, 1591), 'pytest.mark.skip', 'pytest.mark.skip', ([], {'reason': '"""Migration tests not skipped, because`--test-migration` option passed."""'}), "(reason=\n 'Migration tests not skipped, because`--test-migration` option passed.')\n", (1506, 1591), False, 'import pytest\n'), ((2773, 2941), 'pytest.skip', 'pytest.skip', ([], {'msg': '"""Migration tests can require `migrations` enabled and can be slow hence they should be ran separetly with pytest `--test-migrations` option."""'}), "(msg=\n 'Migration tests can require `migrations` enabled and can be slow hence they should be ran separetly with pytest `--test-migrations` option.'\n )\n", (2784, 2941), False, 'import pytest\n')] |
import os
from django.contrib.gis.db import models
from hrp.ontologies import *
# from hrp.ontologies import ITEM_TYPE_VOCABULARY, HRP_COLLECTOR_CHOICES, \
# HRP_COLLECTING_METHOD_VOCABULARY, HRP_BASIS_OF_RECORD_VOCABULARY, HRP_COLLECTION_CODES
from django.contrib.gis.geos import Point
import projects.models
class TaxonRank(projects.models.TaxonRank):
    """Taxonomic rank for the HRP project.

    Adds no fields of its own; only overrides the admin display names
    inherited from ``projects.models.TaxonRank``.
    """
    class Meta:
        verbose_name = "HRP Taxon Rank"
        verbose_name_plural = "HRP Taxon Ranks"
class Taxon(projects.models.Taxon):
    """A taxonomic name for the HRP project.

    Extends the shared ``projects.models.Taxon`` with a self-referential
    parent link (taxonomic hierarchy) and a link to its rank.
    """
    # Self-referential FK: the immediate parent taxon in the hierarchy.
    parent = models.ForeignKey('self', null=True, blank=True)
    rank = models.ForeignKey(TaxonRank)  # e.g. genus, species (see TaxonRank)
    class Meta:
        verbose_name = "HRP Taxon"
        verbose_name_plural = "HRP Taxa"
class IdentificationQualifier(projects.models.IdentificationQualifier):
    """Qualifier applied to a taxonomic identification for the HRP project.

    Adds no fields of its own; only overrides the admin display names
    inherited from ``projects.models.IdentificationQualifier``.
    """
    class Meta:
        verbose_name = "HRP ID Qualifier"
        verbose_name_plural = "HRP ID Qualifiers"
# Locality Class
class Locality(projects.models.PaleoCoreLocalityBaseClass):
    """A collecting locality for the HRP project.

    Uses a string primary key. A locality is labeled by the combination of
    collection code, locality number and sublocality (see ``__str__`` and
    ``Meta.ordering``), and carries an optional point geometry.
    """
    id = models.CharField(primary_key=True, max_length=255)  # string PK
    collection_code = models.CharField(null=True, blank=True, choices=HRP_COLLECTION_CODES, max_length=10)
    locality_number = models.IntegerField(null=True, blank=True)
    sublocality = models.CharField(null=True, blank=True, max_length=50)
    description = models.TextField(null=True, blank=True, max_length=255)
    # Stratigraphic position of the locality within a measured section.
    stratigraphic_section = models.CharField(null=True, blank=True, max_length=50)
    upper_limit_in_section = models.IntegerField(null=True, blank=True)
    lower_limit_in_section = models.DecimalField(max_digits=38, decimal_places=8, null=True, blank=True)
    error_notes = models.CharField(max_length=255, null=True, blank=True)
    notes = models.CharField(max_length=254, null=True, blank=True)
    geom = models.PointField(srid=4326, blank=True, null=True)  # WGS84 point
    date_last_modified = models.DateTimeField("Date Last Modified", auto_now=True)
    objects = models.GeoManager()  # GeoDjango manager for spatial queries
    def __str__(self):
        # Concatenate code, number and sublocality; str() renders null
        # fields as the literal "None", which is stripped afterwards
        # (along with "--") to keep the label tidy.
        nice_name = str(self.collection_code) + " " + str(self.locality_number) + str(self.sublocality)
        return nice_name.replace("None", "").replace("--", "")
    class Meta:
        verbose_name = "HRP Locality"
        verbose_name_plural = "HRP Localities"
        ordering = ("locality_number", "sublocality")
class Person(projects.models.Person):
    """A person (collector, finder, identifier) associated with HRP records.

    Defines name fields and orders people alphabetically by last then first
    name in querysets and the admin.
    """
    last_name = models.CharField("Last Name", null=True, blank=True, max_length=256)
    first_name = models.CharField("First Name", null=True, blank=True, max_length=256)

    class Meta:
        verbose_name = "HRP Person"
        verbose_name_plural = "HRP People"
        ordering = ["last_name", "first_name"]

    def __str__(self):
        """Return "Last, First" when both parts exist, otherwise whichever
        part is available, otherwise an empty string.

        Always returns a str: the previous implementation returned None
        whenever last_name was null/empty, which made str(person) raise
        ``TypeError: __str__ returned non-string``.
        """
        if self.last_name and self.first_name:
            return self.last_name + ', ' + self.first_name
        return self.last_name or self.first_name or ''
# Occurrence Class and Subclasses
class Occurrence(projects.models.PaleoCoreOccurrenceBaseClass):
    """
    Occurrence == Specimen, a general class for things discovered in the field.
    Finds have three subtypes: Archaeology, Biology, Geology.
    Fields are grouped by comments into logical sets (i.e. ontological classes).
    """
    basis_of_record = models.CharField("Basis of Record", max_length=50, blank=True, null=False,
                                       help_text='e.g. Observed item or Collected item',
                                       choices=HRP_BASIS_OF_RECORD_VOCABULARY)  # NOT NULL dwc:basisOfRecord
    field_number = models.CharField("Field Number", max_length=50, null=True, blank=True)
    item_type = models.CharField("Item Type", max_length=255, blank=True, null=False,
                                 choices=ITEM_TYPE_VOCABULARY)  # NOT NULL
    # TODO merge with taxon
    item_scientific_name = models.CharField("Sci Name", max_length=255, null=True, blank=True)
    # TODO merge with element
    item_description = models.CharField("Description", max_length=255, blank=True, null=True)
    item_count = models.IntegerField("Item Count", blank=True, null=True, default=1)
    collector = models.CharField("Collector", max_length=50, blank=True, null=True, choices=HRP_COLLECTOR_CHOICES)
    recorded_by = models.ForeignKey("Person", null=True, blank=True, related_name="occurrence_recorded_by")
    finder = models.CharField("Finder", null=True, blank=True, max_length=50, choices=HRP_COLLECTOR_CHOICES)
    found_by = models.ForeignKey("Person", null=True, blank=True, related_name="occurrence_found_by")
    collecting_method = models.CharField("Collecting Method", max_length=50,
                                         choices=HRP_COLLECTING_METHOD_VOCABULARY,
                                         null=True, blank=True)
    locality = models.ForeignKey("Locality", null=True, blank=True)  # dwc:sampling_protocol
    item_number = models.IntegerField("Item #", null=True, blank=True)
    item_part = models.CharField("Item Part", max_length=10, null=True, blank=True)
    cat_number = models.CharField("Cat Number", max_length=255, blank=True, null=True)
    disposition = models.CharField("Disposition", max_length=255, blank=True, null=True)
    preparation_status = models.CharField("Prep Status", max_length=50, blank=True, null=True)
    # TODO rename collection remarks to find remarks
    collection_remarks = models.TextField("Collection Remarks", null=True, blank=True, max_length=255)
    # Geological Context
    stratigraphic_formation = models.CharField("Formation", max_length=255, blank=True, null=True)
    stratigraphic_member = models.CharField("Member", max_length=255, blank=True, null=True)
    analytical_unit_1 = models.CharField(max_length=255, blank=True, null=True)
    analytical_unit_2 = models.CharField(max_length=255, blank=True, null=True)
    analytical_unit_3 = models.CharField(max_length=255, blank=True, null=True)
    analytical_unit_found = models.CharField(max_length=255, blank=True, null=True)
    analytical_unit_likely = models.CharField(max_length=255, blank=True, null=True)
    analytical_unit_simplified = models.CharField(max_length=255, blank=True, null=True)
    in_situ = models.BooleanField(default=False)
    ranked = models.BooleanField(default=False)
    weathering = models.SmallIntegerField(blank=True, null=True)
    surface_modification = models.CharField("Surface Mod", max_length=255, blank=True, null=True)
    geology_remarks = models.TextField("Geol Remarks", max_length=500, null=True, blank=True)
    # Location
    collection_code = models.CharField("Collection Code", max_length=20, blank=True, null=True)
    drainage_region = models.CharField("Drainage Region", null=True, blank=True, max_length=255)
    # Media
    image = models.FileField(max_length=255, blank=True, upload_to="uploads/images/hrp", null=True)

    class Meta:
        verbose_name = "HRP Occurrence"
        verbose_name_plural = "HRP Occurrences"
        ordering = ["collection_code", "locality", "item_number", "item_part"]

    def catalog_number(self):
        """
        Generate a pretty string formatted catalog number from constituent fields.
        Only collected items get a catalog number; observations return None.
        :return: catalog number as string, or None when basis_of_record != 'Collection'
        """
        if self.basis_of_record == 'Collection':
            # Create catalog number string. Null values become the literal
            # "None" when converted to string; stripped out below.
            if self.item_number:
                if self.item_part:
                    item_text = '-' + str(self.item_number) + str(self.item_part)
                else:
                    item_text = '-' + str(self.item_number)
            else:
                item_text = ''
            catalog_number_string = str(self.collection_code) + " " + str(self.locality_id) + item_text
            return catalog_number_string.replace('None', '').replace('- ', '')  # replace None with empty string
        else:
            return None

    @staticmethod
    def fields_to_display():
        """Fields shown in summary listings."""
        fields = ("id", "barcode")
        return fields

    @staticmethod
    def method_fields_to_export():
        """
        Method to store a list of fields that should be added to data exports.
        Called by export admin actions.
        These fields are defined in methods and are not concrete fields in the DB so have to be declared.
        :return: list of method names to include in exports
        """
        return ['longitude', 'latitude', 'easting', 'northing', 'catalog_number', 'photo']

    def get_all_field_names(self):
        """
        Field names from model
        :return: list with all field names
        """
        field_list = self._meta.get_fields()  # produce a list of field objects
        return [f.name for f in field_list]  # return a list of names from each field

    def get_foreign_key_field_names(self):
        """
        Get foreign key fields
        :return: returns a list of for key field names
        """
        field_list = self._meta.get_fields()  # produce a list of field objects
        return [f.name for f in field_list if f.is_relation]  # return a list of names for fk fields

    def get_concrete_field_names(self):
        """
        Get field names that correspond to columns in the DB
        :return: returns a lift
        """
        field_list = self._meta.get_fields()
        return [f.name for f in field_list if f.concrete]

    def photo(self):
        """Return an HTML snippet linking the full-size image, or None if unavailable."""
        try:
            # FieldFile.url raises ValueError when no file is associated.
            # The original wrapped the url in a single-argument os.path.join,
            # which is a no-op and has been removed.
            return u'<a href="%s"><img src="%s" style="width:600px" /></a>' \
                   % (self.image.url, self.image.url)
        except Exception:  # was a bare except; keep best-effort "no photo" behavior
            return None
    photo.short_description = 'Photo'
    photo.allow_tags = True
    photo.mark_safe = True

    def thumbnail(self):
        """Return an HTML snippet linking a thumbnail of the image, or None if unavailable."""
        try:
            return u'<a href="%s"><img src="%s" style="width:100px" /></a>' \
                   % (self.image.url, self.image.url)
        except Exception:  # was a bare except; keep best-effort "no thumb" behavior
            return None
    thumbnail.short_description = 'Thumb'
    thumbnail.allow_tags = True
    thumbnail.mark_safe = True
class Biology(Occurrence):
    """A biological (faunal) occurrence.

    Extends Occurrence with taxonomic identification, skeletal element
    data, a tooth-by-tooth presence inventory, and dental measurements.
    """
    # Biology
    sex = models.CharField("Sex", null=True, blank=True, max_length=50)
    life_stage = models.CharField("Life Stage", null=True, blank=True, max_length=50, choices=HRP_LIFE_STAGE_CHOICES)
    size_class = models.CharField("Size Class", null=True, blank=True, max_length=50, choices=HRP_SIZE_CLASS_CHOICES)
    # Taxon
    taxon = models.ForeignKey(Taxon,
                              default=0, on_delete=models.SET_DEFAULT,  # prevent deletion when taxa deleted
                              related_name='hrp_taxon_bio_occurrences')
    identification_qualifier = models.ForeignKey(IdentificationQualifier, null=True, blank=True,
                                                 on_delete=models.SET_NULL,
                                                 related_name='hrp_id_qualifier_bio_occurrences')
    qualifier_taxon = models.ForeignKey(Taxon, null=True, blank=True,
                                        on_delete=models.SET_NULL,
                                        related_name='hrp_qualifier_taxon_bio_occurrences')
    verbatim_taxon = models.CharField(null=True, blank=True, max_length=1024)
    verbatim_identification_qualifier = models.CharField(null=True, blank=True, max_length=255)
    taxonomy_remarks = models.TextField(max_length=500, null=True, blank=True)
    # Identification
    identified_by = models.CharField(null=True, blank=True, max_length=100, choices=HRP_IDENTIFIER_CHOICES)
    year_identified = models.IntegerField(null=True, blank=True)
    type_status = models.CharField(null=True, blank=True, max_length=50)
    fauna_notes = models.TextField(null=True, blank=True, max_length=64000)
    # Element
    side = models.CharField("Side", null=True, blank=True, max_length=50, choices=HRP_SIDE_CHOICES)
    element = models.CharField("Element", null=True, blank=True, max_length=50, choices=HRP_ELEMENT_CHOICES)
    # TODO add element_modifier choices once field is cleaned
    element_modifier = models.CharField("Element Mod", null=True, blank=True, max_length=50,
                                        choices=HRP_ELEMENT_MODIFIER_CHOICES)
    # TODO populate portion after migrate
    element_portion = models.CharField("Element Portion", null=True, blank=True, max_length=50,
                                       choices=HRP_ELEMENT_PORTION_CHOICES)
    # TODO populate number choices after migrate
    element_number = models.CharField(null=True, blank=True, max_length=50, choices=HRP_ELEMENT_NUMBER_CHOICES)
    element_remarks = models.TextField(max_length=500, null=True, blank=True)
    tooth_upper_or_lower = models.CharField(null=True, blank=True, max_length=50)
    tooth_number = models.CharField(null=True, blank=True, max_length=50)
    tooth_type = models.CharField(null=True, blank=True, max_length=50)
    # Tooth presence inventory. Field-name key (apparent from the names;
    # confirm): u/l prefix = upper/lower jaw, l/r = left/right side,
    # i/c/p/m = incisor/canine/premolar/molar, trailing digit = position.
    # upper dentition fields
    uli1 = models.BooleanField(default=False)
    uli2 = models.BooleanField(default=False)
    uli3 = models.BooleanField(default=False)
    uli4 = models.BooleanField(default=False)
    uli5 = models.BooleanField(default=False)
    uri1 = models.BooleanField(default=False)
    uri2 = models.BooleanField(default=False)
    uri3 = models.BooleanField(default=False)
    uri4 = models.BooleanField(default=False)
    uri5 = models.BooleanField(default=False)
    ulc = models.BooleanField(default=False)
    urc = models.BooleanField(default=False)
    ulp1 = models.BooleanField(default=False)
    ulp2 = models.BooleanField(default=False)
    ulp3 = models.BooleanField(default=False)
    ulp4 = models.BooleanField(default=False)
    urp1 = models.BooleanField(default=False)
    urp2 = models.BooleanField(default=False)
    urp3 = models.BooleanField(default=False)
    urp4 = models.BooleanField(default=False)
    ulm1 = models.BooleanField(default=False)
    ulm2 = models.BooleanField(default=False)
    ulm3 = models.BooleanField(default=False)
    urm1 = models.BooleanField(default=False)
    urm2 = models.BooleanField(default=False)
    urm3 = models.BooleanField(default=False)
    # lower dentition fields
    lli1 = models.BooleanField(default=False)
    lli2 = models.BooleanField(default=False)
    lli3 = models.BooleanField(default=False)
    lli4 = models.BooleanField(default=False)
    lli5 = models.BooleanField(default=False)
    lri1 = models.BooleanField(default=False)
    lri2 = models.BooleanField(default=False)
    lri3 = models.BooleanField(default=False)
    lri4 = models.BooleanField(default=False)
    lri5 = models.BooleanField(default=False)
    llc = models.BooleanField(default=False)
    lrc = models.BooleanField(default=False)
    llp1 = models.BooleanField(default=False)
    llp2 = models.BooleanField(default=False)
    llp3 = models.BooleanField(default=False)
    llp4 = models.BooleanField(default=False)
    lrp1 = models.BooleanField(default=False)
    lrp2 = models.BooleanField(default=False)
    lrp3 = models.BooleanField(default=False)
    lrp4 = models.BooleanField(default=False)
    llm1 = models.BooleanField(default=False)
    llm2 = models.BooleanField(default=False)
    llm3 = models.BooleanField(default=False)
    lrm1 = models.BooleanField(default=False)
    lrm2 = models.BooleanField(default=False)
    lrm3 = models.BooleanField(default=False)
    # indeterminate dental fields
    indet_incisor = models.BooleanField(default=False)
    indet_canine = models.BooleanField(default=False)
    indet_premolar = models.BooleanField(default=False)
    indet_molar = models.BooleanField(default=False)
    indet_tooth = models.BooleanField(default=False)
    deciduous = models.BooleanField(default=False)
    # Measurements
    um_tooth_row_length_mm = models.DecimalField(max_digits=38, decimal_places=8, null=True, blank=True)
    um_1_length_mm = models.DecimalField(max_digits=38, decimal_places=8, null=True, blank=True)
    um_1_width_mm = models.DecimalField(max_digits=38, decimal_places=8, null=True, blank=True)
    um_2_length_mm = models.DecimalField(max_digits=38, decimal_places=8, null=True, blank=True)
    um_2_width_mm = models.DecimalField(max_digits=38, decimal_places=8, null=True, blank=True)
    um_3_length_mm = models.DecimalField(max_digits=38, decimal_places=8, null=True, blank=True)
    um_3_width_mm = models.DecimalField(max_digits=38, decimal_places=8, null=True, blank=True)
    lm_tooth_row_length_mm = models.DecimalField(max_digits=38, decimal_places=8, null=True, blank=True)
    lm_1_length = models.DecimalField(max_digits=38, decimal_places=8, null=True, blank=True)
    lm_1_width = models.DecimalField(max_digits=38, decimal_places=8, null=True, blank=True)
    lm_2_length = models.DecimalField(max_digits=38, decimal_places=8, null=True, blank=True)
    lm_2_width = models.DecimalField(max_digits=38, decimal_places=8, null=True, blank=True)
    lm_3_length = models.DecimalField(max_digits=38, decimal_places=8, null=True, blank=True)
    lm_3_width = models.DecimalField(max_digits=38, decimal_places=8, null=True, blank=True)
    # TODO delete attributes, preparations and morphobank number
    attributes = models.CharField(null=True, blank=True, max_length=50)
    preparations = models.CharField(null=True, blank=True, max_length=50)
    morphobank_number = models.IntegerField(null=True, blank=True)  # empty, ok to delete

    def __str__(self):
        """Display the identified taxon.

        str() already invokes Taxon.__str__, so the previous
        ``str(self.taxon.__str__())`` double conversion was redundant.
        """
        return str(self.taxon)

    class Meta:
        verbose_name = "HRP Biology"
        verbose_name_plural = "HRP Biology"
class Archaeology(Occurrence):
    """An archaeological occurrence; extends Occurrence with a find type
    and artifact measurements (in millimetres, per the field names)."""
    find_type = models.CharField(null=True, blank=True, max_length=255)
    length_mm = models.DecimalField(max_digits=38, decimal_places=8, null=True, blank=True)
    width_mm = models.DecimalField(max_digits=38, decimal_places=8, null=True, blank=True)
    class Meta:
        verbose_name = "HRP Archaeology"
        verbose_name_plural = "HRP Archaeology"
class Geology(Occurrence):
    """A geological occurrence; extends Occurrence with structural and
    lithological attributes."""
    find_type = models.CharField(null=True, blank=True, max_length=255)
    # dip/strike of a structural measurement — presumably in degrees; confirm units
    dip = models.DecimalField(max_digits=38, decimal_places=8, null=True, blank=True)
    strike = models.DecimalField(max_digits=38, decimal_places=8, null=True, blank=True)
    color = models.CharField(null=True, blank=True, max_length=255)
    texture = models.CharField(null=True, blank=True, max_length=255)
    class Meta:
        verbose_name = "HRP Geology"
        verbose_name_plural = "HRP Geology"
# Hydrology Class
class Hydrology(models.Model):
    """A hydrological line feature (e.g. a drainage) with a linestring geometry."""
    length = models.DecimalField(max_digits=38, decimal_places=8, null=True, blank=True)
    name = models.CharField(null=True, blank=True, max_length=50)
    size = models.IntegerField(null=True, blank=True)
    map_sheet = models.CharField(null=True, blank=True, max_length=50)
    geom = models.LineStringField(srid=4326)  # WGS84 linestring
    objects = models.GeoManager()  # GeoDjango manager for spatial queries
    def __str__(self):
        # str() renders a null name as the literal "None".
        return str(self.name)
    class Meta:
        verbose_name = "HRP Hydrology"
        verbose_name_plural = "HRP Hydrology"
# Media Classes
class Image(models.Model):
    """An image attached to an Occurrence."""
    # NOTE(review): related_name 'hrp_occurrences' is the reverse accessor an
    # Occurrence uses to reach its Images — the name reads oddly for a set of
    # images; verify intent before renaming (it would change the query API).
    occurrence = models.ForeignKey("Occurrence", related_name='hrp_occurrences')
    image = models.ImageField(upload_to="uploads/images", null=True, blank=True)
    description = models.TextField(null=True, blank=True)
class File(models.Model):
    """An arbitrary file attached to an Occurrence."""
    occurrence = models.ForeignKey("Occurrence")
    file = models.FileField(upload_to="uploads/files", null=True, blank=True)
    description = models.TextField(null=True, blank=True)
| [
"django.contrib.gis.db.models.ForeignKey",
"django.contrib.gis.db.models.SmallIntegerField",
"django.contrib.gis.db.models.CharField",
"django.contrib.gis.db.models.ImageField",
"django.contrib.gis.db.models.FileField",
"django.contrib.gis.db.models.LineStringField",
"os.path.join",
"django.contrib.gi... | [((517, 565), 'django.contrib.gis.db.models.ForeignKey', 'models.ForeignKey', (['"""self"""'], {'null': '(True)', 'blank': '(True)'}), "('self', null=True, blank=True)\n", (534, 565), False, 'from django.contrib.gis.db import models\n'), ((577, 605), 'django.contrib.gis.db.models.ForeignKey', 'models.ForeignKey', (['TaxonRank'], {}), '(TaxonRank)\n', (594, 605), False, 'from django.contrib.gis.db import models\n'), ((969, 1019), 'django.contrib.gis.db.models.CharField', 'models.CharField', ([], {'primary_key': '(True)', 'max_length': '(255)'}), '(primary_key=True, max_length=255)\n', (985, 1019), False, 'from django.contrib.gis.db import models\n'), ((1042, 1130), 'django.contrib.gis.db.models.CharField', 'models.CharField', ([], {'null': '(True)', 'blank': '(True)', 'choices': 'HRP_COLLECTION_CODES', 'max_length': '(10)'}), '(null=True, blank=True, choices=HRP_COLLECTION_CODES,\n max_length=10)\n', (1058, 1130), False, 'from django.contrib.gis.db import models\n'), ((1149, 1191), 'django.contrib.gis.db.models.IntegerField', 'models.IntegerField', ([], {'null': '(True)', 'blank': '(True)'}), '(null=True, blank=True)\n', (1168, 1191), False, 'from django.contrib.gis.db import models\n'), ((1210, 1264), 'django.contrib.gis.db.models.CharField', 'models.CharField', ([], {'null': '(True)', 'blank': '(True)', 'max_length': '(50)'}), '(null=True, blank=True, max_length=50)\n', (1226, 1264), False, 'from django.contrib.gis.db import models\n'), ((1283, 1338), 'django.contrib.gis.db.models.TextField', 'models.TextField', ([], {'null': '(True)', 'blank': '(True)', 'max_length': '(255)'}), '(null=True, blank=True, max_length=255)\n', (1299, 1338), False, 'from django.contrib.gis.db import models\n'), ((1367, 1421), 'django.contrib.gis.db.models.CharField', 'models.CharField', ([], {'null': '(True)', 'blank': '(True)', 'max_length': '(50)'}), '(null=True, blank=True, max_length=50)\n', (1383, 1421), False, 'from django.contrib.gis.db import models\n'), 
((1451, 1493), 'django.contrib.gis.db.models.IntegerField', 'models.IntegerField', ([], {'null': '(True)', 'blank': '(True)'}), '(null=True, blank=True)\n', (1470, 1493), False, 'from django.contrib.gis.db import models\n'), ((1523, 1598), 'django.contrib.gis.db.models.DecimalField', 'models.DecimalField', ([], {'max_digits': '(38)', 'decimal_places': '(8)', 'null': '(True)', 'blank': '(True)'}), '(max_digits=38, decimal_places=8, null=True, blank=True)\n', (1542, 1598), False, 'from django.contrib.gis.db import models\n'), ((1617, 1672), 'django.contrib.gis.db.models.CharField', 'models.CharField', ([], {'max_length': '(255)', 'null': '(True)', 'blank': '(True)'}), '(max_length=255, null=True, blank=True)\n', (1633, 1672), False, 'from django.contrib.gis.db import models\n'), ((1685, 1740), 'django.contrib.gis.db.models.CharField', 'models.CharField', ([], {'max_length': '(254)', 'null': '(True)', 'blank': '(True)'}), '(max_length=254, null=True, blank=True)\n', (1701, 1740), False, 'from django.contrib.gis.db import models\n'), ((1752, 1803), 'django.contrib.gis.db.models.PointField', 'models.PointField', ([], {'srid': '(4326)', 'blank': '(True)', 'null': '(True)'}), '(srid=4326, blank=True, null=True)\n', (1769, 1803), False, 'from django.contrib.gis.db import models\n'), ((1829, 1886), 'django.contrib.gis.db.models.DateTimeField', 'models.DateTimeField', (['"""Date Last Modified"""'], {'auto_now': '(True)'}), "('Date Last Modified', auto_now=True)\n", (1849, 1886), False, 'from django.contrib.gis.db import models\n'), ((1901, 1920), 'django.contrib.gis.db.models.GeoManager', 'models.GeoManager', ([], {}), '()\n', (1918, 1920), False, 'from django.contrib.gis.db import models\n'), ((2324, 2392), 'django.contrib.gis.db.models.CharField', 'models.CharField', (['"""Last Name"""'], {'null': '(True)', 'blank': '(True)', 'max_length': '(256)'}), "('Last Name', null=True, blank=True, max_length=256)\n", (2340, 2392), False, 'from django.contrib.gis.db import 
models\n'), ((2410, 2479), 'django.contrib.gis.db.models.CharField', 'models.CharField', (['"""First Name"""'], {'null': '(True)', 'blank': '(True)', 'max_length': '(256)'}), "('First Name', null=True, blank=True, max_length=256)\n", (2426, 2479), False, 'from django.contrib.gis.db import models\n'), ((3193, 3366), 'django.contrib.gis.db.models.CharField', 'models.CharField', (['"""Basis of Record"""'], {'max_length': '(50)', 'blank': '(True)', 'null': '(False)', 'help_text': '"""e.g. Observed item or Collected item"""', 'choices': 'HRP_BASIS_OF_RECORD_VOCABULARY'}), "('Basis of Record', max_length=50, blank=True, null=False,\n help_text='e.g. Observed item or Collected item', choices=\n HRP_BASIS_OF_RECORD_VOCABULARY)\n", (3209, 3366), False, 'from django.contrib.gis.db import models\n'), ((3486, 3556), 'django.contrib.gis.db.models.CharField', 'models.CharField', (['"""Field Number"""'], {'max_length': '(50)', 'null': '(True)', 'blank': '(True)'}), "('Field Number', max_length=50, null=True, blank=True)\n", (3502, 3556), False, 'from django.contrib.gis.db import models\n'), ((3573, 3676), 'django.contrib.gis.db.models.CharField', 'models.CharField', (['"""Item Type"""'], {'max_length': '(255)', 'blank': '(True)', 'null': '(False)', 'choices': 'ITEM_TYPE_VOCABULARY'}), "('Item Type', max_length=255, blank=True, null=False,\n choices=ITEM_TYPE_VOCABULARY)\n", (3589, 3676), False, 'from django.contrib.gis.db import models\n'), ((3773, 3840), 'django.contrib.gis.db.models.CharField', 'models.CharField', (['"""Sci Name"""'], {'max_length': '(255)', 'null': '(True)', 'blank': '(True)'}), "('Sci Name', max_length=255, null=True, blank=True)\n", (3789, 3840), False, 'from django.contrib.gis.db import models\n'), ((3894, 3964), 'django.contrib.gis.db.models.CharField', 'models.CharField', (['"""Description"""'], {'max_length': '(255)', 'blank': '(True)', 'null': '(True)'}), "('Description', max_length=255, blank=True, null=True)\n", (3910, 3964), False, 'from 
django.contrib.gis.db import models\n'), ((3982, 4049), 'django.contrib.gis.db.models.IntegerField', 'models.IntegerField', (['"""Item Count"""'], {'blank': '(True)', 'null': '(True)', 'default': '(1)'}), "('Item Count', blank=True, null=True, default=1)\n", (4001, 4049), False, 'from django.contrib.gis.db import models\n'), ((4066, 4169), 'django.contrib.gis.db.models.CharField', 'models.CharField', (['"""Collector"""'], {'max_length': '(50)', 'blank': '(True)', 'null': '(True)', 'choices': 'HRP_COLLECTOR_CHOICES'}), "('Collector', max_length=50, blank=True, null=True, choices\n =HRP_COLLECTOR_CHOICES)\n", (4082, 4169), False, 'from django.contrib.gis.db import models\n'), ((4183, 4277), 'django.contrib.gis.db.models.ForeignKey', 'models.ForeignKey', (['"""Person"""'], {'null': '(True)', 'blank': '(True)', 'related_name': '"""occurrence_recorded_by"""'}), "('Person', null=True, blank=True, related_name=\n 'occurrence_recorded_by')\n", (4200, 4277), False, 'from django.contrib.gis.db import models\n'), ((4286, 4386), 'django.contrib.gis.db.models.CharField', 'models.CharField', (['"""Finder"""'], {'null': '(True)', 'blank': '(True)', 'max_length': '(50)', 'choices': 'HRP_COLLECTOR_CHOICES'}), "('Finder', null=True, blank=True, max_length=50, choices=\n HRP_COLLECTOR_CHOICES)\n", (4302, 4386), False, 'from django.contrib.gis.db import models\n'), ((4397, 4488), 'django.contrib.gis.db.models.ForeignKey', 'models.ForeignKey', (['"""Person"""'], {'null': '(True)', 'blank': '(True)', 'related_name': '"""occurrence_found_by"""'}), "('Person', null=True, blank=True, related_name=\n 'occurrence_found_by')\n", (4414, 4488), False, 'from django.contrib.gis.db import models\n'), ((4508, 4630), 'django.contrib.gis.db.models.CharField', 'models.CharField', (['"""Collecting Method"""'], {'max_length': '(50)', 'choices': 'HRP_COLLECTING_METHOD_VOCABULARY', 'null': '(True)', 'blank': '(True)'}), "('Collecting Method', max_length=50, choices=\n HRP_COLLECTING_METHOD_VOCABULARY, 
null=True, blank=True)\n", (4524, 4630), False, 'from django.contrib.gis.db import models\n'), ((4723, 4775), 'django.contrib.gis.db.models.ForeignKey', 'models.ForeignKey', (['"""Locality"""'], {'null': '(True)', 'blank': '(True)'}), "('Locality', null=True, blank=True)\n", (4740, 4775), False, 'from django.contrib.gis.db import models\n'), ((4819, 4871), 'django.contrib.gis.db.models.IntegerField', 'models.IntegerField', (['"""Item #"""'], {'null': '(True)', 'blank': '(True)'}), "('Item #', null=True, blank=True)\n", (4838, 4871), False, 'from django.contrib.gis.db import models\n'), ((4888, 4955), 'django.contrib.gis.db.models.CharField', 'models.CharField', (['"""Item Part"""'], {'max_length': '(10)', 'null': '(True)', 'blank': '(True)'}), "('Item Part', max_length=10, null=True, blank=True)\n", (4904, 4955), False, 'from django.contrib.gis.db import models\n'), ((4973, 5042), 'django.contrib.gis.db.models.CharField', 'models.CharField', (['"""Cat Number"""'], {'max_length': '(255)', 'blank': '(True)', 'null': '(True)'}), "('Cat Number', max_length=255, blank=True, null=True)\n", (4989, 5042), False, 'from django.contrib.gis.db import models\n'), ((5061, 5131), 'django.contrib.gis.db.models.CharField', 'models.CharField', (['"""Disposition"""'], {'max_length': '(255)', 'blank': '(True)', 'null': '(True)'}), "('Disposition', max_length=255, blank=True, null=True)\n", (5077, 5131), False, 'from django.contrib.gis.db import models\n'), ((5157, 5226), 'django.contrib.gis.db.models.CharField', 'models.CharField', (['"""Prep Status"""'], {'max_length': '(50)', 'blank': '(True)', 'null': '(True)'}), "('Prep Status', max_length=50, blank=True, null=True)\n", (5173, 5226), False, 'from django.contrib.gis.db import models\n'), ((5305, 5382), 'django.contrib.gis.db.models.TextField', 'models.TextField', (['"""Collection Remarks"""'], {'null': '(True)', 'blank': '(True)', 'max_length': '(255)'}), "('Collection Remarks', null=True, blank=True, max_length=255)\n", (5321, 
5382), False, 'from django.contrib.gis.db import models\n'), ((5439, 5507), 'django.contrib.gis.db.models.CharField', 'models.CharField', (['"""Formation"""'], {'max_length': '(255)', 'blank': '(True)', 'null': '(True)'}), "('Formation', max_length=255, blank=True, null=True)\n", (5455, 5507), False, 'from django.contrib.gis.db import models\n'), ((5535, 5600), 'django.contrib.gis.db.models.CharField', 'models.CharField', (['"""Member"""'], {'max_length': '(255)', 'blank': '(True)', 'null': '(True)'}), "('Member', max_length=255, blank=True, null=True)\n", (5551, 5600), False, 'from django.contrib.gis.db import models\n'), ((5625, 5680), 'django.contrib.gis.db.models.CharField', 'models.CharField', ([], {'max_length': '(255)', 'blank': '(True)', 'null': '(True)'}), '(max_length=255, blank=True, null=True)\n', (5641, 5680), False, 'from django.contrib.gis.db import models\n'), ((5705, 5760), 'django.contrib.gis.db.models.CharField', 'models.CharField', ([], {'max_length': '(255)', 'blank': '(True)', 'null': '(True)'}), '(max_length=255, blank=True, null=True)\n', (5721, 5760), False, 'from django.contrib.gis.db import models\n'), ((5785, 5840), 'django.contrib.gis.db.models.CharField', 'models.CharField', ([], {'max_length': '(255)', 'blank': '(True)', 'null': '(True)'}), '(max_length=255, blank=True, null=True)\n', (5801, 5840), False, 'from django.contrib.gis.db import models\n'), ((5869, 5924), 'django.contrib.gis.db.models.CharField', 'models.CharField', ([], {'max_length': '(255)', 'blank': '(True)', 'null': '(True)'}), '(max_length=255, blank=True, null=True)\n', (5885, 5924), False, 'from django.contrib.gis.db import models\n'), ((5954, 6009), 'django.contrib.gis.db.models.CharField', 'models.CharField', ([], {'max_length': '(255)', 'blank': '(True)', 'null': '(True)'}), '(max_length=255, blank=True, null=True)\n', (5970, 6009), False, 'from django.contrib.gis.db import models\n'), ((6043, 6098), 'django.contrib.gis.db.models.CharField', 'models.CharField', 
([], {'max_length': '(255)', 'blank': '(True)', 'null': '(True)'}), '(max_length=255, blank=True, null=True)\n', (6059, 6098), False, 'from django.contrib.gis.db import models\n'), ((6113, 6147), 'django.contrib.gis.db.models.BooleanField', 'models.BooleanField', ([], {'default': '(False)'}), '(default=False)\n', (6132, 6147), False, 'from django.contrib.gis.db import models\n'), ((6161, 6195), 'django.contrib.gis.db.models.BooleanField', 'models.BooleanField', ([], {'default': '(False)'}), '(default=False)\n', (6180, 6195), False, 'from django.contrib.gis.db import models\n'), ((6213, 6260), 'django.contrib.gis.db.models.SmallIntegerField', 'models.SmallIntegerField', ([], {'blank': '(True)', 'null': '(True)'}), '(blank=True, null=True)\n', (6237, 6260), False, 'from django.contrib.gis.db import models\n'), ((6288, 6358), 'django.contrib.gis.db.models.CharField', 'models.CharField', (['"""Surface Mod"""'], {'max_length': '(255)', 'blank': '(True)', 'null': '(True)'}), "('Surface Mod', max_length=255, blank=True, null=True)\n", (6304, 6358), False, 'from django.contrib.gis.db import models\n'), ((6381, 6452), 'django.contrib.gis.db.models.TextField', 'models.TextField', (['"""Geol Remarks"""'], {'max_length': '(500)', 'null': '(True)', 'blank': '(True)'}), "('Geol Remarks', max_length=500, null=True, blank=True)\n", (6397, 6452), False, 'from django.contrib.gis.db import models\n'), ((6491, 6564), 'django.contrib.gis.db.models.CharField', 'models.CharField', (['"""Collection Code"""'], {'max_length': '(20)', 'blank': '(True)', 'null': '(True)'}), "('Collection Code', max_length=20, blank=True, null=True)\n", (6507, 6564), False, 'from django.contrib.gis.db import models\n'), ((6587, 6661), 'django.contrib.gis.db.models.CharField', 'models.CharField', (['"""Drainage Region"""'], {'null': '(True)', 'blank': '(True)', 'max_length': '(255)'}), "('Drainage Region', null=True, blank=True, max_length=255)\n", (6603, 6661), False, 'from django.contrib.gis.db import 
models\n'), ((6687, 6778), 'django.contrib.gis.db.models.FileField', 'models.FileField', ([], {'max_length': '(255)', 'blank': '(True)', 'upload_to': '"""uploads/images/hrp"""', 'null': '(True)'}), "(max_length=255, blank=True, upload_to='uploads/images/hrp',\n null=True)\n", (6703, 6778), False, 'from django.contrib.gis.db import models\n'), ((9955, 10016), 'django.contrib.gis.db.models.CharField', 'models.CharField', (['"""Sex"""'], {'null': '(True)', 'blank': '(True)', 'max_length': '(50)'}), "('Sex', null=True, blank=True, max_length=50)\n", (9971, 10016), False, 'from django.contrib.gis.db import models\n'), ((10034, 10138), 'django.contrib.gis.db.models.CharField', 'models.CharField', (['"""Life Stage"""'], {'null': '(True)', 'blank': '(True)', 'max_length': '(50)', 'choices': 'HRP_LIFE_STAGE_CHOICES'}), "('Life Stage', null=True, blank=True, max_length=50,\n choices=HRP_LIFE_STAGE_CHOICES)\n", (10050, 10138), False, 'from django.contrib.gis.db import models\n'), ((10152, 10256), 'django.contrib.gis.db.models.CharField', 'models.CharField', (['"""Size Class"""'], {'null': '(True)', 'blank': '(True)', 'max_length': '(50)', 'choices': 'HRP_SIZE_CLASS_CHOICES'}), "('Size Class', null=True, blank=True, max_length=50,\n choices=HRP_SIZE_CLASS_CHOICES)\n", (10168, 10256), False, 'from django.contrib.gis.db import models\n'), ((10277, 10388), 'django.contrib.gis.db.models.ForeignKey', 'models.ForeignKey', (['Taxon'], {'default': '(0)', 'on_delete': 'models.SET_DEFAULT', 'related_name': '"""hrp_taxon_bio_occurrences"""'}), "(Taxon, default=0, on_delete=models.SET_DEFAULT,\n related_name='hrp_taxon_bio_occurrences')\n", (10294, 10388), False, 'from django.contrib.gis.db import models\n'), ((10514, 10660), 'django.contrib.gis.db.models.ForeignKey', 'models.ForeignKey', (['IdentificationQualifier'], {'null': '(True)', 'blank': '(True)', 'on_delete': 'models.SET_NULL', 'related_name': '"""hrp_id_qualifier_bio_occurrences"""'}), "(IdentificationQualifier, null=True, 
blank=True, on_delete\n =models.SET_NULL, related_name='hrp_id_qualifier_bio_occurrences')\n", (10531, 10660), False, 'from django.contrib.gis.db import models\n'), ((10776, 10906), 'django.contrib.gis.db.models.ForeignKey', 'models.ForeignKey', (['Taxon'], {'null': '(True)', 'blank': '(True)', 'on_delete': 'models.SET_NULL', 'related_name': '"""hrp_qualifier_taxon_bio_occurrences"""'}), "(Taxon, null=True, blank=True, on_delete=models.SET_NULL,\n related_name='hrp_qualifier_taxon_bio_occurrences')\n", (10793, 10906), False, 'from django.contrib.gis.db import models\n'), ((11004, 11060), 'django.contrib.gis.db.models.CharField', 'models.CharField', ([], {'null': '(True)', 'blank': '(True)', 'max_length': '(1024)'}), '(null=True, blank=True, max_length=1024)\n', (11020, 11060), False, 'from django.contrib.gis.db import models\n'), ((11101, 11156), 'django.contrib.gis.db.models.CharField', 'models.CharField', ([], {'null': '(True)', 'blank': '(True)', 'max_length': '(255)'}), '(null=True, blank=True, max_length=255)\n', (11117, 11156), False, 'from django.contrib.gis.db import models\n'), ((11180, 11235), 'django.contrib.gis.db.models.TextField', 'models.TextField', ([], {'max_length': '(500)', 'null': '(True)', 'blank': '(True)'}), '(max_length=500, null=True, blank=True)\n', (11196, 11235), False, 'from django.contrib.gis.db import models\n'), ((11278, 11370), 'django.contrib.gis.db.models.CharField', 'models.CharField', ([], {'null': '(True)', 'blank': '(True)', 'max_length': '(100)', 'choices': 'HRP_IDENTIFIER_CHOICES'}), '(null=True, blank=True, max_length=100, choices=\n HRP_IDENTIFIER_CHOICES)\n', (11294, 11370), False, 'from django.contrib.gis.db import models\n'), ((11388, 11430), 'django.contrib.gis.db.models.IntegerField', 'models.IntegerField', ([], {'null': '(True)', 'blank': '(True)'}), '(null=True, blank=True)\n', (11407, 11430), False, 'from django.contrib.gis.db import models\n'), ((11449, 11503), 'django.contrib.gis.db.models.CharField', 
'models.CharField', ([], {'null': '(True)', 'blank': '(True)', 'max_length': '(50)'}), '(null=True, blank=True, max_length=50)\n', (11465, 11503), False, 'from django.contrib.gis.db import models\n'), ((11523, 11580), 'django.contrib.gis.db.models.TextField', 'models.TextField', ([], {'null': '(True)', 'blank': '(True)', 'max_length': '(64000)'}), '(null=True, blank=True, max_length=64000)\n', (11539, 11580), False, 'from django.contrib.gis.db import models\n'), ((11607, 11700), 'django.contrib.gis.db.models.CharField', 'models.CharField', (['"""Side"""'], {'null': '(True)', 'blank': '(True)', 'max_length': '(50)', 'choices': 'HRP_SIDE_CHOICES'}), "('Side', null=True, blank=True, max_length=50, choices=\n HRP_SIDE_CHOICES)\n", (11623, 11700), False, 'from django.contrib.gis.db import models\n'), ((11710, 11809), 'django.contrib.gis.db.models.CharField', 'models.CharField', (['"""Element"""'], {'null': '(True)', 'blank': '(True)', 'max_length': '(50)', 'choices': 'HRP_ELEMENT_CHOICES'}), "('Element', null=True, blank=True, max_length=50, choices=\n HRP_ELEMENT_CHOICES)\n", (11726, 11809), False, 'from django.contrib.gis.db import models\n'), ((11890, 12001), 'django.contrib.gis.db.models.CharField', 'models.CharField', (['"""Element Mod"""'], {'null': '(True)', 'blank': '(True)', 'max_length': '(50)', 'choices': 'HRP_ELEMENT_MODIFIER_CHOICES'}), "('Element Mod', null=True, blank=True, max_length=50,\n choices=HRP_ELEMENT_MODIFIER_CHOICES)\n", (11906, 12001), False, 'from django.contrib.gis.db import models\n'), ((12102, 12216), 'django.contrib.gis.db.models.CharField', 'models.CharField', (['"""Element Portion"""'], {'null': '(True)', 'blank': '(True)', 'max_length': '(50)', 'choices': 'HRP_ELEMENT_PORTION_CHOICES'}), "('Element Portion', null=True, blank=True, max_length=50,\n choices=HRP_ELEMENT_PORTION_CHOICES)\n", (12118, 12216), False, 'from django.contrib.gis.db import models\n'), ((12322, 12417), 'django.contrib.gis.db.models.CharField', 'models.CharField', 
([], {'null': '(True)', 'blank': '(True)', 'max_length': '(50)', 'choices': 'HRP_ELEMENT_NUMBER_CHOICES'}), '(null=True, blank=True, max_length=50, choices=\n HRP_ELEMENT_NUMBER_CHOICES)\n', (12338, 12417), False, 'from django.contrib.gis.db import models\n'), ((12435, 12490), 'django.contrib.gis.db.models.TextField', 'models.TextField', ([], {'max_length': '(500)', 'null': '(True)', 'blank': '(True)'}), '(max_length=500, null=True, blank=True)\n', (12451, 12490), False, 'from django.contrib.gis.db import models\n'), ((12519, 12573), 'django.contrib.gis.db.models.CharField', 'models.CharField', ([], {'null': '(True)', 'blank': '(True)', 'max_length': '(50)'}), '(null=True, blank=True, max_length=50)\n', (12535, 12573), False, 'from django.contrib.gis.db import models\n'), ((12593, 12647), 'django.contrib.gis.db.models.CharField', 'models.CharField', ([], {'null': '(True)', 'blank': '(True)', 'max_length': '(50)'}), '(null=True, blank=True, max_length=50)\n', (12609, 12647), False, 'from django.contrib.gis.db import models\n'), ((12665, 12719), 'django.contrib.gis.db.models.CharField', 'models.CharField', ([], {'null': '(True)', 'blank': '(True)', 'max_length': '(50)'}), '(null=True, blank=True, max_length=50)\n', (12681, 12719), False, 'from django.contrib.gis.db import models\n'), ((12761, 12795), 'django.contrib.gis.db.models.BooleanField', 'models.BooleanField', ([], {'default': '(False)'}), '(default=False)\n', (12780, 12795), False, 'from django.contrib.gis.db import models\n'), ((12807, 12841), 'django.contrib.gis.db.models.BooleanField', 'models.BooleanField', ([], {'default': '(False)'}), '(default=False)\n', (12826, 12841), False, 'from django.contrib.gis.db import models\n'), ((12853, 12887), 'django.contrib.gis.db.models.BooleanField', 'models.BooleanField', ([], {'default': '(False)'}), '(default=False)\n', (12872, 12887), False, 'from django.contrib.gis.db import models\n'), ((12899, 12933), 'django.contrib.gis.db.models.BooleanField', 
'models.BooleanField', ([], {'default': '(False)'}), '(default=False)\n', (12918, 12933), False, 'from django.contrib.gis.db import models\n'), ((12945, 12979), 'django.contrib.gis.db.models.BooleanField', 'models.BooleanField', ([], {'default': '(False)'}), '(default=False)\n', (12964, 12979), False, 'from django.contrib.gis.db import models\n'), ((12991, 13025), 'django.contrib.gis.db.models.BooleanField', 'models.BooleanField', ([], {'default': '(False)'}), '(default=False)\n', (13010, 13025), False, 'from django.contrib.gis.db import models\n'), ((13037, 13071), 'django.contrib.gis.db.models.BooleanField', 'models.BooleanField', ([], {'default': '(False)'}), '(default=False)\n', (13056, 13071), False, 'from django.contrib.gis.db import models\n'), ((13083, 13117), 'django.contrib.gis.db.models.BooleanField', 'models.BooleanField', ([], {'default': '(False)'}), '(default=False)\n', (13102, 13117), False, 'from django.contrib.gis.db import models\n'), ((13129, 13163), 'django.contrib.gis.db.models.BooleanField', 'models.BooleanField', ([], {'default': '(False)'}), '(default=False)\n', (13148, 13163), False, 'from django.contrib.gis.db import models\n'), ((13175, 13209), 'django.contrib.gis.db.models.BooleanField', 'models.BooleanField', ([], {'default': '(False)'}), '(default=False)\n', (13194, 13209), False, 'from django.contrib.gis.db import models\n'), ((13220, 13254), 'django.contrib.gis.db.models.BooleanField', 'models.BooleanField', ([], {'default': '(False)'}), '(default=False)\n', (13239, 13254), False, 'from django.contrib.gis.db import models\n'), ((13265, 13299), 'django.contrib.gis.db.models.BooleanField', 'models.BooleanField', ([], {'default': '(False)'}), '(default=False)\n', (13284, 13299), False, 'from django.contrib.gis.db import models\n'), ((13311, 13345), 'django.contrib.gis.db.models.BooleanField', 'models.BooleanField', ([], {'default': '(False)'}), '(default=False)\n', (13330, 13345), False, 'from django.contrib.gis.db import models\n'), 
((13357, 13391), 'django.contrib.gis.db.models.BooleanField', 'models.BooleanField', ([], {'default': '(False)'}), '(default=False)\n', (13376, 13391), False, 'from django.contrib.gis.db import models\n'), ((13403, 13437), 'django.contrib.gis.db.models.BooleanField', 'models.BooleanField', ([], {'default': '(False)'}), '(default=False)\n', (13422, 13437), False, 'from django.contrib.gis.db import models\n'), ((13449, 13483), 'django.contrib.gis.db.models.BooleanField', 'models.BooleanField', ([], {'default': '(False)'}), '(default=False)\n', (13468, 13483), False, 'from django.contrib.gis.db import models\n'), ((13495, 13529), 'django.contrib.gis.db.models.BooleanField', 'models.BooleanField', ([], {'default': '(False)'}), '(default=False)\n', (13514, 13529), False, 'from django.contrib.gis.db import models\n'), ((13541, 13575), 'django.contrib.gis.db.models.BooleanField', 'models.BooleanField', ([], {'default': '(False)'}), '(default=False)\n', (13560, 13575), False, 'from django.contrib.gis.db import models\n'), ((13587, 13621), 'django.contrib.gis.db.models.BooleanField', 'models.BooleanField', ([], {'default': '(False)'}), '(default=False)\n', (13606, 13621), False, 'from django.contrib.gis.db import models\n'), ((13633, 13667), 'django.contrib.gis.db.models.BooleanField', 'models.BooleanField', ([], {'default': '(False)'}), '(default=False)\n', (13652, 13667), False, 'from django.contrib.gis.db import models\n'), ((13679, 13713), 'django.contrib.gis.db.models.BooleanField', 'models.BooleanField', ([], {'default': '(False)'}), '(default=False)\n', (13698, 13713), False, 'from django.contrib.gis.db import models\n'), ((13725, 13759), 'django.contrib.gis.db.models.BooleanField', 'models.BooleanField', ([], {'default': '(False)'}), '(default=False)\n', (13744, 13759), False, 'from django.contrib.gis.db import models\n'), ((13771, 13805), 'django.contrib.gis.db.models.BooleanField', 'models.BooleanField', ([], {'default': '(False)'}), '(default=False)\n', (13790, 
13805), False, 'from django.contrib.gis.db import models\n'), ((13817, 13851), 'django.contrib.gis.db.models.BooleanField', 'models.BooleanField', ([], {'default': '(False)'}), '(default=False)\n', (13836, 13851), False, 'from django.contrib.gis.db import models\n'), ((13863, 13897), 'django.contrib.gis.db.models.BooleanField', 'models.BooleanField', ([], {'default': '(False)'}), '(default=False)\n', (13882, 13897), False, 'from django.contrib.gis.db import models\n'), ((13909, 13943), 'django.contrib.gis.db.models.BooleanField', 'models.BooleanField', ([], {'default': '(False)'}), '(default=False)\n', (13928, 13943), False, 'from django.contrib.gis.db import models\n'), ((13984, 14018), 'django.contrib.gis.db.models.BooleanField', 'models.BooleanField', ([], {'default': '(False)'}), '(default=False)\n', (14003, 14018), False, 'from django.contrib.gis.db import models\n'), ((14030, 14064), 'django.contrib.gis.db.models.BooleanField', 'models.BooleanField', ([], {'default': '(False)'}), '(default=False)\n', (14049, 14064), False, 'from django.contrib.gis.db import models\n'), ((14076, 14110), 'django.contrib.gis.db.models.BooleanField', 'models.BooleanField', ([], {'default': '(False)'}), '(default=False)\n', (14095, 14110), False, 'from django.contrib.gis.db import models\n'), ((14122, 14156), 'django.contrib.gis.db.models.BooleanField', 'models.BooleanField', ([], {'default': '(False)'}), '(default=False)\n', (14141, 14156), False, 'from django.contrib.gis.db import models\n'), ((14168, 14202), 'django.contrib.gis.db.models.BooleanField', 'models.BooleanField', ([], {'default': '(False)'}), '(default=False)\n', (14187, 14202), False, 'from django.contrib.gis.db import models\n'), ((14214, 14248), 'django.contrib.gis.db.models.BooleanField', 'models.BooleanField', ([], {'default': '(False)'}), '(default=False)\n', (14233, 14248), False, 'from django.contrib.gis.db import models\n'), ((14260, 14294), 'django.contrib.gis.db.models.BooleanField', 
'models.BooleanField', ([], {'default': '(False)'}), '(default=False)\n', (14279, 14294), False, 'from django.contrib.gis.db import models\n'), ((14306, 14340), 'django.contrib.gis.db.models.BooleanField', 'models.BooleanField', ([], {'default': '(False)'}), '(default=False)\n', (14325, 14340), False, 'from django.contrib.gis.db import models\n'), ((14352, 14386), 'django.contrib.gis.db.models.BooleanField', 'models.BooleanField', ([], {'default': '(False)'}), '(default=False)\n', (14371, 14386), False, 'from django.contrib.gis.db import models\n'), ((14398, 14432), 'django.contrib.gis.db.models.BooleanField', 'models.BooleanField', ([], {'default': '(False)'}), '(default=False)\n', (14417, 14432), False, 'from django.contrib.gis.db import models\n'), ((14443, 14477), 'django.contrib.gis.db.models.BooleanField', 'models.BooleanField', ([], {'default': '(False)'}), '(default=False)\n', (14462, 14477), False, 'from django.contrib.gis.db import models\n'), ((14488, 14522), 'django.contrib.gis.db.models.BooleanField', 'models.BooleanField', ([], {'default': '(False)'}), '(default=False)\n', (14507, 14522), False, 'from django.contrib.gis.db import models\n'), ((14534, 14568), 'django.contrib.gis.db.models.BooleanField', 'models.BooleanField', ([], {'default': '(False)'}), '(default=False)\n', (14553, 14568), False, 'from django.contrib.gis.db import models\n'), ((14580, 14614), 'django.contrib.gis.db.models.BooleanField', 'models.BooleanField', ([], {'default': '(False)'}), '(default=False)\n', (14599, 14614), False, 'from django.contrib.gis.db import models\n'), ((14626, 14660), 'django.contrib.gis.db.models.BooleanField', 'models.BooleanField', ([], {'default': '(False)'}), '(default=False)\n', (14645, 14660), False, 'from django.contrib.gis.db import models\n'), ((14672, 14706), 'django.contrib.gis.db.models.BooleanField', 'models.BooleanField', ([], {'default': '(False)'}), '(default=False)\n', (14691, 14706), False, 'from django.contrib.gis.db import models\n'), 
((14718, 14752), 'django.contrib.gis.db.models.BooleanField', 'models.BooleanField', ([], {'default': '(False)'}), '(default=False)\n', (14737, 14752), False, 'from django.contrib.gis.db import models\n'), ((14764, 14798), 'django.contrib.gis.db.models.BooleanField', 'models.BooleanField', ([], {'default': '(False)'}), '(default=False)\n', (14783, 14798), False, 'from django.contrib.gis.db import models\n'), ((14810, 14844), 'django.contrib.gis.db.models.BooleanField', 'models.BooleanField', ([], {'default': '(False)'}), '(default=False)\n', (14829, 14844), False, 'from django.contrib.gis.db import models\n'), ((14856, 14890), 'django.contrib.gis.db.models.BooleanField', 'models.BooleanField', ([], {'default': '(False)'}), '(default=False)\n', (14875, 14890), False, 'from django.contrib.gis.db import models\n'), ((14902, 14936), 'django.contrib.gis.db.models.BooleanField', 'models.BooleanField', ([], {'default': '(False)'}), '(default=False)\n', (14921, 14936), False, 'from django.contrib.gis.db import models\n'), ((14948, 14982), 'django.contrib.gis.db.models.BooleanField', 'models.BooleanField', ([], {'default': '(False)'}), '(default=False)\n', (14967, 14982), False, 'from django.contrib.gis.db import models\n'), ((14994, 15028), 'django.contrib.gis.db.models.BooleanField', 'models.BooleanField', ([], {'default': '(False)'}), '(default=False)\n', (15013, 15028), False, 'from django.contrib.gis.db import models\n'), ((15040, 15074), 'django.contrib.gis.db.models.BooleanField', 'models.BooleanField', ([], {'default': '(False)'}), '(default=False)\n', (15059, 15074), False, 'from django.contrib.gis.db import models\n'), ((15086, 15120), 'django.contrib.gis.db.models.BooleanField', 'models.BooleanField', ([], {'default': '(False)'}), '(default=False)\n', (15105, 15120), False, 'from django.contrib.gis.db import models\n'), ((15132, 15166), 'django.contrib.gis.db.models.BooleanField', 'models.BooleanField', ([], {'default': '(False)'}), '(default=False)\n', (15151, 
15166), False, 'from django.contrib.gis.db import models\n'), ((15221, 15255), 'django.contrib.gis.db.models.BooleanField', 'models.BooleanField', ([], {'default': '(False)'}), '(default=False)\n', (15240, 15255), False, 'from django.contrib.gis.db import models\n'), ((15275, 15309), 'django.contrib.gis.db.models.BooleanField', 'models.BooleanField', ([], {'default': '(False)'}), '(default=False)\n', (15294, 15309), False, 'from django.contrib.gis.db import models\n'), ((15331, 15365), 'django.contrib.gis.db.models.BooleanField', 'models.BooleanField', ([], {'default': '(False)'}), '(default=False)\n', (15350, 15365), False, 'from django.contrib.gis.db import models\n'), ((15384, 15418), 'django.contrib.gis.db.models.BooleanField', 'models.BooleanField', ([], {'default': '(False)'}), '(default=False)\n', (15403, 15418), False, 'from django.contrib.gis.db import models\n'), ((15437, 15471), 'django.contrib.gis.db.models.BooleanField', 'models.BooleanField', ([], {'default': '(False)'}), '(default=False)\n', (15456, 15471), False, 'from django.contrib.gis.db import models\n'), ((15488, 15522), 'django.contrib.gis.db.models.BooleanField', 'models.BooleanField', ([], {'default': '(False)'}), '(default=False)\n', (15507, 15522), False, 'from django.contrib.gis.db import models\n'), ((15572, 15647), 'django.contrib.gis.db.models.DecimalField', 'models.DecimalField', ([], {'max_digits': '(38)', 'decimal_places': '(8)', 'null': '(True)', 'blank': '(True)'}), '(max_digits=38, decimal_places=8, null=True, blank=True)\n', (15591, 15647), False, 'from django.contrib.gis.db import models\n'), ((15669, 15744), 'django.contrib.gis.db.models.DecimalField', 'models.DecimalField', ([], {'max_digits': '(38)', 'decimal_places': '(8)', 'null': '(True)', 'blank': '(True)'}), '(max_digits=38, decimal_places=8, null=True, blank=True)\n', (15688, 15744), False, 'from django.contrib.gis.db import models\n'), ((15765, 15840), 'django.contrib.gis.db.models.DecimalField', 
'models.DecimalField', ([], {'max_digits': '(38)', 'decimal_places': '(8)', 'null': '(True)', 'blank': '(True)'}), '(max_digits=38, decimal_places=8, null=True, blank=True)\n', (15784, 15840), False, 'from django.contrib.gis.db import models\n'), ((15862, 15937), 'django.contrib.gis.db.models.DecimalField', 'models.DecimalField', ([], {'max_digits': '(38)', 'decimal_places': '(8)', 'null': '(True)', 'blank': '(True)'}), '(max_digits=38, decimal_places=8, null=True, blank=True)\n', (15881, 15937), False, 'from django.contrib.gis.db import models\n'), ((15958, 16033), 'django.contrib.gis.db.models.DecimalField', 'models.DecimalField', ([], {'max_digits': '(38)', 'decimal_places': '(8)', 'null': '(True)', 'blank': '(True)'}), '(max_digits=38, decimal_places=8, null=True, blank=True)\n', (15977, 16033), False, 'from django.contrib.gis.db import models\n'), ((16055, 16130), 'django.contrib.gis.db.models.DecimalField', 'models.DecimalField', ([], {'max_digits': '(38)', 'decimal_places': '(8)', 'null': '(True)', 'blank': '(True)'}), '(max_digits=38, decimal_places=8, null=True, blank=True)\n', (16074, 16130), False, 'from django.contrib.gis.db import models\n'), ((16151, 16226), 'django.contrib.gis.db.models.DecimalField', 'models.DecimalField', ([], {'max_digits': '(38)', 'decimal_places': '(8)', 'null': '(True)', 'blank': '(True)'}), '(max_digits=38, decimal_places=8, null=True, blank=True)\n', (16170, 16226), False, 'from django.contrib.gis.db import models\n'), ((16256, 16331), 'django.contrib.gis.db.models.DecimalField', 'models.DecimalField', ([], {'max_digits': '(38)', 'decimal_places': '(8)', 'null': '(True)', 'blank': '(True)'}), '(max_digits=38, decimal_places=8, null=True, blank=True)\n', (16275, 16331), False, 'from django.contrib.gis.db import models\n'), ((16350, 16425), 'django.contrib.gis.db.models.DecimalField', 'models.DecimalField', ([], {'max_digits': '(38)', 'decimal_places': '(8)', 'null': '(True)', 'blank': '(True)'}), '(max_digits=38, 
decimal_places=8, null=True, blank=True)\n', (16369, 16425), False, 'from django.contrib.gis.db import models\n'), ((16443, 16518), 'django.contrib.gis.db.models.DecimalField', 'models.DecimalField', ([], {'max_digits': '(38)', 'decimal_places': '(8)', 'null': '(True)', 'blank': '(True)'}), '(max_digits=38, decimal_places=8, null=True, blank=True)\n', (16462, 16518), False, 'from django.contrib.gis.db import models\n'), ((16537, 16612), 'django.contrib.gis.db.models.DecimalField', 'models.DecimalField', ([], {'max_digits': '(38)', 'decimal_places': '(8)', 'null': '(True)', 'blank': '(True)'}), '(max_digits=38, decimal_places=8, null=True, blank=True)\n', (16556, 16612), False, 'from django.contrib.gis.db import models\n'), ((16630, 16705), 'django.contrib.gis.db.models.DecimalField', 'models.DecimalField', ([], {'max_digits': '(38)', 'decimal_places': '(8)', 'null': '(True)', 'blank': '(True)'}), '(max_digits=38, decimal_places=8, null=True, blank=True)\n', (16649, 16705), False, 'from django.contrib.gis.db import models\n'), ((16724, 16799), 'django.contrib.gis.db.models.DecimalField', 'models.DecimalField', ([], {'max_digits': '(38)', 'decimal_places': '(8)', 'null': '(True)', 'blank': '(True)'}), '(max_digits=38, decimal_places=8, null=True, blank=True)\n', (16743, 16799), False, 'from django.contrib.gis.db import models\n'), ((16817, 16892), 'django.contrib.gis.db.models.DecimalField', 'models.DecimalField', ([], {'max_digits': '(38)', 'decimal_places': '(8)', 'null': '(True)', 'blank': '(True)'}), '(max_digits=38, decimal_places=8, null=True, blank=True)\n', (16836, 16892), False, 'from django.contrib.gis.db import models\n'), ((16975, 17029), 'django.contrib.gis.db.models.CharField', 'models.CharField', ([], {'null': '(True)', 'blank': '(True)', 'max_length': '(50)'}), '(null=True, blank=True, max_length=50)\n', (16991, 17029), False, 'from django.contrib.gis.db import models\n'), ((17049, 17103), 'django.contrib.gis.db.models.CharField', 'models.CharField', 
([], {'null': '(True)', 'blank': '(True)', 'max_length': '(50)'}), '(null=True, blank=True, max_length=50)\n', (17065, 17103), False, 'from django.contrib.gis.db import models\n'), ((17128, 17170), 'django.contrib.gis.db.models.IntegerField', 'models.IntegerField', ([], {'null': '(True)', 'blank': '(True)'}), '(null=True, blank=True)\n', (17147, 17170), False, 'from django.contrib.gis.db import models\n'), ((17406, 17461), 'django.contrib.gis.db.models.CharField', 'models.CharField', ([], {'null': '(True)', 'blank': '(True)', 'max_length': '(255)'}), '(null=True, blank=True, max_length=255)\n', (17422, 17461), False, 'from django.contrib.gis.db import models\n'), ((17478, 17553), 'django.contrib.gis.db.models.DecimalField', 'models.DecimalField', ([], {'max_digits': '(38)', 'decimal_places': '(8)', 'null': '(True)', 'blank': '(True)'}), '(max_digits=38, decimal_places=8, null=True, blank=True)\n', (17497, 17553), False, 'from django.contrib.gis.db import models\n'), ((17569, 17644), 'django.contrib.gis.db.models.DecimalField', 'models.DecimalField', ([], {'max_digits': '(38)', 'decimal_places': '(8)', 'null': '(True)', 'blank': '(True)'}), '(max_digits=38, decimal_places=8, null=True, blank=True)\n', (17588, 17644), False, 'from django.contrib.gis.db import models\n'), ((17796, 17851), 'django.contrib.gis.db.models.CharField', 'models.CharField', ([], {'null': '(True)', 'blank': '(True)', 'max_length': '(255)'}), '(null=True, blank=True, max_length=255)\n', (17812, 17851), False, 'from django.contrib.gis.db import models\n'), ((17862, 17937), 'django.contrib.gis.db.models.DecimalField', 'models.DecimalField', ([], {'max_digits': '(38)', 'decimal_places': '(8)', 'null': '(True)', 'blank': '(True)'}), '(max_digits=38, decimal_places=8, null=True, blank=True)\n', (17881, 17937), False, 'from django.contrib.gis.db import models\n'), ((17951, 18026), 'django.contrib.gis.db.models.DecimalField', 'models.DecimalField', ([], {'max_digits': '(38)', 'decimal_places': '(8)', 
'null': '(True)', 'blank': '(True)'}), '(max_digits=38, decimal_places=8, null=True, blank=True)\n', (17970, 18026), False, 'from django.contrib.gis.db import models\n'), ((18039, 18094), 'django.contrib.gis.db.models.CharField', 'models.CharField', ([], {'null': '(True)', 'blank': '(True)', 'max_length': '(255)'}), '(null=True, blank=True, max_length=255)\n', (18055, 18094), False, 'from django.contrib.gis.db import models\n'), ((18109, 18164), 'django.contrib.gis.db.models.CharField', 'models.CharField', ([], {'null': '(True)', 'blank': '(True)', 'max_length': '(255)'}), '(null=True, blank=True, max_length=255)\n', (18125, 18164), False, 'from django.contrib.gis.db import models\n'), ((18327, 18402), 'django.contrib.gis.db.models.DecimalField', 'models.DecimalField', ([], {'max_digits': '(38)', 'decimal_places': '(8)', 'null': '(True)', 'blank': '(True)'}), '(max_digits=38, decimal_places=8, null=True, blank=True)\n', (18346, 18402), False, 'from django.contrib.gis.db import models\n'), ((18414, 18468), 'django.contrib.gis.db.models.CharField', 'models.CharField', ([], {'null': '(True)', 'blank': '(True)', 'max_length': '(50)'}), '(null=True, blank=True, max_length=50)\n', (18430, 18468), False, 'from django.contrib.gis.db import models\n'), ((18480, 18522), 'django.contrib.gis.db.models.IntegerField', 'models.IntegerField', ([], {'null': '(True)', 'blank': '(True)'}), '(null=True, blank=True)\n', (18499, 18522), False, 'from django.contrib.gis.db import models\n'), ((18539, 18593), 'django.contrib.gis.db.models.CharField', 'models.CharField', ([], {'null': '(True)', 'blank': '(True)', 'max_length': '(50)'}), '(null=True, blank=True, max_length=50)\n', (18555, 18593), False, 'from django.contrib.gis.db import models\n'), ((18605, 18638), 'django.contrib.gis.db.models.LineStringField', 'models.LineStringField', ([], {'srid': '(4326)'}), '(srid=4326)\n', (18627, 18638), False, 'from django.contrib.gis.db import models\n'), ((18653, 18672), 
'django.contrib.gis.db.models.GeoManager', 'models.GeoManager', ([], {}), '()\n', (18670, 18672), False, 'from django.contrib.gis.db import models\n'), ((18891, 18954), 'django.contrib.gis.db.models.ForeignKey', 'models.ForeignKey', (['"""Occurrence"""'], {'related_name': '"""hrp_occurrences"""'}), "('Occurrence', related_name='hrp_occurrences')\n", (18908, 18954), False, 'from django.contrib.gis.db import models\n'), ((18967, 19035), 'django.contrib.gis.db.models.ImageField', 'models.ImageField', ([], {'upload_to': '"""uploads/images"""', 'null': '(True)', 'blank': '(True)'}), "(upload_to='uploads/images', null=True, blank=True)\n", (18984, 19035), False, 'from django.contrib.gis.db import models\n'), ((19054, 19093), 'django.contrib.gis.db.models.TextField', 'models.TextField', ([], {'null': '(True)', 'blank': '(True)'}), '(null=True, blank=True)\n', (19070, 19093), False, 'from django.contrib.gis.db import models\n'), ((19139, 19170), 'django.contrib.gis.db.models.ForeignKey', 'models.ForeignKey', (['"""Occurrence"""'], {}), "('Occurrence')\n", (19156, 19170), False, 'from django.contrib.gis.db import models\n'), ((19182, 19248), 'django.contrib.gis.db.models.FileField', 'models.FileField', ([], {'upload_to': '"""uploads/files"""', 'null': '(True)', 'blank': '(True)'}), "(upload_to='uploads/files', null=True, blank=True)\n", (19198, 19248), False, 'from django.contrib.gis.db import models\n'), ((19267, 19306), 'django.contrib.gis.db.models.TextField', 'models.TextField', ([], {'null': '(True)', 'blank': '(True)'}), '(null=True, blank=True)\n', (19283, 19306), False, 'from django.contrib.gis.db import models\n'), ((9365, 9393), 'os.path.join', 'os.path.join', (['self.image.url'], {}), '(self.image.url)\n', (9377, 9393), False, 'import os\n'), ((9395, 9423), 'os.path.join', 'os.path.join', (['self.image.url'], {}), '(self.image.url)\n', (9407, 9423), False, 'import os\n'), ((9697, 9725), 'os.path.join', 'os.path.join', (['self.image.url'], {}), 
'(self.image.url)\n', (9709, 9725), False, 'import os\n'), ((9727, 9755), 'os.path.join', 'os.path.join', (['self.image.url'], {}), '(self.image.url)\n', (9739, 9755), False, 'import os\n')] |
import random
import math
listes = []
"""for i in range(3):
# listes.append(random.sample(range(5, 50), random.randint(5,1000)))
listes.append(random.sample(range(1, 100), 10))
"""
listes = [
[10,20,30,90,30,54,123,34,656,246,24,842,6784,2,56,4,5,7423,6,6,3,345,6,7,345,46],
[10,20,30,90],
[10,20,30,90,30,54,123,34,656,246,24,842,6784,2,56,4]
]
def sum(liste):
    """Return the total of the numbers in *liste* (0 for an empty list).

    NOTE(review): intentionally shadows the builtin ``sum``; kept with the
    same name so existing callers in this script keep working.
    """
    total = 0
    index = 0
    while index < len(liste):
        total += liste[index]
        index += 1
    return total
def mean(liste):
    """Return the arithmetic mean of the values in *liste*.

    Raises ZeroDivisionError for an empty list.
    """
    return sum(liste) / len(liste)
def std(liste):
    """Return the sample standard deviation of *liste*.

    Uses Bessel's correction (divides by n - 1), so at least two values
    are required; a single-element list raises ZeroDivisionError.
    """
    center = mean(liste)
    accum = 0.0
    for value in liste:
        deviation = center - value
        accum += deviation * deviation
    return math.sqrt(accum / (len(liste) - 1))
def variance(samples):
    """Return the sample variance of *samples* via Welford's algorithm.

    This is a single-pass ("streaming") computation: it maintains a running
    mean and a running sum of squared deviations (``m2``), which is more
    numerically stable than the naive sum-of-squares formula.

    Fixes over the original: the leftover debug ``print`` inside the loop is
    removed, the redundant ``x = samples[index]`` re-assignment is gone, and
    ``enumerate`` replaces the hand-maintained index counter.

    Raises ZeroDivisionError for fewer than two samples (Bessel's
    correction divides by n - 1).
    """
    mean_ = 0.0
    m2 = 0.0
    for count, x in enumerate(samples, start=1):
        old_mean = mean_
        # Welford update: fold x into the running mean, then accumulate
        # (x - new_mean) * (x - old_mean) into the squared-deviation sum.
        mean_ = mean_ + (x - old_mean) / count
        m2 = m2 + (x - mean_) * (x - old_mean)
    return m2 / (len(samples) - 1)
def evaluate(nums):
    """Print summary statistics for *nums* and compare the batch standard
    deviation (``std``) against the streaming (Welford) one.

    Bug fix: ``variance()`` returns a *variance*, but the original code
    subtracted it directly from ``std()``'s standard deviation — an
    apples-to-oranges comparison. Taking ``math.sqrt`` of the variance
    first makes both figures standard deviations.
    """
    print("list: ")
    print(nums)
    print("sum: {}".format(sum(nums)))
    print("mean: {}".format(mean(nums)))
    print("size: {}".format(len(nums)))
    batch_std = std(nums)
    print("batch_std: {}".format(batch_std))
    # Welford yields a variance; sqrt converts it to a standard deviation.
    streaming_std = math.sqrt(variance(nums))
    print("streaming_std: {}".format(streaming_std))
    difference = batch_std - streaming_std
    print("batch_std - streaming_std = {}".format(difference))
    # Relative error of the streaming estimate, in percent.
    error = 100 * (difference / batch_std)
    print("original error: {}%".format(error))
    print("float error: {}%".format(float("%0.9f" % error)))
    print("int error: {}%".format(int(error)))
    print("\n")
def main():
    """Evaluate growing prefixes of the first sample list, then every list.

    Cleanup: the original carried a commented-out duplicate of the prefix
    loop and a dead triple-quoted string; both are removed. The executed
    statements are unchanged.
    """
    liss = listes[0]
    print("liste:{}".format(liss))
    # Growing prefixes — skip sizes 0 and 1 because std() divides by n - 1.
    for i in range(len(liss)):
        if i == 0 or i == 1:
            continue
        sub_liste = liss[:i]
        print("original: {}".format(std(sub_liste)))
        evaluate(sub_liste)
    # Finally evaluate each full sample list.
    for liste in listes:
        evaluate(liste)
#main()
"""
for i in range(len(listes[0])):
if i == 0 or i == 1:
continue
sub_liste = listes[0][:i]
print("original[{}] S: {} - standardization: {}".format(i,std(sub_liste)))
"""
i = 0
for x in listes[0]:
i += 1
if i == 1:
continue
print("standardization({}) : {}".format(x, ((x-mean(listes[0][:i]))/(std(listes[0][:i])))))
M = S = count = sum = 0
while(True):
val = input("sayı: ")
val = int(val)
sum += val
count += 1
oldM = M
M = M + (val - M) / (count)
S = S + (val - M) * (val - oldM)
if count != 1:
print("S = {}".format(S / (count - 1)))
print("stream standardization({}) : {}".format(val, (val-(sum/count))/math.sqrt(S/(count-1))))
| [
"math.sqrt"
] | [((2908, 2934), 'math.sqrt', 'math.sqrt', (['(S / (count - 1))'], {}), '(S / (count - 1))\n', (2917, 2934), False, 'import math\n')] |
# coding=utf-8
# Copyright 2014 Pants project contributors (see CONTRIBUTORS.md).
# Licensed under the Apache License, Version 2.0 (see LICENSE).
from __future__ import (absolute_import, division, generators, nested_scopes, print_function,
unicode_literals, with_statement)
import os
from pants.backend.jvm.tasks.jvm_compile.resource_mapping import ResourceMapping
from pants_test.base_test import BaseTest
class ResourceMappingTest(BaseTest):
def test_resource_mapping_ok(self):
rel_dir = 'tests/python/pants_test/backend/jvm/tasks/jvm_compile/test-data/resource_mapping'
resource_mapping = ResourceMapping(rel_dir)
self.assertEquals(2, len(resource_mapping.mappings))
def test_resource_mapping_short(self):
rel_dir = 'tests/python/pants_test/backend/jvm/tasks/jvm_compile/test-data/resource_mapping-broken-short'
resource_mapping = ResourceMapping(rel_dir)
with self.assertRaises(ResourceMapping.TruncatedFileException):
resource_mapping.mappings
def test_resource_mapping_long(self):
rel_dir = 'tests/python/pants_test/backend/jvm/tasks/jvm_compile/test-data/resource_mapping-broken-long'
resource_mapping = ResourceMapping(rel_dir)
with self.assertRaises(ResourceMapping.TooLongFileException):
resource_mapping.mappings
def test_resource_mapping_mangled(self):
rel_dir = 'tests/python/pants_test/backend/jvm/tasks/jvm_compile/test-data/resource_mapping-broken-mangled'
resource_mapping = ResourceMapping(rel_dir)
with self.assertRaises(ResourceMapping.UnparseableLineException):
resource_mapping.mappings
def test_resource_mapping_noitems(self):
rel_dir = 'tests/python/pants_test/backend/jvm/tasks/jvm_compile/test-data/resource_mapping-broken-missing-items'
resource_mapping = ResourceMapping(rel_dir)
with self.assertRaises(ResourceMapping.MissingItemsLineException):
resource_mapping.mappings
| [
"pants.backend.jvm.tasks.jvm_compile.resource_mapping.ResourceMapping"
] | [((631, 655), 'pants.backend.jvm.tasks.jvm_compile.resource_mapping.ResourceMapping', 'ResourceMapping', (['rel_dir'], {}), '(rel_dir)\n', (646, 655), False, 'from pants.backend.jvm.tasks.jvm_compile.resource_mapping import ResourceMapping\n'), ((889, 913), 'pants.backend.jvm.tasks.jvm_compile.resource_mapping.ResourceMapping', 'ResourceMapping', (['rel_dir'], {}), '(rel_dir)\n', (904, 913), False, 'from pants.backend.jvm.tasks.jvm_compile.resource_mapping import ResourceMapping\n'), ((1188, 1212), 'pants.backend.jvm.tasks.jvm_compile.resource_mapping.ResourceMapping', 'ResourceMapping', (['rel_dir'], {}), '(rel_dir)\n', (1203, 1212), False, 'from pants.backend.jvm.tasks.jvm_compile.resource_mapping import ResourceMapping\n'), ((1491, 1515), 'pants.backend.jvm.tasks.jvm_compile.resource_mapping.ResourceMapping', 'ResourceMapping', (['rel_dir'], {}), '(rel_dir)\n', (1506, 1515), False, 'from pants.backend.jvm.tasks.jvm_compile.resource_mapping import ResourceMapping\n'), ((1805, 1829), 'pants.backend.jvm.tasks.jvm_compile.resource_mapping.ResourceMapping', 'ResourceMapping', (['rel_dir'], {}), '(rel_dir)\n', (1820, 1829), False, 'from pants.backend.jvm.tasks.jvm_compile.resource_mapping import ResourceMapping\n')] |
from dogey import Dogey
from dogey.classes import Message, User, Room, Context
from dogey.exceptions import DogeyCommandError
dogey = Dogey(token='your token', refresh_token='<PASSWORD> refresh token', prefix='.')
bot = dogey.bot
@dogey.event
async def on_ready():
print(f'{bot.name} is up! (prefix is {bot.prefix})')
await dogey.create_room('dogey.py', description='A simple event example bot', is_private=False)
@dogey.event
async def on_room_created(room: Room):
# Dogey auto saves both room details and room members when you get in a room
print(f'Created room: {room.name}')
@dogey.event
async def on_user_join(user: User, room: Room):
print(f'{user.username} has joined {room.name}')
await dogey.send(f'Welcome {user.username} to {room.name}!')
@dogey.event
async def on_user_leave(user: User, room: Room):
print(f'{user.username} has left {room.name}')
@dogey.event
async def on_message(message: Message):
author: User = dogey.room_members[message.sent_from]
print(f'A message has been sent by {author.username}: {message.content}')
@dogey.event
async def on_hand_raised(user: User):
await dogey.add_speaker(user.id)
await dogey.send(f'Gave {user.username} permission to speak.')
@dogey.event
async def on_room_leave(room: Room):
print(f'I\ve left: {room.name}')
@dogey.event
async def on_command_error(ctx: Context, error: DogeyCommandError):
await dogey.send(f'{error.command_name}: {error.message}')
dogey.start()
| [
"dogey.Dogey"
] | [((135, 214), 'dogey.Dogey', 'Dogey', ([], {'token': '"""your token"""', 'refresh_token': '"""<PASSWORD> refresh token"""', 'prefix': '"""."""'}), "(token='your token', refresh_token='<PASSWORD> refresh token', prefix='.')\n", (140, 214), False, 'from dogey import Dogey\n')] |
#ミニバッチ学習
import numpy as np
from dataset.mnist import load_mnist
(x_train, t_train), (x_test, t_test) =\
load_mnist(normalize=True, one_hot_label=True)
print(x_train.shape)
print(t_train.shape)
train_size = x_train.shape[0]
batch_size = 10
batch_mask = np.random.choice(train_size, batch_size)
x_batch = x_train[batch_mask]
t_batch = x_train[batch_mask]
print(batch_mask)
| [
"numpy.random.choice",
"dataset.mnist.load_mnist"
] | [((110, 156), 'dataset.mnist.load_mnist', 'load_mnist', ([], {'normalize': '(True)', 'one_hot_label': '(True)'}), '(normalize=True, one_hot_label=True)\n', (120, 156), False, 'from dataset.mnist import load_mnist\n'), ((260, 300), 'numpy.random.choice', 'np.random.choice', (['train_size', 'batch_size'], {}), '(train_size, batch_size)\n', (276, 300), True, 'import numpy as np\n')] |
import socket
from multiprocessing import Pool, Queue, Manager, cpu_count
from ..protocol.methods import *
from ..protocol.models import *
from ..various.abc import CounterServer
MAX_WORKERS = cpu_count()
class UDPCounterServer(CounterServer):
def __init__(self, ip="0.0.0.0", port=0, max_workers=MAX_WORKERS):
self.ip = ip
self.port = port
self.sock = None
self.is_running = False
# workers
self.manager = Manager()
self.topic_sum_map = self.manager.dict()
self.pending_requests = Queue()
self.workers_pool = Pool(
processes=max_workers, initializer=self.worker_loop)
def run(self) -> None:
self.bind_socket()
self.is_running = True
try:
while self.is_running:
msg, addr = self.sock.recvfrom(MSG_MAXIMUM_LENGTH)
self.pending_requests.put((msg, addr))
except Exception as err:
if self.is_running:
raise err
def worker_loop(self) -> None:
while True:
msg, addr = self.pending_requests.get()
re_msg = get_response(msg, self.topic_sum_map)
self.send_response(re_msg, addr)
def send_response(self, message: bytes, addr) -> None:
sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
sock.sendto(message, addr)
def bind_socket(self) -> None:
self.sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
self.sock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
self.sock.bind((self.ip, self.port))
def stop(self) -> None:
self.is_running = False
self.workers_pool.terminate()
self.workers_pool.join()
self.sock.close()
class TCPCounterServer(CounterServer):
def __init__(self, ip="0.0.0.0", port=0, max_workers=MAX_WORKERS):
self.ip = ip
self.port = port
self.sock = None
self.is_running = False
# workers
self.manager = Manager()
self.topic_sum_map = self.manager.dict()
self.pending_requests = Queue()
self.workers_pool = Pool(
processes=max_workers, initializer=self.worker_loop)
def run(self) -> None:
self.bind_socket()
self.is_running = True
try:
while self.is_running:
conn, addr = self.sock.accept()
self.pending_requests.put((conn, addr))
except Exception as err:
if self.is_running:
raise err
def worker_loop(self) -> None:
while True:
conn, addr = self.pending_requests.get()
msg = conn.recv(MSG_MAXIMUM_LENGTH)
re_msg = get_response(msg, self.topic_sum_map)
self.send_response(re_msg, conn)
def send_response(self, message: bytes, conn) -> None:
conn.send(message)
conn.close()
def bind_socket(self) -> None:
self.sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
self.sock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
self.sock.bind((self.ip, self.port))
self.sock.listen(1)
def stop(self) -> None:
self.is_running = False
self.sock.close()
| [
"socket.socket",
"multiprocessing.cpu_count",
"multiprocessing.Pool",
"multiprocessing.Manager",
"multiprocessing.Queue"
] | [((195, 206), 'multiprocessing.cpu_count', 'cpu_count', ([], {}), '()\n', (204, 206), False, 'from multiprocessing import Pool, Queue, Manager, cpu_count\n'), ((463, 472), 'multiprocessing.Manager', 'Manager', ([], {}), '()\n', (470, 472), False, 'from multiprocessing import Pool, Queue, Manager, cpu_count\n'), ((554, 561), 'multiprocessing.Queue', 'Queue', ([], {}), '()\n', (559, 561), False, 'from multiprocessing import Pool, Queue, Manager, cpu_count\n'), ((590, 647), 'multiprocessing.Pool', 'Pool', ([], {'processes': 'max_workers', 'initializer': 'self.worker_loop'}), '(processes=max_workers, initializer=self.worker_loop)\n', (594, 647), False, 'from multiprocessing import Pool, Queue, Manager, cpu_count\n'), ((1296, 1344), 'socket.socket', 'socket.socket', (['socket.AF_INET', 'socket.SOCK_DGRAM'], {}), '(socket.AF_INET, socket.SOCK_DGRAM)\n', (1309, 1344), False, 'import socket\n'), ((1436, 1484), 'socket.socket', 'socket.socket', (['socket.AF_INET', 'socket.SOCK_DGRAM'], {}), '(socket.AF_INET, socket.SOCK_DGRAM)\n', (1449, 1484), False, 'import socket\n'), ((2016, 2025), 'multiprocessing.Manager', 'Manager', ([], {}), '()\n', (2023, 2025), False, 'from multiprocessing import Pool, Queue, Manager, cpu_count\n'), ((2107, 2114), 'multiprocessing.Queue', 'Queue', ([], {}), '()\n', (2112, 2114), False, 'from multiprocessing import Pool, Queue, Manager, cpu_count\n'), ((2143, 2200), 'multiprocessing.Pool', 'Pool', ([], {'processes': 'max_workers', 'initializer': 'self.worker_loop'}), '(processes=max_workers, initializer=self.worker_loop)\n', (2147, 2200), False, 'from multiprocessing import Pool, Queue, Manager, cpu_count\n'), ((2969, 3018), 'socket.socket', 'socket.socket', (['socket.AF_INET', 'socket.SOCK_STREAM'], {}), '(socket.AF_INET, socket.SOCK_STREAM)\n', (2982, 3018), False, 'import socket\n')] |
# Copyright (c) 2019 Science and Technology Facilities Council
# All rights reserved.
# Modifications made as part of the fparser project are distributed
# under the following license:
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
# 1. Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# 2. Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# 3. Neither the name of the copyright holder nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
'''Test Fortran 2003 constraint C1002 : This file tests the support
for a format specification. The standard C1002 tests are performed via
test_format_specification_r1002.py as the constraints are associated
with R1002. This file picks up any tests that need to act directly on
this class.
'''
import pytest
from fparser.two.Fortran2003 import Format_Item_C1002
from fparser.two.utils import InternalError, NoMatchError
def test_data_edit_descriptor_error(f2003_create):
'''Check that None is returned if the descriptor following a P edit
descriptor is not of the expected type. What is expected is a
Format_Item instance containing a Data_Edit_Descriptor as its
second item. This test checks that we return None if the second
item is not a Data_Edit_Descriptor.
We do this by trying to match with a format-item-list as this is
the only other thing that returns a Format_Item instance. However,
it does not contain a Data_Edit_Descriptor as its second item so
it triggers the appropriate line of code.
'''
my_input = "2P ('hello')"
with pytest.raises(NoMatchError) as excinfo:
_ = Format_Item_C1002(my_input)
assert "Format_Item_C1002: '2P ('hello')'" in str(excinfo.value)
def test_internal_errors1(f2003_create, monkeypatch):
'''Check that an internal error is raised if the length of the Items
list is not 2 as the str() method assumes that it is.
'''
line = "2P F2.2"
ast = Format_Item_C1002(line)
monkeypatch.setattr(ast, "items", [None, None, None])
with pytest.raises(InternalError) as excinfo:
str(ast)
assert "should be length 2 but found '3'" in str(excinfo.value)
def test_internal_error2(f2003_create, monkeypatch):
'''Check that an internal error is raised if entry 0 of items is empty
or None as the str() method assumes that it has content.
'''
line = "2P F2.2"
ast = Format_Item_C1002(line)
monkeypatch.setattr(ast, "items", [None, ast.items[1]])
with pytest.raises(InternalError) as excinfo:
str(ast)
assert ("items entry 0 should contain a format items object but it "
"is empty or None") in str(excinfo.value)
def test_internal_error3(f2003_create, monkeypatch):
'''Check that an internal error is raised if entry 1 of items is empty
or None as the str() method assumes that it has content.
'''
line = "2P F2.2"
ast = Format_Item_C1002(line)
monkeypatch.setattr(ast, "items", [ast.items[0], None])
with pytest.raises(InternalError) as excinfo:
str(ast)
assert ("items entry 1 should contain a format items object but it "
"is empty or None") in str(excinfo.value)
| [
"fparser.two.Fortran2003.Format_Item_C1002",
"pytest.raises"
] | [((3129, 3152), 'fparser.two.Fortran2003.Format_Item_C1002', 'Format_Item_C1002', (['line'], {}), '(line)\n', (3146, 3152), False, 'from fparser.two.Fortran2003 import Format_Item_C1002\n'), ((3577, 3600), 'fparser.two.Fortran2003.Format_Item_C1002', 'Format_Item_C1002', (['line'], {}), '(line)\n', (3594, 3600), False, 'from fparser.two.Fortran2003 import Format_Item_C1002\n'), ((4086, 4109), 'fparser.two.Fortran2003.Format_Item_C1002', 'Format_Item_C1002', (['line'], {}), '(line)\n', (4103, 4109), False, 'from fparser.two.Fortran2003 import Format_Item_C1002\n'), ((2753, 2780), 'pytest.raises', 'pytest.raises', (['NoMatchError'], {}), '(NoMatchError)\n', (2766, 2780), False, 'import pytest\n'), ((2805, 2832), 'fparser.two.Fortran2003.Format_Item_C1002', 'Format_Item_C1002', (['my_input'], {}), '(my_input)\n', (2822, 2832), False, 'from fparser.two.Fortran2003 import Format_Item_C1002\n'), ((3220, 3248), 'pytest.raises', 'pytest.raises', (['InternalError'], {}), '(InternalError)\n', (3233, 3248), False, 'import pytest\n'), ((3670, 3698), 'pytest.raises', 'pytest.raises', (['InternalError'], {}), '(InternalError)\n', (3683, 3698), False, 'import pytest\n'), ((4179, 4207), 'pytest.raises', 'pytest.raises', (['InternalError'], {}), '(InternalError)\n', (4192, 4207), False, 'import pytest\n')] |
from django import template
from django.utils.http import urlquote
import re
register = template.Library()
@register.filter
def quote_filepath(url):
_, scheme, path = re.split(r'(https?://)', url)
return '{}{}'.format(scheme, urlquote(path))
| [
"re.split",
"django.template.Library",
"django.utils.http.urlquote"
] | [((89, 107), 'django.template.Library', 'template.Library', ([], {}), '()\n', (105, 107), False, 'from django import template\n'), ((173, 201), 're.split', 're.split', (['"""(https?://)"""', 'url'], {}), "('(https?://)', url)\n", (181, 201), False, 'import re\n'), ((236, 250), 'django.utils.http.urlquote', 'urlquote', (['path'], {}), '(path)\n', (244, 250), False, 'from django.utils.http import urlquote\n')] |
from pydantic import BaseModel
from typing import Optional
class RequestDataModel(BaseModel):
loginToken: str
def login_with_google(data: dict):
request_data = RequestDataModel(**data)
from google.oauth2 import id_token
from google.auth.transport.requests import Request as GoogleRequest
user_infos: Optional[dict] = id_token.verify_oauth2_token(
id_token=request_data.loginToken, request=GoogleRequest(), audience='token_id'
)
print(user_infos)
| [
"google.auth.transport.requests.Request"
] | [((420, 435), 'google.auth.transport.requests.Request', 'GoogleRequest', ([], {}), '()\n', (433, 435), True, 'from google.auth.transport.requests import Request as GoogleRequest\n')] |
from PIL import Image, ImageEnhance
import os
import argparse
def change_brightness(source_dir, save_dir, brightness):
os.makedirs(save_dir, exist_ok=True)
image_pathes = [f for f in os.scandir(source_dir) if f.is_file()]
for image_path in image_pathes:
save_path = os.path.join(save_dir, image_path.name.rstrip('.jpg') + "_" + str(brightness) + ".jpg")
ImageEnhance.Brightness(Image.open(image_path.path)).enhance(brightness).save(save_path)
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument('-i', '--imagedir', type=str, required=True)
parser.add_argument('-s', '--savedir', type=str, required=True)
args = parser.parse_args()
for brightness in (0.5, 0.75, 1.25, 1.5):
change_brightness(args.imagedir, os.path.join(args.savedir, str(brightness)), brightness) | [
"os.scandir",
"argparse.ArgumentParser",
"os.makedirs",
"PIL.Image.open"
] | [((124, 160), 'os.makedirs', 'os.makedirs', (['save_dir'], {'exist_ok': '(True)'}), '(save_dir, exist_ok=True)\n', (135, 160), False, 'import os\n'), ((514, 539), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {}), '()\n', (537, 539), False, 'import argparse\n'), ((192, 214), 'os.scandir', 'os.scandir', (['source_dir'], {}), '(source_dir)\n', (202, 214), False, 'import os\n'), ((407, 434), 'PIL.Image.open', 'Image.open', (['image_path.path'], {}), '(image_path.path)\n', (417, 434), False, 'from PIL import Image, ImageEnhance\n')] |
import json
import errno
import os
import ext_logging
from . import BaseTestCase, log
class TraceCase(BaseTestCase):
def test_multiple_handlers(self):
log_conf_sysl = {
'handler': 'ext_logging.handlers.StdOutExtendedSysLogHandler',
'level': 'DEBUG',
'json_serializer': json.JSONEncoder,
}
log_conf_elk = {
'handler': 'ext_logging.handlers.ELKFileHandler',
'level': 'DEBUG',
'json_serializer': json.JSONEncoder,
'elkdir': '.'
}
ext_logging.configure_logs({
'MODULES': {
'test': [log_conf_sysl, log_conf_elk],
}
})
log.info('here test', json_data={'this': {'does not': 'fail'}})
| [
"ext_logging.configure_logs"
] | [((562, 647), 'ext_logging.configure_logs', 'ext_logging.configure_logs', (["{'MODULES': {'test': [log_conf_sysl, log_conf_elk]}}"], {}), "({'MODULES': {'test': [log_conf_sysl, log_conf_elk]}}\n )\n", (588, 647), False, 'import ext_logging\n')] |
import FWCore.ParameterSet.Config as cms
from Configuration.Generator.Pythia8CommonSettings_cfi import *
from Configuration.Generator.Pythia8CUEP8M1Settings_cfi import *
generator = cms.EDFilter("Pythia8GeneratorFilter",
#pythiaHepMCVerbosity = cms.untracked.bool(False),
comEnergy = cms.double(13000.0),
maxEventsToPrint = cms.untracked.int32(0),
pythiaPylistVerbosity = cms.untracked.int32(1),
filterEfficiency = cms.untracked.double(1.0),
crossSection = cms.untracked.double(0.00002497),
PythiaParameters = cms.PSet(
pythia8CommonSettingsBlock,
pythia8CUEP8M1SettingsBlock,
processParameters = cms.vstring(
'NewGaugeBoson:ffbar2gmZZprime = on',
'Zprime:gmZmode = 0',
'32:m0 =5000',
'32:onMode = off',
'32:onIfAny = 1',
'32:onIfAny = 2',
'32:onIfAny = 3',
'32:onIfAny = 4',
),
parameterSets = cms.vstring('pythia8CommonSettings',
'pythia8CUEP8M1Settings',
'processParameters',
)
)
)
| [
"FWCore.ParameterSet.Config.untracked.int32",
"FWCore.ParameterSet.Config.untracked.double",
"FWCore.ParameterSet.Config.vstring",
"FWCore.ParameterSet.Config.double"
] | [((335, 354), 'FWCore.ParameterSet.Config.double', 'cms.double', (['(13000.0)'], {}), '(13000.0)\n', (345, 354), True, 'import FWCore.ParameterSet.Config as cms\n'), ((400, 422), 'FWCore.ParameterSet.Config.untracked.int32', 'cms.untracked.int32', (['(0)'], {}), '(0)\n', (419, 422), True, 'import FWCore.ParameterSet.Config as cms\n'), ((473, 495), 'FWCore.ParameterSet.Config.untracked.int32', 'cms.untracked.int32', (['(1)'], {}), '(1)\n', (492, 495), True, 'import FWCore.ParameterSet.Config as cms\n'), ((541, 566), 'FWCore.ParameterSet.Config.untracked.double', 'cms.untracked.double', (['(1.0)'], {}), '(1.0)\n', (561, 566), True, 'import FWCore.ParameterSet.Config as cms\n'), ((608, 639), 'FWCore.ParameterSet.Config.untracked.double', 'cms.untracked.double', (['(2.497e-05)'], {}), '(2.497e-05)\n', (628, 639), True, 'import FWCore.ParameterSet.Config as cms\n'), ((797, 982), 'FWCore.ParameterSet.Config.vstring', 'cms.vstring', (['"""NewGaugeBoson:ffbar2gmZZprime = on"""', '"""Zprime:gmZmode = 0"""', '"""32:m0 =5000"""', '"""32:onMode = off"""', '"""32:onIfAny = 1"""', '"""32:onIfAny = 2"""', '"""32:onIfAny = 3"""', '"""32:onIfAny = 4"""'], {}), "('NewGaugeBoson:ffbar2gmZZprime = on', 'Zprime:gmZmode = 0',\n '32:m0 =5000', '32:onMode = off', '32:onIfAny = 1', '32:onIfAny = 2',\n '32:onIfAny = 3', '32:onIfAny = 4')\n", (808, 982), True, 'import FWCore.ParameterSet.Config as cms\n'), ((1123, 1210), 'FWCore.ParameterSet.Config.vstring', 'cms.vstring', (['"""pythia8CommonSettings"""', '"""pythia8CUEP8M1Settings"""', '"""processParameters"""'], {}), "('pythia8CommonSettings', 'pythia8CUEP8M1Settings',\n 'processParameters')\n", (1134, 1210), True, 'import FWCore.ParameterSet.Config as cms\n')] |
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
# Copied from multihead_attention.py
# Change it for phrase level gaussian attention
# TODO:
# 1. Graph based function
# 2. Convlution based function
# Phrase_args
# 1. generate_function
# 2. parse_function
# 3. center_first
# 4. window_size
# Phrase_info
# Notimplemented yet
import math
from typing import Dict, Optional, Tuple
from math import ceil
import torch
import torch.nn.functional as F
from fairseq import utils
from torch import Tensor, nn
from torch.nn import Parameter
from fairseq.incremental_decoding_utils import with_incremental_state
# import torchsnooper
# from torch_geometric.nn import GATConv, GCNConv
class PhraseGenerator(nn.Module):
"""
Phrase level representation generator
1. Parsing the seqence for different function
"""
def __init__(
self,
phrase_args,
):
"""
init function
Args:
embed_dim ([int]): [the input dimension (is the same as output dimension)]
generate_function ([str]): using different phrase generate functions
center_first ([bool, default None]): whether let the 1st token to be the center of the phrase
"""
super().__init__()
generate_function = phrase_args.generate_function
center_first = phrase_args.center_first
self.__parse_func__ = PhraseBuilder(phrase_args)
# Basic function
if(generate_function == 'max-pooling'):
self.__type__ = generate_function
self.__repr_func__ = lambda tokens: torch.max(tokens, 2)[0]
elif(generate_function == 'averate-pooling'):
self.__type__ = generate_function
self.__repr_func__ = lambda tokens: torch.mean(tokens, 2)[0]
# Graph based function
# Not implemented
# Undone
elif(generate_function == 'GAT'):
assert type(center_first) == bool
self.__type__ = generate_function
raise NotImplementedError
pass
elif(generate_function == 'GCN'):
assert type(center_first) == bool
self.__type__ = generate_function
raise NotImplementedError
pass
# Conv based function
# Undone
elif(generate_function == 'CNN'):
raise NotImplementedError
pass
else:
# Return first token as outputs
self.__repr_func__ = lambda tokens: tokens[0]
return
def forward(
self,
x,
phrase_info,
):
"""
forward method
Args:
x ([Tensor]): [(bsz*head_num, seq_len, head_dim) the tensor in attention layer]
phrase_info ([dict]): [used for parsing]
Returns:
[Tensor]: [(bsz*head_num, phrase_num, head_dim)]
"""
parsed, phrase_info = self.__parse_func__(x, phrase_info)
output = self.__repr_func__(parsed)
return output, phrase_info
# Undone
# 1. fixed_window √
# 2. graph based ×
class PhraseBuilder:
def __init__(self, phrase_args):
"""
[Parsing the seq into Phrases, each sentence is parsed into multiple phrases]
Args:
phrase_args ([dict]): [used for parsing]
"""
self.parse_function = phrase_args.parse_function
if(self.parse_function == 'fixed_window'):
assert 'window_size' in dir(phrase_args), (
'Using fixed window, but the size of window is not indicated'
)
self.window_size = phrase_args.window_size
def __call__(self, x, phrase_info):
"""
[Parsing the seq into Phrases, each sentence is parsed into multiple phrases]
Args:
x ([Tensor]): (bsz*head_num, seq_len, head_dim) the tensor in attention layer
phrase_info ([dict]): [used for parsing and etc.]
Returns:
result: [Tensor], (phrase_len, phrase_num, bsz, embed_dim)
phrase_info: [dict], contain information like mu and sigma
"""
if(self.parse_function == 'fixed_window'):
device = x.device
seq_length = x.size(1)
# bsz here indicate bsz * head_num
bsz = x.size(0)
chunks = ceil(seq_length / self.window_size)
max_seq_size = self.window_size * chunks
pad = (0, max_seq_size - seq_length)
# Padding Zero to the Tensor X
x = x.transpose(1, -1)
x = F.pad(x, pad)
x = x.transpose(1, -1)
x = x.chunk(chunks, dim=1)
result = torch.stack(x, dim=1)
fixed_mu = torch.arange(
int(self.window_size / 2), max_seq_size, self.window_size, device=device)
fixed_mu = fixed_mu.repeat(bsz, seq_length, 1)
fixed_sigam = torch.full((bsz, seq_length, chunks), self.window_size / 4, device=device)
phrase_info['fixed_mu'] = fixed_mu
phrase_info['fixed_sigma'] = fixed_sigam
phrase_info['padding_size'] = max_seq_size - seq_length
assert fixed_mu.size(2) == chunks
assert fixed_sigam.size(2) == chunks
return result, phrase_info
# Undone
# 1. reset para (for max/mean pooling there is no para ~~)
# 2. forward √
# 3. init √
@with_incremental_state
class MultiPhraseAttention(nn.Module):
"""Multi-headed attention.
See "Attention Is All You Need" for more details.
Note:
1. By default the torch version MHA is turned on in MultiHeadAttention, but it is deleted here
2. The add_zero_attention is also deleted here, because i have no idea what it is
"""
def __init__(
self,
embed_dim,
num_heads,
kdim=None,
vdim=None,
dropout=0.0,
bias=True,
add_bias_kv=False,
add_zero_attn=False,
self_attention=False,
encoder_decoder_attention=False,
phrase_args=None,
apply_phrase=False,
):
super().__init__()
# what ever mode is running, phrase args should be given
assert phrase_args is not None
self.phrase_args = phrase_args
# if both attention is turned on, there will be two W_k and W_q (W_v will remain the same as origin)
self.gaussian_attention = self.phrase_args.gaussian_attention
self.multihead_attention = self.phrase_args.multihead_attention
assert self.multihead_attention or self.gaussian_attention, (
'At least one attention should be added'
)
# init for phrase repr
self.apply_phrase = apply_phrase
# If apply_phrase is set True, we supposed that the key is tokens
# If apply_phrase is set False, we sepposed that the key is phrase
if(self.apply_phrase):
self.phrase_encoder = PhraseGenerator(phrase_args)
assert self.gaussian_attention
# original args
self.embed_dim = embed_dim
self.kdim = kdim if kdim is not None else embed_dim
self.vdim = vdim if vdim is not None else embed_dim
self.qkv_same_dim = self.kdim == embed_dim and self.vdim == embed_dim
self.num_heads = num_heads
self.dropout = dropout
self.head_dim = embed_dim // num_heads
assert (
self.head_dim * num_heads == self.embed_dim
), "embed_dim must be divisible by num_heads"
self.scaling = self.head_dim ** -0.5
# Note:
# 1. if self_attention&gaussian_attention = True, apply_phrase should also be True
# 2. if encoder_decoder_attention=True, apply_phrase should be False
self.self_attention = self_attention
if(self.self_attention and self.gaussian_attention):
assert self.apply_phrase
self.encoder_decoder_attention = encoder_decoder_attention
if(self.encoder_decoder_attention):
assert not self.apply_phrase
assert not self.self_attention or self.qkv_same_dim, (
"Self-attention requires query, key and " "value to be of the same size"
)
# projection layers
if(self.gaussian_attention):
self.k_proj_gauss = nn.Linear(self.kdim, embed_dim, bias=bias)
self.q_proj_gauss = nn.Linear(embed_dim, embed_dim, bias=bias)
if(self.multihead_attention):
self.k_proj_base = nn.Linear(self.kdim, embed_dim, bias=bias)
self.q_proj_base = nn.Linear(embed_dim, embed_dim, bias=bias)
self.v_proj = nn.Linear(self.vdim, embed_dim, bias=bias)
self.out_proj = nn.Linear(embed_dim, embed_dim, bias=bias)
if add_bias_kv:
if(self.gaussian_attention):
self.bias_k_gauss = Parameter(torch.Tensor(1, 1, embed_dim))
if(self.multihead_attention):
self.bias_k_base = Parameter(torch.Tensor(1, 1, embed_dim))
self.bias_v = Parameter(torch.Tensor(1, 1, embed_dim))
else:
self.bias_k_gauss = self.bias_v = self.bias_k_base = None
self.reset_parameters()
self.onnx_trace = False
def prepare_for_onnx_export_(self):
self.onnx_trace = True
def reset_parameters(self):
if self.qkv_same_dim:
# Empirically observed the convergence to be much better with
# the scaled initialization
if(self.gaussian_attention):
nn.init.xavier_uniform_(
self.k_proj_gauss.weight, gain=1 / math.sqrt(2))
nn.init.xavier_uniform_(
self.q_proj_gauss.weight, gain=1 / math.sqrt(2))
if(self.multihead_attention):
nn.init.xavier_uniform_(
self.k_proj_base.weight, gain=1 / math.sqrt(2))
nn.init.xavier_uniform_(
self.q_proj_base.weight, gain=1 / math.sqrt(2))
nn.init.xavier_uniform_(self.v_proj.weight, gain=1 / math.sqrt(2))
else:
if(self.gaussian_attention):
nn.init.xavier_uniform_(self.k_proj_gauss.weight)
nn.init.xavier_uniform_(self.q_proj_gauss.weight)
if(self.multihead_attention):
nn.init.xavier_uniform_(self.k_proj_base.weight)
nn.init.xavier_uniform_(self.q_proj_base.weight)
nn.init.xavier_uniform_(self.v_proj.weight)
nn.init.xavier_uniform_(self.out_proj.weight)
if self.out_proj.bias is not None:
nn.init.constant_(self.out_proj.bias, 0.)
if self.bias_k_gauss is not None:
nn.init.xavier_normal_(self.bias_k_gauss)
if self.bias_k_base is not None:
nn.init.xavier_normal_(self.bias_k_base)
if self.bias_v is not None:
nn.init.xavier_normal_(self.bias_v)
def gauss_builder(self, mus, sigmas, weights, seq_length):
"""
Generate Gauss attention
Args:
mus (Tensor): the mu of the gauss attention for each sequence (bsz * heads, src_len, phrase_num)
sigmas (Tensor): the sigma of the gauss attention for each sequence (bsz * heads, src_len, phrase_num)
weights (Tensor): the weight of each gauss distribution (bsz * head_num, src_len, phrase_num)
seq_length (int): the length of sequences
Return:
attention (Tensor): The attention generated by token and phrase repr (bsz * heads, seq_len, seq_len)
"""
def gauss_distribution(mu, sigma, x):
x = x.float()
base = torch.exp(-(x - mu) * (x - mu) / (2 * sigma * sigma))
return base / (math.sqrt(2 * math.pi) * sigma)
device = weights.device
bsz, seq_len, phrase_num = mus.size()
x = [torch.arange(0, seq_length, device=device) for i in range(bsz)]
y = torch.zeros(bsz, seq_len, seq_len, device=device)
# for bsz, src_len, phrase_num
for batch, (m, s, w) in enumerate(zip(mus, sigmas, weights)):
for tok, (mu, sigma, weight, i) in enumerate(zip(m, s, w, x)):
for a, b, c in zip(mu, sigma, weight):
y[batch, tok] += c * gauss_distribution(a, b, i)
gauss_attention = y
return gauss_attention
def gauss_builder_v2(self, mus, sigmas, weights, seq_length):
"""
Generate Gauss attention
Args:
mus (Tensor): the mu of the gauss attention for each sequence (bsz * heads, src_len, phrase_num)
sigmas (Tensor): the sigma of the gauss attention for each sequence (bsz * heads, src_len, phrase_num)
weights (Tensor): the weight of each gauss distribution (bsz * head_num, src_len, phrase_num)
seq_length (int): the length of sequences
Return:
attention (Tensor): The attention generated by token and phrase repr (bsz * heads, seq_len, seq_len)
"""
def gauss_distribution(mu, sigma, x):
mu = mu.unsqueeze(-1).expand(-1, -1, -1, x.size(-1))
sigma = sigma.unsqueeze(-1).expand(-1, -1, -1, x.size(-1))
x = x.float()
base = torch.exp(-(x - mu) * (x - mu) / (2 * sigma * sigma))
return base / (math.sqrt(2 * math.pi) * sigma)
device = weights.device
bsz, seq_len, phrase_num = mus.size()
weights = weights.unsqueeze(-1).expand(-1, -1, -1, seq_len)
# size: bsz * head_num, seq_len, phrase_num, seq_len
x = torch.arange(0., seq_length, device=device).repeat(bsz, seq_len, phrase_num, 1)
y = gauss_distribution(mus, sigmas, x) * weights
y = y.sum(dim=-2)
gauss_attention = y
return gauss_attention
    def forward(
        self,
        query,
        key: Optional[Tensor],
        value: Optional[Tensor],
        key_padding_mask: Optional[Tensor] = None,
        incremental_state: Optional[Dict[str, Dict[str, Optional[Tensor]]]] = None,
        need_weights: bool = True,
        static_kv: bool = False,
        attn_mask: Optional[Tensor] = None,
        before_softmax: bool = False,
        need_head_weights: bool = False,
        phrase_info: dict = None,
        need_phrase: bool = False,
    ) -> Tuple[Tensor, Optional[Tensor]]:
        """Input shape: Time x Batch x Channel
        Args:
            key_padding_mask (ByteTensor, optional): mask to exclude
                keys that are pads, of shape `(batch, src_len)`, where
                padding elements are indicated by 1s.
            need_weights (bool, optional): return the attention weights,
                averaged over heads (default: False).
            attn_mask (ByteTensor, optional): typically used to
                implement causal attention, where the mask prevents the
                attention from looking forward in time (default: None).
            before_softmax (bool, optional): return the raw attention
                weights and values before the attention softmax.
            need_head_weights (bool, optional): return the attention
                weights for each head. Implies *need_weights*. Default:
                return the average attention weights over all heads.
            query: tokens(source side: seq, bsz, embed_dim)
            key: phrase repr
            value: tokens(source/target side)
            phrase_info (dict, optional): used for phrase parsing
            need_phrase (bool, False): return the phrase repr
        """
        if need_head_weights:
            need_weights = True
        key_phrase = None
        tgt_len, bsz, embed_dim = query.size()
        assert embed_dim == self.embed_dim
        assert list(query.size()) == [tgt_len, bsz, embed_dim]
        # Incremental decoding: reuse cached keys/values when they are static.
        if incremental_state is not None:
            saved_state = self._get_input_buffer(incremental_state)
            if saved_state is not None and "prev_key" in saved_state:
                # previous time steps are cached - no need to recompute
                # key and value if they are static
                if static_kv:
                    assert self.encoder_decoder_attention and not self.self_attention
                    key = value = None
        else:
            saved_state = None
        # Here in self_attention, only query is needed
        # project should be applied before multiheads
        if self.self_attention:
            if(self.multihead_attention):
                q_base = self.q_proj_base(query)
                k_base = self.k_proj_base(query)
            if(self.gaussian_attention):
                q_gauss = self.q_proj_gauss(query)
                k_gauss = self.k_proj_gauss(query)
            v = self.v_proj(query)
        # In encoder_decoder attention, phrase(k) and token(v) are provided by encoder
        # while token(q) is provided by decoder
        elif self.encoder_decoder_attention:
            # Basic multihead attention's k&v are provided by encoder and k = v
            if(self.multihead_attention):
                q_base = self.q_proj_base(query)
                if key is None:
                    assert value is None
                    k_base = v = None
                else:
                    k_base = self.k_proj_base(key)
                    v = self.v_proj(key)
            # Gaussian attention's key&value are provided by encoder but key!=value
            # Not that there is no need to build phrase in decoder, because it is done by the encoder
            if(self.gaussian_attention):
                q_gauss = self.q_proj_gauss(query)
                if key is None:
                    assert value is None
                    k_gauss = v = None
                else:
                    assert key is not None
                    assert value is not None
                    k_gauss = self.k_proj_gauss(key)
                    v = self.v_proj(value)
        else:
            # Note:
            # If both key and value are provided, and apply_phrase is set False,
            # we supposed that key is phrase repr,
            # which means no PhraseEncoder will be added here
            assert key is not None and value is not None
            if(self.multihead_attention):
                q_base = self.q_proj_base(query)
                k_base = self.k_proj_base(key)
            if(self.gaussian_attention):
                q_gauss = self.q_proj_gauss(query)
                k_gauss = self.k_proj_gauss(key)
            v = self.v_proj(value)
        # Scale queries before the dot product (standard transformer scaling).
        if(self.multihead_attention):
            q_base *= self.scaling
        if(self.gaussian_attention):
            q_gauss *= self.scaling
        # Append the optional learned bias key/value vectors.
        if self.bias_k_base is not None:
            k_base = torch.cat([k_base, self.bias_k_base.repeat(1, bsz, 1)])
        if self.bias_k_gauss is not None:
            k_gauss = torch.cat([k_gauss, self.bias_k_gauss.repeat(1, bsz, 1)])
        # NOTE(review): truth-testing a multi-element tensor raises a
        # RuntimeError; this condition likely intends
        # "self.bias_k_base is not None or self.bias_k_gauss is not None" —
        # confirm (it only works today because both are None in practice).
        if(self.bias_k_base or self.bias_k_gauss):
            assert self.bias_v is not None
            v = torch.cat([v, self.bias_v.repeat(1, bsz, 1)])
            if attn_mask is not None:
                attn_mask = torch.cat(
                    [attn_mask, attn_mask.new_zeros(attn_mask.size(0), 1)], dim=1
                )
            if key_padding_mask is not None:
                key_padding_mask = torch.cat(
                    [
                        key_padding_mask,
                        key_padding_mask.new_zeros(
                            key_padding_mask.size(0), 1),
                    ],
                    dim=1,
                )
        # embed_dim = head_dim * head_num
        # q: (tgt_len, bsz, embed_dim) -> (bsz * head_num, tgt_len, head_dim)
        # k: (phrase_num, bsz, embed_dim) -> (bsz * head_num, phrase_num, head_dim)
        # v: (src_len, bsz, embed_dim) -> (bsz * head_num, scr_len, head_dim)
        # Now, the implement suppose fixed window~
        # TODO graph based function is not supported yet
        if(self.multihead_attention):
            q_base = (
                q_base.contiguous()
                .view(tgt_len, bsz * self.num_heads, self.head_dim)
                .transpose(0, 1)
            )
            if k_base is not None:
                k_base = (
                    k_base.contiguous()
                    .view(-1, bsz * self.num_heads, self.head_dim)
                    .transpose(0, 1)
                )
        if(self.gaussian_attention):
            q_gauss = (
                q_gauss.contiguous()
                .view(tgt_len, bsz * self.num_heads, self.head_dim)
                .transpose(0, 1)
            )
            if k_gauss is not None:
                k_gauss = (
                    k_gauss.contiguous()
                    .view(-1, bsz * self.num_heads, self.head_dim)
                    .transpose(0, 1)
                )
                # Optionally replace the gauss keys with phrase representations.
                if(self.apply_phrase):
                    key_phrase, phrase_info = self.phrase_encoder(k_gauss, phrase_info)
                    k_gauss = key_phrase
                else:
                    key_phrase = k_gauss
        if v is not None:
            v = (
                v.contiguous()
                .view(-1, bsz * self.num_heads, self.head_dim)
                .transpose(0, 1)
            )
        if saved_state is not None:
            # saved states are stored with shape (bsz, num_heads, seq_len, head_dim)
            # From saved_state get keys
            if "prev_key_base" in saved_state:
                assert self.multihead_attention
                _prev_key_base = saved_state["prev_key_base"]
                assert _prev_key_base is not None
                prev_key_base = _prev_key_base.view(
                    bsz * self.num_heads, -1, self.head_dim)
                if static_kv:
                    k_base = prev_key_base
                else:
                    assert k_base is not None
                    k_base = torch.cat([prev_key_base, k_base], dim=1)
            if "prev_key_gauss" in saved_state:
                assert self.gaussian_attention
                _prev_key_gauss = saved_state["prev_key_gauss"]
                assert _prev_key_gauss is not None
                prev_key_gauss = _prev_key_gauss.view(
                    bsz * self.num_heads, -1, self.head_dim)
                if static_kv:
                    k_gauss = prev_key_gauss
                else:
                    assert k_gauss is not None
                    k_gauss = torch.cat([prev_key_gauss, k_gauss], dim=1)
            # From saved_state get values
            if "prev_value" in saved_state:
                _prev_value = saved_state["prev_value"]
                assert _prev_value is not None
                prev_value = _prev_value.view(
                    bsz * self.num_heads, -1, self.head_dim)
                if static_kv:
                    v = prev_value
                else:
                    assert v is not None
                    v = torch.cat([prev_value, v], dim=1)
            # apply saved mask
            prev_key_padding_mask: Optional[Tensor] = None
            if "prev_key_padding_mask" in saved_state:
                prev_key_padding_mask = saved_state["prev_key_padding_mask"]
            assert v is not None
            # NOTE(review): `assert k_base or k_gauss` truth-tests tensors and
            # will raise a RuntimeError for multi-element tensors; likely
            # intends `k_base is not None or k_gauss is not None` — confirm.
            assert k_base or k_gauss
            key_padding_mask = MultiPhraseAttention._append_prev_key_padding_mask(
                key_padding_mask=key_padding_mask,
                prev_key_padding_mask=prev_key_padding_mask,
                batch_size=bsz,
                src_len=k_base.size(1),
                static_kv=static_kv,
            )
            # save the newest state
            if(self.multihead_attention):
                saved_state["prev_key_base"] = k_base.view(
                    bsz, self.num_heads, -1, self.head_dim)
            if(self.gaussian_attention):
                saved_state["prev_key_gauss"] = k_gauss.view(
                    bsz, self.num_heads, -1, self.head_dim)
            saved_state["prev_value"] = v.view(
                bsz, self.num_heads, -1, self.head_dim)
            saved_state["prev_key_padding_mask"] = key_padding_mask
            # In this branch incremental_state is never None
            assert incremental_state is not None
            incremental_state = self._set_input_buffer(
                incremental_state, saved_state)
        if(self.multihead_attention):
            assert k_base is not None
            src_len = k_base.size(1)
        else:
            assert k_gauss is not None
            src_len = k_gauss.size(1)
        # This is part of a workaround to get around fork/join parallelism
        # not supporting Optional types.
        if key_padding_mask is not None and key_padding_mask.dim() == 0:
            key_padding_mask = None
        if key_padding_mask is not None:
            assert key_padding_mask.size(0) == bsz
            assert key_padding_mask.size(1) == src_len
        # calc multihead attention
        if(self.multihead_attention):
            base_attn = torch.bmm(q_base, k_base.transpose(1, 2))
        else:
            base_attn = None
        # calc gaussian attention
        if(self.gaussian_attention):
            gauss_weight = torch.bmm(q_gauss, k_gauss.transpose(1, 2))
            gauss_attn = self.gauss_builder_v2(
                phrase_info['fixed_mu'], phrase_info['fixed_sigma'], gauss_weight, tgt_len)
            if(base_attn is None):
                base_attn = torch.zeros_like(gauss_attn)
        else:
            gauss_attn = torch.zeros_like(base_attn)
        # add attention together (maybe add after softmax is better? )
        gauss_attn = gauss_attn.to(base_attn.device)
        attn_weights = gauss_attn + base_attn
        attn_weights = MultiPhraseAttention.apply_sparse_mask(
            attn_weights, tgt_len, src_len, bsz)
        assert list(attn_weights.size()) == [
            bsz * self.num_heads, tgt_len, src_len]
        if attn_mask is not None:
            attn_mask = attn_mask.unsqueeze(0)
            if self.onnx_trace:
                attn_mask = attn_mask.repeat(attn_weights.size(0), 1, 1)
            attn_weights += attn_mask
        if key_padding_mask is not None:
            # don't attend to padding symbols
            attn_weights = attn_weights.view(
                bsz, self.num_heads, tgt_len, src_len)
            attn_weights = attn_weights.masked_fill(
                key_padding_mask.unsqueeze(1).unsqueeze(
                    2).to(torch.bool), float("-inf")
            )
            attn_weights = attn_weights.view(
                bsz * self.num_heads, tgt_len, src_len)
        if before_softmax:
            return attn_weights, v
        # apply softmax and dropout
        attn_weights_float = utils.softmax(
            attn_weights, dim=-1, onnx_trace=self.onnx_trace
        )
        attn_weights = attn_weights_float.type_as(attn_weights)
        attn_probs = F.dropout(
            attn_weights_float.type_as(attn_weights),
            p=self.dropout,
            training=self.training,
        )
        # apply attention
        assert v is not None
        attn = torch.bmm(attn_probs, v)
        assert list(attn.size()) == [
            bsz * self.num_heads, tgt_len, self.head_dim]
        if self.onnx_trace and attn.size(1) == 1:
            # when ONNX tracing a single decoder step (sequence length == 1)
            # the transpose is a no-op copy before view, thus unnecessary
            attn = attn.contiguous().view(tgt_len, bsz, embed_dim)
        else:
            attn = attn.transpose(0, 1).contiguous().view(
                tgt_len, bsz, embed_dim)
        attn = self.out_proj(attn)
        # Rebind attn_weights: discarded unless the caller asked for weights,
        # in which case they are rebuilt from the pre-dropout float weights.
        attn_weights: Optional[Tensor] = None
        if need_weights:
            attn_weights = attn_weights_float.view(
                bsz, self.num_heads, tgt_len, src_len
            ).transpose(1, 0)
            if not need_head_weights:
                # average attention weights over heads
                attn_weights = attn_weights.mean(dim=0)
        if(need_phrase):
            assert key_phrase is not None
            return attn, attn_weights, key_phrase
        return attn, attn_weights
@staticmethod
def _append_prev_key_padding_mask(
key_padding_mask: Optional[Tensor],
prev_key_padding_mask: Optional[Tensor],
batch_size: int,
src_len: int,
static_kv: bool,
) -> Optional[Tensor]:
# saved key padding masks have shape (bsz, seq_len)
if prev_key_padding_mask is not None and static_kv:
new_key_padding_mask = prev_key_padding_mask
elif prev_key_padding_mask is not None and key_padding_mask is not None:
new_key_padding_mask = torch.cat(
[prev_key_padding_mask.float(), key_padding_mask.float()], dim=1
)
# During incremental decoding, as the padding token enters and
# leaves the frame, there will be a time when prev or current
# is None
elif prev_key_padding_mask is not None:
filler = torch.zeros(
(batch_size, src_len - prev_key_padding_mask.size(1)),
device=prev_key_padding_mask.device,
)
new_key_padding_mask = torch.cat(
[prev_key_padding_mask.float(), filler.float()], dim=1
)
elif key_padding_mask is not None:
filler = torch.zeros(
(batch_size, src_len - key_padding_mask.size(1)),
device=key_padding_mask.device,
)
new_key_padding_mask = torch.cat(
[filler.float(), key_padding_mask.float()], dim=1
)
else:
new_key_padding_mask = prev_key_padding_mask
return new_key_padding_mask
@torch.jit.export
def reorder_incremental_state(
self, incremental_state: Dict[str, Dict[str, Optional[Tensor]]], new_order: Tensor
):
"""Reorder buffered internal state (for incremental generation)."""
input_buffer = self._get_input_buffer(incremental_state)
if input_buffer is not None:
for k in input_buffer.keys():
input_buffer_k = input_buffer[k]
if input_buffer_k is not None:
if self.encoder_decoder_attention and input_buffer_k.size(0) == new_order.size(0):
break
input_buffer[k] = input_buffer_k.index_select(0, new_order)
incremental_state = self._set_input_buffer(
incremental_state, input_buffer)
return incremental_state
def _get_input_buffer(
self, incremental_state: Optional[Dict[str, Dict[str, Optional[Tensor]]]]
) -> Dict[str, Optional[Tensor]]:
result = self.get_incremental_state(incremental_state, "attn_state")
if result is not None:
return result
else:
empty_result: Dict[str, Optional[Tensor]] = {}
return empty_result
def _set_input_buffer(
self,
incremental_state: Dict[str, Dict[str, Optional[Tensor]]],
buffer: Dict[str, Optional[Tensor]],
):
return self.set_incremental_state(incremental_state, "attn_state", buffer)
def apply_sparse_mask(attn_weights, tgt_len: int, src_len: int, bsz: int):
return attn_weights
def upgrade_state_dict_named(self, state_dict, name):
prefix = name + "." if name != "" else ""
items_to_add = {}
keys_to_remove = []
for k in state_dict.keys():
if k.endswith(prefix + "in_proj_weight"):
# in_proj_weight used to be q + k + v with same dimensions
dim = int(state_dict[k].shape[0] / 3)
items_to_add[prefix + "q_proj.weight"] = state_dict[k][:dim]
items_to_add[prefix + "k_proj.weight"] = state_dict[k][dim: 2 * dim]
items_to_add[prefix + "v_proj.weight"] = state_dict[k][2 * dim:]
keys_to_remove.append(k)
k_bias = prefix + "in_proj_bias"
if k_bias in state_dict.keys():
dim = int(state_dict[k].shape[0] / 3)
items_to_add[prefix + "q_proj.bias"] = state_dict[k_bias][:dim]
items_to_add[prefix + "k_proj.bias"] = state_dict[k_bias][
dim: 2 * dim
]
items_to_add[prefix + "v_proj.bias"] = state_dict[k_bias][2 * dim:]
keys_to_remove.append(prefix + "in_proj_bias")
for k in keys_to_remove:
del state_dict[k]
for key, value in items_to_add.items():
state_dict[key] = value
| [
"torch.nn.init.constant_",
"torch.max",
"math.sqrt",
"torch.exp",
"torch.nn.init.xavier_normal_",
"torch.nn.functional.pad",
"torch.bmm",
"torch.arange",
"torch.nn.init.xavier_uniform_",
"torch.mean",
"fairseq.utils.softmax",
"torch.zeros_like",
"torch.Tensor",
"torch.cat",
"math.ceil",
... | [((8686, 8728), 'torch.nn.Linear', 'nn.Linear', (['self.vdim', 'embed_dim'], {'bias': 'bias'}), '(self.vdim, embed_dim, bias=bias)\n', (8695, 8728), False, 'from torch import Tensor, nn\n'), ((8754, 8796), 'torch.nn.Linear', 'nn.Linear', (['embed_dim', 'embed_dim'], {'bias': 'bias'}), '(embed_dim, embed_dim, bias=bias)\n', (8763, 8796), False, 'from torch import Tensor, nn\n'), ((10548, 10593), 'torch.nn.init.xavier_uniform_', 'nn.init.xavier_uniform_', (['self.out_proj.weight'], {}), '(self.out_proj.weight)\n', (10571, 10593), False, 'from torch import Tensor, nn\n'), ((11988, 12037), 'torch.zeros', 'torch.zeros', (['bsz', 'seq_len', 'seq_len'], {'device': 'device'}), '(bsz, seq_len, seq_len, device=device)\n', (11999, 12037), False, 'import torch\n'), ((26811, 26874), 'fairseq.utils.softmax', 'utils.softmax', (['attn_weights'], {'dim': '(-1)', 'onnx_trace': 'self.onnx_trace'}), '(attn_weights, dim=-1, onnx_trace=self.onnx_trace)\n', (26824, 26874), False, 'from fairseq import utils\n'), ((27192, 27216), 'torch.bmm', 'torch.bmm', (['attn_probs', 'v'], {}), '(attn_probs, v)\n', (27201, 27216), False, 'import torch\n'), ((4421, 4456), 'math.ceil', 'ceil', (['(seq_length / self.window_size)'], {}), '(seq_length / self.window_size)\n', (4425, 4456), False, 'from math import ceil\n'), ((4653, 4666), 'torch.nn.functional.pad', 'F.pad', (['x', 'pad'], {}), '(x, pad)\n', (4658, 4666), True, 'import torch.nn.functional as F\n'), ((4762, 4783), 'torch.stack', 'torch.stack', (['x'], {'dim': '(1)'}), '(x, dim=1)\n', (4773, 4783), False, 'import torch\n'), ((4996, 5070), 'torch.full', 'torch.full', (['(bsz, seq_length, chunks)', '(self.window_size / 4)'], {'device': 'device'}), '((bsz, seq_length, chunks), self.window_size / 4, device=device)\n', (5006, 5070), False, 'import torch\n'), ((8360, 8402), 'torch.nn.Linear', 'nn.Linear', (['self.kdim', 'embed_dim'], {'bias': 'bias'}), '(self.kdim, embed_dim, bias=bias)\n', (8369, 8402), False, 'from torch import Tensor, 
nn\n'), ((8435, 8477), 'torch.nn.Linear', 'nn.Linear', (['embed_dim', 'embed_dim'], {'bias': 'bias'}), '(embed_dim, embed_dim, bias=bias)\n', (8444, 8477), False, 'from torch import Tensor, nn\n'), ((8547, 8589), 'torch.nn.Linear', 'nn.Linear', (['self.kdim', 'embed_dim'], {'bias': 'bias'}), '(self.kdim, embed_dim, bias=bias)\n', (8556, 8589), False, 'from torch import Tensor, nn\n'), ((8621, 8663), 'torch.nn.Linear', 'nn.Linear', (['embed_dim', 'embed_dim'], {'bias': 'bias'}), '(embed_dim, embed_dim, bias=bias)\n', (8630, 8663), False, 'from torch import Tensor, nn\n'), ((10495, 10538), 'torch.nn.init.xavier_uniform_', 'nn.init.xavier_uniform_', (['self.v_proj.weight'], {}), '(self.v_proj.weight)\n', (10518, 10538), False, 'from torch import Tensor, nn\n'), ((10649, 10691), 'torch.nn.init.constant_', 'nn.init.constant_', (['self.out_proj.bias', '(0.0)'], {}), '(self.out_proj.bias, 0.0)\n', (10666, 10691), False, 'from torch import Tensor, nn\n'), ((10745, 10786), 'torch.nn.init.xavier_normal_', 'nn.init.xavier_normal_', (['self.bias_k_gauss'], {}), '(self.bias_k_gauss)\n', (10767, 10786), False, 'from torch import Tensor, nn\n'), ((10840, 10880), 'torch.nn.init.xavier_normal_', 'nn.init.xavier_normal_', (['self.bias_k_base'], {}), '(self.bias_k_base)\n', (10862, 10880), False, 'from torch import Tensor, nn\n'), ((10929, 10964), 'torch.nn.init.xavier_normal_', 'nn.init.xavier_normal_', (['self.bias_v'], {}), '(self.bias_v)\n', (10951, 10964), False, 'from torch import Tensor, nn\n'), ((11707, 11760), 'torch.exp', 'torch.exp', (['(-(x - mu) * (x - mu) / (2 * sigma * sigma))'], {}), '(-(x - mu) * (x - mu) / (2 * sigma * sigma))\n', (11716, 11760), False, 'import torch\n'), ((11912, 11954), 'torch.arange', 'torch.arange', (['(0)', 'seq_length'], {'device': 'device'}), '(0, seq_length, device=device)\n', (11924, 11954), False, 'import torch\n'), ((13286, 13339), 'torch.exp', 'torch.exp', (['(-(x - mu) * (x - mu) / (2 * sigma * sigma))'], {}), '(-(x - mu) * (x - mu) / 
(2 * sigma * sigma))\n', (13295, 13339), False, 'import torch\n'), ((25578, 25605), 'torch.zeros_like', 'torch.zeros_like', (['base_attn'], {}), '(base_attn)\n', (25594, 25605), False, 'import torch\n'), ((9094, 9123), 'torch.Tensor', 'torch.Tensor', (['(1)', '(1)', 'embed_dim'], {}), '(1, 1, embed_dim)\n', (9106, 9123), False, 'import torch\n'), ((10195, 10244), 'torch.nn.init.xavier_uniform_', 'nn.init.xavier_uniform_', (['self.k_proj_gauss.weight'], {}), '(self.k_proj_gauss.weight)\n', (10218, 10244), False, 'from torch import Tensor, nn\n'), ((10261, 10310), 'torch.nn.init.xavier_uniform_', 'nn.init.xavier_uniform_', (['self.q_proj_gauss.weight'], {}), '(self.q_proj_gauss.weight)\n', (10284, 10310), False, 'from torch import Tensor, nn\n'), ((10369, 10417), 'torch.nn.init.xavier_uniform_', 'nn.init.xavier_uniform_', (['self.k_proj_base.weight'], {}), '(self.k_proj_base.weight)\n', (10392, 10417), False, 'from torch import Tensor, nn\n'), ((10434, 10482), 'torch.nn.init.xavier_uniform_', 'nn.init.xavier_uniform_', (['self.q_proj_base.weight'], {}), '(self.q_proj_base.weight)\n', (10457, 10482), False, 'from torch import Tensor, nn\n'), ((13619, 13663), 'torch.arange', 'torch.arange', (['(0.0)', 'seq_length'], {'device': 'device'}), '(0.0, seq_length, device=device)\n', (13631, 13663), False, 'import torch\n'), ((25510, 25538), 'torch.zeros_like', 'torch.zeros_like', (['gauss_attn'], {}), '(gauss_attn)\n', (25526, 25538), False, 'import torch\n'), ((1704, 1724), 'torch.max', 'torch.max', (['tokens', '(2)'], {}), '(tokens, 2)\n', (1713, 1724), False, 'import torch\n'), ((8909, 8938), 'torch.Tensor', 'torch.Tensor', (['(1)', '(1)', 'embed_dim'], {}), '(1, 1, embed_dim)\n', (8921, 8938), False, 'import torch\n'), ((9027, 9056), 'torch.Tensor', 'torch.Tensor', (['(1)', '(1)', 'embed_dim'], {}), '(1, 1, embed_dim)\n', (9039, 9056), False, 'import torch\n'), ((11788, 11810), 'math.sqrt', 'math.sqrt', (['(2 * math.pi)'], {}), '(2 * math.pi)\n', (11797, 11810), False, 
'import math\n'), ((13367, 13389), 'math.sqrt', 'math.sqrt', (['(2 * math.pi)'], {}), '(2 * math.pi)\n', (13376, 13389), False, 'import math\n'), ((21971, 22012), 'torch.cat', 'torch.cat', (['[prev_key_base, k_base]'], {'dim': '(1)'}), '([prev_key_base, k_base], dim=1)\n', (21980, 22012), False, 'import torch\n'), ((22513, 22556), 'torch.cat', 'torch.cat', (['[prev_key_gauss, k_gauss]'], {'dim': '(1)'}), '([prev_key_gauss, k_gauss], dim=1)\n', (22522, 22556), False, 'import torch\n'), ((23007, 23040), 'torch.cat', 'torch.cat', (['[prev_value, v]'], {'dim': '(1)'}), '([prev_value, v], dim=1)\n', (23016, 23040), False, 'import torch\n'), ((1876, 1897), 'torch.mean', 'torch.mean', (['tokens', '(2)'], {}), '(tokens, 2)\n', (1886, 1897), False, 'import torch\n'), ((10110, 10122), 'math.sqrt', 'math.sqrt', (['(2)'], {}), '(2)\n', (10119, 10122), False, 'import math\n'), ((9661, 9673), 'math.sqrt', 'math.sqrt', (['(2)'], {}), '(2)\n', (9670, 9673), False, 'import math\n'), ((9771, 9783), 'math.sqrt', 'math.sqrt', (['(2)'], {}), '(2)\n', (9780, 9783), False, 'import math\n'), ((9922, 9934), 'math.sqrt', 'math.sqrt', (['(2)'], {}), '(2)\n', (9931, 9934), False, 'import math\n'), ((10031, 10043), 'math.sqrt', 'math.sqrt', (['(2)'], {}), '(2)\n', (10040, 10043), False, 'import math\n')] |
# -*- coding: utf-8 -*-
# Source: https://realpython.com/k-means-clustering-python/
#
# Clustering partitions data into groups (clusters) of objects that are more
# similar to one another than to objects in other clusters.  Three families:
#   * Partitional clustering (k-means, k-medoids): non-overlapping groups;
#     every object belongs to exactly one cluster.
#   * Hierarchical clustering: builds a hierarchy bottom-up or top-down.
#   * Density-based clustering (e.g. DBSCAN): clusters are regions of high
#     point density separated by low-density regions.
#
# Conventional k-means: randomly pick k centroids (points representing the
# center of a cluster), then iterate assignment/update steps to convergence.
import matplotlib.pyplot as plt
from kneed import KneeLocator  # FIX: KneeLocator was used below but never imported
from sklearn.cluster import DBSCAN, KMeans
from sklearn.datasets import make_blobs, make_moons
from sklearn.metrics import adjusted_rand_score, silhouette_score
from sklearn.preprocessing import StandardScaler

# Generate synthetic data and its ground-truth labels.
features, true_labels = make_blobs(
    n_samples=200, centers=3, cluster_std=2.75, random_state=42
)
# Standardization shifts/scales every feature to mean 0 and std-dev 1 so no
# single feature dominates the Euclidean distances used by k-means.
scaler = StandardScaler()
scaled_features = scaler.fit_transform(features)

kmeans = KMeans(
    init="random",
    n_clusters=3,
    n_init=10,
    max_iter=300,
    random_state=42,
)
kmeans.fit(scaled_features)
# The lowest SSE value reached.
kmeans.inertia_
# Final locations of the centroids.
kmeans.cluster_centers_
# The number of iterations required to converge.
kmeans.n_iter_
kmeans.labels_[:5]

# --- Choosing the appropriate number of clusters: the elbow method ---
kmeans_kwargs = {
    "init": "random",
    "n_init": 10,
    "max_iter": 300,
    "random_state": 42,
}
# SSE for each candidate k.
sse = []
for k in range(1, 11):
    kmeans = KMeans(n_clusters=k, **kmeans_kwargs)
    kmeans.fit(scaled_features)
    sse.append(kmeans.inertia_)

# SSE keeps decreasing as k grows (more centroids -> shorter distances).
# The "elbow" where the curve starts to bend is a reasonable trade-off
# between error and number of clusters; here it sits at k = 3.
# (The original tutorial rendered this identical plot twice; consolidated.)
plt.style.use("fivethirtyeight")
plt.plot(range(1, 11), sse)
plt.xticks(range(1, 11))
plt.xlabel("Number of Clusters")
plt.ylabel("SSE")
plt.show()

# Identify the elbow programmatically with kneed when eyeballing is hard.
kl = KneeLocator(
    range(1, 11), sse, curve="convex", direction="decreasing"
)
kl.elbow

# --- Silhouette coefficient ---
# Measures cluster cohesion vs. separation per sample: how close a point is
# to its own cluster and how far from other clusters.  Values lie in [-1, 1];
# larger is better.  scikit-learn averages the per-sample coefficients into a
# single score, and silhouette_score() needs at least two clusters, so the
# sweep starts at k = 2.
silhouette_coefficients = []
for k in range(2, 11):
    kmeans = KMeans(n_clusters=k, **kmeans_kwargs)
    kmeans.fit(scaled_features)
    score = silhouette_score(scaled_features, kmeans.labels_)
    silhouette_coefficients.append(score)

# The maximum average silhouette score again points to k = 3.
plt.style.use("fivethirtyeight")
plt.plot(range(2, 11), silhouette_coefficients)
plt.xticks(range(2, 11))
plt.xlabel("Number of Clusters")
plt.ylabel("Silhouette Coefficient")
plt.show()

# --- Evaluating clustering performance using advanced techniques ---
features, true_labels = make_moons(
    n_samples=250, noise=0.05, random_state=42
)
scaled_features = scaler.fit_transform(features)
# Instantiate the k-means and DBSCAN algorithms.
kmeans = KMeans(n_clusters=2)
dbscan = DBSCAN(eps=0.3)
# Fit the algorithms to the features.
kmeans.fit(scaled_features)
dbscan.fit(scaled_features)
# A higher silhouette coefficient normally suggests better clusters, but it
# is misleading on these non-convex (moon-shaped) clusters.
kmeans_silhouette = silhouette_score(
    scaled_features, kmeans.labels_
).round(2)
dbscan_silhouette = silhouette_score(
    scaled_features, dbscan.labels_
).round(2)
kmeans_silhouette
dbscan_silhouette
# The Adjusted Rand Index compares predicted clusters against the true
# labels, exposing DBSCAN's advantage here.
ari_kmeans = adjusted_rand_score(true_labels, kmeans.labels_)
ari_dbscan = adjusted_rand_score(true_labels, dbscan.labels_)
round(ari_kmeans, 2)
round(ari_dbscan, 2)
| [
"sklearn.cluster.KMeans",
"matplotlib.pyplot.ylabel",
"matplotlib.pyplot.xlabel",
"sklearn.datasets.make_blobs",
"sklearn.metrics.adjusted_rand_score",
"matplotlib.pyplot.style.use",
"sklearn.preprocessing.StandardScaler",
"sklearn.datasets.make_moons",
"sklearn.metrics.silhouette_score",
"sklearn... | [((1483, 1554), 'sklearn.datasets.make_blobs', 'make_blobs', ([], {'n_samples': '(200)', 'centers': '(3)', 'cluster_std': '(2.75)', 'random_state': '(42)'}), '(n_samples=200, centers=3, cluster_std=2.75, random_state=42)\n', (1493, 1554), False, 'from sklearn.datasets import make_blobs\n'), ((1836, 1852), 'sklearn.preprocessing.StandardScaler', 'StandardScaler', ([], {}), '()\n', (1850, 1852), False, 'from sklearn.preprocessing import StandardScaler\n'), ((1915, 1992), 'sklearn.cluster.KMeans', 'KMeans', ([], {'init': '"""random"""', 'n_clusters': '(3)', 'n_init': '(10)', 'max_iter': '(300)', 'random_state': '(42)'}), "(init='random', n_clusters=3, n_init=10, max_iter=300, random_state=42)\n", (1921, 1992), False, 'from sklearn.cluster import KMeans\n'), ((2652, 2684), 'matplotlib.pyplot.style.use', 'plt.style.use', (['"""fivethirtyeight"""'], {}), "('fivethirtyeight')\n", (2665, 2684), True, 'import matplotlib.pyplot as plt\n'), ((2741, 2773), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""Number of Clusters"""'], {}), "('Number of Clusters')\n", (2751, 2773), True, 'import matplotlib.pyplot as plt\n'), ((2775, 2792), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""SSE"""'], {}), "('SSE')\n", (2785, 2792), True, 'import matplotlib.pyplot as plt\n'), ((2794, 2804), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (2802, 2804), True, 'import matplotlib.pyplot as plt\n'), ((3264, 3296), 'matplotlib.pyplot.style.use', 'plt.style.use', (['"""fivethirtyeight"""'], {}), "('fivethirtyeight')\n", (3277, 3296), True, 'import matplotlib.pyplot as plt\n'), ((3353, 3385), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""Number of Clusters"""'], {}), "('Number of Clusters')\n", (3363, 3385), True, 'import matplotlib.pyplot as plt\n'), ((3387, 3404), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""SSE"""'], {}), "('SSE')\n", (3397, 3404), True, 'import matplotlib.pyplot as plt\n'), ((3406, 3416), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (3414, 
3416), True, 'import matplotlib.pyplot as plt\n'), ((5067, 5099), 'matplotlib.pyplot.style.use', 'plt.style.use', (['"""fivethirtyeight"""'], {}), "('fivethirtyeight')\n", (5080, 5099), True, 'import matplotlib.pyplot as plt\n'), ((5176, 5208), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""Number of Clusters"""'], {}), "('Number of Clusters')\n", (5186, 5208), True, 'import matplotlib.pyplot as plt\n'), ((5210, 5246), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Silhouette Coefficient"""'], {}), "('Silhouette Coefficient')\n", (5220, 5246), True, 'import matplotlib.pyplot as plt\n'), ((5248, 5258), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (5256, 5258), True, 'import matplotlib.pyplot as plt\n'), ((5486, 5540), 'sklearn.datasets.make_moons', 'make_moons', ([], {'n_samples': '(250)', 'noise': '(0.05)', 'random_state': '(42)'}), '(n_samples=250, noise=0.05, random_state=42)\n', (5496, 5540), False, 'from sklearn.datasets import make_moons\n'), ((5656, 5676), 'sklearn.cluster.KMeans', 'KMeans', ([], {'n_clusters': '(2)'}), '(n_clusters=2)\n', (5662, 5676), False, 'from sklearn.cluster import KMeans\n'), ((5687, 5702), 'sklearn.cluster.DBSCAN', 'DBSCAN', ([], {'eps': '(0.3)'}), '(eps=0.3)\n', (5693, 5702), False, 'from sklearn.cluster import DBSCAN\n'), ((6367, 6415), 'sklearn.metrics.adjusted_rand_score', 'adjusted_rand_score', (['true_labels', 'kmeans.labels_'], {}), '(true_labels, kmeans.labels_)\n', (6386, 6415), False, 'from sklearn.metrics import adjusted_rand_score\n'), ((6430, 6478), 'sklearn.metrics.adjusted_rand_score', 'adjusted_rand_score', (['true_labels', 'dbscan.labels_'], {}), '(true_labels, dbscan.labels_)\n', (6449, 6478), False, 'from sklearn.metrics import adjusted_rand_score\n'), ((2500, 2537), 'sklearn.cluster.KMeans', 'KMeans', ([], {'n_clusters': 'k'}), '(n_clusters=k, **kmeans_kwargs)\n', (2506, 2537), False, 'from sklearn.cluster import KMeans\n'), ((4751, 4788), 'sklearn.cluster.KMeans', 'KMeans', ([], {'n_clusters': 'k'}), 
'(n_clusters=k, **kmeans_kwargs)\n', (4757, 4788), False, 'from sklearn.cluster import KMeans\n'), ((4835, 4884), 'sklearn.metrics.silhouette_score', 'silhouette_score', (['scaled_features', 'kmeans.labels_'], {}), '(scaled_features, kmeans.labels_)\n', (4851, 4884), False, 'from sklearn.metrics import silhouette_score\n'), ((5876, 5925), 'sklearn.metrics.silhouette_score', 'silhouette_score', (['scaled_features', 'kmeans.labels_'], {}), '(scaled_features, kmeans.labels_)\n', (5892, 5925), False, 'from sklearn.metrics import silhouette_score\n'), ((5964, 6013), 'sklearn.metrics.silhouette_score', 'silhouette_score', (['scaled_features', 'dbscan.labels_'], {}), '(scaled_features, dbscan.labels_)\n', (5980, 6013), False, 'from sklearn.metrics import silhouette_score\n')] |
"""
Unit test script for pyeto.thornthwaite.py
"""
import unittest
import pyeto
class TestThornthwaite(unittest.TestCase):
def test_monthly_mean_daylight_hours(self):
# Test against values for latitude 20 deg N from Bautista et al (2009)
# Calibration of the equations of Hargreaves and Thornthwaite to
# estimate the potential evapotranspiration in semi-arid and subhumid
# tropical climates for regional applications. Atmosfera 22(4), 331-
# 348.
test_mmdlh = [
10.9, # Jan
11.3, # Feb
11.9, # Mar
12.5, # Apr
12.9, # May
13.2, # Jun
13.1, # Jul
12.7, # Aug
12.1, # Sep
11.5, # Oct
11.0, # Nov
10.8, # Dec
]
mmdlh = pyeto.monthly_mean_daylight_hours(pyeto.deg2rad(20.0))
# Values were only quoted to 1 decimal place so check they are accurate
# to within 12 minutes (0.2 hours)
for m in range(12):
self.assertAlmostEqual(mmdlh[m], test_mmdlh[m], delta=0.15)
# Test against values for latitude 46 deg N from Mimikou M. and
# Baltas E., Technical hydrology, Second edition, NTUA, 2002.
# cited in PAPADOPOULOU E., VARANOU E., BALTAS E., DASSAKLIS A., and
# MIMIKOU M. (2003) ESTIMATING POTENTIAL EVAPOTRANSPIRATION AND ITS
# SPATIAL DISTRIBUTION IN GREECE USING EMPIRICAL METHODS.
test_mmdlh = [
8.9, # Jan
10.1, # Feb
11.6, # Mar
13.3, # Apr
14.7, # May
15.5, # Jun
15.2, # Jul
13.9, # Aug
12.3, # Sep
10.7, # Oct
9.2, # Nov
8.5, # Dec
]
mmdlh = pyeto.monthly_mean_daylight_hours(pyeto.deg2rad(46.0))
# Values were only quoted to 1 decimal place so check they are accurate
# to within 12 minutes (0.2 hours)
for m in range(12):
self.assertAlmostEqual(mmdlh[m], test_mmdlh[m], delta=0.15)
# Test against values obtained for Los Angles, California,
# latitude 34 deg 05' N, from
# http://aa.usno.navy.mil/data/docs/Dur_OneYear.php
latitude = pyeto.deg2rad(34.0833333)
la_mmdlh = [
10.182, # Jan
10.973, # Feb
11.985, # Mar
13.046, # Apr
13.940, # May
14.388, # Jun
14.163, # Jul
13.404, # Aug
12.374, # Sep
11.320, # Oct
10.401, # Nov
9.928, # Dec
]
mmdlh = pyeto.monthly_mean_daylight_hours(latitude)
# Check that the 2 methods are almost the same (within 15 minutes)
for m in range(12):
self.assertAlmostEqual(mmdlh[m], la_mmdlh[m], delta=0.25)
# Test with year set to a non-leap year
non_leap = pyeto.monthly_mean_daylight_hours(latitude, 2015)
for m in range(12):
self.assertEqual(mmdlh[m], non_leap[m])
# Test with year set to a leap year
leap = pyeto.monthly_mean_daylight_hours(latitude, 2016)
for m in range(12):
if m == 0:
self.assertEqual(leap[m], non_leap[m])
elif m == 1: # Feb
# Because Feb extends further into year in a leap year it
# should have a slightly longer mean day length in northern
# hemisphere
self.assertGreater(leap[m], non_leap[m])
else:
# All months after Feb in a lieap year will be composed of
# diffent Julian days (days of the year) compared to a
# non-leap year so will have different mean daylengths.
self.assertNotEqual(leap[m], non_leap[m])
# Test with bad latitude
with self.assertRaises(ValueError):
_ = pyeto.monthly_mean_daylight_hours(
pyeto.deg2rad(90.01))
with self.assertRaises(ValueError):
_ = pyeto.monthly_mean_daylight_hours(
pyeto.deg2rad(-90.01))
# Test limits of latitude
_ = pyeto.monthly_mean_daylight_hours(
pyeto.deg2rad(90.0))
_ = pyeto.monthly_mean_daylight_hours(
pyeto.deg2rad(-90.0))
def test_thornthwaite(self):
# Test values obtained from a worked example in Hydrology: An
# Environmental Approach, pp 435-436 by <NAME>.
test_monthly_t = [
2.1, 2.5, 4.8, 7.1, 8.3, 10.7, 13.4, 14.5, 11.1, 8.2, 5.4, 3.7]
test_monthly_mean_dlh = [
9.4, 10.6, 11.9, 13.4, 14.6, 15.2, 14.9, 13.9, 12.6, 11.1, 9.8, 9.1]
test_pet = [
10.67, 14.08, 28.49, 45.85, 57.47, 75.20, 89.91, 90.29, 64.26,
43.34, 26.24, 17.31]
# NOTE: The test PET was calculated using rounded coefficients, rounded
# intermediate values and doesn't adjust for the number of days in
# the month. This results in a small difference in estimated monthly
# PET of up to +/- 4 mm.
pet = pyeto.thornthwaite(test_monthly_t, test_monthly_mean_dlh)
for m in range(12):
diff = abs(pet[m] - test_pet[m])
self.assertLess(diff, 4)
# Test with non-leap year
pet_non_leap = pyeto.thornthwaite(
test_monthly_t, test_monthly_mean_dlh, year=2015)
# Test results are same as above when year argument is set
for m in range(12):
self.assertEqual(pet[m], pet_non_leap[m])
# Test with leap year
pet_leap = pyeto.thornthwaite(
test_monthly_t, test_monthly_mean_dlh, year=2016)
for m in range(12):
# 29 days in Feb so PET should be higher than in non-leap year
# results
if m == 1: # Feb
self.assertGreater(pet_leap[m], pet_non_leap[m])
else:
self.assertEqual(pet_leap[m], pet_non_leap[m])
# Test with wrong length args
with self.assertRaises(ValueError):
_ = pyeto.thornthwaite(list(range(11)), test_monthly_mean_dlh)
with self.assertRaises(ValueError):
_ = pyeto.thornthwaite(list(range(13)), test_monthly_mean_dlh)
with self.assertRaises(ValueError):
_ = pyeto.thornthwaite(test_monthly_t, list(range(11)))
with self.assertRaises(ValueError):
_ = pyeto.thornthwaite(test_monthly_t, list(range(13)))
if __name__ == '__main__':
unittest.main()
| [
"unittest.main",
"pyeto.thornthwaite",
"pyeto.deg2rad",
"pyeto.monthly_mean_daylight_hours"
] | [((6601, 6616), 'unittest.main', 'unittest.main', ([], {}), '()\n', (6614, 6616), False, 'import unittest\n'), ((2299, 2324), 'pyeto.deg2rad', 'pyeto.deg2rad', (['(34.0833333)'], {}), '(34.0833333)\n', (2312, 2324), False, 'import pyeto\n'), ((2697, 2740), 'pyeto.monthly_mean_daylight_hours', 'pyeto.monthly_mean_daylight_hours', (['latitude'], {}), '(latitude)\n', (2730, 2740), False, 'import pyeto\n'), ((2983, 3032), 'pyeto.monthly_mean_daylight_hours', 'pyeto.monthly_mean_daylight_hours', (['latitude', '(2015)'], {}), '(latitude, 2015)\n', (3016, 3032), False, 'import pyeto\n'), ((3173, 3222), 'pyeto.monthly_mean_daylight_hours', 'pyeto.monthly_mean_daylight_hours', (['latitude', '(2016)'], {}), '(latitude, 2016)\n', (3206, 3222), False, 'import pyeto\n'), ((5177, 5234), 'pyeto.thornthwaite', 'pyeto.thornthwaite', (['test_monthly_t', 'test_monthly_mean_dlh'], {}), '(test_monthly_t, test_monthly_mean_dlh)\n', (5195, 5234), False, 'import pyeto\n'), ((5403, 5471), 'pyeto.thornthwaite', 'pyeto.thornthwaite', (['test_monthly_t', 'test_monthly_mean_dlh'], {'year': '(2015)'}), '(test_monthly_t, test_monthly_mean_dlh, year=2015)\n', (5421, 5471), False, 'import pyeto\n'), ((5684, 5752), 'pyeto.thornthwaite', 'pyeto.thornthwaite', (['test_monthly_t', 'test_monthly_mean_dlh'], {'year': '(2016)'}), '(test_monthly_t, test_monthly_mean_dlh, year=2016)\n', (5702, 5752), False, 'import pyeto\n'), ((881, 900), 'pyeto.deg2rad', 'pyeto.deg2rad', (['(20.0)'], {}), '(20.0)\n', (894, 900), False, 'import pyeto\n'), ((1870, 1889), 'pyeto.deg2rad', 'pyeto.deg2rad', (['(46.0)'], {}), '(46.0)\n', (1883, 1889), False, 'import pyeto\n'), ((4287, 4306), 'pyeto.deg2rad', 'pyeto.deg2rad', (['(90.0)'], {}), '(90.0)\n', (4300, 4306), False, 'import pyeto\n'), ((4368, 4388), 'pyeto.deg2rad', 'pyeto.deg2rad', (['(-90.0)'], {}), '(-90.0)\n', (4381, 4388), False, 'import pyeto\n'), ((4036, 4056), 'pyeto.deg2rad', 'pyeto.deg2rad', (['(90.01)'], {}), '(90.01)\n', (4049, 4056), False, 'import 
pyeto\n'), ((4170, 4191), 'pyeto.deg2rad', 'pyeto.deg2rad', (['(-90.01)'], {}), '(-90.01)\n', (4183, 4191), False, 'import pyeto\n')] |
import cv2
from ml.facial_expression_classification import predict_facial_expression_by_array, IMAGE_WIDTH, IMAGE_HEIGHT
from video.camera import Camera
OPENCV_HAARCASCADE_FRONTALFACE_FILE = 'trained_models/opencv/haarcascades/haarcascade_frontalface_alt.xml'
class EmotionDetectionCamera(Camera):
def __init__(self):
self.face_cascade = cv2.CascadeClassifier(OPENCV_HAARCASCADE_FRONTALFACE_FILE)
self.font = cv2.FONT_HERSHEY_SIMPLEX
super().__init__()
def get_frame(self):
_, frame = self.video.read()
frame_gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
frame_gray = cv2.equalizeHist(frame_gray)
faces = self.face_cascade.detectMultiScale(frame_gray, 1.3, 5)
for (x, y, w, h) in faces:
cv2.rectangle(frame, (x, y), (x + w, y + h), (0, 255, 0), 3)
face_roi = frame[y:y + h, x:x + w]
face_roi = cv2.resize(face_roi, (IMAGE_WIDTH, IMAGE_HEIGHT))
result = predict_facial_expression_by_array(face_roi)
cv2.rectangle(frame, (x, y - 40), (x + w, y), (0, 255, 0), -1)
cv2.putText(frame, result, (x + 10, y - 10), self.font, 0.7, (0, 0, 0), 2)
_, jpeg = cv2.imencode('.jpg', frame)
return jpeg.tobytes()
| [
"cv2.rectangle",
"cv2.imencode",
"ml.facial_expression_classification.predict_facial_expression_by_array",
"cv2.putText",
"cv2.equalizeHist",
"cv2.cvtColor",
"cv2.CascadeClassifier",
"cv2.resize"
] | [((354, 412), 'cv2.CascadeClassifier', 'cv2.CascadeClassifier', (['OPENCV_HAARCASCADE_FRONTALFACE_FILE'], {}), '(OPENCV_HAARCASCADE_FRONTALFACE_FILE)\n', (375, 412), False, 'import cv2\n'), ((569, 608), 'cv2.cvtColor', 'cv2.cvtColor', (['frame', 'cv2.COLOR_BGR2GRAY'], {}), '(frame, cv2.COLOR_BGR2GRAY)\n', (581, 608), False, 'import cv2\n'), ((630, 658), 'cv2.equalizeHist', 'cv2.equalizeHist', (['frame_gray'], {}), '(frame_gray)\n', (646, 658), False, 'import cv2\n'), ((1208, 1235), 'cv2.imencode', 'cv2.imencode', (['""".jpg"""', 'frame'], {}), "('.jpg', frame)\n", (1220, 1235), False, 'import cv2\n'), ((778, 838), 'cv2.rectangle', 'cv2.rectangle', (['frame', '(x, y)', '(x + w, y + h)', '(0, 255, 0)', '(3)'], {}), '(frame, (x, y), (x + w, y + h), (0, 255, 0), 3)\n', (791, 838), False, 'import cv2\n'), ((910, 959), 'cv2.resize', 'cv2.resize', (['face_roi', '(IMAGE_WIDTH, IMAGE_HEIGHT)'], {}), '(face_roi, (IMAGE_WIDTH, IMAGE_HEIGHT))\n', (920, 959), False, 'import cv2\n'), ((981, 1025), 'ml.facial_expression_classification.predict_facial_expression_by_array', 'predict_facial_expression_by_array', (['face_roi'], {}), '(face_roi)\n', (1015, 1025), False, 'from ml.facial_expression_classification import predict_facial_expression_by_array, IMAGE_WIDTH, IMAGE_HEIGHT\n'), ((1039, 1101), 'cv2.rectangle', 'cv2.rectangle', (['frame', '(x, y - 40)', '(x + w, y)', '(0, 255, 0)', '(-1)'], {}), '(frame, (x, y - 40), (x + w, y), (0, 255, 0), -1)\n', (1052, 1101), False, 'import cv2\n'), ((1114, 1188), 'cv2.putText', 'cv2.putText', (['frame', 'result', '(x + 10, y - 10)', 'self.font', '(0.7)', '(0, 0, 0)', '(2)'], {}), '(frame, result, (x + 10, y - 10), self.font, 0.7, (0, 0, 0), 2)\n', (1125, 1188), False, 'import cv2\n')] |
import unittest
from vb2py.test_at_scale import file_tester
class Test_heinsega(file_tester.FileTester):
def test0(self):
self._testFile('/Users/paul/Workspace/sandbox/vb2py-git-files/heinsega/OX163_VB6project_Win32/Module1.bas')
def test1(self):
self._testFile('/Users/paul/Workspace/sandbox/vb2py-git-files/heinsega/OX163_VB6project_Win32/start.frm')
def test2(self):
self._testFile('/Users/paul/Workspace/sandbox/vb2py-git-files/heinsega/OX163_VB6project_Win32/ShutDownWin.frm')
def test3(self):
self._testFile('/Users/paul/Workspace/sandbox/vb2py-git-files/heinsega/OX163_VB6project_Win32/password_win.frm')
def test4(self):
self._testFile('/Users/paul/Workspace/sandbox/vb2py-git-files/heinsega/OX163_VB6project_Win32/OX_CookiesCtrl.bas')
def test5(self):
self._testFile('/Users/paul/Workspace/sandbox/vb2py-git-files/heinsega/OX163_VB6project_Win32/Parsing.bas')
def test6(self):
self._testFile('/Users/paul/Workspace/sandbox/vb2py-git-files/heinsega/OX163_VB6project_Win32/BrowserW.frm')
def test7(self):
self._testFile('/Users/paul/Workspace/sandbox/vb2py-git-files/heinsega/OX163_VB6project_Win32/OX_manifest.bas')
def test8(self):
self._testFile('/Users/paul/Workspace/sandbox/vb2py-git-files/heinsega/OX163_VB6project_Win32/Declare_Function.bas')
def test9(self):
self._testFile('/Users/paul/Workspace/sandbox/vb2py-git-files/heinsega/OX163_VB6project_Win32/OX_function.bas')
def test10(self):
self._testFile('/Users/paul/Workspace/sandbox/vb2py-git-files/heinsega/OX163_VB6project_Win32/OX_FileSystem.bas')
def test11(self):
self._testFile('/Users/paul/Workspace/sandbox/vb2py-git-files/heinsega/OX163_VB6project_Win32/Transcoding.bas')
def test12(self):
self._testFile('/Users/paul/Workspace/sandbox/vb2py-git-files/heinsega/OX163_VB6project_Win32/History_Logs.frm')
def test13(self):
self._testFile('/Users/paul/Workspace/sandbox/vb2py-git-files/heinsega/OX163_VB6project_Win32/script_from.frm')
def test14(self):
self._testFile('/Users/paul/Workspace/sandbox/vb2py-git-files/heinsega/OX163_VB6project_Win32/CMDresult.bas')
def test15(self):
self._testFile('/Users/paul/Workspace/sandbox/vb2py-git-files/heinsega/OX163_VB6project_Win32/variable.bas')
def test16(self):
self._testFile('/Users/paul/Workspace/sandbox/vb2py-git-files/heinsega/OX163_VB6project_Win32/OX_MouseWheel.bas')
def test17(self):
self._testFile('/Users/paul/Workspace/sandbox/vb2py-git-files/heinsega/OX163_VB6project_Win32/OX_Finish_Download.frm')
def test18(self):
self._testFile('/Users/paul/Workspace/sandbox/vb2py-git-files/heinsega/OX163_VB6project_Win32/Ctrl8dot3name.frm')
def test19(self):
self._testFile('/Users/paul/Workspace/sandbox/vb2py-git-files/heinsega/OX163_VB6project_Win32/ComDialog.frm')
def test20(self):
self._testFile('/Users/paul/Workspace/sandbox/vb2py-git-files/heinsega/OX163_VB6project_Win32/sys.frm')
def test21(self):
self._testFile('/Users/paul/Workspace/sandbox/vb2py-git-files/heinsega/OX163_VB6project_Win32/OX_163_Module.bas')
def test22(self):
self._testFile('/Users/paul/Workspace/sandbox/vb2py-git-files/heinsega/OX163_VB6project_Win32/OX163_mainfrm.frm')
if __name__ == '__main__':
unittest.main()
| [
"unittest.main"
] | [((3205, 3220), 'unittest.main', 'unittest.main', ([], {}), '()\n', (3218, 3220), False, 'import unittest\n')] |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Thu Oct 17 10:23:50 2019
@author: chiara
"""
import os
import numpy as np # scientific calculation
import pandas as pd # data analysis
import itertools
import warnings
from statsmodels.tsa.arima_model import ARMA
ts1=list(range(0,500,2))
len(ts1)
model=ARMA(ts1,order=(0,1))
#model.information()
fit=model.fit(disp=5)
fit.summary()
# ARMA Model Results
#==============================================================================
#Dep. Variable: y No. Observations: 250
#Model: ARMA(0, 1) Log Likelihood -1428.744
#Method: css-mle S.D. of innovations 72.604
#Date: Thu, 17 Oct 2019 AIC 2863.489
#Time: 10:57:35 BIC 2874.053
#Sample: 0 HQIC 2867.740
#
#==============================================================================
# coef std err z P>|z| [0.025 0.975]
#------------------------------------------------------------------------------
#const 249.0083 9.165 27.169 0.000 231.045 266.972
#ma.L1.y 0.9999 0.010 101.243 0.000 0.981 1.019
# Roots
#=============================================================================
# Real Imaginary Modulus Frequency
#-----------------------------------------------------------------------------
#MA.1 -1.0001 +0.0000j 1.0001 0.5000
#-----------------------------------------------------------------------------
# o) P>\z\ is the p-val
# o) AIC (Akaike Information Criterion) value measures how well a model fits
# the data while taking into account the overall complexity of the model.
# A model that fits the data very well while using lots of features will be
# assigned a larger AIC score than a model that uses fewer features to achieve
# the same goodness-of-fit. Therefore, we are interested in finding the model
# that yields the lowest AIC value.
pred=fit.predict(len(ts1),len(ts1)) #374.49
pred
from statsmodels.tsa.vector_ar.var_model import VAR
#from statsmodels.tsa.statespace.varmax import VARMAX
ts2=list(range(500,1000,2))
ts=pd.DataFrame({"ts1":ts1,"ts2":ts2})
model=VAR(ts) #,order=(0,1)
#model.information()
fit=model.fit()
fit.summary()
# Summary of Regression Results
#==================================
#Model: VAR
#Method: OLS
#Date: Thu, 17, Oct, 2019
#Time: 16:00:22
#--------------------------------------------------------------------
#No. of Equations: 2.00000 BIC: -116.125
#Nobs: 249.000 HQIC: -116.175
#Log likelihood: 13767.4 FPE: 3.39553e-51
#AIC: -116.209 Det(Omega_mle): 3.31516e-51
#--------------------------------------------------------------------
#Results for equation ts1
#=========================================================================
# coefficient std. error t-stat prob
#-------------------------------------------------------------------------
#const -0.001984 NAN NAN NAN
#L1.ts1 0.995996 NAN NAN NAN
#L1.ts2 0.004004 NAN NAN NAN
#=========================================================================
#
#Results for equation ts2
#=========================================================================
# coefficient std. error t-stat prob
#-------------------------------------------------------------------------
#const 0.002016 NAN NAN NAN
#L1.ts1 -0.003996 NAN NAN NAN
#L1.ts2 1.003996 NAN NAN NAN
#=========================================================================
#
#Correlation matrix of residuals
# ts1 ts2
#ts1 1.000000 0.951165
#ts2 0.951165 1.000000
pred=fit.forecast(fit.y,steps=1) #array([[ 500., 1000.]])
pred
pred=fit.forecast(fit.y,steps=3)
pred #array([[ 500., 1000.],
# [ 502., 1002.],
# [ 504., 1004.]])
##################################### SARIMAX
from statsmodels.tsa.statespace.sarimax import SARIMAX
# Create parameters
# Define the p, d and q parameters to take any value between 0 and 2
p = d = q = range(0, 2)
# Generate all different combinations of p, q and q triplets
pdq = list(itertools.product(p, d, q))
# Generate all different combinations of seasonal p, q and q triplets
seasonal_pdq = [(x[0], x[1], x[2], 52) for x in pdq]#list(itertools.product(p, d, q))
warnings.filterwarnings("ignore") # specify to ignore warning messages
param=pdq[0]
param_seasonal=seasonal_pdq[0]
for param in pdq:
for param_seasonal in seasonal_pdq:
try:
mod = SARIMAX(ts1, order=param,seasonal_order=param_seasonal,
enforce_stationarity=False,
enforce_invertibility=False)
results = mod.fit()
print('ARIMA{}x{}12 - AIC:{}'.format(param, param_seasonal, results.aic))
except:
continue
#ARIMA(0, 0, 0)x(0, 0, 0, 52)12 - AIC:3529.4532640333523
#ARIMA(0, 0, 0)x(0, 0, 1, 52)12 - AIC:8524.710121490572
#ARIMA(0, 0, 0)x(0, 1, 0, 52)12 - AIC:2390.951838473629
#ARIMA(0, 0, 0)x(0, 1, 1, 52)12 - AIC:6109.756521634717
#ARIMA(0, 0, 0)x(1, 0, 0, 52)12 - AIC:2132.090287303192
#ARIMA(0, 0, 0)x(1, 0, 1, 52)12 - AIC:2034.1091306333342
#ARIMA(0, 0, 0)x(1, 1, 0, 52)12 - AIC:-3089.4441840755426
#ARIMA(0, 0, 0)x(1, 1, 1, 52)12 - AIC:nan
#ARIMA(0, 0, 1)x(0, 0, 0, 52)12 - AIC:8827.74964853632
#ARIMA(0, 0, 1)x(0, 0, 1, 52)12 - AIC:nan
#ARIMA(0, 0, 1)x(0, 1, 0, 52)12 - AIC:8529.012165403003
#ARIMA(0, 0, 1)x(0, 1, 1, 52)12 - AIC:16764.04877539664
#ARIMA(0, 0, 1)x(1, 0, 0, 52)12 - AIC:9566.733370582071
#ARIMA(0, 0, 1)x(1, 0, 1, 52)12 - AIC:8295.369705647365
#ARIMA(0, 0, 1)x(1, 1, 0, 52)12 - AIC:6356.26416402472
#ARIMA(0, 0, 1)x(1, 1, 1, 52)12 - AIC:6271.2742439695485
#ARIMA(0, 1, 0)x(0, 0, 0, 52)12 - AIC:1049.5945140272559
#ARIMA(0, 1, 0)x(0, 0, 1, 52)12 - AIC:9789.103372012913
#ARIMA(0, 1, 0)x(0, 1, 0, 52)12 - AIC:nan
#ARIMA(0, 1, 0)x(0, 1, 1, 52)12 - AIC:nan
#ARIMA(0, 1, 0)x(1, 0, 0, 52)12 - AIC:-4170.033637108996
#ARIMA(0, 1, 0)x(1, 0, 1, 52)12 - AIC:-4153.431343153703
#ARIMA(0, 1, 0)x(1, 1, 0, 52)12 - AIC:-3013.1187268516032
#ARIMA(0, 1, 0)x(1, 1, 1, 52)12 - AIC:-3202.583612185782
#ARIMA(0, 1, 1)x(0, 0, 0, 52)12 - AIC:10707.71402921827
#ARIMA(0, 1, 1)x(0, 0, 1, 52)12 - AIC:20986.03629024016 worst
#ARIMA(0, 1, 1)x(0, 1, 0, 52)12 - AIC:nan
#ARIMA(0, 1, 1)x(0, 1, 1, 52)12 - AIC:nan
#ARIMA(0, 1, 1)x(1, 0, 0, 52)12 - AIC:8542.970298607246
#ARIMA(0, 1, 1)x(1, 0, 1, 52)12 - AIC:8458.300549382868
#ARIMA(0, 1, 1)x(1, 1, 0, 52)12 - AIC:-3011.1187268516032
#ARIMA(0, 1, 1)x(1, 1, 1, 52)12 - AIC:-3018.8321417660136
#ARIMA(1, 0, 0)x(0, 0, 0, 52)12 - AIC:712.1298895449919
#ARIMA(1, 0, 0)x(0, 0, 1, 52)12 - AIC:10620.112972204352
#ARIMA(1, 0, 0)x(0, 1, 0, 52)12 - AIC:nan
#ARIMA(1, 0, 0)x(0, 1, 1, 52)12 - AIC:6111.756521634712
#ARIMA(1, 0, 0)x(1, 0, 0, 52)12 - AIC:-2365.892284196455
#ARIMA(1, 0, 0)x(1, 0, 1, 52)12 - AIC:-1950.972772140532
#ARIMA(1, 0, 0)x(1, 1, 0, 52)12 - AIC:nan
#ARIMA(1, 0, 0)x(1, 1, 1, 52)12 - AIC:nan
#ARIMA(1, 0, 1)x(0, 0, 0, 52)12 - AIC:372.5044628282068
#ARIMA(1, 0, 1)x(0, 0, 1, 52)12 - AIC:9083.281510795705
#ARIMA(1, 0, 1)x(0, 1, 0, 52)12 - AIC:nan
#ARIMA(1, 0, 1)x(0, 1, 1, 52)12 - AIC:6071.64785596824
#ARIMA(1, 0, 1)x(1, 0, 0, 52)12 - AIC:-2089.2449870039572
#ARIMA(1, 0, 1)x(1, 0, 1, 52)12 - AIC:-1929.925530884988
#ARIMA(1, 0, 1)x(1, 1, 0, 52)12 - AIC:nan
#ARIMA(1, 0, 1)x(1, 1, 1, 52)12 - AIC:nan
#ARIMA(1, 1, 0)x(0, 0, 0, 52)12 - AIC:-5251.66293223826
#ARIMA(1, 1, 0)x(0, 0, 1, 52)12 - AIC:8233.103162467083
#ARIMA(1, 1, 0)x(0, 1, 0, 52)12 - AIC:nan
#ARIMA(1, 1, 0)x(0, 1, 1, 52)12 - AIC:-3202.583612185782
#ARIMA(1, 1, 0)x(1, 0, 0, 52)12 - AIC:-4146.842877252098
#ARIMA(1, 1, 0)x(1, 0, 1, 52)12 - AIC:-5916.636927368082 <====== *
#ARIMA(1, 1, 0)x(1, 1, 0, 52)12 - AIC:-3202.583612185782
#ARIMA(1, 1, 0)x(1, 1, 1, 52)12 - AIC:-3200.583612185782
#ARIMA(1, 1, 1)x(0, 0, 0, 52)12 - AIC:-5242.946995244625
#ARIMA(1, 1, 1)x(0, 0, 1, 52)12 - AIC:8193.128146332323
#ARIMA(1, 1, 1)x(0, 1, 0, 52)12 - AIC:nan
#ARIMA(1, 1, 1)x(0, 1, 1, 52)12 - AIC:-3018.8321417660136
#ARIMA(1, 1, 1)x(1, 0, 0, 52)12 - AIC:-4902.063264828318
#ARIMA(1, 1, 1)x(1, 0, 1, 52)12 - AIC:-5051.314673560011
#ARIMA(1, 1, 1)x(1, 1, 0, 52)12 - AIC:-3200.583612185782
#ARIMA(1, 1, 1)x(1, 1, 1, 52)12 - AIC:-3016.8321417660136
| [
"itertools.product",
"statsmodels.tsa.statespace.sarimax.SARIMAX",
"statsmodels.tsa.arima_model.ARMA",
"pandas.DataFrame",
"warnings.filterwarnings",
"statsmodels.tsa.vector_ar.var_model.VAR"
] | [((322, 345), 'statsmodels.tsa.arima_model.ARMA', 'ARMA', (['ts1'], {'order': '(0, 1)'}), '(ts1, order=(0, 1))\n', (326, 345), False, 'from statsmodels.tsa.arima_model import ARMA\n'), ((2634, 2672), 'pandas.DataFrame', 'pd.DataFrame', (["{'ts1': ts1, 'ts2': ts2}"], {}), "({'ts1': ts1, 'ts2': ts2})\n", (2646, 2672), True, 'import pandas as pd\n'), ((2677, 2684), 'statsmodels.tsa.vector_ar.var_model.VAR', 'VAR', (['ts'], {}), '(ts)\n', (2680, 2684), False, 'from statsmodels.tsa.vector_ar.var_model import VAR\n'), ((5273, 5306), 'warnings.filterwarnings', 'warnings.filterwarnings', (['"""ignore"""'], {}), "('ignore')\n", (5296, 5306), False, 'import warnings\n'), ((5087, 5113), 'itertools.product', 'itertools.product', (['p', 'd', 'q'], {}), '(p, d, q)\n', (5104, 5113), False, 'import itertools\n'), ((5478, 5595), 'statsmodels.tsa.statespace.sarimax.SARIMAX', 'SARIMAX', (['ts1'], {'order': 'param', 'seasonal_order': 'param_seasonal', 'enforce_stationarity': '(False)', 'enforce_invertibility': '(False)'}), '(ts1, order=param, seasonal_order=param_seasonal,\n enforce_stationarity=False, enforce_invertibility=False)\n', (5485, 5595), False, 'from statsmodels.tsa.statespace.sarimax import SARIMAX\n')] |
# Future imports
from __future__ import (
annotations
)
# Standard imports
import argparse
from typing import (
Sequence
)
from pathlib import Path
# Local imports
import reddack
import reddack.config
import reddack.utils
def create_arg_parser() -> argparse.ArgumentParser:
"""Create the argument parser for the CLI"""
parser = argparse.ArgumentParser(
description=(
"Moderate Reddit communities via Slack"
),
argument_default=argparse.SUPPRESS
)
parser.add_argument(
"--config",
dest="config_path",
required=True,
help="The path to the config file."
)
parser.add_argument(
"--queue",
action="store_true"
)
return parser
def process_args(parsedargs):
configpath = Path(parsedargs.configpath)
if configpath.suffix == ".json":
reddack_objs = reddack.config.reddack_from_file(configpath)
if parsedargs.queue:
for objs in reddack_objs:
reddack.utils.sync(objs)
def cli(sys_argv: Sequence[str] | None = None) -> None:
"""Parse the CLI arguments"""
parser = create_arg_parser()
parsed_args = parser.parse_args(sys_argv)
process_args(parsed_args)
def main(sys_argv: Sequence[str] | None = None) -> None:
"""Run through the CLI."""
cli(sys_argv)
| [
"reddack.config.reddack_from_file",
"reddack.utils.sync",
"argparse.ArgumentParser",
"pathlib.Path"
] | [((348, 464), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'description': '"""Moderate Reddit communities via Slack"""', 'argument_default': 'argparse.SUPPRESS'}), "(description='Moderate Reddit communities via Slack',\n argument_default=argparse.SUPPRESS)\n", (371, 464), False, 'import argparse\n'), ((800, 827), 'pathlib.Path', 'Path', (['parsedargs.configpath'], {}), '(parsedargs.configpath)\n', (804, 827), False, 'from pathlib import Path\n'), ((888, 932), 'reddack.config.reddack_from_file', 'reddack.config.reddack_from_file', (['configpath'], {}), '(configpath)\n', (920, 932), False, 'import reddack\n'), ((1004, 1028), 'reddack.utils.sync', 'reddack.utils.sync', (['objs'], {}), '(objs)\n', (1022, 1028), False, 'import reddack\n')] |
import tfcoreml as tf_converter
tf_converter.convert(tf_model_path = 'retrained_graph.pb',
mlmodel_path = 'converted.mlmodel',
output_feature_names = ['final_result:0'],
image_input_names = 'input:0',
class_labels = 'retrained_labels.txt',
red_bias = -1,
green_bias = -1,
blue_bias = -1,
image_scale = 2.0/224.0
)
| [
"tfcoreml.convert"
] | [((32, 309), 'tfcoreml.convert', 'tf_converter.convert', ([], {'tf_model_path': '"""retrained_graph.pb"""', 'mlmodel_path': '"""converted.mlmodel"""', 'output_feature_names': "['final_result:0']", 'image_input_names': '"""input:0"""', 'class_labels': '"""retrained_labels.txt"""', 'red_bias': '(-1)', 'green_bias': '(-1)', 'blue_bias': '(-1)', 'image_scale': '(2.0 / 224.0)'}), "(tf_model_path='retrained_graph.pb', mlmodel_path=\n 'converted.mlmodel', output_feature_names=['final_result:0'],\n image_input_names='input:0', class_labels='retrained_labels.txt',\n red_bias=-1, green_bias=-1, blue_bias=-1, image_scale=2.0 / 224.0)\n", (52, 309), True, 'import tfcoreml as tf_converter\n')] |
from django.test import TestCase
from checkout_backend.entities.offer_entity import OfferEntity
from checkout_backend.entities.product_entity import ProductEntity
from checkout_backend.uses_cases.total_amount_processor import TotalAmountProcessor
class OffersTestCase(TestCase):
def setUp(self):
self.product_pen = ProductEntity(
id=1,
code='PEN',
name='PEN',
price=500,
)
self.product_tshirt = ProductEntity(
id=2,
code='TSHIRT',
name='TSHIRT',
price=2000,
)
self.product_mug = ProductEntity(
id=3,
code='MUG',
name='MUG',
price=750,
)
self.multi_buy_offer = OfferEntity(
id=1,
name='2x1',
product=self.product_pen,
quantity=2,
discount_unit=1,
discount_percent=0,
)
self.depend_discount_offer = OfferEntity(
id=2,
name='3 or more discount 25%',
product=self.product_tshirt,
quantity=3,
discount_unit=0,
discount_percent=25,
)
self.offers = [
self.multi_buy_offer,
self.depend_discount_offer,
]
self.total_amount_processor = TotalAmountProcessor(self.offers)
def test_get_total_amount_with_multi_buy_offer(self):
"""Test get total amount with a multi buy offer"""
total_amount = self.total_amount_processor.get_total_amount(
[
{
'quantity': self.multi_buy_offer.quantity,
'product': self.multi_buy_offer.product,
}
]
)
self.assertEqual(total_amount, 500)
def test_get_total_amount_with_percent_discount_offer(self):
"""Test get total amount with percent discount amount"""
total_amount = self.total_amount_processor.get_total_amount(
[
{
'quantity': self.depend_discount_offer.quantity,
'product': self.depend_discount_offer.product,
}
]
)
self.assertEqual(total_amount, 4500)
def test_get_total_amount_with_lane_case_1(self):
"""Test lana case 1"""
total_amount = self.total_amount_processor.get_total_amount(
[
{
'quantity': 1,
'product': self.product_pen,
},
{
'quantity': 1,
'product': self.product_tshirt,
},
{
'quantity': 1,
'product': self.product_mug,
},
],
)
self.assertEqual(total_amount, 3250)
def test_get_total_amount_with_lane_case_2(self):
"""Test lana case 2"""
total_amount = self.total_amount_processor.get_total_amount(
[
{
'quantity': 2,
'product': self.product_pen,
},
{
'quantity': 1,
'product': self.product_tshirt,
},
],
)
self.assertEqual(total_amount, 2500)
def test_get_total_amount_with_lane_case_3(self):
"""Test lana case 3"""
total_amount = self.total_amount_processor.get_total_amount(
[
{
'quantity': 1,
'product': self.product_pen,
},
{
'quantity': 4,
'product': self.product_tshirt,
},
],
)
self.assertEqual(total_amount, 6500)
def test_get_total_amount_with_lane_case_4(self):
"""Test lana case 4"""
total_amount = self.total_amount_processor.get_total_amount(
[
{
'quantity': 3,
'product': self.product_pen,
},
{
'quantity': 3,
'product': self.product_tshirt,
},
{
'quantity': 1,
'product': self.product_mug,
},
],
)
self.assertEqual(total_amount, 6250)
| [
"checkout_backend.entities.product_entity.ProductEntity",
"checkout_backend.uses_cases.total_amount_processor.TotalAmountProcessor",
"checkout_backend.entities.offer_entity.OfferEntity"
] | [((331, 385), 'checkout_backend.entities.product_entity.ProductEntity', 'ProductEntity', ([], {'id': '(1)', 'code': '"""PEN"""', 'name': '"""PEN"""', 'price': '(500)'}), "(id=1, code='PEN', name='PEN', price=500)\n", (344, 385), False, 'from checkout_backend.entities.product_entity import ProductEntity\n'), ((475, 536), 'checkout_backend.entities.product_entity.ProductEntity', 'ProductEntity', ([], {'id': '(2)', 'code': '"""TSHIRT"""', 'name': '"""TSHIRT"""', 'price': '(2000)'}), "(id=2, code='TSHIRT', name='TSHIRT', price=2000)\n", (488, 536), False, 'from checkout_backend.entities.product_entity import ProductEntity\n'), ((623, 677), 'checkout_backend.entities.product_entity.ProductEntity', 'ProductEntity', ([], {'id': '(3)', 'code': '"""MUG"""', 'name': '"""MUG"""', 'price': '(750)'}), "(id=3, code='MUG', name='MUG', price=750)\n", (636, 677), False, 'from checkout_backend.entities.product_entity import ProductEntity\n'), ((769, 877), 'checkout_backend.entities.offer_entity.OfferEntity', 'OfferEntity', ([], {'id': '(1)', 'name': '"""2x1"""', 'product': 'self.product_pen', 'quantity': '(2)', 'discount_unit': '(1)', 'discount_percent': '(0)'}), "(id=1, name='2x1', product=self.product_pen, quantity=2,\n discount_unit=1, discount_percent=0)\n", (780, 877), False, 'from checkout_backend.entities.offer_entity import OfferEntity\n'), ((995, 1127), 'checkout_backend.entities.offer_entity.OfferEntity', 'OfferEntity', ([], {'id': '(2)', 'name': '"""3 or more discount 25%"""', 'product': 'self.product_tshirt', 'quantity': '(3)', 'discount_unit': '(0)', 'discount_percent': '(25)'}), "(id=2, name='3 or more discount 25%', product=self.\n product_tshirt, quantity=3, discount_unit=0, discount_percent=25)\n", (1006, 1127), False, 'from checkout_backend.entities.offer_entity import OfferEntity\n'), ((1354, 1387), 'checkout_backend.uses_cases.total_amount_processor.TotalAmountProcessor', 'TotalAmountProcessor', (['self.offers'], {}), '(self.offers)\n', (1374, 1387), False, 
'from checkout_backend.uses_cases.total_amount_processor import TotalAmountProcessor\n')] |
# test_processor.py, Copyright (c) 2019, Phenome Project - <NAME> <<EMAIL>>
from phenome_core.core.base.base_processor import BaseProcessor
class TestProcessor(BaseProcessor):
__test__ = False
def __init__(self):
super(TestProcessor, self).__init__()
def process(self, results):
from phenome.test.supporting.test_mockobject import MockObject
test_value = 45
object = MockObject()
object.id = 1
# here we would normally POLL the object
# populate the value with 45
results.set_result(object, 'test_value', test_value)
return results
| [
"phenome.test.supporting.test_mockobject.MockObject"
] | [((420, 432), 'phenome.test.supporting.test_mockobject.MockObject', 'MockObject', ([], {}), '()\n', (430, 432), False, 'from phenome.test.supporting.test_mockobject import MockObject\n')] |
import os
from cli.src.commands.BackupRecoveryBase import BackupRecoveryBase
from cli.src.helpers.doc_list_helpers import select_single
class Backup(BackupRecoveryBase):
    """Perform backup operations."""
    def __init__(self, input_data):
        # Skip BackupRecoveryBase.__init__ here and call Step.__init__ directly;
        # BackupRecoveryBase.__init__ is then invoked via the second super() call.
        super(BackupRecoveryBase, self).__init__(__name__)  # late call of the Step.__init__(__name__)
        super(Backup, self).__init__(input_data)
    def backup(self):
        """Backup all enabled components.

        Returns 0 on completion (shell-style success code).
        """
        self._process_input_docs()
        self._process_configuration_docs()
        # Get backup config document
        backup_doc = select_single(self.configuration_docs, lambda x: x.kind == 'configuration/backup')
        self._update_role_files_and_vars('backup', backup_doc)
        # Set env so ansible picks up the generated configuration file
        self.logger.info(f'ANSIBLE_CONFIG={self.ansible_config_file_path}')
        os.environ["ANSIBLE_CONFIG"] = self.ansible_config_file_path
        # Execute all enabled component playbooks sequentially; sorted() makes
        # the execution order deterministic across runs.
        for component_name, component_config in sorted(backup_doc.specification.components.items()):
            if component_config.enabled:
                self._update_playbook_files_and_run('backup', component_name)
        return 0
| [
"cli.src.helpers.doc_list_helpers.select_single"
] | [((605, 691), 'cli.src.helpers.doc_list_helpers.select_single', 'select_single', (['self.configuration_docs', "(lambda x: x.kind == 'configuration/backup')"], {}), "(self.configuration_docs, lambda x: x.kind ==\n 'configuration/backup')\n", (618, 691), False, 'from cli.src.helpers.doc_list_helpers import select_single\n')] |
"""
Messages:
https://wiki.theory.org/BitTorrentSpecification#Messages
<length prefix><message ID><payload>
"""
from collections import namedtuple
from struct import pack
from struct import unpack
# Generic message layout: <length prefix><message ID><payload>.
# (Kept for compatibility; the per-type FORMAT_* constants below are used.)
FORMAT = '>IB{}'

# len: 4-byte big-endian length prefix; id: numeric message type;
# payload: decoded payload, or None for messages that carry none.
Message = namedtuple('Message', 'len id payload')

# Message type IDs.  A keep-alive has no ID on the wire; -1 is a local sentinel.
KEEP_ALIVE = -1
CHOKE = 0
UNCHOKE = 1
INTERESTED = 2
NOT_INTERESTED = 3
HAVE = 4
BITFIELD = 5
REQUEST = 6
PIECE = 7
CANCEL = 8
PORT = 9

# struct formats per message type; '{}' slots hold a payload-dependent count.
FORMAT_KEEP_ALIVE = \
    FORMAT_CHOKE = \
    FORMAT_UNCHOKE = \
    FORMAT_INTERESTED = \
    FORMAT_NOT_INTERESTED = '>IB'
FORMAT_HAVE = '>IBI'
FORMAT_BITFIELD = '>IB{}B'
FORMAT_REQUEST = '>IBIII'
FORMAT_PIECE = '>IBII{}c'
FORMAT_CANCEL = '>IBIII'
FORMAT_PORT = '>IBH'


def decode(message):
    """Decode raw wire bytes into a Message namedtuple.

    A 4-byte message is necessarily a keep-alive (zero length prefix);
    anything else is dispatched on its message ID byte.
    """
    if len(message) == 4:
        return Message(0, KEEP_ALIVE, None)
    len_, id_ = unpack('>IB', message[:5])
    # Dispatch table indexed by message ID; every decoder receives the raw
    # message plus the payload length (len_ minus the one-byte ID).
    return [
        decode_choke,
        decode_unchoke,
        decode_interested,
        decode_not_interested,
        decode_have,
        decode_bitfield,
        decode_request,
        decode_piece,
        decode_cancel,
        decode_port,
    ][id_](message, len_ - 1)


# Messages


def keep_alive():
    return b'\x00\x00\x00\x00'


def choke():
    return b'\x00\x00\x00\x01\x00'


def unchoke():
    return b'\x00\x00\x00\x01\x01'


def interested():
    return b'\x00\x00\x00\x01\x02'


def not_interested():
    return b'\x00\x00\x00\x01\x03'


def have(piece_index):
    return pack(FORMAT_HAVE, 5, 4, piece_index)


def bitfield(bits):
    """Encode a bitfield message; *bits* is a sequence of payload bytes.

    Fixed: the original passed the whole sequence as a single struct argument
    and over-counted the '{}B' slot by one (it used the length prefix instead
    of the byte count), which raised struct.error for any real bitfield.
    """
    len_ = 1 + len(bits)
    return pack(FORMAT_BITFIELD.format(len(bits)), len_, 5, *bits)


def request(index, begin, length):
    return pack(FORMAT_REQUEST, 13, 6, index, begin, length)


def piece(index, begin, block):
    """Encode a piece message carrying *block* (a bytes object).

    Fixed: packing with '{}s' and the block's own length produces the same
    wire bytes as the '{}c' layout decode_piece expects, without needing one
    struct argument per byte (the original raised struct.error).
    """
    len_ = 9 + len(block)
    return pack('>IBII{}s'.format(len(block)), len_, 7, index, begin, block)


def cancel(index, begin, length):
    return pack(FORMAT_CANCEL, 13, 8, index, begin, length)


def port(listen_port):
    return pack(FORMAT_PORT, 3, 9, listen_port)


# Decoders


def decode_choke(message, _paylen):
    return Message(*unpack(FORMAT_CHOKE, message), None)


def decode_unchoke(message, _paylen):
    return Message(*unpack(FORMAT_UNCHOKE, message), None)


def decode_interested(message, _paylen):
    return Message(*unpack(FORMAT_INTERESTED, message), None)


def decode_not_interested(message, _paylen):
    return Message(*unpack(FORMAT_NOT_INTERESTED, message), None)


def decode_have(message, _paylen):
    return Message(*unpack(FORMAT_HAVE, message))


def decode_bitfield(message, paylen):
    # paylen = number of bitfield bytes following the ID.
    len_, id_, *payload = unpack(FORMAT_BITFIELD.format(paylen), message)
    return Message(len_, id_, payload)


def decode_request(message, _paylen=None):
    # Fixed: was an unimplemented one-argument stub, so decode()'s
    # two-argument dispatch raised TypeError for request messages.
    len_, id_, index, begin, length = unpack(FORMAT_REQUEST, message)
    return Message(len_, id_, (index, begin, length))


def decode_piece(message, paylen):
    # paylen - 8 = number of block bytes after the index/begin header.
    len_, id_, index, begin, *block = unpack(
        FORMAT_PIECE.format(paylen - 8),
        message
    )
    return Message(len_, id_, (index, begin, block))


def decode_cancel(message, _paylen=None):
    # Fixed: same stub/arity problem as decode_request.
    len_, id_, index, begin, length = unpack(FORMAT_CANCEL, message)
    return Message(len_, id_, (index, begin, length))


def decode_port(message, _paylen=None):
    # Fixed: same stub/arity problem as decode_request.
    return Message(*unpack(FORMAT_PORT, message))
| [
"collections.namedtuple",
"struct.pack",
"struct.unpack"
] | [((229, 268), 'collections.namedtuple', 'namedtuple', (['"""Message"""', '"""len id payload"""'], {}), "('Message', 'len id payload')\n", (239, 268), False, 'from collections import namedtuple\n'), ((772, 798), 'struct.unpack', 'unpack', (['""">IB"""', 'message[:5]'], {}), "('>IB', message[:5])\n", (778, 798), False, 'from struct import unpack\n'), ((1397, 1433), 'struct.pack', 'pack', (['FORMAT_HAVE', '(5)', '(4)', 'piece_index'], {}), '(FORMAT_HAVE, 5, 4, piece_index)\n', (1401, 1433), False, 'from struct import pack\n'), ((1590, 1639), 'struct.pack', 'pack', (['FORMAT_REQUEST', '(13)', '(6)', 'index', 'begin', 'length'], {}), '(FORMAT_REQUEST, 13, 6, index, begin, length)\n', (1594, 1639), False, 'from struct import pack\n'), ((1820, 1868), 'struct.pack', 'pack', (['FORMAT_CANCEL', '(13)', '(8)', 'index', 'begin', 'length'], {}), '(FORMAT_CANCEL, 13, 8, index, begin, length)\n', (1824, 1868), False, 'from struct import pack\n'), ((1905, 1941), 'struct.pack', 'pack', (['FORMAT_PORT', '(3)', '(9)', 'listen_port'], {}), '(FORMAT_PORT, 3, 9, listen_port)\n', (1909, 1941), False, 'from struct import pack\n'), ((2012, 2041), 'struct.unpack', 'unpack', (['FORMAT_CHOKE', 'message'], {}), '(FORMAT_CHOKE, message)\n', (2018, 2041), False, 'from struct import unpack\n'), ((2109, 2140), 'struct.unpack', 'unpack', (['FORMAT_UNCHOKE', 'message'], {}), '(FORMAT_UNCHOKE, message)\n', (2115, 2140), False, 'from struct import unpack\n'), ((2211, 2245), 'struct.unpack', 'unpack', (['FORMAT_INTERESTED', 'message'], {}), '(FORMAT_INTERESTED, message)\n', (2217, 2245), False, 'from struct import unpack\n'), ((2320, 2358), 'struct.unpack', 'unpack', (['FORMAT_NOT_INTERESTED', 'message'], {}), '(FORMAT_NOT_INTERESTED, message)\n', (2326, 2358), False, 'from struct import unpack\n'), ((2423, 2451), 'struct.unpack', 'unpack', (['FORMAT_HAVE', 'message'], {}), '(FORMAT_HAVE, message)\n', (2429, 2451), False, 'from struct import unpack\n')] |
import serial
import os
import json
from pprint import pprint
import mysql.connector
import time
import requests
# MySQL connection for the SmartKitchen database.
# NOTE(review): the commit()/close() calls at the bottom sit after an infinite
# loop and are unreachable — confirm whether per-iteration commits are needed.
mydb = mysql.connector.connect(
host="localhost",
user="max",
passwd="<PASSWORD>",
database="SmartKitchenDb"
)
# com: serial link that delivers RFID reads and S/G mode characters;
# com2: serial link for the barcode scanner.
com = serial.Serial('/dev/ttyUSB1', baudrate=9600, timeout=3.0)
com2 = serial.Serial('/dev/ttyUSB0', baudrate=9600, timeout=3.0)
barcode_scanned = False
user = ""
product_name = ""
rfid = ""
while True:
    # Declaring the remote serial connection:
    rsc = com.readline().strip()
    rsc2 = com2.readline().strip()
    rsc = rsc.decode('utf-8')
    # In my arduino code I first print 'UID tag :' before I print the RFID code. I did this for readablility when writing the arduino code
    # After reading this it strips away that string in front of the RFID code
    # After this it requests the name of the user from the database:
    # NOTE(review): lstrip() strips a *character set*, not a prefix — an RFID
    # code starting with any of "UID tag :" characters would be corrupted;
    # consider removeprefix/slicing instead. Behavior left unchanged here.
    if "UID tag :" in rsc:
        rfid = rsc.lstrip("UID tag :")
        try:
            # This sends a GET request to the system with the Laravel Database
            r = requests.get(f"http://192.168.1.243:8000/api/rfid/{rfid}")
            r_text = str(r)
            print("RFID lezen: " + r_text)
            r.raise_for_status()
            resp = json.loads(r.text)
            rfid = resp[0]["rfid"]
            user = resp[0]["name"]
        except requests.HTTPError as e:
            print(e.response.text)
    # This code first checks if the remote serial connection is not an empty byte and follows this check by checking if there is a username:
    if rsc2 != b'' and user != '':
        barcode_scanned = True
    # This sets the variable barcode_scanned to True and checks if the byte is not empty:
    if barcode_scanned == True and rsc2 != b'':
        barcode = str(rsc2, 'utf-8')
        try:
            # Here I use a GET request to the OpenFoodFacts database:
            r = requests.get(f'https://world.openfoodfacts.org/api/v0/product/{barcode}.json')
            r_text = str(r)
            print("Gegevens uit OpenFoodFacts API opvragen: " + r_text)
            r.raise_for_status()
            resp = json.loads(r.text)
            # Here I do a check if the product is in the database, if not it print the status_verbose which is just 'product not found'
            # If the product is in the database it gets the productname of the product and stores it in a variable:
            if resp["status_verbose"] != "product not found":
                product_name = str(resp["product"]["product_name"])
                barcode_scanned = False
            else:
                print(resp["status_verbose"])
        except requests.HTTPError as e:
            print(e.response.text)
    # Here I do a check if the serial connection reads an 'S' or a 'G', after this it checks if the productname is not empty.
    # Reading an 'S' means add to storagelist.
    # Reading a 'G' means add to grocerylist.
    if "S" in rsc and product_name != "":
        # Here I create a JSON with the data I need and send it to the Laravel API using a POST request
        # This POST request triggers an database insert with that data
        gooi_data = {'product_name':f'{product_name}', 'user_name':f'{user}'}
        d = requests.post(f"http://192.168.1.243:8000/api/rfid/{rfid}/create-storage", data=gooi_data)
        d_text = str(d)
        print("POST request naar de API: " + d_text)
    if "G" in rsc and product_name != "":
        # Here I create a JSON with the info I need and send it to the Laravel API using a POST request
        # This POST request triggers an database insert with that data
        gooi_data = {'product_name':f'{product_name}', 'user_name':f'{user}'}
        d = requests.post(f"http://192.168.1.243:8000/api/rfid/{rfid}/create-grocery", data=gooi_data)
        d_text = str(d)
        print("POST request naar de API: " + d_text)
    time.sleep(1)
# NOTE(review): unreachable — the while-loop above never exits.
mydb.commit()
mydb.close()
| [
"json.loads",
"requests.post",
"requests.get",
"time.sleep",
"serial.Serial"
] | [((248, 305), 'serial.Serial', 'serial.Serial', (['"""/dev/ttyUSB1"""'], {'baudrate': '(9600)', 'timeout': '(3.0)'}), "('/dev/ttyUSB1', baudrate=9600, timeout=3.0)\n", (261, 305), False, 'import serial\n'), ((313, 370), 'serial.Serial', 'serial.Serial', (['"""/dev/ttyUSB0"""'], {'baudrate': '(9600)', 'timeout': '(3.0)'}), "('/dev/ttyUSB0', baudrate=9600, timeout=3.0)\n", (326, 370), False, 'import serial\n'), ((3954, 3967), 'time.sleep', 'time.sleep', (['(1)'], {}), '(1)\n', (3964, 3967), False, 'import time\n'), ((3305, 3399), 'requests.post', 'requests.post', (['f"""http://192.168.1.243:8000/api/rfid/{rfid}/create-storage"""'], {'data': 'gooi_data'}), "(f'http://192.168.1.243:8000/api/rfid/{rfid}/create-storage',\n data=gooi_data)\n", (3318, 3399), False, 'import requests\n'), ((3781, 3875), 'requests.post', 'requests.post', (['f"""http://192.168.1.243:8000/api/rfid/{rfid}/create-grocery"""'], {'data': 'gooi_data'}), "(f'http://192.168.1.243:8000/api/rfid/{rfid}/create-grocery',\n data=gooi_data)\n", (3794, 3875), False, 'import requests\n'), ((1054, 1112), 'requests.get', 'requests.get', (['f"""http://192.168.1.243:8000/api/rfid/{rfid}"""'], {}), "(f'http://192.168.1.243:8000/api/rfid/{rfid}')\n", (1066, 1112), False, 'import requests\n'), ((1236, 1254), 'json.loads', 'json.loads', (['r.text'], {}), '(r.text)\n', (1246, 1254), False, 'import json\n'), ((1910, 1988), 'requests.get', 'requests.get', (['f"""https://world.openfoodfacts.org/api/v0/product/{barcode}.json"""'], {}), "(f'https://world.openfoodfacts.org/api/v0/product/{barcode}.json')\n", (1922, 1988), False, 'import requests\n'), ((2157, 2175), 'json.loads', 'json.loads', (['r.text'], {}), '(r.text)\n', (2167, 2175), False, 'import json\n')] |
import argparse, os
# CLI: a single positional argument pointing at the sphinx build output.
parser = argparse.ArgumentParser()
parser.add_argument('outdir', type=str, help='sphinx output directory')
args = parser.parse_args()
import re
# Matches the jupyter-widgets embed-amd.js <script> tag; generated pages can
# end up with several copies, and only one should survive.
duplicate_tag = '''(<script src="https://unpkg.com/@jupyter-widgets/html-manager@\^[0-9]*\.[0-9]*\.[0-9]*/dist/embed-amd.js"></script>)'''
bad1 = re.compile(duplicate_tag)
# NOTE(review): bad2 is compiled but never used below.
bad2 = re.compile(duplicate_tag+"(.*)"+duplicate_tag)
def dedupe_jupyter_widgets_manager(filename):
    """Rewrite *filename* so at most one embed-amd.js script tag remains."""
    with open(filename, 'rt') as source:
        html = source.read()
    count = len(bad1.findall(html))
    if count <= 1:
        print(f"PASSED [{count}]:", filename)
        return
    # Remove all but the last occurrence of the duplicated tag.
    deduped = bad1.sub("", html, count=count - 1)
    print(f"FIXING [{count}]:", filename)
    with open(filename, 'wt') as target:
        target.write(deduped)
def fixing_walker(filename):
    """Walk the output tree and dedupe every .html file found."""
    # NOTE(review): dirname() of a directory path yields its PARENT, so this
    # walks the directory *containing* outdir rather than outdir itself —
    # confirm whether that is intended.
    directory = os.path.dirname(os.path.abspath(filename))
    for dirpath, dirnames, filenames in os.walk(directory):
        for f in filenames:
            if f[-5:]==".html":
                this_file = os.path.join(dirpath, f)
                dedupe_jupyter_widgets_manager(this_file)
fixing_walker(args.outdir)
| [
"argparse.ArgumentParser",
"re.compile",
"os.path.join",
"os.path.abspath",
"os.walk"
] | [((31, 56), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {}), '()\n', (54, 56), False, 'import argparse, os\n'), ((315, 340), 're.compile', 're.compile', (['duplicate_tag'], {}), '(duplicate_tag)\n', (325, 340), False, 'import re\n'), ((348, 398), 're.compile', 're.compile', (["(duplicate_tag + '(.*)' + duplicate_tag)"], {}), "(duplicate_tag + '(.*)' + duplicate_tag)\n", (358, 398), False, 'import re\n'), ((867, 885), 'os.walk', 'os.walk', (['directory'], {}), '(directory)\n', (874, 885), False, 'import argparse, os\n'), ((803, 828), 'os.path.abspath', 'os.path.abspath', (['filename'], {}), '(filename)\n', (818, 828), False, 'import argparse, os\n'), ((948, 972), 'os.path.join', 'os.path.join', (['dirpath', 'f'], {}), '(dirpath, f)\n', (960, 972), False, 'import argparse, os\n')] |
from finance_manager.database.replaceable import ReplaceableObject as o
from finance_manager.database.views import account_description, p_list_string, p_sum_string
def _view():
    """Return the replaceable v_input_inc_other view definition.

    Joins other-income input rows to their account descriptions and sums
    the per-period columns into a single amount.
    """
    return o("v_input_inc_other", f"""
    SELECT i.inc_id, i.account, a.description as account_name, {account_description}, i.description, i.set_id,
    {p_list_string}, {p_sum_string} as amount
    FROM input_inc_other i
    LEFT OUTER JOIN fs_account a ON i.account = a.account""")
| [
"finance_manager.database.replaceable.ReplaceableObject"
] | [((190, 472), 'finance_manager.database.replaceable.ReplaceableObject', 'o', (['"""v_input_inc_other"""', 'f"""\n SELECT i.inc_id, i.account, a.description as account_name, {account_description}, i.description, i.set_id,\n {p_list_string}, {p_sum_string} as amount\n FROM input_inc_other i\n LEFT OUTER JOIN fs_account a ON i.account = a.account"""'], {}), '(\'v_input_inc_other\',\n f"""\n SELECT i.inc_id, i.account, a.description as account_name, {account_description}, i.description, i.set_id,\n {p_list_string}, {p_sum_string} as amount\n FROM input_inc_other i\n LEFT OUTER JOIN fs_account a ON i.account = a.account"""\n )\n', (191, 472), True, 'from finance_manager.database.replaceable import ReplaceableObject as o\n')] |
import re
import lxml.html
import click
import scrapelib
from common import Person
def elem_to_str(item, inside=False):
    """Render an element-like object as a short "<tag attrs> @ line N" summary.

    NOTE(review): *inside* is accepted for interface compatibility but unused.
    """
    rendered_attrs = " ".join(
        "{}='{}'".format(name, value) for name, value in item.attrib.items()
    )
    return "<{} {}> @ line {}".format(item.tag, rendered_attrs, item.sourceline)
class XPath:
    """An XPath expression plus validation bounds on the match count."""

    def __init__(self, xpath, *, min_items=1, max_items=None, num_items=None):
        self.xpath = xpath
        self.min_items = min_items
        self.max_items = max_items
        self.num_items = num_items

    def match(self, element, *, min_items=None, max_items=None, num_items=None):
        """Evaluate the expression on *element* and validate the match count.

        Per-call keyword arguments override the constructor defaults.
        Raises XPathError when the number of matches is out of bounds.
        (Refactored: the three bounds checks previously triplicated the
        error-formatting code; it now lives in one helper.)
        """
        items = element.xpath(self.xpath)
        num_items = self.num_items if num_items is None else num_items
        max_items = self.max_items if max_items is None else max_items
        min_items = self.min_items if min_items is None else min_items

        def _fail(expected):
            # Single place that formats the count-mismatch error.
            raise XPathError(
                f"{self.xpath} on {elem_to_str(element)} got {len(items)}, "
                f"expected {expected}"
            )

        if num_items is not None and len(items) != num_items:
            _fail(f"{num_items}")
        if min_items is not None and len(items) < min_items:
            _fail(f"at least {min_items}")
        if max_items is not None and len(items) > max_items:
            _fail(f"at most {max_items}")
        return items

    def match_one(self, element):
        """Return the single match as a string; errors unless exactly one."""
        return str(self.match(element, num_items=1)[0])
class NoSuchScraper(Exception):
    # Raised when no scraper entrypoint exists for the requested arguments
    # (e.g. a non-current session).
    pass
class XPathError(ValueError):
    # Raised by XPath.match when the matched-node count is out of bounds.
    pass
# @attr.s
# class ContactDetail:
# note = attr.ib()
# voice = attr.ib()
# email =attr.ib()
# fax = attr.ib()
# address = attr.ib()
# @attr.s
# class Person:
# name = attr.ib()
# state = attr.ib()
# party = attr.ib()
# district = attr.ib()
# chamber = attr.ib()
# image = attr.ib(default=None)
# given_name = attr.ib(default=None)
# family_name = attr.ib(default=None)
# links = attr.ib(default=attr.Factory(list))
# sources = attr.ib(default=attr.Factory(list))
# capitol_office = attr.ib(default=None)
# district_office = attr.ib(default=None)
class Scraper(scrapelib.Scraper):
    """Drives the fetch/parse loop over Page objects."""

    def fetch_page_data(self, page):
        """Fetch page.url and hand the raw response to the page."""
        print(f"fetching {page.url} for {page.__class__.__name__}")
        page.set_raw_data(self.get(page.url))

    def augment_item(self, item, subpages):
        """Merge data from each subpage into *item* and return it."""
        for make_subpage in subpages:
            subpage = make_subpage(item)
            self.fetch_page_data(subpage)
            item.update(subpage.get_data())
        return item

    def scrape(self, chamber, session):
        """Yield finished objects for the given chamber/session."""
        for page in self.start_scrape(chamber, session):
            self.fetch_page_data(page)
            for item in page.get_data():
                if page.subpages:
                    item = self.augment_item(item, page.subpages)
                yield self.to_object(item) if isinstance(item, dict) else item

    def to_object(self, item):
        """
        converts intermediate data (often in a dictionary) to a final object to be validated
        """
        return item

    def start_scrape(self, chamber, session):
        """
        yields one or more Page objects that will kick off the scrape.

        It may also raise a ValueError (TBD) when it does not have an appropriate entrypoint
        to scrape the requested data.
        """
        raise NotImplementedError()
class Page:
    """One fetchable page in a scrape: holds a URL and receives raw data."""

    # Default: no subpages. Fixed: Scraper.scrape reads page.subpages on every
    # start page, but only some subclasses defined it, so any page without the
    # attribute crashed with AttributeError.
    subpages = []

    def __init__(self, url):
        """
        a Page can be instantiated with a url & options (TBD) needed to fetch it
        """
        self.url = url

    def set_raw_data(self, raw_data):
        """ callback to handle raw data returned by grabbing the URL """
        self.raw_data = raw_data

    def get_data(self):
        """ return data extracted from this page and this page alone """
        raise NotImplementedError()
class HtmlPage:
    def set_raw_data(self, raw_data):
        """Store the response and parse its body into an lxml tree at self.root."""
        self.raw_data = raw_data
        self.root = lxml.html.fromstring(raw_data.content)
        # Resolve relative hrefs against this page's URL (self.url is set by
        # the concrete Page subclass).
        self.root.make_links_absolute(self.url)
class HtmlListPage(HtmlPage):
    """
    Simplification for HTML pages that get a list of items and process them.

    Instead of implementing get_data, a subclass sets ``xpath`` (an XPath
    instance) and optionally overrides ``process_item``.
    """

    xpath = None

    def get_data(self):
        if not self.xpath:
            raise NotImplementedError("must either provide xpath or override scrape")
        for matched in self.xpath.match(self.root):
            yield self.process_item(matched)

    def process_item(self, item):
        # Default: pass the matched element through unchanged.
        return item
class MDPersonDetail(HtmlPage):
    """Detail page for a single Maryland legislator."""

    def __init__(self, url):
        self.url = url

    def parse_address_block(self, block):
        """Split an office-info text block into address, phone, and fax parts.

        Lines before a "Phone" line are address text; "Phone"/"Fax" lines
        switch which bucket subsequent lines (including themselves) land in.
        Phone/fax numbers are then extracted as NNN-NNN-NNNN strings.
        """
        buckets = {"address": [], "phone": [], "fax": []}
        current = "address"
        for raw_line in block.splitlines():
            stripped = raw_line.strip()
            if not stripped:
                continue
            if stripped.startswith("Phone"):
                current = "phone"
            elif stripped.startswith("Fax"):
                current = "fax"
            buckets[current].append(stripped)
        number = re.compile(r"\d{3}-\d{3}-\d{4}")
        return {
            "address": "; ".join(buckets["address"]),
            "phones": [m for line in buckets["phone"] for m in number.findall(line)],
            "faxes": [m for line in buckets["fax"] for m in number.findall(line)],
        }

    def get_data(self):
        # The Annapolis/interim office blocks on the page could be run through
        # parse_address_block, but only the name is extracted for now.
        return dict(
            name=XPath("//h2/text()").match_one(self.root).split(" ", 1)[1],
        )
class MDPersonList(HtmlListPage):
    """Member index page listing the legislators of one Maryland chamber."""

    xpath = XPath("//div[@id='myDIV']//div[@class='p-0 member-index-cell']")
    subpages = [lambda item: MDPersonDetail(item["link"])]

    def __init__(self, url):
        self.url = url

    def process_item(self, item):
        """Turn one member cell into an intermediate dict."""
        dd_text = XPath(".//dd/text()").match(item)
        return dict(
            chamber="upper" if "senate" in self.url else "lower",
            image=XPath(".//img/@src").match_one(item),
            # dd entry 2 reads "District NN"; entry 4 is the party name.
            district=dd_text[2].strip().split()[1],
            party=dd_text[4].strip(),
            link=XPath(".//dd/a[1]/@href").match_one(item),
        )
class MDPersonScraper(Scraper):
    def start_scrape(self, chamber, session):
        """ This function yields one or more Page objects that will kick off the scrape.
        It may also raise a ValueError (TBD) when it does not have an appropriate entrypoint
        to scrape the requested data.
        """
        # Only the current member rosters are available on the MGA site.
        if session:
            raise NoSuchScraper("cannot scrape non-current sessions")
        if chamber == "upper":
            yield MDPersonList("http://mgaleg.maryland.gov/mgawebsite/Members/Index/senate")
        elif chamber == "lower":
            yield MDPersonList("http://mgaleg.maryland.gov/mgawebsite/Members/Index/house")
    def to_object(self, item):
        # Convert the intermediate dict built by MDPersonList/MDPersonDetail
        # into a Person object, recording the detail URL as link and source.
        p = Person(
            state="md",
            chamber=item["chamber"],
            name=item["name"],
            party=item["party"],
            image=item["image"],
            district=item["district"],
        )
        p.add_link(item["link"])
        p.add_source(item["link"])
        return p
@click.group()
def cli():
    # Root command group; subcommands are registered below via @cli.command().
    pass
@cli.command()
@click.argument("class_name")
@click.argument("url")
def sample(class_name, url):
    """Fetch a single page with the named Page class and print its data."""
    # implementation is a stub, this will be able to accept dotted paths once implemented
    Cls = globals()[class_name]
    page = Cls(url)
    s = Scraper()
    s.fetch_page_data(page)
    print(page.get_data())
@cli.command()
@click.option("--chamber", multiple=True, default=["upper", "lower"])
@click.option("--session", default=None)
def scrape(chamber, session):
    """Scrape each requested chamber and persist the resulting Person files."""
    for ch in chamber:
        for item in MDPersonScraper().scrape(ch, session):
            item.save("incoming/md/people")
if __name__ == "__main__":
    cli()
| [
"click.argument",
"click.group",
"click.option",
"common.Person",
"re.findall"
] | [((8317, 8330), 'click.group', 'click.group', ([], {}), '()\n', (8328, 8330), False, 'import click\n'), ((8369, 8397), 'click.argument', 'click.argument', (['"""class_name"""'], {}), "('class_name')\n", (8383, 8397), False, 'import click\n'), ((8399, 8420), 'click.argument', 'click.argument', (['"""url"""'], {}), "('url')\n", (8413, 8420), False, 'import click\n'), ((8683, 8751), 'click.option', 'click.option', (['"""--chamber"""'], {'multiple': '(True)', 'default': "['upper', 'lower']"}), "('--chamber', multiple=True, default=['upper', 'lower'])\n", (8695, 8751), False, 'import click\n'), ((8753, 8792), 'click.option', 'click.option', (['"""--session"""'], {'default': 'None'}), "('--session', default=None)\n", (8765, 8792), False, 'import click\n'), ((8014, 8150), 'common.Person', 'Person', ([], {'state': '"""md"""', 'chamber': "item['chamber']", 'name': "item['name']", 'party': "item['party']", 'image': "item['image']", 'district': "item['district']"}), "(state='md', chamber=item['chamber'], name=item['name'], party=item[\n 'party'], image=item['image'], district=item['district'])\n", (8020, 8150), False, 'from common import Person\n'), ((5565, 5605), 're.findall', 're.findall', (['"""\\\\d{3}-\\\\d{3}-\\\\d{4}"""', 'line'], {}), "('\\\\d{3}-\\\\d{3}-\\\\d{4}', line)\n", (5575, 5605), False, 'import re\n'), ((5722, 5762), 're.findall', 're.findall', (['"""\\\\d{3}-\\\\d{3}-\\\\d{4}"""', 'line'], {}), "('\\\\d{3}-\\\\d{3}-\\\\d{4}', line)\n", (5732, 5762), False, 'import re\n')] |
from flask import current_app
#This module is created for interaction with the Elasticsearch index
#Function that adds element to the index of Elasticsearch. Uses model as the SQLAlchemy model
def add_element_index(index, model):
    """Index a SQLAlchemy *model* instance in the Elasticsearch index *index*.

    Only the fields listed in the model's __searchit__ attribute are sent.
    Silently does nothing when no Elasticsearch server is configured, so the
    application keeps running without one.
    """
    if not current_app.elasticsearch:
        return
    # Build the document from the model's searchable fields
    # (was a manual loop; a dict comprehension is the idiomatic form).
    payload = {field: getattr(model, field) for field in model.__searchit__}
    current_app.elasticsearch.index(index=index, doc_type=index, id=model.id,
            body=payload)
#Function that removes indexed elements. Uses model as the SQLAlchemy model
def remove_element_from_index(index,model):
    """Delete the indexed document for *model* from index *index*.

    No-op when no Elasticsearch server is configured.
    """
    if not current_app.elasticsearch:
        return
    current_app.elasticsearch.delete(index=index, doc_type=index, id=model.id)
#Function that searches the fields specified to be searched in
#the models.py with the variable __searchit_
def search_index(index,query,page,per_page):
    """Full-text search over all fields of *index*, paginated.

    Returns (ids, total): the matching element IDs for the requested page
    and the total hit count. Returns ([], 0) when no server is configured.
    """
    if not current_app.elasticsearch:
        return [], 0
    search = current_app.elasticsearch.search(index=index, doc_type=index,
    body={'query':{'multi_match':{'query':query, 'fields': ['*']}},
    'from':(page -1)*per_page, 'size':per_page})
    #List comprehension used to get the IDs of elements found
    ids = [int(hit['_id']) for hit in search['hits']['hits']]
    #Return IDS and total number of elements from the elasticsearch
    # NOTE(review): on Elasticsearch 7+ hits.total is a dict
    # ({'value': N, 'relation': ...}), not an int — confirm the server version.
    return ids, search['hits']['total']
| [
"flask.current_app.elasticsearch.search",
"flask.current_app.elasticsearch.delete",
"flask.current_app.elasticsearch.index"
] | [((531, 622), 'flask.current_app.elasticsearch.index', 'current_app.elasticsearch.index', ([], {'index': 'index', 'doc_type': 'index', 'id': 'model.id', 'body': 'payload'}), '(index=index, doc_type=index, id=model.id,\n body=payload)\n', (562, 622), False, 'from flask import current_app\n'), ((825, 899), 'flask.current_app.elasticsearch.delete', 'current_app.elasticsearch.delete', ([], {'index': 'index', 'doc_type': 'index', 'id': 'model.id'}), '(index=index, doc_type=index, id=model.id)\n', (857, 899), False, 'from flask import current_app\n'), ((1126, 1312), 'flask.current_app.elasticsearch.search', 'current_app.elasticsearch.search', ([], {'index': 'index', 'doc_type': 'index', 'body': "{'query': {'multi_match': {'query': query, 'fields': ['*']}}, 'from': (page -\n 1) * per_page, 'size': per_page}"}), "(index=index, doc_type=index, body={'query':\n {'multi_match': {'query': query, 'fields': ['*']}}, 'from': (page - 1) *\n per_page, 'size': per_page})\n", (1158, 1312), False, 'from flask import current_app\n')] |
#
# Copyright (c) 2020 Idiap Research Institute, http://www.idiap.ch/
# Written by <NAME> <<EMAIL>>,
# <NAME> <<EMAIL>>
#
import unittest
import torch
from fast_transformers.attention.attention_layer import AttentionLayer
class TestAttentionLayer(unittest.TestCase):
    def _assert_sizes_attention(self, qshape, kshape, vshape):
        # Build a fake inner-attention callable that asserts the projected
        # query/key/value shapes and returns a correctly-shaped zero tensor.
        def inner(q, k, v, m1, m2, m3):
            self.assertEqual(q.shape, qshape)
            self.assertEqual(k.shape, kshape)
            self.assertEqual(v.shape, vshape)
            N, L, H, E = q.shape
            _, S, _, D = v.shape
            return v.new_zeros((N, L, H, D))
        return inner
    def test_forward(self):
        # Default projections: d_model=100 split evenly across 4 heads -> 25.
        att = AttentionLayer(
            self._assert_sizes_attention(
                (10, 5, 4, 25),
                (10, 8, 4, 25),
                (10, 8, 4, 25)
            ),
            100,
            4
        )
        v = att(
            torch.rand(10, 5, 100),
            torch.rand(10, 8, 100),
            torch.rand(10, 8, 100),
            None, None, None
        )
        self.assertEqual(v.shape, (10, 5, 100))
        # Explicit d_keys/d_values override the per-head dimensions.
        att = AttentionLayer(
            self._assert_sizes_attention(
                (10, 5, 4, 32),
                (10, 8, 4, 32),
                (10, 8, 4, 64)
            ),
            100,
            4,
            d_keys=32,
            d_values=64
        )
        v = att(
            torch.rand(10, 5, 100),
            torch.rand(10, 8, 100),
            torch.rand(10, 8, 100),
            None, None, None
        )
        self.assertEqual(v.shape, (10, 5, 100))
if __name__ == "__main__":
    unittest.main()
| [
"unittest.main",
"torch.rand"
] | [((1627, 1642), 'unittest.main', 'unittest.main', ([], {}), '()\n', (1640, 1642), False, 'import unittest\n'), ((927, 949), 'torch.rand', 'torch.rand', (['(10)', '(5)', '(100)'], {}), '(10, 5, 100)\n', (937, 949), False, 'import torch\n'), ((963, 985), 'torch.rand', 'torch.rand', (['(10)', '(8)', '(100)'], {}), '(10, 8, 100)\n', (973, 985), False, 'import torch\n'), ((999, 1021), 'torch.rand', 'torch.rand', (['(10)', '(8)', '(100)'], {}), '(10, 8, 100)\n', (1009, 1021), False, 'import torch\n'), ((1411, 1433), 'torch.rand', 'torch.rand', (['(10)', '(5)', '(100)'], {}), '(10, 5, 100)\n', (1421, 1433), False, 'import torch\n'), ((1447, 1469), 'torch.rand', 'torch.rand', (['(10)', '(8)', '(100)'], {}), '(10, 8, 100)\n', (1457, 1469), False, 'import torch\n'), ((1483, 1505), 'torch.rand', 'torch.rand', (['(10)', '(8)', '(100)'], {}), '(10, 8, 100)\n', (1493, 1505), False, 'import torch\n')] |
import numpy as np
import torch
import matplotlib.animation as animation
import matplotlib.pyplot as plt
from PIL import Image
import ThinPlateSpline as TPS
# 2048x2048.jpg size: 2048 x 2048
def on_press(event):
    """Mouse-press handler: map the clicked pixel to coordinates via TPS.

    p holds control points in image pixel space, v the corresponding target
    values (presumably longitude/latitude pairs — confirm against the data
    source). NOTE(review): the control points and the TPS system are rebuilt
    on every click; they could be computed once at module load.
    """
    p = np.array([
        [693.55, 531.26],
        [1069.85, 1243.04],
        [1243.74, 1238.69],
        [472.82, 664.85],
        [552.50, 1460.07],
        [1021.03, 368.02],
        [1260.78, 1571.90],
        [93.16, 911.26],
        [234.85, 914.14],
        [383.34, 1140.97],
        [375.46, 853.36],
        [256.73, 597.61],
        [338.32, 502.28],
        [754.67, 337.95],
        [1120.42, 1797.99],
        [1521.97, 1655.66],
        [1371.15, 1832.87],
        [1522.78, 1315.94],
        [1116.38, 754.82],
        [1165.72, 1162.44],
        [1024.00, 1024.00]])
    v = np.array([
        [121.52, 25.00],
        [142.31, -10.74],
        [150.81, -10.63],
        [109.60, 18.24],
        [113.58, -22.72],
        [139.92, 34.87],
        [153.25, -28.63],
        [45.29, -25.83],
        [95.26, 5.30],
        [105.86, -6.01],
        [104.90, 8.46],
        [96.95, 16.70],
        [96.81, 27.64],
        [122.71, 37.11],
        [147.14, -43.12],
        [172.68, -34.63],
        [167.75, -42.28],
        [166.68, -14.63],
        [144.68, 13.25],
        [146.93, -6.96],
        [141.01, 0.09]])
    # TPS.solve_system expects batched tensors of shape (1, n_points, 2).
    p = torch.Tensor(p.reshape([1, p.shape[0], 2]))
    v = torch.Tensor(v.reshape([1, v.shape[0], 2]))
    T = TPS.solve_system(p, v)
    point = np.array([event.xdata, event.ydata])
    point_T = TPS.point_transform(point, T, p)
    print("Longitude:", point_T[0, 0, 0])
    print("Latitude:", point_T[0, 1, 0])
if __name__ == '__main__':
    print("It is suggested that clicking on the image close to the middle position will be more accurate.")
    fig = plt.figure()
    img = Image.open('2048x2048.jpg')
    plt.imshow(img, animated= True)
    # Every mouse press on the figure triggers a coordinate lookup.
    fig.canvas.mpl_connect('button_press_event', on_press)
    plt.show()
| [
"matplotlib.pyplot.imshow",
"PIL.Image.open",
"numpy.array",
"matplotlib.pyplot.figure",
"ThinPlateSpline.solve_system",
"ThinPlateSpline.point_transform",
"matplotlib.pyplot.show"
] | [((224, 651), 'numpy.array', 'np.array', (['[[693.55, 531.26], [1069.85, 1243.04], [1243.74, 1238.69], [472.82, 664.85],\n [552.5, 1460.07], [1021.03, 368.02], [1260.78, 1571.9], [93.16, 911.26],\n [234.85, 914.14], [383.34, 1140.97], [375.46, 853.36], [256.73, 597.61],\n [338.32, 502.28], [754.67, 337.95], [1120.42, 1797.99], [1521.97, \n 1655.66], [1371.15, 1832.87], [1522.78, 1315.94], [1116.38, 754.82], [\n 1165.72, 1162.44], [1024.0, 1024.0]]'], {}), '([[693.55, 531.26], [1069.85, 1243.04], [1243.74, 1238.69], [472.82,\n 664.85], [552.5, 1460.07], [1021.03, 368.02], [1260.78, 1571.9], [93.16,\n 911.26], [234.85, 914.14], [383.34, 1140.97], [375.46, 853.36], [256.73,\n 597.61], [338.32, 502.28], [754.67, 337.95], [1120.42, 1797.99], [\n 1521.97, 1655.66], [1371.15, 1832.87], [1522.78, 1315.94], [1116.38, \n 754.82], [1165.72, 1162.44], [1024.0, 1024.0]])\n', (232, 651), True, 'import numpy as np\n'), ((812, 1200), 'numpy.array', 'np.array', (['[[121.52, 25.0], [142.31, -10.74], [150.81, -10.63], [109.6, 18.24], [\n 113.58, -22.72], [139.92, 34.87], [153.25, -28.63], [45.29, -25.83], [\n 95.26, 5.3], [105.86, -6.01], [104.9, 8.46], [96.95, 16.7], [96.81, \n 27.64], [122.71, 37.11], [147.14, -43.12], [172.68, -34.63], [167.75, -\n 42.28], [166.68, -14.63], [144.68, 13.25], [146.93, -6.96], [141.01, 0.09]]'], {}), '([[121.52, 25.0], [142.31, -10.74], [150.81, -10.63], [109.6, 18.24\n ], [113.58, -22.72], [139.92, 34.87], [153.25, -28.63], [45.29, -25.83],\n [95.26, 5.3], [105.86, -6.01], [104.9, 8.46], [96.95, 16.7], [96.81, \n 27.64], [122.71, 37.11], [147.14, -43.12], [172.68, -34.63], [167.75, -\n 42.28], [166.68, -14.63], [144.68, 13.25], [146.93, -6.96], [141.01, 0.09]]\n )\n', (820, 1200), True, 'import numpy as np\n'), ((1465, 1487), 'ThinPlateSpline.solve_system', 'TPS.solve_system', (['p', 'v'], {}), '(p, v)\n', (1481, 1487), True, 'import ThinPlateSpline as TPS\n'), ((1500, 1536), 'numpy.array', 'np.array', (['[event.xdata, event.ydata]'], {}), 
'([event.xdata, event.ydata])\n', (1508, 1536), True, 'import numpy as np\n'), ((1551, 1583), 'ThinPlateSpline.point_transform', 'TPS.point_transform', (['point', 'T', 'p'], {}), '(point, T, p)\n', (1570, 1583), True, 'import ThinPlateSpline as TPS\n'), ((1813, 1825), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (1823, 1825), True, 'import matplotlib.pyplot as plt\n'), ((1836, 1863), 'PIL.Image.open', 'Image.open', (['"""2048x2048.jpg"""'], {}), "('2048x2048.jpg')\n", (1846, 1863), False, 'from PIL import Image\n'), ((1868, 1898), 'matplotlib.pyplot.imshow', 'plt.imshow', (['img'], {'animated': '(True)'}), '(img, animated=True)\n', (1878, 1898), True, 'import matplotlib.pyplot as plt\n'), ((1963, 1973), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (1971, 1973), True, 'import matplotlib.pyplot as plt\n')] |
# Generated by Django 3.2 on 2021-08-25 14:44
import django.contrib.gis.db.models.fields
from django.db import migrations, models
import django.db.models.deletion
import django.utils.timezone
import fontawesome_5.fields
class Migration(migrations.Migration):
    """Initial schema for the core app: message tables (encoded/decoded AIS,
    DAB, LoRaWAN, gateway metadata) plus the geo-annotation models that each
    link back to at most one source message."""
    initial = True
    dependencies = [
    ]
    operations = [
        # Raw AIS sentence exactly as received, with receiver and timestamp.
        migrations.CreateModel(
            name='aisEncodedModel',
            fields=[
                ('id', models.AutoField(primary_key=True, serialize=False)),
                ('created_at', models.DateTimeField(auto_now_add=True)),
                ('updated_at', models.DateTimeField(auto_now=True)),
                ('received_from', models.CharField(max_length=128)),
                ('received_at', models.DateTimeField(default=django.utils.timezone.now)),
                ('message', models.CharField(max_length=256)),
            ],
            options={
                'verbose_name': 'Encoded AIS message',
                'verbose_name_plural': 'Encoded AIS messages',
            },
        ),
        # DAB message payload with message/ship identifiers.
        migrations.CreateModel(
            name='dabModel',
            fields=[
                ('id', models.AutoField(primary_key=True, serialize=False)),
                ('created_at', models.DateTimeField(auto_now_add=True)),
                ('updated_at', models.DateTimeField(auto_now=True)),
                ('message_id', models.IntegerField(null=True)),
                ('message_type', models.IntegerField()),
                ('message', models.CharField(max_length=256)),
                ('ship_id', models.CharField(max_length=256)),
            ],
            options={
                'verbose_name': 'DAB message',
                'verbose_name_plural': 'DAB messages',
            },
        ),
        # Stand-alone Font Awesome icon selection (fontawesome_5 field type).
        migrations.CreateModel(
            name='FontAwesomeIcon',
            fields=[
                ('id', models.AutoField(primary_key=True, serialize=False)),
                ('icon', fontawesome_5.fields.IconField(blank=True, max_length=60)),
            ],
        ),
        # Per-gateway reception metadata (signal quality and identifiers).
        migrations.CreateModel(
            name='gatewayModel',
            fields=[
                ('id', models.AutoField(primary_key=True, serialize=False)),
                ('created_at', models.DateTimeField(auto_now_add=True)),
                ('updated_at', models.DateTimeField(auto_now=True)),
                ('rssi', models.IntegerField(blank=True, null=True)),
                ('snr', models.IntegerField(blank=True, null=True)),
                ('gateway_id', models.CharField(blank=True, max_length=256, null=True)),
                ('gateway_eui', models.CharField(blank=True, max_length=256, null=True)),
            ],
            options={
                'abstract': False,
            },
        ),
        # LoRaWAN message with an optional GIS point location; hdop/alt are
        # presumably GPS precision/altitude values — confirm against the model.
        migrations.CreateModel(
            name='lorawanModel',
            fields=[
                ('id', models.AutoField(primary_key=True, serialize=False)),
                ('created_at', models.DateTimeField(auto_now_add=True)),
                ('updated_at', models.DateTimeField(auto_now=True)),
                ('ack', models.IntegerField(blank=True, null=True, verbose_name='Acknowledgement')),
                ('msg', models.CharField(blank=True, max_length=256, null=True, verbose_name='Message')),
                ('hdop', models.DecimalField(blank=True, decimal_places=2, max_digits=19, null=True)),
                ('alt', models.DecimalField(blank=True, decimal_places=2, max_digits=19, null=True)),
                ('geom', django.contrib.gis.db.models.fields.PointField(blank=True, null=True, srid=4326, verbose_name='Location')),
            ],
            options={
                'verbose_name': 'LoRaWAN message',
                'verbose_name_plural': 'LoRaWAN messages',
            },
        ),
        # Decoded AIS message; multi-table inheritance from aisEncodedModel
        # via the auto-created parent-link one-to-one field.
        migrations.CreateModel(
            name='aisDecodedModel',
            fields=[
                ('aisencodedmodel_ptr', models.OneToOneField(auto_created=True, on_delete=django.db.models.deletion.CASCADE, parent_link=True, primary_key=True, serialize=False, to='core.aisencodedmodel')),
                ('mmsi', models.IntegerField(null=True)),
                ('name', models.CharField(blank=True, max_length=128, null=True, verbose_name='Shipname')),
                ('geom', django.contrib.gis.db.models.fields.PointField(blank=True, null=True, srid=4326, verbose_name='Location (x,y)')),
                ('course', models.FloatField(blank=True, null=True, verbose_name='Course')),
                ('ack', models.IntegerField(blank=True, null=True, verbose_name='Acknowledgement')),
                ('msg', models.IntegerField(blank=True, null=True, verbose_name='Message')),
                ('rssi', models.IntegerField(blank=True, null=True, verbose_name='RSSI')),
            ],
            options={
                'verbose_name': 'Decoded AIS message',
                'verbose_name_plural': 'Decoded AIS messages',
            },
            bases=('core.aisencodedmodel',),
        ),
        # Join table linking a LoRaWAN message to each gateway that received it.
        migrations.CreateModel(
            name='lorawanGatewayConnectionModel',
            fields=[
                ('id', models.AutoField(primary_key=True, serialize=False)),
                ('created_at', models.DateTimeField(auto_now_add=True)),
                ('updated_at', models.DateTimeField(auto_now=True)),
                ('gateway', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='core.gatewaymodel')),
                ('lorawan', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='core.lorawanmodel')),
            ],
            options={
                'abstract': False,
            },
        ),
        # Geo annotation: polygon area, optionally tied to one source message.
        migrations.CreateModel(
            name='geoPolygonModel',
            fields=[
                ('id', models.AutoField(primary_key=True, serialize=False)),
                ('created_at', models.DateTimeField(auto_now_add=True)),
                ('updated_at', models.DateTimeField(auto_now=True)),
                ('font_awesome_iconcolor', models.CharField(max_length=256)),
                ('polygon', django.contrib.gis.db.models.fields.PolygonField(blank=True, null=True, srid=4326)),
                ('message', models.CharField(max_length=64)),
                ('dab', models.OneToOneField(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, to='core.dabmodel')),
                ('lorawan', models.OneToOneField(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, to='core.lorawanmodel')),
                ('aisDecoded', models.OneToOneField(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, to='core.aisdecodedmodel')),
            ],
            options={
                'verbose_name': 'Geo Polygon Message',
                'verbose_name_plural': 'Geo Polygon Messages',
            },
        ),
        # Geo annotation: single point location.
        migrations.CreateModel(
            name='geoPointModel',
            fields=[
                ('id', models.AutoField(primary_key=True, serialize=False)),
                ('created_at', models.DateTimeField(auto_now_add=True)),
                ('updated_at', models.DateTimeField(auto_now=True)),
                ('font_awesome_iconcolor', models.CharField(max_length=256)),
                ('location', django.contrib.gis.db.models.fields.PointField(blank=True, null=True, srid=4326, verbose_name='Pivot')),
                ('message', models.CharField(max_length=64)),
                ('dab', models.OneToOneField(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, to='core.dabmodel')),
                ('lorawan', models.OneToOneField(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, to='core.lorawanmodel')),
                ('aisDecoded', models.OneToOneField(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, to='core.aisdecodedmodel')),
            ],
            options={
                'verbose_name': 'Geo Point Message',
                'verbose_name_plural': 'Geo Point Messages',
            },
        ),
        # Geo annotation: message text only, no geometry.
        migrations.CreateModel(
            name='geoMessageModel',
            fields=[
                ('id', models.AutoField(primary_key=True, serialize=False)),
                ('created_at', models.DateTimeField(auto_now_add=True)),
                ('updated_at', models.DateTimeField(auto_now=True)),
                ('font_awesome_iconcolor', models.CharField(max_length=256)),
                ('message', models.CharField(max_length=64)),
                ('dab', models.OneToOneField(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, to='core.dabmodel')),
                ('lorawan', models.OneToOneField(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, to='core.lorawanmodel')),
                ('aisDecoded', models.OneToOneField(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, to='core.aisdecodedmodel')),
            ],
            options={
                'verbose_name': 'Geo Message',
                'verbose_name_plural': 'Geo Messages',
            },
        ),
        # Geo annotation: circle defined by a pivot point plus a radius.
        migrations.CreateModel(
            name='geoCircleModel',
            fields=[
                ('id', models.AutoField(primary_key=True, serialize=False)),
                ('created_at', models.DateTimeField(auto_now_add=True)),
                ('updated_at', models.DateTimeField(auto_now=True)),
                ('font_awesome_iconcolor', models.CharField(max_length=256)),
                ('location', django.contrib.gis.db.models.fields.PointField(blank=True, null=True, srid=4326, verbose_name='Pivot')),
                ('radius', models.DecimalField(blank=True, decimal_places=2, max_digits=20, null=True)),
                ('message', models.CharField(max_length=64)),
                ('dab', models.OneToOneField(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, to='core.dabmodel')),
                ('lorawan', models.OneToOneField(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, to='core.lorawanmodel')),
                ('aisDecoded', models.OneToOneField(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, to='core.aisdecodedmodel')),
            ],
            options={
                'verbose_name': 'Geo Circle Message',
                'verbose_name_plural': 'Geo Circle Messages',
            },
        ),
    ]
| [
"django.db.models.OneToOneField",
"django.db.models.FloatField",
"django.db.models.IntegerField",
"django.db.models.ForeignKey",
"django.db.models.AutoField",
"django.db.models.DateTimeField",
"django.db.models.DecimalField",
"django.db.models.CharField"
] | [((442, 493), 'django.db.models.AutoField', 'models.AutoField', ([], {'primary_key': '(True)', 'serialize': '(False)'}), '(primary_key=True, serialize=False)\n', (458, 493), False, 'from django.db import migrations, models\n'), ((527, 566), 'django.db.models.DateTimeField', 'models.DateTimeField', ([], {'auto_now_add': '(True)'}), '(auto_now_add=True)\n', (547, 566), False, 'from django.db import migrations, models\n'), ((600, 635), 'django.db.models.DateTimeField', 'models.DateTimeField', ([], {'auto_now': '(True)'}), '(auto_now=True)\n', (620, 635), False, 'from django.db import migrations, models\n'), ((672, 704), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(128)'}), '(max_length=128)\n', (688, 704), False, 'from django.db import migrations, models\n'), ((739, 794), 'django.db.models.DateTimeField', 'models.DateTimeField', ([], {'default': 'django.utils.timezone.now'}), '(default=django.utils.timezone.now)\n', (759, 794), False, 'from django.db import migrations, models\n'), ((825, 857), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(256)'}), '(max_length=256)\n', (841, 857), False, 'from django.db import migrations, models\n'), ((1146, 1197), 'django.db.models.AutoField', 'models.AutoField', ([], {'primary_key': '(True)', 'serialize': '(False)'}), '(primary_key=True, serialize=False)\n', (1162, 1197), False, 'from django.db import migrations, models\n'), ((1231, 1270), 'django.db.models.DateTimeField', 'models.DateTimeField', ([], {'auto_now_add': '(True)'}), '(auto_now_add=True)\n', (1251, 1270), False, 'from django.db import migrations, models\n'), ((1304, 1339), 'django.db.models.DateTimeField', 'models.DateTimeField', ([], {'auto_now': '(True)'}), '(auto_now=True)\n', (1324, 1339), False, 'from django.db import migrations, models\n'), ((1373, 1403), 'django.db.models.IntegerField', 'models.IntegerField', ([], {'null': '(True)'}), '(null=True)\n', (1392, 1403), False, 'from django.db import migrations, 
models\n'), ((1439, 1460), 'django.db.models.IntegerField', 'models.IntegerField', ([], {}), '()\n', (1458, 1460), False, 'from django.db import migrations, models\n'), ((1491, 1523), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(256)'}), '(max_length=256)\n', (1507, 1523), False, 'from django.db import migrations, models\n'), ((1554, 1586), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(256)'}), '(max_length=256)\n', (1570, 1586), False, 'from django.db import migrations, models\n'), ((1866, 1917), 'django.db.models.AutoField', 'models.AutoField', ([], {'primary_key': '(True)', 'serialize': '(False)'}), '(primary_key=True, serialize=False)\n', (1882, 1917), False, 'from django.db import migrations, models\n'), ((2140, 2191), 'django.db.models.AutoField', 'models.AutoField', ([], {'primary_key': '(True)', 'serialize': '(False)'}), '(primary_key=True, serialize=False)\n', (2156, 2191), False, 'from django.db import migrations, models\n'), ((2225, 2264), 'django.db.models.DateTimeField', 'models.DateTimeField', ([], {'auto_now_add': '(True)'}), '(auto_now_add=True)\n', (2245, 2264), False, 'from django.db import migrations, models\n'), ((2298, 2333), 'django.db.models.DateTimeField', 'models.DateTimeField', ([], {'auto_now': '(True)'}), '(auto_now=True)\n', (2318, 2333), False, 'from django.db import migrations, models\n'), ((2361, 2403), 'django.db.models.IntegerField', 'models.IntegerField', ([], {'blank': '(True)', 'null': '(True)'}), '(blank=True, null=True)\n', (2380, 2403), False, 'from django.db import migrations, models\n'), ((2430, 2472), 'django.db.models.IntegerField', 'models.IntegerField', ([], {'blank': '(True)', 'null': '(True)'}), '(blank=True, null=True)\n', (2449, 2472), False, 'from django.db import migrations, models\n'), ((2506, 2561), 'django.db.models.CharField', 'models.CharField', ([], {'blank': '(True)', 'max_length': '(256)', 'null': '(True)'}), '(blank=True, max_length=256, null=True)\n', 
(2522, 2561), False, 'from django.db import migrations, models\n'), ((2596, 2651), 'django.db.models.CharField', 'models.CharField', ([], {'blank': '(True)', 'max_length': '(256)', 'null': '(True)'}), '(blank=True, max_length=256, null=True)\n', (2612, 2651), False, 'from django.db import migrations, models\n'), ((2861, 2912), 'django.db.models.AutoField', 'models.AutoField', ([], {'primary_key': '(True)', 'serialize': '(False)'}), '(primary_key=True, serialize=False)\n', (2877, 2912), False, 'from django.db import migrations, models\n'), ((2946, 2985), 'django.db.models.DateTimeField', 'models.DateTimeField', ([], {'auto_now_add': '(True)'}), '(auto_now_add=True)\n', (2966, 2985), False, 'from django.db import migrations, models\n'), ((3019, 3054), 'django.db.models.DateTimeField', 'models.DateTimeField', ([], {'auto_now': '(True)'}), '(auto_now=True)\n', (3039, 3054), False, 'from django.db import migrations, models\n'), ((3081, 3155), 'django.db.models.IntegerField', 'models.IntegerField', ([], {'blank': '(True)', 'null': '(True)', 'verbose_name': '"""Acknowledgement"""'}), "(blank=True, null=True, verbose_name='Acknowledgement')\n", (3100, 3155), False, 'from django.db import migrations, models\n'), ((3182, 3261), 'django.db.models.CharField', 'models.CharField', ([], {'blank': '(True)', 'max_length': '(256)', 'null': '(True)', 'verbose_name': '"""Message"""'}), "(blank=True, max_length=256, null=True, verbose_name='Message')\n", (3198, 3261), False, 'from django.db import migrations, models\n'), ((3289, 3364), 'django.db.models.DecimalField', 'models.DecimalField', ([], {'blank': '(True)', 'decimal_places': '(2)', 'max_digits': '(19)', 'null': '(True)'}), '(blank=True, decimal_places=2, max_digits=19, null=True)\n', (3308, 3364), False, 'from django.db import migrations, models\n'), ((3391, 3466), 'django.db.models.DecimalField', 'models.DecimalField', ([], {'blank': '(True)', 'decimal_places': '(2)', 'max_digits': '(19)', 'null': '(True)'}), '(blank=True, 
decimal_places=2, max_digits=19, null=True)\n', (3410, 3466), False, 'from django.db import migrations, models\n'), ((3904, 4078), 'django.db.models.OneToOneField', 'models.OneToOneField', ([], {'auto_created': '(True)', 'on_delete': 'django.db.models.deletion.CASCADE', 'parent_link': '(True)', 'primary_key': '(True)', 'serialize': '(False)', 'to': '"""core.aisencodedmodel"""'}), "(auto_created=True, on_delete=django.db.models.deletion\n .CASCADE, parent_link=True, primary_key=True, serialize=False, to=\n 'core.aisencodedmodel')\n", (3924, 4078), False, 'from django.db import migrations, models\n'), ((4096, 4126), 'django.db.models.IntegerField', 'models.IntegerField', ([], {'null': '(True)'}), '(null=True)\n', (4115, 4126), False, 'from django.db import migrations, models\n'), ((4154, 4239), 'django.db.models.CharField', 'models.CharField', ([], {'blank': '(True)', 'max_length': '(128)', 'null': '(True)', 'verbose_name': '"""Shipname"""'}), "(blank=True, max_length=128, null=True, verbose_name='Shipname'\n )\n", (4170, 4239), False, 'from django.db import migrations, models\n'), ((4403, 4466), 'django.db.models.FloatField', 'models.FloatField', ([], {'blank': '(True)', 'null': '(True)', 'verbose_name': '"""Course"""'}), "(blank=True, null=True, verbose_name='Course')\n", (4420, 4466), False, 'from django.db import migrations, models\n'), ((4493, 4567), 'django.db.models.IntegerField', 'models.IntegerField', ([], {'blank': '(True)', 'null': '(True)', 'verbose_name': '"""Acknowledgement"""'}), "(blank=True, null=True, verbose_name='Acknowledgement')\n", (4512, 4567), False, 'from django.db import migrations, models\n'), ((4594, 4660), 'django.db.models.IntegerField', 'models.IntegerField', ([], {'blank': '(True)', 'null': '(True)', 'verbose_name': '"""Message"""'}), "(blank=True, null=True, verbose_name='Message')\n", (4613, 4660), False, 'from django.db import migrations, models\n'), ((4688, 4751), 'django.db.models.IntegerField', 'models.IntegerField', ([], 
{'blank': '(True)', 'null': '(True)', 'verbose_name': '"""RSSI"""'}), "(blank=True, null=True, verbose_name='RSSI')\n", (4707, 4751), False, 'from django.db import migrations, models\n'), ((5106, 5157), 'django.db.models.AutoField', 'models.AutoField', ([], {'primary_key': '(True)', 'serialize': '(False)'}), '(primary_key=True, serialize=False)\n', (5122, 5157), False, 'from django.db import migrations, models\n'), ((5191, 5230), 'django.db.models.DateTimeField', 'models.DateTimeField', ([], {'auto_now_add': '(True)'}), '(auto_now_add=True)\n', (5211, 5230), False, 'from django.db import migrations, models\n'), ((5264, 5299), 'django.db.models.DateTimeField', 'models.DateTimeField', ([], {'auto_now': '(True)'}), '(auto_now=True)\n', (5284, 5299), False, 'from django.db import migrations, models\n'), ((5330, 5421), 'django.db.models.ForeignKey', 'models.ForeignKey', ([], {'on_delete': 'django.db.models.deletion.CASCADE', 'to': '"""core.gatewaymodel"""'}), "(on_delete=django.db.models.deletion.CASCADE, to=\n 'core.gatewaymodel')\n", (5347, 5421), False, 'from django.db import migrations, models\n'), ((5447, 5538), 'django.db.models.ForeignKey', 'models.ForeignKey', ([], {'on_delete': 'django.db.models.deletion.CASCADE', 'to': '"""core.lorawanmodel"""'}), "(on_delete=django.db.models.deletion.CASCADE, to=\n 'core.lorawanmodel')\n", (5464, 5538), False, 'from django.db import migrations, models\n'), ((5746, 5797), 'django.db.models.AutoField', 'models.AutoField', ([], {'primary_key': '(True)', 'serialize': '(False)'}), '(primary_key=True, serialize=False)\n', (5762, 5797), False, 'from django.db import migrations, models\n'), ((5831, 5870), 'django.db.models.DateTimeField', 'models.DateTimeField', ([], {'auto_now_add': '(True)'}), '(auto_now_add=True)\n', (5851, 5870), False, 'from django.db import migrations, models\n'), ((5904, 5939), 'django.db.models.DateTimeField', 'models.DateTimeField', ([], {'auto_now': '(True)'}), '(auto_now=True)\n', (5924, 5939), False, 
'from django.db import migrations, models\n'), ((5985, 6017), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(256)'}), '(max_length=256)\n', (6001, 6017), False, 'from django.db import migrations, models\n'), ((6161, 6192), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(64)'}), '(max_length=64)\n', (6177, 6192), False, 'from django.db import migrations, models\n'), ((6219, 6332), 'django.db.models.OneToOneField', 'models.OneToOneField', ([], {'blank': '(True)', 'null': '(True)', 'on_delete': 'django.db.models.deletion.CASCADE', 'to': '"""core.dabmodel"""'}), "(blank=True, null=True, on_delete=django.db.models.\n deletion.CASCADE, to='core.dabmodel')\n", (6239, 6332), False, 'from django.db import migrations, models\n'), ((6358, 6475), 'django.db.models.OneToOneField', 'models.OneToOneField', ([], {'blank': '(True)', 'null': '(True)', 'on_delete': 'django.db.models.deletion.CASCADE', 'to': '"""core.lorawanmodel"""'}), "(blank=True, null=True, on_delete=django.db.models.\n deletion.CASCADE, to='core.lorawanmodel')\n", (6378, 6475), False, 'from django.db import migrations, models\n'), ((6504, 6624), 'django.db.models.OneToOneField', 'models.OneToOneField', ([], {'blank': '(True)', 'null': '(True)', 'on_delete': 'django.db.models.deletion.CASCADE', 'to': '"""core.aisdecodedmodel"""'}), "(blank=True, null=True, on_delete=django.db.models.\n deletion.CASCADE, to='core.aisdecodedmodel')\n", (6524, 6624), False, 'from django.db import migrations, models\n'), ((6913, 6964), 'django.db.models.AutoField', 'models.AutoField', ([], {'primary_key': '(True)', 'serialize': '(False)'}), '(primary_key=True, serialize=False)\n', (6929, 6964), False, 'from django.db import migrations, models\n'), ((6998, 7037), 'django.db.models.DateTimeField', 'models.DateTimeField', ([], {'auto_now_add': '(True)'}), '(auto_now_add=True)\n', (7018, 7037), False, 'from django.db import migrations, models\n'), ((7071, 7106), 
'django.db.models.DateTimeField', 'models.DateTimeField', ([], {'auto_now': '(True)'}), '(auto_now=True)\n', (7091, 7106), False, 'from django.db import migrations, models\n'), ((7152, 7184), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(256)'}), '(max_length=256)\n', (7168, 7184), False, 'from django.db import migrations, models\n'), ((7349, 7380), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(64)'}), '(max_length=64)\n', (7365, 7380), False, 'from django.db import migrations, models\n'), ((7407, 7520), 'django.db.models.OneToOneField', 'models.OneToOneField', ([], {'blank': '(True)', 'null': '(True)', 'on_delete': 'django.db.models.deletion.CASCADE', 'to': '"""core.dabmodel"""'}), "(blank=True, null=True, on_delete=django.db.models.\n deletion.CASCADE, to='core.dabmodel')\n", (7427, 7520), False, 'from django.db import migrations, models\n'), ((7546, 7663), 'django.db.models.OneToOneField', 'models.OneToOneField', ([], {'blank': '(True)', 'null': '(True)', 'on_delete': 'django.db.models.deletion.CASCADE', 'to': '"""core.lorawanmodel"""'}), "(blank=True, null=True, on_delete=django.db.models.\n deletion.CASCADE, to='core.lorawanmodel')\n", (7566, 7663), False, 'from django.db import migrations, models\n'), ((7692, 7812), 'django.db.models.OneToOneField', 'models.OneToOneField', ([], {'blank': '(True)', 'null': '(True)', 'on_delete': 'django.db.models.deletion.CASCADE', 'to': '"""core.aisdecodedmodel"""'}), "(blank=True, null=True, on_delete=django.db.models.\n deletion.CASCADE, to='core.aisdecodedmodel')\n", (7712, 7812), False, 'from django.db import migrations, models\n'), ((8099, 8150), 'django.db.models.AutoField', 'models.AutoField', ([], {'primary_key': '(True)', 'serialize': '(False)'}), '(primary_key=True, serialize=False)\n', (8115, 8150), False, 'from django.db import migrations, models\n'), ((8184, 8223), 'django.db.models.DateTimeField', 'models.DateTimeField', ([], {'auto_now_add': '(True)'}), 
'(auto_now_add=True)\n', (8204, 8223), False, 'from django.db import migrations, models\n'), ((8257, 8292), 'django.db.models.DateTimeField', 'models.DateTimeField', ([], {'auto_now': '(True)'}), '(auto_now=True)\n', (8277, 8292), False, 'from django.db import migrations, models\n'), ((8338, 8370), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(256)'}), '(max_length=256)\n', (8354, 8370), False, 'from django.db import migrations, models\n'), ((8401, 8432), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(64)'}), '(max_length=64)\n', (8417, 8432), False, 'from django.db import migrations, models\n'), ((8459, 8572), 'django.db.models.OneToOneField', 'models.OneToOneField', ([], {'blank': '(True)', 'null': '(True)', 'on_delete': 'django.db.models.deletion.CASCADE', 'to': '"""core.dabmodel"""'}), "(blank=True, null=True, on_delete=django.db.models.\n deletion.CASCADE, to='core.dabmodel')\n", (8479, 8572), False, 'from django.db import migrations, models\n'), ((8598, 8715), 'django.db.models.OneToOneField', 'models.OneToOneField', ([], {'blank': '(True)', 'null': '(True)', 'on_delete': 'django.db.models.deletion.CASCADE', 'to': '"""core.lorawanmodel"""'}), "(blank=True, null=True, on_delete=django.db.models.\n deletion.CASCADE, to='core.lorawanmodel')\n", (8618, 8715), False, 'from django.db import migrations, models\n'), ((8744, 8864), 'django.db.models.OneToOneField', 'models.OneToOneField', ([], {'blank': '(True)', 'null': '(True)', 'on_delete': 'django.db.models.deletion.CASCADE', 'to': '"""core.aisdecodedmodel"""'}), "(blank=True, null=True, on_delete=django.db.models.\n deletion.CASCADE, to='core.aisdecodedmodel')\n", (8764, 8864), False, 'from django.db import migrations, models\n'), ((9138, 9189), 'django.db.models.AutoField', 'models.AutoField', ([], {'primary_key': '(True)', 'serialize': '(False)'}), '(primary_key=True, serialize=False)\n', (9154, 9189), False, 'from django.db import migrations, models\n'), 
((9223, 9262), 'django.db.models.DateTimeField', 'models.DateTimeField', ([], {'auto_now_add': '(True)'}), '(auto_now_add=True)\n', (9243, 9262), False, 'from django.db import migrations, models\n'), ((9296, 9331), 'django.db.models.DateTimeField', 'models.DateTimeField', ([], {'auto_now': '(True)'}), '(auto_now=True)\n', (9316, 9331), False, 'from django.db import migrations, models\n'), ((9377, 9409), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(256)'}), '(max_length=256)\n', (9393, 9409), False, 'from django.db import migrations, models\n'), ((9573, 9648), 'django.db.models.DecimalField', 'models.DecimalField', ([], {'blank': '(True)', 'decimal_places': '(2)', 'max_digits': '(20)', 'null': '(True)'}), '(blank=True, decimal_places=2, max_digits=20, null=True)\n', (9592, 9648), False, 'from django.db import migrations, models\n'), ((9679, 9710), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(64)'}), '(max_length=64)\n', (9695, 9710), False, 'from django.db import migrations, models\n'), ((9737, 9850), 'django.db.models.OneToOneField', 'models.OneToOneField', ([], {'blank': '(True)', 'null': '(True)', 'on_delete': 'django.db.models.deletion.CASCADE', 'to': '"""core.dabmodel"""'}), "(blank=True, null=True, on_delete=django.db.models.\n deletion.CASCADE, to='core.dabmodel')\n", (9757, 9850), False, 'from django.db import migrations, models\n'), ((9876, 9993), 'django.db.models.OneToOneField', 'models.OneToOneField', ([], {'blank': '(True)', 'null': '(True)', 'on_delete': 'django.db.models.deletion.CASCADE', 'to': '"""core.lorawanmodel"""'}), "(blank=True, null=True, on_delete=django.db.models.\n deletion.CASCADE, to='core.lorawanmodel')\n", (9896, 9993), False, 'from django.db import migrations, models\n'), ((10022, 10142), 'django.db.models.OneToOneField', 'models.OneToOneField', ([], {'blank': '(True)', 'null': '(True)', 'on_delete': 'django.db.models.deletion.CASCADE', 'to': '"""core.aisdecodedmodel"""'}), 
"(blank=True, null=True, on_delete=django.db.models.\n deletion.CASCADE, to='core.aisdecodedmodel')\n", (10042, 10142), False, 'from django.db import migrations, models\n')] |
from tlidb.examples.utils import move_to
from .algorithm import Algorithm
class DecoderAlgorithm(Algorithm):
    """Algorithm wrapper for decoder-only (causal LM) models.

    Computes the LM cross-entropy loss on every batch; when metrics are
    required it additionally generates free-form text and converts it to
    task-specific predictions via per-task pre/post-processing hooks named
    ``_<task_type>_preprocessing`` / ``_<task_type>_postprocessing``.
    """
    def __init__(self, config, datasets):
        super().__init__(config, datasets)
        # Kwargs forwarded verbatim to model.generate() (beam size, sampling, ...).
        self.generation_config = config.generation_config
        # When False, skip generation (and therefore metrics) during training.
        self.generate_during_training = config.generate_during_training
    def process_batch(self, batch):
        """
        A helper function for update() and evaluate() that process the batch
        Args:
            - batch: a batch of data yielded by the DataLoader
        Output:
            - results: a dictionary of results
                - y_pred: the prediction of the model
                - y_true: the ground truth
                - metadata: the metadata of the batch
            - objective: a dictionary with the loss name and loss value
        """
        X, y_true, metadata = batch
        # Dispatch task-specific preprocessing on the declared task type.
        task_type = metadata['task_metadata']['type']
        X, y_true, metadata = getattr(self, f"_{task_type}_preprocessing")(X, y_true, metadata)
        # Prepare generation inputs BEFORE the LM labels are attached to X.
        if self.requires_metric_calculation():
            X_generate = self.model.transform_generation_inputs(X)
            X_generate = move_to(X_generate, self.device)
        X, lm_labels = self.model.transform_LM_inputs(X, y_true)
        X['lm_labels'] = lm_labels
        X = move_to(X, self.device)
        # Token count in the batch, used to convert per-token loss to a sum.
        num_batch_tokens = X['attention_mask'].sum().item()
        loss = self.model(**X)
        # Generate predictions and convert them to labels if metrics are needed.
        if self.requires_metric_calculation():
            outputs = self.model.generate(X_generate, metadata['task_metadata']['max_decode_tokens'], **self.generation_config)
            y_pred, y_true = getattr(self, f"_{task_type}_postprocessing")(outputs, y_true, metadata)
        else:
            y_pred = []
            y_true = []
        results = {
            'y_pred': y_pred,
            'y_true': y_true,
            'metadata': metadata,
            'batch_loss_divisor': num_batch_tokens,  # used for averaging loss
            "objective": {
                "loss_name": "LM_cross_entropy",
                "loss_value": loss.item() * num_batch_tokens}
        }
        return results, loss
    def requires_metric_calculation(self):
        """Return True when the model must generate predictions (vs. loss only)."""
        if self.is_training and not self.generate_during_training:
            return False
        return True
    def _classification_preprocessing(self, X, y_true, metadata):
        """No-op: classification inputs are consumed as-is."""
        return X, y_true, metadata
    def _classification_postprocessing(self, outputs, y_true, metadata):
        """Map gold and generated label strings onto integer class indices."""
        y_true = self.convert_strings_to_labels(metadata['labels'], y_true)
        assert all(y_true != -1), str(y_true)  # every gold label must be known
        y_pred = self.convert_strings_to_labels(metadata['labels'], outputs)
        return y_pred, y_true
    def _multioutput_classification_preprocessing(self, X, y_true, metadata):
        """No-op: multi-output classification inputs are consumed as-is."""
        return X, y_true, metadata
    def _multioutput_classification_postprocessing(self, outputs, y_true, metadata):
        """Map gold and generated label strings onto integer class indices."""
        y_true = self.convert_strings_to_labels(metadata['labels'], y_true)
        assert all(y_true != -1), str(y_true)
        y_pred = self.convert_strings_to_labels(metadata['labels'], outputs)
        return y_pred, y_true
    def _multilabel_classification_preprocessing(self, X, y_true, metadata):
        """Render each gold label set as a single " and "-joined target string."""
        class_map = metadata['task_metadata']['class_to_natural_language_map']
        y_true = [" and ".join([class_map[c] for c in sample]) for sample in y_true]
        return X, y_true, metadata
    def _multilabel_classification_postprocessing(self, outputs, y_true, metadata):
        """Convert generated text and gold strings to multi-hot label vectors."""
        num_labels = len(metadata['labels'])
        class_map = metadata['task_metadata']['class_to_natural_language_map']
        # Convert model outputs to multilabel format.
        y_pred = []
        for output in outputs:
            pred = [0] * num_labels
            # Mark every class whose natural-language name appears in the output.
            for i, natural_language_class in enumerate(class_map.values()):
                if natural_language_class in output:
                    pred[i] = 1
            if sum(pred) == 0:
                # Nothing matched: fall back to the task's default prediction.
                pred[metadata['labels'].index(metadata['task_metadata']['default_prediction'])] = 1
            y_pred.append(pred)
        # Convert gold label strings back to multilabel format.
        transformed_y_true = []
        for y in y_true:
            true = [0] * num_labels
            natural_language_labels = y.split(" and ")
            label_indices = [list(class_map.values()).index(l) for l in natural_language_labels]
            for i in label_indices:
                true[i] = 1
            transformed_y_true.append(true)
        return y_pred, transformed_y_true
    def _span_extraction_preprocessing(self, X, y_true, metadata):
        """Extract the answer text from span dicts (single- or multi-answer)."""
        if isinstance(y_true[0], list):
            y_true = [[y_['text'] for y_ in y] for y in y_true]
        else:
            y_true = [y['text'] for y in y_true]
        return X, y_true, metadata
    def _span_extraction_postprocessing(self, outputs, y_true, metadata):
        """Generated text is compared directly against the gold span text."""
        y_pred = outputs
        return y_pred, y_true
    def _multiple_choice_preprocessing(self, X, y_true, metadata):
        """No-op: multiple-choice inputs are consumed as-is."""
        return X, y_true, metadata
    def _multiple_choice_postprocessing(self, outputs, y_true, metadata):
        """Treat the stringified choice indices ("0".."n-1") as the label set."""
        num_choices = metadata['task_metadata']['num_choices']
        metadata['labels'] = [str(i) for i in range(num_choices)]
        y_true = self.convert_strings_to_labels(metadata['labels'], y_true)
        assert all(y_true != -1), str(y_true)
        y_pred = self.convert_strings_to_labels(metadata['labels'], outputs)
        return y_pred, y_true
    def _response_generation_preprocessing(self, X, y_true, metadata):
        """No-op: response-generation inputs are consumed as-is."""
        return X, y_true, metadata
    def _response_generation_postprocessing(self, outputs, y_true, metadata):
        """Generated text is the prediction; gold response is the reference."""
        y_pred = outputs
        return y_pred, y_true
"tlidb.examples.utils.move_to"
] | [((1371, 1394), 'tlidb.examples.utils.move_to', 'move_to', (['X', 'self.device'], {}), '(X, self.device)\n', (1378, 1394), False, 'from tlidb.examples.utils import move_to\n'), ((1226, 1258), 'tlidb.examples.utils.move_to', 'move_to', (['X_generate', 'self.device'], {}), '(X_generate, self.device)\n', (1233, 1258), False, 'from tlidb.examples.utils import move_to\n')] |
from typing import Tuple, Optional
import ray
from ray import workflow
@ray.remote
def intentional_fail() -> str:
    """A workflow step that always fails, to exercise the error path."""
    failure = RuntimeError("oops")
    raise failure
@ray.remote
def cry(error: Exception) -> None:
    """Failure handler: report the exception captured by the workflow."""
    print("Sadly", error)
@ray.remote
def celebrate(result: str) -> None:
print("Success!", result)
@ray.remote
def send_email(result: str) -> None:
print("Sending email", result)
@ray.remote
def exit_handler(res: Tuple[Optional[str], Optional[Exception]]) -> None:
result, error = res
email = send_email.bind(f"Raw result: {result}, {error}")
if error:
handler = cry.bind(error)
else:
handler = celebrate.bind(result)
return workflow.continuation(wait_all.bind(handler, email))
@ray.remote
def wait_all(*deps):
return "done"
if __name__ == "__main__":
res = intentional_fail.options(**workflow.options(catch_exceptions=True)).bind()
print(workflow.create(exit_handler.bind(res)).run())
| [
"ray.workflow.options"
] | [((845, 884), 'ray.workflow.options', 'workflow.options', ([], {'catch_exceptions': '(True)'}), '(catch_exceptions=True)\n', (861, 884), False, 'from ray import workflow\n')] |
"""Convert the output format of fairseq.generate to the input format of the evaluation script."""
from argparse import ArgumentParser
from collections import defaultdict
def main():
parser = ArgumentParser()
parser.add_argument('src', help='path to source')
parser.add_argument('tgt', help='path to target')
parser.add_argument('file', help='path to fairseq generate output')
args = parser.parse_args()
with open(args.src, 'r') as f:
src = [line.strip() for line in f]
with open(args.tgt, 'r') as f:
tgt = [line.strip() for line in f]
hyp = defaultdict(list)
with open(args.file, 'r') as f:
for line in f:
if line.startswith('H-'):
idx, _, text = line.split('\t')
hyp[int(idx[2:])].append(text.strip())
for i, k in enumerate(sorted(hyp.keys())):
fields = [src[i], tgt[i]] + hyp[k]
print('\t'.join(fields))
if __name__ == '__main__':
main()
| [
"collections.defaultdict",
"argparse.ArgumentParser"
] | [((198, 214), 'argparse.ArgumentParser', 'ArgumentParser', ([], {}), '()\n', (212, 214), False, 'from argparse import ArgumentParser\n'), ((592, 609), 'collections.defaultdict', 'defaultdict', (['list'], {}), '(list)\n', (603, 609), False, 'from collections import defaultdict\n')] |
import json
import os
from bakker.storage import FileSystemStorage
class Config:
    """Dot-path keyed configuration persisted as JSON under ``~/.bakker``.

    Keys are dotted paths (e.g. ``storage.file_system.path``); leaves are
    always strings, interior nodes are nested dicts. Every mutation is
    written straight back to disk.
    """

    USER_DIR = os.path.expanduser('~')
    CONFIG_FILE = os.path.join(USER_DIR, '.bakker/config.json')

    def __init__(self):
        # Start empty, then overlay whatever is already on disk.
        self.config = {}
        if os.path.isfile(self.CONFIG_FILE):
            with open(self.CONFIG_FILE, 'r') as f:
                self.config = json.load(f)

    def _save(self):
        """Write the current config to disk, creating its directory if needed."""
        config_dir = os.path.dirname(self.CONFIG_FILE)
        if not os.path.exists(config_dir):
            os.makedirs(config_dir)
        with open(self.CONFIG_FILE, 'w') as f:
            json.dump(self.config, f)

    def __setitem__(self, key, value):
        """Set a string leaf at the dotted path ``key`` and persist."""
        assert isinstance(value, str)
        *parent_keys, leaf = key.split('.')
        node = self.config
        for part in parent_keys:
            node = node.setdefault(part, {})
        node[leaf] = value
        self._save()

    def __getitem__(self, key):
        """Return the string leaf at dotted path ``key``; raise KeyError otherwise."""
        node = self.config
        for part in key.split('.'):
            node = node[part]
        if not isinstance(node, str):
            # Interior (dict) nodes are not addressable values.
            raise KeyError()
        return node

    def __delitem__(self, key):
        """Delete the entry at ``key``, pruning emptied parent dicts, then persist."""
        parts = key.split('.')
        # Record the dict at each level so we can prune bottom-up afterwards.
        trail = [self.config]
        for part in parts[:-1]:
            trail.append(trail[-1][part])
        del trail[-1][parts[-1]]
        for depth in range(len(parts) - 2, -1, -1):
            if trail[depth + 1]:
                break
            del trail[depth][parts[depth]]
        self._save()

    def __contains__(self, key):
        """True if ``key`` resolves to a string leaf."""
        try:
            self[key]
        except KeyError:
            return False
        return True

    def items(self):
        """Yield ``(dotted.path, value)`` pairs for every string leaf."""
        def walk(node, prefix):
            for name, value in node.items():
                path = name if prefix is None else prefix + '.' + name
                if isinstance(value, dict):
                    yield from walk(value, path)
                elif isinstance(value, str):
                    yield path, value
        return walk(self.config, None)
# Config keys used for selecting and configuring the backup storage backend.
DEFAULT_STORAGE_KEY = 'default.storage'
DEFAULT_STORAGE_CHOICES = ['fs']  # 'fs' presumably selects FileSystemStorage — confirm
STORAGE_FILE_SYSTEM_PATH = 'storage.file_system.path'
| [
"os.path.expanduser",
"os.path.join",
"os.path.isfile",
"os.path.dirname",
"json.load",
"json.dump"
] | [((106, 129), 'os.path.expanduser', 'os.path.expanduser', (['"""~"""'], {}), "('~')\n", (124, 129), False, 'import os\n'), ((149, 194), 'os.path.join', 'os.path.join', (['USER_DIR', '""".bakker/config.json"""'], {}), "(USER_DIR, '.bakker/config.json')\n", (161, 194), False, 'import os\n'), ((234, 266), 'os.path.isfile', 'os.path.isfile', (['self.CONFIG_FILE'], {}), '(self.CONFIG_FILE)\n', (248, 266), False, 'import os\n'), ((621, 646), 'json.dump', 'json.dump', (['self.config', 'f'], {}), '(self.config, f)\n', (630, 646), False, 'import json\n'), ((351, 363), 'json.load', 'json.load', (['f'], {}), '(f)\n', (360, 363), False, 'import json\n'), ((464, 497), 'os.path.dirname', 'os.path.dirname', (['self.CONFIG_FILE'], {}), '(self.CONFIG_FILE)\n', (479, 497), False, 'import os\n'), ((525, 558), 'os.path.dirname', 'os.path.dirname', (['self.CONFIG_FILE'], {}), '(self.CONFIG_FILE)\n', (540, 558), False, 'import os\n')] |
# Smoke test for the sentrewrite service: POST a fixed dialogue with NER
# annotations and require the coreference-rewritten response to match gold.
import requests
url = "http://0.0.0.0:8017/sentrewrite"
data = {
    "utterances_histories": [
        [["do you know <NAME>?"], ["yes, he is a football player."], ["who is the best, he or c.ronaldo?"]]
    ],
    "annotation_histories": [
        [
            {"ner": [[{"confidence": 1, "end_pos": 24, "start_pos": 13, "text": "lionel messi", "type": "PER"}]]},
            {"ner": [[]]},
            {"ner": [[{"confidence": 1, "end_pos": 32, "start_pos": 24, "text": "c.ronaldo", "type": "PER"}]]},
        ]
    ],
}
# Expected service output: one coreference cluster resolving both "he"
# mentions, plus the rewritten sentences.
gold = [
    {
        "clusters": [
            [
                {
                    "end": 24,
                    "ner": {"offset": 1, "type": "PER"},
                    "resolved": "lionel messi",
                    "start": 12,
                    "text": "lionel messi",
                },
                {
                    "end": 33,
                    "ner": {"offset": 10000, "type": "O"},
                    "resolved": "lionel messi",
                    "start": 31,
                    "text": "he",
                },
                {
                    "end": 75,
                    "ner": {"offset": 10000, "type": "O"},
                    "resolved": "lionel messi",
                    "start": 73,
                    "text": "he",
                },
            ]
        ],
        "modified_sents": [
            "do you know <NAME>?",
            "yes, <NAME> is a football player.",
            "who is the best, l<NAME> or c.ronaldo?",
        ],
    }
]
response = requests.post(url, json=data).json()
print(response)
# NOTE(review): the assert message is the return value of print(...) (None);
# the print call itself is the intended debug output on failure.
assert response == gold, print(response)
print("SUCCESS!")
| [
"requests.post"
] | [((1532, 1561), 'requests.post', 'requests.post', (['url'], {'json': 'data'}), '(url, json=data)\n', (1545, 1561), False, 'import requests\n')] |
import collections
import os
from itertools import product
from pathlib import Path
from typing import Dict, Iterator, List, NamedTuple, Optional, OrderedDict, Sequence, Tuple, Union
import numpy as np
import xarray as xr
from tqdm import tqdm
from bioimageio.core import image_helper
from bioimageio.core import load_resource_description
from bioimageio.core.prediction_pipeline import PredictionPipeline, create_prediction_pipeline
from bioimageio.core.resource_io.nodes import ImplicitOutputShape, Model, ResourceDescription
from bioimageio.spec.shared import raw_nodes
from bioimageio.spec.shared.raw_nodes import ResourceDescription as RawResourceDescription
def _apply_crop(data, crop):
crop = tuple(crop[ax] for ax in data.dims)
return data[crop]
class TileDef(NamedTuple):
    """One tile of a tiled prediction, as per-axis slice dicts in three frames."""
    # Tile including the halo, in input-image coordinates.
    outer: Dict[str, slice]
    # Tile without the halo, in input-image coordinates.
    inner: Dict[str, slice]
    # Inner tile expressed relative to the outer tile's origin.
    local: Dict[str, slice]
def get_tiling(
    shape: Sequence[int], tile_shape: Dict[str, int], halo: Dict[str, int], input_axes: Sequence[str]
) -> Iterator[TileDef]:
    """Yield TileDefs covering an image of ``shape``.

    ``tile_shape`` gives the full (outer) tile size per spatial axis and
    ``halo`` the overlap removed from each side; batch ("b") and channel
    ("c") axes always get full slices.
    """
    assert len(shape) == len(input_axes)
    shape_ = [sh for sh, ax in zip(shape, input_axes) if ax in "xyz"]
    spatial_axes = [ax for ax in input_axes if ax in "xyz"]
    # The inner (non-overlapping) tile size: outer tile minus a halo on each side.
    inner_tile_shape_ = [tile_shape[ax] - 2 * halo[ax] for ax in spatial_axes]
    halo_ = [halo[ax] for ax in spatial_axes]
    assert len(shape_) == len(inner_tile_shape_) == len(spatial_axes) == len(halo_)
    # Number of inner tiles per axis (ceil division so the border is covered).
    ranges = [range(sh // tsh if sh % tsh == 0 else sh // tsh + 1) for sh, tsh in zip(shape_, inner_tile_shape_)]
    start_points = product(*ranges)
    for start_point in start_points:
        positions = [sp * tsh for sp, tsh in zip(start_point, inner_tile_shape_)]
        # Inner tile: clamped to the image bounds, no halo.
        inner_tile = {
            ax: slice(pos, min(pos + tsh, sh))
            for ax, pos, tsh, sh in zip(spatial_axes, positions, inner_tile_shape_, shape_)
        }
        inner_tile["b"] = slice(None)
        inner_tile["c"] = slice(None)
        # Outer tile: inner tile extended by the halo, clamped to the image.
        outer_tile = {
            ax: slice(max(pos - ha, 0), min(pos + tsh + ha, sh))
            for ax, pos, tsh, sh, ha in zip(spatial_axes, positions, inner_tile_shape_, shape_, halo_)
        }
        outer_tile["b"] = slice(None)
        outer_tile["c"] = slice(None)
        # Local tile: where the inner region sits inside the outer tile.
        local_tile = {
            ax: slice(
                inner_tile[ax].start - outer_tile[ax].start,
                -(outer_tile[ax].stop - inner_tile[ax].stop) if outer_tile[ax].stop != inner_tile[ax].stop else None,
            )
            for ax in spatial_axes
        }
        local_tile["b"] = slice(None)
        local_tile["c"] = slice(None)
        yield TileDef(outer_tile, inner_tile, local_tile)
def _predict_with_tiling_impl(
    prediction_pipeline: PredictionPipeline,
    inputs: Sequence[xr.DataArray],
    outputs: Sequence[xr.DataArray],
    tile_shapes: Sequence[Dict[str, int]],
    halos: Sequence[Dict[str, int]],
    verbose: bool = False,
):
    """Predict the single input tile-by-tile, writing into ``outputs[0]`` in place.

    Each outer tile is predicted with fixed padding (so border tiles still
    match the requested tile shape) and only the halo-free inner region of
    the result is written back.
    """
    if len(inputs) > 1:
        raise NotImplementedError("Tiling with multiple inputs not implemented yet")
    if len(outputs) > 1:
        raise NotImplementedError("Tiling with multiple outputs not implemented yet")
    assert len(tile_shapes) == len(outputs)
    assert len(halos) == len(outputs)
    input_ = inputs[0]
    output = outputs[0]
    tile_shape = tile_shapes[0]
    halo = halos[0]
    tiles = get_tiling(shape=input_.shape, tile_shape=tile_shape, halo=halo, input_axes=input_.dims)
    assert all(isinstance(ax, str) for ax in input_.dims)
    input_axes: Tuple[str, ...] = input_.dims  # noqa
    def load_tile(tile):
        inp = input_[tile]
        # whether to pad on the right or left of the dim for the spatial dims
        # + placeholders for batch and axis dimension, where we don't pad
        pad_right = [tile[ax].start == 0 if ax in "xyz" else None for ax in input_axes]
        return inp, pad_right
    if verbose:
        # Estimate the total tile count just to size the progress bar.
        shape = {ax: sh for ax, sh in zip(prediction_pipeline.input_specs[0].axes, input_.shape)}
        n_tiles = int(np.prod([np.ceil(float(shape[ax]) / (tsh - 2 * halo[ax])) for ax, tsh in tile_shape.items()]))
        tiles = tqdm(tiles, total=n_tiles, desc="prediction with tiling")
    # we need to use padded prediction for the individual tiles in case the
    # border tiles don't match the requested tile shape
    padding = {ax: tile_shape[ax] for ax in input_axes if ax in "xyz"}
    padding["mode"] = "fixed"
    for outer_tile, inner_tile, local_tile in tiles:
        inp, pad_right = load_tile(outer_tile)
        out = predict_with_padding(prediction_pipeline, inp, padding, pad_right)
        assert len(out) == 1
        out = out[0]
        output[inner_tile] = out[local_tile]
#
# prediction functions
#
def predict(
    prediction_pipeline: PredictionPipeline,
    inputs: Union[xr.DataArray, List[xr.DataArray], Tuple[xr.DataArray]],
) -> List[xr.DataArray]:
    """Run prediction for a single set of input(s) with a bioimage.io model.

    Args:
        prediction_pipeline: the prediction pipeline for the input model.
        inputs: the input(s) for this model represented as xarray data.
    """
    input_list = list(inputs) if isinstance(inputs, (tuple, list)) else [inputs]
    assert len(input_list) == len(prediction_pipeline.input_specs)
    tagged = []
    for data, spec in zip(input_list, prediction_pipeline.input_specs):
        tagged.append(xr.DataArray(data, dims=spec.axes))
    return prediction_pipeline.forward(*tagged)
def _parse_padding(padding, input_specs):
if padding is None: # no padding
return padding
if len(input_specs) > 1:
raise NotImplementedError("Padding for multiple inputs not yet implemented")
input_spec = input_specs[0]
pad_keys = tuple(input_spec.axes) + ("mode",)
def check_padding(padding):
assert all(k in pad_keys for k in padding.keys())
if isinstance(padding, dict): # pre-defined padding
check_padding(padding)
elif isinstance(padding, bool): # determine padding from spec
if padding:
axes = input_spec.axes
shape = input_spec.shape
if isinstance(shape, list): # fixed padding
padding = {ax: sh for ax, sh in zip(axes, shape) if ax in "xyz"}
padding["mode"] = "fixed"
else: # dynamic padding
step = shape.step
padding = {ax: st for ax, st in zip(axes, step) if ax in "xyz"}
padding["mode"] = "dynamic"
check_padding(padding)
else: # no padding
padding = None
else:
raise ValueError(f"Invalid argument for padding: {padding}")
return padding
def predict_with_padding(
    prediction_pipeline: PredictionPipeline,
    inputs: Union[xr.DataArray, List[xr.DataArray], Tuple[xr.DataArray]],
    padding: Union[bool, Dict[str, int]] = True,
    pad_right: bool = True,
) -> List[xr.DataArray]:
    """Run prediction with padding for a single set of input(s) with a bioimage.io model.

    Args:
        prediction_pipeline: the prediction pipeline for the input model.
        inputs: the input(s) for this model represented as xarray data.
        padding: the padding settings. Pass True to derive from the model spec.
        pad_right: whether to applying padding to the right or left of the input.
    """
    if not padding:
        raise ValueError
    assert len(inputs) == len(prediction_pipeline.input_specs)
    # Detect whether the network itself changes the spatial size (implicit
    # output shape with non-unit scale or non-zero offset).
    output_spec = prediction_pipeline.output_specs[0]
    if hasattr(output_spec.shape, "scale"):
        scale = dict(zip(output_spec.axes, output_spec.shape.scale))
        offset = dict(zip(output_spec.axes, output_spec.shape.offset))
        network_resizes = any(sc != 1 for ax, sc in scale.items() if ax in "xyz") or any(
            off != 0 for ax, off in offset.items() if ax in "xyz"
        )
    else:
        network_resizes = False
    padding = _parse_padding(padding, prediction_pipeline.input_specs)
    if not isinstance(inputs, (tuple, list)):
        inputs = [inputs]
    if not isinstance(padding, (tuple, list)):
        padding = [padding]
    assert len(padding) == len(prediction_pipeline.input_specs)
    # Pad each input and remember the crop that undoes the padding.
    inputs, crops = zip(
        *[
            image_helper.pad(inp, spec.axes, p, pad_right=pad_right)
            for inp, spec, p in zip(inputs, prediction_pipeline.input_specs, padding)
        ]
    )
    result = predict(prediction_pipeline, inputs)
    if network_resizes:
        # Rescale the crop coordinates into the output's coordinate frame.
        crops = tuple(
            {
                ax: slice(int(crp.start * scale[ax] + 2 * offset[ax]), int(crp.stop * scale[ax] + 2 * offset[ax]))
                if ax in "xyz"
                else crp
                for ax, crp in crop.items()
            }
            for crop in crops
        )
    return [_apply_crop(res, crop) for res, crop in zip(result, crops)]
# simple heuristic to determine suitable shape from min and step
def _determine_shape(min_shape, step, axes):
is3d = "z" in axes
min_len = 64 if is3d else 256
shape = []
for ax, min_ax, step_ax in zip(axes, min_shape, step):
if ax in "zyx" and step_ax > 0:
len_ax = min_ax
while len_ax < min_len:
len_ax += step_ax
shape.append(len_ax)
else:
shape.append(min_ax)
return shape
def _parse_tiling(tiling, input_specs, output_specs):
    """Normalize a user tiling argument into ``{"halo": ..., "tile": ...}`` or None.

    ``tiling`` may be None, an explicit dict, or a bool requesting that tile
    size and halo be derived from the (single) input/output spec pair.
    """
    if tiling is None:  # no tiling
        return tiling
    if len(input_specs) > 1:
        raise NotImplementedError("Tiling for multiple inputs not yet implemented")
    if len(output_specs) > 1:
        raise NotImplementedError("Tiling for multiple outputs not yet implemented")
    input_spec = input_specs[0]
    output_spec = output_specs[0]
    axes = input_spec.axes
    def check_tiling(tiling):
        # Require a positive tile size and non-negative halo per spatial axis.
        assert "halo" in tiling and "tile" in tiling
        spatial_axes = [ax for ax in axes if ax in "xyz"]
        halo = tiling["halo"]
        tile = tiling["tile"]
        assert all(halo.get(ax, 0) >= 0 for ax in spatial_axes)
        assert all(tile.get(ax, 0) > 0 for ax in spatial_axes)
    if isinstance(tiling, dict):
        check_tiling(tiling)
    elif isinstance(tiling, bool):
        if tiling:
            # NOTE we assume here that shape in input and output are the same
            # for different input and output shapes, we should actually tile in the
            # output space and then request the corresponding input tiles
            # so we would need to apply the output scale and offset to the
            # input shape to compute the tile size and halo here
            shape = input_spec.shape
            if not isinstance(shape, list):
                shape = _determine_shape(shape.min, shape.step, axes)
            assert isinstance(shape, list)
            assert len(shape) == len(axes)
            halo = output_spec.halo
            if halo is None:
                halo = [0] * len(axes)
            assert len(halo) == len(axes)
            tiling = {
                "halo": {ax: ha for ax, ha in zip(axes, halo) if ax in "xyz"},
                "tile": {ax: sh for ax, sh in zip(axes, shape) if ax in "xyz"},
            }
            check_tiling(tiling)
        else:
            tiling = None
    else:
        raise ValueError(f"Invalid argument for tiling: {tiling}")
    return tiling
def predict_with_tiling(
    prediction_pipeline: PredictionPipeline,
    inputs: Union[xr.DataArray, List[xr.DataArray], Tuple[xr.DataArray]],
    tiling: Union[bool, Dict[str, Dict[str, int]]] = True,
    verbose: bool = False,
) -> List[xr.DataArray]:
    """Run prediction with tiling for a single set of input(s) with a bioimage.io model.

    Args:
        prediction_pipeline: the prediction pipeline for the input model.
        inputs: the input(s) for this model represented as xarray data.
        tiling: the tiling settings. Pass True to derive from the model spec.
        verbose: whether to print the prediction progress.
    """
    if not tiling:
        raise ValueError
    assert len(inputs) == len(prediction_pipeline.input_specs)
    tiling = _parse_tiling(tiling, prediction_pipeline.input_specs, prediction_pipeline.output_specs)
    if not isinstance(inputs, (list, tuple)):
        inputs = [inputs]
    named_inputs: OrderedDict[str, xr.DataArray] = collections.OrderedDict(
        **{
            ipt_spec.name: xr.DataArray(ipt_data, dims=tuple(ipt_spec.axes))
            for ipt_data, ipt_spec in zip(inputs, prediction_pipeline.input_specs)
        }
    )
    # Pre-allocate one output array per output spec; its shape is either
    # derived from the reference input (implicit shape) or fixed by the spec.
    outputs = []
    for output_spec in prediction_pipeline.output_specs:
        if isinstance(output_spec.shape, ImplicitOutputShape):
            scale = dict(zip(output_spec.axes, output_spec.shape.scale))
            offset = dict(zip(output_spec.axes, output_spec.shape.offset))
            if any(sc != 1 for ax, sc in scale.items() if ax in "xyz") or any(
                off != 0 for ax, off in offset.items() if ax in "xyz"
            ):
                raise NotImplementedError("Tiling with a different output shape is not yet supported")
            ref_input = named_inputs[output_spec.shape.reference_tensor]
            ref_input_shape = dict(zip(ref_input.dims, ref_input.shape))
            output_shape = tuple(int(scale[ax] * ref_input_shape[ax] + 2 * offset[ax]) for ax in output_spec.axes)
        else:
            if len(inputs) > 1:
                raise NotImplementedError
            input_spec = prediction_pipeline.input_specs[0]
            if input_spec.axes != output_spec.axes:
                raise NotImplementedError("Tiling with a different output shape is not yet supported")
            out_axes = output_spec.axes
            fixed_shape = tuple(output_spec.shape)
            if not all(fixed_shape[out_axes.index(ax)] == tile_shape for ax, tile_shape in tiling["tile"].items()):
                raise NotImplementedError("Tiling with a different output shape is not yet supported")
            # Spatially the output follows the input; only the channel count
            # may differ from the input.
            output_shape = list(inputs[0].shape)
            chan_id = out_axes.index("c")
            if fixed_shape[chan_id] != output_shape[chan_id]:
                output_shape[chan_id] = fixed_shape[chan_id]
            output_shape = tuple(output_shape)
        outputs.append(xr.DataArray(np.zeros(output_shape, dtype=output_spec.data_type), dims=tuple(output_spec.axes)))
    _predict_with_tiling_impl(
        prediction_pipeline,
        list(named_inputs.values()),
        outputs,
        tile_shapes=[tiling["tile"]],  # todo: update tiling for multiple inputs/outputs
        halos=[tiling["halo"]],
        verbose=verbose,
    )
    return outputs
def _predict_sample(prediction_pipeline, inputs, outputs, padding, tiling):
    """Load input images, run one (optionally padded/tiled) prediction, save results.

    ``padding`` and ``tiling`` are mutually exclusive; passing neither runs a
    plain forward pass.
    """
    if padding and tiling:
        raise ValueError("Only one of padding or tiling is supported")
    input_data = image_helper.load_tensors(inputs, prediction_pipeline.input_specs)
    if padding is not None:
        result = predict_with_padding(prediction_pipeline, input_data, padding)
    elif tiling is not None:
        result = predict_with_tiling(prediction_pipeline, input_data, tiling)
    else:
        result = predict(prediction_pipeline, input_data)
    assert isinstance(result, list)
    assert len(result) == len(outputs)
    for res, out in zip(result, outputs):
        image_helper.save_image(out, res)
def predict_image(
    model_rdf: Union[RawResourceDescription, ResourceDescription, os.PathLike, str, dict, raw_nodes.URI],
    inputs: Union[Tuple[Path, ...], List[Path], Path],
    outputs: Union[Tuple[Path, ...], List[Path], Path],
    padding: Optional[Union[bool, Dict[str, int]]] = None,
    tiling: Optional[Union[bool, Dict[str, Dict[str, int]]]] = None,
    weight_format: Optional[str] = None,
    devices: Optional[List[str]] = None,
    verbose: bool = False,
):
    """Run prediction for a single set of input image(s) with a bioimage.io model.

    Args:
        model_rdf: the bioimageio model.
        inputs: the filepaths for the input images.
        outputs: the filepaths for saving the input images.
        padding: the padding settings for prediction. By default no padding is used.
        tiling: the tiling settings for prediction. By default no tiling is used.
        weight_format: the weight format to use for predictions.
        devices: the devices to use for prediction.
        verbose: run prediction in verbose mode.

    Raises:
        ValueError: if the number of inputs or outputs does not match the model.
    """
    if not isinstance(inputs, (tuple, list)):
        inputs = [inputs]

    if not isinstance(outputs, (tuple, list)):
        outputs = [outputs]

    model = load_resource_description(model_rdf)
    assert isinstance(model, Model)
    # Fail early with informative messages instead of bare ValueErrors.
    if len(model.inputs) != len(inputs):
        raise ValueError(f"Expected {len(model.inputs)} input image(s), got {len(inputs)}")
    if len(model.outputs) != len(outputs):
        raise ValueError(f"Expected {len(model.outputs)} output path(s), got {len(outputs)}")

    with create_prediction_pipeline(
        bioimageio_model=model, weight_format=weight_format, devices=devices
    ) as prediction_pipeline:
        _predict_sample(prediction_pipeline, inputs, outputs, padding, tiling)
def predict_images(
    model_rdf: Union[RawResourceDescription, ResourceDescription, os.PathLike, str, dict, raw_nodes.URI],
    inputs: Sequence[Union[Tuple[Path, ...], List[Path], Path]],
    outputs: Sequence[Union[Tuple[Path, ...], List[Path], Path]],
    padding: Optional[Union[bool, Dict[str, int]]] = None,
    tiling: Optional[Union[bool, Dict[str, Dict[str, int]]]] = None,
    weight_format: Optional[str] = None,
    devices: Optional[List[str]] = None,
    verbose: bool = False,
):
    """Predict multiple input images with a bioimage.io model.

    Args:
        model_rdf: the bioimageio model.
        inputs: the filepaths for the input images.
        outputs: the filepaths for saving the input images.
        padding: the padding settings for prediction. By default no padding is used.
        tiling: the tiling settings for prediction. By default no tiling is used.
        weight_format: the weight format to use for predictions.
        devices: the devices to use for prediction.
        verbose: run prediction in verbose mode.
    """
    model = load_resource_description(model_rdf)
    assert isinstance(model, Model)
    # The pipeline is created once and reused for every sample.
    with create_prediction_pipeline(
        bioimageio_model=model, weight_format=weight_format, devices=devices
    ) as prediction_pipeline:
        prog = zip(inputs, outputs)
        if verbose:
            prog = tqdm(prog, total=len(inputs))
        for inp, outp in prog:
            if not isinstance(inp, (tuple, list)):
                inp = [inp]
            if not isinstance(outp, (tuple, list)):
                outp = [outp]
            _predict_sample(prediction_pipeline, inp, outp, padding, tiling)
| [
"bioimageio.core.load_resource_description",
"bioimageio.core.image_helper.load_tensors",
"bioimageio.core.prediction_pipeline.create_prediction_pipeline",
"itertools.product",
"tqdm.tqdm",
"bioimageio.core.image_helper.save_image",
"numpy.zeros",
"xarray.DataArray",
"bioimageio.core.image_helper.pa... | [((1538, 1554), 'itertools.product', 'product', (['*ranges'], {}), '(*ranges)\n', (1545, 1554), False, 'from itertools import product\n'), ((14736, 14802), 'bioimageio.core.image_helper.load_tensors', 'image_helper.load_tensors', (['inputs', 'prediction_pipeline.input_specs'], {}), '(inputs, prediction_pipeline.input_specs)\n', (14761, 14802), False, 'from bioimageio.core import image_helper\n'), ((16473, 16509), 'bioimageio.core.load_resource_description', 'load_resource_description', (['model_rdf'], {}), '(model_rdf)\n', (16498, 16509), False, 'from bioimageio.core import load_resource_description\n'), ((17984, 18020), 'bioimageio.core.load_resource_description', 'load_resource_description', (['model_rdf'], {}), '(model_rdf)\n', (18009, 18020), False, 'from bioimageio.core import load_resource_description\n'), ((4073, 4130), 'tqdm.tqdm', 'tqdm', (['tiles'], {'total': 'n_tiles', 'desc': '"""prediction with tiling"""'}), "(tiles, total=n_tiles, desc='prediction with tiling')\n", (4077, 4130), False, 'from tqdm import tqdm\n'), ((5234, 5271), 'xarray.DataArray', 'xr.DataArray', (['ipt'], {'dims': 'ipt_spec.axes'}), '(ipt, dims=ipt_spec.axes)\n', (5246, 5271), True, 'import xarray as xr\n'), ((15212, 15245), 'bioimageio.core.image_helper.save_image', 'image_helper.save_image', (['out', 'res'], {}), '(out, res)\n', (15235, 15245), False, 'from bioimageio.core import image_helper\n'), ((16690, 16791), 'bioimageio.core.prediction_pipeline.create_prediction_pipeline', 'create_prediction_pipeline', ([], {'bioimageio_model': 'model', 'weight_format': 'weight_format', 'devices': 'devices'}), '(bioimageio_model=model, weight_format=\n weight_format, devices=devices)\n', (16716, 16791), False, 'from bioimageio.core.prediction_pipeline import PredictionPipeline, create_prediction_pipeline\n'), ((18067, 18168), 'bioimageio.core.prediction_pipeline.create_prediction_pipeline', 'create_prediction_pipeline', ([], {'bioimageio_model': 'model', 
'weight_format': 'weight_format', 'devices': 'devices'}), '(bioimageio_model=model, weight_format=\n weight_format, devices=devices)\n', (18093, 18168), False, 'from bioimageio.core.prediction_pipeline import PredictionPipeline, create_prediction_pipeline\n'), ((8153, 8209), 'bioimageio.core.image_helper.pad', 'image_helper.pad', (['inp', 'spec.axes', 'p'], {'pad_right': 'pad_right'}), '(inp, spec.axes, p, pad_right=pad_right)\n', (8169, 8209), False, 'from bioimageio.core import image_helper\n'), ((14171, 14222), 'numpy.zeros', 'np.zeros', (['output_shape'], {'dtype': 'output_spec.data_type'}), '(output_shape, dtype=output_spec.data_type)\n', (14179, 14222), True, 'import numpy as np\n')] |
from app import db
def populate_mongo():
    """(Re)seed the 'expressions' Mongo collection with sample LaTeX entries.

    Drops any existing collection first, so repeated runs are idempotent.
    """
    if 'expressions' in db.collection_names():
        db.drop_collection('expressions')
    # Sample LaTeX expressions keyed by name; the strings are stored verbatim.
    expressions_entries = [
        {
            "name": "1",
            "expression": "$A = \\begin{pmatrix}c_{11} & c_{12} & c_{13} & c_{14} & c_{15}\\\\ c_{21} & c_{22} & c_{23} & c_{24} & c_{25}\\\\c_{31} & c_{32} & c_{33} & c_{34} & c_{35}\\\\c_{41} & c_{42} & c_{43} & c_{44} & c_{45}\\\\c_{51} & c_{52} & c_{53} & c_{54} & c_{55}\\end{pmatrix}$"},
        {
            "name": "2",
            "expression": "$B = \\begin{bmatrix}c_{11} & c_{12} & c_{13} & c_{14} & c_{15}\\\\ c_{21} & c_{22} & c_{23} & c_{24} & c_{25}\\\\c_{31} & c_{32} & c_{33} & c_{34} & c_{35}\\\\c_{41} & c_{42} & c_{43} & c_{44} & c_{45}\\\\c_{51} & c_{52} & c_{53} & c_{54} & c_{55}\\end{bmatrix}$"},
        {
            "name": "3",
            "expression": "$C = \\begin{pmatrix}c_{11} & c_{12} & c_{13} & c_{14} & c_{15}\\\\ c_{21} & c_{22} & c_{23} & c_{24} & c_{25}\\\\c_{31} & c_{32} & c_{33} & c_{34} & c_{35}\\\\c_{41} & c_{42} & c_{43} & c_{44} & c_{45}\\\\c_{51} & c_{52} & c_{53} & c_{54} & c_{55}\\end{pmatrix} \\otimes \\begin{pmatrix}c_{11} & c_{12} & c_{13} & c_{14} & c_{15}\\\\ c_{21} & c_{22} & c_{23} & c_{24} & c_{25}\\\\c_{31} & c_{32} & c_{33} & c_{34} & c_{35}\\\\c_{41} & c_{42} & c_{43} & c_{44} & c_{45}\\\\c_{51} & c_{52} & c_{53} & c_{54} & c_{55}\\end{pmatrix}$"},
        {
            "name": "4",
            "expression": "$\\nabla \\times \\bf{E} = -1 {1 \\over c} {\\partial \\bf{B} \\over \\partial t} $ "},
        {
            "name": "5",
            "expression": "$\\oint_{\\partial \\Sigma} \\bf B \\cdot \\rm d \\ell = - {1 \\over c} \\it {d \\over dt} \\bf \\int\\int_{\\Sigma} B \\cdot \\rm d \\bf S$"
        }
    ]
    db.expressions.insert(expressions_entries)
    print(db.collection_names())
| [
"app.db.drop_collection",
"app.db.collection_names",
"app.db.expressions.insert"
] | [((1791, 1833), 'app.db.expressions.insert', 'db.expressions.insert', (['expressions_entries'], {}), '(expressions_entries)\n', (1812, 1833), False, 'from app import db\n'), ((67, 88), 'app.db.collection_names', 'db.collection_names', ([], {}), '()\n', (86, 88), False, 'from app import db\n'), ((98, 131), 'app.db.drop_collection', 'db.drop_collection', (['"""expressions"""'], {}), "('expressions')\n", (116, 131), False, 'from app import db\n'), ((1844, 1865), 'app.db.collection_names', 'db.collection_names', ([], {}), '()\n', (1863, 1865), False, 'from app import db\n')] |
import random
import numpy as np
def set_seed(random_state: int = 42) -> None:
    """Seed both numpy's and the stdlib's RNGs so runs are reproducible.

    Args:
        random_state: seed value applied to both generators.
    """
    for seeder in (np.random.seed, random.seed):
        seeder(random_state)
| [
"numpy.random.seed",
"random.seed"
] | [((159, 187), 'numpy.random.seed', 'np.random.seed', (['random_state'], {}), '(random_state)\n', (173, 187), True, 'import numpy as np\n'), ((192, 217), 'random.seed', 'random.seed', (['random_state'], {}), '(random_state)\n', (203, 217), False, 'import random\n')] |
"""
Train a model on the Reddit dataset by Khodak.
"""
import functools
import time
import logging
import pickle
import os
import pandas as pd
from sklearn.model_selection import train_test_split
from simpletransformers.classification import ClassificationModel, ClassificationArgs
from utils import (
hour_min_sec,
has_markdown,
combine_with_context
)
# Data locations and core training hyper-parameters.
TRAIN_FILE = 'data/train.csv'
TEST_FILE = 'data/test.csv'
EXAMPLES = 100
EPOCHS = 1
USE_CUDA = True
BATCH_SIZE = 16
MAX_COMMENT_LENGTH = 150

# Shared simpletransformers argument object.
# NOTE(review): get_new_model() mutates num_train_epochs on this
# module-level instance in place.
MODEL_ARGS = ClassificationArgs(
    eval_batch_size=BATCH_SIZE,
    train_batch_size=BATCH_SIZE,
    evaluate_during_training=True,
    evaluate_during_training_verbose=True,
    use_multiprocessing=False,
    use_multiprocessing_for_evaluation=False,
    overwrite_output_dir=True,
    save_eval_checkpoints=True,
    save_model_every_epoch=True,
    #save_steps=-1
)

# Set logging to DEBUG level
logging.basicConfig(filename='sarcasm_run.log', level=logging.DEBUG, format='%(asctime)s %(message)s')
def timer(func):
    """Decorator: print and log the wall-clock duration of each call.

    DataFrame arguments are abbreviated as '<DataFrame>' in the logged
    argument list to keep the log line readable.
    """
    @functools.wraps(func)
    def wrapper_timer(*args, **kwargs):
        start = time.perf_counter()
        result = func(*args, **kwargs)
        elapsed_time = time.perf_counter() - start
        shown_args = tuple(
            '<DataFrame>' if isinstance(arg, pd.DataFrame) else arg for arg in args
        )
        shown_args += tuple(f'{k}={v}' for k, v in kwargs.items())
        message = (
            f"Called: {func.__name__}{shown_args}\t->\t"
            f"Elapsed time: {hour_min_sec(elapsed_time, hms=True)} seconds"
        )
        print(message)
        logging.info(message)
        return result
    return wrapper_timer
def read_df_from_csv(filename):
    """Read CSV file into dataframe.

    Force string type on `comment`, `subreddit`, and `parent_comment` fields and
    convert any NaN for string values to an empty string."""
    string_columns = ('comment', 'subreddit', 'parent_comment')
    return pd.read_csv(
        filename,
        dtype={column: pd.StringDtype() for column in string_columns},
        keep_default_na=False,  # Convert any NaN to empty string (for string dtype)
        verbose=True,
    )
def result_to_metrics(result):
    """Specific for the result dictionary of simpletransformers binary classification,
    which is a dictionary including keys: `tp`, `fp`, `tn`, and `fn`.

    TP = True Positive
    FP = False Positive
    TN = True Negative
    FN = False Negative

    accuracy = (TP + TN) / (TP + FP + TN + FN)
    precision = TP / (TP + FP)
    recall = TP / (TP + FN)

    :returns accuracy, precision, recall, f1
    """
    tp, fp, tn, fn = result['tp'], result['fp'], result['tn'], result['fn']
    accuracy = (tp + tn) / (tp + fp + tn + fn)
    # No predicted positives: define precision as 1 (nothing wrongly flagged).
    precision = tp / (tp + fp) if (tp + fp) > 0 else 1.0
    # No labelled positives: define recall as 1 (nothing was missed).
    recall = tp / (tp + fn) if (tp + fn) > 0 else 1.0
    f1 = (2 * precision * recall) / (precision + recall) if (precision + recall) > 0 else 0.0
    return accuracy, precision, recall, f1
def get_new_model(num_train_epochs=EPOCHS, use_cuda=USE_CUDA):
    """Build a fresh RoBERTa-base classification model.

    NOTE(review): this mutates the module-level MODEL_ARGS in place, so the
    epoch count persists into any later model built from the same object.
    """
    logging.info(MODEL_ARGS)
    MODEL_ARGS.num_train_epochs = num_train_epochs
    # Create a ClassificationModel
    model = ClassificationModel(
        "roberta", "roberta-base", args=MODEL_ARGS, use_cuda=use_cuda
    )
    return model
def eval(model, eval_df):
    """Evaluate a model with a given evaluation dataset.

    Prints and logs accuracy/precision/recall/F1 derived from the raw
    confusion counts and returns the (unmodified) model.
    """
    # NOTE(review): the name shadows the builtin `eval`; renaming would
    # require touching the in-file callers, so it is only flagged here.
    result, _, _ = model.eval_model(eval_df)
    print(result)
    accuracy, precision, recall, f1 = result_to_metrics(result)
    metrics_message = f'Accuracy = {accuracy:0.4f}; Precision = {precision:0.4f}; Recall = {recall:0.4f}; F1 = {f1:0.4f}'
    print(metrics_message)
    logging.info(metrics_message)
    return model
@timer
def train(train_df, dev_df, eval_df, epochs=EPOCHS, field='comment'):
    """Train the model and evaluate after training. Uses early stopping.

    :train_df Dataframe with training data
    :dev_df Dataframe with evaluation data for early stopping
    :eval_df Dataframe with evaluation data after training is completed.
    :epochs Number of training epochs.
    :field Column used as the model's text input.
    """
    model = get_new_model(num_train_epochs=epochs)
    # Convert the train dataframe to the (text, labels) training format.
    train_df = train_df[[field, 'label']]
    train_df.columns = ['text', 'labels']
    print('shape of train_df =',train_df.shape)
    # Same conversion for the early-stopping dev set.
    dev_df = dev_df[[field, 'label']]
    dev_df.columns = ['text', 'labels']
    print('shape of dev_df =', dev_df.shape)
    model.train_model(train_df, eval_df=dev_df)
    # Same conversion for the final evaluation set.
    eval_df = eval_df[[field, 'label']]
    eval_df.columns = ['text', 'labels']
    print('shape of eval_df = ', eval_df.shape)
    eval(model, eval_df)
    return model
def prepare_train_dataframes(df, count=100, field='comment'):
    """Split the dataset into train, dev and eval parts.

    The parts are sampled down to the wanted size -- ``count`` training rows,
    an eval sample clamped to [10000, 20000] and a dev sample of half the eval
    size -- and each sample is pickled so any post-mortem can be reproduced.
    For ``field='target'`` the parts are first combined with their context.
    """
    train_df, _df = train_test_split(df, test_size=0.2)
    eval_df, dev_df = train_test_split(_df, test_size=0.3)
    if field == 'target':
        # Targets are merged with surrounding context before sampling.
        train_df = combine_with_context(train_df)
        dev_df = combine_with_context(dev_df)
        eval_df = combine_with_context(eval_df)
    # Clamp the evaluation sample between 10000 and 20000 items (same bounds
    # the original if/elif chain expressed).
    count_eval = min(max(count, 10000), 20000)
    count_dev = count_eval//2
    train_df = train_df.sample(n=count)
    dev_df = dev_df.sample(n=count_dev)
    eval_df = eval_df.sample(n=count_eval)
    # For the case of a post-mortem we save our samples.
    for name, part in (('train_df', train_df), ('dev_df', dev_df), ('eval_df', eval_df)):
        with open(f'{name}.pkl', 'wb') as handle:
            pickle.dump(part, handle)
    return train_df, dev_df, eval_df
# FIXME: allow for including parent_comment if required
def prepare_test_dataframe(df, count=None):
    """Return a dataframe with columns ``id`` and ``text`` for prediction.

    When *count* is given, a random sample of that size is taken first;
    otherwise the whole dataframe is used.
    """
    selected = df if count is None else df.sample(n=count)
    test_df = selected[['id', 'comment']]
    test_df.columns = ['id', 'text']
    return test_df
# Time estimations
@timer
def estimate_training(count=100, epochs=1):
    """Estimate total training time based on time spent training a subset of the training set."""
    header = f'Number of objects: {count}; number of epochs: {epochs}'
    print(header)
    logging.info(header)
    full_df = read_df_from_csv(TRAIN_FILE)
    n_total = full_df.shape[0]
    sub_train, _sub_dev, sub_eval = prepare_train_dataframes(full_df, count=count)
    model = get_new_model(num_train_epochs=epochs)
    # Time one full train + evaluate cycle on the subset.
    start = time.perf_counter()
    model.train_model(sub_train)
    eval(model, sub_eval)
    elapsed = time.perf_counter() - start
    report = (
        f'Training of {count} items for {epochs} epochs took {elapsed:0.2f} seconds.\n'
        f'Total time expected for training {n_total} items: '
        f'{hour_min_sec(elapsed*(n_total//count), hms=True)}.\n'
        f'Training 1000 items takes {hour_min_sec(elapsed*(1000//count), hms=True)}'
    )
    print(report)
    logging.info(report)
@timer
def estimate_predictions(count=100, model_dir='outputs'):
    """Estimate total prediction time from predicting a subset of the test set.

    :count size of the subset to time
    :model_dir checkpoint directory handed to load_best_model().  The original
        code called ``load_best_model()`` without the required ``dir``
        argument, which raised a TypeError; 'outputs' is assumed to be the
        training output directory -- TODO confirm the checkpoint location.
    """
    test_df = read_df_from_csv(TEST_FILE)
    number_of_records = test_df.shape[0]
    subtest_df = prepare_test_dataframe(test_df, count=count)
    model = load_best_model(model_dir)
    start = time.perf_counter()
    create_result_csv(model, subtest_df, filename='dummy.csv')
    end = time.perf_counter()
    elapsed_time = end - start
    msg = f'Prediction of {count} items took {elapsed_time:0.2f} seconds.\n' + \
        f'Total time expected for {number_of_records} items: {hour_min_sec(elapsed_time*(number_of_records//count), hms=True)}.\n' + \
        f'Predicting 1000 items takes {hour_min_sec(elapsed_time*(1000//count), hms=True)}.'
    print(msg)
    logging.info(msg)
# Delivery
# -- create CSV file with predictions
def create_result_csv(model, test_df, filename='sarcasm_predictions.csv'):
    """Write a CSV with columns ``id`` and ``label`` holding the model's
    predictions for every item in *test_df*."""
    labels = make_predictions(test_df, model)
    result = pd.DataFrame({'id': test_df['id'].to_list(), 'label': labels})
    result.to_csv(filename, index=False)
@timer
def make_predictions(test_df, model):
    """Predict a label for every row of *test_df* (timed via @timer)."""
    texts = test_df['text'].to_list()
    predictions, _raw_outputs = model.predict(texts)
    return predictions
def train_and_evaluate(count=EXAMPLES, epochs=EPOCHS, field='comment'):
    """Entry point: split the training data, train a model and evaluate it.

    :count number of training examples to sample
    :epochs number of training epochs
    :field dataframe column holding the input text
    """
    banner = f'Train and evaluate. {count} objects, {epochs} epochs.'
    print(banner)
    logging.info(banner)
    # Read training data and derive the train/dev/eval splits.
    full_df = read_df_from_csv(TRAIN_FILE)
    splits = prepare_train_dataframes(full_df, count=count, field=field)
    return train(*splits, epochs=epochs, field=field)
@timer
def check_markdown_impact(count=EXAMPLES, epochs=EPOCHS):
    """Compare model quality on comments with vs. without markdown.

    Bug fix vs. the original: ``prepare_train_dataframes`` returns three
    frames (train/dev/eval), not two, and ``train`` requires all three --
    the old 2-tuple unpacking raised a ValueError before training started.
    """
    # Read training data once and split it by markdown presence.
    dataset_df = read_df_from_csv(TRAIN_FILE)
    print("WITH MARKDOWN")
    logging.info("WITH MARKDOWN")
    markdown_df = dataset_df[dataset_df['comment'].apply(has_markdown)]
    train_df, dev_df, eval_df = prepare_train_dataframes(markdown_df, count=count)
    train(train_df, dev_df, eval_df, epochs=epochs)
    print("WITHOUT MARKDOWN")
    logging.info("WITHOUT MARKDOWN")
    no_markdown_df = dataset_df[dataset_df['comment'].apply(lambda x: not has_markdown(x))]
    train_df, dev_df, eval_df = prepare_train_dataframes(no_markdown_df, count=count)
    train(train_df, dev_df, eval_df, epochs=epochs)
def load_best_model(dir, num_train_epochs=EPOCHS):
    """Load the best model coming out of training.

    :dir checkpoint directory of the saved model (name kept for callers,
        even though it shadows the builtin ``dir``)
    :num_train_epochs epoch count recorded in the model args
    """
    model_args = ClassificationArgs(
        # Bug fix: the parameter was previously ignored (EPOCHS was hard-coded).
        num_train_epochs=num_train_epochs,
        eval_batch_size=BATCH_SIZE,
        overwrite_output_dir=False)
    return ClassificationModel(
        "roberta",
        dir,
        args=model_args,
        use_cuda=True
    )
def check_best_model(dir, sample=10000):
    """Sanity-check a saved checkpoint by evaluating it on a random sample of
    the original training data; returns the loaded model."""
    model = load_best_model(dir)
    sample_df = pd.read_csv(TRAIN_FILE).sample(n=sample)[['comment', 'label']]
    sample_df.columns = ['text', 'labels']
    eval(model, sample_df)
    return model
if __name__ == '__main__':
    ### Default action: train and evaluate a model on the sarcasm training data
    train_and_evaluate(count=4400, epochs=20, field='comment')
    ### Utility: check the impact of markup in text on the result
    #check_markdown_impact(count=3000, epochs=7)
    ### Utility: make an estimation on how long it will take to make predictions
    #estimate_predictions()
    ### Utility: make an estimation on how long it will take to train a model
    #estimate_training(count=500, epochs=7)
    ### Utility/verification: check the manually selected 'best model' (see outcome_exploration.ipynb!)
    # by evaluating it on a part of the original dataset.
    # check_best_model('outputs/checkpoint-36000')
    ### Create a prediction of the outcome of the test data and write it to CSV
    # best_model = load_best_model('top_outputs/checkpoint-10000')
    #final_test_df = read_df_from_csv(TEST_FILE)
    #final_test_df = final_test_df[['id', 'comment']]
    #final_test_df.columns = ['id', 'text']
    #create_result_csv(best_model, final_test_df)
| [
"logging.basicConfig",
"simpletransformers.classification.ClassificationArgs",
"pickle.dump",
"pandas.read_csv",
"sklearn.model_selection.train_test_split",
"pandas.StringDtype",
"utils.has_markdown",
"time.perf_counter",
"functools.wraps",
"utils.combine_with_context",
"simpletransformers.class... | [((525, 839), 'simpletransformers.classification.ClassificationArgs', 'ClassificationArgs', ([], {'eval_batch_size': 'BATCH_SIZE', 'train_batch_size': 'BATCH_SIZE', 'evaluate_during_training': '(True)', 'evaluate_during_training_verbose': '(True)', 'use_multiprocessing': '(False)', 'use_multiprocessing_for_evaluation': '(False)', 'overwrite_output_dir': '(True)', 'save_eval_checkpoints': '(True)', 'save_model_every_epoch': '(True)'}), '(eval_batch_size=BATCH_SIZE, train_batch_size=BATCH_SIZE,\n evaluate_during_training=True, evaluate_during_training_verbose=True,\n use_multiprocessing=False, use_multiprocessing_for_evaluation=False,\n overwrite_output_dir=True, save_eval_checkpoints=True,\n save_model_every_epoch=True)\n', (543, 839), False, 'from simpletransformers.classification import ClassificationModel, ClassificationArgs\n'), ((912, 1019), 'logging.basicConfig', 'logging.basicConfig', ([], {'filename': '"""sarcasm_run.log"""', 'level': 'logging.DEBUG', 'format': '"""%(asctime)s %(message)s"""'}), "(filename='sarcasm_run.log', level=logging.DEBUG, format\n ='%(asctime)s %(message)s')\n", (931, 1019), False, 'import logging\n'), ((1135, 1156), 'functools.wraps', 'functools.wraps', (['func'], {}), '(func)\n', (1150, 1156), False, 'import functools\n'), ((3607, 3631), 'logging.info', 'logging.info', (['MODEL_ARGS'], {}), '(MODEL_ARGS)\n', (3619, 3631), False, 'import logging\n'), ((3732, 3819), 'simpletransformers.classification.ClassificationModel', 'ClassificationModel', (['"""roberta"""', '"""roberta-base"""'], {'args': 'MODEL_ARGS', 'use_cuda': 'use_cuda'}), "('roberta', 'roberta-base', args=MODEL_ARGS, use_cuda=\n use_cuda)\n", (3751, 3819), False, 'from simpletransformers.classification import ClassificationModel, ClassificationArgs\n'), ((4215, 4244), 'logging.info', 'logging.info', (['metrics_message'], {}), '(metrics_message)\n', (4227, 4244), False, 'import logging\n'), ((5657, 5692), 
'sklearn.model_selection.train_test_split', 'train_test_split', (['df'], {'test_size': '(0.2)'}), '(df, test_size=0.2)\n', (5673, 5692), False, 'from sklearn.model_selection import train_test_split\n'), ((5715, 5751), 'sklearn.model_selection.train_test_split', 'train_test_split', (['_df'], {'test_size': '(0.3)'}), '(_df, test_size=0.3)\n', (5731, 5751), False, 'from sklearn.model_selection import train_test_split\n'), ((7207, 7224), 'logging.info', 'logging.info', (['msg'], {}), '(msg)\n', (7219, 7224), False, 'import logging\n'), ((7464, 7483), 'time.perf_counter', 'time.perf_counter', ([], {}), '()\n', (7481, 7483), False, 'import time\n'), ((7584, 7603), 'time.perf_counter', 'time.perf_counter', ([], {}), '()\n', (7601, 7603), False, 'import time\n'), ((7992, 8009), 'logging.info', 'logging.info', (['msg'], {}), '(msg)\n', (8004, 8009), False, 'import logging\n'), ((8337, 8356), 'time.perf_counter', 'time.perf_counter', ([], {}), '()\n', (8354, 8356), False, 'import time\n'), ((8430, 8449), 'time.perf_counter', 'time.perf_counter', ([], {}), '()\n', (8447, 8449), False, 'import time\n'), ((8814, 8831), 'logging.info', 'logging.info', (['msg'], {}), '(msg)\n', (8826, 8831), False, 'import logging\n'), ((9610, 9627), 'logging.info', 'logging.info', (['msg'], {}), '(msg)\n', (9622, 9627), False, 'import logging\n'), ((10007, 10036), 'logging.info', 'logging.info', (['"""WITH MARKDOWN"""'], {}), "('WITH MARKDOWN')\n", (10019, 10036), False, 'import logging\n'), ((10318, 10350), 'logging.info', 'logging.info', (['"""WITHOUT MARKDOWN"""'], {}), "('WITHOUT MARKDOWN')\n", (10330, 10350), False, 'import logging\n'), ((10698, 10801), 'simpletransformers.classification.ClassificationArgs', 'ClassificationArgs', ([], {'num_train_epochs': 'EPOCHS', 'eval_batch_size': 'BATCH_SIZE', 'overwrite_output_dir': '(False)'}), '(num_train_epochs=EPOCHS, eval_batch_size=BATCH_SIZE,\n overwrite_output_dir=False)\n', (10716, 10801), False, 'from simpletransformers.classification import 
ClassificationModel, ClassificationArgs\n'), ((10835, 10902), 'simpletransformers.classification.ClassificationModel', 'ClassificationModel', (['"""roberta"""', 'dir'], {'args': 'model_args', 'use_cuda': '(True)'}), "('roberta', dir, args=model_args, use_cuda=True)\n", (10854, 10902), False, 'from simpletransformers.classification import ClassificationModel, ClassificationArgs\n'), ((11044, 11067), 'pandas.read_csv', 'pd.read_csv', (['TRAIN_FILE'], {}), '(TRAIN_FILE)\n', (11055, 11067), True, 'import pandas as pd\n'), ((1211, 1230), 'time.perf_counter', 'time.perf_counter', ([], {}), '()\n', (1228, 1230), False, 'import time\n'), ((1283, 1302), 'time.perf_counter', 'time.perf_counter', ([], {}), '()\n', (1300, 1302), False, 'import time\n'), ((1757, 1783), 'logging.info', 'logging.info', (['f"""{message}"""'], {}), "(f'{message}')\n", (1769, 1783), False, 'import logging\n'), ((5798, 5828), 'utils.combine_with_context', 'combine_with_context', (['train_df'], {}), '(train_df)\n', (5818, 5828), False, 'from utils import hour_min_sec, has_markdown, combine_with_context\n'), ((5846, 5874), 'utils.combine_with_context', 'combine_with_context', (['dev_df'], {}), '(dev_df)\n', (5866, 5874), False, 'from utils import hour_min_sec, has_markdown, combine_with_context\n'), ((5893, 5922), 'utils.combine_with_context', 'combine_with_context', (['eval_df'], {}), '(eval_df)\n', (5913, 5922), False, 'from utils import hour_min_sec, has_markdown, combine_with_context\n'), ((6399, 6423), 'pickle.dump', 'pickle.dump', (['train_df', 'f'], {}), '(train_df, f)\n', (6410, 6423), False, 'import pickle\n'), ((6472, 6494), 'pickle.dump', 'pickle.dump', (['dev_df', 'f'], {}), '(dev_df, f)\n', (6483, 6494), False, 'import pickle\n'), ((6544, 6567), 'pickle.dump', 'pickle.dump', (['eval_df', 'f'], {}), '(eval_df, f)\n', (6555, 6567), False, 'import pickle\n'), ((1674, 1710), 'utils.hour_min_sec', 'hour_min_sec', (['elapsed_time'], {'hms': '(True)'}), '(elapsed_time, hms=True)\n', (1686, 1710), 
False, 'from utils import hour_min_sec, has_markdown, combine_with_context\n'), ((2124, 2140), 'pandas.StringDtype', 'pd.StringDtype', ([], {}), '()\n', (2138, 2140), True, 'import pandas as pd\n'), ((2167, 2183), 'pandas.StringDtype', 'pd.StringDtype', ([], {}), '()\n', (2181, 2183), True, 'import pandas as pd\n'), ((2215, 2231), 'pandas.StringDtype', 'pd.StringDtype', ([], {}), '()\n', (2229, 2231), True, 'import pandas as pd\n'), ((7920, 7974), 'utils.hour_min_sec', 'hour_min_sec', (['(elapsed_time * (1000 // count))'], {'hms': '(True)'}), '(elapsed_time * (1000 // count), hms=True)\n', (7932, 7974), False, 'from utils import hour_min_sec, has_markdown, combine_with_context\n'), ((8741, 8795), 'utils.hour_min_sec', 'hour_min_sec', (['(elapsed_time * (1000 // count))'], {'hms': '(True)'}), '(elapsed_time * (1000 // count), hms=True)\n', (8753, 8795), False, 'from utils import hour_min_sec, has_markdown, combine_with_context\n'), ((7808, 7875), 'utils.hour_min_sec', 'hour_min_sec', (['(elapsed_time * (number_of_records // count))'], {'hms': '(True)'}), '(elapsed_time * (number_of_records // count), hms=True)\n', (7820, 7875), False, 'from utils import hour_min_sec, has_markdown, combine_with_context\n'), ((8627, 8694), 'utils.hour_min_sec', 'hour_min_sec', (['(elapsed_time * (number_of_records // count))'], {'hms': '(True)'}), '(elapsed_time * (number_of_records // count), hms=True)\n', (8639, 8694), False, 'from utils import hour_min_sec, has_markdown, combine_with_context\n'), ((10425, 10440), 'utils.has_markdown', 'has_markdown', (['x'], {}), '(x)\n', (10437, 10440), False, 'from utils import hour_min_sec, has_markdown, combine_with_context\n')] |
from django.dispatch import Signal
# Sent when a user creates their own Visitor -- a receiver can use it to
# send the email containing the token.
# Signal kwargs: visitor (the Visitor that was just created).
self_service_visitor_created = Signal()
| [
"django.dispatch.Signal"
] | [((179, 187), 'django.dispatch.Signal', 'Signal', ([], {}), '()\n', (185, 187), False, 'from django.dispatch import Signal\n')] |
#python3
import os
import sys
import statistics as s
import pandas as pd
from find_loss import *
##########################
## statistics on orthogroups
##########################
# Argument check (the original version was commented out and claimed 4
# arguments although 5 are read below).
if len(sys.argv) < 6:
    print("Need 5 arguments: [Orthologous group input file] "
          "[LECA orthologous groups input list] [method name] "
          "[dollo tree file] [stats out file name]")
    sys.exit()
OG_file = sys.argv[1]    # orthologous groups file
leca_file = sys.argv[2]  # LECA orthologous groups file
met_name = sys.argv[3]   # name of the orthology method
dollo_tree = sys.argv[4] # Dollo parsimony tree used for the loss counts
out_file = sys.argv[5]   # output CSV
# Validate both input files (the original loop checked sys.argv[1] twice and
# leaked the open handles used for the existence test).
for path in (OG_file, leca_file):
    try:
        open(path).close()
    except IOError:
        print("No such input file"); sys.exit()
    if os.path.getsize(path) <= 1:
        print(path, "file is empty"); sys.exit()
# Read the LECA orthologous-group ids into a lookup dict.
leca_og_d = {}
with open(leca_file, "r") as leca_handle:
    for line in leca_handle:
        leca_og_d[line.rstrip()] = True
print(len(leca_og_d))
def counts_OG(OG_file, leca_og_d):
    """Collect summary statistics over the orthologous groups (OGs) in *OG_file*.

    Each line of *OG_file* is parsed as ``OG_id: member1 member2 ...`` (split
    on ':' and then on whitespace).  The first four characters of a member id
    are treated as its species code -- assumption based on the ``org[0:4]``
    slice below; confirm with the input format.  *leca_og_d* maps LECA OG ids
    to True.  Returns a dict of statistic name -> value; its insertion order
    determines the row order of the output CSV.
    """
    # Hard-coded totals; presumably the total protein counts of the 165- and
    # 167-species datasets -- TODO confirm against the source proteomes.
    total_165=2811230
    total_167=2865661
    count_dict = {} #contains all statistics
    og_dict = {} #per OG id: list of unique species codes seen in that OG
    #count_dict["Number of annotated OGs (incl. singlets)"] = 0 # count the amount of OGs inferred from method
    #count_dict["count single protein OGs"] = 0 #all OGs with only 1 protein
    count_dict["Number of OGs"] = 0 #count OG with more than 1 protein
    seq_counts = [] #list for mean and median with sequence counts per OG
    seq_leca_counts = [] #sequence counts for OGs in LECA
    count_species_per_OG = [] #unique species per OG counting
    total_seqs_assigned = 0 #total proteins assigned to an OG, but a real group and not a singlet
    total_seqs = 0 #total proteins assigned in the dataset
    leca_og = 0 #counts of LECA OGs
    OG_open = open(OG_file, "r")  # NOTE(review): handle is never closed explicitly
    for lines in OG_open:
        line = lines.rstrip().split(":")
        OG_id = line[0]
        orgsL = line[1].split()
        total_seqs += len(orgsL) #total proteins in orthology annotated to OG (singlet or not)
        #count_dict["Number of annotated OGs (incl. singlets)"] += 1 #every line is a OG from method (singlets and rest)
        if OG_id in leca_og_d:
            seq_leca_counts += [len(orgsL)] #total proteins in orthology annoted to LECA OG
            leca_og += 1 #total leca_ogs
        if len(orgsL) > 1: # a real OG (aka group of more than 1 sequence)
            og_dict[OG_id] = []
            count_dict["Number of OGs"] += 1
            seq_counts += [len(orgsL)] #counts of sequences in og (for median and mean)
            total_seqs_assigned += len(orgsL)
            for org in orgsL:
                org_id = org[0:4]  # species code = first 4 characters of the member id
                if org_id not in og_dict[OG_id]:
                    og_dict[OG_id] += [org_id] #count per OG the # of species
        #else: #single protein OGs, not real "group"
        #    count_dict["count single protein OGs"] +=1
    for key, values in og_dict.items():
        count_species_per_OG += [len(values)]
    # Highest number of distinct species in any OG; used below to decide which
    # dataset (165 or 167 species) these OGs come from.
    max_species = max(count_species_per_OG)
    count_dict["Median OG size"] = s.median(seq_counts)
    count_dict["Mean OG size"] = round(s.mean(seq_counts),1)
    #count_dict["max OG size"] = max(seq_counts)
    #count_dict["min OG size"] = min(seq_counts)
    #count_dict["single species OGs"] = count_species_per_OG.count(1)
    #count_dict[" ".join(["OGs with all", str(max_species), "species present"])] = count_species_per_OG.count(max_species)
    if max_species == 165:
        count_dict["% proteins assigned by orthology"] = round((float(total_seqs)/float(total_165))*100,1)
        count_dict["% proteins assigned to LECA OG from total"] = round((float(sum(seq_leca_counts))/float(total_165))*100,1)
    if max_species == 167:
        count_dict["% proteins assigned by orthology"] = round((float(total_seqs)/float(total_167))*100,1)
        count_dict["% proteins assigned to LECA OG from total"] = round((float(sum(seq_leca_counts))/float(total_167))*100,1)
    #count_dict["Total proteins"] = total_seqs
    count_dict["% assigned proteins to OGs"] = round((float(total_seqs_assigned)/float(total_seqs))*100, 1)
    count_dict["Number LECA OGs"] = leca_og
    # NOTE(review): the statistics below raise if there are fewer than two LECA
    # OGs (s.stdev needs >= 2 data points; s.median/max need >= 1).
    count_dict["Median LECA OG size"] = s.median(seq_leca_counts)
    count_dict["Mean LECA OG size"] = round(s.mean(seq_leca_counts),1)
    count_dict["stdev LECA OG size"] = round(s.stdev(seq_leca_counts),1)
    count_dict["Max LECA OG size"] = max(seq_leca_counts)
    count_dict["% to LECA OG assigned proteins"] =round((float(sum(seq_leca_counts))/float(total_seqs_assigned))*100,1)
    return count_dict
# Collect OG statistics and merge in the loss counts from the Dollo tree.
dict_out = counts_OG(OG_file, leca_og_d)
# loss_dict() returns (loss counts, independent loss distributions); we only
# need the counts.  Bind to a new name so the imported loss_dict function is
# not shadowed (the original rebound the name over the function).
losses, _ = loss_dict(dollo_tree, leca_file)
dict_out.update(losses)
dict_df = pd.DataFrame.from_dict(dict_out, orient='index', columns=[str(met_name)])
print(dict_df)
if os.path.exists(out_file):
    # Append this method's column to the already existing stats table.
    df = pd.read_csv(out_file, sep=",", index_col=0)
    df_out = pd.concat([df, dict_df], axis=1)
    df_out.to_csv(out_file)
else:
    dict_df.to_csv(out_file)
| [
"statistics.mean",
"os.path.exists",
"os.path.getsize",
"statistics.stdev",
"pandas.read_csv",
"statistics.median",
"sys.exit",
"pandas.concat"
] | [((4851, 4875), 'os.path.exists', 'os.path.exists', (['out_file'], {}), '(out_file)\n', (4865, 4875), False, 'import os\n'), ((3052, 3072), 'statistics.median', 's.median', (['seq_counts'], {}), '(seq_counts)\n', (3060, 3072), True, 'import statistics as s\n'), ((4185, 4210), 'statistics.median', 's.median', (['seq_leca_counts'], {}), '(seq_leca_counts)\n', (4193, 4210), True, 'import statistics as s\n'), ((4886, 4929), 'pandas.read_csv', 'pd.read_csv', (['out_file'], {'sep': '""","""', 'index_col': '(0)'}), "(out_file, sep=',', index_col=0)\n", (4897, 4929), True, 'import pandas as pd\n'), ((4947, 4979), 'pandas.concat', 'pd.concat', (['[df, dict_df]'], {'axis': '(1)'}), '([df, dict_df], axis=1)\n', (4956, 4979), True, 'import pandas as pd\n'), ((655, 665), 'sys.exit', 'sys.exit', ([], {}), '()\n', (663, 665), False, 'import sys\n'), ((714, 735), 'os.path.getsize', 'os.path.getsize', (['file'], {}), '(file)\n', (729, 735), False, 'import os\n'), ((780, 790), 'sys.exit', 'sys.exit', ([], {}), '()\n', (788, 790), False, 'import sys\n'), ((3112, 3130), 'statistics.mean', 's.mean', (['seq_counts'], {}), '(seq_counts)\n', (3118, 3130), True, 'import statistics as s\n'), ((4255, 4278), 'statistics.mean', 's.mean', (['seq_leca_counts'], {}), '(seq_leca_counts)\n', (4261, 4278), True, 'import statistics as s\n'), ((4327, 4351), 'statistics.stdev', 's.stdev', (['seq_leca_counts'], {}), '(seq_leca_counts)\n', (4334, 4351), True, 'import statistics as s\n')] |
# -*- coding: utf-8 -*-
# Generated by Django 1.11.5 on 2017-10-17 06:04
from __future__ import unicode_literals
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    """Auto-generated migration: introduces the table/column mapping schema
    (Tables, Columns, Column and the Relation_*/No_Relation_* models) and
    removes the obsolete Skill/SkillCategory models.  Do not edit the
    operations by hand."""

    dependencies = [
        ('api', '0007_auto_20171005_1713'),
    ]

    operations = [
        migrations.CreateModel(
            name='Column',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('table_name', models.CharField(max_length=100)),
                ('column_name', models.CharField(max_length=100)),
            ],
        ),
        migrations.CreateModel(
            name='Columns',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('name', models.CharField(max_length=100)),
                ('name_id', models.CharField(max_length=100)),
            ],
        ),
        migrations.CreateModel(
            name='No_Relation_Columns',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('column', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='api.Column')),
            ],
        ),
        migrations.CreateModel(
            name='No_Relation_Options',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('grep_strings', models.CharField(max_length=100)),
                ('no_relation_column', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='api.No_Relation_Columns')),
            ],
        ),
        migrations.CreateModel(
            name='No_Relation_Table',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('priority', models.IntegerField()),
                ('column', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='api.Column')),
                ('columns', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='api.Columns')),
            ],
        ),
        migrations.CreateModel(
            name='Relation_Columns',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('column', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='api.Column')),
            ],
        ),
        migrations.CreateModel(
            name='Relation_Options',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('condition', models.CharField(max_length=100)),
                ('relation_column', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='api.Relation_Columns')),
            ],
        ),
        migrations.CreateModel(
            name='Relation_Table',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('priority', models.IntegerField()),
                ('column', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='api.Column')),
                ('columns', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='api.Columns')),
            ],
        ),
        migrations.CreateModel(
            name='Tables',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('name', models.CharField(max_length=100)),
                ('name_id', models.CharField(max_length=100)),
            ],
        ),
        migrations.RemoveField(
            model_name='skill',
            name='category',
        ),
        migrations.DeleteModel(
            name='Skill',
        ),
        migrations.DeleteModel(
            name='SkillCategory',
        ),
        migrations.AddField(
            model_name='relation_columns',
            name='relation_table',
            field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='api.Relation_Table'),
        ),
        migrations.AddField(
            model_name='no_relation_columns',
            name='no_relation_table',
            field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='api.No_Relation_Table'),
        ),
    ]
| [
"django.db.migrations.DeleteModel",
"django.db.models.ForeignKey",
"django.db.models.IntegerField",
"django.db.models.AutoField",
"django.db.migrations.RemoveField",
"django.db.models.CharField"
] | [((3922, 3981), 'django.db.migrations.RemoveField', 'migrations.RemoveField', ([], {'model_name': '"""skill"""', 'name': '"""category"""'}), "(model_name='skill', name='category')\n", (3944, 3981), False, 'from django.db import migrations, models\n'), ((4026, 4062), 'django.db.migrations.DeleteModel', 'migrations.DeleteModel', ([], {'name': '"""Skill"""'}), "(name='Skill')\n", (4048, 4062), False, 'from django.db import migrations, models\n'), ((4095, 4139), 'django.db.migrations.DeleteModel', 'migrations.DeleteModel', ([], {'name': '"""SkillCategory"""'}), "(name='SkillCategory')\n", (4117, 4139), False, 'from django.db import migrations, models\n'), ((4289, 4381), 'django.db.models.ForeignKey', 'models.ForeignKey', ([], {'on_delete': 'django.db.models.deletion.CASCADE', 'to': '"""api.Relation_Table"""'}), "(on_delete=django.db.models.deletion.CASCADE, to=\n 'api.Relation_Table')\n", (4306, 4381), False, 'from django.db import migrations, models\n'), ((4520, 4615), 'django.db.models.ForeignKey', 'models.ForeignKey', ([], {'on_delete': 'django.db.models.deletion.CASCADE', 'to': '"""api.No_Relation_Table"""'}), "(on_delete=django.db.models.deletion.CASCADE, to=\n 'api.No_Relation_Table')\n", (4537, 4615), False, 'from django.db import migrations, models\n'), ((424, 517), 'django.db.models.AutoField', 'models.AutoField', ([], {'auto_created': '(True)', 'primary_key': '(True)', 'serialize': '(False)', 'verbose_name': '"""ID"""'}), "(auto_created=True, primary_key=True, serialize=False,\n verbose_name='ID')\n", (440, 517), False, 'from django.db import migrations, models\n'), ((547, 579), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(100)'}), '(max_length=100)\n', (563, 579), False, 'from django.db import migrations, models\n'), ((614, 646), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(100)'}), '(max_length=100)\n', (630, 646), False, 'from django.db import migrations, models\n'), ((779, 872), 
'django.db.models.AutoField', 'models.AutoField', ([], {'auto_created': '(True)', 'primary_key': '(True)', 'serialize': '(False)', 'verbose_name': '"""ID"""'}), "(auto_created=True, primary_key=True, serialize=False,\n verbose_name='ID')\n", (795, 872), False, 'from django.db import migrations, models\n'), ((896, 928), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(100)'}), '(max_length=100)\n', (912, 928), False, 'from django.db import migrations, models\n'), ((959, 991), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(100)'}), '(max_length=100)\n', (975, 991), False, 'from django.db import migrations, models\n'), ((1136, 1229), 'django.db.models.AutoField', 'models.AutoField', ([], {'auto_created': '(True)', 'primary_key': '(True)', 'serialize': '(False)', 'verbose_name': '"""ID"""'}), "(auto_created=True, primary_key=True, serialize=False,\n verbose_name='ID')\n", (1152, 1229), False, 'from django.db import migrations, models\n'), ((1255, 1334), 'django.db.models.ForeignKey', 'models.ForeignKey', ([], {'on_delete': 'django.db.models.deletion.CASCADE', 'to': '"""api.Column"""'}), "(on_delete=django.db.models.deletion.CASCADE, to='api.Column')\n", (1272, 1334), False, 'from django.db import migrations, models\n'), ((1479, 1572), 'django.db.models.AutoField', 'models.AutoField', ([], {'auto_created': '(True)', 'primary_key': '(True)', 'serialize': '(False)', 'verbose_name': '"""ID"""'}), "(auto_created=True, primary_key=True, serialize=False,\n verbose_name='ID')\n", (1495, 1572), False, 'from django.db import migrations, models\n'), ((1604, 1636), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(100)'}), '(max_length=100)\n', (1620, 1636), False, 'from django.db import migrations, models\n'), ((1678, 1775), 'django.db.models.ForeignKey', 'models.ForeignKey', ([], {'on_delete': 'django.db.models.deletion.CASCADE', 'to': '"""api.No_Relation_Columns"""'}), 
"(on_delete=django.db.models.deletion.CASCADE, to=\n 'api.No_Relation_Columns')\n", (1695, 1775), False, 'from django.db import migrations, models\n'), ((1913, 2006), 'django.db.models.AutoField', 'models.AutoField', ([], {'auto_created': '(True)', 'primary_key': '(True)', 'serialize': '(False)', 'verbose_name': '"""ID"""'}), "(auto_created=True, primary_key=True, serialize=False,\n verbose_name='ID')\n", (1929, 2006), False, 'from django.db import migrations, models\n'), ((2034, 2055), 'django.db.models.IntegerField', 'models.IntegerField', ([], {}), '()\n', (2053, 2055), False, 'from django.db import migrations, models\n'), ((2085, 2164), 'django.db.models.ForeignKey', 'models.ForeignKey', ([], {'on_delete': 'django.db.models.deletion.CASCADE', 'to': '"""api.Column"""'}), "(on_delete=django.db.models.deletion.CASCADE, to='api.Column')\n", (2102, 2164), False, 'from django.db import migrations, models\n'), ((2195, 2280), 'django.db.models.ForeignKey', 'models.ForeignKey', ([], {'on_delete': 'django.db.models.deletion.CASCADE', 'to': '"""api.Columns"""'}), "(on_delete=django.db.models.deletion.CASCADE, to='api.Columns'\n )\n", (2212, 2280), False, 'from django.db import migrations, models\n'), ((2417, 2510), 'django.db.models.AutoField', 'models.AutoField', ([], {'auto_created': '(True)', 'primary_key': '(True)', 'serialize': '(False)', 'verbose_name': '"""ID"""'}), "(auto_created=True, primary_key=True, serialize=False,\n verbose_name='ID')\n", (2433, 2510), False, 'from django.db import migrations, models\n'), ((2536, 2615), 'django.db.models.ForeignKey', 'models.ForeignKey', ([], {'on_delete': 'django.db.models.deletion.CASCADE', 'to': '"""api.Column"""'}), "(on_delete=django.db.models.deletion.CASCADE, to='api.Column')\n", (2553, 2615), False, 'from django.db import migrations, models\n'), ((2757, 2850), 'django.db.models.AutoField', 'models.AutoField', ([], {'auto_created': '(True)', 'primary_key': '(True)', 'serialize': '(False)', 'verbose_name': 
'"""ID"""'}), "(auto_created=True, primary_key=True, serialize=False,\n verbose_name='ID')\n", (2773, 2850), False, 'from django.db import migrations, models\n'), ((2879, 2911), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(100)'}), '(max_length=100)\n', (2895, 2911), False, 'from django.db import migrations, models\n'), ((2950, 3044), 'django.db.models.ForeignKey', 'models.ForeignKey', ([], {'on_delete': 'django.db.models.deletion.CASCADE', 'to': '"""api.Relation_Columns"""'}), "(on_delete=django.db.models.deletion.CASCADE, to=\n 'api.Relation_Columns')\n", (2967, 3044), False, 'from django.db import migrations, models\n'), ((3179, 3272), 'django.db.models.AutoField', 'models.AutoField', ([], {'auto_created': '(True)', 'primary_key': '(True)', 'serialize': '(False)', 'verbose_name': '"""ID"""'}), "(auto_created=True, primary_key=True, serialize=False,\n verbose_name='ID')\n", (3195, 3272), False, 'from django.db import migrations, models\n'), ((3300, 3321), 'django.db.models.IntegerField', 'models.IntegerField', ([], {}), '()\n', (3319, 3321), False, 'from django.db import migrations, models\n'), ((3351, 3430), 'django.db.models.ForeignKey', 'models.ForeignKey', ([], {'on_delete': 'django.db.models.deletion.CASCADE', 'to': '"""api.Column"""'}), "(on_delete=django.db.models.deletion.CASCADE, to='api.Column')\n", (3368, 3430), False, 'from django.db import migrations, models\n'), ((3461, 3546), 'django.db.models.ForeignKey', 'models.ForeignKey', ([], {'on_delete': 'django.db.models.deletion.CASCADE', 'to': '"""api.Columns"""'}), "(on_delete=django.db.models.deletion.CASCADE, to='api.Columns'\n )\n", (3478, 3546), False, 'from django.db import migrations, models\n'), ((3673, 3766), 'django.db.models.AutoField', 'models.AutoField', ([], {'auto_created': '(True)', 'primary_key': '(True)', 'serialize': '(False)', 'verbose_name': '"""ID"""'}), "(auto_created=True, primary_key=True, serialize=False,\n verbose_name='ID')\n", (3689, 3766), False, 
'from django.db import migrations, models\n'), ((3790, 3822), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(100)'}), '(max_length=100)\n', (3806, 3822), False, 'from django.db import migrations, models\n'), ((3853, 3885), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(100)'}), '(max_length=100)\n', (3869, 3885), False, 'from django.db import migrations, models\n')] |
# Copyright (C) 2020 Intel Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions
# and limitations under the License.
import numpy as np
import pycocotools.mask as mask_util
from ..utils.misc import to_numpy
import torch
import torch.nn.functional as F
def mask2result(det_bboxes,
det_labels,
det_masks,
num_classes,
mask_thr_binary=0.5,
img_size=None):
masks = det_masks
bboxes = det_bboxes[:, :4]
labels = det_labels
if isinstance(masks, np.ndarray):
masks = torch.tensor(masks)
bboxes = torch.tensor(bboxes)
labels = torch.tensor(labels)
cls_masks = [[] for _ in range(num_classes)]
for bbox, label, mask in zip(bboxes, labels, masks):
mask = mask[None, :, :]
x0_int, y0_int = 0, 0
x1_int, y1_int = img_size[::-1]
img_y = torch.arange(
y0_int, y1_int, device=mask.device, dtype=torch.float32) + 0.5
img_x = torch.arange(
x0_int, x1_int, device=mask.device, dtype=torch.float32) + 0.5
x0, y0, x1, y1 = bbox
img_y = (img_y - y0) / (y1 - y0) * 2 - 1
img_x = (img_x - x0) / (x1 - x0) * 2 - 1
if torch.isinf(img_x).any():
inds = torch.where(torch.isinf(img_x))
img_x[inds] = 0
if torch.isinf(img_y).any():
inds = torch.where(torch.isinf(img_y))
img_y[inds] = 0
gx = img_x[None, :].expand(img_y.size(0), img_x.size(0))
gy = img_y[:, None].expand(img_y.size(0), img_x.size(0))
grid = torch.stack([gx, gy], dim=2)
img_masks = F.grid_sample(
mask.to(dtype=torch.float32)[None, :, :, :], grid[None, :, :, :], align_corners=False)
mask = img_masks[0, 0, :, :]
mask = (mask >= mask_thr_binary).to(dtype=torch.uint8)
cls_masks[label].append(to_numpy(mask))
return cls_masks
| [
"torch.tensor",
"torch.isinf",
"torch.stack",
"torch.arange"
] | [((1034, 1053), 'torch.tensor', 'torch.tensor', (['masks'], {}), '(masks)\n', (1046, 1053), False, 'import torch\n'), ((1071, 1091), 'torch.tensor', 'torch.tensor', (['bboxes'], {}), '(bboxes)\n', (1083, 1091), False, 'import torch\n'), ((1109, 1129), 'torch.tensor', 'torch.tensor', (['labels'], {}), '(labels)\n', (1121, 1129), False, 'import torch\n'), ((2059, 2087), 'torch.stack', 'torch.stack', (['[gx, gy]'], {'dim': '(2)'}), '([gx, gy], dim=2)\n', (2070, 2087), False, 'import torch\n'), ((1358, 1427), 'torch.arange', 'torch.arange', (['y0_int', 'y1_int'], {'device': 'mask.device', 'dtype': 'torch.float32'}), '(y0_int, y1_int, device=mask.device, dtype=torch.float32)\n', (1370, 1427), False, 'import torch\n'), ((1463, 1532), 'torch.arange', 'torch.arange', (['x0_int', 'x1_int'], {'device': 'mask.device', 'dtype': 'torch.float32'}), '(x0_int, x1_int, device=mask.device, dtype=torch.float32)\n', (1475, 1532), False, 'import torch\n'), ((1692, 1710), 'torch.isinf', 'torch.isinf', (['img_x'], {}), '(img_x)\n', (1703, 1710), False, 'import torch\n'), ((1749, 1767), 'torch.isinf', 'torch.isinf', (['img_x'], {}), '(img_x)\n', (1760, 1767), False, 'import torch\n'), ((1808, 1826), 'torch.isinf', 'torch.isinf', (['img_y'], {}), '(img_y)\n', (1819, 1826), False, 'import torch\n'), ((1865, 1883), 'torch.isinf', 'torch.isinf', (['img_y'], {}), '(img_y)\n', (1876, 1883), False, 'import torch\n')] |
# 25 February 2019 - <NAME> <<EMAIL>>
import sys
from src.text_classifier_deprn_rates import DeprnPredictor
predict = DeprnPredictor()
print('Evaluate using user input.\n')
user_description = ['']
print('\"QQ\" to quit.')
print('\"CR\" to see classification report.')
print('Otherwise...')
while True:
user_description = input('Enter a depreciable asset description: \n')
if user_description == 'QQ':
print('====================GOODBYE====================\n')
sys.exit()
elif user_description == 'CR':
predict.report_results()
else:
result, predicted_account = predict.predict_description(user_description)
rate_perc = str(result.rate_perc_text) + '% prime cost'
life = str(result.life_years) + ' years effective life'
tax_cat = result.tax_cat
print(f'Input from user:\n\t {user_description}')
print(f'Result:')
print(f'\taccount: \t\t\t{predicted_account}')
print(f'\tdeprn rate: \t\t{rate_perc}')
print(f'\teffective life: \t{life}')
print(f'\ttax category: \t\t{tax_cat}')
print('END of Result')
print()
| [
"src.text_classifier_deprn_rates.DeprnPredictor",
"sys.exit"
] | [((122, 138), 'src.text_classifier_deprn_rates.DeprnPredictor', 'DeprnPredictor', ([], {}), '()\n', (136, 138), False, 'from src.text_classifier_deprn_rates import DeprnPredictor\n'), ((489, 499), 'sys.exit', 'sys.exit', ([], {}), '()\n', (497, 499), False, 'import sys\n')] |
#!/usr/bin/env python
#
# Copyright 2019 DFKI GmbH.
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to permit
# persons to whom the Software is furnished to do so, subject to the
# following conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN
# NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM,
# DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
# OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
# USE OR OTHER DEALINGS IN THE SOFTWARE.
from anim_utils.animation_data import SkeletonBuilder, SKELETON_NODE_TYPE_END_SITE, LEN_EULER, LEN_ROOT,\
LEN_QUAT
import numpy as np
from transformations import euler_matrix, euler_from_matrix
from .motion_plane import Plane
from anim_utils.animation_data.utils import pose_orientation_euler, check_quat, convert_quat_frame_value_to_array,\
euler_to_quaternion, convert_euler_frames_to_quaternion_frames
from anim_utils.utilities.custom_math import angle_between_vectors
class BVHAnalyzer():
def __init__(self, bvhreader):
self.skeleton = SkeletonBuilder().load_from_bvh(bvhreader)
self.bvhreader = bvhreader
self.quat_frames = []
self.euler_frames = bvhreader.frames
self.n_frames = len(self.euler_frames)
self.body_plane = None
def get_global_pos(self, joint_name, frame_index):
joint_chain = self.get_joint_chain(joint_name)
global_trans = np.eye(4)
global_trans[:3, 3] = self.euler_frames[frame_index][:LEN_ROOT]
for joint in joint_chain:
offset = joint.offset
if 'EndSite' in joint.node_name: # end site joint
rot_mat = np.eye(4)
rot_mat[:3, 3] = offset
else:
rot_angles_euler = self.get_relative_orientation_euler(joint.node_name, frame_index)
rot_angles_rad = np.deg2rad(rot_angles_euler)
rot_mat = euler_matrix(rot_angles_rad[0],
rot_angles_rad[1],
rot_angles_rad[2],
'rxyz')
rot_mat[:3, 3] = offset
global_trans = np.dot(global_trans, rot_mat)
return global_trans[:3, 3]
def get_global_joint_positions(self, joint_name):
'''
Get joint positions for the sequence of frames
:param joint_name: str
:return: numpy.array<3d>
'''
joint_pos = np.zeros((self.n_frames, LEN_ROOT))
for i in range(self.n_frames):
joint_pos[i] = self.get_global_pos(joint_name, i)
return joint_pos
def get_relative_joint_position(self, joint_name, frame_index):
"""
relative joint position to Hips
:param joint_name: str
:param frame_index: int
:return:
"""
joint_global_pos = self.get_global_pos(joint_name, frame_index)
root_global_pos = self.get_global_pos('Hips', frame_index)
return joint_global_pos - root_global_pos
def get_filtered_joint_index(self, joint_name):
return self.skeleton.node_name_frame_map.keys().index(joint_name)
def get_parent_joint_name(self, joint_name):
node = self.get_joint_by_joint_name(joint_name)
if node.parent is not None:
return node.parent.node_name
else:
return None
def get_filtered_joint_param_range(self, joint_name):
reduced_joint_index = self.get_filtered_joint_index(joint_name)
start_index = LEN_ROOT + reduced_joint_index * LEN_QUAT
end_index = LEN_ROOT + (reduced_joint_index + 1) * LEN_QUAT
return start_index, end_index
def get_joint_speed_at_frame_each_dim(self, joint_name, frame_idx):
assert frame_idx != 0, ("Index starts from 1")
return self.get_global_pos(joint_name, frame_idx) - self.get_global_pos(joint_name, frame_idx-1)
def get_joint_speed_each_dim(self, joint_name):
speed = [np.zeros(3)]
for i in range(1, self.n_frames):
speed.append(self.get_joint_speed_at_frame_each_dim(joint_name, i))
return np.asarray(speed)
def get_joint_speed(self, joint_name):
speed = []
for i in range(1, self.n_frames):
speed.append(self.get_joint_speed_at_frame(joint_name, i))
return np.asarray(speed)
def get_joint_speed_at_frame(self, joint_name, frame_idx):
assert frame_idx != 0, ("Index starts from 1")
return np.linalg.norm(self.get_global_pos(joint_name, frame_idx) - self.get_global_pos(joint_name, frame_idx-1))
def get_joint_acceleration_at_frame(self, joint_name, frame_idx):
assert frame_idx != self.n_frames - 1 and frame_idx != 0, ("frame index is out of range!")
return self.get_global_pos(joint_name, frame_idx + 1) + self.get_global_pos(joint_name, frame_idx - 1) - \
2 * self.get_global_pos(joint_name, frame_idx)
def get_joint_acceleration(self, joint_name):
acc = [np.zeros(3)]
for i in range(1, self.n_frames-1):
acc.append(self.get_joint_acceleration_at_frame(joint_name, i))
acc.append(np.zeros(3))
return np.asarray(acc)
def get_global_pos_for_all_frames(self, joint_name):
pos = np.zeros((self.n_frames, 3))
for i in range(self.n_frames):
pos[i] = self.get_global_pos(joint_name, i)
return pos
def get_joint_chain(self, joint_name):
joint = self.get_joint_by_joint_name(joint_name)
joint_chain = []
while joint.parent is not None:
joint_chain.append(joint)
joint = joint.parent
joint_chain.append(joint)
joint_chain.reverse()
return joint_chain
def get_relative_pos(self, joint_name, frame_index):
joint_chain = self.get_joint_chain(joint_name)
if len(joint_chain) == 1:
raise ValueError('Root joint has no relative position')
pos = self.get_global_pos(joint_name, frame_index)
parent_pos = self.get_global_pos(joint_chain[-2].node_name, frame_index)
return pos - parent_pos
def get_joint_offset(self, joint_name):
return self.skeleton.nodes[joint_name].offset
def _get_nodes_without_endsite(self):
animated_nodes = self.skeleton.nodes.values()
nodes_without_endsite = [node for node in animated_nodes if node.node_type != SKELETON_NODE_TYPE_END_SITE]
return nodes_without_endsite
def get_relative_orientation_euler(self, joint_name, frame_index):
# assert frame_index in range(self.n_frames), ('Frame index is invalid!')
nodes_without_endsite = self._get_nodes_without_endsite()
# assert (len(nodes_without_endsite)+1) * 3 == len(self.euler_frames[0]), \
# ('The length of euler frame is not corresponding to length of modeled joints')
joint = self.get_joint_by_joint_name(joint_name)
assert joint in nodes_without_endsite, ("The joint is not modeled!")
joint_index = nodes_without_endsite.index(joint)
start_channel_index = joint_index * 3 + LEN_ROOT
end_channel_index = start_channel_index + LEN_EULER
return self.euler_frames[frame_index][start_channel_index: end_channel_index]
def get_global_transform(self, joint_name, frame_index):
joint_chain = self.get_joint_chain(joint_name)
global_trans = np.eye(4)
global_trans[:3, 3] = self.euler_frames[frame_index][:LEN_ROOT]
for joint in joint_chain:
offset = joint.offset
if 'EndSite' in joint.node_name: # end site joint
rot_mat = np.eye(4)
rot_mat[:3, 3] = offset
else:
rot_angles_euler = self.get_relative_orientation_euler(joint.node_name, frame_index)
rot_angles_rad = np.deg2rad(rot_angles_euler)
rot_mat = euler_matrix(rot_angles_rad[0],
rot_angles_rad[1],
rot_angles_rad[2],
'rxyz')
rot_mat[:3, 3] = offset
global_trans = np.dot(global_trans, rot_mat)
return global_trans
def get_global_orientation_euler(self, joint_name, frame_index):
joint_chain = self.get_joint_chain(joint_name)
global_trans = np.eye(4)
global_trans[:3, 3] = self.euler_frames[frame_index][:LEN_ROOT]
for joint in joint_chain:
offset = joint.offset
rot_angles_euler = self.get_relative_orientation_euler(joint.node_name, frame_index)
rot_angles_rad = np.deg2rad(rot_angles_euler)
rot_mat = euler_matrix(rot_angles_rad[0],
rot_angles_rad[1],
rot_angles_rad[2],
'rxyz')
rot_mat[:3, 3] = offset
global_trans = np.dot(global_trans, rot_mat)
global_angles_rad = euler_from_matrix(global_trans,
'rxyz')
return np.rad2deg(global_angles_rad)
def get_global_orientation_quat(self, joint_name, frame_index):
return euler_to_quaternion(self.get_global_orientation_euler(joint_name,
frame_index))
def set_relative_orientation_euler(self, joint_name, frame_index, euler_angles):
"""
:param joint_name: str
:param frame_index: int
:param euler_angles: array<float> degree
:return:
"""
# assert frame_index in range(self.n_frames), ('Frame index is invalid!')
animated_nodes = self.skeleton.nodes.values()
nodes_without_endsite = [node for node in animated_nodes if node.node_type != SKELETON_NODE_TYPE_END_SITE]
assert (len(nodes_without_endsite)+1) * 3 == len(self.euler_frames[0]), \
('The length of euler frame is not corresponding to length of modeled joints')
joint_index = 0
for node in nodes_without_endsite:
if node.node_name == joint_name:
break
else:
joint_index += 1
start_channel_index = (joint_index + 1) * 3
end_channel_index = start_channel_index + LEN_EULER
self.euler_frames[frame_index][start_channel_index: end_channel_index] = euler_angles
def get_joint_index(self, joint_name):
joint_name_list = self.skeleton.nodes.keys()
if joint_name not in joint_name_list:
raise ValueError('joint name is not found!')
return joint_name_list.index(joint_name)
def set_joint_offset(self, joint_name, offset):
assert len(offset) == 3, ('The length of joint is not correct')
joint = self.get_joint_by_joint_name(joint_name)
joint.offset = [offset[0], offset[1], offset[2]]
def get_joint_by_joint_name(self, joint_name):
if joint_name not in self.skeleton.nodes.keys():
print(joint_name)
raise KeyError('Joint name is not found!')
return self.skeleton.nodes[joint_name]
def to_quaternion(self, filter_joints=True):
self.quat_frames = np.array(convert_euler_frames_to_quaternion_frames(self.bvhreader,
self.euler_frames,
filter_joints))
def get_joint_channel_in_full_euler_frame(self, joint):
"""
:param joint: str, joint name
:return:
"""
return self.skeleton.node_channels.index((joint, 'Xrotation'))
def get_closure_kinematic_chain(self, joint):
joint_chain = []
if joint.parent is not None:
joint_chain.append(joint)
return joint_chain.reverse()
def get_body_plane(self, frame_idx):
body_plane_joints = ['Hips', 'Spine', 'LeftShoulder', 'RightShoulder', 'LeftUpLeg', 'RightUpLeg']
points = []
for joint in body_plane_joints:
points.append(self.get_relative_joint_position(joint, frame_idx))
points = np.asarray(points)
return Plane(points)
def get_left_elbow_angle(self, frame_idx):
left_arm_pos = self.get_global_pos('LeftArm', frame_idx)
left_forearm_pos = self.get_global_pos('LeftForeArm', frame_idx)
left_hand_pos = self.get_global_pos('LeftHand', frame_idx)
upper_arm = left_forearm_pos - left_arm_pos
lower_arm = left_forearm_pos - left_hand_pos
theta = np.arccos(np.dot(upper_arm, lower_arm)/(np.linalg.norm(upper_arm) * np.linalg.norm(lower_arm)))
theta = np.rad2deg(theta)
return theta
def get_left_elbow_angles(self):
left_elbow_anlges = []
for i in range(self.n_frames):
left_elbow_anlges.append(self.get_left_elbow_angle(i))
return left_elbow_anlges
def get_right_elbow_angle(self, frame_idx):
right_arm_pos = self.get_global_pos('RightArm', frame_idx)
right_forearm_pos = self.get_global_pos('RightForeArm', frame_idx)
right_hand_pos = self.get_global_pos('RightHand', frame_idx)
upper_arm = right_forearm_pos - right_arm_pos
lower_arm = right_forearm_pos - right_hand_pos
theta = np.arccos(np.dot(upper_arm, lower_arm)/(np.linalg.norm(upper_arm) * np.linalg.norm(lower_arm)))
theta = np.rad2deg(theta)
return theta
def get_right_elbow_anlges(self):
right_elbow_angles = []
for i in range(self.n_frames):
right_elbow_angles.append(self.get_right_elbow_angle(i))
return right_elbow_angles
def right_hand_forward(self):
relative_right_hand_pos = np.zeros((self.n_frames, 3))
for i in range(self.n_frames):
relative_right_hand_pos[i] = self.get_global_pos('RightHand', i) - self.get_global_pos('Hips', i)
moving_offsets = relative_right_hand_pos[1:] - relative_right_hand_pos[:-1]
annotation = [False]
for i in range(self.n_frames-1):
body_dir = pose_orientation_euler(self.euler_frames[i+1])
if np.dot(body_dir, np.array([moving_offsets[i, 0], moving_offsets[i, 2]])) > 0.5:
annotation.append(True)
else:
annotation.append(False)
return annotation
def left_hand_forward(self):
left_hand_pos = np.zeros((self.n_frames, 3))
for i in range(self.n_frames):
left_hand_pos[i] = self.get_global_pos('LeftHand', i)
moving_offsets = left_hand_pos[1:] - left_hand_pos[:-1]
annotation = [False]
for i in range(self.n_frames-1):
body_dir = pose_orientation_euler(self.euler_frames[i+1])
if np.dot(body_dir, np.array([moving_offsets[i, 0], moving_offsets[i, 2]])) > 0.1:
annotation.append(True)
else:
annotation.append(False)
return annotation
def feet_distance_on_ground(self):
left_foot_pos = self.get_global_joint_positions('LeftFoot')
right_foot_pos = self.get_global_joint_positions('RightFoot')
feet_distance = []
for i in range(self.n_frames):
feet_distance.append(np.linalg.norm(left_foot_pos[i, [0, 2]] - right_foot_pos[i, [0, 2]]))
return np.asarray(feet_distance)
def rfoot_behind_lleg(self, frame_index, jointlist=['LeftUpLeg', 'RightUpLeg', 'LeftFoot', 'RightFoot']):
"""
involved joints: Hips, LeftUpLeg, LeftFoot, RightLeg
:return:
"""
points = []
for joint in jointlist:
points.append(self.get_global_pos(joint, frame_index))
# determine the last point is before the body plane defined by the other three joints or behind
# reverse the list of joints, because the direction of the plane is decided by the right-hand rule
body_plane = Plane(points[:3])
return not body_plane.is_before_plane(points[-1])
def lfoot_behind_rleg(self, frame_index, jointlist=['LeftUpLeg', 'RightUpLeg', 'RightFoot', 'LeftFoot']):
"""
involve joints: Hips, RightUpLeg, RightFoot, LeftLeg
:param frame_index:
:return:
"""
points = []
for joint in jointlist:
points.append(self.get_global_pos(joint, frame_index))
body_plane = Plane(points[:3])
return not body_plane.is_before_plane(points[-1])
def rhand_moving_forwards(self, frameIndex):
"""
involved joints: body plane and RightHand
:param frameIndex:
:return:
"""
if self.body_plane is None:
self.get_body_plane(frameIndex)
if frameIndex == self.n_frames - 1:
return False
else:
current_distance = self.joint_disntace_to_body('RightHand', frameIndex)
next_distance = self.joint_disntace_to_body('RightHand', frameIndex + 1)
if next_distance - current_distance > 0.1:
return True
else:
return False
def lhand_moving_forwards(self, frameIndex):
"""
involved joints: body plane and LeftHand
:param frameIndex:
:return:
"""
if self.body_plane is None:
self.get_body_plane(frameIndex)
left_hand_pos = self.get_relative_joint_position('LeftHand', frameIndex)
if frameIndex == self.n_frames - 1:
return False
else:
next_pos = self.get_relative_joint_position('LeftHand', frameIndex + 1)
current_distance = self.body_plane.distance(left_hand_pos)
next_distance = self.body_plane.distance(next_pos)
if next_distance - current_distance > 0.1:
return True
else:
return False
def lhand_moving_forwards_one_frame(self, frameIndex):
threshold = 0.1
if frameIndex <= 0:
return False
else:
current_pos = self.get_relative_joint_position('LeftHand', frameIndex)
previous_pos = self.get_relative_joint_position('LeftHand', frameIndex)
if self.body_plane is None:
self.get_body_plane(frameIndex)
current_dist = self.body_plane.distance(current_pos)
previous_dist = self.body_plane.distance(previous_pos)
if current_dist - previous_dist > threshold:
return True
else:
return False
def lhand_moving_forwards2(self, frameIndex, windowSize=10):
if frameIndex < windowSize:
max_frame = frameIndex
elif self.n_frames - frameIndex < windowSize:
max_frame = self.n_frames - frameIndex - 1
else:
max_frame = windowSize
w = 1
while w <= max_frame:
prev_frame = self.lhand_moving_forwards_one_frame(frameIndex - w)
next_frame = self.lhand_moving_forwards_one_frame(frameIndex + w)
if prev_frame and next_frame:
return 1
elif not prev_frame and not next_frame:
return -1
else:
w += 1
return 0
def joint_disntace_to_body(self, jointname, frameIndex):
body_plane = self.get_body_plane(frameIndex)
joint_pos = self.get_relative_joint_position(jointname, frameIndex)
return body_plane.distance(joint_pos)
def rhand_moving_forwards_one_frame(self, frameIndex):
threshold = 0.1
if frameIndex <= 0:
return False
else:
current_dist = self.joint_disntace_to_body('RightHand', frameIndex)
previous_dist = self.joint_disntace_to_body('RightHand', frameIndex - 1)
# print('current distance: ', current_dist)
# print('previous distance: ', previous_dist)
if current_dist - previous_dist > threshold:
return True
else:
return False
def rhand_moving_forwards2(self, frameIndex, windowSize=10):
if frameIndex < windowSize:
max_frame = frameIndex
elif self.n_frames - frameIndex < windowSize:
max_frame = self.n_frames - frameIndex - 1
else:
max_frame = windowSize
# print("test1 max_frame: ", max_frame)
w = 1
while w <= max_frame:
prev_frame = self.rhand_moving_forwards_one_frame(frameIndex - w)
next_frame = self.rhand_moving_forwards_one_frame(frameIndex + w)
# print("w: ", w)
# print("prev_frame: ", prev_frame)
# print("next_frame: ", next_frame)
if prev_frame and next_frame:
return 1
elif not prev_frame and not next_frame:
return -1
else:
w += 1
return 0
def lknee_angle(self, frameIndex):
"""
involved joints: LeftUpLeg, LeftLeg, LeftFoot
:param frameIndex:
:return:
"""
leftUpLeg_position = self.get_relative_joint_position('LeftUpLeg', frameIndex)
leftLeg_position = self.get_relative_joint_position('LeftLeg', frameIndex)
leftFoot_position = self.get_relative_joint_position('LeftFoot', frameIndex)
upLegBone = leftLeg_position - leftUpLeg_position
lowLegBone = leftFoot_position - leftLeg_position
return angle_between_vectors(upLegBone, lowLegBone)
def rknee_angle(self, frameIndex):
"""
involved joints: RightUpLeg, RightLeg, RightFoot
:param frameIndex:
:return:
"""
rightUpLeg_position = self.get_relative_joint_position('RightUpLeg', frameIndex)
rightLeg_position = self.get_relative_joint_position('RightLeg', frameIndex)
rightFoot_position = self.get_relative_joint_position('RightFoot', frameIndex)
upLegBone = rightLeg_position - rightUpLeg_position
lowLegBone = rightFoot_position - rightLeg_position
return angle_between_vectors(upLegBone, lowLegBone)
def lleg_bending(self, frameIndex):
"""
involved joints: LeftUpLeg, LeftLeg, LeftFoot
:param frameIndex:
:param w (int): window size
:return:
reverse indexing is not supported
"""
angle_threshold = 0.001
if frameIndex <= 0:
return False
else:
previous_angle = self.lknee_angle(frameIndex - 1)
angle = self.lknee_angle(frameIndex)
if angle - previous_angle < -angle_threshold:
return True
else:
return False
def lleg_stretching(self, frameIndex):
"""
involved joints: LeftUpLeg, LeftLeg, LeftFoot
:param frameIndex:
:param w (int): window size
:return:
reverse indexing is not supported
"""
angle_threshold = 0.01
if frameIndex <= 0:
return False
else:
previous_angle = self.lknee_angle(frameIndex - 1)
angle = self.lknee_angle(frameIndex)
if angle - previous_angle >angle_threshold:
return True
else:
return False
def rleg_bending(self, frameIndex):
"""
involved joints: RightUpLeg, RightLeg, RightFoot
:param frameIndex:
:param w (int): window size
:return:
reverse indexing is not supported
"""
angle_threshold = 0.001
if frameIndex <= 0:
return False
else:
previous_angle = self.rknee_angle(frameIndex - 1)
angle = self.rknee_angle(frameIndex)
if angle - previous_angle < -angle_threshold:
return True
else:
return False
def rleg_stretching(self, frameIndex):
"""
involved joints: RightUpLeg, RightLeg, RightFoot
:param frameIndex:
:param w (int): window size
:return:
reverse indexing is not supported
"""
angle_threshold = 0.01
if frameIndex <= 0:
return False
else:
previous_angle = self.rknee_angle(frameIndex - 1)
angle = self.rknee_angle(frameIndex)
if angle - previous_angle > angle_threshold:
return True
else:
return False
def rtoe_before_lleg(self, frameIndex):
"""
involved joints: Hips, LeftUpLeg, LeftLeg, Bip01_R_Toe0
:param frameIndex:
:return:
"""
jointList = ['Hips', 'LeftUpLeg', 'LeftLeg', 'Bip01_R_Toe0']
points = []
for joint in jointList:
points.append(self.get_relative_joint_position(joint, frameIndex))
points.reverse()
relative_plane = Plane(points[1:])
return relative_plane.is_before_plane(points[0])
def ltoe_before_rleg(self, frameIndex):
"""
involved joints: Hips, RightUpLeg, RightLeg, Bip01_L_Toe0
:param frameIndex:
:return:
"""
jointlist = ['Hips', 'RightUpLeg', 'RightLeg', 'Bip01_L_Toe0']
points = []
for joint in jointlist:
points.append(self.get_relative_joint_position(joint, frameIndex))
relative_plane = Plane(points[:3])
return relative_plane.is_before_plane(points[-1])
def spine_horizontal(self, frameIndex):
"""
involved joints:
:param frameIndex:
:return:
"""
pass
def feet_moving_towards_each_other(self):
'''
Feature: Distance between two feet on the ground
involved joints:
:return Boolean: status
'''
pass
def process(self, frame_idx):
'''
use a list of signal processor to process given frame
:return:
'''
pass | [
"numpy.eye",
"transformations.euler_from_matrix",
"transformations.euler_matrix",
"anim_utils.animation_data.SkeletonBuilder",
"numpy.asarray",
"anim_utils.utilities.custom_math.angle_between_vectors",
"numpy.array",
"numpy.zeros",
"numpy.dot",
"numpy.deg2rad",
"numpy.linalg.norm",
"numpy.rad2... | [((2074, 2083), 'numpy.eye', 'np.eye', (['(4)'], {}), '(4)\n', (2080, 2083), True, 'import numpy as np\n'), ((3114, 3149), 'numpy.zeros', 'np.zeros', (['(self.n_frames, LEN_ROOT)'], {}), '((self.n_frames, LEN_ROOT))\n', (3122, 3149), True, 'import numpy as np\n'), ((4780, 4797), 'numpy.asarray', 'np.asarray', (['speed'], {}), '(speed)\n', (4790, 4797), True, 'import numpy as np\n'), ((4989, 5006), 'numpy.asarray', 'np.asarray', (['speed'], {}), '(speed)\n', (4999, 5006), True, 'import numpy as np\n'), ((5840, 5855), 'numpy.asarray', 'np.asarray', (['acc'], {}), '(acc)\n', (5850, 5855), True, 'import numpy as np\n'), ((5928, 5956), 'numpy.zeros', 'np.zeros', (['(self.n_frames, 3)'], {}), '((self.n_frames, 3))\n', (5936, 5956), True, 'import numpy as np\n'), ((8065, 8074), 'numpy.eye', 'np.eye', (['(4)'], {}), '(4)\n', (8071, 8074), True, 'import numpy as np\n'), ((9028, 9037), 'numpy.eye', 'np.eye', (['(4)'], {}), '(4)\n', (9034, 9037), True, 'import numpy as np\n'), ((9659, 9698), 'transformations.euler_from_matrix', 'euler_from_matrix', (['global_trans', '"""rxyz"""'], {}), "(global_trans, 'rxyz')\n", (9676, 9698), False, 'from transformations import euler_matrix, euler_from_matrix\n'), ((9760, 9789), 'numpy.rad2deg', 'np.rad2deg', (['global_angles_rad'], {}), '(global_angles_rad)\n', (9770, 9789), True, 'import numpy as np\n'), ((12845, 12863), 'numpy.asarray', 'np.asarray', (['points'], {}), '(points)\n', (12855, 12863), True, 'import numpy as np\n'), ((13379, 13396), 'numpy.rad2deg', 'np.rad2deg', (['theta'], {}), '(theta)\n', (13389, 13396), True, 'import numpy as np\n'), ((14123, 14140), 'numpy.rad2deg', 'np.rad2deg', (['theta'], {}), '(theta)\n', (14133, 14140), True, 'import numpy as np\n'), ((14444, 14472), 'numpy.zeros', 'np.zeros', (['(self.n_frames, 3)'], {}), '((self.n_frames, 3))\n', (14452, 14472), True, 'import numpy as np\n'), ((15124, 15152), 'numpy.zeros', 'np.zeros', (['(self.n_frames, 3)'], {}), '((self.n_frames, 3))\n', 
(15132, 15152), True, 'import numpy as np\n'), ((16044, 16069), 'numpy.asarray', 'np.asarray', (['feet_distance'], {}), '(feet_distance)\n', (16054, 16069), True, 'import numpy as np\n'), ((22143, 22187), 'anim_utils.utilities.custom_math.angle_between_vectors', 'angle_between_vectors', (['upLegBone', 'lowLegBone'], {}), '(upLegBone, lowLegBone)\n', (22164, 22187), False, 'from anim_utils.utilities.custom_math import angle_between_vectors\n'), ((22749, 22793), 'anim_utils.utilities.custom_math.angle_between_vectors', 'angle_between_vectors', (['upLegBone', 'lowLegBone'], {}), '(upLegBone, lowLegBone)\n', (22770, 22793), False, 'from anim_utils.utilities.custom_math import angle_between_vectors\n'), ((2831, 2860), 'numpy.dot', 'np.dot', (['global_trans', 'rot_mat'], {}), '(global_trans, rot_mat)\n', (2837, 2860), True, 'import numpy as np\n'), ((4630, 4641), 'numpy.zeros', 'np.zeros', (['(3)'], {}), '(3)\n', (4638, 4641), True, 'import numpy as np\n'), ((5660, 5671), 'numpy.zeros', 'np.zeros', (['(3)'], {}), '(3)\n', (5668, 5671), True, 'import numpy as np\n'), ((5812, 5823), 'numpy.zeros', 'np.zeros', (['(3)'], {}), '(3)\n', (5820, 5823), True, 'import numpy as np\n'), ((8822, 8851), 'numpy.dot', 'np.dot', (['global_trans', 'rot_mat'], {}), '(global_trans, rot_mat)\n', (8828, 8851), True, 'import numpy as np\n'), ((9304, 9332), 'numpy.deg2rad', 'np.deg2rad', (['rot_angles_euler'], {}), '(rot_angles_euler)\n', (9314, 9332), True, 'import numpy as np\n'), ((9355, 9432), 'transformations.euler_matrix', 'euler_matrix', (['rot_angles_rad[0]', 'rot_angles_rad[1]', 'rot_angles_rad[2]', '"""rxyz"""'], {}), "(rot_angles_rad[0], rot_angles_rad[1], rot_angles_rad[2], 'rxyz')\n", (9367, 9432), False, 'from transformations import euler_matrix, euler_from_matrix\n'), ((9601, 9630), 'numpy.dot', 'np.dot', (['global_trans', 'rot_mat'], {}), '(global_trans, rot_mat)\n', (9607, 9630), True, 'import numpy as np\n'), ((11893, 11988), 
'anim_utils.animation_data.utils.convert_euler_frames_to_quaternion_frames', 'convert_euler_frames_to_quaternion_frames', (['self.bvhreader', 'self.euler_frames', 'filter_joints'], {}), '(self.bvhreader, self.euler_frames,\n filter_joints)\n', (11934, 11988), False, 'from anim_utils.animation_data.utils import pose_orientation_euler, check_quat, convert_quat_frame_value_to_array, euler_to_quaternion, convert_euler_frames_to_quaternion_frames\n'), ((14799, 14847), 'anim_utils.animation_data.utils.pose_orientation_euler', 'pose_orientation_euler', (['self.euler_frames[i + 1]'], {}), '(self.euler_frames[i + 1])\n', (14821, 14847), False, 'from anim_utils.animation_data.utils import pose_orientation_euler, check_quat, convert_quat_frame_value_to_array, euler_to_quaternion, convert_euler_frames_to_quaternion_frames\n'), ((15415, 15463), 'anim_utils.animation_data.utils.pose_orientation_euler', 'pose_orientation_euler', (['self.euler_frames[i + 1]'], {}), '(self.euler_frames[i + 1])\n', (15437, 15463), False, 'from anim_utils.animation_data.utils import pose_orientation_euler, check_quat, convert_quat_frame_value_to_array, euler_to_quaternion, convert_euler_frames_to_quaternion_frames\n'), ((1709, 1726), 'anim_utils.animation_data.SkeletonBuilder', 'SkeletonBuilder', ([], {}), '()\n', (1724, 1726), False, 'from anim_utils.animation_data import SkeletonBuilder, SKELETON_NODE_TYPE_END_SITE, LEN_EULER, LEN_ROOT, LEN_QUAT\n'), ((2312, 2321), 'numpy.eye', 'np.eye', (['(4)'], {}), '(4)\n', (2318, 2321), True, 'import numpy as np\n'), ((2514, 2542), 'numpy.deg2rad', 'np.deg2rad', (['rot_angles_euler'], {}), '(rot_angles_euler)\n', (2524, 2542), True, 'import numpy as np\n'), ((2569, 2646), 'transformations.euler_matrix', 'euler_matrix', (['rot_angles_rad[0]', 'rot_angles_rad[1]', 'rot_angles_rad[2]', '"""rxyz"""'], {}), "(rot_angles_rad[0], rot_angles_rad[1], rot_angles_rad[2], 'rxyz')\n", (2581, 2646), False, 'from transformations import euler_matrix, euler_from_matrix\n'), 
((8303, 8312), 'numpy.eye', 'np.eye', (['(4)'], {}), '(4)\n', (8309, 8312), True, 'import numpy as np\n'), ((8505, 8533), 'numpy.deg2rad', 'np.deg2rad', (['rot_angles_euler'], {}), '(rot_angles_euler)\n', (8515, 8533), True, 'import numpy as np\n'), ((8560, 8637), 'transformations.euler_matrix', 'euler_matrix', (['rot_angles_rad[0]', 'rot_angles_rad[1]', 'rot_angles_rad[2]', '"""rxyz"""'], {}), "(rot_angles_rad[0], rot_angles_rad[1], rot_angles_rad[2], 'rxyz')\n", (8572, 8637), False, 'from transformations import euler_matrix, euler_from_matrix\n'), ((13277, 13305), 'numpy.dot', 'np.dot', (['upper_arm', 'lower_arm'], {}), '(upper_arm, lower_arm)\n', (13283, 13305), True, 'import numpy as np\n'), ((14021, 14049), 'numpy.dot', 'np.dot', (['upper_arm', 'lower_arm'], {}), '(upper_arm, lower_arm)\n', (14027, 14049), True, 'import numpy as np\n'), ((15959, 16027), 'numpy.linalg.norm', 'np.linalg.norm', (['(left_foot_pos[i, [0, 2]] - right_foot_pos[i, [0, 2]])'], {}), '(left_foot_pos[i, [0, 2]] - right_foot_pos[i, [0, 2]])\n', (15973, 16027), True, 'import numpy as np\n'), ((13307, 13332), 'numpy.linalg.norm', 'np.linalg.norm', (['upper_arm'], {}), '(upper_arm)\n', (13321, 13332), True, 'import numpy as np\n'), ((13335, 13360), 'numpy.linalg.norm', 'np.linalg.norm', (['lower_arm'], {}), '(lower_arm)\n', (13349, 13360), True, 'import numpy as np\n'), ((14051, 14076), 'numpy.linalg.norm', 'np.linalg.norm', (['upper_arm'], {}), '(upper_arm)\n', (14065, 14076), True, 'import numpy as np\n'), ((14079, 14104), 'numpy.linalg.norm', 'np.linalg.norm', (['lower_arm'], {}), '(lower_arm)\n', (14093, 14104), True, 'import numpy as np\n'), ((14878, 14932), 'numpy.array', 'np.array', (['[moving_offsets[i, 0], moving_offsets[i, 2]]'], {}), '([moving_offsets[i, 0], moving_offsets[i, 2]])\n', (14886, 14932), True, 'import numpy as np\n'), ((15494, 15548), 'numpy.array', 'np.array', (['[moving_offsets[i, 0], moving_offsets[i, 2]]'], {}), '([moving_offsets[i, 0], moving_offsets[i, 2]])\n', 
(15502, 15548), True, 'import numpy as np\n')] |
# -*- coding: utf-8 -*-
'''
Copyright 2019, University of Freiburg.
Chair of Algorithms and Data Structures.
<NAME> <<EMAIL>>
'''
import argparse
import unittest
import torch
import tests.utils as test_utils
from fairseq.data.tokenizers.character_tokenizer import CharacterTokenizer
class TestCharacterTokenizer(unittest.TestCase):
def test_splits_into_characters(self):
tokenizer = CharacterTokenizer(start_tokens=['<S1>', '<S2>'], end_tokens=['</S2>', '</S1>'])
sentence = "Small sentence."
tokens = [t.text for t in tokenizer.tokenize(sentence)]
expected_ = ["<S1>", "<S2>", "S", "m", "a", "l", "l", " ", "s", "e", "n", "t", "e", "n", "c", "e", ".", "</S2>", "</S1>"]
assert tokens == expected_
def test_batch_tokenization(self):
tokenizer = CharacterTokenizer()
sentences = [
"Small sentence.",
"Second sentence.",
"Third sentence!"
]
batched_tokens = tokenizer.batch_tokenize(sentences)
single_tokens = [tokenizer.tokenize(s) for s in sentences]
assert len(batched_tokens) == len(single_tokens)
for b, s in zip(batched_tokens, single_tokens):
assert len(b) == len(s)
for bw, sw in zip(b, s):
assert bw.text == sw.text
def test_handles_byte_encoding(self):
tokenizer = CharacterTokenizer(byte_encoding='utf-8', start_tokens=[259], end_tokens=[260])
word = "åøâáabe"
tokens = [t.text_id for t in tokenizer.tokenize(word)]
# Note that we've added one to the utf-8 encoded bytes, to account for masking.
expected_ = [259, 196, 166, 196, 185, 196, 163, 196, 162, 98, 99, 102, 260]
assert tokens == expected_ | [
"fairseq.data.tokenizers.character_tokenizer.CharacterTokenizer"
] | [((395, 480), 'fairseq.data.tokenizers.character_tokenizer.CharacterTokenizer', 'CharacterTokenizer', ([], {'start_tokens': "['<S1>', '<S2>']", 'end_tokens': "['</S2>', '</S1>']"}), "(start_tokens=['<S1>', '<S2>'], end_tokens=['</S2>', '</S1>']\n )\n", (413, 480), False, 'from fairseq.data.tokenizers.character_tokenizer import CharacterTokenizer\n'), ((785, 805), 'fairseq.data.tokenizers.character_tokenizer.CharacterTokenizer', 'CharacterTokenizer', ([], {}), '()\n', (803, 805), False, 'from fairseq.data.tokenizers.character_tokenizer import CharacterTokenizer\n'), ((1290, 1369), 'fairseq.data.tokenizers.character_tokenizer.CharacterTokenizer', 'CharacterTokenizer', ([], {'byte_encoding': '"""utf-8"""', 'start_tokens': '[259]', 'end_tokens': '[260]'}), "(byte_encoding='utf-8', start_tokens=[259], end_tokens=[260])\n", (1308, 1369), False, 'from fairseq.data.tokenizers.character_tokenizer import CharacterTokenizer\n')] |
from faktura import app
from flask import request, render_template, send_file, redirect, make_response, jsonify
from faktura.breadcrumbs import breadcrumbs
from faktura.models import db, TemplateVariable, User
from flask.ext.login import login_required
from faktura.csrf import generate_csrf_token
@app.route('/settings')
@login_required
def settings():
variables = TemplateVariable.query.all()
users = User.query.all()
return render_template('settings.html', variables=variables, users=users, breadcrumbs=breadcrumbs("Main Menu"))
@app.route('/vars/create', methods=['POST'])
@login_required
def create_var():
var = TemplateVariable(request.form["key"], request.form["value"])
db.session.add(var)
db.session.commit()
return jsonify(var=var.to_json(), _csrf_token=generate_csrf_token())
@app.route('/vars/save', methods=['POST'])
@login_required
def save_var():
var = TemplateVariable.query.filter(TemplateVariable.key == request.form["key"]).first()
var.value = request.form["value"]
db.session.commit()
return jsonify(var=var.to_json(), _csrf_token=generate_csrf_token())
@app.route('/vars/delete', methods=['POST'])
@login_required
def delete_var():
var = TemplateVariable.query.filter(TemplateVariable.key == request.form["key"]).first()
db.session.delete(var)
db.session.commit()
return jsonify(var=var.to_json(), _csrf_token=generate_csrf_token())
| [
"faktura.csrf.generate_csrf_token",
"faktura.models.TemplateVariable.query.all",
"faktura.models.TemplateVariable.query.filter",
"faktura.models.db.session.add",
"faktura.app.route",
"faktura.models.db.session.delete",
"faktura.breadcrumbs.breadcrumbs",
"faktura.models.db.session.commit",
"faktura.m... | [((301, 323), 'faktura.app.route', 'app.route', (['"""/settings"""'], {}), "('/settings')\n", (310, 323), False, 'from faktura import app\n'), ((549, 592), 'faktura.app.route', 'app.route', (['"""/vars/create"""'], {'methods': "['POST']"}), "('/vars/create', methods=['POST'])\n", (558, 592), False, 'from faktura import app\n'), ((821, 862), 'faktura.app.route', 'app.route', (['"""/vars/save"""'], {'methods': "['POST']"}), "('/vars/save', methods=['POST'])\n", (830, 862), False, 'from faktura import app\n'), ((1126, 1169), 'faktura.app.route', 'app.route', (['"""/vars/delete"""'], {'methods': "['POST']"}), "('/vars/delete', methods=['POST'])\n", (1135, 1169), False, 'from faktura import app\n'), ((372, 400), 'faktura.models.TemplateVariable.query.all', 'TemplateVariable.query.all', ([], {}), '()\n', (398, 400), False, 'from faktura.models import db, TemplateVariable, User\n'), ((413, 429), 'faktura.models.User.query.all', 'User.query.all', ([], {}), '()\n', (427, 429), False, 'from faktura.models import db, TemplateVariable, User\n'), ((637, 697), 'faktura.models.TemplateVariable', 'TemplateVariable', (["request.form['key']", "request.form['value']"], {}), "(request.form['key'], request.form['value'])\n", (653, 697), False, 'from faktura.models import db, TemplateVariable, User\n'), ((702, 721), 'faktura.models.db.session.add', 'db.session.add', (['var'], {}), '(var)\n', (716, 721), False, 'from faktura.models import db, TemplateVariable, User\n'), ((726, 745), 'faktura.models.db.session.commit', 'db.session.commit', ([], {}), '()\n', (743, 745), False, 'from faktura.models import db, TemplateVariable, User\n'), ((1030, 1049), 'faktura.models.db.session.commit', 'db.session.commit', ([], {}), '()\n', (1047, 1049), False, 'from faktura.models import db, TemplateVariable, User\n'), ((1301, 1323), 'faktura.models.db.session.delete', 'db.session.delete', (['var'], {}), '(var)\n', (1318, 1323), False, 'from faktura.models import db, TemplateVariable, 
User\n'), ((1328, 1347), 'faktura.models.db.session.commit', 'db.session.commit', ([], {}), '()\n', (1345, 1347), False, 'from faktura.models import db, TemplateVariable, User\n'), ((520, 544), 'faktura.breadcrumbs.breadcrumbs', 'breadcrumbs', (['"""Main Menu"""'], {}), "('Main Menu')\n", (531, 544), False, 'from faktura.breadcrumbs import breadcrumbs\n'), ((796, 817), 'faktura.csrf.generate_csrf_token', 'generate_csrf_token', ([], {}), '()\n', (815, 817), False, 'from faktura.csrf import generate_csrf_token\n'), ((905, 979), 'faktura.models.TemplateVariable.query.filter', 'TemplateVariable.query.filter', (["(TemplateVariable.key == request.form['key'])"], {}), "(TemplateVariable.key == request.form['key'])\n", (934, 979), False, 'from faktura.models import db, TemplateVariable, User\n'), ((1101, 1122), 'faktura.csrf.generate_csrf_token', 'generate_csrf_token', ([], {}), '()\n', (1120, 1122), False, 'from faktura.csrf import generate_csrf_token\n'), ((1214, 1288), 'faktura.models.TemplateVariable.query.filter', 'TemplateVariable.query.filter', (["(TemplateVariable.key == request.form['key'])"], {}), "(TemplateVariable.key == request.form['key'])\n", (1243, 1288), False, 'from faktura.models import db, TemplateVariable, User\n'), ((1399, 1420), 'faktura.csrf.generate_csrf_token', 'generate_csrf_token', ([], {}), '()\n', (1418, 1420), False, 'from faktura.csrf import generate_csrf_token\n')] |
from django.contrib import admin
from .models import Payload
class PayloadAdmin(admin.ModelAdmin):
list_display = ('method', 'path','get','post')
search_fields = ('get','post')
admin.site.register(Payload, PayloadAdmin) | [
"django.contrib.admin.site.register"
] | [((187, 229), 'django.contrib.admin.site.register', 'admin.site.register', (['Payload', 'PayloadAdmin'], {}), '(Payload, PayloadAdmin)\n', (206, 229), False, 'from django.contrib import admin\n')] |
import time
import GRBL
start_crdnts_up = {} #start coordinates
start_crdnts_dn = {} #start coordinates
pass_crdnts_up = {} #test pass stack
pass_crdnts_dn = {} #test pass stack
fail_crdnts_up = {} #test fail stack
fail_crdnts_dn = {} #test fail stack
camera_cordnts_up = {} #camera locations
camera_cordnts_dn = {} #camera locations
#Controls functions for the delta
sleep_time = 0.5
def turn_on_vacuum():
print("Turning on vacuum pump")
def pickup():
print("Picking up sample...")
time.sleep(sleep_time)
def drop():
#for x in range(0, SIZE, 1):
#grblCom1.write(CONT_MAT1[x])
print("Dropping sample...")
time.sleep(sleep_time)
def move_to_start():
print("Moving to Start...")
time.sleep(sleep_time)
def move_to_camera():
print("Moving to Camera...")
time.sleep(sleep_time)
def move_to_passed():
print("Moving to Pass Stack...")
time.sleep(sleep_time)
def move_to_failed():
print("Moving to Fail Stack...")
time.sleep(sleep_time)
| [
"time.sleep"
] | [((505, 527), 'time.sleep', 'time.sleep', (['sleep_time'], {}), '(sleep_time)\n', (515, 527), False, 'import time\n'), ((660, 682), 'time.sleep', 'time.sleep', (['sleep_time'], {}), '(sleep_time)\n', (670, 682), False, 'import time\n'), ((753, 775), 'time.sleep', 'time.sleep', (['sleep_time'], {}), '(sleep_time)\n', (763, 775), False, 'import time\n'), ((836, 858), 'time.sleep', 'time.sleep', (['sleep_time'], {}), '(sleep_time)\n', (846, 858), False, 'import time\n'), ((923, 945), 'time.sleep', 'time.sleep', (['sleep_time'], {}), '(sleep_time)\n', (933, 945), False, 'import time\n'), ((1014, 1036), 'time.sleep', 'time.sleep', (['sleep_time'], {}), '(sleep_time)\n', (1024, 1036), False, 'import time\n')] |
# -*- coding: utf-8 -*-
import os
import sys
__author__ = '<NAME>'
__version__ = '0.1'
__ppath__ = os.path.dirname(os.path.realpath(__file__))
if __ppath__ not in sys.path:
sys.path.append(os.path.dirname(__ppath__))
from flask import Flask
app = Flask(__name__)
from cp_validator import extractor
postals = extractor.get_postal()
import cp_validator.views
| [
"cp_validator.extractor.get_postal",
"os.path.dirname",
"os.path.realpath",
"flask.Flask"
] | [((254, 269), 'flask.Flask', 'Flask', (['__name__'], {}), '(__name__)\n', (259, 269), False, 'from flask import Flask\n'), ((316, 338), 'cp_validator.extractor.get_postal', 'extractor.get_postal', ([], {}), '()\n', (336, 338), False, 'from cp_validator import extractor\n'), ((117, 143), 'os.path.realpath', 'os.path.realpath', (['__file__'], {}), '(__file__)\n', (133, 143), False, 'import os\n'), ((195, 221), 'os.path.dirname', 'os.path.dirname', (['__ppath__'], {}), '(__ppath__)\n', (210, 221), False, 'import os\n')] |
import random
import torch
import torch.nn as nn
from pathlib import Path
from torch.utils.data import Dataset, DataLoader
RAW_PATH = 'src/data/datasets/Stocks/raw'
APIKEY = 'A6YNKD8LYDFDEALD'
class Stocks (Dataset):
def __init__(self, seq_len: int = 20, split: str = 'train'):
self.seq_len = seq_len
self.split = split
self.path = Path(RAW_PATH)
if self.split == 'train':
self.files = self.path.glob('[!TSLA]*')
elif self.split == 'test':
self.files = self.path.glob('TSLA*')
self.data = [torch.load(f) for f in self.files]
self.lengths = [len(d) for d in self.data]
self.len = sum([l // self.seq_len for l in self.lengths])
self.buckets = {}
count = 0
for i, l in enumerate(self.lengths):
for _ in range(l // self.seq_len):
self.buckets[count] = i
count += 1
def __len__(self):
return self.len
def __getitem__(self, i: int):
file = self.buckets[i]
prior = sum([l // self.seq_len for l in self.lengths[:file]])
start = (i - prior) * self.seq_len
end = start + self.seq_len
slice = self.data[file][start:end].unsqueeze(1)
# Normalize Data
slice -= slice.min()
slice /= slice.max()
return slice
def make_dataset():
import csv
import time
import os
import pandas as pd
from alpha_vantage.timeseries import TimeSeries
# symbols = ['GOOGL', 'MSFT', 'TSLA', 'AAPL',
# 'AMZN', 'NVDA', 'FB', 'AMD']
symbols = ['GOOGL', 'MSFT', 'TSLA', 'AAPL',
'AMZN', 'NVDA', 'FB', 'AMD',
'BABA', 'PYPL', 'CRM', 'ATVI',
'EA', 'IBM', 'ASML', 'INTC']
ts = TimeSeries(key=APIKEY, output_format='csv')
def retry_download(year, month, symbol, slice):
print((f'Downloading {symbol:10} '
f'year {year} month {month}\n'
f'Slice {slice}'))
data, meta_data = ts.get_intraday_extended(
symbol=symbol, interval='1min', slice=slice)
data = [d for d in data]
if data:
x = [float(v[4]) for v in data if v[4] != 'close']
x = torch.tensor(x)
else:
print('Retrying...')
return retry_download(year, month, symbol, slice)
print('Download Successful:')
print(len(x))
torch.save(x, path)
time.sleep(20)
return x
for year in [1, 2]:
for month in [1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12]:
for symbol in symbols:
slice = f'year{year}month{month}'
path = f'{RAW_PATH}/{symbol}_{slice}.pt'
if not os.path.exists(path):
retry_download(year, month, symbol, slice)
else:
pass
# print((f'Already Downloaded {symbol:10} '
# f'year {year} month {month}'))
print('Dataset Downloaded Successfully!')
if __name__ == "__main__":
from rich import print
make_dataset()
# seq_len, batch_size = 50, 256
seq_len, batch_size = 100, 256
train_ds = Stocks(seq_len=seq_len, split='train')
train_dl = DataLoader(
train_ds, batch_size=batch_size, drop_last=True, shuffle=True)
test_ds = Stocks(seq_len=seq_len, split='test')
test_dl = DataLoader(
test_ds, batch_size=batch_size, drop_last=True, shuffle=True)
print(len(train_ds))
print(len(test_ds))
print(len(train_dl))
print(len(test_dl))
# for i, d in enumerate(train_dl):
# print(i, d.shape)
# for i, d in enumerate(test_dl):
# print(i, d.shape)
| [
"os.path.exists",
"pathlib.Path",
"torch.load",
"time.sleep",
"torch.tensor",
"rich.print",
"torch.save",
"torch.utils.data.DataLoader",
"alpha_vantage.timeseries.TimeSeries"
] | [((1784, 1827), 'alpha_vantage.timeseries.TimeSeries', 'TimeSeries', ([], {'key': 'APIKEY', 'output_format': '"""csv"""'}), "(key=APIKEY, output_format='csv')\n", (1794, 1827), False, 'from alpha_vantage.timeseries import TimeSeries\n'), ((3007, 3048), 'rich.print', 'print', (['"""Dataset Downloaded Successfully!"""'], {}), "('Dataset Downloaded Successfully!')\n", (3012, 3048), False, 'from rich import print\n'), ((3268, 3341), 'torch.utils.data.DataLoader', 'DataLoader', (['train_ds'], {'batch_size': 'batch_size', 'drop_last': '(True)', 'shuffle': '(True)'}), '(train_ds, batch_size=batch_size, drop_last=True, shuffle=True)\n', (3278, 3341), False, 'from torch.utils.data import Dataset, DataLoader\n'), ((3417, 3489), 'torch.utils.data.DataLoader', 'DataLoader', (['test_ds'], {'batch_size': 'batch_size', 'drop_last': '(True)', 'shuffle': '(True)'}), '(test_ds, batch_size=batch_size, drop_last=True, shuffle=True)\n', (3427, 3489), False, 'from torch.utils.data import Dataset, DataLoader\n'), ((366, 380), 'pathlib.Path', 'Path', (['RAW_PATH'], {}), '(RAW_PATH)\n', (370, 380), False, 'from pathlib import Path\n'), ((1889, 1966), 'rich.print', 'print', (['f"""Downloading {symbol:10} year {year} month {month}\nSlice {slice}"""'], {}), '(f"""Downloading {symbol:10} year {year} month {month}\nSlice {slice}""")\n', (1894, 1966), False, 'from rich import print\n'), ((2375, 2404), 'rich.print', 'print', (['"""Download Successful:"""'], {}), "('Download Successful:')\n", (2380, 2404), False, 'from rich import print\n'), ((2435, 2454), 'torch.save', 'torch.save', (['x', 'path'], {}), '(x, path)\n', (2445, 2454), False, 'import torch\n'), ((2463, 2477), 'time.sleep', 'time.sleep', (['(20)'], {}), '(20)\n', (2473, 2477), False, 'import time\n'), ((572, 585), 'torch.load', 'torch.load', (['f'], {}), '(f)\n', (582, 585), False, 'import torch\n'), ((2242, 2257), 'torch.tensor', 'torch.tensor', (['x'], {}), '(x)\n', (2254, 2257), False, 'import torch\n'), ((2284, 2304), 
'rich.print', 'print', (['"""Retrying..."""'], {}), "('Retrying...')\n", (2289, 2304), False, 'from rich import print\n'), ((2747, 2767), 'os.path.exists', 'os.path.exists', (['path'], {}), '(path)\n', (2761, 2767), False, 'import os\n')] |
from datetime import datetime, timezone
from flask import abort
from flask_unchained import BundleConfig
from http import HTTPStatus
from .forms import (
LoginForm,
RegisterForm,
ForgotPasswordForm,
ResetPasswordForm,
ChangePasswordForm,
SendConfirmationForm,
)
from .models import AnonymousUser
class AuthenticationConfig:
"""
Config options for logging in and out.
"""
SECURITY_LOGIN_FORM = LoginForm
"""
The form class to use for the login view.
"""
SECURITY_DEFAULT_REMEMBER_ME = False
"""
Whether or not the login form should default to checking the
"Remember me?" option.
"""
SECURITY_REMEMBER_SALT = 'security-remember-salt'
"""
Salt used for the remember me cookie token.
"""
SECURITY_USER_IDENTITY_ATTRIBUTES = ['email'] # FIXME-identity
"""
List of attributes on the user model that can used for logging in with.
Each must be unique.
"""
SECURITY_POST_LOGIN_REDIRECT_ENDPOINT = '/'
"""
The endpoint or url to redirect to after a successful login.
"""
SECURITY_POST_LOGOUT_REDIRECT_ENDPOINT = '/'
"""
The endpoint or url to redirect to after a user logs out.
"""
class ChangePasswordConfig:
"""
Config options for changing passwords
"""
SECURITY_CHANGEABLE = False
"""
Whether or not to enable change password functionality.
"""
SECURITY_CHANGE_PASSWORD_FORM = ChangePasswordForm
"""
Form class to use for the change password view.
"""
SECURITY_POST_CHANGE_REDIRECT_ENDPOINT = None
"""
Endpoint or url to redirect to after the user changes their password.
"""
SECURITY_SEND_PASSWORD_CHANGED_EMAIL = \
'mail_bundle' in BundleConfig.current_app.unchained.bundles
"""
Whether or not to send the user an email when their password has been changed.
Defaults to True, and it's strongly recommended to leave this option enabled.
"""
class EncryptionConfig:
"""
Config options for encryption hashing.
"""
SECURITY_PASSWORD_SALT = 'security-password-salt'
"""
Specifies the HMAC salt. This is only used if the password hash type is
set to something other than plain text.
"""
SECURITY_PASSWORD_HASH = 'bcrypt'
"""
Specifies the password hash algorithm to use when hashing passwords.
Recommended values for production systems are ``argon2``, ``bcrypt``,
or ``pbkdf2_sha512``. May require extra packages to be installed.
"""
SECURITY_PASSWORD_SINGLE_HASH = False
"""
Specifies that passwords should only be hashed once. By default, passwords
are hashed twice, first with SECURITY_PASSWORD_SALT, and then with a random
salt. May be useful for integrating with other applications.
"""
SECURITY_PASSWORD_SCHEMES = ['argon2',
'bcrypt',
'pbkdf2_sha512',
# and always the last one...
'plaintext']
"""
List of algorithms that can be used for hashing passwords.
"""
SECURITY_PASSWORD_HASH_OPTIONS = {}
"""
Specifies additional options to be passed to the hashing method.
"""
SECURITY_DEPRECATED_PASSWORD_SCHEMES = ['auto']
"""
List of deprecated algorithms for hashing passwords.
"""
SECURITY_HASHING_SCHEMES = ['sha512_crypt']
"""
List of algorithms that can be used for creating and validating tokens.
"""
SECURITY_DEPRECATED_HASHING_SCHEMES = []
"""
List of deprecated algorithms for creating and validating tokens.
"""
class ForgotPasswordConfig:
"""
Config options for recovering forgotten passwords
"""
SECURITY_RECOVERABLE = False
"""
Whether or not to enable forgot password functionality.
"""
SECURITY_FORGOT_PASSWORD_FORM = ForgotPasswordForm
"""
Form class to use for the forgot password form.
"""
# reset password (when the user clicks the link from the email sent by forgot pw)
# --------------
SECURITY_RESET_PASSWORD_FORM = ResetPasswordForm
"""
Form class to use for the reset password form.
"""
SECURITY_RESET_SALT = 'security-reset-salt'
"""
Salt used for the reset token.
"""
SECURITY_RESET_PASSWORD_WITHIN = '5 days'
"""
Specifies the amount of time a user has before their password reset link
expires. Always pluralized the time unit for this value. Defaults to 5 days.
"""
SECURITY_POST_RESET_REDIRECT_ENDPOINT = None
"""
Endpoint or url to redirect to after the user resets their password.
"""
SECURITY_INVALID_RESET_TOKEN_REDIRECT = 'security_controller.forgot_password'
"""
Endpoint or url to redirect to if the reset token is invalid.
"""
SECURITY_EXPIRED_RESET_TOKEN_REDIRECT = 'security_controller.forgot_password'
"""
Endpoint or url to redirect to if the reset token is expired.
"""
SECURITY_API_RESET_PASSWORD_HTTP_GET_REDIRECT = None
"""
Endpoint or url to redirect to if a GET request is made to the reset password
view. Defaults to None, meaning no redirect. Useful for single page apps.
"""
SECURITY_SEND_PASSWORD_RESET_NOTICE_EMAIL = \
'mail_bundle' in BundleConfig.current_app.unchained.bundles
"""
Whether or not to send the user an email when their password has been reset.
Defaults to True, and it's strongly recommended to leave this option enabled.
"""
class RegistrationConfig:
"""
Config options for user registration
"""
SECURITY_REGISTERABLE = False
"""
Whether or not to enable registration.
"""
SECURITY_REGISTER_FORM = RegisterForm
"""
The form class to use for the register view.
"""
SECURITY_POST_REGISTER_REDIRECT_ENDPOINT = None
"""
The endpoint or url to redirect to after a user completes the
registration form.
"""
SECURITY_SEND_REGISTER_EMAIL = \
'mail_bundle' in BundleConfig.current_app.unchained.bundles
"""
Whether or not send a welcome email after a user completes the
registration form.
"""
# email confirmation options
# --------------------------
SECURITY_CONFIRMABLE = False
"""
Whether or not to enable required email confirmation for new users.
"""
SECURITY_SEND_CONFIRMATION_FORM = SendConfirmationForm
"""
Form class to use for the (re)send confirmation email form.
"""
SECURITY_CONFIRM_SALT = 'security-confirm-salt'
"""
Salt used for the confirmation token.
"""
SECURITY_LOGIN_WITHOUT_CONFIRMATION = False
"""
Allow users to login without confirming their email first. (This option
only applies when :attr:`SECURITY_CONFIRMABLE` is True.)
"""
SECURITY_CONFIRM_EMAIL_WITHIN = '5 days'
"""
How long to wait until considering the token in confirmation emails to
be expired.
"""
SECURITY_POST_CONFIRM_REDIRECT_ENDPOINT = None
"""
Endpoint or url to redirect to after the user confirms their email.
Defaults to :attr:`SECURITY_POST_LOGIN_REDIRECT_ENDPOINT`.
"""
SECURITY_CONFIRM_ERROR_REDIRECT_ENDPOINT = None
"""
Endpoint to redirect to if there's an error confirming the user's email.
"""
class TokenConfig:
"""
Config options for token authentication.
"""
SECURITY_TOKEN_AUTHENTICATION_KEY = 'auth_token'
"""
Specifies the query string parameter to read when using token authentication.
"""
SECURITY_TOKEN_AUTHENTICATION_HEADER = 'Authentication-Token'
"""
Specifies the HTTP header to read when using token authentication.
"""
SECURITY_TOKEN_MAX_AGE = None
"""
Specifies the number of seconds before an authentication token expires.
Defaults to None, meaning the token never expires.
"""
class Config(AuthenticationConfig,
ChangePasswordConfig,
EncryptionConfig,
ForgotPasswordConfig,
RegistrationConfig,
TokenConfig,
BundleConfig):
"""
Config options for the Security Bundle.
"""
SECURITY_ANONYMOUS_USER = AnonymousUser
"""
Class to use for representing anonymous users.
"""
SECURITY_UNAUTHORIZED_CALLBACK = lambda: abort(HTTPStatus.UNAUTHORIZED)
"""
This callback gets called when authorization fails. By default we abort with
an HTTP status code of 401 (UNAUTHORIZED).
"""
# make datetimes timezone-aware by default
SECURITY_DATETIME_FACTORY = lambda: datetime.now(timezone.utc)
"""
Factory function to use when creating new dates. By default we use
``datetime.now(timezone.utc)`` to create a timezone-aware datetime.
"""
ADMIN_CATEGORY_ICON_CLASSES = {
'Security': 'fa fa-lock',
}
class TestConfig(Config):
"""
Default test settings for the Security Bundle.
"""
SECURITY_PASSWORD_HASH = '<PASSWORD>'
"""
Disable password-hashing in tests (shaves about 30% off the test-run time)
"""
| [
"flask.abort",
"datetime.datetime.now"
] | [((8358, 8388), 'flask.abort', 'abort', (['HTTPStatus.UNAUTHORIZED'], {}), '(HTTPStatus.UNAUTHORIZED)\n', (8363, 8388), False, 'from flask import abort\n'), ((8621, 8647), 'datetime.datetime.now', 'datetime.now', (['timezone.utc'], {}), '(timezone.utc)\n', (8633, 8647), False, 'from datetime import datetime, timezone\n')] |
try:
from Cython.Build import cythonize
ext_modules = cythonize(['sic/core.py', 'sic/implicit.py'], compiler_directives={'language_level': '3'})
except:
pass
| [
"Cython.Build.cythonize"
] | [((62, 157), 'Cython.Build.cythonize', 'cythonize', (["['sic/core.py', 'sic/implicit.py']"], {'compiler_directives': "{'language_level': '3'}"}), "(['sic/core.py', 'sic/implicit.py'], compiler_directives={\n 'language_level': '3'})\n", (71, 157), False, 'from Cython.Build import cythonize\n')] |
from flask import request
from flask_restful import Resource, abort
from flask_jwt_extended import get_jwt_identity
from helpers import jwt_refresh_required
from helpers.genders import genders
from helpers.email import send_validation_email
from models.user import User, get_full_user
from models.validation import Validation
import secrets
from helpers import Arguments
import traceback
class UserListResource(Resource):
def post(self):
"""
Posting to userlist = Registration
"""
args = Arguments(request.json)
args.email("email", required=True)
args.string("username", required=True, min=3, max=255)
args.string("password", required=True, max=255)
args.string("fname", required=True, min=1, max=255)
args.string("lname", required=True, min=1, max=255)
# Validate method will abort with 400 if needed
args.validate()
if User.get(username=args.username):
return {"message" : "Username already exists"}, 400
if User.get(email=args.email):
return {"message" : "Email address already exists"}, 400
try:
new = User(dict(args))
new.save()
except Exception as e:
return {"message" : str(e)}, 500
user = User.get(username=args.username)
# Create validation entry and send email with verify link
try:
validation = Validation(user_id=user.id, code=secrets.token_urlsafe(256))
validation.save()
except Exception as e:
return {"message" : str(e)}, 500
send_validation_email(user, validation.code)
return user, 200
class UserResource(Resource):
@jwt_refresh_required
def get(self, id):
current_user = get_jwt_identity()
try:
int(id)
user = User.get(id=id)
except ValueError:
user = User.get(username=id)
if not user:
return {"message" : "User does not exist"}, 404
## TODO gdubs look at this and fix it so that email is only returned for the loggedin user and not other users because security
return get_full_user(user.id), 200
@jwt_refresh_required
def put(self, id):
args = Arguments(request.json)
args.dict("user", required=True)
args.validate()
current_user = get_jwt_identity()
try:
id = int(id)
except ValueError:
return {"message" : "Profiles can only be updated using the ID"}, 400
user = User.get(id=id)
if not user or current_user["id"] != id:
return {"message" : "You are not authorized to edit this profile"}, 401
if "id" in args.user:
del args.user["id"]
if "images" in args.user:
del args.user["images"]
try:
args.user["interests"] = args.user["interests"] if args.user["interests"] else ""
except Exception:
pass
try:
args.user["preferences"] = args.user["preferences"] if args.user["preferences"] else ""
except Exception:
pass
mail = args.user.get("email", None)
if mail and mail != user.email:
user.email = mail
user.email_verified = False
try:
validation = Validation(user_id=user.id, code=secrets.token_urlsafe(256))
validation.save()
send_validation_email(user, validation.code)
except Exception as e:
return {"message" : str(e)}, 500
user.update(args.user)
try:
user.save()
return {"message": "User updated"}, 200
except Exception as e:
return {"message": str(e)}, 400
class CurrentUserResource(Resource):
@jwt_refresh_required
def get(self):
current_user = get_jwt_identity()
return get_full_user(current_user["id"]), 200 | [
"helpers.Arguments",
"models.user.get_full_user",
"secrets.token_urlsafe",
"models.user.User.get",
"flask_jwt_extended.get_jwt_identity",
"helpers.email.send_validation_email"
] | [((532, 555), 'helpers.Arguments', 'Arguments', (['request.json'], {}), '(request.json)\n', (541, 555), False, 'from helpers import Arguments\n'), ((936, 968), 'models.user.User.get', 'User.get', ([], {'username': 'args.username'}), '(username=args.username)\n', (944, 968), False, 'from models.user import User, get_full_user\n'), ((1046, 1072), 'models.user.User.get', 'User.get', ([], {'email': 'args.email'}), '(email=args.email)\n', (1054, 1072), False, 'from models.user import User, get_full_user\n'), ((1307, 1339), 'models.user.User.get', 'User.get', ([], {'username': 'args.username'}), '(username=args.username)\n', (1315, 1339), False, 'from models.user import User, get_full_user\n'), ((1621, 1665), 'helpers.email.send_validation_email', 'send_validation_email', (['user', 'validation.code'], {}), '(user, validation.code)\n', (1642, 1665), False, 'from helpers.email import send_validation_email\n'), ((1805, 1823), 'flask_jwt_extended.get_jwt_identity', 'get_jwt_identity', ([], {}), '()\n', (1821, 1823), False, 'from flask_jwt_extended import get_jwt_identity\n'), ((2288, 2311), 'helpers.Arguments', 'Arguments', (['request.json'], {}), '(request.json)\n', (2297, 2311), False, 'from helpers import Arguments\n'), ((2401, 2419), 'flask_jwt_extended.get_jwt_identity', 'get_jwt_identity', ([], {}), '()\n', (2417, 2419), False, 'from flask_jwt_extended import get_jwt_identity\n'), ((2587, 2602), 'models.user.User.get', 'User.get', ([], {'id': 'id'}), '(id=id)\n', (2595, 2602), False, 'from models.user import User, get_full_user\n'), ((3944, 3962), 'flask_jwt_extended.get_jwt_identity', 'get_jwt_identity', ([], {}), '()\n', (3960, 3962), False, 'from flask_jwt_extended import get_jwt_identity\n'), ((1877, 1892), 'models.user.User.get', 'User.get', ([], {'id': 'id'}), '(id=id)\n', (1885, 1892), False, 'from models.user import User, get_full_user\n'), ((2195, 2217), 'models.user.get_full_user', 'get_full_user', (['user.id'], {}), '(user.id)\n', (2208, 2217), False, 
'from models.user import User, get_full_user\n'), ((3978, 4011), 'models.user.get_full_user', 'get_full_user', (["current_user['id']"], {}), "(current_user['id'])\n", (3991, 4011), False, 'from models.user import User, get_full_user\n'), ((1939, 1960), 'models.user.User.get', 'User.get', ([], {'username': 'id'}), '(username=id)\n', (1947, 1960), False, 'from models.user import User, get_full_user\n'), ((3511, 3555), 'helpers.email.send_validation_email', 'send_validation_email', (['user', 'validation.code'], {}), '(user, validation.code)\n', (3532, 3555), False, 'from helpers.email import send_validation_email\n'), ((1478, 1504), 'secrets.token_urlsafe', 'secrets.token_urlsafe', (['(256)'], {}), '(256)\n', (1499, 1504), False, 'import secrets\n'), ((3433, 3459), 'secrets.token_urlsafe', 'secrets.token_urlsafe', (['(256)'], {}), '(256)\n', (3454, 3459), False, 'import secrets\n')] |
import numpy as np
import pydensecrf.densecrf as dcrf
from pydensecrf.utils import compute_unary, create_pairwise_bilateral, create_pairwise_gaussian, unary_from_softmax
def dense_crf(img, prob):
'''
input:
img: numpy array of shape (num of channels, height, width)
prob: numpy array of shape (9, height, width), neural network last layer sigmoid output for img
output:
res: (height, width)
Modified from:
http://warmspringwinds.github.io/tensorflow/tf-slim/2016/12/18/image-segmentation-with-tensorflow-using-cnns-and-conditional-random-fields/
https://github.com/yt605155624/tensorflow-deeplab-resnet/blob/e81482d7bb1ae674f07eae32b0953fe09ff1c9d1/inference_crf.py
'''
img = np.swapaxes(img, 0, 2)
# img.shape: (width, height, num of channels)(224,224,3)
num_iter = 50
prob = np.swapaxes(prob, 1, 2) # shape: (1, width, height) (9,224,224)
num_classes = 9 #2
d = dcrf.DenseCRF2D(img.shape[0] , img.shape[1], num_classes)
unary = unary_from_softmax(prob) # shape: (num_classes, width * height)
unary = np.ascontiguousarray(unary)
img = np.ascontiguousarray(img,dtype=np.uint8)
d.setUnaryEnergy(unary)
d.addPairwiseBilateral(sxy=5, srgb=3, rgbim=img, compat=3)
Q = d.inference(num_iter) # set the number of iterations
res = np.argmax(Q, axis=0).reshape((img.shape[0], img.shape[1]))
# res.shape: (width, height)
res = np.swapaxes(res, 0, 1) # res.shape: (height, width)
# res = res[np.newaxis, :, :] # res.shape: (1, height, width)
# func_end = time.time()
# print('{:.2f} sec spent on CRF with {} iterations'.format(func_end - func_start, num_iter))
# about 2 sec for a 1280 * 960 image with 5 iterations
return res | [
"pydensecrf.densecrf.DenseCRF2D",
"numpy.argmax",
"numpy.ascontiguousarray",
"numpy.swapaxes",
"pydensecrf.utils.unary_from_softmax"
] | [((733, 755), 'numpy.swapaxes', 'np.swapaxes', (['img', '(0)', '(2)'], {}), '(img, 0, 2)\n', (744, 755), True, 'import numpy as np\n'), ((848, 871), 'numpy.swapaxes', 'np.swapaxes', (['prob', '(1)', '(2)'], {}), '(prob, 1, 2)\n', (859, 871), True, 'import numpy as np\n'), ((945, 1001), 'pydensecrf.densecrf.DenseCRF2D', 'dcrf.DenseCRF2D', (['img.shape[0]', 'img.shape[1]', 'num_classes'], {}), '(img.shape[0], img.shape[1], num_classes)\n', (960, 1001), True, 'import pydensecrf.densecrf as dcrf\n'), ((1016, 1040), 'pydensecrf.utils.unary_from_softmax', 'unary_from_softmax', (['prob'], {}), '(prob)\n', (1034, 1040), False, 'from pydensecrf.utils import compute_unary, create_pairwise_bilateral, create_pairwise_gaussian, unary_from_softmax\n'), ((1093, 1120), 'numpy.ascontiguousarray', 'np.ascontiguousarray', (['unary'], {}), '(unary)\n', (1113, 1120), True, 'import numpy as np\n'), ((1131, 1172), 'numpy.ascontiguousarray', 'np.ascontiguousarray', (['img'], {'dtype': 'np.uint8'}), '(img, dtype=np.uint8)\n', (1151, 1172), True, 'import numpy as np\n'), ((1440, 1462), 'numpy.swapaxes', 'np.swapaxes', (['res', '(0)', '(1)'], {}), '(res, 0, 1)\n', (1451, 1462), True, 'import numpy as np\n'), ((1337, 1357), 'numpy.argmax', 'np.argmax', (['Q'], {'axis': '(0)'}), '(Q, axis=0)\n', (1346, 1357), True, 'import numpy as np\n')] |
import os
import numpy
import scipy
import scipy.optimize
from cryspy.A_functions_base.symmetry_elements import \
calc_asymmetric_unit_cell_indexes
from cryspy.A_functions_base.mempy import \
calc_mem_col, \
calc_mem_chi, \
calc_symm_elem_points_by_index_points, \
get_uniform_density_col, \
renormailize_density_col, \
save_spin_density_into_file,\
form_basins,\
calc_point_susceptibility, \
get_uniform_density_chi,\
renormailize_density_chi, \
calc_model_value_by_precalculated_data, \
calc_chi_atoms
from cryspy.A_functions_base.unit_cell import \
calc_volume_uc_by_unit_cell_parameters, \
calc_sthovl_by_unit_cell_parameters, \
calc_eq_ccs_by_unit_cell_parameters
from cryspy.A_functions_base.structure_factor import \
calc_f_nucl_by_dictionary
from cryspy.A_functions_base.flip_ratio import \
calc_iint, calc_flip_ratio_by_iint, \
calc_asymmetry_by_iint
from cryspy.A_functions_base.extinction import \
calc_extinction_sphere
from cryspy.A_functions_base.orbital_functions import \
calc_density_spherical
from cryspy.A_functions_base.matrix_operations import \
calc_vv_as_v1_v2_v1
from cryspy.A_functions_base.function_1_error_simplex import \
error_estimation_simplex
def mempy_reconstruction_by_dictionary(dict_crystal, dict_mem_parameters, l_dict_diffrn, dict_in_out,
        parameter_lambda:float=1.e-5, iteration_max:int=1000, parameter_lambda_min:float=1.e-9, delta_density:float=1.e-5):
    """Reconstruct the magnetization/spin density by maximum-entropy (MEM) iteration.

    The density is described by up to two channels:
    - "plus/minus" channel: two collinear scalar densities scaled by
      ``magnetization_plus`` / ``magnetization_minus``;
    - "chi" channel: a scalar density multiplied by a point-wise
      susceptibility tensor inside magnetic basins.

    Parameters
    ----------
    dict_crystal : dict
        Crystal description (unit cell, symmetry elements, atoms,
        susceptibility parameters).
    dict_mem_parameters : dict
        MEM options: grid ``points_abc``, channel flags, prior-density flags,
        output file names, ``gof_desired`` and ``flag_asymmetry``.
    l_dict_diffrn : list of dict
        One dictionary per polarized-neutron diffraction experiment
        (index_hkl, flip ratios, beam/extinction parameters, ...).
    dict_in_out : dict
        In/out working dictionary; intermediate arrays (multiplicities,
        symmetry elements, reconstructed densities, per-experiment data)
        are stored here for reuse by later steps.
    parameter_lambda : float
        Starting MEM step-size parameter (adapted during iteration).
    iteration_max : int
        Hard cap on the number of MEM iterations.
    parameter_lambda_min : float
        Stop when the adaptive lambda drops below this value.
    delta_density : float
        Small offset used in the exponential density update to allow
        zero-density points to move.

    Returns
    -------
    None; results are written into ``dict_in_out`` and optionally into
    ``.den`` files.
    """
    # **Input information about mem parameters**
    print("*******************************************")
    print("MEM reconstruction by CrysPy (module MEMPy)")
    print("*******************************************\n")
    print("MEM iteration parameters")
    print("------------------------")
    print(f" starting lambda parameter: {parameter_lambda*1e6:.3f}*10^-6")
    print(f" maximal number of iterations: {iteration_max:}")
    print(f" minimal lambda parameter: {parameter_lambda_min*1e6:}*10^-6")
    print(f" delta_density: {delta_density*1e5:}*10^-5\n")
    dict_in_out_keys = dict_in_out.keys()
    print("Density reconstruction")
    print("----------------------")
    # Number of grid points along a, b, c of the unit cell.
    n_abc = dict_mem_parameters["points_abc"]
    print(f"Unit cell is devided on points {n_abc[0]:} x {n_abc[1]:} x {n_abc[2]:}.")
    channel_plus_minus = dict_mem_parameters["channel_plus_minus"]
    channel_chi = dict_mem_parameters["channel_chi"]
    if channel_plus_minus:
        magnetization_plus = dict_mem_parameters["magnetization_plus"]
        magnetization_minus = dict_mem_parameters["magnetization_minus"]
        file_spin_density = dict_mem_parameters["file_spin_density"]
        dict_in_out["magnetization_plus"] = magnetization_plus
        dict_in_out["magnetization_minus"] = magnetization_minus
    if channel_chi:
        flag_uniform_prior_density = dict_mem_parameters["flag_uniform_prior_density"]
        flag_only_magnetic_basins = dict_mem_parameters["flag_only_magnetic_basins"]
        file_magnetization_density = dict_mem_parameters["file_magnetization_density"]
    flag_asymmetry = dict_mem_parameters["flag_asymmetry"]
    gof_desired = dict_mem_parameters["gof_desired"]
    # **Input information about crystal**
    unit_cell_parameters = dict_crystal["unit_cell_parameters"]
    full_symm_elems = dict_crystal["full_symm_elems"]
    volume_unit_cell = calc_volume_uc_by_unit_cell_parameters(unit_cell_parameters, flag_unit_cell_parameters=False)[0]
    reduced_symm_elems = dict_crystal["reduced_symm_elems"]
    centrosymmetry = dict_crystal["centrosymmetry"]
    if centrosymmetry:
        centrosymmetry_position = dict_crystal["centrosymmetry_position"]
    else:
        centrosymmetry_position = None
    translation_elems = dict_crystal["translation_elems"]
    atom_label = dict_crystal["atom_label"]
    atom_fract_xyz = dict_crystal["atom_fract_xyz"]
    atom_multiplicity = dict_crystal["atom_multiplicity"]
    if channel_chi:
        atom_para_label = dict_crystal["atom_para_label"]
        atom_para_susceptibility = dict_crystal["atom_para_susceptibility"]
        atom_para_sc_chi = dict_crystal["atom_para_sc_chi"]
    # **Index in asymmetric unit cell**
    print("Calculation of asymmetric unit cell...", end="\r")
    index_auc, point_multiplicity = calc_asymmetric_unit_cell_indexes(n_abc, full_symm_elems)
    symm_elem_auc = calc_symm_elem_points_by_index_points(index_auc, n_abc)
    print(f"Number of points in asymmetric unit cell is {index_auc.shape[1]:}.", end="\n")
    # **Basin devision**
    # Split the asymmetric unit cell into atomic basins; ``flag_chi`` marks
    # points assigned to the chi channel (around magnetic atoms).
    if channel_chi and flag_only_magnetic_basins:
        print("Devision of asymmetric unit cell on bassins...", end="\r")
        # Keep only atoms that are listed as paramagnetic (label match).
        flag_atom_para = numpy.any(numpy.expand_dims(atom_label, axis=1) == numpy.expand_dims(atom_para_label, axis=0), axis=1)
        flag_chi, atom_label_auc_chi, atom_multiplicity_auc_chi, atom_distance_auc_chi, atom_symm_elems_auc_chi = \
            form_basins(symm_elem_auc, full_symm_elems, unit_cell_parameters, atom_label[flag_atom_para],
            atom_fract_xyz[:,flag_atom_para], atom_multiplicity[flag_atom_para], atom_para_label)
        dict_in_out["atom_multiplicity_channel_chi"] = atom_multiplicity_auc_chi
        print(f"Magnetic basins occupy entire unit cell. \n(flag_only_magnetic_basins: {flag_only_magnetic_basins:})\n")
    elif channel_chi:
        print("Devision of asymmetric unit cell on bassins...", end="\r")
        flag_chi, atom_label_auc_chi, atom_multiplicity_auc_chi, atom_distance_auc_chi, atom_symm_elems_auc_chi = \
            form_basins(symm_elem_auc, full_symm_elems, unit_cell_parameters, atom_label,
            atom_fract_xyz, atom_multiplicity, atom_para_label)
        dict_in_out["atom_multiplicity_channel_chi"] = atom_multiplicity_auc_chi
        print(f"Magnetic basins occupy area around magnetic atoms. \n(flag_only_magnetic_basins: {flag_only_magnetic_basins:})\n")
    if channel_chi:
        index_auc_chi = index_auc[:, flag_chi]
        point_multiplicity_chi = point_multiplicity[flag_chi]
        dict_in_out["point_multiplicity_channel_chi"] = point_multiplicity_chi
        symm_elem_auc_chi = symm_elem_auc[:, flag_chi]
        dict_in_out["symm_elem_channel_chi"] = symm_elem_auc_chi
    # Plus/minus channel takes the points left over by the chi channel,
    # or the whole asymmetric unit cell when the chi channel is off.
    if channel_plus_minus and channel_chi:
        flag_col = numpy.logical_not(flag_chi)
        index_auc_col = index_auc[:, flag_col]
        point_multiplicity_col = point_multiplicity[flag_col]
        symm_elem_auc_col = symm_elem_auc[:, flag_col]
        dict_in_out["point_multiplicity_channel_plus_minus"] = point_multiplicity_col
        dict_in_out["symm_elem_channel_plus_minus"] = symm_elem_auc_col
    elif channel_plus_minus:
        index_auc_col = numpy.copy(index_auc)
        point_multiplicity_col = numpy.copy(point_multiplicity)
        symm_elem_auc_col = numpy.copy(symm_elem_auc)
        dict_in_out["point_multiplicity_channel_plus_minus"] = point_multiplicity_col
        dict_in_out["symm_elem_channel_plus_minus"] = symm_elem_auc_col
    print(f"channel_plus_minus: {channel_plus_minus:}")
    print(f"channel_chi: {channel_chi:}\n")
    if channel_plus_minus:
        print(f"Magnetization of unit cell: {magnetization_plus+magnetization_minus:.3f} mu_B")
        print(f"(positive channel {magnetization_plus:.3f} mu_B, negative channel {magnetization_minus:.3f} mu_B)")
        print(f"\nNumber of density points for channel_plus_minus is {index_auc_col.shape[1]}.")
    if channel_chi:
        print(f"Number of density points for channel_chi is {index_auc_chi.shape[1]}.")
    # **Susceptibility tensor $(3\times 3)$ for each point in magnetic basin**
    if channel_chi:
        print("Calculation of restriction on susceptibility...", end="\r")
        point_susceptibility = calc_point_susceptibility(
            unit_cell_parameters, atom_symm_elems_auc_chi, atom_label_auc_chi,
            atom_para_label, atom_para_susceptibility, atom_para_sc_chi, full_symm_elems, symm_elem_auc_chi)
        dict_in_out["susceptibility_channel_chi"] = point_susceptibility
        print(80*" ", end="\r")
    # **Prior density**
    number_unit_cell = numpy.prod(n_abc)
    print("\nCalculation of prior density... ", end="\r")
    if channel_chi:
        if flag_uniform_prior_density:
            density_chi_prior = get_uniform_density_chi(point_multiplicity_chi, atom_label_auc_chi, atom_multiplicity_auc_chi, volume_unit_cell, number_unit_cell)
            print("Prior density in channel chi is uniform. ")
        else:
            # Non-uniform prior: spherical core-electron density per atom type.
            density_chi_prior = numpy.zeros_like(atom_distance_auc_chi)
            for label in atom_para_label:
                flag_atom = atom_label_auc_chi==label
                dict_shell = dict_crystal[f"shell_{label:}"]
                kappa = float(dict_crystal["mag_atom_kappa"][dict_crystal["mag_atom_label"] == label])
                den_atom = calc_density_spherical(
                    atom_distance_auc_chi[flag_atom], dict_shell["core_population"], dict_shell["core_coeff"], dict_shell["core_zeta"],
                    dict_shell["core_n"], kappa)
                density_chi_prior[flag_atom] = den_atom
            density_chi_prior = renormailize_density_chi(density_chi_prior, point_multiplicity_chi, atom_label_auc_chi, atom_multiplicity_auc_chi, volume_unit_cell, number_unit_cell)
            print("Prior density in channel chi is core. ")
    if channel_plus_minus:
        density_col_prior = get_uniform_density_col(point_multiplicity_col, volume_unit_cell, number_unit_cell)
        print("Prior density in channel plus-minus is uniform. ")
    # **Input information about experiments**
    flag_use_precalculated_data = False
    l_exp_value_sigma = []
    l_mem_chi, l_mem_col = [], []
    print(f"Number of experiments is {len(l_dict_diffrn):}. ")
    # Per-experiment precalculation: MEM matrices (density -> magnetic
    # structure factor), nuclear structure factors and observed values.
    for dict_diffrn in l_dict_diffrn:
        if "dict_in_out_"+dict_diffrn["type_name"] in dict_in_out_keys:
            diffrn_dict_in_out = dict_in_out["dict_in_out_"+dict_diffrn["type_name"]]
        else:
            diffrn_dict_in_out = {}
            dict_in_out["dict_in_out_"+dict_diffrn["type_name"]] = diffrn_dict_in_out
        index_hkl = dict_diffrn["index_hkl"]
        h_ccs = dict_diffrn["magnetic_field"]
        eh_ccs = dict_diffrn["matrix_u"][6:]  # last row of U: field direction in CCS
        print(f"Preliminary calculation for experiment {dict_diffrn['name']:}...", end="\r")
        diffrn_dict_in_out["index_hkl"] = index_hkl
        diffrn_dict_in_out_keys = diffrn_dict_in_out.keys()
        if channel_plus_minus:
            if "dict_in_out_col" in diffrn_dict_in_out_keys:
                dict_in_out_col = diffrn_dict_in_out["dict_in_out_col"]
            else:
                dict_in_out_col = {}
                diffrn_dict_in_out["dict_in_out_col"] = dict_in_out_col
            mem_col = calc_mem_col(
                index_hkl, unit_cell_parameters, eh_ccs, full_symm_elems, symm_elem_auc_col,
                volume_unit_cell, number_unit_cell,
                point_multiplicity=point_multiplicity_col,
                dict_in_out=dict_in_out_col, flag_use_precalculated_data=flag_use_precalculated_data)
            diffrn_dict_in_out["mem_col"] = mem_col
            l_mem_col.append(mem_col)
        if channel_chi:
            if "dict_in_out_chi" in diffrn_dict_in_out_keys:
                dict_in_out_chi = diffrn_dict_in_out["dict_in_out_chi"]
            else:
                dict_in_out_chi = {}
                diffrn_dict_in_out["dict_in_out_chi"] = dict_in_out_chi
            mem_chi = calc_mem_chi(
                index_hkl, unit_cell_parameters, h_ccs, full_symm_elems, symm_elem_auc_chi,
                point_susceptibility, volume_unit_cell, number_unit_cell,
                point_multiplicity=point_multiplicity_chi,
                dict_in_out=dict_in_out_chi, flag_use_precalculated_data=flag_use_precalculated_data)
            diffrn_dict_in_out["mem_chi"] = mem_chi
            l_mem_chi.append(mem_chi)
        f_nucl, dder = calc_f_nucl_by_dictionary(
            dict_crystal, diffrn_dict_in_out, flag_use_precalculated_data=flag_use_precalculated_data)
        diffrn_dict_in_out["f_nucl"] = f_nucl
        flip_ratio_es = dict_diffrn["flip_ratio_es"]
        if flag_asymmetry:
            # Convert flip ratio R +/- sigma into asymmetry A = (R-1)/(R+1)
            # with first-order error propagation.
            asymmetry_e = (flip_ratio_es[0] -1.)/(flip_ratio_es[0] + 1.)
            asymmetry_s = numpy.sqrt(2.)*flip_ratio_es[1] * numpy.sqrt(numpy.square(flip_ratio_es[0]) + 1.)/numpy.square(flip_ratio_es[0] + 1.)
            asymmetry_es = numpy.stack([asymmetry_e, asymmetry_s], axis=0)
            l_exp_value_sigma.append(asymmetry_es)
        else:
            l_exp_value_sigma.append(flip_ratio_es)
    exp_value_sigma = numpy.concatenate(l_exp_value_sigma, axis=1)
    if channel_plus_minus:
        mem_col = numpy.concatenate(l_mem_col, axis=1)
    if channel_chi:
        mem_chi = numpy.concatenate(l_mem_chi, axis=1)
    print(f"Total number of reflections is {exp_value_sigma.shape[1]: }. ")
    if flag_asymmetry:
        print("Density reconstruction is based on asymmetry parameters.")
    else:
        print("Density reconstruction is based on flip ratios. ")
    # **Preaparation to MEM itertion procedure**
    if channel_plus_minus:
        density_col = numpy.copy(density_col_prior)
        density_col_next = numpy.copy(density_col_prior)
    if channel_chi:
        density_chi = numpy.copy(density_chi_prior)
        density_chi_next = numpy.copy(density_chi_prior)
    # **MEM iteration**
    print("\nMEM iteration procedure")
    print("-----------------------")
    print(f"Desired GoF is {gof_desired:.2f}.")
    c_desired = gof_desired
    c_previous = numpy.inf  # best chi_sq seen so far (accept/reject criterion)
    if channel_plus_minus:
        der_c_den_col_previous = numpy.zeros_like(density_col_prior)
    if channel_chi:
        der_c_den_chi_previous = numpy.zeros_like(density_chi_prior)
    iteration = 0
    flag_next = True
    while flag_next:
        iteration += 1
        if channel_plus_minus:
            density_col = numpy.copy(density_col_next)
        if channel_chi:
            density_chi = numpy.copy(density_chi_next)
        l_model_value = []
        l_der_model_den_pm, l_der_model_den_chi = [], []
        # Forward model and its derivative w.r.t. density, per experiment.
        for dict_diffrn in l_dict_diffrn:
            diffrn_dict_in_out = dict_in_out["dict_in_out_"+dict_diffrn['type_name']]
            index_hkl = diffrn_dict_in_out["index_hkl"]
            f_m_perp = numpy.zeros(index_hkl.shape, dtype=complex)
            if channel_plus_minus:
                mem_col_exp = diffrn_dict_in_out["mem_col"]
                hh = numpy.expand_dims(numpy.expand_dims(magnetization_plus * density_col[0] + magnetization_minus * density_col[1], axis=0), axis=1)
                f_m_perp_col = (hh*mem_col_exp).sum(axis=2)
                f_m_perp += f_m_perp_col
            if channel_chi:
                mem_chi_exp = diffrn_dict_in_out["mem_chi"]
                f_m_perp_chi = (density_chi*mem_chi_exp).sum(axis=2)
                f_m_perp += f_m_perp_chi
            beam_polarization = dict_diffrn["beam_polarization"]
            flipper_efficiency = dict_diffrn["flipper_efficiency"]
            matrix_u = dict_diffrn["matrix_u"]
            flip_ratio_es = dict_diffrn["flip_ratio_es"]
            f_nucl = diffrn_dict_in_out["f_nucl"]
            wavelength = dict_diffrn["wavelength"]
            sthovl = calc_sthovl_by_unit_cell_parameters(index_hkl, unit_cell_parameters, flag_unit_cell_parameters=False)[0]
            cos_2theta = numpy.cos(2*numpy.arcsin(sthovl*wavelength))
            extinction_model = dict_diffrn["extinction_model"]
            extinction_radius = dict_diffrn["extinction_radius"]
            extinction_mosaicity = dict_diffrn["extinction_mosaicity"]
            # Extinction correction closure; all derivative flags are off
            # since extinction parameters are fixed here.
            func_extinction = lambda f_sq, flag_f_sq: calc_extinction_sphere(
                f_sq, extinction_radius, extinction_mosaicity, volume_unit_cell, cos_2theta, wavelength,
                extinction_model, flag_f_sq=False, flag_radius=False,
                flag_mosaicity=False,
                flag_volume_unit_cell=False,
                flag_cos_2theta=False,
                flag_wavelength=False)
            iint_plus, iint_minus, dder_plus, dder_minus = calc_iint(
                beam_polarization, flipper_efficiency, f_nucl, f_m_perp, matrix_u, func_extinction = func_extinction,
                flag_beam_polarization = False, flag_flipper_efficiency = False,
                flag_f_nucl = False, flag_f_m_perp = True,
                dict_in_out = dict_in_out, flag_use_precalculated_data = flag_use_precalculated_data)
            diffrn_dict_in_out["flip_ratio"] = iint_plus/iint_minus
            der_int_plus_fm_perp_real = dder_plus["f_m_perp_real"]
            der_int_plus_fm_perp_imag = dder_plus["f_m_perp_imag"]
            der_int_minus_fm_perp_real = dder_minus["f_m_perp_real"]
            der_int_minus_fm_perp_imag = dder_minus["f_m_perp_imag"]
            if flag_asymmetry:
                model_exp, dder_model_exp = calc_asymmetry_by_iint(
                    iint_plus, iint_minus, c_lambda2=None, iint_2hkl=None,
                    flag_iint_plus=True, flag_iint_minus=True,
                    flag_c_lambda2=False, flag_iint_2hkl=False)
            else:
                model_exp, dder_model_exp = calc_flip_ratio_by_iint(
                    iint_plus, iint_minus, c_lambda2=None, iint_2hkl=None,
                    flag_iint_plus=True, flag_iint_minus=True,
                    flag_c_lambda2=False, flag_iint_2hkl=False)
            l_model_value.append(model_exp)
            der_model_int_plus = numpy.expand_dims(dder_model_exp["iint_plus"], axis=0)
            der_model_int_minus = numpy.expand_dims(dder_model_exp["iint_minus"], axis=0)
            # Chain rule: d(model)/d(density) via d(model)/d(iint) and
            # d(iint)/d(F_M_perp), summed over real and imaginary parts.
            if channel_plus_minus:
                der_model_den_pm_exp = (
                    (mem_col_exp.real*numpy.expand_dims(
                        der_model_int_plus*der_int_plus_fm_perp_real +
                        der_model_int_minus*der_int_minus_fm_perp_real, axis=2)
                    ).sum(axis=0) +
                    (mem_col_exp.imag*numpy.expand_dims(
                        der_model_int_plus*der_int_plus_fm_perp_imag +
                        der_model_int_minus*der_int_minus_fm_perp_imag, axis=2)
                    ).sum(axis=0))
                l_der_model_den_pm.append(der_model_den_pm_exp)
            if channel_chi:
                der_model_den_chi_exp = (
                    (mem_chi_exp.real*numpy.expand_dims(
                        der_model_int_plus*der_int_plus_fm_perp_real +
                        der_model_int_minus*der_int_minus_fm_perp_real, axis=2)
                    ).sum(axis=0) +
                    (mem_chi_exp.imag*numpy.expand_dims(
                        der_model_int_plus*der_int_plus_fm_perp_imag +
                        der_model_int_minus*der_int_minus_fm_perp_imag, axis=2)
                    ).sum(axis=0))
                l_der_model_den_chi.append(der_model_den_chi_exp)
        model_value = numpy.concatenate(l_model_value, axis=0)
        # Goodness of fit c = chi_sq / n over all reflections.
        diff_value = (exp_value_sigma[0]-model_value)/exp_value_sigma[1]
        c = numpy.square(diff_value).sum(axis=0)/diff_value.shape[0]
        if channel_plus_minus:
            der_model_den_pm = numpy.concatenate(l_der_model_den_pm, axis=0)
            der_c_den_pm = (-2.)/diff_value.shape[0] * (
                numpy.expand_dims((diff_value/exp_value_sigma[1]),axis=1) *
                der_model_den_pm).sum(axis=0)
            der_c_den_col = numpy.stack([magnetization_plus * der_c_den_pm, magnetization_minus * der_c_den_pm], axis=0)
        if channel_chi:
            der_model_den_chi = numpy.concatenate(l_der_model_den_chi, axis=0)
            der_c_den_chi = (-2.)/diff_value.shape[0] * (
                numpy.expand_dims((diff_value/exp_value_sigma[1]),axis=1) *
                der_model_den_chi).sum(axis=0)
        # Accept/reject step: if chi_sq worsened, halve lambda and restore
        # the previously accepted state; otherwise keep it and grow lambda.
        if c > c_previous:
            parameter_lambda = 0.5 * parameter_lambda
            c = c_previous
            if channel_plus_minus:
                density_col = numpy.copy(density_col_previous)
                der_c_den_col = der_c_den_col_previous
            if channel_chi:
                density_chi = numpy.copy(density_chi_previous)
                der_c_den_chi = der_c_den_chi_previous
        else:
            c_previous = c
            parameter_lambda = 1.03 * parameter_lambda
            if channel_plus_minus:
                density_col_previous = numpy.copy(density_col)
                der_c_den_col_previous = der_c_den_col
            if channel_chi:
                density_chi_previous = numpy.copy(density_chi)
                der_c_den_chi_previous = der_c_den_chi
        print(f"Iteration {iteration:5}, lambda {parameter_lambda*1e6:.3f}*10^-6, chi_sq: {c:.2f} ", end='\r')
        # Multiplicative (entropy-like) update: rho <- (rho+d)*exp(-k*grad)-d,
        # clipped at zero, then renormalized to the channel's total moment.
        if channel_plus_minus:
            coeff = (parameter_lambda*number_unit_cell/(c_desired*volume_unit_cell))/point_multiplicity_col
            hh = (density_col+delta_density)*numpy.exp(-coeff*der_c_den_col)-delta_density
            hh = numpy.where(hh>0, hh, 0)
            density_col_next = renormailize_density_col(hh, point_multiplicity_col, volume_unit_cell, number_unit_cell)
        if channel_chi:
            coeff = (parameter_lambda*number_unit_cell/(c_desired*volume_unit_cell))*atom_multiplicity_auc_chi/point_multiplicity_chi
            hh = (density_chi+delta_density)*numpy.exp(-coeff*der_c_den_chi)-delta_density
            hh = numpy.where(hh>0, hh, 0)
            density_chi_next = renormailize_density_chi(hh, point_multiplicity_chi, atom_label_auc_chi, atom_multiplicity_auc_chi, volume_unit_cell, number_unit_cell)
        # Stopping criteria (any of the three below terminates the loop).
        if iteration >= iteration_max:
            flag_next = False
            print(f"Maximal number of iteration is reached ({iteration:}). ", end='\n')
        if parameter_lambda < parameter_lambda_min:
            flag_next = False
            print(f"Minimal value of parameter lambda {parameter_lambda*1e6:.3f}*10^-6 is reached at iteration {iteration:}. ", end='\n')
        if c <= c_desired:
            flag_next = False
            print(f"Desired value is reached at iteration {iteration:}. ", end='\n')
    c_best = c_previous
    print(f"Chi_sq best is {c_best:.2f}")
    if channel_plus_minus:
        density_col_best = numpy.copy(density_col_previous)
        dict_in_out["density_channel_plus_minus"] = density_col_best
    if channel_chi:
        density_chi_best = numpy.copy(density_chi_previous)
        # NOTE(review): stores ``density_chi`` rather than ``density_chi_best``;
        # at this point both hold equal values (see accept/reject branches),
        # but the col channel stores the ``_best`` copy — confirm intent.
        dict_in_out["density_channel_chi"] = density_chi
    # **Save to .den file**
    if channel_plus_minus and (file_spin_density is not None):
        spin_density = density_col_best * numpy.array([[magnetization_plus, ], [magnetization_minus, ]], dtype=float)
        save_spin_density_into_file(file_spin_density, index_auc_col, spin_density, n_abc, unit_cell_parameters,
            reduced_symm_elems, translation_elems, centrosymmetry, centrosymmetry_position)
        print(f"\nReconstructed spin density is written in file '{file_spin_density:}'.")
    if channel_chi and (file_magnetization_density is not None):
        spin_density = numpy.stack([density_chi_best, numpy.zeros_like(density_chi_best)], axis=0)
        save_spin_density_into_file(file_magnetization_density, index_auc_chi, spin_density, n_abc, unit_cell_parameters,
            reduced_symm_elems, translation_elems, centrosymmetry, centrosymmetry_position)
        print(f"\nReconstructed magnetization density is written in file '{file_magnetization_density:}'.")
def mempy_susceptibility_refinement(dict_channel_chi, dict_crystal, dict_mem_parameters, l_dict_diffrn, dict_in_out):
    """Refine atomic susceptibility parameters at fixed MEM density.

    Minimizes chi_sq between measured flip ratios (or asymmetries) and
    values modelled from the reconstructed chi-channel density, varying
    the susceptibility components flagged in
    ``dict_crystal["flags_atom_para_susceptibility"]``.

    Parameters
    ----------
    dict_channel_chi : dict
        Chi-channel arrays produced by the density reconstruction step
        (symmetry elements, multiplicities, density values).
    dict_crystal : dict
        Crystal description; ``atom_para_susceptibility`` is updated
        in place with the refined (and symmetry-constrained) values.
    dict_mem_parameters : dict
        MEM options (grid size, channel flags, ``flag_asymmetry``).
    l_dict_diffrn : list of dict
        Polarized-neutron diffraction experiments.
    dict_in_out : dict
        In/out dictionary; receives per-experiment precalculated data and
        estimated parameter sigmas.

    Returns
    -------
    None; results are written into ``dict_crystal`` / ``dict_in_out``.
    """
    print("****************************************")
    print("Susceptibility refinement (module MEMPy)")
    print("****************************************")
    number_points = numpy.prod(dict_mem_parameters["points_abc"])
    flag_asymmetry = dict_mem_parameters["flag_asymmetry"]
    channel_plus_minus = dict_mem_parameters["channel_plus_minus"]
    channel_chi = dict_mem_parameters["channel_chi"]
    print(f"Channel plus/minus is {channel_plus_minus:}")
    print("ATTENTION: Channel plus/minus is not taken into account.")
    print(f"Channel chi is {channel_chi:}")
    print(f"Flag asymmetry is {flag_asymmetry:}")
    if channel_plus_minus:
        # Read but unused below — the plus/minus channel is ignored here
        # (see the ATTENTION message above).
        magnetization_plus = dict_mem_parameters["magnetization_plus"]
        magnetization_minus = dict_mem_parameters["magnetization_minus"]
    symm_elem_channel_chi = dict_channel_chi["symm_elem_channel_chi"]
    atom_multiplicity_channel_chi = dict_channel_chi["atom_multiplicity_channel_chi"]
    density_channel_chi = dict_channel_chi["density_channel_chi"]
    point_multiplicity_channel_chi = dict_channel_chi["point_multiplicity_channel_chi"]
    unit_cell_parameters = dict_crystal["unit_cell_parameters"]
    full_symm_elems = dict_crystal["full_symm_elems"]
    atom_fract_xyz = dict_crystal["atom_fract_xyz"]
    atom_para_sc_chi = dict_crystal["atom_para_sc_chi"]
    atom_para_index = dict_crystal["atom_para_index"]
    atom_para_label = dict_crystal["atom_para_label"]
    atom_para_susceptibility = dict_crystal["atom_para_susceptibility"]
    flags_atom_para_susceptibility = dict_crystal["flags_atom_para_susceptibility"]
    print(f"Number of refined parameters is {flags_atom_para_susceptibility.sum():}.")
    if flags_atom_para_susceptibility.sum() == 0:
        # Nothing to refine — exit early.
        print("There is no refined susceptibility parameters.")
        return
    atom_para_fract_xyz = atom_fract_xyz[:, atom_para_index]
    n_atom_para = atom_para_susceptibility.shape[1]
    print("Preliminary calculations of chi atoms ...", end="\r")
    l_exp_value_sigma = []
    # Per-experiment precalculation of density-weighted atomic terms and
    # nuclear structure factors; stored into dict_in_out for the model call.
    for dict_diffrn in l_dict_diffrn:
        flag_use_precalculated_data = False
        index_hkl = dict_diffrn["index_hkl"]
        diffrn_dict_in_out = {"index_hkl": index_hkl}
        chi_atoms = calc_chi_atoms(
            unit_cell_parameters, number_points, full_symm_elems,
            index_hkl, atom_para_fract_xyz, atom_para_sc_chi,
            symm_elem_channel_chi, point_multiplicity_channel_chi, density_channel_chi)
        diffrn_dict_in_out["chi_atoms"] = chi_atoms
        eq_ccs, dder = calc_eq_ccs_by_unit_cell_parameters(index_hkl, unit_cell_parameters)
        vp, dder = calc_vv_as_v1_v2_v1(eq_ccs)
        diffrn_dict_in_out["vp"] = vp
        f_nucl, dder = calc_f_nucl_by_dictionary(
            dict_crystal, diffrn_dict_in_out, flag_use_precalculated_data=flag_use_precalculated_data)
        diffrn_dict_in_out["f_nucl"] = f_nucl
        dict_in_out["dict_in_out_"+dict_diffrn['type_name']] = diffrn_dict_in_out
        flip_ratio_es = dict_diffrn["flip_ratio_es"]
        if flag_asymmetry:
            # Flip ratio R +/- sigma -> asymmetry A = (R-1)/(R+1) with
            # first-order error propagation (same convention as the
            # density-reconstruction step).
            asymmetry_e = (flip_ratio_es[0] -1.)/(flip_ratio_es[0] + 1.)
            asymmetry_s = numpy.sqrt(2.)*flip_ratio_es[1] * numpy.sqrt(numpy.square(flip_ratio_es[0]) + 1.)/numpy.square(flip_ratio_es[0] + 1.)
            asymmetry_es = numpy.stack([asymmetry_e, asymmetry_s], axis=0)
            l_exp_value_sigma.append(asymmetry_es)
        else:
            l_exp_value_sigma.append(flip_ratio_es)
    exp_value_sigma = numpy.concatenate(l_exp_value_sigma, axis=1)
    def calc_chi_sq(param):
        # Objective: writes the trial parameters into the (shared, mutable)
        # atom_para_susceptibility array, then evaluates chi_sq.
        atom_para_susceptibility[flags_atom_para_susceptibility] = param
        model_value = calc_model_value_by_precalculated_data(atom_para_susceptibility, unit_cell_parameters, flag_asymmetry, dict_in_out, l_dict_diffrn)
        chi_sq = numpy.square((model_value-exp_value_sigma[0])/exp_value_sigma[1]).sum()
        return chi_sq
    param_0 = atom_para_susceptibility[flags_atom_para_susceptibility]
    chi_sq_per_n = calc_chi_sq(param_0)/exp_value_sigma.shape[1]
    print(70*" ")
    print("Before susceptibility refinement")
    print("Susceptibility tensor:")
    for ind_at, label in enumerate(atom_para_label):
        print(f"{label:5} {atom_para_susceptibility[0, ind_at]:.5f} {atom_para_susceptibility[1, ind_at]:.5f} {atom_para_susceptibility[2, ind_at]:.5f} {atom_para_susceptibility[3, ind_at]:.5f} {atom_para_susceptibility[4, ind_at]:.5f} {atom_para_susceptibility[5, ind_at]:.5f}")
    print(f"chi_sq_per_n is {chi_sq_per_n:.2f}.")
    print("Minimization procedure ...", end="\r")
    res = scipy.optimize.minimize(calc_chi_sq, param_0, method="Nelder-Mead")
    apss = None
    # Parameter sigmas: from the inverse Hessian if the optimizer supplies
    # one, otherwise from the final Nelder-Mead simplex.
    # NOTE(review): with method="Nelder-Mead" scipy returns a final simplex,
    # not "hess_inv" — the first branch likely never fires here; confirm.
    if "hess_inv" in res.keys():
        hess_inv = res["hess_inv"]
        dict_in_out["hess_inv"] = hess_inv
        sigma_p = numpy.sqrt(numpy.abs(numpy.diag(hess_inv)))
        atom_para_susceptibility_sigma = numpy.zeros_like(atom_para_susceptibility)
        atom_para_susceptibility_sigma[flags_atom_para_susceptibility] = sigma_p
        apss = (atom_para_sc_chi * numpy.expand_dims(atom_para_susceptibility_sigma, axis=0)).sum(axis=1)
        dict_in_out["atom_para_susceptibility_sigma"] = apss
    elif "final_simplex" in res.keys():
        n = exp_value_sigma.shape[1]
        m_error, dist_hh = error_estimation_simplex(
            res["final_simplex"][0], res["final_simplex"][1], calc_chi_sq)
        l_sigma = []
        for i, val_2 in zip(range(m_error.shape[0]), dist_hh):
            # slightly change definition, instead of (n-k) here is n
            error = (abs(m_error[i, i])*1./n)**0.5
            if m_error[i, i] < 0.:
                pass
                # warn("Negative diagonal elements of Hessian.", UserWarning)
            if val_2 > error:
                pass
                # warn("Minimum is not found.", UserWarning)
            l_sigma.append(max(error, val_2))
        sigma_p = numpy.array(l_sigma)
        atom_para_susceptibility_sigma = numpy.zeros_like(atom_para_susceptibility)
        atom_para_susceptibility_sigma[flags_atom_para_susceptibility] = sigma_p
        apss = (atom_para_sc_chi * numpy.expand_dims(atom_para_susceptibility_sigma, axis=0)).sum(axis=1)
        dict_in_out["atom_para_susceptibility_sigma"] = apss
        print(sigma_p)
    print(70*" ")
    chi_sq_per_n = calc_chi_sq(res.x)/exp_value_sigma.shape[1]
    # Write the refined values back and apply the symmetry constraint matrix.
    atom_para_susceptibility[flags_atom_para_susceptibility] = res.x
    atom_para_susceptibility = (atom_para_sc_chi * numpy.expand_dims(atom_para_susceptibility, axis=0)).sum(axis=1)
    dict_crystal["atom_para_susceptibility"] = atom_para_susceptibility
    print("After susceptibility refinement")
    print("Susceptibility tensor:")
    for ind_at, label in enumerate(atom_para_label):
        print(f"{label:5} {atom_para_susceptibility[0, ind_at]:8.5f} {atom_para_susceptibility[1, ind_at]:8.5f} {atom_para_susceptibility[2, ind_at]:8.5f} {atom_para_susceptibility[3, ind_at]:8.5f} {atom_para_susceptibility[4, ind_at]:8.5f} {atom_para_susceptibility[5, ind_at]:8.5f}")
        if apss is not None:
            print(f"sigma {apss[0, ind_at]:8.5f} {apss[1, ind_at]:8.5f} {apss[2, ind_at]:8.5f} {apss[3, ind_at]:8.5f} {apss[4, ind_at]:8.5f} {apss[5, ind_at]:8.5f}")
    print(f"chi_sq_per_n is {chi_sq_per_n:.2f}.")
    print(70*"*")
    print("End of MEMPy procedure for susceptibility refinement")
    print(70*"*")
    return
def mempy_cycle_density_susceptibility(dict_crystal, dict_mem_parameters, l_dict_diffrn, dict_in_out,
        parameter_lambda:float=1.e-5, iteration_max:int=1000, parameter_lambda_min:float=1.e-9, delta_density:float=1.e-5, n_cycle:int=10):
    """Alternate MEM density reconstruction and susceptibility refinement.

    Runs ``n_cycle`` cycles; each cycle first reconstructs the density at
    fixed susceptibility, then refines the susceptibility at fixed density.
    The working dictionaries of the last cycle are stored in ``dict_in_out``
    under ``"dict_in_out_den"`` and ``"dict_in_out_susc"``.
    """
    banner = 70*"*"
    print(banner)
    print("MEMPy: cycle iteration")
    print(banner)
    print(f"Number of cycles is {n_cycle:}")
    print(70*" ")
    for cycle_index in range(n_cycle):
        header = f"Cycle {cycle_index+1:}"
        print(header)
        print(len(header)*"-")
        # Step 1: MEM density reconstruction at fixed susceptibility.
        dict_in_out_den = {}
        mempy_reconstruction_by_dictionary(
            dict_crystal, dict_mem_parameters, l_dict_diffrn, dict_in_out_den,
            parameter_lambda=parameter_lambda, iteration_max=iteration_max,
            parameter_lambda_min=parameter_lambda_min, delta_density=delta_density)
        # Step 2: susceptibility refinement at fixed chi-channel density.
        chi_keys = (
            "atom_multiplicity_channel_chi",
            "point_multiplicity_channel_chi",
            "symm_elem_channel_chi",
            "susceptibility_channel_chi",
            "density_channel_chi",
        )
        dict_channel_chi = {key: dict_in_out_den[key] for key in chi_keys}
        dict_in_out_susc = {}
        mempy_susceptibility_refinement(dict_channel_chi, dict_crystal, dict_mem_parameters, l_dict_diffrn, dict_in_out_susc)
        print(70*" ")
    dict_in_out["dict_in_out_den"] = dict_in_out_den
    dict_in_out["dict_in_out_susc"] = dict_in_out_susc
    return
"numpy.prod",
"cryspy.A_functions_base.orbital_functions.calc_density_spherical",
"cryspy.A_functions_base.flip_ratio.calc_flip_ratio_by_iint",
"numpy.sqrt",
"cryspy.A_functions_base.matrix_operations.calc_vv_as_v1_v2_v1",
"numpy.logical_not",
"numpy.array",
"cryspy.A_functions_base.flip_ratio.calc_as... | [((4346, 4403), 'cryspy.A_functions_base.symmetry_elements.calc_asymmetric_unit_cell_indexes', 'calc_asymmetric_unit_cell_indexes', (['n_abc', 'full_symm_elems'], {}), '(n_abc, full_symm_elems)\n', (4379, 4403), False, 'from cryspy.A_functions_base.symmetry_elements import calc_asymmetric_unit_cell_indexes\n'), ((4424, 4479), 'cryspy.A_functions_base.mempy.calc_symm_elem_points_by_index_points', 'calc_symm_elem_points_by_index_points', (['index_auc', 'n_abc'], {}), '(index_auc, n_abc)\n', (4461, 4479), False, 'from cryspy.A_functions_base.mempy import calc_mem_col, calc_mem_chi, calc_symm_elem_points_by_index_points, get_uniform_density_col, renormailize_density_col, save_spin_density_into_file, form_basins, calc_point_susceptibility, get_uniform_density_chi, renormailize_density_chi, calc_model_value_by_precalculated_data, calc_chi_atoms\n'), ((8220, 8237), 'numpy.prod', 'numpy.prod', (['n_abc'], {}), '(n_abc)\n', (8230, 8237), False, 'import numpy\n'), ((12814, 12858), 'numpy.concatenate', 'numpy.concatenate', (['l_exp_value_sigma'], {'axis': '(1)'}), '(l_exp_value_sigma, axis=1)\n', (12831, 12858), False, 'import numpy\n'), ((24009, 24054), 'numpy.prod', 'numpy.prod', (["dict_mem_parameters['points_abc']"], {}), "(dict_mem_parameters['points_abc'])\n", (24019, 24054), False, 'import numpy\n'), ((27374, 27418), 'numpy.concatenate', 'numpy.concatenate', (['l_exp_value_sigma'], {'axis': '(1)'}), '(l_exp_value_sigma, axis=1)\n', (27391, 27418), False, 'import numpy\n'), ((28490, 28557), 'scipy.optimize.minimize', 'scipy.optimize.minimize', (['calc_chi_sq', 'param_0'], {'method': '"""Nelder-Mead"""'}), "(calc_chi_sq, param_0, method='Nelder-Mead')\n", (28513, 28557), False, 'import scipy\n'), ((3423, 3520), 'cryspy.A_functions_base.unit_cell.calc_volume_uc_by_unit_cell_parameters', 'calc_volume_uc_by_unit_cell_parameters', (['unit_cell_parameters'], {'flag_unit_cell_parameters': '(False)'}), '(unit_cell_parameters,\n 
flag_unit_cell_parameters=False)\n', (3461, 3520), False, 'from cryspy.A_functions_base.unit_cell import calc_volume_uc_by_unit_cell_parameters, calc_sthovl_by_unit_cell_parameters, calc_eq_ccs_by_unit_cell_parameters\n'), ((4978, 5166), 'cryspy.A_functions_base.mempy.form_basins', 'form_basins', (['symm_elem_auc', 'full_symm_elems', 'unit_cell_parameters', 'atom_label[flag_atom_para]', 'atom_fract_xyz[:, flag_atom_para]', 'atom_multiplicity[flag_atom_para]', 'atom_para_label'], {}), '(symm_elem_auc, full_symm_elems, unit_cell_parameters,\n atom_label[flag_atom_para], atom_fract_xyz[:, flag_atom_para],\n atom_multiplicity[flag_atom_para], atom_para_label)\n', (4989, 5166), False, 'from cryspy.A_functions_base.mempy import calc_mem_col, calc_mem_chi, calc_symm_elem_points_by_index_points, get_uniform_density_col, renormailize_density_col, save_spin_density_into_file, form_basins, calc_point_susceptibility, get_uniform_density_chi, renormailize_density_chi, calc_model_value_by_precalculated_data, calc_chi_atoms\n'), ((6388, 6415), 'numpy.logical_not', 'numpy.logical_not', (['flag_chi'], {}), '(flag_chi)\n', (6405, 6415), False, 'import numpy\n'), ((7852, 8049), 'cryspy.A_functions_base.mempy.calc_point_susceptibility', 'calc_point_susceptibility', (['unit_cell_parameters', 'atom_symm_elems_auc_chi', 'atom_label_auc_chi', 'atom_para_label', 'atom_para_susceptibility', 'atom_para_sc_chi', 'full_symm_elems', 'symm_elem_auc_chi'], {}), '(unit_cell_parameters, atom_symm_elems_auc_chi,\n atom_label_auc_chi, atom_para_label, atom_para_susceptibility,\n atom_para_sc_chi, full_symm_elems, symm_elem_auc_chi)\n', (7877, 8049), False, 'from cryspy.A_functions_base.mempy import calc_mem_col, calc_mem_chi, calc_symm_elem_points_by_index_points, get_uniform_density_col, renormailize_density_col, save_spin_density_into_file, form_basins, calc_point_susceptibility, get_uniform_density_chi, renormailize_density_chi, calc_model_value_by_precalculated_data, calc_chi_atoms\n'), ((9544, 
9631), 'cryspy.A_functions_base.mempy.get_uniform_density_col', 'get_uniform_density_col', (['point_multiplicity_col', 'volume_unit_cell', 'number_unit_cell'], {}), '(point_multiplicity_col, volume_unit_cell,\n number_unit_cell)\n', (9567, 9631), False, 'from cryspy.A_functions_base.mempy import calc_mem_col, calc_mem_chi, calc_symm_elem_points_by_index_points, get_uniform_density_col, renormailize_density_col, save_spin_density_into_file, form_basins, calc_point_susceptibility, get_uniform_density_chi, renormailize_density_chi, calc_model_value_by_precalculated_data, calc_chi_atoms\n'), ((12117, 12237), 'cryspy.A_functions_base.structure_factor.calc_f_nucl_by_dictionary', 'calc_f_nucl_by_dictionary', (['dict_crystal', 'diffrn_dict_in_out'], {'flag_use_precalculated_data': 'flag_use_precalculated_data'}), '(dict_crystal, diffrn_dict_in_out,\n flag_use_precalculated_data=flag_use_precalculated_data)\n', (12142, 12237), False, 'from cryspy.A_functions_base.structure_factor import calc_f_nucl_by_dictionary\n'), ((12908, 12944), 'numpy.concatenate', 'numpy.concatenate', (['l_mem_col'], {'axis': '(1)'}), '(l_mem_col, axis=1)\n', (12925, 12944), False, 'import numpy\n'), ((12983, 13019), 'numpy.concatenate', 'numpy.concatenate', (['l_mem_chi'], {'axis': '(1)'}), '(l_mem_chi, axis=1)\n', (13000, 13019), False, 'import numpy\n'), ((13390, 13419), 'numpy.copy', 'numpy.copy', (['density_col_prior'], {}), '(density_col_prior)\n', (13400, 13419), False, 'import numpy\n'), ((13447, 13476), 'numpy.copy', 'numpy.copy', (['density_col_prior'], {}), '(density_col_prior)\n', (13457, 13476), False, 'import numpy\n'), ((13519, 13548), 'numpy.copy', 'numpy.copy', (['density_chi_prior'], {}), '(density_chi_prior)\n', (13529, 13548), False, 'import numpy\n'), ((13576, 13605), 'numpy.copy', 'numpy.copy', (['density_chi_prior'], {}), '(density_chi_prior)\n', (13586, 13605), False, 'import numpy\n'), ((13871, 13906), 'numpy.zeros_like', 'numpy.zeros_like', (['density_col_prior'], {}), 
'(density_col_prior)\n', (13887, 13906), False, 'import numpy\n'), ((13960, 13995), 'numpy.zeros_like', 'numpy.zeros_like', (['density_chi_prior'], {}), '(density_chi_prior)\n', (13976, 13995), False, 'import numpy\n'), ((19134, 19174), 'numpy.concatenate', 'numpy.concatenate', (['l_model_value'], {'axis': '(0)'}), '(l_model_value, axis=0)\n', (19151, 19174), False, 'import numpy\n'), ((22472, 22504), 'numpy.copy', 'numpy.copy', (['density_col_previous'], {}), '(density_col_previous)\n', (22482, 22504), False, 'import numpy\n'), ((22621, 22653), 'numpy.copy', 'numpy.copy', (['density_chi_previous'], {}), '(density_chi_previous)\n', (22631, 22653), False, 'import numpy\n'), ((22929, 23121), 'cryspy.A_functions_base.mempy.save_spin_density_into_file', 'save_spin_density_into_file', (['file_spin_density', 'index_auc_col', 'spin_density', 'n_abc', 'unit_cell_parameters', 'reduced_symm_elems', 'translation_elems', 'centrosymmetry', 'centrosymmetry_position'], {}), '(file_spin_density, index_auc_col, spin_density,\n n_abc, unit_cell_parameters, reduced_symm_elems, translation_elems,\n centrosymmetry, centrosymmetry_position)\n', (22956, 23121), False, 'from cryspy.A_functions_base.mempy import calc_mem_col, calc_mem_chi, calc_symm_elem_points_by_index_points, get_uniform_density_col, renormailize_density_col, save_spin_density_into_file, form_basins, calc_point_susceptibility, get_uniform_density_chi, renormailize_density_chi, calc_model_value_by_precalculated_data, calc_chi_atoms\n'), ((23392, 23593), 'cryspy.A_functions_base.mempy.save_spin_density_into_file', 'save_spin_density_into_file', (['file_magnetization_density', 'index_auc_chi', 'spin_density', 'n_abc', 'unit_cell_parameters', 'reduced_symm_elems', 'translation_elems', 'centrosymmetry', 'centrosymmetry_position'], {}), '(file_magnetization_density, index_auc_chi,\n spin_density, n_abc, unit_cell_parameters, reduced_symm_elems,\n translation_elems, centrosymmetry, centrosymmetry_position)\n', (23419, 23593), 
False, 'from cryspy.A_functions_base.mempy import calc_mem_col, calc_mem_chi, calc_symm_elem_points_by_index_points, get_uniform_density_col, renormailize_density_col, save_spin_density_into_file, form_basins, calc_point_susceptibility, get_uniform_density_chi, renormailize_density_chi, calc_model_value_by_precalculated_data, calc_chi_atoms\n'), ((26097, 26299), 'cryspy.A_functions_base.mempy.calc_chi_atoms', 'calc_chi_atoms', (['unit_cell_parameters', 'number_points', 'full_symm_elems', 'index_hkl', 'atom_para_fract_xyz', 'atom_para_sc_chi', 'symm_elem_channel_chi', 'point_multiplicity_channel_chi', 'density_channel_chi'], {}), '(unit_cell_parameters, number_points, full_symm_elems,\n index_hkl, atom_para_fract_xyz, atom_para_sc_chi, symm_elem_channel_chi,\n point_multiplicity_channel_chi, density_channel_chi)\n', (26111, 26299), False, 'from cryspy.A_functions_base.mempy import calc_mem_col, calc_mem_chi, calc_symm_elem_points_by_index_points, get_uniform_density_col, renormailize_density_col, save_spin_density_into_file, form_basins, calc_point_susceptibility, get_uniform_density_chi, renormailize_density_chi, calc_model_value_by_precalculated_data, calc_chi_atoms\n'), ((26415, 26483), 'cryspy.A_functions_base.unit_cell.calc_eq_ccs_by_unit_cell_parameters', 'calc_eq_ccs_by_unit_cell_parameters', (['index_hkl', 'unit_cell_parameters'], {}), '(index_hkl, unit_cell_parameters)\n', (26450, 26483), False, 'from cryspy.A_functions_base.unit_cell import calc_volume_uc_by_unit_cell_parameters, calc_sthovl_by_unit_cell_parameters, calc_eq_ccs_by_unit_cell_parameters\n'), ((26503, 26530), 'cryspy.A_functions_base.matrix_operations.calc_vv_as_v1_v2_v1', 'calc_vv_as_v1_v2_v1', (['eq_ccs'], {}), '(eq_ccs)\n', (26522, 26530), False, 'from cryspy.A_functions_base.matrix_operations import calc_vv_as_v1_v2_v1\n'), ((26597, 26717), 'cryspy.A_functions_base.structure_factor.calc_f_nucl_by_dictionary', 'calc_f_nucl_by_dictionary', (['dict_crystal', 'diffrn_dict_in_out'], 
{'flag_use_precalculated_data': 'flag_use_precalculated_data'}), '(dict_crystal, diffrn_dict_in_out,\n flag_use_precalculated_data=flag_use_precalculated_data)\n', (26622, 26717), False, 'from cryspy.A_functions_base.structure_factor import calc_f_nucl_by_dictionary\n'), ((27549, 27683), 'cryspy.A_functions_base.mempy.calc_model_value_by_precalculated_data', 'calc_model_value_by_precalculated_data', (['atom_para_susceptibility', 'unit_cell_parameters', 'flag_asymmetry', 'dict_in_out', 'l_dict_diffrn'], {}), '(atom_para_susceptibility,\n unit_cell_parameters, flag_asymmetry, dict_in_out, l_dict_diffrn)\n', (27587, 27683), False, 'from cryspy.A_functions_base.mempy import calc_mem_col, calc_mem_chi, calc_symm_elem_points_by_index_points, get_uniform_density_col, renormailize_density_col, save_spin_density_into_file, form_basins, calc_point_susceptibility, get_uniform_density_chi, renormailize_density_chi, calc_model_value_by_precalculated_data, calc_chi_atoms\n'), ((28788, 28830), 'numpy.zeros_like', 'numpy.zeros_like', (['atom_para_susceptibility'], {}), '(atom_para_susceptibility)\n', (28804, 28830), False, 'import numpy\n'), ((5618, 5751), 'cryspy.A_functions_base.mempy.form_basins', 'form_basins', (['symm_elem_auc', 'full_symm_elems', 'unit_cell_parameters', 'atom_label', 'atom_fract_xyz', 'atom_multiplicity', 'atom_para_label'], {}), '(symm_elem_auc, full_symm_elems, unit_cell_parameters,\n atom_label, atom_fract_xyz, atom_multiplicity, atom_para_label)\n', (5629, 5751), False, 'from cryspy.A_functions_base.mempy import calc_mem_col, calc_mem_chi, calc_symm_elem_points_by_index_points, get_uniform_density_col, renormailize_density_col, save_spin_density_into_file, form_basins, calc_point_susceptibility, get_uniform_density_chi, renormailize_density_chi, calc_model_value_by_precalculated_data, calc_chi_atoms\n'), ((6793, 6814), 'numpy.copy', 'numpy.copy', (['index_auc'], {}), '(index_auc)\n', (6803, 6814), False, 'import numpy\n'), ((6848, 6878), 'numpy.copy', 
'numpy.copy', (['point_multiplicity'], {}), '(point_multiplicity)\n', (6858, 6878), False, 'import numpy\n'), ((6907, 6932), 'numpy.copy', 'numpy.copy', (['symm_elem_auc'], {}), '(symm_elem_auc)\n', (6917, 6932), False, 'import numpy\n'), ((8395, 8529), 'cryspy.A_functions_base.mempy.get_uniform_density_chi', 'get_uniform_density_chi', (['point_multiplicity_chi', 'atom_label_auc_chi', 'atom_multiplicity_auc_chi', 'volume_unit_cell', 'number_unit_cell'], {}), '(point_multiplicity_chi, atom_label_auc_chi,\n atom_multiplicity_auc_chi, volume_unit_cell, number_unit_cell)\n', (8418, 8529), False, 'from cryspy.A_functions_base.mempy import calc_mem_col, calc_mem_chi, calc_symm_elem_points_by_index_points, get_uniform_density_col, renormailize_density_col, save_spin_density_into_file, form_basins, calc_point_susceptibility, get_uniform_density_chi, renormailize_density_chi, calc_model_value_by_precalculated_data, calc_chi_atoms\n'), ((8642, 8681), 'numpy.zeros_like', 'numpy.zeros_like', (['atom_distance_auc_chi'], {}), '(atom_distance_auc_chi)\n', (8658, 8681), False, 'import numpy\n'), ((9267, 9425), 'cryspy.A_functions_base.mempy.renormailize_density_chi', 'renormailize_density_chi', (['density_chi_prior', 'point_multiplicity_chi', 'atom_label_auc_chi', 'atom_multiplicity_auc_chi', 'volume_unit_cell', 'number_unit_cell'], {}), '(density_chi_prior, point_multiplicity_chi,\n atom_label_auc_chi, atom_multiplicity_auc_chi, volume_unit_cell,\n number_unit_cell)\n', (9291, 9425), False, 'from cryspy.A_functions_base.mempy import calc_mem_col, calc_mem_chi, calc_symm_elem_points_by_index_points, get_uniform_density_col, renormailize_density_col, save_spin_density_into_file, form_basins, calc_point_susceptibility, get_uniform_density_chi, renormailize_density_chi, calc_model_value_by_precalculated_data, calc_chi_atoms\n'), ((10929, 11195), 'cryspy.A_functions_base.mempy.calc_mem_col', 'calc_mem_col', (['index_hkl', 'unit_cell_parameters', 'eh_ccs', 'full_symm_elems', 
'symm_elem_auc_col', 'volume_unit_cell', 'number_unit_cell'], {'point_multiplicity': 'point_multiplicity_col', 'dict_in_out': 'dict_in_out_col', 'flag_use_precalculated_data': 'flag_use_precalculated_data'}), '(index_hkl, unit_cell_parameters, eh_ccs, full_symm_elems,\n symm_elem_auc_col, volume_unit_cell, number_unit_cell,\n point_multiplicity=point_multiplicity_col, dict_in_out=dict_in_out_col,\n flag_use_precalculated_data=flag_use_precalculated_data)\n', (10941, 11195), False, 'from cryspy.A_functions_base.mempy import calc_mem_col, calc_mem_chi, calc_symm_elem_points_by_index_points, get_uniform_density_col, renormailize_density_col, save_spin_density_into_file, form_basins, calc_point_susceptibility, get_uniform_density_chi, renormailize_density_chi, calc_model_value_by_precalculated_data, calc_chi_atoms\n'), ((11661, 11953), 'cryspy.A_functions_base.mempy.calc_mem_chi', 'calc_mem_chi', (['index_hkl', 'unit_cell_parameters', 'h_ccs', 'full_symm_elems', 'symm_elem_auc_chi', 'point_susceptibility', 'volume_unit_cell', 'number_unit_cell'], {'point_multiplicity': 'point_multiplicity_chi', 'dict_in_out': 'dict_in_out_chi', 'flag_use_precalculated_data': 'flag_use_precalculated_data'}), '(index_hkl, unit_cell_parameters, h_ccs, full_symm_elems,\n symm_elem_auc_chi, point_susceptibility, volume_unit_cell,\n number_unit_cell, point_multiplicity=point_multiplicity_chi,\n dict_in_out=dict_in_out_chi, flag_use_precalculated_data=\n flag_use_precalculated_data)\n', (11673, 11953), False, 'from cryspy.A_functions_base.mempy import calc_mem_col, calc_mem_chi, calc_symm_elem_points_by_index_points, get_uniform_density_col, renormailize_density_col, save_spin_density_into_file, form_basins, calc_point_susceptibility, get_uniform_density_chi, renormailize_density_chi, calc_model_value_by_precalculated_data, calc_chi_atoms\n'), ((12616, 12663), 'numpy.stack', 'numpy.stack', (['[asymmetry_e, asymmetry_s]'], {'axis': '(0)'}), '([asymmetry_e, asymmetry_s], axis=0)\n', (12627, 
12663), False, 'import numpy\n'), ((14137, 14165), 'numpy.copy', 'numpy.copy', (['density_col_next'], {}), '(density_col_next)\n', (14147, 14165), False, 'import numpy\n'), ((14216, 14244), 'numpy.copy', 'numpy.copy', (['density_chi_next'], {}), '(density_chi_next)\n', (14226, 14244), False, 'import numpy\n'), ((14537, 14580), 'numpy.zeros', 'numpy.zeros', (['index_hkl.shape'], {'dtype': 'complex'}), '(index_hkl.shape, dtype=complex)\n', (14548, 14580), False, 'import numpy\n'), ((16362, 16670), 'cryspy.A_functions_base.flip_ratio.calc_iint', 'calc_iint', (['beam_polarization', 'flipper_efficiency', 'f_nucl', 'f_m_perp', 'matrix_u'], {'func_extinction': 'func_extinction', 'flag_beam_polarization': '(False)', 'flag_flipper_efficiency': '(False)', 'flag_f_nucl': '(False)', 'flag_f_m_perp': '(True)', 'dict_in_out': 'dict_in_out', 'flag_use_precalculated_data': 'flag_use_precalculated_data'}), '(beam_polarization, flipper_efficiency, f_nucl, f_m_perp, matrix_u,\n func_extinction=func_extinction, flag_beam_polarization=False,\n flag_flipper_efficiency=False, flag_f_nucl=False, flag_f_m_perp=True,\n dict_in_out=dict_in_out, flag_use_precalculated_data=\n flag_use_precalculated_data)\n', (16371, 16670), False, 'from cryspy.A_functions_base.flip_ratio import calc_iint, calc_flip_ratio_by_iint, calc_asymmetry_by_iint\n'), ((17746, 17800), 'numpy.expand_dims', 'numpy.expand_dims', (["dder_model_exp['iint_plus']"], {'axis': '(0)'}), "(dder_model_exp['iint_plus'], axis=0)\n", (17763, 17800), False, 'import numpy\n'), ((17835, 17890), 'numpy.expand_dims', 'numpy.expand_dims', (["dder_model_exp['iint_minus']"], {'axis': '(0)'}), "(dder_model_exp['iint_minus'], axis=0)\n", (17852, 17890), False, 'import numpy\n'), ((19381, 19426), 'numpy.concatenate', 'numpy.concatenate', (['l_der_model_den_pm'], {'axis': '(0)'}), '(l_der_model_den_pm, axis=0)\n', (19398, 19426), False, 'import numpy\n'), ((19635, 19731), 'numpy.stack', 'numpy.stack', (['[magnetization_plus * der_c_den_pm, 
magnetization_minus * der_c_den_pm]'], {'axis': '(0)'}), '([magnetization_plus * der_c_den_pm, magnetization_minus *\n der_c_den_pm], axis=0)\n', (19646, 19731), False, 'import numpy\n'), ((19785, 19831), 'numpy.concatenate', 'numpy.concatenate', (['l_der_model_den_chi'], {'axis': '(0)'}), '(l_der_model_den_chi, axis=0)\n', (19802, 19831), False, 'import numpy\n'), ((21193, 21219), 'numpy.where', 'numpy.where', (['(hh > 0)', 'hh', '(0)'], {}), '(hh > 0, hh, 0)\n', (21204, 21219), False, 'import numpy\n'), ((21249, 21341), 'cryspy.A_functions_base.mempy.renormailize_density_col', 'renormailize_density_col', (['hh', 'point_multiplicity_col', 'volume_unit_cell', 'number_unit_cell'], {}), '(hh, point_multiplicity_col, volume_unit_cell,\n number_unit_cell)\n', (21273, 21341), False, 'from cryspy.A_functions_base.mempy import calc_mem_col, calc_mem_chi, calc_symm_elem_points_by_index_points, get_uniform_density_col, renormailize_density_col, save_spin_density_into_file, form_basins, calc_point_susceptibility, get_uniform_density_chi, renormailize_density_chi, calc_model_value_by_precalculated_data, calc_chi_atoms\n'), ((21605, 21631), 'numpy.where', 'numpy.where', (['(hh > 0)', 'hh', '(0)'], {}), '(hh > 0, hh, 0)\n', (21616, 21631), False, 'import numpy\n'), ((21661, 21800), 'cryspy.A_functions_base.mempy.renormailize_density_chi', 'renormailize_density_chi', (['hh', 'point_multiplicity_chi', 'atom_label_auc_chi', 'atom_multiplicity_auc_chi', 'volume_unit_cell', 'number_unit_cell'], {}), '(hh, point_multiplicity_chi, atom_label_auc_chi,\n atom_multiplicity_auc_chi, volume_unit_cell, number_unit_cell)\n', (21685, 21800), False, 'from cryspy.A_functions_base.mempy import calc_mem_col, calc_mem_chi, calc_symm_elem_points_by_index_points, get_uniform_density_col, renormailize_density_col, save_spin_density_into_file, form_basins, calc_point_susceptibility, get_uniform_density_chi, renormailize_density_chi, calc_model_value_by_precalculated_data, calc_chi_atoms\n'), ((22845, 
22916), 'numpy.array', 'numpy.array', (['[[magnetization_plus], [magnetization_minus]]'], {'dtype': 'float'}), '([[magnetization_plus], [magnetization_minus]], dtype=float)\n', (22856, 22916), False, 'import numpy\n'), ((27187, 27234), 'numpy.stack', 'numpy.stack', (['[asymmetry_e, asymmetry_s]'], {'axis': '(0)'}), '([asymmetry_e, asymmetry_s], axis=0)\n', (27198, 27234), False, 'import numpy\n'), ((29183, 29274), 'cryspy.A_functions_base.function_1_error_simplex.error_estimation_simplex', 'error_estimation_simplex', (["res['final_simplex'][0]", "res['final_simplex'][1]", 'calc_chi_sq'], {}), "(res['final_simplex'][0], res['final_simplex'][1],\n calc_chi_sq)\n", (29207, 29274), False, 'from cryspy.A_functions_base.function_1_error_simplex import error_estimation_simplex\n'), ((29798, 29818), 'numpy.array', 'numpy.array', (['l_sigma'], {}), '(l_sigma)\n', (29809, 29818), False, 'import numpy\n'), ((29860, 29902), 'numpy.zeros_like', 'numpy.zeros_like', (['atom_para_susceptibility'], {}), '(atom_para_susceptibility)\n', (29876, 29902), False, 'import numpy\n'), ((4756, 4793), 'numpy.expand_dims', 'numpy.expand_dims', (['atom_label'], {'axis': '(1)'}), '(atom_label, axis=1)\n', (4773, 4793), False, 'import numpy\n'), ((4797, 4839), 'numpy.expand_dims', 'numpy.expand_dims', (['atom_para_label'], {'axis': '(0)'}), '(atom_para_label, axis=0)\n', (4814, 4839), False, 'import numpy\n'), ((8970, 9146), 'cryspy.A_functions_base.orbital_functions.calc_density_spherical', 'calc_density_spherical', (['atom_distance_auc_chi[flag_atom]', "dict_shell['core_population']", "dict_shell['core_coeff']", "dict_shell['core_zeta']", "dict_shell['core_n']", 'kappa'], {}), "(atom_distance_auc_chi[flag_atom], dict_shell[\n 'core_population'], dict_shell['core_coeff'], dict_shell['core_zeta'],\n dict_shell['core_n'], kappa)\n", (8992, 9146), False, 'from cryspy.A_functions_base.orbital_functions import calc_density_spherical\n'), ((12553, 12589), 'numpy.square', 'numpy.square', 
(['(flip_ratio_es[0] + 1.0)'], {}), '(flip_ratio_es[0] + 1.0)\n', (12565, 12589), False, 'import numpy\n'), ((15488, 15593), 'cryspy.A_functions_base.unit_cell.calc_sthovl_by_unit_cell_parameters', 'calc_sthovl_by_unit_cell_parameters', (['index_hkl', 'unit_cell_parameters'], {'flag_unit_cell_parameters': '(False)'}), '(index_hkl, unit_cell_parameters,\n flag_unit_cell_parameters=False)\n', (15523, 15593), False, 'from cryspy.A_functions_base.unit_cell import calc_volume_uc_by_unit_cell_parameters, calc_sthovl_by_unit_cell_parameters, calc_eq_ccs_by_unit_cell_parameters\n'), ((15918, 16194), 'cryspy.A_functions_base.extinction.calc_extinction_sphere', 'calc_extinction_sphere', (['f_sq', 'extinction_radius', 'extinction_mosaicity', 'volume_unit_cell', 'cos_2theta', 'wavelength', 'extinction_model'], {'flag_f_sq': '(False)', 'flag_radius': '(False)', 'flag_mosaicity': '(False)', 'flag_volume_unit_cell': '(False)', 'flag_cos_2theta': '(False)', 'flag_wavelength': '(False)'}), '(f_sq, extinction_radius, extinction_mosaicity,\n volume_unit_cell, cos_2theta, wavelength, extinction_model, flag_f_sq=\n False, flag_radius=False, flag_mosaicity=False, flag_volume_unit_cell=\n False, flag_cos_2theta=False, flag_wavelength=False)\n', (15940, 16194), False, 'from cryspy.A_functions_base.extinction import calc_extinction_sphere\n'), ((17151, 17324), 'cryspy.A_functions_base.flip_ratio.calc_asymmetry_by_iint', 'calc_asymmetry_by_iint', (['iint_plus', 'iint_minus'], {'c_lambda2': 'None', 'iint_2hkl': 'None', 'flag_iint_plus': '(True)', 'flag_iint_minus': '(True)', 'flag_c_lambda2': '(False)', 'flag_iint_2hkl': '(False)'}), '(iint_plus, iint_minus, c_lambda2=None, iint_2hkl=\n None, flag_iint_plus=True, flag_iint_minus=True, flag_c_lambda2=False,\n flag_iint_2hkl=False)\n', (17173, 17324), False, 'from cryspy.A_functions_base.flip_ratio import calc_iint, calc_flip_ratio_by_iint, calc_asymmetry_by_iint\n'), ((17440, 17614), 
'cryspy.A_functions_base.flip_ratio.calc_flip_ratio_by_iint', 'calc_flip_ratio_by_iint', (['iint_plus', 'iint_minus'], {'c_lambda2': 'None', 'iint_2hkl': 'None', 'flag_iint_plus': '(True)', 'flag_iint_minus': '(True)', 'flag_c_lambda2': '(False)', 'flag_iint_2hkl': '(False)'}), '(iint_plus, iint_minus, c_lambda2=None, iint_2hkl=\n None, flag_iint_plus=True, flag_iint_minus=True, flag_c_lambda2=False,\n flag_iint_2hkl=False)\n', (17463, 17614), False, 'from cryspy.A_functions_base.flip_ratio import calc_iint, calc_flip_ratio_by_iint, calc_asymmetry_by_iint\n'), ((20187, 20219), 'numpy.copy', 'numpy.copy', (['density_col_previous'], {}), '(density_col_previous)\n', (20197, 20219), False, 'import numpy\n'), ((20333, 20365), 'numpy.copy', 'numpy.copy', (['density_chi_previous'], {}), '(density_chi_previous)\n', (20343, 20365), False, 'import numpy\n'), ((20591, 20614), 'numpy.copy', 'numpy.copy', (['density_col'], {}), '(density_col)\n', (20601, 20614), False, 'import numpy\n'), ((20737, 20760), 'numpy.copy', 'numpy.copy', (['density_chi'], {}), '(density_chi)\n', (20747, 20760), False, 'import numpy\n'), ((23339, 23373), 'numpy.zeros_like', 'numpy.zeros_like', (['density_chi_best'], {}), '(density_chi_best)\n', (23355, 23373), False, 'import numpy\n'), ((27124, 27160), 'numpy.square', 'numpy.square', (['(flip_ratio_es[0] + 1.0)'], {}), '(flip_ratio_es[0] + 1.0)\n', (27136, 27160), False, 'import numpy\n'), ((27702, 27771), 'numpy.square', 'numpy.square', (['((model_value - exp_value_sigma[0]) / exp_value_sigma[1])'], {}), '((model_value - exp_value_sigma[0]) / exp_value_sigma[1])\n', (27714, 27771), False, 'import numpy\n'), ((28724, 28744), 'numpy.diag', 'numpy.diag', (['hess_inv'], {}), '(hess_inv)\n', (28734, 28744), False, 'import numpy\n'), ((30380, 30431), 'numpy.expand_dims', 'numpy.expand_dims', (['atom_para_susceptibility'], {'axis': '(0)'}), '(atom_para_susceptibility, axis=0)\n', (30397, 30431), False, 'import numpy\n'), ((14715, 14820), 
'numpy.expand_dims', 'numpy.expand_dims', (['(magnetization_plus * density_col[0] + magnetization_minus * density_col[1])'], {'axis': '(0)'}), '(magnetization_plus * density_col[0] + magnetization_minus *\n density_col[1], axis=0)\n', (14732, 14820), False, 'import numpy\n'), ((15630, 15663), 'numpy.arcsin', 'numpy.arcsin', (['(sthovl * wavelength)'], {}), '(sthovl * wavelength)\n', (15642, 15663), False, 'import numpy\n'), ((19261, 19285), 'numpy.square', 'numpy.square', (['diff_value'], {}), '(diff_value)\n', (19273, 19285), False, 'import numpy\n'), ((21130, 21163), 'numpy.exp', 'numpy.exp', (['(-coeff * der_c_den_col)'], {}), '(-coeff * der_c_den_col)\n', (21139, 21163), False, 'import numpy\n'), ((21542, 21575), 'numpy.exp', 'numpy.exp', (['(-coeff * der_c_den_chi)'], {}), '(-coeff * der_c_den_chi)\n', (21551, 21575), False, 'import numpy\n'), ((28947, 29004), 'numpy.expand_dims', 'numpy.expand_dims', (['atom_para_susceptibility_sigma'], {'axis': '(0)'}), '(atom_para_susceptibility_sigma, axis=0)\n', (28964, 29004), False, 'import numpy\n'), ((12471, 12486), 'numpy.sqrt', 'numpy.sqrt', (['(2.0)'], {}), '(2.0)\n', (12481, 12486), False, 'import numpy\n'), ((27042, 27057), 'numpy.sqrt', 'numpy.sqrt', (['(2.0)'], {}), '(2.0)\n', (27052, 27057), False, 'import numpy\n'), ((30019, 30076), 'numpy.expand_dims', 'numpy.expand_dims', (['atom_para_susceptibility_sigma'], {'axis': '(0)'}), '(atom_para_susceptibility_sigma, axis=0)\n', (30036, 30076), False, 'import numpy\n'), ((12516, 12546), 'numpy.square', 'numpy.square', (['flip_ratio_es[0]'], {}), '(flip_ratio_es[0])\n', (12528, 12546), False, 'import numpy\n'), ((19500, 19558), 'numpy.expand_dims', 'numpy.expand_dims', (['(diff_value / exp_value_sigma[1])'], {'axis': '(1)'}), '(diff_value / exp_value_sigma[1], axis=1)\n', (19517, 19558), False, 'import numpy\n'), ((19906, 19964), 'numpy.expand_dims', 'numpy.expand_dims', (['(diff_value / exp_value_sigma[1])'], {'axis': '(1)'}), '(diff_value / exp_value_sigma[1], 
axis=1)\n', (19923, 19964), False, 'import numpy\n'), ((27087, 27117), 'numpy.square', 'numpy.square', (['flip_ratio_es[0]'], {}), '(flip_ratio_es[0])\n', (27099, 27117), False, 'import numpy\n'), ((18006, 18135), 'numpy.expand_dims', 'numpy.expand_dims', (['(der_model_int_plus * der_int_plus_fm_perp_real + der_model_int_minus *\n der_int_minus_fm_perp_real)'], {'axis': '(2)'}), '(der_model_int_plus * der_int_plus_fm_perp_real + \n der_model_int_minus * der_int_minus_fm_perp_real, axis=2)\n', (18023, 18135), False, 'import numpy\n'), ((18242, 18371), 'numpy.expand_dims', 'numpy.expand_dims', (['(der_model_int_plus * der_int_plus_fm_perp_imag + der_model_int_minus *\n der_int_minus_fm_perp_imag)'], {'axis': '(2)'}), '(der_model_int_plus * der_int_plus_fm_perp_imag + \n der_model_int_minus * der_int_minus_fm_perp_imag, axis=2)\n', (18259, 18371), False, 'import numpy\n'), ((18612, 18741), 'numpy.expand_dims', 'numpy.expand_dims', (['(der_model_int_plus * der_int_plus_fm_perp_real + der_model_int_minus *\n der_int_minus_fm_perp_real)'], {'axis': '(2)'}), '(der_model_int_plus * der_int_plus_fm_perp_real + \n der_model_int_minus * der_int_minus_fm_perp_real, axis=2)\n', (18629, 18741), False, 'import numpy\n'), ((18848, 18977), 'numpy.expand_dims', 'numpy.expand_dims', (['(der_model_int_plus * der_int_plus_fm_perp_imag + der_model_int_minus *\n der_int_minus_fm_perp_imag)'], {'axis': '(2)'}), '(der_model_int_plus * der_int_plus_fm_perp_imag + \n der_model_int_minus * der_int_minus_fm_perp_imag, axis=2)\n', (18865, 18977), False, 'import numpy\n')] |
# -*- coding: utf-8 -*-
"""
Map creation script
"""
import sys
import os
from configparser import ConfigParser
import math
from PIL import Image
import urllib.request, urllib.parse, urllib.error
# tile positions, see https://wiki.openstreetmap.org/wiki/Slippy_map_tilenames#Lon..2Flat._to_tile_numbers_2
def deg2num(lat_deg, lon_deg, zoom):
    """Convert WGS84 degrees to slippy-map tile indices (x, y) at a zoom level.

    Uses the Web-Mercator tiling formula, see
    https://wiki.openstreetmap.org/wiki/Slippy_map_tilenames
    """
    tiles_per_axis = 2.0 ** zoom
    lat_rad = math.radians(lat_deg)
    # Fractional position across the map in [0, 1), west->east / north->south.
    x_fraction = (lon_deg + 180.0) / 360.0
    y_fraction = (1.0 - math.log(math.tan(lat_rad) + (1 / math.cos(lat_rad))) / math.pi) / 2.0
    return (int(x_fraction * tiles_per_axis), int(y_fraction * tiles_per_axis))
# Read the per-map settings: each map is one section in settings.ini,
# selected by the first command-line argument.
p = ConfigParser()
p.optionxform = str
p.read('settings.ini')
name = sys.argv[1] # section from ini
source = p.get(name,'source')
zoom = p.getint(name,'zoom')
# Ensure the output directory exists, then build the output file name.
if not os.path.exists( p.get(name,'dest')):
    os.mkdir( p.get(name,'dest'))
dest = os.path.join( p.get(name,'dest') , "%s_zoom%i.jpeg" % (name,zoom))
tilestore = p.get(name,'tilestore')
# parse bounding box
# The bbox value is expected to hold four double-quoted numbers in the
# order east, north, south, west (every second split token is a number).
txt = p.get(name,'bbox')
c = [float(v) for v in txt.split('"')[1::2]]
bbox = dict(list(zip(['e','n','s','w'], c)))
if not os.path.exists(tilestore):
    os.makedirs(tilestore)
# Corner tiles of the requested area at the chosen zoom.
top_left = deg2num(bbox['n'],bbox['w'], zoom)
bottom_right = deg2num(bbox['s'],bbox['e'], zoom)
# create tile list
# NOTE(review): range() stops before bottom_right, so the easternmost tile
# column and southernmost tile row are excluded -- confirm this is intended.
tiles = []
for x in range(top_left[0], bottom_right[0]):
    for y in range(top_left[1], bottom_right[1]):
        tiles.append((zoom,x,y))
print('Nr tiles: ', len(tiles))
# download tiles and make map
# Standard web-map tiles are 256x256 pixels; the mosaic is sized accordingly.
height = (bottom_right[1] - top_left[1]) * 256
width = (bottom_right[0] - top_left[0]) * 256
img = Image.new("RGB", (width,height))
for idx,tile in enumerate(tiles):
    zoom,x,y = tile
    # Cache file name: "<zoom>_<x>_<y>.png" inside the tile store.
    fName = '_'.join([str(f) for f in tile]) + '.png'
    fName = os.path.join(tilestore, fName)
    print('[%i/%i] %s' % (idx+1,len(tiles),fName), end=' ')
    if not os.path.exists(fName):
        # Not cached yet: fetch from the tile server. The 'source' URL
        # template is filled positionally with (zoom, x, y).
        url = source.format(*tile)
        print(f'Requesting {url} ', end='')
        urllib.request.urlretrieve(url,fName)
        print(' ok')
    else:
        print(' cached')
    # paste
    # Paste the tile at its pixel offset relative to the top-left tile.
    tmp = Image.open(fName)
    img.paste(tmp, (256 * (x - top_left[0]), 256 * (y - top_left[1])))
print('Saving to ', dest)
img.save(dest, "JPEG") | [
"os.path.exists",
"PIL.Image.open",
"configparser.ConfigParser",
"os.makedirs",
"math.tan",
"PIL.Image.new",
"os.path.join",
"math.radians",
"math.cos"
] | [((578, 592), 'configparser.ConfigParser', 'ConfigParser', ([], {}), '()\n', (590, 592), False, 'from configparser import ConfigParser\n'), ((1560, 1593), 'PIL.Image.new', 'Image.new', (['"""RGB"""', '(width, height)'], {}), "('RGB', (width, height))\n", (1569, 1593), False, 'from PIL import Image\n'), ((358, 379), 'math.radians', 'math.radians', (['lat_deg'], {}), '(lat_deg)\n', (370, 379), False, 'import math\n'), ((1072, 1097), 'os.path.exists', 'os.path.exists', (['tilestore'], {}), '(tilestore)\n', (1086, 1097), False, 'import os\n'), ((1103, 1125), 'os.makedirs', 'os.makedirs', (['tilestore'], {}), '(tilestore)\n', (1114, 1125), False, 'import os\n'), ((1719, 1749), 'os.path.join', 'os.path.join', (['tilestore', 'fName'], {}), '(tilestore, fName)\n', (1731, 1749), False, 'import os\n'), ((2059, 2076), 'PIL.Image.open', 'Image.open', (['fName'], {}), '(fName)\n', (2069, 2076), False, 'from PIL import Image\n'), ((1821, 1842), 'os.path.exists', 'os.path.exists', (['fName'], {}), '(fName)\n', (1835, 1842), False, 'import os\n'), ((479, 496), 'math.tan', 'math.tan', (['lat_rad'], {}), '(lat_rad)\n', (487, 496), False, 'import math\n'), ((504, 521), 'math.cos', 'math.cos', (['lat_rad'], {}), '(lat_rad)\n', (512, 521), False, 'import math\n')] |
import numpy as np
import matplotlib.pyplot as plt
from matplotlib.ticker import MultipleLocator
from tqdm import tqdm
import torch
from torch.utils.data import DataLoader
import torch.nn.functional as F
from model.model import BaseNet
from model.config import arguments
from dataset.dataset import FlowerData
def get_cm_each_round(args, device, dataloader_test, round_num: int=None, all_classes: bool=False):
    """Evaluate one saved checkpoint; return confusion matrix and probabilities.

    args: config namespace; reads class_num, num_image_per_class, epochs and
        test_batch_size.
    device: torch device the model and each batch are moved to.
    dataloader_test: loader over the held-out test split.
    round_num: which training round's checkpoint to load (ignored when
        all_classes is True).
    all_classes: load '../checkpoint/all_class.pth' instead of a per-round
        checkpoint.

    Returns (cm, prob):
      cm   -- (class_num, class_num) counts with cm[true, pred]
      prob -- (class_num * num_image_per_class // 2, class_num) softmax
              probabilities, one row per test sample.

    NOTE(review): prob rows are addressed at fixed test_batch_size strides,
    which assumes every batch (including the last) holds exactly
    test_batch_size samples -- confirm drop_last / dataset divisibility.
    """
    network = BaseNet(num_class=args.class_num)
    # Pick the checkpoint: the all-class model, or the model of one round.
    if all_classes:
        network.load_state_dict(torch.load('../checkpoint/all_class.pth'))
    else:
        network.load_state_dict(torch.load(
            '../checkpoint/round%.2d_epoch%.4d.pth' % (round_num, args.epochs)))
    # Inference in half precision on the target device.
    network = network.to(device).half()
    network.eval()
    prob = np.zeros((args.class_num * args.num_image_per_class // 2, args.class_num))
    cm = np.zeros((args.class_num, args.class_num))
    with torch.no_grad():
        for batch, (data, target) in enumerate(tqdm(dataloader_test)):
            data = data.to(device).half()
            target = target.to(device).long()
            output = network(data)
            # Hard prediction = arg-max over the class logits.
            _, pred = torch.max(output, 1)
            target = target.cpu().numpy()
            pred = pred.cpu().numpy()
            output = F.softmax(output, 1).cpu().numpy()
            # Write this batch's softmax rows at fixed batch-size strides.
            idx1 = batch * args.test_batch_size
            idx2 = idx1 + args.test_batch_size
            prob[idx1: idx2, :] = output
            # Accumulate counts: rows = ground truth, columns = prediction.
            for i, j in zip(target, pred):
                cm[i, j] += 1
    return cm, prob
def get_confidence(cms, normalization: bool=False, save: bool=False):
    """Per-classifier, per-class confidence extracted from confusion matrices.

    cms: (num_classifiers, num_classes, num_classes) stack of confusion
        matrices, one per classifier (rows = true class, columns = predicted
        class, as filled by `get_cm_each_round`).
    normalization: if True, divide each diagonal count by its column sum so
        the confidence is the per-class precision; otherwise divide by the
        row sum so the confidence is the per-class recall/accuracy.
    save: if True, save the result under '../log/cm/' (suffix gets
        '_normalized' appended in the precision case).

    Returns an (num_classifiers, num_classes) array of confidences.

    Fix over the previous version: the input `cms` is no longer mutated in
    place (the old code did `cms[i] /= cms[i].sum(...)`, silently corrupting
    the caller's array), and the normalization is computed directly on the
    diagonal instead of via a broadcast that was only correct on the diagonal.
    The returned confidence values are unchanged.
    """
    cms = np.asarray(cms, dtype=float)
    # diagonal[i, j] = number of class-j samples classifier i got right
    diagonal = np.diagonal(cms, axis1=1, axis2=2)
    if normalization:
        denom = cms.sum(axis=1)  # column sums -> precision weighting
    else:
        denom = cms.sum(axis=2)  # row sums -> recall weighting
    confidences = diagonal / denom
    suffix = 'confidences'
    if normalization:
        suffix += '_normalized'
    if save:
        np.save('../log/cm/' + suffix, confidences)
    return confidences
def plot_cm(matrix, round_num: int=None, suffix=''):
    """Render a row-normalized confusion matrix as a heat-map PNG.

    matrix: (num_classes, num_classes) confusion matrix, rows = true class.
    round_num: optional training round number, appended to the file name.
    suffix: extra tag for the output file name.

    The figure is written to '../log/cm/cm<suffix>.png'; every diagonal cell
    is annotated with its per-class accuracy in percent.
    """
    classes = ['%d' % j for j in range(matrix.shape[0])]
    # Normalize each row by its sum so cell (i, j) is P(pred = j | true = i).
    # Fix: np.float was removed in NumPy 1.24 -- builtin float is the
    # documented replacement; keepdims replaces the old np.dot broadcast trick.
    matrix = matrix.astype(float)
    matrix = matrix / matrix.sum(1, keepdims=True)
    # plot ('agg' backend renders off-screen, so no display is required)
    plt.switch_backend('agg')
    fig = plt.figure()
    ax = fig.add_subplot(111)
    cax = ax.matshow(matrix)
    fig.colorbar(cax)
    ax.xaxis.set_major_locator(MultipleLocator(1))
    ax.yaxis.set_major_locator(MultipleLocator(1))
    # Annotate the per-class accuracy (percent) on the diagonal cells.
    for i in range(matrix.shape[0]):
        ax.text(i, i, str('%.2f' % (matrix[i, i] * 100)), va='center', ha='center', fontsize=5.5)
    ax.set_xticklabels([''] + classes, rotation=90)
    ax.set_yticklabels([''] + classes)
    if round_num:
        suffix += '_round_%.2d' % round_num
    plt.savefig('../log/cm/cm%s.png' % suffix, dpi=200)
    plt.close()
def get_cm_assemble_prob(confusion_all, probs_all, confidences_all, targets, save: bool=False,
                         classifier_num=None, use_weight: bool=False, classifier_list=None,
                         normalization: bool=False):
    """Ensemble by soft voting: sum (optionally confidence-weighted)
    per-classifier class probabilities and predict the arg-max class.

    confusion_all: (n_clf, n_cls, n_cls) per-classifier confusion matrices
    probs_all: (n_clf, n_samples, n_cls) per-classifier class probabilities
    confidences_all: (n_clf, n_cls) per-classifier, per-class weights
    targets: (n_samples,) ground-truth labels
    save: dump the ensembled confusion matrix as .npy (needs classifier_num)
    classifier_num: keep only the first `classifier_num` classifiers
    use_weight: weight each classifier's probabilities by its confidences
    classifier_list: explicit subset of classifiers to ensemble
    normalization: recompute confidences from precision-normalized matrices

    Returns the ensembled confusion matrix and prints its accuracy.
    """
    selected_cms = confusion_all
    selected_probs = probs_all
    weights = confidences_all
    if normalization:
        weights = get_confidence(selected_cms, normalization=normalization)
    if classifier_num:
        selected_cms = selected_cms[:classifier_num]
        selected_probs = selected_probs[:classifier_num]
        weights = weights[:classifier_num]
    if classifier_list:
        selected_cms = selected_cms[classifier_list]
        selected_probs = selected_probs[classifier_list]
        weights = weights[classifier_list]
    ensemble_cm = np.zeros(selected_cms.shape[1:])
    # (n_samples, n_clf, n_cls): one probability row per classifier per sample
    per_sample = selected_probs.transpose((1, 0, 2))
    if use_weight:
        per_sample = per_sample * weights
    summed = per_sample.sum(1)      # (n_samples, n_cls)
    predicted = summed.argmax(1)
    for truth, guess in zip(targets, predicted):
        ensemble_cm[int(truth), guess] += 1
    if save and classifier_num:
        template = ('cm_assemble_prob_weight_%.2dclassifiers' if use_weight
                    else 'cm_assemble_prob_%.2dclassifiers')
        np.save('../log/cm/' + template % classifier_num, ensemble_cm)
    accuracy = ensemble_cm.diagonal().sum() / ensemble_cm.sum()
    tags = [', soft vote', ', use weight' if use_weight else ', no weight']
    if classifier_num:
        tags.append(', %d classifiers' % classifier_num)
    if classifier_list:
        tags.append(', selected list')
    if normalization:
        tags.append(', normalization')
    print('accuracy of assemble method' + ''.join(tags) + ' : %.4f' % accuracy)
    return ensemble_cm
def get_cm_assemble_vote(confusion_all, probs_all, confidences_all, targets, save: bool=False,
                         classifier_num: int=None, use_weight: bool=False, classifier_list=None,
                         normalization: bool = False):
    """
    Ensemble the classifiers by hard (majority) voting.

    confusion_all: per-classifier confusion matrices, e.g. (10, 17, 17)
    probs_all: per-classifier class probabilities, e.g. (10, 680, 17)
    confidences_all: per-classifier/per-class weights, e.g. (10, 17)
    targets: ground-truth labels, e.g. (680,)
    save: persist the resulting confusion matrix as .npy (only when
        `classifier_num` is also given, matching the file-name template)
    classifier_num: keep only the first `classifier_num` classifiers
    use_weight: weight each ballot by the voter's per-class confidence
    classifier_list: explicit subset of classifiers to ensemble
    normalization: recompute confidences with normalization
    Returns the ensembled confusion matrix.
    """
    cms, prob_stack, weights = confusion_all, probs_all, confidences_all
    if normalization:
        weights = get_confidence(cms, normalization=normalization)
    if classifier_num:
        cms, prob_stack, weights = (cms[:classifier_num],
                                    prob_stack[:classifier_num],
                                    weights[:classifier_num])
    if classifier_list:
        cms, prob_stack, weights = (cms[classifier_list],
                                    prob_stack[classifier_list],
                                    weights[classifier_list])
    # Each classifier casts one ballot per sample: its argmax class.
    ballots = prob_stack.transpose((1, 0, 2)).argmax(2)  # (n_samples, n_classifiers)
    n_samples, n_classifiers = ballots.shape
    votes = np.zeros((n_samples, cms.shape[2]))  # per-sample, per-class tally
    for s in range(n_samples):
        for c in range(n_classifiers):
            chosen = ballots[s, c]
            votes[s, chosen] += weights[c, chosen] if use_weight else 1
    predictions = votes.argmax(1)
    cm_assemble = np.zeros(cms.shape[1:])
    for true_label, predicted in zip(targets, predictions):
        cm_assemble[int(true_label), predicted] += 1
    if save and classifier_num:
        template = ('../log/cm/cm_assemble_vote_weight_%.2dclassifiers' if use_weight
                    else '../log/cm/cm_assemble_vote_%.2dclassifiers')
        np.save(template % classifier_num, cm_assemble)
    accuracy = cm_assemble.diagonal().sum() / cm_assemble.sum()
    tags = [', hard vote', ', use weight' if use_weight else ', no weight']
    if classifier_num:
        tags.append(', %d classifiers' % classifier_num)
    if classifier_list:
        tags.append(', selected list')
    if normalization:
        tags.append(', normalization')
    print('accuracy of assemble method' + ''.join(tags) + ' : %.4f' % accuracy)
    return cm_assemble
def main(args, matrix_from_file: bool = False):
    """
    Evaluate the four ensemble strategies and plot their confusion matrices.

    args: parsed command-line arguments (class_num, batch sizes, etc.).
    matrix_from_file: when True, reuse the matrices cached under ../log/cm/
        instead of re-running the per-round evaluation.
    """
    use_cuda = not args.no_cuda and torch.cuda.is_available()
    device = torch.device("cuda:0" if use_cuda else "cpu")
    if matrix_from_file:
        # Reuse cached per-round results.
        cms = np.load('../log/cm/cms.npy')
        probs = np.load('../log/cm/probabilities.npy')
        confidences = np.load('../log/cm/confidences.npy')
    else:
        # Evaluate each of the 10 rounds from scratch and cache the results.
        cms = np.zeros((10, args.class_num, args.class_num))  # (10, 17, 17)
        probs = np.zeros((10, args.class_num * args.num_image_per_class // 2, args.class_num))  # (10, 680, 17)
        dataset_test = FlowerData(args, split='test')
        dataloader_test = DataLoader(dataset_test, batch_size=args.test_batch_size,
                                     shuffle=False, num_workers=10)
        for round_idx in range(10):
            cms[round_idx], probs[round_idx] = get_cm_each_round(
                args, device, dataloader_test, round_num=round_idx)
        confidences = get_confidence(cms)
        np.save('../log/cm/cms.npy', cms)
        np.save('../log/cm/probabilities.npy', probs)
    targets = np.load('../log/cm/targets.npy')
    # Hard voting: unweighted, then confidence-weighted (normalized).
    plot_cm(get_cm_assemble_vote(cms, probs, confidences, targets),
            suffix='_hard_no_weight')
    plot_cm(get_cm_assemble_vote(cms, probs, confidences, targets,
                                 use_weight=True, normalization=True),
            suffix='_hard_weight')
    # Soft voting: unweighted, then confidence-weighted (normalized).
    plot_cm(get_cm_assemble_prob(cms, probs, confidences, targets),
            suffix='_soft_no_weight')
    plot_cm(get_cm_assemble_prob(cms, probs, confidences, targets,
                                 use_weight=True, normalization=True),
            suffix='_soft_weight')
# Entry point: by default reuse the matrices previously cached under
# ../log/cm/ instead of re-evaluating every round.
if __name__ == '__main__':
    argument = arguments()
    main(argument, matrix_from_file=True)
    # args = argument
    # use_cuda = not argument.no_cuda and torch.cuda.is_available()
    # device = torch.device("cuda:0" if use_cuda else "cpu")
    # dataset_test = FlowerData(args, split='test')
    # dataloader_test = DataLoader(dataset_test, batch_size=args.test_batch_size,
    #                              shuffle=False, num_workers=10)
    # cm, _ = get_cm_each_round(args, device, dataloader_test, all_classes=True)
    # plot_cm(cm, suffix='all_classes')
    # print(cm.diagonal().sum() / cm.sum())
| [
"torch.max",
"dataset.dataset.FlowerData",
"torch.cuda.is_available",
"matplotlib.pyplot.switch_backend",
"model.config.arguments",
"torch.nn.functional.softmax",
"numpy.save",
"matplotlib.pyplot.close",
"model.model.BaseNet",
"matplotlib.pyplot.savefig",
"numpy.ones",
"torch.device",
"matpl... | [((485, 518), 'model.model.BaseNet', 'BaseNet', ([], {'num_class': 'args.class_num'}), '(num_class=args.class_num)\n', (492, 518), False, 'from model.model import BaseNet\n'), ((820, 894), 'numpy.zeros', 'np.zeros', (['(args.class_num * args.num_image_per_class // 2, args.class_num)'], {}), '((args.class_num * args.num_image_per_class // 2, args.class_num))\n', (828, 894), True, 'import numpy as np\n'), ((904, 946), 'numpy.zeros', 'np.zeros', (['(args.class_num, args.class_num)'], {}), '((args.class_num, args.class_num))\n', (912, 946), True, 'import numpy as np\n'), ((1819, 1857), 'numpy.zeros', 'np.zeros', (['(cms.shape[0], cms.shape[1])'], {}), '((cms.shape[0], cms.shape[1]))\n', (1827, 1857), True, 'import numpy as np\n'), ((2588, 2613), 'matplotlib.pyplot.switch_backend', 'plt.switch_backend', (['"""agg"""'], {}), "('agg')\n", (2606, 2613), True, 'import matplotlib.pyplot as plt\n'), ((2624, 2636), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (2634, 2636), True, 'import matplotlib.pyplot as plt\n'), ((3114, 3165), 'matplotlib.pyplot.savefig', 'plt.savefig', (["('../log/cm/cm%s.png' % suffix)"], {'dpi': '(200)'}), "('../log/cm/cm%s.png' % suffix, dpi=200)\n", (3125, 3165), True, 'import matplotlib.pyplot as plt\n'), ((3171, 3182), 'matplotlib.pyplot.close', 'plt.close', ([], {}), '()\n', (3180, 3182), True, 'import matplotlib.pyplot as plt\n'), ((4172, 4195), 'numpy.zeros', 'np.zeros', (['cms.shape[1:]'], {}), '(cms.shape[1:])\n', (4180, 4195), True, 'import numpy as np\n'), ((6269, 6292), 'numpy.zeros', 'np.zeros', (['cms.shape[1:]'], {}), '(cms.shape[1:])\n', (6277, 6292), True, 'import numpy as np\n'), ((6431, 6471), 'numpy.zeros', 'np.zeros', (['(probs.shape[0], cms.shape[2])'], {}), '((probs.shape[0], cms.shape[2]))\n', (6439, 6471), True, 'import numpy as np\n'), ((7776, 7821), 'torch.device', 'torch.device', (["('cuda:0' if use_cuda else 'cpu')"], {}), "('cuda:0' if use_cuda else 'cpu')\n", (7788, 7821), False, 'import 
torch\n'), ((9627, 9638), 'model.config.arguments', 'arguments', ([], {}), '()\n', (9636, 9638), False, 'from model.config import arguments\n'), ((957, 972), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (970, 972), False, 'import torch\n'), ((2174, 2217), 'numpy.save', 'np.save', (["('../log/cm/' + suffix)", 'confidences'], {}), "('../log/cm/' + suffix, confidences)\n", (2181, 2217), True, 'import numpy as np\n'), ((2520, 2549), 'numpy.ones', 'np.ones', (['(1, matrix.shape[1])'], {}), '((1, matrix.shape[1]))\n', (2527, 2549), True, 'import numpy as np\n'), ((2749, 2767), 'matplotlib.ticker.MultipleLocator', 'MultipleLocator', (['(1)'], {}), '(1)\n', (2764, 2767), False, 'from matplotlib.ticker import MultipleLocator\n'), ((2800, 2818), 'matplotlib.ticker.MultipleLocator', 'MultipleLocator', (['(1)'], {}), '(1)\n', (2815, 2818), False, 'from matplotlib.ticker import MultipleLocator\n'), ((7737, 7762), 'torch.cuda.is_available', 'torch.cuda.is_available', ([], {}), '()\n', (7760, 7762), False, 'import torch\n'), ((7866, 7912), 'numpy.zeros', 'np.zeros', (['(10, args.class_num, args.class_num)'], {}), '((10, args.class_num, args.class_num))\n', (7874, 7912), True, 'import numpy as np\n'), ((7945, 8023), 'numpy.zeros', 'np.zeros', (['(10, args.class_num * args.num_image_per_class // 2, args.class_num)'], {}), '((10, args.class_num * args.num_image_per_class // 2, args.class_num))\n', (7953, 8023), True, 'import numpy as np\n'), ((8065, 8095), 'dataset.dataset.FlowerData', 'FlowerData', (['args'], {'split': '"""test"""'}), "(args, split='test')\n", (8075, 8095), False, 'from dataset.dataset import FlowerData\n'), ((8122, 8214), 'torch.utils.data.DataLoader', 'DataLoader', (['dataset_test'], {'batch_size': 'args.test_batch_size', 'shuffle': '(False)', 'num_workers': '(10)'}), '(dataset_test, batch_size=args.test_batch_size, shuffle=False,\n num_workers=10)\n', (8132, 8214), False, 'from torch.utils.data import DataLoader\n'), ((8454, 8487), 'numpy.save', 
'np.save', (['"""../log/cm/cms.npy"""', 'cms'], {}), "('../log/cm/cms.npy', cms)\n", (8461, 8487), True, 'import numpy as np\n'), ((8496, 8541), 'numpy.save', 'np.save', (['"""../log/cm/probabilities.npy"""', 'probs'], {}), "('../log/cm/probabilities.npy', probs)\n", (8503, 8541), True, 'import numpy as np\n'), ((8566, 8594), 'numpy.load', 'np.load', (['"""../log/cm/cms.npy"""'], {}), "('../log/cm/cms.npy')\n", (8573, 8594), True, 'import numpy as np\n'), ((8611, 8649), 'numpy.load', 'np.load', (['"""../log/cm/probabilities.npy"""'], {}), "('../log/cm/probabilities.npy')\n", (8618, 8649), True, 'import numpy as np\n'), ((8672, 8708), 'numpy.load', 'np.load', (['"""../log/cm/confidences.npy"""'], {}), "('../log/cm/confidences.npy')\n", (8679, 8708), True, 'import numpy as np\n'), ((8727, 8759), 'numpy.load', 'np.load', (['"""../log/cm/targets.npy"""'], {}), "('../log/cm/targets.npy')\n", (8734, 8759), True, 'import numpy as np\n'), ((571, 612), 'torch.load', 'torch.load', (['"""../checkpoint/all_class.pth"""'], {}), "('../checkpoint/all_class.pth')\n", (581, 612), False, 'import torch\n'), ((656, 734), 'torch.load', 'torch.load', (["('../checkpoint/round%.2d_epoch%.4d.pth' % (round_num, args.epochs))"], {}), "('../checkpoint/round%.2d_epoch%.4d.pth' % (round_num, args.epochs))\n", (666, 734), False, 'import torch\n'), ((1021, 1042), 'tqdm.tqdm', 'tqdm', (['dataloader_test'], {}), '(dataloader_test)\n', (1025, 1042), False, 'from tqdm import tqdm\n'), ((1191, 1211), 'torch.max', 'torch.max', (['output', '(1)'], {}), '(output, 1)\n', (1200, 1211), False, 'import torch\n'), ((4590, 4684), 'numpy.save', 'np.save', (["('../log/cm/cm_assemble_prob_weight_%.2dclassifiers' % classifier_num)", 'cm_assemble'], {}), "('../log/cm/cm_assemble_prob_weight_%.2dclassifiers' %\n classifier_num, cm_assemble)\n", (4597, 4684), True, 'import numpy as np\n'), ((4715, 4802), 'numpy.save', 'np.save', (["('../log/cm/cm_assemble_prob_%.2dclassifiers' % classifier_num)", 'cm_assemble'], {}), 
"('../log/cm/cm_assemble_prob_%.2dclassifiers' % classifier_num,\n cm_assemble)\n", (4722, 4802), True, 'import numpy as np\n'), ((6969, 7063), 'numpy.save', 'np.save', (["('../log/cm/cm_assemble_vote_weight_%.2dclassifiers' % classifier_num)", 'cm_assemble'], {}), "('../log/cm/cm_assemble_vote_weight_%.2dclassifiers' %\n classifier_num, cm_assemble)\n", (6976, 7063), True, 'import numpy as np\n'), ((7094, 7181), 'numpy.save', 'np.save', (["('../log/cm/cm_assemble_vote_%.2dclassifiers' % classifier_num)", 'cm_assemble'], {}), "('../log/cm/cm_assemble_vote_%.2dclassifiers' % classifier_num,\n cm_assemble)\n", (7101, 7181), True, 'import numpy as np\n'), ((1314, 1334), 'torch.nn.functional.softmax', 'F.softmax', (['output', '(1)'], {}), '(output, 1)\n', (1323, 1334), True, 'import torch.nn.functional as F\n')] |
import datetime as dt
from re import T
from sqlalchemy.schema import Column
from sqlalchemy.types import String, DateTime
from uuid import UUID, uuid4
import bigfastapi.db.database as database
class Role(database.Base):
    """ORM model for a named role that belongs to an organization."""
    __tablename__ = "roles"
    # The default must be a callable: `default=uuid4().hex` would be evaluated
    # once at import time, so every inserted row would reuse the same primary
    # key and every insert after the first would violate the PK constraint.
    id = Column(String(255), primary_key=True, index=True,
                default=lambda: uuid4().hex)
    organization_id = Column(String(255), index=True)
    role_name = Column(String(255), index=True)
"sqlalchemy.types.String",
"uuid.uuid4"
] | [((265, 276), 'sqlalchemy.types.String', 'String', (['(255)'], {}), '(255)\n', (271, 276), False, 'from sqlalchemy.types import String, DateTime\n'), ((358, 369), 'sqlalchemy.types.String', 'String', (['(255)'], {}), '(255)\n', (364, 369), False, 'from sqlalchemy.types import String, DateTime\n'), ((406, 417), 'sqlalchemy.types.String', 'String', (['(255)'], {}), '(255)\n', (412, 417), False, 'from sqlalchemy.types import String, DateTime\n'), ((316, 323), 'uuid.uuid4', 'uuid4', ([], {}), '()\n', (321, 323), False, 'from uuid import UUID, uuid4\n')] |
# Generated by Django 3.0.5 on 2020-04-20 15:21
from django.db import migrations, models
class Migration(migrations.Migration):
    """Auto-generated migration: relax Domain.client_ip to allow empty values."""

    dependencies = [
        ('api', '0001_initial'),
    ]

    operations = [
        migrations.AlterField(
            model_name='domain',
            name='client_ip',
            # blank=True permits empty form input; null=True permits NULL in the DB.
            field=models.GenericIPAddressField(blank=True, null=True, verbose_name='Client IP'),
        ),
    ]
| [
"django.db.models.GenericIPAddressField"
] | [((324, 401), 'django.db.models.GenericIPAddressField', 'models.GenericIPAddressField', ([], {'blank': '(True)', 'null': '(True)', 'verbose_name': '"""Client IP"""'}), "(blank=True, null=True, verbose_name='Client IP')\n", (352, 401), False, 'from django.db import migrations, models\n')] |
import requests
import calendar
import keys
# Base forecast endpoint; per-request query parameters are appended below.
api_call = 'https://api.openweathermap.org/data/2.5/forecast?appid=' + keys.api_key

running = True

# Program loop
while running:
    # Build the request URL fresh on every iteration.  (Fix: the original
    # appended '&q=...'/'&zip=...' to the shared base URL, so parameters from
    # earlier searches accumulated on every later request.)
    request_url = api_call

    # Asks the user for the city or zip code to be queried
    while True:
        # Input validation
        try:
            print('\nThis application supports search by city(0) or search by zip code(1).')
            search = int(input('Please input 0 or 1: '))
        except ValueError:
            print("Sorry, I didn't understand that.")
        else:
            # Passed the validation test
            if search == 0:
                city = input('Please input the city name: ')
                if city.lower() == 'sf':
                    city = 'San Francisco, US'
                # Appends the city to the request URL
                request_url += '&q=' + city
                break
            elif search == 1:
                zip_code = input('Please input the zip code: ')
                # Appends the zip code to the request URL
                request_url += '&zip=' + zip_code
                break
            else:
                # Prints the invalid number (not 0 or 1)
                print('{} is not a valid option.'.format(search))

    # Stores the Json response
    json_data = requests.get(request_url).json()
    location_data = {
        'city': json_data['city']['name'],
        'country': json_data['city']['country']
    }
    print('\n{city}, {country}'.format(**location_data))

    # The current date we are iterating through
    current_date = ''

    # Iterates through the array of dictionaries named list in json_data
    for item in json_data['list']:
        # Time of the weather data received, partitioned into 3 hour blocks
        time = item['dt_txt']
        # Split the time into date and hour [2018-04-15 06:00:00]
        next_date, hour = time.split(' ')
        # Stores the current date and prints it once
        if current_date != next_date:
            current_date = next_date
            year, month, day = current_date.split('-')
            date = {'y': year, 'm': month, 'd': day}
            print('\n{m}/{d}/{y}'.format(**date))
        # Grabs the first 2 integers from our HH:MM:SS string to get the hours
        hour = int(hour[:2])
        # Sets the AM (ante meridiem) or PM (post meridiem) period
        if hour < 12:
            if hour == 0:
                hour = 12
            meridiem = 'AM'
        else:
            if hour > 12:
                hour -= 12
            meridiem = 'PM'
        # Prints the hours [HH:MM AM/PM]
        print('\n%i:00 %s' % (hour, meridiem))
        # Temperature is measured in Kelvin
        temperature = item['main']['temp']
        # Humidity
        humidity = item['main']['humidity']
        # Weather condition
        description = item['weather'][0]['description']
        # Prints the description as well as the temperature in Celcius and Farenheit and humidity
        print('Weather condition: %s' % description)
        print('Celcius: {:.2f}'.format(temperature - 273.15))
        print('Farenheit: %.2f' % (temperature * 9/5 - 459.67))
        print('Humidity: %s' % humidity)

    # Prints a calendar of the current month.  (Fix: the original rebound the
    # name `calendar` to this string, shadowing the module and crashing with
    # AttributeError on the second pass through the program loop.)
    month_calendar = calendar.month(int(year), int(month))
    print('\n' + month_calendar)

    # Asks the user if he/she wants to exit
    while True:
        running = input('Anything else we can help you with? ')
        if running.lower() == 'yes' or running.lower() == 'y':
            print('Great!')
            break
        elif running.lower() == 'no' or running.lower() == 'n' or running == 'exit':
            print('Have a great day!')
            running = False
            break
        else:
            print('Sorry, I didn\'t get that.')
"requests.get"
] | [((1330, 1352), 'requests.get', 'requests.get', (['api_call'], {}), '(api_call)\n', (1342, 1352), False, 'import requests\n')] |
# -*- coding: utf-8 -*-
# Copyright Noronha Development Team
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""TODO: {{module description}}
"""
from mongoengine import CASCADE
from mongoengine.fields import StringField, DictField, ReferenceField, EmbeddedDocumentField, BooleanField
from noronha.common.constants import DBConst, OnBoard
from noronha.db.main import SmartDoc, SmartEmbeddedDoc
from noronha.db.ds import EmbeddedDataset
from noronha.db.model import Model, EmbeddedModel
from noronha.db.train import EmbeddedTraining
class ProtoModelVersion(object):
    """Constants shared by ModelVersion and EmbeddedModelVersion."""
    # Fields whose combination uniquely identifies a model version.
    PK_FIELDS = ['model.name', 'name']
    # Metadata file name (see noronha.common.constants.OnBoard.Meta).
    FILE_NAME = OnBoard.Meta.MV
class EmbeddedModelVersion(SmartEmbeddedDoc):
    """Denormalized model-version document for embedding inside other documents."""

    PK_FIELDS = ProtoModelVersion.PK_FIELDS
    FILE_NAME = ProtoModelVersion.FILE_NAME

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        # Runtime-only flag (plain attribute, not a persisted mongoengine field).
        self.use_as_pretrained = False

    name = StringField(max_length=DBConst.MAX_NAME_LEN)
    model = EmbeddedDocumentField(EmbeddedModel, default=None)
    train = EmbeddedDocumentField(EmbeddedTraining, default=None)
    ds = EmbeddedDocumentField(EmbeddedDataset, default=None)
    compressed = BooleanField(default=False)
    # NOTE(review): mutable default ({}) is shared at class level; consider
    # default=dict — verify mongoengine copies the value per document.
    details = DictField(default={})
    # Unlike ModelVersion.pretrained (an embedded document), the pretrained
    # reference is stored here as a plain string.
    pretrained = StringField(default=None)
    lightweight = BooleanField(default=False)
class ModelVersion(SmartDoc):
    """Top-level MongoDB document describing one version of a model."""

    PK_FIELDS = ProtoModelVersion.PK_FIELDS
    FILE_NAME = ProtoModelVersion.FILE_NAME
    # Embedded counterpart produced by to_embedded().
    EMBEDDED_SCHEMA = EmbeddedModelVersion

    def __init__(self, *args, **kwargs):
        # NOTE(review): pass-through only; kept for symmetry with the embedded
        # class, which performs extra initialization.
        super().__init__(*args, **kwargs)

    name = StringField(required=True, max_length=DBConst.MAX_NAME_LEN)
    # Deleting the referenced Model cascades to its versions.
    model = ReferenceField(Model, required=True, reverse_delete_rule=CASCADE)
    train = EmbeddedDocumentField(EmbeddedTraining, default=None)
    ds = EmbeddedDocumentField(EmbeddedDataset, default=None)
    compressed = BooleanField(default=False)
    details = DictField(default={})
    pretrained = EmbeddedDocumentField(EmbeddedModelVersion, default=None)
    lightweight = BooleanField(default=False)

    def to_embedded(self):
        """Convert to the embedded form, replacing the pretrained document
        with its show() representation when one is set."""
        emb: EmbeddedModelVersion = super().to_embedded()
        if isinstance(self.pretrained, EmbeddedModelVersion):
            # presumably show() yields a compact/display form — verify upstream
            emb.pretrained = self.pretrained.show()
        return emb
| [
"mongoengine.fields.BooleanField",
"mongoengine.fields.ReferenceField",
"mongoengine.fields.StringField",
"mongoengine.fields.EmbeddedDocumentField",
"mongoengine.fields.DictField"
] | [((1437, 1481), 'mongoengine.fields.StringField', 'StringField', ([], {'max_length': 'DBConst.MAX_NAME_LEN'}), '(max_length=DBConst.MAX_NAME_LEN)\n', (1448, 1481), False, 'from mongoengine.fields import StringField, DictField, ReferenceField, EmbeddedDocumentField, BooleanField\n'), ((1494, 1544), 'mongoengine.fields.EmbeddedDocumentField', 'EmbeddedDocumentField', (['EmbeddedModel'], {'default': 'None'}), '(EmbeddedModel, default=None)\n', (1515, 1544), False, 'from mongoengine.fields import StringField, DictField, ReferenceField, EmbeddedDocumentField, BooleanField\n'), ((1557, 1610), 'mongoengine.fields.EmbeddedDocumentField', 'EmbeddedDocumentField', (['EmbeddedTraining'], {'default': 'None'}), '(EmbeddedTraining, default=None)\n', (1578, 1610), False, 'from mongoengine.fields import StringField, DictField, ReferenceField, EmbeddedDocumentField, BooleanField\n'), ((1620, 1672), 'mongoengine.fields.EmbeddedDocumentField', 'EmbeddedDocumentField', (['EmbeddedDataset'], {'default': 'None'}), '(EmbeddedDataset, default=None)\n', (1641, 1672), False, 'from mongoengine.fields import StringField, DictField, ReferenceField, EmbeddedDocumentField, BooleanField\n'), ((1690, 1717), 'mongoengine.fields.BooleanField', 'BooleanField', ([], {'default': '(False)'}), '(default=False)\n', (1702, 1717), False, 'from mongoengine.fields import StringField, DictField, ReferenceField, EmbeddedDocumentField, BooleanField\n'), ((1732, 1753), 'mongoengine.fields.DictField', 'DictField', ([], {'default': '{}'}), '(default={})\n', (1741, 1753), False, 'from mongoengine.fields import StringField, DictField, ReferenceField, EmbeddedDocumentField, BooleanField\n'), ((1771, 1796), 'mongoengine.fields.StringField', 'StringField', ([], {'default': 'None'}), '(default=None)\n', (1782, 1796), False, 'from mongoengine.fields import StringField, DictField, ReferenceField, EmbeddedDocumentField, BooleanField\n'), ((1815, 1842), 'mongoengine.fields.BooleanField', 'BooleanField', ([], {'default': 
'(False)'}), '(default=False)\n', (1827, 1842), False, 'from mongoengine.fields import StringField, DictField, ReferenceField, EmbeddedDocumentField, BooleanField\n'), ((2124, 2183), 'mongoengine.fields.StringField', 'StringField', ([], {'required': '(True)', 'max_length': 'DBConst.MAX_NAME_LEN'}), '(required=True, max_length=DBConst.MAX_NAME_LEN)\n', (2135, 2183), False, 'from mongoengine.fields import StringField, DictField, ReferenceField, EmbeddedDocumentField, BooleanField\n'), ((2196, 2261), 'mongoengine.fields.ReferenceField', 'ReferenceField', (['Model'], {'required': '(True)', 'reverse_delete_rule': 'CASCADE'}), '(Model, required=True, reverse_delete_rule=CASCADE)\n', (2210, 2261), False, 'from mongoengine.fields import StringField, DictField, ReferenceField, EmbeddedDocumentField, BooleanField\n'), ((2274, 2327), 'mongoengine.fields.EmbeddedDocumentField', 'EmbeddedDocumentField', (['EmbeddedTraining'], {'default': 'None'}), '(EmbeddedTraining, default=None)\n', (2295, 2327), False, 'from mongoengine.fields import StringField, DictField, ReferenceField, EmbeddedDocumentField, BooleanField\n'), ((2337, 2389), 'mongoengine.fields.EmbeddedDocumentField', 'EmbeddedDocumentField', (['EmbeddedDataset'], {'default': 'None'}), '(EmbeddedDataset, default=None)\n', (2358, 2389), False, 'from mongoengine.fields import StringField, DictField, ReferenceField, EmbeddedDocumentField, BooleanField\n'), ((2407, 2434), 'mongoengine.fields.BooleanField', 'BooleanField', ([], {'default': '(False)'}), '(default=False)\n', (2419, 2434), False, 'from mongoengine.fields import StringField, DictField, ReferenceField, EmbeddedDocumentField, BooleanField\n'), ((2449, 2470), 'mongoengine.fields.DictField', 'DictField', ([], {'default': '{}'}), '(default={})\n', (2458, 2470), False, 'from mongoengine.fields import StringField, DictField, ReferenceField, EmbeddedDocumentField, BooleanField\n'), ((2488, 2545), 'mongoengine.fields.EmbeddedDocumentField', 'EmbeddedDocumentField', 
(['EmbeddedModelVersion'], {'default': 'None'}), '(EmbeddedModelVersion, default=None)\n', (2509, 2545), False, 'from mongoengine.fields import StringField, DictField, ReferenceField, EmbeddedDocumentField, BooleanField\n'), ((2564, 2591), 'mongoengine.fields.BooleanField', 'BooleanField', ([], {'default': '(False)'}), '(default=False)\n', (2576, 2591), False, 'from mongoengine.fields import StringField, DictField, ReferenceField, EmbeddedDocumentField, BooleanField\n')] |
"""
Run code in background indefinitely
This module allows you to keep running a script in the background indefinitely.
A great usage of this is fetching data in background and sending notifications with :py:mod:`notifications`. You can also run a server or a Discord bot for example.
Note: Because of privacy, apps cannot access to the clipboard in background, so coding a clipboard manager is not possible.
"""
from pyto import __Class__
from datetime import datetime
from time import sleep
from os.path import abspath
import sys
import threading
class BackgroundTask:
    """
    A task that keeps the app alive so a script can run in the background.

    Starting the task plays the audio file passed to the initializer; when no
    path is given, a silent audio track is used so the system does not kill
    Pyto while the script runs.

    Usage:

    .. highlight:: python
    .. code-block:: python

        import background as bg

        with bg.BackgroundTask() as b:
            while True:
                print(b.execution_time())
                b.wait(1)
    """

    # Set by start(); None until the task has been started at least once.
    start_date = None

    # Set by stop(); None while the task is still running.
    __end_date__ = None

    def execution_time(self) -> int:
        """
        Return the task's total execution time, in whole seconds.

        While the task is running this measures up to "now"; once stopped,
        it measures up to the stop time.

        :rtype: int
        """
        reference = self.__end_date__ if self.__end_date__ is not None else datetime.now()
        return int((reference - self.start_date).total_seconds())

    @property
    def notification_delay(self) -> int:
        """
        Seconds between two reminder notifications.

        With a value of 3600, a notification is posted every hour while the
        task runs. The default value is ``21600`` (6 hours).

        :rtype: int
        """
        return self.__background_task__.delay

    @notification_delay.setter
    def notification_delay(self, new_value: int):
        self.__background_task__.delay = new_value

    @property
    def reminder_notifications(self) -> bool:
        """
        Whether reminder notifications are posted while the task runs.

        By default a notification is sent every 6 hours; set this to
        ``False`` to disable it.

        :rtype: bool
        """
        return self.__background_task__.sendNotification

    @reminder_notifications.setter
    def reminder_notifications(self, new_value: bool):
        self.__background_task__.sendNotification = new_value

    def __init__(self, audio_path=None):
        self.__background_task__ = __Class__("BackgroundTask").new()
        if audio_path is not None:
            self.__background_task__.soundPath = abspath(audio_path)

    def start(self):
        """
        Start the background task. Afterwards, Pyto will not be killed by the system.
        """
        self.start_date = datetime.now()
        self.__end_date__ = None
        try:
            # Label the native task with the running script's file name.
            self.__background_task__.scriptName = (
                threading.current_thread().script_path.split("/")[-1]
            )
        except (AttributeError, IndexError):
            # Not running from a script thread; leave the label unset.
            pass
        self.__background_task__.startBackgroundTask()

    def stop(self):
        """
        Stop the background task. Afterwards, the system may kill Pyto to free memory.
        """
        self.__end_date__ = datetime.now()
        self.__background_task__.stopBackgroundTask()

    def wait(self, delay: float):
        """
        Wait for the given number of seconds (same as ``time.sleep``).

        :param delay: Seconds to wait.
        """
        sleep(delay)

    def __enter__(self):
        self.start()
        return self

    def __exit__(self, type, value, traceback):
        self.stop()
        if not (type is None or value is None or traceback is None):
            sys.excepthook(type, value, traceback)
| [
"sys.excepthook",
"threading.current_thread",
"time.sleep",
"datetime.datetime.now",
"pyto.__Class__",
"os.path.abspath"
] | [((2831, 2845), 'datetime.datetime.now', 'datetime.now', ([], {}), '()\n', (2843, 2845), False, 'from datetime import datetime\n'), ((3332, 3346), 'datetime.datetime.now', 'datetime.now', ([], {}), '()\n', (3344, 3346), False, 'from datetime import datetime\n'), ((3573, 3585), 'time.sleep', 'sleep', (['delay'], {}), '(delay)\n', (3578, 3585), False, 'from time import sleep\n'), ((1334, 1348), 'datetime.datetime.now', 'datetime.now', ([], {}), '()\n', (1346, 1348), False, 'from datetime import datetime\n'), ((2634, 2653), 'os.path.abspath', 'abspath', (['audio_path'], {}), '(audio_path)\n', (2641, 2653), False, 'from os.path import abspath\n'), ((3813, 3851), 'sys.excepthook', 'sys.excepthook', (['type', 'value', 'traceback'], {}), '(type, value, traceback)\n', (3827, 3851), False, 'import sys\n'), ((2516, 2543), 'pyto.__Class__', '__Class__', (['"""BackgroundTask"""'], {}), "('BackgroundTask')\n", (2525, 2543), False, 'from pyto import __Class__\n'), ((2943, 2969), 'threading.current_thread', 'threading.current_thread', ([], {}), '()\n', (2967, 2969), False, 'import threading\n')] |
import socket
import re
def get_local_adr():
    """Return this host's first 192.168.x.x IPv4 address.

    Queries the addresses bound to the local hostname (AF_INET only) and
    returns the first one in the 192.168.0.0/16 private range, or the
    sentinel string "ADDRESS_NOT_FOUND" when none exists.
    """
    address_set = socket.getaddrinfo(socket.gethostname(), None, family=2)
    for address in address_set:
        # Anchor on a literal "192.168." prefix.  The dots must be escaped:
        # in the original pattern "192.168." each unescaped "." matched any
        # character, so e.g. "192x168..." would also have matched.
        if re.match(r"192\.168\.", address[4][0]):
            return address[4][0]
    return "ADDRESS_NOT_FOUND"
| [
"socket.gethostname",
"re.match"
] | [((84, 104), 'socket.gethostname', 'socket.gethostname', ([], {}), '()\n', (102, 104), False, 'import socket\n'), ((165, 200), 're.match', 're.match', (['"""192.168."""', 'address[4][0]'], {}), "('192.168.', address[4][0])\n", (173, 200), False, 'import re\n')] |
import unittest
import warnings
from parse_python_indentation import parse_indentation
# Expected parse tree for test.data / test1.data: each node is a dict holding
# the line's text under 'key' and its indented children under 'offspring'.
good_output = [
  {'key': 'green:',
   'offspring': [
     {'key': 'follow', 'offspring': []},
     {'key': 'blue', 'offspring': []},
     {'key': 'yellow', 'offspring': []},
     {'key': 'fishing', 'offspring': []},
     {'key': 'snowman:',
      'offspring': [
        {'key': 'gardening',
         'offspring': []
        }
      ]},
     {'key': 'street:',
      'offspring': [{'key': 'great', 'offspring': []}]}]},
  {'key': 'religion', 'offspring': []},
  {'key': 'flags', 'offspring': []},
  {'key': 'houses:',
   'offspring': [{'key': 'suffering', 'offspring': []}]}
]
class ParseTests(unittest.TestCase):
    # Show full diffs when a nested structure comparison fails.
    maxDiff = None

    def test_parsing(self):
        """A correctly indented file parses into the expected tree."""
        with open("test.data", "r") as input_file:
            raw = input_file.read()
        self.assertEqual(parse_indentation(raw), good_output)

    def test_warning(self):
        """A file with two stray indentation spaces still parses, but warns."""
        with open("test1.data", "r") as input_file:
            raw = input_file.read()
        with warnings.catch_warnings(record=True) as caught:
            warnings.simplefilter("always")
            parsed = parse_indentation(raw)
        self.assertEqual(parsed, good_output)
        self.assertEqual(len(caught), 1)
        self.assertEqual(str(caught[0].message), 'Indentation with errors!')
# Allow running this test module directly (python <file>.py).
if __name__ == '__main__':
    unittest.main()
| [
"unittest.main",
"warnings.simplefilter",
"warnings.catch_warnings",
"parse_python_indentation.parse_indentation"
] | [((1416, 1431), 'unittest.main', 'unittest.main', ([], {}), '()\n', (1429, 1431), False, 'import unittest\n'), ((869, 895), 'parse_python_indentation.parse_indentation', 'parse_indentation', (['rawdata'], {}), '(rawdata)\n', (886, 895), False, 'from parse_python_indentation import parse_indentation\n'), ((1063, 1099), 'warnings.catch_warnings', 'warnings.catch_warnings', ([], {'record': '(True)'}), '(record=True)\n', (1086, 1099), False, 'import warnings\n'), ((1189, 1220), 'warnings.simplefilter', 'warnings.simplefilter', (['"""always"""'], {}), "('always')\n", (1210, 1220), False, 'import warnings\n'), ((1228, 1254), 'parse_python_indentation.parse_indentation', 'parse_indentation', (['rawdata'], {}), '(rawdata)\n', (1245, 1254), False, 'from parse_python_indentation import parse_indentation\n')] |
#!/usr/bin/python
# -*- coding: utf-8 -*-
# Copyright 2018 Red Hat, Inc. and/or its affiliates
# and other contributors as indicated by the @author tags.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# pylint: disable=unused-wildcard-import,wildcard-import,unused-import,redefined-builtin
''' os_service_catalog_facts '''
from ansible.module_utils.basic import AnsibleModule
# shade is an optional dependency: record its availability so the module can
# fail gracefully at runtime instead of crashing at import time.
try:
    import shade
    HAS_SHADE = True
except ImportError:
    HAS_SHADE = False

# Ansible module documentation (YAML consumed by ansible-doc).
DOCUMENTATION = '''
---
module: os_service_catalog_facts
short_description: Retrieve OpenStack service catalog facts
description:
    - Retrieves all the available OpenStack services
notes:
    - This module creates a new top-level C(openstack_service_catalog) fact
      which contains a dictionary of OpenStack service endpoints like
      network and load-balancers.
author:
    - "<NAME> <<EMAIL>>"
'''
RETURN = '''
openstack_service_catalog:
description: OpenStack available services.
type: dict
returned: always
sample:
alarming:
- adminURL: http://172.16.0.9:8042
id: 2c40b50da0bb44178db91c8a9a29a46e
internalURL: http://172.16.0.9:8042
publicURL: https://mycloud.org:13042
region: regionOne
cloudformation:
- adminURL: http://172.16.0.9:8000/v1
id: 46648eded04e463281a9cba7ddcc45cb
internalURL: http://172.16.0.9:8000/v1
publicURL: https://mycloud.org:13005/v1
region: regionOne
compute:
- adminURL: http://172.16.0.9:8774/v2.1
id: bff1bc5dd92842c281b2358a6d15c5bc
internalURL: http://172.16.0.9:8774/v2.1
publicURL: https://mycloud.org:13774/v2.1
region: regionOne
event:
- adminURL: http://172.16.0.9:8779
id: 608ac3666ef24f2e8f240785b8612efb
internalURL: http://172.16.0.9:8779
publicURL: https://mycloud.org:13779
region: regionOne
identity:
- adminURL: https://mycloud.org:35357
id: 4d07689ce46b4d51a01cc873bc772c80
internalURL: http://172.16.0.9:5000
publicURL: https://mycloud.org:13000
region: regionOne
image:
- adminURL: http://172.16.0.9:9292
id: 1850105115ea493eb65f3f704d421291
internalURL: http://172.16.0.9:9292
publicURL: https://mycloud.org:13292
region: regionOne
metering:
- adminURL: http://172.16.0.9:8777
id: 4cae4dcabe0a4914a6ec6dabd62490ba
internalURL: http://172.16.0.9:8777
publicURL: https://mycloud.org:13777
region: regionOne
metric:
- adminURL: http://172.16.0.9:8041
id: 29bcecf9a06f40f782f19dd7492af352
internalURL: http://172.16.0.9:8041
publicURL: https://mycloud.org:13041
region: regionOne
network:
- adminURL: http://172.16.0.9:9696
id: 5d5785c9b8174c21bfb19dc3b16c87fa
internalURL: http://172.16.0.9:9696
publicURL: https://mycloud.org:13696
region: regionOne
object-store:
- adminURL: http://172.17.0.9:8080
id: 031f1e342fdf4f25b6099d1f3b0847e3
internalURL: http://172.17.0.9:8080/v1/AUTH_6d2847d6a6414308a67644eefc7b98c7
publicURL: https://mycloud.org:13808/v1/AUTH_6d2847d6a6414308a67644eefc7b98c7
region: regionOne
orchestration:
- adminURL: http://172.16.0.9:8004/v1/6d2847d6a6414308a67644eefc7b98c7
id: 1e6cecbd15b3413d9411052c52b9d433
internalURL: http://172.16.0.9:8004/v1/6d2847d6a6414308a67644eefc7b98c7
publicURL: https://mycloud.org:13004/v1/6d2847d6a6414308a67644eefc7b98c7
region: regionOne
placement:
- adminURL: http://172.16.0.9:8778/placement
id: 1f2551e5450c4bd6a9f716f92e93a154
internalURL: http://172.16.0.9:8778/placement
publicURL: https://mycloud.org:13778/placement
region: regionOne
volume:
- adminURL: http://172.16.0.9:8776/v1/6d2847d6a6414308a67644eefc7b98c7
id: 38e369a0e17346fe8e37a20146e005ef
internalURL: http://172.16.0.9:8776/v1/6d2847d6a6414308a67644eefc7b98c7
publicURL: https://mycloud.org:13776/v1/6d2847d6a6414308a67644eefc7b98c7
region: regionOne
volumev2:
- adminURL: http://172.16.0.9:8776/v2/6d2847d6a6414308a67644eefc7b98c7
id: <KEY>
internalURL: http://172.16.0.9:8776/v2/6d2847d6a6414308a67644eefc7b98c7
publicURL: https://mycloud.org:13776/v2/6d2847d6a6414308a67644eefc7b98c7
region: regionOne
volumev3:
- adminURL: http://172.16.0.9:8776/v3/6d2847d6a6414308a67644eefc7b98c7
id: <KEY>
internalURL: http://172.16.0.9:8776/v3/6d2847d6a6414308a67644eefc7b98c7
publicURL: https://mycloud.org:13776/v3/6d2847d6a6414308a67644eefc7b98c7
region: regionOne
'''
def main():
    """Ansible entry point: publish the OpenStack service catalog as a fact.

    The module takes no options and modifies nothing, so check mode is
    supported. Any failure to reach the cloud or read its catalog is
    reported via ``fail_json`` (which exits, so execution only continues
    on success).
    """
    module = AnsibleModule(argument_spec={}, supports_check_mode=True)

    if not HAS_SHADE:
        module.fail_json(msg='shade is required for this module')

    # Each SDK call below can raise arbitrary exceptions; translate them
    # into clean module failures instead of tracebacks.
    # pylint: disable=broad-except
    try:
        cloud = shade.openstack_cloud()
    except Exception:
        module.fail_json(msg='Failed to connect to the cloud')
    try:
        catalog = cloud.cloud_config.get_service_catalog()
    except Exception:
        module.fail_json(msg='Failed to retrieve the service catalog')
    try:
        endpoints = catalog.get_endpoints()
    except Exception:
        module.fail_json(msg='Failed to retrieve the service catalog '
                         'endpoints')

    facts = {'openstack_service_catalog': endpoints}
    module.exit_json(changed=False, ansible_facts=facts)
# Standard Ansible module entry point: run when invoked as a script.
if __name__ == '__main__':
    main()
| [
"ansible.module_utils.basic.AnsibleModule",
"shade.openstack_cloud"
] | [((5349, 5406), 'ansible.module_utils.basic.AnsibleModule', 'AnsibleModule', ([], {'argument_spec': '{}', 'supports_check_mode': '(True)'}), '(argument_spec={}, supports_check_mode=True)\n', (5362, 5406), False, 'from ansible.module_utils.basic import AnsibleModule\n'), ((5522, 5545), 'shade.openstack_cloud', 'shade.openstack_cloud', ([], {}), '()\n', (5543, 5545), False, 'import shade\n')] |
# Copyright 2021 The TensorFlow Probability Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# pylint: disable=line-too-long
r"""Synthetic dataset generated from the PlasmaSpectroscopy model.
This was generated using the following snippet:
```python
import tensorflow.compat.v2 as tf
tf.enable_v2_behavior()
import tensorflow_probability as tfp
from inference_gym.internal import array_to_source
from inference_gym import using_tensorflow as gym
import numpy as np
num_sensors = 40
num_wavelengths = 40
wavelengths = np.linspace(0.01, 0.2, num_wavelengths)
center_wavelength = wavelengths.mean()
model = gym.targets.PlasmaSpectroscopy(
tf.zeros((num_wavelengths, num_sensors)),
wavelengths=wavelengths,
center_wavelength=center_wavelength)
sample, dataset = model._sample_dataset(seed=(0, 8))
sources = []
for k, v in sample._asdict().items():
sources.append(
array_to_source.array_to_source(
k.upper(), v))
for k, v in dataset.items():
sources.append(
array_to_source.array_to_source(
k.upper(), v))
with open('/tmp/synthetic_plasma_spectroscopy.py', 'w') as f:
f.write("\n\n".join(sources))
```
Note that the final `_sample_dataset` call is not reproducible across
software versions, hence the output is checked in.
"""
import numpy as np
# `amplitude` latent sampled by the generating snippet in the module
# docstring (PlasmaSpectroscopy, seed=(0, 8)); values are checked in verbatim.
AMPLITUDE = np.array([
    1.4802036, 1.8915913, -0.011120212, 1.1328301,
    1.2841645, 0.6033605, -1.887041, -2.012894,
    0.046582267, 1.5555662, 0.4305847, -1.7179363,
    -1.1399889, -0.4432812, -1.4721184, 0.35457477,
]).reshape((16,))
# `temperature` latent sampled by the generating snippet in the module
# docstring; values are checked in verbatim.
TEMPERATURE = np.array([
    1.2321296, -0.020694781, -1.3441145, -0.51342154,
    -0.6282792, -0.22180416, -1.0089059, 1.4475185,
    -1.8519154, 0.5540126, -1.3644233, 1.5542297,
    -0.4033564, -0.029513652, -0.14812116, 0.93214256,
]).reshape((16,))
# `velocity` latent sampled by the generating snippet in the module
# docstring; values are checked in verbatim.
VELOCITY = np.array([
    0.010279292, -1.6109133, 0.85784495, 0.8826037,
    0.19365458, -0.36963812, 1.2059057, -0.93545884,
    0.38819882, 1.6983186, -1.8130875, 0.94406796,
    -0.79738003, -1.0478632, -0.38848934, -0.48529625,
]).reshape((16,))
# Scalar `shift` latent (0-d array) from the generating snippet in the
# module docstring.
SHIFT = np.array([-0.5514385]).reshape(())
# 40 evenly spaced wavelengths; per the generating snippet in the module
# docstring this is np.linspace(0.01, 0.2, 40), checked in verbatim.
WAVELENGTHS = np.array([
    0.01, 0.014871794871794873, 0.019743589743589744, 0.024615384615384615,
    0.029487179487179487, 0.03435897435897436, 0.039230769230769236,
    0.04410256410256411, 0.04897435897435898, 0.05384615384615385,
    0.05871794871794872, 0.06358974358974359, 0.06846153846153846,
    0.07333333333333333, 0.0782051282051282, 0.08307692307692308,
    0.08794871794871795, 0.09282051282051282, 0.09769230769230769,
    0.10256410256410256, 0.10743589743589743, 0.1123076923076923,
    0.11717948717948717, 0.12205128205128205, 0.12692307692307694,
    0.13179487179487182, 0.1366666666666667, 0.14153846153846156,
    0.14641025641025643, 0.1512820512820513, 0.15615384615384617,
    0.16102564102564104, 0.1658974358974359, 0.17076923076923078,
    0.17564102564102566, 0.18051282051282053, 0.1853846153846154,
    0.19025641025641027, 0.19512820512820514, 0.2,
]).reshape((40,))
# Scalar center wavelength (0-d array); per the generating snippet this is
# the mean of WAVELENGTHS.
CENTER_WAVELENGTH = np.array([0.10500000000000001]).reshape(())
MEASUREMENTS = np.array([
-0.66101485,
0.31644753,
-0.5896422,
0.4764485,
2.1545932,
15.793148,
8.2264805,
6.457074,
5.7062893,
6.1811686,
8.777044,
6.9074125,
7.9522552,
7.701313,
8.559349,
8.296498,
6.1969037,
6.4804926,
6.8852997,
8.830744,
14.376627,
0.54612935,
0.124028,
0.44405863,
0.5131382,
0.5987899,
0.008983987,
-0.24756075,
0.7618118,
-0.21146192,
0.4546959,
0.09494688,
-0.26813537,
0.5798886,
-0.10784844,
0.18372172,
0.8161483,
-0.3787802,
0.61460984,
-0.41957632,
0.13647377,
-0.3481221,
0.03326019,
1.7144626,
3.8620698,
14.40822,
9.046495,
7.6838465,
7.2554746,
8.057631,
11.189637,
9.038466,
8.125581,
8.294034,
10.172681,
11.90528,
7.1925435,
6.708079,
7.6085744,
9.414239,
14.608672,
1.5265317,
1.09792,
0.29970562,
0.29824358,
0.36030084,
-0.37960574,
0.47860667,
0.91203105,
-0.6904322,
-0.2722036,
0.23733543,
-0.6658274,
0.62095886,
0.73466265,
-0.8475226,
-0.1700871,
0.9261157,
0.422822,
0.32836267,
0.58122945,
-0.83155084,
-0.20049855,
-0.040298104,
4.014356,
16.160791,
7.2828264,
7.3377733,
6.665611,
8.653453,
11.973017,
9.656379,
10.9801235,
9.05112,
10.565474,
11.942185,
7.2904882,
7.4630857,
6.514908,
9.644132,
14.969957,
0.07107994,
0.11467081,
0.92357284,
0.04355552,
0.6726098,
-0.15279476,
0.713554,
0.5466241,
-0.38109347,
0.5590394,
0.08306945,
0.9525252,
0.6713458,
0.51892877,
-0.1279359,
-0.15663871,
0.020156374,
-0.060285714,
-1.0264076,
-0.53699505,
-0.9786586,
0.015289649,
1.5724823,
4.0689135,
13.646254,
8.417458,
7.3368583,
6.966266,
8.73208,
14.498494,
10.2102165,
11.423929,
11.351579,
12.9430065,
15.01266,
9.051174,
7.077483,
6.785291,
9.483119,
15.76488,
1.1677985,
1.6693239,
-0.21604359,
0.32284033,
-0.22243214,
0.60323435,
-0.11199745,
0.29957047,
0.006062749,
0.7996792,
0.3094816,
-0.7718058,
0.503415,
0.07231447,
-0.2853677,
0.4330218,
0.844616,
-0.19574685,
-0.3879851,
0.5901966,
0.051313907,
-0.29432508,
1.2537544,
3.1426716,
14.615546,
8.347049,
7.4366584,
6.4491363,
9.865336,
15.843064,
12.469691,
11.894229,
12.133173,
14.63979,
16.16245,
9.504371,
8.017702,
7.867693,
9.518961,
14.380217,
0.66953653,
0.60293055,
0.00082825124,
-0.28320992,
0.8367502,
0.12513764,
0.22053392,
-0.10229007,
-0.20082277,
0.63717407,
0.32739908,
-0.093239225,
-0.80318755,
0.9917766,
0.24838758,
-0.07330545,
0.15537623,
0.09008534,
-0.06607497,
1.0962121,
0.55644095,
0.6913326,
0.9021442,
3.8921309,
14.102233,
7.184174,
7.315026,
7.334084,
10.787065,
19.485243,
13.958044,
14.3500805,
13.616628,
15.63192,
17.07027,
9.131023,
6.8167133,
6.970449,
8.922994,
14.361785,
1.7793398,
0.94775784,
0.105669454,
-0.18747061,
0.6676264,
-0.3883816,
-0.6202498,
-0.0833843,
-0.5216094,
1.1268811,
-0.59910476,
0.39042526,
0.47714886,
-0.7111677,
-0.5756576,
0.9333002,
0.1010186,
0.13677923,
-0.75147396,
1.2583244,
-0.23063457,
0.7901664,
0.24705392,
3.6259048,
12.530731,
6.9297647,
7.079164,
7.2256374,
11.940973,
20.025602,
14.700426,
13.519883,
14.241193,
17.55714,
17.386055,
10.167002,
7.536337,
7.0136056,
9.326938,
12.228463,
0.17775005,
0.8319777,
-0.8991761,
-0.01412341,
0.61705685,
-0.14188325,
-0.41435227,
-0.316557,
-0.5893145,
-0.010637931,
0.20675054,
0.44020182,
-0.7080041,
0.16052538,
-0.48142046,
0.9052833,
0.432698,
0.03338314,
0.35594848,
1.1689888,
0.36019892,
0.23971666,
1.4662509,
3.3352752,
11.360069,
8.300535,
7.5611286,
7.2111707,
17.327162,
20.148909,
17.380922,
17.596447,
14.160338,
19.188683,
17.219112,
10.499862,
8.309862,
6.1963353,
7.3864193,
12.878287,
1.4184926,
1.7496321,
-0.082713336,
0.23216072,
0.20258206,
1.0141679,
0.14271286,
-0.29340488,
-0.055605985,
-0.5336929,
-0.54352623,
0.19902669,
0.12139763,
-0.018293247,
-0.20558693,
-0.8606704,
0.22833318,
0.4463366,
0.20494421,
0.7066752,
-0.62247527,
0.117985666,
1.831157,
3.299585,
9.63925,
7.483565,
7.1289496,
6.4751153,
15.985568,
21.507505,
18.539736,
16.699535,
16.726501,
19.698357,
22.443224,
11.952675,
7.005475,
6.2864413,
8.778635,
10.89195,
0.66351974,
1.1440128,
-0.25076824,
0.66586065,
1.0526825,
0.015522989,
0.07891381,
1.104366,
0.7747889,
0.15351877,
-0.12182697,
-0.59052014,
-0.12581429,
0.5053382,
0.17305401,
0.67090386,
1.036633,
0.05909565,
0.28418896,
0.86726683,
0.1763895,
0.33444333,
1.7197226,
2.5705223,
9.934082,
6.614648,
5.9702163,
7.0940704,
18.322672,
24.886862,
18.648033,
19.174364,
17.071978,
18.935146,
20.495438,
13.39125,
7.1744776,
5.476832,
7.2689962,
10.46958,
1.1804211,
1.0994785,
0.64040864,
0.021063149,
0.75519574,
0.40024444,
-0.48553574,
0.87461084,
-0.23675112,
0.1914608,
-0.49892142,
0.2618199,
0.6261685,
-1.4913763,
0.41756257,
0.5763335,
-0.45616063,
0.38227928,
-0.6692691,
1.8232274,
0.7977414,
0.40125495,
2.787939,
3.2074018,
8.831141,
6.6602535,
7.500632,
8.793667,
18.995548,
23.698793,
18.186054,
17.543282,
18.392523,
20.788607,
24.634804,
14.188387,
8.168461,
5.5740485,
6.8008204,
8.531001,
1.4529983,
2.276989,
1.0289037,
0.9468033,
-0.038641334,
-0.39401633,
-1.1387177,
0.49660775,
0.5171432,
-0.6254447,
1.2226907,
-0.13812594,
0.11419458,
-0.36041245,
0.16572447,
-0.2501292,
-0.95744544,
0.6987992,
0.3099944,
1.108943,
0.41807377,
1.350997,
1.2673455,
3.2821457,
8.0927515,
5.9851384,
4.8361425,
8.642136,
20.54146,
23.320255,
20.936903,
19.881096,
18.084406,
20.986282,
22.538109,
15.849695,
7.59143,
5.759286,
7.9955835,
7.542832,
1.5869404,
2.191163,
-0.0054766536,
0.38372415,
1.4580531,
-0.6341528,
-0.20307654,
-0.82046396,
0.30573404,
0.59632486,
-0.12896755,
-0.42806864,
-0.47942856,
-0.7036555,
0.075889945,
0.29308736,
-1.4974035,
-0.036708307,
-0.43896213,
0.54672736,
1.3562044,
1.5058006,
2.0175235,
3.2622445,
7.817541,
6.1968045,
5.7298784,
8.535798,
22.878216,
23.569859,
21.438442,
20.779306,
18.338245,
23.335554,
23.656643,
16.534071,
7.0056953,
5.3699074,
6.2035737,
6.91238,
1.8461741,
2.0328891,
0.6284174,
0.07324934,
0.72266495,
0.43248987,
0.55657876,
-0.36850226,
0.2892055,
0.120979175,
-0.3255677,
0.18210961,
-0.13677588,
-0.79952997,
-0.16948017,
0.27382505,
0.011414817,
-0.002753294,
0.1875501,
1.7294772,
0.86453336,
0.8789885,
2.0237687,
2.686733,
7.0931683,
6.7965593,
5.703301,
9.106176,
19.852842,
22.134148,
24.209602,
20.48003,
19.87589,
22.650255,
24.67572,
17.161873,
7.185769,
5.12218,
5.9893394,
5.907269,
2.1844404,
1.9687537,
1.0286644,
0.052360654,
1.7644687,
0.5339646,
-0.53046066,
-0.2281848,
-1.2462859,
0.6778776,
0.5408989,
-0.14820653,
0.38658077,
-0.65733767,
0.014478714,
0.45866382,
0.47466084,
0.48330665,
0.52647215,
1.6572766,
-0.093874216,
1.0939939,
2.8252633,
3.250628,
7.286972,
5.736179,
5.5879693,
9.545634,
22.925808,
23.213871,
23.39594,
21.748808,
22.024412,
24.974943,
23.57301,
18.065563,
8.397812,
4.8709254,
7.626314,
4.6410003,
1.8595266,
3.0831103,
1.4402436,
1.2672244,
1.312456,
-0.18201214,
0.21097422,
-0.026861114,
0.18476872,
0.7252849,
-0.002409873,
-0.29303908,
1.3546691,
-0.04322617,
-0.053203642,
-0.30067968,
-0.12050266,
-0.5528519,
0.057745364,
1.3053449,
1.8519605,
1.8503615,
2.5469666,
4.2060847,
5.5301046,
7.0553675,
5.9386334,
11.875089,
23.438046,
20.363987,
23.725615,
20.967691,
21.432257,
24.202627,
19.774887,
18.783188,
7.98809,
6.2239876,
7.760503,
5.212336,
2.9735184,
2.7213335,
2.0156252,
1.814288,
2.2770615,
0.01533184,
0.58220863,
-0.49351138,
0.31417957,
-0.36469758,
0.45743746,
0.66627234,
0.3081961,
0.828259,
-0.31382263,
0.26520026,
0.22944771,
-0.6709603,
-0.07570245,
1.5327783,
1.7784487,
2.6468341,
3.198592,
3.7656205,
5.9252257,
6.9020658,
4.9581833,
12.047751,
22.348654,
20.17518,
24.174393,
21.535011,
19.05106,
22.163195,
21.497072,
18.43445,
8.682917,
5.3132563,
7.030179,
3.717919,
2.0626392,
2.4575338,
2.2717822,
0.8625143,
2.4770658,
-0.786061,
1.2881083,
-0.2518999,
0.72405684,
-0.122574806,
-0.34197915,
0.13918422,
0.26873538,
-0.47515658,
-0.54810023,
0.89566797,
-0.54384357,
-0.12311963,
0.567525,
2.7046611,
1.5512958,
1.7786896,
3.8791292,
3.9559023,
4.788476,
8.228316,
5.3946,
12.281274,
21.967098,
20.923243,
23.913458,
20.710938,
19.420635,
25.138704,
18.289383,
19.177135,
8.415327,
4.8929396,
8.965305,
4.3885813,
3.4578655,
3.0384607,
1.5863328,
1.91974,
2.4258208,
0.5892152,
0.048560977,
-0.13528748,
-0.21397328,
0.16264682,
-0.57951355,
-0.40301454,
0.21641892,
-0.22450455,
0.38177252,
-0.967473,
-0.35485935,
0.062246032,
-0.03395147,
2.1338463,
1.9084859,
3.1863737,
1.9375713,
3.4518764,
6.570703,
6.878443,
5.679476,
13.351213,
22.931889,
19.282558,
22.36135,
23.796984,
21.032475,
23.09803,
20.966232,
20.72223,
6.7338567,
6.4885483,
7.190284,
4.9310346,
3.1236634,
3.5150487,
2.9693668,
2.2454295,
1.82249,
-0.09966546,
0.72314006,
-0.79027426,
0.41793302,
-0.14793015,
0.45988762,
0.8456978,
-0.5273398,
0.1830612,
-1.0828326,
-1.0117317,
-0.3019783,
0.17001551,
-0.62556803,
2.961217,
2.6823378,
2.9682546,
5.2445164,
4.9527783,
6.309333,
7.7392774,
6.2129936,
15.35368,
20.683935,
20.589102,
22.10926,
20.185204,
20.562426,
22.645317,
18.869568,
20.659521,
8.880328,
6.4410696,
9.769155,
5.5935693,
5.527752,
4.5683465,
3.4019177,
3.3163903,
2.244741,
0.38402623,
0.2960868,
-0.4828044,
0.13759217,
0.25681636,
0.11657055,
-0.330115,
0.4011577,
-0.7654019,
0.14916949,
-0.6228205,
-0.96823233,
-0.022868,
-0.49047035,
3.20636,
2.6912642,
2.9050756,
4.912674,
5.7441964,
6.489336,
9.632326,
6.2825303,
16.68777,
21.077969,
17.172966,
18.92938,
23.38385,
20.251026,
22.16378,
18.001736,
20.24098,
11.019654,
6.6073513,
8.655663,
6.298364,
6.4654784,
3.6983974,
3.1087956,
2.226927,
2.6668777,
-0.35526595,
1.4488825,
0.20488043,
0.047601122,
-0.6924504,
0.57495445,
0.5399022,
-0.47663862,
0.8161736,
-0.36598107,
-0.59101355,
0.20327158,
0.41677478,
0.27029967,
3.7847342,
3.2484818,
3.747693,
4.7734656,
6.716756,
8.185982,
9.418276,
7.493696,
14.704602,
17.729408,
17.48148,
19.855602,
20.371563,
18.5821,
18.155266,
16.968113,
17.100256,
10.015516,
7.8247633,
8.993816,
6.4911056,
6.2132425,
4.3434267,
3.7000012,
3.7377622,
3.1024928,
-0.30869377,
0.051026687,
-0.34078225,
0.7479868,
0.03696166,
-0.75611556,
1.1542099,
-0.028129257,
0.08181842,
0.09559424,
0.8364861,
0.096545294,
0.5584201,
-0.5194905,
3.589691,
4.05453,
3.794124,
4.707637,
9.231918,
8.564278,
9.2333975,
7.006125,
16.20831,
19.324417,
15.819074,
19.356344,
17.93927,
18.384487,
18.001207,
16.142382,
21.02356,
9.986794,
6.614442,
10.657583,
6.6237283,
8.433239,
4.4907804,
4.2819304,
3.7269611,
3.5132716,
0.4662154,
0.30799574,
0.96793664,
-0.23279454,
-0.65458816,
0.3362532,
-0.25408295,
0.06732974,
0.4873681,
0.51199776,
0.14874719,
-0.29994798,
0.4666868,
0.33490536,
3.3489285,
2.9599032,
3.7671084,
5.274986,
11.143537,
9.2554245,
9.07235,
9.138557,
17.255503,
18.355011,
15.364281,
17.336935,
18.85955,
17.050003,
15.608138,
15.812602,
18.231024,
11.6336155,
6.9478188,
11.149977,
7.419574,
10.250601,
4.7022414,
3.971905,
4.7929826,
3.3438401,
-0.39000547,
-0.28059074,
0.6398243,
0.54544014,
0.6069346,
-0.17257981,
0.22857136,
0.5565434,
0.004583537,
-1.6335539,
-0.8888735,
-0.51765877,
0.25269827,
-0.01876194,
3.6656997,
3.8518455,
5.484056,
6.189166,
12.860901,
9.803692,
10.184517,
8.937886,
17.70772,
18.956602,
15.036017,
18.585073,
18.892986,
18.184309,
15.378883,
13.1691475,
16.713081,
11.373385,
10.050861,
11.757488,
10.44355,
12.29941,
4.694755,
5.29064,
3.8482742,
3.204164,
0.0923521,
0.023937136,
0.1471634,
0.6328977,
0.086753555,
0.4752982,
-0.6725007,
0.39593527,
0.22832835,
-0.27118513,
-0.8305444,
0.61332023,
-0.46385112,
-0.07130288,
3.392937,
5.612763,
5.2056,
5.706025,
15.220109,
11.131699,
11.811647,
9.684384,
18.768026,
16.84839,
13.052551,
16.32535,
17.554602,
17.395172,
14.127713,
12.6871,
17.62177,
11.645812,
8.629343,
11.129438,
11.581531,
14.195255,
4.8469067,
5.1938415,
4.0862703,
3.181031,
-1.0452468,
-0.25019166,
-0.7914238,
0.12144237,
-0.41462633,
0.54280686,
-0.69631076,
0.3511648,
0.004874259,
-0.06835556,
0.8735261,
0.24838078,
-0.31527227,
0.52716863,
3.9399889,
6.0550613,
6.129095,
6.861085,
18.186186,
11.700109,
9.944186,
8.473949,
16.194746,
15.487744,
11.69865,
15.148699,
17.62606,
18.724825,
14.773164,
12.397501,
17.29195,
12.904611,
10.236364,
9.858109,
12.551205,
17.244278,
5.081826,
5.861555,
4.532901,
2.9011462,
-0.6339103,
-0.14527631,
-0.34604034,
0.16419859,
-0.21205892,
1.0102317,
-0.6850754,
-0.35831228,
0.2243401,
-0.12707797,
0.12315286,
0.75053287,
-0.30611196,
0.946708,
3.2013948,
5.563331,
4.7585716,
7.213843,
20.686522,
11.607341,
12.30799,
10.50174,
15.599098,
14.504682,
13.629604,
13.69594,
17.019728,
16.432478,
13.931328,
13.392891,
16.40223,
12.716988,
10.136288,
11.304484,
14.544636,
18.359613,
5.5700507,
5.302722,
5.3971443,
4.0632043,
0.34419727,
-0.43536162,
0.2166448,
-0.95898896,
0.54851377,
0.7104762,
0.73580873,
-0.025371978,
-0.42447037,
-0.055623855,
-0.057257153,
-0.042765763,
-0.32910374,
0.110769786,
4.9113693,
6.042119,
5.789901,
8.213889,
21.399662,
13.620898,
12.268165,
12.022924,
15.812675,
14.541431,
11.235446,
13.432023,
16.380638,
17.424328,
13.075844,
13.108509,
16.125572,
12.70376,
9.833503,
12.167731,
15.966658,
19.35662,
4.726227,
5.754112,
5.277654,
3.513394,
0.27682012,
-0.6424214,
0.63972783,
0.052361738,
0.6900285,
0.8120001,
0.13217215,
-0.06418637,
-0.34938893,
-0.1332957,
-0.14414565,
0.13367409,
0.2113514,
0.013457297,
5.1611977,
5.566288,
5.6893077,
6.982988,
20.4595,
14.453565,
13.59946,
10.934562,
16.137613,
14.927114,
11.994792,
13.434463,
17.021969,
17.274439,
13.322607,
11.919087,
16.481926,
12.076119,
10.847066,
11.398886,
16.077639,
19.727343,
4.5308523,
6.236413,
4.8869467,
3.9474933,
0.5430834,
-0.16916445,
1.1437705,
0.16070405,
0.31188658,
0.8880989,
-0.14495048,
-0.5266939,
0.22656989,
0.3505556,
0.015732061,
-0.005636345,
-0.56870633,
0.40287915,
4.4800043,
4.970619,
4.5086727,
7.2337227,
21.180979,
13.984755,
12.418574,
10.579776,
14.925623,
11.359912,
10.660921,
12.467203,
17.208267,
17.148045,
11.586628,
11.8577,
13.493896,
13.254265,
10.851606,
13.149869,
17.053873,
19.849815,
4.9660897,
5.8460274,
3.998473,
3.6802619,
0.8031087,
-0.013905935,
0.3503995,
0.31186494,
-0.038673762,
-0.07608058,
0.21588215,
-0.23191574,
-0.3952367,
-0.09744672,
0.10716237,
-1.3977432,
-0.2775279,
0.28267142,
3.4341362,
5.5165367,
4.798283,
5.5223513,
23.267078,
15.076336,
13.030845,
10.9562845,
13.846566,
11.140822,
10.528686,
12.319912,
15.81127,
17.356304,
10.330765,
10.917309,
11.82135,
11.22828,
9.395469,
12.859789,
15.528548,
18.173409,
4.9549546,
7.068773,
5.830448,
2.882567,
-0.47524917,
-0.3299339,
0.19532575,
-0.5605442,
-0.05505767,
-0.22165492,
-0.4325593,
0.13398468,
-0.34254703,
0.0140561955,
-0.31874263,
-0.14240773,
-0.91078305,
0.69452536,
4.23155,
5.7011547,
6.0003905,
6.377488,
20.312622,
13.978043,
11.040157,
11.176402,
13.108543,
9.652381,
9.632209,
11.781593,
14.856762,
15.745179,
9.215103,
9.966311,
12.876652,
11.37008,
10.591258,
10.1424675,
14.367625,
19.73172,
3.84762,
7.103483,
3.7233605,
2.376824,
0.5252924,
0.38380843,
0.99321234,
-0.46900645,
0.12149067,
0.42257598,
0.0632253,
-0.6670193,
0.03464376,
0.452787,
0.29236665,
-0.017891373,
-0.075127214,
0.9828477,
2.3365817,
5.2860856,
4.3626456,
5.785785,
20.600492,
12.966171,
11.047343,
9.063554,
10.454045,
10.47048,
9.218836,
11.104739,
15.136548,
14.689532,
10.122101,
9.4212675,
11.134829,
8.617753,
9.327736,
11.278048,
13.085438,
18.43459,
3.9763334,
5.9072723,
3.9930198,
3.4963682,
0.2813723,
1.0457343,
0.31889322,
0.37867522,
1.2037315,
-0.47904515,
0.582204,
0.68306595,
-0.088313825,
-0.107233785,
-0.53984404,
0.39104667,
1.1425363,
0.51777375,
2.9267018,
5.183814,
4.495046,
4.6087675,
18.143732,
12.06679,
8.621597,
7.8071413,
9.6548195,
8.168409,
7.199488,
7.962524,
13.9421425,
12.19501,
8.027851,
8.022394,
8.449041,
8.428407,
7.2122917,
9.045476,
12.2283,
16.851568,
4.1475954,
5.7582254,
3.977257,
1.8516432,
-0.32922924,
-0.12237206,
-0.072756164,
-0.6167613,
0.5225413,
0.37072095,
-0.6287377,
-0.7166235,
-0.37311992,
0.81874573,
0.17337193,
0.17729722,
0.40824133,
-0.3479744,
2.9783738,
4.5450144,
3.9617758,
4.9179983,
15.7159395,
10.0808935,
7.922992,
6.9472337,
9.000638,
7.62391,
6.7539964,
8.514194,
12.004702,
12.731859,
7.173314,
7.301387,
7.240425,
7.4015136,
7.516923,
8.6178665,
9.913477,
14.592376,
4.5969114,
5.9667635,
2.2334886,
2.1020658,
-0.9194653,
0.43381432,
-0.74259335,
-0.8438142,
0.01724637,
-0.6245163,
0.34715256,
-0.24820891,
-0.6074153,
-0.066010244,
-0.05560958,
-0.32758415,
0.3784681,
-0.09629097,
2.7877793,
4.203103,
3.26329,
4.44158,
12.650619,
8.000976,
5.2695656,
5.8276386,
7.0067124,
6.36843,
5.256174,
7.340733,
9.230904,
13.014863,
5.453347,
6.2923303,
6.518343,
6.5802903,
5.615034,
7.000242,
8.82858,
11.683347,
3.8504424,
4.365258,
3.2354295,
2.2202947,
0.5615039,
0.41533247,
0.21722497,
0.3176445,
0.2709266,
-0.2929376,
0.090651914,
-0.32017383,
-0.30647907,
0.15408067,
-0.3604456,
0.6241022,
0.42943946,
0.30790985,
2.0098479,
3.1669462,
3.8518548,
4.0607076,
11.639872,
5.7104745,
7.125849,
5.09103,
5.6111135,
3.951972,
4.0356493,
7.02897,
11.430392,
11.738871,
4.115266,
5.621048,
5.3278913,
5.120655,
5.990115,
5.7664003,
5.7767644,
9.013329,
2.9515538,
5.6055756,
4.1827626,
1.7799046,
-0.21542077,
0.24031225,
-0.6824815,
-0.6190339,
0.6256524,
-0.48574805,
0.09997501,
0.3266095,
0.07135873,
-0.3254111,
-0.047491744,
-0.014772129,
-0.38849118,
0.286563,
2.9551277,
3.957588,
3.0914695,
3.1707056,
8.462824,
4.728864,
5.0381837,
4.0804534,
5.1110387,
4.62399,
4.415538,
6.1308045,
10.654469,
10.723281,
4.4972973,
3.627521,
3.8499038,
4.373936,
4.0010695,
4.3314424,
6.3237967,
7.2798166,
2.3315697,
4.04032,
3.2531312,
2.022844,
-0.5356632,
0.52645034,
0.11135009,
-0.26490784,
0.39241284,
0.13336958,
-0.15545088,
-0.048340384,
0.6705195,
-0.14051451,
-0.7617515,
0.11379189,
0.21909207,
0.63809645,
1.5451268,
4.243852,
3.2245193,
3.3400161,
6.511011,
4.033045,
2.8604522,
3.6116364,
3.5580635,
3.1904101,
2.9593391,
4.813459,
8.871713,
8.875507,
2.922824,
2.6118903,
3.5907378,
2.6278322,
3.5242443,
3.0563798,
4.969574,
5.5496926,
3.3797112,
3.520721,
2.3572729,
1.7771024,
-0.43368375,
-0.6439688,
-0.56648374,
0.25869504,
-0.13318418,
-0.25542453,
-1.2330167,
0.34627095,
1.5127228,
-0.6055812,
0.6232876,
0.23605451,
-0.5616809,
0.500821,
]).reshape((40, 40))
| [
"numpy.array"
] | [((1821, 2027), 'numpy.array', 'np.array', (['[1.4802036, 1.8915913, -0.011120212, 1.1328301, 1.2841645, 0.6033605, -\n 1.887041, -2.012894, 0.046582267, 1.5555662, 0.4305847, -1.7179363, -\n 1.1399889, -0.4432812, -1.4721184, 0.35457477]'], {}), '([1.4802036, 1.8915913, -0.011120212, 1.1328301, 1.2841645, \n 0.6033605, -1.887041, -2.012894, 0.046582267, 1.5555662, 0.4305847, -\n 1.7179363, -1.1399889, -0.4432812, -1.4721184, 0.35457477])\n', (1829, 2027), True, 'import numpy as np\n'), ((2116, 2331), 'numpy.array', 'np.array', (['[1.2321296, -0.020694781, -1.3441145, -0.51342154, -0.6282792, -0.22180416,\n -1.0089059, 1.4475185, -1.8519154, 0.5540126, -1.3644233, 1.5542297, -\n 0.4033564, -0.029513652, -0.14812116, 0.93214256]'], {}), '([1.2321296, -0.020694781, -1.3441145, -0.51342154, -0.6282792, -\n 0.22180416, -1.0089059, 1.4475185, -1.8519154, 0.5540126, -1.3644233, \n 1.5542297, -0.4033564, -0.029513652, -0.14812116, 0.93214256])\n', (2124, 2331), True, 'import numpy as np\n'), ((2417, 2632), 'numpy.array', 'np.array', (['[0.010279292, -1.6109133, 0.85784495, 0.8826037, 0.19365458, -0.36963812, \n 1.2059057, -0.93545884, 0.38819882, 1.6983186, -1.8130875, 0.94406796, \n -0.79738003, -1.0478632, -0.38848934, -0.48529625]'], {}), '([0.010279292, -1.6109133, 0.85784495, 0.8826037, 0.19365458, -\n 0.36963812, 1.2059057, -0.93545884, 0.38819882, 1.6983186, -1.8130875, \n 0.94406796, -0.79738003, -1.0478632, -0.38848934, -0.48529625])\n', (2425, 2632), True, 'import numpy as np\n'), ((2715, 2737), 'numpy.array', 'np.array', (['[-0.5514385]'], {}), '([-0.5514385])\n', (2723, 2737), True, 'import numpy as np\n'), ((2773, 3651), 'numpy.array', 'np.array', (['[0.01, 0.014871794871794873, 0.019743589743589744, 0.024615384615384615, \n 0.029487179487179487, 0.03435897435897436, 0.039230769230769236, \n 0.04410256410256411, 0.04897435897435898, 0.05384615384615385, \n 0.05871794871794872, 0.06358974358974359, 0.06846153846153846, \n 0.07333333333333333, 
0.0782051282051282, 0.08307692307692308, \n 0.08794871794871795, 0.09282051282051282, 0.09769230769230769, \n 0.10256410256410256, 0.10743589743589743, 0.1123076923076923, \n 0.11717948717948717, 0.12205128205128205, 0.12692307692307694, \n 0.13179487179487182, 0.1366666666666667, 0.14153846153846156, \n 0.14641025641025643, 0.1512820512820513, 0.15615384615384617, \n 0.16102564102564104, 0.1658974358974359, 0.17076923076923078, \n 0.17564102564102566, 0.18051282051282053, 0.1853846153846154, \n 0.19025641025641027, 0.19512820512820514, 0.2]'], {}), '([0.01, 0.014871794871794873, 0.019743589743589744, \n 0.024615384615384615, 0.029487179487179487, 0.03435897435897436, \n 0.039230769230769236, 0.04410256410256411, 0.04897435897435898, \n 0.05384615384615385, 0.05871794871794872, 0.06358974358974359, \n 0.06846153846153846, 0.07333333333333333, 0.0782051282051282, \n 0.08307692307692308, 0.08794871794871795, 0.09282051282051282, \n 0.09769230769230769, 0.10256410256410256, 0.10743589743589743, \n 0.1123076923076923, 0.11717948717948717, 0.12205128205128205, \n 0.12692307692307694, 0.13179487179487182, 0.1366666666666667, \n 0.14153846153846156, 0.14641025641025643, 0.1512820512820513, \n 0.15615384615384617, 0.16102564102564104, 0.1658974358974359, \n 0.17076923076923078, 0.17564102564102566, 0.18051282051282053, \n 0.1853846153846154, 0.19025641025641027, 0.19512820512820514, 0.2])\n', (2781, 3651), True, 'import numpy as np\n'), ((3792, 3823), 'numpy.array', 'np.array', (['[0.10500000000000001]'], {}), '([0.10500000000000001])\n', (3800, 3823), True, 'import numpy as np\n'), ((3860, 23159), 'numpy.array', 'np.array', (['[-0.66101485, 0.31644753, -0.5896422, 0.4764485, 2.1545932, 15.793148, \n 8.2264805, 6.457074, 5.7062893, 6.1811686, 8.777044, 6.9074125, \n 7.9522552, 7.701313, 8.559349, 8.296498, 6.1969037, 6.4804926, \n 6.8852997, 8.830744, 14.376627, 0.54612935, 0.124028, 0.44405863, \n 0.5131382, 0.5987899, 0.008983987, -0.24756075, 0.7618118, -0.21146192,\n 
0.4546959, 0.09494688, -0.26813537, 0.5798886, -0.10784844, 0.18372172,\n 0.8161483, -0.3787802, 0.61460984, -0.41957632, 0.13647377, -0.3481221,\n 0.03326019, 1.7144626, 3.8620698, 14.40822, 9.046495, 7.6838465, \n 7.2554746, 8.057631, 11.189637, 9.038466, 8.125581, 8.294034, 10.172681,\n 11.90528, 7.1925435, 6.708079, 7.6085744, 9.414239, 14.608672, \n 1.5265317, 1.09792, 0.29970562, 0.29824358, 0.36030084, -0.37960574, \n 0.47860667, 0.91203105, -0.6904322, -0.2722036, 0.23733543, -0.6658274,\n 0.62095886, 0.73466265, -0.8475226, -0.1700871, 0.9261157, 0.422822, \n 0.32836267, 0.58122945, -0.83155084, -0.20049855, -0.040298104, \n 4.014356, 16.160791, 7.2828264, 7.3377733, 6.665611, 8.653453, \n 11.973017, 9.656379, 10.9801235, 9.05112, 10.565474, 11.942185, \n 7.2904882, 7.4630857, 6.514908, 9.644132, 14.969957, 0.07107994, \n 0.11467081, 0.92357284, 0.04355552, 0.6726098, -0.15279476, 0.713554, \n 0.5466241, -0.38109347, 0.5590394, 0.08306945, 0.9525252, 0.6713458, \n 0.51892877, -0.1279359, -0.15663871, 0.020156374, -0.060285714, -\n 1.0264076, -0.53699505, -0.9786586, 0.015289649, 1.5724823, 4.0689135, \n 13.646254, 8.417458, 7.3368583, 6.966266, 8.73208, 14.498494, \n 10.2102165, 11.423929, 11.351579, 12.9430065, 15.01266, 9.051174, \n 7.077483, 6.785291, 9.483119, 15.76488, 1.1677985, 1.6693239, -\n 0.21604359, 0.32284033, -0.22243214, 0.60323435, -0.11199745, \n 0.29957047, 0.006062749, 0.7996792, 0.3094816, -0.7718058, 0.503415, \n 0.07231447, -0.2853677, 0.4330218, 0.844616, -0.19574685, -0.3879851, \n 0.5901966, 0.051313907, -0.29432508, 1.2537544, 3.1426716, 14.615546, \n 8.347049, 7.4366584, 6.4491363, 9.865336, 15.843064, 12.469691, \n 11.894229, 12.133173, 14.63979, 16.16245, 9.504371, 8.017702, 7.867693,\n 9.518961, 14.380217, 0.66953653, 0.60293055, 0.00082825124, -0.28320992,\n 0.8367502, 0.12513764, 0.22053392, -0.10229007, -0.20082277, 0.63717407,\n 0.32739908, -0.093239225, -0.80318755, 0.9917766, 0.24838758, -\n 0.07330545, 0.15537623, 
0.09008534, -0.06607497, 1.0962121, 0.55644095,\n 0.6913326, 0.9021442, 3.8921309, 14.102233, 7.184174, 7.315026, \n 7.334084, 10.787065, 19.485243, 13.958044, 14.3500805, 13.616628, \n 15.63192, 17.07027, 9.131023, 6.8167133, 6.970449, 8.922994, 14.361785,\n 1.7793398, 0.94775784, 0.105669454, -0.18747061, 0.6676264, -0.3883816,\n -0.6202498, -0.0833843, -0.5216094, 1.1268811, -0.59910476, 0.39042526,\n 0.47714886, -0.7111677, -0.5756576, 0.9333002, 0.1010186, 0.13677923, -\n 0.75147396, 1.2583244, -0.23063457, 0.7901664, 0.24705392, 3.6259048, \n 12.530731, 6.9297647, 7.079164, 7.2256374, 11.940973, 20.025602, \n 14.700426, 13.519883, 14.241193, 17.55714, 17.386055, 10.167002, \n 7.536337, 7.0136056, 9.326938, 12.228463, 0.17775005, 0.8319777, -\n 0.8991761, -0.01412341, 0.61705685, -0.14188325, -0.41435227, -0.316557,\n -0.5893145, -0.010637931, 0.20675054, 0.44020182, -0.7080041, \n 0.16052538, -0.48142046, 0.9052833, 0.432698, 0.03338314, 0.35594848, \n 1.1689888, 0.36019892, 0.23971666, 1.4662509, 3.3352752, 11.360069, \n 8.300535, 7.5611286, 7.2111707, 17.327162, 20.148909, 17.380922, \n 17.596447, 14.160338, 19.188683, 17.219112, 10.499862, 8.309862, \n 6.1963353, 7.3864193, 12.878287, 1.4184926, 1.7496321, -0.082713336, \n 0.23216072, 0.20258206, 1.0141679, 0.14271286, -0.29340488, -\n 0.055605985, -0.5336929, -0.54352623, 0.19902669, 0.12139763, -\n 0.018293247, -0.20558693, -0.8606704, 0.22833318, 0.4463366, 0.20494421,\n 0.7066752, -0.62247527, 0.117985666, 1.831157, 3.299585, 9.63925, \n 7.483565, 7.1289496, 6.4751153, 15.985568, 21.507505, 18.539736, \n 16.699535, 16.726501, 19.698357, 22.443224, 11.952675, 7.005475, \n 6.2864413, 8.778635, 10.89195, 0.66351974, 1.1440128, -0.25076824, \n 0.66586065, 1.0526825, 0.015522989, 0.07891381, 1.104366, 0.7747889, \n 0.15351877, -0.12182697, -0.59052014, -0.12581429, 0.5053382, \n 0.17305401, 0.67090386, 1.036633, 0.05909565, 0.28418896, 0.86726683, \n 0.1763895, 0.33444333, 1.7197226, 2.5705223, 9.934082, 
6.614648, \n 5.9702163, 7.0940704, 18.322672, 24.886862, 18.648033, 19.174364, \n 17.071978, 18.935146, 20.495438, 13.39125, 7.1744776, 5.476832, \n 7.2689962, 10.46958, 1.1804211, 1.0994785, 0.64040864, 0.021063149, \n 0.75519574, 0.40024444, -0.48553574, 0.87461084, -0.23675112, 0.1914608,\n -0.49892142, 0.2618199, 0.6261685, -1.4913763, 0.41756257, 0.5763335, -\n 0.45616063, 0.38227928, -0.6692691, 1.8232274, 0.7977414, 0.40125495, \n 2.787939, 3.2074018, 8.831141, 6.6602535, 7.500632, 8.793667, 18.995548,\n 23.698793, 18.186054, 17.543282, 18.392523, 20.788607, 24.634804, \n 14.188387, 8.168461, 5.5740485, 6.8008204, 8.531001, 1.4529983, \n 2.276989, 1.0289037, 0.9468033, -0.038641334, -0.39401633, -1.1387177, \n 0.49660775, 0.5171432, -0.6254447, 1.2226907, -0.13812594, 0.11419458, \n -0.36041245, 0.16572447, -0.2501292, -0.95744544, 0.6987992, 0.3099944,\n 1.108943, 0.41807377, 1.350997, 1.2673455, 3.2821457, 8.0927515, \n 5.9851384, 4.8361425, 8.642136, 20.54146, 23.320255, 20.936903, \n 19.881096, 18.084406, 20.986282, 22.538109, 15.849695, 7.59143, \n 5.759286, 7.9955835, 7.542832, 1.5869404, 2.191163, -0.0054766536, \n 0.38372415, 1.4580531, -0.6341528, -0.20307654, -0.82046396, 0.30573404,\n 0.59632486, -0.12896755, -0.42806864, -0.47942856, -0.7036555, \n 0.075889945, 0.29308736, -1.4974035, -0.036708307, -0.43896213, \n 0.54672736, 1.3562044, 1.5058006, 2.0175235, 3.2622445, 7.817541, \n 6.1968045, 5.7298784, 8.535798, 22.878216, 23.569859, 21.438442, \n 20.779306, 18.338245, 23.335554, 23.656643, 16.534071, 7.0056953, \n 5.3699074, 6.2035737, 6.91238, 1.8461741, 2.0328891, 0.6284174, \n 0.07324934, 0.72266495, 0.43248987, 0.55657876, -0.36850226, 0.2892055,\n 0.120979175, -0.3255677, 0.18210961, -0.13677588, -0.79952997, -\n 0.16948017, 0.27382505, 0.011414817, -0.002753294, 0.1875501, 1.7294772,\n 0.86453336, 0.8789885, 2.0237687, 2.686733, 7.0931683, 6.7965593, \n 5.703301, 9.106176, 19.852842, 22.134148, 24.209602, 20.48003, 19.87589,\n 22.650255, 
24.67572, 17.161873, 7.185769, 5.12218, 5.9893394, 5.907269,\n 2.1844404, 1.9687537, 1.0286644, 0.052360654, 1.7644687, 0.5339646, -\n 0.53046066, -0.2281848, -1.2462859, 0.6778776, 0.5408989, -0.14820653, \n 0.38658077, -0.65733767, 0.014478714, 0.45866382, 0.47466084, \n 0.48330665, 0.52647215, 1.6572766, -0.093874216, 1.0939939, 2.8252633, \n 3.250628, 7.286972, 5.736179, 5.5879693, 9.545634, 22.925808, 23.213871,\n 23.39594, 21.748808, 22.024412, 24.974943, 23.57301, 18.065563, \n 8.397812, 4.8709254, 7.626314, 4.6410003, 1.8595266, 3.0831103, \n 1.4402436, 1.2672244, 1.312456, -0.18201214, 0.21097422, -0.026861114, \n 0.18476872, 0.7252849, -0.002409873, -0.29303908, 1.3546691, -\n 0.04322617, -0.053203642, -0.30067968, -0.12050266, -0.5528519, \n 0.057745364, 1.3053449, 1.8519605, 1.8503615, 2.5469666, 4.2060847, \n 5.5301046, 7.0553675, 5.9386334, 11.875089, 23.438046, 20.363987, \n 23.725615, 20.967691, 21.432257, 24.202627, 19.774887, 18.783188, \n 7.98809, 6.2239876, 7.760503, 5.212336, 2.9735184, 2.7213335, 2.0156252,\n 1.814288, 2.2770615, 0.01533184, 0.58220863, -0.49351138, 0.31417957, -\n 0.36469758, 0.45743746, 0.66627234, 0.3081961, 0.828259, -0.31382263, \n 0.26520026, 0.22944771, -0.6709603, -0.07570245, 1.5327783, 1.7784487, \n 2.6468341, 3.198592, 3.7656205, 5.9252257, 6.9020658, 4.9581833, \n 12.047751, 22.348654, 20.17518, 24.174393, 21.535011, 19.05106, \n 22.163195, 21.497072, 18.43445, 8.682917, 5.3132563, 7.030179, 3.717919,\n 2.0626392, 2.4575338, 2.2717822, 0.8625143, 2.4770658, -0.786061, \n 1.2881083, -0.2518999, 0.72405684, -0.122574806, -0.34197915, \n 0.13918422, 0.26873538, -0.47515658, -0.54810023, 0.89566797, -\n 0.54384357, -0.12311963, 0.567525, 2.7046611, 1.5512958, 1.7786896, \n 3.8791292, 3.9559023, 4.788476, 8.228316, 5.3946, 12.281274, 21.967098,\n 20.923243, 23.913458, 20.710938, 19.420635, 25.138704, 18.289383, \n 19.177135, 8.415327, 4.8929396, 8.965305, 4.3885813, 3.4578655, \n 3.0384607, 1.5863328, 1.91974, 
2.4258208, 0.5892152, 0.048560977, -\n 0.13528748, -0.21397328, 0.16264682, -0.57951355, -0.40301454, \n 0.21641892, -0.22450455, 0.38177252, -0.967473, -0.35485935, \n 0.062246032, -0.03395147, 2.1338463, 1.9084859, 3.1863737, 1.9375713, \n 3.4518764, 6.570703, 6.878443, 5.679476, 13.351213, 22.931889, \n 19.282558, 22.36135, 23.796984, 21.032475, 23.09803, 20.966232, \n 20.72223, 6.7338567, 6.4885483, 7.190284, 4.9310346, 3.1236634, \n 3.5150487, 2.9693668, 2.2454295, 1.82249, -0.09966546, 0.72314006, -\n 0.79027426, 0.41793302, -0.14793015, 0.45988762, 0.8456978, -0.5273398,\n 0.1830612, -1.0828326, -1.0117317, -0.3019783, 0.17001551, -0.62556803,\n 2.961217, 2.6823378, 2.9682546, 5.2445164, 4.9527783, 6.309333, \n 7.7392774, 6.2129936, 15.35368, 20.683935, 20.589102, 22.10926, \n 20.185204, 20.562426, 22.645317, 18.869568, 20.659521, 8.880328, \n 6.4410696, 9.769155, 5.5935693, 5.527752, 4.5683465, 3.4019177, \n 3.3163903, 2.244741, 0.38402623, 0.2960868, -0.4828044, 0.13759217, \n 0.25681636, 0.11657055, -0.330115, 0.4011577, -0.7654019, 0.14916949, -\n 0.6228205, -0.96823233, -0.022868, -0.49047035, 3.20636, 2.6912642, \n 2.9050756, 4.912674, 5.7441964, 6.489336, 9.632326, 6.2825303, 16.68777,\n 21.077969, 17.172966, 18.92938, 23.38385, 20.251026, 22.16378, \n 18.001736, 20.24098, 11.019654, 6.6073513, 8.655663, 6.298364, \n 6.4654784, 3.6983974, 3.1087956, 2.226927, 2.6668777, -0.35526595, \n 1.4488825, 0.20488043, 0.047601122, -0.6924504, 0.57495445, 0.5399022, \n -0.47663862, 0.8161736, -0.36598107, -0.59101355, 0.20327158, \n 0.41677478, 0.27029967, 3.7847342, 3.2484818, 3.747693, 4.7734656, \n 6.716756, 8.185982, 9.418276, 7.493696, 14.704602, 17.729408, 17.48148,\n 19.855602, 20.371563, 18.5821, 18.155266, 16.968113, 17.100256, \n 10.015516, 7.8247633, 8.993816, 6.4911056, 6.2132425, 4.3434267, \n 3.7000012, 3.7377622, 3.1024928, -0.30869377, 0.051026687, -0.34078225,\n 0.7479868, 0.03696166, -0.75611556, 1.1542099, -0.028129257, 0.08181842,\n 
0.09559424, 0.8364861, 0.096545294, 0.5584201, -0.5194905, 3.589691, \n 4.05453, 3.794124, 4.707637, 9.231918, 8.564278, 9.2333975, 7.006125, \n 16.20831, 19.324417, 15.819074, 19.356344, 17.93927, 18.384487, \n 18.001207, 16.142382, 21.02356, 9.986794, 6.614442, 10.657583, \n 6.6237283, 8.433239, 4.4907804, 4.2819304, 3.7269611, 3.5132716, \n 0.4662154, 0.30799574, 0.96793664, -0.23279454, -0.65458816, 0.3362532,\n -0.25408295, 0.06732974, 0.4873681, 0.51199776, 0.14874719, -0.29994798,\n 0.4666868, 0.33490536, 3.3489285, 2.9599032, 3.7671084, 5.274986, \n 11.143537, 9.2554245, 9.07235, 9.138557, 17.255503, 18.355011, \n 15.364281, 17.336935, 18.85955, 17.050003, 15.608138, 15.812602, \n 18.231024, 11.6336155, 6.9478188, 11.149977, 7.419574, 10.250601, \n 4.7022414, 3.971905, 4.7929826, 3.3438401, -0.39000547, -0.28059074, \n 0.6398243, 0.54544014, 0.6069346, -0.17257981, 0.22857136, 0.5565434, \n 0.004583537, -1.6335539, -0.8888735, -0.51765877, 0.25269827, -\n 0.01876194, 3.6656997, 3.8518455, 5.484056, 6.189166, 12.860901, \n 9.803692, 10.184517, 8.937886, 17.70772, 18.956602, 15.036017, \n 18.585073, 18.892986, 18.184309, 15.378883, 13.1691475, 16.713081, \n 11.373385, 10.050861, 11.757488, 10.44355, 12.29941, 4.694755, 5.29064,\n 3.8482742, 3.204164, 0.0923521, 0.023937136, 0.1471634, 0.6328977, \n 0.086753555, 0.4752982, -0.6725007, 0.39593527, 0.22832835, -0.27118513,\n -0.8305444, 0.61332023, -0.46385112, -0.07130288, 3.392937, 5.612763, \n 5.2056, 5.706025, 15.220109, 11.131699, 11.811647, 9.684384, 18.768026,\n 16.84839, 13.052551, 16.32535, 17.554602, 17.395172, 14.127713, 12.6871,\n 17.62177, 11.645812, 8.629343, 11.129438, 11.581531, 14.195255, \n 4.8469067, 5.1938415, 4.0862703, 3.181031, -1.0452468, -0.25019166, -\n 0.7914238, 0.12144237, -0.41462633, 0.54280686, -0.69631076, 0.3511648,\n 0.004874259, -0.06835556, 0.8735261, 0.24838078, -0.31527227, \n 0.52716863, 3.9399889, 6.0550613, 6.129095, 6.861085, 18.186186, \n 11.700109, 9.944186, 8.473949, 
16.194746, 15.487744, 11.69865, \n 15.148699, 17.62606, 18.724825, 14.773164, 12.397501, 17.29195, \n 12.904611, 10.236364, 9.858109, 12.551205, 17.244278, 5.081826, \n 5.861555, 4.532901, 2.9011462, -0.6339103, -0.14527631, -0.34604034, \n 0.16419859, -0.21205892, 1.0102317, -0.6850754, -0.35831228, 0.2243401,\n -0.12707797, 0.12315286, 0.75053287, -0.30611196, 0.946708, 3.2013948, \n 5.563331, 4.7585716, 7.213843, 20.686522, 11.607341, 12.30799, 10.50174,\n 15.599098, 14.504682, 13.629604, 13.69594, 17.019728, 16.432478, \n 13.931328, 13.392891, 16.40223, 12.716988, 10.136288, 11.304484, \n 14.544636, 18.359613, 5.5700507, 5.302722, 5.3971443, 4.0632043, \n 0.34419727, -0.43536162, 0.2166448, -0.95898896, 0.54851377, 0.7104762,\n 0.73580873, -0.025371978, -0.42447037, -0.055623855, -0.057257153, -\n 0.042765763, -0.32910374, 0.110769786, 4.9113693, 6.042119, 5.789901, \n 8.213889, 21.399662, 13.620898, 12.268165, 12.022924, 15.812675, \n 14.541431, 11.235446, 13.432023, 16.380638, 17.424328, 13.075844, \n 13.108509, 16.125572, 12.70376, 9.833503, 12.167731, 15.966658, \n 19.35662, 4.726227, 5.754112, 5.277654, 3.513394, 0.27682012, -\n 0.6424214, 0.63972783, 0.052361738, 0.6900285, 0.8120001, 0.13217215, -\n 0.06418637, -0.34938893, -0.1332957, -0.14414565, 0.13367409, 0.2113514,\n 0.013457297, 5.1611977, 5.566288, 5.6893077, 6.982988, 20.4595, \n 14.453565, 13.59946, 10.934562, 16.137613, 14.927114, 11.994792, \n 13.434463, 17.021969, 17.274439, 13.322607, 11.919087, 16.481926, \n 12.076119, 10.847066, 11.398886, 16.077639, 19.727343, 4.5308523, \n 6.236413, 4.8869467, 3.9474933, 0.5430834, -0.16916445, 1.1437705, \n 0.16070405, 0.31188658, 0.8880989, -0.14495048, -0.5266939, 0.22656989,\n 0.3505556, 0.015732061, -0.005636345, -0.56870633, 0.40287915, \n 4.4800043, 4.970619, 4.5086727, 7.2337227, 21.180979, 13.984755, \n 12.418574, 10.579776, 14.925623, 11.359912, 10.660921, 12.467203, \n 17.208267, 17.148045, 11.586628, 11.8577, 13.493896, 13.254265, \n 
10.851606, 13.149869, 17.053873, 19.849815, 4.9660897, 5.8460274, \n 3.998473, 3.6802619, 0.8031087, -0.013905935, 0.3503995, 0.31186494, -\n 0.038673762, -0.07608058, 0.21588215, -0.23191574, -0.3952367, -\n 0.09744672, 0.10716237, -1.3977432, -0.2775279, 0.28267142, 3.4341362, \n 5.5165367, 4.798283, 5.5223513, 23.267078, 15.076336, 13.030845, \n 10.9562845, 13.846566, 11.140822, 10.528686, 12.319912, 15.81127, \n 17.356304, 10.330765, 10.917309, 11.82135, 11.22828, 9.395469, \n 12.859789, 15.528548, 18.173409, 4.9549546, 7.068773, 5.830448, \n 2.882567, -0.47524917, -0.3299339, 0.19532575, -0.5605442, -0.05505767,\n -0.22165492, -0.4325593, 0.13398468, -0.34254703, 0.0140561955, -\n 0.31874263, -0.14240773, -0.91078305, 0.69452536, 4.23155, 5.7011547, \n 6.0003905, 6.377488, 20.312622, 13.978043, 11.040157, 11.176402, \n 13.108543, 9.652381, 9.632209, 11.781593, 14.856762, 15.745179, \n 9.215103, 9.966311, 12.876652, 11.37008, 10.591258, 10.1424675, \n 14.367625, 19.73172, 3.84762, 7.103483, 3.7233605, 2.376824, 0.5252924,\n 0.38380843, 0.99321234, -0.46900645, 0.12149067, 0.42257598, 0.0632253,\n -0.6670193, 0.03464376, 0.452787, 0.29236665, -0.017891373, -\n 0.075127214, 0.9828477, 2.3365817, 5.2860856, 4.3626456, 5.785785, \n 20.600492, 12.966171, 11.047343, 9.063554, 10.454045, 10.47048, \n 9.218836, 11.104739, 15.136548, 14.689532, 10.122101, 9.4212675, \n 11.134829, 8.617753, 9.327736, 11.278048, 13.085438, 18.43459, \n 3.9763334, 5.9072723, 3.9930198, 3.4963682, 0.2813723, 1.0457343, \n 0.31889322, 0.37867522, 1.2037315, -0.47904515, 0.582204, 0.68306595, -\n 0.088313825, -0.107233785, -0.53984404, 0.39104667, 1.1425363, \n 0.51777375, 2.9267018, 5.183814, 4.495046, 4.6087675, 18.143732, \n 12.06679, 8.621597, 7.8071413, 9.6548195, 8.168409, 7.199488, 7.962524,\n 13.9421425, 12.19501, 8.027851, 8.022394, 8.449041, 8.428407, 7.2122917,\n 9.045476, 12.2283, 16.851568, 4.1475954, 5.7582254, 3.977257, 1.8516432,\n -0.32922924, -0.12237206, -0.072756164, 
-0.6167613, 0.5225413, \n 0.37072095, -0.6287377, -0.7166235, -0.37311992, 0.81874573, 0.17337193,\n 0.17729722, 0.40824133, -0.3479744, 2.9783738, 4.5450144, 3.9617758, \n 4.9179983, 15.7159395, 10.0808935, 7.922992, 6.9472337, 9.000638, \n 7.62391, 6.7539964, 8.514194, 12.004702, 12.731859, 7.173314, 7.301387,\n 7.240425, 7.4015136, 7.516923, 8.6178665, 9.913477, 14.592376, \n 4.5969114, 5.9667635, 2.2334886, 2.1020658, -0.9194653, 0.43381432, -\n 0.74259335, -0.8438142, 0.01724637, -0.6245163, 0.34715256, -0.24820891,\n -0.6074153, -0.066010244, -0.05560958, -0.32758415, 0.3784681, -\n 0.09629097, 2.7877793, 4.203103, 3.26329, 4.44158, 12.650619, 8.000976,\n 5.2695656, 5.8276386, 7.0067124, 6.36843, 5.256174, 7.340733, 9.230904,\n 13.014863, 5.453347, 6.2923303, 6.518343, 6.5802903, 5.615034, 7.000242,\n 8.82858, 11.683347, 3.8504424, 4.365258, 3.2354295, 2.2202947, \n 0.5615039, 0.41533247, 0.21722497, 0.3176445, 0.2709266, -0.2929376, \n 0.090651914, -0.32017383, -0.30647907, 0.15408067, -0.3604456, \n 0.6241022, 0.42943946, 0.30790985, 2.0098479, 3.1669462, 3.8518548, \n 4.0607076, 11.639872, 5.7104745, 7.125849, 5.09103, 5.6111135, 3.951972,\n 4.0356493, 7.02897, 11.430392, 11.738871, 4.115266, 5.621048, 5.3278913,\n 5.120655, 5.990115, 5.7664003, 5.7767644, 9.013329, 2.9515538, \n 5.6055756, 4.1827626, 1.7799046, -0.21542077, 0.24031225, -0.6824815, -\n 0.6190339, 0.6256524, -0.48574805, 0.09997501, 0.3266095, 0.07135873, -\n 0.3254111, -0.047491744, -0.014772129, -0.38849118, 0.286563, 2.9551277,\n 3.957588, 3.0914695, 3.1707056, 8.462824, 4.728864, 5.0381837, \n 4.0804534, 5.1110387, 4.62399, 4.415538, 6.1308045, 10.654469, \n 10.723281, 4.4972973, 3.627521, 3.8499038, 4.373936, 4.0010695, \n 4.3314424, 6.3237967, 7.2798166, 2.3315697, 4.04032, 3.2531312, \n 2.022844, -0.5356632, 0.52645034, 0.11135009, -0.26490784, 0.39241284, \n 0.13336958, -0.15545088, -0.048340384, 0.6705195, -0.14051451, -\n 0.7617515, 0.11379189, 0.21909207, 0.63809645, 1.5451268, 
4.243852, \n 3.2245193, 3.3400161, 6.511011, 4.033045, 2.8604522, 3.6116364, \n 3.5580635, 3.1904101, 2.9593391, 4.813459, 8.871713, 8.875507, 2.922824,\n 2.6118903, 3.5907378, 2.6278322, 3.5242443, 3.0563798, 4.969574, \n 5.5496926, 3.3797112, 3.520721, 2.3572729, 1.7771024, -0.43368375, -\n 0.6439688, -0.56648374, 0.25869504, -0.13318418, -0.25542453, -\n 1.2330167, 0.34627095, 1.5127228, -0.6055812, 0.6232876, 0.23605451, -\n 0.5616809, 0.500821]'], {}), '([-0.66101485, 0.31644753, -0.5896422, 0.4764485, 2.1545932, \n 15.793148, 8.2264805, 6.457074, 5.7062893, 6.1811686, 8.777044, \n 6.9074125, 7.9522552, 7.701313, 8.559349, 8.296498, 6.1969037, \n 6.4804926, 6.8852997, 8.830744, 14.376627, 0.54612935, 0.124028, \n 0.44405863, 0.5131382, 0.5987899, 0.008983987, -0.24756075, 0.7618118, \n -0.21146192, 0.4546959, 0.09494688, -0.26813537, 0.5798886, -0.10784844,\n 0.18372172, 0.8161483, -0.3787802, 0.61460984, -0.41957632, 0.13647377,\n -0.3481221, 0.03326019, 1.7144626, 3.8620698, 14.40822, 9.046495, \n 7.6838465, 7.2554746, 8.057631, 11.189637, 9.038466, 8.125581, 8.294034,\n 10.172681, 11.90528, 7.1925435, 6.708079, 7.6085744, 9.414239, \n 14.608672, 1.5265317, 1.09792, 0.29970562, 0.29824358, 0.36030084, -\n 0.37960574, 0.47860667, 0.91203105, -0.6904322, -0.2722036, 0.23733543,\n -0.6658274, 0.62095886, 0.73466265, -0.8475226, -0.1700871, 0.9261157, \n 0.422822, 0.32836267, 0.58122945, -0.83155084, -0.20049855, -\n 0.040298104, 4.014356, 16.160791, 7.2828264, 7.3377733, 6.665611, \n 8.653453, 11.973017, 9.656379, 10.9801235, 9.05112, 10.565474, \n 11.942185, 7.2904882, 7.4630857, 6.514908, 9.644132, 14.969957, \n 0.07107994, 0.11467081, 0.92357284, 0.04355552, 0.6726098, -0.15279476,\n 0.713554, 0.5466241, -0.38109347, 0.5590394, 0.08306945, 0.9525252, \n 0.6713458, 0.51892877, -0.1279359, -0.15663871, 0.020156374, -\n 0.060285714, -1.0264076, -0.53699505, -0.9786586, 0.015289649, \n 1.5724823, 4.0689135, 13.646254, 8.417458, 7.3368583, 6.966266, 8.73208,\n 
14.498494, 10.2102165, 11.423929, 11.351579, 12.9430065, 15.01266, \n 9.051174, 7.077483, 6.785291, 9.483119, 15.76488, 1.1677985, 1.6693239,\n -0.21604359, 0.32284033, -0.22243214, 0.60323435, -0.11199745, \n 0.29957047, 0.006062749, 0.7996792, 0.3094816, -0.7718058, 0.503415, \n 0.07231447, -0.2853677, 0.4330218, 0.844616, -0.19574685, -0.3879851, \n 0.5901966, 0.051313907, -0.29432508, 1.2537544, 3.1426716, 14.615546, \n 8.347049, 7.4366584, 6.4491363, 9.865336, 15.843064, 12.469691, \n 11.894229, 12.133173, 14.63979, 16.16245, 9.504371, 8.017702, 7.867693,\n 9.518961, 14.380217, 0.66953653, 0.60293055, 0.00082825124, -0.28320992,\n 0.8367502, 0.12513764, 0.22053392, -0.10229007, -0.20082277, 0.63717407,\n 0.32739908, -0.093239225, -0.80318755, 0.9917766, 0.24838758, -\n 0.07330545, 0.15537623, 0.09008534, -0.06607497, 1.0962121, 0.55644095,\n 0.6913326, 0.9021442, 3.8921309, 14.102233, 7.184174, 7.315026, \n 7.334084, 10.787065, 19.485243, 13.958044, 14.3500805, 13.616628, \n 15.63192, 17.07027, 9.131023, 6.8167133, 6.970449, 8.922994, 14.361785,\n 1.7793398, 0.94775784, 0.105669454, -0.18747061, 0.6676264, -0.3883816,\n -0.6202498, -0.0833843, -0.5216094, 1.1268811, -0.59910476, 0.39042526,\n 0.47714886, -0.7111677, -0.5756576, 0.9333002, 0.1010186, 0.13677923, -\n 0.75147396, 1.2583244, -0.23063457, 0.7901664, 0.24705392, 3.6259048, \n 12.530731, 6.9297647, 7.079164, 7.2256374, 11.940973, 20.025602, \n 14.700426, 13.519883, 14.241193, 17.55714, 17.386055, 10.167002, \n 7.536337, 7.0136056, 9.326938, 12.228463, 0.17775005, 0.8319777, -\n 0.8991761, -0.01412341, 0.61705685, -0.14188325, -0.41435227, -0.316557,\n -0.5893145, -0.010637931, 0.20675054, 0.44020182, -0.7080041, \n 0.16052538, -0.48142046, 0.9052833, 0.432698, 0.03338314, 0.35594848, \n 1.1689888, 0.36019892, 0.23971666, 1.4662509, 3.3352752, 11.360069, \n 8.300535, 7.5611286, 7.2111707, 17.327162, 20.148909, 17.380922, \n 17.596447, 14.160338, 19.188683, 17.219112, 10.499862, 8.309862, \n 6.1963353, 
7.3864193, 12.878287, 1.4184926, 1.7496321, -0.082713336, \n 0.23216072, 0.20258206, 1.0141679, 0.14271286, -0.29340488, -\n 0.055605985, -0.5336929, -0.54352623, 0.19902669, 0.12139763, -\n 0.018293247, -0.20558693, -0.8606704, 0.22833318, 0.4463366, 0.20494421,\n 0.7066752, -0.62247527, 0.117985666, 1.831157, 3.299585, 9.63925, \n 7.483565, 7.1289496, 6.4751153, 15.985568, 21.507505, 18.539736, \n 16.699535, 16.726501, 19.698357, 22.443224, 11.952675, 7.005475, \n 6.2864413, 8.778635, 10.89195, 0.66351974, 1.1440128, -0.25076824, \n 0.66586065, 1.0526825, 0.015522989, 0.07891381, 1.104366, 0.7747889, \n 0.15351877, -0.12182697, -0.59052014, -0.12581429, 0.5053382, \n 0.17305401, 0.67090386, 1.036633, 0.05909565, 0.28418896, 0.86726683, \n 0.1763895, 0.33444333, 1.7197226, 2.5705223, 9.934082, 6.614648, \n 5.9702163, 7.0940704, 18.322672, 24.886862, 18.648033, 19.174364, \n 17.071978, 18.935146, 20.495438, 13.39125, 7.1744776, 5.476832, \n 7.2689962, 10.46958, 1.1804211, 1.0994785, 0.64040864, 0.021063149, \n 0.75519574, 0.40024444, -0.48553574, 0.87461084, -0.23675112, 0.1914608,\n -0.49892142, 0.2618199, 0.6261685, -1.4913763, 0.41756257, 0.5763335, -\n 0.45616063, 0.38227928, -0.6692691, 1.8232274, 0.7977414, 0.40125495, \n 2.787939, 3.2074018, 8.831141, 6.6602535, 7.500632, 8.793667, 18.995548,\n 23.698793, 18.186054, 17.543282, 18.392523, 20.788607, 24.634804, \n 14.188387, 8.168461, 5.5740485, 6.8008204, 8.531001, 1.4529983, \n 2.276989, 1.0289037, 0.9468033, -0.038641334, -0.39401633, -1.1387177, \n 0.49660775, 0.5171432, -0.6254447, 1.2226907, -0.13812594, 0.11419458, \n -0.36041245, 0.16572447, -0.2501292, -0.95744544, 0.6987992, 0.3099944,\n 1.108943, 0.41807377, 1.350997, 1.2673455, 3.2821457, 8.0927515, \n 5.9851384, 4.8361425, 8.642136, 20.54146, 23.320255, 20.936903, \n 19.881096, 18.084406, 20.986282, 22.538109, 15.849695, 7.59143, \n 5.759286, 7.9955835, 7.542832, 1.5869404, 2.191163, -0.0054766536, \n 0.38372415, 1.4580531, -0.6341528, 
-0.20307654, -0.82046396, 0.30573404,\n 0.59632486, -0.12896755, -0.42806864, -0.47942856, -0.7036555, \n 0.075889945, 0.29308736, -1.4974035, -0.036708307, -0.43896213, \n 0.54672736, 1.3562044, 1.5058006, 2.0175235, 3.2622445, 7.817541, \n 6.1968045, 5.7298784, 8.535798, 22.878216, 23.569859, 21.438442, \n 20.779306, 18.338245, 23.335554, 23.656643, 16.534071, 7.0056953, \n 5.3699074, 6.2035737, 6.91238, 1.8461741, 2.0328891, 0.6284174, \n 0.07324934, 0.72266495, 0.43248987, 0.55657876, -0.36850226, 0.2892055,\n 0.120979175, -0.3255677, 0.18210961, -0.13677588, -0.79952997, -\n 0.16948017, 0.27382505, 0.011414817, -0.002753294, 0.1875501, 1.7294772,\n 0.86453336, 0.8789885, 2.0237687, 2.686733, 7.0931683, 6.7965593, \n 5.703301, 9.106176, 19.852842, 22.134148, 24.209602, 20.48003, 19.87589,\n 22.650255, 24.67572, 17.161873, 7.185769, 5.12218, 5.9893394, 5.907269,\n 2.1844404, 1.9687537, 1.0286644, 0.052360654, 1.7644687, 0.5339646, -\n 0.53046066, -0.2281848, -1.2462859, 0.6778776, 0.5408989, -0.14820653, \n 0.38658077, -0.65733767, 0.014478714, 0.45866382, 0.47466084, \n 0.48330665, 0.52647215, 1.6572766, -0.093874216, 1.0939939, 2.8252633, \n 3.250628, 7.286972, 5.736179, 5.5879693, 9.545634, 22.925808, 23.213871,\n 23.39594, 21.748808, 22.024412, 24.974943, 23.57301, 18.065563, \n 8.397812, 4.8709254, 7.626314, 4.6410003, 1.8595266, 3.0831103, \n 1.4402436, 1.2672244, 1.312456, -0.18201214, 0.21097422, -0.026861114, \n 0.18476872, 0.7252849, -0.002409873, -0.29303908, 1.3546691, -\n 0.04322617, -0.053203642, -0.30067968, -0.12050266, -0.5528519, \n 0.057745364, 1.3053449, 1.8519605, 1.8503615, 2.5469666, 4.2060847, \n 5.5301046, 7.0553675, 5.9386334, 11.875089, 23.438046, 20.363987, \n 23.725615, 20.967691, 21.432257, 24.202627, 19.774887, 18.783188, \n 7.98809, 6.2239876, 7.760503, 5.212336, 2.9735184, 2.7213335, 2.0156252,\n 1.814288, 2.2770615, 0.01533184, 0.58220863, -0.49351138, 0.31417957, -\n 0.36469758, 0.45743746, 0.66627234, 0.3081961, 0.828259, 
-0.31382263, \n 0.26520026, 0.22944771, -0.6709603, -0.07570245, 1.5327783, 1.7784487, \n 2.6468341, 3.198592, 3.7656205, 5.9252257, 6.9020658, 4.9581833, \n 12.047751, 22.348654, 20.17518, 24.174393, 21.535011, 19.05106, \n 22.163195, 21.497072, 18.43445, 8.682917, 5.3132563, 7.030179, 3.717919,\n 2.0626392, 2.4575338, 2.2717822, 0.8625143, 2.4770658, -0.786061, \n 1.2881083, -0.2518999, 0.72405684, -0.122574806, -0.34197915, \n 0.13918422, 0.26873538, -0.47515658, -0.54810023, 0.89566797, -\n 0.54384357, -0.12311963, 0.567525, 2.7046611, 1.5512958, 1.7786896, \n 3.8791292, 3.9559023, 4.788476, 8.228316, 5.3946, 12.281274, 21.967098,\n 20.923243, 23.913458, 20.710938, 19.420635, 25.138704, 18.289383, \n 19.177135, 8.415327, 4.8929396, 8.965305, 4.3885813, 3.4578655, \n 3.0384607, 1.5863328, 1.91974, 2.4258208, 0.5892152, 0.048560977, -\n 0.13528748, -0.21397328, 0.16264682, -0.57951355, -0.40301454, \n 0.21641892, -0.22450455, 0.38177252, -0.967473, -0.35485935, \n 0.062246032, -0.03395147, 2.1338463, 1.9084859, 3.1863737, 1.9375713, \n 3.4518764, 6.570703, 6.878443, 5.679476, 13.351213, 22.931889, \n 19.282558, 22.36135, 23.796984, 21.032475, 23.09803, 20.966232, \n 20.72223, 6.7338567, 6.4885483, 7.190284, 4.9310346, 3.1236634, \n 3.5150487, 2.9693668, 2.2454295, 1.82249, -0.09966546, 0.72314006, -\n 0.79027426, 0.41793302, -0.14793015, 0.45988762, 0.8456978, -0.5273398,\n 0.1830612, -1.0828326, -1.0117317, -0.3019783, 0.17001551, -0.62556803,\n 2.961217, 2.6823378, 2.9682546, 5.2445164, 4.9527783, 6.309333, \n 7.7392774, 6.2129936, 15.35368, 20.683935, 20.589102, 22.10926, \n 20.185204, 20.562426, 22.645317, 18.869568, 20.659521, 8.880328, \n 6.4410696, 9.769155, 5.5935693, 5.527752, 4.5683465, 3.4019177, \n 3.3163903, 2.244741, 0.38402623, 0.2960868, -0.4828044, 0.13759217, \n 0.25681636, 0.11657055, -0.330115, 0.4011577, -0.7654019, 0.14916949, -\n 0.6228205, -0.96823233, -0.022868, -0.49047035, 3.20636, 2.6912642, \n 2.9050756, 4.912674, 5.7441964, 6.489336, 
9.632326, 6.2825303, 16.68777,\n 21.077969, 17.172966, 18.92938, 23.38385, 20.251026, 22.16378, \n 18.001736, 20.24098, 11.019654, 6.6073513, 8.655663, 6.298364, \n 6.4654784, 3.6983974, 3.1087956, 2.226927, 2.6668777, -0.35526595, \n 1.4488825, 0.20488043, 0.047601122, -0.6924504, 0.57495445, 0.5399022, \n -0.47663862, 0.8161736, -0.36598107, -0.59101355, 0.20327158, \n 0.41677478, 0.27029967, 3.7847342, 3.2484818, 3.747693, 4.7734656, \n 6.716756, 8.185982, 9.418276, 7.493696, 14.704602, 17.729408, 17.48148,\n 19.855602, 20.371563, 18.5821, 18.155266, 16.968113, 17.100256, \n 10.015516, 7.8247633, 8.993816, 6.4911056, 6.2132425, 4.3434267, \n 3.7000012, 3.7377622, 3.1024928, -0.30869377, 0.051026687, -0.34078225,\n 0.7479868, 0.03696166, -0.75611556, 1.1542099, -0.028129257, 0.08181842,\n 0.09559424, 0.8364861, 0.096545294, 0.5584201, -0.5194905, 3.589691, \n 4.05453, 3.794124, 4.707637, 9.231918, 8.564278, 9.2333975, 7.006125, \n 16.20831, 19.324417, 15.819074, 19.356344, 17.93927, 18.384487, \n 18.001207, 16.142382, 21.02356, 9.986794, 6.614442, 10.657583, \n 6.6237283, 8.433239, 4.4907804, 4.2819304, 3.7269611, 3.5132716, \n 0.4662154, 0.30799574, 0.96793664, -0.23279454, -0.65458816, 0.3362532,\n -0.25408295, 0.06732974, 0.4873681, 0.51199776, 0.14874719, -0.29994798,\n 0.4666868, 0.33490536, 3.3489285, 2.9599032, 3.7671084, 5.274986, \n 11.143537, 9.2554245, 9.07235, 9.138557, 17.255503, 18.355011, \n 15.364281, 17.336935, 18.85955, 17.050003, 15.608138, 15.812602, \n 18.231024, 11.6336155, 6.9478188, 11.149977, 7.419574, 10.250601, \n 4.7022414, 3.971905, 4.7929826, 3.3438401, -0.39000547, -0.28059074, \n 0.6398243, 0.54544014, 0.6069346, -0.17257981, 0.22857136, 0.5565434, \n 0.004583537, -1.6335539, -0.8888735, -0.51765877, 0.25269827, -\n 0.01876194, 3.6656997, 3.8518455, 5.484056, 6.189166, 12.860901, \n 9.803692, 10.184517, 8.937886, 17.70772, 18.956602, 15.036017, \n 18.585073, 18.892986, 18.184309, 15.378883, 13.1691475, 16.713081, \n 11.373385, 
10.050861, 11.757488, 10.44355, 12.29941, 4.694755, 5.29064,\n 3.8482742, 3.204164, 0.0923521, 0.023937136, 0.1471634, 0.6328977, \n 0.086753555, 0.4752982, -0.6725007, 0.39593527, 0.22832835, -0.27118513,\n -0.8305444, 0.61332023, -0.46385112, -0.07130288, 3.392937, 5.612763, \n 5.2056, 5.706025, 15.220109, 11.131699, 11.811647, 9.684384, 18.768026,\n 16.84839, 13.052551, 16.32535, 17.554602, 17.395172, 14.127713, 12.6871,\n 17.62177, 11.645812, 8.629343, 11.129438, 11.581531, 14.195255, \n 4.8469067, 5.1938415, 4.0862703, 3.181031, -1.0452468, -0.25019166, -\n 0.7914238, 0.12144237, -0.41462633, 0.54280686, -0.69631076, 0.3511648,\n 0.004874259, -0.06835556, 0.8735261, 0.24838078, -0.31527227, \n 0.52716863, 3.9399889, 6.0550613, 6.129095, 6.861085, 18.186186, \n 11.700109, 9.944186, 8.473949, 16.194746, 15.487744, 11.69865, \n 15.148699, 17.62606, 18.724825, 14.773164, 12.397501, 17.29195, \n 12.904611, 10.236364, 9.858109, 12.551205, 17.244278, 5.081826, \n 5.861555, 4.532901, 2.9011462, -0.6339103, -0.14527631, -0.34604034, \n 0.16419859, -0.21205892, 1.0102317, -0.6850754, -0.35831228, 0.2243401,\n -0.12707797, 0.12315286, 0.75053287, -0.30611196, 0.946708, 3.2013948, \n 5.563331, 4.7585716, 7.213843, 20.686522, 11.607341, 12.30799, 10.50174,\n 15.599098, 14.504682, 13.629604, 13.69594, 17.019728, 16.432478, \n 13.931328, 13.392891, 16.40223, 12.716988, 10.136288, 11.304484, \n 14.544636, 18.359613, 5.5700507, 5.302722, 5.3971443, 4.0632043, \n 0.34419727, -0.43536162, 0.2166448, -0.95898896, 0.54851377, 0.7104762,\n 0.73580873, -0.025371978, -0.42447037, -0.055623855, -0.057257153, -\n 0.042765763, -0.32910374, 0.110769786, 4.9113693, 6.042119, 5.789901, \n 8.213889, 21.399662, 13.620898, 12.268165, 12.022924, 15.812675, \n 14.541431, 11.235446, 13.432023, 16.380638, 17.424328, 13.075844, \n 13.108509, 16.125572, 12.70376, 9.833503, 12.167731, 15.966658, \n 19.35662, 4.726227, 5.754112, 5.277654, 3.513394, 0.27682012, -\n 0.6424214, 0.63972783, 0.052361738, 
0.6900285, 0.8120001, 0.13217215, -\n 0.06418637, -0.34938893, -0.1332957, -0.14414565, 0.13367409, 0.2113514,\n 0.013457297, 5.1611977, 5.566288, 5.6893077, 6.982988, 20.4595, \n 14.453565, 13.59946, 10.934562, 16.137613, 14.927114, 11.994792, \n 13.434463, 17.021969, 17.274439, 13.322607, 11.919087, 16.481926, \n 12.076119, 10.847066, 11.398886, 16.077639, 19.727343, 4.5308523, \n 6.236413, 4.8869467, 3.9474933, 0.5430834, -0.16916445, 1.1437705, \n 0.16070405, 0.31188658, 0.8880989, -0.14495048, -0.5266939, 0.22656989,\n 0.3505556, 0.015732061, -0.005636345, -0.56870633, 0.40287915, \n 4.4800043, 4.970619, 4.5086727, 7.2337227, 21.180979, 13.984755, \n 12.418574, 10.579776, 14.925623, 11.359912, 10.660921, 12.467203, \n 17.208267, 17.148045, 11.586628, 11.8577, 13.493896, 13.254265, \n 10.851606, 13.149869, 17.053873, 19.849815, 4.9660897, 5.8460274, \n 3.998473, 3.6802619, 0.8031087, -0.013905935, 0.3503995, 0.31186494, -\n 0.038673762, -0.07608058, 0.21588215, -0.23191574, -0.3952367, -\n 0.09744672, 0.10716237, -1.3977432, -0.2775279, 0.28267142, 3.4341362, \n 5.5165367, 4.798283, 5.5223513, 23.267078, 15.076336, 13.030845, \n 10.9562845, 13.846566, 11.140822, 10.528686, 12.319912, 15.81127, \n 17.356304, 10.330765, 10.917309, 11.82135, 11.22828, 9.395469, \n 12.859789, 15.528548, 18.173409, 4.9549546, 7.068773, 5.830448, \n 2.882567, -0.47524917, -0.3299339, 0.19532575, -0.5605442, -0.05505767,\n -0.22165492, -0.4325593, 0.13398468, -0.34254703, 0.0140561955, -\n 0.31874263, -0.14240773, -0.91078305, 0.69452536, 4.23155, 5.7011547, \n 6.0003905, 6.377488, 20.312622, 13.978043, 11.040157, 11.176402, \n 13.108543, 9.652381, 9.632209, 11.781593, 14.856762, 15.745179, \n 9.215103, 9.966311, 12.876652, 11.37008, 10.591258, 10.1424675, \n 14.367625, 19.73172, 3.84762, 7.103483, 3.7233605, 2.376824, 0.5252924,\n 0.38380843, 0.99321234, -0.46900645, 0.12149067, 0.42257598, 0.0632253,\n -0.6670193, 0.03464376, 0.452787, 0.29236665, -0.017891373, -\n 0.075127214, 
0.9828477, 2.3365817, 5.2860856, 4.3626456, 5.785785, \n 20.600492, 12.966171, 11.047343, 9.063554, 10.454045, 10.47048, \n 9.218836, 11.104739, 15.136548, 14.689532, 10.122101, 9.4212675, \n 11.134829, 8.617753, 9.327736, 11.278048, 13.085438, 18.43459, \n 3.9763334, 5.9072723, 3.9930198, 3.4963682, 0.2813723, 1.0457343, \n 0.31889322, 0.37867522, 1.2037315, -0.47904515, 0.582204, 0.68306595, -\n 0.088313825, -0.107233785, -0.53984404, 0.39104667, 1.1425363, \n 0.51777375, 2.9267018, 5.183814, 4.495046, 4.6087675, 18.143732, \n 12.06679, 8.621597, 7.8071413, 9.6548195, 8.168409, 7.199488, 7.962524,\n 13.9421425, 12.19501, 8.027851, 8.022394, 8.449041, 8.428407, 7.2122917,\n 9.045476, 12.2283, 16.851568, 4.1475954, 5.7582254, 3.977257, 1.8516432,\n -0.32922924, -0.12237206, -0.072756164, -0.6167613, 0.5225413, \n 0.37072095, -0.6287377, -0.7166235, -0.37311992, 0.81874573, 0.17337193,\n 0.17729722, 0.40824133, -0.3479744, 2.9783738, 4.5450144, 3.9617758, \n 4.9179983, 15.7159395, 10.0808935, 7.922992, 6.9472337, 9.000638, \n 7.62391, 6.7539964, 8.514194, 12.004702, 12.731859, 7.173314, 7.301387,\n 7.240425, 7.4015136, 7.516923, 8.6178665, 9.913477, 14.592376, \n 4.5969114, 5.9667635, 2.2334886, 2.1020658, -0.9194653, 0.43381432, -\n 0.74259335, -0.8438142, 0.01724637, -0.6245163, 0.34715256, -0.24820891,\n -0.6074153, -0.066010244, -0.05560958, -0.32758415, 0.3784681, -\n 0.09629097, 2.7877793, 4.203103, 3.26329, 4.44158, 12.650619, 8.000976,\n 5.2695656, 5.8276386, 7.0067124, 6.36843, 5.256174, 7.340733, 9.230904,\n 13.014863, 5.453347, 6.2923303, 6.518343, 6.5802903, 5.615034, 7.000242,\n 8.82858, 11.683347, 3.8504424, 4.365258, 3.2354295, 2.2202947, \n 0.5615039, 0.41533247, 0.21722497, 0.3176445, 0.2709266, -0.2929376, \n 0.090651914, -0.32017383, -0.30647907, 0.15408067, -0.3604456, \n 0.6241022, 0.42943946, 0.30790985, 2.0098479, 3.1669462, 3.8518548, \n 4.0607076, 11.639872, 5.7104745, 7.125849, 5.09103, 5.6111135, 3.951972,\n 4.0356493, 7.02897, 11.430392, 
11.738871, 4.115266, 5.621048, 5.3278913,\n 5.120655, 5.990115, 5.7664003, 5.7767644, 9.013329, 2.9515538, \n 5.6055756, 4.1827626, 1.7799046, -0.21542077, 0.24031225, -0.6824815, -\n 0.6190339, 0.6256524, -0.48574805, 0.09997501, 0.3266095, 0.07135873, -\n 0.3254111, -0.047491744, -0.014772129, -0.38849118, 0.286563, 2.9551277,\n 3.957588, 3.0914695, 3.1707056, 8.462824, 4.728864, 5.0381837, \n 4.0804534, 5.1110387, 4.62399, 4.415538, 6.1308045, 10.654469, \n 10.723281, 4.4972973, 3.627521, 3.8499038, 4.373936, 4.0010695, \n 4.3314424, 6.3237967, 7.2798166, 2.3315697, 4.04032, 3.2531312, \n 2.022844, -0.5356632, 0.52645034, 0.11135009, -0.26490784, 0.39241284, \n 0.13336958, -0.15545088, -0.048340384, 0.6705195, -0.14051451, -\n 0.7617515, 0.11379189, 0.21909207, 0.63809645, 1.5451268, 4.243852, \n 3.2245193, 3.3400161, 6.511011, 4.033045, 2.8604522, 3.6116364, \n 3.5580635, 3.1904101, 2.9593391, 4.813459, 8.871713, 8.875507, 2.922824,\n 2.6118903, 3.5907378, 2.6278322, 3.5242443, 3.0563798, 4.969574, \n 5.5496926, 3.3797112, 3.520721, 2.3572729, 1.7771024, -0.43368375, -\n 0.6439688, -0.56648374, 0.25869504, -0.13318418, -0.25542453, -\n 1.2330167, 0.34627095, 1.5127228, -0.6055812, 0.6232876, 0.23605451, -\n 0.5616809, 0.500821])\n', (3868, 23159), True, 'import numpy as np\n')] |
from django.views.generic import TemplateView
from django.utils.decorators import method_decorator
from django.contrib.auth.decorators import login_required
from django.shortcuts import render
from rest_framework.views import APIView
from rest_framework.response import Response
from rest_framework.permissions import IsAuthenticated
class IndexTemplateView(TemplateView):
    """Serve the single-page-app entry point to authenticated users only."""

    template_name = 'index.html'

    @method_decorator(login_required)
    def get(self, request, *args, **kwargs):
        """Render the index template for a logged-in user."""
        response = render(request, self.template_name)
        return response
class UserView(APIView):
    """Minimal authenticated API endpoint returning a static greeting."""

    permission_classes = (IsAuthenticated, )

    def get(self, request):
        """Respond with a hello payload for the authenticated caller."""
        payload = {'message': 'Hello, World!'}
        return Response(payload)
| [
"django.shortcuts.render",
"rest_framework.response.Response",
"django.utils.decorators.method_decorator"
] | [((415, 447), 'django.utils.decorators.method_decorator', 'method_decorator', (['login_required'], {}), '(login_required)\n', (431, 447), False, 'from django.utils.decorators import method_decorator\n'), ((508, 543), 'django.shortcuts.render', 'render', (['request', 'self.template_name'], {}), '(request, self.template_name)\n', (514, 543), False, 'from django.shortcuts import render\n'), ((707, 724), 'rest_framework.response.Response', 'Response', (['content'], {}), '(content)\n', (715, 724), False, 'from rest_framework.response import Response\n')] |
import sys
sys.path.append("../src/")
from DEVS import CoupledDEVS, AtomicDEVS, RootDEVS, directConnect
from infinity import INFINITY
from collections import defaultdict
from util import allZeroDict, addDict
from statesavers import PickleHighestState as state_saver
from message import NetworkMessage
from messageScheduler import MessageScheduler
class SimulatedCModel(CoupledDEVS):
    """Three SimulatedModel instances wired into a ring: 1 -> 2 -> 3 -> 1.

    The addSubModel order fixes the model ids assigned during finalize(),
    so the creation order below must not change.  The second addSubModel
    argument is presumably the node the model is mapped to — TODO confirm.
    """
    def __init__(self):
        CoupledDEVS.__init__(self, "root")
        self.model1 = self.addSubModel(SimulatedModel(1), 0)
        self.model2 = self.addSubModel(SimulatedModel(2), 1)
        self.model3 = self.addSubModel(SimulatedModel(3), 1)
        # Close the ring: each model's outport feeds the next one's inport.
        ring = ((self.model1, self.model2),
                (self.model2, self.model3),
                (self.model3, self.model1))
        for source, sink in ring:
            self.connectPorts(source.outport, sink.inport)
class ModelState(object):
    """State of a SimulatedModel: a payload value plus a transition log."""

    def __init__(self, value):
        # Current payload; None means "idle / nothing to forward".
        self.value = value
        # Human-readable trace of transitions, appended to by the model.
        self.stateHistory = []
class SimulatedModel(AtomicDEVS):
    """Atomic DEVS model that forwards a payload around the ring.

    Model "1" starts holding the payload 2; all other models start idle
    (value None).
    """
    def __init__(self, name):
        AtomicDEVS.__init__(self, str(name))
        self.inport = self.addInPort("inport")
        self.outport = self.addOutPort("outport")
        if name == 1:
            self.state = ModelState(2)
        else:
            self.state = ModelState(None)

    def intTransition(self):
        # After emitting, become idle again and log the transition time.
        #print("INTERNAL TRANSITION @ %s, %s" % (self.getModelFullName(), self.timeLast))
        self.state.value = None
        self.state.stateHistory.append("INT " + str(self.timeLast))
        print("HISTORY of %s: %s" % (self.getModelFullName(), self.state.stateHistory))
        return self.state

    def extTransition(self, inputs):
        # Adopt the first incoming payload and log the transition time.
        #print("EXTERNAL TRANSITION @ %s, %s" % (self.getModelFullName(), self.timeLast))
        self.state.value = inputs[self.inport][0]
        self.state.stateHistory.append("EXT " + str(self.timeLast))
        print("HISTORY of %s: %s" % (self.getModelFullName(), self.state.stateHistory))
        return self.state

    def timeAdvance(self):
        # Holding a value: emit quickly (0.1); idle: wake up every 1.0.
        if self.state.value is not None:
            return 0.1
        else:
            return 1.0
            #return INFINITY

    def outputFnc(self):
        # Emit the current payload on the single output port.
        return {self.outport: [self.state.value]}
class Cluster(CoupledDEVS):
    """Coupled model of `nodes` simulation kernels, fully meshed via Network links.

    network[j][i] carries traffic from node j to node i (including j == i).
    """
    def __init__(self, nodes):
        CoupledDEVS.__init__(self, "Cluster")
        self.nodes = [self.addSubModel(Node(i, nodes)) for i in range(nodes)]
        # One dedicated Network (delay line) per directed node pair.
        self.network = [[self.addSubModel(Network("%i-->%i" % (j, i))) for i in range(nodes)] for j in range(nodes)]
        for startid in range(nodes):
            for endid in range(nodes):
                self.connectPorts(self.nodes[startid].outports[endid], self.network[startid][endid].inport)
                self.connectPorts(self.network[startid][endid].outport, self.nodes[endid].inports[startid])
class NodeState(object):
    """Mutable per-kernel Time Warp state for one simulated Node.

    Holds the local model partition, the message/output queues, the Mattern
    GVT bookkeeping vectors and the model-relocation data.
    """
    def __init__(self, name, totalsize):
        self.simulationtime = (0, 0)
        self.prevtime = (0, 0)
        self.terminationtime = (3, 0)
        model = SimulatedCModel()
        self.model_ids = []
        locations = defaultdict(list)
        model.finalize(name="", model_counter=0, model_ids=self.model_ids, locations=locations, selectHierarchy=[])
        if isinstance(model, CoupledDEVS):
            model.componentSet = directConnect(model.componentSet, True)
        # destinations[model_id] is the model object when it is local to this
        # kernel, otherwise the integer id of the kernel hosting it.
        self.destinations = [None] * len(model.componentSet)
        self.kernels = len(locations.keys())
        local = []
        for m in model.componentSet:
            self.destinations[m.model_id] = m if m.location == name else m.location
            if m.location == name:
                m.timeNext = (m.timeAdvance(), 1)
                m.timeLast = (0, 0)
                m.oldStates = [state_saver(m.timeLast, m.timeNext, m.state, 0.0, {}, 0.0)]
                local.append(m)
        self.model = RootDEVS(local, model.componentSet, ("schedulerML", "SchedulerML"))
        self.model.setScheduler(self.model.schedulerType)
        self.model.setTimeNext()
        self.externalQueue = {}
        self.color = False
        self.sendmsgcounter = 0
        self.outputQueue = []
        self.messageScheduler = MessageScheduler()
        # Mattern message-count vectors, one dict per color (0..3).
        self.V = [{}, {}, {}, {}]
        self.Tmin = float('inf')
        self.blockOutgoing = None
        self.run_GVT = 1.0
        self.gvt_check = None
        self.GVT = -float('inf')
        # FIX: these three attributes were previously created lazily by
        # Node.setGVT_local / Node.findAndPerformRelocations, so any earlier
        # read (e.g. Node.notifyLocked or Node.migrateTo) raised
        # AttributeError.  Initialize them eagerly instead.
        self.oldGVT = -float('inf')
        self.activities = {}
        self.locked_kernels = set()
        self.relocation_rules = None
        self.kernels_to_relocate = None
        from manualRelocator import ManualRelocator
        self.relocator = ManualRelocator()
        self.relocator.addDirective(1.0, 1, 0)
        self.locked = False
        self.accumulator = {}

    def copy(self):
        """Return a deep copy via pickle, then re-share the unpicklable parts."""
        #TODO keep this up to date
        import cPickle
        a = cPickle.loads(cPickle.dumps(self))
        a.model = self.model
        a.model_ids = list(self.model_ids)
        a.destinations = list(self.destinations)
        a.externalQueue = dict(self.externalQueue)
        a.outputQueue = list(self.outputQueue)
        return a

    def __getstate__(self):
        """Pickle every non-callable, non-dunder attribute."""
        retdict = {}
        for i in dir(self):
            if getattr(self, i).__class__.__name__ in ["instancemethod", "method-wrapper", "builtin_function_or_method"]:
                continue
            elif str(i).startswith("__"):
                continue
            retdict[str(i)] = getattr(self, i)
        return retdict

    def __setstate__(self, inp):
        """Restore all pickled attributes verbatim."""
        for i in inp:
            setattr(self, i, inp[i])
class Node(AtomicDEVS):
    def __init__(self, name, totalsize):
        """Create kernel `name` out of `totalsize` kernels, with one port pair per peer."""
        AtomicDEVS.__init__(self, "Node_%i" % name)
        self.nodename = name
        self.totalsize = totalsize
        # inports[i]/outports[i] connect this node to kernel i (including itself).
        self.inports = [self.addInPort("inport_%i" % i) for i in range(totalsize)]
        self.outports = [self.addOutPort("outport_%i" % i) for i in range(totalsize)]
        self.state = NodeState(name, totalsize)
    def genUUID(self):
        """Return a message id unique within this node: "<nodename>-<counter>"."""
        self.state.sendmsgcounter += 1
        return "%s-%s" % (self.nodename, self.state.sendmsgcounter)
    def send(self, model_id, timestamp, content):
        """Queue a NetworkMessage for `model_id`'s host kernel and log it for rollback."""
        # After a revert, suppress regenerating output that already exists at
        # exactly this timestamp (see revert_local's blockOutgoing).
        if self.state.blockOutgoing == timestamp:
            return
        msg = NetworkMessage(timestamp, content, self.genUUID(), self.state.color, model_id)
        self.state.outputQueue.append(msg)
        self.notifySend(self.state.destinations[model_id], timestamp[0], msg.color)
        self.state.externalQueue.setdefault(self.outports[self.state.destinations[model_id]], []).append(msg)
    def processMessage(self, clock):
        """Deliver all queued input messages at the earliest message time.

        Marks the receiving models for external transition and returns the
        (possibly lowered) simulation clock to transition at.
        """
        try:
            message = self.state.messageScheduler.readFirst()
        except IndexError:
            # No input messages
            return clock
        if message.timestamp < clock:
            # The message is sent before the timenext, so update the clock
            clock = message.timestamp
        try:
            # Consume every message that (approximately) shares this timestamp.
            while (abs(clock[0] - message.timestamp[0]) < 1e-6 and (clock[1] == message.timestamp[1])):
                print("Process message with UUID " + str(message.uuid))
                for port in message.content:
                    port.hostDEVS.myInput.setdefault(port, []).extend(message.content[port])
                    self.state.transitioning[port.hostDEVS] |= 2
                self.state.messageScheduler.removeFirst()
                message = self.state.messageScheduler.readFirst()
        except IndexError:
            # At the end of the scheduler, so we are done
            pass
        return clock
def receiveControl(self, msg, first=False):
self.state.controlmsg = msg
m_clock = msg[0]
m_send = msg[1]
waiting_vector = msg[2]
accumulating_vector = msg[3]
color = self.state.color
finished = (self.nodename == 0 and not first and (color == 0 or color == 2))
if self.nodename == 0 and not first:
if not allZeroDict(waiting_vector):
raise DEVSException("GVT bug detected")
waiting_vector = accumulating_vector
accumulating_vector = {}
if finished:
from math import floor
GVT = floor(min(m_clock, m_send))
self.state.accumulator = waiting_vector
self.state.externalQueue.setdefault(self.outports[self.nodename], []).append(("setGVT_local", [GVT, [], self.state.relocator.useLastStateOnly()]))
return None
else:
return self.tryIfOk(color, waiting_vector, accumulating_vector)
"""
if self.state.color == 0 or self.state.color == 2:
# We are currently white, about to turn red
if self.nodename == 0 and not first:
# The controller received the message that went around completely
# The count != check is needed to distinguish between init and finish
# So we are finished now, don't update the color here!!
if not allZeroDict(count):
raise DEVSException("GVT bug detected")
# Perform some rounding to prevent slight deviations due to floating point errors
from math import floor
GVT = floor(min(m_clock, m_send))
print("Found GVT " + str(GVT))
# Do this with a proxy to make it async
self.state.externalQueue.setdefault(self.outports[self.nodename], []).append(("setGVT_local", [GVT, [], self.state.relocator.useLastStateOnly()]))
else:
# Either at the controller at init
# or just a normal node that is about to turn red
self.state.color = (self.state.color + 1) % 4
addDict(count, self.state.V[v])
self.state.V[v] = {}
msg = [min(m_clock, self.state.prevtime[0]), min(m_send, self.state.Tmin), count]
self.state.externalQueue.setdefault(self.outports[(self.nodename+1)%self.totalsize], []).append(("receiveControl", [msg]))
return None
elif self.state.color == 1 or self.state.color == 3:
# We are currently red, about to turn white
# First wait for all messages in the medium
return self.tryIfOk(v, count)
"""
    def findAndPerformRelocations(self, GVT, activities, horizon):
        """Ask the relocator for model migrations and lock the kernels involved.

        No-op or non-relocatable moves are filtered out; when nothing remains,
        the GVT timer is simply restarted.
        """
        relocate = self.state.relocator.getRelocations(GVT, activities, horizon)
        relocate = {key: relocate[key] for key in relocate if self.state.model_ids[key].location != relocate[key] and self.state.model_ids[key].relocatable}
        if not relocate:
            self.state.run_GVT = 1.0
            return
        kernels = {}
        self.state.locked_kernels = set()
        relocation_rules = {}
        for model_id in relocate:
            source = self.state.model_ids[model_id].location
            destination = relocate[model_id]
            if source == destination:
                continue
            # Reference-count how many pending moves touch each kernel; the
            # first move involving a kernel triggers its migration lock.
            kernels[source] = kernels.get(source, 0) + 1
            kernels[destination] = kernels.get(destination, 0) + 1
            if kernels[source] == 1:
                # We are the first to lock it, so actually send the lock
                self.state.externalQueue.setdefault(self.outports[source], []).append(("requestMigrationLock", []))
                #self.getProxy(source).requestMigrationLock()
            if kernels[destination] == 1:
                # We are the first to lock it, so actually send the lock
                self.state.externalQueue.setdefault(self.outports[destination], []).append(("requestMigrationLock", []))
                #self.getProxy(destination).requestMigrationLock()
            relocation_rules.setdefault((source, destination), set()).add(model_id)
        self.performRelocations(relocation_rules, kernels)
    def performRelocations(self, relocation_rules, kernels):
        """Fire every relocation whose source AND destination kernels are locked.

        Unfinished rules are stashed on the state and retried from
        intTransition once more lock notifications arrive.
        """
        # NOTE(review): deleting entries while iterating .keys() relies on
        # Python 2 semantics where keys() returns a list copy — confirm.
        for source, destination in relocation_rules.keys():
            if source in self.state.locked_kernels and destination in self.state.locked_kernels:
                models = relocation_rules[(source, destination)]
                unlock = []
                # Kernels involved in only this rule can be unlocked afterwards.
                if kernels[source] == 1:
                    unlock.append(source)
                if kernels[destination] == 1:
                    unlock.append(destination)
                self.state.externalQueue.setdefault(self.outports[source], []).append(("migrateTo", [destination, models, unlock]))
                #self.getProxy(source).migrateTo(destination, models)
                del relocation_rules[(source, destination)]
                kernels[source] -= len(models)
                kernels[destination] -= len(models)
        if relocation_rules:
            # Still something to do
            self.state.relocation_rules = relocation_rules
            self.state.kernels_to_relocate = kernels
        else:
            # At the end, so a normal return
            self.state.relocation_rules = None
            self.state.kernels_to_relocate = None
def setGVT_local(self, GVT, activities, lastStateOnly):
if GVT < self.state.GVT:
raise DEVSException("GVT cannot decrease from " + str(self.GVT) + " to " + str(GVT) + "!")
if GVT == self.state.GVT:
# At the controller too
# Restart the GVT algorithm within 1 time unit
if activities:
if self.state.oldGVT == -float('inf'):
self.oldGVT = 0.
horizon = self.state.GVT - self.state.oldGVT
self.findAndPerformRelocations(GVT, activities, horizon)
else:
self.state.oldGVT = self.state.GVT
self.state.GVT = GVT
nqueue = []
self.state.messageScheduler.cleanup((GVT, 1))
#self.performActions(GVT)
found = False
for index in range(len(self.state.outputQueue)):
if self.state.outputQueue[index].timestamp[0] >= GVT:
found = True
self.state.outputQueue = self.state.outputQueue[index:]
break
if not found:
self.state.outputQueue = []
self.state.activities = {}
self.state.model.setGVT(GVT, self.state.activities, lastStateOnly)
if lastStateOnly:
activitySum = 0
else:
activitySum = sum(self.state.activities.values())
activities.append((self.name, activitySum))
self.state.externalQueue.setdefault(self.outports[(self.nodename+1)%self.totalsize], []).append(("setGVT_local", [GVT, activities, lastStateOnly]))
    def tryIfOk(self, color, waiting_vector, accumulating_vector):
        """Forward the GVT token once no previous-color messages are in transit to us.

        Returns False after forwarding the token (and advancing our color), or
        the (color, vectors) tuple so the caller can retry later.
        """
        prevcolor = 3 if color == 0 else color - 1
        if self.state.V[prevcolor].get(self.nodename, 0) + self.state.controlmsg[2].get(self.nodename, 0) <= 0:
            # Everything previously sent to us has arrived: fold our counters
            # into the token's vectors and pass it on.
            addDict(waiting_vector, self.state.V[prevcolor])
            addDict(accumulating_vector, self.state.V[color])
            self.state.V[prevcolor] = {}
            self.state.V[color] = {}
            ntime = self.state.prevtime[0] if self.nodename == 0 else min(self.state.controlmsg[0], self.state.prevtime[0])
            msg = [ntime, min(self.state.controlmsg[1], self.state.Tmin), waiting_vector, accumulating_vector]
            self.state.Tmin = float('inf')
            self.state.externalQueue.setdefault(self.outports[(self.nodename+1)%self.totalsize], []).append(("receiveControl", [msg]))
            self.state.color = (self.state.color + 1) % 4
            return False
        else:
            return color, waiting_vector, accumulating_vector
    def activateModel(self, model_id, currentState):
        """Take ownership of a migrated model and schedule it locally.

        `currentState` is the (timeLast, timeNext, state) triple shipped by
        the previous owner.
        """
        new_model = self.state.model_ids[model_id]
        old_location = new_model.location
        new_model.location = self.nodename
        self.state.model.componentSet.append(new_model)
        self.state.model.local_model_ids.add(new_model.model_id)
        new_model.timeLast = currentState[0]
        new_model.timeNext = currentState[1]
        new_model.state = currentState[2]
        new_model.oldStates = [state_saver(new_model.timeLast, new_model.timeNext, new_model.state, 0.0, {}, 0.0)]
        # It is a new model, so add it to the scheduler too
        self.state.model.scheduler.schedule(new_model)
        self.state.destinations[model_id] = new_model
        self.state.model.setTimeNext()
        self.state.activities[model_id] = 0.0
    def messageTransfer(self, extraction):
        """Absorb the pending-message extraction shipped along with a migration."""
        self.state.messageScheduler.insert(extraction, self.state.model_ids)
    def migrateTo(self, destination, model_ids, unlock):
        """Ship the given local models to kernel `destination`.

        Notifies every other kernel of the new location, transfers pending
        messages, hands over the model states, and releases the migration
        locks listed in `unlock`.
        """
        # Assumes that the simlock is already acquired
        # Make sure that the model that we are migrating is local here
        #assert info("Migrating " + str(model_ids) + " to " + str(destination))
        models = set()
        for model_id in model_ids:
            if isinstance(self.state.destinations[model_id], int):
                raise DEVSException("Cannot migrate model that is not local to the source!")
            if not self.state.destinations[model_id].relocatable:
                raise DEVSException("Model %s was marked as fixed and is therefore not allowed to be relocated" % self.state.destinations[model_id].getModelFullName())
            models.add(self.state.destinations[model_id])
        destination = int(destination)
        # NOTE(review): self.name looks like the string "Node_<i>" while
        # destination is an int, so this early-out (and the `i != self.name`
        # test below) can never match — self.nodename appears to be the
        # intended attribute. TODO confirm against AtomicDEVS.__init__.
        if destination == self.name:
            # Model is already there...
            return
        #assert info("Migration approved of %s from node %d to node %d" % (model_ids, self.name, destination))
        for model in models:
            # All models are gone here, so remove them from the scheduler
            self.state.model.scheduler.unschedule(model)
        for i in range(self.state.kernels):
            if i != destination and i != self.name:
                self.state.externalQueue.setdefault(self.outports[i], []).append(("notifyMigration", [model_ids, destination]))
                #self.getProxy(i).notifyMigration(model_ids, destination)
        self.state.externalQueue.setdefault(self.outports[destination], []).append(("messageTransfer", [self.state.messageScheduler.extract(model_ids)]))
        #remote.messageTransfer(self.inputScheduler.extract(model_ids))
        for model in models:
            # No need to ask the new node whether or not there are specific nodes that also have to be informed
            self.state.externalQueue.setdefault(self.outports[destination], []).append(("activateModel", [model.model_id, (model.timeLast, model.timeNext, model.state)]))
            #remote.activateModel(model.model_id, (model.timeLast, model.timeNext, model.state))
            # Delete our representation of the model
            model.state = None
            model.oldStates = []
            del self.state.activities[model.model_id]
        for m in unlock:
            self.state.externalQueue.setdefault(self.outports[m], []).append(("migrationUnlock", []))
        # Remove the model from the componentSet of the RootDEVS
        self.state.model.componentSet = [m for m in self.state.model.componentSet if m not in models]
        for model_id in model_ids:
            self.state.model.local_model_ids.remove(model_id)
            self.state.destinations[model_id] = destination
            self.state.model_ids[model_id].location = destination
        # Now update the timeNext and timeLast values here
        self.state.model.setTimeNext()
    def notifyMigration(self, model_ids, destination):
        """Third-party update: record that `model_ids` now live on `destination`."""
        if destination == self.nodename:
            # No need to notify ourselves, simply here for safety as it shouldn't be called
            return
        for model_id in model_ids:
            self.state.destinations[model_id] = destination
            self.state.model_ids[model_id].location = destination
    def requestMigrationLock(self):
        """Lock this kernel for migration: roll back to the GVT and ack to node 0."""
        self.state.locked = True
        # Rolling back to the GVT guarantees no speculative state survives
        # across the migration point.
        self.revert_local((self.state.GVT, 0))
        self.state.externalQueue.setdefault(self.outports[0], []).append(("notifyLocked", [self.nodename]))
    def migrationUnlock(self):
        """Release the migration lock taken by requestMigrationLock."""
        self.state.locked = False
    def notifyLocked(self, name):
        """Controller-side bookkeeping: kernel `name` confirmed its migration lock."""
        self.state.locked_kernels.add(name)
    def intTransition(self):
        """Run one simulation step of this kernel.

        Starts/continues the GVT algorithm and pending relocations when due,
        then (unless locked) delivers input messages, collects and routes
        model outputs, and executes the internal/external/confluent
        transitions of all imminent local models.
        """
        # Just do some processing
        self.state.run_GVT -= self.timeAdvance()
        self.state.externalQueue = {}
        self.state.transitioning = defaultdict(int)
        if self.state.run_GVT <= 0 and self.nodename == 0:
            # Start the GVT algorithm
            self.receiveControl([float('inf'), float('inf'), self.state.accumulator, {}], True)
            self.state.run_GVT = float('inf')
        if self.state.gvt_check is not None:
            # A previous GVT token could not be forwarded yet; retry now.
            rv = self.tryIfOk(*self.state.gvt_check)
            if not isinstance(rv, tuple):
                self.state.gvt_check = None
        if self.state.relocation_rules is not None:
            self.performRelocations(self.state.relocation_rules, self.state.kernels_to_relocate)
            return self.state
        if self.state.locked:
            # Migration lock held: no simulation progress allowed.
            return self.state
        ctime = self.processMessage(self.state.model.timeNext)
        if ctime > self.state.terminationtime:
            self.state.simulationtime = ctime
            return self.state
        outputs = {}
        transitioning = self.state.model.scheduler.getImminent(ctime)
        for i in transitioning:
            outputs.update(i.outputFnc())
            self.state.transitioning[i] |= 1
        remotes = {}
        # Route outputs: local destinations get their input directly, remote
        # ones are batched per model id and sent over the network.
        for i in outputs:
            for dest in i.outLine:
                destADEVS = dest.hostDEVS
                if destADEVS.location == self.nodename:
                    destADEVS.myInput.setdefault(dest, []).extend(outputs[i])
                    self.state.transitioning[destADEVS] |= 2
                else:
                    remotes.setdefault(destADEVS.model_id, {}).setdefault(dest.port_id, []).extend(outputs[i])
        for destination in remotes:
            self.send(destination, ctime, remotes[destination])
        # Execute transitions: bit 1 = internal, bit 2 = external, 3 = confluent.
        for aDEVS in self.state.transitioning:
            t = self.state.transitioning[aDEVS]
            aDEVS.timeLast = ctime
            activityTrackingPreValue = aDEVS.preActivityCalculation()
            if t == 1:
                aDEVS.state = aDEVS.intTransition()
            elif t == 2:
                aDEVS.elapsed = ctime[0] - aDEVS.timeLast[0]
                aDEVS.state = aDEVS.extTransition(aDEVS.myInput)
            elif t == 3:
                aDEVS.state = aDEVS.confTransition(aDEVS.myInput)
            ta = aDEVS.timeAdvance()
            aDEVS.timeNext = (aDEVS.timeLast[0] + ta, 1 if ta != 0 else aDEVS.timeLast[1] + 1)
            aDEVS.oldStates.append(state_saver(aDEVS.timeLast, aDEVS.timeNext, aDEVS.state, aDEVS.postActivityCalculation(activityTrackingPreValue), {}, 0))
            aDEVS.myInput = {}
        self.state.model.scheduler.massReschedule(self.state.transitioning.keys())
        self.state.prevtime = ctime
        self.state.model.setTimeNext()
        self.state.simulationtime = self.state.model.timeNext
        return self.state
    def notifyReceive(self, color):
        """Mattern bookkeeping: one message of `color` addressed to us arrived."""
        self.state.V[color][self.nodename] = self.state.V[color].get(self.nodename, 0) - 1
    def notifySend(self, destination, timestamp, color):
        """Mattern bookkeeping: one message of `color` was sent to `destination`."""
        self.state.V[color][destination] = self.state.V[color].get(destination, 0) + 1
        if color == 1 or color == 3:
            # "Red" messages may still be in transit during the GVT round;
            # remember the earliest such send time.
            self.state.Tmin = min(self.state.Tmin, timestamp)
    def revert_local(self, time):
        """Roll this kernel back to simulation time `time` (Time Warp revert).

        Restores model states, rewinds the message scheduler, and sends
        anti-messages for every output generated after `time`.
        """
        self.state.messageScheduler.revert(time)
        self.state.model.revert(time, False)
        self.state.model.setTimeNext()
        self.state.prevtime = time
        self.state.simulationtime = (0, 0)
        # Invalidate all output messages after or at time
        end = -1
        unschedules = {}
        unschedules_mintime = {}
        print("Reverting to time " + str(time))
        for index, value in enumerate(self.state.outputQueue):
            # Do not invalidate messages at this time itself, as they are processed in this time step and not generated in this timestep
            if value.timestamp > time:
                model_id = value.destination
                unschedules_mintime[model_id] = min(unschedules_mintime.get(model_id, (float('inf'), 0)), value.timestamp)
                unschedules.setdefault(model_id, []).append(value.uuid)
            else:
                #assert debug("NOT invalidating " + str(value.uuid))
                end = index
        self.state.outputQueue = self.state.outputQueue[:end+1]
        try:
            self.state.blockOutgoing = self.state.outputQueue[-1].timestamp
        except IndexError:
            self.state.blockOutgoing = None
        # Don't need the Vlock here, as we already have it
        for model_id in unschedules:
            dest_kernel = self.state.destinations[model_id]
            if not isinstance(dest_kernel, int):
                # NOTE(review): the `continue` below is unreachable after the raise.
                raise DEVSException("Impossible")
                continue
            mintime = unschedules_mintime[model_id]
            # Assume we have the simlock already
            self.state.externalQueue.setdefault(self.outports[dest_kernel], []).append(("receiveAntiMessage", [mintime, model_id, unschedules[model_id], self.state.color]))
            self.notifySend(dest_kernel, mintime[0], self.state.color)
    def extTransition(self, inputs):
        """Consume incoming network traffic.

        NetworkMessages are either forwarded to the model's current host or
        scheduled locally (rolling back first if they arrive in our past);
        (action, args) tuples are dispatched to the named handler method.
        """
        self.state.run_GVT -= self.elapsed
        for port in inputs:
            for msg in inputs[port]:
                if isinstance(msg, NetworkMessage):
                    self.notifyReceive(msg.color)
                    if msg.destination not in self.state.model.local_model_ids:
                        # The target model migrated away: re-route the message.
                        print("FORWARD MSG " + str(msg.uuid))
                        dest = self.state.destinations[msg.destination]
                        msg.color = self.state.color
                        self.notifySend(dest, msg.timestamp[0], msg.color)
                        self.state.externalQueue.setdefault(self.outports[dest], []).append(msg)
                        continue
                    # Translate port ids back into the model's port objects.
                    msg.content = {self.state.model_ids[msg.destination].ports[port]: msg.content[port] for port in msg.content}
                    if msg.timestamp <= self.state.prevtime:
                        # Straggler message: roll back before scheduling it.
                        self.revert_local(msg.timestamp)
                    self.state.messageScheduler.schedule(msg)
                elif isinstance(msg, tuple):
                    # Other kind of message
                    action, args = msg
                    if action == "receiveControl":
                        rv = getattr(self, action)(*args)
                        if isinstance(rv, tuple):
                            # Try again later
                            self.state.gvt_check = rv
                        else:
                            self.state.gvt_check = None
                    else:
                        getattr(self, action)(*args)
        # Put the return values in a queue if necessary
        self.state.simulationtime = (0, 0)
        return self.state
def receiveAntiMessage(self, time, model_id, uuids, color):
self.notifyReceive(color)
print("Received anti message for uuids " + str(uuids))
if model_id not in self.state.model.local_model_ids and model_id is not None:
print("FORWARD ANTIMSG")
self.state.externalQueue.setdefault(self.outports[self.state.destinations[model_id]], []).append(("receiveAntiMessages", [mintime, model_id, uuids, self.state.color]))
self.notifySend(self.state.destinations[model_id], mintime[0], self.state.color)
return
if time <= self.state.prevtime:
self.revert_local(time)
self.state.messageScheduler.massUnschedule(uuids)
    def timeAdvance(self):
        """Advance fast while output is pending, slowly while still simulating."""
        if self.state.externalQueue:
            # Flush pending network output almost immediately.
            return 0.01
        elif self.state.simulationtime < self.state.terminationtime:
            return 0.1
        else:
            return INFINITY
    def outputFnc(self):
        """Emit every queued outgoing message on its destination port."""
        return self.state.externalQueue
class NetworkState(object):
    """State of a Network link: the messages currently in transit."""

    def __init__(self):
        # Messages travelling over this link, in arrival order.
        self.lst = []

    def copy(self):
        """Return a NetworkState holding a shallow copy of the in-transit list."""
        duplicate = NetworkState()
        duplicate.lst = list(self.lst)
        return duplicate
class Network(AtomicDEVS):
    """A point-to-point link delivering buffered messages after a fixed delay."""

    def __init__(self, name):
        AtomicDEVS.__init__(self, name)
        self.state = NetworkState()
        self.inport = self.addInPort("inport")
        self.outport = self.addOutPort("outport")

    def intTransition(self):
        """Everything buffered has just been delivered; clear the link."""
        self.state.lst = []
        return self.state

    def extTransition(self, inputs):
        """Buffer newly arriving messages behind whatever is already in transit."""
        self.state.lst = self.state.lst + inputs[self.inport]
        return self.state

    def timeAdvance(self):
        """Deliver after 0.1 time units when loaded, otherwise stay passive."""
        return 0.1 if self.state.lst else INFINITY

    def outputFnc(self):
        """Emit all in-transit messages on the output port."""
        return {self.outport: self.state.lst}
| [
"statesavers.PickleHighestState",
"DEVS.CoupledDEVS.__init__",
"DEVS.AtomicDEVS.__init__",
"manualRelocator.ManualRelocator",
"cPickle.dumps",
"collections.defaultdict",
"util.addDict",
"messageScheduler.MessageScheduler",
"util.allZeroDict",
"DEVS.directConnect",
"sys.path.append",
"DEVS.Root... | [((11, 37), 'sys.path.append', 'sys.path.append', (['"""../src/"""'], {}), "('../src/')\n", (26, 37), False, 'import sys\n'), ((417, 451), 'DEVS.CoupledDEVS.__init__', 'CoupledDEVS.__init__', (['self', '"""root"""'], {}), "(self, 'root')\n", (437, 451), False, 'from DEVS import CoupledDEVS, AtomicDEVS, RootDEVS, directConnect\n'), ((2272, 2309), 'DEVS.CoupledDEVS.__init__', 'CoupledDEVS.__init__', (['self', '"""Cluster"""'], {}), "(self, 'Cluster')\n", (2292, 2309), False, 'from DEVS import CoupledDEVS, AtomicDEVS, RootDEVS, directConnect\n'), ((3053, 3070), 'collections.defaultdict', 'defaultdict', (['list'], {}), '(list)\n', (3064, 3070), False, 'from collections import defaultdict\n'), ((3814, 3881), 'DEVS.RootDEVS', 'RootDEVS', (['local', 'model.componentSet', "('schedulerML', 'SchedulerML')"], {}), "(local, model.componentSet, ('schedulerML', 'SchedulerML'))\n", (3822, 3881), False, 'from DEVS import CoupledDEVS, AtomicDEVS, RootDEVS, directConnect\n'), ((4126, 4144), 'messageScheduler.MessageScheduler', 'MessageScheduler', ([], {}), '()\n', (4142, 4144), False, 'from messageScheduler import MessageScheduler\n'), ((4490, 4507), 'manualRelocator.ManualRelocator', 'ManualRelocator', ([], {}), '()\n', (4505, 4507), False, 'from manualRelocator import ManualRelocator\n'), ((5504, 5547), 'DEVS.AtomicDEVS.__init__', 'AtomicDEVS.__init__', (['self', "('Node_%i' % name)"], {}), "(self, 'Node_%i' % name)\n", (5523, 5547), False, 'from DEVS import CoupledDEVS, AtomicDEVS, RootDEVS, directConnect\n'), ((20345, 20361), 'collections.defaultdict', 'defaultdict', (['int'], {}), '(int)\n', (20356, 20361), False, 'from collections import defaultdict\n'), ((28250, 28281), 'DEVS.AtomicDEVS.__init__', 'AtomicDEVS.__init__', (['self', 'name'], {}), '(self, name)\n', (28269, 28281), False, 'from DEVS import CoupledDEVS, AtomicDEVS, RootDEVS, directConnect\n'), ((3263, 3302), 'DEVS.directConnect', 'directConnect', (['model.componentSet', '(True)'], {}), 
'(model.componentSet, True)\n', (3276, 3302), False, 'from DEVS import CoupledDEVS, AtomicDEVS, RootDEVS, directConnect\n'), ((4718, 4737), 'cPickle.dumps', 'cPickle.dumps', (['self'], {}), '(self)\n', (4731, 4737), False, 'import cPickle\n'), ((14815, 14863), 'util.addDict', 'addDict', (['waiting_vector', 'self.state.V[prevcolor]'], {}), '(waiting_vector, self.state.V[prevcolor])\n', (14822, 14863), False, 'from util import allZeroDict, addDict\n'), ((14876, 14925), 'util.addDict', 'addDict', (['accumulating_vector', 'self.state.V[color]'], {}), '(accumulating_vector, self.state.V[color])\n', (14883, 14925), False, 'from util import allZeroDict, addDict\n'), ((16058, 16145), 'statesavers.PickleHighestState', 'state_saver', (['new_model.timeLast', 'new_model.timeNext', 'new_model.state', '(0.0)', '{}', '(0.0)'], {}), '(new_model.timeLast, new_model.timeNext, new_model.state, 0.0, {\n }, 0.0)\n', (16069, 16145), True, 'from statesavers import PickleHighestState as state_saver\n'), ((7783, 7810), 'util.allZeroDict', 'allZeroDict', (['waiting_vector'], {}), '(waiting_vector)\n', (7794, 7810), False, 'from util import allZeroDict, addDict\n'), ((3701, 3759), 'statesavers.PickleHighestState', 'state_saver', (['m.timeLast', 'm.timeNext', 'm.state', '(0.0)', '{}', '(0.0)'], {}), '(m.timeLast, m.timeNext, m.state, 0.0, {}, 0.0)\n', (3712, 3759), True, 'from statesavers import PickleHighestState as state_saver\n')] |
import torch
import torch.nn as nn
class LinearRegressionModel(nn.Module):
    """A single affine layer: y = W @ x + b."""

    def __init__(self, input_dim, output_dim):
        super().__init__()
        # Attribute name 'Linear' is kept so existing checkpoints keep loading.
        self.Linear = nn.Linear(input_dim, output_dim)

    def forward(self, x):
        """Apply the affine map to a batch of inputs."""
        return self.Linear(x)
| [
"torch.nn.Linear"
] | [((180, 212), 'torch.nn.Linear', 'nn.Linear', (['input_dim', 'output_dim'], {}), '(input_dim, output_dim)\n', (189, 212), True, 'import torch.nn as nn\n')] |
import logging
import uuid
from typing import Any
import pytest
import requests
import test_helpers
from dcos_test_utils import marathon
from dcos_test_utils.dcos_api import DcosApiSession
__maintainer__ = 'kensipe'
__contact__ = '<EMAIL>'
log = logging.getLogger(__name__)
def _fetch_test_server_json(host: str, port: int, path: str) -> dict:
    """GET http://<host>:<port>/<path> from the test server and return the JSON body.

    Raises Exception with the status line and response text on any non-200
    reply (keeps the original error message format).
    """
    r = requests.get('http://{}:{}/{}'.format(host, port, path))
    if r.status_code != 200:
        msg = "Test server replied with non-200 reply: '{0} {1}. "
        msg += "Detailed explanation of the problem: {2}"
        raise Exception(msg.format(r.status_code, r.reason, r.text))
    return r.json()


def deploy_test_app_and_check(dcos_api_session: DcosApiSession, app: dict, test_uuid: str) -> None:
    """This method deploys the test server app and then
    pings its /operating_environment endpoint to retrieve the container
    user running the task.

    In a mesos container, this will be the marathon user
    In a docker container this user comes from the USER setting
    from the app's Dockerfile, which, for the test application
    is the default, root

    The duplicated request/error-handling code has been factored out into
    _fetch_test_server_json.
    """
    expanded_config = test_helpers.get_expanded_config()
    default_os_user = 'nobody' if expanded_config.get('security') == 'strict' else 'root'
    if 'container' in app and app['container']['type'] == 'DOCKER':
        marathon_user = 'root'
    else:
        marathon_user = app.get('user', default_os_user)
    with dcos_api_session.marathon.deploy_and_cleanup(app):
        service_points = dcos_api_session.marathon.get_app_service_endpoints(app['id'])
        host, port = service_points[0].host, service_points[0].port
        # The app echoes back the UUID it was launched with.
        r_data = _fetch_test_server_json(host, port, 'test_uuid')
        assert r_data['test_uuid'] == test_uuid
        # Verify the container user matches expectations for the runtime.
        json_uid = _fetch_test_server_json(host, port, 'operating_environment')['uid']
        if marathon_user == 'root':
            assert json_uid == 0, "App running as root should have uid 0."
        else:
            assert json_uid != 0, ("App running as {} should not have uid 0.".format(marathon_user))
@pytest.mark.first
def test_docker_image_availablity() -> None:
    """Fail fast if the Docker image used by later tests cannot be pulled."""
    assert test_helpers.docker_pull_image("debian:stretch-slim"), "docker pull failed for image used in the test"
def test_if_marathon_app_can_be_deployed(dcos_api_session: DcosApiSession) -> None:
    """Marathon app deployment integration test

    This test verifies that a marathon app can be deployed, and that service points
    returned by Marathon indeed point to the app that was deployed.

    The application being deployed is a simple http server written in python.
    Please see test_server.py for more details.

    This is done by assigning an unique UUID to each app and passing it to the
    docker container as an env variable. After successful deployment, the
    "GET /test_uuid" request is issued to the app. If the returned UUID matches
    the one assigned to test - test succeeds.
    """
    deploy_test_app_and_check(dcos_api_session, *test_helpers.marathon_test_app())
def test_if_docker_app_can_be_deployed(dcos_api_session: DcosApiSession) -> None:
    """Marathon app inside docker deployment integration test.

    Verifies that a marathon app inside of a docker daemon container can be
    deployed and accessed as expected.
    """
    deploy_test_app_and_check(
        dcos_api_session,
        *test_helpers.marathon_test_app(
            network=marathon.Network.BRIDGE,
            container_type=marathon.Container.DOCKER,
            container_port=9080))
@pytest.mark.parametrize('healthcheck', [
    marathon.Healthcheck.HTTP,
    marathon.Healthcheck.MESOS_HTTP,
])
def test_if_ucr_app_can_be_deployed(dcos_api_session: DcosApiSession, healthcheck: Any) -> None:
    """Marathon app inside ucr deployment integration test.

    Verifies that a marathon docker app inside of a ucr container can be
    deployed and accessed as expected, once per parametrized healthcheck
    protocol.
    """
    deploy_test_app_and_check(
        dcos_api_session,
        *test_helpers.marathon_test_app(
            container_type=marathon.Container.MESOS,
            healthcheck_protocol=healthcheck))
def test_if_marathon_app_can_be_deployed_with_mesos_containerizer(dcos_api_session: DcosApiSession) -> None:
"""Marathon app deployment integration test using the Mesos Containerizer
This test verifies that a Marathon app using the Mesos containerizer with
a Docker image can be deployed.
This is done by assigning an unique UUID to each app and passing it to the
docker container as an env variable. After successfull deployment, the
"GET /test_uuid" request is issued to the app. If the returned UUID matches
the one assigned to test - test succeds.
When port mapping is available (MESOS-4777), this test should be updated to
reflect that.
"""
deploy_test_app_and_check(
dcos_api_session,
*test_helpers.marathon_test_app(container_type=marathon.Container.MESOS))
def test_if_marathon_app_can_be_deployed_with_nfs_csi_volume(dcos_api_session: DcosApiSession) -> None:
"""Marathon app deployment integration test using an NFS CSI volume.
This test verifies that a Marathon app can be deployed which attaches to
an NFS volume provided by the NFS CSI plugin. In order to accomplish this,
we must first set up an NFS share on one agent.
"""
# We will run an NFS server on one agent and an app on another agent to
# verify CSI volume functionality.
if len(dcos_api_session.slaves) < 2:
pytest.skip("CSI Volume Tests require a minimum of two agents.")
expanded_config = test_helpers.get_expanded_config()
if expanded_config.get('security') == 'strict':
pytest.skip('Cannot setup NFS server as root user with EE strict mode enabled')
test_uuid = uuid.uuid4().hex
hosts = dcos_api_session.slaves[0], dcos_api_session.slaves[1]
# A helper to run a Metronome job as root to clean up the NFS share on an agent.
# We define this here so that it can be used during error handling.
def cleanup_nfs() -> None:
cleanup_command = """
sudo systemctl stop nfs-server && \
echo '' | sudo tee /etc/exports && \
sudo systemctl restart nfs-utils && \
sudo exportfs -arv && \
sudo rm -rf /var/lib/dcos-nfs-shares/test-volume-001
"""
cleanup_job = {
'description': 'Clean up NFS share',
'id': 'nfs-share-cleanup-{}'.format(test_uuid),
'run': {
'cmd': cleanup_command,
'cpus': 0.5,
'mem': 256,
'disk': 32,
'user': 'root',
'restart': {'policy': 'ON_FAILURE'},
'placement': {
'constraints': [{
'attribute': '@hostname',
'operator': 'LIKE',
'value': hosts[0]
}]
}
}
}
dcos_api_session.metronome_one_off(cleanup_job)
# Run a Metronome job as root to set up the NFS share on an agent.
command = """sudo mkdir -p /var/lib/dcos-nfs-shares/test-volume-001 && \
sudo chown -R nobody: /var/lib/dcos-nfs-shares/test-volume-001 && \
sudo chmod 777 /var/lib/dcos-nfs-shares/test-volume-001 && \
echo '/var/lib/dcos-nfs-shares/test-volume-001 *(rw,sync)' | sudo tee /etc/exports && \
sudo systemctl restart nfs-utils && \
sudo exportfs -arv && \
sudo systemctl start nfs-server && \
sudo systemctl enable nfs-server
"""
setup_job = {
'description': 'Set up NFS share',
'id': 'nfs-share-setup-{}'.format(test_uuid),
'run': {
'cmd': command,
'cpus': 0.5,
'mem': 256,
'disk': 32,
'user': 'root',
'restart': {'policy': 'ON_FAILURE'},
'placement': {
'constraints': [{
'attribute': '@hostname',
'operator': 'LIKE',
'value': hosts[0]
}]
}
}
}
dcos_api_session.metronome_one_off(setup_job)
# Create an app which writes to the NFS volume.
app = {
'id': 'csi-nfs-write-app-{}'.format(test_uuid),
'instances': 1,
'cpus': 0.5,
'mem': 256,
'cmd': 'echo some-stuff > test-volume-dir/output && sleep 999999',
'user': 'root',
'container': {
'type': 'MESOS',
'volumes': [{
'mode': 'rw',
'containerPath': 'test-volume-dir',
'external': {
'provider': 'csi',
'name': 'test-volume-001',
'options': {
'pluginName': 'nfs.csi.k8s.io',
'capability': {
'accessType': 'mount',
'accessMode': 'MULTI_NODE_MULTI_WRITER',
'fsType': 'nfs'
},
'volumeContext': {
'server': hosts[0],
'share': '/var/lib/dcos-nfs-shares/test-volume-001'
}
}
}
}]
},
'constraints': [
[
'hostname',
'LIKE',
hosts[1]
]
],
'healthChecks': [{
'protocol': 'COMMAND',
'command': {'value': 'test `cat test-volume-dir/output` = some-stuff'},
'gracePeriodSeconds': 5,
'intervalSeconds': 10,
'timeoutSeconds': 10,
'maxConsecutiveFailures': 3
}]
}
try:
with dcos_api_session.marathon.deploy_and_cleanup(app):
# Trivial app if it deploys, there is nothing else to check
pass
except Exception as error:
raise(error)
finally:
cleanup_nfs()
def test_if_marathon_pods_can_be_deployed_with_mesos_containerizer(dcos_api_session: DcosApiSession) -> None:
"""Marathon pods deployment integration test using the Mesos Containerizer
This test verifies that a Marathon pods can be deployed.
"""
test_uuid = uuid.uuid4().hex
# create pod with trivial apps that function as long running processes
pod_definition = {
'id': '/integration-test-pods-{}'.format(test_uuid),
'scaling': {'kind': 'fixed', 'instances': 1},
'environment': {'PING': 'PONG'},
'containers': [
{
'name': 'ct1',
'resources': {'cpus': 0.1, 'mem': 32},
'image': {'kind': 'DOCKER', 'id': 'debian:stretch-slim'},
'exec': {'command': {'shell': 'touch foo; while true; do sleep 1; done'}},
'healthcheck': {'command': {'shell': 'test -f foo'}}
},
{
'name': 'ct2',
'resources': {'cpus': 0.1, 'mem': 32},
'exec': {'command': {'shell': 'echo $PING > foo; while true; do sleep 1; done'}},
'healthcheck': {'command': {'shell': 'test $PING = `cat foo`'}}
}
],
'networks': [{'mode': 'host'}]
}
with dcos_api_session.marathon.deploy_pod_and_cleanup(pod_definition):
# Trivial app if it deploys, there is nothing else to check
pass
| [
"logging.getLogger",
"test_helpers.marathon_test_app",
"uuid.uuid4",
"pytest.mark.parametrize",
"test_helpers.get_expanded_config",
"pytest.skip",
"test_helpers.docker_pull_image"
] | [((251, 278), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (268, 278), False, 'import logging\n'), ((3747, 3852), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""healthcheck"""', '[marathon.Healthcheck.HTTP, marathon.Healthcheck.MESOS_HTTP]'], {}), "('healthcheck', [marathon.Healthcheck.HTTP, marathon\n .Healthcheck.MESOS_HTTP])\n", (3770, 3852), False, 'import pytest\n'), ((776, 810), 'test_helpers.get_expanded_config', 'test_helpers.get_expanded_config', ([], {}), '()\n', (808, 810), False, 'import test_helpers\n'), ((2360, 2413), 'test_helpers.docker_pull_image', 'test_helpers.docker_pull_image', (['"""debian:stretch-slim"""'], {}), "('debian:stretch-slim')\n", (2390, 2413), False, 'import test_helpers\n'), ((5814, 5848), 'test_helpers.get_expanded_config', 'test_helpers.get_expanded_config', ([], {}), '()\n', (5846, 5848), False, 'import test_helpers\n'), ((5726, 5790), 'pytest.skip', 'pytest.skip', (['"""CSI Volume Tests require a minimum of two agents."""'], {}), "('CSI Volume Tests require a minimum of two agents.')\n", (5737, 5790), False, 'import pytest\n'), ((5909, 5988), 'pytest.skip', 'pytest.skip', (['"""Cannot setup NFS server as root user with EE strict mode enabled"""'], {}), "('Cannot setup NFS server as root user with EE strict mode enabled')\n", (5920, 5988), False, 'import pytest\n'), ((6006, 6018), 'uuid.uuid4', 'uuid.uuid4', ([], {}), '()\n', (6016, 6018), False, 'import uuid\n'), ((10542, 10554), 'uuid.uuid4', 'uuid.uuid4', ([], {}), '()\n', (10552, 10554), False, 'import uuid\n'), ((3208, 3240), 'test_helpers.marathon_test_app', 'test_helpers.marathon_test_app', ([], {}), '()\n', (3238, 3240), False, 'import test_helpers\n'), ((3579, 3709), 'test_helpers.marathon_test_app', 'test_helpers.marathon_test_app', ([], {'network': 'marathon.Network.BRIDGE', 'container_type': 'marathon.Container.DOCKER', 'container_port': '(9080)'}), '(network=marathon.Network.BRIDGE,\n 
container_type=marathon.Container.DOCKER, container_port=9080)\n', (3609, 3709), False, 'import test_helpers\n'), ((4203, 4312), 'test_helpers.marathon_test_app', 'test_helpers.marathon_test_app', ([], {'container_type': 'marathon.Container.MESOS', 'healthcheck_protocol': 'healthcheck'}), '(container_type=marathon.Container.MESOS,\n healthcheck_protocol=healthcheck)\n', (4233, 4312), False, 'import test_helpers\n'), ((5092, 5163), 'test_helpers.marathon_test_app', 'test_helpers.marathon_test_app', ([], {'container_type': 'marathon.Container.MESOS'}), '(container_type=marathon.Container.MESOS)\n', (5122, 5163), False, 'import test_helpers\n')] |
import pytest
import numpy as np
import pandas as pd
import xarray as xr
import bmorph
from bmorph.util import mizuroute_utils as mizutil
reference = xr.open_dataset("./bmorph/tests/data/test_reference.nc")
routed = xr.open_dataset("./bmorph/tests/data/test_routed.nc")
topo = xr.open_dataset("./bmorph/tests/data/test_topo.nc")
true_fill = xr.open_dataset("./bmorph/tests/data/true_fill_segs.nc")
true_results = xr.open_dataset("./bmorph/tests/data/true_results.nc")
test_fill_methods = ['kge', 'kldiv', 'r2', 'leave_null']
gauge_flows = xr.Dataset(
{
'reference_flow' : (('seg', 'time'), reference['reference_flow'].transpose().values)
},
{"seg": reference['seg'].values, "time": reference['time'].values},
)
def test_map_headwater_sites(routed=routed.copy()):
routed['down_seg'] = true_results['down_seg']
test_routed = mizutil.map_headwater_sites(routed)
assert 'is_headwaters' in test_routed.var()
for truth, test in zip(true_results['is_headwaters'].values, test_routed['is_headwaters']):
assert truth == test
def test_find_up(routed=routed.copy()):
test_routed = routed
test_routed['down_seg'] = true_results['down_seg']
test_routed['is_headwaters'] = true_results['is_headwaters']
for seg, true_up_seg in zip(test_routed['seg'].values, true_results['up_seg'].values):
test_up_seg = mizutil.find_up(test_routed, seg)
if np.isnan(true_up_seg):
assert np.isnan(test_up_seg)
else:
assert true_up_seg == test_up_seg
def test_find_max_r2(routed=routed.copy()):
true_r2_fill = true_fill.sel(fill_method='r2')['true_seg']
for true_fill_seg, test_flow in zip(true_r2_fill.values, routed['flow'].values):
test_fill_seg = mizutil.find_max_r2(gauge_flows['reference_flow'], test_flow)[1]
assert true_fill_seg == test_fill_seg
def test_find_max_kge(routed=routed.copy()):
true_kge_fill = true_fill.sel(fill_method='kge')['true_seg']
for true_fill_seg, test_flow in zip(true_kge_fill.values, routed['flow'].values):
test_fill_seg = mizutil.find_max_kge(gauge_flows['reference_flow'], test_flow)[1]
assert true_fill_seg == test_fill_seg
def test_find_min_kldiv(routed=routed.copy()):
true_kldiv_fill = true_fill.sel(fill_method='kldiv')['true_seg']
for true_fill_seg, test_flow in zip(true_kldiv_fill.values, routed['flow'].values):
test_fill_seg = mizutil.find_min_kldiv(gauge_flows['reference_flow'], test_flow)[1]
assert true_fill_seg == test_fill_seg
def test_map_ref_sites(routed=routed.copy(), fill_methods=test_fill_methods):
test_routed = routed
test_routed['down_seg'] = true_results['down_seg']
test_routed['is_headwaters'] = true_results['is_headwaters']
for fill_method in fill_methods:
test_routed = mizutil.map_ref_sites(routed=test_routed, gauge_reference=reference,
route_var = 'flow', fill_method = fill_method
)
for true_up_ref_seg, test_up_ref_seg in zip(true_fill.sel(fill_method=f"{fill_method}_up")['true_seg'].values,
test_routed['up_ref_seg'].values):
assert true_up_ref_seg == test_up_ref_seg
for true_down_ref_seg, test_down_ref_seg in zip(true_fill.sel(fill_method=f"{fill_method}_down")['true_seg'].values,
test_routed['down_ref_seg'].values):
assert true_down_ref_seg == test_down_ref_seg
| [
"bmorph.util.mizuroute_utils.find_up",
"bmorph.util.mizuroute_utils.find_max_kge",
"bmorph.util.mizuroute_utils.find_min_kldiv",
"bmorph.util.mizuroute_utils.map_headwater_sites",
"bmorph.util.mizuroute_utils.find_max_r2",
"numpy.isnan",
"bmorph.util.mizuroute_utils.map_ref_sites",
"xarray.open_datase... | [((153, 209), 'xarray.open_dataset', 'xr.open_dataset', (['"""./bmorph/tests/data/test_reference.nc"""'], {}), "('./bmorph/tests/data/test_reference.nc')\n", (168, 209), True, 'import xarray as xr\n'), ((219, 272), 'xarray.open_dataset', 'xr.open_dataset', (['"""./bmorph/tests/data/test_routed.nc"""'], {}), "('./bmorph/tests/data/test_routed.nc')\n", (234, 272), True, 'import xarray as xr\n'), ((280, 331), 'xarray.open_dataset', 'xr.open_dataset', (['"""./bmorph/tests/data/test_topo.nc"""'], {}), "('./bmorph/tests/data/test_topo.nc')\n", (295, 331), True, 'import xarray as xr\n'), ((344, 400), 'xarray.open_dataset', 'xr.open_dataset', (['"""./bmorph/tests/data/true_fill_segs.nc"""'], {}), "('./bmorph/tests/data/true_fill_segs.nc')\n", (359, 400), True, 'import xarray as xr\n'), ((416, 470), 'xarray.open_dataset', 'xr.open_dataset', (['"""./bmorph/tests/data/true_results.nc"""'], {}), "('./bmorph/tests/data/true_results.nc')\n", (431, 470), True, 'import xarray as xr\n'), ((857, 892), 'bmorph.util.mizuroute_utils.map_headwater_sites', 'mizutil.map_headwater_sites', (['routed'], {}), '(routed)\n', (884, 892), True, 'from bmorph.util import mizuroute_utils as mizutil\n'), ((1373, 1406), 'bmorph.util.mizuroute_utils.find_up', 'mizutil.find_up', (['test_routed', 'seg'], {}), '(test_routed, seg)\n', (1388, 1406), True, 'from bmorph.util import mizuroute_utils as mizutil\n'), ((1418, 1439), 'numpy.isnan', 'np.isnan', (['true_up_seg'], {}), '(true_up_seg)\n', (1426, 1439), True, 'import numpy as np\n'), ((2861, 2976), 'bmorph.util.mizuroute_utils.map_ref_sites', 'mizutil.map_ref_sites', ([], {'routed': 'test_routed', 'gauge_reference': 'reference', 'route_var': '"""flow"""', 'fill_method': 'fill_method'}), "(routed=test_routed, gauge_reference=reference,\n route_var='flow', fill_method=fill_method)\n", (2882, 2976), True, 'from bmorph.util import mizuroute_utils as mizutil\n'), ((1460, 1481), 'numpy.isnan', 'np.isnan', (['test_up_seg'], {}), 
'(test_up_seg)\n', (1468, 1481), True, 'import numpy as np\n'), ((1767, 1828), 'bmorph.util.mizuroute_utils.find_max_r2', 'mizutil.find_max_r2', (["gauge_flows['reference_flow']", 'test_flow'], {}), "(gauge_flows['reference_flow'], test_flow)\n", (1786, 1828), True, 'from bmorph.util import mizuroute_utils as mizutil\n'), ((2107, 2169), 'bmorph.util.mizuroute_utils.find_max_kge', 'mizutil.find_max_kge', (["gauge_flows['reference_flow']", 'test_flow'], {}), "(gauge_flows['reference_flow'], test_flow)\n", (2127, 2169), True, 'from bmorph.util import mizuroute_utils as mizutil\n'), ((2456, 2520), 'bmorph.util.mizuroute_utils.find_min_kldiv', 'mizutil.find_min_kldiv', (["gauge_flows['reference_flow']", 'test_flow'], {}), "(gauge_flows['reference_flow'], test_flow)\n", (2478, 2520), True, 'from bmorph.util import mizuroute_utils as mizutil\n')] |
import asyncio
from types import TracebackType
from typing import Optional, Type, Any
class Wire:
def configure(self, value: Any) -> None:
pass
async def __aenter__(self) -> None:
pass
async def __aexit__(
self,
exc_type: Optional[Type[BaseException]],
exc_val: Optional[BaseException],
exc_tb: Optional[TracebackType],
) -> None:
self.close()
await self.wait_closed()
def close(self) -> None:
pass
async def wait_closed(self) -> None:
pass
class WaitMixin:
_event: asyncio.Event
def close(self) -> None:
if not hasattr(self, "_event"):
self._event = asyncio.Event()
self._event.set()
async def wait_closed(self) -> None:
if not hasattr(self, "_event"):
self._event = asyncio.Event()
await self._event.wait()
| [
"asyncio.Event"
] | [((692, 707), 'asyncio.Event', 'asyncio.Event', ([], {}), '()\n', (705, 707), False, 'import asyncio\n'), ((842, 857), 'asyncio.Event', 'asyncio.Event', ([], {}), '()\n', (855, 857), False, 'import asyncio\n')] |
from typing import Tuple
import torch
from torch import nn
from torch.utils.tensorboard import SummaryWriter
from tensorboard_pytorch_examples.common.config import (
CPU_DEVICE,
DEFAULT_EPOCHS_COUNT,
DEVICE,
)
class ClassificationTrainer:
def __init__(
self,
trainloader: torch.utils.data.DataLoader,
cvloader: torch.utils.data.DataLoader,
criterion: torch.nn.modules.loss._Loss,
writer: SummaryWriter,
epochs: int = DEFAULT_EPOCHS_COUNT,
device: torch.device = DEVICE,
train_stats_frequency: int = 10,
) -> None:
"""
Basic class used for training.
Arguments:
trainloader {torch.utils.data.DataLoader} -- training data
cvloader {torch.utils.data.DataLoader} -- cross-validation data
criterion {torch.nn.modules.loss._Loss}
writer {SummaryWriter}
Keyword Arguments:
epochs {int} -- number of epochs to learn (default: {DEFAULT_EPOCHS_COUNT})
device {torch.device} -- GPU or CPU device (default: {DEVICE})
train_stats_frequency {int} -- tensorboard train data update frequency
(default: {10})
"""
self.trainloader = trainloader
self.cvloader = cvloader
self.criterion = criterion
self.writer = writer
self.epochs = epochs
self.device = device
self.train_stats_frequency = train_stats_frequency
self.step = 0
self._first_run = True
def __call__(self, *args, **kwargs):
return self._training_loop(*args, **kwargs)
def _training_loop(
self, model: nn.Module, optimizer: torch.optim.Optimizer
) -> nn.Module:
"""
Update the model by applying optimizer steps.
Perform {self.epochs} number of iterations over whole dataset.
Arguments:
model {torch.nn} -- model to learn
optimizer {torch.optim.Optimizer}
Returns:
nn.Module -- trained model
"""
for epoch in range(self.epochs):
print(epoch + 1) # TODO: Use logger instead
for batch in self.trainloader:
model = self._optimizer_step(model, optimizer, batch)
self.update_cv_stats(model)
self._first_run = False
return model
def _optimizer_step(
self,
model: nn.Module,
optimizer: torch.optim.Optimizer,
batch: Tuple[torch.tensor, torch.tensor],
) -> nn.Module:
"""
Perform one optimizer step on the batch.
Arguments:
model {torch.nn} -- model to learn
optimizer {torch.optim.Optimizer}
batch {Tuple[torch.tensor, torch.tensor]} -- batch with features and targets
Returns:
nn.Module -- updated model
"""
self.step += 1
inputs, targets = batch[0].to(self.device), batch[1].to(self.device)
optimizer.zero_grad()
outputs = model(inputs)
loss = self.criterion(outputs, targets)
loss.backward()
optimizer.step()
if self.step % self.train_stats_frequency == 0:
self.update_train_stats(loss, outputs, targets)
return model
def update_train_stats(
self, loss: torch.tensor, outputs: torch.tensor, targets: torch.tensor
) -> None:
"""[summary]
Arguments:
loss {torch.tensor}
outputs {torch.tensor}
targets {torch.tensor}
"""
acc = (outputs.argmax(1) == targets).sum() / float(targets.shape[0])
self.writer.add_scalar("Loss/train", loss.to(CPU_DEVICE), self.step)
self.writer.add_scalar("Acc/train", acc.to(CPU_DEVICE), self.step)
def update_cv_stats(self, model: torch.nn) -> None:
"""
Update cross-validation statistics in the tensorboard logs.
Arguments:
model {torch.nn} -- model to learn
"""
cv_acc, cv_loss, outputs_dist, targets_dist = self._get_cv_stats(model)
self.writer.add_scalar("Loss/cv", cv_loss, self.step)
self.writer.add_scalar("Acc/cv", cv_acc, self.step)
self.writer.add_histogram("Outputs/cv", outputs_dist, self.step)
if self._first_run:
self.writer.add_histogram("Outputs/cv", targets_dist, 0)
def _get_cv_stats(self, model: torch.nn) -> Tuple[torch.tensor, ...]:
"""
Collect CV data accuracy, loss, prediction distribution, and targets
distribution.
Arguments:
model {torch.nn} -- model to learn
Returns:
Tuple[torch.tensor * 4] -- accuracy, loss, prediction distribution, and
targets distribution.
"""
cv_acc = torch.tensor(0.0).to(self.device)
cv_loss = torch.tensor(0.0).to(self.device)
samples_no = float(len(self.cvloader.dataset))
outputs_dist = None
targets_dist = None
with torch.no_grad():
model = model.eval()
for inputs, targets in self.cvloader:
batch_size = inputs.shape[0] # last sample can have different items
targets = targets.to(self.device)
outputs = model(inputs.to(self.device))
if outputs_dist is None and targets_dist is None:
outputs_dist = outputs.argmax(1).long()
targets_dist = targets.long()
else:
outputs_dist = torch.cat([outputs_dist, outputs.argmax(1).long()])
targets_dist = torch.cat([targets_dist, targets.long()])
cv_acc += (outputs.argmax(1) == targets).sum()
cv_loss += self.criterion(outputs, targets) * batch_size
cv_acc = (cv_acc / samples_no).to(CPU_DEVICE)
cv_loss = (cv_loss / samples_no).to(CPU_DEVICE)
outputs_dist = outputs_dist.to(CPU_DEVICE)
targets_dist = targets_dist.to(CPU_DEVICE)
model = model.train()
return cv_acc, cv_loss, outputs_dist, targets_dist
| [
"torch.no_grad",
"torch.tensor"
] | [((5006, 5021), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (5019, 5021), False, 'import torch\n'), ((4795, 4812), 'torch.tensor', 'torch.tensor', (['(0.0)'], {}), '(0.0)\n', (4807, 4812), False, 'import torch\n'), ((4847, 4864), 'torch.tensor', 'torch.tensor', (['(0.0)'], {}), '(0.0)\n', (4859, 4864), False, 'import torch\n')] |
from __future__ import absolute_import
from __future__ import unicode_literals
from django.utils.translation import ugettext_lazy as _
REMINDER_TRANS = _("Did you receive or transfer stock to another facility last month?"
" Please reply either 'trans no' or 'trans yes'")
TRANS_HELP = _("You can respond 'trans yes' if you have received "
"or transfered stock last month or 'trans no' if you have not")
SOH_OVERSTOCKED = _("You are overstocked for %(overstocked_list)s that you can redistribute to other facilities. "
"Keep %(products_list)s.")
REMINDER_STOCKOUT = _("You are stocked out of %(products_list)s."
" The following facilities are overstocked: %(overstocked_list)s")
| [
"django.utils.translation.ugettext_lazy"
] | [((153, 274), 'django.utils.translation.ugettext_lazy', '_', (['"""Did you receive or transfer stock to another facility last month? Please reply either \'trans no\' or \'trans yes\'"""'], {}), '("Did you receive or transfer stock to another facility last month? Please reply either \'trans no\' or \'trans yes\'"\n )\n', (154, 274), True, 'from django.utils.translation import ugettext_lazy as _\n'), ((305, 424), 'django.utils.translation.ugettext_lazy', '_', (['"""You can respond \'trans yes\' if you have received or transfered stock last month or \'trans no\' if you have not"""'], {}), '("You can respond \'trans yes\' if you have received or transfered stock last month or \'trans no\' if you have not"\n )\n', (306, 424), True, 'from django.utils.translation import ugettext_lazy as _\n'), ((456, 581), 'django.utils.translation.ugettext_lazy', '_', (['"""You are overstocked for %(overstocked_list)s that you can redistribute to other facilities. Keep %(products_list)s."""'], {}), "('You are overstocked for %(overstocked_list)s that you can redistribute to other facilities. Keep %(products_list)s.'\n )\n", (457, 581), True, 'from django.utils.translation import ugettext_lazy as _\n'), ((620, 734), 'django.utils.translation.ugettext_lazy', '_', (['"""You are stocked out of %(products_list)s. The following facilities are overstocked: %(overstocked_list)s"""'], {}), "('You are stocked out of %(products_list)s. The following facilities are overstocked: %(overstocked_list)s'\n )\n", (621, 734), True, 'from django.utils.translation import ugettext_lazy as _\n')] |