code | apis | extract_api
from helpers.kafkahelpers import (
create_producer,
publish_run_start_message,
publish_f142_message,
)
from helpers.nexushelpers import OpenNexusFileWhenAvailable
from helpers.timehelpers import unix_time_milliseconds
from time import sleep
from datetime import datetime
import pytest
def check(condition, fail_string):
if not condition:
pytest.fail(fail_string)
def test_two_different_writer_modules_with_same_flatbuffer_id(docker_compose):
producer = create_producer()
start_time = unix_time_milliseconds(datetime.utcnow()) - 10000
for i in range(10):
publish_f142_message(
producer,
"TEST_sampleEnv",
int(start_time + i * 1000),
source_name="test_source_1",
)
publish_f142_message(
producer,
"TEST_sampleEnv",
int(start_time + i * 1000),
source_name="test_source_2",
)
check(producer.flush(5) == 0, "Unable to flush kafka messages.")
# Start file writing
publish_run_start_message(
producer,
"commands/nexus_structure_multiple_modules.json",
"output_file_multiple_modules.nxs",
start_time=int(start_time),
stop_time=int(start_time + 5 * 1000),
)
# Give it some time to accumulate data
sleep(10)
filepath = "output-files/output_file_multiple_modules.nxs"
with OpenNexusFileWhenAvailable(filepath) as file:
assert (
len(file["entry/sample/dataset1/time"][:]) > 0
and len(file["entry/sample/dataset1/value"][:]) > 0
), "f142 module should have written this dataset, it should have written a value and time"
assert (
"cue_timestamp_zero" not in file["entry/sample/dataset2"]
), "f142_test module should have written this dataset, it writes cue_index but no cue_timestamp_zero"
assert (
len(file["entry/sample/dataset2/cue_index"][:]) > 0
), "Expected index values, found none."
for i in range(len(file["entry/sample/dataset2/cue_index"][:])):
assert (
file["entry/sample/dataset2/cue_index"][i] == i
), "Expect consecutive integers to be written by f142_test"
|
[
"pytest.fail",
"time.sleep",
"helpers.kafkahelpers.create_producer",
"datetime.datetime.utcnow",
"helpers.nexushelpers.OpenNexusFileWhenAvailable"
] |
[((485, 502), 'helpers.kafkahelpers.create_producer', 'create_producer', ([], {}), '()\n', (500, 502), False, 'from helpers.kafkahelpers import create_producer, publish_run_start_message, publish_f142_message\n'), ((1320, 1329), 'time.sleep', 'sleep', (['(10)'], {}), '(10)\n', (1325, 1329), False, 'from time import sleep\n'), ((364, 388), 'pytest.fail', 'pytest.fail', (['fail_string'], {}), '(fail_string)\n', (375, 388), False, 'import pytest\n'), ((1403, 1439), 'helpers.nexushelpers.OpenNexusFileWhenAvailable', 'OpenNexusFileWhenAvailable', (['filepath'], {}), '(filepath)\n', (1429, 1439), False, 'from helpers.nexushelpers import OpenNexusFileWhenAvailable\n'), ((543, 560), 'datetime.datetime.utcnow', 'datetime.utcnow', ([], {}), '()\n', (558, 560), False, 'from datetime import datetime\n')]
|
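The test above depends on a unix_time_milliseconds helper whose implementation is not shown. A plausible sketch of such a helper, assuming it converts a naive UTC datetime (as produced by datetime.utcnow() above) to milliseconds since the Unix epoch; the real helpers.timehelpers version may differ:

from datetime import datetime

def unix_time_milliseconds(dt: datetime) -> float:
    # assumption: dt is a naive UTC datetime, e.g. from datetime.utcnow()
    epoch = datetime(1970, 1, 1)
    return (dt - epoch).total_seconds() * 1000.0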
"""
DEPRECATED
USE kwcoco.metrics instead!
Faster pure-python versions of sklearn functions that avoid expensive checks
and label rectifications. It is assumed that all labels are consecutive
non-negative integers.
"""
from scipy.sparse import coo_matrix
import numpy as np
def confusion_matrix(y_true, y_pred, n_labels=None, labels=None,
sample_weight=None):
"""
faster version of sklearn confusion matrix that avoids the
expensive checks and label rectification
Runs in about 0.7ms
Returns:
ndarray: matrix where rows represent real and cols represent pred
Example:
>>> y_true = np.array([0, 0, 0, 0, 1, 1, 1, 0, 0, 1])
>>> y_pred = np.array([0, 0, 0, 0, 0, 0, 0, 1, 1, 1])
>>> confusion_matrix(y_true, y_pred, 2)
array([[4, 2],
[3, 1]])
>>> confusion_matrix(y_true, y_pred, 2).ravel()
array([4, 2, 3, 1])
Benchmarks:
import ubelt as ub
y_true = np.random.randint(0, 2, 10000)
y_pred = np.random.randint(0, 2, 10000)
n = 1000
for timer in ub.Timerit(n, bestof=10, label='py-time'):
sample_weight = [1] * len(y_true)
confusion_matrix(y_true, y_pred, 2, sample_weight=sample_weight)
for timer in ub.Timerit(n, bestof=10, label='np-time'):
            sample_weight = np.ones(len(y_true), dtype=np.int64)
confusion_matrix(y_true, y_pred, 2, sample_weight=sample_weight)
"""
if sample_weight is None:
        sample_weight = np.ones(len(y_true), dtype=np.int64)
if n_labels is None:
n_labels = len(labels)
CM = coo_matrix((sample_weight, (y_true, y_pred)),
shape=(n_labels, n_labels),
dtype=np.int64).toarray()
return CM
def global_accuracy_from_confusion(cfsn):
# real is rows, pred is columns
n_ii = np.diag(cfsn)
# sum over pred = columns = axis1
t_i = cfsn.sum(axis=1)
global_acc = n_ii.sum() / t_i.sum()
return global_acc
def class_accuracy_from_confusion(cfsn):
# real is rows, pred is columns
n_ii = np.diag(cfsn)
# sum over pred = columns = axis1
t_i = cfsn.sum(axis=1)
    # per-class accuracy; nan_to_num zeroes classes never seen in y_true (0 / 0 -> nan)
    per_class_acc = np.nan_to_num(n_ii / t_i)
    class_acc = per_class_acc.mean()
return class_acc
|
[
"numpy.diag",
"scipy.sparse.coo_matrix",
"numpy.nan_to_num"
] |
[((1890, 1903), 'numpy.diag', 'np.diag', (['cfsn'], {}), '(cfsn)\n', (1897, 1903), True, 'import numpy as np\n'), ((2121, 2134), 'numpy.diag', 'np.diag', (['cfsn'], {}), '(cfsn)\n', (2128, 2134), True, 'import numpy as np\n'), ((1645, 1738), 'scipy.sparse.coo_matrix', 'coo_matrix', (['(sample_weight, (y_true, y_pred))'], {'shape': '(n_labels, n_labels)', 'dtype': 'np.int64'}), '((sample_weight, (y_true, y_pred)), shape=(n_labels, n_labels),\n dtype=np.int64)\n', (1655, 1738), False, 'from scipy.sparse import coo_matrix\n'), ((2256, 2284), 'numpy.nan_to_num', 'np.nan_to_num', (['per_class_acc'], {}), '(per_class_acc)\n', (2269, 2284), True, 'import numpy as np\n')]
|
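A quick usage sketch of the helpers above, showing that rows index the true labels and columns the predictions:

import numpy as np

y_true = np.array([0, 0, 1, 1, 2, 2])
y_pred = np.array([0, 1, 1, 1, 2, 0])
cfsn = confusion_matrix(y_true, y_pred, n_labels=3)
# array([[1, 1, 0],
#        [0, 2, 0],
#        [1, 0, 1]])
print(global_accuracy_from_confusion(cfsn))  # 4 / 6 ~= 0.667
print(class_accuracy_from_confusion(cfsn))   # mean of (1/2, 2/2, 1/2) ~= 0.667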
import nextcord, asyncio, os, io, contextlib
from nextcord.ext import commands
from nextcord.ui import Modal, TextInput
from util.messages import DeleteMessageSlash
from util.constants import Client
class SnekBox_Eval(nextcord.ui.Modal):
def __init__(self) -> None:
super().__init__(title="Evaluate Your Code", custom_id="evaluate_code")
self.add_item(
nextcord.ui.TextInput(
label="Your Eval Code",
placeholder="print('Hello')",
custom_id="evaluated code",
style=nextcord.TextInputStyle.paragraph,
min_length=10
),
)
async def callback(self, inter: nextcord.Interaction) -> None:
view = DeleteMessageSlash(inter)
embed = nextcord.Embed(title="Your code", description="✅ Your eval job has been completed and the result is provided below.", color=0x00FF00)
code = self.children[0].value
stdout = io.StringIO()
        # NOTE: exec runs the submitted code in-process; despite the class name,
        # no snekbox sandboxing is applied here
        with contextlib.redirect_stdout(stdout):
            exec(code)
res = stdout.getvalue()
if Client.token in res:
res = ":warning: We can't reveal any sensitive info."
embed.add_field(name="Input Code", value=f"```py\n{code}\n```", inline=False)
embed.add_field(name="Evaluated Code:", value=res, inline=False)
await inter.response.send_message(embed=embed,view=view)
async def on_error(self, error, interaction: nextcord.Interaction):
view = DeleteMessageSlash(interaction)
embed = nextcord.Embed(title="Code Status", description=":x: An error occurred.", color=0xFF0000)
embed.add_field(name=":warning: The Error", value=f"```{error}```", inline=False)
await interaction.response.send_message(embed=embed,view=view)
class Eval(commands.Cog, description='Evaluate Your Code.'):
COG_EMOJI = "💻"
def __init__(self, bot):
self.bot = bot
@nextcord.slash_command(name="eval", description="Evaluates the given python code")
async def eval(self, interaction: nextcord.Interaction):
await interaction.response.send_modal(modal=SnekBox_Eval())
|
[
"io.StringIO",
"nextcord.slash_command",
"util.messages.DeleteMessageSlash",
"nextcord.Embed",
"contextlib.redirect_stdout",
"nextcord.ui.TextInput"
] |
[((1943, 2030), 'nextcord.slash_command', 'nextcord.slash_command', ([], {'name': '"""eval"""', 'description': '"""Evaluates the given python code"""'}), "(name='eval', description=\n 'Evaluates the given python code')\n", (1965, 2030), False, 'import nextcord, asyncio, os, io, contextlib\n'), ((725, 750), 'util.messages.DeleteMessageSlash', 'DeleteMessageSlash', (['inter'], {}), '(inter)\n', (743, 750), False, 'from util.messages import DeleteMessageSlash\n'), ((767, 906), 'nextcord.Embed', 'nextcord.Embed', ([], {'title': '"""Your code"""', 'description': '"""✅ Your eval job has been completed and the result is provided below."""', 'color': '(65280)'}), "(title='Your code', description=\n '✅ Your eval job has been completed and the result is provided below.',\n color=65280)\n", (781, 906), False, 'import nextcord, asyncio, os, io, contextlib\n'), ((956, 969), 'io.StringIO', 'io.StringIO', ([], {}), '()\n', (967, 969), False, 'import nextcord, asyncio, os, io, contextlib\n'), ((1488, 1519), 'util.messages.DeleteMessageSlash', 'DeleteMessageSlash', (['interaction'], {}), '(interaction)\n', (1506, 1519), False, 'from util.messages import DeleteMessageSlash\n'), ((1536, 1629), 'nextcord.Embed', 'nextcord.Embed', ([], {'title': '"""Code Status"""', 'description': '""":x: An error occurred."""', 'color': '(16711680)'}), "(title='Code Status', description=':x: An error occurred.',\n color=16711680)\n", (1550, 1629), False, 'import nextcord, asyncio, os, io, contextlib\n'), ((380, 547), 'nextcord.ui.TextInput', 'nextcord.ui.TextInput', ([], {'label': '"""Your Eval Code"""', 'placeholder': '"""print(\'Hello\')"""', 'custom_id': '"""evaluated code"""', 'style': 'nextcord.TextInputStyle.paragraph', 'min_length': '(10)'}), '(label=\'Your Eval Code\', placeholder="print(\'Hello\')",\n custom_id=\'evaluated code\', style=nextcord.TextInputStyle.paragraph,\n min_length=10)\n', (401, 547), False, 'import nextcord, asyncio, os, io, contextlib\n'), ((983, 1017), 'contextlib.redirect_stdout', 'contextlib.redirect_stdout', (['stdout'], {}), '(stdout)\n', (1009, 1017), False, 'import nextcord, asyncio, os, io, contextlib\n')]
|
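The modal callback above captures whatever the submitted code prints by redirecting stdout; a minimal standalone sketch of that capture pattern:

import io
import contextlib

def run_and_capture(code: str) -> str:
    # run code in-process (no sandboxing, exactly like the cog above)
    # and return everything it printed
    buffer = io.StringIO()
    with contextlib.redirect_stdout(buffer):
        exec(code)
    return buffer.getvalue()

print(run_and_capture("print('Hello')"))  # -> Hello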
from rgbd_seg.utils import build_from_cfg
from .registry import HEADS
def build_head(cfg, default_args=None):
head = build_from_cfg(cfg, HEADS, default_args)
return head
|
[
"rgbd_seg.utils.build_from_cfg"
] |
[((124, 164), 'rgbd_seg.utils.build_from_cfg', 'build_from_cfg', (['cfg', 'HEADS', 'default_args'], {}), '(cfg, HEADS, default_args)\n', (138, 164), False, 'from rgbd_seg.utils import build_from_cfg\n')]
|
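build_from_cfg is imported from rgbd_seg and its internals are not shown here; a minimal sketch of the registry-lookup pattern it presumably implements (the name build_from_cfg_sketch and the exact behavior are assumptions for illustration):

def build_from_cfg_sketch(cfg, registry, default_args=None):
    # cfg is a dict such as {'type': 'SomeHead', 'num_classes': 21};
    # the 'type' key selects a class registered under that name
    args = dict(cfg)
    obj_cls = registry[args.pop('type')]
    if default_args is not None:
        args.update(default_args)
    return obj_cls(**args)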
import taichi as ti
import taichi_glsl as ts
import math
from utils import Vector, Matrix, tiNormalize, Float
from config.base_cfg import error
## unity gameobject.transform
# ref: https://github.com/JYLeeLYJ/Fluid-Engine-Dev-on-Taichi/blob/master/src/python/geometry.py
@ti.data_oriented
class Transform2:
def __init__(self,
translation=ti.Vector([0.0, 0.0]),
orientation=0.0,
localscale=1.0):
self._translation = ti.Vector.field(2, dtype=ti.f32, shape=[])
self._orientation = ti.field(dtype=ti.f32, shape=[])
self._localScale = ti.Vector.field(2, dtype=ti.f32, shape=[])
# use buffer for later materialization
self.translation_buf = translation
self.orientation_buf = orientation % (2 * math.pi)
self.localscale_buf = localscale
def __repr__(self):
return '{} ( Trsln : {}, Ornttn: {}, lclScl: {})'.format(
self.__class__.__name__,
self.translation,
self.orientation,
self.localScale)
@ti.pyfunc
def kern_materialize(self):
self._translation[None] = self.translation_buf
self._orientation[None] = self.orientation_buf
self.localScale = self.localscale_buf
@property
@ti.pyfunc
def translation(self) -> Vector:
return self._translation[None]
@translation.setter
def translation(self, translation: ti.Vector):
self._translation[None] = translation
# @property
# def orientation(self) -> Float:
# return self._orientation[None]
@property
@ti.pyfunc
def orientation(self) -> Float:
return self._orientation[None]
@orientation.setter
def orientation(self, orientation: Float):
self._orientation[None] = orientation % (2 * math.pi)
# @property
# def localScale(self) -> Float:
# return self._localScale[None]
@property
@ti.pyfunc
def localScale(self) -> Vector:
return self._localScale[None]
@localScale.setter
def localScale(self, localScale: Vector):
# clamp above zero
self._localScale[None] = ti.max(ts.vec2(localScale), ts.vec2(error))
@ti.pyfunc
def to_local(self, p_world: Vector) -> Vector:
# translate
out = p_world - self.translation
# rotate back
out = apply_rot(-self.orientation, out)
# scale
out /= self.localScale
return out
@ti.func
def to_world(self, p_local: Vector) -> Vector:
# scale
out = p_local * self.localScale
# rotate
out = apply_rot(self.orientation, out)
# translate
out += self.translation
return out
@ti.func
def dir_2world(self, dir_local: Vector) -> Vector:
out = apply_rot(self.orientation, dir_local)
return tiNormalize(out)
@ti.func
def getRotMat2D(rotation) -> Matrix:
return ti.Matrix([[ti.cos(rotation), -ti.sin(rotation)], [ti.sin(rotation), ti.cos(rotation)]])
@ti.pyfunc
def apply_rot(rot, p) -> Vector:
cos = ti.cos(rot)
sin = ti.sin(rot)
return ti.Vector([cos * p[0] - sin * p[1], sin * p[0] + cos * p[1]])
@ti.kernel
def test_rotate():
# a._orientation[None] = ti.static(math.pi / 2)
a.orientation = math.pi / 2
b = ti.Vector([0, 1])
# print(apply_rot(2.0, b))
c = a.to_local(b)
d = a.to_world(c)
# should be the same
print("world b: ", b)
print("world d: ", d)
if __name__ == '__main__':
ti.init(ti.cpu, debug=True)
a = Transform2(ti.Vector([2.0, 4.0]), 15)
a.kern_materialize()
a.orientation = 100.0
a.localScale = 2.0
a.translation = ti.Vector([5.0, 2.0])
t = a.orientation
print(a.to_local(ti.Vector([2.0, 2.0])))
# print(a.translation)
# print(a.orientation)
# print(a.localScale)
#
# print(a._translation[None])
# print(a._orientation[None])
# print(a._localScale[None])
# test_rotate()
|
[
"utils.tiNormalize",
"taichi.field",
"taichi.Vector.field",
"taichi.sin",
"taichi.cos",
"taichi.init",
"taichi_glsl.vec2",
"taichi.Vector"
] |
[((3078, 3089), 'taichi.cos', 'ti.cos', (['rot'], {}), '(rot)\n', (3084, 3089), True, 'import taichi as ti\n'), ((3100, 3111), 'taichi.sin', 'ti.sin', (['rot'], {}), '(rot)\n', (3106, 3111), True, 'import taichi as ti\n'), ((3123, 3184), 'taichi.Vector', 'ti.Vector', (['[cos * p[0] - sin * p[1], sin * p[0] + cos * p[1]]'], {}), '([cos * p[0] - sin * p[1], sin * p[0] + cos * p[1]])\n', (3132, 3184), True, 'import taichi as ti\n'), ((3309, 3326), 'taichi.Vector', 'ti.Vector', (['[0, 1]'], {}), '([0, 1])\n', (3318, 3326), True, 'import taichi as ti\n'), ((3513, 3540), 'taichi.init', 'ti.init', (['ti.cpu'], {'debug': '(True)'}), '(ti.cpu, debug=True)\n', (3520, 3540), True, 'import taichi as ti\n'), ((3681, 3702), 'taichi.Vector', 'ti.Vector', (['[5.0, 2.0]'], {}), '([5.0, 2.0])\n', (3690, 3702), True, 'import taichi as ti\n'), ((361, 382), 'taichi.Vector', 'ti.Vector', (['[0.0, 0.0]'], {}), '([0.0, 0.0])\n', (370, 382), True, 'import taichi as ti\n'), ((480, 522), 'taichi.Vector.field', 'ti.Vector.field', (['(2)'], {'dtype': 'ti.f32', 'shape': '[]'}), '(2, dtype=ti.f32, shape=[])\n', (495, 522), True, 'import taichi as ti\n'), ((551, 583), 'taichi.field', 'ti.field', ([], {'dtype': 'ti.f32', 'shape': '[]'}), '(dtype=ti.f32, shape=[])\n', (559, 583), True, 'import taichi as ti\n'), ((611, 653), 'taichi.Vector.field', 'ti.Vector.field', (['(2)'], {'dtype': 'ti.f32', 'shape': '[]'}), '(2, dtype=ti.f32, shape=[])\n', (626, 653), True, 'import taichi as ti\n'), ((2857, 2873), 'utils.tiNormalize', 'tiNormalize', (['out'], {}), '(out)\n', (2868, 2873), False, 'from utils import Vector, Matrix, tiNormalize, Float\n'), ((3560, 3581), 'taichi.Vector', 'ti.Vector', (['[2.0, 4.0]'], {}), '([2.0, 4.0])\n', (3569, 3581), True, 'import taichi as ti\n'), ((2163, 2182), 'taichi_glsl.vec2', 'ts.vec2', (['localScale'], {}), '(localScale)\n', (2170, 2182), True, 'import taichi_glsl as ts\n'), ((2184, 2198), 'taichi_glsl.vec2', 'ts.vec2', (['error'], {}), '(error)\n', (2191, 2198), True, 'import taichi_glsl as ts\n'), ((3748, 3769), 'taichi.Vector', 'ti.Vector', (['[2.0, 2.0]'], {}), '([2.0, 2.0])\n', (3757, 3769), True, 'import taichi as ti\n'), ((2945, 2961), 'taichi.cos', 'ti.cos', (['rotation'], {}), '(rotation)\n', (2951, 2961), True, 'import taichi as ti\n'), ((2984, 3000), 'taichi.sin', 'ti.sin', (['rotation'], {}), '(rotation)\n', (2990, 3000), True, 'import taichi as ti\n'), ((3002, 3018), 'taichi.cos', 'ti.cos', (['rotation'], {}), '(rotation)\n', (3008, 3018), True, 'import taichi as ti\n'), ((2964, 2980), 'taichi.sin', 'ti.sin', (['rotation'], {}), '(rotation)\n', (2970, 2980), True, 'import taichi as ti\n')]
|
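A plain-Python sanity check that apply_rot above agrees with the standard 2D rotation matrix [[cos, -sin], [sin, cos]] applied to a column vector:

import math

def apply_rot_py(rot, p):
    c, s = math.cos(rot), math.sin(rot)
    return [c * p[0] - s * p[1], s * p[0] + c * p[1]]

print(apply_rot_py(math.pi / 2, [1.0, 0.0]))  # ~[0.0, 1.0]: the x-axis rotates onto the y-axis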
from math import ceil
a = 1
b = 2
print(a/b)
print(ceil(1.6))
|
[
"math.ceil"
] |
[((52, 61), 'math.ceil', 'ceil', (['(1.6)'], {}), '(1.6)\n', (56, 61), False, 'from math import ceil\n')]
|
import random
def generate(width, height, percentage):
map = [[1 for i in range(height)] for j in range(width)]
min_x = 1
max_x = width - 2
min_y = 1
max_y = height - 2
x = random.randint(min_x, max_x)
y = random.randint(min_y, max_y)
map_cells = width * height
filled_cells = 0
filled_percentage = 0
previous_delta_x = 0
previous_delta_y = 0
while filled_percentage <= percentage:
if map[x][y] == 1:
map[x][y] = 0
filled_cells += 1
filled_percentage = filled_cells / map_cells * 100
if random.choice([True, False]):
delta_x = random.choice([1, -1, previous_delta_x])
if x + delta_x < min_x or x + delta_x > max_x:
x = x - delta_x
previous_delta_x = -delta_x
else:
x = x + delta_x
previous_delta_x = delta_x
else:
delta_y = random.choice([1, -1, previous_delta_y])
if y + delta_y < min_y or y + delta_y > max_y:
y = y - delta_y
previous_delta_y = -delta_y
else:
y = y + delta_y
previous_delta_y = delta_y
return map
|
[
"random.choice",
"random.randint"
] |
[((201, 229), 'random.randint', 'random.randint', (['min_x', 'max_x'], {}), '(min_x, max_x)\n', (215, 229), False, 'import random\n'), ((238, 266), 'random.randint', 'random.randint', (['min_y', 'max_y'], {}), '(min_y, max_y)\n', (252, 266), False, 'import random\n'), ((599, 627), 'random.choice', 'random.choice', (['[True, False]'], {}), '([True, False])\n', (612, 627), False, 'import random\n'), ((651, 691), 'random.choice', 'random.choice', (['[1, -1, previous_delta_x]'], {}), '([1, -1, previous_delta_x])\n', (664, 691), False, 'import random\n'), ((956, 996), 'random.choice', 'random.choice', (['[1, -1, previous_delta_y]'], {}), '([1, -1, previous_delta_y])\n', (969, 996), False, 'import random\n')]
|
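A small driver for the drunkard's-walk generator above; in the returned grid, 0 marks carved floor and 1 marks untouched wall (note the grid is indexed map[x][y]):

grid = generate(width=20, height=10, percentage=40)
for y in range(10):
    print(''.join('.' if grid[x][y] == 0 else '#' for x in range(20)))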
import unittest
from api.controllers.simulation import SimulationController
from api.server import rest
class SimulationControllerTest(unittest.TestCase):
def setUp(self):
self.controller = SimulationController()
def test_get_active_load_fails(self):
with self.assertRaises(Exception):
self.controller.active_load
def test_get_reactive_load_fails(self):
with self.assertRaises(Exception):
self.controller.reactive_load
def test_run_simulation(self):
active_load, reactive_load = self.controller.run_simulation()
self.assertEqual(active_load, 0.1)
self.assertEqual(reactive_load, 0.05)
def test_get_active_load(self):
self.controller.run_simulation()
self.assertEqual(self.controller.active_load, 0.1)
def test_get_reactive_load(self):
self.controller.run_simulation()
self.assertEqual(self.controller.reactive_load, 0.05)
class RestAPIv1Test(unittest.TestCase):
def setUp(self):
rest.config['TESTING'] = True
self.app = rest.test_client()
def test_get_active_load(self):
self.app.post('/api/v1/run')
simulation_res = self.app.get('/api/v1/simulation/0/load/active')
self.assertEqual(simulation_res.status_code, 200)
self.assertEqual(simulation_res.json, {'value': 0.1})
def test_get_reactive_load(self):
self.app.post('/api/v1/run')
simulation_res = self.app.get('/api/v1/simulation/0/load/reactive')
self.assertEqual(simulation_res.status_code, 200)
self.assertEqual(simulation_res.json, {'value': 0.05})
def test_get_simulation_by_id(self):
simulation_res = self.app.get('/api/v1/simulation/0')
self.assertEqual(simulation_res.status_code, 200)
self.assertEqual(simulation_res.json, {'id': 0, 'results': {'load': {'active': 0.1, 'reactive': 0.05}}})
def test_get_simulations_list(self):
simulation_res = self.app.get('/api/v1/simulations')
self.assertEqual(simulation_res.status_code, 200)
self.assertEqual(simulation_res.json, {'0': {'id': 0, 'results': {'load': {'active': 0.1, 'reactive': 0.05}}}})
def test_run_simulation(self):
simulation_res = self.app.post('/api/v1/simulations')
self.assertEqual(simulation_res.status_code, 201)
self.assertEqual(simulation_res.json, {'id': '10', 'results': {'load': {'active': 0.1, 'reactive': 0.05}}})
def test_run_simulation_raises(self):
simulation_res = self.app.post('/api/v1/simulations', data=dict(active=0.9, reactive=0.8))
self.assertEqual(simulation_res.status_code, 417)
def test_put_simulation_replace(self):
self.app.put('/api/v1/simulation/9', data=dict(active=0.4, reactive=0.01))
simulation_res = self.app.put('/api/v1/simulation/9', data=dict(active=0.2, reactive=0.02))
self.assertEqual(simulation_res.status_code, 201)
self.assertEqual(simulation_res.json, {'id': '9', 'results': {'load': {'active': 0.2, 'reactive': 0.02}}})
def test_put_simulation_new(self):
simulation_res = self.app.put('/api/v1/simulation/8', data=dict(active=0.2, reactive=0.02))
self.assertEqual(simulation_res.status_code, 201)
def test_put_simulation_raises(self):
simulation_res = self.app.put('/api/v1/simulation/1', data=dict(active=0.9, reactive=0.8))
self.assertEqual(simulation_res.status_code, 417)
def test_delete_simulation(self):
self.app.put('/api/v1/simulation/5', data=dict(active=0.2, reactive=0.02))
simulation_res = self.app.delete('/api/v1/simulation/5')
self.assertEqual(simulation_res.status_code, 204)
|
[
"api.controllers.simulation.SimulationController",
"api.server.rest.test_client"
] |
[((206, 228), 'api.controllers.simulation.SimulationController', 'SimulationController', ([], {}), '()\n', (226, 228), False, 'from api.controllers.simulation import SimulationController\n'), ((1080, 1098), 'api.server.rest.test_client', 'rest.test_client', ([], {}), '()\n', (1096, 1098), False, 'from api.server import rest\n')]
|
import cv2 as cv
import numpy as np
cameraman = cv.imread('./Photos/cameraman.tif')
saturn = cv.imread('./Photos/saturn.png')
saturn = cv.resize(saturn, (cameraman.shape[0], cameraman.shape[1]), interpolation=cv.INTER_AREA)
# convert cameraman to grayscale; split saturn into its B, G, R channels
cameraman = cv.cvtColor(cameraman, cv.COLOR_BGR2GRAY)
b, g, r = cv.split(saturn)
r = r >> 2
r = r << 2
g = g >> 2
g = g << 2
b = b >> 2
b = b << 2
cr = cameraman >> 6
cg = cameraman << 2
cg = cg >> 6
cb = cameraman << 4
cb= cb >> 6
# bitwise OR merges the hidden cameraman bits into the cleared low bits of each channel
r = cv.bitwise_or(r, cr)
g = cv.bitwise_or(g, cg)
b = cv.bitwise_or(b, cb)
merged = cv.merge([b,g,r])
b,g,r=cv.split(merged)
redpart = r<<6
greenpart = g<<6
greenpart = greenpart>>2
bluepart = b<<6
bluepart = bluepart>>4
# if we use bitwise OR here the image gets distorted.
# if we use merge here the image gets red.
image=bluepart|greenpart|redpart
cv.imshow('saturn',merged)
cv.imshow('hiddenimage',image)
cv.waitKey(0)
|
[
"cv2.cvtColor",
"cv2.waitKey",
"cv2.imread",
"cv2.split",
"cv2.bitwise_or",
"cv2.merge",
"cv2.imshow",
"cv2.resize"
] |
[((48, 83), 'cv2.imread', 'cv.imread', (['"""./Photos/cameraman.tif"""'], {}), "('./Photos/cameraman.tif')\n", (57, 83), True, 'import cv2 as cv\n'), ((93, 125), 'cv2.imread', 'cv.imread', (['"""./Photos/saturn.png"""'], {}), "('./Photos/saturn.png')\n", (102, 125), True, 'import cv2 as cv\n'), ((135, 228), 'cv2.resize', 'cv.resize', (['saturn', '(cameraman.shape[0], cameraman.shape[1])'], {'interpolation': 'cv.INTER_AREA'}), '(saturn, (cameraman.shape[0], cameraman.shape[1]), interpolation=\n cv.INTER_AREA)\n', (144, 228), True, 'import cv2 as cv\n'), ((274, 315), 'cv2.cvtColor', 'cv.cvtColor', (['cameraman', 'cv.COLOR_BGR2GRAY'], {}), '(cameraman, cv.COLOR_BGR2GRAY)\n', (285, 315), True, 'import cv2 as cv\n'), ((325, 341), 'cv2.split', 'cv.split', (['saturn'], {}), '(saturn)\n', (333, 341), True, 'import cv2 as cv\n'), ((520, 540), 'cv2.bitwise_or', 'cv.bitwise_or', (['r', 'cr'], {}), '(r, cr)\n', (533, 540), True, 'import cv2 as cv\n'), ((545, 565), 'cv2.bitwise_or', 'cv.bitwise_or', (['g', 'cg'], {}), '(g, cg)\n', (558, 565), True, 'import cv2 as cv\n'), ((570, 590), 'cv2.bitwise_or', 'cv.bitwise_or', (['b', 'cb'], {}), '(b, cb)\n', (583, 590), True, 'import cv2 as cv\n'), ((600, 619), 'cv2.merge', 'cv.merge', (['[b, g, r]'], {}), '([b, g, r])\n', (608, 619), True, 'import cv2 as cv\n'), ((624, 640), 'cv2.split', 'cv.split', (['merged'], {}), '(merged)\n', (632, 640), True, 'import cv2 as cv\n'), ((860, 887), 'cv2.imshow', 'cv.imshow', (['"""saturn"""', 'merged'], {}), "('saturn', merged)\n", (869, 887), True, 'import cv2 as cv\n'), ((887, 918), 'cv2.imshow', 'cv.imshow', (['"""hiddenimage"""', 'image'], {}), "('hiddenimage', image)\n", (896, 918), True, 'import cv2 as cv\n'), ((918, 931), 'cv2.waitKey', 'cv.waitKey', (['(0)'], {}), '(0)\n', (928, 931), True, 'import cv2 as cv\n')]
|
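The script above hides the top six bits of the grayscale cameraman image in the two low bits of each saturn channel. A worked single-pixel example of the same bit arithmetic, using plain ints with & 0xFF standing in for uint8 overflow:

secret = 0b10110100                          # one pixel of the hidden image
cover = 0b11001010                           # one pixel of a cover channel

cleared = (cover >> 2) << 2                  # low two bits cleared, as in the script
r = cleared | (secret >> 6)                  # secret bits 7-6 -> low bits of red
g = cleared | ((secret << 2) & 0xFF) >> 6    # secret bits 5-4 -> low bits of green
b = cleared | ((secret << 4) & 0xFF) >> 6    # secret bits 3-2 -> low bits of blue

recovered = ((r << 6) & 0xFF) | (((g << 6) & 0xFF) >> 2) | (((b << 6) & 0xFF) >> 4)
assert recovered == secret & 0b11111100      # top six bits survive, low two are lost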
"""
YANK Health Report Notebook formatter
This module handles all the figure formatting and processing to minimize the code shown in the Health Report Jupyter
Notebook. All data processing and analysis is handled by the main multistate.analyzers package,
mainly image formatting is passed here.
"""
import os
import yaml
import numpy as np
from scipy import interpolate
from matplotlib import pyplot as plt
from matplotlib.colors import LinearSegmentedColormap
from matplotlib import gridspec
from pymbar import MBAR
import seaborn as sns
from simtk import unit as units
from .. import analyze
kB = units.BOLTZMANN_CONSTANT_kB * units.AVOGADRO_CONSTANT_NA
class HealthReportData(analyze.ExperimentAnalyzer):
"""
Class which houses the data used for the notebook and the generation of all plots including formatting
"""
def general_simulation_data(self):
"""
General purpose simulation data on number of iterations, number of states, and number of atoms.
This just prints out this data in a regular, formatted pattern.
"""
general = self.get_general_simulation_data()
iterations = {}
nreplicas = {}
nstates = {}
natoms = {}
for phase_name in self.phase_names:
iterations[phase_name] = general[phase_name]['iterations']
nreplicas[phase_name] = general[phase_name]['nreplicas']
nstates[phase_name] = general[phase_name]['nstates']
natoms[phase_name] = general[phase_name]['natoms']
leniter = max(len('Iterations'), *[len(str(i)) for i in iterations.values()]) + 2
lenreplica = max(len('Replicas'), *[len(str(i)) for i in nreplicas.values()]) + 2
lenstates = max(len('States'), *[len(str(i)) for i in nstates.values()]) + 2
lennatoms = max(len('Num Atoms'), *[len(str(i)) for i in natoms.values()]) + 2
lenleftcol = max(len('Phase'), *[len(phase) for phase in self.phase_names]) + 2
lines = []
headstring = ''
headstring += ('{:^' + '{}'.format(lenleftcol) + '}').format('Phase') + '|'
headstring += ('{:^' + '{}'.format(leniter) + '}').format('Iterations') + '|'
headstring += ('{:^' + '{}'.format(lenreplica) + '}').format('Replicas') + '|'
headstring += ('{:^' + '{}'.format(lenstates) + '}').format('States') + '|'
headstring += ('{:^' + '{}'.format(lennatoms) + '}').format('Num Atoms')
lines.append(headstring)
lenline = len(headstring)
topdiv = '=' * lenline
lines.append(topdiv)
for phase in self.phase_names:
phasestring = ''
phasestring += ('{:^' + '{}'.format(lenleftcol) + '}').format(phase) + '|'
phasestring += ('{:^' + '{}'.format(leniter) + '}').format(iterations[phase]) + '|'
phasestring += ('{:^' + '{}'.format(lenreplica) + '}').format(nreplicas[phase]) + '|'
phasestring += ('{:^' + '{}'.format(lenstates) + '}').format(nstates[phase]) + '|'
phasestring += ('{:^' + '{}'.format(lennatoms) + '}').format(natoms[phase])
lines.append(phasestring)
lines.append('-' * lenline)
for line in lines:
print(line)
def generate_equilibration_plots(self, discard_from_start=1):
"""
Create the equilibration scatter plots showing the trend lines, correlation time,
and number of effective samples
Returns
-------
equilibration_figure : matplotlib.figure
Figure showing the equilibration between both phases
"""
serial_data = self.get_equilibration_data(discard_from_start=discard_from_start)
# Adjust figure size
plt.rcParams['figure.figsize'] = 20, 6 * self.nphases * 2
plot_grid = gridspec.GridSpec(self.nphases, 1) # Vertical distribution
equilibration_figure = plt.figure()
# Add some space between the figures
equilibration_figure.subplots_adjust(hspace=0.4)
for i, phase_name in enumerate(self.phase_names):
phase_data = serial_data[phase_name]
sub_grid = gridspec.GridSpecFromSubplotSpec(3, 1, subplot_spec=plot_grid[i])
# FIRST SUBPLOT: energy scatter
# Attach subplot to figure
p = equilibration_figure.add_subplot(sub_grid[0])
# Data assignment for plot generation
y = self.u_ns[phase_name]
N = y.size
x = np.arange(N)
# Scatter plot
p.plot(x, y, 'k.')
# Smoothed equilibrium, this is very crude but it works for large data
tck = interpolate.splrep(x, y, k=5, s=N * 1E7)
smoothed = interpolate.splev(x, tck, der=0)
p.plot(x, smoothed, '-r', linewidth=4)
# Nequil line
ylim = p.get_ylim()
p.vlines(self.nequils[phase_name], *ylim, colors='b', linewidth=4)
p.set_ylim(*ylim) # Reset limits in case vlines expanded them
p.set_xlim([0, N])
# Set text
p.set_title(phase_name + " phase", fontsize=20)
p.set_ylabel(r'$\Sigma_n u_n$ in kT', fontsize=20)
# Extra info in text boxes
            subsample_string = 'Subsample Rate: {0:.2f}\nDecorrelated Samples: {1:d}'.format(self.g_ts[phase_name], int(
                np.floor(self.Neff_maxs[phase_name])))
if np.mean([0, N]) > self.nequils[phase_name]:
txt_horz = 'right'
txt_xcoord = 0.95
else:
txt_horz = 'left'
txt_xcoord = 0.05
            smooth_index = {'right': -1, 'left': 0}  # endpoint of the smoothed curve on the text side
if np.mean(ylim) > smoothed[smooth_index[txt_horz]]:
txt_vert = 'top'
txt_ycoord = 0.95
else:
txt_vert = 'bottom'
txt_ycoord = 0.05
p.text(txt_xcoord, txt_ycoord,
subsample_string,
verticalalignment=txt_vert, horizontalalignment=txt_horz,
transform=p.transAxes,
fontsize=15,
bbox={'alpha': 1.0, 'facecolor': 'white'}
)
# SECOND SUBPLOT: g_t trace
i_t = phase_data['iterations_considered']
g_i = phase_data['subsample_rate_by_iterations_considered']
n_effective_i = phase_data['effective_samples_by_iterations_considered']
x = i_t
g = equilibration_figure.add_subplot(sub_grid[1])
g.plot(x, g_i)
ylim = g.get_ylim()
g.vlines(self.nequils[phase_name], *ylim, colors='b', linewidth=4)
g.set_ylim(*ylim) # Reset limits in case vlines expanded them
g.set_xlim([0, N])
g.set_ylabel(r'Decor. Time', fontsize=20)
            # THIRD SUBPLOT: Neff trace
ne = equilibration_figure.add_subplot(sub_grid[2])
ne.plot(x, n_effective_i)
ylim = ne.get_ylim()
ne.vlines(self.nequils[phase_name], *ylim, colors='b', linewidth=4)
ne.set_ylim(*ylim) # Reset limits in case vlines expanded them
ne.set_xlim([0, N])
ne.set_ylabel(r'Neff samples', fontsize=20)
ne.set_xlabel(r'Iteration', fontsize=20)
return equilibration_figure
def compute_rmsds(self):
        raise NotImplementedError("This function is still a prototype and has segfault issues, please disable for now")
# """Compute the RMSD of the ligand and the receptor by state"""
# if not self._equilibration_run:
# raise RuntimeError("Cannot run RMSD without first running the equilibration. Please run the "
# "corresponding function/cell first!")
# plt.rcParams['figure.figsize'] = 20, 6 * self.nphases * 2
# rmsd_figure, subplots = plt.subplots(2, 1)
# for i, phase_name in enumerate(self.phase_names):
# if phase_name not in self._serialized_data:
# self._serialized_data[phase_name] = {}
# self._serialized_data[phase_name]['rmsd'] = {}
# serial = self._serialized_data[phase_name]['rmsd']
# analyzer = self.analyzers[phase_name]
# reporter = analyzer.reporter
# metadata = reporter.read_dict('metadata')
# topography = mmtools.utils.deserialize(metadata['topography'])
# topology = topography.topology
# test_positions = reporter.read_sampler_states(0, analysis_particles_only=True)[0]
# atoms_analysis = test_positions.positions.shape[0]
# topology = topology.subset(range(atoms_analysis))
# iterations = self.iterations[phase_name]
# positions = np.zeros([iterations, atoms_analysis, 3])
# for j in range(iterations):
# sampler_states = reporter.read_sampler_states(j, analysis_particles_only=True)
# # Deconvolute
# thermo_states = reporter.read_replica_thermodynamic_states(iteration=j)
# sampler = sampler_states[thermo_states[0]]
# positions[j, :, :] = sampler.positions
# trajectory = md.Trajectory(positions, topology)
# rmsd_ligand = md.rmsd(trajectory, trajectory, frame=0, atom_indices=topography.ligand_atoms)
# rmsd_recpetor = md.rmsd(trajectory, trajectory, frame=0, atom_indices=topography.receptor_atoms)
# serial['ligand'] = rmsd_ligand.tolist()
# serial['receptor'] = rmsd_recpetor.tolist()
# p = subplots[i]
# x = range(iterations)
# p.set_title(phase_name + " phase", fontsize=20)
# p.plot(x, rmsd_ligand, label='Ligand RMSD')
# p.plot(x, rmsd_recpetor, label='Receptor RMSD')
# p.legend()
# p.set_xlim([0, iterations])
# ylim = p.get_ylim()
# p.set_ylim([0, ylim[-1]])
# p.set_ylabel(r'RMSD (nm)', fontsize=20)
# p.set_xlabel(r'Iteration', fontsize=20)
# return rmsd_figure
def generate_decorrelation_plots(self, decorrelation_threshold=0.1):
"""
Parameters
----------
decorrelation_threshold : float, Optional
            When the number of decorrelated samples is less than this fraction of the total number of samples, raise a
            warning. Default: `0.1`.
Returns
-------
decorrelation_figure : matplotlib.figure
Figure showing the decorrelation pie chart data of how the samples are distributed between equilibration,
correlation, and decorrelation.
"""
if not self._general_run or not self._equilibration_run:
raise RuntimeError("Cannot generate decorrelation data without general simulation data and equilibration "
"data first! Please run the corresponding functions/cells.")
# This will exist because of _equilibration_run
eq_data = self.get_equilibration_data(discard_from_start=self._n_discarded)
# Readjust figure output
plt.rcParams['figure.figsize'] = 20, 8
decorrelation_figure = plt.figure()
decorrelation_figure.subplots_adjust(wspace=0.2)
plotkeys = [100 + (10 * self.nphases) + (i + 1) for i in range(self.nphases)] # Horizontal distribution
for phase_name, plotid in zip(self.phase_names, plotkeys):
serial = eq_data[phase_name]
# Create subplot
p = decorrelation_figure.add_subplot(plotid)
labels = ['Decorrelated', 'Correlated', 'Equilibration']
colors = ['#2c7bb6', '#abd0e0', '#fdae61'] # blue, light blue, and orange
explode = [0, 0, 0.0]
n_iter = self.iterations[phase_name]
decor = serial['count_decorrelated_samples']
eq = serial['count_total_equilibration_samples']
cor = serial['count_correlated_samples']
dat = np.array([decor, cor, eq]) / float(n_iter)
if dat[0] <= decorrelation_threshold:
colors[0] = '#d7191c' # Red for warning
patch, txt, autotxt = p.pie(
dat,
explode=explode,
labels=labels,
colors=colors,
autopct='%1.1f%%',
shadow=True,
startangle=90 + 360 * dat[0] / 2, # put center of decor at top
counterclock=False,
textprops={'fontsize': 14}
)
for tx in txt: # This is the only way I have found to adjust the label font size
tx.set_fontsize(18)
p.axis('equal')
p.set_title(phase_name + " phase", fontsize=20, y=1.05)
# Generate warning if need be
if dat[0] <= decorrelation_threshold:
p.text(
0.5, -0.1,
"Warning! Fewer than {0:.1f}% samples are\nequilibrated and decorelated!".format(
decorrelation_threshold * 100),
verticalalignment='bottom', horizontalalignment='center',
transform=p.transAxes,
fontsize=20,
color='red',
bbox={'alpha': 1.0, 'facecolor': 'white', 'lw': 0, 'pad': 0}
)
return decorrelation_figure
def generate_mixing_plot(self, mixing_cutoff=0.05, mixing_warning_threshold=0.90, cmap_override=None):
"""
Generate the state diffusion mixing map as an image instead of array of number
Parameters
----------
mixing_cutoff : float
Minimal level of mixing percent from state `i` to `j` that will be plotted.
Domain: [0,1]
Default: 0.05.
mixing_warning_threshold : float
Level of mixing where transition from state `i` to `j` generates a warning based on percent of total swaps.
Domain (mixing_cutoff, 1)
Default: `0.90`.
cmap_override : None or string
            Override the custom colormap that is used for this figure in case the figure is too white or you want to
            use something other than the custom map defined here.
Returns
-------
mixing_figure : matplotlib.figure
Figure showing the state mixing as a color diffusion map instead of grid of numbers
"""
mixing_serial = self.get_mixing_data()
# Set up image
mixing_figure, subplots = plt.subplots(1, 2)
# Create custom cmap goes from white to pure blue, goes red if the threshold is reached
if mixing_cutoff is None:
mixing_cutoff = 0
if mixing_warning_threshold <= mixing_cutoff:
raise ValueError("mixing_warning_threshold must be larger than mixing_cutoff")
if (mixing_warning_threshold > 1 or mixing_cutoff > 1 or
mixing_warning_threshold < 0 or mixing_cutoff < 0):
raise ValueError("mixing_warning_threshold and mixing_cutoff must be between [0,1]")
cdict = {'red': ((0.0, 1.0, 1.0),
(mixing_cutoff, 1.0, 1.0),
(mixing_warning_threshold, 0.0, 0.0),
(mixing_warning_threshold, 1.0, 1.0),
(1.0, 1.0, 1.0)),
'green': ((0.0, 1.0, 1.0),
(mixing_cutoff, 1.0, 1.0),
(mixing_warning_threshold, 0.0, 0.0),
(1.0, 0.0, 0.0)),
'blue': ((0.0, 1.0, 1.0),
(mixing_cutoff, 1.0, 1.0),
(mixing_warning_threshold, 1.0, 1.0),
(mixing_warning_threshold, 0.0, 0.0),
(1.0, 0.0, 0.0))}
        if cmap_override is not None:
            # Use the named matplotlib cmap instead if your results are too diffuse to see over the white
            cmap = plt.get_cmap(cmap_override)
else:
cmap = LinearSegmentedColormap('BlueWarnRed', cdict)
# Plot a diffusing mixing map for each phase.
for phase_name, subplot in zip(self.phase_names, subplots):
serial = mixing_serial[phase_name]
transition_matrix = serial['transitions']
eigenvalues = serial['eigenvalues']
statistical_inefficiency = serial['stat_inefficiency']
# Without vmin/vmax, the image normalizes the values to mixing_data.max
# which screws up the warning colormap.
# Can also use norm=NoNorm(), but that makes the colorbar manipulation fail.
output_image = subplot.imshow(transition_matrix, aspect='equal',
cmap=cmap, vmin=0, vmax=1)
# Add colorbar.
decimal = 2 # Precision setting
nticks = 11
# The color bar has to be configured independently of the source image
            # or it can't be truncated to show only the data; i.e. it would instead
            # always span 0-1.
ubound = np.min([np.around(transition_matrix.max(), decimals=decimal) + 10 ** (-decimal), 1])
lbound = np.max([np.around(transition_matrix.min(), decimals=decimal) - 10 ** (-decimal), 0])
boundslice = np.linspace(lbound, ubound, 256)
cbar = plt.colorbar(output_image, ax=subplot, orientation='vertical',
boundaries=boundslice,
values=boundslice[1:],
format='%.{}f'.format(decimal))
# Update ticks.
ticks = np.linspace(lbound, ubound, nticks)
cbar.set_ticks(ticks)
# Title: Perron eigenvalue, equilibration time and statistical inefficiency.
perron_eigenvalue = eigenvalues[1]
title_txt = (phase_name + ' phase\n'
'Perron eigenvalue: {}\n'
'State equilibration timescale: ~{} iterations\n')
if perron_eigenvalue >= 1:
title_txt = title_txt.format('1.0', '$\infty$')
else:
equilibration_timescale = 1.0 / (1.0 - perron_eigenvalue)
title_txt = title_txt.format('{:.5f}', '{:.1f}')
title_txt = title_txt.format(perron_eigenvalue, equilibration_timescale)
title_txt += 'Replica state index statistical inefficiency: {:.3f}'.format(statistical_inefficiency)
subplot.set_title(title_txt, fontsize=20, y=1.05)
# Display Warning.
if np.any(transition_matrix >= mixing_warning_threshold):
subplot.text(
0.5, -0.2,
("Warning!\nThere were states that less than {0:.2f}% swaps!\n"
"Consider adding more states!".format((1 - mixing_warning_threshold) * 100)),
verticalalignment='bottom', horizontalalignment='center',
transform=subplot.transAxes,
fontsize=20,
color='red',
bbox={'alpha': 1.0, 'facecolor': 'white', 'lw': 0, 'pad': 0}
)
return mixing_figure
def generate_replica_mixing_plot(self, phase_stacked_replica_plots=False):
"""
Generate the replica trajectory mixing plots. Show the state of each replica as a function of simulation time
Parameters
----------
phase_stacked_replica_plots : boolean, Default: False
Determine if the phases should be shown side by side, or one on top of the other. If True, the two phases
will be shown with phase 1 on top and phase 2 on bottom.
Returns
-------
replica_figure : matplotlib.figure
Figure showing the replica state trajectories for both phases
"""
# Determine max number of states
max_n_replicas = 0
for i, phase_name in enumerate(self.phase_names):
# Gather state NK
analyzer = self.analyzers[phase_name]
n_replicas = analyzer.reporter.n_replicas
max_n_replicas = max(n_replicas, max_n_replicas)
# Create Parent Gridspec
if phase_stacked_replica_plots:
plot_grid = gridspec.GridSpec(2, 1)
plt.rcParams['figure.figsize'] = 20, max_n_replicas * 6
else:
plot_grid = gridspec.GridSpec(1, 2)
plt.rcParams['figure.figsize'] = 20, max_n_replicas * 3
replica_figure = plt.figure()
for i, phase_name in enumerate(self.phase_names):
# Gather state NK
analyzer = self.analyzers[phase_name]
sampled_energies, _, _, state_kn = analyzer.read_energies()
n_replicas, n_states, n_iterations = sampled_energies.shape
# Create subgrid
sub_grid = gridspec.GridSpecFromSubplotSpec(n_replicas, 1, subplot_spec=plot_grid[i])
# Loop through all states
for replica_index in range(n_replicas):
# Add plot
plot = replica_figure.add_subplot(sub_grid[replica_index])
# Actually plot
plot.plot(state_kn[replica_index, :], 'k.')
# Format plot
plot.set_yticks([])
plot.set_xlim([0, n_iterations])
plot.set_ylim([0, n_states])
if replica_index < n_replicas - 1:
plot.set_xticks([])
plot.set_ylabel('{}'.format(replica_index))
if replica_index == 0: # Title
plot.set_title('{} phase'.format(phase_name), fontsize=20)
self._replica_mixing_run = True
return replica_figure
def generate_free_energy(self):
fe_data = self.get_experiment_free_energy_data()
delta_f = fe_data['free_energy_diff']
delta_h = fe_data['enthalpy_diff']
delta_f_err = fe_data['free_energy_diff_error']
delta_h_err = fe_data['enthalpy_diff_error']
delta_f_unit = fe_data['free_energy_diff_unit']
delta_h_unit = fe_data['enthalpy_diff_unit']
delta_f_err_unit = fe_data['free_energy_diff_error_unit']
delta_h_err_unit = fe_data['enthalpy_diff_error_unit']
# Attempt to guess type of calculation
calculation_type = ''
for phase in self.phase_names:
if 'complex' in phase:
calculation_type = ' of binding'
elif 'solvent1' in phase:
calculation_type = ' of solvation'
print('Free energy{:<13}: {:9.3f} +- {:.3f} kT ({:.3f} +- {:.3f} kcal/mol)'.format(
calculation_type, delta_f, delta_f_err, delta_f_unit / units.kilocalories_per_mole,
delta_f_err_unit / units.kilocalories_per_mole))
for phase in self.phase_names:
delta_f_phase = fe_data[phase]['free_energy_diff']
delta_f_err_phase = fe_data[phase]['free_energy_diff_error']
            delta_f_ssc_phase = fe_data[phase]['free_energy_diff_standard_state_correction']
            print('DeltaG {:<17}: {:9.3f} +- {:.3f} kT'.format(phase, delta_f_phase,
                                                               delta_f_err_phase))
            if delta_f_ssc_phase != 0.0:
                print('DeltaG {:<17}: {:18.3f} kT'.format('standard state correction', delta_f_ssc_phase))
print('')
print('Enthalpy{:<16}: {:9.3f} +- {:.3f} kT ({:.3f} +- {:.3f} kcal/mol)'.format(
calculation_type, delta_h, delta_h_err, delta_h_unit / units.kilocalories_per_mole,
delta_h_err_unit / units.kilocalories_per_mole)
)
def free_energy_trace(self, discard_from_start=1, n_trace=10):
"""
Trace the free energy by keeping fewer and fewer samples in both forward and reverse direction
Returns
-------
free_energy_trace_figure : matplotlib.figure
Figure showing the equilibration between both phases
"""
trace_spacing = 1.0/n_trace
def format_trace_plot(plot: plt.Axes, trace_forward: np.ndarray, trace_reverse: np.ndarray):
x = np.arange(n_trace + 1)[1:] * trace_spacing * 100
plot.errorbar(x, trace_forward[:, 0], yerr=2 * trace_forward[:, 1], ecolor='b',
elinewidth=0, mec='none', mew=0, linestyle='None',
zorder=10)
plot.plot(x, trace_forward[:, 0], 'b-', marker='o', mec='b', mfc='w', label='Forward', zorder=20,)
plot.errorbar(x, trace_reverse[:, 0], yerr=2 * trace_reverse[:, 1], ecolor='r',
elinewidth=0, mec='none', mew=0, linestyle='None',
zorder=10)
plot.plot(x, trace_reverse[:, 0], 'r-', marker='o', mec='r', mfc='w', label='Reverse', zorder=20)
y_fill_upper = [trace_forward[-1, 0] + 2 * trace_forward[-1, 1]] * 2
y_fill_lower = [trace_forward[-1, 0] - 2 * trace_forward[-1, 1]] * 2
xlim = [0, 100]
plot.fill_between(xlim, y_fill_lower, y_fill_upper, color='orchid', zorder=5)
plot.set_xlim(xlim)
plot.legend()
plot.set_xlabel("% Samples Analyzed", fontsize=20)
plot.set_ylabel(r"$\Delta G$ in kcal/mol", fontsize=20)
# Adjust figure size
plt.rcParams['figure.figsize'] = 15, 6 * (self.nphases + 1) * 2
plot_grid = gridspec.GridSpec(self.nphases + 1, 1) # Vertical distribution
free_energy_trace_figure = plt.figure()
# Add some space between the figures
free_energy_trace_figure.subplots_adjust(hspace=0.4)
traces = {}
for i, phase_name in enumerate(self.phase_names):
traces[phase_name] = {}
if phase_name not in self._serialized_data:
self._serialized_data[phase_name] = {}
serial = self._serialized_data[phase_name]
if "free_energy" not in serial:
serial["free_energy"] = {}
serial = serial["free_energy"]
free_energy_trace_f = np.zeros([n_trace, 2], dtype=float)
free_energy_trace_r = np.zeros([n_trace, 2], dtype=float)
p = free_energy_trace_figure.add_subplot(plot_grid[i])
analyzer = self.analyzers[phase_name]
kcal = analyzer.kT / units.kilocalorie_per_mole
# Data crunching to get timeseries
sampled_energies, _, _, states = analyzer.read_energies()
n_replica, n_states, _ = sampled_energies.shape
# Sample at index 0 is actually the minimized structure and NOT from the equilibrium distribution
# This throws off all of the equilibrium data
sampled_energies = sampled_energies[:, :, discard_from_start:]
states = states[:, discard_from_start:]
total_iterations = sampled_energies.shape[-1]
for trace_factor in range(n_trace, 0, -1): # Reverse order tracing
trace_percent = trace_spacing*trace_factor
j = trace_factor - 1 # Indexing
kept_iterations = int(np.ceil(trace_percent*total_iterations))
u_forward = sampled_energies[:, :, :kept_iterations]
s_forward = states[:, :kept_iterations]
u_reverse = sampled_energies[:, :, -1:-kept_iterations-1:-1]
s_reverse = states[:, -1:-kept_iterations - 1:-1]
for energy_sub, state_sub, storage in [
(u_forward, s_forward, free_energy_trace_f), (u_reverse, s_reverse, free_energy_trace_r)]:
u_n = analyzer.get_effective_energy_timeseries(energies=energy_sub,
replica_state_indices=state_sub)
i_t, g_i, n_effective_i = analyze.multistate.get_equilibration_data_per_sample(u_n)
i_max = n_effective_i.argmax()
number_equilibrated = i_t[i_max]
g_t = g_i[i_max]
if not self.use_full_trajectory:
energy_sub = analyze.multistate.utils.remove_unequilibrated_data(energy_sub,
number_equilibrated,
-1)
state_sub = analyze.multistate.utils.remove_unequilibrated_data(state_sub,
number_equilibrated, -1)
energy_sub = analyze.multistate.utils.subsample_data_along_axis(energy_sub, g_t, -1)
state_sub = analyze.multistate.utils.subsample_data_along_axis(state_sub, g_t, -1)
samples_per_state = np.zeros([n_states], dtype=int)
unique_sampled_states, counts = np.unique(state_sub, return_counts=True)
# Assign those counts to the correct range of states
samples_per_state[unique_sampled_states] = counts
mbar = MBAR(energy_sub, samples_per_state)
fe_data = mbar.getFreeEnergyDifferences(compute_uncertainty=True)
# Trap theta_ij output
try:
fe, dfe, _ = fe_data
except ValueError:
fe, dfe = fe_data
ref_i, ref_j = analyzer.reference_states
storage[j, :] = fe[ref_i, ref_j] * kcal, dfe[ref_i, ref_j] * kcal
format_trace_plot(p, free_energy_trace_f, free_energy_trace_r)
p.set_title("{} Phase".format(phase_name.title()), fontsize=20)
traces[phase_name]['f'] = free_energy_trace_f
traces[phase_name]['r'] = free_energy_trace_r
serial['forward'] = free_energy_trace_f.tolist()
serial['reverse'] = free_energy_trace_r.tolist()
# Finally handle last combined plot
combined_trace_f = np.zeros([n_trace, 2], dtype=float)
combined_trace_r = np.zeros([n_trace, 2], dtype=float)
for phase_name in self.phase_names:
phase_f = traces[phase_name]['f']
phase_r = traces[phase_name]['r']
combined_trace_f[:, 0] += phase_f[:, 0]
combined_trace_f[:, 1] = np.sqrt(combined_trace_f[:, 1]**2 + phase_f[:, 1]**2)
combined_trace_r[:, 0] += phase_r[:, 0]
combined_trace_r[:, 1] = np.sqrt(combined_trace_r[:, 1] ** 2 + phase_r[:, 1] ** 2)
p = free_energy_trace_figure.add_subplot(plot_grid[-1])
format_trace_plot(p, combined_trace_f, combined_trace_r)
p.set_title("Combined Phases", fontsize=20)
return free_energy_trace_figure
def restraint_distributions_plot(self):
ENERGIES_IDX = 0
DISTANCES_IDX = 1
# Find the phase that defines the restraint energies and distances.
for phase_name in self.phase_names:
analyzer = self.analyzers[phase_name]
lambda1_data = list(analyzer._get_restraint_energies_distances_at_state(0))
if len(lambda1_data[ENERGIES_IDX]) != 0:
break
# Check if we have a restraint at all.
if len(lambda1_data[ENERGIES_IDX]) == 0:
print('The restraint unbiasing step was not performed for this calculation.')
return
# The restraint distances are not computed if there's no distance cutoff.
lambda0_data = list(analyzer._get_restraint_energies_distances_at_state(-1))
cutoffs = list(analyzer._get_restraint_cutoffs())
xlabels = ['Restraint energies [kT]', 'Restraint distances [Angstrom]']
        has_distances = len(lambda1_data[DISTANCES_IDX]) != 0
        for data in [lambda1_data, lambda0_data, cutoffs, xlabels]:
            if not has_distances:
                del data[DISTANCES_IDX]
            elif isinstance(data[DISTANCES_IDX], units.Quantity):
                # Convert the distances into the units that will be printed.
                data[DISTANCES_IDX] /= units.angstroms
# Plot the lambda=1 and lambda=0 restraints data.
figure, axes = plt.subplots(ncols=len(lambda1_data), figsize=(20, 10))
if len(lambda1_data) == 1:
axes = [axes]
for ax, lambda1, lambda0 in zip(axes, lambda1_data, lambda0_data):
sns.distplot(lambda1, ax=ax, kde=False, label='bound state')
sns.distplot(lambda0, ax=ax, kde=False, label='non-interacting state')
# Plot the cutoffs used for the restraint unbiasing.
for ax, cutoff in zip(axes, cutoffs):
limits = ax.get_ylim()
ax.plot([cutoff for _ in range(100)], np.linspace(limits[0], limits[1]/2, num=100))
# Labels and legend.
for i, (ax, xlabel) in enumerate(zip(axes, xlabels)):
ax.set_xlabel(xlabel)
if i == 0:
ax.set_ylabel('Number of samples')
elif i == 1:
ax.legend(loc='upper right')
return figure
def report_version(self):
current_version = self._serialized_data['yank_version']
print("Rendered with YANK Version {}".format(current_version))
def dump_serial_data(self, path):
"""Dump the serialized data to YAML file"""
true_path, ext = os.path.splitext(path)
if not ext: # empty string check
ext = '.yaml'
true_path += ext
with open(true_path, 'w') as f:
f.write(yaml.dump(self._serialized_data))
|
[
"matplotlib.colors.LinearSegmentedColormap",
"numpy.floor",
"yaml.dump",
"matplotlib.pyplot.figure",
"numpy.mean",
"numpy.arange",
"pymbar.MBAR",
"numpy.unique",
"numpy.linspace",
"scipy.interpolate.splrep",
"matplotlib.pyplot.subplots",
"matplotlib.pyplot.get_cmap",
"numpy.ceil",
"numpy.zeros",
"numpy.any",
"numpy.array",
"os.path.splitext",
"seaborn.distplot",
"scipy.interpolate.splev",
"matplotlib.gridspec.GridSpec",
"matplotlib.gridspec.GridSpecFromSubplotSpec",
"numpy.sqrt"
] |
[((3800, 3834), 'matplotlib.gridspec.GridSpec', 'gridspec.GridSpec', (['self.nphases', '(1)'], {}), '(self.nphases, 1)\n', (3817, 3834), False, 'from matplotlib import gridspec\n'), ((3891, 3903), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (3901, 3903), True, 'from matplotlib import pyplot as plt\n'), ((11248, 11260), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (11258, 11260), True, 'from matplotlib import pyplot as plt\n'), ((14592, 14610), 'matplotlib.pyplot.subplots', 'plt.subplots', (['(1)', '(2)'], {}), '(1, 2)\n', (14604, 14610), True, 'from matplotlib import pyplot as plt\n'), ((20617, 20629), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (20627, 20629), True, 'from matplotlib import pyplot as plt\n'), ((25534, 25572), 'matplotlib.gridspec.GridSpec', 'gridspec.GridSpec', (['(self.nphases + 1)', '(1)'], {}), '(self.nphases + 1, 1)\n', (25551, 25572), False, 'from matplotlib import gridspec\n'), ((25633, 25645), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (25643, 25645), True, 'from matplotlib import pyplot as plt\n'), ((30192, 30227), 'numpy.zeros', 'np.zeros', (['[n_trace, 2]'], {'dtype': 'float'}), '([n_trace, 2], dtype=float)\n', (30200, 30227), True, 'import numpy as np\n'), ((30255, 30290), 'numpy.zeros', 'np.zeros', (['[n_trace, 2]'], {'dtype': 'float'}), '([n_trace, 2], dtype=float)\n', (30263, 30290), True, 'import numpy as np\n'), ((33484, 33506), 'os.path.splitext', 'os.path.splitext', (['path'], {}), '(path)\n', (33500, 33506), False, 'import os\n'), ((4136, 4201), 'matplotlib.gridspec.GridSpecFromSubplotSpec', 'gridspec.GridSpecFromSubplotSpec', (['(3)', '(1)'], {'subplot_spec': 'plot_grid[i]'}), '(3, 1, subplot_spec=plot_grid[i])\n', (4168, 4201), False, 'from matplotlib import gridspec\n'), ((4475, 4487), 'numpy.arange', 'np.arange', (['N'], {}), '(N)\n', (4484, 4487), True, 'import numpy as np\n'), ((4647, 4694), 'scipy.interpolate.splrep', 'interpolate.splrep', (['x', 'y'], {'k': '(5)', 's': '(N * 10000000.0)'}), '(x, y, k=5, s=N * 10000000.0)\n', (4665, 4694), False, 'from scipy import interpolate\n'), ((4711, 4743), 'scipy.interpolate.splev', 'interpolate.splev', (['x', 'tck'], {'der': '(0)'}), '(x, tck, der=0)\n', (4728, 4743), False, 'from scipy import interpolate\n'), ((16038, 16059), 'matplotlib.pyplot.get_cmap', 'plt.get_cmap', (['"""Blues"""'], {}), "('Blues')\n", (16050, 16059), True, 'from matplotlib import pyplot as plt\n'), ((16093, 16138), 'matplotlib.colors.LinearSegmentedColormap', 'LinearSegmentedColormap', (['"""BlueWarnRed"""', 'cdict'], {}), "('BlueWarnRed', cdict)\n", (16116, 16138), False, 'from matplotlib.colors import LinearSegmentedColormap\n'), ((17379, 17411), 'numpy.linspace', 'np.linspace', (['lbound', 'ubound', '(256)'], {}), '(lbound, ubound, 256)\n', (17390, 17411), True, 'import numpy as np\n'), ((17716, 17751), 'numpy.linspace', 'np.linspace', (['lbound', 'ubound', 'nticks'], {}), '(lbound, ubound, nticks)\n', (17727, 17751), True, 'import numpy as np\n'), ((18670, 18723), 'numpy.any', 'np.any', (['(transition_matrix >= mixing_warning_threshold)'], {}), '(transition_matrix >= mixing_warning_threshold)\n', (18676, 18723), True, 'import numpy as np\n'), ((20370, 20393), 'matplotlib.gridspec.GridSpec', 'gridspec.GridSpec', (['(2)', '(1)'], {}), '(2, 1)\n', (20387, 20393), False, 'from matplotlib import gridspec\n'), ((20500, 20523), 'matplotlib.gridspec.GridSpec', 'gridspec.GridSpec', (['(1)', '(2)'], {}), '(1, 2)\n', (20517, 20523), False, 'from matplotlib import gridspec\n'), ((20965, 21039), 'matplotlib.gridspec.GridSpecFromSubplotSpec', 'gridspec.GridSpecFromSubplotSpec', (['n_replicas', '(1)'], {'subplot_spec': 'plot_grid[i]'}), '(n_replicas, 1, subplot_spec=plot_grid[i])\n', (20997, 21039), False, 'from matplotlib import gridspec\n'), ((26196, 26231), 'numpy.zeros', 'np.zeros', (['[n_trace, 2]'], {'dtype': 'float'}), '([n_trace, 2], dtype=float)\n', (26204, 26231), True, 'import numpy as np\n'), ((26266, 26301), 'numpy.zeros', 'np.zeros', (['[n_trace, 2]'], {'dtype': 'float'}), '([n_trace, 2], dtype=float)\n', (26274, 26301), True, 'import numpy as np\n'), ((30516, 30573), 'numpy.sqrt', 'np.sqrt', (['(combined_trace_f[:, 1] ** 2 + phase_f[:, 1] ** 2)'], {}), '(combined_trace_f[:, 1] ** 2 + phase_f[:, 1] ** 2)\n', (30523, 30573), True, 'import numpy as np\n'), ((30659, 30716), 'numpy.sqrt', 'np.sqrt', (['(combined_trace_r[:, 1] ** 2 + phase_r[:, 1] ** 2)'], {}), '(combined_trace_r[:, 1] ** 2 + phase_r[:, 1] ** 2)\n', (30666, 30716), True, 'import numpy as np\n'), ((32526, 32586), 'seaborn.distplot', 'sns.distplot', (['lambda1'], {'ax': 'ax', 'kde': '(False)', 'label': '"""bound state"""'}), "(lambda1, ax=ax, kde=False, label='bound state')\n", (32538, 32586), True, 'import seaborn as sns\n'), ((32599, 32669), 'seaborn.distplot', 'sns.distplot', (['lambda0'], {'ax': 'ax', 'kde': '(False)', 'label': '"""non-interacting state"""'}), "(lambda0, ax=ax, kde=False, label='non-interacting state')\n", (32611, 32669), True, 'import seaborn as sns\n'), ((5414, 5429), 'numpy.mean', 'np.mean', (['[0, N]'], {}), '([0, N])\n', (5421, 5429), True, 'import numpy as np\n'), ((5695, 5708), 'numpy.mean', 'np.mean', (['ylim'], {}), '(ylim)\n', (5702, 5708), True, 'import numpy as np\n'), ((12053, 12079), 'numpy.array', 'np.array', (['[decor, cor, eq]'], {}), '([decor, cor, eq])\n', (12061, 12079), True, 'import numpy as np\n'), ((32863, 32909), 'numpy.linspace', 'np.linspace', (['limits[0]', '(limits[1] / 2)'], {'num': '(100)'}), '(limits[0], limits[1] / 2, num=100)\n', (32874, 32909), True, 'import numpy as np\n'), ((33660, 33692), 'yaml.dump', 'yaml.dump', (['self._serialized_data'], {}), '(self._serialized_data)\n', (33669, 33692), False, 'import yaml\n'), ((5360, 5396), 'numpy.floor', 'np.floor', (['self.Neff_maxs[phase_name]'], {}), '(self.Neff_maxs[phase_name])\n', (5368, 5396), True, 'import numpy as np\n'), ((27235, 27276), 'numpy.ceil', 'np.ceil', (['(trace_percent * total_iterations)'], {}), '(trace_percent * total_iterations)\n', (27242, 27276), True, 'import numpy as np\n'), ((28974, 29005), 'numpy.zeros', 'np.zeros', (['[n_states]'], {'dtype': 'int'}), '([n_states], dtype=int)\n', (28982, 29005), True, 'import numpy as np\n'), ((29058, 29098), 'numpy.unique', 'np.unique', (['state_sub'], {'return_counts': '(True)'}), '(state_sub, return_counts=True)\n', (29067, 29098), True, 'import numpy as np\n'), ((29269, 29304), 'pymbar.MBAR', 'MBAR', (['energy_sub', 'samples_per_state'], {}), '(energy_sub, samples_per_state)\n', (29273, 29304), False, 'from pymbar import MBAR\n'), ((24262, 24284), 'numpy.arange', 'np.arange', (['(n_trace + 1)'], {}), '(n_trace + 1)\n', (24271, 24284), True, 'import numpy as np\n')]
|
from django.test import TestCase
from zerver.lib.initial_password import initial_password
from zerver.lib.db import TimeTrackingCursor
from zerver.lib import cache
from zerver.lib import event_queue
from zerver.worker import queue_processors
from zerver.lib.actions import (
check_send_message, create_stream_if_needed, do_add_subscription,
get_display_recipient,
)
from zerver.models import (
get_realm,
get_user_profile_by_email,
resolve_email_to_domain,
Client,
Message,
Realm,
Recipient,
Stream,
Subscription,
UserMessage,
)
import base64
import os
import re
import time
import ujson
import urllib
from contextlib import contextmanager
API_KEYS = {}
@contextmanager
def stub(obj, name, f):
old_f = getattr(obj, name)
setattr(obj, name, f)
yield
setattr(obj, name, old_f)
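# Illustrative usage sketch (not part of the original file; `some_module` and
# `fake_send` are hypothetical names):
#     with stub(some_module, 'send_email', fake_send):
#         ...  # code under test sees the stubbed attribute
# The original attribute is restored when the block exits.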
@contextmanager
def simulated_queue_client(client):
real_SimpleQueueClient = queue_processors.SimpleQueueClient
queue_processors.SimpleQueueClient = client
yield
queue_processors.SimpleQueueClient = real_SimpleQueueClient
@contextmanager
def tornado_redirected_to_list(lst):
real_event_queue_process_notification = event_queue.process_notification
event_queue.process_notification = lst.append
yield
event_queue.process_notification = real_event_queue_process_notification
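# Illustrative usage sketch (hypothetical test body): collect notifications
# that would normally go to Tornado so a test can assert on them:
#     events = []
#     with tornado_redirected_to_list(events):
#         ...  # trigger actions that call event_queue.process_notification
#     # `events` now holds the captured notifications, in order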
@contextmanager
def simulated_empty_cache():
cache_queries = []
def my_cache_get(key, cache_name=None):
cache_queries.append(('get', key, cache_name))
return None
def my_cache_get_many(keys, cache_name=None):
cache_queries.append(('getmany', keys, cache_name))
return None
old_get = cache.cache_get
old_get_many = cache.cache_get_many
cache.cache_get = my_cache_get
cache.cache_get_many = my_cache_get_many
yield cache_queries
cache.cache_get = old_get
cache.cache_get_many = old_get_many
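# Illustrative usage sketch: run code against an always-missing cache and
# inspect which keys were requested:
#     with simulated_empty_cache() as cache_queries:
#         ...  # every cache_get/cache_get_many returns None
#     # cache_queries holds ('get', key, cache_name) and
#     # ('getmany', keys, cache_name) tuples, in call order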
@contextmanager
def queries_captured():
'''
Allow a user to capture just the queries executed during
the with statement.
'''
queries = []
def wrapper_execute(self, action, sql, params=()):
start = time.time()
try:
return action(sql, params)
finally:
stop = time.time()
duration = stop - start
queries.append({
'sql': self.mogrify(sql, params),
'time': "%.3f" % duration,
})
old_execute = TimeTrackingCursor.execute
old_executemany = TimeTrackingCursor.executemany
def cursor_execute(self, sql, params=()):
return wrapper_execute(self, super(TimeTrackingCursor, self).execute, sql, params)
TimeTrackingCursor.execute = cursor_execute
def cursor_executemany(self, sql, params=()):
return wrapper_execute(self, super(TimeTrackingCursor, self).executemany, sql, params)
TimeTrackingCursor.executemany = cursor_executemany
yield queries
TimeTrackingCursor.execute = old_execute
TimeTrackingCursor.executemany = old_executemany
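# Illustrative usage sketch: assert on the SQL a code path executes:
#     with queries_captured() as queries:
#         ...  # code under test
#     # each entry is {'sql': <mogrified statement>, 'time': '<seconds>'},
#     # so e.g. len(queries) bounds the number of database round trips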
def find_key_by_email(address):
from django.core.mail import outbox
key_regex = re.compile("accounts/do_confirm/([a-f0-9]{40})>")
for message in reversed(outbox):
if address in message.to:
return key_regex.search(message.body).groups()[0]
def message_ids(result):
return set(message['id'] for message in result['messages'])
def message_stream_count(user_profile):
return UserMessage.objects. \
select_related("message"). \
filter(user_profile=user_profile). \
count()
def most_recent_usermessage(user_profile):
query = UserMessage.objects. \
select_related("message"). \
filter(user_profile=user_profile). \
order_by('-message')
return query[0] # Django does LIMIT here
def most_recent_message(user_profile):
usermessage = most_recent_usermessage(user_profile)
return usermessage.message
def get_user_messages(user_profile):
query = UserMessage.objects. \
select_related("message"). \
filter(user_profile=user_profile). \
order_by('message')
return [um.message for um in query]
class DummyObject:
pass
class DummyTornadoRequest:
def __init__(self):
self.connection = DummyObject()
self.connection.stream = DummyStream()
class DummyHandler(object):
def __init__(self, assert_callback):
self.assert_callback = assert_callback
self.request = DummyTornadoRequest()
# Mocks RequestHandler.async_callback, which wraps a callback to
# handle exceptions. We return the callback as-is.
def async_callback(self, cb):
return cb
def write(self, response):
        raise NotImplementedError
def zulip_finish(self, response, *ignore):
if self.assert_callback:
self.assert_callback(response)
class DummySession(object):
session_key = "0"
class DummyStream:
def closed(self):
return False
class POSTRequestMock(object):
method = "POST"
def __init__(self, post_data, user_profile, assert_callback=None):
self.REQUEST = self.POST = post_data
self.user = user_profile
self._tornado_handler = DummyHandler(assert_callback)
self.session = DummySession()
self._log_data = {}
self.META = {'PATH_INFO': 'test'}
class AuthedTestCase(TestCase):
    # Helper because self.client.patch annoyingly requires you to urlencode
def client_patch(self, url, info={}, **kwargs):
info = urllib.urlencode(info)
return self.client.patch(url, info, **kwargs)
def client_put(self, url, info={}, **kwargs):
info = urllib.urlencode(info)
return self.client.put(url, info, **kwargs)
def client_delete(self, url, info={}, **kwargs):
info = urllib.urlencode(info)
return self.client.delete(url, info, **kwargs)
def login(self, email, password=None):
if password is None:
password = initial_password(email)
return self.client.post('/accounts/login/',
{'username':email, 'password':password})
def register(self, username, password, domain="zulip.com"):
self.client.post('/accounts/home/',
{'email': username + "@" + domain})
return self.submit_reg_form_for_user(username, password, domain=domain)
def submit_reg_form_for_user(self, username, password, domain="zulip.com"):
"""
Stage two of the two-step registration process.
If things are working correctly the account should be fully
registered after this call.
"""
return self.client.post('/accounts/register/',
{'full_name': username, 'password': password,
'key': find_key_by_email(username + '@' + domain),
'terms': True})
def get_api_key(self, email):
if email not in API_KEYS:
API_KEYS[email] = get_user_profile_by_email(email).api_key
return API_KEYS[email]
def api_auth(self, email):
credentials = "%s:%s" % (email, self.get_api_key(email))
return {
'HTTP_AUTHORIZATION': 'Basic ' + base64.b64encode(credentials)
}
def get_streams(self, email):
"""
Helper function to get the stream names for a user
"""
user_profile = get_user_profile_by_email(email)
subs = Subscription.objects.filter(
user_profile = user_profile,
active = True,
recipient__type = Recipient.STREAM)
return [get_display_recipient(sub.recipient) for sub in subs]
def send_message(self, sender_name, recipient_list, message_type,
content="test content", subject="test", **kwargs):
sender = get_user_profile_by_email(sender_name)
if message_type == Recipient.PERSONAL:
message_type_name = "private"
else:
message_type_name = "stream"
if isinstance(recipient_list, basestring):
recipient_list = [recipient_list]
(sending_client, _) = Client.objects.get_or_create(name="<NAME>")
return check_send_message(
sender, sending_client, message_type_name, recipient_list, subject,
content, forged=False, forged_timestamp=None,
forwarder_user_profile=sender, realm=sender.realm, **kwargs)
def get_old_messages(self, anchor=1, num_before=100, num_after=100):
post_params = {"anchor": anchor, "num_before": num_before,
"num_after": num_after}
result = self.client.post("/json/get_old_messages", dict(post_params))
data = ujson.loads(result.content)
return data['messages']
def users_subscribed_to_stream(self, stream_name, realm_domain):
realm = get_realm(realm_domain)
stream = Stream.objects.get(name=stream_name, realm=realm)
recipient = Recipient.objects.get(type_id=stream.id, type=Recipient.STREAM)
subscriptions = Subscription.objects.filter(recipient=recipient, active=True)
return [subscription.user_profile for subscription in subscriptions]
def assert_json_success(self, result):
"""
Successful POSTs return a 200 and JSON of the form {"result": "success",
"msg": ""}.
"""
self.assertEqual(result.status_code, 200, result)
json = ujson.loads(result.content)
self.assertEqual(json.get("result"), "success")
# We have a msg key for consistency with errors, but it typically has an
# empty value.
self.assertIn("msg", json)
return json
def get_json_error(self, result, status_code=400):
self.assertEqual(result.status_code, status_code)
json = ujson.loads(result.content)
self.assertEqual(json.get("result"), "error")
return json['msg']
def assert_json_error(self, result, msg, status_code=400):
"""
Invalid POSTs return an error status code and JSON of the form
{"result": "error", "msg": "reason"}.
"""
self.assertEqual(self.get_json_error(result, status_code=status_code), msg)
def assert_length(self, queries, count, exact=False):
actual_count = len(queries)
if exact:
return self.assertTrue(actual_count == count,
"len(%s) == %s, != %s" % (queries, actual_count, count))
return self.assertTrue(actual_count <= count,
"len(%s) == %s, > %s" % (queries, actual_count, count))
def assert_json_error_contains(self, result, msg_substring):
self.assertIn(msg_substring, self.get_json_error(result))
def fixture_data(self, type, action, file_type='json'):
return open(os.path.join(os.path.dirname(__file__),
"../fixtures/%s/%s_%s.%s" % (type, type, action,file_type))).read()
# Subscribe to a stream directly
def subscribe_to_stream(self, email, stream_name, realm=None):
realm = get_realm(resolve_email_to_domain(email))
stream, _ = create_stream_if_needed(realm, stream_name)
user_profile = get_user_profile_by_email(email)
do_add_subscription(user_profile, stream, no_log=True)
# Subscribe to a stream by making an API request
def common_subscribe_to_streams(self, email, streams, extra_post_data = {}, invite_only=False):
post_data = {'subscriptions': ujson.dumps([{"name": stream} for stream in streams]),
'invite_only': ujson.dumps(invite_only)}
post_data.update(extra_post_data)
result = self.client.post("/api/v1/users/me/subscriptions", post_data, **self.api_auth(email))
return result
def send_json_payload(self, email, url, payload, stream_name=None, **post_params):
if stream_name != None:
self.subscribe_to_stream(email, stream_name)
result = self.client.post(url, payload, **post_params)
self.assert_json_success(result)
# Check the correct message was sent
msg = Message.objects.filter().order_by('-id')[0]
self.assertEqual(msg.sender.email, email)
self.assertEqual(get_display_recipient(msg.recipient), stream_name)
return msg
|
[
"zerver.lib.actions.check_send_message",
"zerver.models.get_user_profile_by_email",
"zerver.lib.actions.get_display_recipient",
"os.path.dirname",
"zerver.models.Message.objects.filter",
"zerver.models.Recipient.objects.get",
"zerver.lib.initial_password.initial_password",
"ujson.dumps",
"zerver.models.get_realm",
"zerver.models.Stream.objects.get",
"ujson.loads",
"urllib.urlencode",
"zerver.models.resolve_email_to_domain",
"zerver.lib.actions.do_add_subscription",
"zerver.models.Subscription.objects.filter",
"re.compile",
"zerver.lib.actions.create_stream_if_needed",
"zerver.models.Client.objects.get_or_create",
"time.time",
"base64.b64encode",
"zerver.models.UserMessage.objects.select_related"
] |
[((3145, 3194), 're.compile', 're.compile', (['"""accounts/do_confirm/([a-f0-9]{40})>"""'], {}), "('accounts/do_confirm/([a-f0-9]{40})>')\n", (3155, 3194), False, 'import re\n'), ((2149, 2160), 'time.time', 'time.time', ([], {}), '()\n', (2158, 2160), False, 'import time\n'), ((5558, 5580), 'urllib.urlencode', 'urllib.urlencode', (['info'], {}), '(info)\n', (5574, 5580), False, 'import urllib\n'), ((5700, 5722), 'urllib.urlencode', 'urllib.urlencode', (['info'], {}), '(info)\n', (5716, 5722), False, 'import urllib\n'), ((5843, 5865), 'urllib.urlencode', 'urllib.urlencode', (['info'], {}), '(info)\n', (5859, 5865), False, 'import urllib\n'), ((7464, 7496), 'zerver.models.get_user_profile_by_email', 'get_user_profile_by_email', (['email'], {}), '(email)\n', (7489, 7496), False, 'from zerver.models import get_realm, get_user_profile_by_email, resolve_email_to_domain, Client, Message, Realm, Recipient, Stream, Subscription, UserMessage\n'), ((7512, 7617), 'zerver.models.Subscription.objects.filter', 'Subscription.objects.filter', ([], {'user_profile': 'user_profile', 'active': '(True)', 'recipient__type': 'Recipient.STREAM'}), '(user_profile=user_profile, active=True,\n recipient__type=Recipient.STREAM)\n', (7539, 7617), False, 'from zerver.models import get_realm, get_user_profile_by_email, resolve_email_to_domain, Client, Message, Realm, Recipient, Stream, Subscription, UserMessage\n'), ((7899, 7937), 'zerver.models.get_user_profile_by_email', 'get_user_profile_by_email', (['sender_name'], {}), '(sender_name)\n', (7924, 7937), False, 'from zerver.models import get_realm, get_user_profile_by_email, resolve_email_to_domain, Client, Message, Realm, Recipient, Stream, Subscription, UserMessage\n'), ((8209, 8252), 'zerver.models.Client.objects.get_or_create', 'Client.objects.get_or_create', ([], {'name': '"""<NAME>"""'}), "(name='<NAME>')\n", (8237, 8252), False, 'from zerver.models import get_realm, get_user_profile_by_email, resolve_email_to_domain, Client, Message, Realm, Recipient, Stream, Subscription, UserMessage\n'), ((8269, 8470), 'zerver.lib.actions.check_send_message', 'check_send_message', (['sender', 'sending_client', 'message_type_name', 'recipient_list', 'subject', 'content'], {'forged': '(False)', 'forged_timestamp': 'None', 'forwarder_user_profile': 'sender', 'realm': 'sender.realm'}), '(sender, sending_client, message_type_name,\n recipient_list, subject, content, forged=False, forged_timestamp=None,\n forwarder_user_profile=sender, realm=sender.realm, **kwargs)\n', (8287, 8470), False, 'from zerver.lib.actions import check_send_message, create_stream_if_needed, do_add_subscription, get_display_recipient\n'), ((8782, 8809), 'ujson.loads', 'ujson.loads', (['result.content'], {}), '(result.content)\n', (8793, 8809), False, 'import ujson\n'), ((8928, 8951), 'zerver.models.get_realm', 'get_realm', (['realm_domain'], {}), '(realm_domain)\n', (8937, 8951), False, 'from zerver.models import get_realm, get_user_profile_by_email, resolve_email_to_domain, Client, Message, Realm, Recipient, Stream, Subscription, UserMessage\n'), ((8969, 9018), 'zerver.models.Stream.objects.get', 'Stream.objects.get', ([], {'name': 'stream_name', 'realm': 'realm'}), '(name=stream_name, realm=realm)\n', (8987, 9018), False, 'from zerver.models import get_realm, get_user_profile_by_email, resolve_email_to_domain, Client, Message, Realm, Recipient, Stream, Subscription, UserMessage\n'), ((9039, 9102), 'zerver.models.Recipient.objects.get', 'Recipient.objects.get', ([], {'type_id': 'stream.id', 'type': 'Recipient.STREAM'}), '(type_id=stream.id, type=Recipient.STREAM)\n', (9060, 9102), False, 'from zerver.models import get_realm, get_user_profile_by_email, resolve_email_to_domain, Client, Message, Realm, Recipient, Stream, Subscription, UserMessage\n'), ((9127, 9188), 'zerver.models.Subscription.objects.filter', 'Subscription.objects.filter', ([], {'recipient': 'recipient', 'active': '(True)'}), '(recipient=recipient, active=True)\n', (9154, 9188), False, 'from zerver.models import get_realm, get_user_profile_by_email, resolve_email_to_domain, Client, Message, Realm, Recipient, Stream, Subscription, UserMessage\n'), ((9509, 9536), 'ujson.loads', 'ujson.loads', (['result.content'], {}), '(result.content)\n', (9520, 9536), False, 'import ujson\n'), ((9881, 9908), 'ujson.loads', 'ujson.loads', (['result.content'], {}), '(result.content)\n', (9892, 9908), False, 'import ujson\n'), ((11220, 11263), 'zerver.lib.actions.create_stream_if_needed', 'create_stream_if_needed', (['realm', 'stream_name'], {}), '(realm, stream_name)\n', (11243, 11263), False, 'from zerver.lib.actions import check_send_message, create_stream_if_needed, do_add_subscription, get_display_recipient\n'), ((11287, 11319), 'zerver.models.get_user_profile_by_email', 'get_user_profile_by_email', (['email'], {}), '(email)\n', (11312, 11319), False, 'from zerver.models import get_realm, get_user_profile_by_email, resolve_email_to_domain, Client, Message, Realm, Recipient, Stream, Subscription, UserMessage\n'), ((11328, 11382), 'zerver.lib.actions.do_add_subscription', 'do_add_subscription', (['user_profile', 'stream'], {'no_log': '(True)'}), '(user_profile, stream, no_log=True)\n', (11347, 11382), False, 'from zerver.lib.actions import check_send_message, create_stream_if_needed, do_add_subscription, get_display_recipient\n'), ((2249, 2260), 'time.time', 'time.time', ([], {}), '()\n', (2258, 2260), False, 'import time\n'), ((6017, 6040), 'zerver.lib.initial_password.initial_password', 'initial_password', (['email'], {}), '(email)\n', (6033, 6040), False, 'from zerver.lib.initial_password import initial_password\n'), ((7685, 7721), 'zerver.lib.actions.get_display_recipient', 'get_display_recipient', (['sub.recipient'], {}), '(sub.recipient)\n', (7706, 7721), False, 'from zerver.lib.actions import check_send_message, create_stream_if_needed, do_add_subscription, get_display_recipient\n'), ((11168, 11198), 'zerver.models.resolve_email_to_domain', 'resolve_email_to_domain', (['email'], {}), '(email)\n', (11191, 11198), False, 'from zerver.models import get_realm, get_user_profile_by_email, resolve_email_to_domain, Client, Message, Realm, Recipient, Stream, Subscription, UserMessage\n'), ((11575, 11628), 'ujson.dumps', 'ujson.dumps', (["[{'name': stream} for stream in streams]"], {}), "([{'name': stream} for stream in streams])\n", (11586, 11628), False, 'import ujson\n'), ((11666, 11690), 'ujson.dumps', 'ujson.dumps', (['invite_only'], {}), '(invite_only)\n', (11677, 11690), False, 'import ujson\n'), ((12320, 12356), 'zerver.lib.actions.get_display_recipient', 'get_display_recipient', (['msg.recipient'], {}), '(msg.recipient)\n', (12341, 12356), False, 'from zerver.lib.actions import check_send_message, create_stream_if_needed, do_add_subscription, get_display_recipient\n'), ((7048, 7080), 'zerver.models.get_user_profile_by_email', 'get_user_profile_by_email', (['email'], {}), '(email)\n', (7073, 7080), False, 'from zerver.models import get_realm, get_user_profile_by_email, resolve_email_to_domain, Client, Message, Realm, Recipient, Stream, Subscription, UserMessage\n'), ((7279, 7308), 'base64.b64encode', 'base64.b64encode', (['credentials'], {}), '(credentials)\n', (7295, 7308), False, 'import base64\n'), ((3470, 3515), 'zerver.models.UserMessage.objects.select_related', 'UserMessage.objects.select_related', (['"""message"""'], {}), "('message')\n", (3504, 3515), False, 'from zerver.models import get_realm, get_user_profile_by_email, resolve_email_to_domain, Client, Message, Realm, Recipient, Stream, Subscription, UserMessage\n'), ((3647, 3692), 'zerver.models.UserMessage.objects.select_related', 'UserMessage.objects.select_related', (['"""message"""'], {}), "('message')\n", (3681, 3692), False, 'from zerver.models import get_realm, get_user_profile_by_email, resolve_email_to_domain, Client, Message, Realm, Recipient, Stream, Subscription, UserMessage\n'), ((4003, 4048), 'zerver.models.UserMessage.objects.select_related', 'UserMessage.objects.select_related', (['"""message"""'], {}), "('message')\n", (4037, 4048), False, 'from zerver.models import get_realm, get_user_profile_by_email, resolve_email_to_domain, Client, Message, Realm, Recipient, Stream, Subscription, UserMessage\n'), ((12201, 12225), 'zerver.models.Message.objects.filter', 'Message.objects.filter', ([], {}), '()\n', (12223, 12225), False, 'from zerver.models import get_realm, get_user_profile_by_email, resolve_email_to_domain, Client, Message, Realm, Recipient, Stream, Subscription, UserMessage\n'), ((10909, 10934), 'os.path.dirname', 'os.path.dirname', (['__file__'], {}), '(__file__)\n', (10924, 10934), False, 'import os\n')]
|
from django.conf import settings
from django.contrib import messages
from django.shortcuts import render, redirect, reverse, get_object_or_404
from django.contrib.auth.decorators import login_required, permission_required
from django.core.paginator import Paginator, PageNotAnInteger, EmptyPage
from django.views.generic import DetailView
from scrumate.core.issue.filters import IssueFilter
from scrumate.core.issue.models import Issue
from scrumate.core.issue.forms import IssueForm
from scrumate.core.project.models import Project
from scrumate.general.views import HistoryList
@login_required(login_url='/login/')
def issue_list(request, project_id, **kwargs):
issue_filter = IssueFilter(request.GET, queryset=Issue.objects.filter(project_id=project_id).order_by('-id'))
issue_list = issue_filter.qs
page = request.GET.get('page', 1)
paginator = Paginator(issue_list, settings.PAGE_SIZE)
try:
issues = paginator.page(page)
except PageNotAnInteger:
issues = paginator.page(1)
except EmptyPage:
issues = paginator.page(paginator.num_pages)
project = Project.objects.get(pk=project_id)
return render(request, 'core/issue_list.html', {'issues': issues, 'filter': issue_filter, 'project': project})
@login_required(login_url='/login/')
def issue_add(request, project_id, **kwargs):
if request.method == 'POST':
form = IssueForm(request.POST)
if form.is_valid():
issue = form.save(commit=False)
issue.project_id = project_id
issue.save()
messages.success(request, "Issue added successfully!")
return redirect('issue_list', permanent=True, project_id=project_id)
else:
form = IssueForm()
title = 'New Issue'
project = Project.objects.get(pk=project_id)
return render(request, 'core/common_add.html', {'form': form, 'title': title, 'list_url_name': 'issue_list', 'project': project})
@login_required(login_url='/login/')
def issue_edit(request, project_id, pk, **kwargs):
instance = get_object_or_404(Issue, id=pk)
form = IssueForm(request.POST or None, instance=instance)
if form.is_valid():
form.save()
messages.success(request, "Issue updated successfully!")
return redirect('issue_list', project_id=project_id)
title = 'Edit Issue'
project = Project.objects.get(pk=project_id)
return render(request, 'core/common_add.html', {'form': form, 'title': title, 'list_url_name': 'issue_list', 'project': project})
@login_required(login_url='/login/')
@permission_required('core.update_issue_status', raise_exception=True)
def update_issue_status(request, project_id, pk, **kwargs):
instance = get_object_or_404(Issue, id=pk)
form = IssueForm(request.POST or None, instance=instance)
if request.POST:
status = request.POST.get('status')
instance.status = status
instance.save()
messages.success(request, "Issue status updated successfurrl!")
return redirect('issue_list', project_id=project_id)
return render(request, 'includes/single_field.html', {
'field': form.visible_fields()[5],
'title': 'Update Status',
'url': reverse('issue_list', kwargs={'project_id': project_id}),
'project': Project.objects.get(pk=project_id),
'base_template': 'general/index_project_view.html'
})
class IssueHistoryList(HistoryList):
permission_required = 'scrumate.core.issue_history'
def get_issue_id(self):
return self.kwargs.get('pk')
def get_project_id(self):
return self.kwargs.get('project_id')
def get_queryset(self):
return Issue.history.filter(id=self.get_issue_id())
def get_context_data(self, **kwargs):
context = super().get_context_data(**kwargs)
project = Project.objects.get(pk=self.get_project_id())
issue = Issue.objects.get(pk=self.get_issue_id())
context['project'] = project
context['title'] = f'History of {issue.name}'
context['back_url'] = reverse('issue_list', kwargs={'project_id': self.get_project_id()})
context['base_template'] = 'general/index_project_view.html'
return context
class IssueDetailView(DetailView):
queryset = Issue.objects.all()
template_name = 'includes/generic_view.html'
context_object_name = 'issue'
def get_context_data(self, **kwargs):
context = super().get_context_data(**kwargs)
project_id = self.kwargs.get('project_id')
instance = self.get_object()
context['form'] = IssueForm(instance=instance)
context['edit_url'] = reverse('issue_edit', kwargs={'project_id': project_id, 'pk': instance.pk})
context['list_url'] = reverse('issue_list', kwargs={'project_id': project_id})
context['title'] = instance.name
context['project'] = Project.objects.get(pk=project_id)
context['base_template'] = 'general/index_project_view.html'
return context
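# For reference, a minimal URLconf sketch matching the reverse()/redirect()
# names used above (hypothetical wiring; the real scrumate URLconf may differ):
#     path('<int:project_id>/issues/', issue_list, name='issue_list'),
#     path('<int:project_id>/issues/<int:pk>/edit/', issue_edit, name='issue_edit'),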
|
[
"django.contrib.auth.decorators.login_required",
"django.contrib.auth.decorators.permission_required",
"django.shortcuts.redirect",
"scrumate.core.project.models.Project.objects.get",
"django.shortcuts.get_object_or_404",
"scrumate.core.issue.forms.IssueForm",
"django.core.paginator.Paginator",
"scrumate.core.issue.models.Issue.objects.filter",
"django.shortcuts.render",
"django.contrib.messages.success",
"scrumate.core.issue.models.Issue.objects.all",
"django.shortcuts.reverse"
] |
[((584, 619), 'django.contrib.auth.decorators.login_required', 'login_required', ([], {'login_url': '"""/login/"""'}), "(login_url='/login/')\n", (598, 619), False, 'from django.contrib.auth.decorators import login_required, permission_required\n'), ((1265, 1300), 'django.contrib.auth.decorators.login_required', 'login_required', ([], {'login_url': '"""/login/"""'}), "(login_url='/login/')\n", (1279, 1300), False, 'from django.contrib.auth.decorators import login_required, permission_required\n'), ((1954, 1989), 'django.contrib.auth.decorators.login_required', 'login_required', ([], {'login_url': '"""/login/"""'}), "(login_url='/login/')\n", (1968, 1989), False, 'from django.contrib.auth.decorators import login_required, permission_required\n'), ((2532, 2567), 'django.contrib.auth.decorators.login_required', 'login_required', ([], {'login_url': '"""/login/"""'}), "(login_url='/login/')\n", (2546, 2567), False, 'from django.contrib.auth.decorators import login_required, permission_required\n'), ((2569, 2638), 'django.contrib.auth.decorators.permission_required', 'permission_required', (['"""core.update_issue_status"""'], {'raise_exception': '(True)'}), "('core.update_issue_status', raise_exception=True)\n", (2588, 2638), False, 'from django.contrib.auth.decorators import login_required, permission_required\n'), ((869, 910), 'django.core.paginator.Paginator', 'Paginator', (['issue_list', 'settings.PAGE_SIZE'], {}), '(issue_list, settings.PAGE_SIZE)\n', (878, 910), False, 'from django.core.paginator import Paginator, PageNotAnInteger, EmptyPage\n'), ((1112, 1146), 'scrumate.core.project.models.Project.objects.get', 'Project.objects.get', ([], {'pk': 'project_id'}), '(pk=project_id)\n', (1131, 1146), False, 'from scrumate.core.project.models import Project\n'), ((1158, 1265), 'django.shortcuts.render', 'render', (['request', '"""core/issue_list.html"""', "{'issues': issues, 'filter': issue_filter, 'project': project}"], {}), "(request, 'core/issue_list.html', {'issues': issues, 'filter':\n    issue_filter, 'project': project})\n", (1164, 1265), False, 'from django.shortcuts import render, redirect, reverse, get_object_or_404\n'), ((1782, 1816), 'scrumate.core.project.models.Project.objects.get', 'Project.objects.get', ([], {'pk': 'project_id'}), '(pk=project_id)\n', (1801, 1816), False, 'from scrumate.core.project.models import Project\n'), ((1828, 1954), 'django.shortcuts.render', 'render', (['request', '"""core/common_add.html"""', "{'form': form, 'title': title, 'list_url_name': 'issue_list', 'project':\n    project}"], {}), "(request, 'core/common_add.html', {'form': form, 'title': title,\n    'list_url_name': 'issue_list', 'project': project})\n", (1834, 1954), False, 'from django.shortcuts import render, redirect, reverse, get_object_or_404\n'), ((2056, 2087), 'django.shortcuts.get_object_or_404', 'get_object_or_404', (['Issue'], {'id': 'pk'}), '(Issue, id=pk)\n', (2073, 2087), False, 'from django.shortcuts import render, redirect, reverse, get_object_or_404\n'), ((2099, 2149), 'scrumate.core.issue.forms.IssueForm', 'IssueForm', (['(request.POST or None)'], {'instance': 'instance'}), '(request.POST or None, instance=instance)\n', (2108, 2149), False, 'from scrumate.core.issue.forms import IssueForm\n'), ((2360, 2394), 'scrumate.core.project.models.Project.objects.get', 'Project.objects.get', ([], {'pk': 'project_id'}), '(pk=project_id)\n', (2379, 2394), False, 'from scrumate.core.project.models import Project\n'), ((2406, 2532), 'django.shortcuts.render', 'render', (['request', '"""core/common_add.html"""', "{'form': form, 'title': title, 'list_url_name': 'issue_list', 'project':\n    project}"], {}), "(request, 'core/common_add.html', {'form': form, 'title': title,\n    'list_url_name': 'issue_list', 'project': project})\n", (2412, 2532), False, 'from django.shortcuts import render, redirect, reverse, get_object_or_404\n'), ((2714, 2745), 'django.shortcuts.get_object_or_404', 'get_object_or_404', (['Issue'], {'id': 'pk'}), '(Issue, id=pk)\n', (2731, 2745), False, 'from django.shortcuts import render, redirect, reverse, get_object_or_404\n'), ((2757, 2807), 'scrumate.core.issue.forms.IssueForm', 'IssueForm', (['(request.POST or None)'], {'instance': 'instance'}), '(request.POST or None, instance=instance)\n', (2766, 2807), False, 'from scrumate.core.issue.forms import IssueForm\n'), ((4272, 4291), 'scrumate.core.issue.models.Issue.objects.all', 'Issue.objects.all', ([], {}), '()\n', (4289, 4291), False, 'from scrumate.core.issue.models import Issue\n'), ((1395, 1418), 'scrumate.core.issue.forms.IssueForm', 'IssueForm', (['request.POST'], {}), '(request.POST)\n', (1404, 1418), False, 'from scrumate.core.issue.forms import IssueForm\n'), ((1731, 1742), 'scrumate.core.issue.forms.IssueForm', 'IssueForm', ([], {}), '()\n', (1740, 1742), False, 'from scrumate.core.issue.forms import IssueForm\n'), ((2202, 2258), 'django.contrib.messages.success', 'messages.success', (['request', '"""Issue updated successfully!"""'], {}), "(request, 'Issue updated successfully!')\n", (2218, 2258), False, 'from django.contrib import messages\n'), ((2274, 2319), 'django.shortcuts.redirect', 'redirect', (['"""issue_list"""'], {'project_id': 'project_id'}), "('issue_list', project_id=project_id)\n", (2282, 2319), False, 'from django.shortcuts import render, redirect, reverse, get_object_or_404\n'), ((2938, 3001), 'django.contrib.messages.success', 'messages.success', (['request', '"""Issue status updated successfully!"""'], {}), "(request, 'Issue status updated successfully!')\n", (2954, 3001), False, 'from django.contrib import messages\n'), ((3017, 3062), 'django.shortcuts.redirect', 'redirect', (['"""issue_list"""'], {'project_id': 'project_id'}), "('issue_list', project_id=project_id)\n", (3025, 3062), False, 'from django.shortcuts import render, redirect, reverse, get_object_or_404\n'), ((4585, 4613), 'scrumate.core.issue.forms.IssueForm', 'IssueForm', ([], {'instance': 'instance'}), '(instance=instance)\n', (4594, 4613), False, 'from scrumate.core.issue.forms import IssueForm\n'), ((4644, 4719), 'django.shortcuts.reverse', 'reverse', (['"""issue_edit"""'], {'kwargs': "{'project_id': project_id, 'pk': instance.pk}"}), "('issue_edit', kwargs={'project_id': project_id, 'pk': instance.pk})\n", (4651, 4719), False, 'from django.shortcuts import render, redirect, reverse, get_object_or_404\n'), ((4750, 4806), 'django.shortcuts.reverse', 'reverse', (['"""issue_list"""'], {'kwargs': "{'project_id': project_id}"}), "('issue_list', kwargs={'project_id': project_id})\n", (4757, 4806), False, 'from django.shortcuts import render, redirect, reverse, get_object_or_404\n'), ((4877, 4911), 'scrumate.core.project.models.Project.objects.get', 'Project.objects.get', ([], {'pk': 'project_id'}), '(pk=project_id)\n', (4896, 4911), False, 'from scrumate.core.project.models import Project\n'), ((1570, 1624), 'django.contrib.messages.success', 'messages.success', (['request', '"""Issue added successfully!"""'], {}), "(request, 'Issue added successfully!')\n", (1586, 1624), False, 'from django.contrib import messages\n'), ((1644, 1705), 'django.shortcuts.redirect', 'redirect', (['"""issue_list"""'], {'permanent': '(True)', 'project_id': 'project_id'}), "('issue_list', permanent=True, project_id=project_id)\n", (1652, 1705), False, 'from django.shortcuts import render, redirect, reverse, get_object_or_404\n'), ((3215, 3271), 'django.shortcuts.reverse', 'reverse', (['"""issue_list"""'], {'kwargs': "{'project_id': project_id}"}), "('issue_list', kwargs={'project_id': project_id})\n", (3222, 3271), False, 'from django.shortcuts import render, redirect, reverse, get_object_or_404\n'), ((3292, 3326), 'scrumate.core.project.models.Project.objects.get', 'Project.objects.get', ([], {'pk': 'project_id'}), '(pk=project_id)\n', (3311, 3326), False, 'from scrumate.core.project.models import Project\n'), ((720, 763), 'scrumate.core.issue.models.Issue.objects.filter', 'Issue.objects.filter', ([], {'project_id': 'project_id'}), '(project_id=project_id)\n', (740, 763), False, 'from scrumate.core.issue.models import Issue\n')]
|
import data
import our_colours
def our_palettes(palette = None, n = None, reverse = False):
    '''
    Access our colour palettes as hexcodes
    - palette: string, which palette should be accessed; should match a name from our_palettes_raw
    - n: integer, number of colours to generate from the palette
    - reverse: boolean, should the order of colours be reversed?
    Returns: If palette is None, return the raw palette data. If n is None, return
    the hexcodes of the colours in the data; otherwise return n colours interpolated
    from the chosen palette
    Examples:
        our_palettes()
        our_palettes('default')
        our_palettes('default', reverse=True)
        our_palettes('default', 10)
        our_palettes('default', 2)
    '''
if palette is None:
return data.our_palettes_raw
else:
if n is None:
pal = our_colours.our_colours(data.our_palettes_raw[palette])
if reverse:
                pal = list(reversed(pal))  # reversed() replaces the R-style rev()
return pal
else:
return our_palettes_interpolator(palette, reverse)(n)
# Note: our_palettes_interpolator is expected to be defined alongside this
# function elsewhere in the package.
|
[
"our_colours.our_colours"
] |
[((848, 903), 'our_colours.our_colours', 'our_colours.our_colours', (['data.our_palettes_raw[palette]'], {}), '(data.our_palettes_raw[palette])\n', (871, 903), False, 'import our_colours\n')]
|
import math
import numpy as np
from typing import Dict
from typing import List
from typing import Union
from typing import Iterator
from typing import Optional
from .types import *
from .data_types import *
from .normalizers import *
from .distributions import *
from ...misc import *
params_type = Dict[str, Union[DataType, Iterable, "params_type"]]
class ParamsGenerator:
"""
Parameter generator for param searching, see cftool.ml.hpo.base.HPOBase for usage.
Parameters
----------
params : params_type, parameter settings.
Examples
----------
>>> grid = ParamsGenerator({
>>> "a": Any(Choice(values=[1, 2, 3])),
>>> "c": {
>>> "d": Int(Choice(values=[1, 2, 3])),
>>> "e": Float(Choice(values=[1, 2])),
>>> }
>>> })
>>> for param in grid.all():
>>> print(param)
>>> # output : {'a': 1, 'c': {'d': 1, 'e': 1, 'f': 3}}, {'a': 1, 'c': {'d': 1, 'e': 1, 'f': 4}}
>>> # {'a': 1, 'c': {'d': 1, 'e': 2, 'f': 3}}, {'a': 1, 'c': {'d': 1, 'e': 2, 'f': 4}}
>>> # {'a': 1, 'c': {'d': 2, 'e': 1, 'f': 3}}, {'a': 1, 'c': {'d': 2, 'e': 1, 'f': 4}}
>>> # {'a': 1, 'c': {'d': 2, 'e': 2, 'f': 3}}, {'a': 1, 'c': {'d': 2, 'e': 2, 'f': 4}}
>>> # ......
>>> # {'a': 3, 'c': {'d': 3, 'e': 2, 'f': 3}}, {'a': 3, 'c': {'d': 3, 'e': 2, 'f': 4}}
"""
def __init__(
self,
params: params_type,
*,
normalize_method: Optional[str] = None,
normalize_config: Optional[Dict[str, Any]] = None,
):
self._data_types = params
def _data_type_offset(value: DataType) -> int:
if not isinstance(value, Iterable):
return 1
return len(value.values)
self._data_types_nested = Nested(params, offset_fn=_data_type_offset)
if normalize_method is None:
self._normalizers_flattened = None
else:
if normalize_config is None:
normalize_config = {}
def _data_type_normalizer(value: DataType) -> Normalizer:
return Normalizer(normalize_method, value, **normalize_config)
normalizers_nested = self._data_types_nested.apply(_data_type_normalizer)
self._normalizers_flattened = normalizers_nested.flattened
self._all_params_nested = self._all_flattened_data_types = None
self._array_dim = self._all_bounds = None
@property
def params(self) -> params_type:
return self._data_types
@property
def num_params(self) -> number_type:
def _num_params(params):
if isinstance(params, (DataType, Iterable)):
return params.num_params
assert isinstance(params, dict)
num_params = prod(_num_params(v) for v in params.values())
if math.isinf(num_params):
return num_params
return int(num_params)
return _num_params(self._data_types)
@property
def array_dim(self) -> int:
if self._array_dim is None:
self._array_dim = self.flattened2array(
self.flatten_nested(self.pop())
).shape[0]
return self._array_dim
@property
def all_bounds(self) -> np.ndarray:
if self._all_bounds is None:
bounds_list = []
for key in self.sorted_flattened_keys:
if self._normalizers_flattened is None:
normalizer = None
else:
normalizer = self._normalizers_flattened[key]
if normalizer is None:
data_type = self._data_types_nested.get_value_from(key)
if not isinstance(data_type, Iterable):
bounds_list.append(list(data_type.bounds))
else:
bounds_list.extend(list(map(list, data_type.bounds)))
else:
if normalizer.is_iterable:
bounds_list.extend(list(map(list, normalizer.bounds)))
else:
bounds_list.append(list(normalizer.bounds))
self._all_bounds = np.array(bounds_list, np.float32)
return self._all_bounds
@property
def all_flattened_params(self) -> all_flattened_type:
if self._all_params_nested is None:
apply = lambda data_type: data_type.all()
self._all_params_nested = self._data_types_nested.apply(apply)
return self._all_params_nested.flattened
@property
def sorted_flattened_keys(self) -> List[str]:
return self._data_types_nested.sorted_flattened_keys
def pop(self) -> nested_type:
def _pop(src: dict, tgt: dict):
for k, v in src.items():
if isinstance(v, dict):
next_tgt = tgt.setdefault(k, {})
_pop(v, next_tgt)
else:
tgt[k] = v.pop()
return tgt
return _pop(self._data_types, {})
def all(self) -> Iterator[nested_type]:
for flattened_params in Grid(self.all_flattened_params):
yield self._data_types_nested.nest_flattened(flattened_params)
def flatten_nested(self, nested: nested_type) -> nested_type:
return self._data_types_nested.flatten_nested(nested)
def nest_flattened(self, flattened: flattened_type) -> nested_type:
return self._data_types_nested.nest_flattened(flattened)
def flattened2array(self, flattened: flattened_type) -> np.ndarray:
if self._normalizers_flattened is None:
normalized = flattened
else:
normalized = {
k: self._normalizers_flattened[k].normalize(v)
for k, v in flattened.items()
}
return self._data_types_nested.flattened2array(normalized)
def array2flattened(self, array: np.ndarray) -> flattened_type:
normalized = self._data_types_nested.array2flattened(array)
if self._normalizers_flattened is None:
flattened = normalized
else:
flattened = {
k: self._normalizers_flattened[k].recover(v)
for k, v in normalized.items()
}
for key, value in flattened.items():
data_type = self._data_types_nested.get_value_from(key)
flattened[key] = data_type.transform(value)
return flattened
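# Illustrative round-trip sketch (not from the original module): for a
# generator `g`, `flattened2array` normalizes (when a normalize_method was
# given) and packs a flattened param dict into a numpy array, and
# `array2flattened` inverts that, re-applying each DataType's `transform`:
#     flattened = g.flatten_nested(g.pop())
#     arr = g.flattened2array(flattened)
#     recovered = g.array2flattened(arr)  # same keys as `flattened`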
__all__ = ["ParamsGenerator", "params_type"]
|
[
"math.isinf",
"numpy.array"
] |
[((2877, 2899), 'math.isinf', 'math.isinf', (['num_params'], {}), '(num_params)\n', (2887, 2899), False, 'import math\n'), ((4226, 4259), 'numpy.array', 'np.array', (['bounds_list', 'np.float32'], {}), '(bounds_list, np.float32)\n', (4234, 4259), True, 'import numpy as np\n')]
|
from bs4 import BeautifulSoup as soup
import requests
import re
from word2number import w2n
import pandas as pd
response = requests.get('https://www.zameen.com/Houses_Property/Lahore-1-1.html')
Price=[]
Location=[]
Beds=[]
Size = []
#file1 = open("myfile.txt","w")
#file1.writelines(response.text)
#file1.close
#print(response.text)
data = soup(response.text)
dataa = data.find_all('li',role = 'article')
for info in dataa:
Pirces = info.find_all('span',class_ = 'f343d9ce')
Locations = info.find_all('div',class_ = '_162e6469')
Bedss = info.find_all('span',class_ = 'b6a29bc0')
Sizes = info.find_all('h2',class_='c0df3811')
#print(Locations[0].text)
#print(Pirces[0].text)
#print(Bedss[0].text)
Sizes = Sizes[0].text
sizer = Sizes.split(' ')
Sizes = str(sizer[0])
Price.append(Pirces[0].text)
Location.append(Locations[0].text)
Beds.append(Bedss[0].text)
Size.append(Sizes)
'''
print(Price)
print()
print(Location)
print()
print(Beds)
print()
print(Size)
print()
'''
i = 0
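# Unit conversion note: 1 Crore = 10**7 PKR; the else branch assumes the
# remaining prices are quoted in Lakh (1 Lakh = 10**5 PKR).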
for items in Price:
if(str(items).endswith('Crore')):
num = items.split(' ')
number = float(num[0])*pow(10,7)
Price[i] = number
i+=1
else:
num = items.split(' ')
number = float(num[0])*pow(10,5)
Price[i]=number
i+=1
df = pd.DataFrame(list(zip(Location,Size,Beds,Price)),columns =['Location', 'Size(Marla)','Beds','Price in Pkr'])
print(df)
df.to_csv('dataset.csv', index=False)
#info = dataa[0]
'''
file1 = open("myfile.txt","w")
file1.writelines(str(info))
file1.close
#print(response.text)
'''
#print(Size)
#span aria-label = Listing price
#span aria-label = Beds
#span aria-label = Listing price
|
[
"bs4.BeautifulSoup",
"requests.get"
] |
[((131, 201), 'requests.get', 'requests.get', (['"""https://www.zameen.com/Houses_Property/Lahore-1-1.html"""'], {}), "('https://www.zameen.com/Houses_Property/Lahore-1-1.html')\n", (143, 201), False, 'import requests\n'), ((360, 379), 'bs4.BeautifulSoup', 'soup', (['response.text'], {}), '(response.text)\n', (364, 379), True, 'from bs4 import BeautifulSoup as soup\n')]
|
"""EM 算法的实现
"""
import copy
import math
import matplotlib.pyplot as plt
import numpy as np
isdebug = True
# Specify the parameters of k Gaussian distributions; here k = 2. Note that both Gaussians share the same standard deviation Sigma, with means Mu1 and Mu2 respectively.
def init_data(Sigma, Mu1, Mu2, k, N):
global X
global Mu
global Expectations
X = np.zeros((1, N))
Mu = np.random.random(k)
Expectations = np.zeros((N, k))
for i in range(0, N):
if np.random.random(1) > 0.5:
X[0, i] = np.random.normal(Mu1, Sigma)
else:
X[0, i] = np.random.normal(Mu2, Sigma)
if isdebug:
print("***********")
print("初始观测数据X:")
print(X)
# EM algorithm, step 1 (E-step): compute E[z_ij]
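# With equal mixing weights and a shared Sigma, the responsibility simplifies to
#   E[z_ij] = exp(-(x_i - Mu_j)^2 / (2*Sigma^2)) / sum_k exp(-(x_i - Mu_k)^2 / (2*Sigma^2))
# since the common Gaussian normalization constant cancels out.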
def e_step(Sigma, k, N):
global Expectations
global Mu
global X
for i in range(0, N):
Denom = 0
Numer = [0.0] * k
for j in range(0, k):
Numer[j] = math.exp((-1 / (2 * (float(Sigma**2)))) * (float(X[0, i] - Mu[j]))**2)
Denom += Numer[j]
for j in range(0, k):
Expectations[i, j] = Numer[j] / Denom
if isdebug:
print("***********")
print("隐藏变量E(Z):")
print(Expectations)
# EM algorithm, step 2 (M-step): re-estimate the parameters Mu that maximize E[z_ij]
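# The update below is the responsibility-weighted sample mean:
#   Mu_j = sum_i E[z_ij] * x_i / sum_i E[z_ij]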
def m_step(k, N):
global Expectations
global X
for j in range(0, k):
Numer = 0
Denom = 0
for i in range(0, N):
Numer += Expectations[i, j] * X[0, i]
Denom += Expectations[i, j]
Mu[j] = Numer / Denom
# Iterate the algorithm iter_num times, or stop early once precision Epsilon is reached
def run(Sigma, Mu1, Mu2, k, N, iter_num, Epsilon):
init_data(Sigma, Mu1, Mu2, k, N)
print("初始<u1,u2>:", Mu)
for i in range(iter_num):
Old_Mu = copy.deepcopy(Mu)
e_step(Sigma, k, N)
m_step(k, N)
print(i, Mu)
if sum(abs(Mu - Old_Mu)) < Epsilon:
break
if __name__ == '__main__':
    sigma = 6  # the Gaussians share the same variance
    mu1 = 40  # mean of the first Gaussian, used to generate samples
    mu2 = 20  # mean of the second Gaussian, used to generate samples
    k = 2  # number of Gaussian components
    N = 1000  # number of samples
    iter_num = 1000  # maximum number of iterations
    epsilon = 0.0001  # stop once the change between two iterations is below this
run(sigma, mu1, mu2, k, N, iter_num, epsilon)
plt.hist(X[0, :], 50)
plt.show()
|
[
"copy.deepcopy",
"matplotlib.pyplot.show",
"matplotlib.pyplot.hist",
"numpy.zeros",
"numpy.random.random",
"numpy.random.normal"
] |
[((264, 280), 'numpy.zeros', 'np.zeros', (['(1, N)'], {}), '((1, N))\n', (272, 280), True, 'import numpy as np\n'), ((290, 309), 'numpy.random.random', 'np.random.random', (['k'], {}), '(k)\n', (306, 309), True, 'import numpy as np\n'), ((329, 345), 'numpy.zeros', 'np.zeros', (['(N, k)'], {}), '((N, k))\n', (337, 345), True, 'import numpy as np\n'), ((2064, 2085), 'matplotlib.pyplot.hist', 'plt.hist', (['X[0, :]', '(50)'], {}), '(X[0, :], 50)\n', (2072, 2085), True, 'import matplotlib.pyplot as plt\n'), ((2090, 2100), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (2098, 2100), True, 'import matplotlib.pyplot as plt\n'), ((1610, 1627), 'copy.deepcopy', 'copy.deepcopy', (['Mu'], {}), '(Mu)\n', (1623, 1627), False, 'import copy\n'), ((383, 402), 'numpy.random.random', 'np.random.random', (['(1)'], {}), '(1)\n', (399, 402), True, 'import numpy as np\n'), ((432, 460), 'numpy.random.normal', 'np.random.normal', (['Mu1', 'Sigma'], {}), '(Mu1, Sigma)\n', (448, 460), True, 'import numpy as np\n'), ((497, 525), 'numpy.random.normal', 'np.random.normal', (['Mu2', 'Sigma'], {}), '(Mu2, Sigma)\n', (513, 525), True, 'import numpy as np\n')]
|
from qaz import settings
from qaz.managers import git, shell
def update_qaz() -> None:
"""
Update QAZ.
This pulls the latest version of QAZ and installs the necessary Python dependencies
for this tool.
"""
root_dir = settings.get_root_dir()
git.pull(root_dir)
shell.run(
"poetry install --no-dev --remove-untracked",
cwd=root_dir,
env=dict(VIRTUAL_ENV=str(root_dir / ".venv")),
)
|
[
"qaz.settings.get_root_dir",
"qaz.managers.git.pull"
] |
[((244, 267), 'qaz.settings.get_root_dir', 'settings.get_root_dir', ([], {}), '()\n', (265, 267), False, 'from qaz import settings\n'), ((272, 290), 'qaz.managers.git.pull', 'git.pull', (['root_dir'], {}), '(root_dir)\n', (280, 290), False, 'from qaz.managers import git, shell\n')]
|
import os
import h5py
import pytest
import numpy as np
import pandas as pd
import automatic_speech_recognition as asr
@pytest.fixture
def dataset() -> asr.dataset.Features:
file_path = 'test.h5'
reference = pd.DataFrame({
'path': [f'dataset/{i}' for i in range(10)],
'transcript': [f'transcript-{i}' for i in range(10)],
})
with h5py.File(file_path, 'w') as store:
for path in reference.path:
store[path] = np.random.random([20, 10])
with pd.HDFStore(file_path, mode='r+') as store:
store['references'] = reference
    # Yield (rather than return) so the HDF5 store can be removed once the
    # test using this fixture has finished.
    yield asr.dataset.Features.from_hdf(file_path, batch_size=3)
    os.remove(file_path)
def test_get_batch(dataset):
batch_audio, transcripts = dataset.get_batch(index=1)
a, b, c = transcripts
assert b == 'transcript-4'
a, b, c = batch_audio
assert b.shape == (20, 10)
|
[
"automatic_speech_recognition.dataset.Features.from_hdf",
"os.remove",
"h5py.File",
"pandas.HDFStore",
"numpy.random.random"
] |
[((595, 649), 'automatic_speech_recognition.dataset.Features.from_hdf', 'asr.dataset.Features.from_hdf', (['file_path'], {'batch_size': '(3)'}), '(file_path, batch_size=3)\n', (624, 649), True, 'import automatic_speech_recognition as asr\n'), ((896, 916), 'os.remove', 'os.remove', (['"""test.h5"""'], {}), "('test.h5')\n", (905, 916), False, 'import os\n'), ((364, 389), 'h5py.File', 'h5py.File', (['file_path', '"""w"""'], {}), "(file_path, 'w')\n", (373, 389), False, 'import h5py\n'), ((499, 532), 'pandas.HDFStore', 'pd.HDFStore', (['file_path'], {'mode': '"""r+"""'}), "(file_path, mode='r+')\n", (510, 532), True, 'import pandas as pd\n'), ((462, 488), 'numpy.random.random', 'np.random.random', (['[20, 10]'], {}), '([20, 10])\n', (478, 488), True, 'import numpy as np\n')]
|
# Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import print_function
import os
import time
import json
import unittest
import argparse
import tempfile
import traceback
from warnings import catch_warnings
from paddle.distributed.fleet.elastic.collective import CollectiveLauncher
from paddle.distributed.fleet.launch import launch_collective
fake_python_code = """
print("test")
"""
class TestCollectiveLauncher(unittest.TestCase):
def setUp(self):
self.temp_dir = tempfile.TemporaryDirectory()
self.code_path = os.path.join(self.temp_dir.name,
"fake_python_for_elastic.py")
with open(self.code_path, "w") as f:
f.write(fake_python_code)
def tearDown(self):
self.temp_dir.cleanup()
def test_launch(self):
class Argument:
elastic_server = "127.0.0.1:2379"
job_id = "test_job_id_123"
np = "1"
gpus = "0"
nproc_per_node = 1
host = None
curr_host = None
ips = "127.0.0.1"
scale = None
force = None
backend = 'gloo'
enable_auto_mapping = False
run_mode = "cpuonly"
servers = None
rank_mapping_path = None
training_script = self.code_path
training_script_args = ["--use_amp false"]
log_dir = None
args = Argument()
launch = CollectiveLauncher(args)
try:
args.backend = "gloo"
launch.launch()
launch.stop()
except Exception as e:
pass
try:
args.backend = "gloo"
launch_collective(args)
except Exception as e:
pass
def test_stop(self):
class Argument:
elastic_server = "127.0.0.1:2379"
job_id = "test_job_id_123"
np = "1"
gpus = "0"
nproc_per_node = 1
host = None
curr_host = None
ips = "127.0.0.1"
scale = None
force = None
backend = 'gloo'
enable_auto_mapping = False
run_mode = "cpuonly"
servers = None
rank_mapping_path = None
training_script = self.code_path
training_script_args = ["--use_amp false"]
log_dir = None
args = Argument()
try:
launch = CollectiveLauncher(args)
launch.tmp_dir = tempfile.mkdtemp()
launch.stop()
except Exception as e:
pass
if __name__ == "__main__":
unittest.main()
|
[
"unittest.main",
"paddle.distributed.fleet.elastic.collective.CollectiveLauncher",
"tempfile.TemporaryDirectory",
"paddle.distributed.fleet.launch.launch_collective",
"tempfile.mkdtemp",
"os.path.join"
] |
[((3229, 3244), 'unittest.main', 'unittest.main', ([], {}), '()\n', (3242, 3244), False, 'import unittest\n'), ((1064, 1093), 'tempfile.TemporaryDirectory', 'tempfile.TemporaryDirectory', ([], {}), '()\n', (1091, 1093), False, 'import tempfile\n'), ((1119, 1181), 'os.path.join', 'os.path.join', (['self.temp_dir.name', '"""fake_python_for_elastic.py"""'], {}), "(self.temp_dir.name, 'fake_python_for_elastic.py')\n", (1131, 1181), False, 'import os\n'), ((2044, 2068), 'paddle.distributed.fleet.elastic.collective.CollectiveLauncher', 'CollectiveLauncher', (['args'], {}), '(args)\n', (2062, 2068), False, 'from paddle.distributed.fleet.elastic.collective import CollectiveLauncher\n'), ((2279, 2302), 'paddle.distributed.fleet.launch.launch_collective', 'launch_collective', (['args'], {}), '(args)\n', (2296, 2302), False, 'from paddle.distributed.fleet.launch import launch_collective\n'), ((3049, 3073), 'paddle.distributed.fleet.elastic.collective.CollectiveLauncher', 'CollectiveLauncher', (['args'], {}), '(args)\n', (3067, 3073), False, 'from paddle.distributed.fleet.elastic.collective import CollectiveLauncher\n'), ((3103, 3121), 'tempfile.mkdtemp', 'tempfile.mkdtemp', ([], {}), '()\n', (3119, 3121), False, 'import tempfile\n')]
|
"""
This is an implementation of the paper
"Attention-based LSTM for Aspect-level Sentiment Classification" with Keras.
Based on the dataset from "SemEval 2014 Task 4".
"""
import os
from time import time
# TODO, Here we need logger!
import numpy as np
from lxml import etree
from keras.preprocessing.text import Tokenizer
from keras.preprocessing.sequence import pad_sequences
from keras.utils.np_utils import to_categorical
from keras.layers import Input, Embedding, LSTM, Dense
from keras.layers import RepeatVector, Dot, Concatenate, Reshape
from keras.activations import softmax
from keras.models import Model, load_model
from keras import regularizers, initializers, optimizers
from keras.layers import Lambda
import keras.backend as K
TEXT_KEY = 'text'
TERM_KEY = 'aspect_terms'
CATEGORY_KEY = 'aspect_categories'
I_TEXT, I_ASPECT, I_POLARITY = 0, 1, 2
# Correspond to settings in paper.
EMBEDDING_DIM = 300
ASPECT_EMBEDDING_DIM = 300
HIDDEN_LAYER_SIZE = 300
# Hyper-parameters for training.
L2_REGULARIZATION = 0.001
MOMENTUM = 0.9
LEARNING_RATE = 0.001
MINI_BATCH_SIZE = 25
RANDOM_UNIFORM = .01
POLARITY_TO_INDEX = {
'positive': 0,
'negative': 1,
'neutral': 2,
'conflict': 3
}
def extract_data(data_file='Restaurants_Train_v2.xml'):
"""
    Extract training data from the xml file provided by 'SemEval 2014 Task 4'.
    :param data_file: XML file that contains training data.
:return: A list of dictionaries of training data with TEXT_KEY, 'aspect
terms' and 'aspect categories'.
"""
tree = etree.parse(data_file)
sents_root = tree.getroot()
data = []
def get_content(sent):
"""
Get all contents from a single 'sentence node', including TEXT_KEY,
values of 'aspect terms' and 'aspect categories'.
:param sent: a single xml node of sentence.
:type: _Element
:return: A dictionary of contents.
"""
content = {}
# We assume that there is must a text node here.
content[TEXT_KEY] = sent.xpath(TEXT_KEY)[0].text
terms = sent.xpath('aspectTerms')
if terms:
# As there is only one element of 'aspectTerms'.
# And we only need the first two values, 'aspect' and 'polarity'.
content[TERM_KEY] = list(map(lambda term: term.values()[:2],
terms[0].iterchildren()))
else:
pass
categories = sent.xpath('aspectCategories')
if categories:
content[CATEGORY_KEY] = list(
map(lambda category: category.values(),
categories[0].iterchildren()))
else:
pass
return content
for sent in sents_root.iterchildren():
data.append(get_content(sent))
return data
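# For orientation, the SemEval 2014 Task 4 XML looks roughly like this
# (illustrative snippet, not taken from the dataset itself):
#   <sentence id="...">
#     <text>The staff was friendly.</text>
#     <aspectTerms>
#       <aspectTerm term="staff" polarity="positive" from="4" to="9"/>
#     </aspectTerms>
#     <aspectCategories>
#       <aspectCategory category="service" polarity="positive"/>
#     </aspectCategories>
#   </sentence>
# which is why `term.values()[:2]` above yields (term/category, polarity) pairs.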
def check_absent(data):
"""
    Check for absent 'aspect terms' or 'aspect categories', and whether any
    sentence is missing both 'terms' and 'categories'.
    :param data: dataset with all contents.
    :type: list of dictionary.
    :return: sentence indices with absent terms and with absent categories,
    their respective counts, the both-missing flag, and the max sentence length.
    :type: tuple of (list, int, list, int, boolean, int)
"""
exist_both_missing = False
term_absent_indices = []
term_absent_cnt = 0
category_absent_indices = []
category_absent_cnt = 0
max_len = 0
for idx, sent in enumerate(data):
max_len = max(len(sent[TEXT_KEY]), max_len)
term_absent = TERM_KEY not in sent.keys()
category_absent = CATEGORY_KEY not in sent.keys()
if term_absent and category_absent:
exist_both_missing = True
if term_absent:
term_absent_indices.append(idx)
term_absent_cnt += 1
if category_absent:
category_absent_indices.append(idx)
category_absent_cnt += 1
return (term_absent_indices, term_absent_cnt,
category_absent_indices, category_absent_cnt,
exist_both_missing, max_len)
def combine_data(data, mess=True, replace_space=True, replace_space_char='_'):
"""
    If `mess` is True, combine each text with all aspects related to it, both
    aspect terms and aspect categories, and mix everything into one list.
    If `mess` is False, combine TEXT_KEY with 'terms' and 'categories'
    separately and return the two collections as a tuple.
    Also return the max sentence length over all entries if `mess` is True,
    or the separate max lengths if `mess` is False.
    :param data: all data with TEXT_KEY and lists of 'aspect terms' and
    'categories'.
    :return: all combined data with the overall max length, or the 'aspect
    terms' and 'categories' data separately, each with its own max length.
"""
term_data, category_data = [], []
term_max_len, category_max_len = 0, 0
# TODO, How do we treat multi-word token as aspect term?
# 1. take whole as one token an replace space with other mask.
# 2. split into multiple tokens and average all embeddings.
# 3. only take one word into consideration.
# Note for aspect terms, it could contains spaces in the word, so should
# not use space to split tokenizer, and take all as one token.
# And also, there are other special characters in the phrase, like '-'.
# They should be keep.
for sent in data:
text = sent[TEXT_KEY]
is_term_exist = TERM_KEY in sent.keys()
is_category_exist = CATEGORY_KEY in sent.keys()
if is_term_exist:
term_max_len = max(term_max_len, len(sent[TEXT_KEY]))
for term, polarity in sent[TERM_KEY]:
if replace_space:
term = term.replace(' ', replace_space_char)
term_data.append([text, term, polarity])
if is_category_exist:
category_max_len = max(category_max_len, len(sent[TEXT_KEY]))
for category, polarity in sent[CATEGORY_KEY]:
if replace_space:
category = category.replace(' ', replace_space_char)
category_data.append([text, category, polarity])
# print(len(term_data), len(category_data))
if mess:
max_len = max(term_max_len, category_max_len)
term_data.extend(category_data)
return term_data, max_len
else:
return (term_data, term_max_len), (category_data, category_max_len)
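# Shape sketch (illustrative): with mess=True the function returns
#     ([[text, aspect, polarity], ...], max_len)
# and with mess=False it returns the term and category data separately:
#     (([ [text, term, polarity], ...], term_max_len),
#      ([ [text, category, polarity], ...], category_max_len))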
def convert_data(data, max_len=None, with_label=True, extra_data=False):
"""
    Convert data tuples of (text, aspect, polarity) into word index sequences
    and one-hot labels, so that words can be looked up in an embedding layer.
    Polarity is converted to a class identifier, as defined by default in
    POLARITY_TO_INDEX.
    NOTE: keep in mind that labels must stay matched with 'text' and 'aspect'!
    :param data: List of data with elements of (text, aspect, polarity).
    :param max_len: Max length the text index sequences are padded to.
    :param with_label: Whether it is training data with label or
    test/customized data without label.
    :param extra_data: currently unused.
    :return: Arrays containing (text index sequences, aspect index sequences,
    one-hot polarity classes), each a numpy array, along with the
    corresponding word-to-index lookup dictionaries.
    :type: numpy array.
"""
# Set indicator for 'text', 'aspect' and 'polarity(label)'.
converted_data, lookups = [], []
texts, aspects, labels = [], [], []
# TODO, we should count max length here?!
for d in data:
texts.append(d[I_TEXT])
aspects.append(d[I_ASPECT])
if with_label:
labels.append(d[I_POLARITY])
def convert_to_indices(examples, max_len=None, need_tokenizer=False,
customized_filter='!"#$%&()*+,-./:;<=>?@[\\]^_`{|}~\t\n'):
"""
        Fit a tokenizer, convert the words to index sequences, build the
        word index lookup and, if needed, return the tokenizer as well.
:param examples: list of words or sentences.
:param max_len: the max length of indices sequences.
:param need_tokenizer: return tokenizer or not.
:type: boolean
:return: (indices sequence, word index lookup, <tokenizer>)
:type: tuple
"""
tokenizer = Tokenizer(filters=customized_filter)
tokenizer.fit_on_texts(examples)
seqs = tokenizer.texts_to_sequences(examples)
word_idx = tokenizer.word_index
# TODO, do we need to pad, if yes, 'pre' or 'post'?
if max_len:
seqs = pad_sequences(seqs, maxlen=max_len)
if need_tokenizer:
return seqs, word_idx, tokenizer
else:
return seqs, word_idx
text_seqs, text_word_idx = convert_to_indices(texts, max_len)
converted_data.append(np.asarray(text_seqs, dtype='int32'))
lookups.append(text_word_idx)
# For aspect term maybe we should not use tokenizer and filter.
aspects_seqs, aspects_idx = convert_to_indices(
aspects,
# TODO, should use less filter.
customized_filter='#$%&/:;<=>?@[\\]^`{|}~\t\n')
converted_data.append(np.asarray(aspects_seqs, dtype='int32'))
lookups.append(aspects_idx)
if with_label:
labels_seqs, labels_idx = convert_to_indices(labels)
    # Shift label indices down by one: Tokenizer indices start at 1, we only
    # need the 4 polarity classes, and no extra class for 'other' is needed.
labels_arr = np.asarray(labels_seqs, dtype='int') - 1
labels_one_hot = to_categorical(labels_arr) # aspects_seqs,
# [:, np.newaxis],
converted_data.append(labels_one_hot)
lookups.append(labels_idx)
# print(aspects_seqs)
# # Preprocessing text without max number of words.
# text_tokenizer = Tokenizer()
# text_tokenizer.fit_on_texts(texts)
# text_seqs = text_tokenizer.texts_to_sequences(texts)
# text_word_idx = text_tokenizer.word_index
# # Just get indices of words, and does not categorize it as we won't
# # multiply one-hot vector in practice as it is computation costly.
# # Instead we just lookup with embedding layer.
# text_data = pad_sequences(text_seqs, maxlen=max_len)
#
# # Preprocessing aspects.
# # The same as word in text, it will be lookup in embedding layer.
# aspects_tokenizer = Tokenizer()
# aspects_tokenizer.fit_on_texts(aspects)
# aspects_seqs = aspects_tokenizer.texts_to_sequences(aspects)
# aspects_idx = aspects_tokenizer.word_index
#
# # Processing labels
# # Convert labels from words into indices and then to one-hot categorical
# # indices.
# labels_tokenizer = Tokenizer()
# labels_tokenizer.fit_on_texts(labels)
# labels_seqs = labels_tokenizer.texts_to_sequences(labels)
# labels_idx = labels_tokenizer.
return converted_data, lookups
def load_w2v(idxes, emb_file, save_to_file=None):
"""
Load pre-trained embedding and match words in training data to form a
small set of word embedding matrix with OOV with all '0's.
NOTE: Keras tokenizer.word_index start from 1, in order to use '0'
padding in pad_sequence and mask_zero in embedding layer and following
layer.
    :param idxes: the word lookup dictionary of word indices.
:param emb_file: pre-trained embedding file.
:return: word embedding matrix fit for the training data.
"""
# Only need the lookup for 'text'.
idx = idxes[I_TEXT]
# Initial word embedding matrix with all '0's.
    # TODO, here we could set the embedding dimension automatically.
emb_matrix = np.zeros((len(idx) + 1, EMBEDDING_DIM))
# Timing it.
start_time = time()
with open(emb_file) as emb:
for line in emb:
pieces = line.strip().split()
word, coef = pieces[0].strip(), pieces[1:]
begin_idx = 0
for elem_idx, elem in enumerate(coef):
                # In case there are spaces in the word, keep testing whether
                # the string can be interpreted as a float. If yes, this
                # piece is the beginning of the coefficients; if no, append
                # it to the word as part of the token.
try:
                    # Test whether this element is an actual coefficient or
                    # part of the key token.
float(elem)
# Record begin index of actual coefficient.
begin_idx = elem_idx + 1
# Only break when we find the begin index of actual
# coefficient.
break
except Exception as e:
word += elem
# print(e)
                # TODO, we could record the trial and error in a log.
                # print("Failed to load record with word: '{}' and "
                #       "coefficient: {}".format(word, coef))
# print(word)
coef = np.asarray(pieces[begin_idx:], dtype=np.float32)
if word in idx.keys():
# Lookup the indices(index) of word and set the corresponding
# vector to the one in pre-trained embedding matrix.
emb_matrix[idx[word]] = coef
print('Loaded word embedding matrix within {}'.format(
time() - start_time))
# Save loaded subset of word embedding into files.
if save_to_file:
np.save(save_to_file, emb_matrix)
return emb_matrix
def build_net(data, max_len, w2is, atae=True, extra_outputs=True,
emb_mtrx_file=None, save_to_file=None):
"""
Build ATAE-LSTM mentioned in paper 'Attention-based LSTM for Aspect-level
Sentiment Classification', with uniform randomly initialized aspect
embedding and word embedding subset according training data and given
pre-trained embedding file.
    Apply 'inter' attention before the multi-class softmax classification,
    which introduces aspect-level attention as part of the encoding of the
    source sentence.
:param data: Indices of training data including (sentences, aspect,
polarity(one-hot label))
:param max_len: the max length of sentence as it has been padding with
'0's and need to set for the input shape with mini-batch.
:param w2is: Index lookup table of components above.
:param atae: If 'False' then only use 'AE'.
:param extra_outputs: return extra outputs like attention weights,
aspect embeddings or so.
:param emb_mtrx_file: Pre-saved embedding matrix corresponding to
training data and given pre-trained embedding. If 'None' is set,
then reload from embedding file.
    :param save_to_file: File path to save the model to; if 'None' is set,
        the model is not persisted.
    :return: The built (and optionally saved) Keras model.
"""
# TODO, max length should be fixed.
sents, aspects, labels = data
sents_idx, aspects_idx, _ = w2is
emb_mtrx = np.load(emb_mtrx_file)
# Input of sentences.
sents_tensor_input = Input(shape=(sents.shape[1],), dtype='int32')
# Do not retrain embedding of sentences.
sents_tensor = Embedding(len(sents_idx) + 1,
# EMBEDDING_DIM
emb_mtrx.shape[1],
weights=[emb_mtrx],
input_length=max_len,
trainable=False)(sents_tensor_input)
# Input of aspect
    # As we use ATAE-LSTM, the aspect embedding needs to be concatenated to
    # each time step of the sentence.
    # The aspect is a single integer index.
aspects_tensor_input = Input(shape=(1,), dtype='int32')
# Randomly initialize aspect embedding.
aspects_emb_initializer = initializers.RandomUniform(minval=-RANDOM_UNIFORM,
maxval=RANDOM_UNIFORM)
aspects_emb_layer = Embedding(len(aspects_idx) + 1,
ASPECT_EMBEDDING_DIM,
embeddings_initializer=aspects_emb_initializer,
trainable=True,
name='asp_emb_layer')
# In order to get embedding weights.
# aspects_emb_matrix = Lambda(lambda x: x, name='asp_emb_weight')(
# aspects_emb_layer.weights)
aspects_emb = aspects_emb_layer(aspects_tensor_input)
# Here, before repeat we need reshape aspect_tensor act as 'squeeze' with
# the dimension with '1', say Reshape((10, ), input_shape=(1, 10))(...)
# then got keras tensor with shape of (10,), which will then feed into
# `RepeatVector`.
aspects_tensor = Reshape((ASPECT_EMBEDDING_DIM,))(aspects_emb)
# Repeat aspects tensor in order to correspond to the time step of
# sentences, with shape of (max_len, ASPECT_EMBEDDNING_DIM).
# TODO, could use Timedistributed?
aspects_tensor = RepeatVector(max_len)(aspects_tensor)
lstm_input = Concatenate()([sents_tensor, aspects_tensor])
if atae:
lstm_output = LSTM(HIDDEN_LAYER_SIZE, return_sequences=True)(lstm_input)
# Attention with concatenation of sequential output of LSTM and
# aspect embedding.
attention_input = Concatenate()([lstm_output, aspects_tensor])
attention_score = Dense(EMBEDDING_DIM + ASPECT_EMBEDDING_DIM,
use_bias=False,
name='attention_score_1')(attention_input)
        # We need an extra `Dense/Activation` layer here for an axis-aware
        # softmax, which should be applied along the time axis instead of
        # the last axis.
attention_weight = Dense(1, use_bias=False,
name='attention_score_2')(attention_score)
        attention_weight = Lambda(lambda x: softmax(x, axis=1),
                                  name='attention_weights')(attention_weight)
# permuted_weight = Permute((2, 1))(attention_weight)
# attention_represent = Multiply(name='r')([lstm_output, permuted_weight])
# attention_represent = Multiply(name='r')([lstm_output, attention_weight])
attention_represent = Dot(axes=1, name='r')([lstm_output,
attention_weight])
attention_represent = Reshape((EMBEDDING_DIM,))(attention_represent)
last_hidden = Lambda(lambda tensor: tensor[:, -1, :])(lstm_output)
final_represent = Concatenate(name='final_concatenate')([
attention_represent, last_hidden])
final_represent = Dense(EMBEDDING_DIM, activation='tanh',
use_bias=False, name='final_representation')(
final_represent)
model_output = Dense(labels.shape[1],
activation='softmax',
activity_regularizer=regularizers.l2(
L2_REGULARIZATION),
name='ATAE_LSTM_output')(final_represent)
# outs = [model_output]
# if extra_outputs:
# outs.append(attention_weight)
# TODO, get from model outside
# outs.append(aspects_emb_matrix)
# print(outs)
else:
lstm_output = LSTM(HIDDEN_LAYER_SIZE,
return_sequences=False)(lstm_input)
model_output = Dense(labels.shape[1],
activation='softmax',
name='Simple_AE_LSTM_ouptut')(lstm_output)
# outs = [model_output]
model = Model(inputs=[sents_tensor_input,
aspects_tensor_input],
outputs=model_output)
if save_to_file:
model.save(save_to_file)
return model
def train(data, model, model_optimizer=None, metrics=None, valid_ratio=0.1,
epoch=10, mini_batch=25, save_to_file=None):
"""
:param data: Training data in tuples of lists with form of (sentences,
aspect word, polarity).
    :param model: Predefined model generated by `build_net`; both `data`
        and `model` are required.
    :param model_optimizer: Optimizer used to train/compile the model.
        Default is Adagrad with learning rate 0.001.
    :param metrics: List of metrics of interest. If not set, defaults
        to ['accuracy'].
:return: None
"""
if not model and not data:
print('Please passed in data and model!')
return
if not metrics:
metrics = ['accuracy']
if not model_optimizer:
model_optimizer = optimizers.Adagrad(lr=0.001)
print("Training Model ...")
print(model.summary())
# print('\t\twith data as')
# print('\t\t{}'.format(check_absent(data)))
print('\t\twith hyper-parametes as')
print('\t\t\tMini-Batch : {}'.format(mini_batch))
print('\t\t\tEpoch : {}'.format(epoch))
model.compile(model_optimizer, 'categorical_crossentropy', metrics=metrics)
    model.fit([data[I_TEXT], data[I_ASPECT]], data[I_POLARITY],
              mini_batch, epochs=epoch, validation_split=valid_ratio)
if save_to_file:
model.save(save_to_file)
def train_dev_split(data, ratio=0.8, seed=42):
"""
Function to split train and dev set with given ratio.
:param data: whole dataset.
:param ratio: percentage that training data occupied.
:return: tuple of list of (training, dev), and each of them should be
formed as (sentences, aspect word, polarity)
"""
    np.random.seed(seed)
sents, aspects, labels = data[I_TEXT], data[I_ASPECT], data[I_POLARITY]
idx = np.arange(sents.shape[0])
np.random.shuffle(idx)
sents = sents[idx]
aspects = aspects[idx]
labels = labels[idx]
# Calculate split boundary.
bnd = int(len(idx) * ratio)
train_set = [sents[:bnd], aspects[:bnd], labels[:bnd]]
dev_set = [sents[bnd:], aspects[bnd:], labels[bnd:]]
return train_set, dev_set
def predict(data, lookup, max_len, model=None, save_to_file=None,
extra_output=True):
"""
Predict with given data and model or load model from saved pre-trained
model in file.
:param data: data in tuple or list (sentence, aspect)
    :param lookup: index-to-label lookup used to decode predictions.
    :param max_len: length to pad the sequences to.
    :param model: pre-trained model; if not set, it is loaded from file, and
        if the model file is also not set, a ValueError is raised.
:param save_to_file: file saved with model.
:return: prediction
"""
# Omit word index lookups.
converted_data, _ = convert_data(data, max_len, with_label=False)
# print(converted_data)
if not model:
if save_to_file:
model = load_model(save_to_file,
custom_objects={'softmax': softmax})
else:
# TODO, should raise exception?
raise ValueError('Please pass in model instance or '
'the path of file model saved to.')
pred_vec = model.predict([converted_data[I_TEXT],
converted_data[I_ASPECT]])
pred_idx = np.argmax(pred_vec, axis=1)
func_get_label = np.vectorize(lambda p: lookup.get(p))
# print(pred_idx, func_get_label(pred_idx), lookup.get(0))
# Need to add '1' for keras labels start from '0'.
pred = func_get_label(pred_idx + 1)
# if extra_output:
# model.layers
return pred
def get_layer(model, layer_name):
"""
Get layer from model by name or index.
:param layer_name: the name or index of layer.
:return: layer instance extract from model.
"""
if isinstance(layer_name, int):
return model.layers[layer_name]
elif isinstance(layer_name, str):
return model.get_layer(layer_name)
else:
raise ValueError('The layer name should only be `int` or `str`.')
def get_aspect_embeddings(model, layer_name, save_to_file=None):
"""
Get aspect embedding from specific layer with given name.
    :param model: the pre-trained model; if not set, it is reloaded from the
        saved model file. If that also fails, 'ValueError' is thrown.
    :param layer_name: the name or index of the embedding layer, otherwise
        ValueError is thrown.
    :param save_to_file: file the pre-trained model was saved to; the model
        is loaded from it when `model` is None.
    :return: tensor of aspect embeddings.
"""
if not model:
if not save_to_file:
raise ValueError('No model found from parameter or file!')
else:
model = load_model(save_to_file)
# Get embeddings of aspect words.
emb_layer = get_layer(model, layer_name)
return K.eval(emb_layer.embeddings)
def get_attention_weighs(data, att_layer_name, input_layers_names: list,
model=None, save_to_file=None):
"""
Get attention weights(intermediate) from specific layer with given layer
name and input layers.
    :param data: data to attend to.
    :param model: the pre-trained model; if not set, it is reloaded from the
        saved model file. If that also fails, 'ValueError' is thrown.
    :param att_layer_name: the name or index of the attention layer,
        otherwise ValueError is thrown.
    :param input_layers_names: the name or index list of all input layers,
        in order.
    :param save_to_file: file the pre-trained model was saved to; the model
        is loaded from it when `model` is None.
    :return: tensor of attention weights.
"""
if not model:
if not save_to_file:
raise ValueError('No model found from parameter or file!')
else:
model = load_model(save_to_file,
custom_objects={'softmax': softmax})
# Must be sure input layers are in order.
att_layer = get_layer(model, att_layer_name)
input_layers = []
for layer_name in input_layers_names:
layer = get_layer(model, layer_name)
if layer:
input_layers.append(layer.input)
get_attention_weights = K.function(input_layers, [att_layer.output])
weights = get_attention_weights([data[I_TEXT], data[I_ASPECT]])[0]
# print(weights.shape)
return weights
def plot_attention_weight(weights, focus_len):
"""
Plot attention weights within the focus length.
:param weights: attention weights.
    :param focus_len: the length to focus on, usually the sentence length.
:return: None
"""
# score_file = os.path.join(RAW_DATA_FILE_BASE, 'intermeidate_score')
# np.save(score_file, weights)
# score_input = Input(shape=(term_max_len, 600))
# get_weights = Dense(1, use_bias=False)(score_input)
# get_weights = Activation('softmax', axis=1)(get_weights)
# get_weights = Lambda(lambda x: tf.nn.softmax())
# from keras.activations import softmax
# # # get_weights = Lambda(lambda x: softmax(x, axis=1))(get_weights)
# # # score_model = Model(score_input, get_weights)
# # # print(score_model.summary())
# #
# # score_model.compile(optimizer='adam', loss='categorical_crossentropy')
# weight_result = score_model.predict(weights)
# print(weight_result[0].shape)
# begin_idx = len(converted_data[I_TEXT][0])
# print(begin_idx)
import matplotlib.pyplot as plt
# hist, bins = np.histogram(weight_result[0].reshape((1, -1)))
# We have to remember the length of input sentences in order to align the
# attention weights.
# plt.imshow(weight_result[0][-20:].reshape((1, -1)), cmap="plasma",
# aspect="auto", extent=[0, 20, 0, 1])
    # TODO: padding is 'pre', so the actual tokens are the last focus_len entries.
attentions = weights.reshape((1, -1))[:, -focus_len:]
print(attentions.shape)
plt.imshow(attentions, cmap='plasma',
aspect='auto', extent=[0, focus_len, 0, 1])
# plt.grid(True)
plt.colorbar()
plt.show()
if __name__ == '__main__':
RAW_DATA_FILE_BASE = '/Users/jiazhen/datasets/SemEval' \
'/SemEval_2014_task4/ABSA_v2'
RES_RAW_DATA_FILE = os.path.join(RAW_DATA_FILE_BASE,
'Restaurants_Train_v2.xml')
LAP_RAW_DATA_FILE = os.path.join(RAW_DATA_FILE_BASE, 'Laptop_Train_v2.xml')
WORD_EMB_BASE = '/Users/jiazhen/datasets'
WORD_EMB_FILE = os.path.join(WORD_EMB_BASE, 'glove.840B.300d.txt')
SAVED_EMB_FILE = os.path.join(RAW_DATA_FILE_BASE, 'glove_res_emb.npy')
SAVED_MDL_FILE = os.path.join(RAW_DATA_FILE_BASE, 'atae_model.keras')
res_data = extract_data(RES_RAW_DATA_FILE)
# print(res_data[7])
check_absent(res_data)
(term_data, term_max_len), _ = combine_data(res_data, mess=False)
# print(term_data[7])
# No padding here according to the paper.
# Need padding for mini-batch.
seqs_data, w2is = convert_data(term_data, max_len=term_max_len)
# emb_matrix = load_w2v(w2is, WORD_EMB_FILE, SAVED_EMB_FILE)
# print(emb_matrix[1])
# print(len(seqs_data))
# print(seqs_data[0].shape, seqs_data[1].shape, seqs_data[2].shape)
# print(seqs_data[1])
# for i, d in enumerate(seqs_data[1]):
# if len(d) > 1:
# print(i, d)
# print(term_data[i][I_ASPECT])
# print('raw data', res_data[92]['aspect_terms'])
# print(type(seqs_data[1][0][0]))
# print(type(seqs_data[2][0][0]))
# print(w2is[0])
# reloaded_emb = np.load(SAVED_EMB_FILE)
# print(reloaded_emb[1])
# Train model.
# model = build_net(seqs_data, term_max_len, w2is,
# atae=True, extra_outputs=True,
# emb_mtrx_file=SAVED_EMB_FILE,
# save_to_file=SAVED_MDL_FILE + '2')
# train(seqs_data, model, epoch=3)
label_lookup = {idx: polarity
for polarity, idx in w2is[I_POLARITY].items()}
# print(label_lookup)
customized_data = [['The food is really delicious but '
'I hate the service', 'food'],
                       ['The food is really delicious but '
                        'I hate the service', 'service'],
                       ['I have to say there is no one could be faster than '
                        'him, but he needs to take care of his bad motion as '
                        'a bar attendant, which will impact his service.',
                        'service']]
pred = predict(customized_data, label_lookup, term_max_len,
save_to_file=SAVED_MDL_FILE + '2')
print(pred)
# Get attention weights for sentences.
converted_data, _ = convert_data(customized_data,
term_max_len,
with_label=False)
weights = get_attention_weighs(converted_data,
                                   att_layer_name='attention_weights',
input_layers_names=[2, 0],
save_to_file=SAVED_MDL_FILE + '2')
# print(weights[0])
print(len(customized_data[0][I_TEXT].split()))
    focus_len = len(customized_data[0][I_TEXT].split())
plot_attention_weight(weights[0], focus_len=focus_len)
# for weight in weights:
# print(weight.shape)
    # TODO, use gensim to visualize aspect word embeddings.
|
[
"keras.models.load_model",
"keras.regularizers.l2",
"numpy.load",
"numpy.random.seed",
"numpy.argmax",
"keras.preprocessing.sequence.pad_sequences",
"keras.optimizers.Adagrad",
"keras.models.Model",
"numpy.arange",
"keras.layers.Input",
"keras.activations.softmax",
"keras.layers.Reshape",
"os.path.join",
"keras.initializers.RandomUniform",
"matplotlib.pyplot.imshow",
"matplotlib.pyplot.colorbar",
"keras.backend.eval",
"keras.preprocessing.text.Tokenizer",
"keras.utils.np_utils.to_categorical",
"lxml.etree.parse",
"numpy.random.shuffle",
"numpy.save",
"matplotlib.pyplot.show",
"numpy.asarray",
"keras.backend.function",
"keras.layers.Concatenate",
"keras.layers.RepeatVector",
"keras.layers.LSTM",
"keras.layers.Dot",
"time.time",
"keras.layers.Dense",
"keras.layers.Lambda"
] |
[((1528, 1550), 'lxml.etree.parse', 'etree.parse', (['data_file'], {}), '(data_file)\n', (1539, 1550), False, 'from lxml import etree\n'), ((11765, 11771), 'time.time', 'time', ([], {}), '()\n', (11769, 11771), False, 'from time import time\n'), ((15107, 15129), 'numpy.load', 'np.load', (['emb_mtrx_file'], {}), '(emb_mtrx_file)\n', (15114, 15129), True, 'import numpy as np\n'), ((15182, 15227), 'keras.layers.Input', 'Input', ([], {'shape': '(sents.shape[1],)', 'dtype': '"""int32"""'}), "(shape=(sents.shape[1],), dtype='int32')\n", (15187, 15227), False, 'from keras.layers import Input, Embedding, LSTM, Dense\n'), ((15777, 15809), 'keras.layers.Input', 'Input', ([], {'shape': '(1,)', 'dtype': '"""int32"""'}), "(shape=(1,), dtype='int32')\n", (15782, 15809), False, 'from keras.layers import Input, Embedding, LSTM, Dense\n'), ((15884, 15957), 'keras.initializers.RandomUniform', 'initializers.RandomUniform', ([], {'minval': '(-RANDOM_UNIFORM)', 'maxval': 'RANDOM_UNIFORM'}), '(minval=-RANDOM_UNIFORM, maxval=RANDOM_UNIFORM)\n', (15910, 15957), False, 'from keras import regularizers, initializers, optimizers\n'), ((19635, 19713), 'keras.models.Model', 'Model', ([], {'inputs': '[sents_tensor_input, aspects_tensor_input]', 'outputs': 'model_output'}), '(inputs=[sents_tensor_input, aspects_tensor_input], outputs=model_output)\n', (19640, 19713), False, 'from keras.models import Model, load_model\n'), ((21587, 21605), 'numpy.random.seed', 'np.random.seed', (['(42)'], {}), '(42)\n', (21601, 21605), True, 'import numpy as np\n'), ((21692, 21717), 'numpy.arange', 'np.arange', (['sents.shape[0]'], {}), '(sents.shape[0])\n', (21701, 21717), True, 'import numpy as np\n'), ((21722, 21744), 'numpy.random.shuffle', 'np.random.shuffle', (['idx'], {}), '(idx)\n', (21739, 21744), True, 'import numpy as np\n'), ((23197, 23224), 'numpy.argmax', 'np.argmax', (['pred_vec'], {'axis': '(1)'}), '(pred_vec, axis=1)\n', (23206, 23224), True, 'import numpy as np\n'), ((24747, 24775), 'keras.backend.eval', 'K.eval', (['emb_layer.embeddings'], {}), '(emb_layer.embeddings)\n', (24753, 24775), True, 'import keras.backend as K\n'), ((26085, 26129), 'keras.backend.function', 'K.function', (['input_layers', '[att_layer.output]'], {}), '(input_layers, [att_layer.output])\n', (26095, 26129), True, 'import keras.backend as K\n'), ((27798, 27884), 'matplotlib.pyplot.imshow', 'plt.imshow', (['attentions'], {'cmap': '"""plasma"""', 'aspect': '"""auto"""', 'extent': '[0, focus_len, 0, 1]'}), "(attentions, cmap='plasma', aspect='auto', extent=[0, focus_len, \n 0, 1])\n", (27808, 27884), True, 'import matplotlib.pyplot as plt\n'), ((27920, 27934), 'matplotlib.pyplot.colorbar', 'plt.colorbar', ([], {}), '()\n', (27932, 27934), True, 'import matplotlib.pyplot as plt\n'), ((27939, 27949), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (27947, 27949), True, 'import matplotlib.pyplot as plt\n'), ((28119, 28179), 'os.path.join', 'os.path.join', (['RAW_DATA_FILE_BASE', '"""Restaurants_Train_v2.xml"""'], {}), "(RAW_DATA_FILE_BASE, 'Restaurants_Train_v2.xml')\n", (28131, 28179), False, 'import os\n'), ((28241, 28296), 'os.path.join', 'os.path.join', (['RAW_DATA_FILE_BASE', '"""Laptop_Train_v2.xml"""'], {}), "(RAW_DATA_FILE_BASE, 'Laptop_Train_v2.xml')\n", (28253, 28296), False, 'import os\n'), ((28364, 28414), 'os.path.join', 'os.path.join', (['WORD_EMB_BASE', '"""glove.840B.300d.txt"""'], {}), "(WORD_EMB_BASE, 'glove.840B.300d.txt')\n", (28376, 28414), False, 'import os\n'), ((28437, 28490), 'os.path.join', 'os.path.join', 
(['RAW_DATA_FILE_BASE', '"""glove_res_emb.npy"""'], {}), "(RAW_DATA_FILE_BASE, 'glove_res_emb.npy')\n", (28449, 28490), False, 'import os\n'), ((28512, 28564), 'os.path.join', 'os.path.join', (['RAW_DATA_FILE_BASE', '"""atae_model.keras"""'], {}), "(RAW_DATA_FILE_BASE, 'atae_model.keras')\n", (28524, 28564), False, 'import os\n'), ((8251, 8287), 'keras.preprocessing.text.Tokenizer', 'Tokenizer', ([], {'filters': 'customized_filter'}), '(filters=customized_filter)\n', (8260, 8287), False, 'from keras.preprocessing.text import Tokenizer\n'), ((8772, 8808), 'numpy.asarray', 'np.asarray', (['text_seqs'], {'dtype': '"""int32"""'}), "(text_seqs, dtype='int32')\n", (8782, 8808), True, 'import numpy as np\n'), ((9104, 9143), 'numpy.asarray', 'np.asarray', (['aspects_seqs'], {'dtype': '"""int32"""'}), "(aspects_seqs, dtype='int32')\n", (9114, 9143), True, 'import numpy as np\n'), ((9461, 9487), 'keras.utils.np_utils.to_categorical', 'to_categorical', (['labels_arr'], {}), '(labels_arr)\n', (9475, 9487), False, 'from keras.utils.np_utils import to_categorical\n'), ((13562, 13595), 'numpy.save', 'np.save', (['save_to_file', 'emb_matrix'], {}), '(save_to_file, emb_matrix)\n', (13569, 13595), True, 'import numpy as np\n'), ((16794, 16826), 'keras.layers.Reshape', 'Reshape', (['(ASPECT_EMBEDDING_DIM,)'], {}), '((ASPECT_EMBEDDING_DIM,))\n', (16801, 16826), False, 'from keras.layers import RepeatVector, Dot, Concatenate, Reshape\n'), ((17036, 17057), 'keras.layers.RepeatVector', 'RepeatVector', (['max_len'], {}), '(max_len)\n', (17048, 17057), False, 'from keras.layers import RepeatVector, Dot, Concatenate, Reshape\n'), ((17092, 17105), 'keras.layers.Concatenate', 'Concatenate', ([], {}), '()\n', (17103, 17105), False, 'from keras.layers import RepeatVector, Dot, Concatenate, Reshape\n'), ((20649, 20677), 'keras.optimizers.Adagrad', 'optimizers.Adagrad', ([], {'lr': '(0.001)'}), '(lr=0.001)\n', (20667, 20677), False, 'from keras import regularizers, initializers, optimizers\n'), ((8522, 8557), 'keras.preprocessing.sequence.pad_sequences', 'pad_sequences', (['seqs'], {'maxlen': 'max_len'}), '(seqs, maxlen=max_len)\n', (8535, 8557), False, 'from keras.preprocessing.sequence import pad_sequences\n'), ((9395, 9431), 'numpy.asarray', 'np.asarray', (['labels_seqs'], {'dtype': '"""int"""'}), "(labels_seqs, dtype='int')\n", (9405, 9431), True, 'import numpy as np\n'), ((13103, 13151), 'numpy.asarray', 'np.asarray', (['pieces[begin_idx:]'], {'dtype': 'np.float32'}), '(pieces[begin_idx:], dtype=np.float32)\n', (13113, 13151), True, 'import numpy as np\n'), ((17174, 17220), 'keras.layers.LSTM', 'LSTM', (['HIDDEN_LAYER_SIZE'], {'return_sequences': '(True)'}), '(HIDDEN_LAYER_SIZE, return_sequences=True)\n', (17178, 17220), False, 'from keras.layers import Input, Embedding, LSTM, Dense\n'), ((17359, 17372), 'keras.layers.Concatenate', 'Concatenate', ([], {}), '()\n', (17370, 17372), False, 'from keras.layers import RepeatVector, Dot, Concatenate, Reshape\n'), ((17430, 17520), 'keras.layers.Dense', 'Dense', (['(EMBEDDING_DIM + ASPECT_EMBEDDING_DIM)'], {'use_bias': '(False)', 'name': '"""attention_score_1"""'}), "(EMBEDDING_DIM + ASPECT_EMBEDDING_DIM, use_bias=False, name=\n 'attention_score_1')\n", (17435, 17520), False, 'from keras.layers import Input, Embedding, LSTM, Dense\n'), ((17773, 17823), 'keras.layers.Dense', 'Dense', (['(1)'], {'use_bias': '(False)', 'name': '"""attention_score_2"""'}), "(1, use_bias=False, name='attention_score_2')\n", (17778, 17823), False, 'from keras.layers import Input, Embedding, LSTM, 
Dense\n'), ((18255, 18276), 'keras.layers.Dot', 'Dot', ([], {'axes': '(1)', 'name': '"""r"""'}), "(axes=1, name='r')\n", (18258, 18276), False, 'from keras.layers import RepeatVector, Dot, Concatenate, Reshape\n'), ((18393, 18418), 'keras.layers.Reshape', 'Reshape', (['(EMBEDDING_DIM,)'], {}), '((EMBEDDING_DIM,))\n', (18400, 18418), False, 'from keras.layers import RepeatVector, Dot, Concatenate, Reshape\n'), ((18463, 18502), 'keras.layers.Lambda', 'Lambda', (['(lambda tensor: tensor[:, -1, :])'], {}), '(lambda tensor: tensor[:, -1, :])\n', (18469, 18502), False, 'from keras.layers import Lambda\n'), ((18542, 18579), 'keras.layers.Concatenate', 'Concatenate', ([], {'name': '"""final_concatenate"""'}), "(name='final_concatenate')\n", (18553, 18579), False, 'from keras.layers import RepeatVector, Dot, Concatenate, Reshape\n'), ((18655, 18744), 'keras.layers.Dense', 'Dense', (['EMBEDDING_DIM'], {'activation': '"""tanh"""', 'use_bias': '(False)', 'name': '"""final_representation"""'}), "(EMBEDDING_DIM, activation='tanh', use_bias=False, name=\n 'final_representation')\n", (18660, 18744), False, 'from keras.layers import Input, Embedding, LSTM, Dense\n'), ((19334, 19381), 'keras.layers.LSTM', 'LSTM', (['HIDDEN_LAYER_SIZE'], {'return_sequences': '(False)'}), '(HIDDEN_LAYER_SIZE, return_sequences=False)\n', (19338, 19381), False, 'from keras.layers import Input, Embedding, LSTM, Dense\n'), ((19444, 19518), 'keras.layers.Dense', 'Dense', (['labels.shape[1]'], {'activation': '"""softmax"""', 'name': '"""Simple_AE_LSTM_ouptut"""'}), "(labels.shape[1], activation='softmax', name='Simple_AE_LSTM_ouptut')\n", (19449, 19518), False, 'from keras.layers import Input, Embedding, LSTM, Dense\n'), ((22790, 22851), 'keras.models.load_model', 'load_model', (['save_to_file'], {'custom_objects': "{'softmax': softmax}"}), "(save_to_file, custom_objects={'softmax': softmax})\n", (22800, 22851), False, 'from keras.models import Model, load_model\n'), ((24628, 24652), 'keras.models.load_model', 'load_model', (['save_to_file'], {}), '(save_to_file)\n', (24638, 24652), False, 'from keras.models import Model, load_model\n'), ((25694, 25755), 'keras.models.load_model', 'load_model', (['save_to_file'], {'custom_objects': "{'softmax': softmax}"}), "(save_to_file, custom_objects={'softmax': softmax})\n", (25704, 25755), False, 'from keras.models import Model, load_model\n'), ((13455, 13461), 'time.time', 'time', ([], {}), '()\n', (13459, 13461), False, 'from time import time\n'), ((17918, 17936), 'keras.activations.softmax', 'softmax', (['x'], {'axis': '(1)'}), '(x, axis=1)\n', (17925, 17936), False, 'from keras.activations import softmax\n'), ((18949, 18983), 'keras.regularizers.l2', 'regularizers.l2', (['L2_REGULARIZATION'], {}), '(L2_REGULARIZATION)\n', (18964, 18983), False, 'from keras import regularizers, initializers, optimizers\n')]
|
# Generated by Django 2.1 on 2018-12-23 13:40
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('preferences', '0002_auto_20181221_2151'),
]
operations = [
migrations.AddField(
model_name='generalpreferences',
name='lost_pintes_allowed',
field=models.PositiveIntegerField(default=0),
),
migrations.AddField(
model_name='historicalgeneralpreferences',
name='lost_pintes_allowed',
field=models.PositiveIntegerField(default=0),
),
]
|
[
"django.db.models.PositiveIntegerField"
] |
[((361, 399), 'django.db.models.PositiveIntegerField', 'models.PositiveIntegerField', ([], {'default': '(0)'}), '(default=0)\n', (388, 399), False, 'from django.db import migrations, models\n'), ((554, 592), 'django.db.models.PositiveIntegerField', 'models.PositiveIntegerField', ([], {'default': '(0)'}), '(default=0)\n', (581, 592), False, 'from django.db import migrations, models\n')]
|
import os
import sys
import io
import warnings
from pygen_structures.convenience_functions import (
load_charmm_dir,
pdb_to_mol
)
from pygen_structures import __main__ as cmd_interface
FILE_DIR, _ = os.path.split(__file__)
TEST_TOPPAR = os.path.join(FILE_DIR, 'test_toppar')
def test_arg_parsing():
argv = ["HEY", "-o", "HEY_out", "--histidine", "HSP"]
args = cmd_interface.parse_args(argv)
assert(args.sequence == "HEY")
assert(args.segid == "PROT")
    assert(args.patches is None)
    assert(args.toppar is None)
    assert(args.verify is True)
assert(args.output == "HEY_out")
assert(args.histidine == "HSP")
    assert(args.use_charmm_names is False)
argv = [
"-u", "HSE-TRP-LYS", "-o", "HWK", "--patches", "CT2", "LAST",
"-v", "--segid", "HWK"
]
args = cmd_interface.parse_args(argv)
assert(args.sequence == "HSE-TRP-LYS")
assert(args.segid == "HWK")
assert(args.patches == ["CT2", "LAST"])
    assert(args.toppar is None)
    assert(args.verify is False)
assert(args.output == "HWK")
assert(args.histidine == "HSE")
    assert(args.use_charmm_names is True)
def test_molecule_creation_raff():
argv = [
"-u", "AGLC-BFRU-AGAL", "-o", "RAFF",
"--patches", "RAFF", "0", "1", "2",
"--segid", "RAFF", "--name", "Raffinose"
]
cmd_interface.main(argv)
assert(os.path.exists("RAFF.psf"))
os.remove('RAFF.psf')
assert(os.path.exists("RAFF.pdb"))
rtf, prm = load_charmm_dir()
with warnings.catch_warnings():
warnings.simplefilter('ignore')
molecule = pdb_to_mol("RAFF.pdb", rtf, patches={"RAFF": (0, 1, 2)})
os.remove('RAFF.pdb')
assert(molecule.name == "Raffinose")
assert(molecule.segment == "RAFF")
assert(molecule.check_parameters(prm))
ref_atoms = {
(0, 'C1'),
(0, 'H1'),
(0, 'O1'),
(0, 'C5'),
(0, 'H5'),
(0, 'O5'),
(0, 'C2'),
(0, 'H2'),
(0, 'O2'),
(0, 'HO2'),
(0, 'C3'),
(0, 'H3'),
(0, 'O3'),
(0, 'HO3'),
(0, 'C4'),
(0, 'H4'),
(0, 'O4'),
(0, 'HO4'),
(0, 'C6'),
(0, 'H61'),
(0, 'H62'),
(1, 'O5'),
(1, 'C2'),
(1, 'C5'),
(1, 'H5'),
(1, 'C6'),
(1, 'H61'),
(1, 'H62'),
(1, 'O6'),
(1, 'HO6'),
(1, 'C1'),
(1, 'H11'),
(1, 'H12'),
(1, 'O1'),
(1, 'HO1'),
(1, 'C3'),
(1, 'H3'),
(1, 'O3'),
(1, 'HO3'),
(1, 'C4'),
(1, 'H4'),
(1, 'O4'),
(1, 'HO4'),
(2, 'C1'),
(2, 'H1'),
(2, 'O1'),
(2, 'C5'),
(2, 'H5'),
(2, 'O5'),
(2, 'C2'),
(2, 'H2'),
(2, 'O2'),
(2, 'HO2'),
(2, 'C3'),
(2, 'H3'),
(2, 'O3'),
(2, 'HO3'),
(2, 'C4'),
(2, 'H4'),
(2, 'O4'),
(2, 'HO4'),
(2, 'C6'),
(2, 'H61'),
(2, 'H62'),
(2, 'O6'),
(2, 'HO6')
}
ref_bonds = {
((0, 'C1'), (0, 'O1')),
((0, 'C1'), (0, 'H1')),
((0, 'C1'), (0, 'O5')),
((0, 'C1'), (0, 'C2')),
((0, 'C2'), (0, 'H2')),
((0, 'C2'), (0, 'O2')),
((0, 'O2'), (0, 'HO2')),
((0, 'C2'), (0, 'C3')),
((0, 'C3'), (0, 'H3')),
((0, 'C3'), (0, 'O3')),
((0, 'O3'), (0, 'HO3')),
((0, 'C3'), (0, 'C4')),
((0, 'C4'), (0, 'H4')),
((0, 'C4'), (0, 'O4')),
((0, 'O4'), (0, 'HO4')),
((0, 'C4'), (0, 'C5')),
((0, 'C5'), (0, 'H5')),
((0, 'C5'), (0, 'C6')),
((0, 'C6'), (0, 'H61')),
((0, 'C6'), (0, 'H62')),
((0, 'C5'), (0, 'O5')),
((0, 'O1'), (1, 'C2')),
((1, 'O5'), (1, 'C2')),
((1, 'C2'), (1, 'C1')),
((1, 'C2'), (1, 'C3')),
((1, 'C3'), (1, 'H3')),
((1, 'C3'), (1, 'O3')),
((1, 'O3'), (1, 'HO3')),
((1, 'C3'), (1, 'C4')),
((1, 'C4'), (1, 'H4')),
((1, 'C4'), (1, 'O4')),
((1, 'O4'), (1, 'HO4')),
((1, 'C4'), (1, 'C5')),
((1, 'C5'), (1, 'H5')),
((1, 'C5'), (1, 'C6')),
((1, 'C5'), (1, 'O5')),
((1, 'C6'), (1, 'H61')),
((1, 'C6'), (1, 'H62')),
((1, 'C6'), (1, 'O6')),
((1, 'O6'), (1, 'HO6')),
((1, 'C1'), (1, 'H11')),
((1, 'C1'), (1, 'H12')),
((1, 'C1'), (1, 'O1')),
((1, 'O1'), (1, 'HO1')),
((2, 'C1'), (2, 'O1')),
((2, 'C1'), (2, 'H1')),
((2, 'C1'), (2, 'O5')),
((2, 'C1'), (2, 'C2')),
((2, 'C2'), (2, 'H2')),
((2, 'C2'), (2, 'O2')),
((2, 'O2'), (2, 'HO2')),
((2, 'C2'), (2, 'C3')),
((2, 'C3'), (2, 'H3')),
((2, 'C3'), (2, 'O3')),
((2, 'O3'), (2, 'HO3')),
((2, 'C3'), (2, 'C4')),
((2, 'C4'), (2, 'H4')),
((2, 'C4'), (2, 'O4')),
((2, 'O4'), (2, 'HO4')),
((2, 'C4'), (2, 'C5')),
((2, 'C5'), (2, 'H5')),
((2, 'C5'), (2, 'C6')),
((2, 'C6'), (2, 'H61')),
((2, 'C6'), (2, 'H62')),
((2, 'C6'), (2, 'O6')),
((2, 'O6'), (2, 'HO6')),
((2, 'C5'), (2, 'O5')),
((2, 'O1'), (0, 'C6')),
}
atoms = set()
for atom in molecule.atoms:
atoms.add((atom.residue_number - 1, atom.atom_name))
assert(atoms == ref_atoms)
bonds = set()
for residue in molecule.residues:
for bond in residue.bonds:
if bond in ref_bonds:
bonds.add(bond)
else:
bonds.add((bond[1], bond[0]))
assert(bonds == ref_bonds)
def test_molecule_creation_hey():
argv = [
"HEY", "-o", "HEY", "-t", TEST_TOPPAR, "--histidine", "HSP"
]
cmd_interface.main(argv)
assert(os.path.exists("HEY.psf"))
os.remove('HEY.psf')
assert(os.path.exists("HEY.pdb"))
rtf, prm = load_charmm_dir()
with warnings.catch_warnings():
warnings.simplefilter('ignore')
molecule = pdb_to_mol("HEY.pdb", rtf)
os.remove('HEY.pdb')
assert(molecule.name == "H[+]EY")
assert(molecule.segment == "PROT")
assert(molecule.check_parameters(prm))
def test_verify():
old_stdout = sys.stdout
sys.stdout = open(os.devnull, 'w')
argv = [
"PdP", "-o", "PdP", "-t", TEST_TOPPAR
]
try:
cmd_interface.main(argv)
except SystemExit:
# Missing parameters call exit()
pass
assert(not os.path.exists("PdP.psf"))
assert(not os.path.exists("PdP.pdb"))
argv = [
"PdP", "-o", "PdP", "-t", TEST_TOPPAR, "-v"
]
cmd_interface.main(argv)
assert(os.path.exists("PdP.psf"))
os.remove("PdP.psf")
assert(os.path.exists("PdP.pdb"))
os.remove("PdP.pdb")
sys.stdout.close()
sys.stdout = io.StringIO()
argv = [
"PdP"
]
cmd_interface.main(argv)
sys.stdout.seek(0)
assert(sys.stdout.read() != "")
sys.stdout.close()
sys.stdout = old_stdout
|
[
"os.remove",
"io.StringIO",
"os.path.join",
"warnings.simplefilter",
"sys.stdout.seek",
"sys.stdout.close",
"pygen_structures.convenience_functions.pdb_to_mol",
"os.path.exists",
"sys.stdout.read",
"warnings.catch_warnings",
"pygen_structures.__main__.parse_args",
"pygen_structures.convenience_functions.load_charmm_dir",
"os.path.split",
"pygen_structures.__main__.main"
] |
[((208, 231), 'os.path.split', 'os.path.split', (['__file__'], {}), '(__file__)\n', (221, 231), False, 'import os\n'), ((246, 283), 'os.path.join', 'os.path.join', (['FILE_DIR', '"""test_toppar"""'], {}), "(FILE_DIR, 'test_toppar')\n", (258, 283), False, 'import os\n'), ((378, 408), 'pygen_structures.__main__.parse_args', 'cmd_interface.parse_args', (['argv'], {}), '(argv)\n', (402, 408), True, 'from pygen_structures import __main__ as cmd_interface\n'), ((823, 853), 'pygen_structures.__main__.parse_args', 'cmd_interface.parse_args', (['argv'], {}), '(argv)\n', (847, 853), True, 'from pygen_structures import __main__ as cmd_interface\n'), ((1347, 1371), 'pygen_structures.__main__.main', 'cmd_interface.main', (['argv'], {}), '(argv)\n', (1365, 1371), True, 'from pygen_structures import __main__ as cmd_interface\n'), ((1384, 1410), 'os.path.exists', 'os.path.exists', (['"""RAFF.psf"""'], {}), "('RAFF.psf')\n", (1398, 1410), False, 'import os\n'), ((1416, 1437), 'os.remove', 'os.remove', (['"""RAFF.psf"""'], {}), "('RAFF.psf')\n", (1425, 1437), False, 'import os\n'), ((1449, 1475), 'os.path.exists', 'os.path.exists', (['"""RAFF.pdb"""'], {}), "('RAFF.pdb')\n", (1463, 1475), False, 'import os\n'), ((1493, 1510), 'pygen_structures.convenience_functions.load_charmm_dir', 'load_charmm_dir', ([], {}), '()\n', (1508, 1510), False, 'from pygen_structures.convenience_functions import load_charmm_dir, pdb_to_mol\n'), ((5857, 5881), 'pygen_structures.__main__.main', 'cmd_interface.main', (['argv'], {}), '(argv)\n', (5875, 5881), True, 'from pygen_structures import __main__ as cmd_interface\n'), ((5894, 5919), 'os.path.exists', 'os.path.exists', (['"""HEY.psf"""'], {}), "('HEY.psf')\n", (5908, 5919), False, 'import os\n'), ((5925, 5945), 'os.remove', 'os.remove', (['"""HEY.psf"""'], {}), "('HEY.psf')\n", (5934, 5945), False, 'import os\n'), ((5957, 5982), 'os.path.exists', 'os.path.exists', (['"""HEY.pdb"""'], {}), "('HEY.pdb')\n", (5971, 5982), False, 'import os\n'), ((6000, 6017), 'pygen_structures.convenience_functions.load_charmm_dir', 'load_charmm_dir', ([], {}), '()\n', (6015, 6017), False, 'from pygen_structures.convenience_functions import load_charmm_dir, pdb_to_mol\n'), ((6722, 6746), 'pygen_structures.__main__.main', 'cmd_interface.main', (['argv'], {}), '(argv)\n', (6740, 6746), True, 'from pygen_structures import __main__ as cmd_interface\n'), ((6758, 6783), 'os.path.exists', 'os.path.exists', (['"""PdP.psf"""'], {}), "('PdP.psf')\n", (6772, 6783), False, 'import os\n'), ((6789, 6809), 'os.remove', 'os.remove', (['"""PdP.psf"""'], {}), "('PdP.psf')\n", (6798, 6809), False, 'import os\n'), ((6821, 6846), 'os.path.exists', 'os.path.exists', (['"""PdP.pdb"""'], {}), "('PdP.pdb')\n", (6835, 6846), False, 'import os\n'), ((6852, 6872), 'os.remove', 'os.remove', (['"""PdP.pdb"""'], {}), "('PdP.pdb')\n", (6861, 6872), False, 'import os\n'), ((6878, 6896), 'sys.stdout.close', 'sys.stdout.close', ([], {}), '()\n', (6894, 6896), False, 'import sys\n'), ((6915, 6928), 'io.StringIO', 'io.StringIO', ([], {}), '()\n', (6926, 6928), False, 'import io\n'), ((6966, 6990), 'pygen_structures.__main__.main', 'cmd_interface.main', (['argv'], {}), '(argv)\n', (6984, 6990), True, 'from pygen_structures import __main__ as cmd_interface\n'), ((6995, 7013), 'sys.stdout.seek', 'sys.stdout.seek', (['(0)'], {}), '(0)\n', (7010, 7013), False, 'import sys\n'), ((7054, 7072), 'sys.stdout.close', 'sys.stdout.close', ([], {}), '()\n', (7070, 7072), False, 'import sys\n'), ((1520, 1545), 'warnings.catch_warnings', 
'warnings.catch_warnings', ([], {}), '()\n', (1543, 1545), False, 'import warnings\n'), ((1555, 1586), 'warnings.simplefilter', 'warnings.simplefilter', (['"""ignore"""'], {}), "('ignore')\n", (1576, 1586), False, 'import warnings\n'), ((1606, 1662), 'pygen_structures.convenience_functions.pdb_to_mol', 'pdb_to_mol', (['"""RAFF.pdb"""', 'rtf'], {'patches': "{'RAFF': (0, 1, 2)}"}), "('RAFF.pdb', rtf, patches={'RAFF': (0, 1, 2)})\n", (1616, 1662), False, 'from pygen_structures.convenience_functions import load_charmm_dir, pdb_to_mol\n'), ((1671, 1692), 'os.remove', 'os.remove', (['"""RAFF.pdb"""'], {}), "('RAFF.pdb')\n", (1680, 1692), False, 'import os\n'), ((6027, 6052), 'warnings.catch_warnings', 'warnings.catch_warnings', ([], {}), '()\n', (6050, 6052), False, 'import warnings\n'), ((6062, 6093), 'warnings.simplefilter', 'warnings.simplefilter', (['"""ignore"""'], {}), "('ignore')\n", (6083, 6093), False, 'import warnings\n'), ((6113, 6139), 'pygen_structures.convenience_functions.pdb_to_mol', 'pdb_to_mol', (['"""HEY.pdb"""', 'rtf'], {}), "('HEY.pdb', rtf)\n", (6123, 6139), False, 'from pygen_structures.convenience_functions import load_charmm_dir, pdb_to_mol\n'), ((6148, 6168), 'os.remove', 'os.remove', (['"""HEY.pdb"""'], {}), "('HEY.pdb')\n", (6157, 6168), False, 'import os\n'), ((6460, 6484), 'pygen_structures.__main__.main', 'cmd_interface.main', (['argv'], {}), '(argv)\n', (6478, 6484), True, 'from pygen_structures import __main__ as cmd_interface\n'), ((6577, 6602), 'os.path.exists', 'os.path.exists', (['"""PdP.psf"""'], {}), "('PdP.psf')\n", (6591, 6602), False, 'import os\n'), ((6619, 6644), 'os.path.exists', 'os.path.exists', (['"""PdP.pdb"""'], {}), "('PdP.pdb')\n", (6633, 6644), False, 'import os\n'), ((7025, 7042), 'sys.stdout.read', 'sys.stdout.read', ([], {}), '()\n', (7040, 7042), False, 'import sys\n')]
|
#coding:utf-8
import cv2
# Test whether the camera can be used
cap = cv2.VideoCapture(0)
while True:
    ret, frame = cap.read()
    if not ret:
        break
    cv2.imshow('MyVideo', frame)
    # Press 'q' to quit
    if (cv2.waitKey(25) & 0xFF) == ord('q'):
        break
cap.release()
cv2.destroyAllWindows()
|
[
"cv2.VideoCapture",
"cv2.imshow",
"cv2.waitKey"
] |
[((62, 81), 'cv2.VideoCapture', 'cv2.VideoCapture', (['(0)'], {}), '(0)\n', (78, 81), False, 'import cv2\n'), ((123, 151), 'cv2.imshow', 'cv2.imshow', (['"""MyVideo"""', 'frame'], {}), "('MyVideo', frame)\n", (133, 151), False, 'import cv2\n'), ((155, 170), 'cv2.waitKey', 'cv2.waitKey', (['(25)'], {}), '(25)\n', (166, 170), False, 'import cv2\n')]
|
from flask import Flask
from flask_sqlalchemy import SQLAlchemy
from flask_script import Manager
from flask_migrate import Migrate, MigrateCommand
from CTFd import create_app
from CTFd.utils import get_config as get_config_util, set_config as set_config_util
from CTFd.models import *
app = create_app()
manager = Manager(app)
manager.add_command("db", MigrateCommand)
def jsenums():
from CTFd.constants import JS_ENUMS
import json
import os
path = os.path.join(app.root_path, "themes/core/assets/js/constants.js")
with open(path, "w+") as f:
for k, v in JS_ENUMS.items():
f.write("const {} = Object.freeze({});".format(k, json.dumps(v)))
BUILD_COMMANDS = {"jsenums": jsenums}
@manager.command
def get_config(key):
with app.app_context():
print(get_config_util(key))
@manager.command
def set_config(key, value):
with app.app_context():
print(set_config_util(key, value).value)
@manager.command
def build(cmd):
with app.app_context():
cmd = BUILD_COMMANDS.get(cmd)
cmd()
if __name__ == "__main__":
manager.run()
|
[
"os.path.join",
"flask_script.Manager",
"CTFd.constants.JS_ENUMS.items",
"json.dumps",
"CTFd.utils.set_config",
"CTFd.utils.get_config",
"CTFd.create_app"
] |
[((300, 312), 'CTFd.create_app', 'create_app', ([], {}), '()\n', (310, 312), False, 'from CTFd import create_app\n'), ((326, 338), 'flask_script.Manager', 'Manager', (['app'], {}), '(app)\n', (333, 338), False, 'from flask_script import Manager\n'), ((489, 554), 'os.path.join', 'os.path.join', (['app.root_path', '"""themes/core/assets/js/constants.js"""'], {}), "(app.root_path, 'themes/core/assets/js/constants.js')\n", (501, 554), False, 'import os\n'), ((611, 627), 'CTFd.constants.JS_ENUMS.items', 'JS_ENUMS.items', ([], {}), '()\n', (625, 627), False, 'from CTFd.constants import JS_ENUMS\n'), ((839, 859), 'CTFd.utils.get_config', 'get_config_util', (['key'], {}), '(key)\n', (854, 859), True, 'from CTFd.utils import get_config as get_config_util, set_config as set_config_util\n'), ((956, 983), 'CTFd.utils.set_config', 'set_config_util', (['key', 'value'], {}), '(key, value)\n', (971, 983), True, 'from CTFd.utils import get_config as get_config_util, set_config as set_config_util\n'), ((692, 705), 'json.dumps', 'json.dumps', (['v'], {}), '(v)\n', (702, 705), False, 'import json\n')]
|
from matplotlib.backends.backend_agg import FigureCanvasAgg as FigureCanvas
from matplotlib.figure import Figure
import matplotlib.pyplot as plt
from matplotlib import cm
from mpl_toolkits.mplot3d import axes3d
from matplotlib.patches import Rectangle, PathPatch
from matplotlib.text import TextPath
from matplotlib.transforms import Affine2D
import mpl_toolkits.mplot3d.art3d as art3d
import numpy as np
import pandas as pd
from config import conf
import eigen as eig
import region as reg
import hiperbolica as hyp
import matrices_acoplamiento as m_acop
import distorsionador as v_dist
import matriz_gauss as m_gauss
import v_transpuestos as v_trans
import potencial as pot
import flujo as flj
__doc__ = """
This module plots the values with and without error for the different
components of the planar problem computation. It is the counterpart of all
the test functions.
"""
def prueba_valor_eigen(valores_eigen, vectores_valores_eigen, vectores_valores_eigen_err,\
n_dimension=100, error=1):
"""
    Function in charge of plotting with matplotlib the eigen vectors versus
    the eigen vectors computed with error. Equivalent to: f03_Valor_Eigen_Prueba()
    Input parameters:
        * valores_eigen: DataFrame holding the representative eigenvalues of
          all the regions.
        * vectores_valores_eigen: DataFrame storing the computed values of
          each eigen vector, i.e. Pn = [Veig[0],..., Veig[n_dimension]],
          Qn = [Veig[0],..., Veig[n_dimension]] and so on for the other vectors.
    Output:
        * Saves the figures in the folder "../graficas/vectores eigen". This
          folder must have been created beforehand so there are no conflicts
          when saving the plots.
    Note: for a worked example see the main function of graficacion.py.
"""
error_t = 'sin error ' if error == 1 else 'con error'
    N = range(1, n_dimension + 1) # For plotting purposes only
for chr_eigen in valores_eigen.index:
fig = plt.Figure()
ax = fig.add_subplot(111)
        # Find the maximum eigenvalue between the compared and the original values (for plotting purposes)
maximo = np.max((vectores_valores_eigen.loc[chr_eigen].max(), vectores_valores_eigen_err.loc[chr_eigen].max()))
fig.suptitle(f"Control {error_t} nr={n_dimension} del Valor Eigen {chr_eigen+'='+valores_eigen.loc[chr_eigen]['calcular_str']}")
ax.text(n_dimension / 8, 0.95 * maximo, """Prueba correcta si se imprime una sola curva.
Error si imprime dos curvas""")
        # Plot the transposed vector of eigenvalues of the function (no error), in red
        ax.plot(N, vectores_valores_eigen.loc[chr_eigen], 'r', label='sin error')
        # Plot the transposed vector of eigenvalues of the equation (with error), in blue
ax.plot(N, vectores_valores_eigen_err.loc[chr_eigen], 'b', label='con error')
ax.legend(loc='lower right')
filename = 'graficas/' + conf.data['env']['path'] + '/vectores eigen/control ' +\
error_t + " " + chr_eigen + ".png"
canvas = FigureCanvas(fig)
canvas.print_figure(filename)
        # Console output of the step being performed
print(f"* {chr_eigen+'='+valores_eigen.loc[chr_eigen]['calcular_str']}")
def prueba_matrices_diagonal_valores_eigen(valores_eigen, vectores_valores_eigen,\
vectores_valores_eigen_err_matriz, n_dimension=100, error=1):
"""
    Function in charge of plotting with matplotlib the diagonal matrices of
    eigenvalues versus the eigenvalue matrices computed with error.
    Equivalent to: f04_Diag_Valor_Eigen_Prueba()
    Input parameters:
        * valores_eigen: DataFrame holding the representative eigenvalues of
          all the regions.
        * vectores_valores_eigen: DataFrame storing the computed values of
          each eigen vector, i.e. Pn = [Veig[0],..., Veig[n_dimension]],
          Qn = [Veig[0],..., Veig[n_dimension]] and so on for the other vectors.
        * vectores_valores_eigen_err_matriz: DataFrame storing the diagonal
          matrix of the eigenvalues computed with a given error.
    Output:
        * Saves the figures in the folder "../graficas/matrices diagonales de
          valores eigen". This folder must have been created beforehand so
          there are no conflicts when saving the plots.
"""
error_t = 'sin error ' if error == 1 else 'con error'
    N = range(1, n_dimension + 1) # For plotting purposes only
for chr_eigen in vectores_valores_eigen.index:
fig = plt.Figure()
ax = fig.add_subplot(111)
        # Find the maximum eigenvalue between the compared and the original values (for text placement purposes)
maximo = np.max((vectores_valores_eigen.loc[chr_eigen].max(), vectores_valores_eigen_err_matriz.loc[chr_eigen].max()))
fig.suptitle(f"{error_t.capitalize()} - nr={n_dimension} - Matriz diagonal del valor Eigen: {chr_eigen+'='+valores_eigen.loc[chr_eigen]['calcular_str']}")
ax.text(n_dimension / 8, 0.95 * maximo, """Prueba correcta si se imprime una sola curva.
Error si imprime dos curvas""")
ax.plot(N, vectores_valores_eigen.loc[chr_eigen], 'r', label='sin error')
ax.plot(N, vectores_valores_eigen_err_matriz.loc[chr_eigen], 'b', label='con error')
ax.legend(loc='lower right')
filename = 'graficas/' + conf.data['env']['path'] + '/matrices diagonales de valores eigen/' +\
'control ' + error_t + " " + chr_eigen + ".png"
canvas = FigureCanvas(fig)
canvas.print_figure(filename)
print(f"* Matriz diagonal {chr_eigen+'='+valores_eigen.loc[chr_eigen]['calcular_str']}")
def prueba_matrices_diagonal_funciones_hiperbolicas(funciones_hiperbolicas, vectores_funciones_hiperbolicas,\
vectores_funciones_hiperbolicas_err, n_dimension=100, error=1):
"""
    Function in charge of plotting with matplotlib the diagonal matrices of
    the hyperbolic functions versus the matrices computed with error.
    Equivalent to: f05_Diag_Func_Hiper_Prueba()
    Input parameters:
        * funciones_hiperbolicas: DataFrame holding all the values needed to
          compute the vectors of hyperbolic functions.
        * vectores_funciones_hiperbolicas: DataFrame containing all the
          computed values of the hyperbolic functions of all the vectors.
        * vectores_funciones_hiperbolicas_err: DataFrame containing all the
          values of the hyperbolic functions of all the vectors computed
          with a given error.
    Output:
        * Saves the figures in the folder "../graficas/matrices diagonales de
          funciones hiperbolicas". This folder must have been created
          beforehand so there are no conflicts when saving the plots.
"""
error_t = 'sin error ' if error == 1 else 'con error'
    N = range(1, n_dimension + 1) # For plotting purposes only
    # An index is obtained from the diagonal matrices
    for nro_diagonal in funciones_hiperbolicas.index:
        # Find the maximum value between the compared and the original vectors (for plotting purposes)
fig = plt.Figure()
ax = fig.add_subplot(111)
maximo = np.max((vectores_funciones_hiperbolicas.loc[nro_diagonal].max(), vectores_funciones_hiperbolicas_err.loc[nro_diagonal].max()))
minimo = np.min((vectores_funciones_hiperbolicas.loc[nro_diagonal].min(), vectores_funciones_hiperbolicas_err.loc[nro_diagonal].min()))
fig.suptitle(f"{error_t.capitalize()} - nr={n_dimension} - Control de la matriz diagonal: {nro_diagonal+'='+funciones_hiperbolicas.loc[nro_diagonal]['calcular_str']}")
ax.text( 0.1 * n_dimension, minimo + ((maximo - minimo) / 2), """Prueba correcta si se imprime una sola curva.
Error si imprime dos curvas""")
# plt.axvline(0.1 * n_dimension, color='k', linestyle='solid')
# plt.axhline(.00005 * maximo, color='k', linestyle='solid')
ax.plot(N, vectores_funciones_hiperbolicas.loc[nro_diagonal], 'r', label='sin error')
ax.plot(N, vectores_funciones_hiperbolicas_err.loc[nro_diagonal], 'b', label='con error')
ax.legend(loc='lower right')
filename = 'graficas/' + conf.data['env']['path'] + '/matrices diagonales de funciones hiperbolicas/' +\
'control ' + error_t + " " + nro_diagonal + ".png"
canvas = FigureCanvas(fig)
canvas.print_figure(filename)
print(f"* Matriz diagonal {nro_diagonal+'='+funciones_hiperbolicas.loc[nro_diagonal]['calcular_str']}")
def prueba_matrices_cuadradas_acoplamiento(integrandos_matrices_acoplamiento, matrices_acoplamiento_int,\
matrices_acoplamiento_sol, n_dimension=100, error=1):
"""
    Function in charge of plotting with matplotlib the square coupling
    matrices: analytical solution versus the scipy quad solution.
    Equivalent to: f05_Diag_Func_Hiper_Prueba()
    Input parameters:
    Output:
        * Saves the figures in the folder "../graficas/matrices cuadradas de
          acoplamiento". This folder must have been created beforehand so
          there are no conflicts when saving the plots.
"""
error_t = 'sin error ' if error == 1 else 'con error'
    N = range(1, (n_dimension * n_dimension) + 1) # For plotting purposes only
    # An index is obtained from the integrandos_matrices_acoplamiento df
matrices_acoplamiento_sol = error * matrices_acoplamiento_sol
for M in integrandos_matrices_acoplamiento.index:
fig = plt.Figure()
ax = fig.add_subplot(111)
        # Find the maximum value of the two matrices (the one without error and the one with error) (for plotting purposes)
        # Important note: each matrix is stacked throughout the process to obtain a vector of all the values of the matrix
maximo = np.max((matrices_acoplamiento_int.loc[M].stack().loc[:n_dimension,:n_dimension].max(), matrices_acoplamiento_sol.loc[M].stack().loc[:n_dimension,:n_dimension].max()))
fig.suptitle(f"{error_t.capitalize()} - nr={n_dimension} - {M + '=' + integrandos_matrices_acoplamiento.loc[M, 'calcular_str']}")
# ax.text( 0.5 * (n_dimension ** 2), maximo, """Prueba correcta si se imprime una sola grafica.
# Error si imprime dos graficas""")
ax.plot(N, matrices_acoplamiento_int.loc[M].stack().loc[:n_dimension,:n_dimension], 'r', label='sol. integrate.quad')
ax.plot(N, matrices_acoplamiento_sol.loc[M].stack().loc[:n_dimension,:n_dimension], 'b', label='sol. analitica ' + error_t)
ax.legend(loc='lower right')
filename = 'graficas/' + conf.data['env']['path'] + '/matrices cuadradas de acoplamiento/' +\
'control ' + error_t + " " + M + ".png"
canvas = FigureCanvas(fig)
canvas.print_figure(filename)
print(f"* Matriz acompladora {M + '=' + integrandos_matrices_acoplamiento.loc[M, 'calcular_str']}")
def prueba_vectores_distorsionadores(integrandos_vectores_distorsionadores, vectores_distorsionadores_int,\
vectores_distorsionadores_sol, n_dimension=100, error=1):
"""
    Function in charge of plotting with matplotlib the distorting vectors
    versus the scipy quad solution.
    Equivalent to: f05_Diag_Func_Hiper_Prueba()
    Input parameters:
    Output:
        * Saves the figures in the folder "../graficas/vectores
          distorsionadores". This folder must have been created beforehand
          so there are no conflicts when saving the plots.
"""
error_t = 'sin error ' if error == 1 else 'con error'
    N = range(1, n_dimension + 1) # For plotting purposes only
    # An error factor is applied to the analytical solution
    vectores_distorsionadores_sol = error * vectores_distorsionadores_sol
    # An index is obtained from the integrandos_vectores_distorsionadores df
for Sm in integrandos_vectores_distorsionadores.index:
fig = plt.Figure()
ax = fig.add_subplot(111)
        # Find the maximum value of the two matrices (the one without error and the one with error) (for plotting purposes)
maximo = np.max((vectores_distorsionadores_int.loc[Sm][:n_dimension].max(), vectores_distorsionadores_sol.loc[Sm][:n_dimension].max()))
        ax.set_xticks(N)
fig.suptitle(f"{error_t.capitalize()} - nr={n_dimension} - Vector Dist.: {Sm + '=' + integrandos_vectores_distorsionadores.loc[Sm, 'calcular_str']}")
# ax.text( 0.5 * (n_dimension ** 2), maximo, """Prueba correcta si se imprime una sola grafica.
# Error si imprime dos graficas""")
ax.plot(N, vectores_distorsionadores_int.loc[Sm][:n_dimension], 'r', label='sol. integrate.quad')
ax.plot(N, vectores_distorsionadores_sol.loc[Sm][:n_dimension], 'b', label='sol. analitica '+error_t)
ax.legend(loc='upper right')
filename = 'graficas/' + conf.data['env']['path'] + '/vectores distorsionadores/' +\
'control ' + error_t + " " + Sm + ".png"
canvas = FigureCanvas(fig)
canvas.print_figure(filename)
print(f"* Vector distorsionador {Sm + '=' + integrandos_vectores_distorsionadores.loc[Sm, 'calcular_str']}")
def prueba_potencial(regiones, recursos_potencial, potenciales, potenciales_err, dimension_mesh,\
n_dimension=100, error =1):
"""
Funcion encargada de graficar con matplotlib los vectores eigen versus los
vectores eigen calculados con error.Equivalente a : f12_V_dV_Prueba()
Pametros de entrada:
Salida:
* Guarda las figuras en la carpeta ../graficas/potenciales . Esta car-
peta debe estar previamente creada para que no haya conflictos al mo-
mento de guardar las graficas.
"""
error_t = 'sin error ' if error == 1 else 'con error'
N = range(1, dimension_mesh + 1) # Solo para propositos de graficacion
for n_potencial in potenciales.index:
# Reg.1, Reg.2, ... Reg.n - Iteradores de las regiones
index_reg_actual = "Reg." + n_potencial.split('V')[1]
# Encuentre el maximo de valor eigen entre el comparado y el original (para efectos de graficacion)
fig = plt.Figure()
ax = fig.add_subplot(111)
        maximo = np.max((potenciales.loc[n_potencial].max(), potenciales_err.loc[n_potencial].max()))
        # np.min, not np.max: take the smaller of the two minima
        minimo = np.min((potenciales.loc[n_potencial].min(), potenciales_err.loc[n_potencial].min()))
        fig.suptitle(f"Con nr={n_dimension}- Prueba del potencial {error_t} de la {index_reg_actual}-{regiones.loc[index_reg_actual, 'eps']}.")
        ax.text(n_dimension / 8, 0.95 * maximo, """The test passes if it prints a single curve.
                                It fails if it prints two curves""")
        # Plot the potentials (with error), in red
        ax.plot(N, potenciales_err.loc[n_potencial], 'r', label='con error')
        # Plot the potentials (without error), in black
        ax.plot(N, potenciales.loc[n_potencial], 'k', label='sin error')
ax.legend(loc='lower right')
filename = 'graficas/' + conf.data['env']['path'] + '/potenciales/' +\
'control ' + error_t + " " + n_potencial + ".png"
canvas = FigureCanvas(fig)
canvas.print_figure(filename)
        # Console output reporting the step being performed
print(f"* {n_potencial}={recursos_potencial.loc[n_potencial,'calcular_str']}")
def prueba_flujo(regiones, recursos_flujo, flujos, flujos_err, dimension_mesh,\
n_dimension=100, error=1):
"""
Funcion encargada de graficar con matplotlib los vectores eigen versus los
vectores eigen calculados con error.Equivalente a : f12_V_dV_Prueba()
Pametros de entrada:
Salida:
* Guarda las figuras en la carpeta ../graficas/flujos. Esta carpeta
debe estar previamente creada para que no haya conflictos al momento
de guardar las graficas.
"""
error_t = 'sin error ' if error == 1 else 'con error'
N = range(1, dimension_mesh + 1) # Solo para propositos de graficacion
for n_flujo in flujos.index:
# Reg.1, Reg.2, ... Reg.n - Iteradores de las regiones
index_reg_actual = "Reg." + n_flujo.split('V')[1]
# Encuentre el maximo de valor eigen entre el comparado y el original (para efectos de graficacion)
fig = plt.Figure()
ax = fig.add_subplot(111)
        maximo = np.max((flujos.loc[n_flujo].max(), flujos_err.loc[n_flujo].max()))
        # np.min, not np.max: take the smaller of the two minima
        minimo = np.min((flujos.loc[n_flujo].min(), flujos_err.loc[n_flujo].min()))
        fig.suptitle(f"Con nr={n_dimension}- Prueba del flujo {error_t} de la {index_reg_actual}-{regiones.loc[index_reg_actual, 'eps']}.")
        # ax.text(n_dimension / 8, 0.95 * maximo, """The test passes if it prints a single curve.
        #                                   It fails if it prints two curves""")
        # Plot the fluxes (with error), in red
        ax.plot(N, flujos_err.loc[n_flujo], 'r', label='con error')
        # Plot the fluxes (without error), in black
ax.plot(N, flujos.loc[n_flujo], 'k', label='sin error')
ax.legend(loc='lower right')
filename = 'graficas/' + conf.data['env']['path'] + '/flujos/' +\
'control ' + error_t + " " + n_flujo + ".png"
canvas = FigureCanvas(fig)
canvas.print_figure(filename)
        # Console output reporting the step being performed
print(f"* {n_flujo}={recursos_flujo.loc[n_flujo,'calcular_str']}")
def control_de_continuidad(regiones, potenciales, mesh_regiones, n_dimension):
continuidad = pd.read_csv('csv/' + conf.data['env']['path'] + '/continuidad.csv')
for index in continuidad.index:
fig = plt.figure()
R_sup = continuidad.loc[index,'region_superior'].split('R')[1]
R_inf = continuidad.loc[index,'region_inferior'].split('R')[1]
X_sup = mesh_regiones.loc['Reg.'+R_sup,'x'].to_numpy()
X_sup = np.reshape(X_sup, (int(np.sqrt(len(X_sup))),int(np.sqrt(len(X_sup)))))[0]
X_inf = mesh_regiones.loc['Reg.'+R_inf,'x'].to_numpy()
X_inf = np.reshape(X_inf, (int(np.sqrt(len(X_inf))),int(np.sqrt(len(X_inf)))))[0]
pot_superior = potenciales.loc['V'+R_sup].to_numpy()
pot_superior = np.reshape(pot_superior , (int(np.sqrt(len(pot_superior))),int(np.sqrt(len(pot_superior)))))[0]
pot_inferior = potenciales.loc['V'+R_inf].to_numpy()
pot_inferior = np.reshape(pot_inferior, (int(np.sqrt(len(pot_inferior))),int(np.sqrt(len(pot_inferior)))))[-1]
left_bar = [continuidad.loc[index,'xi'],continuidad.loc[index,'xi']]
right_bar = [continuidad.loc[index,'xf'],continuidad.loc[index,'xf']]
plt.title(f"Con nr={n_dimension}- Prueba de continuidad potencial de la Reg.{R_inf} a la Reg.{R_sup}")
plt.plot(X_sup, pot_superior, 'r')
plt.plot(X_inf, pot_inferior, 'b')
        # These are the points where the two curves must coincide
plt.plot(left_bar, [-2,2])
plt.plot(right_bar, [-2,2])
filename ='graficas/' + conf.data['env']['path'] + '/continuidad de potencial/'+ f'Reg.{R_inf} a la Reg.{R_sup}.png'
canvas = FigureCanvas(fig)
canvas.print_figure(filename)
plt.close()
def graficas_potencial(regiones, potenciales, mesh_regiones, n_dimension):
for n_potencial in potenciales.index:
index_reg_actual = "Reg." + n_potencial.split('V')[1]
pot = potenciales.loc[n_potencial].to_numpy()
pot = np.reshape(pot, (int(np.sqrt(len(pot))),int(np.sqrt(len(pot)))))
x_flat = mesh_regiones.loc[index_reg_actual,'x'].to_numpy()
x_flat = np.reshape(x_flat, (int(np.sqrt(len(x_flat))),int(np.sqrt(len(x_flat)))))
y_flat = mesh_regiones.loc[index_reg_actual,'y'].to_numpy()
y_flat = np.reshape(y_flat, (int(np.sqrt(len(y_flat))),int(np.sqrt(len(y_flat)))))
fig = plt.figure()
ax = fig.add_subplot(111, projection='3d')
fig.suptitle(f"Con nr={n_dimension}- surf del potencial de la {index_reg_actual}-{regiones.loc[index_reg_actual, 'eps']}.")
ax.plot_surface(x_flat,y_flat,pot,cmap=cm.autumn)
#ax.view_init(0,-90)
filename = 'graficas/' + conf.data['env']['path'] + '/potenciales/surf/' +\
'Surf' + " " + n_potencial + ".png"
canvas = FigureCanvas(fig)
canvas.print_figure(filename)
plt.close()
print('.', end='')
print()
def grafica_de_potencial_total(regiones, potenciales, mesh_regiones, n_dimension):
fig = plt.figure()
ax = fig.add_subplot(111, projection='3d')
plt.title(f"Grafica de los niveles de potencial de todas las regiones")
for n_potencial in potenciales.index:
index_reg_actual = "Reg." + n_potencial.split('V')[1]
pot = potenciales.loc[n_potencial].to_numpy()
pot = np.reshape(pot, (int(np.sqrt(len(pot))),int(np.sqrt(len(pot)))))
x_flat = mesh_regiones.loc[index_reg_actual,'x'].to_numpy()
x_flat = np.reshape(x_flat, (int(np.sqrt(len(x_flat))),int(np.sqrt(len(x_flat)))))
y_flat = mesh_regiones.loc[index_reg_actual,'y'].to_numpy()
y_flat = np.reshape(y_flat, (int(np.sqrt(len(y_flat))),int(np.sqrt(len(y_flat)))))
ax.plot_surface(x_flat,y_flat,pot,cmap=cm.autumn)
ax.view_init(0,-90)
filename ='graficas/' + conf.data['env']['path'] + '/Grafica de Potencial total.png'
canvas = FigureCanvas(fig)
canvas.print_figure(filename)
plt.close()
def draw_rectangle(ax, inicio= 0, ancho= 2,direction = 'y',desp= 2, alto= 3, fill= True):
rect = Rectangle((inicio,0), width= ancho, height=alto, fill= fill)
if not fill:
rect.set_edgecolor('r')
else:
rect.set_edgecolor('k')
ax.add_patch(rect)
art3d.pathpatch_2d_to_3d(rect, z=desp, zdir=direction)
def draw_text(ax, x, y, z=1, cadena=''):
text_path = TextPath((0, 0), cadena, size=.35)
trans = Affine2D().translate(x, y)
t1 = PathPatch(trans.transform_path(text_path), fc='k')
ax.add_patch(t1)
art3d.pathpatch_2d_to_3d(t1, z=z, zdir='z')
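# Minimal usage sketch (added; not part of the original script) for the
# 2D-patch-in-3D helpers above, under the same imports already in scope:
#     fig = plt.figure()
#     ax = fig.add_subplot(111, projection='3d')
#     draw_rectangle(ax, inicio=0, ancho=2, direction='y', desp=1, alto=3)
#     draw_text(ax, 0.5, 0.5, z=1, cadena='demo')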
def draw_region3d(ax, xi, xf, yi, yf, fronteras, n_region, material, z=1, xmax=None):
    # Text label for the region
x_texto = xi if xi<xf else xf
desp_t = abs(xf-xi)*.2 if abs(xf-xi)==1 else abs(xf-xi)*.4
draw_text(ax,x_texto+desp_t,yi + (yf-yi)*.3,z,f'R{n_region} {material}')
for lugar, valor in fronteras.items():
if lugar=='arriba':
x_t = xi+(xf-xi)/2
y_t = yf
elif lugar=='abajo':
x_t = xi+(xf-xi)/2
y_t = yi
elif lugar=='derecha':
x_t = xf
y_t = yi+(yf-yi)/2
elif lugar=='izquierda':
x_t = xi
y_t = yi+(yf-yi)/2
if valor == 'Uno': texto = f'V{n_region}=1'
elif valor == 'Cero': texto = f'V{n_region}=0'
elif valor == 'SIM': texto = 'SIM'
if valor in ['Uno','Cero','SIM']: draw_text(ax,x_t,y_t,z,texto)
direccion = 'y' if lugar=='arriba' or lugar=='abajo' else 'x'
punto_inicial = xi if lugar=='arriba' or lugar=='abajo' else yi
ancho = (xf-xi) if lugar=='arriba' or lugar=='abajo' else (yf-yi)
if lugar=='arriba': desp=yf
elif lugar=='abajo': desp=yi
elif lugar=='derecha': desp=xf
elif lugar=='izquierda': desp=xi
if valor == 'Uno' or valor == 'Cero':
draw_rectangle(ax, inicio= punto_inicial, ancho= ancho, direction= direccion, desp= desp, alto= z)
elif valor=='no' or valor=='SIM':
draw_rectangle(ax, inicio= punto_inicial, ancho= ancho, direction= direccion, desp= desp, alto= z,fill=False)
else:
            # Composite boundaries:
front_list = [x.split('-') for x in valor.split('/')]
for pseudo_frontera in front_list:
punto_inicial = int(pseudo_frontera[1])
ancho = (int(pseudo_frontera[2])-int(pseudo_frontera[1]))
if pseudo_frontera[0] == 'Uno' or pseudo_frontera[0] == 'Cero':
                    # SIM left
draw_rectangle(ax, inicio= -punto_inicial, ancho= -ancho, direction= direccion, desp= desp, alto= z)
                    # Center
draw_rectangle(ax, inicio= punto_inicial, ancho= ancho, direction= direccion, desp= desp, alto= z)
                    # SIM right
draw_rectangle(ax, inicio= -punto_inicial+2*xmax, ancho= -ancho, direction= direccion, desp= desp, alto= z)
elif pseudo_frontera[0]=='no' or pseudo_frontera[0]=='SIM':
                    # SIM left
draw_rectangle(ax, inicio= -punto_inicial, ancho= -ancho, direction= direccion, desp= desp, alto= z,fill=False)
                    # Center
draw_rectangle(ax, inicio= punto_inicial, ancho= ancho, direction= direccion, desp= desp, alto= z,fill=False)
                    # SIM right
draw_rectangle(ax, inicio= -punto_inicial+2*xmax, ancho= -ancho, direction= direccion, desp= desp, alto= z,fill=False)
def graficar_problema_plano_3D(regiones,z=2):
fronteras = pd.read_csv('csv/' + conf.data['env']['path'] + '/fronteras.csv')
xmax, ymax = max(regiones['xf']), max(regiones['yf'])
fig = plt.figure()
fig.suptitle('Grafica tridimensional del problema plano con 2 simetrias')
ax = fig.add_subplot(111, projection='3d')
filename = 'graficas/' + conf.data['env']['path'] + "/Problema Plano 3D.png"
for i,region in enumerate(regiones.index):
xi, xf = regiones.loc[region,'xi'], regiones.loc[region,'xf']
yi, yf = regiones.loc[region,'yi'], regiones.loc[region,'yf']
        # Left copy
draw_region3d(ax,-xi,-xf,yi,yf,fronteras.loc[i],i+1,regiones.loc[region, 'eps'],z,xmax)
        # Central copy
draw_region3d(ax,xi,xf,yi,yf,fronteras.loc[i],i+1,regiones.loc[region, 'eps'],z,xmax)
        # Right copy
draw_region3d(ax,-xi+2*xmax,-xf+2*xmax,yi,yf,fronteras.loc[i],i+1,regiones.loc[region, 'eps'],z,xmax)
ax.set_xlim(-xmax, 2*xmax)
ax.set_ylim(0, ymax)
ax.set_zlim(0, z+2)
#ax.view_init(60,-60)
ax.view_init(80,-70)
fig.set_size_inches(14,8)
canvas = FigureCanvas(fig)
canvas.print_figure(filename)
def draw_region(ax, xi, xf, yi, yf, fronteras,n_region,material, xmax,sim='der'):
x_texto = xi if xi<xf else xf
desp_t = abs(xf-xi)*.2 if abs(xf-xi)==1 else abs(xf-xi)*.4
ax.annotate(f'Reg{n_region}\n{material}',(x_texto+desp_t,yi + (yf-yi)*.3))
for lugar, valor in fronteras.items():
if lugar=='arriba':
angulo = 0
x_t = xi+(xf-xi)*.2 if sim ==None else xi+(xf-xi)*.8
y_t = yf
elif lugar=='abajo':
angulo = 0
x_t = xi+(xf-xi)*.2 if sim ==None else xi+(xf-xi)*.8
y_t = yi
elif lugar=='derecha':
angulo = 90
x_t = xf
y_t = yi+ (yf-yi)*.1
elif lugar=='izquierda':
angulo = 90
x_t = xi
y_t = yi+ (yf-yi)*.1
if valor == 'Uno': texto = f'V{n_region}=1'
elif valor == 'Cero': texto = f'V{n_region}=0'
elif valor == 'SIM': texto = 'SIM'
if valor in ['Uno','Cero','SIM']: ax.annotate(texto,(x_t,y_t),rotation=angulo)
if valor == 'Uno' or valor == 'Cero':
if lugar == 'arriba': ax.plot([xi,xf],[yf,yf],'k',lw=3)
if lugar == 'abajo': ax.plot([xi,xf],[yi,yi],'k',lw=3)
if lugar == 'derecha': ax.plot([xf,xf],[yi,yf],'k',lw=3)
if lugar == 'izquierda': ax.plot([xi,xi],[yi,yf],'k',lw=3)
elif valor == 'SIM' or valor == 'no':
if lugar == 'arriba': ax.plot([xi,xf],[yf,yf],'r',lw=2)
if lugar == 'abajo': ax.plot([xi,xf],[yi,yi],'r',lw=2)
if lugar == 'derecha': ax.plot([xf,xf],[yi,yf],'r',lw=2)
if lugar == 'izquierda': ax.plot([xi,xi],[yi,yf],'r',lw=2)
else:
            # Composite boundaries
front_list = [x.split('-') for x in valor.split('/')]
for pseudo_frontera in front_list:
pi = int(pseudo_frontera[1])
pf = int(pseudo_frontera[2])
if pseudo_frontera[0] == 'Uno' or pseudo_frontera[0] == 'Cero':
color ='k'
ancho = 3
elif pseudo_frontera[0] == 'SIM' or pseudo_frontera[0] == 'no':
color ='r'
ancho = 2
if lugar == 'arriba':
if sim == 'izq': ax.plot([-pi,-pf],[yf,yf],color,lw=ancho)
ax.plot([pi,pf],[yf,yf],color,lw=ancho)
if sim == 'der':ax.plot([-pi+2*xmax,-pf+2*xmax],[yf,yf],color,lw=ancho)
if lugar == 'abajo':
if sim == 'izq': ax.plot([-pi,-pf],[yi,yi],color,lw=ancho)
ax.plot([pi,pf],[yi,yi],color,lw=ancho)
if sim == 'der': ax.plot([-pi+2*xmax,-pf+2*xmax],[yi,yi],color,lw=ancho)
def graficar_problema_plano_2D(regiones):
fronteras = pd.read_csv('csv/' + conf.data['env']['path'] + '/fronteras.csv')
xmax, ymax = max(regiones['xf']), max(regiones['yf'])
fig, ax= plt.subplots()
fig.suptitle('Grafica bidimensional del problema plano con 2 simetrias')
filename = 'graficas/' + conf.data['env']['path'] + "/Problema Plano 2D.png"
for i,region in enumerate(regiones.index):
xi, xf = regiones.loc[region,'xi'], regiones.loc[region,'xf']
yi, yf = regiones.loc[region,'yi'], regiones.loc[region,'yf']
        # Left copy
draw_region(ax,-xi,-xf,yi,yf,fronteras.loc[i],i+1,regiones.loc[region, 'eps'],xmax,sim='izq')
        # Central copy
draw_region(ax,xi,xf,yi,yf,fronteras.loc[i],i+1,regiones.loc[region, 'eps'],xmax,sim=None)
        # Right copy
draw_region(ax,-xi+2*xmax,-xf+2*xmax,yi,yf,fronteras.loc[i],i+1,regiones.loc[region, 'eps'],xmax,sim='der')
ax.set_xticks(range(xmax+1))
ax.set_yticks(range(ymax+1))
ax.grid()
fig.set_size_inches(14,8)
canvas = FigureCanvas(fig)
canvas.print_figure(filename)
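# Hypothetical driver sketch (added; not part of the original script). It
# assumes `regiones` is a DataFrame indexed by region name with the columns
# these functions read ('xi', 'xf', 'yi', 'yf', 'eps'); the regiones.csv path
# is a placeholder, and the fronteras.csv referenced above must exist:
#     regiones = pd.read_csv('csv/' + conf.data['env']['path'] + '/regiones.csv', index_col=0)
#     graficar_problema_plano_2D(regiones)
#     graficar_problema_plano_3D(regiones, z=2)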
|
[
"matplotlib.pyplot.title",
"matplotlib.text.TextPath",
"matplotlib.pyplot.plot",
"matplotlib.patches.Rectangle",
"pandas.read_csv",
"matplotlib.pyplot.close",
"matplotlib.backends.backend_agg.FigureCanvasAgg",
"matplotlib.pyplot.Figure",
"matplotlib.pyplot.figure",
"mpl_toolkits.mplot3d.art3d.pathpatch_2d_to_3d",
"matplotlib.pyplot.xticks",
"matplotlib.pyplot.subplots",
"matplotlib.transforms.Affine2D"
] |
[((18171, 18238), 'pandas.read_csv', 'pd.read_csv', (["('csv/' + conf.data['env']['path'] + '/continuidad.csv')"], {}), "('csv/' + conf.data['env']['path'] + '/continuidad.csv')\n", (18182, 18238), True, 'import pandas as pd\n'), ((21122, 21134), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (21132, 21134), True, 'import matplotlib.pyplot as plt\n'), ((21186, 21257), 'matplotlib.pyplot.title', 'plt.title', (['f"""Grafica de los niveles de potencial de todas las regiones"""'], {}), "(f'Grafica de los niveles de potencial de todas las regiones')\n", (21195, 21257), True, 'import matplotlib.pyplot as plt\n'), ((22002, 22019), 'matplotlib.backends.backend_agg.FigureCanvasAgg', 'FigureCanvas', (['fig'], {}), '(fig)\n', (22014, 22019), True, 'from matplotlib.backends.backend_agg import FigureCanvasAgg as FigureCanvas\n'), ((22058, 22069), 'matplotlib.pyplot.close', 'plt.close', ([], {}), '()\n', (22067, 22069), True, 'import matplotlib.pyplot as plt\n'), ((22172, 22231), 'matplotlib.patches.Rectangle', 'Rectangle', (['(inicio, 0)'], {'width': 'ancho', 'height': 'alto', 'fill': 'fill'}), '((inicio, 0), width=ancho, height=alto, fill=fill)\n', (22181, 22231), False, 'from matplotlib.patches import Rectangle, PathPatch\n'), ((22351, 22405), 'mpl_toolkits.mplot3d.art3d.pathpatch_2d_to_3d', 'art3d.pathpatch_2d_to_3d', (['rect'], {'z': 'desp', 'zdir': 'direction'}), '(rect, z=desp, zdir=direction)\n', (22375, 22405), True, 'import mpl_toolkits.mplot3d.art3d as art3d\n'), ((22465, 22500), 'matplotlib.text.TextPath', 'TextPath', (['(0, 0)', 'cadena'], {'size': '(0.35)'}), '((0, 0), cadena, size=0.35)\n', (22473, 22500), False, 'from matplotlib.text import TextPath\n'), ((22625, 22668), 'mpl_toolkits.mplot3d.art3d.pathpatch_2d_to_3d', 'art3d.pathpatch_2d_to_3d', (['t1'], {'z': 'z', 'zdir': '"""z"""'}), "(t1, z=z, zdir='z')\n", (22649, 22668), True, 'import mpl_toolkits.mplot3d.art3d as art3d\n'), ((25727, 25792), 'pandas.read_csv', 'pd.read_csv', (["('csv/' + conf.data['env']['path'] + '/fronteras.csv')"], {}), "('csv/' + conf.data['env']['path'] + '/fronteras.csv')\n", (25738, 25792), True, 'import pandas as pd\n'), ((25862, 25874), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (25872, 25874), True, 'import matplotlib.pyplot as plt\n'), ((26801, 26818), 'matplotlib.backends.backend_agg.FigureCanvasAgg', 'FigureCanvas', (['fig'], {}), '(fig)\n', (26813, 26818), True, 'from matplotlib.backends.backend_agg import FigureCanvasAgg as FigureCanvas\n'), ((29693, 29758), 'pandas.read_csv', 'pd.read_csv', (["('csv/' + conf.data['env']['path'] + '/fronteras.csv')"], {}), "('csv/' + conf.data['env']['path'] + '/fronteras.csv')\n", (29704, 29758), True, 'import pandas as pd\n'), ((29831, 29845), 'matplotlib.pyplot.subplots', 'plt.subplots', ([], {}), '()\n', (29843, 29845), True, 'import matplotlib.pyplot as plt\n'), ((30689, 30706), 'matplotlib.backends.backend_agg.FigureCanvasAgg', 'FigureCanvas', (['fig'], {}), '(fig)\n', (30701, 30706), True, 'from matplotlib.backends.backend_agg import FigureCanvasAgg as FigureCanvas\n'), ((2090, 2102), 'matplotlib.pyplot.Figure', 'plt.Figure', ([], {}), '()\n', (2100, 2102), True, 'import matplotlib.pyplot as plt\n'), ((3200, 3217), 'matplotlib.backends.backend_agg.FigureCanvasAgg', 'FigureCanvas', (['fig'], {}), '(fig)\n', (3212, 3217), True, 'from matplotlib.backends.backend_agg import FigureCanvasAgg as FigureCanvas\n'), ((4793, 4805), 'matplotlib.pyplot.Figure', 'plt.Figure', ([], {}), '()\n', (4803, 4805), True, 'import matplotlib.pyplot as 
plt\n'), ((5795, 5812), 'matplotlib.backends.backend_agg.FigureCanvasAgg', 'FigureCanvas', (['fig'], {}), '(fig)\n', (5807, 5812), True, 'from matplotlib.backends.backend_agg import FigureCanvasAgg as FigureCanvas\n'), ((7606, 7618), 'matplotlib.pyplot.Figure', 'plt.Figure', ([], {}), '()\n', (7616, 7618), True, 'import matplotlib.pyplot as plt\n'), ((8854, 8871), 'matplotlib.backends.backend_agg.FigureCanvasAgg', 'FigureCanvas', (['fig'], {}), '(fig)\n', (8866, 8871), True, 'from matplotlib.backends.backend_agg import FigureCanvasAgg as FigureCanvas\n'), ((10063, 10075), 'matplotlib.pyplot.Figure', 'plt.Figure', ([], {}), '()\n', (10073, 10075), True, 'import matplotlib.pyplot as plt\n'), ((11337, 11354), 'matplotlib.backends.backend_agg.FigureCanvasAgg', 'FigureCanvas', (['fig'], {}), '(fig)\n', (11349, 11354), True, 'from matplotlib.backends.backend_agg import FigureCanvasAgg as FigureCanvas\n'), ((12560, 12572), 'matplotlib.pyplot.Figure', 'plt.Figure', ([], {}), '()\n', (12570, 12572), True, 'import matplotlib.pyplot as plt\n'), ((12892, 12905), 'matplotlib.pyplot.xticks', 'plt.xticks', (['N'], {}), '(N)\n', (12902, 12905), True, 'import matplotlib.pyplot as plt\n'), ((13644, 13661), 'matplotlib.backends.backend_agg.FigureCanvasAgg', 'FigureCanvas', (['fig'], {}), '(fig)\n', (13656, 13661), True, 'from matplotlib.backends.backend_agg import FigureCanvasAgg as FigureCanvas\n'), ((14802, 14814), 'matplotlib.pyplot.Figure', 'plt.Figure', ([], {}), '()\n', (14812, 14814), True, 'import matplotlib.pyplot as plt\n'), ((15804, 15821), 'matplotlib.backends.backend_agg.FigureCanvasAgg', 'FigureCanvas', (['fig'], {}), '(fig)\n', (15816, 15821), True, 'from matplotlib.backends.backend_agg import FigureCanvasAgg as FigureCanvas\n'), ((16949, 16961), 'matplotlib.pyplot.Figure', 'plt.Figure', ([], {}), '()\n', (16959, 16961), True, 'import matplotlib.pyplot as plt\n'), ((17878, 17895), 'matplotlib.backends.backend_agg.FigureCanvasAgg', 'FigureCanvas', (['fig'], {}), '(fig)\n', (17890, 17895), True, 'from matplotlib.backends.backend_agg import FigureCanvasAgg as FigureCanvas\n'), ((18290, 18302), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (18300, 18302), True, 'import matplotlib.pyplot as plt\n'), ((19282, 19394), 'matplotlib.pyplot.title', 'plt.title', (['f"""Con nr={n_dimension}- Prueba de continuidad potencial de la Reg.{R_inf} a la Reg.{R_sup}"""'], {}), "(\n f'Con nr={n_dimension}- Prueba de continuidad potencial de la Reg.{R_inf} a la Reg.{R_sup}'\n )\n", (19291, 19394), True, 'import matplotlib.pyplot as plt\n'), ((19394, 19428), 'matplotlib.pyplot.plot', 'plt.plot', (['X_sup', 'pot_superior', '"""r"""'], {}), "(X_sup, pot_superior, 'r')\n", (19402, 19428), True, 'import matplotlib.pyplot as plt\n'), ((19437, 19471), 'matplotlib.pyplot.plot', 'plt.plot', (['X_inf', 'pot_inferior', '"""b"""'], {}), "(X_inf, pot_inferior, 'b')\n", (19445, 19471), True, 'import matplotlib.pyplot as plt\n'), ((19545, 19572), 'matplotlib.pyplot.plot', 'plt.plot', (['left_bar', '[-2, 2]'], {}), '(left_bar, [-2, 2])\n', (19553, 19572), True, 'import matplotlib.pyplot as plt\n'), ((19580, 19608), 'matplotlib.pyplot.plot', 'plt.plot', (['right_bar', '[-2, 2]'], {}), '(right_bar, [-2, 2])\n', (19588, 19608), True, 'import matplotlib.pyplot as plt\n'), ((19751, 19768), 'matplotlib.backends.backend_agg.FigureCanvasAgg', 'FigureCanvas', (['fig'], {}), '(fig)\n', (19763, 19768), True, 'from matplotlib.backends.backend_agg import FigureCanvasAgg as FigureCanvas\n'), ((19815, 19826), 
'matplotlib.pyplot.close', 'plt.close', ([], {}), '()\n', (19824, 19826), True, 'import matplotlib.pyplot as plt\n'), ((20473, 20485), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (20483, 20485), True, 'import matplotlib.pyplot as plt\n'), ((20912, 20929), 'matplotlib.backends.backend_agg.FigureCanvasAgg', 'FigureCanvas', (['fig'], {}), '(fig)\n', (20924, 20929), True, 'from matplotlib.backends.backend_agg import FigureCanvasAgg as FigureCanvas\n'), ((20976, 20987), 'matplotlib.pyplot.close', 'plt.close', ([], {}), '()\n', (20985, 20987), True, 'import matplotlib.pyplot as plt\n'), ((22512, 22522), 'matplotlib.transforms.Affine2D', 'Affine2D', ([], {}), '()\n', (22520, 22522), False, 'from matplotlib.transforms import Affine2D\n')]
|
import csv
def raw_data_gen(n):
'''
    Generator for mock data;
    yields generators of strings.
'''
for i in range(n):
yield (f'{i}_{j}' for j in range(4))
# create/overwrite a file with raw data
with open('data_file.csv', 'w', newline='') as data_buffer:
file_writer = csv.writer(data_buffer)
file_writer.writerows(raw_data_gen(5))
# read a file with raw data and print its rows
with open('data_file.csv', 'r', newline='') as data_buffer:
file_reader = csv.reader(data_buffer)
for row in file_reader:
print(row)
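# Added variant (a sketch, not part of the original snippet): csv.writer and
# csv.reader accept any file-like object, so the same round-trip also works
# fully in memory with io.StringIO.
import io
buffer = io.StringIO()
csv.writer(buffer).writerows(raw_data_gen(2))
buffer.seek(0)
for row in csv.reader(buffer):
    print(row)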
|
[
"csv.reader",
"csv.writer"
] |
[((291, 314), 'csv.writer', 'csv.writer', (['data_buffer'], {}), '(data_buffer)\n', (301, 314), False, 'import csv\n'), ((479, 502), 'csv.reader', 'csv.reader', (['data_buffer'], {}), '(data_buffer)\n', (489, 502), False, 'import csv\n')]
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# Copyright 2020-2022 Barcelona Supercomputing Center (BSC), Spain
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import absolute_import
import json
import lzma
import os
import shutil
import subprocess
import tempfile
from typing import Dict, List, Mapping, Optional, Sequence, Tuple, Union
from typing import cast
import uuid
from .common import AbsPath, RelPath, URIType
from .common import Container, ContainerType
from .common import ContainerFileNamingMethod, ContainerTaggedName
from .common import DEFAULT_DOCKER_CMD
from .container import ContainerFactory, ContainerFactoryException
from .utils.contents import link_or_copy
from .utils.digests import ComputeDigestFromFile, ComputeDigestFromObject, nihDigester
DOCKER_PROTO = 'docker://'
class DockerContainerFactory(ContainerFactory):
    def __init__(self, cacheDir=None, local_config=None, engine_name='unset', tempDir=None):
        super().__init__(cacheDir=cacheDir, local_config=local_config, engine_name=engine_name, tempDir=tempDir)
        # Guard against local_config being None before looking up the docker command
        self.runtime_cmd = (local_config or {}).get('tools', {}).get('dockerCommand', DEFAULT_DOCKER_CMD)
@classmethod
def ContainerType(cls) -> ContainerType:
return ContainerType.Docker
def _inspect(self, dockerTag : ContainerTaggedName, matEnv: Mapping[str,str]) -> Tuple[int, str, str]:
with tempfile.NamedTemporaryFile() as d_out, tempfile.NamedTemporaryFile() as d_err:
self.logger.debug(f"querying docker container {dockerTag}")
d_retval = subprocess.Popen(
[self.runtime_cmd, 'inspect', dockerTag],
env=matEnv,
stdout=d_out,
stderr=d_err
).wait()
self.logger.debug(f"docker inspect {dockerTag} retval: {d_retval}")
with open(d_out.name, mode="rb") as c_stF:
d_out_v = c_stF.read().decode('utf-8', errors='continue')
with open(d_err.name, mode="rb") as c_stF:
d_err_v = c_stF.read().decode('utf-8', errors='continue')
self.logger.debug(f"docker inspect stdout: {d_out_v}")
self.logger.debug(f"docker inspect stderr: {d_err_v}")
return d_retval , d_out_v , d_err_v
def _pull(self, dockerTag : ContainerTaggedName, matEnv: Mapping[str,str]) -> Tuple[int, str, str]:
with tempfile.NamedTemporaryFile() as d_out, tempfile.NamedTemporaryFile() as d_err:
self.logger.debug(f"pulling docker container {dockerTag}")
d_retval = subprocess.Popen(
[self.runtime_cmd, 'pull', dockerTag],
env=matEnv,
stdout=d_out,
stderr=d_err
).wait()
self.logger.debug(f"docker pull {dockerTag} retval: {d_retval}")
with open(d_out.name, mode="r") as c_stF:
d_out_v = c_stF.read()
with open(d_err.name,"r") as c_stF:
d_err_v = c_stF.read()
self.logger.debug(f"docker pull stdout: {d_out_v}")
self.logger.debug(f"docker pull stderr: {d_err_v}")
return d_retval , d_out_v , d_err_v
def _save(self, dockerTag: ContainerTaggedName, destfile: AbsPath, matEnv: Mapping[str,str]) -> Tuple[int, str]:
with lzma.open(destfile, mode='wb') as d_out, tempfile.NamedTemporaryFile() as d_err:
self.logger.debug(f"saving docker container {dockerTag}")
with subprocess.Popen(
[self.runtime_cmd, 'save', dockerTag],
env=matEnv,
stdout=subprocess.PIPE,
stderr=d_err
) as sp:
if sp.stdout is not None:
shutil.copyfileobj(sp.stdout, d_out)
d_retval = sp.wait()
self.logger.debug(f"docker save {dockerTag} retval: {d_retval}")
with open(d_err.name, "r") as c_stF:
d_err_v = c_stF.read()
self.logger.debug(f"docker save stderr: {d_err_v}")
return d_retval , d_err_v
def materializeContainers(self, tagList: Sequence[ContainerTaggedName], simpleFileNameMethod: ContainerFileNamingMethod, containers_dir: Optional[Union[RelPath, AbsPath]] = None, offline: bool = False) -> Sequence[Container]:
"""
It is assured the containers are materialized
"""
containersList = []
matEnv = dict(os.environ)
matEnv.update(self.environment)
for tag in tagList:
# It is an absolute URL, we are removing the docker://
dockerTag = cast(ContainerTaggedName, tag[len(DOCKER_PROTO):] if tag.startswith(DOCKER_PROTO) else tag)
self.logger.info(f"downloading docker container: {tag}")
d_retval , d_out_v , d_err_v = self._inspect(dockerTag, matEnv)
# Time to pull the image
if d_retval != 0:
d_retval , d_out_v , d_err_v = self._pull(dockerTag, matEnv)
if d_retval == 0:
# Second try
d_retval , d_out_v , d_err_v = self._inspect(dockerTag, matEnv)
if d_retval != 0:
errstr = """Could not materialize docker image {}. Retval {}
======
STDOUT
======
{}
======
STDERR
======
{}""".format(dockerTag, d_retval, d_out_v, d_err_v)
raise ContainerFactoryException(errstr)
# Parsing the output from docker inspect
try:
manifests = json.loads(d_out_v)
manifest = manifests[0]
except Exception as e:
raise ContainerFactoryException(f"FATAL ERROR: Docker finished properly but it did not properly materialize {tag}: {e}")
# Then, compute the signature
tagId = manifest['Id']
fingerprint = None
if len(manifest['RepoDigests']) > 0:
fingerprint = manifest['RepoDigests'][0]
# Last but one, let's save a copy of the container locally
containerFilename = simpleFileNameMethod(cast(URIType, tag))
containerFilenameMeta = containerFilename + self.META_JSON_POSTFIX
localContainerPath = cast(AbsPath, os.path.join(self.engineContainersSymlinkDir, containerFilename))
localContainerPathMeta = cast(AbsPath, os.path.join(self.engineContainersSymlinkDir, containerFilenameMeta))
self.logger.info("saving docker container (for reproducibility matters): {} => {}".format(tag, localContainerPath))
# First, let's materialize the container image
manifestsImageSignature = ComputeDigestFromObject(manifests)
canonicalContainerPath = os.path.join(self.containersCacheDir, manifestsImageSignature.replace('=','~').replace('/','-').replace('+','_'))
canonicalContainerPathMeta = canonicalContainerPath + self.META_JSON_POSTFIX
# Defining the destinations
if os.path.isfile(canonicalContainerPathMeta):
with open(canonicalContainerPathMeta, mode="r", encoding="utf-8") as tcpm:
metadataLocal = json.load(tcpm)
manifestsImageSignatureLocal = metadataLocal.get('manifests_signature')
manifestsImageSignatureLocalRead = ComputeDigestFromObject(metadataLocal.get('manifests', []))
if manifestsImageSignature != manifestsImageSignatureLocal or manifestsImageSignature != manifestsImageSignatureLocalRead:
self.logger.warning("Corrupted canonical container metadata {tag}. Re-saving")
saveContainerPathMeta = True
imageSignatureLocal = None
else:
saveContainerPathMeta = False
imageSignatureLocal = metadataLocal.get('image_signature')
else:
saveContainerPathMeta = True
imageSignature = None
imageSignatureLocal = None
# Only trust when they match
tmpContainerPath: Optional[str] = os.path.join(self.containersCacheDir,str(uuid.uuid4()))
if os.path.isfile(canonicalContainerPath) and (imageSignatureLocal is not None):
imageSignatureLocalRead = ComputeDigestFromFile(canonicalContainerPath)
if imageSignatureLocalRead != imageSignatureLocal:
self.logger.warning("Corrupted canonical container {tag}. Re-saving")
else:
imageSignature = imageSignatureLocal
tmpContainerPath = None
if tmpContainerPath is not None:
saveContainerPathMeta = True
                # _save returns (retval, stderr); bind stderr to d_err_v so the logging below reports it
                d_retval, d_err_v = self._save(dockerTag, cast(AbsPath, tmpContainerPath), matEnv)
self.logger.debug("docker save retval: {}".format(d_retval))
self.logger.debug("docker save stderr: {}".format(d_err_v))
if d_retval != 0:
errstr = """Could not save docker image {}. Retval {}
======
STDERR
======
{}""".format(dockerTag, d_retval, d_err_v)
if os.path.exists(tmpContainerPath):
try:
os.unlink(tmpContainerPath)
except:
pass
raise ContainerFactoryException(errstr)
shutil.move(tmpContainerPath, canonicalContainerPath)
imageSignature = ComputeDigestFromFile(canonicalContainerPath)
if saveContainerPathMeta:
with open(canonicalContainerPathMeta, mode="w", encoding='utf-8') as tcpM:
json.dump({
"image_signature": imageSignature,
"manifests_signature": manifestsImageSignature,
"manifests": manifests
}, tcpM)
# Now, check the relative symbolic link of image
createSymlink = True
if os.path.lexists(localContainerPath):
if os.path.realpath(localContainerPath) != os.path.realpath(canonicalContainerPath):
os.unlink(localContainerPath)
else:
createSymlink = False
if createSymlink:
os.symlink(os.path.relpath(canonicalContainerPath, self.engineContainersSymlinkDir), localContainerPath)
# Now, check the relative symbolic link of metadata
createSymlink = True
if os.path.lexists(localContainerPathMeta):
if os.path.realpath(localContainerPathMeta) != os.path.realpath(canonicalContainerPathMeta):
os.unlink(localContainerPathMeta)
else:
createSymlink = False
if createSymlink:
os.symlink(os.path.relpath(canonicalContainerPathMeta, self.engineContainersSymlinkDir), localContainerPathMeta)
# Last, hardlink or copy the container and its metadata
if containers_dir is not None:
containerPath = cast(AbsPath, os.path.join(containers_dir, containerFilename))
containerPathMeta = cast(AbsPath, os.path.join(containers_dir, containerFilenameMeta))
# Do not allow overwriting in offline mode
if not offline or not os.path.exists(containerPath):
link_or_copy(localContainerPath, containerPath)
if not offline or not os.path.exists(containerPathMeta):
link_or_copy(localContainerPathMeta, containerPathMeta)
else:
containerPath = localContainerPath
# And add to the list of containers
containersList.append(
Container(
origTaggedName=tag,
taggedName=cast(URIType, dockerTag),
signature=tagId,
fingerprint=fingerprint,
type=self.containerType,
localPath=containerPath
)
)
return containersList
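# Hypothetical usage sketch (added; not part of the original module). All
# names and paths below are placeholders; a running Docker daemon and the
# package's relative imports are assumed:
#     import re
#     factory = DockerContainerFactory(
#         cacheDir='/tmp/wfexs-cache',
#         local_config={'tools': {'dockerCommand': 'docker'}},
#         tempDir='/tmp',
#     )
#     safe_name = lambda tag: cast(RelPath, re.sub(r'[^\w.+-]', '_', tag))
#     containers = factory.materializeContainers(
#         ['busybox:latest'], safe_name, containers_dir='/tmp/containers')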
|
[
"tempfile.NamedTemporaryFile",
"lzma.open",
"subprocess.Popen",
"os.path.lexists",
"json.loads",
"json.load",
"uuid.uuid4",
"typing.cast",
"json.dump",
"os.path.realpath",
"os.unlink",
"os.path.exists",
"os.path.isfile",
"os.path.relpath",
"shutil.move",
"shutil.copyfileobj",
"os.path.join"
] |
[((1894, 1923), 'tempfile.NamedTemporaryFile', 'tempfile.NamedTemporaryFile', ([], {}), '()\n', (1921, 1923), False, 'import tempfile\n'), ((1934, 1963), 'tempfile.NamedTemporaryFile', 'tempfile.NamedTemporaryFile', ([], {}), '()\n', (1961, 1963), False, 'import tempfile\n'), ((2960, 2989), 'tempfile.NamedTemporaryFile', 'tempfile.NamedTemporaryFile', ([], {}), '()\n', (2987, 2989), False, 'import tempfile\n'), ((3000, 3029), 'tempfile.NamedTemporaryFile', 'tempfile.NamedTemporaryFile', ([], {}), '()\n', (3027, 3029), False, 'import tempfile\n'), ((3948, 3978), 'lzma.open', 'lzma.open', (['destfile'], {'mode': '"""wb"""'}), "(destfile, mode='wb')\n", (3957, 3978), False, 'import lzma\n'), ((3989, 4018), 'tempfile.NamedTemporaryFile', 'tempfile.NamedTemporaryFile', ([], {}), '()\n', (4016, 4018), False, 'import tempfile\n'), ((7785, 7827), 'os.path.isfile', 'os.path.isfile', (['canonicalContainerPathMeta'], {}), '(canonicalContainerPathMeta)\n', (7799, 7827), False, 'import os\n'), ((10915, 10950), 'os.path.lexists', 'os.path.lexists', (['localContainerPath'], {}), '(localContainerPath)\n', (10930, 10950), False, 'import os\n'), ((11443, 11482), 'os.path.lexists', 'os.path.lexists', (['localContainerPathMeta'], {}), '(localContainerPathMeta)\n', (11458, 11482), False, 'import os\n'), ((4116, 4226), 'subprocess.Popen', 'subprocess.Popen', (["[self.runtime_cmd, 'save', dockerTag]"], {'env': 'matEnv', 'stdout': 'subprocess.PIPE', 'stderr': 'd_err'}), "([self.runtime_cmd, 'save', dockerTag], env=matEnv, stdout=\n subprocess.PIPE, stderr=d_err)\n", (4132, 4226), False, 'import subprocess\n'), ((6275, 6294), 'json.loads', 'json.loads', (['d_out_v'], {}), '(d_out_v)\n', (6285, 6294), False, 'import json\n'), ((6871, 6889), 'typing.cast', 'cast', (['URIType', 'tag'], {}), '(URIType, tag)\n', (6875, 6889), False, 'from typing import cast\n'), ((7017, 7081), 'os.path.join', 'os.path.join', (['self.engineContainersSymlinkDir', 'containerFilename'], {}), '(self.engineContainersSymlinkDir, containerFilename)\n', (7029, 7081), False, 'import os\n'), ((7134, 7202), 'os.path.join', 'os.path.join', (['self.engineContainersSymlinkDir', 'containerFilenameMeta'], {}), '(self.engineContainersSymlinkDir, containerFilenameMeta)\n', (7146, 7202), False, 'import os\n'), ((8988, 9026), 'os.path.isfile', 'os.path.isfile', (['canonicalContainerPath'], {}), '(canonicalContainerPath)\n', (9002, 9026), False, 'import os\n'), ((10279, 10332), 'shutil.move', 'shutil.move', (['tmpContainerPath', 'canonicalContainerPath'], {}), '(tmpContainerPath, canonicalContainerPath)\n', (10290, 10332), False, 'import shutil\n'), ((2069, 2171), 'subprocess.Popen', 'subprocess.Popen', (["[self.runtime_cmd, 'inspect', dockerTag]"], {'env': 'matEnv', 'stdout': 'd_out', 'stderr': 'd_err'}), "([self.runtime_cmd, 'inspect', dockerTag], env=matEnv,\n stdout=d_out, stderr=d_err)\n", (2085, 2171), False, 'import subprocess\n'), ((3134, 3234), 'subprocess.Popen', 'subprocess.Popen', (["[self.runtime_cmd, 'pull', dockerTag]"], {'env': 'matEnv', 'stdout': 'd_out', 'stderr': 'd_err'}), "([self.runtime_cmd, 'pull', dockerTag], env=matEnv, stdout=\n d_out, stderr=d_err)\n", (3150, 3234), False, 'import subprocess\n'), ((4369, 4405), 'shutil.copyfileobj', 'shutil.copyfileobj', (['sp.stdout', 'd_out'], {}), '(sp.stdout, d_out)\n', (4387, 4405), False, 'import shutil\n'), ((7956, 7971), 'json.load', 'json.load', (['tcpm'], {}), '(tcpm)\n', (7965, 7971), False, 'import json\n'), ((8958, 8970), 'uuid.uuid4', 'uuid.uuid4', ([], {}), '()\n', (8968, 8970), 
False, 'import uuid\n'), ((9596, 9627), 'typing.cast', 'cast', (['AbsPath', 'tmpContainerPath'], {}), '(AbsPath, tmpContainerPath)\n', (9600, 9627), False, 'from typing import cast\n'), ((10002, 10034), 'os.path.exists', 'os.path.exists', (['tmpContainerPath'], {}), '(tmpContainerPath)\n', (10016, 10034), False, 'import os\n'), ((10574, 10702), 'json.dump', 'json.dump', (["{'image_signature': imageSignature, 'manifests_signature':\n manifestsImageSignature, 'manifests': manifests}", 'tcpM'], {}), "({'image_signature': imageSignature, 'manifests_signature':\n manifestsImageSignature, 'manifests': manifests}, tcpM)\n", (10583, 10702), False, 'import json\n'), ((10971, 11007), 'os.path.realpath', 'os.path.realpath', (['localContainerPath'], {}), '(localContainerPath)\n', (10987, 11007), False, 'import os\n'), ((11011, 11051), 'os.path.realpath', 'os.path.realpath', (['canonicalContainerPath'], {}), '(canonicalContainerPath)\n', (11027, 11051), False, 'import os\n'), ((11073, 11102), 'os.unlink', 'os.unlink', (['localContainerPath'], {}), '(localContainerPath)\n', (11082, 11102), False, 'import os\n'), ((11224, 11296), 'os.path.relpath', 'os.path.relpath', (['canonicalContainerPath', 'self.engineContainersSymlinkDir'], {}), '(canonicalContainerPath, self.engineContainersSymlinkDir)\n', (11239, 11296), False, 'import os\n'), ((11503, 11543), 'os.path.realpath', 'os.path.realpath', (['localContainerPathMeta'], {}), '(localContainerPathMeta)\n', (11519, 11543), False, 'import os\n'), ((11547, 11591), 'os.path.realpath', 'os.path.realpath', (['canonicalContainerPathMeta'], {}), '(canonicalContainerPathMeta)\n', (11563, 11591), False, 'import os\n'), ((11613, 11646), 'os.unlink', 'os.unlink', (['localContainerPathMeta'], {}), '(localContainerPathMeta)\n', (11622, 11646), False, 'import os\n'), ((11768, 11844), 'os.path.relpath', 'os.path.relpath', (['canonicalContainerPathMeta', 'self.engineContainersSymlinkDir'], {}), '(canonicalContainerPathMeta, self.engineContainersSymlinkDir)\n', (11783, 11844), False, 'import os\n'), ((12040, 12087), 'os.path.join', 'os.path.join', (['containers_dir', 'containerFilename'], {}), '(containers_dir, containerFilename)\n', (12052, 12087), False, 'import os\n'), ((12139, 12190), 'os.path.join', 'os.path.join', (['containers_dir', 'containerFilenameMeta'], {}), '(containers_dir, containerFilenameMeta)\n', (12151, 12190), False, 'import os\n'), ((12306, 12335), 'os.path.exists', 'os.path.exists', (['containerPath'], {}), '(containerPath)\n', (12320, 12335), False, 'import os\n'), ((12443, 12476), 'os.path.exists', 'os.path.exists', (['containerPathMeta'], {}), '(containerPathMeta)\n', (12457, 12476), False, 'import os\n'), ((12817, 12841), 'typing.cast', 'cast', (['URIType', 'dockerTag'], {}), '(URIType, dockerTag)\n', (12821, 12841), False, 'from typing import cast\n'), ((10093, 10120), 'os.unlink', 'os.unlink', (['tmpContainerPath'], {}), '(tmpContainerPath)\n', (10102, 10120), False, 'import os\n')]
|
import importlib
import pytest
import tornado.web
from shelter.core.cmdlineparser import ArgumentParser
from shelter.core.config import Config
from shelter.core.context import Context
import tests.test_core_app
class ContextTest(Context):
pass
def test_config_cls():
config = Config(1, 2)
assert "<shelter.core.config.Config: 0x" in repr(config)
assert config.settings == 1
assert config.args_parser == 2
def test_config_context_class_default():
config = Config(
importlib.import_module('tests.settings1'),
ArgumentParser()
)
assert config.context_class is Context
def test_config_context_class_user():
config = Config(
importlib.import_module('tests.settings2'),
ArgumentParser()
)
assert config.context_class is not Context
assert config.context_class is ContextTest
def test_config_interfaces():
config = Config(
importlib.import_module('tests.settings1'),
ArgumentParser()
)
interfaces = sorted(config.interfaces, key=lambda x: x.name)
assert len(interfaces) == 4
assert interfaces[0].name == 'fastrpc'
assert interfaces[0].host == '192.168.1.0'
assert interfaces[0].port == 4445
assert interfaces[0].unix_socket is None
assert interfaces[0].app_cls is tornado.web.Application
assert interfaces[0].processes == 1
assert interfaces[0].start_timeout == 5.0
assert len(interfaces[0].urls) == 0
assert interfaces[1].name == 'http'
assert interfaces[1].host == ''
assert interfaces[1].port == 4443
assert interfaces[1].unix_socket is None
assert interfaces[1].app_cls is tornado.web.Application
assert interfaces[1].processes == 12
assert interfaces[1].start_timeout == 30.0
assert len(interfaces[1].urls) == 2
assert interfaces[2].name == 'rest'
assert interfaces[2].host == ''
assert interfaces[2].port == 4447
assert interfaces[2].unix_socket is None
assert interfaces[2].app_cls is tests.test_core_app.ApplicationTest
assert interfaces[2].processes == 2
assert interfaces[2].start_timeout == 5.0
assert len(interfaces[2].urls) == 0
assert interfaces[3].name == 'unix'
assert interfaces[3].host is None
assert interfaces[3].port is None
assert interfaces[3].unix_socket == '/tmp/tornado.socket'
assert interfaces[3].app_cls is tests.test_core_app.ApplicationTest
assert interfaces[3].processes == 6
assert interfaces[3].start_timeout == 5.0
assert len(interfaces[3].urls) == 3
def test_config_interfaces_both_tcp_and_unix():
config = Config(
importlib.import_module('tests.settings5'),
ArgumentParser()
)
interface = config.interfaces[0]
assert interface.name == 'http_both_tcp_and_unix'
assert interface.host == ''
assert interface.port == 4443
assert interface.unix_socket == '/tmp/tornado.socket'
def test_config_interface_fail_when_neither_tcp_nor_unix():
config = Config(
importlib.import_module('tests.settings6'),
ArgumentParser()
)
with pytest.raises(ValueError) as e:
_ = config.interfaces
assert "Interface MUST listen either on TCP or UNIX socket" in str(e)
|
[
"shelter.core.config.Config",
"pytest.raises",
"shelter.core.cmdlineparser.ArgumentParser",
"importlib.import_module"
] |
[((292, 304), 'shelter.core.config.Config', 'Config', (['(1)', '(2)'], {}), '(1, 2)\n', (298, 304), False, 'from shelter.core.config import Config\n'), ((505, 547), 'importlib.import_module', 'importlib.import_module', (['"""tests.settings1"""'], {}), "('tests.settings1')\n", (528, 547), False, 'import importlib\n'), ((557, 573), 'shelter.core.cmdlineparser.ArgumentParser', 'ArgumentParser', ([], {}), '()\n', (571, 573), False, 'from shelter.core.cmdlineparser import ArgumentParser\n'), ((692, 734), 'importlib.import_module', 'importlib.import_module', (['"""tests.settings2"""'], {}), "('tests.settings2')\n", (715, 734), False, 'import importlib\n'), ((744, 760), 'shelter.core.cmdlineparser.ArgumentParser', 'ArgumentParser', ([], {}), '()\n', (758, 760), False, 'from shelter.core.cmdlineparser import ArgumentParser\n'), ((922, 964), 'importlib.import_module', 'importlib.import_module', (['"""tests.settings1"""'], {}), "('tests.settings1')\n", (945, 964), False, 'import importlib\n'), ((974, 990), 'shelter.core.cmdlineparser.ArgumentParser', 'ArgumentParser', ([], {}), '()\n', (988, 990), False, 'from shelter.core.cmdlineparser import ArgumentParser\n'), ((2618, 2660), 'importlib.import_module', 'importlib.import_module', (['"""tests.settings5"""'], {}), "('tests.settings5')\n", (2641, 2660), False, 'import importlib\n'), ((2670, 2686), 'shelter.core.cmdlineparser.ArgumentParser', 'ArgumentParser', ([], {}), '()\n', (2684, 2686), False, 'from shelter.core.cmdlineparser import ArgumentParser\n'), ((3001, 3043), 'importlib.import_module', 'importlib.import_module', (['"""tests.settings6"""'], {}), "('tests.settings6')\n", (3024, 3043), False, 'import importlib\n'), ((3053, 3069), 'shelter.core.cmdlineparser.ArgumentParser', 'ArgumentParser', ([], {}), '()\n', (3067, 3069), False, 'from shelter.core.cmdlineparser import ArgumentParser\n'), ((3086, 3111), 'pytest.raises', 'pytest.raises', (['ValueError'], {}), '(ValueError)\n', (3099, 3111), False, 'import pytest\n')]
|
"""
Fixed policies to test our sim integration with. These are intended to take
Brain states and return Brain actions.
"""
import random
def random_policy(state):
"""
Ignore the state, select randomly.
"""
action = {
'command': random.randint(1, 2)
}
return action
def coast(state):
"""
Ignore the state, always select one exported brain.
"""
action = {
'command': 1
}
return action
POLICIES = {"random": random_policy,
"coast": coast}
|
[
"random.randint"
] |
[((254, 274), 'random.randint', 'random.randint', (['(1)', '(2)'], {}), '(1, 2)\n', (268, 274), False, 'import random\n')]
|
# os, shelve and the typing names were imported but never used below
from flask import current_app
from google.cloud import datastore
class DatastoreAdapter:
@property
def ds_client(self):
if not hasattr(current_app, "_datastore_client"):
config = current_app.config
current_app._datastore_client = datastore.Client(
project=config.get("DATASTORE_PROJECT"),
credentials=config.get("GCP_CREDENTIALS"),
)
return current_app._datastore_client
db = DatastoreAdapter()
class CredentialsAdapter:
@property
def credentials(self):
credentials = current_app.config.get("GCP_CREDENTIALS")
assert credentials is not None
return credentials
gcp = CredentialsAdapter()
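# Hypothetical usage sketch (added; not part of the original module): inside
# an application context, `db.ds_client` lazily builds one datastore.Client
# and caches it on current_app, so later accesses reuse the same client.
#     from flask import Flask
#     app = Flask(__name__)
#     app.config['DATASTORE_PROJECT'] = 'my-project'   # placeholder
#     app.config['GCP_CREDENTIALS'] = None             # placeholder
#     with app.app_context():
#         client = db.ds_client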
|
[
"flask.current_app.config.get"
] |
[((658, 699), 'flask.current_app.config.get', 'current_app.config.get', (['"""GCP_CREDENTIALS"""'], {}), "('GCP_CREDENTIALS')\n", (680, 699), False, 'from flask import current_app\n')]
|
import math
import itertools as itt
import numpy as np
from collections import namedtuple
from datetime import datetime
from scipy.special import gamma
from sklearn.neighbors import BallTree
import random
from pywde.pywt_ext import WaveletTensorProduct
from pywde.common import all_zs_tensor
class dictwithfactory(dict):
def __init__(self, factory):
super(dictwithfactory, self).__init__()
self._factory = factory
def __getitem__(self, key):
if key in self:
return self.get(key)
val = self._factory(key)
self[key] = val
return val
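# Small usage sketch (added; not part of the original module): dictwithfactory
# memoises the factory result per key, so the factory runs only on the first
# lookup of a given key.
#     squares = dictwithfactory(lambda k: k * k)
#     squares[4]   # calls the factory and caches 16
#     squares[4]   # returns the cached 16 without calling the factory again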
class SPWDE(object):
def __init__(self, waves, k=1):
self.wave = WaveletTensorProduct([wave_desc[0] for wave_desc in waves])
self.j0s = [wave_desc[1] for wave_desc in waves]
self.k = k
self.minx = None
self.maxx = None
# target distance
TARGET_NORMED = 'normed'
TARGET_DIFF = 'diff'
# threshold calculation
TH_CLASSIC = 'classic' # Donoho et al
TH_ADJUSTED = 'adjusted' # Delyon & Judistky
TH_EMP_STD = 'emp-var' # New
def best_j(self, xs, mode, stop_on_max=False):
t0 = datetime.now()
assert mode in [self.TARGET_NORMED, self.TARGET_DIFF], 'Wrong mode'
best_j_data = []
balls_info = calc_sqrt_vs(xs, self.k)
self.minx = np.amin(xs, axis=0)
self.maxx = np.amax(xs, axis=0)
omega = calc_omega(xs.shape[0], self.k)
best_b_hat_j = None
best_j = None
for j in range(8):
# In practice, one would stop when maximum is reached, i.e. after first decreasing value of B Hat
g_ring_no_i_xs = []
wave_base_j_00_ZS, wave_base_j_00_ZS_at_xs, wave_dual_j_00_ZS_at_xs = self.calc_funs_at(j, (0, 0), xs)
if mode == self.TARGET_DIFF:
coeff_j_00_ZS = self.calc_coeffs(wave_base_j_00_ZS_at_xs, wave_dual_j_00_ZS_at_xs, j, xs, balls_info, (0, 0))
coeffs = np.array(list(coeff_j_00_ZS.values()))
alphas_norm_2 = (coeffs[:,0] * coeffs[:,1]).sum()
for i, x in enumerate(xs):
coeff_no_i_j_00_ZS = self.calc_coeffs_no_i(wave_base_j_00_ZS_at_xs, wave_dual_j_00_ZS_at_xs, j, xs, i, balls_info, (0, 0))
g_ring_no_i_at_xi = 0.0
norm2 = 0.0
for zs in coeff_no_i_j_00_ZS:
if zs not in wave_base_j_00_ZS_at_xs:
continue
alpha_zs, alpha_d_zs = coeff_no_i_j_00_ZS[zs]
g_ring_no_i_at_xi += alpha_zs * wave_base_j_00_ZS_at_xs[zs][i]
norm2 += alpha_zs * alpha_d_zs
# q_ring_x ^ 2 / norm2 == f_at_x
if norm2 == 0.0:
if g_ring_no_i_at_xi == 0.0:
g_ring_no_i_xs.append(0.0)
else:
raise RuntimeError('Got norms but no value')
else:
if mode == self.TARGET_NORMED:
g_ring_no_i_xs.append(g_ring_no_i_at_xi * g_ring_no_i_at_xi / norm2)
                    else:  # mode == self.TARGET_DIFF
g_ring_no_i_xs.append(g_ring_no_i_at_xi * g_ring_no_i_at_xi)
g_ring_no_i_xs = np.array(g_ring_no_i_xs)
if mode == self.TARGET_NORMED:
b_hat_j = omega * (np.sqrt(g_ring_no_i_xs) * balls_info.sqrt_vol_k).sum()
        else:  # mode == self.TARGET_DIFF
b_hat_j = 2 * omega * (np.sqrt(g_ring_no_i_xs) * balls_info.sqrt_vol_k).sum() - alphas_norm_2
print(mode, j, b_hat_j)
if best_j is None:
best_j = j
best_b_hat_j = b_hat_j
elif b_hat_j > best_b_hat_j:
best_j = j
best_b_hat_j = b_hat_j
elif stop_on_max:
self.the_best_j = best_j
return best_j
if stop_on_max:
continue
        # Compute the pdf at this level, for reporting
name = 'WDE Alphas, dj=%d' % j
if mode == self.TARGET_DIFF:
pdf = self.calc_pdf(wave_base_j_00_ZS, coeff_j_00_ZS, name)
else:
coeff_j_00_ZS = self.calc_coeffs(wave_base_j_00_ZS_at_xs, wave_dual_j_00_ZS_at_xs, j, xs, balls_info, (0, 0))
pdf = self.calc_pdf(wave_base_j_00_ZS, coeff_j_00_ZS, name)
elapsed = (datetime.now() - t0).total_seconds()
best_j_data.append((j, b_hat_j, pdf, elapsed))
best_b_hat = max([info_j[1] for info_j in best_j_data])
best_j = list(filter(lambda info_j: info_j[1] == best_b_hat, best_j_data))[0][0]
self.best_j_data = [
tuple([info_j[0], info_j[0] == best_j, info_j[1], info_j[2], info_j[3]])
for info_j in best_j_data]
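    # Illustrative note (added; not part of the original class): best_j is
    # typically driven along the lines of
    #     spwde = SPWDE([('db4', 0), ('db4', 0)], k=1)
    #     spwde.best_j(xs, mode=SPWDE.TARGET_DIFF)
    # where xs is an (n, d) sample array and each wave descriptor is assumed
    # to be a (wavelet name, start level) pair, as read by __init__ above.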
def best_c(self, xs, delta_j, opt_target, th_mode):
"""best c - hard thresholding"""
assert delta_j > 0, 'delta_j must be 1 or more'
assert opt_target in [self.TARGET_NORMED, self.TARGET_DIFF], 'Wrong optimisation target'
assert th_mode in [self.TH_CLASSIC, self.TH_ADJUSTED, self.TH_EMP_STD], 'Wrong threshold strategy'
balls_info = calc_sqrt_vs(xs, self.k)
self.minx = np.amin(xs, axis=0)
self.maxx = np.amax(xs, axis=0)
qqs = self.wave.qq
# base funs for levels of interest
dict_triple_J_QQ_ZS__wbase_wbase_at_wdual_at = {}
dict_triple_J_QQ_ZS__wbase_wbase_at_wdual_at[(0, qqs[0])] = self.calc_funs_at(0, qqs[0], xs)
for j, qq in itt.product(range(delta_j), qqs[1:]):
dict_triple_J_QQ_ZS__wbase_wbase_at_wdual_at[(j, qq)] = self.calc_funs_at(j, qq, xs)
# dict_triple_J_QQ_ZS__wbase_wbase_at_wdual_at [ (j, qq) ] => a triple with
# wave_base_0_00_ZS, wave_base_0_00_ZS_at_xs, wave_dual_j_00_ZS_at_xs
# memoise balls
all_balls = []
for i in range(len(xs)):
balls = balls_no_i(balls_info, i)
all_balls.append(balls)
# rank betas from large to smallest; we will incrementaly calculate
# the HD_i for each in turn
beta_var = True
all_betas = []
for (j, qq), triple in dict_triple_J_QQ_ZS__wbase_wbase_at_wdual_at.items():
_, wave_base_j_qq_ZS_at_xs, wave_dual_j_qq_ZS_at_xs = triple
if qq == (0, 0):
alphas_dict = self.calc_coeffs(wave_base_j_qq_ZS_at_xs, wave_dual_j_qq_ZS_at_xs, 0, xs, balls_info, (0, 0))
continue
cc = self.calc_coeffs(wave_base_j_qq_ZS_at_xs, wave_dual_j_qq_ZS_at_xs, j, xs, balls_info, qq)
for zs in cc:
coeff_zs, coeff_d_zs = cc[zs]
if coeff_zs == 0.0:
continue
if beta_var:
coeff_i_vals = []
for i, x in enumerate(xs):
coeff_i, coeff_d_i = self.calc_1_coeff_no_i(wave_base_j_qq_ZS_at_xs, wave_dual_j_qq_ZS_at_xs, j,
xs, i, all_balls[i], qq, zs)
coeff_i_vals.append(coeff_i)
# coeff_i_std = np.array(coeff_i_vals).std()
coeff_i_std = (np.array(coeff_i_vals) - coeff_zs).std()
else:
coeff_i_std = 0.
all_betas.append((j, qq, zs, coeff_zs, coeff_d_zs, coeff_i_std))
        # order1 : 1995, Donoho, Johnstone, Kerkyacharian, Picard - Wavelet Shrinkage, Asymptopia
        order1 = lambda tt: math.fabs(tt[3])
        # order2 : 1996, Delyon, Juditsky - On Minimax Wavelet Estimators
        order2 = lambda tt: math.fabs(tt[3]) / math.sqrt(delta_j - tt[0])
# order3 : New things
# order3 = lambda tt: math.fabs(tt[3]) - 4 * tt[5] ## kind of work for low n
# order3 = lambda tt: math.fabs(tt[3]) / (math.fabs(tt[3]) * 0.5 + tt[5]) # ??
# order3 = lambda tt: tt[5]
# order3 = lambda tt: math.fabs(tt[3]) / tt[5] / math.sqrt(delta_j - tt[0])
order4 = lambda tt: math.fabs(tt[3]) / tt[5]
if th_mode == self.TH_CLASSIC:
key_order = order1
subtitle = r"$\left| \beta_{j,q,z} \right| \geq C$"
elif th_mode == self.TH_ADJUSTED:
key_order = order2
subtitle = r"$\left| \beta_{j,q,z} \right| \geq C \sqrt{j + 1}$"
elif th_mode == self.TH_EMP_STD:
key_order = order4
subtitle = r"$\left| \beta_{j,q,z} \right| \geq C \hat{\sigma}\left[\beta_{j,q,z}^{(-i)}\right]$"
else:
raise RuntimeError('Unknown threshold mode')
all_betas = sorted(all_betas, key=key_order, reverse=True)
# get base line for acummulated values by computing alphas and the
# target HD_i functions
_, wave_base_0_00_ZS_at_xs, wave_dual_0_00_ZS_at_xs = dict_triple_J_QQ_ZS__wbase_wbase_at_wdual_at[(0, (0, 0))]
g_ring_no_i_xs = np.zeros(xs.shape[0])
norm2_xs = np.zeros(xs.shape[0])
for i, x in enumerate(xs):
coeff_no_i_0_00_ZS = self.calc_coeffs_no_i(wave_base_0_00_ZS_at_xs, wave_dual_0_00_ZS_at_xs, 0, xs, i,
balls_info, (0, 0))
for zs in coeff_no_i_0_00_ZS:
if zs not in wave_base_0_00_ZS_at_xs:
continue
alpha_zs, alpha_d_zs = coeff_no_i_0_00_ZS[zs]
g_ring_no_i_xs[i] += alpha_zs * wave_base_0_00_ZS_at_xs[zs][i]
norm2_xs[i] += alpha_zs * alpha_d_zs
## print('g_ring_no_i_xs', g_ring_no_i_xs * g_ring_no_i_xs) << !!! OK !!!
num_alphas = 0
for zs in alphas_dict:
alpha_zs, alpha_d_zs = alphas_dict[zs]
if alpha_zs == 0.0 or alpha_d_zs == 0.0:
continue
num_alphas += 1
omega_nk = calc_omega(xs.shape[0], self.k)
best_c_data = []
best_hat = None
self.best_c_found = None
for cx, beta_info in enumerate(all_betas):
j, qq, zs, coeff , coeff_d, coeff_i_std = beta_info
_, wave_base_j_qq_ZS_at_xs, wave_dual_j_qq_ZS_at_xs = dict_triple_J_QQ_ZS__wbase_wbase_at_wdual_at[(j, qq)]
coeff_i_vals = []
for i, x in enumerate(xs):
coeff_i, coeff_d_i = self.calc_1_coeff_no_i(wave_base_j_qq_ZS_at_xs, wave_dual_j_qq_ZS_at_xs, j, xs, i, all_balls[i], qq, zs)
if zs not in wave_base_j_qq_ZS_at_xs:
continue
g_ring_no_i_xs[i] += coeff_i * wave_base_j_qq_ZS_at_xs[zs][i]
norm2_xs[i] += coeff_i * coeff_d_i
coeff_i_vals.append(coeff_i)
if opt_target == self.TARGET_NORMED:
b_hat_beta = omega_nk * (np.sqrt(g_ring_no_i_xs * g_ring_no_i_xs / norm2_xs) * balls_info.sqrt_vol_k).sum()
            else:  # opt_target == self.TARGET_DIFF
b_hat_beta = 2 * omega_nk * (np.sqrt(g_ring_no_i_xs * g_ring_no_i_xs) * balls_info.sqrt_vol_k).sum() - norm2_xs.mean()
best_c_data.append((key_order(beta_info), b_hat_beta, np.array(coeff_i_vals).std(), num_alphas + cx + 1))
# calc best
if len(best_c_data) > 0:
pos_c = np.argmax(np.array([tt[1] for tt in best_c_data]))
print('Best C', best_c_data[pos_c], '@ %d' % pos_c)
name = 'WDE C = %f (%d + %d)' % (best_c_data[pos_c][0], num_alphas, pos_c + 1)
the_betas = all_betas[:pos_c + 1]
else:
name = 'WDE C = None'
the_betas = []
pdf = self.calc_pdf_with_betas(dict_triple_J_QQ_ZS__wbase_wbase_at_wdual_at, alphas_dict, the_betas, name, subtitle)
if len(best_c_data) > 0:
self.best_c_found = (pdf, best_c_data[pos_c])
self.best_c_data = best_c_data
else:
self.best_c_found = (pdf, None)
self.best_c_data = best_c_data
def best_greedy_not_working(self, xs, delta_j, mode):
"best c - greedy optimisation `go`"
assert delta_j > 0, 'delta_j must be 1 or more'
        assert mode in [self.TARGET_NORMED, self.TARGET_DIFF], 'Wrong mode'
random.seed(1)
balls_info = calc_sqrt_vs(xs, self.k)
self.minx = np.amin(xs, axis=0)
self.maxx = np.amax(xs, axis=0)
qqs = self.wave.qq
# base funs for levels of interest
calc_funs_at = lambda key: self.calc_funs_at(key[0], key[1], xs)
dict_triple_J_QQ_ZS__wbase_wbase_at_wdual_at = dictwithfactory(calc_funs_at)
# dict_triple_J_QQ_ZS__wbase_wbase_at_wdual_at [ (j, qq) ] => a triple with
# wave_base_0_00_ZS, wave_base_0_00_ZS_at_xs, wave_dual_j_00_ZS_at_xs
# memoise balls
all_balls = []
for i in range(len(xs)):
balls = balls_no_i(balls_info, i)
all_balls.append(balls)
        # rank betas from largest to smallest; we will incrementally calculate
# the HD_i for each in turn
triple = dict_triple_J_QQ_ZS__wbase_wbase_at_wdual_at[(0, (0, 0))]
_, wave_base_j_qq_ZS_at_xs, wave_dual_j_qq_ZS_at_xs = triple
alphas_dict = self.calc_coeffs(wave_base_j_qq_ZS_at_xs, wave_dual_j_qq_ZS_at_xs, 0, xs, balls_info, (0, 0))
        # get baseline for accumulated values by computing alphas and the
# target HD_i functions
# >> calculate alphas >> same as best_c
_, wave_base_0_00_ZS_at_xs, wave_dual_0_00_ZS_at_xs = dict_triple_J_QQ_ZS__wbase_wbase_at_wdual_at[(0, (0, 0))]
g_ring_no_i_xs = np.zeros(xs.shape[0])
norm2_xs = np.zeros(xs.shape[0])
for i, x in enumerate(xs):
coeff_no_i_0_00_ZS = self.calc_coeffs_no_i(wave_base_0_00_ZS_at_xs, wave_dual_0_00_ZS_at_xs, 0, xs, i,
balls_info, (0, 0))
for zs in coeff_no_i_0_00_ZS:
if zs not in wave_base_0_00_ZS_at_xs:
continue
alpha_zs, alpha_d_zs = coeff_no_i_0_00_ZS[zs]
g_ring_no_i_xs[i] += alpha_zs * wave_base_0_00_ZS_at_xs[zs][i]
norm2_xs[i] += alpha_zs * alpha_d_zs
## print('g_ring_no_i_xs', g_ring_no_i_xs * g_ring_no_i_xs) << !!! OK !!!
def populate_at(new_key, populate_mode):
if populate_mode == 'by_j':
j, _, _ = new_key
if len(curr_betas.keys()) == 0:
# add new level
j = j + 1
print('populate_at - new level', j)
for qq in qqs[1:]:
triple = dict_triple_J_QQ_ZS__wbase_wbase_at_wdual_at[(j, qq)]
_, wave_base_j_qq_ZS_at_xs, wave_dual_j_qq_ZS_at_xs = triple
cc = self.calc_coeffs(wave_base_j_qq_ZS_at_xs, wave_dual_j_qq_ZS_at_xs, j, xs, balls_info, qq)
for zs in cc:
coeff_zs, coeff_d_zs = cc[zs]
if coeff_zs == 0.0:
continue
curr_betas[(j, qq, zs)] = coeff_zs, coeff_d_zs
print('curr_betas #', len(curr_betas))
return
if populate_mode == 'by_near_zs':
raise RuntimeError('by_near_zs not implemented')
raise RuntimeError('populate_mode_wrong')
def beta_factory(key):
j, qq, zs, i = key
coeff_i, coeff_d_i = self.calc_1_coeff_no_i(wave_base_j_qq_ZS_at_xs, wave_dual_j_qq_ZS_at_xs, j, xs, i,
all_balls[i], qq, zs)
return coeff_i, coeff_d_i
betas_no_i_j_qq_zz_i = dictwithfactory(beta_factory)
def g_ring_calc(j, qq, zs):
loc_g_ring_no_i_xs = g_ring_no_i_xs.copy()
loc_norm2_xs = norm2_xs.copy()
coeff_i_vals = []
for i, x in enumerate(xs):
coeff_i, coeff_d_i = betas_no_i_j_qq_zz_i[(j, qq, zs, i)]
if zs not in wave_base_j_qq_ZS_at_xs:
continue
loc_g_ring_no_i_xs[i] += coeff_i * wave_base_j_qq_ZS_at_xs[zs][i]
loc_norm2_xs[i] += coeff_i * coeff_d_i
coeff_i_vals.append(coeff_i)
return loc_g_ring_no_i_xs, loc_norm2_xs, np.array(coeff_i_vals)
ball_std = balls_info.sqrt_vol_k.std()
def get_all_betas():
resp = []
for k, v in curr_betas.items():
j, qq, zs = k
coeff_zs, coeff_d_zs = v
loc_g_ring_no_i_xs, loc_norm2_xs, betas_j_qq_zs_no_i = g_ring_calc(j, qq, zs)
if mode == self.MODE_NORMED:
b_hat_beta = omega_nk * (np.sqrt(loc_g_ring_no_i_xs * loc_g_ring_no_i_xs / loc_norm2_xs) * balls_info.sqrt_vol_k).sum()
else: # mode == self.MODE_DIFF:
b_hat_beta = 2 * omega_nk * (np.sqrt(loc_g_ring_no_i_xs * loc_g_ring_no_i_xs) * balls_info.sqrt_vol_k).sum() - loc_norm2_xs.mean()
if len(betas_j_qq_zs_no_i) == 0:
continue
#print(j, qq, zs, b_hat_beta, coeff_zs, 3 * math.sqrt(betas_j_qq_zs_no_i.std()))
correction = 2 * math.sqrt(betas_j_qq_zs_no_i.std()) ##np.abs(loc_g_ring_no_i_xs).std() ## * (j+1) ##* ball_std
b_hat_std = betas_j_qq_zs_no_i.std()
resp.append((j, qq, zs, coeff_zs, coeff_d_zs, b_hat_beta + correction, b_hat_beta, b_hat_std))
return resp
popu_mode = 'by_j'
the_betas = []
omega_nk = calc_omega(xs.shape[0], self.k)
found = True
curr_betas = {}
curr_b_hat_beta = None
# populate w/ j = 0, all QQ
populate_at((-1, None, None), 'by_j')
betas_num = 10
## << BEST !! count number of betas of current level as we know it
## 180 or 90 give very good results
curr_j = 0
used_level = False
while curr_j < 6:
all_betas = get_all_betas()
if len(all_betas) == 0:
populate_at((curr_j, None, None), popu_mode)
curr_j += 1
used_level = False
continue
fkey1 = lambda tt: tt[5]
fkey2 = lambda tt: math.fabs(tt[3])*tt[5]
fkey3 = lambda tt: tt[3]*tt[3]*tt[5]
fkey4 = lambda tt: math.fabs(tt[3])*tt[5]/tt[6]
fkey5 = lambda tt: math.fabs(tt[3]) * tt[5] - tt[6]
fkey6 = lambda tt: tt[5] - tt[6] / (curr_j + 1)
fkey7 = lambda tt: tt[5] / tt[6]
fkey8 = lambda tt: math.fabs(tt[3])/tt[6]
fkey = fkey1
all_betas = sorted(all_betas, key=fkey, reverse=True)
##print(all_betas)
# print(all_betas[0], ':', fkey(all_betas[0]), '..(%d)..' % len(all_betas), all_betas[-1], ':', fkey(all_betas[-1]))
# import seaborn as sns
# import matplotlib.pyplot as plt
# xx = np.array([(tt[3], fkey(tt)) for tt in all_betas])
# ##xx = xx - xx.min()
# sns.scatterplot(xx[:,0], xx[:,1])
# plt.show()
# raise RuntimeError('blah')
## ix = random.choices(list(range(all_betas)), weights=[fkey(tt) for tt in all_betas])
chosen_betas = all_betas[:betas_num]
new_b_hat_beta = max([tt[5] for tt in chosen_betas])
if curr_b_hat_beta is None or new_b_hat_beta > curr_b_hat_beta:
## print('.'*betas_num, end='')
curr_b_hat_beta = min([tt[5] for tt in chosen_betas])
used_level = True
print(all_betas[0], curr_b_hat_beta)
for ix_tuple in chosen_betas:
the_betas.append(ix_tuple)
del curr_betas[ix_tuple[:3]]
## populate_at(ix_tuple[:3], popu_mode)
g_ring_no_i_xs, norm2_xs, _ = g_ring_calc(*ix_tuple[:3])
continue
if not used_level:
break
if curr_j + 1 >= 6:
break
print('\n next level, # betas =', len(the_betas))
for k in list(curr_betas.keys()):
del curr_betas[k]
populate_at((curr_j, None, None), popu_mode)
curr_j += 1
used_level = False
print('')
name = 'WDE greedy = %f' % curr_b_hat_beta
the_betas_p = [tt[:6] for tt in the_betas]
pdf = self.calc_pdf_with_betas(dict_triple_J_QQ_ZS__wbase_wbase_at_wdual_at, alphas_dict, the_betas_p, name)
self.best_c_found = (pdf, curr_b_hat_beta)
self.best_c_data = [(ix, tt[5]) for ix, tt in enumerate(the_betas)]
def best_greedy(self, xs, delta_j, j0, opt_target):
"best c - greedy optimisation `go`"
assert delta_j > 0, 'delta_j must be 1 or more'
assert opt_target in [self.TARGET_NORMED, self.TARGET_DIFF], 'Wrong optimisation target'
balls_info = calc_sqrt_vs(xs, self.k)
self.minx = np.amin(xs, axis=0)
self.maxx = np.amax(xs, axis=0)
qqs = self.wave.qq
# base funs for levels of interest
calc_funs_at = lambda key: self.calc_funs_at(key[0], key[1], xs)
dict_triple_J_QQ_ZS__wbase_wbase_at_wdual_at = dictwithfactory(calc_funs_at)
# dict_triple_J_QQ_ZS__wbase_wbase_at_wdual_at [ (j, qq) ] => a triple with
# wave_base_0_00_ZS, wave_base_0_00_ZS_at_xs, wave_dual_j_00_ZS_at_xs
# memoise balls
all_balls = []
for i in range(len(xs)):
balls = balls_no_i(balls_info, i)
all_balls.append(balls)
        # rank betas from largest to smallest; we will incrementally calculate
# the HD_i for each in turn
triple = dict_triple_J_QQ_ZS__wbase_wbase_at_wdual_at[(0, (0, 0))]
_, wave_base_j_qq_ZS_at_xs, wave_dual_j_qq_ZS_at_xs = triple
alphas_dict = self.calc_coeffs(wave_base_j_qq_ZS_at_xs, wave_dual_j_qq_ZS_at_xs, 0, xs, balls_info, (0, 0))
        # get baseline for accumulated values by computing alphas and the
# target HD_i functions
# >> calculate alphas >> same as best_c
_, wave_base_0_00_ZS_at_xs, wave_dual_0_00_ZS_at_xs = dict_triple_J_QQ_ZS__wbase_wbase_at_wdual_at[(0, (0, 0))]
g_ring_no_i_xs = np.zeros(xs.shape[0])
norm2_xs = np.zeros(xs.shape[0])
for i, x in enumerate(xs):
coeff_no_i_0_00_ZS = self.calc_coeffs_no_i(wave_base_0_00_ZS_at_xs, wave_dual_0_00_ZS_at_xs, 0, xs, i,
balls_info, (0, 0))
for zs in coeff_no_i_0_00_ZS:
if zs not in wave_base_0_00_ZS_at_xs:
continue
alpha_zs, alpha_d_zs = coeff_no_i_0_00_ZS[zs]
g_ring_no_i_xs[i] += alpha_zs * wave_base_0_00_ZS_at_xs[zs][i]
norm2_xs[i] += alpha_zs * alpha_d_zs
## print('g_ring_no_i_xs', g_ring_no_i_xs * g_ring_no_i_xs) << !!! OK !!!
def populate_betas():
for dj in range(delta_j):
j = j0 + dj
print('Calc. betas for level %d' % j)
for qq in qqs[1:]:
triple = dict_triple_J_QQ_ZS__wbase_wbase_at_wdual_at[(j, qq)]
_, wave_base_j_qq_ZS_at_xs, wave_dual_j_qq_ZS_at_xs = triple
cc = self.calc_coeffs(wave_base_j_qq_ZS_at_xs, wave_dual_j_qq_ZS_at_xs, j, xs, balls_info, qq)
for zs in cc:
coeff_zs, coeff_d_zs = cc[zs]
if coeff_zs == 0.0:
continue
curr_betas[(j, qq, zs)] = coeff_zs, coeff_d_zs
print('curr_betas #', len(curr_betas))
def beta_factory(key):
j, qq, zs, i = key
coeff_i, coeff_d_i = self.calc_1_coeff_no_i(wave_base_j_qq_ZS_at_xs, wave_dual_j_qq_ZS_at_xs, j, xs, i,
all_balls[i], qq, zs)
return coeff_i, coeff_d_i
betas_no_i_j_qq_zz_i = dictwithfactory(beta_factory)
def g_ring_calc(j, qq, zs):
loc_g_ring_no_i_xs = g_ring_no_i_xs.copy()
loc_norm2_xs = norm2_xs.copy()
coeff_i_vals = []
for i, x in enumerate(xs):
coeff_i, coeff_d_i = betas_no_i_j_qq_zz_i[(j, qq, zs, i)]
if zs not in wave_base_j_qq_ZS_at_xs:
continue
loc_g_ring_no_i_xs[i] += coeff_i * wave_base_j_qq_ZS_at_xs[zs][i]
loc_norm2_xs[i] += coeff_i * coeff_d_i
coeff_i_vals.append(coeff_i)
return loc_g_ring_no_i_xs, loc_norm2_xs, np.array(coeff_i_vals)
ball_std = balls_info.sqrt_vol_k.std()
def calc_b_hat():
resp = []
for k, v in curr_betas.items():
j, qq, zs = k
coeff_zs, coeff_d_zs = v
loc_g_ring_no_i_xs, loc_norm2_xs, betas_j_qq_zs_no_i = g_ring_calc(j, qq, zs)
if opt_target == self.TARGET_NORMED:
b_hat_beta = omega_nk * (np.sqrt(loc_g_ring_no_i_xs * loc_g_ring_no_i_xs / loc_norm2_xs) * balls_info.sqrt_vol_k).sum()
                else:  # opt_target == self.TARGET_DIFF
b_hat_beta = 2 * omega_nk * (np.sqrt(loc_g_ring_no_i_xs * loc_g_ring_no_i_xs) * balls_info.sqrt_vol_k).sum() - loc_norm2_xs.mean()
if len(betas_j_qq_zs_no_i) == 0:
continue
#print(j, qq, zs, b_hat_beta, coeff_zs, 3 * math.sqrt(betas_j_qq_zs_no_i.std()))
## correction = 3 * math.sqrt(betas_j_qq_zs_no_i.std()) ##np.abs(loc_g_ring_no_i_xs).std() ## * (j+1) ##* ball_std
correction = math.fabs(coeff_zs) / betas_j_qq_zs_no_i.std()
b_hat_std = betas_j_qq_zs_no_i.std()
resp.append((j, qq, zs, coeff_zs, coeff_d_zs, b_hat_beta + correction, b_hat_beta, b_hat_std))
return resp
popu_mode = 'by_j'
the_betas = []
omega_nk = calc_omega(xs.shape[0], self.k)
found = True
curr_betas = {}
curr_b_hat_beta = None
# populate w/ j = 0, all QQ
populate_betas()
# betas_ref : position of b_hat that we will use to stop iteration. If we can't improve \hat{B}
# beyond the value at position betas_ref next time, we consider the optimum reached.
betas_ref = 3
while True:
curr_b_hat = calc_b_hat()
if len(curr_b_hat) == 0:
break
fkey1 = lambda tt: tt[5]
fkey = fkey1
curr_b_hat = sorted(curr_b_hat, key=fkey, reverse=True)
new_b_hat_beta = fkey(curr_b_hat[0])
if curr_b_hat_beta is None or new_b_hat_beta > curr_b_hat_beta:
                ## we use a slightly sub-optimal value to smooth the target a little
curr_b_hat_beta = fkey(curr_b_hat[betas_ref - 1])
print(curr_b_hat[0], curr_b_hat_beta)
the_betas.append(curr_b_hat[0])
del curr_betas[curr_b_hat[0][:3]]
g_ring_no_i_xs, norm2_xs, _ = g_ring_calc(*curr_b_hat[0][:3])
continue
else:
break
print('')
name = 'WDE greedy = %f' % curr_b_hat_beta
the_betas_p = [tt[:6] for tt in the_betas]
pdf = self.calc_pdf_with_betas(dict_triple_J_QQ_ZS__wbase_wbase_at_wdual_at, alphas_dict, the_betas_p, name)
self.best_c_found = (pdf, curr_b_hat_beta)
self.best_c_data = [(ix, tt[5]) for ix, tt in enumerate(the_betas)]
def calc_pdf(self, base_fun, alphas, name):
norm2 = 0.0
for zs in alphas:
if zs not in base_fun:
continue
alpha_zs, alpha_d_zs = alphas[zs]
norm2 += alpha_zs * alpha_d_zs
if norm2 == 0.0:
raise RuntimeError('No norm')
def pdf(xs, alphas=alphas, norm2=norm2, base_fun=base_fun):
g_ring_xs = np.zeros(xs.shape[0])
for zs in alphas:
if zs not in base_fun:
continue
alpha_zs, alpha_d_zs = alphas[zs]
g_ring_xs += alpha_zs * base_fun[zs](xs)
            # g_ring_xs ^ 2 / norm2 == f_at_x
return g_ring_xs * g_ring_xs / norm2
pdf.name = name
return pdf
def calc_pdf_with_betas(self, base_funs_j, alphas, betas, name, subtitle=None):
"Calculate the pdf for given alphas and betas"
norm2 = 0.0
base_fun, _, _ = base_funs_j[(0, (0, 0))]
for zs in alphas:
if zs not in base_fun:
continue
alpha_zs, alpha_d_zs = alphas[zs]
norm2 += alpha_zs * alpha_d_zs
for j, qq, zs, coeff_zs, coeff_d_zs, coeff_std in betas:
base_fun, _, _ = base_funs_j[(j, qq)]
if zs not in base_fun:
continue
norm2 += coeff_zs * coeff_d_zs
if norm2 == 0.0:
raise RuntimeError('No norm')
def pdf(xs, alphas=alphas, betas=betas, norm2=norm2, base_funs_j=base_funs_j):
g_ring_xs = np.zeros(xs.shape[0])
base_fun, _, _ = base_funs_j[(0, (0, 0))]
for zs in alphas:
if zs not in base_fun:
continue
alpha_zs, alpha_d_zs = alphas[zs]
g_ring_xs += alpha_zs * base_fun[zs](xs)
for j, qq, zs, coeff_zs, coeff_d_zs, coeff_std in betas:
base_fun, _, _ = base_funs_j[(j, qq)]
if zs not in base_fun:
continue
g_ring_xs += coeff_zs * base_fun[zs](xs)
            # g_ring_xs ^ 2 / norm2 == f_at_x
return g_ring_xs * g_ring_xs / norm2
pdf.name = name
pdf.subtitle = subtitle
return pdf
def calc_funs_at(self, j, qq, xs):
"""
:param j: int, resolution level
:param qq: tensor index in R^d
:param xs: data in R^d
:return: (base funs, base @ xs, dual @ xs)
funs[zs] = base-wave _{j,zs}^{(qq)}
base @ xs[zs] = base-wave _{j,zs}^{(qq)}(xs)
dual @ xs[zs] = dual-wave _{j,zs}^{(qq)}(xs)
"""
wave_base_j_qq_ZS, wave_dual_j_qq_ZS = self.calc_funs(j, qq)
base_fun_xs = {}
for zs in wave_base_j_qq_ZS:
base_fun_xs[zs] = wave_base_j_qq_ZS[zs](xs)
dual_fun_xs = {}
for zs in wave_dual_j_qq_ZS:
dual_fun_xs[zs] = wave_dual_j_qq_ZS[zs](xs)
return wave_base_j_qq_ZS, base_fun_xs, dual_fun_xs
def calc_funs(self, j, qq):
"""
:param j: int, resolution level
:param qq: tensor index in R^d
:return: (base funs, dual funs)
funs[zs] = base|dual wave _{j,zs}^{(qq)}
wave_base_j_qq_ZS, wave_dual_j_qq_ZS
"""
jj = [j + j0 for j0 in self.j0s]
jpow2 = np.array([2 ** j for j in jj])
funs = {}
for what in ['dual', 'base']:
zs_min, zs_max = self.wave.z_range(what, (qq, jpow2, None), self.minx, self.maxx)
funs[what] = {}
for zs in itt.product(*all_zs_tensor(zs_min, zs_max)):
funs[what][zs] = self.wave.fun_ix(what, (qq, jpow2, zs))
return funs['base'], funs['dual']
def calc_coeffs(self, wave_base_j_qq_ZS_at_xs, wave_dual_j_qq_ZS_at_xs, j, xs, balls_info, qq):
jj = [j + j0 for j0 in self.j0s]
jpow2 = np.array([2 ** j for j in jj])
zs_min, zs_max = self.wave.z_range('dual', (qq, jpow2, None), self.minx, self.maxx)
omega = calc_omega(xs.shape[0], self.k)
resp = {}
balls = balls_info.sqrt_vol_k
for zs in itt.product(*all_zs_tensor(zs_min, zs_max)):
alpha_zs = omega * (wave_dual_j_qq_ZS_at_xs[zs] * balls).sum()
resp[zs] = (alpha_zs, alpha_zs)
if self.wave.orthogonal:
# we are done
return resp
zs_min, zs_max = self.wave.z_range('base', (qq, jpow2, None), self.minx, self.maxx)
for zs in itt.product(*all_zs_tensor(zs_min, zs_max)):
if zs not in resp:
continue
alpha_d_zs = omega * (wave_base_j_qq_ZS_at_xs[zs] * balls).sum()
resp[zs] = (resp[zs][0], alpha_d_zs)
return resp
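    # For reference (descriptive comment only): calc_coeffs above implements the
    # k-NN plug-in estimate  alpha_{j,q,z} = omega(n, k) * sum_i dual_{j,q,z}(x_i) * sqrt(V_k(x_i)),
    # where sqrt(V_k(x_i)) is balls_info.sqrt_vol_k[i], the square root of the volume of the
    # k-th nearest-neighbour ball around x_i; for non-orthogonal wavelets the dual coefficient
    # is computed the same way with the base functions instead.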
def calc_coeffs_no_i(self, wave_base_j_qq_ZS_at_xs, wave_dual_j_qq_ZS_at_xs, j, xs, i, balls_info, qq):
"Calculate alphas (w/ dual) and alpha-duals (w/ base)"
jj = [j + j0 for j0 in self.j0s]
jpow2 = np.array([2 ** j for j in jj])
zs_min, zs_max = self.wave.z_range('dual', (qq, jpow2, None), self.minx, self.maxx)
omega_no_i = calc_omega(xs.shape[0] - 1, self.k)
resp = {}
vol_no_i = balls_no_i(balls_info, i)
for zs in itt.product(*all_zs_tensor(zs_min, zs_max)):
            # below, we remove the i-th term from the sum << this has the biggest impact on performance
            # also, alpha_zs was already calculated in calc_coeffs, so this can be further optimised
alpha_zs = omega_no_i * ((wave_dual_j_qq_ZS_at_xs[zs] * vol_no_i).sum() - wave_dual_j_qq_ZS_at_xs[zs][i] * vol_no_i[i])
resp[zs] = (alpha_zs, alpha_zs)
if self.wave.orthogonal:
# we are done
return resp
zs_min, zs_max = self.wave.z_range('base', (qq, jpow2, None), self.minx, self.maxx)
for zs in itt.product(*all_zs_tensor(zs_min, zs_max)):
if zs not in resp:
continue
            # below, we remove the i-th term from the sum << this has the biggest impact on performance
alpha_d_zs = omega_no_i * ((wave_base_j_qq_ZS_at_xs[zs] * vol_no_i).sum() - wave_base_j_qq_ZS_at_xs[zs][i] * vol_no_i[i])
resp[zs] = (resp[zs][0], alpha_d_zs)
return resp
def calc_1_coeff_no_i(self, base_fun_xs, dual_fun_xs, j, xs, i, balls, qq, zs):
omega_no_i = calc_omega(xs.shape[0] - 1, self.k)
if zs in dual_fun_xs:
coeff = omega_no_i * ((dual_fun_xs[zs] * balls).sum() - dual_fun_xs[zs][i] * balls[i])
else:
coeff = 0.0
if self.wave.orthogonal:
# we are done
return coeff, coeff
if zs in base_fun_xs:
coeff_d = omega_no_i * ((base_fun_xs[zs] * balls).sum() - base_fun_xs[zs][i] * balls[i])
else:
coeff_d = 0.0
return coeff, coeff_d
def balls_no_i(balls_info, i):
n = balls_info.nn_indexes.shape[0]
resp = []
for i_prim in range(n):
        # note: index i itself is removed at the caller's site
if i in balls_info.nn_indexes[i_prim, :-1]:
resp.append(balls_info.sqrt_vol_k_plus_1[i_prim])
else:
resp.append(balls_info.sqrt_vol_k[i_prim])
return np.array(resp)
def calc_omega(n, k):
"Bias correction for k-th nearest neighbours sum for sample size n"
return math.sqrt(n - 1) * gamma(k) / gamma(k + 0.5) / n
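# A rough sanity sketch (added for illustration; _check_omega_asymptotics is not
# part of the original module): gamma(k) / gamma(k + 0.5) behaves like k ** -0.5
# for large k, so calc_omega(n, k) is roughly 1 / sqrt(n * k) for large n. The
# two values returned below should agree to within a fraction of a percent.
def _check_omega_asymptotics(n=10000, k=50):
    exact = calc_omega(n, k)
    approx = 1.0 / math.sqrt(n * k)
    return exact, approx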
BallsInfo = namedtuple('BallsInfo', ['sqrt_vol_k', 'sqrt_vol_k_plus_1', 'nn_indexes'])
def calc_sqrt_vs(xs, k):
"Returns BallsInfo object with sqrt of volumes of k-th balls and (k+1)-th balls"
dim = xs.shape[1]
ball_tree = BallTree(xs)
# as xs is both data and query, xs's nearest neighbour would be xs itself, hence the k+2 below
dist, inx = ball_tree.query(xs, k + 2)
    k_near_radius = dist[:, -2:]
    xs_balls_both = np.power(k_near_radius, dim / 2)
xs_balls = xs_balls_both[:, 0] * sqrt_vunit(dim)
xs_balls2 = xs_balls_both[:, 1] * sqrt_vunit(dim)
return BallsInfo(xs_balls, xs_balls2, inx)
def sqrt_vunit(dim):
"Square root of Volume of unit hypersphere in d dimensions"
return math.sqrt((np.pi ** (dim / 2)) / gamma(dim / 2 + 1))
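# Illustrative check (added here, not in the original module): the unit ball has
# area pi in d=2 and volume 4*pi/3 in d=3, so sqrt_vunit should return the
# square roots of those values.
def _check_sqrt_vunit():
    assert abs(sqrt_vunit(2) - math.sqrt(math.pi)) < 1e-12
    assert abs(sqrt_vunit(3) - math.sqrt(4 * math.pi / 3)) < 1e-12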
|
[
"numpy.amin",
"math.fabs",
"math.sqrt",
"numpy.power",
"scipy.special.gamma",
"numpy.zeros",
"numpy.amax",
"sklearn.neighbors.BallTree",
"numpy.array",
"collections.namedtuple",
"random.seed",
"pywde.pywt_ext.WaveletTensorProduct",
"pywde.common.all_zs_tensor",
"datetime.datetime.now",
"numpy.sqrt"
] |
[((35069, 35143), 'collections.namedtuple', 'namedtuple', (['"""BallsInfo"""', "['sqrt_vol_k', 'sqrt_vol_k_plus_1', 'nn_indexes']"], {}), "('BallsInfo', ['sqrt_vol_k', 'sqrt_vol_k_plus_1', 'nn_indexes'])\n", (35079, 35143), False, 'from collections import namedtuple\n'), ((34883, 34897), 'numpy.array', 'np.array', (['resp'], {}), '(resp)\n', (34891, 34897), True, 'import numpy as np\n'), ((35294, 35306), 'sklearn.neighbors.BallTree', 'BallTree', (['xs'], {}), '(xs)\n', (35302, 35306), False, 'from sklearn.neighbors import BallTree\n'), ((35503, 35536), 'numpy.power', 'np.power', (['k_near_radious', '(dim / 2)'], {}), '(k_near_radious, dim / 2)\n', (35511, 35536), True, 'import numpy as np\n'), ((682, 741), 'pywde.pywt_ext.WaveletTensorProduct', 'WaveletTensorProduct', (['[wave_desc[0] for wave_desc in waves]'], {}), '([wave_desc[0] for wave_desc in waves])\n', (702, 741), False, 'from pywde.pywt_ext import WaveletTensorProduct\n'), ((1163, 1177), 'datetime.datetime.now', 'datetime.now', ([], {}), '()\n', (1175, 1177), False, 'from datetime import datetime\n'), ((1346, 1365), 'numpy.amin', 'np.amin', (['xs'], {'axis': '(0)'}), '(xs, axis=0)\n', (1353, 1365), True, 'import numpy as np\n'), ((1386, 1405), 'numpy.amax', 'np.amax', (['xs'], {'axis': '(0)'}), '(xs, axis=0)\n', (1393, 1405), True, 'import numpy as np\n'), ((5248, 5267), 'numpy.amin', 'np.amin', (['xs'], {'axis': '(0)'}), '(xs, axis=0)\n', (5255, 5267), True, 'import numpy as np\n'), ((5288, 5307), 'numpy.amax', 'np.amax', (['xs'], {'axis': '(0)'}), '(xs, axis=0)\n', (5295, 5307), True, 'import numpy as np\n'), ((8951, 8972), 'numpy.zeros', 'np.zeros', (['xs.shape[0]'], {}), '(xs.shape[0])\n', (8959, 8972), True, 'import numpy as np\n'), ((8992, 9013), 'numpy.zeros', 'np.zeros', (['xs.shape[0]'], {}), '(xs.shape[0])\n', (9000, 9013), True, 'import numpy as np\n'), ((12164, 12178), 'random.seed', 'random.seed', (['(1)'], {}), '(1)\n', (12175, 12178), False, 'import random\n'), ((12246, 12265), 'numpy.amin', 'np.amin', (['xs'], {'axis': '(0)'}), '(xs, axis=0)\n', (12253, 12265), True, 'import numpy as np\n'), ((12286, 12305), 'numpy.amax', 'np.amax', (['xs'], {'axis': '(0)'}), '(xs, axis=0)\n', (12293, 12305), True, 'import numpy as np\n'), ((13536, 13557), 'numpy.zeros', 'np.zeros', (['xs.shape[0]'], {}), '(xs.shape[0])\n', (13544, 13557), True, 'import numpy as np\n'), ((13577, 13598), 'numpy.zeros', 'np.zeros', (['xs.shape[0]'], {}), '(xs.shape[0])\n', (13585, 13598), True, 'import numpy as np\n'), ((21049, 21068), 'numpy.amin', 'np.amin', (['xs'], {'axis': '(0)'}), '(xs, axis=0)\n', (21056, 21068), True, 'import numpy as np\n'), ((21089, 21108), 'numpy.amax', 'np.amax', (['xs'], {'axis': '(0)'}), '(xs, axis=0)\n', (21096, 21108), True, 'import numpy as np\n'), ((22339, 22360), 'numpy.zeros', 'np.zeros', (['xs.shape[0]'], {}), '(xs.shape[0])\n', (22347, 22360), True, 'import numpy as np\n'), ((22380, 22401), 'numpy.zeros', 'np.zeros', (['xs.shape[0]'], {}), '(xs.shape[0])\n', (22388, 22401), True, 'import numpy as np\n'), ((31021, 31053), 'numpy.array', 'np.array', (['[(2 ** j) for j in jj]'], {}), '([(2 ** j) for j in jj])\n', (31029, 31053), True, 'import numpy as np\n'), ((31571, 31603), 'numpy.array', 'np.array', (['[(2 ** j) for j in jj]'], {}), '([(2 ** j) for j in jj])\n', (31579, 31603), True, 'import numpy as np\n'), ((32649, 32681), 'numpy.array', 'np.array', (['[(2 ** j) for j in jj]'], {}), '([(2 ** j) for j in jj])\n', (32657, 32681), True, 'import numpy as np\n'), ((3277, 3301), 'numpy.array', 'np.array', 
(['g_ring_no_i_xs'], {}), '(g_ring_no_i_xs)\n', (3285, 3301), True, 'import numpy as np\n'), ((7554, 7570), 'math.fabs', 'math.fabs', (['tt[3]'], {}), '(tt[3])\n', (7563, 7570), False, 'import math\n'), ((28089, 28110), 'numpy.zeros', 'np.zeros', (['xs.shape[0]'], {}), '(xs.shape[0])\n', (28097, 28110), True, 'import numpy as np\n'), ((29236, 29257), 'numpy.zeros', 'np.zeros', (['xs.shape[0]'], {}), '(xs.shape[0])\n', (29244, 29257), True, 'import numpy as np\n'), ((35035, 35049), 'scipy.special.gamma', 'gamma', (['(k + 0.5)'], {}), '(k + 0.5)\n', (35040, 35049), False, 'from scipy.special import gamma\n'), ((35822, 35840), 'scipy.special.gamma', 'gamma', (['(dim / 2 + 1)'], {}), '(dim / 2 + 1)\n', (35827, 35840), False, 'from scipy.special import gamma\n'), ((7673, 7689), 'math.fabs', 'math.fabs', (['tt[3]'], {}), '(tt[3])\n', (7682, 7689), False, 'import math\n'), ((7692, 7718), 'math.sqrt', 'math.sqrt', (['(delta_j - tt[0])'], {}), '(delta_j - tt[0])\n', (7701, 7718), False, 'import math\n'), ((8069, 8085), 'math.fabs', 'math.fabs', (['tt[3]'], {}), '(tt[3])\n', (8078, 8085), False, 'import math\n'), ((11248, 11287), 'numpy.array', 'np.array', (['[tt[1] for tt in best_c_data]'], {}), '([tt[1] for tt in best_c_data])\n', (11256, 11287), True, 'import numpy as np\n'), ((16312, 16334), 'numpy.array', 'np.array', (['coeff_i_vals'], {}), '(coeff_i_vals)\n', (16320, 16334), True, 'import numpy as np\n'), ((24740, 24762), 'numpy.array', 'np.array', (['coeff_i_vals'], {}), '(coeff_i_vals)\n', (24748, 24762), True, 'import numpy as np\n'), ((31829, 31858), 'pywde.common.all_zs_tensor', 'all_zs_tensor', (['zs_min', 'zs_max'], {}), '(zs_min, zs_max)\n', (31842, 31858), False, 'from pywde.common import all_zs_tensor\n'), ((32186, 32215), 'pywde.common.all_zs_tensor', 'all_zs_tensor', (['zs_min', 'zs_max'], {}), '(zs_min, zs_max)\n', (32199, 32215), False, 'from pywde.common import all_zs_tensor\n'), ((32923, 32952), 'pywde.common.all_zs_tensor', 'all_zs_tensor', (['zs_min', 'zs_max'], {}), '(zs_min, zs_max)\n', (32936, 32952), False, 'from pywde.common import all_zs_tensor\n'), ((33534, 33563), 'pywde.common.all_zs_tensor', 'all_zs_tensor', (['zs_min', 'zs_max'], {}), '(zs_min, zs_max)\n', (33547, 33563), False, 'from pywde.common import all_zs_tensor\n'), ((35005, 35021), 'math.sqrt', 'math.sqrt', (['(n - 1)'], {}), '(n - 1)\n', (35014, 35021), False, 'import math\n'), ((35024, 35032), 'scipy.special.gamma', 'gamma', (['k'], {}), '(k)\n', (35029, 35032), False, 'from scipy.special import gamma\n'), ((18297, 18313), 'math.fabs', 'math.fabs', (['tt[3]'], {}), '(tt[3])\n', (18306, 18313), False, 'import math\n'), ((18629, 18645), 'math.fabs', 'math.fabs', (['tt[3]'], {}), '(tt[3])\n', (18638, 18645), False, 'import math\n'), ((25800, 25819), 'math.fabs', 'math.fabs', (['coeff_zs'], {}), '(coeff_zs)\n', (25809, 25819), False, 'import math\n'), ((31266, 31295), 'pywde.common.all_zs_tensor', 'all_zs_tensor', (['zs_min', 'zs_max'], {}), '(zs_min, zs_max)\n', (31279, 31295), False, 'from pywde.common import all_zs_tensor\n'), ((4419, 4433), 'datetime.datetime.now', 'datetime.now', ([], {}), '()\n', (4431, 4433), False, 'from datetime import datetime\n'), ((18400, 18416), 'math.fabs', 'math.fabs', (['tt[3]'], {}), '(tt[3])\n', (18409, 18416), False, 'import math\n'), ((18460, 18476), 'math.fabs', 'math.fabs', (['tt[3]'], {}), '(tt[3])\n', (18469, 18476), False, 'import math\n'), ((11112, 11134), 'numpy.array', 'np.array', (['coeff_i_vals'], {}), '(coeff_i_vals)\n', (11120, 11134), True, 'import numpy as 
np\n'), ((3380, 3403), 'numpy.sqrt', 'np.sqrt', (['g_ring_no_i_xs'], {}), '(g_ring_no_i_xs)\n', (3387, 3403), True, 'import numpy as np\n'), ((7246, 7268), 'numpy.array', 'np.array', (['coeff_i_vals'], {}), '(coeff_i_vals)\n', (7254, 7268), True, 'import numpy as np\n'), ((10781, 10832), 'numpy.sqrt', 'np.sqrt', (['(g_ring_no_i_xs * g_ring_no_i_xs / norm2_xs)'], {}), '(g_ring_no_i_xs * g_ring_no_i_xs / norm2_xs)\n', (10788, 10832), True, 'import numpy as np\n'), ((3518, 3541), 'numpy.sqrt', 'np.sqrt', (['g_ring_no_i_xs'], {}), '(g_ring_no_i_xs)\n', (3525, 3541), True, 'import numpy as np\n'), ((10955, 10995), 'numpy.sqrt', 'np.sqrt', (['(g_ring_no_i_xs * g_ring_no_i_xs)'], {}), '(g_ring_no_i_xs * g_ring_no_i_xs)\n', (10962, 10995), True, 'import numpy as np\n'), ((16736, 16799), 'numpy.sqrt', 'np.sqrt', (['(loc_g_ring_no_i_xs * loc_g_ring_no_i_xs / loc_norm2_xs)'], {}), '(loc_g_ring_no_i_xs * loc_g_ring_no_i_xs / loc_norm2_xs)\n', (16743, 16799), True, 'import numpy as np\n'), ((25169, 25232), 'numpy.sqrt', 'np.sqrt', (['(loc_g_ring_no_i_xs * loc_g_ring_no_i_xs / loc_norm2_xs)'], {}), '(loc_g_ring_no_i_xs * loc_g_ring_no_i_xs / loc_norm2_xs)\n', (25176, 25232), True, 'import numpy as np\n'), ((16929, 16977), 'numpy.sqrt', 'np.sqrt', (['(loc_g_ring_no_i_xs * loc_g_ring_no_i_xs)'], {}), '(loc_g_ring_no_i_xs * loc_g_ring_no_i_xs)\n', (16936, 16977), True, 'import numpy as np\n'), ((25362, 25410), 'numpy.sqrt', 'np.sqrt', (['(loc_g_ring_no_i_xs * loc_g_ring_no_i_xs)'], {}), '(loc_g_ring_no_i_xs * loc_g_ring_no_i_xs)\n', (25369, 25410), True, 'import numpy as np\n')]
|
#!/usr/bin/env python
# <examples/doc_model_savemodel.py>
import numpy as np
from lmfit.model import Model, save_model
def mysine(x, amp, freq, shift):
return amp * np.sin(x*freq + shift)
sinemodel = Model(mysine)
pars = sinemodel.make_params(amp=1, freq=0.25, shift=0)
save_model(sinemodel, 'sinemodel.sav')
# <end examples/doc_model_savemodel.py>
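# Complementary sketch (not part of the original lmfit example): the saved model
# can be restored with lmfit.model.load_model; the funcdefs argument maps the
# saved function name back to a local callable when it cannot be imported.
def reload_sinemodel():
    from lmfit.model import load_model
    return load_model('sinemodel.sav', funcdefs={'mysine': mysine})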
|
[
"lmfit.model.save_model",
"numpy.sin",
"lmfit.model.Model"
] |
[((209, 222), 'lmfit.model.Model', 'Model', (['mysine'], {}), '(mysine)\n', (214, 222), False, 'from lmfit.model import Model, save_model\n'), ((280, 318), 'lmfit.model.save_model', 'save_model', (['sinemodel', '"""sinemodel.sav"""'], {}), "(sinemodel, 'sinemodel.sav')\n", (290, 318), False, 'from lmfit.model import Model, save_model\n'), ((172, 196), 'numpy.sin', 'np.sin', (['(x * freq + shift)'], {}), '(x * freq + shift)\n', (178, 196), True, 'import numpy as np\n')]
|
#!/usr/bin/env python3
import pyxel
class App:
def __init__(self):
pyxel.init(160, 120, caption="test lol")
pyxel.load("assets/data.pyxres")
pyxel.run(self.update, self.draw)
def update(self):
if pyxel.btnp(pyxel.KEY_Q):
pyxel.quit()
def draw(self):
pyxel.cls(0)
pyxel.text(55, 41, "test lol", 15)
pyxel.blt(61, 66, 0, 0, 0, 38, 16)
if __name__ == "__main__":
App()
|
[
"pyxel.load",
"pyxel.text",
"pyxel.init",
"pyxel.blt",
"pyxel.cls",
"pyxel.btnp",
"pyxel.quit",
"pyxel.run"
] |
[((82, 122), 'pyxel.init', 'pyxel.init', (['(160)', '(120)'], {'caption': '"""test lol"""'}), "(160, 120, caption='test lol')\n", (92, 122), False, 'import pyxel\n'), ((131, 163), 'pyxel.load', 'pyxel.load', (['"""assets/data.pyxres"""'], {}), "('assets/data.pyxres')\n", (141, 163), False, 'import pyxel\n'), ((172, 205), 'pyxel.run', 'pyxel.run', (['self.update', 'self.draw'], {}), '(self.update, self.draw)\n', (181, 205), False, 'import pyxel\n'), ((240, 263), 'pyxel.btnp', 'pyxel.btnp', (['pyxel.KEY_Q'], {}), '(pyxel.KEY_Q)\n', (250, 263), False, 'import pyxel\n'), ((319, 331), 'pyxel.cls', 'pyxel.cls', (['(0)'], {}), '(0)\n', (328, 331), False, 'import pyxel\n'), ((340, 374), 'pyxel.text', 'pyxel.text', (['(55)', '(41)', '"""test lol"""', '(15)'], {}), "(55, 41, 'test lol', 15)\n", (350, 374), False, 'import pyxel\n'), ((383, 417), 'pyxel.blt', 'pyxel.blt', (['(61)', '(66)', '(0)', '(0)', '(0)', '(38)', '(16)'], {}), '(61, 66, 0, 0, 0, 38, 16)\n', (392, 417), False, 'import pyxel\n'), ((277, 289), 'pyxel.quit', 'pyxel.quit', ([], {}), '()\n', (287, 289), False, 'import pyxel\n')]
|
from flask_wtf.recaptcha.validators import Recaptcha, RECAPTCHA_ERROR_CODES
from flask import current_app, request
from wtforms import ValidationError
import urllib.parse
import urllib.request
import json
class Hcaptcha(Recaptcha):
def __call__(self, form, field):
if current_app.testing:
return True
if request.json:
response = request.json.get("h-captcha-response", "")
else:
response = request.form.get("h-captcha-response", "")
remote_ip = request.remote_addr
if not response:
raise ValidationError(field.gettext(self.message))
if not self._validate_recaptcha(response, remote_ip):
field.recaptcha_error = "incorrect-captcha-sol"
raise ValidationError(field.gettext(self.message))
def _validate_recaptcha(self, response, remote_addr):
"""Performs the actual validation."""
try:
private_key = current_app.config["RECAPTCHA_PRIVATE_KEY"]
except KeyError:
raise RuntimeError("No RECAPTCHA_PRIVATE_KEY config set") from None
verify_server = current_app.config.get("RECAPTCHA_VERIFY_SERVER")
if not verify_server:
raise ValidationError("No RECAPTCHA_VALIDATION_SERVER config set.")
data = urllib.parse.urlencode(
{"secret": private_key, "remoteip": remote_addr, "response": response}
).encode("utf-8")
http_response = urllib.request.urlopen(verify_server, data)
if http_response.code != 200:
return False
json_resp = json.loads(http_response.read())
if json_resp["success"]:
return True
for error in json_resp.get("error-codes", []):
if error in RECAPTCHA_ERROR_CODES:
raise ValidationError(RECAPTCHA_ERROR_CODES[error])
return False
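# Hypothetical usage sketch (ContactForm and the message text are illustrative,
# not part of this module): the hCaptcha widget posts "h-captcha-response",
# which Hcaptcha.__call__ above reads from request.form or request.json.
def make_contact_form():
    from flask_wtf import FlaskForm
    from wtforms import StringField

    class ContactForm(FlaskForm):
        # any field works here; Hcaptcha inspects the request, not the field value
        captcha = StringField(validators=[Hcaptcha("Please complete the captcha.")])

    return ContactForm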
|
[
"flask.request.json.get",
"flask.current_app.config.get",
"wtforms.ValidationError",
"flask.request.form.get"
] |
[((1132, 1181), 'flask.current_app.config.get', 'current_app.config.get', (['"""RECAPTCHA_VERIFY_SERVER"""'], {}), "('RECAPTCHA_VERIFY_SERVER')\n", (1154, 1181), False, 'from flask import current_app, request\n'), ((376, 418), 'flask.request.json.get', 'request.json.get', (['"""h-captcha-response"""', '""""""'], {}), "('h-captcha-response', '')\n", (392, 418), False, 'from flask import current_app, request\n'), ((456, 498), 'flask.request.form.get', 'request.form.get', (['"""h-captcha-response"""', '""""""'], {}), "('h-captcha-response', '')\n", (472, 498), False, 'from flask import current_app, request\n'), ((1230, 1291), 'wtforms.ValidationError', 'ValidationError', (['"""No RECAPTCHA_VALIDATION_SERVER config set."""'], {}), "('No RECAPTCHA_VALIDATION_SERVER config set.')\n", (1245, 1291), False, 'from wtforms import ValidationError\n'), ((1810, 1855), 'wtforms.ValidationError', 'ValidationError', (['RECAPTCHA_ERROR_CODES[error]'], {}), '(RECAPTCHA_ERROR_CODES[error])\n', (1825, 1855), False, 'from wtforms import ValidationError\n')]
|
import typing
from app.util import log as logging
from .executor import Executor
from .settings import Settings
from .request import Request
from .response import Response
from ..info import Info
class Plugin:
"""Base Plugin Class.
    This class defines which Executor, Settings, Request and Response classes are used.
    The methods defined here should not be overridden.
"""
Settings = Settings
Executor = Executor
Request = Request
Response = Response
def __init__(self, info: Info, path: str):
with logging.LogCall(__file__, "__init__", self.__class__):
self.info = info
self.path = path
self.logger = logging.PluginLogger(self.info.uid)
self.logger.debug("%s initialized!", self.__class__.__name__)
def execute(self, request: Request) -> Response:
with logging.LogCall(__file__, "execute", self.__class__):
res = self.Response()
try:
                executor = self.Executor(self, request)
                executor.execute()
                res.error = executor.get_error()  # pylint: disable=assignment-from-none
                if res.error:
                    res.error_text = executor.get_error_text()
                res.text = executor.get_text()
                res.points = executor.get_points()
except Exception as e:
res.set_exception(e)
return res
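# Hypothetical subclass sketch (EchoExecutor and EchoPlugin are illustrative):
# a concrete plugin swaps in its own Executor, and Plugin.execute() above then
# drives the inherited protocol: Executor(plugin, request), execute(), and the
# get_error()/get_text()/get_points() accessors.
class EchoExecutor(Executor):
    def execute(self):
        pass  # real plugin work would happen here

    def get_text(self):
        return "echo"

    def get_points(self):
        return 0


class EchoPlugin(Plugin):
    Executor = EchoExecutor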
|
[
"app.util.log.PluginLogger",
"app.util.log.LogCall"
] |
[((545, 598), 'app.util.log.LogCall', 'logging.LogCall', (['__file__', '"""__init__"""', 'self.__class__'], {}), "(__file__, '__init__', self.__class__)\n", (560, 598), True, 'from app.util import log as logging\n'), ((684, 719), 'app.util.log.PluginLogger', 'logging.PluginLogger', (['self.info.uid'], {}), '(self.info.uid)\n', (704, 719), True, 'from app.util import log as logging\n'), ((861, 913), 'app.util.log.LogCall', 'logging.LogCall', (['__file__', '"""execute"""', 'self.__class__'], {}), "(__file__, 'execute', self.__class__)\n", (876, 913), True, 'from app.util import log as logging\n')]
|
from django.db import models
from django.core import validators
from django.contrib.auth.models import AbstractUser
from django.utils.translation import gettext_lazy as _
from django.utils import timezone
class User(AbstractUser):
"""
    Top-most model, for authentication purposes only
"""
is_admin = models.BooleanField(default=False)
class Admin(models.Model):
user = models.OneToOneField(User, on_delete=models.CASCADE)
class Gender(models.IntegerChoices):
male = 1, _('Male')
female = 2, _('Female')
prefer_not_to_say = 3, _('Prefer not to say')
class Account(models.Model):
"""
The main class that stores all the common information for a mentor and a mentee
"""
user = models.OneToOneField(User, on_delete=models.CASCADE)
age = models.IntegerField(
null=True,
validators=[
validators.MinValueValidator(16),
validators.MaxValueValidator(100),
],
default=20
)
gender = models.IntegerField(choices=Gender.choices, default=Gender.choices[-1][0])
mobile = models.CharField(
max_length=10,
null=True,
validators=[
validators.MinLengthValidator(10),
]
)
introduction = models.TextField(max_length=512, null=True)
# education = models.TextField(max_length=512, null=True)
# research_experience = models.TextField(max_length=512, null=True)
expertise = models.TextField(max_length=512, null=True)
social_handle = models.URLField(null=True, help_text="Link to your personal website/LinkedIn profile")
rating = models.DecimalField(
null=True,
max_digits=3,
decimal_places=1,
validators=[
validators.MinValueValidator(0.0),
validators.MaxValueValidator(5.0),
]
)
is_mentor = models.BooleanField(default=False)
is_mentee = models.BooleanField(default=False)
def __str__(self):
return self.user.username
class AccountEducation(models.Model):
"""
Stores the education fields of accounts
"""
account = models.ForeignKey(Account, on_delete=models.CASCADE)
qualification = models.CharField(max_length=128)
start_date = models.DateField()
end_date = models.DateField()
organization = models.CharField(max_length=128)
detail = models.TextField(max_length=512, null=True)
def __str__(self):
return self.account.user.username
class AccountResearchExperience(models.Model):
"""
Stores the research experience of accounts
"""
account = models.ForeignKey(Account, on_delete=models.CASCADE)
position = models.CharField(max_length=128)
start_date = models.DateField()
end_date = models.DateField()
organization = models.CharField(max_length=128)
detail = models.TextField(max_length=512, null=True)
def __str__(self):
return self.account.user.username
class Mentor(models.Model):
"""
The mentor class, stores attributes specific to a mentor
"""
account = models.OneToOneField(Account, on_delete=models.CASCADE)
mentorship_duration = models.IntegerField(
default=6,
validators=[
validators.MinValueValidator(1),
validators.MaxValueValidator(24),
]
)
mentee_group_size = models.IntegerField(
default=1,
validators=[
validators.MinValueValidator(1),
]
)
verified = models.BooleanField(default=False)
is_open_to_mentorship = models.BooleanField(default=True)
will_mentor_faculty = models.BooleanField(default=False)
will_mentor_phd = models.BooleanField(default=False)
will_mentor_mtech = models.BooleanField(default=False)
will_mentor_btech = models.BooleanField(default=False)
# Responsibilities
responsibility1 = models.BooleanField(default=False)
responsibility2 = models.BooleanField(default=False)
responsibility3 = models.BooleanField(default=False)
responsibility4 = models.BooleanField(default=False)
responsibility5 = models.BooleanField(default=False)
responsibility6 = models.BooleanField(default=False)
responsibility7 = models.BooleanField(default=False)
responsibility8 = models.BooleanField(default=False)
other_responsibility = models.TextField(null=True, blank=True, max_length=512)
def __str__(self):
return self.account.user.username
class Mentee(models.Model):
"""
The mentee class, stores attributes specific to a mentee
"""
account = models.OneToOneField(Account, on_delete=models.CASCADE)
needs_mentoring = models.BooleanField(default=True)
needs_urgent_mentoring = models.BooleanField(default=False)
topics = models.TextField(max_length=512, null=True)
def __str__(self):
return self.account.user.username
class MyMentee(models.Model):
"""
    Stores the mentees assigned to a mentor; you can get the mentees assigned to a mentor
    by querying MyMentee.objects.filter(mentor='current-mentor')
"""
mentor = models.ForeignKey(Mentor, on_delete=models.CASCADE)
mentee = models.ForeignKey(Mentee, on_delete=models.CASCADE)
def __str__(self):
return self.mentor.account.user.username + ' -> ' + self.mentee.account.user.username
class MyMentor(models.Model):
"""
For performance gains
    Stores the mentors assigned to a mentee; you can get the mentors assigned to a mentee
    by querying MyMentor.objects.filter(mentee='current-mentee')
"""
mentee = models.ForeignKey(Mentee, on_delete=models.CASCADE)
mentor = models.ForeignKey(Mentor, on_delete=models.CASCADE)
def __str__(self):
return self.mentee.account.user.username + ' -> ' + self.mentor.account.user.username
class MenteeSentRequest(models.Model):
"""
For a mentee to view mentorship requests
"""
mentee = models.ForeignKey(Mentee, on_delete=models.CASCADE)
mentor = models.ForeignKey(Mentor, on_delete=models.CASCADE)
def __str__(self):
return self.mentee.account.user.username + ' -> ' + self.mentor.account.user.username
class MenteeRoles(models.IntegerChoices):
"""
    The different types of users that can exist. These types are referenced both as
    the types of mentee a mentor needs and as the types of mentor a mentee needs.
"""
faculty = 1, _('Faculty')
developer = 2, _('Industry Researcher')
undergraduate = 3, _('BTech')
graduate = 4, _('MTech')
post_graduate = 5, _('PhD')
class MentorRoles(models.IntegerChoices):
faculty = 1, _('Faculty')
developer = 2, _('Industry Researcher')
class Fields(models.IntegerChoices):
"""
The different fields of users that can exist
"""
computer_science = 1, _('Computer Science and Engineering')
electronics_and_communication = 2, _('Electronics and Communication Engineering')
computer_science_and_design = 3, _('Computer Science and Design')
computer_science_and_mathematics = 4, _('Computer Science and Mathematics')
computer_science_and_social_sciences = 5, _('Computer Science and Social Sciences')
computer_science_and_artificial_intelligence = 6, _('Computer Science and Artificial Intelligence')
class MentorRoleField(models.Model):
"""
    Stores the mentor's qualifications: their role (current / past) and their field (current / past)
"""
mentor = models.OneToOneField(Mentor, on_delete=models.CASCADE)
role = models.IntegerField(choices=MentorRoles.choices, null=True)
field = models.IntegerField(choices=Fields.choices, null=True)
def __str__(self):
return "{} -> {} -> {}".format(self.mentor.account.user.username, self.get_role_display(), self.get_field_display())
class MenteeRoleField(models.Model):
"""
    Stores the mentee's qualifications: their role (current / past) and their field (current / past)
"""
mentee = models.OneToOneField(Mentee, on_delete=models.CASCADE)
role = models.IntegerField(choices=MenteeRoles.choices, null=True)
field = models.IntegerField(choices=Fields.choices, null=True)
def __str__(self):
return "{} -> {} -> {}".format(self.mentee.account.user.username, self.get_role_display(), self.get_field_display())
class MentorExpectedRoleField(models.Model):
"""
Stores what the mentors expect from mentees in terms of their
    qualifications: their role (current / past) and their field (current / past)
"""
mentor = models.ForeignKey(Mentor, on_delete=models.CASCADE)
    role = models.IntegerField(choices=MenteeRoles.choices, null=True)  # the mentee role a mentor expects
field = models.IntegerField(choices=Fields.choices, null=True)
def __str__(self):
return self.mentor.account.user.username + ' -> ' + self.get_role_display() + ' -> ' + self.get_field_display()
class MenteeExpectedRoleField(models.Model):
"""
Stores what the mentees expect from mentors in terms of their
    qualifications: their role (current / past) and their field (current / past)
NOTE: this might be deleted later on...
"""
mentee = models.ForeignKey(Mentee, on_delete=models.CASCADE)
    role = models.IntegerField(choices=MentorRoles.choices, null=True)  # the mentor role a mentee expects
field = models.IntegerField(choices=Fields.choices, null=True)
def __str__(self):
return self.mentee.account.user.username + ' -> ' + self.get_role_display() + ' -> ' + self.get_field_display()
class Message(models.Model):
"""
Table to store the chat messages among users.
"""
sender = models.ForeignKey(Account, on_delete=models.CASCADE, related_name='message_sender')
receiver = models.ForeignKey(Account, on_delete=models.CASCADE, related_name='message_receiver')
content = models.TextField(max_length=512, null=True)
time_posted = models.DateTimeField(auto_now_add=True)
def __str__(self):
return self.sender.user.username + ' messaged ' + self.receiver.user.username
class Meeting(models.Model):
creator = models.ForeignKey(Account, on_delete=models.CASCADE, related_name='meeting_creator')
guest = models.ForeignKey(Account, on_delete=models.CASCADE, related_name='meeting_guest')
title = models.CharField(max_length=64, default="Untitled Meeting")
agenda = models.CharField(max_length=128, default="")
time = models.DateTimeField(auto_now_add=False)
meeting_url = models.CharField(max_length=128, default="https://www.meet.google.com")
def __str__(self):
return self.creator.user.username + ' created a meeting with ' + self.guest.user.username
class MentorResponsibility(models.IntegerChoices):
"""
Reference: Mail/Github Issue
"""
responsibility1 = 1, _('Listen to research proposals/initial research and give suggestions for improvement')
responsibility2 = 2, _('Read papers written (the final version which the author wants to submit) and give inputs')
responsibility3 = 3, _('Guide in literature reading')
responsibility4 = 4, _('Help in understanding difficult concepts, discussing some papers/results')
responsibility5 = 5, _('Guidance on where to submit a research paper')
responsibility6 = 6, _('Guidance on the proper conduct of research and literature review')
responsibility7 = 7, _('Review and comment on the resume')
responsibility8 = 8, _('Guide on postdoc and other research job possibilities')
class Areas(models.IntegerChoices):
'''
Reference: http://csrankings.org/
'''
algorithms_and_complexity = 1, _('Algorithms and Complexity')
artificial_intelligence = 2, _('Artificial Intelligence')
computational_bio_and_bioinformatics = 3, _('Computational Bio and Bioinformatics')
computer_architecture = 4, _('Computer Architecture')
computer_graphics = 5, _('Computer Graphics')
computer_networks = 6, _('Computer Networks')
computer_security = 7, _('Computer Security')
computer_vision = 8, _('Computer Vision')
cryptography = 9, _('Cryptography')
databases = 10, _('Databases')
design_automation = 11, _('Design Automation')
economics_and_computation = 12, _('Economics and Computation')
embedded_and_real_time_systems = 13, _('Embedded and Real-Time Systems')
high_performance_computing = 14, _('High-Performance Computing')
human_computer_interaction = 15, _('Human-Computer Interaction')
logic_and_verification = 16, _('Logic and Verification')
machine_learning_and_data_mining = 17, _('Machine Learning and Data Mining')
measurement_and_performance_analysis = 18, _('Measurement and Performance Analysis')
mobile_computing = 19, _('Mobile Computing')
natural_language_processing = 20, _('Natural Language Processing')
operating_systems = 21, _('Operating Systems')
programming_languages = 22, _('Programming Languages')
robotics = 23, _('Robotics')
software_engineering = 24, _('Software Engineering')
the_web_and_information_retrieval = 25, _('The Web and Information Retrieval')
visualization = 26, _('Visualization')
class MentorArea(models.Model):
mentor = models.OneToOneField(Mentor, on_delete=models.CASCADE)
area = models.IntegerField(choices=Areas.choices, null=True)
subarea = models.CharField(max_length=64, null=True)
def __str__(self):
return "{} of area {}".format(self.mentor.account.user.username, self.get_area_display())
class MentorshipRequestMessage(models.Model):
"""
    Store the SOP, commitment and expectations of the mentee, which are sent to the mentor
    at the time of requesting mentorship
"""
mentor = models.ForeignKey(Mentor, on_delete=models.CASCADE)
mentee = models.ForeignKey(Mentee, on_delete=models.CASCADE)
sop = models.TextField(max_length=512, null=True)
expectations = models.TextField(max_length=256, null=True)
commitment = models.TextField(max_length=256, null=True)
def __str__(self):
return "{} sent a request to {}".format(
self.mentee.account.user.username, self.mentor.account.user.username)
class MeetingSummary(models.Model):
"""
Store:
1. Meeting date
2. Meeting length (in hours)
3. Meeting agenda
4. Meeting todos (action items)
5. Next meeting date (tentative)
6. Next meeting agenda
"""
mentor = models.ForeignKey(Mentor, on_delete=models.CASCADE)
mentee = models.ForeignKey(Mentee, on_delete=models.CASCADE)
meeting_date = models.DateTimeField(auto_now_add=False)
meeting_length = models.FloatField()
meeting_details = models.TextField(max_length=512)
meeting_todos = models.TextField(max_length=512, null=True)
next_meeting_date = models.DateTimeField(auto_now_add=False)
next_meeting_agenda = models.TextField(max_length=512)
def __str__(self):
return "Meeting held at {} of length {} hours".format(
self.meeting_date, self.meeting_length)
class Milestone(models.Model):
mentor = models.ForeignKey(Mentor, on_delete=models.CASCADE)
mentee = models.ForeignKey(Mentee, on_delete=models.CASCADE)
content = models.TextField(max_length=512)
timestamp = models.DateField(null=True, blank=True)
def __str__(self):
return f'Mentor: {self.mentor}, Mentee: {self.mentee}'
class DeletedMentorMenteeRelation(models.Model):
mentor = models.ForeignKey(Mentor, on_delete=models.CASCADE)
mentee = models.ForeignKey(Mentee, on_delete=models.CASCADE)
end_reason = models.TextField(max_length=512)
date_ended = models.DateTimeField(default=timezone.now)
def __str__(self):
return f'[ENDED] Mentor: {self.mentor}, Mentee: {self.mentee}'
class RejectedMentorshipRequest(models.Model):
mentor = models.ForeignKey(Mentor, on_delete=models.CASCADE)
mentee = models.ForeignKey(Mentee, on_delete=models.CASCADE)
reject_reason = models.TextField(max_length=512)
date_rejected = models.DateTimeField(default=timezone.now)
def __str__(self):
return f'[REJECTED] Mentor: {self.mentor}, Mentee: {self.mentee}'
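# Hypothetical query sketch (candidate_mentors_for is illustrative, not part of
# the original models): pair a mentee with verified, open mentors whose stated
# expectations match the mentee's field, using the tables defined above.
def candidate_mentors_for(mentee):
    profile = MenteeRoleField.objects.get(mentee=mentee)
    expected = MentorExpectedRoleField.objects.filter(field=profile.field)
    return Mentor.objects.filter(
        pk__in=expected.values_list('mentor', flat=True),
        verified=True,
        is_open_to_mentorship=True,
    )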
|
[
"django.db.models.TextField",
"django.db.models.OneToOneField",
"django.db.models.URLField",
"django.core.validators.MinLengthValidator",
"django.utils.translation.gettext_lazy",
"django.db.models.ForeignKey",
"django.db.models.CharField",
"django.core.validators.MinValueValidator",
"django.db.models.FloatField",
"django.db.models.BooleanField",
"django.db.models.IntegerField",
"django.db.models.DateField",
"django.db.models.DateTimeField",
"django.core.validators.MaxValueValidator"
] |
[((299, 333), 'django.db.models.BooleanField', 'models.BooleanField', ([], {'default': '(False)'}), '(default=False)\n', (318, 333), False, 'from django.db import models\n'), ((371, 423), 'django.db.models.OneToOneField', 'models.OneToOneField', (['User'], {'on_delete': 'models.CASCADE'}), '(User, on_delete=models.CASCADE)\n', (391, 423), False, 'from django.db import models\n'), ((695, 747), 'django.db.models.OneToOneField', 'models.OneToOneField', (['User'], {'on_delete': 'models.CASCADE'}), '(User, on_delete=models.CASCADE)\n', (715, 747), False, 'from django.db import models\n'), ((913, 987), 'django.db.models.IntegerField', 'models.IntegerField', ([], {'choices': 'Gender.choices', 'default': 'Gender.choices[-1][0]'}), '(choices=Gender.choices, default=Gender.choices[-1][0])\n', (932, 987), False, 'from django.db import models\n'), ((1124, 1167), 'django.db.models.TextField', 'models.TextField', ([], {'max_length': '(512)', 'null': '(True)'}), '(max_length=512, null=True)\n', (1140, 1167), False, 'from django.db import models\n'), ((1309, 1352), 'django.db.models.TextField', 'models.TextField', ([], {'max_length': '(512)', 'null': '(True)'}), '(max_length=512, null=True)\n', (1325, 1352), False, 'from django.db import models\n'), ((1370, 1461), 'django.db.models.URLField', 'models.URLField', ([], {'null': '(True)', 'help_text': '"""Link to your personal website/LinkedIn profile"""'}), "(null=True, help_text=\n 'Link to your personal website/LinkedIn profile')\n", (1385, 1461), False, 'from django.db import models\n'), ((1653, 1687), 'django.db.models.BooleanField', 'models.BooleanField', ([], {'default': '(False)'}), '(default=False)\n', (1672, 1687), False, 'from django.db import models\n'), ((1701, 1735), 'django.db.models.BooleanField', 'models.BooleanField', ([], {'default': '(False)'}), '(default=False)\n', (1720, 1735), False, 'from django.db import models\n'), ((1887, 1939), 'django.db.models.ForeignKey', 'models.ForeignKey', (['Account'], {'on_delete': 'models.CASCADE'}), '(Account, on_delete=models.CASCADE)\n', (1904, 1939), False, 'from django.db import models\n'), ((1957, 1989), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(128)'}), '(max_length=128)\n', (1973, 1989), False, 'from django.db import models\n'), ((2004, 2022), 'django.db.models.DateField', 'models.DateField', ([], {}), '()\n', (2020, 2022), False, 'from django.db import models\n'), ((2035, 2053), 'django.db.models.DateField', 'models.DateField', ([], {}), '()\n', (2051, 2053), False, 'from django.db import models\n'), ((2070, 2102), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(128)'}), '(max_length=128)\n', (2086, 2102), False, 'from django.db import models\n'), ((2113, 2156), 'django.db.models.TextField', 'models.TextField', ([], {'max_length': '(512)', 'null': '(True)'}), '(max_length=512, null=True)\n', (2129, 2156), False, 'from django.db import models\n'), ((2328, 2380), 'django.db.models.ForeignKey', 'models.ForeignKey', (['Account'], {'on_delete': 'models.CASCADE'}), '(Account, on_delete=models.CASCADE)\n', (2345, 2380), False, 'from django.db import models\n'), ((2393, 2425), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(128)'}), '(max_length=128)\n', (2409, 2425), False, 'from django.db import models\n'), ((2440, 2458), 'django.db.models.DateField', 'models.DateField', ([], {}), '()\n', (2456, 2458), False, 'from django.db import models\n'), ((2471, 2489), 'django.db.models.DateField', 'models.DateField', ([], {}), '()\n', 
(2487, 2489), False, 'from django.db import models\n'), ((2506, 2538), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(128)'}), '(max_length=128)\n', (2522, 2538), False, 'from django.db import models\n'), ((2549, 2592), 'django.db.models.TextField', 'models.TextField', ([], {'max_length': '(512)', 'null': '(True)'}), '(max_length=512, null=True)\n', (2565, 2592), False, 'from django.db import models\n'), ((2759, 2814), 'django.db.models.OneToOneField', 'models.OneToOneField', (['Account'], {'on_delete': 'models.CASCADE'}), '(Account, on_delete=models.CASCADE)\n', (2779, 2814), False, 'from django.db import models\n'), ((3094, 3128), 'django.db.models.BooleanField', 'models.BooleanField', ([], {'default': '(False)'}), '(default=False)\n', (3113, 3128), False, 'from django.db import models\n'), ((3154, 3187), 'django.db.models.BooleanField', 'models.BooleanField', ([], {'default': '(True)'}), '(default=True)\n', (3173, 3187), False, 'from django.db import models\n'), ((3213, 3247), 'django.db.models.BooleanField', 'models.BooleanField', ([], {'default': '(False)'}), '(default=False)\n', (3232, 3247), False, 'from django.db import models\n'), ((3267, 3301), 'django.db.models.BooleanField', 'models.BooleanField', ([], {'default': '(False)'}), '(default=False)\n', (3286, 3301), False, 'from django.db import models\n'), ((3323, 3357), 'django.db.models.BooleanField', 'models.BooleanField', ([], {'default': '(False)'}), '(default=False)\n', (3342, 3357), False, 'from django.db import models\n'), ((3379, 3413), 'django.db.models.BooleanField', 'models.BooleanField', ([], {'default': '(False)'}), '(default=False)\n', (3398, 3413), False, 'from django.db import models\n'), ((3456, 3490), 'django.db.models.BooleanField', 'models.BooleanField', ([], {'default': '(False)'}), '(default=False)\n', (3475, 3490), False, 'from django.db import models\n'), ((3510, 3544), 'django.db.models.BooleanField', 'models.BooleanField', ([], {'default': '(False)'}), '(default=False)\n', (3529, 3544), False, 'from django.db import models\n'), ((3564, 3598), 'django.db.models.BooleanField', 'models.BooleanField', ([], {'default': '(False)'}), '(default=False)\n', (3583, 3598), False, 'from django.db import models\n'), ((3618, 3652), 'django.db.models.BooleanField', 'models.BooleanField', ([], {'default': '(False)'}), '(default=False)\n', (3637, 3652), False, 'from django.db import models\n'), ((3672, 3706), 'django.db.models.BooleanField', 'models.BooleanField', ([], {'default': '(False)'}), '(default=False)\n', (3691, 3706), False, 'from django.db import models\n'), ((3726, 3760), 'django.db.models.BooleanField', 'models.BooleanField', ([], {'default': '(False)'}), '(default=False)\n', (3745, 3760), False, 'from django.db import models\n'), ((3780, 3814), 'django.db.models.BooleanField', 'models.BooleanField', ([], {'default': '(False)'}), '(default=False)\n', (3799, 3814), False, 'from django.db import models\n'), ((3834, 3868), 'django.db.models.BooleanField', 'models.BooleanField', ([], {'default': '(False)'}), '(default=False)\n', (3853, 3868), False, 'from django.db import models\n'), ((3893, 3948), 'django.db.models.TextField', 'models.TextField', ([], {'null': '(True)', 'blank': '(True)', 'max_length': '(512)'}), '(null=True, blank=True, max_length=512)\n', (3909, 3948), False, 'from django.db import models\n'), ((4115, 4170), 'django.db.models.OneToOneField', 'models.OneToOneField', (['Account'], {'on_delete': 'models.CASCADE'}), '(Account, on_delete=models.CASCADE)\n', (4135, 4170), 
False, 'from django.db import models\n'), ((4190, 4223), 'django.db.models.BooleanField', 'models.BooleanField', ([], {'default': '(True)'}), '(default=True)\n', (4209, 4223), False, 'from django.db import models\n'), ((4250, 4284), 'django.db.models.BooleanField', 'models.BooleanField', ([], {'default': '(False)'}), '(default=False)\n', (4269, 4284), False, 'from django.db import models\n'), ((4295, 4338), 'django.db.models.TextField', 'models.TextField', ([], {'max_length': '(512)', 'null': '(True)'}), '(max_length=512, null=True)\n', (4311, 4338), False, 'from django.db import models\n'), ((4599, 4650), 'django.db.models.ForeignKey', 'models.ForeignKey', (['Mentor'], {'on_delete': 'models.CASCADE'}), '(Mentor, on_delete=models.CASCADE)\n', (4616, 4650), False, 'from django.db import models\n'), ((4661, 4712), 'django.db.models.ForeignKey', 'models.ForeignKey', (['Mentee'], {'on_delete': 'models.CASCADE'}), '(Mentee, on_delete=models.CASCADE)\n', (4678, 4712), False, 'from django.db import models\n'), ((5048, 5099), 'django.db.models.ForeignKey', 'models.ForeignKey', (['Mentee'], {'on_delete': 'models.CASCADE'}), '(Mentee, on_delete=models.CASCADE)\n', (5065, 5099), False, 'from django.db import models\n'), ((5110, 5161), 'django.db.models.ForeignKey', 'models.ForeignKey', (['Mentor'], {'on_delete': 'models.CASCADE'}), '(Mentor, on_delete=models.CASCADE)\n', (5127, 5161), False, 'from django.db import models\n'), ((5374, 5425), 'django.db.models.ForeignKey', 'models.ForeignKey', (['Mentee'], {'on_delete': 'models.CASCADE'}), '(Mentee, on_delete=models.CASCADE)\n', (5391, 5425), False, 'from django.db import models\n'), ((5436, 5487), 'django.db.models.ForeignKey', 'models.ForeignKey', (['Mentor'], {'on_delete': 'models.CASCADE'}), '(Mentor, on_delete=models.CASCADE)\n', (5453, 5487), False, 'from django.db import models\n'), ((6892, 6946), 'django.db.models.OneToOneField', 'models.OneToOneField', (['Mentor'], {'on_delete': 'models.CASCADE'}), '(Mentor, on_delete=models.CASCADE)\n', (6912, 6946), False, 'from django.db import models\n'), ((6955, 7014), 'django.db.models.IntegerField', 'models.IntegerField', ([], {'choices': 'MentorRoles.choices', 'null': '(True)'}), '(choices=MentorRoles.choices, null=True)\n', (6974, 7014), False, 'from django.db import models\n'), ((7024, 7078), 'django.db.models.IntegerField', 'models.IntegerField', ([], {'choices': 'Fields.choices', 'null': '(True)'}), '(choices=Fields.choices, null=True)\n', (7043, 7078), False, 'from django.db import models\n'), ((7373, 7427), 'django.db.models.OneToOneField', 'models.OneToOneField', (['Mentee'], {'on_delete': 'models.CASCADE'}), '(Mentee, on_delete=models.CASCADE)\n', (7393, 7427), False, 'from django.db import models\n'), ((7436, 7495), 'django.db.models.IntegerField', 'models.IntegerField', ([], {'choices': 'MenteeRoles.choices', 'null': '(True)'}), '(choices=MenteeRoles.choices, null=True)\n', (7455, 7495), False, 'from django.db import models\n'), ((7505, 7559), 'django.db.models.IntegerField', 'models.IntegerField', ([], {'choices': 'Fields.choices', 'null': '(True)'}), '(choices=Fields.choices, null=True)\n', (7524, 7559), False, 'from django.db import models\n'), ((7905, 7956), 'django.db.models.ForeignKey', 'models.ForeignKey', (['Mentor'], {'on_delete': 'models.CASCADE'}), '(Mentor, on_delete=models.CASCADE)\n', (7922, 7956), False, 'from django.db import models\n'), ((7965, 8024), 'django.db.models.IntegerField', 'models.IntegerField', ([], {'choices': 'MentorRoles.choices', 'null': '(True)'}), 
'(choices=MentorRoles.choices, null=True)\n', (7984, 8024), False, 'from django.db import models\n'), ((8034, 8088), 'django.db.models.IntegerField', 'models.IntegerField', ([], {'choices': 'Fields.choices', 'null': '(True)'}), '(choices=Fields.choices, null=True)\n', (8053, 8088), False, 'from django.db import models\n'), ((8472, 8523), 'django.db.models.ForeignKey', 'models.ForeignKey', (['Mentee'], {'on_delete': 'models.CASCADE'}), '(Mentee, on_delete=models.CASCADE)\n', (8489, 8523), False, 'from django.db import models\n'), ((8532, 8591), 'django.db.models.IntegerField', 'models.IntegerField', ([], {'choices': 'MenteeRoles.choices', 'null': '(True)'}), '(choices=MenteeRoles.choices, null=True)\n', (8551, 8591), False, 'from django.db import models\n'), ((8601, 8655), 'django.db.models.IntegerField', 'models.IntegerField', ([], {'choices': 'Fields.choices', 'null': '(True)'}), '(choices=Fields.choices, null=True)\n', (8620, 8655), False, 'from django.db import models\n'), ((8889, 8977), 'django.db.models.ForeignKey', 'models.ForeignKey', (['Account'], {'on_delete': 'models.CASCADE', 'related_name': '"""message_sender"""'}), "(Account, on_delete=models.CASCADE, related_name=\n 'message_sender')\n", (8906, 8977), False, 'from django.db import models\n'), ((8985, 9075), 'django.db.models.ForeignKey', 'models.ForeignKey', (['Account'], {'on_delete': 'models.CASCADE', 'related_name': '"""message_receiver"""'}), "(Account, on_delete=models.CASCADE, related_name=\n 'message_receiver')\n", (9002, 9075), False, 'from django.db import models\n'), ((9082, 9125), 'django.db.models.TextField', 'models.TextField', ([], {'max_length': '(512)', 'null': '(True)'}), '(max_length=512, null=True)\n', (9098, 9125), False, 'from django.db import models\n'), ((9141, 9180), 'django.db.models.DateTimeField', 'models.DateTimeField', ([], {'auto_now_add': '(True)'}), '(auto_now_add=True)\n', (9161, 9180), False, 'from django.db import models\n'), ((9324, 9413), 'django.db.models.ForeignKey', 'models.ForeignKey', (['Account'], {'on_delete': 'models.CASCADE', 'related_name': '"""meeting_creator"""'}), "(Account, on_delete=models.CASCADE, related_name=\n 'meeting_creator')\n", (9341, 9413), False, 'from django.db import models\n'), ((9418, 9505), 'django.db.models.ForeignKey', 'models.ForeignKey', (['Account'], {'on_delete': 'models.CASCADE', 'related_name': '"""meeting_guest"""'}), "(Account, on_delete=models.CASCADE, related_name=\n 'meeting_guest')\n", (9435, 9505), False, 'from django.db import models\n'), ((9510, 9569), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(64)', 'default': '"""Untitled Meeting"""'}), "(max_length=64, default='Untitled Meeting')\n", (9526, 9569), False, 'from django.db import models\n'), ((9580, 9624), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(128)', 'default': '""""""'}), "(max_length=128, default='')\n", (9596, 9624), False, 'from django.db import models\n'), ((9633, 9673), 'django.db.models.DateTimeField', 'models.DateTimeField', ([], {'auto_now_add': '(False)'}), '(auto_now_add=False)\n', (9653, 9673), False, 'from django.db import models\n'), ((9689, 9760), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(128)', 'default': '"""https://www.meet.google.com"""'}), "(max_length=128, default='https://www.meet.google.com')\n", (9705, 9760), False, 'from django.db import models\n'), ((12641, 12695), 'django.db.models.OneToOneField', 'models.OneToOneField', (['Mentor'], {'on_delete': 'models.CASCADE'}), 
'(Mentor, on_delete=models.CASCADE)\n', (12661, 12695), False, 'from django.db import models\n'), ((12704, 12757), 'django.db.models.IntegerField', 'models.IntegerField', ([], {'choices': 'Areas.choices', 'null': '(True)'}), '(choices=Areas.choices, null=True)\n', (12723, 12757), False, 'from django.db import models\n'), ((12769, 12811), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(64)', 'null': '(True)'}), '(max_length=64, null=True)\n', (12785, 12811), False, 'from django.db import models\n'), ((13119, 13170), 'django.db.models.ForeignKey', 'models.ForeignKey', (['Mentor'], {'on_delete': 'models.CASCADE'}), '(Mentor, on_delete=models.CASCADE)\n', (13136, 13170), False, 'from django.db import models\n'), ((13181, 13232), 'django.db.models.ForeignKey', 'models.ForeignKey', (['Mentee'], {'on_delete': 'models.CASCADE'}), '(Mentee, on_delete=models.CASCADE)\n', (13198, 13232), False, 'from django.db import models\n'), ((13240, 13283), 'django.db.models.TextField', 'models.TextField', ([], {'max_length': '(512)', 'null': '(True)'}), '(max_length=512, null=True)\n', (13256, 13283), False, 'from django.db import models\n'), ((13300, 13343), 'django.db.models.TextField', 'models.TextField', ([], {'max_length': '(256)', 'null': '(True)'}), '(max_length=256, null=True)\n', (13316, 13343), False, 'from django.db import models\n'), ((13358, 13401), 'django.db.models.TextField', 'models.TextField', ([], {'max_length': '(256)', 'null': '(True)'}), '(max_length=256, null=True)\n', (13374, 13401), False, 'from django.db import models\n'), ((13763, 13814), 'django.db.models.ForeignKey', 'models.ForeignKey', (['Mentor'], {'on_delete': 'models.CASCADE'}), '(Mentor, on_delete=models.CASCADE)\n', (13780, 13814), False, 'from django.db import models\n'), ((13825, 13876), 'django.db.models.ForeignKey', 'models.ForeignKey', (['Mentee'], {'on_delete': 'models.CASCADE'}), '(Mentee, on_delete=models.CASCADE)\n', (13842, 13876), False, 'from django.db import models\n'), ((13894, 13934), 'django.db.models.DateTimeField', 'models.DateTimeField', ([], {'auto_now_add': '(False)'}), '(auto_now_add=False)\n', (13914, 13934), False, 'from django.db import models\n'), ((13953, 13972), 'django.db.models.FloatField', 'models.FloatField', ([], {}), '()\n', (13970, 13972), False, 'from django.db import models\n'), ((13992, 14024), 'django.db.models.TextField', 'models.TextField', ([], {'max_length': '(512)'}), '(max_length=512)\n', (14008, 14024), False, 'from django.db import models\n'), ((14042, 14085), 'django.db.models.TextField', 'models.TextField', ([], {'max_length': '(512)', 'null': '(True)'}), '(max_length=512, null=True)\n', (14058, 14085), False, 'from django.db import models\n'), ((14108, 14148), 'django.db.models.DateTimeField', 'models.DateTimeField', ([], {'auto_now_add': '(False)'}), '(auto_now_add=False)\n', (14128, 14148), False, 'from django.db import models\n'), ((14172, 14204), 'django.db.models.TextField', 'models.TextField', ([], {'max_length': '(512)'}), '(max_length=512)\n', (14188, 14204), False, 'from django.db import models\n'), ((14369, 14420), 'django.db.models.ForeignKey', 'models.ForeignKey', (['Mentor'], {'on_delete': 'models.CASCADE'}), '(Mentor, on_delete=models.CASCADE)\n', (14386, 14420), False, 'from django.db import models\n'), ((14431, 14482), 'django.db.models.ForeignKey', 'models.ForeignKey', (['Mentee'], {'on_delete': 'models.CASCADE'}), '(Mentee, on_delete=models.CASCADE)\n', (14448, 14482), False, 'from django.db import models\n'), ((14494, 14526), 
'django.db.models.TextField', 'models.TextField', ([], {'max_length': '(512)'}), '(max_length=512)\n', (14510, 14526), False, 'from django.db import models\n'), ((14540, 14579), 'django.db.models.DateField', 'models.DateField', ([], {'null': '(True)', 'blank': '(True)'}), '(null=True, blank=True)\n', (14556, 14579), False, 'from django.db import models\n'), ((14719, 14770), 'django.db.models.ForeignKey', 'models.ForeignKey', (['Mentor'], {'on_delete': 'models.CASCADE'}), '(Mentor, on_delete=models.CASCADE)\n', (14736, 14770), False, 'from django.db import models\n'), ((14781, 14832), 'django.db.models.ForeignKey', 'models.ForeignKey', (['Mentee'], {'on_delete': 'models.CASCADE'}), '(Mentee, on_delete=models.CASCADE)\n', (14798, 14832), False, 'from django.db import models\n'), ((14847, 14879), 'django.db.models.TextField', 'models.TextField', ([], {'max_length': '(512)'}), '(max_length=512)\n', (14863, 14879), False, 'from django.db import models\n'), ((14894, 14936), 'django.db.models.DateTimeField', 'models.DateTimeField', ([], {'default': 'timezone.now'}), '(default=timezone.now)\n', (14914, 14936), False, 'from django.db import models\n'), ((15082, 15133), 'django.db.models.ForeignKey', 'models.ForeignKey', (['Mentor'], {'on_delete': 'models.CASCADE'}), '(Mentor, on_delete=models.CASCADE)\n', (15099, 15133), False, 'from django.db import models\n'), ((15144, 15195), 'django.db.models.ForeignKey', 'models.ForeignKey', (['Mentee'], {'on_delete': 'models.CASCADE'}), '(Mentee, on_delete=models.CASCADE)\n', (15161, 15195), False, 'from django.db import models\n'), ((15213, 15245), 'django.db.models.TextField', 'models.TextField', ([], {'max_length': '(512)'}), '(max_length=512)\n', (15229, 15245), False, 'from django.db import models\n'), ((15263, 15305), 'django.db.models.DateTimeField', 'models.DateTimeField', ([], {'default': 'timezone.now'}), '(default=timezone.now)\n', (15283, 15305), False, 'from django.db import models\n'), ((478, 487), 'django.utils.translation.gettext_lazy', '_', (['"""Male"""'], {}), "('Male')\n", (479, 487), True, 'from django.utils.translation import gettext_lazy as _\n'), ((505, 516), 'django.utils.translation.gettext_lazy', '_', (['"""Female"""'], {}), "('Female')\n", (506, 516), True, 'from django.utils.translation import gettext_lazy as _\n'), ((542, 564), 'django.utils.translation.gettext_lazy', '_', (['"""Prefer not to say"""'], {}), "('Prefer not to say')\n", (543, 564), True, 'from django.utils.translation import gettext_lazy as _\n'), ((5827, 5839), 'django.utils.translation.gettext_lazy', '_', (['"""Faculty"""'], {}), "('Faculty')\n", (5828, 5839), True, 'from django.utils.translation import gettext_lazy as _\n'), ((5860, 5884), 'django.utils.translation.gettext_lazy', '_', (['"""Industry Researcher"""'], {}), "('Industry Researcher')\n", (5861, 5884), True, 'from django.utils.translation import gettext_lazy as _\n'), ((5905, 5915), 'django.utils.translation.gettext_lazy', '_', (['"""BTech"""'], {}), "('BTech')\n", (5906, 5915), True, 'from django.utils.translation import gettext_lazy as _\n'), ((5936, 5946), 'django.utils.translation.gettext_lazy', '_', (['"""MTech"""'], {}), "('MTech')\n", (5937, 5946), True, 'from django.utils.translation import gettext_lazy as _\n'), ((5967, 5975), 'django.utils.translation.gettext_lazy', '_', (['"""PhD"""'], {}), "('PhD')\n", (5968, 5975), True, 'from django.utils.translation import gettext_lazy as _\n'), ((6036, 6048), 'django.utils.translation.gettext_lazy', '_', (['"""Faculty"""'], {}), "('Faculty')\n", (6037, 
6048), True, 'from django.utils.translation import gettext_lazy as _\n'), ((6065, 6089), 'django.utils.translation.gettext_lazy', '_', (['"""Industry Researcher"""'], {}), "('Industry Researcher')\n", (6066, 6089), True, 'from django.utils.translation import gettext_lazy as _\n'), ((6236, 6273), 'django.utils.translation.gettext_lazy', '_', (['"""Computer Science and Engineering"""'], {}), "('Computer Science and Engineering')\n", (6237, 6273), True, 'from django.utils.translation import gettext_lazy as _\n'), ((6325, 6371), 'django.utils.translation.gettext_lazy', '_', (['"""Electronics and Communication Engineering"""'], {}), "('Electronics and Communication Engineering')\n", (6326, 6371), True, 'from django.utils.translation import gettext_lazy as _\n'), ((6423, 6455), 'django.utils.translation.gettext_lazy', '_', (['"""Computer Science and Design"""'], {}), "('Computer Science and Design')\n", (6424, 6455), True, 'from django.utils.translation import gettext_lazy as _\n'), ((6507, 6544), 'django.utils.translation.gettext_lazy', '_', (['"""Computer Science and Mathematics"""'], {}), "('Computer Science and Mathematics')\n", (6508, 6544), True, 'from django.utils.translation import gettext_lazy as _\n'), ((6596, 6637), 'django.utils.translation.gettext_lazy', '_', (['"""Computer Science and Social Sciences"""'], {}), "('Computer Science and Social Sciences')\n", (6597, 6637), True, 'from django.utils.translation import gettext_lazy as _\n'), ((6689, 6738), 'django.utils.translation.gettext_lazy', '_', (['"""Computer Science and Artificial Intelligence"""'], {}), "('Computer Science and Artificial Intelligence')\n", (6690, 6738), True, 'from django.utils.translation import gettext_lazy as _\n'), ((9989, 10081), 'django.utils.translation.gettext_lazy', '_', (['"""Listen to research proposals/initial research and give suggestions for improvement"""'], {}), "('Listen to research proposals/initial research and give suggestions for improvement'\n )\n", (9990, 10081), True, 'from django.utils.translation import gettext_lazy as _\n'), ((10099, 10197), 'django.utils.translation.gettext_lazy', '_', (['"""Read papers written (the final version which the author wants to submit) and give inputs"""'], {}), "('Read papers written (the final version which the author wants to submit) and give inputs'\n )\n", (10100, 10197), True, 'from django.utils.translation import gettext_lazy as _\n'), ((10215, 10247), 'django.utils.translation.gettext_lazy', '_', (['"""Guide in literature reading"""'], {}), "('Guide in literature reading')\n", (10216, 10247), True, 'from django.utils.translation import gettext_lazy as _\n'), ((10270, 10347), 'django.utils.translation.gettext_lazy', '_', (['"""Help in understanding difficult concepts, discussing some papers/results"""'], {}), "('Help in understanding difficult concepts, discussing some papers/results')\n", (10271, 10347), True, 'from django.utils.translation import gettext_lazy as _\n'), ((10370, 10419), 'django.utils.translation.gettext_lazy', '_', (['"""Guidance on where to submit a research paper"""'], {}), "('Guidance on where to submit a research paper')\n", (10371, 10419), True, 'from django.utils.translation import gettext_lazy as _\n'), ((10442, 10511), 'django.utils.translation.gettext_lazy', '_', (['"""Guidance on the proper conduct of research and literature review"""'], {}), "('Guidance on the proper conduct of research and literature review')\n", (10443, 10511), True, 'from django.utils.translation import gettext_lazy as _\n'), ((10534, 10571), 
'django.utils.translation.gettext_lazy', '_', (['"""Review and comment on the resume"""'], {}), "('Review and comment on the resume')\n", (10535, 10571), True, 'from django.utils.translation import gettext_lazy as _\n'), ((10594, 10652), 'django.utils.translation.gettext_lazy', '_', (['"""Guide on postdoc and other research job possibilities"""'], {}), "('Guide on postdoc and other research job possibilities')\n", (10595, 10652), True, 'from django.utils.translation import gettext_lazy as _\n'), ((10780, 10810), 'django.utils.translation.gettext_lazy', '_', (['"""Algorithms and Complexity"""'], {}), "('Algorithms and Complexity')\n", (10781, 10810), True, 'from django.utils.translation import gettext_lazy as _\n'), ((10855, 10883), 'django.utils.translation.gettext_lazy', '_', (['"""Artificial Intelligence"""'], {}), "('Artificial Intelligence')\n", (10856, 10883), True, 'from django.utils.translation import gettext_lazy as _\n'), ((10928, 10969), 'django.utils.translation.gettext_lazy', '_', (['"""Computational Bio and Bioinformatics"""'], {}), "('Computational Bio and Bioinformatics')\n", (10929, 10969), True, 'from django.utils.translation import gettext_lazy as _\n'), ((11014, 11040), 'django.utils.translation.gettext_lazy', '_', (['"""Computer Architecture"""'], {}), "('Computer Architecture')\n", (11015, 11040), True, 'from django.utils.translation import gettext_lazy as _\n'), ((11085, 11107), 'django.utils.translation.gettext_lazy', '_', (['"""Computer Graphics"""'], {}), "('Computer Graphics')\n", (11086, 11107), True, 'from django.utils.translation import gettext_lazy as _\n'), ((11152, 11174), 'django.utils.translation.gettext_lazy', '_', (['"""Computer Networks"""'], {}), "('Computer Networks')\n", (11153, 11174), True, 'from django.utils.translation import gettext_lazy as _\n'), ((11219, 11241), 'django.utils.translation.gettext_lazy', '_', (['"""Computer Security"""'], {}), "('Computer Security')\n", (11220, 11241), True, 'from django.utils.translation import gettext_lazy as _\n'), ((11286, 11306), 'django.utils.translation.gettext_lazy', '_', (['"""Computer Vision"""'], {}), "('Computer Vision')\n", (11287, 11306), True, 'from django.utils.translation import gettext_lazy as _\n'), ((11351, 11368), 'django.utils.translation.gettext_lazy', '_', (['"""Cryptography"""'], {}), "('Cryptography')\n", (11352, 11368), True, 'from django.utils.translation import gettext_lazy as _\n'), ((11413, 11427), 'django.utils.translation.gettext_lazy', '_', (['"""Databases"""'], {}), "('Databases')\n", (11414, 11427), True, 'from django.utils.translation import gettext_lazy as _\n'), ((11472, 11494), 'django.utils.translation.gettext_lazy', '_', (['"""Design Automation"""'], {}), "('Design Automation')\n", (11473, 11494), True, 'from django.utils.translation import gettext_lazy as _\n'), ((11539, 11569), 'django.utils.translation.gettext_lazy', '_', (['"""Economics and Computation"""'], {}), "('Economics and Computation')\n", (11540, 11569), True, 'from django.utils.translation import gettext_lazy as _\n'), ((11614, 11649), 'django.utils.translation.gettext_lazy', '_', (['"""Embedded and Real-Time Systems"""'], {}), "('Embedded and Real-Time Systems')\n", (11615, 11649), True, 'from django.utils.translation import gettext_lazy as _\n'), ((11694, 11725), 'django.utils.translation.gettext_lazy', '_', (['"""High-Performance Computing"""'], {}), "('High-Performance Computing')\n", (11695, 11725), True, 'from django.utils.translation import gettext_lazy as _\n'), ((11770, 11801), 
'django.utils.translation.gettext_lazy', '_', (['"""Human-Computer Interaction"""'], {}), "('Human-Computer Interaction')\n", (11771, 11801), True, 'from django.utils.translation import gettext_lazy as _\n'), ((11846, 11873), 'django.utils.translation.gettext_lazy', '_', (['"""Logic and Verification"""'], {}), "('Logic and Verification')\n", (11847, 11873), True, 'from django.utils.translation import gettext_lazy as _\n'), ((11918, 11955), 'django.utils.translation.gettext_lazy', '_', (['"""Machine Learning and Data Mining"""'], {}), "('Machine Learning and Data Mining')\n", (11919, 11955), True, 'from django.utils.translation import gettext_lazy as _\n'), ((12000, 12041), 'django.utils.translation.gettext_lazy', '_', (['"""Measurement and Performance Analysis"""'], {}), "('Measurement and Performance Analysis')\n", (12001, 12041), True, 'from django.utils.translation import gettext_lazy as _\n'), ((12086, 12107), 'django.utils.translation.gettext_lazy', '_', (['"""Mobile Computing"""'], {}), "('Mobile Computing')\n", (12087, 12107), True, 'from django.utils.translation import gettext_lazy as _\n'), ((12152, 12184), 'django.utils.translation.gettext_lazy', '_', (['"""Natural Language Processing"""'], {}), "('Natural Language Processing')\n", (12153, 12184), True, 'from django.utils.translation import gettext_lazy as _\n'), ((12229, 12251), 'django.utils.translation.gettext_lazy', '_', (['"""Operating Systems"""'], {}), "('Operating Systems')\n", (12230, 12251), True, 'from django.utils.translation import gettext_lazy as _\n'), ((12296, 12322), 'django.utils.translation.gettext_lazy', '_', (['"""Programming Languages"""'], {}), "('Programming Languages')\n", (12297, 12322), True, 'from django.utils.translation import gettext_lazy as _\n'), ((12367, 12380), 'django.utils.translation.gettext_lazy', '_', (['"""Robotics"""'], {}), "('Robotics')\n", (12368, 12380), True, 'from django.utils.translation import gettext_lazy as _\n'), ((12425, 12450), 'django.utils.translation.gettext_lazy', '_', (['"""Software Engineering"""'], {}), "('Software Engineering')\n", (12426, 12450), True, 'from django.utils.translation import gettext_lazy as _\n'), ((12495, 12533), 'django.utils.translation.gettext_lazy', '_', (['"""The Web and Information Retrieval"""'], {}), "('The Web and Information Retrieval')\n", (12496, 12533), True, 'from django.utils.translation import gettext_lazy as _\n'), ((12578, 12596), 'django.utils.translation.gettext_lazy', '_', (['"""Visualization"""'], {}), "('Visualization')\n", (12579, 12596), True, 'from django.utils.translation import gettext_lazy as _\n'), ((809, 841), 'django.core.validators.MinValueValidator', 'validators.MinValueValidator', (['(16)'], {}), '(16)\n', (837, 841), False, 'from django.core import validators\n'), ((846, 879), 'django.core.validators.MaxValueValidator', 'validators.MaxValueValidator', (['(100)'], {}), '(100)\n', (874, 879), False, 'from django.core import validators\n'), ((1065, 1098), 'django.core.validators.MinLengthValidator', 'validators.MinLengthValidator', (['(10)'], {}), '(10)\n', (1094, 1098), False, 'from django.core import validators\n'), ((1558, 1591), 'django.core.validators.MinValueValidator', 'validators.MinValueValidator', (['(0.0)'], {}), '(0.0)\n', (1586, 1591), False, 'from django.core import validators\n'), ((1596, 1629), 'django.core.validators.MaxValueValidator', 'validators.MaxValueValidator', (['(5.0)'], {}), '(5.0)\n', (1624, 1629), False, 'from django.core import validators\n'), ((2890, 2921), 
'django.core.validators.MinValueValidator', 'validators.MinValueValidator', (['(1)'], {}), '(1)\n', (2918, 2921), False, 'from django.core import validators\n'), ((2926, 2958), 'django.core.validators.MaxValueValidator', 'validators.MaxValueValidator', (['(24)'], {}), '(24)\n', (2954, 2958), False, 'from django.core import validators\n'), ((3041, 3072), 'django.core.validators.MinValueValidator', 'validators.MinValueValidator', (['(1)'], {}), '(1)\n', (3069, 3072), False, 'from django.core import validators\n')]
|
#!/usr/bin/env python3
# Author: <NAME> <zhb _at_ iredmail.org>
# Purpose: Add missing attribute/value pairs required by Dovecot-2.3.
# Date: Apr 12, 2018.
import ldap
# Note:
# * bind_dn must have write privilege on LDAP server.
uri = 'ldap://127.0.0.1:389'
basedn = 'o=domains,dc=example,dc=com'
bind_dn = 'cn=Manager,dc=example,dc=com'
bind_pw = 'password'
# Initialize LDAP connection.
print("* Connecting to LDAP server: {}".format(uri))
conn = ldap.initialize(uri=uri, trace_level=0)
conn.bind_s(bind_dn, bind_pw)
# Get all mail users.
print("* Get mail accounts ...")
allUsers = conn.search_s(
basedn,
ldap.SCOPE_SUBTREE,
"(&(objectClass=mailUser)(|(enabledService=imapsecured)(enabledService=pop3secured)(enabledService=smtpsecured)(enabledService=sievesecured)(enabledService=managesievesecured)))",
['mail', 'enabledService'],
)
total = len(allUsers)
print("* Updating {} user(s).".format(total))
# Counter.
count = 1
for (dn, entry) in allUsers:
mail = entry['mail'][0]
if 'enabledService' not in entry:
continue
enabledService = entry['enabledService']
_update = False
    # Only add the new service name when the old one is enabled for the user
    # and the new one is not already present.
for old, new in [(b'imapsecured', b'imaptls'),
(b'pop3secured', b'pop3tls'),
(b'smtpsecured', b'smtptls'),
(b'sievesecured', b'sievetls')]:
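        # Illustrative note (added): e.g. an entry whose enabledService list
        # contains b'imapsecured' but not b'imaptls' gains b'imaptls' below,
        # while accounts that already carry the new name are left untouched.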
if (old in enabledService) and (new not in enabledService):
enabledService.append(new)
_update = True
if _update:
print("* ({} of {}) Updating user: {}".format(count, total, mail))
mod_attr = [(ldap.MOD_REPLACE, 'enabledService', enabledService)]
try:
conn.modify_s(dn, mod_attr)
except Exception as e:
print("Error while updating user {}: {}".format(mail, repr(e)))
else:
print("* [SKIP] No update required for user: {}".format(mail))
count += 1
# Unbind connection.
print("* Unbind LDAP server.")
conn.unbind()
print("* Update completed.")
|
[
"ldap.initialize"
] |
[((462, 501), 'ldap.initialize', 'ldap.initialize', ([], {'uri': 'uri', 'trace_level': '(0)'}), '(uri=uri, trace_level=0)\n', (477, 501), False, 'import ldap\n')]
|
"""
Demo/test program for the MQTT utilities.
See https://github.com/sensemakersamsterdam/astroplant_explorer
"""
# (c) Sensemakersams.org and others. See https://github.com/sensemakersamsterdam/astroplant_explorer
# Author: <NAME>
#
##
# H O W T O U S E
#
# Edit configuration.json and pick a nice 'ae_id' for yourself.
#
# Now start a terminal window #1 on your Pi and run:
# python 1_mqtt_receiver_demo.py
# To monitor MQTT traffic open a second terminal window #2 and run:
# mosquitto_sub -v -t "#"
# Then open a terminal window #3 and run:
# python 1_mqtt_sender_demo.py
# This should get things started. You can run the 1_mqtt_sender_demo.py
# repeatedly. The 1_mqtt_receiver_demo and mosquitto_sub will show the
# messages each time you run it.
# And if you want to send the stop-request to the 1_mqtt_receiver_demo.py, run
# python 1_mqtt_stop_demo.py
# in terminal window #3.
# You can abort the mosquitto_sub in terminal #2 with control-c.
###
# Warning: if import of ae_* module(s) fails, then you need to set up PYTHONPATH.
# To test start python, import sys and type sys.path. The ae 'lib' directory
# should be included in the printed path
# From the standard time library we now import the function sleep()
from time import sleep
# From the mqtt library we import the AE_Local_MQTT class which contains a bunch
# of functions we will use in this script
from ae_util.mqtt import AE_Local_MQTT
# Here we initialize our local MQTT agent.
# It imports your MQTT settings automatically from the configuration.json file.
loc_mqtt = AE_Local_MQTT()
# And now we activate the MQTT connection.
loc_mqtt.setup()
# Further down, this program loops, running the same code over and over again,
# until we set the following global variable to 'True'.
stop = False
# Now we define a so-called call-back function. This function is automatically
# executed when 'something' happens. What 'something' is in this case comes
# further down. Here we just define that we do a print when 'something' happens.
def cb1(sub_topic, payload, rec_time):
print('call_back 1:', sub_topic, payload, rec_time)
# And here we have more of the same. It will be executed when 'another something'
# happens. And the print out is also a wee bit different.
def cb2(sub_topic, payload, rec_time):
print('call_back 2:', sub_topic, payload, rec_time)
# And here in number three. It will be called when 'something #3' happens.
# But it is different than the ones before. It actually does something.
# It sets the variable 'stop' to 'True'. Look again at the explanation
# a few lines up, where we initialized the 'stop' variable.
# What do you think will happen when this function runs?
def cb_stop(sub_topic, payload, rec_time):
global stop
print('Received stop request. 1_mqtt_receiver_demo bailing out!')
stop = True
# In this script we want to receive MQTT messages. So we need to tell MQTT what
# we want it to send to us. We do this by subscribing to so-called 'topics'.
# The topic '#' is special. It just means everything. Aren't we greedy?
# We also tell MQTT to stash the incoming messages for us for later pick-up.
loc_mqtt.subscribe('#', None)  # All messages queued without a callback
# And here we will do another subscription. This time the topic needs to start
# with 'aap/'. Remember that '#' means anything, so we subscribe to 'aap/one'
# and 'aap/two' and indefinitely more.
# And this time we also tell MQTT to run the function 'cb1' when we actually
# get a message with a topic that starts with 'aap/'.
# So (please read the comment back where 'cb1()' was defined), the 'something'
# for 'cb1()' is nothing other than receiving a message with a topic that starts
# with 'aap/'.
loc_mqtt.subscribe('aap/#', cb1)
# And the 'another something' that needs to happen for 'cb2()' to run is nothing
# more than receiving a message with a topic starting with 'aap/noot/'.
# But hey, 'aap/noot' also starts with 'aap/'. And this will trigger the 'cb1()'
# call-back too. So if I send 'aap/noot/yes', then both cb1() and cb2() will be
# run. But if I send 'aap/hello', then only cb1() will run.
loc_mqtt.subscribe('aap/noot/#', cb2)
# And now the 3rd one for the 'control/stop' topic. When we get exactly this one,
# we will run the 'cb_stop()' call-back. Which will .....
loc_mqtt.subscribe('control/stop', cb_stop)
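# Illustrative note (added): the 'control/stop' message that triggers cb_stop()
# can also be published by hand from a shell with the standard mosquitto
# client, for example:
#   mosquitto_pub -t "control/stop" -m "stop"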
# Finally, our main loop, which will run until 'stop' is set to
# True, or alternatively when we do a manual abort with control-c.
print('Abort with control-c to end prematurely.')
try:
while not stop:
# Remember that we also did a subscription to '#', meaning
# everything. And without a call-back. Which means that MQTT
# will stash incoming messages for later pick-up?
# Well, in the line below we check and get the oldest
# message in the stash, and -if found- print its content.
sub_topic, payload, rec_time = loc_mqtt.get_message()
if sub_topic is not None:
print('Dequeued:', sub_topic, payload, rec_time)
sleep(0.1)
except KeyboardInterrupt:
print('\nManually aborted....\nBye bye')
|
[
"time.sleep",
"ae_util.mqtt.AE_Local_MQTT"
] |
[((1577, 1592), 'ae_util.mqtt.AE_Local_MQTT', 'AE_Local_MQTT', ([], {}), '()\n', (1590, 1592), False, 'from ae_util.mqtt import AE_Local_MQTT\n'), ((5080, 5090), 'time.sleep', 'sleep', (['(0.1)'], {}), '(0.1)\n', (5085, 5090), False, 'from time import sleep\n')]
|
from rb.processings.pipeline.estimator import Regressor
from rb.processings.pipeline.dataset import Dataset, Task
from typing import List, Dict
from sklearn import svm
class SVR(Regressor):
def __init__(self, dataset: Dataset, tasks: List[Task], params: Dict[str, str]):
super().__init__(dataset, tasks, params)
self.model = svm.SVR(gamma='scale', kernel=params["kernel"], degree=params["degree"])
self.kernel = params["kernel"]
self.degree = params["degree"]
@classmethod
def parameters(cls):
return {
"kernel": ["rbf", "poly", "sigmoid"],
"degree": [2,3,4,5],
}
@classmethod
def valid_config(cls, config):
return config["kernel"] == "poly" or config["degree"] == 3
def __str__(self):
return f"SVR - {self.kernel}" + (f"({self.degree})" if self.kernel == "poly" else "")
|
[
"sklearn.svm.SVR"
] |
[((346, 418), 'sklearn.svm.SVR', 'svm.SVR', ([], {'gamma': '"""scale"""', 'kernel': "params['kernel']", 'degree': "params['degree']"}), "(gamma='scale', kernel=params['kernel'], degree=params['degree'])\n", (353, 418), False, 'from sklearn import svm\n')]
|
import pytest
import ast
from .ReflectivityExample import *
import reflectivipy
from reflectivipy import MetaLink
@pytest.fixture(autouse=True)
def setup():
reflectivipy.uninstall_all()
def test_wrap_expr():
node = expr_sample_node()
assert type(node) is ast.Expr
transformation = node.wrapper.flat_wrap()
assert len(transformation) == 1
assert transformation[0] is node
link = MetaLink(ReflectivityExample(), 'tag_exec_', 'before', [])
node.links.append(link)
assert type(node) is ast.Expr
transformation = node.wrapper.flat_wrap()
assert len(transformation) == 1
assert transformation[0] is node
def test_wrap_call():
node = call_sample_node().value
assert type(node) is ast.Call
transformation = node.wrapper.flat_wrap()
assert len(transformation) == 1
assert transformation[0] is node
link = MetaLink(ReflectivityExample(), 'tag_exec_', 'before', [])
node.links.append(link)
assert type(node) is ast.Call
transformation = node.wrapper.flat_wrap()
assert len(transformation) == 4
assert type(transformation[0]) is ast.Assign
assert transformation[0].value is node.args[0]
assert transformation[0] is not node
assert len(transformation[3].value.args) == 1
assert type(transformation[3].value.args[0]) is ast.Name
assert transformation[3].value.args[0].id is node.args[0].temp_name
assert transformation[3].value.func.value.id is node.func.value.temp_name
def test_wrap_call_in_assign():
node = method_with_args_sample_node().body[0].body[0]
link = MetaLink(ReflectivityExample(), 'tag_exec_', 'before', [])
node.value.links.append(link)
assert type(node) is ast.Assign
assert type(node.value) is ast.Call
transformation = node.wrapper.flat_wrap()
assert len(transformation) == 5
assert type(transformation[0]) == ast.Assign
assert transformation[0].value is node.value.args[0]
assert type(transformation[1]) is ast.Assign
assert transformation[1].value.id == 'self'
assert transformation[0] is not node
assert len(transformation[3].value.args) == 1
assert type(transformation[3].value.args[0]) is ast.Name
assert transformation[3].value.args[0].id is node.value.args[0].temp_name
assert transformation[3].value.func.value.id is node.value.func.value.temp_name
def test_wrap_complex_expr_call():
node = complex_expr_call_sample_node()
link = MetaLink(ReflectivityExample(), 'tag_exec_', 'before', [])
node.value.args[0].links.append(link)
transformation = node.wrapper.flat_wrap()
assert len(transformation) == 6
assert type(transformation[3]) is ast.Assign
assert transformation[3].value.rf_id is node.value.args[0].rf_id
assert transformation[3] is not node
def test_call_receiver_flattening():
node = call_with_complex_receiver_sample_node()
link = MetaLink(ReflectivityExample(), 'tag_exec_', 'before', [])
node.value.links.append(link)
transformation = node.wrapper.flat_wrap()
assert len(transformation) == 4
assert transformation[1].value.func.value.id == 'self'
assert transformation[3].value.func.value.id == transformation[1].targets[0].id
def test_call_flattening():
node = call_with_complex_receiver_sample_node()
link = MetaLink(ReflectivityExample(), 'tag_exec_', 'before', [])
node.value.func.value.links.append(link)
transformation = node.wrapper.flat_wrap()
assert len(transformation) == 5
assert transformation[1].value.id == 'self'
assert transformation[3].value.func.value.id == transformation[1].targets[0].id
assert transformation[4].value.func.value.id == transformation[3].targets[0].id
def test_wrap_assign():
node = sample_node()
assert type(node) is ast.Assign
transformation = node.wrapper.flat_wrap()
assert len(transformation) == 1
assert transformation[0] is node
link = MetaLink(ReflectivityExample(), 'tag_exec_', 'before', [])
node.links.append(link)
assert type(node) is ast.Assign
transformation = node.wrapper.flat_wrap()
assert len(transformation) == 3
assert type(transformation[0]) is ast.Assign
assert transformation[0].value is node.value
assert transformation[0] is not node
def test_flatten_children():
pass
|
[
"pytest.fixture",
"reflectivipy.uninstall_all"
] |
[((117, 145), 'pytest.fixture', 'pytest.fixture', ([], {'autouse': '(True)'}), '(autouse=True)\n', (131, 145), False, 'import pytest\n'), ((163, 191), 'reflectivipy.uninstall_all', 'reflectivipy.uninstall_all', ([], {}), '()\n', (189, 191), False, 'import reflectivipy\n')]
|
from PyPDF4 import PdfFileReader, PdfFileWriter
from PyPDF4.pdf import ContentStream
from PyPDF4.generic import TextStringObject, NameObject
from PyPDF4.utils import b_
import os
import argparse
from io import BytesIO
from typing import Tuple
# Import the reportlab library
from reportlab.pdfgen import canvas
# The size of the page supposedly A4
from reportlab.lib.pagesizes import A4
# The color of the watermark
from reportlab.lib import colors
PAGESIZE = A4
FONTNAME = 'Helvetica-Bold'
FONTSIZE = 40
# using colors module
# COLOR = colors.lightgrey
# or simply RGB
# COLOR = (190, 190, 190)
COLOR = colors.red
# The position attributes of the watermark
X = 250
Y = 10
# The rotation angle in order to display the watermark diagonally if needed
ROTATION_ANGLE = 45
def get_info(input_file: str):
"""
Extracting the file info
"""
# If PDF is encrypted the file metadata cannot be extracted
with open(input_file, 'rb') as pdf_file:
pdf_reader = PdfFileReader(pdf_file, strict=False)
output = {
"File": input_file, "Encrypted": ("True" if pdf_reader.isEncrypted else "False")
}
if not pdf_reader.isEncrypted:
info = pdf_reader.getDocumentInfo()
num_pages = pdf_reader.getNumPages()
output["Author"] = info.author
output["Creator"] = info.creator
output["Producer"] = info.producer
output["Subject"] = info.subject
output["Title"] = info.title
output["Number of pages"] = num_pages
# To Display collected metadata
print("## File Information ##################################################")
print("\n".join("{}:{}".format(i, j) for i, j in output.items()))
print("######################################################################")
return True, output
def get_output_file(input_file: str, output_file: str):
"""
Check whether a temporary output file is needed or not
"""
input_path = os.path.dirname(input_file)
input_filename = os.path.basename(input_file)
# If output file is empty -> generate a temporary output file
# If output file is equal to input_file -> generate a temporary output file
if not output_file or input_file == output_file:
tmp_file = os.path.join(input_path, 'tmp_' + input_filename)
return True, tmp_file
return False, output_file
def create_watermark(wm_text: str):
"""
Creates a watermark template.
"""
if wm_text:
# Generate the output to a memory buffer
output_buffer = BytesIO()
# Default Page Size = A4
c = canvas.Canvas(output_buffer, pagesize=PAGESIZE)
        # you can also add an image instead of text
# c.drawImage("logo.png", X, Y, 160, 160)
# Set the size and type of the font
c.setFont(FONTNAME, FONTSIZE)
# Set the color
if isinstance(COLOR, tuple):
color = (c/255 for c in COLOR)
c.setFillColorRGB(*color)
else:
c.setFillColor(COLOR)
# Rotate according to the configured parameter
c.rotate(ROTATION_ANGLE)
# Position according to the configured parameter
c.drawString(X, Y, wm_text)
c.save()
return True, output_buffer
return False, None
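# Illustrative sketch (added): create_watermark() yields an in-memory PDF
# buffer which save_watermark() below can persist; the file name here is
# hypothetical.
#   ok, buffer = create_watermark("CONFIDENTIAL")
#   if ok:
#       save_watermark(buffer, "watermark_template.pdf")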
def save_watermark(wm_buffer, output_file):
"""
Saves the generated watermark template to disk
"""
with open(output_file, mode='wb') as f:
f.write(wm_buffer.getbuffer())
f.close()
return True
def watermark_pdf(input_file: str, wm_text: str, pages: Tuple = None):
"""
Adds watermark to a pdf file.
"""
result, wm_buffer = create_watermark(wm_text)
if result:
wm_reader = PdfFileReader(wm_buffer)
pdf_reader = PdfFileReader(open(input_file, 'rb'), strict=False)
pdf_writer = PdfFileWriter()
try:
for page in range(pdf_reader.getNumPages()):
# If required to watermark specific pages not all the document pages
if pages:
if str(page) not in pages:
continue
page = pdf_reader.getPage(page)
page.mergePage(wm_reader.getPage(0))
pdf_writer.addPage(page)
except Exception as e:
print("Exception = ", e)
return False, None, None
return True, pdf_reader, pdf_writer
def unwatermark_pdf(input_file: str, wm_text: str, pages: Tuple = None):
"""
Removes watermark from the pdf file.
"""
pdf_reader = PdfFileReader(open(input_file, 'rb'), strict=False)
pdf_writer = PdfFileWriter()
for page in range(pdf_reader.getNumPages()):
# If required for specific pages
if pages:
if str(page) not in pages:
continue
page = pdf_reader.getPage(page)
# Get the page content
content_object = page["/Contents"].getObject()
content = ContentStream(content_object, pdf_reader)
        # Loop through all the page elements
for operands, operator in content.operations:
# Checks the TJ operator and replaces the corresponding string operand (Watermark text) with ''
if operator == b_("Tj"):
text = operands[0]
if isinstance(text, str) and text.startswith(wm_text):
operands[0] = TextStringObject('')
page.__setitem__(NameObject('/Contents'), content)
pdf_writer.addPage(page)
return True, pdf_reader, pdf_writer
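# Illustrative note (added): in a PDF content stream, text is painted by
# operators such as Tj; blanking the matching string operand above means the
# watermark glyphs are simply no longer drawn, while the rest of the page
# content is kept intact.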
def watermark_unwatermark_file(**kwargs):
input_file = kwargs.get('input_file')
wm_text = kwargs.get('wm_text')
# watermark -> Watermark
# unwatermark -> Unwatermark
action = kwargs.get('action')
# HDD -> Temporary files are saved on the Hard Disk Drive and then deleted
# RAM -> Temporary files are saved in memory and then deleted.
mode = kwargs.get('mode')
pages = kwargs.get('pages')
temporary, output_file = get_output_file(
input_file, kwargs.get('output_file'))
if action == "watermark":
result, pdf_reader, pdf_writer = watermark_pdf(
input_file=input_file, wm_text=wm_text, pages=pages)
elif action == "unwatermark":
result, pdf_reader, pdf_writer = unwatermark_pdf(
input_file=input_file, wm_text=wm_text, pages=pages)
# Completed successfully
if result:
# Generate to memory
if mode == "RAM":
output_buffer = BytesIO()
pdf_writer.write(output_buffer)
pdf_reader.stream.close()
# No need to create a temporary file in RAM Mode
if temporary:
output_file = input_file
with open(output_file, mode='wb') as f:
f.write(output_buffer.getbuffer())
f.close()
elif mode == "HDD":
# Generate to a new file on the hard disk
with open(output_file, 'wb') as pdf_output_file:
pdf_writer.write(pdf_output_file)
pdf_output_file.close()
pdf_reader.stream.close()
if temporary:
if os.path.isfile(input_file):
os.replace(output_file, input_file)
output_file = input_file
def watermark_unwatermark_folder(**kwargs):
"""
Watermarks all PDF Files within a specified path
Unwatermarks all PDF Files within a specified path
"""
input_folder = kwargs.get('input_folder')
wm_text = kwargs.get('wm_text')
# Run in recursive mode
recursive = kwargs.get('recursive')
# watermark -> Watermark
# unwatermark -> Unwatermark
action = kwargs.get('action')
# HDD -> Temporary files are saved on the Hard Disk Drive and then deleted
# RAM -> Temporary files are saved in memory and then deleted.
mode = kwargs.get('mode')
pages = kwargs.get('pages')
    # Loop through the files within the input folder.
for foldername, dirs, filenames in os.walk(input_folder):
for filename in filenames:
# Check if pdf file
if not filename.endswith('.pdf'):
continue
# PDF File found
inp_pdf_file = os.path.join(foldername, filename)
print("Processing file:", inp_pdf_file)
watermark_unwatermark_file(input_file=inp_pdf_file, output_file=None,
wm_text=wm_text, action=action, mode=mode, pages=pages)
if not recursive:
break
def is_valid_path(path):
"""
Validates the path inputted and checks whether it is a file path or a folder path
"""
if not path:
raise ValueError(f"Invalid Path")
if os.path.isfile(path):
return path
elif os.path.isdir(path):
return path
else:
raise ValueError(f"Invalid Path {path}")
def parse_args():
"""
Get user command line parameters
"""
parser = argparse.ArgumentParser(description="Available Options")
parser.add_argument('-i', '--input_path', dest='input_path', type=is_valid_path,
required=True, help="Enter the path of the file or the folder to process")
parser.add_argument('-a', '--action', dest='action', choices=[
'watermark', 'unwatermark'], type=str, default='watermark',
help="Choose whether to watermark or to unwatermark")
parser.add_argument('-m', '--mode', dest='mode', choices=['RAM', 'HDD'], type=str,
default='RAM', help="Choose whether to process on the hard disk drive or in memory")
parser.add_argument('-w', '--watermark_text', dest='watermark_text',
type=str, required=True, help="Enter a valid watermark text")
parser.add_argument('-p', '--pages', dest='pages', type=tuple,
help="Enter the pages to consider e.g.: [2,4]")
path = parser.parse_known_args()[0].input_path
if os.path.isfile(path):
parser.add_argument('-o', '--output_file', dest='output_file',
type=str, help="Enter a valid output file")
if os.path.isdir(path):
parser.add_argument('-r', '--recursive', dest='recursive', default=False, type=lambda x: (
str(x).lower() in ['true', '1', 'yes']), help="Process Recursively or Non-Recursively")
    # To Parse The Command Line Arguments
args = vars(parser.parse_args())
# To Display The Command Line Arguments
print("## Command Arguments #################################################")
print("\n".join("{}:{}".format(i, j) for i, j in args.items()))
print("######################################################################")
return args
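# Illustrative usage (added; flag names match the parser above, the script
# name is hypothetical):
#   python pdf_watermarker.py -i ./docs -a watermark -w "DRAFT" -m RAM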
if __name__ == '__main__':
# Parsing command line arguments entered by user
args = parse_args()
# If File Path
if os.path.isfile(args['input_path']):
# Extracting File Info
get_info(input_file=args['input_path'])
# Encrypting or Decrypting a File
watermark_unwatermark_file(
input_file=args['input_path'], wm_text=args['watermark_text'], action=args[
'action'], mode=args['mode'], output_file=args['output_file'], pages=args['pages']
)
# If Folder Path
elif os.path.isdir(args['input_path']):
# Encrypting or Decrypting a Folder
watermark_unwatermark_folder(
input_folder=args['input_path'], wm_text=args['watermark_text'],
action=args['action'], mode=args['mode'], recursive=args['recursive'], pages=args['pages']
)
|
[
"io.BytesIO",
"PyPDF4.PdfFileReader",
"PyPDF4.generic.NameObject",
"argparse.ArgumentParser",
"PyPDF4.PdfFileWriter",
"os.path.basename",
"os.path.isdir",
"os.path.dirname",
"os.walk",
"PyPDF4.pdf.ContentStream",
"PyPDF4.generic.TextStringObject",
"reportlab.pdfgen.canvas.Canvas",
"os.path.isfile",
"os.replace",
"PyPDF4.utils.b_",
"os.path.join"
] |
[((1992, 2019), 'os.path.dirname', 'os.path.dirname', (['input_file'], {}), '(input_file)\n', (2007, 2019), False, 'import os\n'), ((2041, 2069), 'os.path.basename', 'os.path.basename', (['input_file'], {}), '(input_file)\n', (2057, 2069), False, 'import os\n'), ((4645, 4660), 'PyPDF4.PdfFileWriter', 'PdfFileWriter', ([], {}), '()\n', (4658, 4660), False, 'from PyPDF4 import PdfFileReader, PdfFileWriter\n'), ((8022, 8043), 'os.walk', 'os.walk', (['input_folder'], {}), '(input_folder)\n', (8029, 8043), False, 'import os\n'), ((8742, 8762), 'os.path.isfile', 'os.path.isfile', (['path'], {}), '(path)\n', (8756, 8762), False, 'import os\n'), ((8979, 9035), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'description': '"""Available Options"""'}), "(description='Available Options')\n", (9002, 9035), False, 'import argparse\n'), ((10001, 10021), 'os.path.isfile', 'os.path.isfile', (['path'], {}), '(path)\n', (10015, 10021), False, 'import os\n'), ((10173, 10192), 'os.path.isdir', 'os.path.isdir', (['path'], {}), '(path)\n', (10186, 10192), False, 'import os\n'), ((10900, 10934), 'os.path.isfile', 'os.path.isfile', (["args['input_path']"], {}), "(args['input_path'])\n", (10914, 10934), False, 'import os\n'), ((977, 1014), 'PyPDF4.PdfFileReader', 'PdfFileReader', (['pdf_file'], {'strict': '(False)'}), '(pdf_file, strict=False)\n', (990, 1014), False, 'from PyPDF4 import PdfFileReader, PdfFileWriter\n'), ((2288, 2337), 'os.path.join', 'os.path.join', (['input_path', "('tmp_' + input_filename)"], {}), "(input_path, 'tmp_' + input_filename)\n", (2300, 2337), False, 'import os\n'), ((2575, 2584), 'io.BytesIO', 'BytesIO', ([], {}), '()\n', (2582, 2584), False, 'from io import BytesIO\n'), ((2630, 2677), 'reportlab.pdfgen.canvas.Canvas', 'canvas.Canvas', (['output_buffer'], {'pagesize': 'PAGESIZE'}), '(output_buffer, pagesize=PAGESIZE)\n', (2643, 2677), False, 'from reportlab.pdfgen import canvas\n'), ((3739, 3763), 'PyPDF4.PdfFileReader', 'PdfFileReader', (['wm_buffer'], {}), '(wm_buffer)\n', (3752, 3763), False, 'from PyPDF4 import PdfFileReader, PdfFileWriter\n'), ((3858, 3873), 'PyPDF4.PdfFileWriter', 'PdfFileWriter', ([], {}), '()\n', (3871, 3873), False, 'from PyPDF4 import PdfFileReader, PdfFileWriter\n'), ((4977, 5018), 'PyPDF4.pdf.ContentStream', 'ContentStream', (['content_object', 'pdf_reader'], {}), '(content_object, pdf_reader)\n', (4990, 5018), False, 'from PyPDF4.pdf import ContentStream\n'), ((8793, 8812), 'os.path.isdir', 'os.path.isdir', (['path'], {}), '(path)\n', (8806, 8812), False, 'import os\n'), ((11320, 11353), 'os.path.isdir', 'os.path.isdir', (["args['input_path']"], {}), "(args['input_path'])\n", (11333, 11353), False, 'import os\n'), ((5458, 5481), 'PyPDF4.generic.NameObject', 'NameObject', (['"""/Contents"""'], {}), "('/Contents')\n", (5468, 5481), False, 'from PyPDF4.generic import TextStringObject, NameObject\n'), ((6521, 6530), 'io.BytesIO', 'BytesIO', ([], {}), '()\n', (6528, 6530), False, 'from io import BytesIO\n'), ((8239, 8273), 'os.path.join', 'os.path.join', (['foldername', 'filename'], {}), '(foldername, filename)\n', (8251, 8273), False, 'import os\n'), ((5262, 5270), 'PyPDF4.utils.b_', 'b_', (['"""Tj"""'], {}), "('Tj')\n", (5264, 5270), False, 'from PyPDF4.utils import b_\n'), ((5412, 5432), 'PyPDF4.generic.TextStringObject', 'TextStringObject', (['""""""'], {}), "('')\n", (5428, 5432), False, 'from PyPDF4.generic import TextStringObject, NameObject\n'), ((7179, 7205), 'os.path.isfile', 'os.path.isfile', (['input_file'], {}), '(input_file)\n', 
(7193, 7205), False, 'import os\n'), ((7227, 7262), 'os.replace', 'os.replace', (['output_file', 'input_file'], {}), '(output_file, input_file)\n', (7237, 7262), False, 'import os\n')]
|
# -*- coding:utf-8 -*-
# Copyright (C) 2020. Huawei Technologies Co., Ltd. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Image classification Trainer."""
import torch
import torch.nn as nn
from modnas import backend
from modnas.registry.trainer import register
from ..base import TrainerBase
def accuracy(output, target, topk=(1, )):
"""Compute the precision@k for the specified values of k."""
maxk = max(topk)
batch_size = target.size(0)
_, pred = output.topk(maxk, 1, True, True)
pred = pred.t()
# one-hot case
if target.ndimension() > 1:
target = target.max(1)[1]
correct = pred.eq(target.view(1, -1).expand_as(pred))
res = []
for k in topk:
correct_k = correct[:k].reshape(-1).float().sum(0)
res.append(correct_k.mul_(1.0 / batch_size))
return res
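# Illustrative sketch (added, hypothetical shapes): for logits of shape
# (batch, num_classes) and integer targets of shape (batch,), accuracy()
# returns one 0-dim tensor per requested k, each holding a fraction in [0, 1].
#   logits = torch.randn(32, 10)
#   targets = torch.randint(0, 10, (32,))
#   top1, top5 = accuracy(logits, targets, topk=(1, 5))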
@register
class ImageClsTrainer(TrainerBase):
"""Image classification Trainer class."""
def __init__(self,
writer=None,
expman=None,
data_provider=None,
optimizer=None,
lr_scheduler=None,
criterion='CrossEntropyLoss',
w_grad_clip=0):
super().__init__(writer)
self.w_grad_clip = w_grad_clip
self.expman = expman
self.optimizer = None
self.lr_scheduler = None
self.data_provider = None
self.criterion = None
config = {
'optimizer': optimizer,
'lr_scheduler': lr_scheduler,
'data_provider': data_provider,
'criterion': criterion,
}
self.config = config
def init(self, model, config=None):
"""Initialize trainer states."""
self.config.update(config or {})
if self.config['optimizer']:
self.optimizer = backend.get_optimizer(model.parameters(), self.config['optimizer'], config)
if self.config['lr_scheduler']:
self.lr_scheduler = backend.get_lr_scheduler(self.optimizer, self.config['lr_scheduler'], config)
if self.config['data_provider']:
self.data_provider = backend.get_data_provider(self.config['data_provider'])
if self.config['criterion']:
self.criterion = backend.get_criterion(self.config['criterion'], getattr(model, 'device_ids', None))
self.device = self.config.get('device', backend.get_device())
def get_num_train_batch(self, epoch):
"""Return number of train batches."""
return 0 if self.data_provider is None else self.data_provider.get_num_train_batch(epoch=epoch)
def get_num_valid_batch(self, epoch):
"""Return number of valid batches."""
return 0 if self.data_provider is None else self.data_provider.get_num_valid_batch(epoch=epoch)
def get_next_train_batch(self):
"""Return next train batch."""
return self.proc_batch(self.data_provider.get_next_train_batch())
def get_next_valid_batch(self):
"""Return next valid batch."""
return self.proc_batch(self.data_provider.get_next_valid_batch())
def proc_batch(self, batch):
"""Return processed data batch."""
return tuple(v.to(device=self.device, non_blocking=True) for v in batch)
def state_dict(self):
"""Return current states."""
return {
'optimizer': self.optimizer.state_dict(),
'lr_scheduler': self.lr_scheduler.state_dict(),
}
def load_state_dict(self, sd):
"""Resume states."""
if self.optimizer is not None:
self.optimizer.load_state_dict(sd['optimizer'])
if self.lr_scheduler is not None:
self.lr_scheduler.load_state_dict(sd['lr_scheduler'])
def get_lr(self):
"""Return current learning rate."""
if self.lr_scheduler:
if hasattr(self.lr_scheduler, 'get_last_lr'):
return self.lr_scheduler.get_last_lr()[0]
return self.lr_scheduler.get_lr()[0]
return self.optimizer.param_groups[0]['lr']
def get_optimizer(self):
"""Return optimizer."""
return self.optimizer
def loss(self, output=None, data=None, model=None):
"""Return loss."""
return None if self.criterion is None else self.criterion(None, None, output, *data)
def train_epoch(self, estim, model, tot_steps, epoch, tot_epochs):
"""Train for one epoch."""
for step in range(tot_steps):
self.train_step(estim, model, epoch, tot_epochs, step, tot_steps)
def train_step(self, estim, model, epoch, tot_epochs, step, tot_steps):
"""Train for one step."""
optimizer = self.optimizer
lr_scheduler = self.lr_scheduler
lr = self.get_lr()
if step == 0:
self.data_provider.reset_train_iter()
model.train()
batch = self.get_next_train_batch()
trn_X, trn_y = batch
optimizer.zero_grad()
loss, logits = estim.loss_output(batch, model=model, mode='train')
loss.backward()
# gradient clipping
if self.w_grad_clip > 0:
nn.utils.clip_grad_norm_(model.parameters(), self.w_grad_clip)
optimizer.step()
prec1, prec5 = accuracy(logits, trn_y, topk=(1, 5))
if step == tot_steps - 1:
lr_scheduler.step()
return {
'acc_top1': prec1.item(),
'acc_top5': prec5.item(),
'loss': loss.item(),
'LR': lr,
'N': len(trn_y),
}
def valid_epoch(self, estim, model, tot_steps, epoch=0, tot_epochs=1):
"""Validate for one epoch."""
if not tot_steps:
return None
for step in range(tot_steps):
self.valid_step(estim, model, epoch, tot_epochs, step, tot_steps)
def valid_step(self, estim, model, epoch, tot_epochs, step, tot_steps):
"""Validate for one step."""
if step == 0:
self.data_provider.reset_valid_iter()
model.eval()
with torch.no_grad():
batch = self.get_next_valid_batch()
val_X, val_y = batch
loss, logits = estim.loss_output(batch, model=model, mode='eval')
prec1, prec5 = accuracy(logits, val_y, topk=(1, 5))
return {
'acc_top1': prec1.item(),
'acc_top5': prec5.item(),
'loss': loss.item(),
'N': len(val_y),
}
|
[
"torch.no_grad",
"modnas.backend.get_lr_scheduler",
"modnas.backend.get_data_provider",
"modnas.backend.get_device"
] |
[((2498, 2575), 'modnas.backend.get_lr_scheduler', 'backend.get_lr_scheduler', (['self.optimizer', "self.config['lr_scheduler']", 'config'], {}), "(self.optimizer, self.config['lr_scheduler'], config)\n", (2522, 2575), False, 'from modnas import backend\n'), ((2650, 2705), 'modnas.backend.get_data_provider', 'backend.get_data_provider', (["self.config['data_provider']"], {}), "(self.config['data_provider'])\n", (2675, 2705), False, 'from modnas import backend\n'), ((2904, 2924), 'modnas.backend.get_device', 'backend.get_device', ([], {}), '()\n', (2922, 2924), False, 'from modnas import backend\n'), ((6537, 6552), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (6550, 6552), False, 'import torch\n')]
|
# -*- coding: utf-8 -*-
# ------------------------------------------------------------------------------
# Name: coverageM21.py
# Purpose: Starts Coverage w/ default arguments
#
# Authors: <NAME>
# <NAME>
#
# Copyright: Copyright © 2014-22 <NAME>
# License: LGPL or BSD, see license.txt
# ------------------------------------------------------------------------------
omit_modules = [
'daseki/ext/*',
]
exclude_lines = [
r'\s*import daseki\s*',
r'\s*daseki.mainTest\(\)\s*',
]
def getCoverage():
try:
import coverage
cov = coverage.coverage(omit=omit_modules)
for e in exclude_lines:
cov.exclude(e, which='exclude')
cov.start()
except ImportError:
cov = None
return cov
def stopCoverage(cov):
if cov is not None:
cov.stop()
cov.save()
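# Minimal usage sketch (added for illustration):
#   cov = getCoverage()    # returns None if the coverage package is absent
#   ...run the code under test...
#   stopCoverage(cov)      # safe to call even when cov is None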
|
[
"coverage.coverage"
] |
[((667, 703), 'coverage.coverage', 'coverage.coverage', ([], {'omit': 'omit_modules'}), '(omit=omit_modules)\n', (684, 703), False, 'import coverage\n')]
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
#
# Copyright 2020-2021 by <NAME>. All rights reserved. This file is part
# of the Robot Operating System project, released under the MIT License. Please
# see the LICENSE file included as part of this package.
#
# author: <NAME>
# created: 2020-03-27
# modified: 2020-03-27
#
# Support for the Adafruit 9-DOF Orientation IMU Fusion Breakout - BNO085 (BNO080).
#
# See:
# https://www.adafruit.com/product/4754
# https://learn.adafruit.com/adafruit-9-dof-orientation-imu-fusion-breakout-bno085
# https://learn.adafruit.com/adafruit-9-dof-orientation-imu-fusion-breakout-bno085/report-types
# https://www.ceva-dsp.com/wp-content/uploads/2019/10/BNO080_085-Datasheet.pdf
# https://cdn-learn.adafruit.com/downloads/pdf/adafruit-9-dof-orientation-imu-fusion-breakout-bno085.pdf
# https://circuitpython.readthedocs.io/projects/bno08x/en/latest/
#
import math, sys, time, traceback
import board, busio
from enum import Enum
from colorama import init, Fore, Style
init()
# if not busio.I2C, then use this:
#try:
# from adafruit_extended_bus import ExtendedI2C as I2C
#except ImportError as ie:
# sys.exit("This script requires the adafruit_extended_bus module.\n"\
# + "Install with: pip3 install --user adafruit_extended_bus")
try:
import adafruit_bno08x
from adafruit_bno08x.i2c import BNO08X_I2C
# from adafruit_bno08x.i2c import BNO08X
from adafruit_bno08x import (
PacketError,
BNO_REPORT_ACCELEROMETER,
BNO_REPORT_GYROSCOPE,
BNO_REPORT_MAGNETOMETER,
REPORT_ACCURACY_STATUS,
BNO_REPORT_ACTIVITY_CLASSIFIER,
BNO_REPORT_STABILITY_CLASSIFIER,
# BNO_REPORT_ROTATION_VECTOR,
# BNO_REPORT_GEOMAGNETIC_ROTATION_VECTOR,
# BNO_REPORT_GAME_ROTATION_VECTOR,
)
except ImportError as ie:
sys.exit("This script requires the adafruit_bno08x module.\n"\
+ "Install with: pip3 install --user adafruit-circuitpython-bno08x")
try:
from pyquaternion import Quaternion
except ImportError:
sys.exit("This script requires the pyquaternion module.\nInstall with: pip3 install --user pyquaternion")
from lib.logger import Level, Logger
from lib.config_loader import ConfigLoader
from lib.message import Message
from lib.queue import MessageQueue
from lib.message_factory import MessageFactory
from lib.convert import Convert
# ..............................................................................
class BNO08x:
'''
Reads from a BNO08x 9DoF sensor.
'''
def __init__(self, config, queue, level):
self._log = Logger("bno085", level)
if config is None:
raise ValueError("no configuration provided.")
self._queue = queue
self._config = config
# config
_config = self._config['ros'].get('bno085')
self._loop_delay_sec = _config.get('loop_delay_sec')
_i2c_device = _config.get('i2c_device')
# default trim, can be overridden by methods
self.set_heading_trim(_config.get('heading_trim'))
self.set_pitch_trim(_config.get('pitch_trim'))
self.set_roll_trim(_config.get('roll_trim'))
i2c = busio.I2C(board.SCL, board.SDA, frequency=800000)
self._bno = BNO08X_I2C(i2c, debug=False)
# self._bno = BNO08X()
self._error_range = 5.0 # permitted error between Euler and Quaternion (in degrees) to allow setting value, was 3.0
self._min_calib_status = 1
self._settle_sec = 0.0
self._calibrated = False
self._verbose = False # True for stack traces
self._configure()
self._log.info('ready.')
# ..........................................................................
def set_heading_trim(self, trim):
'''
Set the heading trim in degrees.
'''
self._heading_trim = trim
self._log.info('heading trim:\t{:>6.2f}°'.format(self._heading_trim))
# ..........................................................................
@property
def heading_trim(self):
'''
Return the heading trim in degrees.
'''
return self._heading_trim
# ..........................................................................
def set_pitch_trim(self, trim):
'''
Set the pitch trim in degrees.
'''
self._pitch_trim = trim
self._log.info('pitch trim: \t{:>6.2f}°'.format(self._pitch_trim))
# ..........................................................................
@property
def pitch_trim(self):
'''
Return the pitch trim in degrees.
'''
return self._pitch_trim
# ..........................................................................
def set_roll_trim(self, trim):
'''
Set the roll trim in degrees.
'''
self._roll_trim = trim
self._log.info('roll trim: \t{:>6.2f}°'.format(self._roll_trim))
# ..........................................................................
@property
def roll_trim(self):
'''
Return the roll trim in degrees.
'''
return self._roll_trim
# ..........................................................................
def _configure(self):
self._log.info('settle time... ({:1.0f}s)'.format(self._settle_sec))
time.sleep(self._settle_sec) # settle before calibration
self._log.info(Fore.YELLOW + 'begin configuration/calibration...')
self._bno.begin_calibration()
time.sleep(0.1)
try:
self._log.info(Fore.YELLOW + 'setting features...')
_features = [
BNO_REPORT_ACCELEROMETER,
BNO_REPORT_GYROSCOPE,
BNO_REPORT_MAGNETOMETER,
BNO_REPORT_ACTIVITY_CLASSIFIER,
BNO_REPORT_STABILITY_CLASSIFIER,
# BNO_REPORT_ROTATION_VECTOR,
# BNO_REPORT_GEOMAGNETIC_ROTATION_VECTOR,
# BNO_REPORT_GAME_ROTATION_VECTOR,
# BNO_REPORT_LINEAR_ACCELERATION,
# BNO_REPORT_STEP_COUNTER,
# BNO_REPORT_SHAKE_DETECTOR,
# BNO_REPORT_RAW_ACCELEROMETER,
# BNO_REPORT_RAW_GYROSCOPE,
# BNO_REPORT_RAW_MAGNETOMETER,
]
for feature in _features:
self._log.debug('feature {}'.format(feature))
self._bno.enable_feature(feature)
time.sleep(0.01)
self._log.info(Fore.YELLOW + 'features set. ------------------- ')
# now calibrate...
time.sleep(2.0)
self._log.info(Fore.YELLOW + 'calibrating...')
start_time = time.monotonic()
_fail_count = 0
_confidence = 0
while not self._calibrated \
and _confidence < 50 \
and _fail_count < 30:
_fail_count += 1
try:
_confidence = self._activity_report()
self._stability_report()
_calibration_status = self._calibration_report()
self._log.info("Magnetometer:")
mag_x, mag_y, mag_z = self._bno.magnetic # pylint:disable=no-member
self._log.info("X: {:0.6f} Y: {:0.6f} Z: {:0.6f} uT".format(mag_x, mag_y, mag_z))
# self._log.info("Game Rotation Vector Quaternion:")
# ( game_quat_i, game_quat_j, game_quat_k, game_quat_real,) = self._bno.game_quaternion # pylint:disable=no-member
# self._log.info("I: {:0.6f} J: {:0.6f} K: {:0.6f} Real: {:0.6f}".format(game_quat_i, game_quat_j, game_quat_k, game_quat_real))
if _calibration_status >= self._min_calib_status: # we'll settle for Low Accuracy to start
self._calibrated = True
self._log.info(Fore.GREEN + "Calibrated.")
if _calibration_status > 1: # but save it if better than Low.
self._log.info(Fore.GREEN + Style.BRIGHT + "better than low quality calibration, saving status...")
self._bno.save_calibration_data()
break
time.sleep(0.2)
except Exception as e:
self._log.error("calibration error: {}".format(e))
finally:
self._min_calib_status = 1
self._calibrated = True
self._log.info(Fore.BLACK + "fail count: {:d}".format(_fail_count))
except Exception as e:
self._log.error('error setting features: {}'.format(e))
self._log.info("calibration complete.")
# ..........................................................................
@property
def calibrated(self):
return self._calibrated
# ..........................................................................
    def _stability_report(self):
        '''
        Logs a report indicating the current stability status.
        '''
        _stability_classification = self._bno.stability_classification
        self._log.info(Fore.BLUE + "Stability: \t{}".format(_stability_classification))
# ..........................................................................
def _activity_report(self):
'''
Prints a report indicating the current activity, returning an int
indicating a confidence level from 0-100%.
'''
_activity_classification = self._bno.activity_classification
_most_likely = _activity_classification["most_likely"]
_confidence = _activity_classification[_most_likely]
self._log.info(Fore.BLUE + "Activity: \t{}".format(_most_likely))
self._log.info(Fore.BLUE + "Confidence: \t{}%".format(_confidence))
return _confidence
# ..........................................................................
def _calibration_report(self):
'''
Prints a report indicating the current calibration status,
returning an int value from 0-3, as follows: "Accuracy Unreliable",
"Low Accuracy", "Medium Accuracy", or "High Accuracy".
'''
_calibration_status = self._bno.calibration_status
self._log.info(Fore.BLUE + "Calibration:\t{} ({:d})".format(REPORT_ACCURACY_STATUS[_calibration_status], _calibration_status))
return _calibration_status
# ..........................................................................
def _process_quaternion(self, color, title, quaternion):
# ( quat_i, quat_j, quat_k, quat_real ) = self._bno.quaternion
( quat_i, quat_j, quat_k, quat_real ) = quaternion
_q = Quaternion(real=quat_real, imaginary=[quat_i, quat_j, quat_k])
_q_heading = _q.degrees
_q_yaw_pitch_roll = _q.yaw_pitch_roll
_q_yaw = _q_yaw_pitch_roll[0]
_q_pitch = _q_yaw_pitch_roll[1]
_q_roll = _q_yaw_pitch_roll[2]
self._log.info(color + 'heading: {:>6.2f}°\t({})\t'.format(_q_heading, title) + Fore.BLACK + 'p={:>5.4f}\t r={:>5.4f}\t y={:>5.4f}'.format(_q_pitch, _q_roll, _q_yaw))
# ..........................................................................
def magneto(self):
'''
Returns the current x, y, z reading of the magnetometer.
'''
try:
self._calibration_report()
# _calibration_status = self._calibration_report()
return self._bno.magnetic
except Exception as e:
return None
# ..........................................................................
def read(self):
'''
The function that reads sensor values in a loop. This checks to see
if the 'sys' calibration is at least 3 (True), and if the Euler and
Quaternion values are within an error range of each other, this sets
the class variable for heading, pitch and roll. If they aren't within
range for more than n polls, the values revert to None.
'''
# self._log.info('starting sensor read...')
try:
# reports ......................................
# _confidence = self._activity_report()
# self._stability_report()
# _calibration_status = self._calibration_report()
# if _calibration_status >= self._min_calib_status:
if True:
# Accelerometer ..................................................................
# gyro_x, gyro_y, gyro_z = self._bno.gyro # pylint:disable=no-member
# self._log.info(Fore.RED + 'Gyroscope:\tX: {:0.6f} Y: {:0.6f} Z: {:0.6f} rads/s'.format(gyro_x, gyro_y, gyro_z))
# Magnetometer ...................................................................
# self._log.info(Fore.BLACK + 'self._bno.magnetic...')
mag_x, mag_y, mag_z = self._bno.magnetic # pylint:disable=no-member
_mag_degrees = Convert.convert_to_degrees(mag_x, mag_y, mag_z)
if self._heading_trim != 0.0:
_mag_degrees = Convert.offset_in_degrees(_mag_degrees, self._heading_trim)
self._log.info(Fore.YELLOW + 'heading: {:>6.2f}°\t(magneto)'.format(_mag_degrees))
# Rotation Vector Quaternion .....................................................
# self._log.info(Fore.BLACK + 'self._bno.quaternion...')
( quat_i, quat_j, quat_k, quat_real ) = self._bno.quaternion # pylint:disable=no-member
_quaternion = self._bno.quaternion
self._process_quaternion(Fore.GREEN, 'rot-quat', self._bno.quaternion)
# Geomagnetic Rotation Vector Quatnernion ........................................
# self._log.info(Fore.BLACK + 'self._bno.geometric_quaternion...')
_geomagnetic_quaternion = self._bno.geomagnetic_quaternion
( geo_quat_i, geo_quat_j, geo_quat_k, geo_quat_real, ) = _geomagnetic_quaternion # pylint:disable=no-member
self._process_quaternion(Fore.GREEN + Style.BRIGHT, 'geo-quat', _geomagnetic_quaternion)
return _mag_degrees, _quaternion[0], _geomagnetic_quaternion[0]
else:
self._log.warning('uncalibrated...')
return 0.0, 0.0, 0.0
self._log.debug('read ended.')
except KeyError as ke:
if self._verbose:
self._log.error('bno08x key error: {} {}'.format(ke, traceback.format_exc()))
else:
self._log.error('bno08x key error: {}'.format(ke))
except RuntimeError as re:
if self._verbose:
self._log.error('bno08x runtime error: {} {}'.format(re, traceback.format_exc()))
else:
self._log.error('bno08x runtime error: {}'.format(re))
except IndexError as ie:
if self._verbose:
self._log.error('bno08x index error: {} {}'.format(ie, traceback.format_exc()))
else:
self._log.error('bno08x index error: {}'.format(ie))
except OSError as oe:
if self._verbose:
self._log.error('bno08x os error: {} {}'.format(oe, traceback.format_exc()))
else:
self._log.error('bno08x OS error: {}'.format(oe))
except PacketError as pe:
if self._verbose:
self._log.error('bno08x packet error: {} {}'.format(pe, traceback.format_exc()))
else:
self._log.error('bno08x packet error: {}'.format(pe))
except Exception as e:
if self._verbose:
self._log.error('bno08x error: {} {}'.format(e, traceback.format_exc()))
else:
self._log.error('bno08x error: {}'.format(e))
## ..............................................................................
class Calibration(Enum):
UNKNOWN = ( 0, "Unknown", "The sensor is unable to classify the current stability.", False)
ON_TABLE = ( 1, "On Table", "The sensor is at rest on a stable surface with very little vibration.", False)
STATIONARY = ( 2, "Stationary", "The sensor’s motion is below the stable threshold but the stable duration requirement has not been met. This output is only available when gyro calibration is enabled.", False)
STABLE = ( 3, "Stable", "The sensor’s motion has met the stable threshold and duration requirements.", True)
IN_MOTION = ( 4, "In motion", "The sensor is moving.", False)
    # Enum unpacks each member's tuple value into the __init__ arguments below
def __init__(self, num, name, description, calibrated):
self._num = num
self._name = name
self._description = description
self._calibrated = calibrated
@property
def name(self):
return self._name
@property
def description(self):
return self._description
@property
def calibrated(self):
return self._calibrated
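# A minimal usage sketch (construction of the config and queue objects is
# illustrative only; the real wiring lives in the lib package):
#
#     _config = ...   # configuration loaded via ConfigLoader
#     _queue  = ...   # a MessageQueue instance
#     _bno    = BNO08x(_config, _queue, Level.INFO)
#     mag_degrees, quat_i, geo_quat_i = _bno.read()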
#EOF
|
[
"colorama.init",
"busio.I2C",
"time.sleep",
"lib.convert.Convert.convert_to_degrees",
"pyquaternion.Quaternion",
"time.monotonic",
"lib.convert.Convert.offset_in_degrees",
"lib.logger.Logger",
"traceback.format_exc",
"sys.exit",
"adafruit_bno08x.i2c.BNO08X_I2C"
] |
[((1019, 1025), 'colorama.init', 'init', ([], {}), '()\n', (1023, 1025), False, 'from colorama import init, Fore, Style\n'), ((1848, 1982), 'sys.exit', 'sys.exit', (["('This script requires the adafruit_bno08x module.\\n' +\n 'Install with: pip3 install --user adafruit-circuitpython-bno08x')"], {}), "('This script requires the adafruit_bno08x module.\\n' +\n 'Install with: pip3 install --user adafruit-circuitpython-bno08x')\n", (1856, 1982), False, 'import math, sys, time, traceback\n'), ((2060, 2178), 'sys.exit', 'sys.exit', (['"""This script requires the pyquaternion module.\nInstall with: pip3 install --user pyquaternion"""'], {}), '(\n """This script requires the pyquaternion module.\nInstall with: pip3 install --user pyquaternion"""\n )\n', (2068, 2178), False, 'import math, sys, time, traceback\n'), ((2612, 2635), 'lib.logger.Logger', 'Logger', (['"""bno085"""', 'level'], {}), "('bno085', level)\n", (2618, 2635), False, 'from lib.logger import Level, Logger\n'), ((3208, 3257), 'busio.I2C', 'busio.I2C', (['board.SCL', 'board.SDA'], {'frequency': '(800000)'}), '(board.SCL, board.SDA, frequency=800000)\n', (3217, 3257), False, 'import board, busio\n'), ((3278, 3306), 'adafruit_bno08x.i2c.BNO08X_I2C', 'BNO08X_I2C', (['i2c'], {'debug': '(False)'}), '(i2c, debug=False)\n', (3288, 3306), False, 'from adafruit_bno08x.i2c import BNO08X_I2C\n'), ((5430, 5458), 'time.sleep', 'time.sleep', (['self._settle_sec'], {}), '(self._settle_sec)\n', (5440, 5458), False, 'import math, sys, time, traceback\n'), ((5608, 5623), 'time.sleep', 'time.sleep', (['(0.1)'], {}), '(0.1)\n', (5618, 5623), False, 'import math, sys, time, traceback\n'), ((10979, 11041), 'pyquaternion.Quaternion', 'Quaternion', ([], {'real': 'quat_real', 'imaginary': '[quat_i, quat_j, quat_k]'}), '(real=quat_real, imaginary=[quat_i, quat_j, quat_k])\n', (10989, 11041), False, 'from pyquaternion import Quaternion\n'), ((6741, 6756), 'time.sleep', 'time.sleep', (['(2.0)'], {}), '(2.0)\n', (6751, 6756), False, 'import math, sys, time, traceback\n'), ((6841, 6857), 'time.monotonic', 'time.monotonic', ([], {}), '()\n', (6855, 6857), False, 'import math, sys, time, traceback\n'), ((6600, 6616), 'time.sleep', 'time.sleep', (['(0.01)'], {}), '(0.01)\n', (6610, 6616), False, 'import math, sys, time, traceback\n'), ((13326, 13373), 'lib.convert.Convert.convert_to_degrees', 'Convert.convert_to_degrees', (['mag_x', 'mag_y', 'mag_z'], {}), '(mag_x, mag_y, mag_z)\n', (13352, 13373), False, 'from lib.convert import Convert\n'), ((8432, 8447), 'time.sleep', 'time.sleep', (['(0.2)'], {}), '(0.2)\n', (8442, 8447), False, 'import math, sys, time, traceback\n'), ((13455, 13514), 'lib.convert.Convert.offset_in_degrees', 'Convert.offset_in_degrees', (['_mag_degrees', 'self._heading_trim'], {}), '(_mag_degrees, self._heading_trim)\n', (13480, 13514), False, 'from lib.convert import Convert\n'), ((14878, 14900), 'traceback.format_exc', 'traceback.format_exc', ([], {}), '()\n', (14898, 14900), False, 'import math, sys, time, traceback\n'), ((15126, 15148), 'traceback.format_exc', 'traceback.format_exc', ([], {}), '()\n', (15146, 15148), False, 'import math, sys, time, traceback\n'), ((15374, 15396), 'traceback.format_exc', 'traceback.format_exc', ([], {}), '()\n', (15394, 15396), False, 'import math, sys, time, traceback\n'), ((15614, 15636), 'traceback.format_exc', 'traceback.format_exc', ([], {}), '()\n', (15634, 15636), False, 'import math, sys, time, traceback\n'), ((15859, 15881), 'traceback.format_exc', 'traceback.format_exc', ([], {}), '()\n', (15879, 
15881), False, 'import math, sys, time, traceback\n'), ((16097, 16119), 'traceback.format_exc', 'traceback.format_exc', ([], {}), '()\n', (16117, 16119), False, 'import math, sys, time, traceback\n')]
|
from setuptools import setup, find_packages
setup(
name='edtw',
version='0.0.1',
license='MIT',
author="<NAME>",
author_email='<EMAIL>',
packages=find_packages('src'),
package_dir={'': 'src'},
url='https://github.com/qkudev/edtw',
keywords='python, dwt, entropy, mutual information',
install_requires=[
'scikit-learn',
'numpy'
],
)
|
[
"setuptools.find_packages"
] |
[((173, 193), 'setuptools.find_packages', 'find_packages', (['"""src"""'], {}), "('src')\n", (186, 193), False, 'from setuptools import setup, find_packages\n')]
|
import torch.nn as nn
import pytorch_lightning as pl
import torchvision.models as models
class ResNet101Encoder(pl.LightningModule):
def __init__(
self,
pretrained,
show_progress,
depth_adapted
):
super().__init__()
self.depth_adapted = depth_adapted
self.image_modules = list(models.resnet101(pretrained=pretrained, progress=show_progress).children())
if self.depth_adapted:
self.depth_adapt_first_layer()
self.core = nn.Sequential(*self.image_modules)
self.before_last_layer = self.core[:-2]
self.last_layer = self.core[-2:-1]
def forward(self, image):
f = self.core(image)
return f.squeeze()
def get_spatial_features(self, image):
return self.before_last_layer(image)
def get_flattened_features(self, spatial_features):
return self.last_layer(spatial_features).squeeze()
def depth_adapt_first_layer(self):
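        # copy the pretrained 3-channel stem weights into channels 0-2 of a new
        # 4-channel conv and initialise the extra depth channel from channel 0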
l1_weights = self.image_modules[0].weight.data.clone()
depth_adapted_conv = nn.Conv2d(4, 64, kernel_size=(7, 7), stride=(2, 2), padding=(3, 3), bias=False)
depth_adapted_conv.weight.data[:, :3] = l1_weights
depth_adapted_conv.weight.data[:, 3] = depth_adapted_conv.weight.data[:, 0]
self.image_modules[0] = depth_adapted_conv
print("[INFO.RESNET_ENCODER.DEPTH_ADAPTATION_COMPLETED]")
|
[
"torchvision.models.resnet101",
"torch.nn.Conv2d",
"torch.nn.Sequential"
] |
[((519, 553), 'torch.nn.Sequential', 'nn.Sequential', (['*self.image_modules'], {}), '(*self.image_modules)\n', (532, 553), True, 'import torch.nn as nn\n'), ((1073, 1152), 'torch.nn.Conv2d', 'nn.Conv2d', (['(4)', '(64)'], {'kernel_size': '(7, 7)', 'stride': '(2, 2)', 'padding': '(3, 3)', 'bias': '(False)'}), '(4, 64, kernel_size=(7, 7), stride=(2, 2), padding=(3, 3), bias=False)\n', (1082, 1152), True, 'import torch.nn as nn\n'), ((347, 410), 'torchvision.models.resnet101', 'models.resnet101', ([], {'pretrained': 'pretrained', 'progress': 'show_progress'}), '(pretrained=pretrained, progress=show_progress)\n', (363, 410), True, 'import torchvision.models as models\n')]
|
import argparse
import logging
import os
import torch
import torch.nn as nn
import torch.optim as optim
from torchvision import datasets, transforms
from torch.utils.data import DataLoader, Dataset, TensorDataset
import torchattacks
from advertorch.defenses import MedianSmoothing2D, BitSqueezing, JPEGFilter
from mnist_net import Le_Net, classifier_A, classifier_B, classifier_C
from cluster import Kmeans_cluster
def get_args():
parser = argparse.ArgumentParser()
parser.add_argument('--batch_size', type=int, default=128, help='batch_size')
parser.add_argument('--fname', type=str, default='test')
parser.add_argument('--model', default='LeNet', type=str,
choices=['LeNet', 'A', 'B', 'C'], help='models type')
parser.add_argument('--attack-type', type=str, default='fgsm',
choices=['fgsm', 'pgd', 'rfgsm', 'deepfool'])
parser.add_argument('--iter', type=int, default=50,
help='The number of iterations for iterative attacks')
parser.add_argument('--eps', type=float, default=0.3)
parser.add_argument('--alpha', type=float, default=0.01)
parser.add_argument('--defense', type=str, default='none',
choices=['km','bs','ms','jf'])
parser.add_argument('--k', type=int, default=2)
parser.add_argument('--data-dir', type=str, default='../../datasets/')
return parser.parse_args()
def main():
args = get_args()
logfile = './mnist/'+args.fname+'.log'
logger = logging.getLogger(__name__)
logging.basicConfig(
filename=logfile,
format='[%(asctime)s] - %(message)s',
datefmt='%Y/%m/%d %H:%M:%S',
level=logging.INFO)
logger.info(args)
if not os.path.exists('../advdata'):
os.mkdir('../advdata')
if args.model == 'A':
model = classifier_A().cuda()
checkpoint = torch.load('../models/MNIST_A.pth')
elif args.model == 'B':
model = classifier_B().cuda()
checkpoint = torch.load('../models/MNIST_B.pth')
elif args.model == 'C':
model = classifier_C().cuda()
checkpoint = torch.load('../models/MNIST_C.pth')
elif args.model == 'LeNet':
model = Le_Net().cuda()
checkpoint = torch.load('../models/MNIST_LeNet.pth')
model.load_state_dict(checkpoint)
model.eval()
mnist_test = datasets.MNIST(args.data_dir, train=False, download=True, transform=transforms.ToTensor())
test_loader = torch.utils.data.DataLoader(mnist_test, batch_size=128, shuffle=False)
if args.attack_type == 'pgd':
data_dir = "../advdata/MNIST_{}_pgd_{}-{}.pt".format(args.model,args.eps,args.iter)
if not os.path.exists(data_dir):
pgd_attack = torchattacks.PGD(model, eps = args.eps, alpha = args.alpha, iters=args.iter, random_start=False)
pgd_attack.set_mode('int')
pgd_attack.save(data_loader=test_loader, file_name=data_dir, accuracy=True)
adv_images, adv_labels = torch.load(data_dir)
elif args.attack_type == 'fgsm':
data_dir = "../advdata/MNIST_{}_fgsm_{}.pt".format(args.model, args.eps)
if not os.path.exists(data_dir):
fgsm_attack = torchattacks.FGSM(model, eps=args.eps)
fgsm_attack.set_mode('int')
fgsm_attack.save(data_loader=test_loader, file_name=data_dir, accuracy=True)
adv_images, adv_labels = torch.load(data_dir)
elif args.attack_type == 'deepfool':
data_dir = "../advdata/MNIST_{}_df_{}.pt".format(args.model, args.iter)
if not os.path.exists(data_dir):
df_attack = torchattacks.DeepFool(model, iters=args.iter)
df_attack.set_mode('int')
df_attack.save(data_loader=test_loader, file_name=data_dir, accuracy=True)
adv_images, adv_labels = torch.load(data_dir)
adv_data = TensorDataset(adv_images.float()/255, adv_labels)
adv_loader = DataLoader(adv_data, batch_size=128, shuffle=False)
model.eval()
correct = 0
total = 0
for images, labels in test_loader:
images = images.cuda()
outputs = model(images)
_, predicted = torch.max(outputs.data, 1)
total += labels.size(0)
correct += (predicted == labels.cuda()).sum()
logger.info('Accuracy with Clean images:%.4f',(float(correct) / total))
model.eval()
correct = 0
total = 0
for images, labels in adv_loader:
images = images.cuda()
outputs = model(images)
_, predicted = torch.max(outputs.data, 1)
total += labels.size(0)
correct += (predicted == labels.cuda()).sum()
logger.info('Accuracy with Adversarial images: %.4f',(float(correct) / total))
if args.defense == 'km':
def cluster_def(in_tensor,k=args.k):
return Kmeans_cluster(in_tensor,k)
defense = cluster_def
elif args.defense == 'bs':
bits_squeezing = BitSqueezing(bit_depth=2)
defense = nn.Sequential(
bits_squeezing,
)
elif args.defense == 'ms':
median_filter = MedianSmoothing2D(kernel_size=3)
defense = nn.Sequential(
median_filter,
)
elif args.defense == 'jf':
jpeg_filter = JPEGFilter(10)
defense = nn.Sequential(
jpeg_filter,
)
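    # the branches above select the preprocessing defense: 'km' = k-means
    # clustering, 'bs' = bit-depth squeezing, 'ms' = median smoothing,
    # 'jf' = JPEG compression filtering (quality 10)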
model.eval()
correct = 0
total = 0
for images, labels in adv_loader:
images = images.cuda()
images = defense(images)
outputs = model(images)
_, predicted = torch.max(outputs.data, 1)
total += labels.size(0)
correct += (predicted == labels.cuda()).sum()
logger.info('Accuracy with Defenced images: %.4f',(float(correct) / total))
if __name__ == "__main__":
main()
|
[
"os.mkdir",
"torchattacks.DeepFool",
"argparse.ArgumentParser",
"mnist_net.classifier_A",
"mnist_net.classifier_B",
"mnist_net.Le_Net",
"torch.utils.data.DataLoader",
"torch.load",
"os.path.exists",
"advertorch.defenses.MedianSmoothing2D",
"mnist_net.classifier_C",
"advertorch.defenses.BitSqueezing",
"cluster.Kmeans_cluster",
"torch.max",
"torchattacks.FGSM",
"advertorch.defenses.JPEGFilter",
"logging.basicConfig",
"torch.nn.Sequential",
"torchattacks.PGD",
"logging.getLogger",
"torchvision.transforms.ToTensor"
] |
[((448, 473), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {}), '()\n', (471, 473), False, 'import argparse\n'), ((1514, 1541), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (1531, 1541), False, 'import logging\n'), ((1546, 1674), 'logging.basicConfig', 'logging.basicConfig', ([], {'filename': 'logfile', 'format': '"""[%(asctime)s] - %(message)s"""', 'datefmt': '"""%Y/%m/%d %H:%M:%S"""', 'level': 'logging.INFO'}), "(filename=logfile, format='[%(asctime)s] - %(message)s',\n datefmt='%Y/%m/%d %H:%M:%S', level=logging.INFO)\n", (1565, 1674), False, 'import logging\n'), ((2473, 2543), 'torch.utils.data.DataLoader', 'torch.utils.data.DataLoader', (['mnist_test'], {'batch_size': '(128)', 'shuffle': '(False)'}), '(mnist_test, batch_size=128, shuffle=False)\n', (2500, 2543), False, 'import torch\n'), ((3922, 3973), 'torch.utils.data.DataLoader', 'DataLoader', (['adv_data'], {'batch_size': '(128)', 'shuffle': '(False)'}), '(adv_data, batch_size=128, shuffle=False)\n', (3932, 3973), False, 'from torch.utils.data import DataLoader, Dataset, TensorDataset\n'), ((1737, 1765), 'os.path.exists', 'os.path.exists', (['"""../advdata"""'], {}), "('../advdata')\n", (1751, 1765), False, 'import os\n'), ((1775, 1797), 'os.mkdir', 'os.mkdir', (['"""../advdata"""'], {}), "('../advdata')\n", (1783, 1797), False, 'import os\n'), ((1884, 1919), 'torch.load', 'torch.load', (['"""../models/MNIST_A.pth"""'], {}), "('../models/MNIST_A.pth')\n", (1894, 1919), False, 'import torch\n'), ((2994, 3014), 'torch.load', 'torch.load', (['data_dir'], {}), '(data_dir)\n', (3004, 3014), False, 'import torch\n'), ((4147, 4173), 'torch.max', 'torch.max', (['outputs.data', '(1)'], {}), '(outputs.data, 1)\n', (4156, 4173), False, 'import torch\n'), ((4517, 4543), 'torch.max', 'torch.max', (['outputs.data', '(1)'], {}), '(outputs.data, 1)\n', (4526, 4543), False, 'import torch\n'), ((5506, 5532), 'torch.max', 'torch.max', (['outputs.data', '(1)'], {}), '(outputs.data, 1)\n', (5515, 5532), False, 'import torch\n'), ((2007, 2042), 'torch.load', 'torch.load', (['"""../models/MNIST_B.pth"""'], {}), "('../models/MNIST_B.pth')\n", (2017, 2042), False, 'import torch\n'), ((2432, 2453), 'torchvision.transforms.ToTensor', 'transforms.ToTensor', ([], {}), '()\n', (2451, 2453), False, 'from torchvision import datasets, transforms\n'), ((2686, 2710), 'os.path.exists', 'os.path.exists', (['data_dir'], {}), '(data_dir)\n', (2700, 2710), False, 'import os\n'), ((2737, 2833), 'torchattacks.PGD', 'torchattacks.PGD', (['model'], {'eps': 'args.eps', 'alpha': 'args.alpha', 'iters': 'args.iter', 'random_start': '(False)'}), '(model, eps=args.eps, alpha=args.alpha, iters=args.iter,\n random_start=False)\n', (2753, 2833), False, 'import torchattacks\n'), ((3402, 3422), 'torch.load', 'torch.load', (['data_dir'], {}), '(data_dir)\n', (3412, 3422), False, 'import torch\n'), ((4808, 4836), 'cluster.Kmeans_cluster', 'Kmeans_cluster', (['in_tensor', 'k'], {}), '(in_tensor, k)\n', (4822, 4836), False, 'from cluster import Kmeans_cluster\n'), ((4922, 4947), 'advertorch.defenses.BitSqueezing', 'BitSqueezing', ([], {'bit_depth': '(2)'}), '(bit_depth=2)\n', (4934, 4947), False, 'from advertorch.defenses import MedianSmoothing2D, BitSqueezing, JPEGFilter\n'), ((4966, 4995), 'torch.nn.Sequential', 'nn.Sequential', (['bits_squeezing'], {}), '(bits_squeezing)\n', (4979, 4995), True, 'import torch.nn as nn\n'), ((1841, 1855), 'mnist_net.classifier_A', 'classifier_A', ([], {}), '()\n', (1853, 1855), False, 'from mnist_net import 
Le_Net, classifier_A, classifier_B, classifier_C\n'), ((2130, 2165), 'torch.load', 'torch.load', (['"""../models/MNIST_C.pth"""'], {}), "('../models/MNIST_C.pth')\n", (2140, 2165), False, 'import torch\n'), ((3149, 3173), 'os.path.exists', 'os.path.exists', (['data_dir'], {}), '(data_dir)\n', (3163, 3173), False, 'import os\n'), ((3201, 3239), 'torchattacks.FGSM', 'torchattacks.FGSM', (['model'], {'eps': 'args.eps'}), '(model, eps=args.eps)\n', (3218, 3239), False, 'import torchattacks\n'), ((3814, 3834), 'torch.load', 'torch.load', (['data_dir'], {}), '(data_dir)\n', (3824, 3834), False, 'import torch\n'), ((5070, 5102), 'advertorch.defenses.MedianSmoothing2D', 'MedianSmoothing2D', ([], {'kernel_size': '(3)'}), '(kernel_size=3)\n', (5087, 5102), False, 'from advertorch.defenses import MedianSmoothing2D, BitSqueezing, JPEGFilter\n'), ((5121, 5149), 'torch.nn.Sequential', 'nn.Sequential', (['median_filter'], {}), '(median_filter)\n', (5134, 5149), True, 'import torch.nn as nn\n'), ((1964, 1978), 'mnist_net.classifier_B', 'classifier_B', ([], {}), '()\n', (1976, 1978), False, 'from mnist_net import Le_Net, classifier_A, classifier_B, classifier_C\n'), ((2251, 2290), 'torch.load', 'torch.load', (['"""../models/MNIST_LeNet.pth"""'], {}), "('../models/MNIST_LeNet.pth')\n", (2261, 2290), False, 'import torch\n'), ((3560, 3584), 'os.path.exists', 'os.path.exists', (['data_dir'], {}), '(data_dir)\n', (3574, 3584), False, 'import os\n'), ((3610, 3655), 'torchattacks.DeepFool', 'torchattacks.DeepFool', (['model'], {'iters': 'args.iter'}), '(model, iters=args.iter)\n', (3631, 3655), False, 'import torchattacks\n'), ((5222, 5236), 'advertorch.defenses.JPEGFilter', 'JPEGFilter', (['(10)'], {}), '(10)\n', (5232, 5236), False, 'from advertorch.defenses import MedianSmoothing2D, BitSqueezing, JPEGFilter\n'), ((5255, 5281), 'torch.nn.Sequential', 'nn.Sequential', (['jpeg_filter'], {}), '(jpeg_filter)\n', (5268, 5281), True, 'import torch.nn as nn\n'), ((2087, 2101), 'mnist_net.classifier_C', 'classifier_C', ([], {}), '()\n', (2099, 2101), False, 'from mnist_net import Le_Net, classifier_A, classifier_B, classifier_C\n'), ((2214, 2222), 'mnist_net.Le_Net', 'Le_Net', ([], {}), '()\n', (2220, 2222), False, 'from mnist_net import Le_Net, classifier_A, classifier_B, classifier_C\n')]
|
import csv
import datetime
from django.conf.urls import url
from django.contrib import admin
from django.http import HttpResponse, HttpResponseForbidden
from .models import Feedback
class FeedbackAdmin(admin.ModelAdmin):
list_filter = ("found_useful",)
list_display = ("id", "found_useful", "comments", "created")
readonly_fields = [f.name for f in Feedback._meta.get_fields()]
ordering = ("-created", "id")
def has_delete_permission(self, request, obj=None):
return False
def has_add_permission(self, request):
return False
def get_urls(self):
urls = super().get_urls()
my_urls = [
url("export_all/", self.export_all_feedback),
url("export_comments/", self.export_feedback_with_comments),
]
return my_urls + urls
def export_all_feedback(self, request):
if not request.user.is_superuser:
return HttpResponseForbidden("Access Denied")
return self.export(Feedback.objects.all().order_by("-created", "id"))
def export_feedback_with_comments(self, request):
if not request.user.is_superuser:
return HttpResponseForbidden("Access Denied")
return self.export(
Feedback.objects.all()
.exclude(comments="")
.order_by("found_useful", "-created", "id")
)
def export(self, qs):
response = HttpResponse(content_type="text/csv")
response["Content-Disposition"] = 'attachment; filename="feedback-%s.csv"' % (
datetime.datetime.now().strftime("%Y-%m-%dT%H-%M-%S")
)
fields = ["id", "created", "comments", "found_useful", "source_url"]
writer = csv.writer(response)
writer.writerow(fields)
for row in qs:
writer.writerow([getattr(row, field) for field in fields])
return response
admin.site.register(Feedback, FeedbackAdmin)
|
[
"csv.writer",
"django.http.HttpResponse",
"django.contrib.admin.site.register",
"django.http.HttpResponseForbidden",
"django.conf.urls.url",
"datetime.datetime.now"
] |
[((1874, 1918), 'django.contrib.admin.site.register', 'admin.site.register', (['Feedback', 'FeedbackAdmin'], {}), '(Feedback, FeedbackAdmin)\n', (1893, 1918), False, 'from django.contrib import admin\n'), ((1406, 1443), 'django.http.HttpResponse', 'HttpResponse', ([], {'content_type': '"""text/csv"""'}), "(content_type='text/csv')\n", (1418, 1443), False, 'from django.http import HttpResponse, HttpResponseForbidden\n'), ((1701, 1721), 'csv.writer', 'csv.writer', (['response'], {}), '(response)\n', (1711, 1721), False, 'import csv\n'), ((660, 704), 'django.conf.urls.url', 'url', (['"""export_all/"""', 'self.export_all_feedback'], {}), "('export_all/', self.export_all_feedback)\n", (663, 704), False, 'from django.conf.urls import url\n'), ((718, 777), 'django.conf.urls.url', 'url', (['"""export_comments/"""', 'self.export_feedback_with_comments'], {}), "('export_comments/', self.export_feedback_with_comments)\n", (721, 777), False, 'from django.conf.urls import url\n'), ((925, 963), 'django.http.HttpResponseForbidden', 'HttpResponseForbidden', (['"""Access Denied"""'], {}), "('Access Denied')\n", (946, 963), False, 'from django.http import HttpResponse, HttpResponseForbidden\n'), ((1158, 1196), 'django.http.HttpResponseForbidden', 'HttpResponseForbidden', (['"""Access Denied"""'], {}), "('Access Denied')\n", (1179, 1196), False, 'from django.http import HttpResponse, HttpResponseForbidden\n'), ((1543, 1566), 'datetime.datetime.now', 'datetime.datetime.now', ([], {}), '()\n', (1564, 1566), False, 'import datetime\n')]
|
import numpy as np
import scipy.special as sp
import matplotlib.pyplot as plt
# radius of the oberservation circle
def NMLA_radius(omega,Rest=1):
# Input: omega--frequency; Rest--estimate of the distance from source to observation point
#
# Output: the radius of the oberservation circle
poly = [1,0,1,-2.5-0.775*(omega*Rest)**0.5]
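    # real root x of x**3 + x = 2.5 + 0.775*sqrt(omega*Rest); the radius below is
    # x**3/omega (the code assumes np.roots returns the real root last, rt[2])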
rt = np.roots(poly)
rs = np.real(rt[2])**3/omega
return rs
# NMLA filtering in the fourier space
def BGFiltrage(fu,kr,imp,L,gau,M):
# INPUT: fu: FFT of impedance quantity U
# kr: k*r
# imp: parameter in impedance quantity
# L: truncation level
# gau: parameter in gaussian kernel
# M: number of sample points on the observation cicle
#
# OUTPUT: filterd quantity BU
#
# bessel and derivative of bessel
LP = max( L+2, 3)
idx = np.array(list(range(LP)))
Bj = sp.jv(idx, kr) # bessel J_l(kr)
DBj = np.array([0.0]*(LP-1))
DBj[0] = -Bj[1]
DBj[1:] = 0.5*(Bj[:LP-2] - Bj[2:LP]) # derivative of bessel
# gausian kernel
A = gau/L
G = np.array([0.0]*(L+1))
G[0] = 1.0
    idx = np.array(list(range(1,L+1)))
G[1:] = np.exp(-0.5*(A*idx)**2)
G /= 2*np.sum(G) - 1
# filtering operator
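    # each retained Fourier mode of U is divided by its Bessel transfer
    # coefficient (J_l(kr) - 1j*imp*J_l'(kr)) * 1j**l and tapered by the Gaussian G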
Fltr = np.array([0.0 + 0.0*1j]*(L+1))
Fltr[0] = Bj[0]-1j*DBj[0]*imp
Fltr[1:] = (Bj[1:L+1]-1j*DBj[1:L+1]*imp)*(1j**idx)
Fltr = G/Fltr
fb = np.array([0.0 + 0.0*1j]*(M))
fb[0] = Fltr[0]*fu[0] # FU_0
fb[idx] = Fltr[idx]*fu[idx] # FU_{1,...,L}
fb[M-idx] = Fltr[idx]*fu[M-idx] # FU_{-1,...,-L}
return fb
# NMLA to estimate the ray direction
def NMLA(x0,y0,c0,omega,Rest,u,ux,uy):
imp = 0.5 # parameter in impedance quantity
gau = 3.5 # Parameter in Gaussian function
r = NMLA_radius(omega,Rest) # radius of the oberservation circle
kr = r*omega/c0 # k*r
L = int(round(kr + (kr)**(1.0/3) -2.5)) # truncation level to obtain needed precision
L = max(1,L)
M = 2*(4*L)+1 # number of samples on the observation circle
# Angle discretizaion on the circle
angl = np.linspace(0,2*np.pi,M+1)
ang = angl[:M]
X = x0 + r*np.cos(ang)
Y = y0 + r*np.sin(ang)
# compute the impedance quantity
Field = u(X, Y, omega)
DUx = ux(X, Y, omega)
DUy = uy(X, Y, omega)
DField = DUx*np.cos(ang) + DUy*np.sin(ang)
U = imp*DField/(1j*omega/c0) + Field
# filtering
fu = np.fft.fft(U)
fbeta = BGFiltrage(fu,kr,imp,L,gau,M)
beta = np.fft.ifft(fbeta)
# estimate the ray angle
sorted_index = sorted(range(len(beta)),key=lambda x:abs(beta[x]), reverse = True)
est_ang = ang[sorted_index[0]]
# plot
plt.plot(ang/np.pi,np.abs(beta))
plt.xlabel(r'$\theta/\pi$')
plt.show()
return est_ang
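# A minimal usage sketch (the plane-wave field below is illustrative only):
#
#     omega, c0, ang0 = 100.0, 1.0, np.pi/4
#     u  = lambda x, y, w: np.exp(1j*(w/c0)*(x*np.cos(ang0) + y*np.sin(ang0)))
#     ux = lambda x, y, w: 1j*(w/c0)*np.cos(ang0)*u(x, y, w)
#     uy = lambda x, y, w: 1j*(w/c0)*np.sin(ang0)*u(x, y, w)
#     est_ang = NMLA(0.0, 0.0, c0, omega, 2.0, u, ux, uy)   # should be ~ang0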
|
[
"numpy.roots",
"numpy.fft.ifft",
"matplotlib.pyplot.show",
"numpy.abs",
"numpy.sum",
"numpy.fft.fft",
"numpy.sin",
"numpy.array",
"numpy.exp",
"numpy.linspace",
"scipy.special.jv",
"numpy.real",
"numpy.cos",
"matplotlib.pyplot.xlabel"
] |
[((363, 377), 'numpy.roots', 'np.roots', (['poly'], {}), '(poly)\n', (371, 377), True, 'import numpy as np\n'), ((918, 932), 'scipy.special.jv', 'sp.jv', (['idx', 'kr'], {}), '(idx, kr)\n', (923, 932), True, 'import scipy.special as sp\n'), ((961, 987), 'numpy.array', 'np.array', (['([0.0] * (LP - 1))'], {}), '([0.0] * (LP - 1))\n', (969, 987), True, 'import numpy as np\n'), ((1117, 1142), 'numpy.array', 'np.array', (['([0.0] * (L + 1))'], {}), '([0.0] * (L + 1))\n', (1125, 1142), True, 'import numpy as np\n'), ((1206, 1235), 'numpy.exp', 'np.exp', (['(-0.5 * (A * idx) ** 2)'], {}), '(-0.5 * (A * idx) ** 2)\n', (1212, 1235), True, 'import numpy as np\n'), ((1296, 1334), 'numpy.array', 'np.array', (['([0.0 + 0.0 * 1.0j] * (L + 1))'], {}), '([0.0 + 0.0 * 1.0j] * (L + 1))\n', (1304, 1334), True, 'import numpy as np\n'), ((1448, 1480), 'numpy.array', 'np.array', (['([0.0 + 0.0 * 1.0j] * M)'], {}), '([0.0 + 0.0 * 1.0j] * M)\n', (1456, 1480), True, 'import numpy as np\n'), ((2257, 2289), 'numpy.linspace', 'np.linspace', (['(0)', '(2 * np.pi)', '(M + 1)'], {}), '(0, 2 * np.pi, M + 1)\n', (2268, 2289), True, 'import numpy as np\n'), ((2603, 2616), 'numpy.fft.fft', 'np.fft.fft', (['U'], {}), '(U)\n', (2613, 2616), True, 'import numpy as np\n'), ((2672, 2690), 'numpy.fft.ifft', 'np.fft.ifft', (['fbeta'], {}), '(fbeta)\n', (2683, 2690), True, 'import numpy as np\n'), ((2902, 2930), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""$\\\\theta/\\\\pi$"""'], {}), "('$\\\\theta/\\\\pi$')\n", (2912, 2930), True, 'import matplotlib.pyplot as plt\n'), ((2934, 2944), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (2942, 2944), True, 'import matplotlib.pyplot as plt\n'), ((2884, 2896), 'numpy.abs', 'np.abs', (['beta'], {}), '(beta)\n', (2890, 2896), True, 'import numpy as np\n'), ((387, 401), 'numpy.real', 'np.real', (['rt[2]'], {}), '(rt[2])\n', (394, 401), True, 'import numpy as np\n'), ((1241, 1250), 'numpy.sum', 'np.sum', (['G'], {}), '(G)\n', (1247, 1250), True, 'import numpy as np\n'), ((2322, 2333), 'numpy.cos', 'np.cos', (['ang'], {}), '(ang)\n', (2328, 2333), True, 'import numpy as np\n'), ((2350, 2361), 'numpy.sin', 'np.sin', (['ang'], {}), '(ang)\n', (2356, 2361), True, 'import numpy as np\n'), ((2502, 2513), 'numpy.cos', 'np.cos', (['ang'], {}), '(ang)\n', (2508, 2513), True, 'import numpy as np\n'), ((2520, 2531), 'numpy.sin', 'np.sin', (['ang'], {}), '(ang)\n', (2526, 2531), True, 'import numpy as np\n')]
|
'''
Created on Oct 26, 2015
@author: wirkert
'''
import numpy as np
import pandas as pd
from sklearn.preprocessing import Normalizer
def preprocess2(df, nr_samples=None, snr=None, movement_noise_sigma=None,
magnification=None, bands_to_sortout=None):
# first set 0 reflectances to nan
df["reflectances"] = df["reflectances"].replace(to_replace=0.,
value=np.nan)
# remove nan
df.dropna(inplace=True)
# extract nr_samples samples from data
if nr_samples is not None:
df = df.sample(nr_samples)
# get reflectance and oxygenation
X = df.reflectances
if bands_to_sortout is not None and bands_to_sortout.size > 0:
X.drop(X.columns[bands_to_sortout], axis=1, inplace=True)
snr = np.delete(snr, bands_to_sortout)
X = X.values
y = df.layer0[["sao2", "vhb"]]
# do data magnification
if magnification is not None:
X_temp = X
y_temp = y
for i in range(magnification - 1):
X = np.vstack((X, X_temp))
y = pd.concat([y, y_temp])
# add noise to reflectances
camera_noise = 0.
if snr is not None:
sigmas = X / snr
noises = np.random.normal(loc=0., scale=1, size=X.shape)
camera_noise = sigmas*noises
movement_noise = 0.
if movement_noise_sigma is not None:
nr_bands = X.shape[1]
nr_samples = X.shape[0]
# we assume no correlation between neighboring bands
CORRELATION_COEFFICIENT = 0.0
movement_variance = movement_noise_sigma ** 2
movement_variances = np.ones(nr_bands) * movement_variance
movement_covariances = np.ones(nr_bands-1) * CORRELATION_COEFFICIENT * \
movement_variance
movement_covariance_matrix = np.diag(movement_variances) + \
np.diag(movement_covariances, -1) + \
np.diag(movement_covariances, 1)
# percentual sample errors
sample_errors_p = np.random.multivariate_normal(mean=np.zeros(nr_bands),
cov=movement_covariance_matrix,
size=nr_samples)
# errors w.r.t. the curve height.
movement_noise = X * sample_errors_p
X += camera_noise + movement_noise
X = np.clip(X, 0.00001, 1.)
# do normalizations
X = normalize(X)
return X, y
def preprocess(batch, nr_samples=None, snr=None, movement_noise_sigma=None,
magnification=None, bands_to_sortout=None):
X, y = preprocess2(batch, nr_samples, snr, movement_noise_sigma,
magnification, bands_to_sortout)
return X, y["sao2"]
def normalize(X):
# normalize reflectances
normalizer = Normalizer(norm='l1')
X = normalizer.transform(X)
# reflectances to absorption
absorptions = -np.log(X)
X = absorptions
# get rid of sorted out bands
normalizer = Normalizer(norm='l2')
X = normalizer.transform(X)
return X
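# A minimal usage sketch (assumes `df` follows the expected layout, with a
# "reflectances" block and a "layer0" block holding "sao2" and "vhb"):
#
#     X, y = preprocess2(df, nr_samples=1000, snr=100.)
#     X, sao2 = preprocess(df, magnification=2)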
|
[
"numpy.log",
"numpy.zeros",
"numpy.ones",
"numpy.clip",
"numpy.random.normal",
"numpy.diag",
"sklearn.preprocessing.Normalizer",
"pandas.concat",
"numpy.delete",
"numpy.vstack"
] |
[((2277, 2299), 'numpy.clip', 'np.clip', (['X', '(1e-05)', '(1.0)'], {}), '(X, 1e-05, 1.0)\n', (2284, 2299), True, 'import numpy as np\n'), ((2715, 2736), 'sklearn.preprocessing.Normalizer', 'Normalizer', ([], {'norm': '"""l1"""'}), "(norm='l1')\n", (2725, 2736), False, 'from sklearn.preprocessing import Normalizer\n'), ((2902, 2923), 'sklearn.preprocessing.Normalizer', 'Normalizer', ([], {'norm': '"""l2"""'}), "(norm='l2')\n", (2912, 2923), False, 'from sklearn.preprocessing import Normalizer\n'), ((808, 840), 'numpy.delete', 'np.delete', (['snr', 'bands_to_sortout'], {}), '(snr, bands_to_sortout)\n', (817, 840), True, 'import numpy as np\n'), ((1236, 1284), 'numpy.random.normal', 'np.random.normal', ([], {'loc': '(0.0)', 'scale': '(1)', 'size': 'X.shape'}), '(loc=0.0, scale=1, size=X.shape)\n', (1252, 1284), True, 'import numpy as np\n'), ((2821, 2830), 'numpy.log', 'np.log', (['X'], {}), '(X)\n', (2827, 2830), True, 'import numpy as np\n'), ((1053, 1075), 'numpy.vstack', 'np.vstack', (['(X, X_temp)'], {}), '((X, X_temp))\n', (1062, 1075), True, 'import numpy as np\n'), ((1092, 1114), 'pandas.concat', 'pd.concat', (['[y, y_temp]'], {}), '([y, y_temp])\n', (1101, 1114), True, 'import pandas as pd\n'), ((1631, 1648), 'numpy.ones', 'np.ones', (['nr_bands'], {}), '(nr_bands)\n', (1638, 1648), True, 'import numpy as np\n'), ((1911, 1943), 'numpy.diag', 'np.diag', (['movement_covariances', '(1)'], {}), '(movement_covariances, 1)\n', (1918, 1943), True, 'import numpy as np\n'), ((1700, 1721), 'numpy.ones', 'np.ones', (['(nr_bands - 1)'], {}), '(nr_bands - 1)\n', (1707, 1721), True, 'import numpy as np\n'), ((1817, 1844), 'numpy.diag', 'np.diag', (['movement_variances'], {}), '(movement_variances)\n', (1824, 1844), True, 'import numpy as np\n'), ((1861, 1894), 'numpy.diag', 'np.diag', (['movement_covariances', '(-1)'], {}), '(movement_covariances, -1)\n', (1868, 1894), True, 'import numpy as np\n'), ((2040, 2058), 'numpy.zeros', 'np.zeros', (['nr_bands'], {}), '(nr_bands)\n', (2048, 2058), True, 'import numpy as np\n')]
|
# -*- coding: utf-8 -*-
# Generated by Django 1.11.4 on 2018-05-30 08:43
from __future__ import unicode_literals
import data.validators
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('data', '0047_officerallegation_outcome'),
]
operations = [
migrations.AddField(
model_name='investigator',
name='gender',
field=models.CharField(blank=True, max_length=1),
),
migrations.AddField(
model_name='investigator',
name='race',
field=models.CharField(default=b'Unknown', max_length=50, validators=[data.validators.validate_race]),
),
migrations.AddField(
model_name='investigatorallegation',
name='investigator_type',
field=models.CharField(max_length=32, null=True),
),
]
|
[
"django.db.models.CharField"
] |
[((432, 474), 'django.db.models.CharField', 'models.CharField', ([], {'blank': '(True)', 'max_length': '(1)'}), '(blank=True, max_length=1)\n', (448, 474), False, 'from django.db import migrations, models\n'), ((598, 698), 'django.db.models.CharField', 'models.CharField', ([], {'default': "b'Unknown'", 'max_length': '(50)', 'validators': '[data.validators.validate_race]'}), "(default=b'Unknown', max_length=50, validators=[data.\n validators.validate_race])\n", (614, 698), False, 'from django.db import migrations, models\n'), ((840, 882), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(32)', 'null': '(True)'}), '(max_length=32, null=True)\n', (856, 882), False, 'from django.db import migrations, models\n')]
|
from setuptools import setup
setup(
name='DE_LibUtil',
version='0.0.19',
packages=[''],
url='https://github.com/almirjgomes/DE_LibUtil.git',
license='MIT',
author='<NAME>',
author_email='<EMAIL>',
description='LibUtil - Biblioteca de Utilidades'
)
|
[
"setuptools.setup"
] |
[((30, 261), 'setuptools.setup', 'setup', ([], {'name': '"""DE_LibUtil"""', 'version': '"""0.0.19"""', 'packages': "['']", 'url': '"""https://github.com/almirjgomes/DE_LibUtil.git"""', 'license': '"""MIT"""', 'author': '"""<NAME>"""', 'author_email': '"""<EMAIL>"""', 'description': '"""LibUtil - Biblioteca de Utilidades"""'}), "(name='DE_LibUtil', version='0.0.19', packages=[''], url=\n 'https://github.com/almirjgomes/DE_LibUtil.git', license='MIT', author=\n '<NAME>', author_email='<EMAIL>', description=\n 'LibUtil - Biblioteca de Utilidades')\n", (35, 261), False, 'from setuptools import setup\n')]
|
from models.ModelManager import ModelManager
from models.relation_classifier import DATA_DIR, split_line, MODEL_PATH, read_file, DATA_DEV, DATA_TRAIN
from models.relation_classifier.RelationClassifierRNNBased import RelationClassifierRNNBased
def main():
X_train, Y_train = read_file(DATA_TRAIN)
X_dev, Y_dev = read_file(DATA_DEV)
rc = RelationClassifierRNNBased()
rc.make_vocab(X_train)
rc.train(X_train, Y_train, dev=(X_dev, Y_dev))
rc.save(MODEL_PATH)
if __name__ == '__main__':
main()
|
[
"models.relation_classifier.read_file",
"models.relation_classifier.RelationClassifierRNNBased.RelationClassifierRNNBased"
] |
[((280, 301), 'models.relation_classifier.read_file', 'read_file', (['DATA_TRAIN'], {}), '(DATA_TRAIN)\n', (289, 301), False, 'from models.relation_classifier import DATA_DIR, split_line, MODEL_PATH, read_file, DATA_DEV, DATA_TRAIN\n'), ((321, 340), 'models.relation_classifier.read_file', 'read_file', (['DATA_DEV'], {}), '(DATA_DEV)\n', (330, 340), False, 'from models.relation_classifier import DATA_DIR, split_line, MODEL_PATH, read_file, DATA_DEV, DATA_TRAIN\n'), ((351, 379), 'models.relation_classifier.RelationClassifierRNNBased.RelationClassifierRNNBased', 'RelationClassifierRNNBased', ([], {}), '()\n', (377, 379), False, 'from models.relation_classifier.RelationClassifierRNNBased import RelationClassifierRNNBased\n')]
|
'''
Functions to go in here (I think!?):
KC: 01/12/2018, ideas-
KC: 19/12/2018, added-
~NuSTAR class
'''
from . import data_handling
import sys
#from os.path import *
import os
from os.path import isfile
import astropy
from astropy.io import fits
import astropy.units as u
import matplotlib
import matplotlib.pyplot as plt
import matplotlib.colors as colors
from matplotlib.colors import LogNorm
from pylab import figure, cm
from astropy.coordinates import SkyCoord
import numpy as np
import nustar_pysolar as nustar
from . import filter_with_tmrng ######Kris
from . import custom_map ######Kris
import sunpy.map
from scipy import ndimage
from scipy.optimize import curve_fit
from scipy.ndimage import rotate
import re #for regular expressions
import warnings #suppress astropy warnings
import datetime
from datetime import timedelta
from astropy.io.fits.verify import VerifyWarning
import matplotlib.dates as mdates
import pickle
import subprocess
import pytz
from skimage import restoration
# from . import interp
from scipy import interpolate
from pandas.plotting import register_matplotlib_converters
register_matplotlib_converters() # register pandas datetime converters for matplotlib axes
'''
Alterations:
KC: 22/01/2019 - .
'''
#NuSTAR class for Python
class NustarDo:
np.seterr(divide='ignore', invalid='ignore') #ignore warnings resulting from missing header info
warnings.simplefilter('ignore', VerifyWarning)
warnings.simplefilter('ignore', RuntimeWarning)
warnings.simplefilter('ignore', UserWarning)
def __init__(self, evt_filename='', energy_range=[2.5,79], time_range = None): #set-up parameters
#if a filename is not given then the static functions can still be used
if evt_filename == '':
return
#directory of the file
directory_regex = re.compile(r'\w+/')
directory = directory_regex.findall(evt_filename)
self.evt_directory = '/'+''.join(directory)
        #search for the file name: some characters (no slashes), a dot, then more characters
evt_filename_regex = re.compile(r'\w+\.\w+')
name_of_file = evt_filename_regex.findall(evt_filename)[0]
#for a sunpy map object to be made then the file has to be positioned on the Sun
sunpos_regex = re.compile(r'sunpos')
sunpos = sunpos_regex.findall(name_of_file)
if sunpos == []:
raise ValueError('\nThe file must be a \'sunpos\' file, i.e. the observation is converted to appropriate solar coordinates.')
#search for 2 digits, a non-digit, then 2 digits again
fpm_regex = re.compile(r'\d{2}\D\d{2}')
focal_plane_module = fpm_regex.findall(name_of_file)[0][2]
#search for chu followed by however many consecutive digits
chu_regex = re.compile(r'chu\d+')
chu = chu_regex.findall(name_of_file)
if chu != []:
chu_state = chu[0]
else:
chu_state = 'not_split'
        # search for an underscore, a non-digit, and an underscore (for the mode the pipeline was run in if a chu file is given)
mode_regex = re.compile(r"_\D_")
mode = mode_regex.findall(name_of_file)
self.pipeline_mode = mode[0] if len(mode)>0 else ""
#search for all seperate sub-strings composed of digits, first one in evt_filename is observation id
obs_id_regex = re.compile(r'\d+')
obs_id = obs_id_regex.findall(name_of_file)[0]
self.obs_id = obs_id
#set attributes of the file and parameters used in other functions on the class
self.evt_filename = name_of_file
self.fpm = focal_plane_module
self.time_range = time_range
self.energy_range = energy_range
self.chu_state = chu_state
self.rectangles = None #set so that you don't have to plot a map to get a light curve
# for plot title
self.e_range_str = str(self.energy_range[0])+'-'+str(self.energy_range[1]) if self.energy_range[1]<79 else ">"+str(self.energy_range[0])
self.rel_t = data_handling.getTimeFromFormat("2010/01/01, 00:00:00") # nustar times are measured in seconds from this date
#extract the data within the provided parameters
hdulist = fits.open(evt_filename) #not self.evt_filename as fits.open needs to know the full path to the file
self.evt_data = hdulist[1].data
self.evt_header = hdulist[1].header
hdulist.close()
############*********** this is a hacky fix but will do for now ***********############
# if Python code is used for the sunpos file creation the re-written header keywords aren't saved properly, so...
        if (round(self.evt_header['TCDLT13'], 1)!=2.5) or (round(self.evt_header['TCDLT14'], 1)!=2.5):
self.evt_header['TCDLT13'] = 2.45810736 # x
self.evt_header['TCDLT14'] = 2.45810736 # y
#check evt_filename matches evt_header info
assert obs_id == self.evt_header['OBS_ID'], 'Observation ID in the .evt filename does not match ID in the .evt header info. {} =/= {}'.format(obs_id, self.evt_header['OBS_ID'])
assert focal_plane_module == self.evt_header['INSTRUME'][-1], 'Focal Plane Module (FPM) in the .evt filename does not match FPM in the .evt header info. {} =/= {}'.format(focal_plane_module, self.evt_header['INSTRUME'][-1])
if self.time_range == None:
#filter away the non grade zero counts and bad pixels
self.cleanevt = filter_with_tmrng.event_filter(self.evt_data, fpm=focal_plane_module,
energy_low=self.energy_range[0],
energy_high=self.energy_range[1])
#start and end time of the NuSTAR observation as datetime objects
self.time_range = [(self.rel_t+ timedelta(seconds=np.min(self.cleanevt['TIME']))).strftime('%Y/%m/%d, %H:%M:%S'),
(self.rel_t + timedelta(seconds=np.max(self.cleanevt['TIME']))).strftime('%Y/%m/%d, %H:%M:%S')]
elif len(self.time_range) == 2:
try:
self.cleanevt = filter_with_tmrng.event_filter(self.evt_data, fpm=focal_plane_module,
energy_low=self.energy_range[0],
energy_high=self.energy_range[1],
tmrng=self.time_range) ######Kris
except TypeError as error:
raise TypeError('\nTimes need to be a string in the form \'%y/%m/%d, %H:%M:%S\', '
'e.g.\'2018/12/25, 12:30:52\'')
else:
raise TypeError('\nCheck that it is only a start time and end time you are giving.')
#if there are no counts in cleanevt
if len(self.cleanevt) == 0:
            raise ValueError('\nThere are no counts within these parameters. '
                             '\nThis may be because no counts were recorded or that the parameters are outwith the '
                             'scope of NuSTAR and/or the observation.')
# now for the time tick marks...
clevt_duration = np.max(self.cleanevt['TIME'])-np.min(self.cleanevt['TIME'])
if clevt_duration > 3600*0.5:
self.xlocator = mdates.MinuteLocator(byminute=[0, 10, 20, 30, 40, 50], interval = 1)
elif 600 < clevt_duration <= 3600*0.5:
self.xlocator = mdates.MinuteLocator(byminute=[0, 5, 10, 15, 20, 25, 30, 35, 40, 45, 50, 55], interval = 1)
elif 240 < clevt_duration <= 600:
self.xlocator = mdates.MinuteLocator(interval = 2)
else:
self.xlocator = mdates.MinuteLocator(interval = 1)
@staticmethod
def shift(evt_data, pix_xshift=None, pix_yshift=None):
if pix_xshift != None:
for X in evt_data:
X['X'] = X['X'] + pix_xshift
if pix_yshift != None:
for Y in evt_data:
Y['Y'] = Y['Y'] + pix_yshift
return evt_data
@staticmethod
def arcsec_to_pixel(*args, **kwargs):
#NuSTAR values: ['crpix1'+0.5,'crpix2','cdelt1']
meta = {'centre_pix_val': [1499.5+0.5, 1500], 'arc_per_pix':[2.45810736], 'length':False}
#change list with kwargs
for key, kwarg in kwargs.items():
meta[key] = kwarg
#convert numbers so that they are easier to work with
indices_for_centre = {'x':meta['centre_pix_val'][0], 'y':meta['centre_pix_val'][1]}
assert 1 <= len(meta['arc_per_pix']) <= 2, '\'arc_per_pix\' needs to have one or two arguments only.'
if len(meta['arc_per_pix']) == 2:
delta_x = meta['arc_per_pix'][0]
delta_y = meta['arc_per_pix'][1]
elif len(meta['arc_per_pix']) == 1:
delta_x = meta['arc_per_pix'][0]
delta_y = meta['arc_per_pix'][0]
# if have an arcsec length and want the length in pixels
pixel_lengths = []
if meta['length'] == True:
for arg in args:
x_length = (arg[0] / delta_x)
y_length = (arg[1] / delta_y)
pixel_lengths.append([int(round(x_length,0)), int(round(y_length,0))])
return pixel_lengths
#input coordinates as [x,y] in arcseconds
pixel_coords = []
for arg in args:
x_index = indices_for_centre['x'] + (arg[0] / delta_x)
y_index = indices_for_centre['y'] + (arg[1] / delta_y)
pixel_coords.append([int(round(x_index,0)), int(round(y_index,0))])
return pixel_coords
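    # worked example with the NuSTAR defaults above (a sanity check, not part of
    # the original): 24.58 arcsec is ~10 pixels at 2.45810736 arcsec/pixel, so
    # arcsec_to_pixel([24.58, -24.58]) returns [[1510, 1490]]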
@staticmethod
def pixel_to_arcsec(*args, **kwargs):
#NuSTAR values: ['crpix1'+0.5,'crpix2','cdelt1']
meta = {'centre_pix_val': [1499.5+0.5, 1500], 'arc_per_pix':[2.45810736], 'length':False}
#change list with kwargs
for key, kwarg in kwargs.items():
meta[key] = kwarg
#convert numbers so that they are easier to work with
indices_for_centre = {'x':meta['centre_pix_val'][0], 'y':meta['centre_pix_val'][1]}
assert 1 <= len(meta['arc_per_pix']) <= 2, '\'arc_per_pix\' needs to have one or two arguments only.'
if len(meta['arc_per_pix']) == 2:
delta_x = meta['arc_per_pix'][0]
delta_y = meta['arc_per_pix'][1]
elif len(meta['arc_per_pix']) == 1:
delta_x = meta['arc_per_pix'][0]
delta_y = meta['arc_per_pix'][0]
# if have a pixel length and want the length in arcsec
arcsec_lengths = []
if meta['length'] == True:
for arg in args:
x_length = arg[0] * delta_x
y_length = arg[1] * delta_y
arcsec_lengths.append([x_length, y_length])
return arcsec_lengths
#input coordinates as [col,row] in pixels
arcsec_coords = []
for arg in args:
# arg[0] is x pixel position, so column
x_arcsec = (arg[0] - indices_for_centre['x']) * delta_x
# arg[1] is y pixel position, so row
y_arcsec = (arg[1] - indices_for_centre['y']) * delta_y
arcsec_coords.append([x_arcsec, y_arcsec])
return arcsec_coords
def nustar_shift_map(self, x_shift_arc, y_shift_arc):
#find shift in pix
shift_pix = self.arcsec_to_pixel([x_shift_arc, y_shift_arc], length=True)
#shift data now
shift_cleanevt = self.shift(self.cleanevt, pix_xshift=shift_pix[0][0], pix_yshift=shift_pix[0][1])
self.cleanevt = shift_cleanevt
@staticmethod
def fov_rotation(evt_data):
""" Returns the average rotation of the NuSTAR FoV from the gradient of the edges between
det0&3 and 1&2.
Parameters
----------
evt_data : astropy.io.fits.fitsrec.FITS_rec
Sunpos evt data containing the raw X and Y coordinates ("RAWX", "RAWY"), the solar X
and Y coordinates ("X", "Y"), and the detector each count came from ("DET_ID", an
integer from 0 to 3).
Returns
-------
A float of the average rotation from "North" in degrees where anticlockwise is positive.
This assumes the rotation is between 90 and -90 degrees.
Examples
--------
NustarDo.fov_rotation(evt_data)
>>> a number
"""
## split the detectors
d0_counts = evt_data[evt_data["det_id"]==0]
d1_counts = evt_data[evt_data["det_id"]==1]
d2_counts = evt_data[evt_data["det_id"]==2]
d3_counts = evt_data[evt_data["det_id"]==3]
## now split up for the coordinates
rawx0, rawy0, solx0, soly0 = d0_counts["RAWX"], d0_counts["RAWY"], d0_counts["X"], d0_counts["Y"]
rawx1, rawy1, solx1, soly1 = d1_counts["RAWX"], d1_counts["RAWY"], d1_counts["X"], d1_counts["Y"]
rawx2, rawy2, solx2, soly2 = d2_counts["RAWX"], d2_counts["RAWY"], d2_counts["X"], d2_counts["Y"]
rawx3, rawy3, solx3, soly3 = d3_counts["RAWX"], d3_counts["RAWY"], d3_counts["X"], d3_counts["Y"]
args = [[rawx0, rawy0, solx0, soly0, 0],
[rawx1, rawy1, solx1, soly1, 1],
[rawx2, rawy2, solx2, soly2, 2],
[rawx3, rawy3, solx3, soly3, 3]]
gradients = 0
for a in args:
rawx, rawy, solx, soly, det = a
# use the pixel edges between det 0&3 and 1&2, use the raw pixel coordinates for this
# orientation from the nustar_swguide.pdf, Figure 3
if det==0:
cols = collectSameXs(rawy, rawx, solx, soly)
m_row_per_col = maxRowInCol(cols)
elif det==1:
cols = collectSameXs(rawx, rawy, solx, soly)
m_row_per_col = maxRowInCol(cols)
elif det==2:
cols = collectSameXs(rawy, rawx, solx, soly)
m_row_per_col = maxRowInCol(cols)
elif det==3:
cols = collectSameXs(rawx, rawy, solx, soly)
m_row_per_col = maxRowInCol(cols)
# working with rawx and y to make sure using correct edge then find the
# corresponding entries in solar coords
aAndY = getXandY(m_row_per_col)
x, y = aAndY[0], aAndY[1]
## do I want to filter some out?
## leave for now
#if det in [0, 1]:
# x = x[y>np.median(y)]
# y = y[y>np.median(y)]
#elif det in [2, 3]:
# x = x[y<np.median(y)]
# y = y[y<np.median(y)]
# fit a straight line to the edge
popt, pcov = curve_fit(straightLine, x, y, p0=[0, np.mean(y)])
gradients += getDegrees(popt[0])
return gradients/len(args)
def nustar_deconv(self, map_array=None, psf_array=None, it=10, OA2source_offset=None, hor2SourceAngle=None, clip=False):
"""Class mathod to take a map (map_array) and a point spread function (psf_array) and deconvolve using
the Richardson-Lucy method with a number of iterations (it).
Parameters
----------
map_array : 2d array
The map of the data. Should be over the field of view. If "None" then the self.nustar_map class
attribute is used.
Default: None
psf_array : file string or 2d array
The PSF you want to use. This can be a string of the fits file for the PSF or a 2d numpy array.
If "None" then several common paths for nu'+self.fpm+'2dpsfen1_20100101v001.fits' are checked and,
if the file cannot be found, the original map is returned. Currently this won't be rescaled if
it is a different resolution to the map data, it will just crash instead.
Default: None
it : int
Number of iterations for the deconvolution.
Default: 10
OA2source_offset : float
Angle subtended between the optical axis (OA), observer, and the X-ray source in arcminutes
(0<=OA2source_offset<=8.5 arcminutes), i.e. radial distance to the source from the OA. Chooses
the correct PSF data to use.
Default: None
hor2SourceAngle : float
Angle subtended between horizontal through the optical axis (OA), and the line through the X-ray source and OA in degrees.
Clockwise is positive and anticlockwise is negative. Symmetric reflected in the origin so -90<=hor2SourceAngle<=90.
Default: None
clip : bool
Set values >1 and <-1 to 1 and -1 respectively after each iteration. Unless working with a
normalised image this should be "False" otherwise it's a mess.
Default: False
Returns
-------
A 2d numpy array of the deconvolved map.
Examples
--------
*Use within the class:
NU_SUNPOS_FILE, ITERATIONS = "nustar_filename", 10
nu = NustarDo(NU_SUNPOS_FILE)
nu.deconvolve['apply'] = True
nu.deconvolve['iterations'] = ITERATIONS
nu.nustar_setmap(submap='FoV')
deconv_map = nu.nustar_map.data
*Use without class:
STRING, FPM = "psf_filename", "A" or "B"
nu = NustarDo()
nu.fpm = FPM
nu.nustar_map = Sunpy NuSTAR map
deconv_map = nu.nustar_deconv(psf_array=STRING)
-or-
MAP, ARRAY, FPM = nustar data 2d numpy array, psf 2d numpy array, "A" or "B"
nu = NustarDo()
nu.fpm = FPM
deconv_map = nu.nustar_deconv(map_array=MAP, psf_array=ARRAY)
"""
## for defaults
if type(map_array) == type(None):
map_array = self.nustar_map.data
if type(psf_array) == type(None):
# default is to check for the nu'+self.fpm+'2dpsfen1_20100101v001.fits' PSF file (the one used in Glesener code)
trials = ['/opt/caldb/data/nustar/fpm/bcf/psf/nu'+self.fpm+'2dpsfen1_20100101v001.fits',
'/usr/local/caldb/data/nustar/fpm/bcf/psf/nu'+self.fpm+'2dpsfen1_20100101v001.fits',
'/home/kris/Desktop/link_to_kris_ganymede/old_scratch_kris/data_and_coding_folder/nustar_psfs/nu'+self.fpm+'2dpsfen1_20100101v001.fits',
'/home/kris/Desktop/nustar_psfs/nu'+self.fpm+'2dpsfen1_20100101v001.fits']
if type(OA2source_offset) != type(None):
psf_OA_angles = np.arange(0,9,0.5) # angles of 0 to 8.5 arcmin in 0.5 arcmin increments
index = np.argmin([abs(psfoaangles - OA2source_offset) for psfoaangles in psf_OA_angles]) # find the closest arcmin array
hdr_unit = index+1 # header units 1 to 18 (one for each of the arcmin entries) and 0 arcmin would be hdr_unit=1, hence the +1
# print("using angle: ", hdr_unit)
else:
hdr_unit = 1
#assume we can't find the file
found_psf = False
for t in trials:
# try the files, if one exists use it
if os.path.exists(t):
psfhdu = fits.open(t)
psf_h = psfhdu[hdr_unit].header['CDELT1'] # increment in degrees/pix
psf_array = psfhdu[hdr_unit].data
psfhdu.close()
psf_used = t
found_psf = True
# if we still couldn't find a default PSF then print this, set self.deconvolve to False, and just return the original map
if found_psf == False:
print('Could not find PSF file. Please provide the PSF filename or array.')
print('Returning original map.')
self.deconvolve['apply'] = False
self.deconv_settings_info = {'map':None, 'psf_file':None, 'psf_array':None, 'iterations':None}
return map_array
# check same res, at least in 1-D
assert psf_h*3600 == self.nustar_map.meta['CDELT1'], "The resolution in the PSF and the current map are different."
# if you have provided your own psf file use that instead
elif type(psf_array) == str:
psf_used = psf_array
psfhdu = fits.open(psf_array)
psf_h = psfhdu[1].header['CDELT1'] # increment in degrees/pix
psf_array = psfhdu[1].data
psfhdu.close()
# check same res, at least in 1-D
assert psf_h*3600 == self.nustar_map.meta['CDELT1'], "The resolution in the PSF and the current map are different."
else:
psf_used = 'Custom Array. Hopefully some numbers though.'
if type(hor2SourceAngle)!=type(None):
assert -90<=hor2SourceAngle<=90, "Please give \"hor2SourceAngle\" as an angle from horizontal to the source -90<=hor2SourceAngle<=90 where clockwise is positive and anticlockwise is negative"
psf_array = rotate(psf_array, hor2SourceAngle, reshape=True)
# deconvolve
deconvolved_RL = restoration.richardson_lucy(map_array, psf_array, iterations=it, clip=clip)
# deconvolution info for later use
self.deconv_settings_info = {'map':map_array, 'psf_file':psf_used, 'psf_array':psf_array, 'iterations':it}
return deconvolved_RL
@staticmethod
def find_boxOfData(array):
'''If there is an array with loads of 0s or nans and a region of numbers then this returns the rows
and columns that the block of numbers is encased between.'''
array = np.array(array)
array[np.isnan(array)] = 0
# first and last row
dataRows = []
for i,row in enumerate(array):
rSum = np.sum(row)
if rSum > 0:
dataRows.append(i)
between_rows = [dataRows[0], dataRows[-1]]
# first and last column
dataCols = []
for j,col in enumerate(array.T):
cSum = np.sum(col)
if cSum > 0:
dataCols.append(j)
between_cols = [dataCols[0], dataCols[-1]]
return {'rowIndices':between_rows, 'columnIndices':between_cols}
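# Quick sketch of find_boxOfData on a toy array (hypothetical data): a 2x2 block of
# ones sitting in rows 1-2 and columns 2-3 gives:
#     a = np.zeros((4, 5)); a[1:3, 2:4] = 1
#     NustarDo.find_boxOfData(a)  # -> {'rowIndices': [1, 2], 'columnIndices': [2, 3]}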
@staticmethod
def create_submap(sunpy_map_obj, lose_off_limb, submap):
if (lose_off_limb == True) and (len(submap) == 0):
#fix really large plot, instead of going from -3600 to 3600 in x and y
bl = SkyCoord(-1200*u.arcsec, -1200*u.arcsec, frame=sunpy_map_obj.coordinate_frame)
tr = SkyCoord(1200*u.arcsec, 1200*u.arcsec, frame=sunpy_map_obj.coordinate_frame)
return sunpy_map_obj.submap(bl,top_right=tr)
elif len(submap) == 4: #Submap to plot?
bottom_left = {'x':submap[0], 'y':submap[1]}
top_right = {'x':submap[2], 'y':submap[3]}
bl = SkyCoord(bottom_left['x']*u.arcsec, bottom_left['y']*u.arcsec, frame=sunpy_map_obj.coordinate_frame)
tr = SkyCoord(top_right['x']*u.arcsec, top_right['y']*u.arcsec, frame=sunpy_map_obj.coordinate_frame)
return sunpy_map_obj.submap(bl,top_right=tr)
if (lose_off_limb == False):
return sunpy_map_obj
else:
raise TypeError('\nCheck the submap coordinates that were given please. It should be a list with four '
'float/int entries in arcseconds in the form [bottom left x, bottom left y, top right x, '
'top right y].')
# might be best to only allow one of these at a time, either deconvolve OR gaussian filter
deconvolve = {'apply':False, 'iterations':10, 'OA2source_offset':None, 'hor2SourceAngle':None, 'clip':False} # set before nustar_setmap to run deconvolution on map
gaussian_filter = {'apply':False, 'sigma':2, 'mode':'nearest'}
sub_lt_zero = np.nan # replace less than zeroes with this value for plotting in a linear scale
own_map = None # if you already have a map that you want a submap of then set this, be careful not to time normalize again though
def nustar_setmap(self, time_norm=True, lose_off_limb=True, limits=None,
submap=None, rebin_factor=1, norm='linear', house_keeping_file=None):
# adapted from Iain's python code
if (self.deconvolve['apply'] == True) and (self.gaussian_filter['apply'] == True):
print('Caution! Did you mean to set deconvolve AND gaussian blur to True? If so, then the '
'deconvolution will happen first then the Gaussian filter is applied.')
# Map the filtered evt, into one corrected for livetime (so units count/s)
if type(self.own_map) == type(None):
self.nustar_map = custom_map.make_sunpy(self.cleanevt, self.evt_header, norm_map=False)
else:
self.nustar_map = self.own_map
if time_norm == True:
decision = input('Caution! Do you mean to time normalize your \'own_map\'? True or False: ')
time_norm = decision in ['True', 'true', 'T', 't', 'Yes', 'yes', 'Y', 'y']
# field of view in arcseconds
FoVlimits = self.find_boxOfData(self.nustar_map.data)
bottom_left = self.pixel_to_arcsec([FoVlimits['columnIndices'][0], FoVlimits['rowIndices'][0]])[0]
top_right = self.pixel_to_arcsec([FoVlimits['columnIndices'][1]+1, FoVlimits['rowIndices'][1]+1])[0] # plus one as index stops one short
self.FoV = [*bottom_left, *top_right]
if limits == None:
limits = []
if submap == None:
submap = []
elif type(submap) == str:
if submap.upper() == 'FOV':
submap = self.FoV
else:
print('The only string input to submap that is supported at the moment is FOV, fov, FoV, etc.')
self.submap = submap
self.time_norm = time_norm
if self.time_norm == True:
self.livetime(hk_filename=house_keeping_file, set_up_plot=False, show_fig=False)
#livetime correction
time_range = [(data_handling.getTimeFromFormat(tm) - self.rel_t).total_seconds() for tm in self.time_range]
indices = ((self.hk_times>=time_range[0]) & (self.hk_times<time_range[1]))
ltimes_in_range = self.hk_livetimes[indices]
livetime = np.average(ltimes_in_range)
lc_cor_nustar_map = self.nustar_map.data / (livetime * (time_range[1] - time_range[0]))
self.nustar_map = sunpy.map.Map(lc_cor_nustar_map, self.nustar_map.meta)
if (self.deconvolve['apply'] == False):
self.nustar_map = self.create_submap(self.nustar_map, lose_off_limb, self.submap)
elif (self.deconvolve['apply'] == True):
# make sure it's over the FoV
self.nustar_map = self.create_submap(self.nustar_map, lose_off_limb, self.FoV)
dconv = self.nustar_deconv(it=self.deconvolve['iterations'], OA2source_offset=self.deconvolve['OA2source_offset'],
hor2SourceAngle=self.deconvolve['hor2SourceAngle'], clip=self.deconvolve['clip'])
# make new map
self.nustar_map = sunpy.map.Map(dconv, self.nustar_map.meta)
# now cut to the shape you want
self.nustar_map = self.create_submap(self.nustar_map, lose_off_limb, self.submap)
if self.gaussian_filter['apply'] == True:
gaussian_width = self.gaussian_filter['sigma']
m = self.gaussian_filter['mode']
#Apply a guassian blur to the data to bring out the faint feature
dd = ndimage.gaussian_filter(self.nustar_map.data, gaussian_width, mode=m)
if limits == []:
dmin = np.min(dd[np.nonzero(self.nustar_map.data)])#*1e6 factor was here as the lowest value will come (came from dd) from the gaussian
#filter and not the actual lowest count rate hence the factor
dmax = np.max(dd[np.isfinite(self.nustar_map.data)])
elif len(limits) == 2:
if norm == 'lognorm':
if limits[0] <= 0:
dmin = 0.1
dmax=limits[1]
else:
dmin=limits[0]
dmax=limits[1]
elif norm == 'linear':
dmin=limits[0]
dmax=limits[1]
else:
raise TypeError('\nCheck the limits that were given please.')
else:
dd = self.nustar_map.data
if limits == []:
finite_vals = dd[np.isfinite(dd)]
dmin = np.min(finite_vals[np.nonzero(finite_vals)])
dmax = np.max(finite_vals)
elif len(limits) == 2:
if norm == 'lognorm':
if limits[0] <= 0:
dmin = 0.1
dmax=limits[1]
else:
dmin=limits[0]
dmax=limits[1]
elif norm == 'linear':
dmin=limits[0]
dmax=limits[1]
else:
raise TypeError('\nCheck the limits that were given please. It should be a list with two float/int '
'entries')
self.dmin = dmin # make it possible to get min and max normalisation values of the NuSTAR map
self.dmax = dmax
# Tidy up before plotting
dd[dd < dmin]=0
nm = sunpy.map.Map(dd, self.nustar_map.meta)
if rebin_factor != 1:
#can rebin the pixels if we want to further bring out faint features
#set to 1 means no actual rebinning
nx,ny = np.shape(nm.data)
if rebin_factor >= 1/nx and rebin_factor >= 1/ny:
dimensions = u.Quantity([nx*rebin_factor, ny*rebin_factor], u.pixel)
rsn_map = nm.resample(dimensions)
else:
raise TypeError(f'\nRebin factor must be greater than one over the x,y dimensions (1/{nx} and '
f'1/{ny}) so as to rebin to get one, or more, pixel(s) for the entire image, i.e. can\'t rebin to half a pixel.')
elif rebin_factor == 1:
rsn_map = nm
del nm
if norm == 'linear':
#change all zeros to NaNs so they appear white in the plot otherwise zeros appear as the lowest colour
#on the colourbar
rsn_map_data = rsn_map.data
rsn_map_data[rsn_map_data <= 0] = self.sub_lt_zero
rsn_map = sunpy.map.Map(rsn_map_data, rsn_map.meta)
# Setup the scaling of the map and colour table
rsn_map.plot_settings['norm'] = colors.Normalize(vmin=dmin,vmax=dmax)
rsn_map.plot_settings['cmap'] = cm.get_cmap('Spectral_r')
elif norm == 'lognorm':
#log(0) produces a NaN (-inf) here anyway so appears white
# Setup the scaling of the map and colour table
rsn_map.plot_settings['norm'] = colors.LogNorm(vmin=dmin,vmax=dmax)
rsn_map.plot_settings['cmap'] = cm.get_cmap('Spectral_r')
self.rsn_map = rsn_map
return rsn_map
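# Typical nustar_setmap usage (a minimal sketch; 'nu80414201001A06_cl_sunpos.evt' is a
# hypothetical file name):
#     nu = NustarDo('nu80414201001A06_cl_sunpos.evt')
#     rsn_map = nu.nustar_setmap(submap='FoV', norm='lognorm')  # livetime-corrected counts/s map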
annotations = {'apply':False, 'text':'Some text', 'position':(0,0), 'color':'black', 'fontsize':12, 'weight':'normal'}
rcParams_default_setup = True
cbar_title = 'Counts'
ax_label_size = 18
@staticmethod
def draw_solar_grid(rsnmap, axes):
rsnmap.draw_limb(color='black',linewidth=1,linestyle='dashed', zorder=0)
# Manually plot a heliographic overlay - hopefully future no_ticks option in draw_grid
overlay = axes.get_coords_overlay('heliographic_stonyhurst')
lon = overlay[0]
lat = overlay[1]
lon.set_ticks_visible(False)
lat.set_ticks_visible(False)
lat.set_ticklabel_visible(False)
lon.set_ticklabel_visible(False)
lon.coord_wrap = 180
lon.set_major_formatter('dd')
overlay.grid(color='grey', linewidth=0.5, linestyle='dashed', zorder=0)
plt_plot_lines = None
@staticmethod
def execute_plt(*arg):
"""
# Example
file = 'file_sunpos.evt'
nu = nustardo.NustarDo(file)
plt.figure(figsize=(10,10))
nu.nustar_setmap(submap="fov")
x,y = [0, 200], [0, 200]
nu.plt_plot_lines = [f'plt.plot({x},{y}, marker="o", ms=10, c="r")']
nu.nustar_plot(show_fig=False)
plt.show()
"""
for a in arg:
exec(a)
def nustar_plot(self, boxes=None, show_fig=True, save_fig=None, usr_title=None, draw_grid=True):
# adapted from Iain's python code
if self.rcParams_default_setup:
matplotlib.rcParams['font.sans-serif'] = "Arial"
matplotlib.rcParams['font.family'] = "sans-serif"
plt.rcParams["figure.figsize"] = (10,8)
plt.rcParams['font.size'] = 18
plt.rcParams['axes.facecolor']='white'
plt.rcParams['savefig.facecolor']='white'
# Start the plot - many things here just to make matplotlib look decent
self.rectangles = boxes
#fig = plt.figure(figsize=(9, 8), frameon=False)
ax = plt.subplot(projection=self.rsn_map, frame_on=False) #rsn_map nustar_submap
self.axes = ax
ax.set_facecolor((1.0, 1.0, 1.0))
self.rsn_map.plot()
# can't plot properly if the grid is drawn first so this allows plt.plot lines to be passed and executed before the grid is drawn
if type(self.plt_plot_lines)!=type(None):
self.execute_plt(*self.plt_plot_lines)
if self.annotations['apply'] == True:
plt.annotate(self.annotations['text'], self.annotations['position'], color=self.annotations['color'], fontsize=self.annotations['fontsize'], weight=self.annotations['weight'])
if draw_grid:
self.draw_solar_grid(self.rsn_map, ax)
# Tweak the titles and labels
title_obsdate = self.rsn_map.date.strftime('%Y-%b-%dT%H:%M:%S.%f')[:-13] #'{:.20}'.format('{:%Y-%b-%d}'.format(self.rsn_map.date))
fpm = 'FPM'+self.fpm
title_obstime_start = self.time_range[0][-8:]
title_obstime_end = self.time_range[1][-8:]
if type(usr_title) == type(None):
if self.chu_state == 'not_split':
ax.set_title('NuSTAR '+self.e_range_str+' keV '+fpm+' '+ title_obsdate+' '+title_obstime_start+' to '+title_obstime_end)
else:
ax.set_title('NuSTAR '+self.e_range_str+' keV '+fpm+' '+self.chu_state+' '+ title_obsdate+' '+title_obstime_start+' to '+title_obstime_end)
else:
ax.set_title(usr_title)
ax.set_ylabel('y [arcsec]', fontsize=self.ax_label_size)
ax.set_xlabel('x [arcsec]', fontsize=self.ax_label_size)
tx, ty = ax.coords
tx.set_major_formatter('s')
ty.set_major_formatter('s')
ax.grid(False)
# Add a colour bar
if self.time_norm == True:
plt.colorbar(fraction=0.035, pad=0.03,label=self.cbar_title+' $s^{-1}$')
else:
plt.colorbar(fraction=0.035, pad=0.03,label=self.cbar_title)
if boxes is not None:
if np.shape(boxes)==(4,):
rect = boxes
bottom_left_rectangle = SkyCoord(rect[0]*u.arcsec, rect[1]*u.arcsec, frame=self.rsn_map.coordinate_frame)
length = rect[2] - rect[0]
height = rect[3] - rect[1]
self.rsn_map.draw_rectangle(bottom_left_rectangle, width=length*u.arcsec, height=height*u.arcsec, color='black')
else:
b = 1
for rect in boxes:
bottom_left_rectangle = SkyCoord(rect[0]*u.arcsec, rect[1]*u.arcsec, frame=self.rsn_map.coordinate_frame)
length = rect[2] - rect[0]
height = rect[3] - rect[1]
self.rsn_map.draw_rectangle(bottom_left_rectangle, width=length*u.arcsec, height=height*u.arcsec, color='black')
for_text = self.arcsec_to_pixel([rect[0]-10,rect[3]+20], centre_pix_val= [self.rsn_map.meta['crpix1']+0.5, self.rsn_map.meta['crpix2']])
plt.text(for_text[0][0], for_text[0][1], 'Box '+str(b), fontsize=10)
b += 1
if save_fig != None:
plt.savefig(save_fig, dpi=300, bbox_inches='tight')
if show_fig == True:
plt.show()
def nustar_peek(self):
#just to view the map with all default settings
self.nustar_setmap()
self.nustar_plot()
@staticmethod
def stepped_lc_from_hist(x, y, inc_edges=True):
"""Takes an x and y input, duplicates the x values and y values with the offset as to produce a new x and y which
will produce a stepped graph once all the scatter points are plotted.
Parameters
----------
x : 1-d list/array
This is the original set of x values or, in the case for a histogram, the bin edges.
y : 1-d list/array
This is the original set of y values.
inc_edges : bool
This determines whether the ends should go from their value to zero (True) or stop where they are (False).
Default: True
Returns
-------
New x and y values that, when plotted, will produce a stepped graph. Can be used to represent binning along the x
axis.
"""
if len(x) == len(y)+1: #since histogram gives one more as they are the boundaries of the bins
old_x = x
x = x[:-1]
elif len(x) == len(y):
x = x #not necessary, but more readable just now
else:
raise ValueError('Either the x-axis array is the edge of the bins (len(x) == len(y)+1) or the x-axis is the '
'value for the beginning of each bin (len(x) == len(y)), you haven\'t satisfied either of '
'these.')
new_x = np.array(np.zeros(2*len(x)))
new_y = np.array(np.zeros(2*len(y)))
for i in range(len(x)): #x and y should be the same length to plot anyway
if i == 0: #start with the 1st and 2nd x value having the same y.
new_x[i] = x[i]
new_y[2*i], new_y[2*i+1] = y[i], y[i]
elif i == len(x)-1: #the last new_x should be one beyond the last x as this value for the start of its bin
if len(x) == len(y)+1:
new_x[2*i-1], new_x[2*i], new_x[2*i+1] = x[i], x[i], old_x[-1]
elif len(x) == len(y):
new_x[2*i-1], new_x[2*i], new_x[2*i+1] = x[i], x[i], x[i]+(x[i]-x[i-1])
new_y[2*i] , new_y[2*i+1] = y[i], y[i]
break
else: #else keep the pattern going that two adjacent x's should share a y
new_x[2*i-1], new_x[2*i] = x[i], x[i]
new_y[2*i], new_y[2*i+1] = y[i], y[i]
if inc_edges == True: #create first and last coordinates to have a new_y of zero
new_x = np.insert(new_x, 0, [new_x[0]])
new_x = np.append(new_x,[new_x[-1]])
new_y = np.insert(new_y, 0, [0])
new_y = np.append(new_y,[0])
return new_x, new_y
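# stepped_lc_from_hist sketch with toy numbers (hypothetical values, using the
# len(x) == len(y)+1 histogram-edges branch):
#     new_x, new_y = NustarDo.stepped_lc_from_hist([0, 1, 2], [2, 5], inc_edges=False)
#     # -> new_x = [0, 1, 1, 2], new_y = [2, 2, 5, 5], i.e. one flat step per bin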
@staticmethod
def dt_to_md(dt_array):
if type(dt_array) != list:
dt_array = [dt_array]
new_array = np.zeros(len(dt_array))
for c, d in enumerate(dt_array):
plt_date = mdates.date2num(d)
new_array[c] = plt_date
return new_array
@staticmethod
def spatial_filter(evt_data, sub_region_in_pixels):
x = evt_data['X']
y = evt_data['Y']
#find indices within the x and y pixel range
indices = (sub_region_in_pixels[0][0] < x)&(x<= sub_region_in_pixels[1][0]) & \
(sub_region_in_pixels[0][1] < y)&(y <= sub_region_in_pixels[1][1])
evt_data = evt_data[:len(indices)][indices] # [:len(indices)] is a quick fix, doesn't work otherwise if cleanevt is loaded from pickle
return evt_data
@staticmethod
def time_filter(evtdata, tmrng=None):
''' ***** From filter function ***** >4x quicker to just filter with time than with full filter ***** '''
if tmrng is None:
tmrng = [evtdata['TIME'][0], evtdata['TIME'][-1]]
elif tmrng is not None:
tstart = data_handling.getTimeFromFormat(tmrng[0]) #date must be in this format 'yyyy/mm/dd, HH:MM:SS'
tend = data_handling.getTimeFromFormat(tmrng[1])
rel_t = data_handling.getTimeFromFormat("2010/01/01, 00:00:00") #the date NuSTAR times are defined from
tstart_s = (tstart - rel_t).total_seconds() #both dates are converted to number of seconds from 2010-Jan-1
tend_s = (tend - rel_t).total_seconds()
tmrng = [tstart_s, tend_s]
time_filter = ( (evtdata['TIME']>tmrng[0]) & (evtdata['TIME']<tmrng[1]) )
inds = (time_filter).nonzero()
goodinds=inds[0]
return evtdata[goodinds]
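# time_filter usage sketch (hypothetical times; dates must use the 'yyyy/mm/dd, HH:MM:SS' format):
#     sub_evt = NustarDo.time_filter(evt_data, tmrng=['2018/09/10, 16:22:30',
#                                                     '2018/09/10, 16:24:30'])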
@staticmethod
def nustar_file_finder(start_directory='', obs_id='', descriptor='', fpm='', ext=''):
full_filename = None
file_directory = None
file_name = None
#expression for everything that ends in a slash
search_directory_regex = re.compile(r'\w+/')
#find all the folders in the evt directory (they end with a slash)
search_directory = search_directory_regex.findall(start_directory)
# search the folder the evt file is in first
sd = '/'+''.join(search_directory)
for in_dir in os.listdir(sd):
if in_dir == 'nu' + obs_id + fpm + descriptor + ext:
full_filename = os.path.join(sd, in_dir)
file_directory = sd
file_name = in_dir
return full_filename, file_directory, file_name
#don't include the last folder to go back a directory
search_directory = '/'+''.join(search_directory[:-1]) #go back a directory to search for the house keeping file
for _dirpath, _dirnames, _filenames in os.walk(search_directory):
for _file in _filenames:
if _file == 'nu' + obs_id + fpm + descriptor + ext:
full_filename = os.path.join(_dirpath, _file)
file_directory = _dirpath
file_name = _file
return full_filename, file_directory, file_name
return full_filename, file_directory, file_name
def livetime(self, hk_filename=None, set_up_plot=True, show_fig=True):
#file = '/Users/kris/Documents/PhD/data/nustar/nu80414201001A_fpm.hk'
'''
This has to be moved above the time profile function so it is defined to be called
'''
if self.rcParams_default_setup:
matplotlib.rcParams['font.sans-serif'] = "Arial"
matplotlib.rcParams['font.family'] = "sans-serif"
plt.rcParams["figure.figsize"] = (10,6)
plt.rcParams['font.size'] = 18
if hk_filename == None:
hk_filename, self.hk_directory, self.hk_filename = self.nustar_file_finder(start_directory=self.evt_directory, obs_id=self.obs_id, descriptor='_fpm', fpm=self.fpm, ext='.hk')
if hk_filename == None: #if there is still no hk_filename then there won't be one used
print('Unable to find appropriate .hk file.')
self.hk_times = np.array([]) # empty so that len(self.hk_times) == 0 can be checked later
self.hk_livetimes = [] # so this length is 0
return #stops the function here but doesn't stop the code, this is the same as 'return None'
name_of_hk_file_regex = re.compile(r'\w+\.\w+')
name_of_hk_file = name_of_hk_file_regex.findall(hk_filename)[0]
hk_obs_id_regex = re.compile(r'\d+')
hk_obs_id = hk_obs_id_regex.findall(name_of_hk_file)[0]
hk_fpm_regex = re.compile(r'[A-Z]')
hk_fpm = hk_fpm_regex.findall(name_of_hk_file)[0]
#check .evt file and .hk file match
assert self.obs_id == hk_obs_id, 'The observation id from the .evt file and the .hk are different, i.e. {} =/= {}'.format(self.obs_id, hk_obs_id)
assert self.fpm == hk_fpm, 'The FPM from the .evt file and the .hk are different, i.e. {} =/= {}'.format(self.fpm, hk_fpm)
hdulist = fits.open(hk_filename)
self.hk_header = hdulist[1].header
self.hk_data = hdulist[1].data
hdulist.close()
#check .hk filename matches its header info
assert self.hk_header['OBS_ID'] == hk_obs_id, 'The observation id from the .hk file header and the .hk filename are different, i.e. {} =/= {}'.format(self.hk_header['OBS_ID'], hk_obs_id)
assert self.hk_header['INSTRUME'][-1] == hk_fpm, 'The FPM from the .hk header and the .hk filename are different, i.e. {} =/= {}'.format(self.hk_header['INSTRUME'][-1], hk_fpm)
self.hk_times = self.hk_data['time']
self.lvt_times = [(self.rel_t + timedelta(seconds=t)) for t in self.hk_times]
self.hk_livetimes = self.hk_data['livetime']
if set_up_plot:
hktime = self.hk_times - self.hk_times[0]
dt_times = self.lvt_times
lt_start_hhmmss = str((self.rel_t + timedelta(seconds=np.min(self.hk_times))).strftime('%Y/%m/%d, %H:%M:%S'))
fig = plt.figure()
ax = plt.axes()
plt.semilogy(self.dt_to_md(dt_times), self.hk_livetimes, drawstyle='steps-mid')
plt.title('Livetime - '+lt_start_hhmmss[:10]) #get the date in the title
plt.xlabel('Start Time - '+lt_start_hhmmss[12:])
plt.ylabel('Livetime Fraction')
plt.xlim([data_handling.getTimeFromFormat(t) for t in self.time_range])#[dt_times[0], dt_times[-1]])
plt.ylim([0,1])
fmt = mdates.DateFormatter('%H:%M')
ax.xaxis.set_major_formatter(fmt)
ax.xaxis.set_major_locator(self.xlocator) # xlocator was plt.LinearLocator(9)
plt.xticks(rotation=30)
if show_fig == True:
plt.show()
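# livetime usage sketch (assumes the matching .hk file can be found near the evt file;
# the evt file name is hypothetical):
#     nu = NustarDo('nu80414201001A06_cl_sunpos.evt')
#     nu.livetime(set_up_plot=False, show_fig=False)
#     nu.hk_times, nu.hk_livetimes  # times (s from 2010/01/01) and livetime fractions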
t_bin = {'seconds_per_bin':10, 'method':'approx'}
def light_curve(self, cleanevt=None, hdr=None, sub_reg=None, tstart=None, tend=None,
count_rate=True, house_keeping_file=None, show_fig=True):
if self.rcParams_default_setup:
matplotlib.rcParams['font.sans-serif'] = "Arial"
matplotlib.rcParams['font.family'] = "sans-serif"
plt.rcParams["figure.figsize"] = (10,6)
plt.rcParams['font.size'] = 18
if cleanevt is None:
cleanevt = self.cleanevt
if hdr is None:
hdr = self.evt_header
if sub_reg == 'boxes':
sub_reg = self.rectangles
self.sub_reg_lc = sub_reg
single_lc = True # just start by assuming one light curve, don't worry, this only gets set to False if not
if tstart == None:
tstart = np.min(cleanevt['TIME'])
self.rel_tstart = tstart #already relative to 1/1/2010 and in seconds
else:
tstart = data_handling.getTimeFromFormat(tstart)
self.rel_tstart = (tstart - self.rel_t).total_seconds()
if tend == None:
tend = np.max(cleanevt['TIME'])
self.rel_tend = tend #already relative to 1/1/2010 and in seconds
else:
tend = data_handling.getTimeFromFormat(tend)
self.rel_tend = (tend - self.rel_t).total_seconds()
if count_rate == True:
self.livetime(hk_filename=house_keeping_file, set_up_plot=False, show_fig=False) #run to get times and livetimes
if len(self.hk_times) == 0:
decision = input('No livetimes present. Do you just want to see the counts vs. time instead: ')
if decision in ['Yes', 'yes', 'Y', 'y']:
count_rate = False
else:
print('Will not show plot.')
return
self.lc_livetimes = 0 # just to have it defined
if self.t_bin['method'] == 'approx':
if (type(cleanevt) == astropy.io.fits.fitsrec.FITS_rec) and (sub_reg == None): #data form of NuSTAR
t_bin_conversion = int((self.rel_tend - self.rel_tstart) // self.t_bin['seconds_per_bin']) #get approximately t_bin seconds per bin as start and end of
#data are fixed when the histogram is created
assert t_bin_conversion >= 1, 'Number of bins cannot be <1. Decrease \'t_bin\' value to get more bins.'
counts = np.histogram(cleanevt['TIME'], t_bin_conversion) #gives out bin values and bin edges
self.lc_counts = counts[0]
times = counts[1][:-1]
self.t_bin_edges = counts[1]
start_hhmmss = str((self.rel_t + timedelta(seconds=np.min(times))).strftime('%H:%M:%S'))
start_yyyymmdd = str((self.rel_t + timedelta(seconds=np.min(times))).strftime('%Y/%m/%d'))
elif (type(cleanevt) == astropy.io.fits.fitsrec.FITS_rec) and (sub_reg != None):
#this is to plot the light curve of a sub-region.
print('Inconvenient to approximate the time bins for the light curve of a sub_region.'
'\nChanging to \'exact\'.')
self.t_bin['method'] = 'exact'
else:
raise TypeError('\'astropy.io.fits.fitsrec.FITS_rec\' is the only supported data type at the moment.')
if self.t_bin['method'] == 'exact': #if since if the 'approx' flag is up and also submap!=None then time profile should be made here
t_bin_number = int((self.rel_tend - self.rel_tstart) // self.t_bin['seconds_per_bin']) #get whole number of bins that are t_bin seconds long and
#doesn't include any time at the end that only has data for some of the last range
assert t_bin_number >= 1, 'Number of bins cannot be <1. Decrease \'t_bin\' value to get more bins.'
edge = self.rel_tstart
self.t_bin_edges = np.zeros(t_bin_number+1) #+1 for the last edge
for t in range(len(self.t_bin_edges)):
self.t_bin_edges[t] = edge
edge += self.t_bin['seconds_per_bin']
times = self.t_bin_edges[:-1]
start_hhmmss = str((self.rel_t + timedelta(seconds=np.min(times))).strftime('%H:%M:%S'))
start_yyyymmdd = str((self.rel_t + timedelta(seconds=np.min(times))).strftime('%Y/%m/%d'))
if (type(cleanevt) == astropy.io.fits.fitsrec.FITS_rec) and (sub_reg == None): #data form of NuSTAR
counts = np.histogram(cleanevt['TIME'], self.t_bin_edges) #gives out bin values and bin edges
self.lc_counts = counts[0]
elif (type(cleanevt) == astropy.io.fits.fitsrec.FITS_rec) and (sub_reg != None):
if np.shape(sub_reg) == (4,):
counts = []
pixels = self.arcsec_to_pixel([sub_reg[0],sub_reg[1]], [sub_reg[2],sub_reg[3]])
spatial_evtdata = self.spatial_filter(self.cleanevt, pixels)
for t in range(len(self.t_bin_edges)-1):
# ts = (datetime.datetime(1970, 1, 1) + timedelta(seconds=(float(self.rel_t.strftime("%s"))+self.t_bin_edges[t]))).strftime('%Y/%m/%d, %H:%M:%S')
# te = (datetime.datetime(1970, 1, 1) + timedelta(seconds=(float(self.rel_t.strftime("%s"))+self.t_bin_edges[t+1]))).strftime('%Y/%m/%d, %H:%M:%S')
ts = (self.rel_t + timedelta(seconds=self.t_bin_edges[t])).strftime('%Y/%m/%d, %H:%M:%S')
te = (self.rel_t + timedelta(seconds=self.t_bin_edges[t+1])).strftime('%Y/%m/%d, %H:%M:%S')
sub_cleanevt = self.time_filter(spatial_evtdata, tmrng=[ts, te])
counts.append(len(sub_cleanevt['TIME']))
self.lc_counts = np.array(counts)
elif np.shape(sub_reg)[1] == 4:
all_counts = {}
all_count_rates = {}
for b, sub_r in enumerate(sub_reg, start=1):
counts = []
pixels = self.arcsec_to_pixel([sub_r[0],sub_r[1]], [sub_r[2],sub_r[3]])
spatial_evtdata = self.spatial_filter(self.cleanevt, pixels)
for t in range(len(self.t_bin_edges)-1):
ts = (self.rel_t + timedelta(seconds=self.t_bin_edges[t])).strftime('%Y/%m/%d, %H:%M:%S')
te = (self.rel_t + timedelta(seconds=self.t_bin_edges[t+1])).strftime('%Y/%m/%d, %H:%M:%S')
sub_cleanevt = self.time_filter(spatial_evtdata, tmrng=[ts, te])
counts.append(len(sub_cleanevt['TIME']))
box = ' (Box '+str(b)+')'
all_counts[box] = np.array(counts)
#if make_final_graph == True:
if count_rate == True:
#livetime correction
livetimes = np.zeros(len(self.t_bin_edges)-1)
for t in range(len(self.t_bin_edges)-1):
indices = ((self.hk_times>=self.t_bin_edges[t]) & (self.hk_times<self.t_bin_edges[t+1]))
ltimes_in_range = self.hk_livetimes[indices]
livetimes[t] = np.average(ltimes_in_range)
self.lc_livetimes = livetimes
counts_per_second = np.array(counts) / (livetimes * (times[1]-times[0]))
fig = plt.figure()
ax = plt.axes()
dt_times = [(self.rel_t + timedelta(seconds=t)) for t in times]
plt.plot(*self.stepped_lc_from_hist(self.dt_to_md(dt_times), counts_per_second))
plt.title('NuSTAR FPM'+self.fpm+' '+self.e_range_str+' keV Light Curve - '+start_yyyymmdd + box)
plt.xlim([data_handling.getTimeFromFormat(t) for t in self.time_range])
plt.xlabel('Start Time - '+start_hhmmss)
plt.ylim([0, np.max(counts_per_second[np.isfinite(counts_per_second)])*1.05])
plt.ylabel('Counts $s^{-1}$')
fmt = mdates.DateFormatter('%H:%M')
ax.xaxis.set_major_formatter(fmt)
ax.xaxis.set_major_locator(self.xlocator)
plt.xticks(rotation=30)
#plt.show()
all_count_rates[box] = counts_per_second
else:
fig = plt.figure()
ax = plt.axes()
dt_times = [(self.rel_t + timedelta(seconds=t)) for t in times]
plt.plot(*self.stepped_lc_from_hist(self.dt_to_md(dt_times), counts))
plt.title('NuSTAR FPM'+self.fpm+' '+self.e_range_str+' keV Light Curve - '+start_yyyymmdd + box)
plt.xlim([data_handling.getTimeFromFormat(t) for t in self.time_range])
plt.xlabel('Start Time - '+start_hhmmss)
plt.ylim([0, np.max(counts[np.isfinite(counts)])*1.05])
plt.ylabel('Counts')
fmt = mdates.DateFormatter('%H:%M')
ax.xaxis.set_major_formatter(fmt)
ax.xaxis.set_major_locator(self.xlocator)
plt.xticks(rotation=30)
#plt.show()
self.lc_counts = all_counts
if all_count_rates == {}:
self.lc_count_rates = None
else:
self.lc_count_rates = all_count_rates
self.lc_times = dt_times
if show_fig:
plt.show()
single_lc = False
else:
raise TypeError('Check the form of the sub-region was given in, e.g. need [bx,by,tx,ty] or [[bx,by,tx,ty], ...].')
else:
raise TypeError('\'astropy.io.fits.fitsrec.FITS_rec\' is the only supported data type at the moment.')
else:
if (self.t_bin['method'] != 'exact') and (self.t_bin['method'] != 'approx'):
raise ValueError('Only options for the time bins is \'approx\' or \'exact\'.')
if single_lc == True: #only in case multiple regions are plotted then they are handled in its own 'for' loop
if count_rate == True:
#livetime correction
livetimes = np.zeros(len(self.t_bin_edges)-1)
for t in range(len(self.t_bin_edges)-1):
indices = ((self.hk_times>=self.t_bin_edges[t]) & (self.hk_times<self.t_bin_edges[t+1]))
ltimes_in_range = self.hk_livetimes[indices]
livetimes[t] = np.average(ltimes_in_range)
self.lc_livetimes = livetimes
counts_per_second = self.lc_counts / (livetimes * (times[1]-times[0]))
fig = plt.figure()
ax = plt.axes()
dt_times = [(self.rel_t + timedelta(seconds=t)) for t in times]
plt.plot(*self.stepped_lc_from_hist(self.dt_to_md(dt_times), counts_per_second))
plt.title('NuSTAR FPM'+self.fpm+' '+self.e_range_str+' keV Light Curve - '+start_yyyymmdd)
plt.xlim([data_handling.getTimeFromFormat(t) for t in self.time_range])
plt.xlabel('Start Time - '+start_hhmmss)
plt.ylim([0, np.max(counts_per_second[np.isfinite(counts_per_second)])*1.05])
plt.ylabel('Counts $s^{-1}$')
fmt = mdates.DateFormatter('%H:%M')
ax.xaxis.set_major_formatter(fmt)
ax.xaxis.set_major_locator(self.xlocator)
plt.xticks(rotation=30)
#plt.show()
self.lc_times = dt_times
self.lc_count_rates = counts_per_second
else:
fig = plt.figure()
ax = plt.axes()
dt_times = [(self.rel_t + timedelta(seconds=t)) for t in times]
plt.plot(*self.stepped_lc_from_hist(self.dt_to_md(dt_times), self.lc_counts))
plt.title('NuSTAR FPM'+self.fpm+' '+self.e_range_str+' keV Light Curve - '+start_yyyymmdd)
plt.xlim([data_handling.getTimeFromFormat(t) for t in self.time_range])
plt.xlabel('Start Time - '+start_hhmmss)
plt.ylim([0, np.max(self.lc_counts[np.isfinite(self.lc_counts)])*1.05])
plt.ylabel('Counts')
fmt = mdates.DateFormatter('%H:%M')
ax.xaxis.set_major_formatter(fmt)
ax.xaxis.set_major_locator(self.xlocator)
plt.xticks(rotation=30)
#plt.show()
self.lc_times = dt_times
self.lc_count_rates = None
if show_fig:
plt.show()
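# light_curve usage sketch (hypothetical file and box in arcsec; count_rate=True assumes
# the matching .hk file can be found):
#     nu = NustarDo('nu80414201001A06_cl_sunpos.evt')
#     nu.t_bin = {'seconds_per_bin': 30, 'method': 'exact'}
#     nu.light_curve(sub_reg=[850, -350, 1000, -200])  # or sub_reg='boxes' after nustar_plot(boxes=...)
#     nu.lc_times, nu.lc_count_rates  # livetime-corrected counts/s per bin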
def full_obs_chus(self, start_directory=None, obs_id=None, descriptor='_chu123', ext='.fits' ,show_fig=True):
'''
Adapted from:
https://github.com/ianan/nustar_sac/blob/master/idl/load_nschu.pro
and
https://github.com/NuSTAR/nustar_solar/blob/master/depricated/solar_mosaic_20150429/read_chus.pro
'''
if start_directory == None:
start_directory=self.evt_directory
if obs_id == None:
obs_id=self.obs_id
chu_filename, self.chu_directory, self.chu_filename = self.nustar_file_finder(start_directory=start_directory, obs_id=obs_id, descriptor=descriptor, ext=ext)
#not self.chu_filename as fits.open needs to know the full path to the file
hdulist = fits.open(chu_filename)
data1 = hdulist[1].data
data2 = hdulist[2].data
data3 = hdulist[3].data
hdulist.close()
# easier to work with numpy arrays later
data_c1 = np.array(data1)
data_c2 = np.array(data2)
data_c3 = np.array(data3)
maxres = 20
for chu_num, dat in enumerate([data_c1, data_c2, data_c3]):
chu_bool = ((dat['VALID']==1) &
(dat['RESIDUAL']<maxres) &
(dat['STARSFAIL']<dat['OBJECTS']) &
(dat['CHUQ'][:,3]!=1))
chu_01 = chu_bool*1 # change true/false into 1/0
chu_mask = chu_01* (chu_num+1)**2 # give each chu a unique number that when it is added to another it gives a unique chu combo, like file permissions
if chu_num == 0:
chu_all = chu_mask # after chu 1 file have an array with 1s and 0s
else:
chu_all += chu_mask # after the others (chu2 and chu3) have an array with 1,4,9,5,10,13,14
# last data array in the for loop can give the time, no. of seconds from 1-Jan-2010
chu_time = dat['TIME']
# reassigned values are at 100, etc. so as to not accidentally double sort the values again
# e.g. if mask value was changed to 10, then if it was accidentally run again it would get sorted into chu state 13 etc.
chu_all[chu_all == 1] = 100 #chu1 # mask value in array is changed to chu state, e.g. mask value=5, chu state is 12, and value 102
chu_all[chu_all == 4] = 101 #chu2
chu_all[chu_all == 5] = 102 #chu12
chu_all[chu_all == 9] = 103 #chu3
chu_all[chu_all == 10] = 104 #chu13
chu_all[chu_all == 13] = 105 #chu23
chu_all[chu_all == 14] = 106 #chu123
chu_time = chu_time[chu_all > 0] # if there is still no chu assignment for that time then remove
chu_all = chu_all[chu_all > 0]
self.chu_all = chu_all
self.chu_reference = {'chu1':100, 'chu2':101, 'chu12':102, 'chu3':103, 'chu13':104, 'chu23':105, 'chu123':106}
tick_labels = ['','1', '2', '12', '3', '13', '23', '123']
self.chu_times = [(self.rel_t + timedelta(seconds=t)) for t in chu_time]
dt_times = self.chu_times
fig = plt.figure(figsize=(10,5))
ax = plt.axes()
plt.plot(dt_times, chu_all,'x')
plt.title('CHU States of NuSTAR on ' + dt_times[0].strftime('%Y/%m/%d')) #get the date in the title
plt.xlabel('Start Time - ' + dt_times[0].strftime('%H:%M:%S'))
plt.ylabel('NuSTAR CHUs')
plt.xlim([data_handling.getTimeFromFormat(t) for t in self.time_range])
fmt = mdates.DateFormatter('%H:%M')
ax.xaxis.set_major_formatter(fmt)
ax.xaxis.set_major_locator(self.xlocator)
ax.axes.set_yticklabels(tick_labels)
plt.xticks(rotation=30)
if show_fig == True:
plt.show()
lc_3D_params = {'energy_low':1.6, 'energy_high':80, 'time_range':None} # start at 1.6 keV as this is the lowest (yet not trusted) bin for NuSTAR for binning in 0.04 keV steps
def lightcurves_3D(self, all_evt_data=None, energy_increment=0.04, aspect=6):
'''***Under Construction***'''
if all_evt_data is None:
all_evt_data = self.evt_data
if self.lc_3D_params['time_range'] is None:
self.lc_3D_params['time_range'] = self.time_range
cleaned_all_evt = filter_with_tmrng.event_filter(all_evt_data, fpm = self.fpm,
energy_low = self.lc_3D_params['energy_low'],
energy_high = self.lc_3D_params['energy_high'],
tmrng=self.lc_3D_params['time_range'])
energies = np.arange(1.6 , self.lc_3D_params['energy_high'], energy_increment)
no_of_time = 200
times = np.arange(0, no_of_time, 1) # time bin indices (currently unused below)
er_and_tc = []
for e in range(len(energies)-1):
specific_lc_inds = filter_with_tmrng.by_energy(cleaned_all_evt, energies[e], energies[e+1])
specific_lc_data = cleaned_all_evt[specific_lc_inds]
counts = np.histogram(specific_lc_data['TIME'], no_of_time)[0]
er_and_tc.append(counts)
er_and_tc = np.array(er_and_tc)
print(np.max(er_and_tc))
fig = plt.figure(figsize=(6,8))
plt.imshow(er_and_tc, origin='lower', aspect=aspect, vmax=1)
plt.ylim([self.lc_3D_params['energy_low'], self.lc_3D_params['energy_high']])
plt.xlabel('Time')
plt.ylabel('Energy')
plt.show()
## event list for each energy bin (get energy filter function)
## get lightcurve for each energy bin
## Get 2D array for counts for each energy along rows, and time steps along the columns
## 1D array for the energies, 1D array for time steps
## get seperate, static method for 3D plot creation, return axis object
## axis limits to 2.5--80 keV (range of NuSTAR that's well calibrated)
def detectors(self, show_fig=True):
self.all_detectors = {}
plt.figure()
ax = plt.axes()
for d in range(4):
# if the detector is the one I want then I want the time of it, else leave it alone
self.all_detectors['det'+str(d)] = [self.cleanevt['TIME'][c] for c,i in enumerate(self.cleanevt['DET_ID']) if i==d]
# get percentage of counts each detector contributed to the full time
self.all_detectors['det'+str(d)+'%'] = len(self.all_detectors['det'+str(d)]) / len(self.cleanevt['TIME']) * 100
dets = np.histogram(self.all_detectors['det'+str(d)], self.t_bin_edges) #gives out bin values and bin edges
dt_times = [(self.rel_t + timedelta(seconds=t)) for t in dets[1]]
plt.plot(*self.stepped_lc_from_hist(self.dt_to_md(dt_times), dets[0]), label='det'+str(d)+': '+'{:.1f}'.format(self.all_detectors['det'+str(d)+'%'])+'%')
plt.legend()
fmt = mdates.DateFormatter('%H:%M')
ax.xaxis.set_major_formatter(fmt)
ax.xaxis.set_major_locator(self.xlocator)
plt.xticks(rotation=30)
plt.title('Detector Contribution '+self.e_range_str+" keV")
plt.ylabel('Counts from detector')
plt.xlabel('Time')
if show_fig:
plt.show()
def plotChuTimes(self, span=True, axis=None):
# remember to show_fig=False for the plotting methods as to allow alterations of the figures once run
# look for and get the start and end times for each CHU file
chus = ['chu1', 'chu2', 'chu12', 'chu3', 'chu13', 'chu23', 'chu123']
colours = ['k', 'r', 'g', 'c', 'm', 'b', 'y']
chuChanges = {}
axis = {'ax':plt} if axis is None else {'ax':axis}
pipeline_modes = ["_S_", "_N_"]
for c, chu in enumerate(chus):
for pm in pipeline_modes:
chuFile = self.evt_directory+'nu' + self.obs_id + self.fpm + '06_' + chu + pm + 'cl_sunpos.evt'
if isfile(chuFile):
break
if not isfile(chuFile):
continue
hdulist = fits.open(chuFile)
evt_data = hdulist[1].data
hdulist.close()
chuChanges[chu] = [self.rel_t + timedelta(seconds=min(evt_data['time'])),
self.rel_t + timedelta(seconds=max(evt_data['time']))]
# plot a shaded region or just the time boundaries for the chu changes
if span:
axis['ax'].axvspan(*chuChanges[chu], alpha=0.1, color=colours[c])
else:
axis['ax'].axvline(chuChanges[chu][0], color=colours[c])
axis['ax'].axvline(chuChanges[chu][1], color=colours[c])
self.chuChanges = chuChanges
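# plotChuTimes usage sketch: overlay CHU intervals on an existing light curve
# (remember show_fig=False on the plotting call so the figure can still be modified;
# assumes the per-CHU sunpos evt files sit in the same directory as the main evt file):
#     nu.light_curve(show_fig=False)
#     nu.plotChuTimes(span=True)  # one shaded span per CHU-combination file found
#     plt.show()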
def save(self, save_dir='./', folder_name=None, overwrite=False, **kwargs):
#replace folder of saved data if run twice or just make a new one?
"""
Can I automate the process using dir(nu) since this has every variable created?
Or at least add to a list of attributes to be saved.
Use os module to create appropriate directory structure for saved attributes.
"""
#print(dir(nuA))
'''
Variables/info to save:
***** evt_file_used *****
~evt_directory, evt_filename, evt_data, evt_header #where did the data come from?
~meta data, chu_state, energy range, fpm, obs id
***** house_keeping_file_used *****
~self.hk_directory, self.hk_filename, hk_data, hk_header #what hk file was used?
***** nustar_livetime_data *****
~hk_livetimes, hk_times, livetimes plot
***** nustar_map_data *****
~rsn_map and plot (for plot need to run the nustar_plot() with save enabled) #what does it look like?
~gaussian filter applied, rectangle coordinates
***** nustar_light_curve_data *****
~lc_counts/lc_count_rates, lc_times, lightcurve plot(s)
~rectangle coordinates
New stuff to save:
***** chu function ***** deconvolve settings *****
'''
if self.chu_state != 'not_split' and folder_name is None:
nustar_folder = save_dir + self.obs_id + self.fpm + '_' + self.chu_state + '_nustar_folder'
elif folder_name is not None:
nustar_folder = folder_name
else:
nustar_folder = save_dir + self.obs_id + self.fpm + '_nustar_folder'
# Create target Directory if don't exist
if not os.path.exists(nustar_folder + '/'):
nustar_folder = nustar_folder + '/'
os.mkdir(nustar_folder) #make empty folder
print("Directory " , nustar_folder , " Created.", end='')
# If the folder exists and overwrite is True then replace the first one
elif os.path.exists(nustar_folder + '/') and (overwrite == True):
nustar_folder = nustar_folder + '/'
subprocess.check_output(['rm', '-r', nustar_folder]) #remove everything in it too
os.mkdir(nustar_folder) #make empty folder
print("Replacing directory " , nustar_folder, end='')
# If the folder exists and overwrite is False then just make another file with an index
elif os.path.exists(nustar_folder + '/') and (overwrite == False):
number_exist = len(np.nonzero(['nustar_folder' in f for f in os.listdir(save_dir)])[0])
nustar_folder = nustar_folder + '(' + str(number_exist) + ')/'
os.mkdir(nustar_folder)
print("Directory " , nustar_folder , " already exists. Creating another.", end='')
self.nustar_folder = nustar_folder
# Now 'nustar_folder' is the folder things will be save into
# Start with evt file information
evt_folder = nustar_folder + 'evt_file_used/'
os.mkdir(evt_folder)
evt_list_to_save = ['evt_directory', 'evt_filename', 'obs_id', 'fpm', 'chu_state', 'energy_range',
'time_range', 'evt_data', 'evt_header', 'cleanevt']
evt_info = list(set(dir(self)) & set(evt_list_to_save))
evt_to_store = {}
for name in evt_info:
evt_to_store[name] = self.__dict__[name]
with open(evt_folder + 'evt_file_info.pickle', 'wb') as evt_save_file:
pickle.dump(evt_to_store, evt_save_file, protocol=pickle.HIGHEST_PROTOCOL)
# hk file information
hk_folder = nustar_folder + 'hk_file_used/'
os.mkdir(hk_folder)
hk_list_to_save = ['hk_directory', 'hk_filename', 'hk_data', 'hk_header']
hk_info = list(set(dir(self)) & set(hk_list_to_save))
hk_to_store = {}
for name in hk_info:
hk_to_store[name] = self.__dict__[name]
with open(hk_folder + 'hk_file_info.pickle', 'wb') as hk_save_file:
pickle.dump(hk_to_store, hk_save_file, protocol=pickle.HIGHEST_PROTOCOL)
# Livetime info
lvt_folder = nustar_folder + 'livetime_data/'
os.mkdir(lvt_folder)
lvt_list_to_save = ['hk_times', 'hk_livetimes']
lvt_info = list(set(dir(self)) & set(lvt_list_to_save))
lvt_to_store = {}
for name in lvt_info:
lvt_to_store[name] = self.__dict__[name]
with open(lvt_folder + 'livetime_data.pickle', 'wb') as lvt_save_file:
pickle.dump(lvt_to_store, lvt_save_file, protocol=pickle.HIGHEST_PROTOCOL)
# Map info
map_folder = nustar_folder + 'map_data/'
os.mkdir(map_folder)
map_list_to_save = ['rsn_map', 'gaussian_filter', 'time_norm', 'rectangles']
map_info = list(set(dir(self)) & set(map_list_to_save))
map_to_store = {}
for name in map_info:
try:
map_to_store[name] = self.__dict__[name]
except KeyError:
map_to_store[name] = NustarDo.__dict__[name]
with open(map_folder + 'map_data.pickle', 'wb') as map_save_file:
pickle.dump(map_to_store, map_save_file, protocol=pickle.HIGHEST_PROTOCOL)
# Light curve info
lc_folder = nustar_folder + 'light_curve_data/'
os.mkdir(lc_folder)
lc_list_to_save = ['lc_times', 'lc_counts', 'lc_count_rates', 'sub_reg_lc', 'lc_livetimes']
lc_info = list(set(dir(self)) & set(lc_list_to_save))
lc_to_store = {}
for name in lc_info:
lc_to_store[name] = self.__dict__[name]
with open(lc_folder + 'light_curve_data.pickle', 'wb') as lc_save_file:
pickle.dump(lc_to_store, lc_save_file, protocol=pickle.HIGHEST_PROTOCOL)
# Can save your own stuff
if len(kwargs) > 0:
own_folder = nustar_folder
with open(own_folder + 'kwargs_data.pickle', 'wb') as own_save_file:
pickle.dump(kwargs, own_save_file, protocol=pickle.HIGHEST_PROTOCOL)
# save the object that can be loaded back in
with open(nustar_folder + nustar_folder[:-1].split('/')[-1] + '.pickle', 'wb') as object_file:
pickle.dump(self.__dict__, object_file, protocol=pickle.HIGHEST_PROTOCOL)
self.object_file = nustar_folder + nustar_folder[:-1].split('/')[-1] + '.pickle'
print(' Now Populated.')
def load(self, object_file=None):
'''Takes the object's namespace from the save() method and loads it back in to all it's attributes.'''
if not hasattr(self, 'object_file') and object_file is None:
print('\'object_file\' attribute and input to this function are both \'None\', please provide one. \n Note: the input for this method takes priority.')
return
object_file = object_file if (object_file is not None) else self.object_file
with open(object_file, "rb") as input_file:
self.__dict__ = pickle.load(input_file)
def shift(evt_data, pix_xshift=None, pix_yshift=None):
if pix_xshift != None:
for X in evt_data:
X['X'] = X['X'] + pix_xshift
if pix_yshift != None:
for Y in evt_data:
Y['Y'] = Y['Y'] + pix_yshift
return evt_data
def nustars_synth_count(temp_response_dataxy, plasma_temp, plasma_em, source_area, errors=None, Tresp_syserror=0, log_data=False):
"""Takes data for a channel's temperature response, plasma temperature and emission measure and area of source and
returns the expected DN/s per pixel.
*** Check output and make sure your units work ***
Parameters
----------
temp_response_dataxy : dict
The x and y data for the temperature response of the channel of interest, e.g. {'x':[...], 'y':[...]}.
plasma_temp : float
Temperature of the response you want in MK.
plasma_em : float
Volumetric emission measure of the plasma in cm^-3.
(If you have column emission measure, i.e. cm^-5, then set source_area=1.)
source_area : float
Area of the source in cm^2.
errors : dict
A dictionary of dictionaries containing the errors on T and EM, e.g. {'T':{'+':a, '-':b},
'EM':{'+':c, '-':d}}.
Default: None
Tresp_syserror : float
Fractional systematic error on the temperature response, e.g. 20% error on temp_response_dataxy['y'] means Tresp_error=0.2
Default: 0
log_data : bool
Do you want the data (x and y) logged (base 10) for the interpolation?
Default: False
Returns
-------
A dictionary of floats that is the synthetic DN/s per pixel for the data given, temperature response,
temperature, and emission measure with units and errors.
"""
# find temperature response at the given plasma temperature in DN cm^5 pix^-1 s^-1
if log_data:
f = interpolate.interp1d(np.log10(temp_response_dataxy['x']), np.log10(temp_response_dataxy['y']))
temp_response = [10**f(np.log10(plasma_temp))]
else:
f = interpolate.interp1d(temp_response_dataxy['x'], temp_response_dataxy['y'])
temp_response = [f(plasma_temp)]
syn_flux = [tr * plasma_em * (1 / source_area) for tr in temp_response]
# For errors
if errors is not None:
min_T, max_T = plasma_temp - errors['T']['-'], plasma_temp + errors['T']['+']
min_EM, max_EM = plasma_em - errors['EM']['-'], plasma_em + errors['EM']['+']
e_response = []
for Ts in [min_T, max_T]:
# find temperature response at the given plasma temperature in DN cm^5 pix^-1 s^-1
r = [10**f(np.log10(Ts))] if log_data else [f(Ts)] # keep consistent with how f was built above
e_response.append(r[0])
temp_max_response = temp_response_dataxy['x'][np.argmax(temp_response_dataxy['y'])]
# what if there is a bump between central value and error range
if (e_response[0] < temp_response[0]) and (e_response[1] < temp_response[0]):
if min_T < temp_max_response < plasma_temp:
e_response[0] = np.max(temp_response_dataxy['y'])
elif plasma_temp < temp_max_response < max_T:
e_response[1] = np.max(temp_response_dataxy['y'])
min_R, max_R = e_response[0], e_response[1] #R from min_T and R from max_T
# include temperature response error
up_resp = 1 + Tresp_syserror
down_resp = 1 - Tresp_syserror
#flux from min_T(max_EM) and flux from max_T(min_EM)
min_flux, max_flux = min_R * max_EM * (1 / source_area), max_R * min_EM * (1 / source_area)
flux_range = [min_flux, max_flux]
e_response = np.array(e_response)[np.isfinite(e_response)]
flux_range = np.array(flux_range)[np.isfinite(flux_range)]
# max flux could be up_resp more, and min flux could be be down_resp more
f_err = [up_resp*np.max(flux_range) - syn_flux[0], syn_flux[0] - down_resp*np.min(flux_range)]
for n,f in enumerate(f_err):
if f < 0:
f_err[n] = np.max(f_err)
errors = {'syn_flux_err':{'+': f_err[0], '-':f_err[1]},
't_res_err':{'+': abs(up_resp*np.max(e_response) - temp_response[0]), '-':abs(temp_response[0] - down_resp*np.min(e_response))},
't_res_syserr':[Tresp_syserror*100, '%'],
'T_err':{'+': errors['T']['+'], '-':errors['T']['-']},
'EM_err':{'+': errors['EM']['+'],' -':errors['EM']['-']}}
return {'syn_flux':[syn_flux[0],'DN pix^-1 s^-1'], 't_res':[temp_response, 'DN cm^5 pix^-1 s^-1'], 'T':[plasma_temp, 'MK'], 'EM':[plasma_em, 'cm^-3'], 'errors':errors}
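# nustars_synth_count sketch with made-up numbers (hypothetical response curve; ~5 MK
# plasma, EM of 1e46 cm^-3, over a 1e18 cm^2 source area):
#     tresp = {'x': np.arange(1, 20, 0.5), 'y': 1e-28*np.arange(1, 20, 0.5)**2}
#     nustars_synth_count(tresp, plasma_temp=5, plasma_em=1e46, source_area=1e18)
#     # -> dict with 'syn_flux' in DN pix^-1 s^-1, plus the response, T, and EM used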
def timefilter_evt(file, time_range=None, save_dir=None):
"""Takes a .evt file and filters the events list to a given time range. Only for region selection, do not use directly with spectral fitting software.
Parameters
----------
file : Str
File (or directory/file) of the .evt file to be filtered by time.
time_range : list
A list of length 2 with the start and end date and time. Must be given in a specific format, e.g. time_range=['2018/09/10, 16:22:30', '2018/09/10, 16:24:30'].
Default: None
save_dir : Str
String of the directory for the filtered file to be saved.
Default: None
Returns
-------
Creates a new file with '_tf' before the file extension (meaning time filtered) and returns the name of the new file.
"""
if time_range == None:
print('No time_range given. Nothing will be done.')
return
file_regex = re.compile(r'.\w+') # form to split up filename string
ext = file_regex.findall(file) # splits up file into all components, directories, filename, extension
if save_dir == None:
new_file_name = ''.join(ext[:-1]) + '_tf' + ext[-1] # '_tf' for time filtered
else:
new_file_name = save_dir + ext[-2] + '_tf' + ext[-1]
hdulist = fits.open(file)
evtdata=hdulist[1].data # data to be filtered
evt_in_time = NustarDo().time_filter(evtdata, tmrng=time_range) # picks events inside time range
hdulist[1].data = evt_in_time # replaces this hdu with the filtered events list
hdulist.writeto(new_file_name, overwrite=True) # saves the edited file, original stays as is
hdulist.close()
return new_file_name
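# timefilter_evt usage sketch (hypothetical file; writes 'nu80414201001A06_cl_sunpos_tf.evt'
# alongside the original):
#     new_file = timefilter_evt('nu80414201001A06_cl_sunpos.evt',
#                               time_range=['2018/09/10, 16:22:30', '2018/09/10, 16:24:30'])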
def CheckGrade0ToAllGrades(evtFile, wholeRangeToo=False, saveFig=None, timeRange=None, printOut=False, shortTitle=""):
"""Takes a NuSTAR evt file and compares the grade 0 events to the events of all grades.
Adapted from: https://github.com/ianan/ns_proc_test/blob/main/test_proc_jun20_002.ipynb
Parameters
----------
evtFile : str
The .evt file.
wholeRangeToo : Bool
If you want to plot the whole energy range in a second plot, next to the one ranging from
1.6--10 keV, set this to True.
Default: False
saveFig : str
If you want to save the figure made as a PDF then set this to a string of the save name.
Default: None
timeRange : list, 2 strings
If you only want a certain time range of the total file's spectrum to be plotted, e.g.
["%Y/%m/%d, %H:%M:%S", "%Y/%m/%d, %H:%M:%S"].
Default: None
printOut : Bool
If you want to print out the output nicely(-ish) set this to True.
Default: False
shortTitle : Str
Add a quick title to help keep track of the plots
Default: ""
Returns
-------
Dictionary containing the file name used ["file"], the time range of the file ["fileTimeRange"],
time range you asked it to plot ["timeRangeGivenToPlot"], effective exposure of full file ["eff_exp"],
ontime of full file ["ontime"], and percentage livetime ["lvtime_percent"] of full file given.
"""
# read in .pha files for grade 0 and all grades
hdulist = fits.open(evtFile)
evt_data = hdulist[1].data
evt_header = hdulist[1].header
hdulist.close()
# what is the time range of the file before filtering with time if you want
## nustar times are measured in seconds from this date
rel_t = data_handling.getTimeFromFormat("2010/01/01, 00:00:00")
file_start = str((rel_t + timedelta(seconds=np.min(evt_data["time"]))).strftime('%Y/%m/%d, %H:%M:%S'))
file_end = str((rel_t + timedelta(seconds=np.max(evt_data["time"]))).strftime('%Y/%m/%d, %H:%M:%S'))
# filter evt file by time?
if type(timeRange) == list:
if len(timeRange) == 2:
evt_data = NustarDo().time_filter(evt_data, tmrng=timeRange)
# get the data
hist_gradeAll, be_gradeAll = np.histogram(evt_data['pi']*0.04+1.6,bins=np.arange(1.6,79,0.04))
# work out the grade 0 spectra as well
data_grade0 = evt_data['pi'][evt_data['grade']==0]
hist_grade0, be_grade0 = np.histogram(data_grade0*0.04+1.6,bins=np.arange(1.6,79,0.04))
# plotting info
width = 11 if wholeRangeToo else 5
columns = 2 if wholeRangeToo else 1
y_lims_spec = [1e-1, 1.1*np.max(hist_gradeAll)]
ratio = hist_gradeAll/hist_grade0
    finite_vals = np.isfinite(ratio)
    y_lims_ratio = [0.95, 1.05*np.max(ratio[finite_vals])] if wholeRangeToo else [0.95, 1.05*np.max(ratio[finite_vals][:int((10-1.6)/0.04)])]
axes_made = []
plt.figure(figsize=(width,7))
# define subplots for close look
ax1 = plt.subplot2grid((4, columns), (0, 0), colspan=1, rowspan=3)
axes_made.append(ax1)
ax2 = plt.subplot2grid((4, columns), (3, 0), colspan=1, rowspan=1)
axes_made.append(ax2)
plt.tight_layout()
# axis 1: the plots for all grades and grade 0
ax1.plot(be_gradeAll[:-1], hist_gradeAll, drawstyle="steps-pre", label="Grade All")
ax1.plot(be_grade0[:-1], hist_grade0, drawstyle="steps-pre", label="Grade 0")
ax1.set_yscale("log")
ax1.set_ylim(y_lims_spec)
ax1.set_ylabel("Counts")# s$^{-1}$ keV$^{-1}$")
plt.setp(ax1.get_xticklabels(), visible=False)
ax1.set_xlim([1.6,10])
ax1.set_title("Grade 0 vs All Grades - "+shortTitle)
ax1.legend()
# axis 2: the difference between all grades and grade 0
ax2.plot(be_grade0[:-1], ratio, drawstyle="steps-pre", color='k')
ax2.set_ylabel("All Grades / Grade0")
ax2.set_ylim(y_lims_ratio)
ax2.set_xlim([1.6,10])
ax2.set_xlabel("Energy [keV]")
ax2.grid(axis='y')
# define subplots for whole energy range
if wholeRangeToo:
# define subplots for close look
ax3 = plt.subplot2grid((4, 2), (0, 1), colspan=1, rowspan=3)
axes_made.append(ax3)
ax4 = plt.subplot2grid((4, 2), (3, 1), colspan=1, rowspan=1)
axes_made.append(ax4)
plt.tight_layout()
# axis 1: the plots for all grades and grade 0
ax3.plot(be_gradeAll[:-1], hist_gradeAll, drawstyle="steps-pre", label="Grade All")
ax3.plot(be_grade0[:-1], hist_grade0, drawstyle="steps-pre", label="Grade 0")
ax3.set_yscale("log")
ax3.set_ylim(y_lims_spec)
ax3.set_xscale("log")
plt.setp(ax3.get_xticklabels(), visible=False)
ax3.set_xlim([1.6,79])
ax3.set_title("Same But Whole E-range")
ax3.legend()
# axis 2: the difference between all grades and grade 0
ax4.plot(be_grade0[:-1], ratio, drawstyle="steps-pre", color='k')
ax4.set_ylim(y_lims_ratio)
ax4.set_xscale("log")
ax4.set_xlim([1.6,79])
ax4.set_xlabel("Energy [keV]")
ax4.grid(axis='y')
if type(saveFig) == str:
plt.savefig(saveFig, bbox_inches="tight")
# plt.show()
inform = {"file":evtFile,
"fileTimeRange":[file_start, file_end],
"timeRangeGivenToPlot":timeRange,
"eff_exp":evt_header['livetime'],
"ontime":evt_header['ontime'],
"lvtime_percent":100*evt_header['livetime']/evt_header['ontime']}
if printOut:
for key in inform.keys():
print(key, " : ", inform[key])
return inform, axes_made
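# A minimal usage sketch for CheckGrade0ToAllGrades (the .evt path is a
# hypothetical placeholder):
#
#     info, axes = CheckGrade0ToAllGrades("obs/nu80410201001A06_cl_sunpos.evt",
#                                         wholeRangeToo=True,
#                                         shortTitle="FPMA")
#     plt.show()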
def CheckGrade0ToAnyGrades(evtFile, grades, wholeRangeToo=False, saveFig=None, timeRange=None, printOut=False, shortTitle="", xlims=None):
"""Takes a NuSTAR evt file and compares the grade 0 events to the events of all grades.
Adapted from: https://github.com/ianan/ns_proc_test/blob/main/test_proc_jun20_002.ipynb
Parameters
----------
evtFile : str
The .evt file.
grades : list of length 1 or 2 list
A list of the lists of grades you want the grade 0 counts to be compared against. E.g. grades=[[1], [0,4]]
means that grade zero will be checked against grade 1 counts and grade 0-4 counts inclusive.
wholeRangeToo : Bool
If you want to plot the whole energy range in a second plot, next to the one ranging from
        1.6--10 keV, set this to True.
Default: False
saveFig : str
If you want to save the figure made as a PDF then set this to a string of the save name.
        Default: None
timeRange : list, 2 strings
If you only want a certain time range of the total file's spectrum to be plotted, e.g.
["%Y/%m/%d, %H:%M:%S", "%Y/%m/%d, %H:%M:%S"].
        Default: None
printOut : Bool
If you want to print out the output nicely(-ish) set this to True.
Default: False
shortTitle : Str
Add a quick title to help keep track of the plots
Default: ""
Returns
-------
Dictionary containing the file name used ["file"], the time range of the file ["fileTimeRange"],
time range you asked it to plot ["timeRangeGivenToPlot"], effective exposure of full file ["eff_exp"],
ontime of full file ["ontime"], percentage livetime ["lvtime_percent"] of full file given, Grade 0
    plotting info, and your custom grade info too.
"""
# read in .pha files for grade 0 and all grades
hdulist = fits.open(evtFile)
evt_data = hdulist[1].data
evt_header = hdulist[1].header
hdulist.close()
# what is the time range of the file before filtering with time if you want
## nustar times are measured in seconds from this date
rel_t = data_handling.getTimeFromFormat("2010/01/01, 00:00:00")
file_start = str((rel_t + timedelta(seconds=np.min(evt_data["time"]))).strftime('%Y/%m/%d, %H:%M:%S'))
file_end = str((rel_t + timedelta(seconds=np.max(evt_data["time"]))).strftime('%Y/%m/%d, %H:%M:%S'))
# filter evt file by time?
if type(timeRange) == list:
if len(timeRange) == 2:
evt_data = NustarDo().time_filter(evt_data, tmrng=timeRange)
# work out the grade 0 spectra as well
data_grade0 = evt_data['pi'][evt_data['grade']==0]
hist_grade0, be_grade0 = np.histogram(data_grade0*0.04+1.6,bins=np.arange(1.6,79,0.04))
other_grades = {}
ratios = []
max_ratios, min_ratios = [], []
# get the data
for g in grades:
if len(g)==1:
data_grade = evt_data['pi'][evt_data['grade']==g[0]]
g_str = "Grade "+str(g[0])
other_grades[g_str] = np.histogram(data_grade*0.04+1.6,bins=np.arange(1.6,79,0.04))
else:
data_grade = evt_data['pi'][(evt_data['grade']>=g[0]) & (evt_data['grade']<=g[1])]
g_str = "Grade "+str(g[0])+"-"+str(g[1])
other_grades[g_str] = np.histogram(data_grade*0.04+1.6,bins=np.arange(1.6,79,0.04))
ratio = other_grades[g_str][0]/hist_grade0
ratios.append(ratio)
maximum = np.max(ratio[np.isfinite(ratio)]) if wholeRangeToo else np.max(ratio[np.isfinite(ratio)][:int((10-1.6)/0.04)])
minimum = np.min(ratio[np.isfinite(ratio)]) if wholeRangeToo else np.min(ratio[np.isfinite(ratio)][:int((10-1.6)/0.04)])
max_ratios.append(maximum)
min_ratios.append(minimum)
# plotting info
width = 11 if wholeRangeToo else 5
columns = 2 if wholeRangeToo else 1
y_lims_spec = [1e-1, 1.1*np.max(hist_grade0)]
y_lims_ratio = [0.95*np.min(min_ratios), 1.05*np.max(max_ratios)]
axes_made = []
plt.figure(figsize=(width,7))
# define subplots for close look
ax1 = plt.subplot2grid((4, columns), (0, 0), colspan=1, rowspan=3)
axes_made.append(ax1)
ax2 = plt.subplot2grid((4, columns), (3, 0), colspan=1, rowspan=1)
axes_made.append(ax2)
plt.tight_layout()
# axis 1: the plots for all grades and grade 0
for key, r in zip(other_grades.keys(), ratios):
ax1.plot(other_grades[key][1][:-1], other_grades[key][0], drawstyle="steps-pre", label=key)
ax2.plot(other_grades[key][1][:-1], r, drawstyle="steps-pre")
ax1.plot(be_grade0[:-1], hist_grade0, drawstyle="steps-pre", label="Grade 0")
ax1.set_yscale("log")
ax1.set_ylim(y_lims_spec)
ax1.set_ylabel("Counts")# s$^{-1}$ keV$^{-1}$")
plt.setp(ax1.get_xticklabels(), visible=False)
    xlims = xlims if xlims is not None else [1.6, 10]
ax1.set_xlim(xlims)
ax1.set_title("Grade 0 vs Chosen Grades - "+shortTitle)
ax1.legend()
# axis 2: the difference between all grades and grade 0
# ax2.plot(be_grade0[:-1], ratio, drawstyle="steps-pre", color='k')
ax2.set_ylabel("Chosen Grades / Grade0")
ax2.set_ylim(y_lims_ratio)
ax2.set_xlim(xlims)
ax2.set_xlabel("Energy [keV]")
ax2.grid(axis='y')
# define subplots for whole energy range
if wholeRangeToo:
# define subplots for close look
ax3 = plt.subplot2grid((4, 2), (0, 1), colspan=1, rowspan=3)
axes_made.append(ax3)
ax4 = plt.subplot2grid((4, 2), (3, 1), colspan=1, rowspan=1)
axes_made.append(ax4)
plt.tight_layout()
# axis 1: the plots for all grades and grade 0
for key, r in zip(other_grades.keys(), ratios):
ax3.plot(other_grades[key][1][:-1], other_grades[key][0], drawstyle="steps-pre", label=key)
ax4.plot(other_grades[key][1][:-1], r, drawstyle="steps-pre")
ax3.plot(be_grade0[:-1], hist_grade0, drawstyle="steps-pre", label="Grade 0")
ax3.set_yscale("log")
ax3.set_ylim(y_lims_spec)
ax3.set_xscale("log")
plt.setp(ax3.get_xticklabels(), visible=False)
ax3.set_xlim([1.6,79])
ax3.set_title("Same But Whole E-range")
ax3.legend()
# axis 2: the difference between all grades and grade 0
# ax4.plot(be_grade0[:-1], ratio, drawstyle="steps-pre", color='k')
ax4.set_ylim(y_lims_ratio)
ax4.set_xscale("log")
ax4.set_xlim([1.6,79])
ax4.set_xlabel("Energy [keV]")
ax4.grid(axis='y')
if type(saveFig) == str:
plt.savefig(saveFig, bbox_inches="tight")
# plt.show()
inform = {"file":evtFile,
"fileTimeRange":[file_start, file_end],
"timeRangeGivenToPlot":timeRange,
"eff_exp":evt_header['livetime'],
"ontime":evt_header['ontime'],
"lvtime_percent":100*evt_header['livetime']/evt_header['ontime'],
"Grade 0":[hist_grade0, be_grade0],
**other_grades}
if printOut:
for key in inform.keys():
print(key, " : ", inform[key])
return inform, axes_made
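# A minimal usage sketch for CheckGrade0ToAnyGrades (hypothetical file name);
# grades=[[1], [0, 4]] compares grade 0 against grade 1 and grades 0-4 inclusive:
#
#     info, axes = CheckGrade0ToAnyGrades("obs/nu80410201001A06_cl_sunpos.evt",
#                                         grades=[[1], [0, 4]])
#     plt.show()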
## functions to help find the FoV rotation
def collectSameXs(rawx, rawy, solx, soly):
""" Returns a dictionary where each column is given a unique entry with a list
of the rows that correspond to that one column from the evt file. Also saves the
solar coordinates for that raw coordinate column with the rawx column key+"map2sol".
Parameters
----------
rawx, rawy : lists
Raw coordinates of the evt counts.
solx, soly : lists
Solar coordinates of the sunpos evt counts.
Returns
-------
A dictionary.
Examples
--------
rawx, rawy = [1,2,3,3], [7,8,4,9]
solx, soly = [101, 102, 103, 104], [250, 252, 254, 256]
collectSameXs(rawx, rawy, solx, soly)
>>> {"1":[7], "1map2sol":[101, 250],
"2":[8], "2map2sol":[102, 252],
"3":[4, 9], "3map2sol":[[103, 254], [104, 256]]}
"""
output = {}
for c,xs in enumerate(rawx):
if str(xs) not in output:
output[str(xs)] = [rawy[c]]
output[str(xs)+"map2sol"] = [[solx[c], soly[c]]]
else:
output[str(xs)].append(rawy[c])
output[str(xs)+"map2sol"].append([solx[c], soly[c]])
assert len([solx[c], soly[c]])==2
return output
def minRowInCol(columns):
""" Returns a dictionary where each key is the solar X position of each raw
coordinate chosen (edges between det0&3 and 1&2) with its value being the
solar Y coordinate.
Parameters
----------
columns : dictionary
Information of the raw and solar coordinates of the counts in order to
each other.
Returns
-------
A dictionary.
Examples
--------
cols = {"1":[7], "1map2sol":[101, 250],
"2":[8], "2map2sol":[102, 252],
"3":[4, 9], "3map2sol":[[103, 254], [104, 256]]}
minRowInCol(cols)
>>> {"101":250, "102":252, "103":254}
"""
output_sol = {}
for key in columns.keys():
if "map2sol" not in key:
# find the corresponding solar coords to the minimum rawy
sol_coords = columns[key+"map2sol"][np.argmin(columns[key])]
# now have the solarX key with the solarY as its value
assert len(sol_coords)==2
output_sol[str(sol_coords[0])] = sol_coords[1]
return output_sol
def maxRowInCol(columns):
""" Returns a dictionary where each key is the solar X position of each raw
coordinate chosen (edges between det0&3 and 1&2) with its value being the
solar Y coordinate.
Parameters
----------
columns : dictionary
Information of the raw and solar coordinates of the counts in order to
each other.
Returns
-------
A dictionary.
Examples
--------
cols = {"1":[7], "1map2sol":[101, 250],
"2":[8], "2map2sol":[102, 252],
"3":[4, 9], "3map2sol":[[103, 254], [104, 256]]}
minRowInCol(cols)
>>> {"101":250, "102":252, "104":256}
"""
output_sol = {}
for key in columns.keys():
if "map2sol" not in key:
# find the corresponding solar coords to the maximum rawy
sol_coords = columns[key+"map2sol"][np.argmax(columns[key])]
# now have the solarX key with the solarY as its value
output_sol[str(sol_coords[0])] = sol_coords[1]
return output_sol
def getXandY(colsAndRows):
""" Returns solar X and Y coordinates.
Parameters
----------
colsAndRows : dictionary
Keys as the solar X and values of solar Y coordinates.
Returns
-------
Two numpy arrays.
Examples
--------
colsAndRows = {"101":250, "102":252, "104":256}
getXandY(colsAndRows)
>>> [101, 102, 104], [250, 252, 256]
"""
return np.array([int(c) for c in list(colsAndRows.keys())]), np.array(list(colsAndRows.values()))
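# Toy end-to-end sketch of the helper chain above (coordinates are made up):
#
#     cols = collectSameXs([1, 2, 3, 3], [7, 8, 4, 9],
#                          [101, 102, 103, 104], [250, 252, 254, 256])
#     edge = maxRowInCol(cols)   # {"101": 250, "102": 252, "104": 256}
#     x, y = getXandY(edge)      # arrays of solar X and Y along the detector edge
#     # fitting straightLine to (x, y) then gives the gradient used for the
#     # FoV rotation angle via getDegrees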
def getDegrees(grad):
""" Returns angle of rotation in degrees.
Parameters
----------
grad : float
Gradient.
Returns
-------
Angle in degrees.
Examples
--------
grad = 1
getDegrees(grad)
>>> 45
"""
return np.arctan(grad)*(180/np.pi)
def straightLine(x, m, c):
""" A straight line model.
Parameters
----------
    x : numpy array
X positions.
m : float
Gradient.
c : float
Y-intercept.
Returns
-------
Ys for a straight line.
Examples
--------
    x, m, c = np.array([1, 2]), 0.25, 1
    straightLine(x, m, c)
    >>> array([1.25, 1.5])
"""
return m*x + c
def getAngle_plot(rawx, rawy, solx, soly, det, **kwargs):
""" Returns the rotation of the NuSTAR FoV from the gradient of the edges between
det0&3 and 1&2 for whatever detector(s) you give it.
Parameters
----------
rawx, rawy : lists
Raw coordinates of the evt counts.
solx, soly : lists
Solar coordinates of the sunpos evt counts.
det : int
The detector for the counts (0--3).
**kwargs : Can pass an axis to it.
Returns
-------
A float of the rotation from "North" in degrees where anticlockwise is positive.
This assumes the rotation is between 90 and -90 degrees.
Examples
--------
fig, axs = plt.subplots(2,2, figsize=(14,10))
# get orientation from the nustar_swguide.pdf, Figure 3
gradient0 = getAngle_plot(rawx0, rawy0, solx0, soly0, 0, axes=axs[0][0])
gradient1 = getAngle_plot(rawx1, rawy1, solx1, soly1, 1, axes=axs[0][1])
gradient2 = getAngle_plot(rawx2, rawy2, solx2, soly2, 2, axes=axs[1][1])
gradient3 = getAngle_plot(rawx3, rawy3, solx3, soly3, 3, axes=axs[1][0])
plt.show()
"""
k = {"axes":plt}
for kw in kwargs:
k[kw] = kwargs[kw]
if det==0:
cols = collectSameXs(rawy, rawx, solx, soly)
m_row_per_col = maxRowInCol(cols)
elif det==1:
cols = collectSameXs(rawx, rawy, solx, soly)
m_row_per_col = maxRowInCol(cols)
elif det==2:
cols = collectSameXs(rawy, rawx, solx, soly)
m_row_per_col = maxRowInCol(cols)
elif det==3:
cols = collectSameXs(rawx, rawy, solx, soly)
m_row_per_col = maxRowInCol(cols)
# working with rawx and y to make sure using correct edge then find the
# corresponding entries in solar coords
aAndY = getXandY(m_row_per_col)
x, y = aAndY[0], aAndY[1]
xlim, ylim = [np.min(x)-5, np.max(x)+5], [np.min(y)-5, np.max(y)+5]
#if det in [0, 1]:
# x = x[y>np.median(y)]
# y = y[y>np.median(y)]
#elif det in [2, 3]:
# x = x[y<np.median(y)]
# y = y[y<np.median(y)]
popt, pcov = curve_fit(straightLine, x, y, p0=[0, np.mean(y)])
k["axes"].plot(x, y, '.')
k["axes"].plot(x, straightLine(x, *popt))
if k["axes"] != plt:
k["axes"].set_ylim(ylim)
k["axes"].set_xlim(xlim)
k["axes"].set_ylabel("Solar-Y")
k["axes"].set_xlabel("Solar-X")
else:
k["axes"].ylim(ylim)
k["axes"].xlim(xlim)
k["axes"].ylabel("Solar-Y")
k["axes"].xlabel("Solar-X")
k["axes"].text(np.min(x), (ylim[0]+ylim[1])/2+5, "Grad: "+str(popt[0]))
k["axes"].text(np.min(x), (ylim[0]+ylim[1])/2, "Angle: "+str(np.arctan(popt[0]))+" rad")
k["axes"].text(np.min(x), (ylim[0]+ylim[1])/2-5, "Angle: "+str(np.arctan(popt[0])*(180/np.pi))+" deg")
k["axes"].text(np.max(x)*0.99, ylim[0]*1.001, "DET: "+str(det), fontweight="bold")
return np.arctan(popt[0])*(180/np.pi)
|
[
"matplotlib.pyplot.title",
"os.mkdir",
"pickle.dump",
"numpy.sum",
"numpy.argmax",
"matplotlib.pyplot.axes",
"matplotlib.pyplot.subplot2grid",
"os.walk",
"numpy.isnan",
"numpy.argmin",
"numpy.shape",
"matplotlib.pyplot.figure",
"pickle.load",
"numpy.arange",
"matplotlib.colors.LogNorm",
"numpy.histogram",
"pylab.cm.get_cmap",
"matplotlib.dates.date2num",
"scipy.interpolate.interp1d",
"os.path.isfile",
"numpy.mean",
"matplotlib.pyplot.tight_layout",
"os.path.join",
"warnings.simplefilter",
"matplotlib.colors.Normalize",
"matplotlib.pyplot.imshow",
"scipy.ndimage.gaussian_filter",
"os.path.exists",
"numpy.isfinite",
"matplotlib.pyplot.colorbar",
"numpy.insert",
"numpy.append",
"matplotlib.dates.DateFormatter",
"numpy.max",
"datetime.timedelta",
"numpy.log10",
"matplotlib.pyplot.xticks",
"astropy.units.Quantity",
"matplotlib.pyplot.show",
"numpy.average",
"matplotlib.pyplot.ylim",
"matplotlib.pyplot.legend",
"subprocess.check_output",
"numpy.min",
"astropy.io.fits.open",
"matplotlib.pyplot.ylabel",
"numpy.arctan",
"skimage.restoration.richardson_lucy",
"os.listdir",
"re.compile",
"matplotlib.pyplot.subplot",
"matplotlib.pyplot.plot",
"matplotlib.pyplot.annotate",
"numpy.seterr",
"numpy.zeros",
"pandas.plotting.register_matplotlib_converters",
"numpy.nonzero",
"numpy.array",
"matplotlib.dates.MinuteLocator",
"matplotlib.pyplot.xlabel",
"astropy.coordinates.SkyCoord",
"scipy.ndimage.rotate",
"matplotlib.pyplot.savefig"
] |
[((1121, 1153), 'pandas.plotting.register_matplotlib_converters', 'register_matplotlib_converters', ([], {}), '()\n', (1151, 1153), False, 'from pandas.plotting import register_matplotlib_converters\n'), ((1288, 1332), 'numpy.seterr', 'np.seterr', ([], {'divide': '"""ignore"""', 'invalid': '"""ignore"""'}), "(divide='ignore', invalid='ignore')\n", (1297, 1332), True, 'import numpy as np\n'), ((1389, 1435), 'warnings.simplefilter', 'warnings.simplefilter', (['"""ignore"""', 'VerifyWarning'], {}), "('ignore', VerifyWarning)\n", (1410, 1435), False, 'import warnings\n'), ((1440, 1487), 'warnings.simplefilter', 'warnings.simplefilter', (['"""ignore"""', 'RuntimeWarning'], {}), "('ignore', RuntimeWarning)\n", (1461, 1487), False, 'import warnings\n'), ((1493, 1537), 'warnings.simplefilter', 'warnings.simplefilter', (['"""ignore"""', 'UserWarning'], {}), "('ignore', UserWarning)\n", (1514, 1537), False, 'import warnings\n'), ((82811, 82830), 're.compile', 're.compile', (['""".\\\\w+"""'], {}), "('.\\\\w+')\n", (82821, 82830), False, 'import re\n'), ((83173, 83188), 'astropy.io.fits.open', 'fits.open', (['file'], {}), '(file)\n', (83182, 83188), False, 'from astropy.io import fits\n'), ((85199, 85217), 'astropy.io.fits.open', 'fits.open', (['evtFile'], {}), '(evtFile)\n', (85208, 85217), False, 'from astropy.io import fits\n'), ((86438, 86456), 'numpy.isfinite', 'np.isfinite', (['ratio'], {}), '(ratio)\n', (86449, 86456), True, 'import numpy as np\n'), ((86624, 86654), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(width, 7)'}), '(figsize=(width, 7))\n', (86634, 86654), True, 'import matplotlib.pyplot as plt\n'), ((86706, 86766), 'matplotlib.pyplot.subplot2grid', 'plt.subplot2grid', (['(4, columns)', '(0, 0)'], {'colspan': '(1)', 'rowspan': '(3)'}), '((4, columns), (0, 0), colspan=1, rowspan=3)\n', (86722, 86766), True, 'import matplotlib.pyplot as plt\n'), ((86803, 86863), 'matplotlib.pyplot.subplot2grid', 'plt.subplot2grid', (['(4, columns)', '(3, 0)'], {'colspan': '(1)', 'rowspan': '(1)'}), '((4, columns), (3, 0), colspan=1, rowspan=1)\n', (86819, 86863), True, 'import matplotlib.pyplot as plt\n'), ((86894, 86912), 'matplotlib.pyplot.tight_layout', 'plt.tight_layout', ([], {}), '()\n', (86910, 86912), True, 'import matplotlib.pyplot as plt\n'), ((91350, 91368), 'astropy.io.fits.open', 'fits.open', (['evtFile'], {}), '(evtFile)\n', (91359, 91368), False, 'from astropy.io import fits\n'), ((93511, 93541), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(width, 7)'}), '(figsize=(width, 7))\n', (93521, 93541), True, 'import matplotlib.pyplot as plt\n'), ((93593, 93653), 'matplotlib.pyplot.subplot2grid', 'plt.subplot2grid', (['(4, columns)', '(0, 0)'], {'colspan': '(1)', 'rowspan': '(3)'}), '((4, columns), (0, 0), colspan=1, rowspan=3)\n', (93609, 93653), True, 'import matplotlib.pyplot as plt\n'), ((93690, 93750), 'matplotlib.pyplot.subplot2grid', 'plt.subplot2grid', (['(4, columns)', '(3, 0)'], {'colspan': '(1)', 'rowspan': '(1)'}), '((4, columns), (3, 0), colspan=1, rowspan=1)\n', (93706, 93750), True, 'import matplotlib.pyplot as plt\n'), ((93781, 93799), 'matplotlib.pyplot.tight_layout', 'plt.tight_layout', ([], {}), '()\n', (93797, 93799), True, 'import matplotlib.pyplot as plt\n'), ((1834, 1853), 're.compile', 're.compile', (['"""\\\\w+/"""'], {}), "('\\\\w+/')\n", (1844, 1853), False, 'import re\n'), ((2075, 2100), 're.compile', 're.compile', (['"""\\\\w+\\\\.\\\\w+"""'], {}), "('\\\\w+\\\\.\\\\w+')\n", (2085, 2100), False, 'import re\n'), ((2288, 2308), 
're.compile', 're.compile', (['"""sunpos"""'], {}), "('sunpos')\n", (2298, 2308), False, 'import re\n'), ((2617, 2646), 're.compile', 're.compile', (['"""\\\\d{2}\\\\D\\\\d{2}"""'], {}), "('\\\\d{2}\\\\D\\\\d{2}')\n", (2627, 2646), False, 'import re\n'), ((2809, 2830), 're.compile', 're.compile', (['"""chu\\\\d+"""'], {}), "('chu\\\\d+')\n", (2819, 2830), False, 'import re\n'), ((3127, 3146), 're.compile', 're.compile', (['"""_\\\\D_"""'], {}), "('_\\\\D_')\n", (3137, 3146), False, 'import re\n'), ((3396, 3414), 're.compile', 're.compile', (['"""\\\\d+"""'], {}), "('\\\\d+')\n", (3406, 3414), False, 'import re\n'), ((4271, 4294), 'astropy.io.fits.open', 'fits.open', (['evt_filename'], {}), '(evt_filename)\n', (4280, 4294), False, 'from astropy.io import fits\n'), ((21662, 21738), 'skimage.restoration.richardson_lucy', 'restoration.richardson_lucy', (['map_array', 'psf_array'], {'iterations': 'it', 'clip': '(False)'}), '(map_array, psf_array, iterations=it, clip=False)\n', (21689, 21738), False, 'from skimage import restoration\n'), ((22178, 22193), 'numpy.array', 'np.array', (['array'], {}), '(array)\n', (22186, 22193), True, 'import numpy as np\n'), ((33855, 33907), 'matplotlib.pyplot.subplot', 'plt.subplot', ([], {'projection': 'self.rsn_map', 'frame_on': '(False)'}), '(projection=self.rsn_map, frame_on=False)\n', (33866, 33907), True, 'import matplotlib.pyplot as plt\n'), ((42138, 42157), 're.compile', 're.compile', (['"""\\\\w+/"""'], {}), "('\\\\w+/')\n", (42148, 42157), False, 'import re\n'), ((42429, 42443), 'os.listdir', 'os.listdir', (['sd'], {}), '(sd)\n', (42439, 42443), False, 'import os\n'), ((42933, 42958), 'os.walk', 'os.walk', (['search_directory'], {}), '(search_directory)\n', (42940, 42958), False, 'import os\n'), ((44523, 44548), 're.compile', 're.compile', (['"""\\\\w+\\\\.\\\\w+"""'], {}), "('\\\\w+\\\\.\\\\w+')\n", (44533, 44548), False, 'import re\n'), ((44654, 44672), 're.compile', 're.compile', (['"""\\\\d+"""'], {}), "('\\\\d+')\n", (44664, 44672), False, 'import re\n'), ((44769, 44788), 're.compile', 're.compile', (['"""[A-Z]"""'], {}), "('[A-Z]')\n", (44779, 44788), False, 'import re\n'), ((45213, 45235), 'astropy.io.fits.open', 'fits.open', (['hk_filename'], {}), '(hk_filename)\n', (45222, 45235), False, 'from astropy.io import fits\n'), ((61827, 61850), 'astropy.io.fits.open', 'fits.open', (['chu_filename'], {}), '(chu_filename)\n', (61836, 61850), False, 'from astropy.io import fits\n'), ((62040, 62055), 'numpy.array', 'np.array', (['data1'], {}), '(data1)\n', (62048, 62055), True, 'import numpy as np\n'), ((62074, 62089), 'numpy.array', 'np.array', (['data2'], {}), '(data2)\n', (62082, 62089), True, 'import numpy as np\n'), ((62108, 62123), 'numpy.array', 'np.array', (['data3'], {}), '(data3)\n', (62116, 62123), True, 'import numpy as np\n'), ((64164, 64191), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(10, 5)'}), '(figsize=(10, 5))\n', (64174, 64191), True, 'import matplotlib.pyplot as plt\n'), ((64204, 64214), 'matplotlib.pyplot.axes', 'plt.axes', ([], {}), '()\n', (64212, 64214), True, 'import matplotlib.pyplot as plt\n'), ((64223, 64255), 'matplotlib.pyplot.plot', 'plt.plot', (['dt_times', 'chu_all', '"""x"""'], {}), "(dt_times, chu_all, 'x')\n", (64231, 64255), True, 'import matplotlib.pyplot as plt\n'), ((64442, 64467), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""NuSTAR CHUs"""'], {}), "('NuSTAR CHUs')\n", (64452, 64467), True, 'import matplotlib.pyplot as plt\n'), ((64562, 64591), 'matplotlib.dates.DateFormatter', 
'mdates.DateFormatter', (['"""%H:%M"""'], {}), "('%H:%M')\n", (64582, 64591), True, 'import matplotlib.dates as mdates\n'), ((64737, 64760), 'matplotlib.pyplot.xticks', 'plt.xticks', ([], {'rotation': '(30)'}), '(rotation=30)\n', (64747, 64760), True, 'import matplotlib.pyplot as plt\n'), ((65708, 65774), 'numpy.arange', 'np.arange', (['(1.6)', "self.lc_3D_params['energy_high']", 'energy_increment'], {}), "(1.6, self.lc_3D_params['energy_high'], energy_increment)\n", (65717, 65774), True, 'import numpy as np\n'), ((65817, 65841), 'numpy.arange', 'np.arange', (['no_of_time', '(1)'], {}), '(no_of_time, 1)\n', (65826, 65841), True, 'import numpy as np\n'), ((66208, 66227), 'numpy.array', 'np.array', (['er_and_tc'], {}), '(er_and_tc)\n', (66216, 66227), True, 'import numpy as np\n'), ((66275, 66301), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(6, 8)'}), '(figsize=(6, 8))\n', (66285, 66301), True, 'import matplotlib.pyplot as plt\n'), ((66309, 66369), 'matplotlib.pyplot.imshow', 'plt.imshow', (['er_and_tc'], {'origin': '"""lower"""', 'aspect': 'aspect', 'vmax': '(1)'}), "(er_and_tc, origin='lower', aspect=aspect, vmax=1)\n", (66319, 66369), True, 'import matplotlib.pyplot as plt\n'), ((66378, 66455), 'matplotlib.pyplot.ylim', 'plt.ylim', (["[self.lc_3D_params['energy_low'], self.lc_3D_params['energy_high']]"], {}), "([self.lc_3D_params['energy_low'], self.lc_3D_params['energy_high']])\n", (66386, 66455), True, 'import matplotlib.pyplot as plt\n'), ((66464, 66482), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""Time"""'], {}), "('Time')\n", (66474, 66482), True, 'import matplotlib.pyplot as plt\n'), ((66491, 66511), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Energy"""'], {}), "('Energy')\n", (66501, 66511), True, 'import matplotlib.pyplot as plt\n'), ((66520, 66530), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (66528, 66530), True, 'import matplotlib.pyplot as plt\n'), ((67048, 67060), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (67058, 67060), True, 'import matplotlib.pyplot as plt\n'), ((67074, 67084), 'matplotlib.pyplot.axes', 'plt.axes', ([], {}), '()\n', (67082, 67084), True, 'import matplotlib.pyplot as plt\n'), ((67928, 67940), 'matplotlib.pyplot.legend', 'plt.legend', ([], {}), '()\n', (67938, 67940), True, 'import matplotlib.pyplot as plt\n'), ((67956, 67985), 'matplotlib.dates.DateFormatter', 'mdates.DateFormatter', (['"""%H:%M"""'], {}), "('%H:%M')\n", (67976, 67985), True, 'import matplotlib.dates as mdates\n'), ((68086, 68109), 'matplotlib.pyplot.xticks', 'plt.xticks', ([], {'rotation': '(30)'}), '(rotation=30)\n', (68096, 68109), True, 'import matplotlib.pyplot as plt\n'), ((68119, 68182), 'matplotlib.pyplot.title', 'plt.title', (["('Detector Contribution ' + self.e_range_str + ' keV')"], {}), "('Detector Contribution ' + self.e_range_str + ' keV')\n", (68128, 68182), True, 'import matplotlib.pyplot as plt\n'), ((68187, 68221), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Counts from detector"""'], {}), "('Counts from detector')\n", (68197, 68221), True, 'import matplotlib.pyplot as plt\n'), ((68230, 68248), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""Time"""'], {}), "('Time')\n", (68240, 68248), True, 'import matplotlib.pyplot as plt\n'), ((72969, 72989), 'os.mkdir', 'os.mkdir', (['evt_folder'], {}), '(evt_folder)\n', (72977, 72989), False, 'import os\n'), ((73619, 73638), 'os.mkdir', 'os.mkdir', (['hk_folder'], {}), '(hk_folder)\n', (73627, 73638), False, 'import os\n'), ((74149, 74169), 'os.mkdir', 'os.mkdir', 
(['lvt_folder'], {}), '(lvt_folder)\n', (74157, 74169), False, 'import os\n'), ((74654, 74674), 'os.mkdir', 'os.mkdir', (['map_folder'], {}), '(map_folder)\n', (74662, 74674), False, 'import os\n'), ((75314, 75333), 'os.mkdir', 'os.mkdir', (['lc_folder'], {}), '(lc_folder)\n', (75322, 75333), False, 'import os\n'), ((79196, 79270), 'scipy.interpolate.interp1d', 'interpolate.interp1d', (["temp_response_dataxy['x']", "temp_response_dataxy['y']"], {}), "(temp_response_dataxy['x'], temp_response_dataxy['y'])\n", (79216, 79270), False, 'from scipy import interpolate\n'), ((87819, 87873), 'matplotlib.pyplot.subplot2grid', 'plt.subplot2grid', (['(4, 2)', '(0, 1)'], {'colspan': '(1)', 'rowspan': '(3)'}), '((4, 2), (0, 1), colspan=1, rowspan=3)\n', (87835, 87873), True, 'import matplotlib.pyplot as plt\n'), ((87918, 87972), 'matplotlib.pyplot.subplot2grid', 'plt.subplot2grid', (['(4, 2)', '(3, 1)'], {'colspan': '(1)', 'rowspan': '(1)'}), '((4, 2), (3, 1), colspan=1, rowspan=1)\n', (87934, 87972), True, 'import matplotlib.pyplot as plt\n'), ((88011, 88029), 'matplotlib.pyplot.tight_layout', 'plt.tight_layout', ([], {}), '()\n', (88027, 88029), True, 'import matplotlib.pyplot as plt\n'), ((88875, 88916), 'matplotlib.pyplot.savefig', 'plt.savefig', (['saveFig'], {'bbox_inches': '"""tight"""'}), "(saveFig, bbox_inches='tight')\n", (88886, 88916), True, 'import matplotlib.pyplot as plt\n'), ((94906, 94960), 'matplotlib.pyplot.subplot2grid', 'plt.subplot2grid', (['(4, 2)', '(0, 1)'], {'colspan': '(1)', 'rowspan': '(3)'}), '((4, 2), (0, 1), colspan=1, rowspan=3)\n', (94922, 94960), True, 'import matplotlib.pyplot as plt\n'), ((95005, 95059), 'matplotlib.pyplot.subplot2grid', 'plt.subplot2grid', (['(4, 2)', '(3, 1)'], {'colspan': '(1)', 'rowspan': '(1)'}), '((4, 2), (3, 1), colspan=1, rowspan=1)\n', (95021, 95059), True, 'import matplotlib.pyplot as plt\n'), ((95098, 95116), 'matplotlib.pyplot.tight_layout', 'plt.tight_layout', ([], {}), '()\n', (95114, 95116), True, 'import matplotlib.pyplot as plt\n'), ((96110, 96151), 'matplotlib.pyplot.savefig', 'plt.savefig', (['saveFig'], {'bbox_inches': '"""tight"""'}), "(saveFig, bbox_inches='tight')\n", (96121, 96151), True, 'import matplotlib.pyplot as plt\n'), ((100936, 100951), 'numpy.arctan', 'np.arctan', (['grad'], {}), '(grad)\n', (100945, 100951), True, 'import numpy as np\n'), ((103979, 103988), 'numpy.min', 'np.min', (['x'], {}), '(x)\n', (103985, 103988), True, 'import numpy as np\n'), ((104055, 104064), 'numpy.min', 'np.min', (['x'], {}), '(x)\n', (104061, 104064), True, 'import numpy as np\n'), ((104148, 104157), 'numpy.min', 'np.min', (['x'], {}), '(x)\n', (104154, 104157), True, 'import numpy as np\n'), ((104339, 104357), 'numpy.arctan', 'np.arctan', (['popt[0]'], {}), '(popt[0])\n', (104348, 104357), True, 'import numpy as np\n'), ((7321, 7350), 'numpy.max', 'np.max', (["self.cleanevt['TIME']"], {}), "(self.cleanevt['TIME'])\n", (7327, 7350), True, 'import numpy as np\n'), ((7351, 7380), 'numpy.min', 'np.min', (["self.cleanevt['TIME']"], {}), "(self.cleanevt['TIME'])\n", (7357, 7380), True, 'import numpy as np\n'), ((7447, 7513), 'matplotlib.dates.MinuteLocator', 'mdates.MinuteLocator', ([], {'byminute': '[0, 10, 20, 30, 40, 50]', 'interval': '(1)'}), '(byminute=[0, 10, 20, 30, 40, 50], interval=1)\n', (7467, 7513), True, 'import matplotlib.dates as mdates\n'), ((21566, 21614), 'scipy.ndimage.rotate', 'rotate', (['psf_array', 'hor2SourceAngle'], {'reshape': '(True)'}), '(psf_array, hor2SourceAngle, reshape=True)\n', (21572, 21614), False, 'from 
scipy.ndimage import rotate\n'), ((22208, 22223), 'numpy.isnan', 'np.isnan', (['array'], {}), '(array)\n', (22216, 22223), True, 'import numpy as np\n'), ((22339, 22350), 'numpy.sum', 'np.sum', (['row'], {}), '(row)\n', (22345, 22350), True, 'import numpy as np\n'), ((22584, 22595), 'numpy.sum', 'np.sum', (['col'], {}), '(col)\n', (22590, 22595), True, 'import numpy as np\n'), ((23025, 23112), 'astropy.coordinates.SkyCoord', 'SkyCoord', (['(-1200 * u.arcsec)', '(-1200 * u.arcsec)'], {'frame': 'sunpy_map_obj.coordinate_frame'}), '(-1200 * u.arcsec, -1200 * u.arcsec, frame=sunpy_map_obj.\n coordinate_frame)\n', (23033, 23112), False, 'from astropy.coordinates import SkyCoord\n'), ((23121, 23206), 'astropy.coordinates.SkyCoord', 'SkyCoord', (['(1200 * u.arcsec)', '(1200 * u.arcsec)'], {'frame': 'sunpy_map_obj.coordinate_frame'}), '(1200 * u.arcsec, 1200 * u.arcsec, frame=sunpy_map_obj.coordinate_frame\n )\n', (23129, 23206), False, 'from astropy.coordinates import SkyCoord\n'), ((26841, 26868), 'numpy.average', 'np.average', (['ltimes_in_range'], {}), '(ltimes_in_range)\n', (26851, 26868), True, 'import numpy as np\n'), ((28147, 28216), 'scipy.ndimage.gaussian_filter', 'ndimage.gaussian_filter', (['self.nustar_map.data', 'gaussian_width'], {'mode': 'm'}), '(self.nustar_map.data, gaussian_width, mode=m)\n', (28170, 28216), False, 'from scipy import ndimage\n'), ((30288, 30305), 'numpy.shape', 'np.shape', (['nm.data'], {}), '(nm.data)\n', (30296, 30305), True, 'import numpy as np\n'), ((31308, 31346), 'matplotlib.colors.Normalize', 'colors.Normalize', ([], {'vmin': 'dmin', 'vmax': 'dmax'}), '(vmin=dmin, vmax=dmax)\n', (31324, 31346), True, 'import matplotlib.colors as colors\n'), ((31390, 31415), 'pylab.cm.get_cmap', 'cm.get_cmap', (['"""Spectral_r"""'], {}), "('Spectral_r')\n", (31401, 31415), False, 'from pylab import figure, cm\n'), ((34323, 34507), 'matplotlib.pyplot.annotate', 'plt.annotate', (["self.annotations['text']", "self.annotations['position']"], {'color': "self.annotations['color']", 'fontsize': "self.annotations['fontsize']", 'weight': "self.annotations['weight']"}), "(self.annotations['text'], self.annotations['position'], color=\n self.annotations['color'], fontsize=self.annotations['fontsize'],\n weight=self.annotations['weight'])\n", (34335, 34507), True, 'import matplotlib.pyplot as plt\n'), ((35680, 35755), 'matplotlib.pyplot.colorbar', 'plt.colorbar', ([], {'fraction': '(0.035)', 'pad': '(0.03)', 'label': "(self.cbar_title + ' $s^{-1}$')"}), "(fraction=0.035, pad=0.03, label=self.cbar_title + ' $s^{-1}$')\n", (35692, 35755), True, 'import matplotlib.pyplot as plt\n'), ((35779, 35840), 'matplotlib.pyplot.colorbar', 'plt.colorbar', ([], {'fraction': '(0.035)', 'pad': '(0.03)', 'label': 'self.cbar_title'}), '(fraction=0.035, pad=0.03, label=self.cbar_title)\n', (35791, 35840), True, 'import matplotlib.pyplot as plt\n'), ((37030, 37081), 'matplotlib.pyplot.savefig', 'plt.savefig', (['save_fig'], {'dpi': '(300)', 'bbox_inches': '"""tight"""'}), "(save_fig, dpi=300, bbox_inches='tight')\n", (37041, 37081), True, 'import matplotlib.pyplot as plt\n'), ((37123, 37138), 'matplotlib.pyplot.show', 'plt.show', (['"""all"""'], {}), "('all')\n", (37131, 37138), True, 'import matplotlib.pyplot as plt\n'), ((39819, 39850), 'numpy.insert', 'np.insert', (['new_x', '(0)', '[new_x[0]]'], {}), '(new_x, 0, [new_x[0]])\n', (39828, 39850), True, 'import numpy as np\n'), ((39871, 39900), 'numpy.append', 'np.append', (['new_x', '[new_x[-1]]'], {}), '(new_x, [new_x[-1]])\n', (39880, 39900), True, 
'import numpy as np\n'), ((39920, 39944), 'numpy.insert', 'np.insert', (['new_y', '(0)', '[0]'], {}), '(new_y, 0, [0])\n', (39929, 39944), True, 'import numpy as np\n'), ((39965, 39986), 'numpy.append', 'np.append', (['new_y', '[0]'], {}), '(new_y, [0])\n', (39974, 39986), True, 'import numpy as np\n'), ((40264, 40282), 'matplotlib.dates.date2num', 'mdates.date2num', (['d'], {}), '(d)\n', (40279, 40282), True, 'import matplotlib.dates as mdates\n'), ((46233, 46245), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (46243, 46245), True, 'import matplotlib.pyplot as plt\n'), ((46263, 46273), 'matplotlib.pyplot.axes', 'plt.axes', ([], {}), '()\n', (46271, 46273), True, 'import matplotlib.pyplot as plt\n'), ((46378, 46425), 'matplotlib.pyplot.title', 'plt.title', (["('Livetime - ' + lt_start_hhmmss[:10])"], {}), "('Livetime - ' + lt_start_hhmmss[:10])\n", (46387, 46425), True, 'import matplotlib.pyplot as plt\n'), ((46463, 46513), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (["('Start Time - ' + lt_start_hhmmss[12:])"], {}), "('Start Time - ' + lt_start_hhmmss[12:])\n", (46473, 46513), True, 'import matplotlib.pyplot as plt\n'), ((46524, 46555), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Livetime Fraction"""'], {}), "('Livetime Fraction')\n", (46534, 46555), True, 'import matplotlib.pyplot as plt\n'), ((46681, 46697), 'matplotlib.pyplot.ylim', 'plt.ylim', (['[0, 1]'], {}), '([0, 1])\n', (46689, 46697), True, 'import matplotlib.pyplot as plt\n'), ((46715, 46744), 'matplotlib.dates.DateFormatter', 'mdates.DateFormatter', (['"""%H:%M"""'], {}), "('%H:%M')\n", (46735, 46744), True, 'import matplotlib.dates as mdates\n'), ((46893, 46916), 'matplotlib.pyplot.xticks', 'plt.xticks', ([], {'rotation': '(30)'}), '(rotation=30)\n', (46903, 46916), True, 'import matplotlib.pyplot as plt\n'), ((47887, 47911), 'numpy.min', 'np.min', (["cleanevt['TIME']"], {}), "(cleanevt['TIME'])\n", (47893, 47911), True, 'import numpy as np\n'), ((48183, 48207), 'numpy.max', 'np.max', (["cleanevt['TIME']"], {}), "(cleanevt['TIME'])\n", (48189, 48207), True, 'import numpy as np\n'), ((51057, 51083), 'numpy.zeros', 'np.zeros', (['(t_bin_number + 1)'], {}), '(t_bin_number + 1)\n', (51065, 51083), True, 'import numpy as np\n'), ((61049, 61059), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (61057, 61059), True, 'import matplotlib.pyplot as plt\n'), ((64802, 64812), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (64810, 64812), True, 'import matplotlib.pyplot as plt\n'), ((66242, 66259), 'numpy.max', 'np.max', (['er_and_tc'], {}), '(er_and_tc)\n', (66248, 66259), True, 'import numpy as np\n'), ((68283, 68293), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (68291, 68293), True, 'import matplotlib.pyplot as plt\n'), ((69137, 69155), 'astropy.io.fits.open', 'fits.open', (['chuFile'], {}), '(chuFile)\n', (69146, 69155), False, 'from astropy.io import fits\n'), ((71609, 71644), 'os.path.exists', 'os.path.exists', (["(nustar_folder + '/')"], {}), "(nustar_folder + '/')\n", (71623, 71644), False, 'import os\n'), ((71706, 71729), 'os.mkdir', 'os.mkdir', (['nustar_folder'], {}), '(nustar_folder)\n', (71714, 71729), False, 'import os\n'), ((73441, 73515), 'pickle.dump', 'pickle.dump', (['evt_to_store', 'evt_save_file'], {'protocol': 'pickle.HIGHEST_PROTOCOL'}), '(evt_to_store, evt_save_file, protocol=pickle.HIGHEST_PROTOCOL)\n', (73452, 73515), False, 'import pickle\n'), ((73977, 74049), 'pickle.dump', 'pickle.dump', (['hk_to_store', 'hk_save_file'], {'protocol': 'pickle.HIGHEST_PROTOCOL'}), 
'(hk_to_store, hk_save_file, protocol=pickle.HIGHEST_PROTOCOL)\n', (73988, 74049), False, 'import pickle\n'), ((74490, 74564), 'pickle.dump', 'pickle.dump', (['lvt_to_store', 'lvt_save_file'], {'protocol': 'pickle.HIGHEST_PROTOCOL'}), '(lvt_to_store, lvt_save_file, protocol=pickle.HIGHEST_PROTOCOL)\n', (74501, 74564), False, 'import pickle\n'), ((75135, 75209), 'pickle.dump', 'pickle.dump', (['map_to_store', 'map_save_file'], {'protocol': 'pickle.HIGHEST_PROTOCOL'}), '(map_to_store, map_save_file, protocol=pickle.HIGHEST_PROTOCOL)\n', (75146, 75209), False, 'import pickle\n'), ((75694, 75766), 'pickle.dump', 'pickle.dump', (['lc_to_store', 'lc_save_file'], {'protocol': 'pickle.HIGHEST_PROTOCOL'}), '(lc_to_store, lc_save_file, protocol=pickle.HIGHEST_PROTOCOL)\n', (75705, 75766), False, 'import pickle\n'), ((76216, 76289), 'pickle.dump', 'pickle.dump', (['self.__dict__', 'object_file'], {'protocol': 'pickle.HIGHEST_PROTOCOL'}), '(self.__dict__, object_file, protocol=pickle.HIGHEST_PROTOCOL)\n', (76227, 76289), False, 'import pickle\n'), ((77006, 77029), 'pickle.load', 'pickle.load', (['input_file'], {}), '(input_file)\n', (77017, 77029), False, 'import pickle\n'), ((79045, 79080), 'numpy.log10', 'np.log10', (["temp_response_dataxy['x']"], {}), "(temp_response_dataxy['x'])\n", (79053, 79080), True, 'import numpy as np\n'), ((79082, 79117), 'numpy.log10', 'np.log10', (["temp_response_dataxy['y']"], {}), "(temp_response_dataxy['y'])\n", (79090, 79117), True, 'import numpy as np\n'), ((79903, 79939), 'numpy.argmax', 'np.argmax', (["temp_response_dataxy['y']"], {}), "(temp_response_dataxy['y'])\n", (79912, 79939), True, 'import numpy as np\n'), ((80810, 80830), 'numpy.array', 'np.array', (['e_response'], {}), '(e_response)\n', (80818, 80830), True, 'import numpy as np\n'), ((80831, 80854), 'numpy.isfinite', 'np.isfinite', (['e_response'], {}), '(e_response)\n', (80842, 80854), True, 'import numpy as np\n'), ((80877, 80897), 'numpy.array', 'np.array', (['flux_range'], {}), '(flux_range)\n', (80885, 80897), True, 'import numpy as np\n'), ((80898, 80921), 'numpy.isfinite', 'np.isfinite', (['flux_range'], {}), '(flux_range)\n', (80909, 80921), True, 'import numpy as np\n'), ((86003, 86027), 'numpy.arange', 'np.arange', (['(1.6)', '(79)', '(0.04)'], {}), '(1.6, 79, 0.04)\n', (86012, 86027), True, 'import numpy as np\n'), ((86193, 86217), 'numpy.arange', 'np.arange', (['(1.6)', '(79)', '(0.04)'], {}), '(1.6, 79, 0.04)\n', (86202, 86217), True, 'import numpy as np\n'), ((86354, 86375), 'numpy.max', 'np.max', (['hist_gradeAll'], {}), '(hist_gradeAll)\n', (86360, 86375), True, 'import numpy as np\n'), ((92222, 92246), 'numpy.arange', 'np.arange', (['(1.6)', '(79)', '(0.04)'], {}), '(1.6, 79, 0.04)\n', (92231, 92246), True, 'import numpy as np\n'), ((93390, 93409), 'numpy.max', 'np.max', (['hist_grade0'], {}), '(hist_grade0)\n', (93396, 93409), True, 'import numpy as np\n'), ((93441, 93459), 'numpy.min', 'np.min', (['min_ratios'], {}), '(min_ratios)\n', (93447, 93459), True, 'import numpy as np\n'), ((93466, 93484), 'numpy.max', 'np.max', (['max_ratios'], {}), '(max_ratios)\n', (93472, 93484), True, 'import numpy as np\n'), ((104255, 104264), 'numpy.max', 'np.max', (['x'], {}), '(x)\n', (104261, 104264), True, 'import numpy as np\n'), ((7591, 7685), 'matplotlib.dates.MinuteLocator', 'mdates.MinuteLocator', ([], {'byminute': '[0, 5, 10, 15, 20, 25, 30, 35, 40, 45, 50, 55]', 'interval': '(1)'}), '(byminute=[0, 5, 10, 15, 20, 25, 30, 35, 40, 45, 50, 55\n ], interval=1)\n', (7611, 7685), True, 'import 
matplotlib.dates as mdates\n'), ((19071, 19091), 'numpy.arange', 'np.arange', (['(0)', '(9)', '(0.5)'], {}), '(0, 9, 0.5)\n', (19080, 19091), True, 'import numpy as np\n'), ((19697, 19714), 'os.path.exists', 'os.path.exists', (['t'], {}), '(t)\n', (19711, 19714), False, 'import os\n'), ((20859, 20879), 'astropy.io.fits.open', 'fits.open', (['psf_array'], {}), '(psf_array)\n', (20868, 20879), False, 'from astropy.io import fits\n'), ((23446, 23555), 'astropy.coordinates.SkyCoord', 'SkyCoord', (["(bottom_left['x'] * u.arcsec)", "(bottom_left['y'] * u.arcsec)"], {'frame': 'sunpy_map_obj.coordinate_frame'}), "(bottom_left['x'] * u.arcsec, bottom_left['y'] * u.arcsec, frame=\n sunpy_map_obj.coordinate_frame)\n", (23454, 23555), False, 'from astropy.coordinates import SkyCoord\n'), ((23564, 23669), 'astropy.coordinates.SkyCoord', 'SkyCoord', (["(top_right['x'] * u.arcsec)", "(top_right['y'] * u.arcsec)"], {'frame': 'sunpy_map_obj.coordinate_frame'}), "(top_right['x'] * u.arcsec, top_right['y'] * u.arcsec, frame=\n sunpy_map_obj.coordinate_frame)\n", (23572, 23669), False, 'from astropy.coordinates import SkyCoord\n'), ((29263, 29282), 'numpy.max', 'np.max', (['finite_vals'], {}), '(finite_vals)\n', (29269, 29282), True, 'import numpy as np\n'), ((30397, 30456), 'astropy.units.Quantity', 'u.Quantity', (['[nx * rebin_factor, ny * rebin_factor]', 'u.pixel'], {}), '([nx * rebin_factor, ny * rebin_factor], u.pixel)\n', (30407, 30456), True, 'import astropy.units as u\n'), ((31624, 31660), 'matplotlib.colors.LogNorm', 'colors.LogNorm', ([], {'vmin': 'dmin', 'vmax': 'dmax'}), '(vmin=dmin, vmax=dmax)\n', (31638, 31660), True, 'import matplotlib.colors as colors\n'), ((31705, 31730), 'pylab.cm.get_cmap', 'cm.get_cmap', (['"""Spectral_r"""'], {}), "('Spectral_r')\n", (31716, 31730), False, 'from pylab import figure, cm\n'), ((35898, 35913), 'numpy.shape', 'np.shape', (['boxes'], {}), '(boxes)\n', (35906, 35913), True, 'import numpy as np\n'), ((35990, 36080), 'astropy.coordinates.SkyCoord', 'SkyCoord', (['(rect[0] * u.arcsec)', '(rect[1] * u.arcsec)'], {'frame': 'self.rsn_map.coordinate_frame'}), '(rect[0] * u.arcsec, rect[1] * u.arcsec, frame=self.rsn_map.\n coordinate_frame)\n', (35998, 36080), False, 'from astropy.coordinates import SkyCoord\n'), ((42542, 42566), 'os.path.join', 'os.path.join', (['sd', 'in_dir'], {}), '(sd, in_dir)\n', (42554, 42566), False, 'import os\n'), ((45869, 45889), 'datetime.timedelta', 'timedelta', ([], {'seconds': 't'}), '(seconds=t)\n', (45878, 45889), False, 'from datetime import timedelta\n'), ((46967, 46977), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (46975, 46977), True, 'import matplotlib.pyplot as plt\n'), ((49529, 49577), 'numpy.histogram', 'np.histogram', (["cleanevt['TIME']", 't_bin_conversion'], {}), "(cleanevt['TIME'], t_bin_conversion)\n", (49541, 49577), True, 'import numpy as np\n'), ((51662, 51710), 'numpy.histogram', 'np.histogram', (["cleanevt['TIME']", 'self.t_bin_edges'], {}), "(cleanevt['TIME'], self.t_bin_edges)\n", (51674, 51710), True, 'import numpy as np\n'), ((59007, 59019), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (59017, 59019), True, 'import matplotlib.pyplot as plt\n'), ((59041, 59051), 'matplotlib.pyplot.axes', 'plt.axes', ([], {}), '()\n', (59049, 59051), True, 'import matplotlib.pyplot as plt\n'), ((59263, 59367), 'matplotlib.pyplot.title', 'plt.title', (["('NuSTAR FPM' + self.fpm + ' ' + self.e_range_str + ' keV Light Curve - ' +\n start_yyyymmdd)"], {}), "('NuSTAR FPM' + self.fpm + ' ' + self.e_range_str +\n 
' keV Light Curve - ' + start_yyyymmdd)\n", (59272, 59367), True, 'import matplotlib.pyplot as plt\n'), ((59459, 59501), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (["('Start Time - ' + start_hhmmss)"], {}), "('Start Time - ' + start_hhmmss)\n", (59469, 59501), True, 'import matplotlib.pyplot as plt\n'), ((59627, 59656), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Counts $s^{-1}$"""'], {}), "('Counts $s^{-1}$')\n", (59637, 59656), True, 'import matplotlib.pyplot as plt\n'), ((59696, 59725), 'matplotlib.dates.DateFormatter', 'mdates.DateFormatter', (['"""%H:%M"""'], {}), "('%H:%M')\n", (59716, 59725), True, 'import matplotlib.dates as mdates\n'), ((59850, 59873), 'matplotlib.pyplot.xticks', 'plt.xticks', ([], {'rotation': '(30)'}), '(rotation=30)\n', (59860, 59873), True, 'import matplotlib.pyplot as plt\n'), ((60056, 60068), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (60066, 60068), True, 'import matplotlib.pyplot as plt\n'), ((60090, 60100), 'matplotlib.pyplot.axes', 'plt.axes', ([], {}), '()\n', (60098, 60100), True, 'import matplotlib.pyplot as plt\n'), ((60292, 60396), 'matplotlib.pyplot.title', 'plt.title', (["('NuSTAR FPM' + self.fpm + ' ' + self.e_range_str + ' keV Light Curve - ' +\n start_yyyymmdd)"], {}), "('NuSTAR FPM' + self.fpm + ' ' + self.e_range_str +\n ' keV Light Curve - ' + start_yyyymmdd)\n", (60301, 60396), True, 'import matplotlib.pyplot as plt\n'), ((60504, 60546), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (["('Start Time - ' + start_hhmmss)"], {}), "('Start Time - ' + start_hhmmss)\n", (60514, 60546), True, 'import matplotlib.pyplot as plt\n'), ((60666, 60686), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Counts"""'], {}), "('Counts')\n", (60676, 60686), True, 'import matplotlib.pyplot as plt\n'), ((60726, 60755), 'matplotlib.dates.DateFormatter', 'mdates.DateFormatter', (['"""%H:%M"""'], {}), "('%H:%M')\n", (60746, 60755), True, 'import matplotlib.dates as mdates\n'), ((60880, 60903), 'matplotlib.pyplot.xticks', 'plt.xticks', ([], {'rotation': '(30)'}), '(rotation=30)\n', (60890, 60903), True, 'import matplotlib.pyplot as plt\n'), ((64056, 64085), 'datetime.timedelta', 'datetime.timedelta', ([], {'seconds': 't'}), '(seconds=t)\n', (64074, 64085), False, 'import datetime\n'), ((66097, 66147), 'numpy.histogram', 'np.histogram', (["specific_lc_data['TIME']", 'no_of_time'], {}), "(specific_lc_data['TIME'], no_of_time)\n", (66109, 66147), True, 'import numpy as np\n'), ((69011, 69026), 'os.path.isfile', 'isfile', (['chuFile'], {}), '(chuFile)\n', (69017, 69026), False, 'from os.path import isfile\n'), ((69073, 69088), 'os.path.isfile', 'isfile', (['chuFile'], {}), '(chuFile)\n', (69079, 69088), False, 'from os.path import isfile\n'), ((71925, 71960), 'os.path.exists', 'os.path.exists', (["(nustar_folder + '/')"], {}), "(nustar_folder + '/')\n", (71939, 71960), False, 'import os\n'), ((72046, 72098), 'subprocess.check_output', 'subprocess.check_output', (["['rm', '-r', nustar_folder]"], {}), "(['rm', '-r', nustar_folder])\n", (72069, 72098), False, 'import subprocess\n'), ((72139, 72162), 'os.mkdir', 'os.mkdir', (['nustar_folder'], {}), '(nustar_folder)\n', (72147, 72162), False, 'import os\n'), ((75978, 76046), 'pickle.dump', 'pickle.dump', (['kwargs', 'own_save_file'], {'protocol': 'pickle.HIGHEST_PROTOCOL'}), '(kwargs, own_save_file, protocol=pickle.HIGHEST_PROTOCOL)\n', (75989, 76046), False, 'import pickle\n'), ((80196, 80229), 'numpy.max', 'np.max', (["temp_response_dataxy['y']"], {}), "(temp_response_dataxy['y'])\n", (80202, 80229), True, 
'import numpy as np\n'), ((81194, 81207), 'numpy.max', 'np.max', (['f_err'], {}), '(f_err)\n', (81200, 81207), True, 'import numpy as np\n'), ((86488, 86514), 'numpy.max', 'np.max', (['ratio[fintie_vals]'], {}), '(ratio[fintie_vals])\n', (86494, 86514), True, 'import numpy as np\n'), ((98860, 98883), 'numpy.argmin', 'np.argmin', (['columns[key]'], {}), '(columns[key])\n', (98869, 98883), True, 'import numpy as np\n'), ((99959, 99982), 'numpy.argmax', 'np.argmax', (['columns[key]'], {}), '(columns[key])\n', (99968, 99982), True, 'import numpy as np\n'), ((103265, 103274), 'numpy.min', 'np.min', (['x'], {}), '(x)\n', (103271, 103274), True, 'import numpy as np\n'), ((103278, 103287), 'numpy.max', 'np.max', (['x'], {}), '(x)\n', (103284, 103287), True, 'import numpy as np\n'), ((103293, 103302), 'numpy.min', 'np.min', (['y'], {}), '(y)\n', (103299, 103302), True, 'import numpy as np\n'), ((103306, 103315), 'numpy.max', 'np.max', (['y'], {}), '(y)\n', (103312, 103315), True, 'import numpy as np\n'), ((103550, 103560), 'numpy.mean', 'np.mean', (['y'], {}), '(y)\n', (103557, 103560), True, 'import numpy as np\n'), ((7753, 7785), 'matplotlib.dates.MinuteLocator', 'mdates.MinuteLocator', ([], {'interval': '(2)'}), '(interval=2)\n', (7773, 7785), True, 'import matplotlib.dates as mdates\n'), ((7831, 7863), 'matplotlib.dates.MinuteLocator', 'mdates.MinuteLocator', ([], {'interval': '(1)'}), '(interval=1)\n', (7851, 7863), True, 'import matplotlib.dates as mdates\n'), ((19745, 19757), 'astropy.io.fits.open', 'fits.open', (['t'], {}), '(t)\n', (19754, 19757), False, 'from astropy.io import fits\n'), ((29155, 29170), 'numpy.isfinite', 'np.isfinite', (['dd'], {}), '(dd)\n', (29166, 29170), True, 'import numpy as np\n'), ((36406, 36496), 'astropy.coordinates.SkyCoord', 'SkyCoord', (['(rect[0] * u.arcsec)', '(rect[1] * u.arcsec)'], {'frame': 'self.rsn_map.coordinate_frame'}), '(rect[0] * u.arcsec, rect[1] * u.arcsec, frame=self.rsn_map.\n coordinate_frame)\n', (36414, 36496), False, 'from astropy.coordinates import SkyCoord\n'), ((43101, 43130), 'os.path.join', 'os.path.join', (['_dirpath', '_file'], {}), '(_dirpath, _file)\n', (43113, 43130), False, 'import os\n'), ((58806, 58833), 'numpy.average', 'np.average', (['ltimes_in_range'], {}), '(ltimes_in_range)\n', (58816, 58833), True, 'import numpy as np\n'), ((67701, 67721), 'datetime.timedelta', 'timedelta', ([], {'seconds': 't'}), '(seconds=t)\n', (67710, 67721), False, 'from datetime import timedelta\n'), ((72370, 72405), 'os.path.exists', 'os.path.exists', (["(nustar_folder + '/')"], {}), "(nustar_folder + '/')\n", (72384, 72405), False, 'import os\n'), ((72620, 72643), 'os.mkdir', 'os.mkdir', (['nustar_folder'], {}), '(nustar_folder)\n', (72628, 72643), False, 'import os\n'), ((79150, 79171), 'numpy.log10', 'np.log10', (['plasma_temp'], {}), '(plasma_temp)\n', (79158, 79171), True, 'import numpy as np\n'), ((80320, 80353), 'numpy.max', 'np.max', (["temp_response_dataxy['y']"], {}), "(temp_response_dataxy['y'])\n", (80326, 80353), True, 'import numpy as np\n'), ((81030, 81048), 'numpy.max', 'np.max', (['flux_range'], {}), '(flux_range)\n', (81036, 81048), True, 'import numpy as np\n'), ((81088, 81106), 'numpy.min', 'np.min', (['flux_range'], {}), '(flux_range)\n', (81094, 81106), True, 'import numpy as np\n'), ((92563, 92587), 'numpy.arange', 'np.arange', (['(1.6)', '(79)', '(0.04)'], {}), '(1.6, 79, 0.04)\n', (92572, 92587), True, 'import numpy as np\n'), ((92821, 92845), 'numpy.arange', 'np.arange', (['(1.6)', '(79)', '(0.04)'], {}), '(1.6, 79, 
0.04)\n', (92830, 92845), True, 'import numpy as np\n'), ((92956, 92974), 'numpy.isfinite', 'np.isfinite', (['ratio'], {}), '(ratio)\n', (92967, 92974), True, 'import numpy as np\n'), ((93085, 93103), 'numpy.isfinite', 'np.isfinite', (['ratio'], {}), '(ratio)\n', (93096, 93103), True, 'import numpy as np\n'), ((104101, 104119), 'numpy.arctan', 'np.arctan', (['popt[0]'], {}), '(popt[0])\n', (104110, 104119), True, 'import numpy as np\n'), ((15131, 15141), 'numpy.mean', 'np.mean', (['y'], {}), '(y)\n', (15138, 15141), True, 'import numpy as np\n'), ((28279, 28311), 'numpy.nonzero', 'np.nonzero', (['self.nustar_map.data'], {}), '(self.nustar_map.data)\n', (28289, 28311), True, 'import numpy as np\n'), ((28510, 28543), 'numpy.isfinite', 'np.isfinite', (['self.nustar_map.data'], {}), '(self.nustar_map.data)\n', (28521, 28543), True, 'import numpy as np\n'), ((29214, 29237), 'numpy.nonzero', 'np.nonzero', (['finite_vals'], {}), '(finite_vals)\n', (29224, 29237), True, 'import numpy as np\n'), ((51923, 51940), 'numpy.shape', 'np.shape', (['sub_reg'], {}), '(sub_reg)\n', (51931, 51940), True, 'import numpy as np\n'), ((53006, 53022), 'numpy.array', 'np.array', (['counts'], {}), '(counts)\n', (53014, 53022), True, 'import numpy as np\n'), ((59111, 59131), 'datetime.timedelta', 'timedelta', ([], {'seconds': 't'}), '(seconds=t)\n', (59120, 59131), False, 'from datetime import timedelta\n'), ((60143, 60163), 'datetime.timedelta', 'timedelta', ([], {'seconds': 't'}), '(seconds=t)\n', (60152, 60163), False, 'from datetime import timedelta\n'), ((93012, 93030), 'numpy.isfinite', 'np.isfinite', (['ratio'], {}), '(ratio)\n', (93023, 93030), True, 'import numpy as np\n'), ((93141, 93159), 'numpy.isfinite', 'np.isfinite', (['ratio'], {}), '(ratio)\n', (93152, 93159), True, 'import numpy as np\n'), ((104196, 104214), 'numpy.arctan', 'np.arctan', (['popt[0]'], {}), '(popt[0])\n', (104205, 104214), True, 'import numpy as np\n'), ((81330, 81348), 'numpy.max', 'np.max', (['e_response'], {}), '(e_response)\n', (81336, 81348), True, 'import numpy as np\n'), ((81407, 81425), 'numpy.min', 'np.min', (['e_response'], {}), '(e_response)\n', (81413, 81425), True, 'import numpy as np\n'), ((85565, 85589), 'numpy.min', 'np.min', (["evt_data['time']"], {}), "(evt_data['time'])\n", (85571, 85589), True, 'import numpy as np\n'), ((85672, 85696), 'numpy.max', 'np.max', (["evt_data['time']"], {}), "(evt_data['time'])\n", (85678, 85696), True, 'import numpy as np\n'), ((91716, 91740), 'numpy.min', 'np.min', (["evt_data['time']"], {}), "(evt_data['time'])\n", (91722, 91740), True, 'import numpy as np\n'), ((91823, 91847), 'numpy.max', 'np.max', (["evt_data['time']"], {}), "(evt_data['time'])\n", (91829, 91847), True, 'import numpy as np\n'), ((53045, 53062), 'numpy.shape', 'np.shape', (['sub_reg'], {}), '(sub_reg)\n', (53053, 53062), True, 'import numpy as np\n'), ((54070, 54086), 'numpy.array', 'np.array', (['counts'], {}), '(counts)\n', (54078, 54086), True, 'import numpy as np\n'), ((57651, 57661), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (57659, 57661), True, 'import matplotlib.pyplot as plt\n'), ((5941, 5970), 'numpy.min', 'np.min', (["self.cleanevt['TIME']"], {}), "(self.cleanevt['TIME'])\n", (5947, 5970), True, 'import numpy as np\n'), ((6067, 6096), 'numpy.max', 'np.max', (["self.cleanevt['TIME']"], {}), "(self.cleanevt['TIME'])\n", (6073, 6096), True, 'import numpy as np\n'), ((46159, 46180), 'numpy.min', 'np.min', (['self.hk_times'], {}), '(self.hk_times)\n', (46165, 46180), True, 'import numpy as np\n'), 
((51370, 51383), 'numpy.min', 'np.min', (['times'], {}), '(times)\n', (51376, 51383), True, 'import numpy as np\n'), ((51473, 51486), 'numpy.min', 'np.min', (['times'], {}), '(times)\n', (51479, 51486), True, 'import numpy as np\n'), ((55011, 55023), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (55021, 55023), True, 'import matplotlib.pyplot as plt\n'), ((55061, 55071), 'matplotlib.pyplot.axes', 'plt.axes', ([], {}), '()\n', (55069, 55071), True, 'import matplotlib.pyplot as plt\n'), ((55314, 55424), 'matplotlib.pyplot.title', 'plt.title', (["('NuSTAR FPM' + self.fpm + ' ' + self.e_range_str + ' keV Light Curve - ' +\n start_yyyymmdd + box)"], {}), "('NuSTAR FPM' + self.fpm + ' ' + self.e_range_str +\n ' keV Light Curve - ' + start_yyyymmdd + box)\n", (55323, 55424), True, 'import matplotlib.pyplot as plt\n'), ((55548, 55590), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (["('Start Time - ' + start_hhmmss)"], {}), "('Start Time - ' + start_hhmmss)\n", (55558, 55590), True, 'import matplotlib.pyplot as plt\n'), ((55764, 55793), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Counts $s^{-1}$"""'], {}), "('Counts $s^{-1}$')\n", (55774, 55793), True, 'import matplotlib.pyplot as plt\n'), ((55833, 55862), 'matplotlib.dates.DateFormatter', 'mdates.DateFormatter', (['"""%H:%M"""'], {}), "('%H:%M')\n", (55853, 55862), True, 'import matplotlib.dates as mdates\n'), ((56035, 56058), 'matplotlib.pyplot.xticks', 'plt.xticks', ([], {'rotation': '(30)'}), '(rotation=30)\n', (56045, 56058), True, 'import matplotlib.pyplot as plt\n'), ((56244, 56256), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (56254, 56256), True, 'import matplotlib.pyplot as plt\n'), ((56294, 56304), 'matplotlib.pyplot.axes', 'plt.axes', ([], {}), '()\n', (56302, 56304), True, 'import matplotlib.pyplot as plt\n'), ((56536, 56646), 'matplotlib.pyplot.title', 'plt.title', (["('NuSTAR FPM' + self.fpm + ' ' + self.e_range_str + ' keV Light Curve - ' +\n start_yyyymmdd + box)"], {}), "('NuSTAR FPM' + self.fpm + ' ' + self.e_range_str +\n ' keV Light Curve - ' + start_yyyymmdd + box)\n", (56545, 56646), True, 'import matplotlib.pyplot as plt\n'), ((56770, 56812), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (["('Start Time - ' + start_hhmmss)"], {}), "('Start Time - ' + start_hhmmss)\n", (56780, 56812), True, 'import matplotlib.pyplot as plt\n'), ((56964, 56984), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Counts"""'], {}), "('Counts')\n", (56974, 56984), True, 'import matplotlib.pyplot as plt\n'), ((57024, 57053), 'matplotlib.dates.DateFormatter', 'mdates.DateFormatter', (['"""%H:%M"""'], {}), "('%H:%M')\n", (57044, 57053), True, 'import matplotlib.dates as mdates\n'), ((57226, 57249), 'matplotlib.pyplot.xticks', 'plt.xticks', ([], {'rotation': '(30)'}), '(rotation=30)\n', (57236, 57249), True, 'import matplotlib.pyplot as plt\n'), ((59571, 59601), 'numpy.isfinite', 'np.isfinite', (['counts_per_second'], {}), '(counts_per_second)\n', (59582, 59601), True, 'import numpy as np\n'), ((60613, 60640), 'numpy.isfinite', 'np.isfinite', (['self.lc_counts'], {}), '(self.lc_counts)\n', (60624, 60640), True, 'import numpy as np\n'), ((49825, 49838), 'numpy.min', 'np.min', (['times'], {}), '(times)\n', (49831, 49838), True, 'import numpy as np\n'), ((49932, 49945), 'numpy.min', 'np.min', (['times'], {}), '(times)\n', (49938, 49945), True, 'import numpy as np\n'), ((52609, 52647), 'datetime.timedelta', 'timedelta', ([], {'seconds': 'self.t_bin_edges[t]'}), '(seconds=self.t_bin_edges[t])\n', (52618, 52647), False, 'from 
datetime import timedelta\n'), ((52723, 52765), 'datetime.timedelta', 'timedelta', ([], {'seconds': 'self.t_bin_edges[t + 1]'}), '(seconds=self.t_bin_edges[t + 1])\n', (52732, 52765), False, 'from datetime import timedelta\n'), ((54712, 54739), 'numpy.average', 'np.average', (['ltimes_in_range'], {}), '(ltimes_in_range)\n', (54722, 54739), True, 'import numpy as np\n'), ((54887, 54903), 'numpy.array', 'np.array', (['counts'], {}), '(counts)\n', (54895, 54903), True, 'import numpy as np\n'), ((72506, 72526), 'os.listdir', 'os.listdir', (['save_dir'], {}), '(save_dir)\n', (72516, 72526), False, 'import os\n'), ((55130, 55150), 'datetime.timedelta', 'timedelta', ([], {'seconds': 't'}), '(seconds=t)\n', (55139, 55150), False, 'from datetime import timedelta\n'), ((56363, 56383), 'datetime.timedelta', 'timedelta', ([], {'seconds': 't'}), '(seconds=t)\n', (56372, 56383), False, 'from datetime import timedelta\n'), ((53595, 53633), 'datetime.timedelta', 'timedelta', ([], {'seconds': 'self.t_bin_edges[t]'}), '(seconds=self.t_bin_edges[t])\n', (53604, 53633), False, 'from datetime import timedelta\n'), ((53713, 53755), 'datetime.timedelta', 'timedelta', ([], {'seconds': 'self.t_bin_edges[t + 1]'}), '(seconds=self.t_bin_edges[t + 1])\n', (53722, 53755), False, 'from datetime import timedelta\n'), ((55692, 55722), 'numpy.isfinite', 'np.isfinite', (['counts_per_second'], {}), '(counts_per_second)\n', (55703, 55722), True, 'import numpy as np\n'), ((56903, 56922), 'numpy.isfinite', 'np.isfinite', (['counts'], {}), '(counts)\n', (56914, 56922), True, 'import numpy as np\n')]
|
# coding: utf-8
import os, sys
from setuptools import setup, find_packages
NAME = "edam2json"
VERSION = "1.0dev1"
SETUP_DIR = os.path.dirname(__file__)
README = os.path.join(SETUP_DIR, 'README.md')
readme = open(README).read()
REQUIRES = ["rdflib", "rdflib-jsonld"]
setup(
name=NAME,
version=VERSION,
description="edam2json automates the export of the EDAM ontology to various JSON-based formats",
author='<NAME>',
author_email="<EMAIL>",
url="https://github.com/edamontology/edam2json",
packages=find_packages(),
install_requires=REQUIRES,
license="MIT",
keywords=["Bioinformatics", "OWL", "JSON", "JSON-LD", "Ontology"],
entry_points={
'console_scripts': [
'edam2json=edam2json.__main__:main',
]
}
)
|
[
"os.path.dirname",
"os.path.join",
"setuptools.find_packages"
] |
[((129, 154), 'os.path.dirname', 'os.path.dirname', (['__file__'], {}), '(__file__)\n', (144, 154), False, 'import os, sys\n'), ((164, 200), 'os.path.join', 'os.path.join', (['SETUP_DIR', '"""README.md"""'], {}), "(SETUP_DIR, 'README.md')\n", (176, 200), False, 'import os, sys\n'), ((530, 545), 'setuptools.find_packages', 'find_packages', ([], {}), '()\n', (543, 545), False, 'from setuptools import setup, find_packages\n')]
|
from models import *
from django.contrib import admin
admin.site.register(Profile)
admin.site.register(EmailVerify)
|
[
"django.contrib.admin.site.register"
] |
[((55, 83), 'django.contrib.admin.site.register', 'admin.site.register', (['Profile'], {}), '(Profile)\n', (74, 83), False, 'from django.contrib import admin\n'), ((84, 116), 'django.contrib.admin.site.register', 'admin.site.register', (['EmailVerify'], {}), '(EmailVerify)\n', (103, 116), False, 'from django.contrib import admin\n')]
|
# Copyright 2021 <NAME>, <NAME>, <NAME>, <NAME>, <NAME>, <NAME>
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import configargparse
def create_argparser():
p = configargparse.ArgumentParser(
prog='NTK GANs',
description='NTK GANs.',
formatter_class=configargparse.ArgumentDefaultsHelpFormatter
)
return p
def create_default_parser_wrapper(create_args_fn):
def create_parser(p=None):
if p is None:
p = create_argparser()
return create_args_fn(p)
return create_parser
def get_arg_group(p, title):
groups = [g for g in p._action_groups if g.title == title]
if len(groups) == 0:
raise Warning('No groups with this title')
elif len(groups) > 1:
raise Warning('More than 1 group with the same title')
return groups[0]
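

# Editor-added usage sketch: create_default_parser_wrapper composes a
# profile-specific argument function with the default parser above. The
# '--lr' argument and the _add_example_args helper are illustrative
# assumptions, not part of the original module.
def _add_example_args(p):
    p.add_argument('--lr', type=float, default=1e-3, help='learning rate')
    return p


create_example_parser = create_default_parser_wrapper(_add_example_args)

if __name__ == '__main__':
    example_args = create_example_parser().parse_args([])  # p=None -> create_argparser()
    print(example_args.lr)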
|
[
"configargparse.ArgumentParser"
] |
[((665, 802), 'configargparse.ArgumentParser', 'configargparse.ArgumentParser', ([], {'prog': '"""NTK GANs"""', 'description': '"""NTK GANs."""', 'formatter_class': 'configargparse.ArgumentDefaultsHelpFormatter'}), "(prog='NTK GANs', description='NTK GANs.',\n formatter_class=configargparse.ArgumentDefaultsHelpFormatter)\n", (694, 802), False, 'import configargparse\n')]
|
import graphene
from django.db.models import Q
from graphene import relay
from graphene_django import DjangoObjectType
from graphene_django.registry import Registry
from itdagene.app.career.models import Joblisting as ItdageneJoblisting
from itdagene.app.career.models import Town as ItdageneTown
from itdagene.app.company.models import Company as ItdageneCompany
from itdagene.app.company.models import KeyInformation as ItdageneKeyInformation
from itdagene.app.events.models import Event as ItdageneEvent
from itdagene.app.pages.models import Page as ItdagenePage
from itdagene.app.stands.models import DigitalStand as ItdageneStand
from itdagene.core.models import Preference
from itdagene.core.models import User as ItdageneUser
from itdagene.graphql.types import CountableConnectionBase, OpengraphMetadata
from itdagene.graphql.utils import resize_image
class Town(DjangoObjectType):
class Meta:
model = ItdageneTown
interfaces = (relay.Node,)
description = "Town entity"
only_fields = ("id", "name")
class Joblisting(DjangoObjectType):
towns = graphene.NonNull(graphene.List(graphene.NonNull(Town)))
class Meta:
model = ItdageneJoblisting
connection_class = CountableConnectionBase
# filter_fields = [
# 'type',
# 'to_year',
# 'from_year',
# ]
description = "Joblisting entity"
only_fields = (
"id",
"towns",
"company",
"title",
"type",
"description",
"image",
"deadline",
"from_year",
"to_year",
"url",
"date_created",
"slug",
"video_url",
"is_summerjob_marathon",
)
interfaces = (relay.Node, OpengraphMetadata)
def resolve_towns(self, info, **kwargs):
return self.towns.all()
def resolve_sharing_image(self, info, **kwargs):
if not self.company.logo:
return None
return resize_image(self.company.logo, width=1200, height=630)
def resolve_company(self, info, **kwargs):
return info.context.loaders.Companyloader.load(self.company_id)
@classmethod
def get_queryset(cls):
return ItdageneJoblisting.objects.all()
@classmethod
def get_node(cls, context, id):
try:
return ItdageneJoblisting.objects.get(pk=id)
except Exception as e:
print(e)
return None
class Page(DjangoObjectType):
class Meta:
model = ItdagenePage
interfaces = (relay.Node, OpengraphMetadata)
description = "(info)Page entity"
only_fields = (
"slug",
"title",
"language",
"menu",
"content",
"ingress",
"date_saved",
"date_created",
)
def resolve_description(self, info, **kwargs):
return self.ingress
@classmethod
def get_queryset(cls):
return ItdagenePage.objects.filter(need_auth=False, active=True)
class User(DjangoObjectType):
full_name = graphene.String()
role = graphene.String()
photo = graphene.Field(graphene.String, height=graphene.Int(), width=graphene.Int())
class Meta:
model = ItdageneUser
interfaces = (relay.Node,)
description = "User entity"
only_fields = ("id", "firstName", "lastName", "email", "year", "role")
def resolve_full_name(self, info):
return self.get_full_name()
def resolve_role(self, info):
return self.role()
def resolve_photo(self, info, **kwargs):
return resize_image(self.photo, format="JPEG", quality=80, **kwargs)
class Event(DjangoObjectType):
class Meta:
model = ItdageneEvent
description = "Small event type"
only_fields = (
"id",
"title",
"time_start",
"time_end",
"description",
"type",
"location",
"company",
"uses_tickets",
"max_participants",
"date",
)
interfaces = (relay.Node,)
@classmethod
def get_queryset(cls):
"""
When fetching all events, we do not want stand events,
unless they are of the type 'promoted stand event' (7)
"""
return ItdageneEvent.objects.filter(Q(stand=None) | Q(type=7))
class Stand(DjangoObjectType):
events = graphene.List(
graphene.NonNull(Event), description="The stand's associated events"
)
class Meta:
model = ItdageneStand
description = "A company stand"
only_fields = (
"slug",
"description",
"livestream_url",
"qa_url",
"chat_url",
"active",
"company",
)
interfaces = (relay.Node,)
def resolve_company(self, info, **kwargs):
return info.context.loaders.Companyloader.load(self.company_id)
def resolve_events(self, info, **kwargs):
return ItdageneEvent.objects.filter(stand=self)
@classmethod
def get_queryset(cls):
return ItdageneStand.objects.filter(active=True)
class KeyInformation(DjangoObjectType):
class Meta:
model = ItdageneKeyInformation
interfaces = (relay.Node,)
description = "Key information about a company"
only_fields = ("id", "name", "value")
class Company(DjangoObjectType):
logo = graphene.Field(
graphene.String,
height=graphene.Int(),
width=graphene.Int(),
padding=graphene.Boolean(),
)
key_information = graphene.List(
graphene.NonNull(KeyInformation),
description="Key information about the company.",
)
class Meta:
model = ItdageneCompany
description = "Company entity"
only_fields = (
"id",
"name",
"url",
"logo",
"description",
"is_collabrator",
"joblistings",
)
interfaces = (relay.Node,)
@classmethod
def get_queryset(cls):
return ItdageneCompany.get_last_day() | ItdageneCompany.get_first_day()
@classmethod
def get_node(cls, context, id):
try:
return cls.get_queryset().get(pk=id)
except Exception as e:
print(e)
return None
def resolve_logo(self, info, **kwargs):
return resize_image(self.logo, **kwargs)
def resolve_key_information(self, info, **kwargs):
return ItdageneKeyInformation.objects.filter(company=self)
def resolve_stand(self, info, **kwargs):
return Stand.get_queryset().filter(company=self).first()
class MainCollaborator(Company):
class Meta:
model = ItdageneCompany
description = "Main collaborator company entity"
only_fields = (
"id",
"name",
"url",
"logo",
"description",
"joblistings",
"intro",
"video",
"poster",
)
interfaces = (relay.Node,)
    # This has to be added to avoid GraphQL using this definition for all company references
registry = Registry()
intro = graphene.String()
video = graphene.String()
poster = graphene.String()
def resolve_intro(self, info):
return Preference.current_preference().hsp_intro
def resolve_video(self, info):
return Preference.current_preference().hsp_video
def resolve_poster(self, info):
return Preference.current_preference().hsp_poster
class MetaData(DjangoObjectType):
companies_first_day = graphene.List(graphene.NonNull(Company))
companies_last_day = graphene.List(graphene.NonNull(Company))
collaborators = graphene.List(
graphene.NonNull(Company),
description="List the collaborators, not including the main collaborator",
)
main_collaborator = graphene.Field(
MainCollaborator, description="Main collaborator for current years event"
)
board_members = graphene.NonNull(graphene.List(graphene.NonNull(User)))
interest_form = graphene.String()
def resolve_main_collaborator(self, info):
if self.view_hsp:
return ItdageneCompany.get_main_collaborator()
def resolve_companies_first_day(self, info):
if self.view_companies:
return ItdageneCompany.get_first_day()
def resolve_companies_last_day(self, info):
if self.view_companies:
return ItdageneCompany.get_last_day()
def resolve_collaborators(self, info):
if self.view_sp:
return ItdageneCompany.get_collaborators()
def resolve_board_members(self, info):
return (
ItdageneUser.objects.filter(year=self.year, is_active=True)
.all()
.prefetch_related("groups")
)
def resolve_interest_form(self, info):
if self.show_interest_form:
return self.interest_form_url
class Meta:
model = Preference
description = "Metadata about the current years itdagene"
only_fields = (
"id",
"start_date",
"end_date",
"year",
"nr_of_stands",
"companies_first_day" "companies_last_day",
"collaborators",
"main_collaborator",
"board_members",
"interest_form",
)
interfaces = (relay.Node,)
class SearchResult(graphene.Union):
class Meta:
types = (Joblisting, Company, Page, Town)
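

# Editor-added wiring sketch: the module above only defines types, so the
# root query below is an assumption made purely to illustrate how the
# SearchResult union and the relay nodes could be exposed in a schema.
class ExampleQuery(graphene.ObjectType):
    node = relay.Node.Field()
    search = graphene.List(SearchResult)


example_schema = graphene.Schema(
    query=ExampleQuery, types=[Joblisting, Company, Page, Town]
)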
|
[
"itdagene.core.models.Preference.current_preference",
"itdagene.core.models.User.objects.filter",
"graphene.NonNull",
"itdagene.app.company.models.Company.get_collaborators",
"graphene_django.registry.Registry",
"itdagene.app.career.models.Joblisting.objects.get",
"graphene.Int",
"itdagene.app.pages.models.Page.objects.filter",
"itdagene.app.stands.models.DigitalStand.objects.filter",
"graphene.String",
"itdagene.app.events.models.Event.objects.filter",
"itdagene.app.company.models.Company.get_main_collaborator",
"itdagene.app.company.models.Company.get_first_day",
"graphene.Field",
"itdagene.app.career.models.Joblisting.objects.all",
"itdagene.graphql.utils.resize_image",
"itdagene.app.company.models.KeyInformation.objects.filter",
"django.db.models.Q",
"graphene.Boolean",
"itdagene.app.company.models.Company.get_last_day"
] |
[((3194, 3211), 'graphene.String', 'graphene.String', ([], {}), '()\n', (3209, 3211), False, 'import graphene\n'), ((3223, 3240), 'graphene.String', 'graphene.String', ([], {}), '()\n', (3238, 3240), False, 'import graphene\n'), ((7371, 7388), 'graphene.String', 'graphene.String', ([], {}), '()\n', (7386, 7388), False, 'import graphene\n'), ((7401, 7418), 'graphene.String', 'graphene.String', ([], {}), '()\n', (7416, 7418), False, 'import graphene\n'), ((7432, 7449), 'graphene.String', 'graphene.String', ([], {}), '()\n', (7447, 7449), False, 'import graphene\n'), ((8085, 8179), 'graphene.Field', 'graphene.Field', (['MainCollaborator'], {'description': '"""Main collaborator for current years event"""'}), "(MainCollaborator, description=\n 'Main collaborator for current years event')\n", (8099, 8179), False, 'import graphene\n'), ((8286, 8303), 'graphene.String', 'graphene.String', ([], {}), '()\n', (8301, 8303), False, 'import graphene\n'), ((2088, 2143), 'itdagene.graphql.utils.resize_image', 'resize_image', (['self.company.logo'], {'width': '(1200)', 'height': '(630)'}), '(self.company.logo, width=1200, height=630)\n', (2100, 2143), False, 'from itdagene.graphql.utils import resize_image\n'), ((2324, 2356), 'itdagene.app.career.models.Joblisting.objects.all', 'ItdageneJoblisting.objects.all', ([], {}), '()\n', (2354, 2356), True, 'from itdagene.app.career.models import Joblisting as ItdageneJoblisting\n'), ((3088, 3145), 'itdagene.app.pages.models.Page.objects.filter', 'ItdagenePage.objects.filter', ([], {'need_auth': '(False)', 'active': '(True)'}), '(need_auth=False, active=True)\n', (3115, 3145), True, 'from itdagene.app.pages.models import Page as ItdagenePage\n'), ((3725, 3786), 'itdagene.graphql.utils.resize_image', 'resize_image', (['self.photo'], {'format': '"""JPEG"""', 'quality': '(80)'}), "(self.photo, format='JPEG', quality=80, **kwargs)\n", (3737, 3786), False, 'from itdagene.graphql.utils import resize_image\n'), ((4574, 4597), 'graphene.NonNull', 'graphene.NonNull', (['Event'], {}), '(Event)\n', (4590, 4597), False, 'import graphene\n'), ((5155, 5195), 'itdagene.app.events.models.Event.objects.filter', 'ItdageneEvent.objects.filter', ([], {'stand': 'self'}), '(stand=self)\n', (5183, 5195), True, 'from itdagene.app.events.models import Event as ItdageneEvent\n'), ((5256, 5297), 'itdagene.app.stands.models.DigitalStand.objects.filter', 'ItdageneStand.objects.filter', ([], {'active': '(True)'}), '(active=True)\n', (5284, 5297), True, 'from itdagene.app.stands.models import DigitalStand as ItdageneStand\n'), ((5767, 5799), 'graphene.NonNull', 'graphene.NonNull', (['KeyInformation'], {}), '(KeyInformation)\n', (5783, 5799), False, 'import graphene\n'), ((6560, 6593), 'itdagene.graphql.utils.resize_image', 'resize_image', (['self.logo'], {}), '(self.logo, **kwargs)\n', (6572, 6593), False, 'from itdagene.graphql.utils import resize_image\n'), ((6665, 6716), 'itdagene.app.company.models.KeyInformation.objects.filter', 'ItdageneKeyInformation.objects.filter', ([], {'company': 'self'}), '(company=self)\n', (6702, 6716), True, 'from itdagene.app.company.models import KeyInformation as ItdageneKeyInformation\n'), ((7347, 7357), 'graphene_django.registry.Registry', 'Registry', ([], {}), '()\n', (7355, 7357), False, 'from graphene_django.registry import Registry\n'), ((7808, 7833), 'graphene.NonNull', 'graphene.NonNull', (['Company'], {}), '(Company)\n', (7824, 7833), False, 'import graphene\n'), ((7874, 7899), 'graphene.NonNull', 'graphene.NonNull', (['Company'], {}), '(Company)\n', 
(7890, 7899), False, 'import graphene\n'), ((7944, 7969), 'graphene.NonNull', 'graphene.NonNull', (['Company'], {}), '(Company)\n', (7960, 7969), False, 'import graphene\n'), ((1125, 1147), 'graphene.NonNull', 'graphene.NonNull', (['Town'], {}), '(Town)\n', (1141, 1147), False, 'import graphene\n'), ((2443, 2480), 'itdagene.app.career.models.Joblisting.objects.get', 'ItdageneJoblisting.objects.get', ([], {'pk': 'id'}), '(pk=id)\n', (2473, 2480), True, 'from itdagene.app.career.models import Joblisting as ItdageneJoblisting\n'), ((3292, 3306), 'graphene.Int', 'graphene.Int', ([], {}), '()\n', (3304, 3306), False, 'import graphene\n'), ((3314, 3328), 'graphene.Int', 'graphene.Int', ([], {}), '()\n', (3326, 3328), False, 'import graphene\n'), ((5634, 5648), 'graphene.Int', 'graphene.Int', ([], {}), '()\n', (5646, 5648), False, 'import graphene\n'), ((5664, 5678), 'graphene.Int', 'graphene.Int', ([], {}), '()\n', (5676, 5678), False, 'import graphene\n'), ((5696, 5714), 'graphene.Boolean', 'graphene.Boolean', ([], {}), '()\n', (5712, 5714), False, 'import graphene\n'), ((6243, 6273), 'itdagene.app.company.models.Company.get_last_day', 'ItdageneCompany.get_last_day', ([], {}), '()\n', (6271, 6273), True, 'from itdagene.app.company.models import Company as ItdageneCompany\n'), ((6276, 6307), 'itdagene.app.company.models.Company.get_first_day', 'ItdageneCompany.get_first_day', ([], {}), '()\n', (6305, 6307), True, 'from itdagene.app.company.models import Company as ItdageneCompany\n'), ((7501, 7532), 'itdagene.core.models.Preference.current_preference', 'Preference.current_preference', ([], {}), '()\n', (7530, 7532), False, 'from itdagene.core.models import Preference\n'), ((7594, 7625), 'itdagene.core.models.Preference.current_preference', 'Preference.current_preference', ([], {}), '()\n', (7623, 7625), False, 'from itdagene.core.models import Preference\n'), ((7688, 7719), 'itdagene.core.models.Preference.current_preference', 'Preference.current_preference', ([], {}), '()\n', (7717, 7719), False, 'from itdagene.core.models import Preference\n'), ((8241, 8263), 'graphene.NonNull', 'graphene.NonNull', (['User'], {}), '(User)\n', (8257, 8263), False, 'import graphene\n'), ((8397, 8436), 'itdagene.app.company.models.Company.get_main_collaborator', 'ItdageneCompany.get_main_collaborator', ([], {}), '()\n', (8434, 8436), True, 'from itdagene.app.company.models import Company as ItdageneCompany\n'), ((8538, 8569), 'itdagene.app.company.models.Company.get_first_day', 'ItdageneCompany.get_first_day', ([], {}), '()\n', (8567, 8569), True, 'from itdagene.app.company.models import Company as ItdageneCompany\n'), ((8670, 8700), 'itdagene.app.company.models.Company.get_last_day', 'ItdageneCompany.get_last_day', ([], {}), '()\n', (8698, 8700), True, 'from itdagene.app.company.models import Company as ItdageneCompany\n'), ((8789, 8824), 'itdagene.app.company.models.Company.get_collaborators', 'ItdageneCompany.get_collaborators', ([], {}), '()\n', (8822, 8824), True, 'from itdagene.app.company.models import Company as ItdageneCompany\n'), ((4478, 4491), 'django.db.models.Q', 'Q', ([], {'stand': 'None'}), '(stand=None)\n', (4479, 4491), False, 'from django.db.models import Q\n'), ((4494, 4503), 'django.db.models.Q', 'Q', ([], {'type': '(7)'}), '(type=7)\n', (4495, 4503), False, 'from django.db.models import Q\n'), ((8898, 8957), 'itdagene.core.models.User.objects.filter', 'ItdageneUser.objects.filter', ([], {'year': 'self.year', 'is_active': '(True)'}), '(year=self.year, is_active=True)\n', (8925, 8957), True, 
'from itdagene.core.models import User as ItdageneUser\n')]
|
from pathlib import Path
import os
text_file = Path(os.getcwd()) / 'pdf_api' / 'api_uploaded_files' / 'test.txt'
with open(text_file, 'rb') as f:
output = f.read()
|
[
"os.getcwd"
] |
[((53, 64), 'os.getcwd', 'os.getcwd', ([], {}), '()\n', (62, 64), False, 'import os\n')]
|
import urllib.request, urllib.parse, urllib.error
from datetime import datetime
import sys
import os
import importlib
importlib.reload(sys) # Reload does the trick!
from src.models import Source, Edam, EdamUrl, EdamAlia, EdamRelation, Ro
from scripts.loading.database_session import get_session
from scripts.loading.ontology import read_owl
__author__ = 'sweng66'
## Created on May 2017
## This script is used to update EDAM ontology in NEX2.
log_file = 'scripts/loading/ontology/logs/edam.log'
ontology = 'EDAM'
src = 'EDAM'
CREATED_BY = os.environ['DEFAULT_USER']
def load_ontology(ontology_file):
nex_session = get_session()
source_to_id = dict([(x.display_name, x.source_id) for x in nex_session.query(Source).all()])
edamid_to_edam = dict([(x.edamid, x) for x in nex_session.query(Edam).all()])
term_to_ro_id = dict([(x.display_name, x.ro_id) for x in nex_session.query(Ro).all()])
edam_id_to_alias = {}
for x in nex_session.query(EdamAlia).all():
aliases = []
if x.edam_id in edam_id_to_alias:
aliases = edam_id_to_alias[x.edam_id]
aliases.append((x.display_name, x.alias_type))
edam_id_to_alias[x.edam_id] = aliases
edam_id_to_parent = {}
for x in nex_session.query(EdamRelation).all():
parents = []
if x.child_id in edam_id_to_parent:
parents = edam_id_to_parent[x.child_id]
parents.append(x.parent_id)
edam_id_to_parent[x.child_id] = parents
####################################
fw = open(log_file, "w")
is_sgd_term = {}
data = read_owl(ontology_file, ontology)
[update_log, to_delete_list] = load_new_data(nex_session, data,
source_to_id,
edamid_to_edam,
term_to_ro_id['is a'],
edam_id_to_alias,
edam_id_to_parent,
fw)
write_summary_and_send_email(fw, update_log, to_delete_list)
nex_session.close()
fw.close()
def load_new_data(nex_session, data, source_to_id, edamid_to_edam, ro_id, edam_id_to_alias, edam_id_to_parent, fw):
active_edamid = []
update_log = {}
for count_name in ['updated', 'added', 'deleted']:
update_log[count_name] = 0
relation_just_added = {}
alias_just_added = {}
for x in data:
edam_id = None
if "EDAM:" not in x['id']:
continue
if x['id'] in edamid_to_edam:
## in database
y = edamid_to_edam[x['id']]
edam_id = y.edam_id
if y.is_obsolete is True:
y.is_obsolete = '0'
nex_session.add(y)
nex_session.flush()
update_log['updated'] = update_log['updated'] + 1
fw.write("The is_obsolete for " + x['id'] + " has been updated from " + y.is_obsolete + " to " + 'False' + "\n")
if x['term'] != y.display_name:
## update term
fw.write("The display_name for " + x['id'] + " has been updated from " + y.display_name + " to " + x['term'] + "\n")
y.display_name = x['term']
nex_session.add(y)
nex_session.flush()
update_log['updated'] = update_log['updated'] + 1
print("UPDATED: ", y.edamid, y.display_name, x['term'])
# else:
# print "SAME: ", y.edamid, y.display_name, x['definition'], x['aliases'], x['parents']
active_edamid.append(x['id'])
else:
fw.write("NEW entry = " + x['id'] + " " + x['term'] + "\n")
this_x = Edam(source_id = source_to_id[src],
format_name = x['id'],
edamid = x['id'],
display_name = x['term'],
edam_namespace = x['namespace'],
description = x['definition'],
obj_url = '/edam/' + x['id'],
is_obsolete = '0',
created_by = CREATED_BY)
nex_session.add(this_x)
nex_session.flush()
edam_id = this_x.edam_id
update_log['added'] = update_log['added'] + 1
# print "NEW: ", x['id'], x['term'], x['definition']
## add three URLs
link_id = x['id'].replace(':', '_')
insert_url(nex_session, source_to_id['Ontobee'], 'Ontobee', edam_id,
'http://www.ontobee.org/ontology/EDAM?iri=http://purl.obolibrary.org/obo/'+link_id,
fw)
insert_url(nex_session, source_to_id['BioPortal'], 'BioPortal', edam_id,
'http://bioportal.bioontology.org/ontologies/EDAM/?p=classes&conceptid=http%3A%2F%2Fpurl.obolibrary.org%2Fobo%2F' + link_id,
fw)
insert_url(nex_session, source_to_id['OLS'], 'OLS', edam_id,
'http://www.ebi.ac.uk/ols/ontologies/edam/terms?iri=http%3A%2F%2Fpurl.obolibrary.org%2Fobo%2F' + link_id,
fw)
## add RELATIONS
for parent_edamid in x['parents']:
parent = edamid_to_edam.get(parent_edamid)
if parent is not None:
parent_id = parent.edam_id
child_id = edam_id
insert_relation(nex_session, source_to_id[src], parent_id,
child_id, ro_id, relation_just_added, fw)
## add ALIASES
for (alias, alias_type) in x['aliases']:
insert_alias(nex_session, source_to_id[src], alias,
alias_type, edam_id, alias_just_added, fw)
## update RELATIONS
# print x['id'], "RELATION", edam_id_to_parent.get(edam_id), x['parents']
update_relations(nex_session, edam_id, edam_id_to_parent.get(edam_id), x['parents'],
source_to_id[src], edamid_to_edam, ro_id, relation_just_added, fw)
## update ALIASES
# print x['id'], "ALIAS", edam_id_to_alias.get(edam_id), x['aliases']
update_aliases(nex_session, edam_id, edam_id_to_alias.get(edam_id), x['aliases'],
source_to_id[src], edamid_to_edam, alias_just_added, fw)
to_delete = []
for edamid in edamid_to_edam:
if edamid in active_edamid:
continue
x = edamid_to_edam[edamid]
if edamid.startswith('NTR'):
continue
to_delete.append((edamid, x.display_name))
if x.is_obsolete is False:
x.is_obsolete = '1'
# nex_session.add(x)
# nex_session.flush()
update_log['updated'] = update_log['updated'] + 1
fw.write("The is_obsolete for " + x.edamid + " has been updated from " + x.is_obsolete +" to " + 'True' + "\n")
nex_session.commit()
return [update_log, to_delete]
def update_aliases(nex_session, edam_id, curr_aliases, new_aliases, source_id, edamid_to_edam, alias_just_added, fw):
# print "ALIAS: ", curr_aliases, new_aliases
# return
if curr_aliases is None:
curr_aliases = []
for (alias, type) in new_aliases:
if (alias, type) not in curr_aliases:
insert_alias(nex_session, source_id, alias, type, edam_id, alias_just_added, fw)
for (alias, type) in curr_aliases:
        if (alias, type) not in new_aliases:
## remove the old one
to_delete = nex_session.query(EdamAlia).filter_by(edam_id=edam_id, display_name=alias, alias_type=type).first()
nex_session.delete(to_delete)
fw.write("The old alias = " + alias + " has been deleted for edam_id = " + str(edam_id) + "\n")
def update_relations(nex_session, child_id, curr_parent_ids, new_parents, source_id, edamid_to_edam, ro_id, relation_just_added, fw):
# print "RELATION: ", curr_parent_ids, new_parents
# return
if curr_parent_ids is None:
curr_parent_ids = []
new_parent_ids = []
for parent_edamid in new_parents:
parent = edamid_to_edam.get(parent_edamid)
if parent is not None:
parent_id = parent.edam_id
new_parent_ids.append(parent_id)
if parent_id not in curr_parent_ids:
insert_relation(nex_session, source_id, parent_id, child_id,
ro_id, relation_just_added, fw)
for parent_id in curr_parent_ids:
if parent_id not in new_parent_ids:
## remove the old one
to_delete = nex_session.query(EdamRelation).filter_by(child_id=child_id, parent_id=parent_id).first()
nex_session.delete(to_delete)
fw.write("The old parent: parent_id = " + str(parent_id) + " has been deleted for edam_id = " + str(child_id)+ "\n")
def insert_url(nex_session, source_id, display_name, edam_id, url, fw):
# print url
# return
x = EdamUrl(display_name = display_name,
url_type = display_name,
source_id = source_id,
edam_id = edam_id,
obj_url = url,
created_by = CREATED_BY)
nex_session.add(x)
nex_session.flush()
fw.write("Added new URL: " + url + " for edam_id = " + str(edam_id) + "\n")
def insert_alias(nex_session, source_id, display_name, alias_type, edam_id, alias_just_added, fw):
# print display_name
# return
if (edam_id, display_name, alias_type) in alias_just_added:
return
alias_just_added[(edam_id, display_name, alias_type)] = 1
x = EdamAlia(display_name = display_name,
alias_type = alias_type,
source_id = source_id,
edam_id = edam_id,
created_by = CREATED_BY)
nex_session.add(x)
nex_session.flush()
fw.write("Added new ALIAS: " + display_name + " for edam_id = " + str(edam_id) + "\n")
def insert_relation(nex_session, source_id, parent_id, child_id, ro_id, relation_just_added, fw):
# print "PARENT/CHILD: ", parent_id, child_id
# return
if (parent_id, child_id) in relation_just_added:
return
relation_just_added[(parent_id, child_id)] = 1
x = EdamRelation(parent_id = parent_id,
child_id = child_id,
source_id = source_id,
ro_id = ro_id,
created_by = CREATED_BY)
nex_session.add(x)
nex_session.flush()
fw.write("Added new PARENT: parent_id = " + str(parent_id) + " for edam_id = " + str(child_id) + "\n")
def write_summary_and_send_email(fw, update_log, to_delete_list):
summary = "Updated: " + str(update_log['updated'])+ "\n"
summary = summary + "Added: " + str(update_log['added']) + "\n"
if len(to_delete_list) > 0:
summary = summary + "The following EDAM terms are not in the current release:\n"
for (edamid, term) in to_delete_list:
summary = summary + "\t" + edamid + " " + term + "\n"
fw.write(summary)
print(summary)
if __name__ == "__main__":
# http://edamontology.org/EDAM_1.20.owl
url_path = "http://edamontology.org/"
owl_file = "EDAM_1.20.owl"
urllib.request.urlretrieve(url_path + owl_file, owl_file)
load_ontology(owl_file)
|
[
"scripts.loading.ontology.read_owl",
"src.models.Edam",
"src.models.EdamRelation",
"importlib.reload",
"scripts.loading.database_session.get_session",
"src.models.EdamAlia",
"src.models.EdamUrl"
] |
[((118, 139), 'importlib.reload', 'importlib.reload', (['sys'], {}), '(sys)\n', (134, 139), False, 'import importlib\n'), ((643, 656), 'scripts.loading.database_session.get_session', 'get_session', ([], {}), '()\n', (654, 656), False, 'from scripts.loading.database_session import get_session\n'), ((1613, 1646), 'scripts.loading.ontology.read_owl', 'read_owl', (['ontology_file', 'ontology'], {}), '(ontology_file, ontology)\n', (1621, 1646), False, 'from scripts.loading.ontology import read_owl\n'), ((9353, 9489), 'src.models.EdamUrl', 'EdamUrl', ([], {'display_name': 'display_name', 'url_type': 'display_name', 'source_id': 'source_id', 'edam_id': 'edam_id', 'obj_url': 'url', 'created_by': 'CREATED_BY'}), '(display_name=display_name, url_type=display_name, source_id=\n source_id, edam_id=edam_id, obj_url=url, created_by=CREATED_BY)\n', (9360, 9489), False, 'from src.models import Source, Edam, EdamUrl, EdamAlia, EdamRelation, Ro\n'), ((9995, 10119), 'src.models.EdamAlia', 'EdamAlia', ([], {'display_name': 'display_name', 'alias_type': 'alias_type', 'source_id': 'source_id', 'edam_id': 'edam_id', 'created_by': 'CREATED_BY'}), '(display_name=display_name, alias_type=alias_type, source_id=\n source_id, edam_id=edam_id, created_by=CREATED_BY)\n', (10003, 10119), False, 'from src.models import Source, Edam, EdamUrl, EdamAlia, EdamRelation, Ro\n'), ((10629, 10742), 'src.models.EdamRelation', 'EdamRelation', ([], {'parent_id': 'parent_id', 'child_id': 'child_id', 'source_id': 'source_id', 'ro_id': 'ro_id', 'created_by': 'CREATED_BY'}), '(parent_id=parent_id, child_id=child_id, source_id=source_id,\n ro_id=ro_id, created_by=CREATED_BY)\n', (10641, 10742), False, 'from src.models import Source, Edam, EdamUrl, EdamAlia, EdamRelation, Ro\n'), ((3841, 4077), 'src.models.Edam', 'Edam', ([], {'source_id': 'source_to_id[src]', 'format_name': "x['id']", 'edamid': "x['id']", 'display_name': "x['term']", 'edam_namespace': "x['namespace']", 'description': "x['definition']", 'obj_url': "('/edam/' + x['id'])", 'is_obsolete': '"""0"""', 'created_by': 'CREATED_BY'}), "(source_id=source_to_id[src], format_name=x['id'], edamid=x['id'],\n display_name=x['term'], edam_namespace=x['namespace'], description=x[\n 'definition'], obj_url='/edam/' + x['id'], is_obsolete='0', created_by=\n CREATED_BY)\n", (3845, 4077), False, 'from src.models import Source, Edam, EdamUrl, EdamAlia, EdamRelation, Ro\n')]
|
import re
pattern = re.compile(r"(\d+([.,]\d*)?|([.,]\d*))([a-zA-Z]+)")
def parse(x = '0.0Da'):
"""Parse a resolution string.
Args:
x (str or float): A string with resolution, like '5ppm', '4mmu', '.02Da'.
            Defaults to 'ppm' (i.e. when given a float, treat it as a parts-per-million value).
"""
try:
v = float(x)
unit = 'ppm'
except ValueError:
x = x.replace(" ","")
g = re.match(pattern, x)
unit = g[4].lower()
assert unit in ('da', 'th', 'mmu', 'ppm'), "Wrong or missing unit."
v = float(g[1].replace(',','.'))
x_type = 'abs'
if unit == 'mmu':
v /= 1000.
if unit == 'ppm':
x_type = 'rel'
v *= 1e-6
if v == 0:
print("WARNING: infinite resolution. God mode on?")
return v, x_type
def test_parse():
assert parse("0.05Da") == (0.05, 'abs')
assert parse("0.05Th") == (0.05, 'abs')
assert parse("0,05Th") == (0.05, 'abs')
assert parse("0,05Da") == (0.05, 'abs')
assert parse("50.0mmu") == (0.05, 'abs')
assert parse("50mmu") == (0.05, 'abs')
assert parse("5.0ppm") == (5.0*1e-6, 'rel')
assert parse("5,0ppm") == (5.0*1e-6, 'rel')
assert parse(",2ppm") == (.2*1e-6, 'rel')
assert parse(".3ppm") == (.3*1e-6, 'rel')
assert parse(.3) == (.3*1e-6, 'rel')
assert parse(3) == (3*1e-6, 'rel')
|
[
"re.match",
"re.compile"
] |
[((21, 74), 're.compile', 're.compile', (['"""(\\\\d+([.,]\\\\d*)?|([.,]\\\\d*))([a-zA-Z]+)"""'], {}), "('(\\\\d+([.,]\\\\d*)?|([.,]\\\\d*))([a-zA-Z]+)')\n", (31, 74), False, 'import re\n'), ((439, 459), 're.match', 're.match', (['pattern', 'x'], {}), '(pattern, x)\n', (447, 459), False, 'import re\n')]
|
# Generator functions to generate batches of data.
import numpy as np
import os
import time
import h5py
import matplotlib.pyplot as plt
import collections
from synth.config import config
from synth.utils import utils
def data_gen_SDN(mode = 'Train', sec_mode = 0):
with h5py.File(config.stat_file, mode='r') as stat_file:
max_feat = stat_file["feats_maximus"][()] + 0.001
min_feat = stat_file["feats_minimus"][()] - 0.001
voc_list = [x for x in os.listdir(config.feats_dir) if x.endswith('.hdf5') and x.split('_')[0].upper() in [x for x in config.datasets if x != "DAMP"]]
if "DAMP" in config.datasets:
damp_list = [x for x in os.listdir(config.feats_dir) if x.endswith('.hdf5') and x.split('_')[1] in config.damp_singers]
voc_list = voc_list+damp_list
# if config.SDN_mix:
# back_list = [x for x in os.listdir(config.backing_dir) if x.endswith('.hdf5')]
# voc_list = [x for x in voc_list if x not in ['csd_alto1_NinoDios_14.hdf5', 'jvs_jvs023_raw_song_unique_11.hdf5', 'jvs_jvs024_raw_song_unique_2.hdf5', 'csd_soprano3_NinoDios_18.hdf5', 'csd_tenor1_ElRossinyol_13.hdf5', 'csd_soprano3_NinoDios_5.hdf5', 'csd_tenor3_NinoDios_8.hdf5', 'csd_tenor2_NinoDios_13.hdf5', 'jvs_jvs047_raw_song_unique_4.hdf5', 'jvs_jvs098_raw_song_unique_1.hdf5', 'jvs_jvs023_raw_song_unique_9.hdf5', 'jvs_jvs023_raw_song_unique_14.hdf5', 'csd_soprano2_NinoDios_13.hdf5', 'csd_tenor4_LocusIste_12.hdf5', 'csd_bass4_NinoDios_5.hdf5', 'jvs_jvs014_raw_song_unique_15.hdf5', 'csd_soprano2_NinoDios_2.hdf5', 'csd_bass4_NinoDios_12.hdf5', 'jvs_jvs041_raw_song_unique_14.hdf5', 'csd_alto3_LocusIste_25.hdf5', 'jvs_jvs023_raw_song_unique_16.hdf5', 'jvs_jvs092_raw_song_unique_12.hdf5', 'jvs_jvs074_raw_song_unique_6.hdf5', 'jvs_jvs017_raw_song_unique_2.hdf5']]
train_list = [x for x in voc_list if not x.split('_')[2]=='04'] + voc_list[:int(len(voc_list)*0.9)]
val_list = [x for x in voc_list if x.split('_')[2]=='04']+ voc_list[int(len(voc_list)*0.9):]
max_files_to_process = int(config.batch_size/config.autovc_samples_per_file)
if mode == "Train":
num_batches = config.autovc_batches_per_epoch_train
file_list = train_list
else:
num_batches = config.autovc_batches_per_epoch_val
file_list = val_list
for k in range(num_batches):
feats_targs = []
stfts_targs = []
targets_speakers = []
# if config.SDN_mix:
# back_index = np.random.randint(0,len(back_list))
# back_to_open = back_list[back_index]
# with h5py.File(os.path.join(config.backing_dir,back_to_open), "r") as hdf5_file:
# back = hdf5_file['backing_stft'][()]
# back = np.clip(back, 0.0, 1.0)
for i in range(max_files_to_process):
voc_index = np.random.randint(0,len(file_list))
voc_to_open = file_list[voc_index]
with h5py.File(os.path.join(config.feats_dir,voc_to_open), "r") as hdf5_file:
mel = hdf5_file['feats'][()]
back = hdf5_file['back_stft'][()]
stfts = hdf5_file['stfts'][()]
back = np.clip(back, 0.0, 1.0)
f0 = mel[:,-2]
med = np.median(f0[f0 > 0])
f0[f0==0] = med
mel[:,-2] = f0
speaker_name = voc_to_open.split('_')[1]
speaker_index = config.singers.index(speaker_name)
mel = (mel - min_feat)/(max_feat-min_feat)
stfts = np.clip(stfts, 0.0, 1.0)
assert mel.max()<=1.0 and mel.min()>=0.0, "Error in file {}, max: {}, min: {}".format(voc_to_open, mel.max(), mel.min())
for j in range(config.autovc_samples_per_file):
voc_idx = np.random.randint(0,len(mel)-config.max_phr_len)
feats_targs.append(mel[voc_idx:voc_idx+config.max_phr_len])
noise = np.random.rand(config.max_phr_len,stfts.shape[-1])*np.random.uniform(0.0,config.noise_threshold)
back_gain = np.random.uniform(0.0, config.back_threshold)
stft = stfts[voc_idx:voc_idx+config.max_phr_len]*np.random.uniform(back_gain, 1.0) + noise
back_sample = back[voc_idx:voc_idx+config.max_phr_len]
stft = stft + back_sample * back_gain
# if config.SDN_mix:
# back_idx = np.random.randint(0,len(back)-config.max_phr_len)
# back_sample = back[back_idx:back_idx+config.max_phr_len]
# stft = stft + back_sample * back_gain
stfts_targs.append(stft)
targets_speakers.append(speaker_index)
feats_targs = np.array(feats_targs)
stfts_targs = np.array(stfts_targs)
yield feats_targs, stfts_targs, np.array(targets_speakers)
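

# Editor-added consumption sketch: the generator yields one batch per
# iteration, so a single next() call is enough to inspect batch shapes.
# Assumes the HDF5 feature files referenced by synth.config exist on disk.
if __name__ == '__main__':
    gen = data_gen_SDN(mode='Train')
    feats, stfts, speakers = next(gen)
    print(feats.shape, stfts.shape, speakers.shape)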
|
[
"numpy.random.uniform",
"h5py.File",
"numpy.median",
"numpy.clip",
"numpy.array",
"synth.config.config.singers.index",
"numpy.random.rand",
"os.path.join",
"os.listdir"
] |
[((282, 319), 'h5py.File', 'h5py.File', (['config.stat_file'], {'mode': '"""r"""'}), "(config.stat_file, mode='r')\n", (291, 319), False, 'import h5py\n'), ((4725, 4746), 'numpy.array', 'np.array', (['feats_targs'], {}), '(feats_targs)\n', (4733, 4746), True, 'import numpy as np\n'), ((4769, 4790), 'numpy.array', 'np.array', (['stfts_targs'], {}), '(stfts_targs)\n', (4777, 4790), True, 'import numpy as np\n'), ((478, 506), 'os.listdir', 'os.listdir', (['config.feats_dir'], {}), '(config.feats_dir)\n', (488, 506), False, 'import os\n'), ((3194, 3217), 'numpy.clip', 'np.clip', (['back', '(0.0)', '(1.0)'], {}), '(back, 0.0, 1.0)\n', (3201, 3217), True, 'import numpy as np\n'), ((3272, 3293), 'numpy.median', 'np.median', (['f0[f0 > 0]'], {}), '(f0[f0 > 0])\n', (3281, 3293), True, 'import numpy as np\n'), ((3434, 3468), 'synth.config.config.singers.index', 'config.singers.index', (['speaker_name'], {}), '(speaker_name)\n', (3454, 3468), False, 'from synth.config import config\n'), ((3548, 3572), 'numpy.clip', 'np.clip', (['stfts', '(0.0)', '(1.0)'], {}), '(stfts, 0.0, 1.0)\n', (3555, 3572), True, 'import numpy as np\n'), ((672, 700), 'os.listdir', 'os.listdir', (['config.feats_dir'], {}), '(config.feats_dir)\n', (682, 700), False, 'import os\n'), ((4069, 4114), 'numpy.random.uniform', 'np.random.uniform', (['(0.0)', 'config.back_threshold'], {}), '(0.0, config.back_threshold)\n', (4086, 4114), True, 'import numpy as np\n'), ((4841, 4867), 'numpy.array', 'np.array', (['targets_speakers'], {}), '(targets_speakers)\n', (4849, 4867), True, 'import numpy as np\n'), ((2951, 2994), 'os.path.join', 'os.path.join', (['config.feats_dir', 'voc_to_open'], {}), '(config.feats_dir, voc_to_open)\n', (2963, 2994), False, 'import os\n'), ((3944, 3995), 'numpy.random.rand', 'np.random.rand', (['config.max_phr_len', 'stfts.shape[-1]'], {}), '(config.max_phr_len, stfts.shape[-1])\n', (3958, 3995), True, 'import numpy as np\n'), ((3995, 4041), 'numpy.random.uniform', 'np.random.uniform', (['(0.0)', 'config.noise_threshold'], {}), '(0.0, config.noise_threshold)\n', (4012, 4041), True, 'import numpy as np\n'), ((4180, 4213), 'numpy.random.uniform', 'np.random.uniform', (['back_gain', '(1.0)'], {}), '(back_gain, 1.0)\n', (4197, 4213), True, 'import numpy as np\n')]
|
import numpy as np
import pandas as pd
import plotly.express as px
georgia_pop = pd.read_csv('https://raw.githubusercontent.com/rfordatascience/tidytuesday/master/data/2021/2021-02-16/georgia_pop.csv')
census = pd.read_csv('https://raw.githubusercontent.com/rfordatascience/tidytuesday/master/data/2021/2021-02-16/census.csv')
furniture = pd.read_csv('https://raw.githubusercontent.com/rfordatascience/tidytuesday/master/data/2021/2021-02-16/furniture.csv')
city_rural = pd.read_csv('https://raw.githubusercontent.com/rfordatascience/tidytuesday/master/data/2021/2021-02-16/city_rural.csv')
income = pd.read_csv('https://raw.githubusercontent.com/rfordatascience/tidytuesday/master/data/2021/2021-02-16/income.csv')
freed_slaves = pd.read_csv('https://raw.githubusercontent.com/rfordatascience/tidytuesday/master/data/2021/2021-02-16/freed_slaves.csv')
occupation = pd.read_csv('https://raw.githubusercontent.com/rfordatascience/tidytuesday/master/data/2021/2021-02-16/occupation.csv')
conjugal = pd.read_csv('https://raw.githubusercontent.com/rfordatascience/tidytuesday/master/data/2021/2021-02-16/conjugal.csv')
# dubois challenge 1
# Population change by race in Georgia.
yr = georgia_pop.Year.values
xC = georgia_pop.Colored.values
xW = georgia_pop.White.values
import plotly.graph_objects as go
fig = go.Figure()
fig.update_layout(width=500, height=700,
legend=dict(yanchor="top", y=0.99, xanchor="left", x=0.55), #showlegend=False,
title="Population change by race in Georgia",title_x=0.5,)
fig.add_trace(go.Scatter(x=xC, y=yr, mode='lines', name='African American'))
fig.add_trace(go.Scatter(x=xW, y=yr, mode='lines', name='White American'))
annotations=[]
annotations.append(dict(xref='paper', yref='paper', x=0.5, y=-0.1,
xanchor='center', yanchor='top',
text='#TidyTuesday - 2021/02/16 | twitter.com/vivekparasharr | github.com/vivekparasharr',
font=dict(family='Arial', size=12, color='grey'),
showarrow=False))
fig.update_layout(annotations=annotations)
fig.show()
# dubois challenge 2
# Marriage status
# Prepare data
conjugal = conjugal.replace('Negroes','African Americans')
conjugal.columns = ['Population', 'Age', 'c_Single', 'c_Married', 'c_Divorced_and_Widowed']
conjugal = pd.wide_to_long(conjugal, stubnames='c', i=['Population', 'Age'], j='Conjugal_Status', sep='_', suffix=r'\w+').reset_index()
conjugal.columns = ['Population', 'Age', 'Conjugal_Status', 'Conjugal_Status_Value']
import plotly.graph_objects as go
fig = go.Figure()
fig.update_layout(
template="simple_white",
yaxis=dict(title_text="Age"), xaxis=dict(title_text="Race Share Pct"),
barmode="stack",
legend=dict(yanchor="top", y=1.25, xanchor="left", x=0.50), #showlegend=False,
title="Conjugal condition",title_x=0.5,
)
colors = ['firebrick','olive','dodgerblue']#,'blueviolet','dimgrey','tomato','sienna','darkorange','forestgreen','steelblue','royalblue','orchid']
#selected_colors = colors[:y_axis_levels]
labels={'Single':'Single', 'Married':'Married', 'Divorced_and_Widowed':'Divorced and Widowed'}
for r, c in zip(conjugal.Conjugal_Status.unique(), colors):
plot_df = conjugal[conjugal.Conjugal_Status == r]
fig.add_trace(
go.Bar(y=[plot_df.Age, plot_df.Population], x=plot_df.Conjugal_Status_Value, name=labels[r], marker_color=c, orientation='h', ),
)
fig
# dubois challenge 3
# occupation
import plotly.graph_objects as go
labels = occupation.Category.tolist()
labels = ['Negroes: Agriculture, Fisheries and Mining',
'Negroes: Manufacturing and Mechanical Industries',
'Negroes: Domestic and Personal Service',
'Negroes: Professions',
'Negroes: Trade and Transportation',
'Blank: Right',
'Whites: Agriculture, Fisheries and Mining',
'Whites: Manufacturing and Mechanical Industries',
'Whites: Domestic and Personal Service',
'Whites: Professions',
'Whites: Trade and Transportation',
'Blank: Left']
white_space=50 # this can be modified as needed
values = occupation.Percentage.tolist()
values = [62.0, 5.0, 28.0, 0.8, 4.5, white_space, 64.0, 12.5, 5.5, 4.0, 13.0, white_space]
color_list = ['dimgray', 'firebrick', 'olive', 'saddlebrown', 'steelblue', 'white', 'dimgray', 'firebrick', 'olive', 'saddlebrown', 'steelblue', 'white']
fig = go.Figure(data=[go.Pie(labels=None, values=values,
direction='clockwise',
rotation=(-((white_space/sum(values))*360)),
sort=False, showlegend=False,
title='Occupation by race')]) # , labels=labels, hole=0.4 to make a donut
fig.update_traces(marker=dict(colors=color_list), textinfo='none') #, line=dict(color='#000000', width=2)))
fig.show()
|
[
"pandas.wide_to_long",
"plotly.graph_objects.Scatter",
"pandas.read_csv",
"plotly.graph_objects.Figure",
"plotly.graph_objects.Bar"
] |
[((83, 213), 'pandas.read_csv', 'pd.read_csv', (['"""https://raw.githubusercontent.com/rfordatascience/tidytuesday/master/data/2021/2021-02-16/georgia_pop.csv"""'], {}), "(\n 'https://raw.githubusercontent.com/rfordatascience/tidytuesday/master/data/2021/2021-02-16/georgia_pop.csv'\n )\n", (94, 213), True, 'import pandas as pd\n'), ((213, 338), 'pandas.read_csv', 'pd.read_csv', (['"""https://raw.githubusercontent.com/rfordatascience/tidytuesday/master/data/2021/2021-02-16/census.csv"""'], {}), "(\n 'https://raw.githubusercontent.com/rfordatascience/tidytuesday/master/data/2021/2021-02-16/census.csv'\n )\n", (224, 338), True, 'import pandas as pd\n'), ((341, 469), 'pandas.read_csv', 'pd.read_csv', (['"""https://raw.githubusercontent.com/rfordatascience/tidytuesday/master/data/2021/2021-02-16/furniture.csv"""'], {}), "(\n 'https://raw.githubusercontent.com/rfordatascience/tidytuesday/master/data/2021/2021-02-16/furniture.csv'\n )\n", (352, 469), True, 'import pandas as pd\n'), ((473, 602), 'pandas.read_csv', 'pd.read_csv', (['"""https://raw.githubusercontent.com/rfordatascience/tidytuesday/master/data/2021/2021-02-16/city_rural.csv"""'], {}), "(\n 'https://raw.githubusercontent.com/rfordatascience/tidytuesday/master/data/2021/2021-02-16/city_rural.csv'\n )\n", (484, 602), True, 'import pandas as pd\n'), ((602, 727), 'pandas.read_csv', 'pd.read_csv', (['"""https://raw.githubusercontent.com/rfordatascience/tidytuesday/master/data/2021/2021-02-16/income.csv"""'], {}), "(\n 'https://raw.githubusercontent.com/rfordatascience/tidytuesday/master/data/2021/2021-02-16/income.csv'\n )\n", (613, 727), True, 'import pandas as pd\n'), ((733, 864), 'pandas.read_csv', 'pd.read_csv', (['"""https://raw.githubusercontent.com/rfordatascience/tidytuesday/master/data/2021/2021-02-16/freed_slaves.csv"""'], {}), "(\n 'https://raw.githubusercontent.com/rfordatascience/tidytuesday/master/data/2021/2021-02-16/freed_slaves.csv'\n )\n", (744, 864), True, 'import pandas as pd\n'), ((868, 997), 'pandas.read_csv', 'pd.read_csv', (['"""https://raw.githubusercontent.com/rfordatascience/tidytuesday/master/data/2021/2021-02-16/occupation.csv"""'], {}), "(\n 'https://raw.githubusercontent.com/rfordatascience/tidytuesday/master/data/2021/2021-02-16/occupation.csv'\n )\n", (879, 997), True, 'import pandas as pd\n'), ((999, 1126), 'pandas.read_csv', 'pd.read_csv', (['"""https://raw.githubusercontent.com/rfordatascience/tidytuesday/master/data/2021/2021-02-16/conjugal.csv"""'], {}), "(\n 'https://raw.githubusercontent.com/rfordatascience/tidytuesday/master/data/2021/2021-02-16/conjugal.csv'\n )\n", (1010, 1126), True, 'import pandas as pd\n'), ((1311, 1322), 'plotly.graph_objects.Figure', 'go.Figure', ([], {}), '()\n', (1320, 1322), True, 'import plotly.graph_objects as go\n'), ((2555, 2566), 'plotly.graph_objects.Figure', 'go.Figure', ([], {}), '()\n', (2564, 2566), True, 'import plotly.graph_objects as go\n'), ((1525, 1586), 'plotly.graph_objects.Scatter', 'go.Scatter', ([], {'x': 'xC', 'y': 'yr', 'mode': '"""lines"""', 'name': '"""African American"""'}), "(x=xC, y=yr, mode='lines', name='African American')\n", (1535, 1586), True, 'import plotly.graph_objects as go\n'), ((1602, 1661), 'plotly.graph_objects.Scatter', 'go.Scatter', ([], {'x': 'xW', 'y': 'yr', 'mode': '"""lines"""', 'name': '"""White American"""'}), "(x=xW, y=yr, mode='lines', name='White American')\n", (1612, 1661), True, 'import plotly.graph_objects as go\n'), ((2304, 2419), 'pandas.wide_to_long', 'pd.wide_to_long', (['conjugal'], {'stubnames': '"""c"""', 'i': 
"['Population', 'Age']", 'j': '"""Conjugal_Status"""', 'sep': '"""_"""', 'suffix': '"""\\\\w+"""'}), "(conjugal, stubnames='c', i=['Population', 'Age'], j=\n 'Conjugal_Status', sep='_', suffix='\\\\w+')\n", (2319, 2419), True, 'import pandas as pd\n'), ((3265, 3394), 'plotly.graph_objects.Bar', 'go.Bar', ([], {'y': '[plot_df.Age, plot_df.Population]', 'x': 'plot_df.Conjugal_Status_Value', 'name': 'labels[r]', 'marker_color': 'c', 'orientation': '"""h"""'}), "(y=[plot_df.Age, plot_df.Population], x=plot_df.Conjugal_Status_Value,\n name=labels[r], marker_color=c, orientation='h')\n", (3271, 3394), True, 'import plotly.graph_objects as go\n')]
|
"""Test custom types."""
import pytest
from j5.types import ImmutableDict, ImmutableList
def test_immutable_dict_get_member() -> None:
"""Test that we can get an item from an ImmutableDict."""
d = ImmutableDict[str, str]({'foo': 'bar'})
assert d['foo'] == 'bar'
def test_immutable_dict_iterator() -> None:
"""Test that the iterator works."""
data = {'foo': 'bar', 'bar': 'doo', 'doo': 'foo'}
d = ImmutableDict(data)
assert list(d) == list(data.values())
def test_immutable_dict_length() -> None:
"""Test that the length operation works."""
data = {'foo': 'bar', 'bar': 'doo', 'doo': 'foo'}
d = ImmutableDict(data)
assert len(d) == 3
def test_immutable_dict_cannot_set_member() -> None:
"""Test that the immutable dict is immutable."""
data = {'foo': 'bar', 'bar': 'doo', 'doo': 'foo'}
d = ImmutableDict(data)
with pytest.raises(TypeError):
d['foo'] = '12' # type: ignore
def test_immutable_dict_repr() -> None:
"""Test that the repr of the immutable dict is correct."""
data = {'foo': 'bar', 'bar': 'doo'}
d = ImmutableDict(data)
assert repr(d) == "ImmutableDict({'foo': 'bar', 'bar': 'doo'})"
def test_immutable_list_construct_from_list() -> None:
"""Test that we can construct an ImmutableList from a list."""
data = [1, 3, 4, 6, 2]
li = ImmutableList[int](data)
assert list(li) == data
def test_immutable_list_construct_from_generator() -> None:
"""Test that we can construct an ImmutableList from a generator."""
data = [1, 3, 4, 6, 2]
li = ImmutableList[int](item for item in data)
assert list(li) == data
def test_immutable_list_get_item() -> None:
"""Test that we can get an item from an ImmutableList."""
data = [1, 3, 4, 6, 2]
li = ImmutableList[int](data)
assert li[0] == 1
assert li[-1] == 2
with pytest.raises(IndexError):
assert li[7]
with pytest.raises(TypeError):
assert li["foo"] # type:ignore
def test_immutable_list_length() -> None:
"""Test that we can get the list length."""
data = [1, 3, 4, 6, 2]
li = ImmutableList[int](data)
assert len(li) == 5
def test_immutable_list_cannot_set_item() -> None:
"""Test that the list is not immutable."""
data = [1, 3, 4, 6, 2]
li = ImmutableList[int](data)
with pytest.raises(TypeError):
li[0] = 12 # type: ignore
def test_immutable_list_repr() -> None:
"""Test that the repr of the immutable list is correct."""
data = [1, 3, 4, 6, 2]
d = ImmutableList(data)
assert repr(d) == "ImmutableList([1, 3, 4, 6, 2])"
|
[
"pytest.raises",
"j5.types.ImmutableDict",
"j5.types.ImmutableList"
] |
[((426, 445), 'j5.types.ImmutableDict', 'ImmutableDict', (['data'], {}), '(data)\n', (439, 445), False, 'from j5.types import ImmutableDict, ImmutableList\n'), ((643, 662), 'j5.types.ImmutableDict', 'ImmutableDict', (['data'], {}), '(data)\n', (656, 662), False, 'from j5.types import ImmutableDict, ImmutableList\n'), ((857, 876), 'j5.types.ImmutableDict', 'ImmutableDict', (['data'], {}), '(data)\n', (870, 876), False, 'from j5.types import ImmutableDict, ImmutableList\n'), ((1106, 1125), 'j5.types.ImmutableDict', 'ImmutableDict', (['data'], {}), '(data)\n', (1119, 1125), False, 'from j5.types import ImmutableDict, ImmutableList\n'), ((2546, 2565), 'j5.types.ImmutableList', 'ImmutableList', (['data'], {}), '(data)\n', (2559, 2565), False, 'from j5.types import ImmutableDict, ImmutableList\n'), ((887, 911), 'pytest.raises', 'pytest.raises', (['TypeError'], {}), '(TypeError)\n', (900, 911), False, 'import pytest\n'), ((1872, 1897), 'pytest.raises', 'pytest.raises', (['IndexError'], {}), '(IndexError)\n', (1885, 1897), False, 'import pytest\n'), ((1930, 1954), 'pytest.raises', 'pytest.raises', (['TypeError'], {}), '(TypeError)\n', (1943, 1954), False, 'import pytest\n'), ((2345, 2369), 'pytest.raises', 'pytest.raises', (['TypeError'], {}), '(TypeError)\n', (2358, 2369), False, 'import pytest\n')]
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
import re
from typing import Optional
import requests
try:
import validators # type: ignore
has_validators = True
except ImportError:
has_validators = False
from .abstractgenerator import AbstractMISPObjectGenerator
from .. import InvalidMISPObject
class VTReportObject(AbstractMISPObjectGenerator):
'''
VirusTotal Report
:apikey: VirusTotal API key (private works, but only public features are supported right now)
:indicator: IOC to search VirusTotal for
'''
def __init__(self, apikey: str, indicator: str, vt_proxies: Optional[dict] = None, **kwargs):
super().__init__('virustotal-report', **kwargs)
indicator = indicator.strip()
self._resource_type = self.__validate_resource(indicator)
if self._resource_type:
self._proxies = vt_proxies
self._report = self.__query_virustotal(apikey, indicator)
self.generate_attributes()
else:
error_msg = "A valid indicator is required. (One of type url, md5, sha1, sha256). Received '{}' instead".format(indicator)
raise InvalidMISPObject(error_msg)
def get_report(self):
return self._report
def generate_attributes(self):
''' Parse the VirusTotal report for relevant attributes '''
self.add_attribute("last-submission", value=self._report["scan_date"])
self.add_attribute("permalink", value=self._report["permalink"])
ratio = "{}/{}".format(self._report["positives"], self._report["total"])
self.add_attribute("detection-ratio", value=ratio)
def __validate_resource(self, ioc: str):
'''
Validate the data type of an indicator.
Domains and IP addresses aren't supported because
they don't return the same type of data as the URLs/files do
:ioc: Indicator to search VirusTotal for
'''
if not has_validators:
raise Exception('You need to install validators: pip install validators')
if validators.url(ioc):
return "url"
elif re.match(r"\b([a-fA-F0-9]{32}|[a-fA-F0-9]{40}|[a-fA-F0-9]{64})\b", ioc):
return "file"
return False
def __query_virustotal(self, apikey: str, resource: str):
'''
Query VirusTotal for information about an indicator
:apikey: VirusTotal API key
:resource: Indicator to search in VirusTotal
'''
url = "https://www.virustotal.com/vtapi/v2/{}/report".format(self._resource_type)
params = {"apikey": apikey, "resource": resource}
# for now assume we're using a public API key - we'll figure out private keys later
if self._proxies:
report = requests.get(url, params=params, proxies=self._proxies)
else:
report = requests.get(url, params=params)
report_json = report.json()
if report_json["response_code"] == 1:
return report_json
else:
error_msg = "{}: {}".format(resource, report_json["verbose_msg"])
raise InvalidMISPObject(error_msg)
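

# Editor-added usage sketch: the key and indicator below are placeholders;
# executing this performs a live query against the VirusTotal v2 API.
if __name__ == '__main__':
    obj = VTReportObject(apikey="YOUR_VT_API_KEY", indicator="https://example.com/")
    print(obj.get_report()["positives"])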
|
[
"re.match",
"validators.url",
"requests.get"
] |
[((2057, 2076), 'validators.url', 'validators.url', (['ioc'], {}), '(ioc)\n', (2071, 2076), False, 'import validators\n'), ((2116, 2188), 're.match', 're.match', (['"""\\\\b([a-fA-F0-9]{32}|[a-fA-F0-9]{40}|[a-fA-F0-9]{64})\\\\b"""', 'ioc'], {}), "('\\\\b([a-fA-F0-9]{32}|[a-fA-F0-9]{40}|[a-fA-F0-9]{64})\\\\b', ioc)\n", (2124, 2188), False, 'import re\n'), ((2761, 2816), 'requests.get', 'requests.get', (['url'], {'params': 'params', 'proxies': 'self._proxies'}), '(url, params=params, proxies=self._proxies)\n', (2773, 2816), False, 'import requests\n'), ((2852, 2884), 'requests.get', 'requests.get', (['url'], {'params': 'params'}), '(url, params=params)\n', (2864, 2884), False, 'import requests\n')]
|
import socket
from .utils.config_file import ConfigFile
class Yaml:
def __init__(self):
self.data = {
'py2030': {
'profiles': {
socket.gethostname().replace('.', '_'): {
'start_event': 'start'
}
}
}
}
def text(self):
return ConfigFile.to_yaml(self.data)
if __name__ == '__main__':
print(Yaml().text())
|
[
"socket.gethostname"
] |
[((189, 209), 'socket.gethostname', 'socket.gethostname', ([], {}), '()\n', (207, 209), False, 'import socket\n')]
|
# -*- coding:utf-8 -*-
from mako import runtime, filters, cache
UNDEFINED = runtime.UNDEFINED
STOP_RENDERING = runtime.STOP_RENDERING
__M_dict_builtin = dict
__M_locals_builtin = locals
_magic_number = 10
_modified_time = 1467226952.515133
_enable_loop = True
_template_filename = '/home/sumukh/Documents/thesis/Cyberweb/cyberweb/cyberweb/templates/authentication/authentication.mako'
_template_uri = '/authentication/authentication.mako'
_source_encoding = 'utf-8'
from webhelpers.html import escape
_exports = ['headtags', 'col2main']
def _mako_get_namespace(context, name):
try:
return context.namespaces[(__name__, name)]
except KeyError:
_mako_generate_namespaces(context)
return context.namespaces[(__name__, name)]
def _mako_generate_namespaces(context):
pass
def _mako_inherit(template, context):
_mako_generate_namespaces(context)
return runtime._inherit_from(context, u'/authentication/authentication.layout.mako', _template_uri)
def render_body(context,**pageargs):
__M_caller = context.caller_stack._push_frame()
try:
__M_locals = __M_dict_builtin(pageargs=pageargs)
__M_writer = context.writer()
__M_writer(u'\n\n\n')
__M_writer(u'\n\n\n')
__M_writer(u'\n')
return ''
finally:
context.caller_stack._pop_frame()
def render_headtags(context):
__M_caller = context.caller_stack._push_frame()
try:
__M_writer = context.writer()
__M_writer(u'\n')
return ''
finally:
context.caller_stack._pop_frame()
def render_col2main(context):
__M_caller = context.caller_stack._push_frame()
try:
c = context.get('c', UNDEFINED)
__M_writer = context.writer()
__M_writer(u'\n\n<h2>Authentication Credential Summary for CyberWeb User: ')
__M_writer(escape(c.user))
__M_writer(u'</h2>\n<p>\n<h3>PKI Credentials</h3>\n<table>\n <tr>\n <td>Account</td><td>Hostname</td>Status</td><td>Date</td>\n </tr>\n <tr> <td>account1</td><td>hostname1</td>Status</td><td>date</td> </tr>\n <tr> <td>account2</td><td>hostname2</td>Status</td><td>date</td> </tr>\n <tr> <td>account3</td><td>hostname3</td>Status</td><td>date</td> </tr>\n</table>\n<p>\n<h3>GSI Credentials</h3>\n<table>\n <tr> <td>Account</td><td>DN</td><td>MyProxy Server</td><td>Credential Info</td> </tr>\n <tr> <td>account1</td><td>DN1</td><td>MyProxy Server</td><td>Credential1 Info</td> </tr>\n <tr> <td>account2</td><td>DN2</td><td>MyProxy Server</td><td>Credential2 Info</td>\n <tr> <td>account3</td><td>DN3</td><td>MyProxy Server</td><td>Credential3 Info</td>\n <tr> <td>account4</td><td>DN4</td><td>MyProxy Server</td><td>Credential4 Info</td>\n </tr>\n</table>\n')
return ''
finally:
context.caller_stack._pop_frame()
"""
__M_BEGIN_METADATA
{"source_encoding": "utf-8", "line_map": {"64": 58, "33": 1, "34": 5, "35": 31, "41": 4, "45": 4, "51": 8, "56": 8, "57": 10, "58": 10, "28": 0}, "uri": "/authentication/authentication.mako", "filename": "/home/sumukh/Documents/thesis/Cyberweb/cyberweb/cyberweb/templates/authentication/authentication.mako"}
__M_END_METADATA
"""
|
[
"webhelpers.html.escape",
"mako.runtime._inherit_from"
] |
[((893, 989), 'mako.runtime._inherit_from', 'runtime._inherit_from', (['context', 'u"""/authentication/authentication.layout.mako"""', '_template_uri'], {}), "(context,\n u'/authentication/authentication.layout.mako', _template_uri)\n", (914, 989), False, 'from mako import runtime, filters, cache\n'), ((1843, 1857), 'webhelpers.html.escape', 'escape', (['c.user'], {}), '(c.user)\n', (1849, 1857), False, 'from webhelpers.html import escape\n')]
|
import pyautogui as pag
import time
import sys
args = sys.argv
if len(args) != 2:
print("Please specify the file path of the script you would like to run.")
quit()
script = open(sys.argv[1])
lines = script.readlines()
for line in lines:
print(line)
    if not line.strip():
        continue  # skip blank lines instead of crashing on the split below
    command = line.split(None, 1)[0].lower()
    try:
        parameter = line.split(None, 1)[1].strip().lower()
    except IndexError:
        parameter = ""  # command given without a parameter; don't reuse a stale value
if command == "control":
command = "ctrl"
if parameter == "control":
parameter = "ctrl"
if command == "string":
pag.typewrite(parameter, interval=0.1)
elif command == "delay":
time.sleep(int(parameter)/1000)
elif command == "enter":
pag.typewrite(['enter'], interval=0.1)
elif command == "gui":
pag.hotkey('winleft',parameter)
elif command == "rem":
pass
else:
pag.hotkey(command, parameter)
|
[
"pyautogui.typewrite",
"pyautogui.hotkey"
] |
[((598, 636), 'pyautogui.typewrite', 'pag.typewrite', (['parameter'], {'interval': '(0.1)'}), '(parameter, interval=0.1)\n', (611, 636), True, 'import pyautogui as pag\n'), ((751, 789), 'pyautogui.typewrite', 'pag.typewrite', (["['enter']"], {'interval': '(0.1)'}), "(['enter'], interval=0.1)\n", (764, 789), True, 'import pyautogui as pag\n'), ((827, 859), 'pyautogui.hotkey', 'pag.hotkey', (['"""winleft"""', 'parameter'], {}), "('winleft', parameter)\n", (837, 859), True, 'import pyautogui as pag\n'), ((923, 953), 'pyautogui.hotkey', 'pag.hotkey', (['command', 'parameter'], {}), '(command, parameter)\n', (933, 953), True, 'import pyautogui as pag\n')]
|
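The loop above is a small DuckyScript-style interpreter: one command per line, STRING types text, DELAY sleeps for the given milliseconds, ENTER presses Enter, GUI chords the Windows key with its parameter, REM is a comment, and anything else is sent as a hotkey. A hypothetical input file it could consume (the file and runner names are invented):

# Run as: python runner.py demo.txt  (both names assumed)
demo_script = """\
REM open the Run dialog and start notepad
GUI r
DELAY 500
STRING notepad
ENTER
DELAY 1000
STRING typed by the interpreter above
"""
with open("demo.txt", "w") as f:
    f.write(demo_script)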
"""
Copyright 2020 The Secure, Reliable, and Intelligent Systems Lab, ETH Zurich
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import torch
import torch.nn as nn
from lstm_based_model import LstmBasedModel
class LSTMModel(LstmBasedModel):
def __init__(self,mean_return,dev_return,n_steps):
super(LSTMModel, self).__init__(mean_return,dev_return)
self.n_steps = n_steps
# LSTM output layer (investigate with activation functions ?)
self.output_layer = nn.Linear(self.hidden_dim,1)
def forward(self, input0,mode,
n_bins=None,
attack=False,
y=None,
n_steps=None,
predictions=None,
binary_predictions=None,
binning=False):
# Preprocess input
returns = self.preprocessing(input0)
next_return, cell = self.get_output(returns)
if mode == "teacher forcing":
output = next_return
transposed_y = y.permute(1, 0, 2)
n_steps = self.n_steps
elif mode == "prediction":
rescaled_return = self.rescale_return(next_return)
return_product = rescaled_return
output = return_product
elif(mode == "1 step"):
output = next_return
return output,None
for j in range(1,n_steps):
# Do one step prediction
# next_return has shape
if mode == "teacher forcing":
next_return, cell = self.get_output(transposed_y[j-1:j], cell)
output = torch.cat([output, next_return], dim=1)
elif mode == "prediction":
next_return, cell = self.get_output(next_return.permute(1,0,2), cell)
rescaled_return = self.rescale_return(next_return)
return_product *= rescaled_return
output = torch.cat([output, return_product], dim=1)
if mode == "prediction":
if attack:
return output
length = len(self.log_samples)
if binning:
a = self.compute_predictions(output, predictions, n_bins)
b = self.compute_predictions(output, binary_predictions, 2)
return [a] * length, [b] * length
return [output]*length
return output,None
# Forward method that returns a distribution
# Can be stateful or not
# Output has dim (batch_size,seq_len,dim)
def get_output(self, returns, cell=None):
# Feed to LSTM
if cell is not None:
lstm_out, (h_n, c_n) = self.lstm(returns, cell)
else:
lstm_out, (h_n, c_n) = self.lstm(returns)
outputs = torch.transpose(self.output_layer(h_n), 0, 1)
return outputs, (h_n, c_n)
|
[
"torch.cat",
"torch.nn.Linear"
] |
[((972, 1001), 'torch.nn.Linear', 'nn.Linear', (['self.hidden_dim', '(1)'], {}), '(self.hidden_dim, 1)\n', (981, 1001), True, 'import torch.nn as nn\n'), ((2070, 2109), 'torch.cat', 'torch.cat', (['[output, next_return]'], {'dim': '(1)'}), '([output, next_return], dim=1)\n', (2079, 2109), False, 'import torch\n'), ((2378, 2420), 'torch.cat', 'torch.cat', (['[output, return_product]'], {'dim': '(1)'}), '([output, return_product], dim=1)\n', (2387, 2420), False, 'import torch\n')]
|
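In "prediction" mode the model above compounds one-step returns, `return_product *= rescaled_return`, so the emitted sequence is a running product of rescaled returns. A standalone illustration of that accumulation (the return values are invented):

import torch

rescaled_returns = torch.tensor([1.01, 0.99, 1.02, 1.00])  # hypothetical per-step returns
return_product = torch.ones(1)
trajectory = []
for r in rescaled_returns:
    return_product = return_product * r  # same compounding as the prediction branch
    trajectory.append(return_product.clone())
print(torch.stack(trajectory).squeeze())  # ~tensor([1.0100, 0.9999, 1.0199, 1.0199])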
from django.shortcuts import render
from django.http import HttpResponse
import json
from chvi import nmt
import time
# Create your views here.
def index(request):
return render(request, 'index.html')
def trans(request):
if request.method == 'POST':
ch = request.POST['ch']
if ch == '':
tran_vi = []
elif ch == '毕业设计-汉语-越南语"机器翻译"的简单demo演示':
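            # Hard-coded demo input; the literal reads: 'Graduation project: a simple
            # demo of Chinese-Vietnamese "machine translation"'. The later branches
            # match 毕业设计 ('graduation project'), 毕业 ('graduation') and 设计 ('design').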
time.sleep(9)
tran_vi = [
('tốt nghiệp thiết kế - tiếng Hán - Việt "cỗ máy dịch" đơn giản demo', 0),
('tốt nghiệp <unk> - tiếng Hán - Việt "MT" đơn giản demo', 0),
('<unk> tiếng Hán Việt "<unk>" đơn giản demo', 0),
]
elif ch == '毕业设计':
time.sleep(7)
tran_vi = [
('tốt nghiệp <unk>', 0),
('tốt nghiệp thiết kế.', 0),
('<unk>.', 0),
]
elif ch == '毕业':
time.sleep(6)
tran_vi = [
('<unk>', 0),
]
elif ch == '设计':
time.sleep(6)
tran_vi = [
('thiết kế.', 0),
('thiết kế', 0),
('<unk>.', 0),
('<unk> kế.', 0),
]
else:
tran_vi = nmt.sent(ch)
vi = ''
for i in tran_vi:
vi += i[0] + '\n'
return HttpResponse(json.dumps({
'success': 'true',
'vi': vi
}))
else:
return HttpResponse(json.dumps({
'success': 'false',
'vi': ''
}))
|
[
"django.shortcuts.render",
"chvi.nmt.sent",
"json.dumps",
"time.sleep"
] |
[((178, 207), 'django.shortcuts.render', 'render', (['request', '"""index.html"""'], {}), "(request, 'index.html')\n", (184, 207), False, 'from django.shortcuts import render\n'), ((1388, 1429), 'json.dumps', 'json.dumps', (["{'success': 'true', 'vi': vi}"], {}), "({'success': 'true', 'vi': vi})\n", (1398, 1429), False, 'import json\n'), ((1503, 1545), 'json.dumps', 'json.dumps', (["{'success': 'false', 'vi': ''}"], {}), "({'success': 'false', 'vi': ''})\n", (1513, 1545), False, 'import json\n'), ((402, 415), 'time.sleep', 'time.sleep', (['(9)'], {}), '(9)\n', (412, 415), False, 'import time\n'), ((730, 743), 'time.sleep', 'time.sleep', (['(7)'], {}), '(7)\n', (740, 743), False, 'import time\n'), ((936, 949), 'time.sleep', 'time.sleep', (['(6)'], {}), '(6)\n', (946, 949), False, 'import time\n'), ((1055, 1068), 'time.sleep', 'time.sleep', (['(6)'], {}), '(6)\n', (1065, 1068), False, 'import time\n'), ((1275, 1287), 'chvi.nmt.sent', 'nmt.sent', (['ch'], {}), '(ch)\n', (1283, 1287), False, 'from chvi import nmt\n')]
|
from django.http import JsonResponse, HttpResponse
from django.views import View
from django.utils.decorators import method_decorator
from django.views.decorators.csrf import csrf_exempt
from django.views.generic.edit import BaseDeleteView
from postoffice_django.models import PublishingError
from postoffice_django.serializers import MessagesSerializer
class ListMessagesView(View):
DEFAULT_MAX_RESULTS = 100
def get(self, request, *args, **kwargs):
max_results = self._get_max_results(request)
messages = PublishingError.objects.order_by('created_at')[:max_results]
data = MessagesSerializer().serialize(messages)
return JsonResponse(data, safe=False)
def _get_max_results(self, request):
return int(request.GET.get('limit', self.DEFAULT_MAX_RESULTS))
@method_decorator(csrf_exempt, name='dispatch')
class DeleteMessageView(BaseDeleteView):
queryset = PublishingError.objects.all()
def delete(self, request, *args, **kwargs):
message = self.get_object()
message.delete()
return HttpResponse(status=204)
|
[
"django.utils.decorators.method_decorator",
"django.http.HttpResponse",
"postoffice_django.models.PublishingError.objects.all",
"django.http.JsonResponse",
"postoffice_django.serializers.MessagesSerializer",
"postoffice_django.models.PublishingError.objects.order_by"
] |
[((814, 860), 'django.utils.decorators.method_decorator', 'method_decorator', (['csrf_exempt'], {'name': '"""dispatch"""'}), "(csrf_exempt, name='dispatch')\n", (830, 860), False, 'from django.utils.decorators import method_decorator\n'), ((917, 946), 'postoffice_django.models.PublishingError.objects.all', 'PublishingError.objects.all', ([], {}), '()\n', (944, 946), False, 'from postoffice_django.models import PublishingError\n'), ((667, 697), 'django.http.JsonResponse', 'JsonResponse', (['data'], {'safe': '(False)'}), '(data, safe=False)\n', (679, 697), False, 'from django.http import JsonResponse, HttpResponse\n'), ((1072, 1096), 'django.http.HttpResponse', 'HttpResponse', ([], {'status': '(204)'}), '(status=204)\n', (1084, 1096), False, 'from django.http import JsonResponse, HttpResponse\n'), ((535, 581), 'postoffice_django.models.PublishingError.objects.order_by', 'PublishingError.objects.order_by', (['"""created_at"""'], {}), "('created_at')\n", (567, 581), False, 'from postoffice_django.models import PublishingError\n'), ((611, 631), 'postoffice_django.serializers.MessagesSerializer', 'MessagesSerializer', ([], {}), '()\n', (629, 631), False, 'from postoffice_django.serializers import MessagesSerializer\n')]
|
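Wiring these views into a project is a standard URLconf exercise; a sketch of what that could look like (the module path `postoffice_django.views` and the routes are assumptions, not part of the package):

from django.urls import path

from postoffice_django.views import DeleteMessageView, ListMessagesView

urlpatterns = [
    # GET /messages/?limit=50 -> JSON list of up to 50 publishing errors
    path("messages/", ListMessagesView.as_view(), name="message-list"),
    # DELETE /messages/<pk>/ -> 204 on success
    path("messages/<int:pk>/", DeleteMessageView.as_view(), name="message-delete"),
]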
# %%
from oas_dev.util.imports.get_fld_fixed import get_field_fixed
from oas_dev.util.plot.plot_maps import plot_map_diff, fix_axis4map_plot, plot_map_abs_abs_diff, plot_map, subplots_map, plot_map_diff_2case
from useful_scit.imps import (np, xr, plt, pd)
from oas_dev.util.imports import get_averaged_fields
from IPython.display import clear_output
from useful_scit.imps import *
log.ger.setLevel(log.log.INFO)
# load and autoreload
from IPython import get_ipython
# noinspection PyBroadException
try:
_ipython = get_ipython()
_magic = _ipython.magic
_magic('load_ext autoreload')
_magic('autoreload 2')
except:
pass
# %%
model = 'NorESM'
startyear = '2008-01'
endyear = '2014-12'
p_level=1013.
pmin = 850. # minimum pressure level
avg_over_lev = True  # average over pressure levels instead of using a single level
pressure_adjust = True # Can only be false if avg_over_lev false. Plots particular hybrid sigma lev
if avg_over_lev:
pressure_adjust = True
p_levels = [1013.,900., 800., 700., 600.] # used if not avg
# %%
cases_sec = ['SECTv21_ctrl_koagD']  # other options: 'SECTv21_ctrl', 'SECTv21_ctrl_def'
cases_orig = ['noSECTv21_default_dd', 'noSECTv21_ox_ricc_dd']
cases = cases_sec + cases_orig
# %%
varl = ['ACTNL_incld', 'ACTREL_incld',
'TGCLDCWP',
'TGCLDIWP',
'TGCLDLWP',
'NCFT_Ghan',
'HYGRO01',
'SOA_NAcondTend',
'SO4_NAcondTend',
'cb_SOA_NA',
'cb_SO4_NA',
'HYGRO01',
'cb_SOA_LV',
'cb_H2SO4',
'SO2',
'DMS',
'isoprene',
'monoterp',
'N_AER',
'NCONC01',
'NMR01',
'GR',
'COAGNUCL',
'NUCLRATE',
'FORMRATE',
'H2SO4',
'SOA_LV',
'SOA_SV',
'SOA_NA',
'SO4_NA',
'SOA_A1',
'NCFT_Ghan',
'SFisoprene',
'SFmonoterp',
'SOA_NA_totLossR',
'SOA_NA_lifetime',
'SO4_NA_totLossR',
'SO4_NA_lifetime',
'cb_SOA_NA_OCW',
'cb_SO4_NA_OCW',
'SO4_NA_OCWDDF',
'SO4_NA_OCWSFWET',
'SOA_NA_OCWDDF',
'SOA_NA_OCWSFWET',
'cb_SOA_A1',
'cb_SO4_A1',
'cb_SOA_NA',
'cb_SO4_NA',
'cb_NA',
'SWCF_Ghan',
'LWCF_Ghan',
'AWNC_incld',
'AREL_incld',
'CLDHGH',
'CLDLOW',
'CLDMED',
'CLDTOT',
'CDNUMC',
'DIR_Ghan',
'CDOD550',
'SWDIR_Ghan',
]
varl_sec = [
'nrSOA_SEC_tot',
'nrSO4_SEC_tot',
'nrSEC_tot',
'cb_SOA_SEC01',
'cb_SOA_SEC02',
'cb_SOA_SEC03',
'leaveSecSOA',
'leaveSecH2SO4',
]
# %%
for case in cases:
get_field_fixed(case,varl, startyear, endyear, #raw_data_path=constants.get_input_datapath(),
pressure_adjust=True, model = 'NorESM', history_fld='.h0.', comp='atm', chunks=None)
maps_dic = get_averaged_fields.get_maps_cases(cases,varl,startyear, endyear,
avg_over_lev=avg_over_lev,
pmin=pmin,
pressure_adjust=pressure_adjust, p_level=p_level)
maps_dic = get_averaged_fields.get_maps_cases(cases_sec,varl_sec,startyear, endyear,
avg_over_lev=avg_over_lev,
pmin=pmin,
pressure_adjust=pressure_adjust, p_level=p_level)
for period in ['JJA','DJF']:
maps_dic = get_averaged_fields.get_maps_cases(cases,varl,startyear, endyear,
avg_over_lev=avg_over_lev,
pmin=pmin,
pressure_adjust=pressure_adjust,
p_level=p_level,
time_mask=period)
maps_dic = get_averaged_fields.get_maps_cases(cases_sec,varl_sec,startyear, endyear,
avg_over_lev=avg_over_lev,
pmin=pmin,
pressure_adjust=pressure_adjust, p_level=p_level,
time_mask=period)
|
[
"IPython.get_ipython",
"oas_dev.util.imports.get_fld_fixed.get_field_fixed",
"oas_dev.util.imports.get_averaged_fields.get_maps_cases"
] |
[((2865, 3028), 'oas_dev.util.imports.get_averaged_fields.get_maps_cases', 'get_averaged_fields.get_maps_cases', (['cases', 'varl', 'startyear', 'endyear'], {'avg_over_lev': 'avg_over_lev', 'pmin': 'pmin', 'pressure_adjust': 'pressure_adjust', 'p_level': 'p_level'}), '(cases, varl, startyear, endyear,\n avg_over_lev=avg_over_lev, pmin=pmin, pressure_adjust=pressure_adjust,\n p_level=p_level)\n', (2899, 3028), False, 'from oas_dev.util.imports import get_averaged_fields\n'), ((3168, 3339), 'oas_dev.util.imports.get_averaged_fields.get_maps_cases', 'get_averaged_fields.get_maps_cases', (['cases_sec', 'varl_sec', 'startyear', 'endyear'], {'avg_over_lev': 'avg_over_lev', 'pmin': 'pmin', 'pressure_adjust': 'pressure_adjust', 'p_level': 'p_level'}), '(cases_sec, varl_sec, startyear, endyear,\n avg_over_lev=avg_over_lev, pmin=pmin, pressure_adjust=pressure_adjust,\n p_level=p_level)\n', (3202, 3339), False, 'from oas_dev.util.imports import get_averaged_fields\n'), ((521, 534), 'IPython.get_ipython', 'get_ipython', ([], {}), '()\n', (532, 534), False, 'from IPython import get_ipython\n'), ((2658, 2793), 'oas_dev.util.imports.get_fld_fixed.get_field_fixed', 'get_field_fixed', (['case', 'varl', 'startyear', 'endyear'], {'pressure_adjust': '(True)', 'model': '"""NorESM"""', 'history_fld': '""".h0."""', 'comp': '"""atm"""', 'chunks': 'None'}), "(case, varl, startyear, endyear, pressure_adjust=True, model\n ='NorESM', history_fld='.h0.', comp='atm', chunks=None)\n", (2673, 2793), False, 'from oas_dev.util.imports.get_fld_fixed import get_field_fixed\n'), ((3514, 3695), 'oas_dev.util.imports.get_averaged_fields.get_maps_cases', 'get_averaged_fields.get_maps_cases', (['cases', 'varl', 'startyear', 'endyear'], {'avg_over_lev': 'avg_over_lev', 'pmin': 'pmin', 'pressure_adjust': 'pressure_adjust', 'p_level': 'p_level', 'time_mask': 'period'}), '(cases, varl, startyear, endyear,\n avg_over_lev=avg_over_lev, pmin=pmin, pressure_adjust=pressure_adjust,\n p_level=p_level, time_mask=period)\n', (3548, 3695), False, 'from oas_dev.util.imports import get_averaged_fields\n'), ((3931, 4120), 'oas_dev.util.imports.get_averaged_fields.get_maps_cases', 'get_averaged_fields.get_maps_cases', (['cases_sec', 'varl_sec', 'startyear', 'endyear'], {'avg_over_lev': 'avg_over_lev', 'pmin': 'pmin', 'pressure_adjust': 'pressure_adjust', 'p_level': 'p_level', 'time_mask': 'period'}), '(cases_sec, varl_sec, startyear, endyear,\n avg_over_lev=avg_over_lev, pmin=pmin, pressure_adjust=pressure_adjust,\n p_level=p_level, time_mask=period)\n', (3965, 4120), False, 'from oas_dev.util.imports import get_averaged_fields\n')]
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
###########################################################
# WARNING: Generated code! #
# ************************** #
# Manual changes may get lost if file is generated again. #
# Only code inside the [MANUAL] tags will be kept. #
###########################################################
from flexbe_core import Behavior, Autonomy, OperatableStateMachine, ConcurrencyContainer, PriorityContainer, Logger
from ariac_flexbe_states.message_state import MessageState
from ariac_logistics_flexbe_states.get_material_locations import GetMaterialLocationsState
from ariac_logistics_flexbe_states.get_part_from_products_state import GetPartFromProductsState
from ariac_support_flexbe_states.add_numeric_state import AddNumericState
from ariac_support_flexbe_states.equal_state import EqualState
from ariac_support_flexbe_states.get_item_from_list_state import GetItemFromListState
from unit_1_flexbe_behaviors.pick_part_from_bin_sm import pick_part_from_binSM
from unit_1_flexbe_behaviors.place_part_on_agv_sm import place_part_on_agvSM
# Additional imports can be added inside the following tags
# [MANUAL_IMPORT]
# [/MANUAL_IMPORT]
'''
Created on Sun Apr 19 2020
@author: <NAME>
'''
class get_productsSM(Behavior):
'''
Getting all the products from a product list.
This example is a part of the order example.
'''
def __init__(self):
super(get_productsSM, self).__init__()
self.name = 'get_products'
# parameters of this behavior
# references to used behaviors
self.add_behavior(pick_part_from_binSM, 'pick_part_from_bin')
self.add_behavior(place_part_on_agvSM, 'place_part_on_agv')
# Additional initialization code can be added inside the following tags
# [MANUAL_INIT]
# [/MANUAL_INIT]
# Behavior comments:
def create(self):
# x:719 y:341, x:826 y:25
_state_machine = OperatableStateMachine(outcomes=['finished', 'fail'], input_keys=['Products', 'NumberOfProducts'])
_state_machine.userdata.ProductIterator = 0
_state_machine.userdata.OneValue = 1
_state_machine.userdata.ProductType = ''
_state_machine.userdata.ProductPose = ''
_state_machine.userdata.Products = []
_state_machine.userdata.NumberOfProducts = 0
_state_machine.userdata.MaterialsLocationList = []
_state_machine.userdata.MaterialLocation = ''
_state_machine.userdata.MaterailLocationIndex = 0
_state_machine.userdata.Robot_namespace = ''
# Additional creation code can be added inside the following tags
# [MANUAL_CREATE]
# [/MANUAL_CREATE]
with _state_machine:
# x:356 y:121
OperatableStateMachine.add('GetProduct',
GetPartFromProductsState(),
transitions={'continue': 'ProductTypeMessage', 'invalid_index': 'fail'},
autonomy={'continue': Autonomy.Off, 'invalid_index': Autonomy.Off},
remapping={'products': 'Products', 'index': 'ProductIterator', 'type': 'ProductType', 'pose': 'ProductPose'})
# x:1226 y:120
OperatableStateMachine.add('GerMaterailLocation',
GetItemFromListState(),
transitions={'done': 'MaterailLocationMessage', 'invalid_index': 'fail'},
autonomy={'done': Autonomy.Off, 'invalid_index': Autonomy.Off},
remapping={'list': 'MaterialsLocationList', 'index': 'MaterailLocationIndex', 'item': 'MaterialLocation'})
# x:877 y:120
OperatableStateMachine.add('GetMaterialsLocation',
GetMaterialLocationsState(),
transitions={'continue': 'MaterialsLocationListMessage'},
autonomy={'continue': Autonomy.Off},
remapping={'part': 'ProductType', 'material_locations': 'MaterialsLocationList'})
# x:817 y:258
OperatableStateMachine.add('IncrementProductIterator',
AddNumericState(),
transitions={'done': 'CompareProductIterator'},
autonomy={'done': Autonomy.Off},
remapping={'value_a': 'ProductIterator', 'value_b': 'OneValue', 'result': 'ProductIterator'})
# x:1406 y:124
OperatableStateMachine.add('MaterailLocationMessage',
MessageState(),
transitions={'continue': 'pick_part_from_bin'},
autonomy={'continue': Autonomy.Off},
remapping={'message': 'MaterialLocation'})
# x:1046 y:119
OperatableStateMachine.add('MaterialsLocationListMessage',
MessageState(),
transitions={'continue': 'GerMaterailLocation'},
autonomy={'continue': Autonomy.Off},
remapping={'message': 'MaterialsLocationList'})
# x:728 y:120
OperatableStateMachine.add('ProductPoseMassage',
MessageState(),
transitions={'continue': 'GetMaterialsLocation'},
autonomy={'continue': Autonomy.Off},
remapping={'message': 'ProductPose'})
# x:569 y:121
OperatableStateMachine.add('ProductTypeMessage',
MessageState(),
transitions={'continue': 'ProductPoseMassage'},
autonomy={'continue': Autonomy.Off},
remapping={'message': 'ProductType'})
# x:1223 y:216
OperatableStateMachine.add('pick_part_from_bin',
self.use_behavior(pick_part_from_binSM, 'pick_part_from_bin'),
transitions={'finished': 'place_part_on_agv', 'failed': 'fail'},
autonomy={'finished': Autonomy.Inherit, 'failed': Autonomy.Inherit},
remapping={'part': 'ProductType', 'robot_namespace': 'Robot_namespace', 'part_height_float': 'part_height_float'})
# x:1011 y:247
OperatableStateMachine.add('place_part_on_agv',
self.use_behavior(place_part_on_agvSM, 'place_part_on_agv'),
transitions={'finished': 'IncrementProductIterator', 'failed': 'fail'},
autonomy={'finished': Autonomy.Inherit, 'failed': Autonomy.Inherit},
remapping={'ProductPose': 'ProductPose', 'robot_namespace': 'Robot_namespace', 'part_height_float': 'part_height_float'})
# x:625 y:256
OperatableStateMachine.add('CompareProductIterator',
EqualState(),
transitions={'true': 'finished', 'false': 'GetProduct'},
autonomy={'true': Autonomy.Off, 'false': Autonomy.Off},
remapping={'value_a': 'ProductIterator', 'value_b': 'NumberOfProducts'})
return _state_machine
# Private functions can be added inside the following tags
# [MANUAL_FUNC]
# [/MANUAL_FUNC]
|
[
"ariac_support_flexbe_states.equal_state.EqualState",
"ariac_logistics_flexbe_states.get_part_from_products_state.GetPartFromProductsState",
"ariac_support_flexbe_states.add_numeric_state.AddNumericState",
"ariac_flexbe_states.message_state.MessageState",
"flexbe_core.OperatableStateMachine",
"ariac_support_flexbe_states.get_item_from_list_state.GetItemFromListState",
"ariac_logistics_flexbe_states.get_material_locations.GetMaterialLocationsState"
] |
[((1928, 2031), 'flexbe_core.OperatableStateMachine', 'OperatableStateMachine', ([], {'outcomes': "['finished', 'fail']", 'input_keys': "['Products', 'NumberOfProducts']"}), "(outcomes=['finished', 'fail'], input_keys=[\n 'Products', 'NumberOfProducts'])\n", (1950, 2031), False, 'from flexbe_core import Behavior, Autonomy, OperatableStateMachine, ConcurrencyContainer, PriorityContainer, Logger\n'), ((2694, 2720), 'ariac_logistics_flexbe_states.get_part_from_products_state.GetPartFromProductsState', 'GetPartFromProductsState', ([], {}), '()\n', (2718, 2720), False, 'from ariac_logistics_flexbe_states.get_part_from_products_state import GetPartFromProductsState\n'), ((3085, 3107), 'ariac_support_flexbe_states.get_item_from_list_state.GetItemFromListState', 'GetItemFromListState', ([], {}), '()\n', (3105, 3107), False, 'from ariac_support_flexbe_states.get_item_from_list_state import GetItemFromListState\n'), ((3466, 3493), 'ariac_logistics_flexbe_states.get_material_locations.GetMaterialLocationsState', 'GetMaterialLocationsState', ([], {}), '()\n', (3491, 3493), False, 'from ariac_logistics_flexbe_states.get_material_locations import GetMaterialLocationsState\n'), ((3788, 3805), 'ariac_support_flexbe_states.add_numeric_state.AddNumericState', 'AddNumericState', ([], {}), '()\n', (3803, 3805), False, 'from ariac_support_flexbe_states.add_numeric_state import AddNumericState\n'), ((4098, 4112), 'ariac_flexbe_states.message_state.MessageState', 'MessageState', ([], {}), '()\n', (4110, 4112), False, 'from ariac_flexbe_states.message_state import MessageState\n'), ((4363, 4377), 'ariac_flexbe_states.message_state.MessageState', 'MessageState', ([], {}), '()\n', (4375, 4377), False, 'from ariac_flexbe_states.message_state import MessageState\n'), ((4623, 4637), 'ariac_flexbe_states.message_state.MessageState', 'MessageState', ([], {}), '()\n', (4635, 4637), False, 'from ariac_flexbe_states.message_state import MessageState\n'), ((4874, 4888), 'ariac_flexbe_states.message_state.MessageState', 'MessageState', ([], {}), '()\n', (4886, 4888), False, 'from ariac_flexbe_states.message_state import MessageState\n'), ((5984, 5996), 'ariac_support_flexbe_states.equal_state.EqualState', 'EqualState', ([], {}), '()\n', (5994, 5996), False, 'from ariac_support_flexbe_states.equal_state import EqualState\n')]
|
from conans.model import Generator
import platform
import os
import copy
from conans.errors import ConanException
def get_setenv_variables_commands(deps_env_info, command_set=None):
if command_set is None:
command_set = "SET" if platform.system() == "Windows" else "export"
multiple_to_set, simple_to_set = get_dict_values(deps_env_info)
ret = []
for name, value in multiple_to_set.items():
if platform.system() == "Windows":
ret.append(command_set + ' "' + name + '=' + value + ';%' + name + '%"')
else:
ret.append(command_set + ' ' + name + '=' + value + ':$' + name)
for name, value in simple_to_set.items():
if platform.system() == "Windows":
ret.append(command_set + ' "' + name + '=' + value + '"')
else:
ret.append(command_set + ' ' + name + '=' + value)
return ret
def get_dict_values(deps_env_info):
def adjust_var_name(name):
return "PATH" if name.lower() == "path" else name
multiple_to_set = {}
simple_to_set = {}
for name, value in deps_env_info.vars.items():
name = adjust_var_name(name)
if isinstance(value, list):
# Allow path with spaces in non-windows platforms
if platform.system() != "Windows" and name in ["PATH", "PYTHONPATH"]:
value = ['"%s"' % v for v in value]
multiple_to_set[name] = os.pathsep.join(value).replace("\\", "/")
else:
# It works in windows too using "/" and allows to use MSYS shell
simple_to_set[name] = value.replace("\\", "/")
return multiple_to_set, simple_to_set
class VirtualEnvGenerator(Generator):
@property
def filename(self):
return
@property
def content(self):
multiple_to_set, simple_to_set = get_dict_values(self.deps_env_info)
all_vars = copy.copy(multiple_to_set)
all_vars.update(simple_to_set)
venv_name = os.path.basename(self.conanfile.conanfile_directory)
deactivate_lines = ["@echo off"] if platform.system() == "Windows" else []
for name in all_vars.keys():
old_value = os.environ.get(name, "")
if platform.system() == "Windows":
deactivate_lines.append('SET "%s=%s"' % (name, old_value))
else:
deactivate_lines.append('export %s=%s' % (name, old_value))
if platform.system() == "Windows":
deactivate_lines.append("SET PROMPT=%s" % os.environ.get("PROMPT", ""))
else:
deactivate_lines.append('export PS1="$OLD_PS1"')
activate_lines = ["@echo off"] if platform.system() == "Windows" else []
if platform.system() == "Windows":
activate_lines.append("SET PROMPT=(%s) " % venv_name + "%PROMPT%")
else:
activate_lines.append("export OLD_PS1=\"$PS1\"")
activate_lines.append("export PS1=\"(%s) " % venv_name + "$PS1\"")
activate_lines.extend(get_setenv_variables_commands(self.deps_env_info))
ext = "bat" if platform.system() == "Windows" else "sh"
return {"activate.%s" % ext: os.linesep.join(activate_lines),
"deactivate.%s" % ext: os.linesep.join(deactivate_lines)}
|
[
"os.pathsep.join",
"os.path.basename",
"os.linesep.join",
"copy.copy",
"os.environ.get",
"platform.system"
] |
[((1882, 1908), 'copy.copy', 'copy.copy', (['multiple_to_set'], {}), '(multiple_to_set)\n', (1891, 1908), False, 'import copy\n'), ((1968, 2020), 'os.path.basename', 'os.path.basename', (['self.conanfile.conanfile_directory'], {}), '(self.conanfile.conanfile_directory)\n', (1984, 2020), False, 'import os\n'), ((429, 446), 'platform.system', 'platform.system', ([], {}), '()\n', (444, 446), False, 'import platform\n'), ((694, 711), 'platform.system', 'platform.system', ([], {}), '()\n', (709, 711), False, 'import platform\n'), ((2165, 2189), 'os.environ.get', 'os.environ.get', (['name', '""""""'], {}), "(name, '')\n", (2179, 2189), False, 'import os\n'), ((2417, 2434), 'platform.system', 'platform.system', ([], {}), '()\n', (2432, 2434), False, 'import platform\n'), ((2701, 2718), 'platform.system', 'platform.system', ([], {}), '()\n', (2716, 2718), False, 'import platform\n'), ((3149, 3180), 'os.linesep.join', 'os.linesep.join', (['activate_lines'], {}), '(activate_lines)\n', (3164, 3180), False, 'import os\n'), ((3221, 3254), 'os.linesep.join', 'os.linesep.join', (['deactivate_lines'], {}), '(deactivate_lines)\n', (3236, 3254), False, 'import os\n'), ((243, 260), 'platform.system', 'platform.system', ([], {}), '()\n', (258, 260), False, 'import platform\n'), ((2065, 2082), 'platform.system', 'platform.system', ([], {}), '()\n', (2080, 2082), False, 'import platform\n'), ((2205, 2222), 'platform.system', 'platform.system', ([], {}), '()\n', (2220, 2222), False, 'import platform\n'), ((2651, 2668), 'platform.system', 'platform.system', ([], {}), '()\n', (2666, 2668), False, 'import platform\n'), ((3071, 3088), 'platform.system', 'platform.system', ([], {}), '()\n', (3086, 3088), False, 'import platform\n'), ((1264, 1281), 'platform.system', 'platform.system', ([], {}), '()\n', (1279, 1281), False, 'import platform\n'), ((1419, 1441), 'os.pathsep.join', 'os.pathsep.join', (['value'], {}), '(value)\n', (1434, 1441), False, 'import os\n'), ((2503, 2531), 'os.environ.get', 'os.environ.get', (['"""PROMPT"""', '""""""'], {}), "('PROMPT', '')\n", (2517, 2531), False, 'import os\n')]
|
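On a non-Windows host, `get_setenv_variables_commands` turns each dependency variable into an `export` line, prepending list-valued variables such as PATH to their current value. A quick sketch with a stub in place of `deps_env_info` (the stub class is mine; only its `.vars` mapping is read, and the functions above are assumed to be in scope):

class FakeDepsEnvInfo:
    vars = {"PATH": ["/opt/tool/bin"], "TOOL_HOME": "/opt/tool"}

for line in get_setenv_variables_commands(FakeDepsEnvInfo(), command_set="export"):
    print(line)
# roughly:
#   export PATH="/opt/tool/bin":$PATH
#   export TOOL_HOME=/opt/tool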
from Utils import ResponseManager, LogManager
from Setting import DefineManager
def CheckVersion():
version = DefineManager.VERSION
LogManager.PrintLogMessage("SystemManager", "CheckVersion", "this version is " + version, DefineManager.LOG_LEVEL_INFO)
return ResponseManager.TemplateOfResponse(DefineManager.SIMPLE_RESPONSE, version)
def Echo(text):
LogManager.PrintLogMessage("SystemManager", "Echo", "echo text: " + text, DefineManager.LOG_LEVEL_INFO)
return ResponseManager.TemplateOfResponse(DefineManager.SIMPLE_RESPONSE, text)
|
[
"Utils.LogManager.PrintLogMessage",
"Utils.ResponseManager.TemplateOfResponse"
] |
[((141, 265), 'Utils.LogManager.PrintLogMessage', 'LogManager.PrintLogMessage', (['"""SystemManager"""', '"""CheckVersion"""', "('this version is ' + version)", 'DefineManager.LOG_LEVEL_INFO'], {}), "('SystemManager', 'CheckVersion', \n 'this version is ' + version, DefineManager.LOG_LEVEL_INFO)\n", (167, 265), False, 'from Utils import ResponseManager, LogManager\n'), ((272, 346), 'Utils.ResponseManager.TemplateOfResponse', 'ResponseManager.TemplateOfResponse', (['DefineManager.SIMPLE_RESPONSE', 'version'], {}), '(DefineManager.SIMPLE_RESPONSE, version)\n', (306, 346), False, 'from Utils import ResponseManager, LogManager\n'), ((368, 475), 'Utils.LogManager.PrintLogMessage', 'LogManager.PrintLogMessage', (['"""SystemManager"""', '"""Echo"""', "('echo text: ' + text)", 'DefineManager.LOG_LEVEL_INFO'], {}), "('SystemManager', 'Echo', 'echo text: ' + text,\n DefineManager.LOG_LEVEL_INFO)\n", (394, 475), False, 'from Utils import ResponseManager, LogManager\n'), ((483, 554), 'Utils.ResponseManager.TemplateOfResponse', 'ResponseManager.TemplateOfResponse', (['DefineManager.SIMPLE_RESPONSE', 'text'], {}), '(DefineManager.SIMPLE_RESPONSE, text)\n', (517, 554), False, 'from Utils import ResponseManager, LogManager\n')]
|
from pathlib import Path
import numpy as np
import pytest
from divorce_predictor.data import DataLoader
def test_load_data_successfully():
dataset_path = (
Path(__file__).parent.parent.parent / "ml" / "input" / "data" / "divorce.csv"
)
data_loader = DataLoader(dataset_path=dataset_path, target_column="Class")
X, y = data_loader.load_dataset()
assert isinstance(X, np.ndarray)
assert isinstance(y, np.ndarray)
assert len(y.shape) == 1
def test_load_filenotfound():
dataset_path = Path("bulhufas")
with pytest.raises(FileNotFoundError):
_ = DataLoader(dataset_path=dataset_path, target_column="variety")
|
[
"pytest.raises",
"divorce_predictor.data.DataLoader",
"pathlib.Path"
] |
[((274, 334), 'divorce_predictor.data.DataLoader', 'DataLoader', ([], {'dataset_path': 'dataset_path', 'target_column': '"""Class"""'}), "(dataset_path=dataset_path, target_column='Class')\n", (284, 334), False, 'from divorce_predictor.data import DataLoader\n'), ((528, 544), 'pathlib.Path', 'Path', (['"""bulhufas"""'], {}), "('bulhufas')\n", (532, 544), False, 'from pathlib import Path\n'), ((554, 586), 'pytest.raises', 'pytest.raises', (['FileNotFoundError'], {}), '(FileNotFoundError)\n', (567, 586), False, 'import pytest\n'), ((600, 662), 'divorce_predictor.data.DataLoader', 'DataLoader', ([], {'dataset_path': 'dataset_path', 'target_column': '"""variety"""'}), "(dataset_path=dataset_path, target_column='variety')\n", (610, 662), False, 'from divorce_predictor.data import DataLoader\n'), ((172, 186), 'pathlib.Path', 'Path', (['__file__'], {}), '(__file__)\n', (176, 186), False, 'from pathlib import Path\n')]
|
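The tests pin down the `DataLoader` contract: it is constructed with `dataset_path` and `target_column`, `load_dataset()` returns numpy arrays `(X, y)` with a one-dimensional target, and a missing file surfaces as `FileNotFoundError`. A minimal pandas-based implementation satisfying that contract might look like this (a sketch, not the package's actual code):

from pathlib import Path

import numpy as np
import pandas as pd

class DataLoader:
    def __init__(self, dataset_path: Path, target_column: str):
        self.dataset_path = dataset_path
        self.target_column = target_column

    def load_dataset(self):
        # pandas itself raises FileNotFoundError for a missing path
        df = pd.read_csv(self.dataset_path)
        y = df[self.target_column].to_numpy()                 # shape (n,)
        X = df.drop(columns=[self.target_column]).to_numpy()
        return X, y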
# AUTOGENERATED! DO NOT EDIT! File to edit: nbs/110_models.mWDN.ipynb (unless otherwise specified).
__all__ = ['WaveBlock', 'mWDN']
# Cell
from ..imports import *
from .layers import *
from .InceptionTime import *
from .utils import create_model
# Cell
import pywt
# Cell
# This is an unofficial PyTorch implementation by <NAME> - <EMAIL> based on:
# <NAME>., <NAME>., <NAME>., & <NAME>. (2018, July). Multilevel wavelet decomposition network for interpretable time series analysis. In Proceedings of the 24th ACM SIGKDD International Conference on Knowledge Discovery & Data Mining (pp. 2437-2446).
# No official implementation found
class WaveBlock(Module):
def __init__(self, c_in, c_out, seq_len, wavelet=None):
if wavelet is None:
self.h_filter = [-0.2304,0.7148,-0.6309,-0.028,0.187,0.0308,-0.0329,-0.0106]
self.l_filter = [-0.0106,0.0329,0.0308,-0.187,-0.028,0.6309,0.7148,0.2304]
else:
w = pywt.Wavelet(wavelet)
self.h_filter = w.dec_hi
self.l_filter = w.dec_lo
self.mWDN_H = nn.Linear(seq_len,seq_len)
self.mWDN_L = nn.Linear(seq_len,seq_len)
self.mWDN_H.weight = nn.Parameter(self.create_W(seq_len,False))
self.mWDN_L.weight = nn.Parameter(self.create_W(seq_len,True))
self.sigmoid = nn.Sigmoid()
self.pool = nn.AvgPool1d(2)
def forward(self,x):
hp_1 = self.sigmoid(self.mWDN_H(x))
lp_1 = self.sigmoid(self.mWDN_L(x))
hp_out = self.pool(hp_1)
lp_out = self.pool(lp_1)
all_out = torch.cat((hp_out, lp_out), dim=-1)
return lp_out, all_out
def create_W(self, P, is_l, is_comp=False):
if is_l: filter_list = self.l_filter
else: filter_list = self.h_filter
list_len = len(filter_list)
max_epsilon = np.min(np.abs(filter_list))
if is_comp: weight_np = np.zeros((P, P))
else: weight_np = np.random.randn(P, P) * 0.1 * max_epsilon
for i in range(0, P):
filter_index = 0
for j in range(i, P):
if filter_index < len(filter_list):
weight_np[i][j] = filter_list[filter_index]
filter_index += 1
return tensor(weight_np)
class mWDN(Module):
def __init__(self, c_in, c_out, seq_len, levels=3, wavelet=None, arch=InceptionTime, arch_kwargs={}):
self.levels=levels
self.blocks = nn.ModuleList()
for i in range(levels): self.blocks.append(WaveBlock(c_in, c_out, seq_len // 2 ** i, wavelet=wavelet))
self.classifier = create_model(arch, c_in, c_out, seq_len=seq_len, **arch_kwargs)
def forward(self,x):
for i in range(self.levels):
x, out_ = self.blocks[i](x)
            out = out_ if i == 0 else torch.cat((out, out_), dim=-1)
out = self.classifier(out)
return out
|
[
"pywt.Wavelet"
] |
[((961, 982), 'pywt.Wavelet', 'pywt.Wavelet', (['wavelet'], {}), '(wavelet)\n', (973, 982), False, 'import pywt\n')]
|
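Each WaveBlock above pushes the series through fixed high- and low-pass filters baked into the weights of two linear layers, halves both branches with average pooling, and hands the low-pass half to the next level, while mWDN concatenates every level's output along the time axis before the InceptionTime classifier. A shape-check sketch (batch, channel and length values are arbitrary, and the import path is a guess at where the notebook exports to):

import torch
from tsai.models.mWDN import mWDN  # assumed export location

bs, c_in, seq_len, c_out = 8, 3, 96, 2
xb = torch.randn(bs, c_in, seq_len)
model = mWDN(c_in, c_out, seq_len, levels=3)
print(model(xb).shape)  # expected: torch.Size([8, 2])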
#!/usr/bin/python
# Copyright 2018 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Generates a .cc file that registers the default EME implementations.
This defines the following function:
void shaka::RegisterDefaultKeySystems();
"""
from __future__ import print_function
import argparse
import json
import os
import sys
import embed_utils
def _GetHeaders(plugins):
"""Returns a set of headers from the given plugins."""
headers = set()
for plugin in plugins:
headers.update(i['header'] for i in plugin['implementations'])
return headers
def _ParsePlugin(file_path):
"""Reads the given file and parses it into an object."""
with open(file_path, 'r') as f:
return json.load(f)
def GenerateFile(plugins, output):
"""Generates a C++ file which registers the given implementations."""
writer = embed_utils.CodeWriter(output)
writer.Write('#include <atomic>')
writer.Write()
writer.Write('#include "shaka/eme/implementation_registry.h"')
writer.Write()
for header in sorted(_GetHeaders(plugins)):
writer.Write('#include "%s"', header)
writer.Write()
with writer.Namespace('shaka'):
writer.Write('void RegisterDefaultKeySystems();')
writer.Write()
with writer.Block('void RegisterDefaultKeySystems()'):
# This ensures the key systems are registered exactly once, even if this
# is called from multiple threads. The compare_exchange_strong will
# atomically check if it is false and replace with true on only one
# thread.
writer.Write('static std::atomic<bool> called{false};')
writer.Write('bool expected = false;')
with writer.Block('if (called.compare_exchange_strong(expected, true))'):
for plugin in plugins:
for impl in plugin['implementations']:
writer.Write('eme::ImplementationRegistry::AddImplementation(')
writer.Write(' "%s", new %s);', impl['key_system'],
impl['factory_type'])
def main(args):
parser = argparse.ArgumentParser(description=__doc__)
parser.add_argument('--output', dest='output',
help='The filename to output to')
parser.add_argument('files', nargs='+',
help='The JSON files that define the implementations')
ns = parser.parse_args(args)
  plugins = list(map(_ParsePlugin, ns.files))  # materialize: a lazy map would be exhausted by _GetHeaders
with open(ns.output, 'w') as output:
GenerateFile(plugins, output)
return 0
if __name__ == '__main__':
sys.exit(main(sys.argv[1:]))
|
[
"embed_utils.CodeWriter",
"json.load",
"argparse.ArgumentParser"
] |
[((1340, 1370), 'embed_utils.CodeWriter', 'embed_utils.CodeWriter', (['output'], {}), '(output)\n', (1362, 1370), False, 'import embed_utils\n'), ((2509, 2553), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'description': '__doc__'}), '(description=__doc__)\n', (2532, 2553), False, 'import argparse\n'), ((1207, 1219), 'json.load', 'json.load', (['f'], {}), '(f)\n', (1216, 1219), False, 'import json\n')]
|
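Each input JSON file must provide an `implementations` list whose entries carry `header`, `key_system` and `factory_type`; that is all `_GetHeaders` and `GenerateFile` read. A sketch of such a file and the matching invocation (the file names, the factory type and the script name are illustrative):

import json

plugin = {
    "implementations": [
        {
            "header": "src/eme/clearkey_implementation.h",  # illustrative path
            "key_system": "org.w3.clearkey",
            "factory_type": "shaka::eme::ClearKeyFactory",  # illustrative type
        }
    ]
}
with open("clearkey.json", "w") as f:
    json.dump(plugin, f)
# then: python generate_eme_plugins.py --output register_key_systems.cc clearkey.json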
import numpy as np
import pandas as pd
import scipy.sparse as sp
from sklearn.metrics.pairwise import cosine_similarity
class Evaluator():
def __init__(self, k=10, training_set=None, testing_set=None, book_sim=None, novelty_scores=None):
self.k = k
self.book_sim = book_sim
self.novelty_scores = novelty_scores
if training_set is not None:
self.training_set = training_set
self.num_users = len(self.training_set.user_id.unique())
self.num_books = len(self.training_set.book_id.unique())
if testing_set is not None:
self.testing_set = testing_set
self.testing_idx = {}
for user_id in testing_set.user_id.unique():
self.testing_idx[user_id] = testing_set[testing_set.user_id==user_id].book_id.values
self.result = {}
def _average_precision(self, pred, truth):
in_arr = np.in1d(pred, truth)
score = 0.0
num_hits = 0.0
for idx, correct in enumerate(in_arr):
if correct:
num_hits += 1
score += num_hits / (idx + 1)
return score / min(len(truth), self.k)
def _novelty_score(self, pred):
# Recommend the top 10 books in novelty score results in ~10.4
# Crop the score to 10.0 since it won't change anything and make the score range nicer
return min(self.novelty_scores.loc[pred].novelty_score.mean(), 10.0)
def _diversity_score(self, pred):
matrix = self.book_sim.loc[pred, pred].values
ils = matrix[np.triu_indices(len(pred), k=1)].mean()
return (1 - ils) * 10
def _personalization_score(self, preds, user_ids, book_ids):
df = pd.DataFrame(
data=np.zeros([len(user_ids), len(book_ids)]),
index=user_ids,
columns=book_ids
)
for user_id in user_ids:
df.loc[user_id, preds[user_id]] = 1
matrix = sp.csr_matrix(df.values)
#calculate similarity for every user's recommendation list
similarity = cosine_similarity(X=matrix, dense_output=False)
#get indicies for upper right triangle w/o diagonal
upper_right = np.triu_indices(similarity.shape[0], k=1)
#calculate average similarity
personalization = np.mean(similarity[upper_right])
return (1 - personalization) * 10
def evaluate(self, model):
model.fit(self.training_set)
preds = model.all_recommendation()
user_ids = list(preds.keys())
book_ids = np.unique(np.array(list(preds.values())).flatten())
ap_sum = 0
nov_score_sum = 0
div_score_sum = 0
for user_id in preds.keys():
pred = preds[user_id]
truth = self.testing_idx[user_id]
ap_sum += self._average_precision(pred, truth)
nov_score_sum += self._novelty_score(pred)
div_score_sum += self._diversity_score(pred)
self.result[model.name] = {}
self.result[model.name]['Mean Average Precision'] = "%.2f%%" % (ap_sum / self.num_users * 100)
self.result[model.name]['Coverage'] = "%.2f%%" % (len(book_ids) / self.num_books * 100)
self.result[model.name]['Novelty Score'] = "%.2f" % (nov_score_sum / self.num_users)
self.result[model.name]['Diversity Score'] = "%.2f" % (div_score_sum / self.num_users)
self.result[model.name]['Personalization Score'] = "%.2f" % self._personalization_score(preds, user_ids, book_ids)
def print_result(self):
print(pd.DataFrame(self.result).loc[['Mean Average Precision', 'Coverage', 'Novelty Score', 'Diversity Score', 'Personalization Score']])
|
[
"pandas.DataFrame",
"sklearn.metrics.pairwise.cosine_similarity",
"numpy.triu_indices",
"numpy.mean",
"scipy.sparse.csr_matrix",
"numpy.in1d"
] |
[((925, 945), 'numpy.in1d', 'np.in1d', (['pred', 'truth'], {}), '(pred, truth)\n', (932, 945), True, 'import numpy as np\n'), ((1977, 2001), 'scipy.sparse.csr_matrix', 'sp.csr_matrix', (['df.values'], {}), '(df.values)\n', (1990, 2001), True, 'import scipy.sparse as sp\n'), ((2091, 2138), 'sklearn.metrics.pairwise.cosine_similarity', 'cosine_similarity', ([], {'X': 'matrix', 'dense_output': '(False)'}), '(X=matrix, dense_output=False)\n', (2108, 2138), False, 'from sklearn.metrics.pairwise import cosine_similarity\n'), ((2222, 2263), 'numpy.triu_indices', 'np.triu_indices', (['similarity.shape[0]'], {'k': '(1)'}), '(similarity.shape[0], k=1)\n', (2237, 2263), True, 'import numpy as np\n'), ((2329, 2361), 'numpy.mean', 'np.mean', (['similarity[upper_right]'], {}), '(similarity[upper_right])\n', (2336, 2361), True, 'import numpy as np\n'), ((3604, 3629), 'pandas.DataFrame', 'pd.DataFrame', (['self.result'], {}), '(self.result)\n', (3616, 3629), True, 'import pandas as pd\n')]
|
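`_average_precision` above is the usual AP@k: walk the ranked predictions, add precision-at-rank for every hit, and normalise by `min(len(truth), k)`. A worked miniature, independent of the class (the item ids are invented):

import numpy as np

pred = np.array([7, 3, 9, 1])        # ranked recommendations, k = 4
truth = np.array([3, 1, 5])         # held-out relevant items
hits = np.in1d(pred, truth)          # [False, True, False, True]
score, num_hits = 0.0, 0.0
for idx, correct in enumerate(hits):
    if correct:
        num_hits += 1
        score += num_hits / (idx + 1)  # 1/2 at rank 2, then 2/4 at rank 4
ap = score / min(len(truth), 4)        # (0.5 + 0.5) / 3
print(round(ap, 4))                    # 0.3333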
#
# Copyright 2015 Quantopian, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from collections import OrderedDict
from ctypes import (
Structure,
c_ubyte,
c_uint,
c_ulong,
c_ulonglong,
c_ushort,
sizeof,
)
import numpy as np
import pandas as pd
from six.moves import range
_inttypes_map = OrderedDict(sorted([
(sizeof(t) - 1, t) for t in {
c_ubyte,
c_uint,
c_ulong,
c_ulonglong,
c_ushort
}
]))
_inttypes = list(
pd.Series(_inttypes_map).reindex(
range(max(_inttypes_map.keys())),
method='bfill',
),
)
def enum(option, *options):
"""
Construct a new enum object.
Parameters
----------
*options : iterable of str
The names of the fields for the enum.
Returns
-------
enum
A new enum collection.
Examples
--------
>>> e = enum('a', 'b', 'c')
>>> e
<enum: ('a', 'b', 'c')>
>>> e.a
0
>>> e.b
1
>>> e.a in e
True
>>> tuple(e)
(0, 1, 2)
Notes
-----
Identity checking is not guaranteed to work with enum members, instead
equality checks should be used. From CPython's documentation:
"The current implementation keeps an array of integer objects for all
integers between -5 and 256, when you create an int in that range you
actually just get back a reference to the existing object. So it should be
possible to change the value of 1. I suspect the behaviour of Python in
this case is undefined. :-)"
"""
options = (option,) + options
rangeob = range(len(options))
try:
inttype = _inttypes[int(np.log2(len(options) - 1)) // 8]
except IndexError:
raise OverflowError(
'Cannot store enums with more than sys.maxsize elements, got %d' %
len(options),
)
class _enum(Structure):
_fields_ = [(o, inttype) for o in options]
def __iter__(self):
return iter(rangeob)
def __contains__(self, value):
return 0 <= value < len(options)
def __repr__(self):
return '<enum: %s>' % (
('%d fields' % len(options))
if len(options) > 10 else
repr(options)
)
return _enum(*rangeob)
|
[
"ctypes.sizeof",
"pandas.Series"
] |
[((999, 1023), 'pandas.Series', 'pd.Series', (['_inttypes_map'], {}), '(_inttypes_map)\n', (1008, 1023), True, 'import pandas as pd\n'), ((850, 859), 'ctypes.sizeof', 'sizeof', (['t'], {}), '(t)\n', (856, 859), False, 'from ctypes import Structure, c_ubyte, c_uint, c_ulong, c_ulonglong, c_ushort, sizeof\n')]
|
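The width selection in `enum` above works in whole bytes: `int(np.log2(len(options) - 1)) // 8` stays 0 while the member count fits in one byte and steps up at 257 and again at 65537 members, and the bfill-reindexed `_inttypes` list maps each step to the smallest ctypes unsigned type that fits. A quick check of the arithmetic:

import numpy as np

for n in (2, 256, 257, 65537):
    print(n, int(np.log2(n - 1)) // 8)
# 2 -> 0, 256 -> 0, 257 -> 1, 65537 -> 2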
# -*- coding: utf-8 -*-
# Copyright (c) 2018, Frappe Technologies Pvt. Ltd. and Contributors
# See license.txt
from __future__ import unicode_literals
import frappe
import unittest
class TestCashFlowMapping(unittest.TestCase):
def setUp(self):
if frappe.db.exists("Cash Flow Mapping", "Test Mapping"):
			frappe.delete_doc('Cash Flow Mapping', 'Test Mapping')
def tearDown(self):
frappe.delete_doc('Cash Flow Mapping', 'Test Mapping')
def test_multiple_selections_not_allowed(self):
doc = frappe.new_doc('Cash Flow Mapping')
doc.mapping_name = 'Test Mapping'
doc.label = 'Test label'
doc.append(
'accounts',
{'account': 'Accounts Receivable - _TC'}
)
doc.is_working_capital = 1
doc.is_finance_cost = 1
self.assertRaises(frappe.ValidationError, doc.insert)
doc.is_finance_cost = 0
doc.insert()
|
[
"frappe.delete_doc",
"frappe.db.exists",
"frappe.new_doc"
] |
[((253, 306), 'frappe.db.exists', 'frappe.db.exists', (['"Cash Flow Mapping"', '"Test Mapping"'], {}), "('Cash Flow Mapping', 'Test Mapping')\n", (269, 306), False, 'import frappe\n'), ((391, 445), 'frappe.delete_doc', 'frappe.delete_doc', (['"Cash Flow Mapping"', '"Test Mapping"'], {}), "('Cash Flow Mapping', 'Test Mapping')\n", (408, 445), False, 'import frappe\n'), ((504, 539), 'frappe.new_doc', 'frappe.new_doc', (['"Cash Flow Mapping"'], {}), "('Cash Flow Mapping')\n", (518, 539), False, 'import frappe\n'), ((311, 366), 'frappe.delete_doc', 'frappe.delete_doc', (['"Cash Flow Mapping"', '"Test Mapping"'], {}), "('Cash Flow Mapping', 'Test Mapping')\n", (328, 366), False, 'import frappe\n')]
|
# -*- coding: utf-8 -*-
#
# Copyright (C) 2019 CERN.
#
# invenio-app-ils is free software; you can redistribute it and/or modify it
# under the terms of the MIT License; see LICENSE file for more details.
"""Invenio App ILS Records views."""
from __future__ import absolute_import, print_function
from flask import Blueprint, abort, current_app, request
from invenio_db import db
from invenio_records_rest.utils import obj_or_import_string
from invenio_records_rest.views import pass_record
from invenio_rest import ContentNegotiatedMethodView
from invenio_app_ils.documents.api import DOCUMENT_PID_TYPE
from invenio_app_ils.errors import RecordRelationsError
from invenio_app_ils.permissions import need_permissions
from invenio_app_ils.pidstore.pids import SERIES_PID_TYPE
from invenio_app_ils.records.api import IlsRecord
from invenio_app_ils.records_relations.indexer import RecordRelationIndexer
from invenio_app_ils.relations.api import Relation
from invenio_app_ils.records_relations.api import ( # isort:skip
RecordRelationsParentChild,
RecordRelationsSiblings,
)
def create_relations_blueprint(app):
"""Add relations views to the blueprint."""
def _add_resource_view(blueprint, pid_type, view_class):
"""Add a resource view for a rest endpoint."""
endpoints = app.config.get("RECORDS_REST_ENDPOINTS", [])
options = endpoints.get(pid_type, {})
default_media_type = options.get("default_media_type", "")
rec_serializers = options.get("record_serializers", {})
serializers = {
mime: obj_or_import_string(func)
for mime, func in rec_serializers.items()
}
record_relations = view_class.as_view(
view_class.view_name.format(pid_type),
serializers=serializers,
default_media_type=default_media_type,
)
blueprint.add_url_rule(
"{0}/relations".format(options["item_route"]),
view_func=record_relations,
methods=["POST", "DELETE"],
)
bp = Blueprint("invenio_app_ils_relations", __name__, url_prefix="")
_add_resource_view(bp, DOCUMENT_PID_TYPE, RecordRelationsResource)
_add_resource_view(bp, SERIES_PID_TYPE, RecordRelationsResource)
return bp
class RecordRelationsResource(ContentNegotiatedMethodView):
"""Relations views for a record."""
view_name = "{0}_relations"
def _get_record(self, record, pid, pid_type):
"""Return record if same PID or fetch the record."""
if record.pid == pid and record._pid_type == pid_type:
rec = record
else:
rec = IlsRecord.get_record_by_pid(pid, pid_type=pid_type)
return rec
def _validate_parent_child_creation_payload(self, payload):
"""Validate the payload when creating a new parent-child relation."""
try:
parent_pid = payload.pop("parent_pid")
parent_pid_type = payload.pop("parent_pid_type")
child_pid = payload.pop("child_pid")
child_pid_type = payload.pop("child_pid_type")
except KeyError as key:
raise RecordRelationsError(
"The `{}` is a required field".format(key)
)
return parent_pid, parent_pid_type, child_pid, child_pid_type, payload
def _create_parent_child_relation(self, record, relation_type, payload):
"""Create a Parent-Child relation.
Expected payload:
{
parent_pid: <pid_value>,
parent_pid_type: <pid_type>,
child_pid: <pid_value>,
child_pid_type: <pid_type>,
relation_type: "<Relation name>",
[volume: "<vol name>"]
}
"""
parent_pid, parent_pid_type, child_pid, child_pid_type, metadata = self._validate_parent_child_creation_payload(
payload
)
# fetch parent and child. The passed record should be one of the two
parent = self._get_record(record, parent_pid, parent_pid_type)
child = self._get_record(record, child_pid, child_pid_type)
rr = RecordRelationsParentChild()
modified_record = rr.add(
parent=parent, child=child, relation_type=relation_type, **metadata
)
return modified_record, parent, child
def _delete_parent_child_relation(self, record, relation_type, payload):
"""Delete a Parent-Child relation.
Expected payload:
{
parent_pid: <pid_value>,
parent_pid_type: <pid_type>,
child_pid: <pid_value>,
child_pid_type: <pid_type>,
relation_type: "<Relation name>"
}
"""
parent_pid, parent_pid_type, child_pid, child_pid_type, _ = self._validate_parent_child_creation_payload(
payload
)
# fetch parent and child. The passed record should be one of the two
parent = self._get_record(record, parent_pid, parent_pid_type)
child = self._get_record(record, child_pid, child_pid_type)
rr = RecordRelationsParentChild()
modified_record = rr.remove(
parent=parent, child=child, relation_type=relation_type
)
return modified_record, parent, child
def _validate_siblings_creation_payload(self, payload):
"""Validate the payload when creating a new siblings relation."""
try:
pid = payload.pop("pid")
pid_type = payload.pop("pid_type")
except KeyError as key:
raise RecordRelationsError(
"The `{}` is a required field".format(key)
)
return pid, pid_type, payload
def _create_sibling_relation(self, record, relation_type, payload):
"""Create a Siblings relation from current record to the given PID.
Expected payload:
{
pid: <pid_value>,
pid_type: <pid_type>,
relation_type: "<Relation name>",
[note: "<note>"]
}
"""
pid, pid_type, metadata = self._validate_siblings_creation_payload(
payload
)
if pid == record["pid"] and pid_type == record._pid_type:
raise RecordRelationsError(
"Cannot create a relation for PID `{}` with itself".format(pid)
)
second = IlsRecord.get_record_by_pid(pid, pid_type=pid_type)
rr = RecordRelationsSiblings()
modified_record = rr.add(
first=record,
second=second,
relation_type=relation_type,
**metadata
)
return modified_record, record, second
def _delete_sibling_relation(self, record, relation_type, payload):
"""Delete a Siblings relation from current record to the given PID.
Expected payload:
{
pid: <pid_value>,
pid_type: <pid_type>,
relation_type: "<Relation name>"
}
"""
pid, pid_type, metadata = self._validate_siblings_creation_payload(
payload
)
if pid == record["pid"] and pid_type == record._pid_type:
raise RecordRelationsError(
"Cannot create a relation for PID `{}` with itself".format(pid)
)
second = IlsRecord.get_record_by_pid(pid, pid_type=pid_type)
rr = RecordRelationsSiblings()
modified_record, _ = rr.remove(
first=record, second=second, relation_type=relation_type
)
return modified_record, record, second
@pass_record
@need_permissions("relations-create")
def post(self, record, **kwargs):
"""Create a new relation."""
def create(payload):
try:
relation_type = payload.pop("relation_type")
except KeyError as key:
return abort(400, "The `{}` is a required field".format(key))
rt = Relation.get_relation_by_name(relation_type)
if rt in current_app.config["PARENT_CHILD_RELATION_TYPES"]:
modified, first, second = self._create_parent_child_relation(
record, rt, payload
)
elif rt in current_app.config["SIBLINGS_RELATION_TYPES"]:
modified, first, second = self._create_sibling_relation(
record, rt, payload
)
else:
raise RecordRelationsError(
"Invalid relation type `{}`".format(rt.name)
)
db.session.commit()
records_to_index.append(first)
records_to_index.append(second)
# if the record is the modified, return the modified version
if (
modified.pid == record.pid
and modified._pid_type == record._pid_type
):
return modified
return record
records_to_index = []
actions = request.get_json()
if not isinstance(actions, list):
actions = [actions]
for action in actions:
record = create(action)
# Index both parent/child (or first/second)
RecordRelationIndexer().index(record, *records_to_index)
return self.make_response(record.pid, record, 201)
@pass_record
@need_permissions("relations-delete")
def delete(self, record, **kwargs):
"""Delete an existing relation."""
def delete(payload):
try:
relation_type = payload.pop("relation_type")
except KeyError as key:
return abort(400, "The `{}` is a required field".format(key))
rt = Relation.get_relation_by_name(relation_type)
if rt in current_app.config["PARENT_CHILD_RELATION_TYPES"]:
modified, first, second = self._delete_parent_child_relation(
record, rt, payload
)
elif rt in current_app.config["SIBLINGS_RELATION_TYPES"]:
modified, first, second = self._delete_sibling_relation(
record, rt, payload
)
else:
raise RecordRelationsError(
"Invalid relation type `{}`".format(rt.name)
)
db.session.commit()
records_to_index.append(first)
records_to_index.append(second)
# if the record is the modified, return the modified version
if (
modified.pid == record.pid
and modified._pid_type == record._pid_type
):
return modified
return record
records_to_index = []
actions = request.get_json()
if not isinstance(actions, list):
actions = [actions]
for action in actions:
record = delete(action)
# Index both parent/child (or first/second)
RecordRelationIndexer().index(record, *records_to_index)
return self.make_response(record.pid, record, 200)
|
[
"flask.Blueprint",
"invenio_records_rest.utils.obj_or_import_string",
"invenio_app_ils.relations.api.Relation.get_relation_by_name",
"invenio_app_ils.records_relations.api.RecordRelationsSiblings",
"invenio_app_ils.records_relations.api.RecordRelationsParentChild",
"invenio_app_ils.permissions.need_permissions",
"invenio_db.db.session.commit",
"invenio_app_ils.records.api.IlsRecord.get_record_by_pid",
"invenio_app_ils.records_relations.indexer.RecordRelationIndexer",
"flask.request.get_json"
] |
[((2051, 2114), 'flask.Blueprint', 'Blueprint', (['"""invenio_app_ils_relations"""', '__name__'], {'url_prefix': '""""""'}), "('invenio_app_ils_relations', __name__, url_prefix='')\n", (2060, 2114), False, 'from flask import Blueprint, abort, current_app, request\n'), ((7669, 7705), 'invenio_app_ils.permissions.need_permissions', 'need_permissions', (['"""relations-create"""'], {}), "('relations-create')\n", (7685, 7705), False, 'from invenio_app_ils.permissions import need_permissions\n'), ((9418, 9454), 'invenio_app_ils.permissions.need_permissions', 'need_permissions', (['"""relations-delete"""'], {}), "('relations-delete')\n", (9434, 9454), False, 'from invenio_app_ils.permissions import need_permissions\n'), ((4140, 4168), 'invenio_app_ils.records_relations.api.RecordRelationsParentChild', 'RecordRelationsParentChild', ([], {}), '()\n', (4166, 4168), False, 'from invenio_app_ils.records_relations.api import RecordRelationsParentChild, RecordRelationsSiblings\n'), ((5122, 5150), 'invenio_app_ils.records_relations.api.RecordRelationsParentChild', 'RecordRelationsParentChild', ([], {}), '()\n', (5148, 5150), False, 'from invenio_app_ils.records_relations.api import RecordRelationsParentChild, RecordRelationsSiblings\n'), ((6425, 6476), 'invenio_app_ils.records.api.IlsRecord.get_record_by_pid', 'IlsRecord.get_record_by_pid', (['pid'], {'pid_type': 'pid_type'}), '(pid, pid_type=pid_type)\n', (6452, 6476), False, 'from invenio_app_ils.records.api import IlsRecord\n'), ((6491, 6516), 'invenio_app_ils.records_relations.api.RecordRelationsSiblings', 'RecordRelationsSiblings', ([], {}), '()\n', (6514, 6516), False, 'from invenio_app_ils.records_relations.api import RecordRelationsParentChild, RecordRelationsSiblings\n'), ((7388, 7439), 'invenio_app_ils.records.api.IlsRecord.get_record_by_pid', 'IlsRecord.get_record_by_pid', (['pid'], {'pid_type': 'pid_type'}), '(pid, pid_type=pid_type)\n', (7415, 7439), False, 'from invenio_app_ils.records.api import IlsRecord\n'), ((7454, 7479), 'invenio_app_ils.records_relations.api.RecordRelationsSiblings', 'RecordRelationsSiblings', ([], {}), '()\n', (7477, 7479), False, 'from invenio_app_ils.records_relations.api import RecordRelationsParentChild, RecordRelationsSiblings\n'), ((9056, 9074), 'flask.request.get_json', 'request.get_json', ([], {}), '()\n', (9072, 9074), False, 'from flask import Blueprint, abort, current_app, request\n'), ((10813, 10831), 'flask.request.get_json', 'request.get_json', ([], {}), '()\n', (10829, 10831), False, 'from flask import Blueprint, abort, current_app, request\n'), ((1573, 1599), 'invenio_records_rest.utils.obj_or_import_string', 'obj_or_import_string', (['func'], {}), '(func)\n', (1593, 1599), False, 'from invenio_records_rest.utils import obj_or_import_string\n'), ((2638, 2689), 'invenio_app_ils.records.api.IlsRecord.get_record_by_pid', 'IlsRecord.get_record_by_pid', (['pid'], {'pid_type': 'pid_type'}), '(pid, pid_type=pid_type)\n', (2665, 2689), False, 'from invenio_app_ils.records.api import IlsRecord\n'), ((8020, 8064), 'invenio_app_ils.relations.api.Relation.get_relation_by_name', 'Relation.get_relation_by_name', (['relation_type'], {}), '(relation_type)\n', (8049, 8064), False, 'from invenio_app_ils.relations.api import Relation\n'), ((8633, 8652), 'invenio_db.db.session.commit', 'db.session.commit', ([], {}), '()\n', (8650, 8652), False, 'from invenio_db import db\n'), ((9777, 9821), 'invenio_app_ils.relations.api.Relation.get_relation_by_name', 'Relation.get_relation_by_name', (['relation_type'], {}), '(relation_type)\n', (9806, 9821), False, 'from invenio_app_ils.relations.api import Relation\n'), ((10390, 10409), 'invenio_db.db.session.commit', 'db.session.commit', ([], {}), '()\n', (10407, 10409), False, 'from invenio_db import db\n'), ((9278, 9301), 'invenio_app_ils.records_relations.indexer.RecordRelationIndexer', 'RecordRelationIndexer', ([], {}), '()\n', (9299, 9301), False, 'from invenio_app_ils.records_relations.indexer import RecordRelationIndexer\n'), ((11035, 11058), 'invenio_app_ils.records_relations.indexer.RecordRelationIndexer', 'RecordRelationIndexer', ([], {}), '()\n', (11056, 11058), False, 'from invenio_app_ils.records_relations.indexer import RecordRelationIndexer\n')]
|
from tkinter import *
from tkinter import ttk
def DECABIT_FRAME(master=None):
s = ttk.Style(master)
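    # 'awdark' is provided by the third-party awthemes Tcl package; the theme
    # must already be loaded into the interpreter or theme_use() raises TclError.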
s.theme_use('awdark')
|
[
"tkinter.ttk.Style"
] |
[((90, 107), 'tkinter.ttk.Style', 'ttk.Style', (['master'], {}), '(master)\n', (99, 107), False, 'from tkinter import ttk\n')]
|
import bpy
from bpy.props import (StringProperty,
BoolProperty,
CollectionProperty,
IntProperty,
FloatProperty,
PointerProperty
)
from .shared_operators import UITools
from ...library.source1.mdl.structs.flex import FlexController
def update_max_min(self: 'SourceIO_PG_FlexController', _):
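    # Clamp the active slider value(s) to [value_min, value_max]. Writing via
    # item access (self['...']) bypasses this update callback, avoiding recursion.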
if self.stereo:
if self['valuen_left'] >= self.value_max:
self['valuen_left'] = self.value_max
if self['valuen_left'] <= self.value_min:
self['valuen_left'] = self.value_min
if self['valuen_right'] >= self.value_max:
self['valuen_right'] = self.value_max
if self['valuen_right'] <= self.value_min:
self['valuen_right'] = self.value_min
else:
if self['valuen'] >= self.value_max:
self['valuen'] = self.value_max
if self['valuen'] <= self.value_min:
self['valuen'] = self.value_min
# noinspection PyPep8Naming
class SourceIO_PG_FlexController(bpy.types.PropertyGroup):
name: StringProperty()
stereo: BoolProperty(name="stereo")
value_max: FloatProperty(name='max')
value_min: FloatProperty(name='min')
mode: IntProperty(name='mode')
valuen: FloatProperty(name="value", min=-100.0, max=100.0, update=update_max_min)
valuezo: FloatProperty(name="value", min=0.0, max=1.0)
valuenoo: FloatProperty(name="value", min=-1.0, max=1.0)
valuenoz: FloatProperty(name="value", min=-1.0, max=0.0)
valuen_left: FloatProperty(name="value_left", min=-100.0, max=100.0, update=update_max_min)
valuezo_left: FloatProperty(name="value_left", min=0.0, max=1.0)
valuenoo_left: FloatProperty(name="value_left", min=-1.0, max=1.0)
valuenoz_left: FloatProperty(name="value_left", min=-1.0, max=0.0)
valuen_right: FloatProperty(name="value_right", min=-100.0, max=100.0, update=update_max_min)
valuezo_right: FloatProperty(name="value_right", min=0.0, max=1.0)
valuenoo_right: FloatProperty(name="value_right", min=-1.0, max=1.0)
valuenoz_right: FloatProperty(name="value_right", min=-1.0, max=0.0)
def set_from_controller(self, controller: FlexController):
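        # Derive the slider mode from the controller's range:
        # 1 -> [0, 1], 2 -> [-1, 1], 3 -> [-1, 0], 0 -> generic min/max.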
self.value_min = controller.min
self.value_max = controller.max
if controller.min == 0.0 and controller.max == 1.0:
self.mode = 1
elif controller.min == -1.0 and controller.max == 1.0:
self.mode = 2
elif controller.min == -1.0 and controller.max == 0:
self.mode = 3
else:
self.mode = 0
def get_slot_name(self):
if self.mode == 0:
return 'valuen'
elif self.mode == 1:
return 'valuezo'
elif self.mode == 2:
return 'valuenoo'
elif self.mode == 3:
return 'valuenoz'
@property
def value(self):
if self.mode == 0:
return self.valuen
elif self.mode == 1:
return self.valuezo
elif self.mode == 2:
return self.valuenoo
elif self.mode == 3:
return self.valuenoz
@value.setter
def value(self, new_value):
if self.mode == 0:
self.valuen = new_value
elif self.mode == 1:
self.valuezo = new_value
elif self.mode == 2:
self.valuenoo = new_value
elif self.mode == 3:
self.valuenoz = new_value
@property
def value_right(self):
if self.mode == 0:
return self.valuen_right
elif self.mode == 1:
return self.valuezo_right
elif self.mode == 2:
return self.valuenoo_right
elif self.mode == 3:
return self.valuenoz_right
@value_right.setter
def value_right(self, new_value):
if self.mode == 0:
self.valuen_right = new_value
elif self.mode == 1:
self.valuezo_right = new_value
elif self.mode == 2:
self.valuenoo_right = new_value
elif self.mode == 3:
self.valuenoz_right = new_value
@property
def value_left(self):
if self.mode == 0:
return self.valuen_left
elif self.mode == 1:
return self.valuezo_left
elif self.mode == 2:
return self.valuenoo_left
elif self.mode == 3:
return self.valuenoz_left
@value_left.setter
def value_left(self, new_value):
if self.mode == 0:
self.valuen_left = new_value
elif self.mode == 1:
self.valuezo_left = new_value
elif self.mode == 2:
self.valuenoo_left = new_value
elif self.mode == 3:
self.valuenoz_left = new_value
def draw_item(self, layout, icon):
split = layout.split(factor=0.3, align=True)
split.label(text=self.name, icon_value=icon)
# layout.prop(controller_entry, "name", text="", emboss=False, icon_value=icon)
if self.stereo:
row = split.row()
if self.mode == 0:
row.prop(self, 'valuen_left', text='', slider=True)
row.prop(self, 'valuen_right', text='', slider=True)
elif self.mode == 1:
row.prop(self, 'valuezo_left', text='', slider=True)
row.prop(self, 'valuezo_right', text='', slider=True)
elif self.mode == 2:
row.prop(self, 'valuenoo_left', text='', slider=True)
row.prop(self, 'valuenoo_right', text='', slider=True)
elif self.mode == 3:
row.prop(self, 'valuenoz_left', text='', slider=True)
row.prop(self, 'valuenoz_right', text='', slider=True)
else:
if self.mode == 0:
split.prop(self, 'valuen', text='', slider=True)
elif self.mode == 1:
split.prop(self, 'valuezo', text='', slider=True)
elif self.mode == 2:
split.prop(self, 'valuenoo', text='', slider=True)
elif self.mode == 3:
split.prop(self, 'valuenoz', text='', slider=True)
class SOURCEIO_PT_FlexControlPanel(UITools, bpy.types.Panel):
bl_label = 'Flex controllers'
bl_idname = 'sourceio.flex_control_panel'
bl_parent_id = "sourceio.utils"
@classmethod
def poll(cls, context):
obj = context.active_object # type:bpy.types.Object
return obj and obj.data.flex_controllers is not None
def draw(self, context):
obj = context.active_object # type:bpy.types.Object
self.layout.template_list("SourceIO_UL_FlexControllerList", "",
obj.data, "flex_controllers",
obj.data, "flex_selected_index")
class SourceIO_UL_FlexControllerList(bpy.types.UIList):
def draw_item(self, context, layout, data, item, icon, active_data, active_propname):
operator = data
controller_entry: SourceIO_PG_FlexController = item
layout.use_property_decorate = True
if self.layout_type in {'DEFAULT', 'COMPACT'}:
controller_entry.draw_item(layout, icon)
elif self.layout_type in {'GRID'}:
layout.alignment = 'CENTER'
layout.label(text="", icon_value=icon)
classes = (
SourceIO_PG_FlexController,
SourceIO_UL_FlexControllerList,
SOURCEIO_PT_FlexControlPanel,
)
|
[
"bpy.props.BoolProperty",
"bpy.props.FloatProperty",
"bpy.props.StringProperty",
"bpy.props.IntProperty"
] |
[((1139, 1155), 'bpy.props.StringProperty', 'StringProperty', ([], {}), '()\n', (1153, 1155), False, 'from bpy.props import StringProperty, BoolProperty, CollectionProperty, IntProperty, FloatProperty, PointerProperty\n'), ((1168, 1195), 'bpy.props.BoolProperty', 'BoolProperty', ([], {'name': '"""stereo"""'}), "(name='stereo')\n", (1180, 1195), False, 'from bpy.props import StringProperty, BoolProperty, CollectionProperty, IntProperty, FloatProperty, PointerProperty\n'), ((1212, 1237), 'bpy.props.FloatProperty', 'FloatProperty', ([], {'name': '"""max"""'}), "(name='max')\n", (1225, 1237), False, 'from bpy.props import StringProperty, BoolProperty, CollectionProperty, IntProperty, FloatProperty, PointerProperty\n'), ((1253, 1278), 'bpy.props.FloatProperty', 'FloatProperty', ([], {'name': '"""min"""'}), "(name='min')\n", (1266, 1278), False, 'from bpy.props import StringProperty, BoolProperty, CollectionProperty, IntProperty, FloatProperty, PointerProperty\n'), ((1290, 1314), 'bpy.props.IntProperty', 'IntProperty', ([], {'name': '"""mode"""'}), "(name='mode')\n", (1301, 1314), False, 'from bpy.props import StringProperty, BoolProperty, CollectionProperty, IntProperty, FloatProperty, PointerProperty\n'), ((1328, 1401), 'bpy.props.FloatProperty', 'FloatProperty', ([], {'name': '"""value"""', 'min': '(-100.0)', 'max': '(100.0)', 'update': 'update_max_min'}), "(name='value', min=-100.0, max=100.0, update=update_max_min)\n", (1341, 1401), False, 'from bpy.props import StringProperty, BoolProperty, CollectionProperty, IntProperty, FloatProperty, PointerProperty\n'), ((1415, 1460), 'bpy.props.FloatProperty', 'FloatProperty', ([], {'name': '"""value"""', 'min': '(0.0)', 'max': '(1.0)'}), "(name='value', min=0.0, max=1.0)\n", (1428, 1460), False, 'from bpy.props import StringProperty, BoolProperty, CollectionProperty, IntProperty, FloatProperty, PointerProperty\n'), ((1475, 1521), 'bpy.props.FloatProperty', 'FloatProperty', ([], {'name': '"""value"""', 'min': '(-1.0)', 'max': '(1.0)'}), "(name='value', min=-1.0, max=1.0)\n", (1488, 1521), False, 'from bpy.props import StringProperty, BoolProperty, CollectionProperty, IntProperty, FloatProperty, PointerProperty\n'), ((1536, 1582), 'bpy.props.FloatProperty', 'FloatProperty', ([], {'name': '"""value"""', 'min': '(-1.0)', 'max': '(0.0)'}), "(name='value', min=-1.0, max=0.0)\n", (1549, 1582), False, 'from bpy.props import StringProperty, BoolProperty, CollectionProperty, IntProperty, FloatProperty, PointerProperty\n'), ((1601, 1679), 'bpy.props.FloatProperty', 'FloatProperty', ([], {'name': '"""value_left"""', 'min': '(-100.0)', 'max': '(100.0)', 'update': 'update_max_min'}), "(name='value_left', min=-100.0, max=100.0, update=update_max_min)\n", (1614, 1679), False, 'from bpy.props import StringProperty, BoolProperty, CollectionProperty, IntProperty, FloatProperty, PointerProperty\n'), ((1698, 1748), 'bpy.props.FloatProperty', 'FloatProperty', ([], {'name': '"""value_left"""', 'min': '(0.0)', 'max': '(1.0)'}), "(name='value_left', min=0.0, max=1.0)\n", (1711, 1748), False, 'from bpy.props import StringProperty, BoolProperty, CollectionProperty, IntProperty, FloatProperty, PointerProperty\n'), ((1768, 1819), 'bpy.props.FloatProperty', 'FloatProperty', ([], {'name': '"""value_left"""', 'min': '(-1.0)', 'max': '(1.0)'}), "(name='value_left', min=-1.0, max=1.0)\n", (1781, 1819), False, 'from bpy.props import StringProperty, BoolProperty, CollectionProperty, IntProperty, FloatProperty, PointerProperty\n'), ((1839, 1890), 'bpy.props.FloatProperty', 'FloatProperty', ([], {'name': '"""value_left"""', 'min': '(-1.0)', 'max': '(0.0)'}), "(name='value_left', min=-1.0, max=0.0)\n", (1852, 1890), False, 'from bpy.props import StringProperty, BoolProperty, CollectionProperty, IntProperty, FloatProperty, PointerProperty\n'), ((1910, 1989), 'bpy.props.FloatProperty', 'FloatProperty', ([], {'name': '"""value_right"""', 'min': '(-100.0)', 'max': '(100.0)', 'update': 'update_max_min'}), "(name='value_right', min=-100.0, max=100.0, update=update_max_min)\n", (1923, 1989), False, 'from bpy.props import StringProperty, BoolProperty, CollectionProperty, IntProperty, FloatProperty, PointerProperty\n'), ((2009, 2060), 'bpy.props.FloatProperty', 'FloatProperty', ([], {'name': '"""value_right"""', 'min': '(0.0)', 'max': '(1.0)'}), "(name='value_right', min=0.0, max=1.0)\n", (2022, 2060), False, 'from bpy.props import StringProperty, BoolProperty, CollectionProperty, IntProperty, FloatProperty, PointerProperty\n'), ((2081, 2133), 'bpy.props.FloatProperty', 'FloatProperty', ([], {'name': '"""value_right"""', 'min': '(-1.0)', 'max': '(1.0)'}), "(name='value_right', min=-1.0, max=1.0)\n", (2094, 2133), False, 'from bpy.props import StringProperty, BoolProperty, CollectionProperty, IntProperty, FloatProperty, PointerProperty\n'), ((2154, 2206), 'bpy.props.FloatProperty', 'FloatProperty', ([], {'name': '"""value_right"""', 'min': '(-1.0)', 'max': '(0.0)'}), "(name='value_right', min=-1.0, max=0.0)\n", (2167, 2206), False, 'from bpy.props import StringProperty, BoolProperty, CollectionProperty, IntProperty, FloatProperty, PointerProperty\n')]
|
import time
import lib.getconfig
import logging.handlers
log_file = lib.getconfig.getparam('daemon', 'log_file')
backupcount = int(lib.getconfig.getparam('daemon', 'log_rotate_backups'))
seconds = int(lib.getconfig.getparam('daemon', 'log_rotate_seconds'))
log = logging.handlers.TimedRotatingFileHandler(log_file, 's', seconds, backupCount=backupcount)
log.setLevel(logging.INFO)
logger = logging.getLogger('main')
logger.addHandler(log)
logger.setLevel(logging.INFO)
logger.propagate = False
def print_message(message):
    mssg = str(time.strftime("[%F %H:%M:%S] ")) + message
logger.info(mssg)
|
[
"time.strftime"
] |
[((542, 573), 'time.strftime', 'time.strftime', (['"""[%F %H:%M:%S] """'], {}), "('[%F %H:%M:%S] ')\n", (555, 573), False, 'import time\n')]
|
import numpy as np
import tensorflow as tf
from ops import instance_norm, conv2d, deconv2d, lrelu
######################################################################
def generator_multiunet(image, gf_dim, reuse=False, name="generator", output_c_dim=-1, istraining=True):
if istraining:
dropout_rate = 0.5
else:
dropout_rate = 1.0
with tf.variable_scope(name):
# image is 256 x 256 x input_c_dim
if reuse:
tf.get_variable_scope().reuse_variables()
else:
assert tf.get_variable_scope().reuse is False
# image is (256 x 256 x input_c_dim)
e1 = instance_norm(conv2d(image, gf_dim, name='g_e1_conv'), 'g_bn_e1')
# e1 is (128 x 128 x self.gf_dim)
e2 = instance_norm(conv2d(lrelu(e1), gf_dim * 2, name='g_e2_conv'), 'g_bn_e2')
# e2 is (64 x 64 x self.gf_dim*2)
e3 = instance_norm(conv2d(lrelu(e2), gf_dim * 4, name='g_e3_conv'), 'g_bn_e3')
# e3 is (32 x 32 x self.gf_dim*4)
e4 = instance_norm(conv2d(lrelu(e3), gf_dim * 8, name='g_e4_conv'), 'g_bn_e4')
# e4 is (16 x 16 x self.gf_dim*8)
e5 = instance_norm(conv2d(lrelu(e4), gf_dim * 8, name='g_e5_conv'), 'g_bn_e5')
# e5 is (8 x 8 x self.gf_dim*8)
e6 = instance_norm(conv2d(lrelu(e5), gf_dim * 8, name='g_e6_conv'), 'g_bn_e6')
# e6 is (4 x 4 x self.gf_dim*8)
e7 = instance_norm(conv2d(lrelu(e6), gf_dim * 8, ks=3, s=1, padding='VALID', name='g_e7_conv'), 'g_bn_e7')
# e7 is (2 x 2 x self.gf_dim*8)
e8 = instance_norm(conv2d(lrelu(e7), gf_dim * 16, ks=2, s=1, padding='VALID', name='g_e8_conv'), 'g_bn_e8')
# e8 is (1 x 1 x self.gf_dim*8)
d1 = deconv2d(tf.nn.relu(e8), gf_dim * 8, ks=2, s=1, padding='VALID', name='g_d1')
d1 = tf.nn.dropout(d1, dropout_rate)
d1 = tf.concat([instance_norm(d1, 'g_bn_d1'), e7], 3)
# d1 is (2 x 2 x self.gf_dim*8*2)
d2 = deconv2d(tf.nn.relu(d1), gf_dim * 8, ks=3, s=1, padding='VALID', name='g_d2')
d2 = tf.nn.dropout(d2, dropout_rate)
d2 = tf.concat([instance_norm(d2, 'g_bn_d2'), e6], 3)
# d2 is (4 x 4 x self.gf_dim*8*2)
d3 = deconv2d(tf.nn.relu(d2), gf_dim * 8, name='g_d3')
d3 = tf.nn.dropout(d3, dropout_rate)
d3 = tf.concat([instance_norm(d3, 'g_bn_d3'), e5], 3)
# d3 is (8 x 8 x self.gf_dim*8*2)
d4 = deconv2d(tf.nn.relu(d3), gf_dim * 8, name='g_d4')
d4 = tf.concat([instance_norm(d4, 'g_bn_d4'), e4], 3)
# d4 is (16 x 16 x self.gf_dim*8*2)
d5 = deconv2d(tf.nn.relu(d4), gf_dim * 4, name='g_d5')
d5 = tf.concat([instance_norm(d5, 'g_bn_d5'), e3], 3)
# d5 is (32 x 32 x self.gf_dim*4*2)
d6 = deconv2d(tf.nn.relu(d5), gf_dim * 2, name='g_d6')
d6 = tf.concat([instance_norm(d6, 'g_bn_d6'), e2], 3)
# d6 is (64 x 64 x self.gf_dim*2*2)
d7 = deconv2d(tf.nn.relu(d6), gf_dim, name='g_d7')
d7 = tf.concat([instance_norm(d7, 'g_bn_d7'), e1], 3)
# d7 is (128 x 128 x self.gf_dim*1*2)
d6_pre = deconv2d(tf.nn.relu(d5), output_c_dim, name='g_d6_pre')
# d6_pre is (64 x 64 x output_c_dim)
d7_pre = deconv2d(tf.nn.relu(d6), output_c_dim, name='g_d7_pre')
# d7_pre is (128 x 128 x output_c_dim)
d8_pre = deconv2d(tf.nn.relu(d7), output_c_dim, name='g_d8_pre')
# d8_pre is (256 x 256 x output_c_dim)
return tf.nn.tanh(d8_pre), tf.nn.tanh(d7_pre), tf.nn.tanh(d6_pre)
|
[
"tensorflow.nn.relu",
"tensorflow.nn.tanh",
"ops.lrelu",
"tensorflow.get_variable_scope",
"tensorflow.variable_scope",
"ops.conv2d",
"ops.instance_norm",
"tensorflow.nn.dropout"
] |
[((376, 399), 'tensorflow.variable_scope', 'tf.variable_scope', (['name'], {}), '(name)\n', (393, 399), True, 'import tensorflow as tf\n'), ((1830, 1861), 'tensorflow.nn.dropout', 'tf.nn.dropout', (['d1', 'dropout_rate'], {}), '(d1, dropout_rate)\n', (1843, 1861), True, 'import tensorflow as tf\n'), ((2071, 2102), 'tensorflow.nn.dropout', 'tf.nn.dropout', (['d2', 'dropout_rate'], {}), '(d2, dropout_rate)\n', (2084, 2102), True, 'import tensorflow as tf\n'), ((2284, 2315), 'tensorflow.nn.dropout', 'tf.nn.dropout', (['d3', 'dropout_rate'], {}), '(d3, dropout_rate)\n', (2297, 2315), True, 'import tensorflow as tf\n'), ((661, 700), 'ops.conv2d', 'conv2d', (['image', 'gf_dim'], {'name': '"""g_e1_conv"""'}), "(image, gf_dim, name='g_e1_conv')\n", (667, 700), False, 'from ops import instance_norm, conv2d, deconv2d, lrelu\n'), ((1748, 1762), 'tensorflow.nn.relu', 'tf.nn.relu', (['e8'], {}), '(e8)\n', (1758, 1762), True, 'import tensorflow as tf\n'), ((1989, 2003), 'tensorflow.nn.relu', 'tf.nn.relu', (['d1'], {}), '(d1)\n', (1999, 2003), True, 'import tensorflow as tf\n'), ((2230, 2244), 'tensorflow.nn.relu', 'tf.nn.relu', (['d2'], {}), '(d2)\n', (2240, 2244), True, 'import tensorflow as tf\n'), ((2443, 2457), 'tensorflow.nn.relu', 'tf.nn.relu', (['d3'], {}), '(d3)\n', (2453, 2457), True, 'import tensorflow as tf\n'), ((2613, 2627), 'tensorflow.nn.relu', 'tf.nn.relu', (['d4'], {}), '(d4)\n', (2623, 2627), True, 'import tensorflow as tf\n'), ((2791, 2805), 'tensorflow.nn.relu', 'tf.nn.relu', (['d5'], {}), '(d5)\n', (2801, 2805), True, 'import tensorflow as tf\n'), ((2961, 2975), 'tensorflow.nn.relu', 'tf.nn.relu', (['d6'], {}), '(d6)\n', (2971, 2975), True, 'import tensorflow as tf\n'), ((3141, 3155), 'tensorflow.nn.relu', 'tf.nn.relu', (['d5'], {}), '(d5)\n', (3151, 3155), True, 'import tensorflow as tf\n'), ((3268, 3282), 'tensorflow.nn.relu', 'tf.nn.relu', (['d6'], {}), '(d6)\n', (3278, 3282), True, 'import tensorflow as tf\n'), ((3397, 3411), 'tensorflow.nn.relu', 'tf.nn.relu', (['d7'], {}), '(d7)\n', (3407, 3411), True, 'import tensorflow as tf\n'), ((3515, 3533), 'tensorflow.nn.tanh', 'tf.nn.tanh', (['d8_pre'], {}), '(d8_pre)\n', (3525, 3533), True, 'import tensorflow as tf\n'), ((3535, 3553), 'tensorflow.nn.tanh', 'tf.nn.tanh', (['d7_pre'], {}), '(d7_pre)\n', (3545, 3553), True, 'import tensorflow as tf\n'), ((3555, 3573), 'tensorflow.nn.tanh', 'tf.nn.tanh', (['d6_pre'], {}), '(d6_pre)\n', (3565, 3573), True, 'import tensorflow as tf\n'), ((789, 798), 'ops.lrelu', 'lrelu', (['e1'], {}), '(e1)\n', (794, 798), False, 'from ops import instance_norm, conv2d, deconv2d, lrelu\n'), ((918, 927), 'ops.lrelu', 'lrelu', (['e2'], {}), '(e2)\n', (923, 927), False, 'from ops import instance_norm, conv2d, deconv2d, lrelu\n'), ((1047, 1056), 'ops.lrelu', 'lrelu', (['e3'], {}), '(e3)\n', (1052, 1056), False, 'from ops import instance_norm, conv2d, deconv2d, lrelu\n'), ((1176, 1185), 'ops.lrelu', 'lrelu', (['e4'], {}), '(e4)\n', (1181, 1185), False, 'from ops import instance_norm, conv2d, deconv2d, lrelu\n'), ((1303, 1312), 'ops.lrelu', 'lrelu', (['e5'], {}), '(e5)\n', (1308, 1312), False, 'from ops import instance_norm, conv2d, deconv2d, lrelu\n'), ((1439, 1448), 'ops.lrelu', 'lrelu', (['e6'], {}), '(e6)\n', (1444, 1448), False, 'from ops import instance_norm, conv2d, deconv2d, lrelu\n'), ((1603, 1612), 'ops.lrelu', 'lrelu', (['e7'], {}), '(e7)\n', (1608, 1612), False, 'from ops import instance_norm, conv2d, deconv2d, lrelu\n'), ((1886, 1914), 'ops.instance_norm', 'instance_norm', (['d1', '"""g_bn_d1"""'], {}), "(d1, 'g_bn_d1')\n", (1899, 1914), False, 'from ops import instance_norm, conv2d, deconv2d, lrelu\n'), ((2127, 2155), 'ops.instance_norm', 'instance_norm', (['d2', '"""g_bn_d2"""'], {}), "(d2, 'g_bn_d2')\n", (2140, 2155), False, 'from ops import instance_norm, conv2d, deconv2d, lrelu\n'), ((2340, 2368), 'ops.instance_norm', 'instance_norm', (['d3', '"""g_bn_d3"""'], {}), "(d3, 'g_bn_d3')\n", (2353, 2368), False, 'from ops import instance_norm, conv2d, deconv2d, lrelu\n'), ((2508, 2536), 'ops.instance_norm', 'instance_norm', (['d4', '"""g_bn_d4"""'], {}), "(d4, 'g_bn_d4')\n", (2521, 2536), False, 'from ops import instance_norm, conv2d, deconv2d, lrelu\n'), ((2678, 2706), 'ops.instance_norm', 'instance_norm', (['d5', '"""g_bn_d5"""'], {}), "(d5, 'g_bn_d5')\n", (2691, 2706), False, 'from ops import instance_norm, conv2d, deconv2d, lrelu\n'), ((2856, 2884), 'ops.instance_norm', 'instance_norm', (['d6', '"""g_bn_d6"""'], {}), "(d6, 'g_bn_d6')\n", (2869, 2884), False, 'from ops import instance_norm, conv2d, deconv2d, lrelu\n'), ((3022, 3050), 'ops.instance_norm', 'instance_norm', (['d7', '"""g_bn_d7"""'], {}), "(d7, 'g_bn_d7')\n", (3035, 3050), False, 'from ops import instance_norm, conv2d, deconv2d, lrelu\n'), ((474, 497), 'tensorflow.get_variable_scope', 'tf.get_variable_scope', ([], {}), '()\n', (495, 497), True, 'import tensorflow as tf\n'), ((549, 572), 'tensorflow.get_variable_scope', 'tf.get_variable_scope', ([], {}), '()\n', (570, 572), True, 'import tensorflow as tf\n')]
|
# Program 19d: Generalized synchronization.
# See Figure 19.8(a).
import numpy as np
import matplotlib.pyplot as plt
from scipy.integrate import odeint
# Constants
mu = 5.7
sigma = 16
b = 4
r = 45.92
g = 8 # When g=4, there is no synchronization.
tmax = 100
t = np.arange(0.0, tmax, 0.1)
def rossler_lorenz_odes(X,t):
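    # Rössler drive (x1..x3) unidirectionally coupled, with strength g, into
    # two identical Lorenz response systems (y and z).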
x1, x2, x3, y1, y2, y3, z1, z2, z3 = X
dx1 = -(x2 + x3)
dx2 = x1 + 0.2*x2
dx3 = 0.2 + x3 * (x1 - mu)
dy1 = sigma * (y2 - y1) - g * (y1 - x1)
dy2 = -y1 * y3 + r*y1 - y2
dy3 = y1 * y2 - b*y3
dz1 = sigma * (z2 - z1) - g * (z1 - x1)
dz2 = -z1*z3 + r*z1 - z2
dz3 = z1*z2 - b*z3
return (dx1, dx2, dx3, dy1, dy2, dy3, dz1, dz2, dz3)
y0 = [2, -10, 44, 30, 10, 20, 31, 11, 22]
X = odeint(rossler_lorenz_odes, y0, t, rtol=1e-6)
x1, x2, x3, y1, y2, y3, z1, z2, z3 = X.T  # unpack columns
plt.figure(1)
# Delete first 500 iterates.
plt.plot(y2[500:len(y2)], z2[500:len(z2)])
plt.xlabel(r'$y_2$', fontsize=15)
plt.ylabel(r'$z_2$', fontsize=15)
plt.show()
|
[
"matplotlib.pyplot.show",
"scipy.integrate.odeint",
"matplotlib.pyplot.figure",
"numpy.arange",
"matplotlib.pyplot.ylabel",
"matplotlib.pyplot.xlabel"
] |
[((266, 291), 'numpy.arange', 'np.arange', (['(0.0)', 'tmax', '(0.1)'], {}), '(0.0, tmax, 0.1)\n', (275, 291), True, 'import numpy as np\n'), ((740, 786), 'scipy.integrate.odeint', 'odeint', (['rossler_lorenz_odes', 'y0', 't'], {'rtol': '(1e-06)'}), '(rossler_lorenz_odes, y0, t, rtol=1e-06)\n', (746, 786), False, 'from scipy.integrate import odeint\n'), ((846, 859), 'matplotlib.pyplot.figure', 'plt.figure', (['(1)'], {}), '(1)\n', (856, 859), True, 'import matplotlib.pyplot as plt\n'), ((932, 964), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""$y_2$"""'], {'fontsize': '(15)'}), "('$y_2$', fontsize=15)\n", (942, 964), True, 'import matplotlib.pyplot as plt\n'), ((966, 998), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""$z_2$"""'], {'fontsize': '(15)'}), "('$z_2$', fontsize=15)\n", (976, 998), True, 'import matplotlib.pyplot as plt\n'), ((1000, 1010), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (1008, 1010), True, 'import matplotlib.pyplot as plt\n')]
|
from unittest.mock import patch
import boto3
from moto import mock_s3
from urlpath import URL
from deafrica.monitoring import s2_gap_report
from deafrica.monitoring.s2_gap_report import (
get_and_filter_cogs_keys,
generate_buckets_diff,
)
from deafrica.tests.conftest import (
COGS_REGION,
INVENTORY_BUCKET_NAME,
INVENTORY_MANIFEST_FILE,
INVENTORY_DATA_FILE,
INVENTORY_FOLDER,
INVENTORY_BUCKET_SOURCE_NAME,
REGION,
REPORT_FOLDER,
TEST_DATA_DIR,
)
DATA_FOLDER = "sentinel_2"
INVENTORY_MANIFEST_FILE = TEST_DATA_DIR / DATA_FOLDER / INVENTORY_MANIFEST_FILE
INVENTORY_DATA_FILE = TEST_DATA_DIR / DATA_FOLDER / INVENTORY_DATA_FILE
@mock_s3
def test_get_and_filter_cogs_keys(
s3_inventory_data_file: URL,
s3_inventory_manifest_file: URL,
):
s3_client = boto3.client("s3", region_name=COGS_REGION)
s3_client.create_bucket(
Bucket=INVENTORY_BUCKET_NAME,
CreateBucketConfiguration={
"LocationConstraint": COGS_REGION,
},
)
# Upload inventory manifest
s3_client.upload_file(
str(INVENTORY_MANIFEST_FILE),
INVENTORY_BUCKET_NAME,
str(s3_inventory_manifest_file),
)
# Upload inventory data
s3_client.upload_file(
str(INVENTORY_DATA_FILE),
INVENTORY_BUCKET_NAME,
str(s3_inventory_data_file),
)
print(list(boto3.resource("s3").Bucket("test-inventory-bucket").objects.all()))
s3_inventory_path = URL(
f"s3://{INVENTORY_BUCKET_NAME}/{INVENTORY_FOLDER}/{INVENTORY_BUCKET_NAME}/"
)
with patch.object(
s2_gap_report, "SOURCE_INVENTORY_PATH", str(s3_inventory_path)
), patch.object(s2_gap_report, "BASE_FOLDER_NAME", str(INVENTORY_FOLDER)):
scenes_list = get_and_filter_cogs_keys()
assert len(scenes_list) == 6
@mock_s3
def test_generate_buckets_diff(
s3_inventory_data_file: URL,
s3_inventory_manifest_file: URL,
):
s3_client_cogs = boto3.client("s3", region_name=COGS_REGION)
s3_client_cogs.create_bucket(
Bucket=INVENTORY_BUCKET_SOURCE_NAME,
CreateBucketConfiguration={
"LocationConstraint": COGS_REGION,
},
)
# Upload inventory manifest
s3_client_cogs.upload_file(
str(INVENTORY_MANIFEST_FILE),
INVENTORY_BUCKET_SOURCE_NAME,
str(s3_inventory_manifest_file),
)
# Upload inventory data
s3_client_cogs.upload_file(
str(INVENTORY_DATA_FILE),
INVENTORY_BUCKET_SOURCE_NAME,
str(s3_inventory_data_file),
)
print(list(boto3.resource("s3").Bucket("test-cogs-inventory-bucket").objects.all()))
s3_client = boto3.client("s3", region_name=REGION)
s3_client.create_bucket(
Bucket=INVENTORY_BUCKET_NAME,
CreateBucketConfiguration={
"LocationConstraint": REGION,
},
)
# Upload inventory manifest
s3_client.upload_file(
str(INVENTORY_MANIFEST_FILE),
INVENTORY_BUCKET_NAME,
str(s3_inventory_manifest_file),
)
# Upload inventory data
s3_client.upload_file(
str(INVENTORY_DATA_FILE),
INVENTORY_BUCKET_NAME,
str(s3_inventory_data_file),
)
print(list(boto3.resource("s3").Bucket("test-inventory-bucket").objects.all()))
s3_inventory_path = URL(
f"s3://{INVENTORY_BUCKET_NAME}/{INVENTORY_FOLDER}/{INVENTORY_BUCKET_NAME}/"
)
s3_cogs_inventory_path = URL(
f"s3://{INVENTORY_BUCKET_SOURCE_NAME}/{INVENTORY_FOLDER}/{INVENTORY_BUCKET_NAME}/"
)
with patch.object(
s2_gap_report, "SOURCE_INVENTORY_PATH", str(s3_cogs_inventory_path)
), patch.object(
s2_gap_report, "SENTINEL_2_INVENTORY_PATH", str(s3_inventory_path)
), patch.object(
s2_gap_report, "BASE_FOLDER_NAME", str(INVENTORY_FOLDER)
):
# No differences
generate_buckets_diff(bucket_name=INVENTORY_BUCKET_NAME)
assert (
len(
s3_client.list_objects_v2(
Bucket=INVENTORY_BUCKET_NAME, Prefix=REPORT_FOLDER
).get("Contents", [])
)
== 0
)
|
[
"boto3.client",
"urlpath.URL",
"deafrica.monitoring.s2_gap_report.generate_buckets_diff",
"deafrica.monitoring.s2_gap_report.get_and_filter_cogs_keys",
"boto3.resource"
] |
[((808, 851), 'boto3.client', 'boto3.client', (['"""s3"""'], {'region_name': 'COGS_REGION'}), "('s3', region_name=COGS_REGION)\n", (820, 851), False, 'import boto3\n'), ((1469, 1554), 'urlpath.URL', 'URL', (['f"""s3://{INVENTORY_BUCKET_NAME}/{INVENTORY_FOLDER}/{INVENTORY_BUCKET_NAME}/"""'], {}), "(f's3://{INVENTORY_BUCKET_NAME}/{INVENTORY_FOLDER}/{INVENTORY_BUCKET_NAME}/'\n )\n", (1472, 1554), False, 'from urlpath import URL\n'), ((1961, 2004), 'boto3.client', 'boto3.client', (['"""s3"""'], {'region_name': 'COGS_REGION'}), "('s3', region_name=COGS_REGION)\n", (1973, 2004), False, 'import boto3\n'), ((2655, 2693), 'boto3.client', 'boto3.client', (['"""s3"""'], {'region_name': 'REGION'}), "('s3', region_name=REGION)\n", (2667, 2693), False, 'import boto3\n'), ((3306, 3391), 'urlpath.URL', 'URL', (['f"""s3://{INVENTORY_BUCKET_NAME}/{INVENTORY_FOLDER}/{INVENTORY_BUCKET_NAME}/"""'], {}), "(f's3://{INVENTORY_BUCKET_NAME}/{INVENTORY_FOLDER}/{INVENTORY_BUCKET_NAME}/'\n )\n", (3309, 3391), False, 'from urlpath import URL\n'), ((3431, 3523), 'urlpath.URL', 'URL', (['f"""s3://{INVENTORY_BUCKET_SOURCE_NAME}/{INVENTORY_FOLDER}/{INVENTORY_BUCKET_NAME}/"""'], {}), "(f's3://{INVENTORY_BUCKET_SOURCE_NAME}/{INVENTORY_FOLDER}/{INVENTORY_BUCKET_NAME}/'\n )\n", (3434, 3523), False, 'from urlpath import URL\n'), ((1760, 1786), 'deafrica.monitoring.s2_gap_report.get_and_filter_cogs_keys', 'get_and_filter_cogs_keys', ([], {}), '()\n', (1784, 1786), False, 'from deafrica.monitoring.s2_gap_report import get_and_filter_cogs_keys, generate_buckets_diff\n'), ((3855, 3911), 'deafrica.monitoring.s2_gap_report.generate_buckets_diff', 'generate_buckets_diff', ([], {'bucket_name': 'INVENTORY_BUCKET_NAME'}), '(bucket_name=INVENTORY_BUCKET_NAME)\n', (3876, 3911), False, 'from deafrica.monitoring.s2_gap_report import get_and_filter_cogs_keys, generate_buckets_diff\n'), ((1375, 1395), 'boto3.resource', 'boto3.resource', (['"""s3"""'], {}), "('s3')\n", (1389, 1395), False, 'import boto3\n'), ((2564, 2584), 'boto3.resource', 'boto3.resource', (['"""s3"""'], {}), "('s3')\n", (2578, 2584), False, 'import boto3\n'), ((3212, 3232), 'boto3.resource', 'boto3.resource', (['"""s3"""'], {}), "('s3')\n", (3226, 3232), False, 'import boto3\n')]
|
from django.contrib import admin
from .models import Post, Thahood, UserProfile, Business
# Register your models here.
admin.site.register(Post)
admin.site.register(UserProfile)
admin.site.register(Thahood)
admin.site.register(Business)
|
[
"django.contrib.admin.site.register"
] |
[((120, 145), 'django.contrib.admin.site.register', 'admin.site.register', (['Post'], {}), '(Post)\n', (139, 145), False, 'from django.contrib import admin\n'), ((146, 178), 'django.contrib.admin.site.register', 'admin.site.register', (['UserProfile'], {}), '(UserProfile)\n', (165, 178), False, 'from django.contrib import admin\n'), ((179, 207), 'django.contrib.admin.site.register', 'admin.site.register', (['Thahood'], {}), '(Thahood)\n', (198, 207), False, 'from django.contrib import admin\n'), ((208, 237), 'django.contrib.admin.site.register', 'admin.site.register', (['Business'], {}), '(Business)\n', (227, 237), False, 'from django.contrib import admin\n')]
|
"""Implementation of a possibly bounded uniform experience replay manager."""
import random
from typing import List, Optional
from decuen.memories._memory import Memory
from decuen.structs import Trajectory, Transition
class UniformMemory(Memory):
"""Sized uniform memory mechanism, stores memories up to a maximum amount if specified."""
_transitions_cap: Optional[int]
_trajectories_cap: Optional[int]
def __init__(self, transition_replay_num: int = 1, trajectory_replay_num: int = 1,
transitions_cap: Optional[int] = None, trajectories_cap: Optional[int] = None) -> None:
"""Initialize a uniform memory mechanism."""
super().__init__([], [], transition_replay_num, trajectory_replay_num)
self._transitions_cap = transitions_cap
self._trajectories_cap = trajectories_cap
def store_transition(self, transition: Transition) -> None:
"""Store a transition in this memory mechanism's buffer with any needed associated information."""
self.transition = transition
if self._transitions_cap is not None and len(self._transition_buffer) == self._transitions_cap:
self._transition_buffer.pop(0)
self._transition_buffer.append(transition)
def _replay_transitions(self, num: int) -> List[Transition]:
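        # random.choices samples uniformly WITH replacement, so a replay
        # batch may contain duplicate transitions.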
return random.choices(self._transition_buffer, k=num)
def store_trajectory(self, trajectory: Trajectory) -> None:
"""Store a trajectory in this memory mechanism's buffer consisting of a sequence of transitions."""
self.trajectory = trajectory
if self._trajectories_cap is not None and len(self._trajectory_buffer) == self._trajectories_cap:
self._trajectory_buffer.pop(0)
self._trajectory_buffer.append(trajectory)
def _replay_trajectories(self, num: int) -> List[Trajectory]:
return random.choices(self._trajectory_buffer, k=num)
|
[
"random.choices"
] |
[((1332, 1378), 'random.choices', 'random.choices', (['self._transition_buffer'], {'k': 'num'}), '(self._transition_buffer, k=num)\n', (1346, 1378), False, 'import random\n'), ((1871, 1917), 'random.choices', 'random.choices', (['self._trajectory_buffer'], {'k': 'num'}), '(self._trajectory_buffer, k=num)\n', (1885, 1917), False, 'import random\n')]
|
import numpy as np
from tspdb.src.pindex.predict import get_prediction_range, get_prediction
from tspdb.src.pindex.pindex_managment import TSPI
from tspdb.src.pindex.pindex_utils import index_ts_mapper
import time
import timeit
import pandas as pd
from tspdb.src.hdf_util import read_data
from tspdb.src.tsUtils import randomlyHideValues
from scipy.stats import norm
from sklearn.metrics import r2_score
import tspdb
def r2_var(y,y_h,X):
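    # The baseline is the variance of X (E[X^2] - E[X]^2), so this is an R^2
    # measured against a constant variance baseline rather than the mean of y.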
average = np.mean(X**2) - np.mean(X)**2
return 1 - sum((y-y_h)**2)/sum((y-average)**2)
def create_table_data():
obs = np.arange(10**5).astype('float')
means = obs
var = np.zeros(obs.shape)
obs_9 = randomlyHideValues(np.array(obs), 0.9)[0]
obs_7 = randomlyHideValues(np.array(obs), 0.7)[0]
print(obs_9)
df = pd.DataFrame(data ={'ts':obs, 'means': means, 'ts_9':obs_9, 'ts_7' : obs_7,'var': var })
df.to_csv('testdata/tables/ts_basic_5.csv',index_label = 'time')
timestamps = pd.date_range('2012-10-01 00:00:00', periods = 10**5, freq='5s')
df.index = timestamps
df.to_csv('testdata/tables/ts_basic_ts_5_5.csv', index_label = 'time')
# real time series variance constant
data = read_data('testdata/MixtureTS2.h5')
obs = data['obs'][:]
means = data['means'][:]
var = np.ones(obs.shape)
obs_9 = randomlyHideValues(np.array(obs), 0.9)[0]
obs_7 = randomlyHideValues(np.array(obs), 0.7)[0]
df = pd.DataFrame(data ={'ts':obs, 'means': means, 'ts_9':obs_9, 'ts_7' : obs_7 ,'var': var })
df.index_label = 'time'
df.to_csv('testdata/tables/MixtureTS2.csv', index_label = 'time')
# real time series variance constant
data = read_data('testdata/MixtureTS.h5')
obs = data['obs'][:]
means = data['means'][:]
var = np.ones(obs.shape)
obs_9 = randomlyHideValues(np.array(obs), 0.9)[0]
obs_7 = randomlyHideValues(np.array(obs), 0.7)[0]
df = pd.DataFrame(data ={'ts':obs, 'means': means, 'ts_9':obs_9, 'ts_7' : obs_7,'var': var })
df.to_csv('testdata/tables/MixtureTS.csv', index_label = 'time')
    # real time series variance harmonics
data = read_data('testdata/MixtureTS_var.h5')
obs = data['obs'][:]
means = data['means'][:]
var = data['var'][:]
obs_9 = randomlyHideValues(np.array(obs), 0.9)[0]
obs_7 = randomlyHideValues(np.array(obs), 0.7)[0]
df = pd.DataFrame(data ={'ts':obs, 'means': means, 'ts_9':obs_9, 'ts_7' : obs_7, 'var': var })
df.to_csv('testdata/tables/MixtureTS_var.csv', index_label = 'time')
def create_tables(interface):
dir_ = tspdb.__path__[0]+'/tests/'
for table in ['mixturets2','ts_basic_5','ts_basic_ts_5_5','mixturets_var']:
df = pd.read_csv(dir_+'testdata/tables/%s.csv'%table, engine = 'python')
if table == 'ts_basic_ts_5_5': df['time'] = df['time'].astype('datetime64[ns]')
interface.create_table(table, df, 'time', include_index = False)
def update_test(interface, init_points = 10**4 , update_points = [1000,100,5000,10000], T = 1000, direct_var = True ,index_name = 'ts_basic_test_pindex'):
df = pd.DataFrame(data ={'ts': np.arange(init_points).astype('float')})
interface.create_table('ts_basic_test', df, 'row_id', index_label='row_id')
time_series_table = ['ts_basic_test','ts', 'row_id']
T0 = 1000
gamma = 0.5
k = 2
k_var = 1
agg_interval = 1.
conn = interface.engine.raw_connection()
cur = conn.cursor()
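    # create_pindex is invoked through plain SQL; k and k_var are the ranks of
    # the mean and variance models (mirroring rank/rank_var in the TSPI(...)
    # call in ts_table_tests below).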
cur.execute('''SELECT create_pindex('%s','%s','%s','%s', "T" => %s, k => %s, k_var => %s, agg_interval => %s, var_direct => %s)'''%('ts_basic_test','row_id','ts', index_name, T, k,k_var, agg_interval, direct_var))
cur.close()
conn.commit()
conn.close()
for points in update_points:
df = pd.DataFrame(data = {'ts':np.arange(init_points,points+init_points).astype('float')}, index = np.arange(init_points,points+init_points) )
interface.bulk_insert('ts_basic_test', df, index_label='row_id')
init_points += points
print ('successfully updated %s points' %points)
def ts_table_tests(init_points = 10**4 , update_points = [1000,100,5000,10000], T = 1000, direct_var = True ,index_name = 'ts_basic_ts_pindex'):
interface = SqlImplementation(driver="postgresql", host="localhost", database="querytime_test",user="aalomar",password="<PASSWORD>")
df = pd.DataFrame(data ={'ts': np.arange(init_points).astype('float')})
timestamps = pd.date_range('2012-10-01 00:00:00', periods = init_points+1, freq='5s')
end = timestamps[-1]
df.index = timestamps[:-1]
interface.create_table('ts_basic_ts', df, 'timestamp', index_label='timestamp')
time_series_table = ['ts_basic_ts','ts', 'timestamp']
T0 = 1000
gamma = 0.5
k = 2
k_var = 1
TSPD = TSPI(_dir = 'C:/Program Files/PostgreSQL/10/data/', agg_interval = 5, T = T,T_var = T, rank = k, rank_var = k_var, col_to_row_ratio = 10, index_name = index_name,gamma = gamma, interface= interface ,time_series_table = time_series_table, direct_var = direct_var )
TSPD.create_index()
interface = SqlImplementation(driver="postgresql", host="localhost", database="querytime_test",user="aalomar",password="<PASSWORD>")
for points in update_points:
df = pd.DataFrame(data = {'ts':np.arange(init_points,points+init_points).astype('float')} )
timestamps = pd.date_range(end, periods = points+1, freq='5s')
end = timestamps[-1]
df.index = timestamps[:-1]
interface.bulk_insert('ts_basic_ts', df, index_label='timestamp')
init_points += points
print ('successfully updated %s points' %points)
def create_pindex_test(interface,table_name, T,T_var, k ,k_var, direct_var,value_column= ['ts'], index_name = None , agg_interval = 1., col_to_row_ratio= 10, time_column = 'row_id'):
T0 = 1000
gamma = 0.5
if index_name is None: index_name = 'pindex'
value_column = ','.join(value_column)
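    # Render the column list as a Postgres array literal ('{col1,col2}') for
    # the create_pindex call below.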
interface.engine.execute('''SELECT create_pindex('%s','%s','{%s}','%s', T => %s,t_var =>%s, k => %s, k_var => %s, agg_interval => %s, var_direct => %s, col_to_row_ratio => %s)'''%(table_name,time_column, value_column, index_name, T, T_var, k,k_var, agg_interval, direct_var, col_to_row_ratio))
|
[
"pandas.DataFrame",
"pandas.date_range",
"pandas.read_csv",
"numpy.zeros",
"numpy.ones",
"numpy.mean",
"tspdb.src.hdf_util.read_data",
"numpy.arange",
"numpy.array",
"tspdb.src.pindex.pindex_managment.TSPI"
] |
[((645, 664), 'numpy.zeros', 'np.zeros', (['obs.shape'], {}), '(obs.shape)\n', (653, 664), True, 'import numpy as np\n'), ((791, 883), 'pandas.DataFrame', 'pd.DataFrame', ([], {'data': "{'ts': obs, 'means': means, 'ts_9': obs_9, 'ts_7': obs_7, 'var': var}"}), "(data={'ts': obs, 'means': means, 'ts_9': obs_9, 'ts_7': obs_7,\n 'var': var})\n", (803, 883), True, 'import pandas as pd\n'), ((967, 1031), 'pandas.date_range', 'pd.date_range', (['"""2012-10-01 00:00:00"""'], {'periods': '(10 ** 5)', 'freq': '"""5s"""'}), "('2012-10-01 00:00:00', periods=10 ** 5, freq='5s')\n", (980, 1031), True, 'import pandas as pd\n'), ((1182, 1217), 'tspdb.src.hdf_util.read_data', 'read_data', (['"""testdata/MixtureTS2.h5"""'], {}), "('testdata/MixtureTS2.h5')\n", (1191, 1217), False, 'from tspdb.src.hdf_util import read_data\n'), ((1276, 1294), 'numpy.ones', 'np.ones', (['obs.shape'], {}), '(obs.shape)\n', (1283, 1294), True, 'import numpy as np\n'), ((1406, 1498), 'pandas.DataFrame', 'pd.DataFrame', ([], {'data': "{'ts': obs, 'means': means, 'ts_9': obs_9, 'ts_7': obs_7, 'var': var}"}), "(data={'ts': obs, 'means': means, 'ts_9': obs_9, 'ts_7': obs_7,\n 'var': var})\n", (1418, 1498), True, 'import pandas as pd\n'), ((1643, 1677), 'tspdb.src.hdf_util.read_data', 'read_data', (['"""testdata/MixtureTS.h5"""'], {}), "('testdata/MixtureTS.h5')\n", (1652, 1677), False, 'from tspdb.src.hdf_util import read_data\n'), ((1736, 1754), 'numpy.ones', 'np.ones', (['obs.shape'], {}), '(obs.shape)\n', (1743, 1754), True, 'import numpy as np\n'), ((1866, 1958), 'pandas.DataFrame', 'pd.DataFrame', ([], {'data': "{'ts': obs, 'means': means, 'ts_9': obs_9, 'ts_7': obs_7, 'var': var}"}), "(data={'ts': obs, 'means': means, 'ts_9': obs_9, 'ts_7': obs_7,\n 'var': var})\n", (1878, 1958), True, 'import pandas as pd\n'), ((2077, 2115), 'tspdb.src.hdf_util.read_data', 'read_data', (['"""testdata/MixtureTS_var.h5"""'], {}), "('testdata/MixtureTS_var.h5')\n", (2086, 2115), False, 'from tspdb.src.hdf_util import read_data\n'), ((2300, 2392), 'pandas.DataFrame', 'pd.DataFrame', ([], {'data': "{'ts': obs, 'means': means, 'ts_9': obs_9, 'ts_7': obs_7, 'var': var}"}), "(data={'ts': obs, 'means': means, 'ts_9': obs_9, 'ts_7': obs_7,\n 'var': var})\n", (2312, 2392), True, 'import pandas as pd\n'), ((4303, 4375), 'pandas.date_range', 'pd.date_range', (['"""2012-10-01 00:00:00"""'], {'periods': '(init_points + 1)', 'freq': '"""5s"""'}), "('2012-10-01 00:00:00', periods=init_points + 1, freq='5s')\n", (4316, 4375), True, 'import pandas as pd\n'), ((4621, 4876), 'tspdb.src.pindex.pindex_managment.TSPI', 'TSPI', ([], {'_dir': '"""C:/Program Files/PostgreSQL/10/data/"""', 'agg_interval': '(5)', 'T': 'T', 'T_var': 'T', 'rank': 'k', 'rank_var': 'k_var', 'col_to_row_ratio': '(10)', 'index_name': 'index_name', 'gamma': 'gamma', 'interface': 'interface', 'time_series_table': 'time_series_table', 'direct_var': 'direct_var'}), "(_dir='C:/Program Files/PostgreSQL/10/data/', agg_interval=5, T=T,\n T_var=T, rank=k, rank_var=k_var, col_to_row_ratio=10, index_name=\n index_name, gamma=gamma, interface=interface, time_series_table=\n time_series_table, direct_var=direct_var)\n", (4625, 4876), False, 'from tspdb.src.pindex.pindex_managment import TSPI\n'), ((470, 485), 'numpy.mean', 'np.mean', (['(X ** 2)'], {}), '(X ** 2)\n', (477, 485), True, 'import numpy as np\n'), ((2621, 2690), 'pandas.read_csv', 'pd.read_csv', (["(dir_ + 'testdata/tables/%s.csv' % table)"], {'engine': '"""python"""'}), "(dir_ + 'testdata/tables/%s.csv' % table, engine='python')\n", (2632, 2690), True, 'import pandas as pd\n'), ((5186, 5235), 'pandas.date_range', 'pd.date_range', (['end'], {'periods': '(points + 1)', 'freq': '"""5s"""'}), "(end, periods=points + 1, freq='5s')\n", (5199, 5235), True, 'import pandas as pd\n'), ((486, 496), 'numpy.mean', 'np.mean', (['X'], {}), '(X)\n', (493, 496), True, 'import numpy as np\n'), ((590, 608), 'numpy.arange', 'np.arange', (['(10 ** 5)'], {}), '(10 ** 5)\n', (599, 608), True, 'import numpy as np\n'), ((694, 707), 'numpy.array', 'np.array', (['obs'], {}), '(obs)\n', (702, 707), True, 'import numpy as np\n'), ((746, 759), 'numpy.array', 'np.array', (['obs'], {}), '(obs)\n', (754, 759), True, 'import numpy as np\n'), ((1324, 1337), 'numpy.array', 'np.array', (['obs'], {}), '(obs)\n', (1332, 1337), True, 'import numpy as np\n'), ((1376, 1389), 'numpy.array', 'np.array', (['obs'], {}), '(obs)\n', (1384, 1389), True, 'import numpy as np\n'), ((1784, 1797), 'numpy.array', 'np.array', (['obs'], {}), '(obs)\n', (1792, 1797), True, 'import numpy as np\n'), ((1836, 1849), 'numpy.array', 'np.array', (['obs'], {}), '(obs)\n', (1844, 1849), True, 'import numpy as np\n'), ((2218, 2231), 'numpy.array', 'np.array', (['obs'], {}), '(obs)\n', (2226, 2231), True, 'import numpy as np\n'), ((2270, 2283), 'numpy.array', 'np.array', (['obs'], {}), '(obs)\n', (2278, 2283), True, 'import numpy as np\n'), ((3735, 3779), 'numpy.arange', 'np.arange', (['init_points', '(points + init_points)'], {}), '(init_points, points + init_points)\n', (3744, 3779), True, 'import numpy as np\n'), ((3035, 3057), 'numpy.arange', 'np.arange', (['init_points'], {}), '(init_points)\n', (3044, 3057), True, 'import numpy as np\n'), ((4246, 4268), 'numpy.arange', 'np.arange', (['init_points'], {}), '(init_points)\n', (4255, 4268), True, 'import numpy as np\n'), ((3667, 3711), 'numpy.arange', 'np.arange', (['init_points', '(points + init_points)'], {}), '(init_points, points + init_points)\n', (3676, 3711), True, 'import numpy as np\n'), ((5108, 5152), 'numpy.arange', 'np.arange', (['init_points', '(points + init_points)'], {}), '(init_points, points + init_points)\n', (5117, 5152), True, 'import numpy as np\n')]
|
from sys import version_info
if version_info[0] == 2:
from sys import maxint
else:
from sys import maxsize as maxint
from itertools import chain
from .iters import map, range
class Stream(object):
__slots__ = ("_last", "_collection", "_origin")
class _StreamIterator(object):
__slots__ = ("_stream", "_position")
def __init__(self, stream):
self._stream = stream
self._position = -1 # not started yet
def __next__(self):
# check if elements are available for next position
# return next element or raise StopIteration
self._position += 1
if (len(self._stream._collection) > self._position or
self._stream._fill_to(self._position)):
return self._stream._collection[self._position]
raise StopIteration()
if version_info[0] == 2:
next = __next__
def __init__(self, *origin):
self._collection = []
self._last = -1 # not started yet
self._origin = iter(origin) if origin else []
def __lshift__(self, rvalue):
iterator = rvalue() if callable(rvalue) else rvalue
self._origin = chain(self._origin, iterator)
return self
def cursor(self):
"""Return position of next evaluated element"""
return self._last + 1
def _fill_to(self, index):
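        # Lazily pull elements from the origin iterator until the requested
        # index is materialized; returns False if the origin is exhausted first.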
if self._last >= index:
return True
while self._last < index:
try:
n = next(self._origin)
except StopIteration:
return False
self._last += 1
self._collection.append(n)
return True
def __iter__(self):
return self._StreamIterator(self)
def __getitem__(self, index):
if isinstance(index, int):
# todo: i'm not sure what to do with negative indices
if index < 0: raise TypeError("Invalid argument type")
self._fill_to(index)
elif isinstance(index, slice):
low, high, step = index.indices(maxint)
if step == 0: raise ValueError("Step must not be 0")
return self.__class__() << map(self.__getitem__, range(low, high, step or 1))
else:
raise TypeError("Invalid argument type")
return self._collection.__getitem__(index)
|
[
"itertools.chain"
] |
[((1216, 1245), 'itertools.chain', 'chain', (['self._origin', 'iterator'], {}), '(self._origin, iterator)\n', (1221, 1245), False, 'from itertools import chain\n')]
|
# 10/4/18
# chenyong
# predict leaf counts using trained model
"""
Make predictions of Leaf counts using trained models
"""
import os.path as op
import sys
import numpy as np
import pandas as pd
import pickle
import matplotlib.pyplot as plt
import matplotlib as mpl
from PIL import Image
from schnablelab.apps.base import ActionDispatcher, OptionParser, glob
from schnablelab.apps.headers import Slurm_header, Slurm_gpu_constraint_header, Slurm_gpu_header
from schnablelab.apps.natsort import natsorted
from glob import glob
from PIL import Image
import cv2
from pathlib import Path
def main():
actions = (
('keras', 'using keras model to make prediction'),
('dpp', 'using dpp model to make prediction'),
)
p = ActionDispatcher(actions)
p.dispatch(globals())
def dpp(args):
"""
%prog model_dir img_dir output_prefix
using your trained dpp model to make predictions.
"""
p = OptionParser(dpp.__doc__)
p.set_slurm_opts(jn=True, gpu=True)
opts, args = p.parse_args(args)
if len(args) == 0:
sys.exit(not p.print_help())
model_dir, img_dir, otp = args
header = Slurm_gpu_constraint_header%(opts.time, opts.memory, otp, otp, otp, opts.gpu) \
if opts.gpu \
else Slurm_gpu_header%(opts.time, opts.memory, otp, otp, otp)
if opts.env:
header += 'ml anaconda \nsource activate %s\n'%opts.env
cmd = "python -m schnablelab.CNN.CNN_LeafCount_Predict %s %s %s.csv\n"%(model_dir, img_dir, otp)
header += cmd
f0 = open('%s.slurm'%otp, 'w')
f0.write(header)
f0.close()
print('%s.slurm generate, you can submit to a gpu node now.'%otp)
def keras(args):
"""
%prog model_name img_dir target_size output_prefix
using your trained model to make predictions. Target size is the input_shape when
    you train your model. A valid target_size example is 224,224,3
"""
from keras.models import load_model
p = OptionParser(keras.__doc__)
p.set_slurm_opts()
p.add_option('--img_num', default='all',
help='specify how many images used for prediction in the dir')
opts, args = p.parse_args(args)
if len(args) == 0:
sys.exit(not p.print_help())
model, img_dir, ts, otp = args
ts = tuple([int(i) for i in ts.split(',')][:-1])
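    # Keep only the spatial dims and drop the trailing channel count;
    # cv2.resize expects a (width, height) pair.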
print(ts)
p = Path(img_dir)
ps = list(p.glob('*.png'))[:int(opts.img_num)] \
if opts.img_num!='all' \
else list(p.glob('*.png'))
imgs = []
fns = []
for i in ps:
print(i.name)
fns.append(i.name)
img = cv2.imread(str(i))
img = cv2.resize(img, ts)
imgs.append(img)
imgs_arr = np.asarray(imgs)
my_model = load_model(model)
pre_prob = my_model.predict(imgs_arr)
df = pd.DataFrame(pre_prob)
clss = df.shape[1]
headers = ['class_%s'%i for i in range(1, clss+1)]
df.columns = headers
df['image'] = fns
headers.insert(0, 'image')
df_final = df[headers]
df_final.to_csv('%s.csv'%otp, sep='\t', index=False)
if __name__ == "__main__":
main()
|
[
"keras.models.load_model",
"pandas.DataFrame",
"numpy.asarray",
"pathlib.Path",
"schnablelab.apps.base.ActionDispatcher",
"schnablelab.apps.base.OptionParser",
"cv2.resize"
] |
[((750, 775), 'schnablelab.apps.base.ActionDispatcher', 'ActionDispatcher', (['actions'], {}), '(actions)\n', (766, 775), False, 'from schnablelab.apps.base import ActionDispatcher, OptionParser, glob\n'), ((938, 963), 'schnablelab.apps.base.OptionParser', 'OptionParser', (['dpp.__doc__'], {}), '(dpp.__doc__)\n', (950, 963), False, 'from schnablelab.apps.base import ActionDispatcher, OptionParser, glob\n'), ((1955, 1982), 'schnablelab.apps.base.OptionParser', 'OptionParser', (['keras.__doc__'], {}), '(keras.__doc__)\n', (1967, 1982), False, 'from schnablelab.apps.base import ActionDispatcher, OptionParser, glob\n'), ((2338, 2351), 'pathlib.Path', 'Path', (['img_dir'], {}), '(img_dir)\n', (2342, 2351), False, 'from pathlib import Path\n'), ((2673, 2689), 'numpy.asarray', 'np.asarray', (['imgs'], {}), '(imgs)\n', (2683, 2689), True, 'import numpy as np\n'), ((2705, 2722), 'keras.models.load_model', 'load_model', (['model'], {}), '(model)\n', (2715, 2722), False, 'from keras.models import load_model\n'), ((2774, 2796), 'pandas.DataFrame', 'pd.DataFrame', (['pre_prob'], {}), '(pre_prob)\n', (2786, 2796), True, 'import pandas as pd\n'), ((2613, 2632), 'cv2.resize', 'cv2.resize', (['img', 'ts'], {}), '(img, ts)\n', (2623, 2632), False, 'import cv2\n')]
|
import traceback
from twisted.application import service
from twisted.internet import reactor, task
from spyd.server.binding.binding import Binding
from spyd.server.metrics.rate_aggregator import RateAggregator
class BindingService(service.Service):
def __init__(self, client_protocol_factory, metrics_service):
self.bindings = set()
self.client_protocol_factory = client_protocol_factory
self.metrics_service = metrics_service
self.flush_rate_aggregator = RateAggregator(metrics_service, 'flush_all_rate', 1.0)
reactor.addSystemEventTrigger('during', 'flush_bindings', self.flush_all)
self.flush_looping_call = task.LoopingCall(reactor.fireSystemEvent, 'flush_bindings')
def startService(self):
for binding in self.bindings:
binding.listen(self.client_protocol_factory)
self.flush_looping_call.start(0.033)
service.Service.startService(self)
def stopService(self):
self.flush_looping_call.stop()
service.Service.stopService(self)
def add_binding(self, interface, port, maxclients, maxdown, maxup, max_duplicate_peers):
binding = Binding(reactor, self.metrics_service, interface, port, maxclients=maxclients, channels=2, maxdown=maxdown, maxup=maxup, max_duplicate_peers=max_duplicate_peers)
self.bindings.add(binding)
def flush_all(self):
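        # System-event triggers are consumed when the event fires, so re-arm
        # the 'flush_bindings' trigger on the next reactor tick before
        # flushing every binding.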
reactor.callLater(0, reactor.addSystemEventTrigger, 'during', 'flush_bindings', self.flush_all)
try:
for binding in self.bindings:
binding.flush()
self.flush_rate_aggregator.tick()
except:
traceback.print_exc()
|
[
"traceback.print_exc",
"twisted.internet.reactor.addSystemEventTrigger",
"spyd.server.binding.binding.Binding",
"twisted.application.service.Service.startService",
"twisted.application.service.Service.stopService",
"spyd.server.metrics.rate_aggregator.RateAggregator",
"twisted.internet.task.LoopingCall",
"twisted.internet.reactor.callLater"
] |
[((500, 554), 'spyd.server.metrics.rate_aggregator.RateAggregator', 'RateAggregator', (['metrics_service', '"""flush_all_rate"""', '(1.0)'], {}), "(metrics_service, 'flush_all_rate', 1.0)\n", (514, 554), False, 'from spyd.server.metrics.rate_aggregator import RateAggregator\n'), ((564, 637), 'twisted.internet.reactor.addSystemEventTrigger', 'reactor.addSystemEventTrigger', (['"""during"""', '"""flush_bindings"""', 'self.flush_all'], {}), "('during', 'flush_bindings', self.flush_all)\n", (593, 637), False, 'from twisted.internet import reactor, task\n'), ((672, 731), 'twisted.internet.task.LoopingCall', 'task.LoopingCall', (['reactor.fireSystemEvent', '"""flush_bindings"""'], {}), "(reactor.fireSystemEvent, 'flush_bindings')\n", (688, 731), False, 'from twisted.internet import reactor, task\n'), ((911, 945), 'twisted.application.service.Service.startService', 'service.Service.startService', (['self'], {}), '(self)\n', (939, 945), False, 'from twisted.application import service\n'), ((1021, 1054), 'twisted.application.service.Service.stopService', 'service.Service.stopService', (['self'], {}), '(self)\n', (1048, 1054), False, 'from twisted.application import service\n'), ((1167, 1337), 'spyd.server.binding.binding.Binding', 'Binding', (['reactor', 'self.metrics_service', 'interface', 'port'], {'maxclients': 'maxclients', 'channels': '(2)', 'maxdown': 'maxdown', 'maxup': 'maxup', 'max_duplicate_peers': 'max_duplicate_peers'}), '(reactor, self.metrics_service, interface, port, maxclients=\n maxclients, channels=2, maxdown=maxdown, maxup=maxup,\n max_duplicate_peers=max_duplicate_peers)\n', (1174, 1337), False, 'from spyd.server.binding.binding import Binding\n'), ((1398, 1497), 'twisted.internet.reactor.callLater', 'reactor.callLater', (['(0)', 'reactor.addSystemEventTrigger', '"""during"""', '"""flush_bindings"""', 'self.flush_all'], {}), "(0, reactor.addSystemEventTrigger, 'during',\n 'flush_bindings', self.flush_all)\n", (1415, 1497), False, 'from twisted.internet import reactor, task\n'), ((1655, 1676), 'traceback.print_exc', 'traceback.print_exc', ([], {}), '()\n', (1674, 1676), False, 'import traceback\n')]
|
import responses
from tests.ad.conftest import RE_BASE
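# These tests stub the /profiles endpoints with `responses`; note the client
# normalizes camelCase keys (hasEverBeenCommitted) to snake_case in its output.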
@responses.activate
def test_profiles_list(api):
responses.add(responses.GET,
f'{RE_BASE}/profiles',
json=[{
'id': 1,
'name': 'profile name',
'deleted': False,
'directories': [1, 2],
'dirty': True,
'hasEverBeenCommitted': True
}]
)
resp = api.profiles.list()
assert isinstance(resp, list)
assert len(resp) == 1
assert resp[0]['id'] == 1
assert resp[0]['name'] == 'profile name'
assert resp[0]['deleted'] is False
assert resp[0]['directories'] == [1, 2]
assert resp[0]['dirty'] is True
assert resp[0]['has_ever_been_committed'] is True
@responses.activate
def test_profiles_create(api):
responses.add(responses.POST,
f'{RE_BASE}/profiles',
json=[{
'id': 1,
'name': 'profile name',
'deleted': False,
'directories': [1, 2],
'dirty': True,
'hasEverBeenCommitted': True
}]
)
resp = api.profiles.create(name='profile name',
directories=[1, 2])
assert isinstance(resp, list)
assert len(resp) == 1
assert resp[0]['id'] == 1
assert resp[0]['name'] == 'profile name'
assert resp[0]['deleted'] is False
assert resp[0]['directories'] == [1, 2]
assert resp[0]['dirty'] is True
assert resp[0]['has_ever_been_committed'] is True
@responses.activate
def test_profiles_details(api):
responses.add(responses.GET,
f'{RE_BASE}/profiles/1',
json={
'id': 1,
'name': 'profile name',
'deleted': False,
'directories': [1, 2],
'dirty': True,
'hasEverBeenCommitted': True
}
)
resp = api.profiles.details(profile_id='1')
assert isinstance(resp, dict)
assert resp['id'] == 1
assert resp['name'] == 'profile name'
assert resp['deleted'] is False
assert resp['directories'] == [1, 2]
assert resp['dirty'] is True
assert resp['has_ever_been_committed'] is True
@responses.activate
def test_profiles_update(api):
responses.add(responses.PATCH,
f'{RE_BASE}/profiles/1',
json={
'id': 1,
'name': 'profile name',
'deleted': False,
'directories': [1, 2],
'dirty': True,
'hasEverBeenCommitted': True
}
)
resp = api.profiles.update(profile_id='1',
name='profile name',
deleted=True,
directories=[1, 2])
assert isinstance(resp, dict)
assert resp['id'] == 1
assert resp['name'] == 'profile name'
assert resp['deleted'] is False
assert resp['directories'] == [1, 2]
assert resp['dirty'] is True
assert resp['has_ever_been_committed'] is True
@responses.activate
def test_profiles_delete(api):
responses.add(responses.DELETE,
f'{RE_BASE}/profiles/1',
json=None
)
resp = api.profiles.delete(profile_id='1')
assert resp is None
@responses.activate
def test_profiles_copy_profile(api):
responses.add(responses.POST,
f'{RE_BASE}/profiles/from/1',
json={
'id': 1,
'name': 'copied profile',
'deleted': False,
'directories': [1, 2],
'dirty': True,
'hasEverBeenCommitted': True
}
)
resp = api.profiles.copy_profile(from_id='1',
name='copied profile',
directories=[1, 2])
assert isinstance(resp, dict)
assert resp['id'] == 1
assert resp['name'] == 'copied profile'
assert resp['deleted'] is False
assert resp['directories'] == [1, 2]
assert resp['dirty'] is True
assert resp['has_ever_been_committed'] is True
@responses.activate
def test_profiles_commit(api):
responses.add(responses.POST,
f'{RE_BASE}/profiles/1/commit',
json=None
)
resp = api.profiles.commit(profile_id='1')
assert resp is None
@responses.activate
def test_profiles_unstage(api):
responses.add(responses.POST,
f'{RE_BASE}/profiles/1/unstage',
json=None
)
resp = api.profiles.unstage(profile_id='1')
assert resp is None
|
[
"responses.add"
] |
[((111, 298), 'responses.add', 'responses.add', (['responses.GET', 'f"""{RE_BASE}/profiles"""'], {'json': "[{'id': 1, 'name': 'profile name', 'deleted': False, 'directories': [1, 2],\n 'dirty': True, 'hasEverBeenCommitted': True}]"}), "(responses.GET, f'{RE_BASE}/profiles', json=[{'id': 1, 'name':\n 'profile name', 'deleted': False, 'directories': [1, 2], 'dirty': True,\n 'hasEverBeenCommitted': True}])\n", (124, 298), False, 'import responses\n'), ((894, 1082), 'responses.add', 'responses.add', (['responses.POST', 'f"""{RE_BASE}/profiles"""'], {'json': "[{'id': 1, 'name': 'profile name', 'deleted': False, 'directories': [1, 2],\n 'dirty': True, 'hasEverBeenCommitted': True}]"}), "(responses.POST, f'{RE_BASE}/profiles', json=[{'id': 1, 'name':\n 'profile name', 'deleted': False, 'directories': [1, 2], 'dirty': True,\n 'hasEverBeenCommitted': True}])\n", (907, 1082), False, 'import responses\n'), ((1751, 1938), 'responses.add', 'responses.add', (['responses.GET', 'f"""{RE_BASE}/profiles/1"""'], {'json': "{'id': 1, 'name': 'profile name', 'deleted': False, 'directories': [1, 2],\n 'dirty': True, 'hasEverBeenCommitted': True}"}), "(responses.GET, f'{RE_BASE}/profiles/1', json={'id': 1, 'name':\n 'profile name', 'deleted': False, 'directories': [1, 2], 'dirty': True,\n 'hasEverBeenCommitted': True})\n", (1764, 1938), False, 'import responses\n'), ((2507, 2696), 'responses.add', 'responses.add', (['responses.PATCH', 'f"""{RE_BASE}/profiles/1"""'], {'json': "{'id': 1, 'name': 'profile name', 'deleted': False, 'directories': [1, 2],\n 'dirty': True, 'hasEverBeenCommitted': True}"}), "(responses.PATCH, f'{RE_BASE}/profiles/1', json={'id': 1,\n 'name': 'profile name', 'deleted': False, 'directories': [1, 2],\n 'dirty': True, 'hasEverBeenCommitted': True})\n", (2520, 2696), False, 'import responses\n'), ((3412, 3479), 'responses.add', 'responses.add', (['responses.DELETE', 'f"""{RE_BASE}/profiles/1"""'], {'json': 'None'}), "(responses.DELETE, f'{RE_BASE}/profiles/1', json=None)\n", (3425, 3479), False, 'import responses\n'), ((3669, 3864), 'responses.add', 'responses.add', (['responses.POST', 'f"""{RE_BASE}/profiles/from/1"""'], {'json': "{'id': 1, 'name': 'copied profile', 'deleted': False, 'directories': [1, 2],\n 'dirty': True, 'hasEverBeenCommitted': True}"}), "(responses.POST, f'{RE_BASE}/profiles/from/1', json={'id': 1,\n 'name': 'copied profile', 'deleted': False, 'directories': [1, 2],\n 'dirty': True, 'hasEverBeenCommitted': True})\n", (3682, 3864), False, 'import responses\n'), ((4554, 4626), 'responses.add', 'responses.add', (['responses.POST', 'f"""{RE_BASE}/profiles/1/commit"""'], {'json': 'None'}), "(responses.POST, f'{RE_BASE}/profiles/1/commit', json=None)\n", (4567, 4626), False, 'import responses\n'), ((4811, 4884), 'responses.add', 'responses.add', (['responses.POST', 'f"""{RE_BASE}/profiles/1/unstage"""'], {'json': 'None'}), "(responses.POST, f'{RE_BASE}/profiles/1/unstage', json=None)\n", (4824, 4884), False, 'import responses\n')]
|
from flask import Flask, render_template
app = Flask(__name__)
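# context variables are passed to the Jinja templates as render_template keyword arguments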
@app.route('/')
def index():
siteTitle = 'siteIndex'
name = 'Tuomo'
listOfThings = ['A thing', 'The Thing', 'Thing', 'A Big Thing']
return render_template('base.html',
name=name,
siteTitle=siteTitle,
listOfThings=listOfThings)
@app.route('/child')
def child():
siteTitle = 'Child page'
return render_template('child.html', siteTitle=siteTitle)
if __name__ == '__main__':
app.run(debug=True)
|
[
"flask.Flask",
"flask.render_template"
] |
[((47, 62), 'flask.Flask', 'Flask', (['__name__'], {}), '(__name__)\n', (52, 62), False, 'from flask import Flask, render_template\n'), ((219, 311), 'flask.render_template', 'render_template', (['"""base.html"""'], {'name': 'name', 'siteTitle': 'siteTitle', 'listOfThings': 'listOfThings'}), "('base.html', name=name, siteTitle=siteTitle, listOfThings=\n listOfThings)\n", (234, 311), False, 'from flask import Flask, render_template\n'), ((396, 446), 'flask.render_template', 'render_template', (['"""child.html"""'], {'siteTitle': 'siteTitle'}), "('child.html', siteTitle=siteTitle)\n", (411, 446), False, 'from flask import Flask, render_template\n')]
|
## management script: run admin commands (like the deploy task below) without starting the Flask debug server
from flask_script import Manager
from songbase import app, db, Artist, Song
manager = Manager(app)
# reset the database, then create two artists and one song
@manager.command
def deploy():
db.drop_all()
db.create_all()
coldplay = Artist(name='Coldplay', about='Coldplay is a British rock band.')
maroon5 = Artist(name='<NAME>', about='Maroon 5 is an American pop rock band.')
song1 = Song(name='yellow', year=2004, lyrics='blah blah', artist=coldplay)
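    # adding the artist should also persist song1 via SQLAlchemy's default
    # save-update relationship cascade (assuming songbase defines that relationship)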
db.session.add(coldplay)
db.session.add(maroon5)
db.session.commit()
if __name__ == '__main__':
manager.run()
|
[
"flask_script.Manager",
"songbase.db.session.add",
"songbase.db.session.commit",
"songbase.db.drop_all",
"songbase.Artist",
"songbase.db.create_all",
"songbase.Song"
] |
[((196, 208), 'flask_script.Manager', 'Manager', (['app'], {}), '(app)\n', (203, 208), False, 'from flask_script import Manager\n'), ((290, 303), 'songbase.db.drop_all', 'db.drop_all', ([], {}), '()\n', (301, 303), False, 'from songbase import app, db, Artist, Song\n'), ((308, 323), 'songbase.db.create_all', 'db.create_all', ([], {}), '()\n', (321, 323), False, 'from songbase import app, db, Artist, Song\n'), ((339, 404), 'songbase.Artist', 'Artist', ([], {'name': '"""Coldplay"""', 'about': '"""Coldplay is a British rock band."""'}), "(name='Coldplay', about='Coldplay is a British rock band.')\n", (345, 404), False, 'from songbase import app, db, Artist, Song\n'), ((419, 488), 'songbase.Artist', 'Artist', ([], {'name': '"""<NAME>"""', 'about': '"""Maroon 5 is an American pop rock band."""'}), "(name='<NAME>', about='Maroon 5 is an American pop rock band.')\n", (425, 488), False, 'from songbase import app, db, Artist, Song\n'), ((501, 568), 'songbase.Song', 'Song', ([], {'name': '"""yellow"""', 'year': '(2004)', 'lyrics': '"""blah blah"""', 'artist': 'coldplay'}), "(name='yellow', year=2004, lyrics='blah blah', artist=coldplay)\n", (505, 568), False, 'from songbase import app, db, Artist, Song\n'), ((573, 597), 'songbase.db.session.add', 'db.session.add', (['coldplay'], {}), '(coldplay)\n', (587, 597), False, 'from songbase import app, db, Artist, Song\n'), ((602, 625), 'songbase.db.session.add', 'db.session.add', (['maroon5'], {}), '(maroon5)\n', (616, 625), False, 'from songbase import app, db, Artist, Song\n'), ((630, 649), 'songbase.db.session.commit', 'db.session.commit', ([], {}), '()\n', (647, 649), False, 'from songbase import app, db, Artist, Song\n')]
|
import random
from pathlib import Path
from pkg_resources import resource_filename as _resource_filename
from ..toolz import (
pipe, curry, compose, memoize, concatv, groupby, take,
filter, map, strip_comments, sort_by, vmap, get, noop,
)
resource_filename = curry(_resource_filename)(__name__)
path = compose(
Path,
resource_filename,
str,
lambda p: Path(p),
)
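# toolz.compose applies right-to-left: Path(p) -> str -> resource_filename -> Path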
@memoize
def user_agents():
return path('user-agents.txt').read_text().splitlines()
def random_user_agent():
return pipe(
user_agents(),
random.choice,
)
@memoize
def nmap_services(services_path='nmap-services'):
    # resolve the data file as a package resource, mirroring user_agents() above
    return pipe(
        path(services_path).read_text().splitlines(),
strip_comments,
filter(None),
map(lambda l: l.split('\t')[:3]),
map(lambda t: tuple(
concatv(t[:1], t[1].split('/'), map(float, t[-1:]))
)),
sort_by(lambda t: t[-1]),
vmap(lambda name, port, proto, perc: {
'name': name, 'port': port, 'proto': proto, 'perc': perc,
}),
tuple,
)
@curry
def top_ports(n, *, proto='tcp', services_generator=nmap_services,
just_ports=True):
'''For a given protocol ('tcp' or 'udp') and a services generator
(default nmap services file), return the top n ports
'''
return pipe(
services_generator(),
groupby(lambda d: d['proto']),
lambda d: d[proto],
sort_by(get('perc'), reverse=True),
map(get('port')) if just_ports else noop,
take(n),
tuple,
)
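# Example usage (hypothetical output values):
#     top_ports(3)                      # -> ('80', '23', '443')
#     top_ports(3, just_ports=False)    # -> a tuple of full service dicts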
|
[
"pathlib.Path"
] |
[((378, 385), 'pathlib.Path', 'Path', (['p'], {}), '(p)\n', (382, 385), False, 'from pathlib import Path\n'), ((650, 660), 'pathlib.Path', 'Path', (['path'], {}), '(path)\n', (654, 660), False, 'from pathlib import Path\n')]
|
import pytest
import allure
from Ar_Script.Meetu_Ui_Test.Pages.base_page import *
import json
from appium import webdriver
import time
import os
import openpyxl
from Ar_Script.Meetu_Ui_Test.common.get_info import get_meminfo_data,saveData,get_cpu_data,get_activity_name
from Ar_Script.Meetu_Ui_Test.common.app_command import *
import logging
import random
import subprocess
# def test_app_cpu_home_stay_cost():
# package_info=get_activity_name()
# os.popen('adb shell am start {}/{}'.format(package_info[1],package_info[2]))
# time.sleep(5)
# os.popen('adb shell input keyevent 4 ')
# result=[('test time', 'cpu percent')]
# result.extend(get_cpu_data(package_info[1]))
# data_save=Data_Save(result,'cpu_test','test_result.xlsx',(0,1),(0,'C1'))
# data_save.save_data()
#
class Test_Performance:
@classmethod
    def setup_class(cls):
os.popen('adb shell pm clear {}'.format('com.social.nene'))
        print('clearing app data')
def setup(self):
os.chdir(os.curdir)
        with open(r'..\config\phone.json') as f:
desired_caps = json.load(f)['sanxingC8_meetu']
self.driver = webdriver.Remote('http://localhost:4723/wd/hub', desired_caps)
self.package_info=('',desired_caps['appPackage'],desired_caps['appActivity'])
self.account_page = Account_login_page(self.driver)
self.start_page = StarPage(self.driver)
self.home_page=Home_page(self.driver)
self.hot_start_control=Control(self.package_info,count=16,driver=self.driver)
self.cold_start_control=Control(self.package_info,count=16,driver=self.driver,mode=1)
self.loger=Loger()
# self.windows_size = self.driver.get_window_size()
# self.height = self.windows_size['height']
# self.width = self.windows_size['width']
self.driver.implicitly_wait(30)
def teardown(self):
self.driver.quit()
@allure.story('重复启动app内存测试')
@pytest.mark.parametrize('package,activity',[('com.social.nene',"com.funny.lovu.splash.LaunchActivity")])
def test_meminfo(self,package,activity):
account='<EMAIL>'
psw='123456'
meminfo_list=[]
        # log in to the app
self.start_page.click_account_login()
self.account_page.account_input(account)
self.account_page.psw_input(psw)
self.account_page.click_login()
        # dismiss the permission-allow popup
self.home_page.permission_allow()
        # close the onboarding guide animation
self.home_page.close_guide()
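        # background the app with the BACK key and relaunch it 15 times,
        # sampling memory after each successful reload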
for i in range(15):
self.driver.keyevent(4)
self.driver.start_activity(package,activity)
if self.home_page.loading_finish_judge():
result=get_meminfo_data(package)
meminfo_list.append(result)
logging.debug(meminfo_list)
saveData(meminfo_list,file_attr='liki_v{}'.format("test"))
@allure.story('启动时间测试')
@pytest.mark.parametrize('package_info',[('','com.social.nene',"com.funny.lovu.splash.LaunchActivity")])
def test_app_start_time(self,package_info):
self.hot_start_control.run()
        self.hot_start_control.saveData('hot_start_v34')
self.cold_start_control.run()
        self.cold_start_control.saveData('cold_start_v34')
    # @allure.story('cpu idle-state consumption recording')
# def test_app_cpu_home_stay_cost(self):
# print(get_cpu_data_t(self.package_info[1]))
# self.driver.keyevent(4)
    # result=[('test time', 'cpu percent')]
# result.extend(get_cpu_data_t(self.package_info[1]))
# data_save=Data_Save(result,'cpu_test','test_result.xlsx',(0,1),(0,'C1'))
# data_save.save_data()
if __name__ == '__main__':
pytest.main(['-s','performance_data_test.py'])
# print(get_cpu_data('com.real'))
# test_app_cpu_home_stay_cost()
|
[
"json.load",
"logging.debug",
"allure.story",
"pytest.main",
"appium.webdriver.Remote",
"pytest.mark.parametrize",
"os.chdir",
"Ar_Script.Meetu_Ui_Test.common.get_info.get_meminfo_data"
] |
[((1912, 1939), 'allure.story', 'allure.story', (['"""重复启动app内存测试"""'], {}), "('重复启动app内存测试')\n", (1924, 1939), False, 'import allure\n'), ((1945, 2055), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""package,activity"""', "[('com.social.nene', 'com.funny.lovu.splash.LaunchActivity')]"], {}), "('package,activity', [('com.social.nene',\n 'com.funny.lovu.splash.LaunchActivity')])\n", (1968, 2055), False, 'import pytest\n'), ((2854, 2876), 'allure.story', 'allure.story', (['"""启动时间测试"""'], {}), "('启动时间测试')\n", (2866, 2876), False, 'import allure\n'), ((2882, 2992), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""package_info"""', "[('', 'com.social.nene', 'com.funny.lovu.splash.LaunchActivity')]"], {}), "('package_info', [('', 'com.social.nene',\n 'com.funny.lovu.splash.LaunchActivity')])\n", (2905, 2992), False, 'import pytest\n'), ((3631, 3678), 'pytest.main', 'pytest.main', (["['-s', 'performance_data_test.py']"], {}), "(['-s', 'performance_data_test.py'])\n", (3642, 3678), False, 'import pytest\n'), ((986, 1005), 'os.chdir', 'os.chdir', (['os.curdir'], {}), '(os.curdir)\n', (994, 1005), False, 'import os\n'), ((1136, 1198), 'appium.webdriver.Remote', 'webdriver.Remote', (['"""http://localhost:4723/wd/hub"""', 'desired_caps'], {}), "('http://localhost:4723/wd/hub', desired_caps)\n", (1152, 1198), False, 'from appium import webdriver\n'), ((2752, 2779), 'logging.debug', 'logging.debug', (['meminfo_list'], {}), '(meminfo_list)\n', (2765, 2779), False, 'import logging\n'), ((1080, 1092), 'json.load', 'json.load', (['f'], {}), '(f)\n', (1089, 1092), False, 'import json\n'), ((2673, 2698), 'Ar_Script.Meetu_Ui_Test.common.get_info.get_meminfo_data', 'get_meminfo_data', (['package'], {}), '(package)\n', (2689, 2698), False, 'from Ar_Script.Meetu_Ui_Test.common.get_info import get_meminfo_data, saveData, get_cpu_data, get_activity_name\n')]
|
import unittest
import asyncio
import motor.motor_asyncio
import city_generator
client = motor.motor_asyncio.AsyncIOMotorClient('localhost', 27017)
db = client.local
loop = asyncio.get_event_loop()
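# one shared event loop drives the async motor/city_generator calls from the synchronous tests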
async def get_all_cities(cap=500) -> list:
return await db.Cities.find({}).to_list(cap)
async def get_city_by_index(index) -> dict:
return await db.Cities.find_one({'index': {'$eq': index}})
class TestCity(unittest.TestCase):
def test_collection_cleaning(self):
loop.run_until_complete(city_generator.remove_cities())
def test_insert_city(self):
loop.run_until_complete(city_generator.remove_cities())
loop.run_until_complete(city_generator.insert_city(1))
self.assertEqual(len(loop.run_until_complete(get_all_cities(200))), 1)
loop.run_until_complete(city_generator.remove_cities())
def test_get_city(self):
loop.run_until_complete(city_generator.remove_cities())
loop.run_until_complete(city_generator.insert_city(5))
self.assertIn('name', loop.run_until_complete(get_city_by_index(5)))
self.assertIn('index', loop.run_until_complete(get_city_by_index(5)))
self.assertIn('roads', loop.run_until_complete(get_city_by_index(5)))
loop.run_until_complete(city_generator.remove_cities())
def test_get_replace_city(self):
loop.run_until_complete(city_generator.remove_cities())
loop.run_until_complete(city_generator.insert_city(5))
city = loop.run_until_complete(city_generator.get_city(5))
city['name'] = 'replaced_city'
loop.run_until_complete(city_generator.replace_city(5, city))
loop.run_until_complete(city_generator.remove_cities())
def test_state_generation(self):
loop.run_until_complete(city_generator.remove_cities())
loop.run_until_complete(city_generator.generate_state(20, 60))
self.assertEqual(len(loop.run_until_complete(get_all_cities(200))), 20)
loop.run_until_complete(city_generator.remove_cities())
if __name__ == '__main__':
unittest.main()
|
[
"unittest.main",
"asyncio.get_event_loop",
"city_generator.generate_state",
"city_generator.remove_cities",
"city_generator.replace_city",
"city_generator.get_city",
"city_generator.insert_city"
] |
[((175, 199), 'asyncio.get_event_loop', 'asyncio.get_event_loop', ([], {}), '()\n', (197, 199), False, 'import asyncio\n'), ((2053, 2068), 'unittest.main', 'unittest.main', ([], {}), '()\n', (2066, 2068), False, 'import unittest\n'), ((510, 540), 'city_generator.remove_cities', 'city_generator.remove_cities', ([], {}), '()\n', (538, 540), False, 'import city_generator\n'), ((607, 637), 'city_generator.remove_cities', 'city_generator.remove_cities', ([], {}), '()\n', (635, 637), False, 'import city_generator\n'), ((671, 700), 'city_generator.insert_city', 'city_generator.insert_city', (['(1)'], {}), '(1)\n', (697, 700), False, 'import city_generator\n'), ((813, 843), 'city_generator.remove_cities', 'city_generator.remove_cities', ([], {}), '()\n', (841, 843), False, 'import city_generator\n'), ((907, 937), 'city_generator.remove_cities', 'city_generator.remove_cities', ([], {}), '()\n', (935, 937), False, 'import city_generator\n'), ((971, 1000), 'city_generator.insert_city', 'city_generator.insert_city', (['(5)'], {}), '(5)\n', (997, 1000), False, 'import city_generator\n'), ((1267, 1297), 'city_generator.remove_cities', 'city_generator.remove_cities', ([], {}), '()\n', (1295, 1297), False, 'import city_generator\n'), ((1369, 1399), 'city_generator.remove_cities', 'city_generator.remove_cities', ([], {}), '()\n', (1397, 1399), False, 'import city_generator\n'), ((1433, 1462), 'city_generator.insert_city', 'city_generator.insert_city', (['(5)'], {}), '(5)\n', (1459, 1462), False, 'import city_generator\n'), ((1503, 1529), 'city_generator.get_city', 'city_generator.get_city', (['(5)'], {}), '(5)\n', (1526, 1529), False, 'import city_generator\n'), ((1602, 1638), 'city_generator.replace_city', 'city_generator.replace_city', (['(5)', 'city'], {}), '(5, city)\n', (1629, 1638), False, 'import city_generator\n'), ((1672, 1702), 'city_generator.remove_cities', 'city_generator.remove_cities', ([], {}), '()\n', (1700, 1702), False, 'import city_generator\n'), ((1774, 1804), 'city_generator.remove_cities', 'city_generator.remove_cities', ([], {}), '()\n', (1802, 1804), False, 'import city_generator\n'), ((1838, 1875), 'city_generator.generate_state', 'city_generator.generate_state', (['(20)', '(60)'], {}), '(20, 60)\n', (1867, 1875), False, 'import city_generator\n'), ((1989, 2019), 'city_generator.remove_cities', 'city_generator.remove_cities', ([], {}), '()\n', (2017, 2019), False, 'import city_generator\n')]
|