code stringlengths 22 1.05M | apis listlengths 1 3.31k | extract_api stringlengths 75 3.25M |
|---|---|---|
# coding: utf-8
from django.db import models
from django.contrib.auth.models import User
from RoomManage.models import Room, Customs
# Create your models here.
class Task(models.Model):
    """A housekeeping task assigned to a user for a specific room."""
    context = models.TextField()
    date = models.DateTimeField()
    # Workflow state of the task; newly created tasks start as 'undo'.
    task_status = models.CharField(max_length=20, default='undo')
    # on_delete=models.CASCADE reproduces the implicit pre-Django-2.0 default
    # and is mandatory as an explicit argument on Django 2.0+.
    user = models.ForeignKey(User, on_delete=models.CASCADE)
    room = models.ForeignKey(Room, on_delete=models.CASCADE)
    def __str__(self):
        return '%s %s - %s' % (self.user.last_name, self.user.first_name, self.room.room_num)
class Attendance(models.Model):
    """A work shift: clock-in time plus an optional clock-out time."""
    clock_in = models.DateTimeField()
    # clock_out stays empty while the shift is still in progress.
    clock_out = models.DateTimeField(null=True, blank=True)
    # on_delete=models.CASCADE reproduces the implicit pre-Django-2.0 default
    # and is mandatory as an explicit argument on Django 2.0+.
    user = models.ForeignKey(User, on_delete=models.CASCADE)
    def __str__(self):
        return '%s %s -- %s' % (self.user.last_name, self.user.first_name, self.clock_in)
class Emergency(models.Model):
    """An emergency raised for a room, optionally linked to a handling user."""
    date_time = models.DateTimeField()
    # on_delete=models.CASCADE reproduces the implicit pre-Django-2.0 default
    # and is mandatory as an explicit argument on Django 2.0+.
    room = models.ForeignKey(Room, on_delete=models.CASCADE)
    # user is optional: an emergency may exist before anyone is assigned.
    user = models.ForeignKey(User, null=True, blank=True, on_delete=models.CASCADE)
    def __str__(self):
        # user may be NULL; guard against AttributeError on unassigned events.
        if self.user is None:
            return 'unassigned - %s' % self.room.room_num
        return '%s %s - %s' % (self.user.last_name, self.user.first_name, self.room.room_num)
    class Meta:
        permissions = (
            ('create_emergency', 'can create a emergency'),
        )
| [
"django.db.models.DateTimeField",
"django.db.models.ForeignKey",
"django.db.models.TextField",
"django.db.models.CharField"
] | [((205, 223), 'django.db.models.TextField', 'models.TextField', ([], {}), '()\n', (221, 223), False, 'from django.db import models\n'), ((235, 257), 'django.db.models.DateTimeField', 'models.DateTimeField', ([], {}), '()\n', (255, 257), False, 'from django.db import models\n'), ((276, 323), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(20)', 'default': '"""undo"""'}), "(max_length=20, default='undo')\n", (292, 323), False, 'from django.db import models\n'), ((335, 358), 'django.db.models.ForeignKey', 'models.ForeignKey', (['User'], {}), '(User)\n', (352, 358), False, 'from django.db import models\n'), ((370, 393), 'django.db.models.ForeignKey', 'models.ForeignKey', (['Room'], {}), '(Room)\n', (387, 393), False, 'from django.db import models\n'), ((561, 583), 'django.db.models.DateTimeField', 'models.DateTimeField', ([], {}), '()\n', (581, 583), False, 'from django.db import models\n'), ((600, 643), 'django.db.models.DateTimeField', 'models.DateTimeField', ([], {'null': '(True)', 'blank': '(True)'}), '(null=True, blank=True)\n', (620, 643), False, 'from django.db import models\n'), ((655, 678), 'django.db.models.ForeignKey', 'models.ForeignKey', (['User'], {}), '(User)\n', (672, 678), False, 'from django.db import models\n'), ((842, 864), 'django.db.models.DateTimeField', 'models.DateTimeField', ([], {}), '()\n', (862, 864), False, 'from django.db import models\n'), ((876, 899), 'django.db.models.ForeignKey', 'models.ForeignKey', (['Room'], {}), '(Room)\n', (893, 899), False, 'from django.db import models\n'), ((911, 957), 'django.db.models.ForeignKey', 'models.ForeignKey', (['User'], {'null': '(True)', 'blank': '(True)'}), '(User, null=True, blank=True)\n', (928, 957), False, 'from django.db import models\n')] |
import pathlib
import pytest
from pypendency.parser.yaml import Parser
from pypendency.lexer import LarkRelationLexer
def test_read_yaml_node_length():
    """The bundled example.yml must parse into a graph with four nodes."""
    # (The original assigned an unused `file` variable; the parser only
    # needs the folder containing the YAML examples.)
    lexer = LarkRelationLexer()
    p = Parser(lexer=lexer, folder=pathlib.Path(__file__).parent)
    g = p.parse("example.yml")
    length = len(g.nodes)
    pytest.assume(length == 4)
| [
"pypendency.lexer.LarkRelationLexer",
"pytest.assume",
"pathlib.Path"
] | [((225, 244), 'pypendency.lexer.LarkRelationLexer', 'LarkRelationLexer', ([], {}), '()\n', (242, 244), False, 'from pypendency.lexer import LarkRelationLexer\n'), ((372, 398), 'pytest.assume', 'pytest.assume', (['(length == 4)'], {}), '(length == 4)\n', (385, 398), False, 'import pytest\n'), ((167, 189), 'pathlib.Path', 'pathlib.Path', (['__file__'], {}), '(__file__)\n', (179, 189), False, 'import pathlib\n'), ((280, 302), 'pathlib.Path', 'pathlib.Path', (['__file__'], {}), '(__file__)\n', (292, 302), False, 'import pathlib\n')] |
# Copyright 2015 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""The actuator interface and implementations.
"""
import json
import os
class Actuator(object):
    """Abstract interface for components that push images to a target."""

    def push(self, core, image, args):
        """Push an image to a target.

        Concrete subclasses must override this method.
        """
        raise NotImplementedError()
class ActuatorExtension(Actuator):
    """Actuator extension consisting of hook programs."""

    def __init__(self, actuator_root_dir):
        # An extension is identified by the basename of its root directory.
        self.root = actuator_root_dir
        self.name = os.path.basename(actuator_root_dir)

    def push(self, core, image, args):
        """Run this extension's 'push' hook, feeding it the image as JSON."""
        utils = core.get_utils()
        payload = json.dumps(image)
        source_dir = core.get_source_directory()
        result = utils.run_hook(self.root, 'push', source_dir, *args,
                                input=payload)
        return result
| [
"json.dumps",
"os.path.basename"
] | [((989, 1024), 'os.path.basename', 'os.path.basename', (['actuator_root_dir'], {}), '(actuator_root_dir)\n', (1005, 1024), False, 'import os\n'), ((1194, 1211), 'json.dumps', 'json.dumps', (['image'], {}), '(image)\n', (1204, 1211), False, 'import json\n')] |
# linuxjournalarchiver - Some hacky code I wrote to archive the Linux Journal.
# Licensed under the BSD-3-Clause license.
from bs4 import BeautifulSoup
import requests
import re
import pathlib
# Download the download page.
print("Downloading magazine list...")
session = requests.session()
# Update the User Agent to curl, this is because the server-side code
# handles curl specially.
session.headers.update({"User-Agent": "curl/7.65.3"})
r = session.get("https://secure2.linuxjournal.com/pdf/dljdownload.php")
soup = BeautifulSoup(r.text, "lxml")
# Process all the download buttons.
for e in reversed(soup.find_all("div", class_ = "downloadbtn")):
    # Some issues don't have certain file formats, skip these.
    if e.get_text() == "N/A":
        print("No link")
        continue
    # Certain downloadbtn div elements don't have a link, skip these.
    try:
        link = e.a.get("href")
    except AttributeError:
        print("Invalid element")
        continue
    # Download the magazine (streamed so large PDFs are not held in memory).
    magr = session.get(link + '&action=spit', stream = True)
    # Get the name from the Content-Disposition header and format it.
    name = re.findall(r'filename=(.+)', magr.headers["Content-Disposition"])
    name = name[0].strip('"')
    # Special treatment for Supplemental issues.
    if not "Supplement" in link:
        # Get the date.  Raw string so that "\." is a regex literal-dot
        # escape rather than an invalid string escape sequence.
        date = re.findall(r"....-..(?=\....)", name)[0]
        year, month = date.split("-")
        # Get the path.
        dirpath = pathlib.Path(f"{year}/{month}")
        dirpath.mkdir(parents = True, exist_ok = True)
        magpath = dirpath / name
    else:
        # We don't have a date,
        # so we use the "Supplement" folder as a fill in on suplemental issues.
        dirpath = pathlib.Path("Supplement")
        dirpath.mkdir(parents = True, exist_ok = True)
        magpath = dirpath / name
    # Don't download a magazine that we have already downloaded.
    if magpath.exists():
        print(f"{magpath} exists... skipping")
        continue
    # Debug printing.
    print(f"Downloading {link} to {magpath}...")
    # Save the data to a file.
    with open(f"{magpath}", "wb") as f:
        bytesdownloaded = 0
        for chunk in magr.iter_content(chunk_size = 8192):
            if chunk: # filter out keep-alive new chunks
                bytesdownloaded+=len(chunk)
                print(f"{name}: {bytesdownloaded}/{magr.headers['Content-Length']}")
f.write(chunk) | [
"bs4.BeautifulSoup",
"re.findall",
"requests.session",
"pathlib.Path"
] | [((272, 290), 'requests.session', 'requests.session', ([], {}), '()\n', (288, 290), False, 'import requests\n'), ((522, 551), 'bs4.BeautifulSoup', 'BeautifulSoup', (['r.text', '"""lxml"""'], {}), "(r.text, 'lxml')\n", (535, 551), False, 'from bs4 import BeautifulSoup\n'), ((1114, 1178), 're.findall', 're.findall', (['"""filename=(.+)"""', "magr.headers['Content-Disposition']"], {}), "('filename=(.+)', magr.headers['Content-Disposition'])\n", (1124, 1178), False, 'import re\n'), ((1453, 1484), 'pathlib.Path', 'pathlib.Path', (['f"""{year}/{month}"""'], {}), "(f'{year}/{month}')\n", (1465, 1484), False, 'import pathlib\n'), ((1713, 1740), 'pathlib.Path', 'pathlib.Path', (['f"""Supplement"""'], {}), "(f'Supplement')\n", (1725, 1740), False, 'import pathlib\n'), ((1332, 1369), 're.findall', 're.findall', (['"""....-..(?=\\\\....)"""', 'name'], {}), "('....-..(?=\\\\....)', name)\n", (1342, 1369), False, 'import re\n')] |
#! /usr/bin/env python3
"""
Pulls artifacts from external repo using branches defined in branchConfig.yaml
file.
Run the script with -h flag to learn about script's running options.
"""
__author__ = "<NAME>"
__copyright__ = "Copyright (C) 2018 ACK CYFRONET AGH"
__license__ = "This software is released under the MIT license cited in " \
"LICENSE.txt"
import os
import yaml
import argparse
import boto3
from paramiko import SSHClient, AutoAddPolicy
from pull_artifact import (download_artifact_safe,
download_specific_or_default,
s3_download_artifact_safe,
s3_download_specific_or_default)
# Path to the YAML file mapping Bamboo plans to source branches.
BRANCH_CFG_PATH = 'branchConfig.yaml'
# Environment variable set by Bamboo holding the branch being built.
BAMBOO_BRANCH_NAME = 'bamboo_planRepository_branchName'
# Key in branchConfig.yaml holding the fallback branch name.
DEFAULT_BRANCH = 'default'
# Sentinel branch value meaning "use the branch currently being built".
CURRENT_BRANCH = 'current_branch'
def main():
    """Pull artifacts for every plan listed in branchConfig.yaml.

    Artifacts are fetched over SSH, or from an S3 bucket when --hostname is
    the literal string 'S3'.  Plans mapped to the special branch value
    'current_branch' follow the branch currently built by Bamboo (taken
    from the BAMBOO_BRANCH_NAME environment variable).
    """
    parser = argparse.ArgumentParser(
        formatter_class=argparse.ArgumentDefaultsHelpFormatter,
        description='Pull sources and images lists for branches specified in '
                    'branchConfig.yaml file.')
    parser.add_argument(
        '--hostname', '-hn',
        help='Hostname of artifacts repository',
        required=True)
    parser.add_argument(
        '--port', '-p',
        type=int,
        help='SSH port to connect to',
        required=True)
    parser.add_argument(
        '--username', '-u',
        help='The username to authenticate as',
        required=True)
    parser.add_argument(
        '--s3-url',
        help='The S3 endpoint URL',
        default='https://storage.cloud.cyfronet.pl')
    parser.add_argument(
        '--s3-bucket',
        help='The S3 bucket name',
        default='bamboo-artifacts-2')
    args = parser.parse_args()

    if args.hostname != 'S3':
        ssh = SSHClient()
        ssh.set_missing_host_key_policy(AutoAddPolicy())
        ssh.load_system_host_keys()
        ssh.connect(args.hostname, port=args.port, username=args.username)
        with open(BRANCH_CFG_PATH, 'r') as branch_cfg_file:
            # safe_load: the config is plain data; yaml.load without an
            # explicit Loader is unsafe and rejected by PyYAML >= 6.
            branch_cfg = yaml.safe_load(branch_cfg_file)
        default_branch = branch_cfg.get(DEFAULT_BRANCH)
        for plan, branch in branch_cfg.get('branches').items():
            if branch != CURRENT_BRANCH:
                print('Getting artifact for plan {}\'s from branch {}'
                      .format(plan, branch))
                exc_log = 'Branch {} in plan {} not found.'.format(branch,
                                                                   plan)
                download_artifact_safe(ssh, plan, branch, args.hostname,
                                       args.port, args.username,
                                       exc_handler=exit,
                                       exc_handler_args=(1,),
                                       exc_log=exc_log)
            else:
                # 'current_branch' plans follow the branch Bamboo is building.
                download_specific_or_default(ssh, plan,
                                             os.getenv(BAMBOO_BRANCH_NAME),
                                             args.hostname, args.port,
                                             args.username,
                                             default_branch=default_branch)
        ssh.close()
    else:
        s3_session = boto3.session.Session()
        s3_res = s3_session.resource(
            service_name='s3',
            endpoint_url=args.s3_url
        )
        with open(BRANCH_CFG_PATH, 'r') as branch_cfg_file:
            # safe_load: see the SSH branch above.
            branch_cfg = yaml.safe_load(branch_cfg_file)
        default_branch = branch_cfg.get(DEFAULT_BRANCH)
        for plan, branch in branch_cfg.get('branches').items():
            if branch != CURRENT_BRANCH:
                print('Getting artifact for plan {}\'s from branch {}'
                      .format(plan, branch))
                exc_log = 'Branch {} in plan {} not found.'.format(branch,
                                                                   plan)
                s3_download_artifact_safe(s3_res, args.s3_bucket, plan, branch, args.hostname,
                                          args.port, args.username,
                                          exc_handler=exit,
                                          exc_handler_args=(1,),
                                          exc_log=exc_log)
            else:
                s3_download_specific_or_default(s3_res, args.s3_bucket, plan,
                                                os.getenv(BAMBOO_BRANCH_NAME),
                                                args.hostname, args.port,
                                                args.username,
                                                default_branch=default_branch)


if __name__ == '__main__':
    main()
| [
"boto3.session.Session",
"pull_artifact.download_artifact_safe",
"argparse.ArgumentParser",
"paramiko.AutoAddPolicy",
"pull_artifact.s3_download_artifact_safe",
"os.getenv",
"yaml.load",
"paramiko.SSHClient"
] | [((872, 1061), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'formatter_class': 'argparse.ArgumentDefaultsHelpFormatter', 'description': '"""Pull sources and images lists for branches specified in branchConfig.yaml file."""'}), "(formatter_class=argparse.\n ArgumentDefaultsHelpFormatter, description=\n 'Pull sources and images lists for branches specified in branchConfig.yaml file.'\n )\n", (895, 1061), False, 'import argparse\n'), ((1803, 1814), 'paramiko.SSHClient', 'SSHClient', ([], {}), '()\n', (1812, 1814), False, 'from paramiko import SSHClient, AutoAddPolicy\n'), ((3311, 3334), 'boto3.session.Session', 'boto3.session.Session', ([], {}), '()\n', (3332, 3334), False, 'import boto3\n'), ((1855, 1870), 'paramiko.AutoAddPolicy', 'AutoAddPolicy', ([], {}), '()\n', (1868, 1870), False, 'from paramiko import SSHClient, AutoAddPolicy\n'), ((2069, 2095), 'yaml.load', 'yaml.load', (['branch_cfg_file'], {}), '(branch_cfg_file)\n', (2078, 2095), False, 'import yaml\n'), ((3537, 3563), 'yaml.load', 'yaml.load', (['branch_cfg_file'], {}), '(branch_cfg_file)\n', (3546, 3563), False, 'import yaml\n'), ((2566, 2711), 'pull_artifact.download_artifact_safe', 'download_artifact_safe', (['ssh', 'plan', 'branch', 'args.hostname', 'args.port', 'args.username'], {'exc_handler': 'exit', 'exc_handler_args': '(1,)', 'exc_log': 'exc_log'}), '(ssh, plan, branch, args.hostname, args.port, args.\n username, exc_handler=exit, exc_handler_args=(1,), exc_log=exc_log)\n', (2588, 2711), False, 'from pull_artifact import download_artifact_safe, download_specific_or_default, s3_download_artifact_safe, s3_download_specific_or_default\n'), ((4038, 4210), 'pull_artifact.s3_download_artifact_safe', 's3_download_artifact_safe', (['s3_res', 'args.s3_bucket', 'plan', 'branch', 'args.hostname', 'args.port', 'args.username'], {'exc_handler': 'exit', 'exc_handler_args': '(1,)', 'exc_log': 'exc_log'}), '(s3_res, args.s3_bucket, plan, branch, args.\n hostname, args.port, args.username, 
exc_handler=exit, exc_handler_args=\n (1,), exc_log=exc_log)\n', (4063, 4210), False, 'from pull_artifact import download_artifact_safe, download_specific_or_default, s3_download_artifact_safe, s3_download_specific_or_default\n'), ((3010, 3039), 'os.getenv', 'os.getenv', (['BAMBOO_BRANCH_NAME'], {}), '(BAMBOO_BRANCH_NAME)\n', (3019, 3039), False, 'import os\n'), ((4541, 4570), 'os.getenv', 'os.getenv', (['BAMBOO_BRANCH_NAME'], {}), '(BAMBOO_BRANCH_NAME)\n', (4550, 4570), False, 'import os\n')] |
import os
import shutil
from distutils.dir_util import copy_tree
from setuptools import find_packages, setup
# global variables
# Root of the user's Jupyter notebooks area, exported by the PYNQ image.
nb_dir = os.environ['PYNQ_JUPYTER_NOTEBOOKS']
package_name = 'pystrath_rfsoc'
pip_name = 'pystrath-rfsoc'
# Extra data files to bundle with the package (currently none).
data_files = []
# copy common notebooks to jupyter home
def copy_common_notebooks():
    """Copy the bundled 'common' notebooks into the Jupyter notebooks area.

    Any existing 'rfsoc-notebooks' destination folder is removed first so
    that notebooks deleted upstream do not linger in the destination.
    """
    # os.path.join() with a single component is a no-op, so use the plain
    # relative path directly.
    src_dir = 'common'
    dst_dir = os.path.join(nb_dir, 'rfsoc-notebooks')
    if os.path.exists(dst_dir):
        shutil.rmtree(dst_dir)
    copy_tree(src_dir, dst_dir)
# Populate the notebook folder as a side effect of running this setup script.
copy_common_notebooks()
setup(
    name=package_name,
    version='0.2.0',
    # Pinned runtime dependencies for the delivered notebooks.
    install_requires=[
        'plotly==5.1.0',
        'pynq==2.7'
    ],
    author="<NAME>",
    packages=find_packages(),
    package_data={
        '': data_files,
    },
    description="A collection of RFSoC introductory notebooks for PYNQ.")
| [
"os.path.exists",
"distutils.dir_util.copy_tree",
"setuptools.find_packages",
"os.path.join",
"shutil.rmtree"
] | [((336, 359), 'os.path.join', 'os.path.join', (['f"""common"""'], {}), "(f'common')\n", (348, 359), False, 'import os\n'), ((374, 413), 'os.path.join', 'os.path.join', (['nb_dir', '"""rfsoc-notebooks"""'], {}), "(nb_dir, 'rfsoc-notebooks')\n", (386, 413), False, 'import os\n'), ((421, 444), 'os.path.exists', 'os.path.exists', (['dst_dir'], {}), '(dst_dir)\n', (435, 444), False, 'import os\n'), ((481, 508), 'distutils.dir_util.copy_tree', 'copy_tree', (['src_dir', 'dst_dir'], {}), '(src_dir, dst_dir)\n', (490, 508), False, 'from distutils.dir_util import copy_tree\n'), ((454, 476), 'shutil.rmtree', 'shutil.rmtree', (['dst_dir'], {}), '(dst_dir)\n', (467, 476), False, 'import shutil\n'), ((695, 710), 'setuptools.find_packages', 'find_packages', ([], {}), '()\n', (708, 710), False, 'from setuptools import find_packages, setup\n')] |
"""Tests of querying tools."""
import contextlib
import importlib
import io
import logging
import os
import pathlib
import sys
import tempfile
import unittest
from version_query.version import VersionComponent, Version
from version_query.git_query import query_git_repo, predict_git_repo
from version_query.py_query import query_metadata_json, query_pkg_info, query_package_folder
from version_query.query import \
query_folder, query_caller, query_version_str, predict_caller, predict_version_str
from .examples import \
PY_LIB_DIR, GIT_REPO_EXAMPLES, METADATA_JSON_EXAMPLE_PATHS, PKG_INFO_EXAMPLE_PATHS, \
PACKAGE_FOLDER_EXAMPLES
from .test_setup import run_module
_LOG = logging.getLogger(__name__)
class Tests(unittest.TestCase):
    """Tests of version_query's querying helpers.

    Most cases run against example repositories/packages discovered on the
    local machine, so individual query failures are logged rather than
    asserted in several places.
    """
    def test_deprecated(self):
        import warnings
        warnings.warn('remove this test after removing deprecated function', DeprecationWarning)
        from version_query import generate_version_str
        # The deprecated helper must still work, but has to warn.
        with self.assertWarns(DeprecationWarning):
            version_str = generate_version_str()
        self.assertIsInstance(version_str, str)
    def _check_examples_count(self, description, examples):
        # Few examples weaken the suite -- escalate log level when sparse.
        lvl = logging.WARNING if len(examples) < 10 else logging.INFO
        _LOG.log(lvl, '%s count: %i', description, len(examples))
        if len(examples) < 5:
            _LOG.warning('%s list: %s', description, examples)
        self.assertGreater(len(examples), 0)
    def test_example_count_checking(self):
        _LOG.warning('%s', PY_LIB_DIR)
        # An empty example list must be rejected...
        with self.assertRaises(AssertionError):
            self._check_examples_count('test', [])
        # ...while any non-empty list passes.
        self._check_examples_count('test', list(range(1)))
        self._check_examples_count('test', list(range(9)))
        self._check_examples_count('test', list(range(10)))
    def _query_test_case(self, paths, query_function):
        # Run query_function on every example path; ValueError only means the
        # example has no version information, so it is logged, not failed.
        for path in paths:
            with self.subTest(path=path, query_function=query_function):
                _LOG.debug('testing %s() on %s', query_function.__name__, path)
                try:
                    version = query_function(path)
                    _LOG.debug('%s: %s', path, version)
                except ValueError:
                    _LOG.info('failed to get version from %s', path, exc_info=True)
    def test_query_git_repo(self):
        self._check_examples_count('git repo', GIT_REPO_EXAMPLES)
        self._query_test_case(GIT_REPO_EXAMPLES, query_git_repo)
    def test_predict_caller_bad(self):
        # Build a throwaway module whose code calls predict_caller() from a
        # location that is not a repo/package -- importing it must fail.
        with tempfile.TemporaryDirectory() as project_path_str:
            with tempfile.NamedTemporaryFile(suffix='.py', dir=project_path_str,
                                             delete=False) as project_file:
                project_file_path = pathlib.Path(project_file.name)
            with project_file_path.open('a') as project_file:
                project_file.write('from version_query.query import predict_caller\n\n\n'
                                   'def caller():\n predict_caller()\n\n\ncaller()\n')
            sys.path.insert(0, project_path_str)
            _LOG.warning('inserted %s to sys.path', project_path_str)
            print(project_file_path)
            print(project_path_str)
            with self.assertRaises(ValueError):
                importlib.import_module(project_file_path.with_suffix('').name)
            sys.path.remove(project_path_str)
            _LOG.warning('removed %s from sys.path', project_path_str)
            project_file_path.unlink()
    def test_predict_git_repo(self):
        self._query_test_case(GIT_REPO_EXAMPLES, predict_git_repo)
    @unittest.skipIf(not METADATA_JSON_EXAMPLE_PATHS, 'no "metadata.json" files found')
    def test_query_metadata_json(self):
        self._check_examples_count('metadata.json', METADATA_JSON_EXAMPLE_PATHS)
        self._query_test_case(METADATA_JSON_EXAMPLE_PATHS, query_metadata_json)
    @unittest.skipIf(not PKG_INFO_EXAMPLE_PATHS, 'no "PKG-INFO" files found')
    def test_query_pkg_info(self):
        self._check_examples_count('PKG-INFO', PKG_INFO_EXAMPLE_PATHS)
        self._query_test_case(PKG_INFO_EXAMPLE_PATHS, query_pkg_info)
    @unittest.skipUnless(os.environ.get('TEST_PACKAGING'), 'skipping packaging test')
    def test_query_pkg_info_current(self):
        # Build the current package and query the generated PKG-INFO file.
        run_module('setup', 'build')
        paths = list(pathlib.Path.cwd().glob('*.egg-info/PKG-INFO'))
        self.assertEqual(len(paths), 1)
        path = paths[0]
        version = query_pkg_info(path)
        _LOG.debug('%s: %s', path, version)
    def test_query_pkg_info_bad(self):
        # A PKG-INFO file must be rejected when empty, malformed, or holding
        # an unparsable version string.
        with tempfile.NamedTemporaryFile(delete=False) as bad_file:
            bad_file_path = pathlib.Path(bad_file.name)
        with self.assertRaises(ValueError):
            query_pkg_info(bad_file_path)
        with bad_file_path.open('a') as bad_file:
            bad_file.write('blah blah blah')
        with self.assertRaises(ValueError):
            query_pkg_info(bad_file_path)
        with bad_file_path.open('a') as bad_file:
            bad_file.write('Version: hello world')
        with self.assertRaises(ValueError):
            query_pkg_info(bad_file_path)
        bad_file_path.unlink()
    def test_query_package_folder(self):
        self._check_examples_count('package folder', PACKAGE_FOLDER_EXAMPLES)
        self._query_test_case(PACKAGE_FOLDER_EXAMPLES, query_package_folder)
    @unittest.skipUnless(os.environ.get('TEST_PACKAGING'), 'skipping packaging test')
    def test_query_package_folder_current(self):
        run_module('setup', 'build')
        path = pathlib.Path.cwd().joinpath('version_query')
        version = query_package_folder(path)
        _LOG.debug('%s: %s', path, version)
        self.assertIsInstance(version, Version)
    def test_query_folder(self):
        self._query_test_case(PACKAGE_FOLDER_EXAMPLES, query_folder)
    def test_query_folder_current(self):
        path = pathlib.Path.cwd()
        version = query_folder(path)
        _LOG.debug('%s: %s', path, version)
        self.assertIsInstance(version, Version)
    def test_query_caller(self):
        version = query_caller()
        _LOG.debug('caller: %s', version)
        self.assertIsInstance(version, Version)
    def test_not_as_main(self):
        # Importing the CLI module without running it must have no effect.
        run_module('version_query', run_name=None)
    def test_help(self):
        # Running the CLI with no arguments prints usage and exits.
        sio = io.StringIO()
        with contextlib.redirect_stderr(sio):
            with self.assertRaises(SystemExit):
                run_module('version_query')
        _LOG.info('%s', sio.getvalue())
    def test_bad_usage(self):
        # Combining '-p' and '-i' must be rejected with ValueError.
        sio = io.StringIO()
        with contextlib.redirect_stderr(sio):
            with self.assertRaises(ValueError):
                run_module('version_query', '-p', '-i', '.')
        _LOG.info('%s', sio.getvalue())
    def test_here(self):
        # Plain query of '.' must match both query_caller and query_version_str.
        sio = io.StringIO()
        with contextlib.redirect_stdout(sio):
            run_module('version_query', '.')
        self.assertEqual(sio.getvalue().rstrip(), query_caller().to_str())
        self.assertEqual(sio.getvalue().rstrip(), query_version_str())
    def test_increment_here(self):
        sio = io.StringIO()
        with contextlib.redirect_stdout(sio):
            run_module('version_query', '-i', '.')
        self.assertEqual(sio.getvalue().rstrip(),
                         query_caller().increment(VersionComponent.Patch).to_str())
    def test_predict_here(self):
        sio = io.StringIO()
        with contextlib.redirect_stdout(sio):
            run_module('version_query', '-p', '.')
        self.assertEqual(sio.getvalue().rstrip(), predict_caller().to_str())
        self.assertEqual(sio.getvalue().rstrip(), predict_version_str())
| [
"logging.getLogger",
"sys.path.insert",
"version_query.query.predict_version_str",
"unittest.skipIf",
"contextlib.redirect_stderr",
"pathlib.Path",
"version_query.py_query.query_pkg_info",
"sys.path.remove",
"tempfile.NamedTemporaryFile",
"warnings.warn",
"io.StringIO",
"contextlib.redirect_st... | [((689, 716), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (706, 716), False, 'import logging\n'), ((3611, 3697), 'unittest.skipIf', 'unittest.skipIf', (['(not METADATA_JSON_EXAMPLE_PATHS)', '"""no "metadata.json" files found"""'], {}), '(not METADATA_JSON_EXAMPLE_PATHS,\n \'no "metadata.json" files found\')\n', (3626, 3697), False, 'import unittest\n'), ((3901, 3973), 'unittest.skipIf', 'unittest.skipIf', (['(not PKG_INFO_EXAMPLE_PATHS)', '"""no "PKG-INFO" files found"""'], {}), '(not PKG_INFO_EXAMPLE_PATHS, \'no "PKG-INFO" files found\')\n', (3916, 3973), False, 'import unittest\n'), ((815, 907), 'warnings.warn', 'warnings.warn', (['"""remove this test after removing deprecated function"""', 'DeprecationWarning'], {}), "('remove this test after removing deprecated function',\n DeprecationWarning)\n", (828, 907), False, 'import warnings\n'), ((4468, 4488), 'version_query.py_query.query_pkg_info', 'query_pkg_info', (['path'], {}), '(path)\n', (4482, 4488), False, 'from version_query.py_query import query_metadata_json, query_pkg_info, query_package_folder\n'), ((4176, 4208), 'os.environ.get', 'os.environ.get', (['"""TEST_PACKAGING"""'], {}), "('TEST_PACKAGING')\n", (4190, 4208), False, 'import os\n'), ((5633, 5659), 'version_query.py_query.query_package_folder', 'query_package_folder', (['path'], {}), '(path)\n', (5653, 5659), False, 'from version_query.py_query import query_metadata_json, query_pkg_info, query_package_folder\n'), ((5408, 5440), 'os.environ.get', 'os.environ.get', (['"""TEST_PACKAGING"""'], {}), "('TEST_PACKAGING')\n", (5422, 5440), False, 'import os\n'), ((5912, 5930), 'pathlib.Path.cwd', 'pathlib.Path.cwd', ([], {}), '()\n', (5928, 5930), False, 'import pathlib\n'), ((5949, 5967), 'version_query.query.query_folder', 'query_folder', (['path'], {}), '(path)\n', (5961, 5967), False, 'from version_query.query import query_folder, query_caller, query_version_str, predict_caller, 
predict_version_str\n'), ((6112, 6126), 'version_query.query.query_caller', 'query_caller', ([], {}), '()\n', (6124, 6126), False, 'from version_query.query import query_folder, query_caller, query_version_str, predict_caller, predict_version_str\n'), ((6341, 6354), 'io.StringIO', 'io.StringIO', ([], {}), '()\n', (6352, 6354), False, 'import io\n'), ((6578, 6591), 'io.StringIO', 'io.StringIO', ([], {}), '()\n', (6589, 6591), False, 'import io\n'), ((6827, 6840), 'io.StringIO', 'io.StringIO', ([], {}), '()\n', (6838, 6840), False, 'import io\n'), ((7128, 7141), 'io.StringIO', 'io.StringIO', ([], {}), '()\n', (7139, 7141), False, 'import io\n'), ((7421, 7434), 'io.StringIO', 'io.StringIO', ([], {}), '()\n', (7432, 7434), False, 'import io\n'), ((1036, 1058), 'version_query.generate_version_str', 'generate_version_str', ([], {}), '()\n', (1056, 1058), False, 'from version_query import generate_version_str\n'), ((2506, 2535), 'tempfile.TemporaryDirectory', 'tempfile.TemporaryDirectory', ([], {}), '()\n', (2533, 2535), False, 'import tempfile\n'), ((3036, 3072), 'sys.path.insert', 'sys.path.insert', (['(0)', 'project_path_str'], {}), '(0, project_path_str)\n', (3051, 3072), False, 'import sys\n'), ((3356, 3389), 'sys.path.remove', 'sys.path.remove', (['project_path_str'], {}), '(project_path_str)\n', (3371, 3389), False, 'import sys\n'), ((4586, 4627), 'tempfile.NamedTemporaryFile', 'tempfile.NamedTemporaryFile', ([], {'delete': '(False)'}), '(delete=False)\n', (4613, 4627), False, 'import tempfile\n'), ((4669, 4696), 'pathlib.Path', 'pathlib.Path', (['bad_file.name'], {}), '(bad_file.name)\n', (4681, 4696), False, 'import pathlib\n'), ((4753, 4782), 'version_query.py_query.query_pkg_info', 'query_pkg_info', (['bad_file_path'], {}), '(bad_file_path)\n', (4767, 4782), False, 'from version_query.py_query import query_metadata_json, query_pkg_info, query_package_folder\n'), ((4935, 4964), 'version_query.py_query.query_pkg_info', 'query_pkg_info', (['bad_file_path'], {}), 
'(bad_file_path)\n', (4949, 4964), False, 'from version_query.py_query import query_metadata_json, query_pkg_info, query_package_folder\n'), ((5123, 5152), 'version_query.py_query.query_pkg_info', 'query_pkg_info', (['bad_file_path'], {}), '(bad_file_path)\n', (5137, 5152), False, 'from version_query.py_query import query_metadata_json, query_pkg_info, query_package_folder\n'), ((6368, 6399), 'contextlib.redirect_stderr', 'contextlib.redirect_stderr', (['sio'], {}), '(sio)\n', (6394, 6399), False, 'import contextlib\n'), ((6605, 6636), 'contextlib.redirect_stderr', 'contextlib.redirect_stderr', (['sio'], {}), '(sio)\n', (6631, 6636), False, 'import contextlib\n'), ((6854, 6885), 'contextlib.redirect_stdout', 'contextlib.redirect_stdout', (['sio'], {}), '(sio)\n', (6880, 6885), False, 'import contextlib\n'), ((7057, 7076), 'version_query.query.query_version_str', 'query_version_str', ([], {}), '()\n', (7074, 7076), False, 'from version_query.query import query_folder, query_caller, query_version_str, predict_caller, predict_version_str\n'), ((7155, 7186), 'contextlib.redirect_stdout', 'contextlib.redirect_stdout', (['sio'], {}), '(sio)\n', (7181, 7186), False, 'import contextlib\n'), ((7448, 7479), 'contextlib.redirect_stdout', 'contextlib.redirect_stdout', (['sio'], {}), '(sio)\n', (7474, 7479), False, 'import contextlib\n'), ((7659, 7680), 'version_query.query.predict_version_str', 'predict_version_str', ([], {}), '()\n', (7678, 7680), False, 'from version_query.query import query_folder, query_caller, query_version_str, predict_caller, predict_version_str\n'), ((2574, 2651), 'tempfile.NamedTemporaryFile', 'tempfile.NamedTemporaryFile', ([], {'suffix': '""".py"""', 'dir': 'project_path_str', 'delete': '(False)'}), "(suffix='.py', dir=project_path_str, delete=False)\n", (2601, 2651), False, 'import tempfile\n'), ((2750, 2781), 'pathlib.Path', 'pathlib.Path', (['project_file.name'], {}), '(project_file.name)\n', (2762, 2781), False, 'import pathlib\n'), ((5570, 
5588), 'pathlib.Path.cwd', 'pathlib.Path.cwd', ([], {}), '()\n', (5586, 5588), False, 'import pathlib\n'), ((4338, 4356), 'pathlib.Path.cwd', 'pathlib.Path.cwd', ([], {}), '()\n', (4354, 4356), False, 'import pathlib\n'), ((6982, 6996), 'version_query.query.query_caller', 'query_caller', ([], {}), '()\n', (6994, 6996), False, 'from version_query.query import query_folder, query_caller, query_version_str, predict_caller, predict_version_str\n'), ((7582, 7598), 'version_query.query.predict_caller', 'predict_caller', ([], {}), '()\n', (7596, 7598), False, 'from version_query.query import query_folder, query_caller, query_version_str, predict_caller, predict_version_str\n'), ((7314, 7328), 'version_query.query.query_caller', 'query_caller', ([], {}), '()\n', (7326, 7328), False, 'from version_query.query import query_folder, query_caller, query_version_str, predict_caller, predict_version_str\n')] |
import argparse
import getpass
import glob
import hashlib
import itertools
import json
import logging
import os
import sys
import threading
import time
import traceback
import subprocess
import urllib
import urllib.request
import urllib.parse
import urwid
import pybtex
import pybtex.database
class BibEntry:
    """A single bibliography entry shown in the search panel.

    This is an abstract base: concrete sources (e.g. DBLP) override the data
    properties (authors, title, year, venue, bibkey, url, pyb_entry,
    details_widget).
    """

    class SearchPanelWidgetImpl(urwid.AttrMap):
        """Urwid widget rendering one entry row in the search results list."""

        def __init__(self, entry):
            super().__init__(urwid.SolidFill(), None)
            self.entry = entry
            self.title = urwid.AttrMap(urwid.Text(entry.title), 'title')
            self.info = urwid.Text([('author', f"{entry.abbrev_authors}"),
                                    ('delim', ". "),
                                    ('venue', f"{entry.venue}"),
                                    ('delim', ", "),
                                    ('year', f"{entry.year}"),
                                    ('delim', ".")])
            self.mark = urwid.AttrMap(urwid.Text(('mark_none', "[M]"), align='right'), None)
            self.source = urwid.Text([('source', f"{entry.source}"),
                                      ('delim', "::"),
                                      ('bibkey', f"{entry.bibkey}")])
            self.original_widget = urwid.Pile([
                urwid.AttrMap(urwid.Columns([('weight', 1, self.title),
                                             ('pack', self.mark)],
                                            dividechars=1),
                              'title'),
                self.info, self.source])
            # When focused, every display attribute switches to its '+' variant.
            self.set_focus_map({k: ('plain' if k is None else str(k)) + '+' for k in [
                'title', 'author', 'delim', 'venue', 'year', 'source',
                'bibkey', 'mark_none', 'mark_selected', 'title_delim',
                'bibtex_ready', 'bibtex_fetching', None]})

        def selectable(self):
            return True

        def keypress(self, size, key):
            # space: toggle selection; 'i': show details; '@': open in browser.
            if key == ' ':
                self.entry.repo.selected_keys_panel.Toggle(self.entry)
                self.entry.OnSelectionHandler()
            elif key == 'i':
                self.entry.repo.details_panel.original_widget = self.entry.details_widget
            elif key == '@':
                self.entry.OpenInBrowser()
            else:
                return key

    def __init__(self, source, repo):
        self.repo = repo
        self._source = source
        self._search_panel_widget = None  # created lazily on first use
        self._mark = None

    # Data properties -- subclasses must override these.
    @property
    def authors(self): return NotImplemented
    @property
    def title(self): return NotImplemented
    @property
    def year(self): return NotImplemented
    @property
    def venue(self): return NotImplemented
    @property
    def bibkey(self): return NotImplemented
    @property
    def url(self): return NotImplemented

    @property
    def abbrev_authors(self):
        """First author, followed by 'et al' when there are co-authors."""
        authors = self.authors
        if len(authors) == 1:
            return f"{authors[0]}"
        else:
            return f"{authors[0]} et al"

    @property
    def pyb_entry(self): return NotImplemented
    @property
    def details_widget(self): return NotImplemented

    @property
    def source(self):
        return self._source

    def Match(self, keywords):
        """Return True when all keywords (of length >= 3) match this entry.

        Keywords are matched case-insensitively against the unique key, the
        title and the author names.  Returns False when no keyword is long
        enough to make the query non-trivial.
        """
        trivial = True
        for keyword in filter(lambda k: len(k) >= 3, keywords):
            trivial = False
            if keyword.upper() in self.unique_key.upper():
                continue
            if keyword.upper() in self.title.upper():
                continue
            matched = False
            for author in self.authors:
                if keyword.upper() in author.upper():
                    matched = True
                    break
            if not matched: return False
        return not trivial

    @property
    def search_panel_widget(self):
        self._InitializeSearchPanelWidget()
        return self._search_panel_widget

    @property
    def mark(self):
        return self._mark

    @mark.setter
    def mark(self, value):
        self._InitializeSearchPanelWidget()
        self._mark = value
        if value is None:
            self._search_panel_widget.mark.original_widget.set_text(
                [('title_delim', "["), ('mark_none', " "), ('title_delim', "]")])
        elif value == 'selected':
            self._search_panel_widget.mark.original_widget.set_text(
                [('title_delim', "["), ('mark_selected', "X"), ('title_delim', "]")])
        else:
            # BUG FIX: the original interpolated the undefined name 'mark'
            # here, raising NameError instead of the intended ValueError.
            raise ValueError(f"Invalid mark: {value}")

    @property
    def unique_key(self):
        return f"{self.source}::{self.bibkey}"

    @property
    def unique_key_item(self):
        return urwid.Text([('selected_key', self.bibkey), ('selected_hint', f"({self.source})")])

    def OnSelectionHandler(self): pass

    def OpenInBrowser(self):
        """Open this entry's url in the default web browser (best effort)."""
        if self.url is None:
            self.repo.message_bar.Post("Could not infer url of this entry.",
                                       "warning", 1)
            return
        # Spawn a separate interpreter so the TUI's terminal is not disturbed.
        status = subprocess.run(["python3", "-m", "webbrowser", "-t", self.url],
                                stdout=subprocess.DEVNULL,
                                stderr=subprocess.DEVNULL)
        if status.returncode == 0:
            self.repo.message_bar.Post(f"Opened url '{self.url}'.", 'normal', 1)
        else:
            self.repo.message_bar.Post(
                f"Error occured when opening url '{self.url}' (code {status.returncode})",
                'error', 1)

    def _InitializeSearchPanelWidget(self):
        if self._search_panel_widget is None:
            self._search_panel_widget = BibEntry.SearchPanelWidgetImpl(self)
class DblpEntry(BibEntry):
    """Entry backed by one JSON 'hit' from the dblp.org search API.

    The bibtex record is fetched lazily in a background thread the first
    time the entry is selected (OnSelectionHandler).
    """

    class DetailsWidgetImpl(urwid.Pile):
        """Key/value listing of the raw dblp JSON for the details panel."""
        def __init__(self, entry):
            super().__init__([])
            self._entry = entry
            self.key_item = urwid.Columns([('pack', urwid.Text(('detail_key', "bibtex key: "))),
                                ('weight', 1, urwid.Text(('detail_value', entry.bibkey)))])
            self.source_item = urwid.Columns([('pack', urwid.Text(('detail_key', "source: "))),
                                ('weight', 1, urwid.Text(('detail_value', entry.source)))])
            # One row per author category (usually just 'author').
            self.person_items = urwid.Pile([
                urwid.Columns([('pack', urwid.Text(('detail_key', f"{k.lower()}: "))),
                               ('weight', 1, urwid.Text(('detail_value', '\n'.join(entry.data['info']['authors'][k]))))])
                for k in entry.data['info']['authors'].keys()
            ])
            # One row per remaining metadata field.
            self.info_items = urwid.Pile([
                urwid.Columns([('pack', urwid.Text(('detail_key', f"{k.lower()}: "))),
                               ('weight', 1, urwid.Text(('detail_value', f"{entry.data['info'][k]}")))])
                for k in entry.data['info'].keys() if k != 'authors'
            ])
            self.contents = [(self.key_item, ('pack', None)),
                             (self.source_item, ('pack', None)),
                             (self.person_items, ('pack', None)),
                             (self.info_items, ('pack', None)),
                             (urwid.SolidFill(), ('weight', 1))]
        @property
        def entry(self):
            return self._entry

    def __init__(self, dblp_entry, repo):
        super().__init__('dblp.org', repo)
        self.data = dblp_entry          # raw JSON hit from the dblp API
        self._details_widget = None
        self._bibkey = None
        self._redraw_fd = None          # pipe fd used to wake the UI loop
        self.pybtex_entry = None
        self.bibtex_loading_done = threading.Event()
        self.bibtex_loading_thread = threading.Thread(
            name=f"bibtex-{self.bibkey}",
            target=self._LoadPybtexEntry,
            daemon=False)
    def __del__(self):
        if self._redraw_fd is not None:
            os.close(self._redraw_fd)
    @property
    def pyb_entry(self):
        # Blocks until the background bibtex fetch has finished.
        self.bibtex_loading_done.wait()
        return self.pybtex_entry
    @property
    def authors(self):
        try:
            authors = self.data['info']['authors']['author']
            if authors: return authors
            else: return ["Unknown"]
        except: return ["Unknown"]
    @property
    def title(self):
        try: return str(self.data['info']['title'])
        except: return "Unknown"
    @property
    def year(self):
        try: return str(self.data['info']['year'])
        except: return "Unknown"
    @property
    def venue(self):
        try: return self.data['info']['venue']
        except: return "Unknown"
    @property
    def bibkey(self):
        # "<basename>:<4 hex sha1 chars>" keeps keys short but collision-resistant.
        if self._bibkey is None:
            flat_key = self.data['info']['key']
            base = flat_key.split('/')[-1]
            sha1 = hashlib.sha1(flat_key.encode('utf-8')).hexdigest()
            self._bibkey = f"{base}:{sha1[:4].upper()}"
        return self._bibkey
    @property
    def url(self):
        try: return self.data['info']['ee']
        except: return None
    @property
    def details_widget(self):
        self._InitializeDetailsWidget()
        return self._details_widget
    def OnSelectionHandler(self):
        # First selection: hook into the UI event loop, then start the fetch.
        if self._redraw_fd is None:
            event_loop = self.repo.event_loop
            self._redraw_fd = event_loop.watch_pipe(self._FdWriteHandler)
        self.bibtex_loading_thread.start()
    def _FdWriteHandler(self, data):
        self.repo.event_loop.draw_screen()
    def _InitializeDetailsWidget(self):
        if self._details_widget is None:
            self._details_widget = DblpEntry.DetailsWidgetImpl(self)
    def _LoadPybtexEntry(self):
        # Worker thread: download and parse the bibtex record for this entry,
        # updating the row's source line before and after the fetch.
        # NOTE(review): search_panel_widget is a lazily-creating property, so
        # the `is not None` guards below always pass -- confirm intended.
        bib_url = f"https://dblp.org/rec/bib2/{self.data['info']['key']}.bib"
        try:
            if self.search_panel_widget is not None:
                self.search_panel_widget.source.set_text([
                    ('source', f"{self.source}"),
                    ('delim', "::"),
                    ('bibkey', f"{self.bibkey}"),
                    ('bibtex_fetching', " (fetching bibtex)")])
                os.write(self._redraw_fd, b"?")
            with urllib.request.urlopen(bib_url) as remote:
                bib_text = remote.read().decode('utf-8')
            pyb_db = pybtex.database.parse_string(bib_text, 'bibtex')
            self.pybtex_entry = pyb_db.entries[f"DBLP:{self.data['info']['key']}"]
            if self.search_panel_widget is not None:
                self.search_panel_widget.source.set_text([
                    ('source', f"{self.source}"),
                    ('delim', "::"),
                    ('bibkey', f"{self.bibkey}"),
                    ('bibtex_ready', " (bibtex ready)")])
                os.write(self._redraw_fd, b"?")
        except Exception as e:
            logging.error(f"Error when fetching bibtex entry from DBLP: Entry: {self.data} {traceback.format_exc()}")
        self.bibtex_loading_done.set()
class BibtexEntry(BibEntry):
    """Entry backed by a pybtex Entry parsed from a local .bib file."""

    class DetailsWidgetImpl(urwid.Pile):
        """Key/value listing of the pybtex entry for the details panel."""
        def __init__(self, entry):
            super().__init__([])
            self.entry = entry
            self.key = urwid.Columns([
                ('pack', urwid.Text(('detail_key', "citation key: "))),
                ('weight', 1, urwid.Text(('detail_value', entry.bibkey)))])
            self.source = urwid.Columns([
                ('pack', urwid.Text(('detail_key', "source: "))),
                ('weight', 1, urwid.Text(('detail_value', entry.source)))])
            self.item_type = urwid.Columns([
                ('pack', urwid.Text(('detail_key', "type: "))),
                ('weight', 1, urwid.Text(('detail_value', entry.entry.type)))])
            # One row per person role ('author', 'editor', ...).
            self.persons = urwid.Pile([
                urwid.Columns([('pack', urwid.Text(('detail_key', f"{k.lower()}: "))),
                               ('weight', 1, urwid.Text(('detail_value', '\n'.join([str(p) for p in entry.entry.persons[k]]))))])
                for k in entry.entry.persons.keys()
            ])
            # One row per non-empty bibtex field.
            self.info = urwid.Pile([
                urwid.Columns([('pack', urwid.Text(('detail_key', f"{k.lower()}: "))),
                               ('weight', 1, urwid.Text(('detail_value', f"{entry.entry.fields[k]}")))])
                for k in entry.entry.fields.keys() if entry.entry.fields[k]
            ])
            self.contents = [(self.key, ('pack', None)),
                             (self.source, ('pack', None)),
                             (self.item_type, ('pack', None)),
                             (self.persons, ('pack', None)),
                             (self.info, ('pack', None)),
                             (urwid.SolidFill(), ('weight', 1))]

    def __init__(self, key, entry, repo, source):
        """`key` is the citation key, `entry` a pybtex Entry, `source` the file path."""
        super().__init__(source, repo)
        self._bibkey = key
        self.entry = entry
        self._details_widget = None

    @property
    def authors(self):
        try: return [str(au) for au in self.entry.persons['author']]
        except Exception: return ["Unknown"]

    @property
    def title(self):
        try: return self.entry.fields['title']
        except Exception: return "Unknown"

    @property
    def year(self):
        try: return self.entry.fields['year']
        except Exception: return "Unknown"

    @property
    def venue(self):
        """Venue inferred from booktitle, then journal, then publisher."""
        try:
            if 'booktitle' in self.entry.fields:
                return self.entry.fields['booktitle']
            elif 'journal' in self.entry.fields:
                return self.entry.fields['journal']
            elif 'publisher' in self.entry.fields:
                return f"Publisher: {self.entry.fields['publisher']}"
            # BUG FIX: entries with none of these fields used to fall
            # through and return None, which rendered as "None" in the UI.
            return "Unknown"
        except Exception: return "Unknown"

    @property
    def bibkey(self):
        return self._bibkey

    @property
    def url(self):
        try: return self.entry.fields['url']
        except Exception: return None

    @property
    def pyb_entry(self):
        # Already parsed; no waiting needed (unlike DblpEntry).
        return self.entry

    @property
    def details_widget(self):
        self._InitializeDetailsWidget()
        return self._details_widget

    def _InitializeDetailsWidget(self):
        if self._details_widget is None:
            self._details_widget = BibtexEntry.DetailsWidgetImpl(self)
class BibRepo:
    """Abstract base for a bibliography source searched in background threads.

    Each repo owns two daemon threads: a loader (LoadingThreadMain) that
    prepares the data once, and a searcher that re-runs SearchingThreadMain
    whenever Search() bumps the serial number.  UI panels (search results,
    message bar, selected keys, details) are attached by TopWidget after
    construction.
    """

    @staticmethod
    def Create(config, access, event_loop):
        """Factory: build the repo type matching one config dictionary."""
        enabled = config.get('enabled', True)
        if 'remote' in config:
            return DblpRepo(event_loop, enabled)
        elif 'glob' in config:
            ctor = {'ro': BibtexRepo, 'rw': OutputBibtexRepo}[access]
            return ctor(config['glob'], event_loop, enabled)
        else:
            raise ValueError(f"Invalid config: {config}")

    class StatusIndicatorWidgetImpl(urwid.AttrMap):
        """One row of the database-status panel: label, enabled mark, status."""

        def __init__(self, repo):
            super().__init__(urwid.SolidFill(), None)
            self.repo = repo
            self._status = None
            self.label = urwid.AttrMap(urwid.Text(f"{repo.source}"), "db_label")
            self.access = urwid.Text("")
            self.status_indicator = urwid.AttrMap(urwid.Text(""), "db_label")
            self.original_widget = urwid.Columns([('pack', self.repo._short_label),
                                           ('pack', self.repo._enabled_mark),
                                           ('weight', 1, self.label),
                                           ('pack', self.status_indicator),
                                           ('pack', self.access)],
                                          dividechars=1)

        @property
        def status(self):
            return self._status

        @status.setter
        def status(self, value):
            # Guarded by the repo's redraw lock: worker threads set status.
            with self.repo.redraw_lock:
                self._status = value
                if value == 'initialized':
                    self.status_indicator.original_widget.set_text("initialized")
                elif value == 'loading':
                    self.status_indicator.set_attr_map({None: "db_status_loading"})
                    self.status_indicator.original_widget.set_text("loading")
                elif value == 'searching':
                    self.status_indicator.set_attr_map({None: "db_status_searching"})
                    self.status_indicator.original_widget.set_text("searching")
                elif value == 'ready':
                    self.status_indicator.set_attr_map({None: "db_status_ready"})
                    self.status_indicator.original_widget.set_text("ready")
                elif value == 'no file':
                    self.status_indicator.set_attr_map({None: "db_status_error"})
                    self.status_indicator.original_widget.set_text("no file")
                else:
                    # BUG FIX: used to interpolate the undefined name `status`,
                    # raising NameError instead of the intended LookupError.
                    raise LookupError(f"Invalid status: {value}")

    def __init__(self, source, event_loop, enabled):
        self.source = source
        self.redraw_lock = threading.Lock()
        self.event_loop = event_loop
        self._redraw_fd = event_loop.watch_pipe(self._FdWriteHandler)
        self.serial = 0
        self._serial_lock = threading.Lock()
        self.search_results_panel = None
        self.message_bar = None
        # Panel of currently selected entries.  TopWidget assigns this
        # attribute; SearchingThreadWrapper reads it to restore row marks.
        # BUG FIX: was named `selected_entries_panel`, which TopWidget never
        # assigns, so marks were never restored on re-search.
        self.selected_keys_panel = None
        self.details_panel = None
        self.loading_done = threading.Event()
        self.searching_done = threading.Event()
        self.loading_thread = threading.Thread(name=f"load-{self.source}",
                                               target=self.LoadingThreadWrapper,
                                               daemon=True)
        self.searching_thread = threading.Thread(name=f"search-{self.source}",
                                                 target=self.SearchingThreadWrapper,
                                                 daemon=True)
        self._short_label = urwid.Text("?")
        self._enabled_mark = urwid.Text("")
        self.enabled = enabled
        self._status_indicator_widget = BibRepo.StatusIndicatorWidgetImpl(self)
        self.access_type = 'ro'
        self.status = "initialized"
        self.loading_thread.start()
        self.searching_thread.start()

    def __del__(self):
        os.close(self._redraw_fd)

    @property
    def short_label(self):
        return self._short_label.get_text()

    @short_label.setter
    def short_label(self, value):
        self._short_label.set_text(value)

    @property
    def access_type(self):
        return self._access_type

    @access_type.setter
    def access_type(self, value):
        """'ro' (search only) or 'rw' (search and write back)."""
        if value == 'ro':
            self._access_type = 'ro'
            self._status_indicator_widget.access.set_text(('db_ro', "ro"))
        elif value == 'rw':
            self._access_type = 'rw'
            self._status_indicator_widget.access.set_text(('db_rw', "rw"))
        else:
            raise ValueError(f"Invalid access info: {value}")

    @property
    def enabled(self):
        return self._enabled

    @enabled.setter
    def enabled(self, value):
        self._enabled = value
        if self._enabled:
            self._enabled_mark.set_text(["[", ('db_enabled', "X"), "]"])
        else:
            self._enabled_mark.set_text("[ ]")

    @property
    def status(self):
        return self._status_indicator_widget.status

    @status.setter
    def status(self, value):
        self._status_indicator_widget.status = value

    @property
    def status_indicator_widget(self):
        return self._status_indicator_widget

    def Search(self, search_text, serial):
        """Request a new search round; wakes the searcher thread."""
        self.search_text = search_text
        with self._serial_lock:
            self.serial = serial
        self.searching_done.set()

    def LoadingThreadWrapper(self):
        self.status = "loading"
        self.Redraw()
        status = self.LoadingThreadMain()
        self.status = status
        self.Redraw()
        self.loading_done.set()

    def LoadingThreadMain(self):
        """Subclass hook: prepare data, return the resulting status string."""
        return NotImplemented

    def SearchingThreadWrapper(self):
        self.loading_done.wait()
        if self.status == 'no file':
            return
        while True:
            self.searching_done.wait()
            with self._serial_lock:
                serial = self.serial
            self.status = "searching"
            self.Redraw()
            try:
                for item in self.SearchingThreadMain(self.search_text):
                    # Restore the 'selected' mark on entries that are already
                    # in the selection panel (keyed by unique_key).
                    # BUG FIX: previously checked the never-assigned
                    # `selected_entries_panel` and compared bare bibkeys
                    # against unique_key ("source::bibkey") dict keys.
                    if self.selected_keys_panel is not None and \
                            item.unique_key in self.selected_keys_panel.entries:
                        item.mark = 'selected'
                    else:
                        item.mark = None
                    if self.search_results_panel is not None:
                        self.search_results_panel.Add(item, serial)
            except Exception as e:
                logging.error(traceback.format_exc())
            self.status = "ready"
            self.Redraw()
            with self._serial_lock:
                # Only sleep again if no newer search arrived meanwhile.
                if self.serial == serial:
                    self.searching_done.clear()

    def Redraw(self):
        """Ask the UI thread to redraw by writing to the watch pipe."""
        with self.redraw_lock:
            try:
                os.write(self._redraw_fd, b"?")
            except Exception:
                logging.error(traceback.format_exc())

    def _FdWriteHandler(self, data):
        self.event_loop.draw_screen()
class BibtexRepo(BibRepo):
    """Read-only repo backed by local .bib files matched by a glob expression."""
    def __init__(self, glob_expr, event_loop, enabled):
        super().__init__(glob_expr, event_loop, enabled)
        self._bib_files = []      # paths matched by the glob (filled by loader thread)
        self._bib_entries = []    # parsed BibtexEntry objects
    @property
    def bib_entries(self):
        # Blocks until the background loader finished parsing all files.
        self.loading_done.wait()
        return self._bib_entries
    @property
    def bib_files(self):
        self.loading_done.wait()
        return self._bib_files
    def LoadingThreadMain(self):
        # Parse every file matched by the glob; returns the new repo status
        # ('ready', or 'no file' when the glob matches nothing).
        glob_expr = self.source
        logging.debug(f"Collecting entries from glob expression '{glob_expr}'")
        self._bib_files = glob.glob(glob_expr, recursive=True)
        if not self._bib_files:
            logging.warning(f"Glob expr '{glob_expr}' matches no target")
            if self.message_bar is not None:
                self.message_bar.Post(f"Glob expr '{glob_expr}' matches no target.",
                                      'warning')
            return 'no file'
        for path in self._bib_files:
            try:
                bib_data = pybtex.database.parse_file(path)
            except Exception as e:
                # A malformed file is logged and skipped, not fatal.
                logging.error(f"Exception raised when parsing file {path}: {e}")
                continue
            for key, entry in iter(bib_data.entries.items()):
                self._bib_entries.append(BibtexEntry(key, entry, self, path))
            logging.debug(f"Parsed {len(bib_data.entries)} entries from file {path}")
        return 'ready'
    def SearchingThreadMain(self, search_text):
        # Yields entries whose key/title/authors match all (>= 3 char) keywords.
        stripped = search_text.strip()
        if not stripped:
            return
        keywords = search_text.split()
        for entry in self.bib_entries:
            if entry.Match(keywords):
                yield entry
class OutputBibtexRepo(BibtexRepo):
    """Read-write bibtex repo: the single file selected entries are written to."""
    def __init__(self, glob_expr, event_loop, enabled):
        super().__init__(glob_expr, event_loop, enabled)
        self.selected_keys_panel = None
        if len(self.bib_files) > 1:
            raise ValueError(f"Glob expr '{glob_expr}' matches more than one file")
        self.access_type = 'rw'
        # Write to the matched file, or create a new one named by the glob.
        self.output_file = self.bib_files[0] if self.bib_files else glob_expr
    def Write(self):
        # Merge the file's existing entries with the current selection (the
        # selection wins on key collision) and save everything to disk.
        if self.selected_keys_panel is None:
            return
        self.loading_done.wait()
        entries = {e.bibkey: e.pyb_entry for e in self.bib_entries}
        entries.update({e.bibkey: e.pyb_entry for e in self.selected_keys_panel.entries.values()})
        for key, entry in entries.items():
            if entry is None:
                # e.g. a DblpEntry whose bibtex fetch failed.
                logging.error(f"Key {key} has empty entry. Not writing to file.")
                return
        pybtex.database.BibliographyData(entries).to_file(self.output_file)
        logging.info(f"Wrote to file '{self.output_file}'")
class DblpRepo(BibRepo):
    """Bib repo backed by the public dblp.org publication-search API."""

    def __init__(self, event_loop, enabled):
        super().__init__("https://dblp.org", event_loop, enabled)

    def LoadingThreadMain(self):
        # Nothing to preload; the remote API is queried per search.
        return 'ready'

    def SearchingThreadMain(self, search_text):
        """Yield one DblpEntry per hit returned by the dblp search API."""
        stripped = search_text.strip()
        if not stripped:
            return
        query = urllib.parse.quote(search_text)
        url = f"https://dblp.org/search/publ/api?q={query}&format=json"
        with urllib.request.urlopen(url) as response:
            bib_data = json.load(response)
        hits = bib_data['result']['hits']
        if 'hit' not in hits:
            return []
        for hit in hits['hit']:
            yield DblpEntry(hit, self)
class Banner(urwid.AttrMap):
    """Splash screen shown in the results panel when there are no hits."""
    def __init__(self):
        super().__init__(urwid.SolidFill(), None)
        self.big_text = urwid.BigText([('banner_hi', "bib"),
                                       ('banner_lo', "rarian")],
                                      urwid.font.HalfBlock7x7Font())
        # BigText must be clipped/padded before it can live in a Filler.
        self.big_text_clipped = urwid.Padding(self.big_text, 'center', width='clip')
        self.subtitle = urwid.Text(('banner_hi', "A BibTeX Management Tool Powered By D.B.L.P"), align='center')
        self.version = urwid.Text(('banner_lo', "version 1.0"), align='center')
        self.original_widget = urwid.Filler(
            urwid.Pile([self.big_text_clipped, self.subtitle, self.version]),
            'middle')
class SearchResultsPanel(urwid.AttrMap):
    """Scrollable list of search hits; shows the Banner when empty."""
    def __init__(self):
        super().__init__(urwid.SolidFill(), None)
        self._serial = 0
        self._serial_lock = threading.Lock()
        self.banner = Banner()
        self._Clear()
    @property
    def serial(self):
        return self._serial
    @serial.setter
    def serial(self, value):
        # A new search serial invalidates and clears all current results.
        with self._serial_lock:
            self._serial = value
            self._Clear()
    def _Clear(self):
        self.items = []
        self.SyncDisplay()
    def Add(self, entry, serial):
        # Results from stale search rounds (mismatched serial) are dropped.
        with self._serial_lock:
            if self._serial == serial:
                self.items.append(entry.search_panel_widget)
                self.SyncDisplay()
    def SyncDisplay(self):
        # Hide rows whose repo is currently disabled (alt+<digit> toggles).
        enabled_items = [item for item in self.items if item.entry.repo.enabled]
        if enabled_items:
            self.list_walker = urwid.SimpleListWalker(enabled_items)
            self.original_widget = urwid.ListBox(self.list_walker)
        else:
            self.original_widget = self.banner
    def keypress(self, size, key):
        # emacs (ctrl+n/p) and vi (j/k) navigation in addition to arrow keys.
        if key in ('ctrl n', 'j'):
            self.original_widget._keypress_down(size)
        elif key in ('ctrl p', 'k'):
            self.original_widget._keypress_up(size)
        else:
            self.original_widget.keypress(size, key)
class SelectedKeysPanel(urwid.Pile):
    """Panel listing the selected entries, keyed by entry.unique_key."""
    def __init__(self, keys_output):
        super().__init__([])
        self.entries = {}               # unique_key -> BibEntry
        self.keys_output = keys_output  # optional output path (-k option)
        self.SyncDisplay()
    def Toggle(self, entry):
        # Select/deselect `entry` and update its search-row mark.
        key = entry.unique_key
        if key in self.entries:
            del self.entries[key]
            entry.mark = None
        else:
            self.entries[key] = entry
            entry.mark = 'selected'
        self.SyncDisplay()
    def Add(self, entry):
        self.entries[entry.unique_key] = entry
        self.SyncDisplay()
    def SyncDisplay(self):
        new_contents = [(ent.unique_key_item, ('pack', None)) for ent in self.entries.values()]
        if not new_contents:
            # Hint text shown while nothing is selected.
            new_contents = [(urwid.Text(('selected_hint', "Hit <SPACE> on highlighted item to select.")), ('pack', None))]
        self.contents = new_contents
    def Write(self):
        # Write the selected bib keys, comma separated with no newline,
        # to the -k output file (no-op when the option was not given).
        if self.keys_output is None: return
        with open(self.keys_output, 'w') as f:
            print(','.join(map(lambda e: e.bibkey, self.entries.values())),
                  file=f, end='')
        logging.info(f"Wrote selected keys to file '{self.keys_output}'")
class SearchBar(urwid.AttrMap):
    """Single-line search input; fans each keystroke out to all repos."""
    def __init__(self):
        super().__init__(urwid.SolidFill(), 'search_content')
        self._search = urwid.Edit(('search_label', "Search: "))
        self.original_widget = self._search
        self.search_results_panel = None
        self._search_serial = 0     # monotonically increasing search round id
        self.bib_repos = []
        urwid.connect_signal(self._search, 'change', self.TextChangeHandler)
    def TextChangeHandler(self, edit, text):
        # Every edit starts a fresh search round; the shared serial lets the
        # results panel drop hits that arrive from stale rounds.
        if self.search_results_panel is None:
            return
        self.search_results_panel.serial = self._search_serial
        for repo in self.bib_repos:
            repo.Search(text, self._search_serial)
        self._search_serial += 1
class MessageBar(urwid.AttrMap):
    """Bottom status line that cycles usage tips and shows posted messages.

    Post() displays a message for `delay` seconds; a background thread
    rotates through self.messages whenever nothing newer is scheduled.
    Redraws are requested through a watch_pipe fd so the worker threads
    never touch urwid directly.
    """
    def __init__(self, loop):
        super().__init__(urwid.Text("Welcome to bibrarian."), 'msg_normal')
        self.event_loop = loop
        self._redraw_fd = loop.watch_pipe(self._FdWriteHandler)
        self.initial_delay = 1   # seconds before the first rotating tip
        self.post_delay = 3      # default display time of Post() messages
        self.tips_delay = 5      # display time of each rotating tip
        self.next_message_ready = threading.Event()
        self.next_message_scheduled = 0
        self.messages = [
            "Use ctrl+c to exit the program with all files untouched.",
            "Use ctrl+w to write the selected entries to the target file.",
            "Press @ (shift+2) open the entry using system browser.",
            "Use up (or ctrl+p or k) and down (or ctrl+n or j) to navigate the search results.",
            "Use alt+shift+n to toggle enabled/disabled the n-th bib repo.",
            "This software is powered by Python 3, dblp API, Pybtex, and urwid.",
        ]
        self.msg_lock = threading.Lock()
        self.periodic_trigger_thread = threading.Thread(
            name=f"msg-trigger", target=self._PeriodicTrigger, daemon=True)
        self.message_update_thread = threading.Thread(
            name=f"msg-update", target=self._UpdateMessage, daemon=True)
        self.periodic_trigger_thread.start()
        self.message_update_thread.start()
    def Post(self, message, severity='normal', delay=None):
        # Show `message` with severity 'normal' | 'warning' | 'error' for
        # `delay` seconds (default self.post_delay).
        if severity == 'normal':
            label = "Message"
            style = 'msg_normal'
        elif severity == 'warning':
            label = "Warning"
            style = 'msg_warning'
        elif severity == 'error':
            label = "Error"
            style = 'msg_error'
        else:
            raise ValueError(f"Invalid severity: {severity}")
        with self.msg_lock:
            self.original_widget = urwid.Text((style, f"{label}: {message}"))
            self.next_message_ready.set()
            if delay is None: delay = self.post_delay
            # Tips resume only after this deadline passes.
            self.next_message_scheduled = time.time() + delay
    def _FdWriteHandler(self, data):
        self.event_loop.draw_screen()
    def _PeriodicTrigger(self):
        # Worker thread: endlessly rotate tips, yielding to posted messages
        # until their scheduled deadline has elapsed.
        time.sleep(self.initial_delay)
        while True:
            for message in self.messages:
                while True:
                    if time.time() >= self.next_message_scheduled:
                        with self.msg_lock:
                            self.original_widget = urwid.Text(('msg_tips', f"Tip: {message}"))
                            self.next_message_ready.set()
                            self.next_message_scheduled = time.time() + self.tips_delay
                        time.sleep(self.tips_delay)
                        break
                    else:
                        time.sleep(1)
                        continue
    def _UpdateMessage(self):
        # Worker thread: forward "text changed" events to the UI loop.
        while True:
            self.next_message_ready.wait()
            self.next_message_ready.clear()
            os.write(self._redraw_fd, b"?")
    def __del__(self):
        os.close(self._redraw_fd)
class DetailsPanel(urwid.AttrMap):
    """Placeholder panel; entries replace original_widget with their details."""
    def __init__(self):
        super().__init__(urwid.Filler(urwid.Text(
            ('details_hint', 'Hit <i> on highlighted item to update info.')), 'top'), None)
class InputFilter:
    """urwid input_filter hook implementing the global key bindings.

    ctrl+w writes all outputs and exits; alt+<digit> toggles individual
    repos (alt+0 disables all, alt+~ enables all); enter switches focus
    between the search bar and the main panel.  `widget` is assigned once
    the TopWidget exists.
    """
    def __init__(self):
        self.widget = None
    def __call__(self, keys, raw):
        if not keys: return keys
        if keys[0] == 'ctrl w':
            # Best-effort write of every output target before exiting.
            try:
                for repo in self.widget.output_repos:
                    repo.Write()
            except:
                logging.error(traceback.format_exc())
            try: self.widget.selected_keys_panel.Write()
            except: logging.error(traceback.format_exc())
            raise urwid.ExitMainLoop()
        elif self.MaskDatabases(keys[0]):
            self.widget.search_results_panel.SyncDisplay()
            return
        return keys
    def MaskDatabases(self, key):
        # Handle alt+<symbol> repo toggles; returns True if consumed.
        # NOTE(review): the 'enter' branch toggles focus but falls through
        # returning None, so 'enter' is also passed on to the focused
        # widget -- confirm this double handling is intended.
        symbol_number_map = {s: n for s, n in zip(")!@#$%^&*(", range(10))}
        if 'meta ' in key:
            symbol = key[5:]
            if symbol == '~':
                for repo in self.widget.bib_repos:
                    repo.enabled = True
            else:
                number = symbol_number_map.get(symbol)
                if number == 0:
                    for repo in self.widget.bib_repos:
                        repo.enabled = False
                else:
                    try:
                        # Repos are labeled 1-based in the status panel.
                        repo = self.widget.bib_repos[number - 1]
                        repo.enabled = not repo.enabled
                    except: pass
            return True
        elif key == 'enter':
            self.widget.focus_position = 1 - self.widget.focus_position
        else:
            return False
class DatabaseStatusPanel(urwid.Pile):
    """Vertical stack of per-repo status rows plus the config-file path."""

    def __init__(self, databases, config_source):
        super().__init__([])
        rows = [(db, ('pack', None)) for db in databases]
        footer = urwid.Text(('cfg_src', f"config: {config_source}"))
        rows.append((footer, ('pack', None)))
        self.contents = rows
class TopWidget(urwid.Pile):
    """Root widget: constructs all panels and repos and wires them together."""
    def __init__(self, args, config, event_loop):
        super().__init__([urwid.SolidFill()])
        self.message_bar = MessageBar(event_loop)
        self.search_results_panel = SearchResultsPanel()
        self.details_panel = DetailsPanel()
        self.selected_keys_panel = SelectedKeysPanel(args.keys_output)
        # rw repos are both searched and written back; ro repos only searched.
        self.output_repos = [BibRepo.Create(cfg, 'rw', event_loop) for cfg in config['rw_repos']]
        self.bib_repos = [BibRepo.Create(cfg, 'ro', event_loop) for cfg in config['ro_repos']] + self.output_repos
        for repo, i in zip(self.bib_repos, itertools.count(1)):
            repo.short_label = f"{i}"   # the alt+<digit> toggle number
            repo.message_bar = self.message_bar
            repo.search_results_panel = self.search_results_panel
            repo.selected_keys_panel = self.selected_keys_panel
            repo.details_panel = self.details_panel
        self.search_bar = SearchBar()
        self.search_bar.bib_repos = self.bib_repos
        self.search_bar.search_results_panel = self.search_results_panel
        self.db_status_panel = DatabaseStatusPanel(
            [repo.status_indicator_widget for repo in self.bib_repos],
            config.source)
        for repo in self.output_repos:
            repo.selected_keys_panel = self.selected_keys_panel
        self.right_panel = urwid.Pile([
            ('pack', urwid.LineBox(self.db_status_panel, title="Database Info")),
            ('weight', 5, urwid.LineBox(self.details_panel, title="Detailed Info")),
            ('pack', urwid.LineBox(self.selected_keys_panel, title="Selected Entries"))])
        self.main_widget = urwid.Columns([
            ('weight', 2, urwid.LineBox(self.search_results_panel, title="Search Results")),
            ('weight', 1, self.right_panel)])
        # Layout: search bar on top, panels in the middle, message bar below.
        self.contents = [(self.search_bar, ('pack', None)),
                         (self.main_widget, ('weight', 1)),
                         (self.message_bar, ('pack', None))]
class DefaultConfig(dict):
    """Example configuration written out by the -g command-line option."""

    def __init__(self):
        self.update(
            ro_repos=[
                {'remote': "dblp.org", 'enabled': True},
                {'glob': "/path/to/lots/of/**/*.bib", 'enabled': True},
                {'glob': "/path/to/sample.bib", 'enabled': False},
                {'glob': "/path/to/another/sample.bib"},
            ],
            rw_repos=[
                {'glob': "reference.bib", 'enabled': True},
            ],
        )

    def Write(self, file):
        """Serialize this config as pretty-printed JSON to the path `file`."""
        with open(file, 'w') as out:
            json.dump(self, out, indent=4)
class Config(dict):
    """Configuration loaded from the nearest config file.

    Searches for `file_name` from the current directory upward to '/',
    loads the first readable match as JSON, and exits the process when
    none is found.  Relative 'glob' paths are then resolved against the
    directory containing the config file.
    """
    def __init__(self, file_name):
        prefix = os.getcwd()
        self.source = None  # absolute path of the config file actually used
        while True:
            path = os.path.join(prefix, file_name)
            if os.path.isfile(path) and os.access(path, os.R_OK):
                with open(path) as f:
                    self.update(json.load(f))
                self.source = path
                break
            if prefix == '/': break
            prefix = os.path.dirname(prefix)
        if self.source is None:
            print("Did not find any config file.")
            print("You can generate an example config file using option -g.")
            print("For more information, please use option -h for help.")
            sys.exit(1)
        self._NormalizePaths()
    def _NormalizePaths(self):
        # Expand ~ and $VARS, then anchor relative globs at the config dir.
        config_dir = os.path.dirname(os.path.realpath(self.source))
        for repo_group in (self[k] for k in ('ro_repos', 'rw_repos')):
            for repo_config in repo_group:
                if 'glob' in repo_config:
                    repo_config['glob'] = os.path.expandvars(os.path.expanduser(repo_config['glob']))
                    if not os.path.isabs(repo_config['glob']):
                        repo_config['glob'] = os.path.join(config_dir, repo_config['glob'])
class ArgParser(argparse.ArgumentParser):
    """Command-line argument parser for the bibrarian program."""

    def __init__(self):
        super().__init__(prog="bibrarian")
        # Path of the config file to load (searched upward from cwd).
        self.add_argument("-f", "--config", action='store',
                          default=".bibrarian_config.json",
                          help="force configuration file path")
        # Emit a template config instead of running the UI.
        self.add_argument("-g", "--gen-config", action='store_true',
                          default=False,
                          help="generate a configuration file")
        # Log file defaults to a per-user path under /tmp.
        self.add_argument("-l", "--log", action='store',
                          default=f"/tmp/{getpass.getuser()}_babrarian.log",
                          help="force log file path")
        self.add_argument("-k", "--keys-output", action='store',
                          help="output bib keys file (truncate mode)")
        self.add_argument("-v", "--version", action='version',
                          version="%(prog)s 1.0")
class Palette(list):
    """urwid display attributes; names ending in '+' are focus highlights."""

    def __init__(self):
        unfocused = [
            ('search_label', 'yellow', 'dark magenta'),
            ('search_content', 'white', 'dark magenta'),
            ('search_hint', 'light cyan', 'dark magenta'),
            ('msg_tips', 'white', 'dark gray'),
            ('msg_normal', 'light green', 'dark gray'),
            ('msg_warning', 'yellow', 'dark gray'),
            ('msg_error', 'light red', 'dark gray'),
            ('details_hint', 'dark green', 'default'),
            ('db_label', 'default', 'default'),
            ('db_enabled', 'light cyan', 'default'),
            ('db_status_ready', 'light green', 'default'),
            ('db_status_loading', 'light cyan', 'default'),
            ('db_status_searching', 'yellow', 'default'),
            ('db_status_error', 'light red', 'default'),
            ('db_rw', 'light magenta', 'default'),
            ('db_ro', 'light green', 'default'),
            ('mark_none', 'default', 'dark gray'),
            ('mark_selected', 'light cyan', 'dark gray'),
            ('title', 'yellow', 'dark gray'),
            ('title_delim', 'default', 'dark gray'),
            ('source', 'dark green', 'default'),
            ('author', 'white', 'default'),
            ('venue', 'underline', 'default'),
            ('year', 'light gray', 'default'),
            ('delim', 'default', 'default'),
            ('bibkey', 'light green', 'default'),
            ('bibtex_ready', 'dark green', 'default'),
            ('bibtex_fetching', 'yellow', 'default'),
        ]
        focused = [
            ('plain+', 'default', 'dark magenta'),
            ('mark_none+', 'default', 'light magenta'),
            ('mark_selected+', 'light cyan', 'light magenta'),
            ('title+', 'yellow', 'light magenta'),
            ('title_delim+', 'default', 'light magenta'),
            ('source+', 'light green', 'dark magenta'),
            ('author+', 'white', 'dark magenta'),
            ('venue+', 'white,underline', 'dark magenta'),
            ('year+', 'white', 'dark magenta'),
            ('delim+', 'default', 'dark magenta'),
            ('bibkey+', 'light green', 'dark magenta'),
            ('bibtex_ready+', 'dark green', 'dark magenta'),
            ('bibtex_fetching+', 'yellow', 'dark magenta'),
        ]
        panels = [
            ('selected_key', 'light cyan', 'default'),
            ('selected_hint', 'dark cyan', 'default'),
            ('detail_key', 'light green', 'default'),
            ('detail_value', 'default', 'default'),
            ('banner_hi', 'light magenta', 'default'),
            ('banner_lo', 'dark magenta', 'default'),
            ('cfg_src', 'dark gray', 'default'),
        ]
        super().__init__(unfocused + focused + panels)
if __name__ == '__main__':
    # Parse CLI arguments first; -g short-circuits into config generation.
    args = ArgParser().parse_args()
    if args.gen_config:
        DefaultConfig().Write(args.config)
        print(f"Wrote default config to file {args.config}")
        sys.exit(0)
    logging.basicConfig(filename=args.log,
                        format="[%(asctime)s %(levelname)7s] %(threadName)s: %(message)s",
                        datefmt="%m-%d-%Y %H:%M:%S",
                        level=logging.DEBUG)
    config = Config(args.config)
    input_filter = InputFilter()
    # The loop is created with a placeholder widget because repos need the
    # loop (for watch_pipe) before TopWidget can be constructed.
    main_loop = urwid.MainLoop(urwid.SolidFill(),
                              palette=Palette(),
                              input_filter=input_filter)
    top_widget = TopWidget(args, config, main_loop)
    input_filter.widget = top_widget
    main_loop.widget = top_widget
    try: main_loop.run()
    except KeyboardInterrupt:
        # ctrl+c exits without writing anything (ctrl+w writes and exits).
        sys.exit(0)
| [
"logging.debug",
"time.sleep",
"urwid.SimpleListWalker",
"sys.exit",
"getpass.getuser",
"logging.info",
"logging.error",
"urwid.Columns",
"threading.Lock",
"subprocess.run",
"urwid.SolidFill",
"pybtex.database.BibliographyData",
"urwid.Pile",
"urwid.connect_signal",
"urllib.request.urlop... | [((41926, 42091), 'logging.basicConfig', 'logging.basicConfig', ([], {'filename': 'args.log', 'format': '"""[%(asctime)s %(levelname)7s] %(threadName)s: %(message)s"""', 'datefmt': '"""%m-%d-%Y %H:%M:%S"""', 'level': 'logging.DEBUG'}), "(filename=args.log, format=\n '[%(asctime)s %(levelname)7s] %(threadName)s: %(message)s', datefmt=\n '%m-%d-%Y %H:%M:%S', level=logging.DEBUG)\n", (41945, 42091), False, 'import logging\n'), ((4667, 4753), 'urwid.Text', 'urwid.Text', (["[('selected_key', self.bibkey), ('selected_hint', f'({self.source})')]"], {}), "([('selected_key', self.bibkey), ('selected_hint',\n f'({self.source})')])\n", (4677, 4753), False, 'import urwid\n'), ((5016, 5138), 'subprocess.run', 'subprocess.run', (["['python3', '-m', 'webbrowser', '-t', self.url]"], {'stdout': 'subprocess.DEVNULL', 'stderr': 'subprocess.DEVNULL'}), "(['python3', '-m', 'webbrowser', '-t', self.url], stdout=\n subprocess.DEVNULL, stderr=subprocess.DEVNULL)\n", (5030, 5138), False, 'import subprocess\n'), ((7589, 7606), 'threading.Event', 'threading.Event', ([], {}), '()\n', (7604, 7606), False, 'import threading\n'), ((7645, 7739), 'threading.Thread', 'threading.Thread', ([], {'name': 'f"""bibtex-{self.bibkey}"""', 'target': 'self._LoadPybtexEntry', 'daemon': '(False)'}), "(name=f'bibtex-{self.bibkey}', target=self._LoadPybtexEntry,\n daemon=False)\n", (7661, 7739), False, 'import threading\n'), ((16777, 16793), 'threading.Lock', 'threading.Lock', ([], {}), '()\n', (16791, 16793), False, 'import threading\n'), ((16955, 16971), 'threading.Lock', 'threading.Lock', ([], {}), '()\n', (16969, 16971), False, 'import threading\n'), ((17152, 17169), 'threading.Event', 'threading.Event', ([], {}), '()\n', (17167, 17169), False, 'import threading\n'), ((17200, 17217), 'threading.Event', 'threading.Event', ([], {}), '()\n', (17215, 17217), False, 'import threading\n'), ((17249, 17345), 'threading.Thread', 'threading.Thread', ([], {'name': 'f"""load-{self.source}"""', 
'target': 'self.LoadingThreadWrapper', 'daemon': '(True)'}), "(name=f'load-{self.source}', target=self.\n LoadingThreadWrapper, daemon=True)\n", (17265, 17345), False, 'import threading\n'), ((17468, 17568), 'threading.Thread', 'threading.Thread', ([], {'name': 'f"""search-{self.source}"""', 'target': 'self.SearchingThreadWrapper', 'daemon': '(True)'}), "(name=f'search-{self.source}', target=self.\n SearchingThreadWrapper, daemon=True)\n", (17484, 17568), False, 'import threading\n'), ((17690, 17705), 'urwid.Text', 'urwid.Text', (['"""?"""'], {}), "('?')\n", (17700, 17705), False, 'import urwid\n'), ((17735, 17749), 'urwid.Text', 'urwid.Text', (['""""""'], {}), "('')\n", (17745, 17749), False, 'import urwid\n'), ((18038, 18063), 'os.close', 'os.close', (['self._redraw_fd'], {}), '(self._redraw_fd)\n', (18046, 18063), False, 'import os\n'), ((21650, 21721), 'logging.debug', 'logging.debug', (['f"""Collecting entries from glob expression \'{glob_expr}\'"""'], {}), '(f"Collecting entries from glob expression \'{glob_expr}\'")\n', (21663, 21721), False, 'import logging\n'), ((21749, 21785), 'glob.glob', 'glob.glob', (['glob_expr'], {'recursive': '(True)'}), '(glob_expr, recursive=True)\n', (21758, 21785), False, 'import glob\n'), ((23861, 23912), 'logging.info', 'logging.info', (['f"""Wrote to file \'{self.output_file}\'"""'], {}), '(f"Wrote to file \'{self.output_file}\'")\n', (23873, 23912), False, 'import logging\n'), ((24956, 25008), 'urwid.Padding', 'urwid.Padding', (['self.big_text', '"""center"""'], {'width': '"""clip"""'}), "(self.big_text, 'center', width='clip')\n", (24969, 25008), False, 'import urwid\n'), ((25034, 25126), 'urwid.Text', 'urwid.Text', (["('banner_hi', 'A BibTeX Management Tool Powered By D.B.L.P')"], {'align': '"""center"""'}), "(('banner_hi', 'A BibTeX Management Tool Powered By D.B.L.P'),\n align='center')\n", (25044, 25126), False, 'import urwid\n'), ((25146, 25202), 'urwid.Text', 'urwid.Text', (["('banner_lo', 'version 1.0')"], {'align': 
'"""center"""'}), "(('banner_lo', 'version 1.0'), align='center')\n", (25156, 25202), False, 'import urwid\n'), ((25526, 25542), 'threading.Lock', 'threading.Lock', ([], {}), '()\n', (25540, 25542), False, 'import threading\n'), ((28024, 28064), 'urwid.Edit', 'urwid.Edit', (["('search_label', 'Search: ')"], {}), "(('search_label', 'Search: '))\n", (28034, 28064), False, 'import urwid\n'), ((28221, 28289), 'urwid.connect_signal', 'urwid.connect_signal', (['self._search', '"""change"""', 'self.TextChangeHandler'], {}), "(self._search, 'change', self.TextChangeHandler)\n", (28241, 28289), False, 'import urwid\n'), ((28944, 28961), 'threading.Event', 'threading.Event', ([], {}), '()\n', (28959, 28961), False, 'import threading\n'), ((29563, 29579), 'threading.Lock', 'threading.Lock', ([], {}), '()\n', (29577, 29579), False, 'import threading\n'), ((29620, 29705), 'threading.Thread', 'threading.Thread', ([], {'name': 'f"""msg-trigger"""', 'target': 'self._PeriodicTrigger', 'daemon': '(True)'}), "(name=f'msg-trigger', target=self._PeriodicTrigger, daemon=True\n )\n", (29636, 29705), False, 'import threading\n'), ((29756, 29833), 'threading.Thread', 'threading.Thread', ([], {'name': 'f"""msg-update"""', 'target': 'self._UpdateMessage', 'daemon': '(True)'}), "(name=f'msg-update', target=self._UpdateMessage, daemon=True)\n", (29772, 29833), False, 'import threading\n'), ((30750, 30780), 'time.sleep', 'time.sleep', (['self.initial_delay'], {}), '(self.initial_delay)\n', (30760, 30780), False, 'import time\n'), ((31616, 31641), 'os.close', 'os.close', (['self._redraw_fd'], {}), '(self._redraw_fd)\n', (31624, 31641), False, 'import os\n'), ((36427, 36438), 'os.getcwd', 'os.getcwd', ([], {}), '()\n', (36436, 36438), False, 'import os\n'), ((41909, 41920), 'sys.exit', 'sys.exit', (['(0)'], {}), '(0)\n', (41917, 41920), False, 'import sys\n'), ((42253, 42270), 'urwid.SolidFill', 'urwid.SolidFill', ([], {}), '()\n', (42268, 42270), False, 'import urwid\n'), ((579, 745), 
'urwid.Text', 'urwid.Text', (["[('author', f'{entry.abbrev_authors}'), ('delim', '. '), ('venue',\n f'{entry.venue}'), ('delim', ', '), ('year', f'{entry.year}'), ('delim',\n '.')]"], {}), "([('author', f'{entry.abbrev_authors}'), ('delim', '. '), (\n 'venue', f'{entry.venue}'), ('delim', ', '), ('year', f'{entry.year}'),\n ('delim', '.')])\n", (589, 745), False, 'import urwid\n'), ((1036, 1131), 'urwid.Text', 'urwid.Text', (["[('source', f'{entry.source}'), ('delim', '::'), ('bibkey', f'{entry.bibkey}')]"], {}), "([('source', f'{entry.source}'), ('delim', '::'), ('bibkey',\n f'{entry.bibkey}')])\n", (1046, 1131), False, 'import urwid\n'), ((7861, 7886), 'os.close', 'os.close', (['self._redraw_fd'], {}), '(self._redraw_fd)\n', (7869, 7886), False, 'import os\n'), ((10217, 10265), 'pybtex.database.parse_string', 'pybtex.database.parse_string', (['bib_text', '"""bibtex"""'], {}), "(bib_text, 'bibtex')\n", (10245, 10265), False, 'import pybtex\n'), ((14872, 14886), 'urwid.Text', 'urwid.Text', (['""""""'], {}), "('')\n", (14882, 14886), False, 'import urwid\n'), ((15000, 15192), 'urwid.Columns', 'urwid.Columns', (["[('pack', self.repo._short_label), ('pack', self.repo._enabled_mark), (\n 'weight', 1, self.label), ('pack', self.status_indicator), ('pack',\n self.access)]"], {'dividechars': '(1)'}), "([('pack', self.repo._short_label), ('pack', self.repo.\n _enabled_mark), ('weight', 1, self.label), ('pack', self.\n status_indicator), ('pack', self.access)], dividechars=1)\n", (15013, 15192), False, 'import urwid\n'), ((21831, 21892), 'logging.warning', 'logging.warning', (['f"""Glob expr \'{glob_expr}\' matches no target"""'], {}), '(f"Glob expr \'{glob_expr}\' matches no target")\n', (21846, 21892), False, 'import logging\n'), ((24351, 24378), 'urllib.request.urlopen', 'urllib.request.urlopen', (['url'], {}), '(url)\n', (24373, 24378), False, 'import urllib\n'), ((24415, 24434), 'json.load', 'json.load', (['response'], {}), '(response)\n', (24424, 24434), False, 
'import json\n'), ((24703, 24720), 'urwid.SolidFill', 'urwid.SolidFill', ([], {}), '()\n', (24718, 24720), False, 'import urwid\n'), ((24892, 24921), 'urwid.font.HalfBlock7x7Font', 'urwid.font.HalfBlock7x7Font', ([], {}), '()\n', (24919, 24921), False, 'import urwid\n'), ((25265, 25329), 'urwid.Pile', 'urwid.Pile', (['[self.big_text_clipped, self.subtitle, self.version]'], {}), '([self.big_text_clipped, self.subtitle, self.version])\n', (25275, 25329), False, 'import urwid\n'), ((25448, 25465), 'urwid.SolidFill', 'urwid.SolidFill', ([], {}), '()\n', (25463, 25465), False, 'import urwid\n'), ((26246, 26283), 'urwid.SimpleListWalker', 'urwid.SimpleListWalker', (['enabled_items'], {}), '(enabled_items)\n', (26268, 26283), False, 'import urwid\n'), ((26319, 26350), 'urwid.ListBox', 'urwid.ListBox', (['self.list_walker'], {}), '(self.list_walker)\n', (26332, 26350), False, 'import urwid\n'), ((27815, 27880), 'logging.info', 'logging.info', (['f"""Wrote selected keys to file \'{self.keys_output}\'"""'], {}), '(f"Wrote selected keys to file \'{self.keys_output}\'")\n', (27827, 27880), False, 'import logging\n'), ((27963, 27980), 'urwid.SolidFill', 'urwid.SolidFill', ([], {}), '()\n', (27978, 27980), False, 'import urwid\n'), ((28675, 28710), 'urwid.Text', 'urwid.Text', (['"""Welcome to bibrarian."""'], {}), "('Welcome to bibrarian.')\n", (28685, 28710), False, 'import urwid\n'), ((30431, 30473), 'urwid.Text', 'urwid.Text', (["(style, f'{label}: {message}')"], {}), "((style, f'{label}: {message}'))\n", (30441, 30473), False, 'import urwid\n'), ((31552, 31583), 'os.write', 'os.write', (['self._redraw_fd', "b'?'"], {}), "(self._redraw_fd, b'?')\n", (31560, 31583), False, 'import os\n'), ((32330, 32350), 'urwid.ExitMainLoop', 'urwid.ExitMainLoop', ([], {}), '()\n', (32348, 32350), False, 'import urwid\n'), ((34244, 34262), 'itertools.count', 'itertools.count', (['(1)'], {}), '(1)\n', (34259, 34262), False, 'import itertools\n'), ((36325, 36353), 'json.dump', 'json.dump', 
(['self', 'f'], {'indent': '(4)'}), '(self, f, indent=4)\n', (36334, 36353), False, 'import json\n'), ((36506, 36537), 'os.path.join', 'os.path.join', (['prefix', 'file_name'], {}), '(prefix, file_name)\n', (36518, 36537), False, 'import os\n'), ((36811, 36834), 'os.path.dirname', 'os.path.dirname', (['prefix'], {}), '(prefix)\n', (36826, 36834), False, 'import os\n'), ((37083, 37094), 'sys.exit', 'sys.exit', (['(1)'], {}), '(1)\n', (37091, 37094), False, 'import sys\n'), ((37196, 37225), 'os.path.realpath', 'os.path.realpath', (['self.source'], {}), '(self.source)\n', (37212, 37225), False, 'import os\n'), ((42569, 42580), 'sys.exit', 'sys.exit', (['(0)'], {}), '(0)\n', (42577, 42580), False, 'import sys\n'), ((425, 442), 'urwid.SolidFill', 'urwid.SolidFill', ([], {}), '()\n', (440, 442), False, 'import urwid\n'), ((521, 544), 'urwid.Text', 'urwid.Text', (['entry.title'], {}), '(entry.title)\n', (531, 544), False, 'import urwid\n'), ((955, 1002), 'urwid.Text', 'urwid.Text', (["('mark_none', '[M]')"], {'align': '"""right"""'}), "(('mark_none', '[M]'), align='right')\n", (965, 1002), False, 'import urwid\n'), ((10045, 10076), 'os.write', 'os.write', (['self._redraw_fd', "b'?'"], {}), "(self._redraw_fd, b'?')\n", (10053, 10076), False, 'import os\n'), ((10095, 10126), 'urllib.request.urlopen', 'urllib.request.urlopen', (['bib_url'], {}), '(bib_url)\n', (10117, 10126), False, 'import urllib\n'), ((10673, 10704), 'os.write', 'os.write', (['self._redraw_fd', "b'?'"], {}), "(self._redraw_fd, b'?')\n", (10681, 10704), False, 'import os\n'), ((14677, 14694), 'urwid.SolidFill', 'urwid.SolidFill', ([], {}), '()\n', (14692, 14694), False, 'import urwid\n'), ((14804, 14832), 'urwid.Text', 'urwid.Text', (['f"""{repo.source}"""'], {}), "(f'{repo.source}')\n", (14814, 14832), False, 'import urwid\n'), ((14937, 14951), 'urwid.Text', 'urwid.Text', (['""""""'], {}), "('')\n", (14947, 14951), False, 'import urwid\n'), ((20981, 21012), 'os.write', 'os.write', (['self._redraw_fd', 
"b'?'"], {}), "(self._redraw_fd, b'?')\n", (20989, 21012), False, 'import os\n'), ((22184, 22216), 'pybtex.database.parse_file', 'pybtex.database.parse_file', (['path'], {}), '(path)\n', (22210, 22216), False, 'import pybtex\n'), ((23687, 23752), 'logging.error', 'logging.error', (['f"""Key {key} has empty entry. Not writing to file."""'], {}), "(f'Key {key} has empty entry. Not writing to file.')\n", (23700, 23752), False, 'import logging\n'), ((23785, 23826), 'pybtex.database.BibliographyData', 'pybtex.database.BibliographyData', (['entries'], {}), '(entries)\n', (23817, 23826), False, 'import pybtex\n'), ((24292, 24323), 'urllib.parse.quote', 'urllib.parse.quote', (['search_text'], {}), '(search_text)\n', (24310, 24323), False, 'import urllib\n'), ((30613, 30624), 'time.time', 'time.time', ([], {}), '()\n', (30622, 30624), False, 'import time\n'), ((31740, 31815), 'urwid.Text', 'urwid.Text', (["('details_hint', 'Hit <i> on highlighted item to update info.')"], {}), "(('details_hint', 'Hit <i> on highlighted item to update info.'))\n", (31750, 31815), False, 'import urwid\n'), ((33742, 33759), 'urwid.SolidFill', 'urwid.SolidFill', ([], {}), '()\n', (33757, 33759), False, 'import urwid\n'), ((36553, 36573), 'os.path.isfile', 'os.path.isfile', (['path'], {}), '(path)\n', (36567, 36573), False, 'import os\n'), ((36578, 36602), 'os.access', 'os.access', (['path', 'os.R_OK'], {}), '(path, os.R_OK)\n', (36587, 36602), False, 'import os\n'), ((7185, 7202), 'urwid.SolidFill', 'urwid.SolidFill', ([], {}), '()\n', (7200, 7202), False, 'import urwid\n'), ((12615, 12632), 'urwid.SolidFill', 'urwid.SolidFill', ([], {}), '()\n', (12630, 12632), False, 'import urwid\n'), ((22268, 22332), 'logging.error', 'logging.error', (['f"""Exception raised when parsing file {path}: {e}"""'], {}), "(f'Exception raised when parsing file {path}: {e}')\n", (22281, 22332), False, 'import logging\n'), ((27446, 27521), 'urwid.Text', 'urwid.Text', (["('selected_hint', 'Hit <SPACE> on highlighted 
item to select.')"], {}), "(('selected_hint', 'Hit <SPACE> on highlighted item to select.'))\n", (27456, 27521), False, 'import urwid\n'), ((33566, 33617), 'urwid.Text', 'urwid.Text', (["('cfg_src', f'config: {config_source}')"], {}), "(('cfg_src', f'config: {config_source}'))\n", (33576, 33617), False, 'import urwid\n'), ((35013, 35071), 'urwid.LineBox', 'urwid.LineBox', (['self.db_status_panel'], {'title': '"""Database Info"""'}), "(self.db_status_panel, title='Database Info')\n", (35026, 35071), False, 'import urwid\n'), ((35100, 35156), 'urwid.LineBox', 'urwid.LineBox', (['self.details_panel'], {'title': '"""Detailed Info"""'}), "(self.details_panel, title='Detailed Info')\n", (35113, 35156), False, 'import urwid\n'), ((35180, 35245), 'urwid.LineBox', 'urwid.LineBox', (['self.selected_keys_panel'], {'title': '"""Selected Entries"""'}), "(self.selected_keys_panel, title='Selected Entries')\n", (35193, 35245), False, 'import urwid\n'), ((35319, 35383), 'urwid.LineBox', 'urwid.LineBox', (['self.search_results_panel'], {'title': '"""Search Results"""'}), "(self.search_results_panel, title='Search Results')\n", (35332, 35383), False, 'import urwid\n'), ((1283, 1361), 'urwid.Columns', 'urwid.Columns', (["[('weight', 1, self.title), ('pack', self.mark)]"], {'dividechars': '(1)'}), "([('weight', 1, self.title), ('pack', self.mark)], dividechars=1)\n", (1296, 1361), False, 'import urwid\n'), ((5888, 5930), 'urwid.Text', 'urwid.Text', (["('detail_key', 'bibtex key: ')"], {}), "(('detail_key', 'bibtex key: '))\n", (5898, 5930), False, 'import urwid\n'), ((5990, 6032), 'urwid.Text', 'urwid.Text', (["('detail_value', entry.bibkey)"], {}), "(('detail_value', entry.bibkey))\n", (6000, 6032), False, 'import urwid\n'), ((6091, 6129), 'urwid.Text', 'urwid.Text', (["('detail_key', 'source: ')"], {}), "(('detail_key', 'source: '))\n", (6101, 6129), False, 'import urwid\n'), ((6192, 6234), 'urwid.Text', 'urwid.Text', (["('detail_value', entry.source)"], {}), "(('detail_value', 
entry.source))\n", (6202, 6234), False, 'import urwid\n'), ((11133, 11177), 'urwid.Text', 'urwid.Text', (["('detail_key', 'citation key: ')"], {}), "(('detail_key', 'citation key: '))\n", (11143, 11177), False, 'import urwid\n'), ((11210, 11252), 'urwid.Text', 'urwid.Text', (["('detail_value', entry.bibkey)"], {}), "(('detail_value', entry.bibkey))\n", (11220, 11252), False, 'import urwid\n'), ((11324, 11362), 'urwid.Text', 'urwid.Text', (["('detail_key', 'source: ')"], {}), "(('detail_key', 'source: '))\n", (11334, 11362), False, 'import urwid\n'), ((11395, 11437), 'urwid.Text', 'urwid.Text', (["('detail_value', entry.source)"], {}), "(('detail_value', entry.source))\n", (11405, 11437), False, 'import urwid\n'), ((11512, 11548), 'urwid.Text', 'urwid.Text', (["('detail_key', 'type: ')"], {}), "(('detail_key', 'type: '))\n", (11522, 11548), False, 'import urwid\n'), ((11581, 11627), 'urwid.Text', 'urwid.Text', (["('detail_value', entry.entry.type)"], {}), "(('detail_value', entry.entry.type))\n", (11591, 11627), False, 'import urwid\n'), ((20682, 20704), 'traceback.format_exc', 'traceback.format_exc', ([], {}), '()\n', (20702, 20704), False, 'import traceback\n'), ((21063, 21085), 'traceback.format_exc', 'traceback.format_exc', ([], {}), '()\n', (21083, 21085), False, 'import traceback\n'), ((30894, 30905), 'time.time', 'time.time', ([], {}), '()\n', (30903, 30905), False, 'import time\n'), ((31247, 31274), 'time.sleep', 'time.sleep', (['self.tips_delay'], {}), '(self.tips_delay)\n', (31257, 31274), False, 'import time\n'), ((31355, 31368), 'time.sleep', 'time.sleep', (['(1)'], {}), '(1)\n', (31365, 31368), False, 'import time\n'), ((32171, 32193), 'traceback.format_exc', 'traceback.format_exc', ([], {}), '()\n', (32191, 32193), False, 'import traceback\n'), ((32287, 32309), 'traceback.format_exc', 'traceback.format_exc', ([], {}), '()\n', (32307, 32309), False, 'import traceback\n'), ((36674, 36686), 'json.load', 'json.load', (['f'], {}), '(f)\n', (36683, 36686), 
False, 'import json\n'), ((37444, 37483), 'os.path.expanduser', 'os.path.expanduser', (["repo_config['glob']"], {}), "(repo_config['glob'])\n", (37462, 37483), False, 'import os\n'), ((37513, 37547), 'os.path.isabs', 'os.path.isabs', (["repo_config['glob']"], {}), "(repo_config['glob'])\n", (37526, 37547), False, 'import os\n'), ((37595, 37640), 'os.path.join', 'os.path.join', (['config_dir', "repo_config['glob']"], {}), "(config_dir, repo_config['glob'])\n", (37607, 37640), False, 'import os\n'), ((38327, 38344), 'getpass.getuser', 'getpass.getuser', ([], {}), '()\n', (38342, 38344), False, 'import getpass\n'), ((10829, 10851), 'traceback.format_exc', 'traceback.format_exc', ([], {}), '()\n', (10849, 10851), False, 'import traceback\n'), ((31033, 31076), 'urwid.Text', 'urwid.Text', (["('msg_tips', f'Tip: {message}')"], {}), "(('msg_tips', f'Tip: {message}'))\n", (31043, 31076), False, 'import urwid\n'), ((6749, 6805), 'urwid.Text', 'urwid.Text', (['(\'detail_value\', f"{entry.data[\'info\'][k]}")'], {}), '((\'detail_value\', f"{entry.data[\'info\'][k]}"))\n', (6759, 6805), False, 'import urwid\n'), ((12130, 12186), 'urwid.Text', 'urwid.Text', (["('detail_value', f'{entry.entry.fields[k]}')"], {}), "(('detail_value', f'{entry.entry.fields[k]}'))\n", (12140, 12186), False, 'import urwid\n'), ((31193, 31204), 'time.time', 'time.time', ([], {}), '()\n', (31202, 31204), False, 'import time\n')] |
# ----------------------- #
# -------- SETUP -------- #
# ----------------------- #
# Import PySpark, Parameters, File Paths, Functions & Packages
import pyspark
from CCSLink import Parameters
from CCSLink.Parameters import FILE_PATH
from CCSLink import Person_Functions as PF
from CCSLink import Household_Functions as HF
from CCSLink import Cluster_Function as CF
# NOTE(review): the exec() below is relied on to bring `sparkSession` and the
# pyspark.sql helpers used throughout this script (col, when, lit, upper,
# regexp_replace, concat, collect_set, size, sort_array, array, to_date, F,
# Window, ...) into the module namespace — confirm against Packages.py.
exec(open("/home/cdsw/collaborative_method_matching/CCSLink/Packages.py").read())
# Changes to SparkSession
# Disable whole-stage code generation (works around codegen failures on wide plans).
sparkSession.conf.set('spark.sql.codegen.wholeStage', 'false')
# ----------------------- #
# -------- DATA --------- #
# ----------------------- #
# Set year, month, date for file path
YEAR, MONTH, DAY = '2021', '11', '16'
# Read in CCS HH data
# NOTE(review): the literal 'ccs_households/...' contains no {} placeholders,
# so .format(YEAR, MONTH, DAY) is a no-op here — verify the intended path.
ccs = sparkSession.read.csv('some_path' + 'ccs_households/ccs_households.csv'.format(YEAR, MONTH, DAY), header = True)
# Select columns
# Rename household-level variables with a `_ccs` suffix; the `cenday` columns
# hold the address reported for Census day (used later for movers).
ccs = ccs.selectExpr('qid as qid_ccs', 'household_id as hh_id_ccs', 'ownership_type as tenure_ccs', 'accommodation_type as typaccom_ccs', 'resident_count as no_resi_ccs',
                    'census_address_indicator', 'census_address as address_cenday_ccs', 'census_address_postcode as pc_cenday_ccs', 'census_address_uprn as uprn_cenday_ccs',
                    'census_address_country as country_cenday_ccs').persist()
# Action to materialise the persisted DataFrame.
ccs.count()
# ---------------------------------------------------------------------------------- #
# ---------------- Current Address / Postcode / UPRN / House Number ---------------- #
# ---------------------------------------------------------------------------------- #
# CCS Questionnaire data
# NOTE(review): as with the household read above, .format() has no placeholders
# to fill in this path — verify the intended file name.
ccs_q = sparkSession.read.csv('some_path' + 'ccs_questionnaires/ccs_questionnaires.csv'.format(YEAR, MONTH, DAY), header = True)
ccs_q = ccs_q.selectExpr('qid as qid_ccs', 'display_address', 'address', 'address_postcode as pc_ccs', 'uprn as uprn_ccs').drop_duplicates()
# Replace -9 & -8 with None
# -9 / -8 are sentinel codes for missing values in the raw extract.
for variable in ['display_address', 'address', 'pc_ccs']:
    ccs_q = ccs_q.withColumn(variable, when(col(variable).isin(['-9', '-8']), lit(None)).otherwise(col(variable)))
# Add comma to display_address (helps with house number function later on)
ccs_q = ccs_q.withColumn('display_address', concat(col('display_address'), lit(',')))
# Replace missing address with display address (equivalent to CMS variable 'address_combined')
ccs_q = ccs_q.withColumn('address_ccs', when(col('address').isNull() == True, col('display_address')).otherwise(col('address'))).drop('display_address')
# Clean Postcode
# Strip every non-alphanumeric character and upper-case; empty results become NULL.
ccs_q = ccs_q.withColumn('pc_ccs', upper(regexp_replace(col('pc_ccs'), "[^0-9A-Za-z]+", "")))
ccs_q = ccs_q.withColumn('pc_ccs', when(col('pc_ccs') == '', None).otherwise(col('pc_ccs')))
# Join variables on via qid
ccs = ccs.join(ccs_q.dropDuplicates(['qid_ccs']), on = 'qid_ccs', how = 'left')
# House/Flat Number
# Parsed out of the free-text address by Household_Functions UDFs.
ccs = ccs.withColumn('house_no_ccs', HF.house_number_udf(col('address_ccs')))
ccs = ccs.withColumn('flat_no_ccs', HF.flat_number_udf(col('address_ccs')))
# ----------------------------------------------------------------------------------------------- #
# ---------------- Census Day Address / Postcode / UPRN / House Number / Country ---------------- #
# ----------------------------------------------------------------------------------------------- #
# Indicator update: If census day postcode exists (and census day address is not -8), set to 1, otherwise set to 0
# census_address_indicator == 1 marks a household that reported a different
# (valid) Census-day address, i.e. a potential mover.
ccs = ccs.withColumn('census_address_indicator', when(col('pc_cenday_ccs').isin(['-9', '-8']), lit(0)).otherwise(lit(1)))
ccs = ccs.withColumn('census_address_indicator', when(col('address_cenday_ccs') == '-8', lit(0)).otherwise(col('census_address_indicator')))
# Replace -9 & -8 with None
for variable in ['census_address_indicator', 'address_cenday_ccs', 'pc_cenday_ccs', 'uprn_cenday_ccs', 'country_cenday_ccs']:
    ccs = ccs.withColumn(variable, when(col(variable).isin(['-9', '-8']), lit(None)).otherwise(col(variable)))
# Clean Census Day Postcode
# Same cleaning as the current postcode: alphanumerics only, upper-case, empty -> NULL.
ccs = ccs.withColumn('pc_cenday_ccs', upper(regexp_replace(col('pc_cenday_ccs'), "[^0-9A-Za-z]+", "")))
ccs = ccs.withColumn('pc_cenday_ccs', when(col('pc_cenday_ccs') == '', None).otherwise(col('pc_cenday_ccs')))
# Create House/Flat Number using UDF
ccs = ccs.withColumn('house_no_cenday_ccs', HF.house_number_udf(col('address_cenday_ccs')))
ccs = ccs.withColumn('flat_no_cenday_ccs', HF.flat_number_udf(col('address_cenday_ccs')))
# Update mover indicator to 0 if pc_cenday = pc (ccsday)
# Same cleaned postcode on both days means the household has not actually moved.
ccs = ccs.withColumn('census_address_indicator', when(col('pc_cenday_ccs') == col('pc_ccs'), lit(0)).otherwise(col('census_address_indicator')))
# -------------------------------------------------------------------------- #
# ----------------- Update Geographic Variables for Movers ----------------- #
# -------------------------------------------------------------------------- #
# Firstly, save geographic variables on CCS day in new columns
# The `_ccsday_` copies preserve the as-collected values before movers overwrite them.
ccs = ccs.withColumn('pc_ccsday_ccs', col('pc_ccs'))
ccs = ccs.withColumn('uprn_ccsday_ccs', col('uprn_ccs'))
ccs = ccs.withColumn('house_no_ccsday_ccs', col('house_no_ccs'))
ccs = ccs.withColumn('flat_no_ccsday_ccs', col('flat_no_ccs'))
ccs = ccs.withColumn('address_ccsday_ccs', col('address_ccs'))
# Next, if CCS person is a mover, update their main geographic columns with census_day variables
ccs = ccs.withColumn('pc_ccs', when(col('census_address_indicator') == 1, col('pc_cenday_ccs')).otherwise(col('pc_ccs')))
ccs = ccs.withColumn('uprn_ccs', when(col('census_address_indicator') == 1, col('uprn_cenday_ccs')).otherwise(col('uprn_ccs')))
ccs = ccs.withColumn('house_no_ccs', when(col('census_address_indicator') == 1, col('house_no_cenday_ccs')).otherwise(col('house_no_ccs')))
ccs = ccs.withColumn('flat_no_ccs', when(col('census_address_indicator') == 1, col('flat_no_cenday_ccs')).otherwise(col('flat_no_ccs')))
ccs = ccs.withColumn('address_ccs', when(col('census_address_indicator') == 1, col('address_cenday_ccs')).otherwise(col('address_ccs')))
# If HH has moved since Census Day, set variables relating to current address to NULL
# (accommodation type / tenure describe the current dwelling, not the Census-day one).
ccs = ccs.withColumn('typaccom_ccs', when(ccs.census_address_indicator == 1, lit(None)).otherwise(ccs.typaccom_ccs))
ccs = ccs.withColumn('tenure_ccs', when(ccs.census_address_indicator == 1, lit(None)).otherwise(ccs.tenure_ccs))
# Create sect/dist/area from primary postcode
# Sector = postcode minus the final two characters; district and area are
# progressively coarser prefixes derived from the sector.
ccs = ccs.withColumn('pc_sect_ccs', F.expr("substring({0},1, length({0}) - 2)".format("pc_ccs")))\
        .withColumn('pc_dist_ccs', F.expr("""IF(length({0}) = 5, substring({0},1,4), IF(length({0}) =4, substring({0},1,3), substring({0},1,2)))""".format("pc_sect_ccs")))\
        .withColumn('pc_area_ccs', F.expr("""IF(substring({0},2,1) in('1','2','3','4','5','6','7','8','9'), substr({0},1,1), substring({0},1,2))""".format("pc_sect_ccs")))
# --------------------------------- #
# -------- PERSON VARIABLES ------- #
# --------------------------------- #
# Read in CCS People
ccs_ppl = sparkSession.read.parquet(FILE_PATH('Stage_1_clean_ccs')).selectExpr('id_ccs', 'hh_id_ccs', 'fn1_ccs as FN', 'sn1_ccs as SN', 'dob_ccs as DOB', 'age_ccs as AGE').persist()
ccs_ppl.count()
# Remove records with no household ID
ccs_ppl = ccs_ppl.filter(col('hh_id_ccs') != '-9')
# ----------------------------- #
# -------- MISSINGNESS -------- #
# ----------------------------- #
# Fill missing person variables with fixed sentinels ('YYYYY', '1700-07-07',
# '777') so the set-building and matching functions downstream never see NULLs.
# FN / SN
ccs_ppl = ccs_ppl.withColumn('FN', when(ccs_ppl.FN.isNull(), 'YYYYY').otherwise(ccs_ppl.FN))
ccs_ppl = ccs_ppl.withColumn('SN', when(ccs_ppl.SN.isNull(), 'YYYYY').otherwise(ccs_ppl.SN))
# DOB
ccs_ppl = ccs_ppl.withColumn('DOB', when(ccs_ppl.DOB.isNull(), '1700-07-07').otherwise(ccs_ppl.DOB))
ccs_ppl = ccs_ppl.withColumn('DOB', to_date('DOB', 'yyyy-MM-dd'))
# AGE
ccs_ppl = ccs_ppl.withColumn('AGE', when(ccs_ppl.AGE.isNull(), '777').otherwise(ccs_ppl.AGE))
# ------------------------- #
# -------- HH SIZE -------- #
# ------------------------- #
# Count number of IDs for each HH - different to number of usual residents
ccs_ppl = ccs_ppl.withColumn('hh_size_ccs', size(collect_set('id_ccs').over(Window.partitionBy('hh_id_ccs')))).drop('id_ccs')
# ------------------------------ #
# --------- PERSON SETS -------- #
# ------------------------------ #
# Create 4 columns which contains list of all unique forenames / surnames / dobs / ages from that household
ccs_ppl = ccs_ppl.withColumn('fn_set_ccs', collect_set('FN').over(Window.partitionBy('hh_id_ccs')))\
        .withColumn('sn_set_ccs', collect_set('SN').over(Window.partitionBy('hh_id_ccs')))\
        .withColumn('dob_set_ccs', collect_set('DOB').over(Window.partitionBy('hh_id_ccs')))\
        .withColumn('age_set_ccs', collect_set('AGE').over(Window.partitionBy('hh_id_ccs')))\
        .drop('FN', 'SN', 'DOB', 'AGE')\
        .drop_duplicates(['hh_id_ccs'])\
# Array missing values
# NOTE(review): the trailing backslash on the line above splices this comment
# onto the statement — fragile but currently valid Python; worth tidying.
ccs_ppl = ccs_ppl.withColumn('fn_set_ccs', when(ccs_ppl.fn_set_ccs.isNull(), array(lit('YYYYY'))).otherwise(ccs_ppl.fn_set_ccs))
ccs_ppl = ccs_ppl.withColumn('sn_set_ccs', when(ccs_ppl.sn_set_ccs.isNull(), array(lit('YYYYY'))).otherwise(ccs_ppl.sn_set_ccs))
ccs_ppl = ccs_ppl.withColumn('dob_set_ccs', when(ccs_ppl.dob_set_ccs.isNull(), array(lit('1700-07-07'))).otherwise(ccs_ppl.dob_set_ccs))
ccs_ppl = ccs_ppl.withColumn('age_set_ccs', when(ccs_ppl.age_set_ccs.isNull(), array(lit('777'))).otherwise(ccs_ppl.age_set_ccs))
# Sort arrays
# Deterministic element order makes the set columns comparable across runs.
for var in ['fn_set_ccs', 'sn_set_ccs', 'dob_set_ccs', 'age_set_ccs']:
    ccs_ppl = ccs_ppl.withColumn(var, sort_array(var))
# -------------------------------------- #
# ---- COMBINE PERSON & HH VARIABLES --- #
# -------------------------------------- #
# Join person variables on HH ID
ccs = ccs.join(ccs_ppl, on = 'hh_id_ccs', how = 'left')
# ------------------------- #
# ---------- SAVE --------- #
# ------------------------- #
# Ensure all None array values have a missing value - this enables HH functions to run
# (households with no matching person rows after the left join get sentinel arrays)
ccs = ccs.withColumn('fn_set_ccs', when(ccs.fn_set_ccs.isNull(), array(lit('YYYYY'))).otherwise(ccs.fn_set_ccs))
ccs = ccs.withColumn('sn_set_ccs', when(ccs.sn_set_ccs.isNull(), array(lit('YYYYY'))).otherwise(ccs.sn_set_ccs))
ccs = ccs.withColumn('dob_set_ccs', when(ccs.dob_set_ccs.isNull(), array(lit('1700-07-07'))).otherwise(ccs.dob_set_ccs))
ccs = ccs.withColumn('age_set_ccs', when(ccs.age_set_ccs.isNull(), array(lit('777'))).otherwise(ccs.age_set_ccs))
# Replace missing values with NULL
# Sentinel codes differ by variable: -9/-8/-7 for dwelling variables,
# -9/-5/-4 for the resident count.
ccs = ccs.withColumn('typaccom_ccs', when(col('typaccom_ccs').isin(['-9', '-8', '-7']), lit(None)).otherwise(col('typaccom_ccs')))
ccs = ccs.withColumn('tenure_ccs', when(col('tenure_ccs').isin(['-9', '-8', '-7']), lit(None)).otherwise(col('tenure_ccs')))
ccs = ccs.withColumn('no_resi_ccs', when(col('no_resi_ccs').isin(['-9', '-5', '-4']), lit(None)).otherwise(col('no_resi_ccs')))
# Column Types
ccs = ccs.withColumn('typaccom_ccs', ccs['typaccom_ccs'].cast('int'))
ccs = ccs.withColumn('tenure_ccs', ccs['tenure_ccs'].cast('int'))
ccs = ccs.withColumn('no_resi_ccs', ccs['no_resi_ccs'].cast('int'))
# Save clean households
ccs.write.mode('overwrite').parquet(FILE_PATH('Stage_1_clean_HHs_ccs'))
sparkSession.stop()
| [
"CCSLink.Parameters.FILE_PATH"
] | [((11087, 11121), 'CCSLink.Parameters.FILE_PATH', 'FILE_PATH', (['"""Stage_1_clean_HHs_ccs"""'], {}), "('Stage_1_clean_HHs_ccs')\n", (11096, 11121), False, 'from CCSLink.Parameters import FILE_PATH\n'), ((6967, 6997), 'CCSLink.Parameters.FILE_PATH', 'FILE_PATH', (['"""Stage_1_clean_ccs"""'], {}), "('Stage_1_clean_ccs')\n", (6976, 6997), False, 'from CCSLink.Parameters import FILE_PATH\n')] |
# Get arxiv data
import json
import logging
import os
import pickle
from collections import Counter
from datetime import datetime
from io import BytesIO
from zipfile import ZipFile

import numpy as np
import pandas as pd
import requests
from kaggle.api.kaggle_api_extended import KaggleApi

from eurito_indicators import PROJECT_DIR
from eurito_indicators.pipeline.clustering_naming import make_doc_comm_lookup
from eurito_indicators.pipeline.processing_utils import covid_getter

# Input / cache file locations (all under the project root).
GRID_PATH = f"{PROJECT_DIR}/inputs/data/grid"
CORD_META_PATH = f"{PROJECT_DIR}/inputs/data/metadata.csv.zip"
DISC_QUERY = f"{PROJECT_DIR}/inputs/data/arxiv_discipline.csv"
COV_PAPERS_PATH = f"{PROJECT_DIR}/inputs/data/arxiv_papers_covid.csv"
def get_arxiv_articles():
    """Load arXiv (and CORD) article metadata.

    Returns:
        pd.DataFrame with one row per article, containing id, dates,
        title/abstract, authorship and citation columns, plus a
        `month_year` column truncated to the first day of the creation
        month (NaN when the creation date is missing).
    """
    art = pd.read_csv(
        f"{PROJECT_DIR}/inputs/data/arxiv_articles_v2.csv",
        dtype={"id": str},
        parse_dates=["created"],
    )
    art = art.rename(columns={"id": "article_id"})
    # Truncate the creation timestamp to the first of the month; keep NaN
    # for rows without a creation date (pd.notnull instead of `== False`).
    art["month_year"] = [
        datetime(x.year, x.month, 1) if pd.notnull(x) else np.nan
        for x in art["created"]
    ]
    selected_columns = [
        "article_id",
        "created",
        "month_year",
        "title",
        "journal_ref",
        "doi",
        "authors",
        "abstract",
        "mag_id",
        "citation_count",
        "article_source",
    ]
    return art[selected_columns]
def get_arxiv_institutes():
    """Load the lookup table linking arXiv article ids to institute (GRID) ids."""
    institutes_path = f"{PROJECT_DIR}/inputs/data/arxiv_article_institutes_updated.csv"
    column_types = {"article_id": str, "institute_id": str}
    return pd.read_csv(institutes_path, dtype=column_types)
def get_article_categories():
    """Load the arXiv article -> category assignments."""
    categories_path = f"{PROJECT_DIR}/inputs/data/arxiv_article_categories.csv"
    return pd.read_csv(categories_path, dtype={"article_id": str})
def get_arxiv_w2v():
    """Load the pickled arXiv word2vec model from the models directory."""
    model_path = f"{PROJECT_DIR}/outputs/models/arxiv_w2v.p"
    with open(model_path, "rb") as model_file:
        return pickle.load(model_file)
def fetch_grid():
    """Download and extract the GRID institute dataset if not already cached.

    No-op when GRID_PATH already exists. Raises requests.HTTPError if the
    download fails (previously an error page would be unzipped silently).
    """
    if not os.path.exists(GRID_PATH):
        logging.info("Collecting Grid data")
        os.makedirs(GRID_PATH, exist_ok=True)
        response = requests.get(
            "https://ndownloader.figshare.com/files/28431024", timeout=300
        )
        # Fail fast on HTTP errors rather than trying to unzip the response body.
        response.raise_for_status()
        ZipFile(BytesIO(response.content)).extractall(GRID_PATH)
def fetch_cord_meta():
    """Download the CORD-19 metadata file from Kaggle if not already cached.

    No-op when CORD_META_PATH already exists. Requires Kaggle API
    credentials to be configured in the environment.
    """
    if not os.path.exists(CORD_META_PATH):
        logging.info("Fetching cord data")
        api = KaggleApi()
        api.authenticate()  # reads kaggle.json / env credentials
        api.dataset_download_file(
            "allen-institute-for-ai/CORD-19-research-challenge",
            "metadata.csv",
            path=f"{PROJECT_DIR}/inputs/data",
        )
def get_cord_metadata():
    """Parse the CORD metadata file.

    Returns a pair:
        * set of 'cord-<uid>' ids whose publish date is year-only (imprecise),
        * dict mapping 'cord-<uid>' -> publication year (int).
    """
    meta = pd.read_csv(f"{PROJECT_DIR}/inputs/data/metadata.csv.zip", compression="zip")
    dated = meta.dropna(axis=0, subset=["publish_time"])
    imprecise_ids = set()
    year_lookup = {}
    for raw_id, raw_date in zip(dated["cord_uid"], dated["publish_time"]):
        uid = f"cord-{raw_id}"
        if "-" in raw_date:
            # Full ISO-style date: keep the year component.
            year_lookup[uid] = int(raw_date.split("-")[0])
        else:
            # Year-only date: record it and flag the id as imprecise.
            year_lookup[uid] = int(raw_date)
            imprecise_ids.add(uid)
    return imprecise_ids, year_lookup
def get_covid_papers():
    """Make the papers table
    Includes:
        Removing duplicated papers in cord
        Creating month year variable missing for cord papers without detailed
        publication date
    Results are cached to COV_PAPERS_PATH; subsequent calls read the cache.
    """
    if os.path.exists(COV_PAPERS_PATH) is False:
        logging.info("Making covid papers")
        arts = get_arxiv_articles()
        logging.info("processing arxiv papers")
        # Keep arXiv papers whose title+abstract matches the covid keyword filter.
        arxiv_covid = (
            arts.query("article_source!='cord'")
            .dropna(axis=0, subset=["abstract","title"])
            .assign(text = lambda df: [" ".join([x,y]) for x,y in zip(df['title'],df['abstract'])])
            .assign(has_cov=lambda df: [covid_getter(text) for text in df["text"]])
            .query("has_cov == True")
        )
        arxiv_covid["month_year"] = [
            datetime(x.year, x.month, 1) for x in arxiv_covid["created"]
        ]
        arxiv_covid["year"] = [x.year for x in arxiv_covid["month_year"]]
        logging.info("processing cord papers")
        # Same covid filter for CORD, on abstract only; journal_ref lowered for
        # the preprint-server exclusion below.
        cord = (
            arts.query("article_source=='cord'")
            .dropna(axis=0, subset=["abstract"])
            .assign(has_cov=lambda df: [covid_getter(text) for text in df["abstract"]])
            .query("has_cov == True")
            .assign(
                journal_ref=lambda df: [
                    x.lower() if type(x) == str else np.nan for x in df["journal_ref"]
                ]
            )
        )
        # Drop preprints already covered via arXiv-style sources, then dedupe by title.
        cord = cord.loc[~cord["journal_ref"].isin(["biorxiv", "medrxiv"])]
        cord = cord.drop_duplicates("title")
        meta_bad_date, meta_year = get_cord_metadata()
        cord["year"] = cord["article_id"].map(meta_year)
        # month_year only for papers with a precise (non year-only) publish date.
        cord["month_year"] = [
            datetime(d.year, d.month, 1)
            if (_id not in meta_bad_date) & (not pd.isnull(d))
            else np.nan
            for _id, d in zip(cord["article_id"], cord["created"])
        ]
        papers = (
            pd.concat([arxiv_covid, cord], axis=0)
            .reset_index(drop=True)
            .drop(axis=1, labels=["has_cov"])
        )
        papers.to_csv(COV_PAPERS_PATH, index=False)
        return papers
    else:
        return pd.read_csv(
            COV_PAPERS_PATH,
            dtype={"article_id": str},
            parse_dates=["created", "month_year"],
        )
def get_grid_meta():
    """Assemble one GRID metadata table: id, name, location, type and NUTS codes."""
    tables = {
        table_name: pd.read_csv(f"{GRID_PATH}/full_tables/{table_name}.csv")
        for table_name in ["institutes", "addresses", "types", "geonames"]
    }
    # Institute names joined with addresses and org types on grid_id, then
    # enriched (left join) with geoname NUTS codes.
    combined = tables["institutes"].merge(tables["addresses"], on="grid_id")
    combined = combined.merge(tables["types"], on="grid_id")
    combined = combined.merge(
        tables["geonames"], on=["geonames_city_id", "city"], how="left"
    )
    keep_columns = [
        "grid_id",
        "name",
        "lat",
        "lng",
        "city",
        "country",
        "country_code",
        "type",
        "nuts_level1_code",
        "nuts_level2_code",
        "nuts_level3_code",
    ]
    return combined[keep_columns]
def query_arxiv_institute():
    """Return article->institute links enriched with GRID metadata."""
    article_links = get_arxiv_institutes()
    grid = get_grid_meta()
    return article_links.merge(grid, left_on="institute_id", right_on="grid_id")
def get_arxiv_tokenised():
with open(f"{PROJECT_DIR}/inputs/data/arxiv_tokenised.json", "r") as infile:
return json.load(infile)
def get_arxiv_fos():
return pd.read_csv(
f"{PROJECT_DIR}/inputs/data/arxiv_article_fields_of_study.csv",
dtype={"article_id": str, "fos_id": int},
)
def get_children(values):
if type(values) is str:
return [int(x) for x in values.split(",")]
else:
return np.nan
def make_fos_l0_lookup():
"""Creates a lookup between all MAG fos levels and the top level of the taxonomy"""
logging.info("Reading data")
fos_taxon = pd.read_csv(f"{PROJECT_DIR}/inputs/data/mag_fields_of_study.csv")
id_name_lookup = fos_taxon.set_index("id")["name"].to_dict()
all_children = {
_id: get_children(values)
for _id, values in zip(fos_taxon["id"], fos_taxon["child_ids"])
}
fos_0 = fos_taxon.loc[fos_taxon["level"] == 0]["id"].tolist()
fos_lu = {}
logging.info("Finding children categories")
# We recursively look for the children of level 0s at different levels of the taxonomy
for f in fos_0:
children = all_children[f].copy()
for level in range(1, 5):
table = fos_taxon.loc[fos_taxon["id"].isin(children)].query(
f"level=={level}"
)
for _id in table["id"]:
try:
for ch in all_children[_id]:
children.append(ch)
except BaseException:
pass
for c in children:
if c not in fos_lu.keys():
fos_lu[c] = [f]
else:
fos_lu[c].append(f)
logging.info("Creating dataframe")
fos_lu_df = pd.DataFrame(
{"fos_id": fos_lu.keys(), "fos_l0": fos_lu.values()}
).explode("fos_l0")
fos_lu_df["fos_id_name"], fos_lu_df["fos_l0_name"] = [
fos_lu_df[var].map(id_name_lookup) for var in ["fos_id", "fos_l0"]
]
return fos_lu_df
def query_article_discipline():
"""Returns a lookup between articles and high level disciplines"""
if os.path.exists(DISC_QUERY) is False:
arxiv_fos = get_arxiv_fos()
fos_lu_df = make_fos_l0_lookup()
arxiv_f0 = arxiv_fos.merge(fos_lu_df, on="fos_id")
logging.info("Finding top discipline")
arxiv_discipline = (
arxiv_f0.groupby("article_id")["fos_l0_name"]
.apply(lambda x: Counter(x).most_common(1)[0][0])
.reset_index(drop=False)
)
arxiv_discipline.to_csv(DISC_QUERY, index=False)
return arxiv_discipline
else:
return pd.read_csv(DISC_QUERY, dtype={"article_id": str})
def get_arxiv_topic_model():
with open(f"{PROJECT_DIR}/outputs/models/topsbm_arxiv_sampled.p", "rb") as infile:
return pickle.load(infile)
def get_arxiv_tokenised():
with open(f"{PROJECT_DIR}/inputs/data/arxiv_tokenised.json", "r") as infile:
return json.load(infile)
def get_ai_results():
with open(f"{PROJECT_DIR}/outputs/data/find_ai_outputs.p", "rb") as infile:
return pickle.load(infile)
def get_cluster_names():
with open(f"{PROJECT_DIR}/outputs/data/aux/arxiv_cluster_names.json",'r') as infile:
return {int(k):v for k,v in json.load(infile).items()}
def get_cluster_ids():
with open(f"{PROJECT_DIR}/inputs/data/arxiv_cluster_lookup.json",'r') as infile:
paper_cluster_lookup = json.load(infile)
cluster_names = get_cluster_names()
paper_cluster_name = {k: cluster_names[v] for k,v in paper_cluster_lookup.items()}
return paper_cluster_name
if __name__ == "__main__":
fetch_grid()
| [
"datetime.datetime",
"os.path.exists",
"pandas.isnull",
"os.makedirs",
"pandas.read_csv",
"pickle.load",
"io.BytesIO",
"requests.get",
"collections.Counter",
"pandas.concat",
"eurito_indicators.pipeline.processing_utils.covid_getter",
"json.load",
"kaggle.api.kaggle_api_extended.KaggleApi",
... | [((805, 916), 'pandas.read_csv', 'pd.read_csv', (['f"""{PROJECT_DIR}/inputs/data/arxiv_articles_v2.csv"""'], {'dtype': "{'id': str}", 'parse_dates': "['created']"}), "(f'{PROJECT_DIR}/inputs/data/arxiv_articles_v2.csv', dtype={'id':\n str}, parse_dates=['created'])\n", (816, 916), True, 'import pandas as pd\n'), ((1513, 1643), 'pandas.read_csv', 'pd.read_csv', (['f"""{PROJECT_DIR}/inputs/data/arxiv_article_institutes_updated.csv"""'], {'dtype': "{'article_id': str, 'institute_id': str}"}), "(f'{PROJECT_DIR}/inputs/data/arxiv_article_institutes_updated.csv',\n dtype={'article_id': str, 'institute_id': str})\n", (1524, 1643), True, 'import pandas as pd\n'), ((1752, 1853), 'pandas.read_csv', 'pd.read_csv', (['f"""{PROJECT_DIR}/inputs/data/arxiv_article_categories.csv"""'], {'dtype': "{'article_id': str}"}), "(f'{PROJECT_DIR}/inputs/data/arxiv_article_categories.csv',\n dtype={'article_id': str})\n", (1763, 1853), True, 'import pandas as pd\n'), ((2820, 2897), 'pandas.read_csv', 'pd.read_csv', (['f"""{PROJECT_DIR}/inputs/data/metadata.csv.zip"""'], {'compression': '"""zip"""'}), "(f'{PROJECT_DIR}/inputs/data/metadata.csv.zip', compression='zip')\n", (2831, 2897), True, 'import pandas as pd\n'), ((6910, 7031), 'pandas.read_csv', 'pd.read_csv', (['f"""{PROJECT_DIR}/inputs/data/arxiv_article_fields_of_study.csv"""'], {'dtype': "{'article_id': str, 'fos_id': int}"}), "(f'{PROJECT_DIR}/inputs/data/arxiv_article_fields_of_study.csv',\n dtype={'article_id': str, 'fos_id': int})\n", (6921, 7031), True, 'import pandas as pd\n'), ((7311, 7339), 'logging.info', 'logging.info', (['"""Reading data"""'], {}), "('Reading data')\n", (7323, 7339), False, 'import logging\n'), ((7357, 7422), 'pandas.read_csv', 'pd.read_csv', (['f"""{PROJECT_DIR}/inputs/data/mag_fields_of_study.csv"""'], {}), "(f'{PROJECT_DIR}/inputs/data/mag_fields_of_study.csv')\n", (7368, 7422), True, 'import pandas as pd\n'), ((7711, 7754), 'logging.info', 'logging.info', (['"""Finding children categories"""'], 
{}), "('Finding children categories')\n", (7723, 7754), False, 'import logging\n'), ((8437, 8471), 'logging.info', 'logging.info', (['"""Creating dataframe"""'], {}), "('Creating dataframe')\n", (8449, 8471), False, 'import logging\n'), ((2003, 2022), 'pickle.load', 'pickle.load', (['infile'], {}), '(infile)\n', (2014, 2022), False, 'import pickle\n'), ((2081, 2106), 'os.path.exists', 'os.path.exists', (['GRID_PATH'], {}), '(GRID_PATH)\n', (2095, 2106), False, 'import os\n'), ((2125, 2161), 'logging.info', 'logging.info', (['"""Collecting Grid data"""'], {}), "('Collecting Grid data')\n", (2137, 2161), False, 'import logging\n'), ((2170, 2207), 'os.makedirs', 'os.makedirs', (['GRID_PATH'], {'exist_ok': '(True)'}), '(GRID_PATH, exist_ok=True)\n', (2181, 2207), False, 'import os\n'), ((2220, 2283), 'requests.get', 'requests.get', (['"""https://ndownloader.figshare.com/files/28431024"""'], {}), "('https://ndownloader.figshare.com/files/28431024')\n", (2232, 2283), False, 'import requests\n'), ((2426, 2456), 'os.path.exists', 'os.path.exists', (['CORD_META_PATH'], {}), '(CORD_META_PATH)\n', (2440, 2456), False, 'import os\n'), ((2475, 2509), 'logging.info', 'logging.info', (['"""Fetching cord data"""'], {}), "('Fetching cord data')\n", (2487, 2509), False, 'import logging\n'), ((2524, 2535), 'kaggle.api.kaggle_api_extended.KaggleApi', 'KaggleApi', ([], {}), '()\n', (2533, 2535), False, 'from kaggle.api.kaggle_api_extended import KaggleApi\n'), ((3651, 3682), 'os.path.exists', 'os.path.exists', (['COV_PAPERS_PATH'], {}), '(COV_PAPERS_PATH)\n', (3665, 3682), False, 'import os\n'), ((3701, 3736), 'logging.info', 'logging.info', (['"""Making covid papers"""'], {}), "('Making covid papers')\n", (3713, 3736), False, 'import logging\n'), ((3783, 3822), 'logging.info', 'logging.info', (['"""processing arxiv papers"""'], {}), "('processing arxiv papers')\n", (3795, 3822), False, 'import logging\n'), ((4389, 4427), 'logging.info', 'logging.info', (['"""processing cord 
papers"""'], {}), "('processing cord papers')\n", (4401, 4427), False, 'import logging\n'), ((5593, 5692), 'pandas.read_csv', 'pd.read_csv', (['COV_PAPERS_PATH'], {'dtype': "{'article_id': str}", 'parse_dates': "['created', 'month_year']"}), "(COV_PAPERS_PATH, dtype={'article_id': str}, parse_dates=[\n 'created', 'month_year'])\n", (5604, 5692), True, 'import pandas as pd\n'), ((5841, 5888), 'pandas.read_csv', 'pd.read_csv', (['f"""{GRID_PATH}/full_tables/{n}.csv"""'], {}), "(f'{GRID_PATH}/full_tables/{n}.csv')\n", (5852, 5888), True, 'import pandas as pd\n'), ((6858, 6875), 'json.load', 'json.load', (['infile'], {}), '(infile)\n', (6867, 6875), False, 'import json\n'), ((8864, 8890), 'os.path.exists', 'os.path.exists', (['DISC_QUERY'], {}), '(DISC_QUERY)\n', (8878, 8890), False, 'import os\n'), ((9048, 9086), 'logging.info', 'logging.info', (['"""Finding top discipline"""'], {}), "('Finding top discipline')\n", (9060, 9086), False, 'import logging\n'), ((9399, 9449), 'pandas.read_csv', 'pd.read_csv', (['DISC_QUERY'], {'dtype': "{'article_id': str}"}), "(DISC_QUERY, dtype={'article_id': str})\n", (9410, 9449), True, 'import pandas as pd\n'), ((9583, 9602), 'pickle.load', 'pickle.load', (['infile'], {}), '(infile)\n', (9594, 9602), False, 'import pickle\n'), ((9728, 9745), 'json.load', 'json.load', (['infile'], {}), '(infile)\n', (9737, 9745), False, 'import json\n'), ((9865, 9884), 'pickle.load', 'pickle.load', (['infile'], {}), '(infile)\n', (9876, 9884), False, 'import pickle\n'), ((10203, 10220), 'json.load', 'json.load', (['infile'], {}), '(infile)\n', (10212, 10220), False, 'import json\n'), ((1029, 1057), 'datetime.datetime', 'datetime', (['x.year', 'x.month', '(1)'], {}), '(x.year, x.month, 1)\n', (1037, 1057), False, 'from datetime import datetime\n'), ((2306, 2324), 'io.BytesIO', 'BytesIO', (['g.content'], {}), '(g.content)\n', (2313, 2324), False, 'from io import BytesIO\n'), ((4235, 4263), 'datetime.datetime', 'datetime', (['x.year', 'x.month', '(1)'], 
{}), '(x.year, x.month, 1)\n', (4243, 4263), False, 'from datetime import datetime\n'), ((1061, 1073), 'pandas.isnull', 'pd.isnull', (['x'], {}), '(x)\n', (1070, 1073), True, 'import pandas as pd\n'), ((5138, 5166), 'datetime.datetime', 'datetime', (['d.year', 'd.month', '(1)'], {}), '(d.year, d.month, 1)\n', (5146, 5166), False, 'from datetime import datetime\n'), ((5216, 5228), 'pandas.isnull', 'pd.isnull', (['d'], {}), '(d)\n', (5225, 5228), True, 'import pandas as pd\n'), ((5363, 5401), 'pandas.concat', 'pd.concat', (['[arxiv_covid, cord]'], {'axis': '(0)'}), '([arxiv_covid, cord], axis=0)\n', (5372, 5401), True, 'import pandas as pd\n'), ((10036, 10053), 'json.load', 'json.load', (['infile'], {}), '(infile)\n', (10045, 10053), False, 'import json\n'), ((4093, 4111), 'eurito_indicators.pipeline.processing_utils.covid_getter', 'covid_getter', (['text'], {}), '(text)\n', (4105, 4111), False, 'from eurito_indicators.pipeline.processing_utils import covid_getter\n'), ((4583, 4601), 'eurito_indicators.pipeline.processing_utils.covid_getter', 'covid_getter', (['text'], {}), '(text)\n', (4595, 4601), False, 'from eurito_indicators.pipeline.processing_utils import covid_getter\n'), ((9203, 9213), 'collections.Counter', 'Counter', (['x'], {}), '(x)\n', (9210, 9213), False, 'from collections import Counter\n')] |
####
#Made reduntant in Alpha 0.4
#
# As this provided no extra functionality from the Channel, and is, in essance,
# simply a speciall channel, various additions were made to Channel.py
# To perform the same function. This has meant less duplication of code.
####
import sys, sip
from PyQt5 import QtCore, QtGui
#from pybirchMdiV4 import Ui_PyBirch
#from IrcConnection import IrcConnection
#from ServerOutputSorter import ServerOutputSorter
from pybirchMessage import Ui_BirchMessageWindow
from userInputSorter import UserInputSorter
from TextString import TextString
from NickListView import NickListView
from time import strftime
from struct import unpack
import string
try:
from PyQt5.QtCore import QString
except ImportError:
QString = str
class StatusWindow(QtGui.QMdiSubWindow):
_channel = ""
_nick = ""
_button = ""
__version__ = "Status v. 0.1"
def __init__(self, my_channel, parent=None):
self._channel=my_channel
self._nick=""
#Config will need to be passed in later, because of the order of
#creation.
#self.config=my_config
#print "DEBUG-------->In Channel :"+_channel_
QtGui.QMdiSubWindow.__init__(self,parent)
sip.delete(self.layout())
self.ui = Ui_BirchMessageWindow()
self.ui.setupUi(self)
# self.ui.setupUi(self)
self.ui.label_ChanName.setText(self._channel)
self.setWindowTitle(self._channel)
#install the key event filter.
self.ui.text_input.installEventFilter(self)
#self._nicklist = NickListView()
#self.ui.list_NickList.setModel(self._nicklist)
# self.ui.setWindowTitle(_channel_)
#Quick and Dirty hashing method.
#--- May have problems with similarly-named channels. eg
# test, tset, ttse and so on. MUST TEST (v0.1)
#http://bytes.com/topic/python/answers/23620-string-ascii-values
# def __hash__(self):
# global _channel_
# return unpack('%sB' % len(value), value)
def get_channel_name(self):
return self._channel
def append_channel_text(self,myString):
myQString = QString(str(myString))
showTime = "True"
#self.ui.editor_Window.append(myQString)
#try:
# showTime=self.config.get("Channel_Settings", "Time")
#except:
# self.config.add_section("Channel_Settings")
# self.config.set("Channel_Settings","Time", "True")
# showTime="True"
if showTime=="True":
current_time = strftime("%H:%M")
myQString="["+current_time+"] "+myQString
self.ui.editor_Window.insertHtml(myQString+"<br>")
#This should stop the window from updating when scrolling #up and down through the channel!
# if not self.ui.editor_Window.isHorizontalSliderPressed():
self.ui.editor_Window.moveCursor(11,False)
del myQString
def eventFilter(self, obj ,event):
if event.type() == QtCore.QEvent.KeyPress and event.matches(QtGui.QKeySequence.InsertParagraphSeparator):
# self.sendToServer()
self.process_input_event()
if event.type() == QtCore.QEvent.KeyPress and event.key() == QtCore.Qt.Key_Tab:
self.process_tab_event()
return True
return False
def process_tab_event(self):
print("Processing Tab Event")
#originalString = self.ui.text_input.text()
#searchString = str(originalString.rsplit(None, 1))
#searchString = originalString.split(" ")[-1]
#print(searchString)
#resultString = self._nicklist.search_nick(searchString)
#if resultString != "":
#originalString=originalString.rsplit(" ", 1)[0]
#if originalString.endswith(searchString):
# originalString= originalString[:-len(searchString)]
#originalString = originalString.rstrip(searchString)
# if len(originalString) == 0:
# self.ui.text_input.setText(originalString +resultString)
# else:
# self.ui.text_input.setText(originalString +" "+ resultString)
def process_input_event(self):
channelName=self._channel
originalString = self.ui.text_input.text()
textToSend = originalString
displayText = textToSend
uIS = UserInputSorter()
ts = uIS.process_input(channelName, self._nick, originalString)
self.ui.editor_Window.moveCursor(11,False)
#
# myTextToSwitch = textToSend.split(" ")
#
# if myTextToSwitch[0][0:1] == "/":
# if myTextToSwitch[0] == "/msg":
# #Note, this doesn't in any way work.
# remainderIndex = textToSend.find(myTextToSwitch[1])
# textToSend = "PRIVMSG "+myTextToSwitch[1]+" "+textToSend[remainderIndex:]
# displayText = "**Messaging "+myTextToSwitch[1]+textToSend[remainderIndex:]
# else:
# textToSend = str(textToSend[1:])
# displayText = "---"+str(textToSend)
#remainderIndex=string.find(strServerOutput,":",2)
# else:
# textToSend = "PRIVMSG "+channelName+" :"+textToSend
# displayText = "["+_nick_+"] "+originalString
#try:
# showTime=self.config.get("Channel_Settings", "Time")
#except:
# pass
showTime = "True"
self.emit(QtCore.SIGNAL("UserInput"),ts.get_original_string())
myDisplayString = ts.get_display_string()
if showTime == "True":
current_time = strftime("%H:%M")
myDisplayString="["+current_time+"] "+myDisplayString
self.ui.editor_Window.insertHtml(myDisplayString+"<br>")
self.ui.text_input.setText("")
def nick_update(self, my_new_nick):
self._nick = my_new_nick
def closeEvent(self, closeEvent):
self.emit(QtCore.SIGNAL("Channel_Closed"),self._channel)
closeEvent.accept();
print ("<StatusWindow : Close event> PANIC Mr Mannering!")
def button_click(self):
#sender = self.sender
if self.isMinimized():
self.show()
self.showNormal()
else:
self.showMinimized()
self.hide()
###
# While insert_nick currently works, the listWidget will need a QAbstractView in order to be able to remove
# items. This will deal with @, +, and other standard modifiers of the channel. definately TODO!
#
# def insert_nick(self, ts):
# #this is being done this way for future proofing
# print ("<DEBUG>Channel.py:insert_nick", ts.get_message())
# for myNick in ts.get_message().split():
# myNickToAdd = myNick.replace(":","",1 )
# self._nicklist.insert_nick(myNickToAdd)
# self.ui.listWidget.addItem(QtGui.QListWidgetItem(myNickToAdd))
# def remove_nick(self, ts):
# print("<DEBUG>Channel.py:remove_nick"+self._channel+" :"+ts.get_nick())
# for myNick in ts.get_nick().split():
# myNickToRemove = myNick.replace(":", "", 1)
# found = self._nicklist.remove_nick(myNickToRemove)
# self.ui.listWidget.removeItemWidget(QtGui.QListWidgetItem(myNickToRemove))
# if(found):
# self.append_channel_text(ts.get_display_string())
# def nick_mode_change(self, ts):
# self._nicklist.changeStatus(ts.get_mode_user(), ts.get_mode_settings())
| [
"pybirchMessage.Ui_BirchMessageWindow",
"time.strftime",
"PyQt5.QtGui.QMdiSubWindow.__init__",
"PyQt5.QtCore.SIGNAL",
"userInputSorter.UserInputSorter"
] | [((1130, 1172), 'PyQt5.QtGui.QMdiSubWindow.__init__', 'QtGui.QMdiSubWindow.__init__', (['self', 'parent'], {}), '(self, parent)\n', (1158, 1172), False, 'from PyQt5 import QtCore, QtGui\n'), ((1214, 1237), 'pybirchMessage.Ui_BirchMessageWindow', 'Ui_BirchMessageWindow', ([], {}), '()\n', (1235, 1237), False, 'from pybirchMessage import Ui_BirchMessageWindow\n'), ((3941, 3958), 'userInputSorter.UserInputSorter', 'UserInputSorter', ([], {}), '()\n', (3956, 3958), False, 'from userInputSorter import UserInputSorter\n'), ((2330, 2347), 'time.strftime', 'strftime', (['"""%H:%M"""'], {}), "('%H:%M')\n", (2338, 2347), False, 'from time import strftime\n'), ((4830, 4856), 'PyQt5.QtCore.SIGNAL', 'QtCore.SIGNAL', (['"""UserInput"""'], {}), "('UserInput')\n", (4843, 4856), False, 'from PyQt5 import QtCore, QtGui\n'), ((4976, 4993), 'time.strftime', 'strftime', (['"""%H:%M"""'], {}), "('%H:%M')\n", (4984, 4993), False, 'from time import strftime\n'), ((5266, 5297), 'PyQt5.QtCore.SIGNAL', 'QtCore.SIGNAL', (['"""Channel_Closed"""'], {}), "('Channel_Closed')\n", (5279, 5297), False, 'from PyQt5 import QtCore, QtGui\n')] |
import csv
import random
from functools import partial
from typing import Callable, Optional
from pdb import set_trace as st
import os
import random
import pandas as pd
from typing import Any, Callable, Dict, Iterable, List, Tuple, Union
import numpy as np
import tensorflow as tf
from foolbox.attacks import (
FGSM,
Attack,
DeepFoolAttack,
IterativeGradientSignAttack,
SaliencyMapAttack,
)
# from foolbox.criteria import TargetClass
# from foolbox.models import TensorFlowModel
from tensorflow.python.training import saver
from tensorflow.python.training.session_manager import SessionManager
import tensorflow as tf
import numpy as np
import sklearn.metrics as metrics
import matplotlib.pyplot as plt
plt.switch_backend('Agg')
from model.config import LENET
from model import LeNet
import nninst_mode as mode
from dataset import mnist
from dataset.config import MNIST_TRAIN, MNIST_PATH
from dataset.mnist_transforms import *
from trace.lenet_mnist_class_trace_v2 import (
data_config,
)
from trace.common import (
class_trace,
)
from tf_utils import new_session_config
from nninst_statistics import calc_trace_side_overlap
from nninst_trace import TraceKey
from nninst_utils.numpy import arg_approx
from nninst_utils.ray import ray_init
from nninst_utils.fs import ensure_dir, IOAction, CsvIOAction, abspath
from .common import get_overlay_summary, clean_overlap_ratio, \
translation_overlap_ratio, attack_overlap_ratio, \
lenet_mnist_example
from .cw_attack import cw_generate_adversarial_example
from .eval_mnist import foolbox_generate_adversarial_example
from .cw_attacks import CarliniL2
from nninst_graph import AttrMap, Graph, GraphAttrKey
from nninst_utils.ray import ray_iter
from tf_graph import (
MaskWeightWithTraceHook,
model_fn_with_fetch_hook,
)
from trace.common import (
get_predicted_value,
get_rank,
predict,
reconstruct_class_trace_from_tf,
reconstruct_trace_from_tf,
reconstruct_trace_from_tf_brute_force,
)
from .analyse_class_trace import reconstruct_edge
# Model config
model_label = "augmentation"
model_dir = f"result/lenet/model_{model_label}"
# Trace config
trace_dir = f"{model_dir}/traces"
trace_name = "noop"
# Result dir
result_name = "test"
key = TraceKey.POINT
# Result dir
key_name = key.split('.')[1]
# reduce_mode includes output, channel, none
reduce_mode = "none"
result_dir = f"{model_dir}/conv_point_NOT/{reduce_mode}_{trace_name}_attack_overlap"
# result_dir = f"result/lenet/test"
images_per_class = 100
attack_name = "FGSM"
attacks = {
"FGSM": [FGSM],
"BIM": [IterativeGradientSignAttack],
"JSMA": [SaliencyMapAttack],
"DeepFool": [DeepFoolAttack],
# "DeepFool_full": [DeepFoolAttack, dict(subsample=None)],
# "CWL2": [CarliniL2],
}
adversarial_label = 1
normal_label = -1
class_trace_fn=lambda class_id: lenet_mnist_class_trace(
class_id,
threshold,
label=model_label,
trace_dir = trace_dir,
)
lenet_mnist_class_trace = class_trace(
trace_name,
model_config=LENET,
data_config=data_config,
)
def reconstruct_point(
trace,
graph,
key,
node_name,
):
attrs = trace.nodes[node_name]
def to_bitmap(shape, attr):
mask = np.zeros(np.prod(shape), dtype=np.int8)
mask[TraceKey.to_array(attr)] = 1
return mask.reshape(shape)
if key in attrs:
return to_bitmap(attrs[key + "_shape"], attrs[key])
else:
for attr_name, attr in attrs.items():
if attr_name.startswith(TraceKey.POINT + ".") and attr is not None:
return to_bitmap(attrs[TraceKey.POINT_SHAPE], attr)
RuntimeError(f"Key not found")
def filter_point_by_key(
trace: AttrMap,
key: str =TraceKey.POINT,
graph = LENET.network_class.graph().load(),
):
reconstruct_point_fn = partial(
reconstruct_point,
trace,
graph,
key,
)
op_to_mask = {}
# print(trace.nodes.keys())
for node_name in sorted(trace.nodes):
# print(f"{node_name}: {trace.nodes[node_name].keys()}")
if key in trace.nodes[node_name]:
op_to_mask[node_name] = reconstruct_point_fn(node_name)
# for op in op_to_mask:
# print(f"{op}: {op_to_mask[op].shape}")
# st()
return op_to_mask
def reduce_edge_mask(edge_mask: AttrMap, reduce_mode="none"):
reduced_edge = {}
for node_name in edge_mask:
# shape of edge (Ci, Hk, Wk, Co, Ho, Wo)
edge = edge_mask[node_name]
if "conv2d" in node_name:
if reduce_mode == "channel":
edge_sum = edge_mask[node_name].sum(0)
edge_sum[edge_sum>0] = 1
elif reduce_mode == "output":
edge_sum = edge_mask[node_name].sum(-1).sum(-1)
edge_sum[edge_sum>0] = 1
else:
edge_sum = edge_mask[node_name]
else:
edge_sum = edge
reduced_edge[node_name] = edge_sum
return reduced_edge
def detect_by_reduced_edge(class_trace, trace, reduce_mode = "none"):
class_masks = filter_point_by_key(
class_trace,
key = key
)
sample_masks = filter_point_by_key(
trace,
key = key
)
class_masks = reduce_edge_mask(class_masks, reduce_mode = reduce_mode)
sample_masks = reduce_edge_mask(sample_masks, reduce_mode = reduce_mode)
is_adversarial = False
for node_name in class_masks:
if "conv2d" not in node_name or "Relu" not in node_name:
continue
class_mask = class_masks[node_name]
sample_mask = sample_masks[node_name]
class_zero = class_mask==0
sample_zero_sum = sample_mask[class_zero].sum()
if sample_zero_sum>0:
is_adversarial = True
if is_adversarial:
return adversarial_label
else:
return normal_label
# Compute the mean overlap ratio of attacked image
def attack_reduced_edge_detection(
attack_name: str,
attack_fn,
generate_adversarial_fn,
class_trace_fn: Callable[[int], IOAction[AttrMap]],
select_fn: Callable[[np.ndarray], np.ndarray],
overlap_fn: Callable[[AttrMap, AttrMap, str], float],
path: str,
per_channel: bool = False,
per_node: bool = False,
images_per_class: int = 1,
num_gpus: float = 0.2,
model_dir = "result/lenet/model_augmentation",
transforms = None,
transform_name = "noop",
reduce_mode = "none",
**kwargs,
):
def get_overlap_ratio() -> pd.DataFrame:
def get_row(class_id: int, image_id: int) -> Dict[str, Any]:
nonlocal model_dir
mode.check(False)
data_dir = abspath(MNIST_PATH)
model_dir = abspath(model_dir)
ckpt_dir = f"{model_dir}/ckpts"
create_model = lambda: LeNet(data_format="channels_first")
graph = LeNet.graph().load()
model_fn = partial(
model_fn_with_fetch_hook,
create_model=create_model, graph=graph
)
predicted_label = predict(
create_model=create_model,
input_fn=lambda: mnist.test(data_dir)
.filter(
lambda image, label: tf.equal(
tf.convert_to_tensor(class_id, dtype=tf.int32), label
)
)
.skip(image_id)
.take(1)
.batch(1),
model_dir=ckpt_dir,
)
if predicted_label != class_id:
return [{}] if per_node else {}
adversarial_example = lenet_mnist_example(
attack_name=attack_name,
attack_fn=attack_fn,
generate_adversarial_fn=generate_adversarial_fn,
class_id=class_id,
image_id=image_id,
# model_dir not ckpt_dir
model_dir=model_dir,
transforms = transforms,
transform_name = transform_name,
mode = "test",
).load()
if adversarial_example is None:
return [{}] if per_node else {}
adversarial_predicted_label = predict(
create_model=create_model,
input_fn=lambda: tf.data.Dataset.from_tensors(
mnist.normalize(adversarial_example)
),
model_dir=ckpt_dir,
)
if predicted_label == adversarial_predicted_label:
return [{}] if per_node else {}
trace = reconstruct_trace_from_tf(
class_id=class_id,
model_fn=model_fn,
input_fn=lambda: mnist.test(data_dir, transforms=transforms)
.filter(
lambda image, label: tf.equal(
tf.convert_to_tensor(class_id, dtype=tf.int32), label
)
)
.skip(image_id)
.take(1)
.batch(1),
select_fn=select_fn,
model_dir=ckpt_dir,
per_channel=per_channel,
)[0]
if trace is None:
return [{}] if per_node else {}
adversarial_trace = reconstruct_trace_from_tf_brute_force(
model_fn=model_fn,
input_fn=lambda: tf.data.Dataset.from_tensors(
mnist.normalize(adversarial_example)
),
select_fn=select_fn,
model_dir=ckpt_dir,
per_channel=per_channel,
)[0]
adversarial_label = adversarial_trace.attrs[GraphAttrKey.PREDICT]
row = {
"image_id": image_id,
"class_id": class_id,
"original.prediction":
detect_by_reduced_edge(
class_trace_fn(class_id).load(),
trace,
reduce_mode,
),
"adversarial.prediction":
detect_by_reduced_edge(
class_trace_fn(adversarial_label).load(),
adversarial_trace,
reduce_mode,
),
}
return row
detections = ray_iter(
get_row,
(
(class_id, image_id)
for image_id in range(0, images_per_class)
for class_id in range(0, 10)
),
# ((-1, image_id) for image_id in range(mnist_info.test().size)),
chunksize=1,
out_of_order=True,
num_gpus=num_gpus,
)
traces = [detection for detection in detections if len(detection) != 0]
return pd.DataFrame(traces)
return CsvIOAction(path, init_fn=get_overlap_ratio)
def attack_transform_overlap(attack_name,
transform_name,
transforms,
reduce_mode = "none",
result_dir = "result/lenet/9transform_attack_overlap"):
name = attack_name+'_'+transform_name
lenet_mnist_class_trace = class_trace(
trace_name,
model_config=LENET,
data_config=data_config,
)
threshold = 0.5
# DeepFool will shutdown when num_gpu<0.2
num_gpus = 0.2
overlap_fn = calc_trace_side_overlap
per_channel = False
path = os.path.join(result_dir, f"{name}_overlap.csv")
# print(f"Computing {name}")
# lenet_overlap_ratio = attack_reduced_edge_detection_count_violation(
lenet_overlap_ratio = attack_reduced_edge_detection(
attack_name=attack_name,
attack_fn=attacks[attack_name][0],
generate_adversarial_fn=cw_generate_adversarial_example
if attack_name.startswith("CW")
else foolbox_generate_adversarial_example,
class_trace_fn=lambda class_id: lenet_mnist_class_trace(
class_id,
threshold,
label=model_label,
trace_dir = trace_dir,
),
select_fn=lambda input: arg_approx(input, threshold),
overlap_fn=overlap_fn,
path=path,
per_channel=per_channel,
preprocessing=(0.1307, 0.3081),
image_size=28,
class_num=10,
norm_fn=mnist.normalize,
data_format="channels_first",
**(attacks[attack_name][1] if len(attacks[attack_name]) == 2 else {}),
images_per_class=images_per_class,
model_dir=model_dir,
num_gpus = num_gpus,
transforms = transforms,
transform_name = transform_name,
reduce_mode = reduce_mode,
)
lenet_overlap_ratio.save()
return lenet_overlap_ratio.load()
def compute_accuracy(trace_frame):
adversarial_metric = trace_frame["adversarial.prediction"]
original_metric = trace_frame["original.prediction"]
predictions = np.concatenate([adversarial_metric, original_metric])
row_filter = np.isfinite(predictions)
labels = np.concatenate(
[
np.repeat(1, adversarial_metric.shape[0]),
np.repeat(-1, original_metric.shape[0]),
]
)
labels = labels[row_filter]
predictions = predictions[row_filter]
fpr, tpr, thresholds = metrics.roc_curve(labels, predictions)
roc_auc = metrics.auc(fpr, tpr)
return fpr, tpr, roc_auc
def draw_attack_transform_roc(exp_to_roc, save_name, result_dir):
plt.title('ROC')
detection_results = {}
for exp_name, item in exp_to_roc.items():
fpr, tpr, roc_auc, color = item
print(f"{exp_name}: fpr={fpr}, tpr={tpr}")
plt.plot(fpr, tpr,color,label=f"{exp_name}_AUC={roc_auc:.2f}")
detection_results[exp_name] = [fpr, tpr]
plt.legend(loc='lower right')
plt.plot([0,1],[0,1],'r--')
plt.ylabel('TPR')
plt.xlabel('FPR')
path = os.path.join(result_dir, f"{save_name}.png")
plt.savefig(path)
path = os.path.join(result_dir, f"{save_name}.txt")
with open(path, "w") as f:
for name in detection_results:
print(f"{exp_name}: fpr={fpr}, tpr={tpr}", file=f)
def attack_exp():
exp_to_roc = {}
os.makedirs(result_dir, exist_ok=True)
for transforms, transform_name, color in [
[None, "noop", 'b'],
# [Translate(dx=-5,dy=-5), "leftup", 'g'],
# [Translate(dx=5,dy=5), "rightdown", 'c'],
# [Translate(dx=-5), "left", 'y'],
# [Translate(dy=-5), "up", 'm'],
]:
exp_name = attack_name+"_"+transform_name
print(f"Computing {exp_name}")
trace_frame = attack_transform_overlap(attack_name,
transform_name,
transforms,
reduce_mode = reduce_mode,
result_dir=result_dir)
exp_to_roc[exp_name] = compute_accuracy(trace_frame) + (color,)
draw_attack_transform_roc(exp_to_roc,
save_name=attack_name,
result_dir=result_dir)
if __name__ == "__main__":
# mode.debug()
mode.local()
# ray_init("gpu")
ray_init(
log_to_driver=False
)
tf.set_random_seed(3)
np.random.seed(3)
random.seed(3)
attack_exp()
| [
"numpy.prod",
"matplotlib.pyplot.ylabel",
"sklearn.metrics.auc",
"model.config.LENET.network_class.graph",
"nninst_utils.numpy.arg_approx",
"sklearn.metrics.roc_curve",
"numpy.isfinite",
"nninst_utils.fs.abspath",
"nninst_utils.ray.ray_init",
"matplotlib.pyplot.switch_backend",
"tensorflow.set_r... | [((724, 749), 'matplotlib.pyplot.switch_backend', 'plt.switch_backend', (['"""Agg"""'], {}), "('Agg')\n", (742, 749), True, 'import matplotlib.pyplot as plt\n'), ((3011, 3079), 'trace.common.class_trace', 'class_trace', (['trace_name'], {'model_config': 'LENET', 'data_config': 'data_config'}), '(trace_name, model_config=LENET, data_config=data_config)\n', (3022, 3079), False, 'from trace.common import class_trace\n'), ((3961, 4006), 'functools.partial', 'partial', (['reconstruct_point', 'trace', 'graph', 'key'], {}), '(reconstruct_point, trace, graph, key)\n', (3968, 4006), False, 'from functools import partial\n'), ((11111, 11155), 'nninst_utils.fs.CsvIOAction', 'CsvIOAction', (['path'], {'init_fn': 'get_overlap_ratio'}), '(path, init_fn=get_overlap_ratio)\n', (11122, 11155), False, 'from nninst_utils.fs import ensure_dir, IOAction, CsvIOAction, abspath\n'), ((11492, 11560), 'trace.common.class_trace', 'class_trace', (['trace_name'], {'model_config': 'LENET', 'data_config': 'data_config'}), '(trace_name, model_config=LENET, data_config=data_config)\n', (11503, 11560), False, 'from trace.common import class_trace\n'), ((11853, 11900), 'os.path.join', 'os.path.join', (['result_dir', 'f"""{name}_overlap.csv"""'], {}), "(result_dir, f'{name}_overlap.csv')\n", (11865, 11900), False, 'import os\n'), ((13327, 13380), 'numpy.concatenate', 'np.concatenate', (['[adversarial_metric, original_metric]'], {}), '([adversarial_metric, original_metric])\n', (13341, 13380), True, 'import numpy as np\n'), ((13398, 13422), 'numpy.isfinite', 'np.isfinite', (['predictions'], {}), '(predictions)\n', (13409, 13422), True, 'import numpy as np\n'), ((13687, 13725), 'sklearn.metrics.roc_curve', 'metrics.roc_curve', (['labels', 'predictions'], {}), '(labels, predictions)\n', (13704, 13725), True, 'import sklearn.metrics as metrics\n'), ((13740, 13761), 'sklearn.metrics.auc', 'metrics.auc', (['fpr', 'tpr'], {}), '(fpr, tpr)\n', (13751, 13761), True, 'import 
sklearn.metrics as metrics\n'), ((13863, 13879), 'matplotlib.pyplot.title', 'plt.title', (['"""ROC"""'], {}), "('ROC')\n", (13872, 13879), True, 'import matplotlib.pyplot as plt\n'), ((14170, 14199), 'matplotlib.pyplot.legend', 'plt.legend', ([], {'loc': '"""lower right"""'}), "(loc='lower right')\n", (14180, 14199), True, 'import matplotlib.pyplot as plt\n'), ((14204, 14235), 'matplotlib.pyplot.plot', 'plt.plot', (['[0, 1]', '[0, 1]', '"""r--"""'], {}), "([0, 1], [0, 1], 'r--')\n", (14212, 14235), True, 'import matplotlib.pyplot as plt\n'), ((14236, 14253), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""TPR"""'], {}), "('TPR')\n", (14246, 14253), True, 'import matplotlib.pyplot as plt\n'), ((14258, 14275), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""FPR"""'], {}), "('FPR')\n", (14268, 14275), True, 'import matplotlib.pyplot as plt\n'), ((14288, 14332), 'os.path.join', 'os.path.join', (['result_dir', 'f"""{save_name}.png"""'], {}), "(result_dir, f'{save_name}.png')\n", (14300, 14332), False, 'import os\n'), ((14337, 14354), 'matplotlib.pyplot.savefig', 'plt.savefig', (['path'], {}), '(path)\n', (14348, 14354), True, 'import matplotlib.pyplot as plt\n'), ((14367, 14411), 'os.path.join', 'os.path.join', (['result_dir', 'f"""{save_name}.txt"""'], {}), "(result_dir, f'{save_name}.txt')\n", (14379, 14411), False, 'import os\n'), ((14589, 14627), 'os.makedirs', 'os.makedirs', (['result_dir'], {'exist_ok': '(True)'}), '(result_dir, exist_ok=True)\n', (14600, 14627), False, 'import os\n'), ((15572, 15584), 'nninst_mode.local', 'mode.local', ([], {}), '()\n', (15582, 15584), True, 'import nninst_mode as mode\n'), ((15612, 15641), 'nninst_utils.ray.ray_init', 'ray_init', ([], {'log_to_driver': '(False)'}), '(log_to_driver=False)\n', (15620, 15641), False, 'from nninst_utils.ray import ray_init\n'), ((15661, 15682), 'tensorflow.set_random_seed', 'tf.set_random_seed', (['(3)'], {}), '(3)\n', (15679, 15682), True, 'import tensorflow as tf\n'), ((15687, 15704), 
'numpy.random.seed', 'np.random.seed', (['(3)'], {}), '(3)\n', (15701, 15704), True, 'import numpy as np\n'), ((15709, 15723), 'random.seed', 'random.seed', (['(3)'], {}), '(3)\n', (15720, 15723), False, 'import random\n'), ((11078, 11098), 'pandas.DataFrame', 'pd.DataFrame', (['traces'], {}), '(traces)\n', (11090, 11098), True, 'import pandas as pd\n'), ((14053, 14117), 'matplotlib.pyplot.plot', 'plt.plot', (['fpr', 'tpr', 'color'], {'label': 'f"""{exp_name}_AUC={roc_auc:.2f}"""'}), "(fpr, tpr, color, label=f'{exp_name}_AUC={roc_auc:.2f}')\n", (14061, 14117), True, 'import matplotlib.pyplot as plt\n'), ((3357, 3371), 'numpy.prod', 'np.prod', (['shape'], {}), '(shape)\n', (3364, 3371), True, 'import numpy as np\n'), ((3401, 3424), 'nninst_trace.TraceKey.to_array', 'TraceKey.to_array', (['attr'], {}), '(attr)\n', (3418, 3424), False, 'from nninst_trace import TraceKey\n'), ((3891, 3918), 'model.config.LENET.network_class.graph', 'LENET.network_class.graph', ([], {}), '()\n', (3916, 3918), False, 'from model.config import LENET\n'), ((6819, 6836), 'nninst_mode.check', 'mode.check', (['(False)'], {}), '(False)\n', (6829, 6836), True, 'import nninst_mode as mode\n'), ((6860, 6879), 'nninst_utils.fs.abspath', 'abspath', (['MNIST_PATH'], {}), '(MNIST_PATH)\n', (6867, 6879), False, 'from nninst_utils.fs import ensure_dir, IOAction, CsvIOAction, abspath\n'), ((6904, 6922), 'nninst_utils.fs.abspath', 'abspath', (['model_dir'], {}), '(model_dir)\n', (6911, 6922), False, 'from nninst_utils.fs import ensure_dir, IOAction, CsvIOAction, abspath\n'), ((7102, 7175), 'functools.partial', 'partial', (['model_fn_with_fetch_hook'], {'create_model': 'create_model', 'graph': 'graph'}), '(model_fn_with_fetch_hook, create_model=create_model, graph=graph)\n', (7109, 7175), False, 'from functools import partial\n'), ((13474, 13515), 'numpy.repeat', 'np.repeat', (['(1)', 'adversarial_metric.shape[0]'], {}), '(1, adversarial_metric.shape[0])\n', (13483, 13515), True, 'import numpy as np\n'), 
((13529, 13568), 'numpy.repeat', 'np.repeat', (['(-1)', 'original_metric.shape[0]'], {}), '(-1, original_metric.shape[0])\n', (13538, 13568), True, 'import numpy as np\n'), ((7002, 7037), 'model.LeNet', 'LeNet', ([], {'data_format': '"""channels_first"""'}), "(data_format='channels_first')\n", (7007, 7037), False, 'from model import LeNet\n'), ((12518, 12546), 'nninst_utils.numpy.arg_approx', 'arg_approx', (['input', 'threshold'], {}), '(input, threshold)\n', (12528, 12546), False, 'from nninst_utils.numpy import arg_approx\n'), ((7058, 7071), 'model.LeNet.graph', 'LeNet.graph', ([], {}), '()\n', (7069, 7071), False, 'from model import LeNet\n'), ((8539, 8575), 'dataset.mnist.normalize', 'mnist.normalize', (['adversarial_example'], {}), '(adversarial_example)\n', (8554, 8575), False, 'from dataset import mnist\n'), ((9630, 9666), 'dataset.mnist.normalize', 'mnist.normalize', (['adversarial_example'], {}), '(adversarial_example)\n', (9645, 9666), False, 'from dataset import mnist\n'), ((7338, 7358), 'dataset.mnist.test', 'mnist.test', (['data_dir'], {}), '(data_dir)\n', (7348, 7358), False, 'from dataset import mnist\n'), ((7459, 7505), 'tensorflow.convert_to_tensor', 'tf.convert_to_tensor', (['class_id'], {'dtype': 'tf.int32'}), '(class_id, dtype=tf.int32)\n', (7479, 7505), True, 'import tensorflow as tf\n'), ((8908, 8951), 'dataset.mnist.test', 'mnist.test', (['data_dir'], {'transforms': 'transforms'}), '(data_dir, transforms=transforms)\n', (8918, 8951), False, 'from dataset import mnist\n'), ((9052, 9098), 'tensorflow.convert_to_tensor', 'tf.convert_to_tensor', (['class_id'], {'dtype': 'tf.int32'}), '(class_id, dtype=tf.int32)\n', (9072, 9098), True, 'import tensorflow as tf\n')] |
# Copyright 2016 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Caterpillar presubmit checks."""
import os
# Absolute paths of the test fixture directories; files underneath these are
# exempt from the long-line and stray-whitespace style checks below.
_TEST_APP_DIRS = ('test_app_minimal', 'test_app_tts', 'test_app_tts_output')
TEST_DATA_REALPATHS = [
    os.path.realpath(os.path.join('tests', name)) for name in _TEST_APP_DIRS
]
def filter_test_data(affected_file):
  """Source-file filter: returns False for files under a test-data directory."""
  realpath = os.path.realpath(affected_file.LocalPath())
  return not any(realpath.startswith(prefix) for prefix in TEST_DATA_REALPATHS)
def CheckChange(input_api, output_api):
  """Run the shared presubmit checks and return the combined results."""
  checks = input_api.canned_checks
  results = []
  results.extend(checks.CheckChangeHasNoTabs(input_api, output_api))
  results.extend(checks.CheckChangeHasDescription(input_api, output_api))
  results.extend(checks.CheckChangeHasNoCrAndHasOnlyOneEol(input_api, output_api))
  # Test data is exempt from the line-length and whitespace checks.
  results.extend(checks.CheckLongLines(input_api, output_api, 80,
                                       source_file_filter=filter_test_data))
  results.extend(checks.CheckChangeHasNoStrayWhitespace(
      input_api, output_api, source_file_filter=filter_test_data))
  results.extend(input_api.RunTests(GetPythonTests(input_api, output_api)))
  results.extend(input_api.RunTests(GetKarmaTests(input_api, output_api)))
  return results
def CheckChangeOnUpload(input_api, output_api):
  """Presubmit entry point run on `git cl upload`; delegates to CheckChange."""
  return CheckChange(input_api, output_api)
def CheckChangeOnCommit(input_api, output_api):
  """Presubmit entry point run on commit; delegates to CheckChange."""
  return CheckChange(input_api, output_api)
def GetKarmaTests(input_api, output_api):
  """Return the presubmit command that runs the Karma JS test suite."""
  karma_bin = input_api.os_path.join('node_modules', 'karma', 'bin', 'karma')
  return [input_api.Command('Karma', [karma_bin, 'start'], {},
                            output_api.PresubmitError)]
def GetPythonTests(input_api, output_api):
  """Return the presubmit command that discovers and runs the Python tests."""
  discover_args = ['discover', '-s', 'src/', '-p', '*_test.py']
  command = ['python', '-m', 'unittest'] + discover_args
  return [input_api.Command('Python', command, {}, output_api.PresubmitError)]
| [
"os.path.realpath",
"os.path.join"
] | [((951, 973), 'os.path.realpath', 'os.path.realpath', (['path'], {}), '(path)\n', (967, 973), False, 'import os\n'), ((690, 731), 'os.path.join', 'os.path.join', (['"""tests"""', '"""test_app_minimal"""'], {}), "('tests', 'test_app_minimal')\n", (702, 731), False, 'import os\n'), ((755, 792), 'os.path.join', 'os.path.join', (['"""tests"""', '"""test_app_tts"""'], {}), "('tests', 'test_app_tts')\n", (767, 792), False, 'import os\n'), ((816, 860), 'os.path.join', 'os.path.join', (['"""tests"""', '"""test_app_tts_output"""'], {}), "('tests', 'test_app_tts_output')\n", (828, 860), False, 'import os\n')] |
import twitter
import util
from config import *
# Yahoo "Where On Earth ID" for Boston, used to fetch local trending topics.
BOSTON_WOEID = 2367105

# Module-level Twitter client authenticated with credentials from config.py.
api = twitter.Api(consumer_key=key,consumer_secret=secret,access_token_key=access_key,access_token_secret=access_secret)
def search(searchTerm):
    """
    Print recent tweets containing `searchTerm`.

    To test this function, at the command line run:
    python twitter_api.py --search=<search term>

    For example,
    python twitter_api.py --search=python
    """
    # Reuse the module-level client; rebuilding twitter.Api on every call
    # repeats the OAuth setup for no benefit.
    tweets = api.GetSearch(searchTerm)
    for tweet in tweets:
        util.safe_print(tweet.GetText())
def trendingTopics():
    """
    Print the currently trending topics.

    To test this function, at the command line run:
    python twitter_api.py -t
    """
    # Reuse the module-level client instead of constructing a duplicate one.
    trending_topics = api.GetTrendsWoeid(BOSTON_WOEID)
    for topic in trending_topics:
        util.safe_print(topic.name)
def userTweets(username):
    """
    Print recent tweets by `username`.

    You may find the twitter.Api() function GetUserTimeline() helpful.

    To test this function, at the command line run:
    python twitter_api.py -u <username>

    For example,
    python twitter_api.py -u bostonpython
    """
    # Reuse the module-level client instead of constructing a duplicate one.
    for tweet in api.GetUserTimeline(screen_name=username):
        util.safe_print(tweet.GetText())
def trendingTweets():
    """
    Print tweets for all the trending topics.

    To test this function, at the command line run:
    python twitter_api.py -w
    """
    # GetTrendsWoeid returns Trend objects, which carry a `name` but have no
    # GetText() (see trendingTopics above); to print tweets *for* each topic,
    # search for tweets matching the topic name.
    for topic in api.GetTrendsWoeid(BOSTON_WOEID):
        for tweet in api.GetSearch(topic.name):
            util.safe_print(tweet.GetText())
| [
"util.safe_print",
"twitter.Api"
] | [((78, 200), 'twitter.Api', 'twitter.Api', ([], {'consumer_key': 'key', 'consumer_secret': 'secret', 'access_token_key': 'access_key', 'access_token_secret': 'access_secret'}), '(consumer_key=key, consumer_secret=secret, access_token_key=\n access_key, access_token_secret=access_secret)\n', (89, 200), False, 'import twitter\n'), ((462, 584), 'twitter.Api', 'twitter.Api', ([], {'consumer_key': 'key', 'consumer_secret': 'secret', 'access_token_key': 'access_key', 'access_token_secret': 'access_secret'}), '(consumer_key=key, consumer_secret=secret, access_token_key=\n access_key, access_token_secret=access_secret)\n', (473, 584), False, 'import twitter\n'), ((858, 980), 'twitter.Api', 'twitter.Api', ([], {'consumer_key': 'key', 'consumer_secret': 'secret', 'access_token_key': 'access_key', 'access_token_secret': 'access_secret'}), '(consumer_key=key, consumer_secret=secret, access_token_key=\n access_key, access_token_secret=access_secret)\n', (869, 980), False, 'import twitter\n'), ((1422, 1544), 'twitter.Api', 'twitter.Api', ([], {'consumer_key': 'key', 'consumer_secret': 'secret', 'access_token_key': 'access_key', 'access_token_secret': 'access_secret'}), '(consumer_key=key, consumer_secret=secret, access_token_key=\n access_key, access_token_secret=access_secret)\n', (1433, 1544), False, 'import twitter\n'), ((1847, 1969), 'twitter.Api', 'twitter.Api', ([], {'consumer_key': 'key', 'consumer_secret': 'secret', 'access_token_key': 'access_key', 'access_token_secret': 'access_secret'}), '(consumer_key=key, consumer_secret=secret, access_token_key=\n access_key, access_token_secret=access_secret)\n', (1858, 1969), False, 'import twitter\n'), ((1070, 1097), 'util.safe_print', 'util.safe_print', (['topic.name'], {}), '(topic.name)\n', (1085, 1097), False, 'import util\n')] |
# -*- coding: utf-8 -*-
from selenium import webdriver
from selenium.webdriver.common.by import By
from selenium.webdriver.common.keys import Keys
from selenium.webdriver.support.ui import Select
from selenium.common.exceptions import NoSuchElementException
from selenium.common.exceptions import NoAlertPresentException
import unittest, time, re, os
class BossXiadan(unittest.TestCase):
    """End-to-end UI test for the BOSS sales workflow: sign in via CAS,
    create a sales lead, assign it, follow it up, and place an order.

    Fix: tearDown was commented out, so every run leaked a Chrome/WebDriver
    session; it is reinstated below.
    """

    def setUp(self):
        # Point Selenium at the local chromedriver binary and open the CAS
        # single-sign-on page for the BOSS system.
        chrome_driver=os.path.abspath(r'C:\Python27\chromedriver.exe')
        os.environ['webdriver.chrome.driver']=chrome_driver
        self.driver=webdriver.Chrome()
        self.driver.get("https://cas.qa.great-tao.com:8443/cas-server/login?service=http://boss.qa.great-tao.com/cas")
        self.verificationErrors = []
        self.accept_next_alert = True

    def test_boss_xiadan(self):
        driver = self.driver
        # --- Sign in through CAS ---
        driver.get("https://cas.qa.great-tao.com:8443/cas-server/login?service=http://boss.qa.great-tao.com/cas")
        driver.find_element_by_id("username").clear()
        driver.find_element_by_id("username").send_keys("dingni")
        driver.find_element_by_id("password").clear()
        driver.find_element_by_id("password").send_keys("<PASSWORD>")
        driver.find_element_by_id("captcha").clear()
        driver.find_element_by_id("captcha").send_keys("<PASSWORD>")
        driver.find_element_by_name("submit").click()
        driver.find_element_by_link_text(u"BOSS系统").click()
        time.sleep(3)
        # --- Create a new sales lead ---
        driver.find_element_by_link_text(u"销售线索").click()
        time.sleep(2)
        driver.find_element_by_link_text(u"添加线索").click()
        time.sleep(2)
        # switch into the "add lead" iframe
        driver.switch_to.frame(driver.find_element_by_xpath("//iframe[contains(@src,'http://boss.qa.great-tao.com/boss-leads-web/leads/add')]"))
        driver.find_element_by_id("sourceRemark").clear()
        driver.find_element_by_id("sourceRemark").send_keys("test")
        driver.find_element_by_id("company").clear()
        driver.find_element_by_id("company").send_keys("dntest")
        xiansuo_style1=driver.find_element_by_id("type")
        Select(xiansuo_style1).select_by_value('3')  # choose lead type from the drop-down
        xiansuo_source=driver.find_element_by_id("source")
        Select(xiansuo_source).select_by_value('3')
        driver.find_element_by_id("contact").clear()
        driver.find_element_by_id("contact").send_keys("dn")
        driver.find_element_by_id("tel").clear()
        driver.find_element_by_id("tel").send_keys("15987598758")
        driver.find_element_by_id("email").clear()
        driver.find_element_by_id("email").send_keys("156848@126.com")
        driver.find_element_by_id("content").clear()
        driver.find_element_by_id("content").send_keys("dntest")
        driver.find_element_by_id("submit").click()
        driver.switch_to.default_content()  # back to the top-level document
        # --- Assign the lead ---
        driver.find_element_by_link_text(u"线索分配").click()
        driver.switch_to.frame(driver.find_element_by_xpath("//iframe[contains(@src,'http://boss.qa.great-tao.com/boss-leads-web/leads/director/list')]"))
        # now inside the lead-assignment iframe
        xiansuo_style2=driver.find_element_by_id("type")
        Select(xiansuo_style2).select_by_value('3')
        xiansuo_status2=driver.find_element_by_id("status")
        Select(xiansuo_status2).select_by_value('1')
        driver.find_element_by_id("btn_query").click()
        driver.maximize_window()
        time.sleep(1)
        driver.find_element_by_xpath("//*[@id='table']/tbody/tr[1]/td[10]/a[2]").click()
        time.sleep(2)
        driver.switch_to.frame(driver.find_element_by_id("layui-layer-iframe1"))  # switch into the pop-up iframe
        div_fenpei=driver.find_element_by_class_name("col-xs-6").find_element_by_id("receiver")
        Select(div_fenpei).select_by_value('53')
        driver.find_element_by_id("confirm").click()
        driver.switch_to.default_content()  # back to the top-level document
        driver.switch_to.frame(driver.find_element_by_xpath("//iframe[contains(@src,'http://boss.qa.great-tao.com/boss-leads-web/leads/director/list')]"))
        Select(xiansuo_status2).select_by_value('7')
        time.sleep(1)
        driver.find_element_by_id("btn_query").click()
        time.sleep(3)
        driver.find_element_by_xpath("//*[@id='table']/tbody/tr[1]/td[10]/a[1]").click()
        time.sleep(1)
        # --- Link the lead to a customer ---
        driver.find_element_by_link_text(u"关联").click()
        time.sleep(3)
        driver.switch_to.frame(driver.find_element_by_id("layui-layer-iframe1"))  # switch into the pop-up iframe
        driver.find_element_by_xpath("//*[@id='table']/tbody/tr[1]").click()
        driver.find_element_by_id("confirm").click()
        driver.switch_to.default_content()  # back to the top-level document
        driver.switch_to.frame(driver.find_element_by_xpath("//iframe[contains(@src,'http://boss.qa.great-tao.com/boss-leads-web/leads/director/list')]"))
        # --- Follow up on the lead ---
        driver.find_element_by_link_text(u"跟进").click()
        time.sleep(1)
        driver.switch_to.default_content()  # back to the top-level document
        driver.switch_to.frame(driver.find_element_by_xpath("//iframe[contains(@src,'http://boss.qa.great-tao.com/boss-leads-web/leads/director/list')]"))
        driver.switch_to.frame(driver.find_element_by_id("layui-layer-iframe1"))  # switch into the pop-up iframe
        time.sleep(1)
        driver.find_element_by_id("followupRemark").clear()
        driver.find_element_by_id("followupRemark").send_keys("test dn")
        driver.find_element_by_id("followupDate").click()
        driver.find_element_by_xpath("/html/body/div[2]/div[3]/table/tbody/tr[4]/td[3]").click()
        driver.find_element_by_id("confirm").click()
        driver.switch_to.default_content()  # back to the top-level document
        driver.switch_to.frame(driver.find_element_by_xpath("//iframe[contains(@src,'http://boss.qa.great-tao.com/boss-leads-web/leads/director/list')]"))
        # --- Place the order ---
        driver.find_element_by_link_text(u"下单").click()
        driver.find_element_by_id("selectServiceProvider").click()
        driver.find_element_by_id("popup").click()
        driver.find_element_by_xpath("(//input[@name='btSelectItem'])[35]").click()
        driver.find_element_by_id("queryService").click()
        # Shipping addresses for the order.
        driver.find_element_by_name("orderAddressList[0].name").clear()
        driver.find_element_by_name("orderAddressList[0].name").send_keys("testdn001")
        driver.find_element_by_name("orderAddressList[0].address").clear()
        driver.find_element_by_name("orderAddressList[0].address").send_keys("testdn001")
        driver.find_element_by_name("orderAddressList[1].name").clear()
        driver.find_element_by_name("orderAddressList[1].name").send_keys("testdn0001")
        driver.find_element_by_name("orderAddressList[2].name").clear()
        driver.find_element_by_name("orderAddressList[2].name").send_keys("testdn00001")
        # Goods on the order.
        driver.find_element_by_name("goodsList[0].goodsName").clear()
        driver.find_element_by_name("goodsList[0].goodsName").send_keys("testdn0001")
        driver.find_element_by_name("goodsList[0].num").clear()
        driver.find_element_by_name("goodsList[0].num").send_keys("1")
        Select(driver.find_element_by_name("goodsList[0].unit")).select_by_visible_text("CTNS")
        driver.find_element_by_name("goodsList[0].grossWeight").clear()
        driver.find_element_by_name("goodsList[0].grossWeight").send_keys("1")
        driver.find_element_by_name("goodsList[0].measurement").clear()
        driver.find_element_by_name("goodsList[0].measurement").send_keys("1")
        driver.find_element_by_xpath("(//input[@name='orderExpand.containerMark'])[2]").click()
        # NOTE(review): these sizzle-generated ids look session-specific and
        # may be brittle across test runs — confirm against the page markup.
        driver.find_element_by_xpath("//div[@id='sizzle-1489114881098']/div[3]/table/tbody/tr/td[5]").click()
        driver.find_element_by_xpath("//div[@id='sizzle-1489114881098']/div[3]/table/tbody/tr[3]/td[6]").click()
        driver.find_element_by_id("select2-orderportOfShipment-qa-container").click()
        driver.find_element_by_css_selector("span.select2-selection__placeholder").click()
        Select(driver.find_element_by_name("orderExpand.typeOfShipping")).select_by_visible_text(u"海运")
        Select(driver.find_element_by_name("order.freightPayableat")).select_by_visible_text(u"运费预付")
        driver.find_element_by_name("selectService").click()
        driver.find_element_by_name("orderExpand.entrustName").clear()
        driver.find_element_by_name("orderExpand.entrustName").send_keys("11")
        driver.find_element_by_name("orderExpand.entrustTel").clear()
        driver.find_element_by_name("orderExpand.entrustTel").send_keys("11")
        # Logistics costs.
        Select(driver.find_element_by_id("haha")).select_by_visible_text(u"海运费")
        driver.find_element_by_name("logisticsCosts[3].price").clear()
        driver.find_element_by_name("logisticsCosts[3].price").send_keys("11")
        Select(driver.find_element_by_id("store-name")).select_by_visible_text(u"吊机费")
        driver.find_element_by_name("logisticsCosts[4].price").clear()
        driver.find_element_by_name("logisticsCosts[4].price").send_keys("11")
        driver.find_element_by_name("logisticsCosts[1].price").clear()
        driver.find_element_by_name("logisticsCosts[1].price").send_keys("11")
        driver.find_element_by_id("saveService").click()

    def is_element_present(self, how, what):
        """Return True if an element matching (how, what) exists."""
        try: self.driver.find_element(by=how, value=what)
        except NoSuchElementException as e: return False
        return True

    def is_alert_present(self):
        """Return True if a JavaScript alert is currently displayed."""
        try: self.driver.switch_to_alert()
        except NoAlertPresentException as e: return False
        return True

    def close_alert_and_get_its_text(self):
        """Accept or dismiss the current alert and return its text."""
        try:
            alert = self.driver.switch_to_alert()
            alert_text = alert.text
            if self.accept_next_alert:
                alert.accept()
            else:
                alert.dismiss()
            return alert_text
        finally: self.accept_next_alert = True

    def tearDown(self):
        # Reinstated: this was commented out, leaking a browser session per
        # run.  Quit the browser and surface accumulated soft failures.
        self.driver.quit()
        self.assertEqual([], self.verificationErrors)
if __name__ == "__main__":
unittest.main()
| [
"selenium.webdriver.Chrome",
"selenium.webdriver.support.ui.Select",
"time.sleep",
"unittest.main",
"os.path.abspath"
] | [((9920, 9935), 'unittest.main', 'unittest.main', ([], {}), '()\n', (9933, 9935), False, 'import unittest, time, re, os\n'), ((432, 481), 'os.path.abspath', 'os.path.abspath', (['"""C:\\\\Python27\\\\chromedriver.exe"""'], {}), "('C:\\\\Python27\\\\chromedriver.exe')\n", (447, 481), False, 'import unittest, time, re, os\n'), ((561, 579), 'selenium.webdriver.Chrome', 'webdriver.Chrome', ([], {}), '()\n', (577, 579), False, 'from selenium import webdriver\n'), ((1442, 1455), 'time.sleep', 'time.sleep', (['(3)'], {}), '(3)\n', (1452, 1455), False, 'import unittest, time, re, os\n'), ((1522, 1535), 'time.sleep', 'time.sleep', (['(2)'], {}), '(2)\n', (1532, 1535), False, 'import unittest, time, re, os\n'), ((1602, 1615), 'time.sleep', 'time.sleep', (['(2)'], {}), '(2)\n', (1612, 1615), False, 'import unittest, time, re, os\n'), ((3369, 3382), 'time.sleep', 'time.sleep', (['(1)'], {}), '(1)\n', (3379, 3382), False, 'import unittest, time, re, os\n'), ((3480, 3493), 'time.sleep', 'time.sleep', (['(2)'], {}), '(2)\n', (3490, 3493), False, 'import unittest, time, re, os\n'), ((4049, 4062), 'time.sleep', 'time.sleep', (['(1)'], {}), '(1)\n', (4059, 4062), False, 'import unittest, time, re, os\n'), ((4126, 4139), 'time.sleep', 'time.sleep', (['(3)'], {}), '(3)\n', (4136, 4139), False, 'import unittest, time, re, os\n'), ((4237, 4250), 'time.sleep', 'time.sleep', (['(1)'], {}), '(1)\n', (4247, 4250), False, 'import unittest, time, re, os\n'), ((4315, 4328), 'time.sleep', 'time.sleep', (['(3)'], {}), '(3)\n', (4325, 4328), False, 'import unittest, time, re, os\n'), ((4819, 4832), 'time.sleep', 'time.sleep', (['(1)'], {}), '(1)\n', (4829, 4832), False, 'import unittest, time, re, os\n'), ((5137, 5150), 'time.sleep', 'time.sleep', (['(1)'], {}), '(1)\n', (5147, 5150), False, 'import unittest, time, re, os\n'), ((2087, 2109), 'selenium.webdriver.support.ui.Select', 'Select', (['xiansuo_style1'], {}), '(xiansuo_style1)\n', (2093, 2109), False, 'from 
selenium.webdriver.support.ui import Select\n'), ((2204, 2226), 'selenium.webdriver.support.ui.Select', 'Select', (['xiansuo_source'], {}), '(xiansuo_source)\n', (2210, 2226), False, 'from selenium.webdriver.support.ui import Select\n'), ((3116, 3138), 'selenium.webdriver.support.ui.Select', 'Select', (['xiansuo_style2'], {}), '(xiansuo_style2)\n', (3122, 3138), False, 'from selenium.webdriver.support.ui import Select\n'), ((3228, 3251), 'selenium.webdriver.support.ui.Select', 'Select', (['xiansuo_status2'], {}), '(xiansuo_status2)\n', (3234, 3251), False, 'from selenium.webdriver.support.ui import Select\n'), ((3687, 3705), 'selenium.webdriver.support.ui.Select', 'Select', (['div_fenpei'], {}), '(div_fenpei)\n', (3693, 3705), False, 'from selenium.webdriver.support.ui import Select\n'), ((3996, 4019), 'selenium.webdriver.support.ui.Select', 'Select', (['xiansuo_status2'], {}), '(xiansuo_status2)\n', (4002, 4019), False, 'from selenium.webdriver.support.ui import Select\n')] |
from dataclasses import dataclass, field
from itertools import chain
from typing import Optional
from talon import Context, Module, actions, app, cron, ui
# XXX(nriley) actions are being returned out of order; that's a problem if we want to pop up a menu
# Talon module: declares the lists and the setting this file exposes.
mod = Module()
mod.list("notification_actions", desc="Notification actions")
mod.list("notification_apps", desc="Notification apps")
# When enabled, debug_print() below logs notification state to the Talon log.
notification_debug = mod.setting(
    "notification_debug",
    type=bool,
    default=False,
    desc="Display macOS notification debugging information.",
)
)
try:
    # Prefer rich for colorized, soft-wrapped debug output when installed.
    from rich.console import Console

    console = Console(color_system="truecolor", soft_wrap=True)

    def debug_print(obj: any, *args):
        """Pretty prints the object"""
        if not notification_debug.get():
            return
        if args:
            console.out(obj, *args)
        else:
            console.print(obj)

except ImportError:
    # Fall back to plain print() when rich is not available.
    def debug_print(obj: any, *args):
        if not notification_debug.get():
            return
        print(obj, *args)
@mod.action_class
class Actions:
    # Default (cross-platform) stubs; the Mac-specific context below
    # overrides these with real implementations.
    def notification_action(index: int, action: str) -> bool:
        """Perform the specified action on the notification (stack) at the specified index"""
        return False

    def notification_app_action(app_name: str, action: str) -> bool:
        """Perform the specified action on the first notification (stack) for the specified app"""
        return False

    def notifications_update():
        """Update notification list to reflect what is currently onscreen"""
        # (poll? not try to keep up? not sure what else to do)

    def notification_center():
        """Display or hide Notification Center"""
@dataclass(frozen=True)
class Notification:
    """One onscreen notification (or notification stack).

    Equality and hashing use only `identifier` (every other field is
    compare=False), so a cached Notification can be matched against a fresh
    accessibility snapshot of the same group.
    """

    identifier: int
    subrole: str = field(default=None, compare=False)
    app_name: str = field(default=None, compare=False)
    stacking_identifier: str = field(default=None, compare=False)
    title: str = field(default=None, compare=False)
    subtitle: str = field(default=None, compare=False)
    body: str = field(default=None, compare=False)
    # action values are named "Name:<name>\nTarget:0x0\nSelector:(null)"; keys are speakable
    actions: dict[str, str] = field(default=None, compare=False)

    @staticmethod
    def group_identifier(group):
        """Return the numeric AXIdentifier of an accessibility group, or None."""
        identifier = getattr(group, "AXIdentifier", None)
        if identifier is None or not str.isdigit(identifier):
            return None
        return int(identifier)

    @staticmethod
    def _child_value(group, identifier):
        """Return the AXValue of `group`'s child with the given AXIdentifier,
        or None if no such child exists (replaces three duplicated
        try/except blocks in from_group)."""
        try:
            return group.children.find_one(AXIdentifier=identifier).AXValue
        except ui.UIErr:
            return None

    @staticmethod
    def from_group(group, identifier):
        """Build a Notification from an accessibility group element."""
        group_actions = group.actions
        if "AXScrollToVisible" in group_actions:
            del group_actions["AXScrollToVisible"]  # not useful
        # XXX(nriley) create_spoken_forms_from_list doesn't handle apostrophes correctly
        # https://github.com/knausj85/knausj_talon/issues/780
        group_actions = {
            name.lower().replace("’", "'"): action
            for action, name in group_actions.items()
        }
        return Notification(
            identifier=identifier,
            subrole=group.AXSubrole,
            app_name=group.AXDescription,
            stacking_identifier=group.AXStackingIdentifier,
            title=Notification._child_value(group, "title"),
            subtitle=Notification._child_value(group, "subtitle"),
            body=Notification._child_value(group, "body"),
            actions=group_actions,
        )

    @staticmethod
    def notifications_in_window(window):
        """Return a Notification for every identifiable group in `window`."""
        notifications = []
        for group in window.children.find(AXRole="AXGroup"):
            if not (identifier := Notification.group_identifier(group)):
                continue
            notifications.append(Notification.from_group(group, identifier))
        return notifications
# Singleton NotificationMonitor; set once the Notification Center UI is seen.
MONITOR = None

ctx = Context()
ctx.matches = r"""
os: mac
"""
# Populated dynamically by NotificationMonitor.update_notifications().
ctx.lists["user.notification_actions"] = {}
ctx.lists["user.notification_apps"] = {}
@ctx.action_class("user")
class UserActions:
def notification_action(index: int, action: str) -> bool:
return MONITOR.perform_action(action, index=index)
def notification_app_action(app_name: str, action: str) -> bool:
return MONITOR.perform_action(action, app_name=app_name)
def notifications_update():
MONITOR.update_notifications()
def notification_center():
cc = ui.apps(bundle="com.apple.controlcenter")[0]
cc.element.children.find_one(AXRole="AXMenuBar", max_depth=0).children.find_one(
AXRole="AXMenuBarItem",
AXSubrole="AXMenuExtra",
AXIdentifier="com.apple.menuextra.clock",
max_depth=0,
).perform("AXPress")
class NotificationMonitor:
    """Tracks the Notification Center UI process and mirrors its onscreen
    notifications into `self.notifications` and the two context lists."""

    __slots__ = (
        "pid",
        "notifications",
    )

    def __init__(self, app: ui.App):
        self.pid = app.pid
        # Ordered top-to-bottom by onscreen position (see update_notifications).
        self.notifications = []

        ui.register("win_open", self.win_open)
        ui.register("win_close", self.win_close)
        ui.register("app_close", self.app_closed)

        self.update_notifications()

    def win_open(self, window):
        """Fold notifications from a newly opened window into our state."""
        if not window.app.pid == self.pid:
            return
        notifications = Notification.notifications_in_window(window)
        self.update_notifications(adding=notifications)

    def notification_groups(self):
        """Yield (identifier, accessibility group) for every onscreen
        notification group across all Notification Center windows."""
        ncui = ui.apps(pid=self.pid)[0]
        for window in ncui.windows():
            for group in window.children.find(AXRole="AXGroup"):
                if not (identifier := Notification.group_identifier(group)):
                    continue
                yield identifier, group

    def perform_action(
        self, action: str, index: Optional[int] = None, app_name: str = None
    ):
        """Perform `action` on the notification selected by `index` (position)
        or `app_name` (first match); returns True on success.

        Callers must supply exactly one of `index`/`app_name`."""
        self.update_notifications()
        # Refresh again shortly after the action so dismissed notifications
        # disappear from the lists.
        cron.after("500ms", self.update_notifications)
        notification = None
        if index is not None:
            if index < 0 or index > len(self.notifications) - 1:
                app.notify(f"Unable to locate notification #{index + 1}", "Try again?")
                return False
            notification = self.notifications[index]
        elif app_name is not None:
            try:
                notification = next(
                    notification
                    for notification in self.notifications
                    if notification.app_name == app_name
                )
            except StopIteration:
                app.notify(
                    f"Unable to locate notification for {app_name}", "Try again?"
                )
                return False
        for identifier, group in self.notification_groups():
            if identifier != notification.identifier:
                continue
            if action not in notification.actions:
                # allow closing a notification stack like an individual notification
                if action == "close" and "clear all" in notification.actions:
                    action = "clear all"
                else:
                    app.notify(f"No such action “{action}”", "Try again?")
                    return False
            group.perform(notification.actions[action])
            return True
        app.notify("Unable to locate notification", "Try again?")
        return False

    def update_notifications(self, adding=()):
        """Re-snapshot onscreen notifications (optionally seeding with
        `adding`), reusing cached Notification objects where the identifier
        still matches, and rebuild the spoken-form context lists.

        Note: the default was previously a mutable `[]`; it is only ever
        read, but a tuple default removes the shared-mutable-default hazard.
        """
        if adding:
            self.notifications += adding
        # Keyed by vertical position so the list ends up in onscreen order.
        notifications = {}
        for identifier, group in self.notification_groups():
            y = group.AXPosition.y
            try:
                # Reuse the cached object (equality compares identifier only).
                notifications[y] = self.notifications[
                    self.notifications.index(Notification(identifier=identifier))
                ]
            except ValueError:
                notifications[y] = Notification.from_group(group, identifier)
        self.notifications = list(notifications.values())
        if notifications:
            debug_print(notifications)

        notification_actions = set()
        notification_apps = set()
        for notification in self.notifications:
            notification_actions.update(notification.actions.keys())
            notification_apps.add(notification.app_name)
        notification_actions = list(notification_actions)
        # XXX(nriley) create_spoken_forms_from_list doesn't handle apostrophes correctly
        # https://github.com/knausj85/knausj_talon/issues/780
        apostrophe_words = {
            word.replace("'", " "): word
            for word in chain.from_iterable(
                action.split() for action in notification_actions
            )
            if "'" in word
        }
        words_to_exclude = [word.split(" ")[0] for word in apostrophe_words]
        notification_actions = actions.user.create_spoken_forms_from_list(
            notification_actions, words_to_exclude=words_to_exclude
        )
        if apostrophe_words:
            notification_actions = {
                spoken_form.replace(mangled_word, word): action
                for mangled_word, word in apostrophe_words.items()
                for spoken_form, action in notification_actions.items()
                if "apostrophe" not in spoken_form
            }
        if notification_actions:
            debug_print("actions", notification_actions)
        if "close" not in notification_actions and "clear all" in notification_actions:
            # allow closing a notification stack like an individual notification
            notification_actions["close"] = "clear all"
        ctx.lists["user.notification_actions"] = notification_actions

        # XXX(nriley) use app name overrides from knausj?
        notification_apps = actions.user.create_spoken_forms_from_list(
            notification_apps
        )
        ctx.lists["user.notification_apps"] = notification_apps
        if notification_apps:
            debug_print("apps", notification_apps)

    def win_close(self, window):
        """Re-snapshot when a Notification Center window closes."""
        if not window.app.pid == self.pid:
            return
        self.update_notifications()

    def app_closed(self, app):
        """Stop listening when the Notification Center UI process exits."""
        if app.pid == self.pid:
            ui.unregister("app_close", self.app_closed)
def app_launched(app):
    """ui "app_launch" hook: begin monitoring when Notification Center starts."""
    global MONITOR
    if app.bundle == "com.apple.notificationcenterui":
        MONITOR = NotificationMonitor(app)
def monitor():
    """Attach to an already-running Notification Center process, if present."""
    global MONITOR
    running = ui.apps(bundle="com.apple.notificationcenterui")
    if running:
        MONITOR = NotificationMonitor(running[0])
# Watch for Notification Center launching after Talon starts...
ui.register("app_launch", app_launched)
# ...and attach to it at startup if it is already running.
app.register("ready", monitor)
| [
"talon.Context",
"talon.ui.register",
"dataclasses.dataclass",
"talon.ui.apps",
"talon.actions.user.create_spoken_forms_from_list",
"talon.Module",
"rich.console.Console",
"talon.app.notify",
"talon.ui.unregister",
"talon.cron.after",
"talon.app.register",
"dataclasses.field"
] | [((264, 272), 'talon.Module', 'Module', ([], {}), '()\n', (270, 272), False, 'from talon import Context, Module, actions, app, cron, ui\n'), ((1701, 1723), 'dataclasses.dataclass', 'dataclass', ([], {'frozen': '(True)'}), '(frozen=True)\n', (1710, 1723), False, 'from dataclasses import dataclass, field\n'), ((4167, 4176), 'talon.Context', 'Context', ([], {}), '()\n', (4174, 4176), False, 'from talon import Context, Module, actions, app, cron, ui\n'), ((10799, 10829), 'talon.app.register', 'app.register', (['"""ready"""', 'monitor'], {}), "('ready', monitor)\n", (10811, 10829), False, 'from talon import Context, Module, actions, app, cron, ui\n'), ((609, 658), 'rich.console.Console', 'Console', ([], {'color_system': '"""truecolor"""', 'soft_wrap': '(True)'}), "(color_system='truecolor', soft_wrap=True)\n", (616, 658), False, 'from rich.console import Console\n'), ((1783, 1817), 'dataclasses.field', 'field', ([], {'default': 'None', 'compare': '(False)'}), '(default=None, compare=False)\n', (1788, 1817), False, 'from dataclasses import dataclass, field\n'), ((1838, 1872), 'dataclasses.field', 'field', ([], {'default': 'None', 'compare': '(False)'}), '(default=None, compare=False)\n', (1843, 1872), False, 'from dataclasses import dataclass, field\n'), ((1904, 1938), 'dataclasses.field', 'field', ([], {'default': 'None', 'compare': '(False)'}), '(default=None, compare=False)\n', (1909, 1938), False, 'from dataclasses import dataclass, field\n'), ((1956, 1990), 'dataclasses.field', 'field', ([], {'default': 'None', 'compare': '(False)'}), '(default=None, compare=False)\n', (1961, 1990), False, 'from dataclasses import dataclass, field\n'), ((2011, 2045), 'dataclasses.field', 'field', ([], {'default': 'None', 'compare': '(False)'}), '(default=None, compare=False)\n', (2016, 2045), False, 'from dataclasses import dataclass, field\n'), ((2062, 2096), 'dataclasses.field', 'field', ([], {'default': 'None', 'compare': '(False)'}), '(default=None, compare=False)\n', (2067, 
2096), False, 'from dataclasses import dataclass, field\n'), ((2220, 2254), 'dataclasses.field', 'field', ([], {'default': 'None', 'compare': '(False)'}), '(default=None, compare=False)\n', (2225, 2254), False, 'from dataclasses import dataclass, field\n'), ((10643, 10691), 'talon.ui.apps', 'ui.apps', ([], {'bundle': '"""com.apple.notificationcenterui"""'}), "(bundle='com.apple.notificationcenterui')\n", (10650, 10691), False, 'from talon import Context, Module, actions, app, cron, ui\n'), ((10757, 10796), 'talon.ui.register', 'ui.register', (['"""app_launch"""', 'app_launched'], {}), "('app_launch', app_launched)\n", (10768, 10796), False, 'from talon import Context, Module, actions, app, cron, ui\n'), ((5228, 5266), 'talon.ui.register', 'ui.register', (['"""win_open"""', 'self.win_open'], {}), "('win_open', self.win_open)\n", (5239, 5266), False, 'from talon import Context, Module, actions, app, cron, ui\n'), ((5275, 5315), 'talon.ui.register', 'ui.register', (['"""win_close"""', 'self.win_close'], {}), "('win_close', self.win_close)\n", (5286, 5315), False, 'from talon import Context, Module, actions, app, cron, ui\n'), ((5324, 5365), 'talon.ui.register', 'ui.register', (['"""app_close"""', 'self.app_closed'], {}), "('app_close', self.app_closed)\n", (5335, 5365), False, 'from talon import Context, Module, actions, app, cron, ui\n'), ((6105, 6151), 'talon.cron.after', 'cron.after', (['"""500ms"""', 'self.update_notifications'], {}), "('500ms', self.update_notifications)\n", (6115, 6151), False, 'from talon import Context, Module, actions, app, cron, ui\n'), ((7511, 7568), 'talon.app.notify', 'app.notify', (['"""Unable to locate notification"""', '"""Try again?"""'], {}), "('Unable to locate notification', 'Try again?')\n", (7521, 7568), False, 'from talon import Context, Module, actions, app, cron, ui\n'), ((9025, 9128), 'talon.actions.user.create_spoken_forms_from_list', 'actions.user.create_spoken_forms_from_list', (['notification_actions'], 
{'words_to_exclude': 'words_to_exclude'}), '(notification_actions,\n words_to_exclude=words_to_exclude)\n', (9067, 9128), False, 'from talon import Context, Module, actions, app, cron, ui\n'), ((9954, 10015), 'talon.actions.user.create_spoken_forms_from_list', 'actions.user.create_spoken_forms_from_list', (['notification_apps'], {}), '(notification_apps)\n', (9996, 10015), False, 'from talon import Context, Module, actions, app, cron, ui\n'), ((4714, 4755), 'talon.ui.apps', 'ui.apps', ([], {'bundle': '"""com.apple.controlcenter"""'}), "(bundle='com.apple.controlcenter')\n", (4721, 4755), False, 'from talon import Context, Module, actions, app, cron, ui\n'), ((5676, 5697), 'talon.ui.apps', 'ui.apps', ([], {'pid': 'self.pid'}), '(pid=self.pid)\n', (5683, 5697), False, 'from talon import Context, Module, actions, app, cron, ui\n'), ((10392, 10435), 'talon.ui.unregister', 'ui.unregister', (['"""app_close"""', 'self.app_closed'], {}), "('app_close', self.app_closed)\n", (10405, 10435), False, 'from talon import Context, Module, actions, app, cron, ui\n'), ((6292, 6363), 'talon.app.notify', 'app.notify', (['f"""Unable to locate notification #{index + 1}"""', '"""Try again?"""'], {}), "(f'Unable to locate notification #{index + 1}', 'Try again?')\n", (6302, 6363), False, 'from talon import Context, Module, actions, app, cron, ui\n'), ((7333, 7387), 'talon.app.notify', 'app.notify', (['f"""No such action “{action}”"""', '"""Try again?"""'], {}), "(f'No such action “{action}”', 'Try again?')\n", (7343, 7387), False, 'from talon import Context, Module, actions, app, cron, ui\n'), ((6753, 6826), 'talon.app.notify', 'app.notify', (['f"""Unable to locate notification for {app_name}"""', '"""Try again?"""'], {}), "(f'Unable to locate notification for {app_name}', 'Try again?')\n", (6763, 6826), False, 'from talon import Context, Module, actions, app, cron, ui\n')] |
import numpy as np
from multiagent.core import World, Agent, Landmark
from multiagent.scenario import BaseScenario
class Scenario(BaseScenario):
    """Hockey-style particle scenario: a red (adversary) and a blue team,
    four fixed goal-post landmarks and one movable puck landmark."""
    def make_world(self):
        """Build the World (agents + landmarks) and set initial conditions."""
        world = World()
        # set any world properties first
        world.dim_c = 2
        num_agents = 2
        num_adversaries = 1
        num_landmarks = 5
        # add agents
        world.agents = [Agent() for i in range(num_agents)]
        for i, agent in enumerate(world.agents):
            agent.name = 'agent %d' % i
            agent.collide = True
            agent.silent = True
            if i < num_adversaries:
                # red (adversary) team
                agent.adversary = True
                agent.color = np.array([0.75, 0.25, 0.25])
            else:
                # blue team
                agent.adversary = False
                agent.color = np.array([0.25, 0.25, 0.75])
        # add landmarks for goal posts and puck
        goal_posts = [[-0.25, -1.0],
                      [-0.25, 1.0],
                      [0.25, -1.0],
                      [0.25, 1.0]]
        world.landmarks = [Landmark() for i in range(num_landmarks)]
        for i, landmark in enumerate(world.landmarks):
            landmark.name = 'landmark %d' % i
            if i > 0:
                # landmarks 1..4 are the fixed goal posts
                landmark.collide = True
                landmark.movable = False
                landmark.state.p_pos = np.array(goal_posts[i-1])
                landmark.state.p_vel = np.zeros(world.dim_p)
            else:
                # landmark 0 is the movable puck; its state is set in reset_world
                landmark.collide = True
                landmark.movable = True
        # add landmarks for rink boundary
        #world.landmarks += self.set_boundaries(world)
        # make initial conditions
        self.reset_world(world)
        return world
    def set_boundaries(self, world):
        """Create boundary landmarks fencing the rink.

        NOTE: currently unused — the call in make_world is commented out.
        """
        boundary_list = []
        landmark_size = 1
        edge = 1 + landmark_size
        num_landmarks = int(edge * 2 / landmark_size)
        for x_pos in [-edge, edge]:
            for i in range(num_landmarks):
                l = Landmark()
                l.state.p_pos = np.array([x_pos, -1 + i * landmark_size])
                boundary_list.append(l)
        for y_pos in [-edge, edge]:
            for i in range(num_landmarks):
                l = Landmark()
                l.state.p_pos = np.array([-1 + i * landmark_size, y_pos])
                boundary_list.append(l)
        for i, l in enumerate(boundary_list):
            l.name = 'boundary %d' % i
            l.collide = True
            l.movable = False
            l.boundary = True
            l.color = np.array([0.75, 0.75, 0.75])
            l.size = landmark_size
            l.state.p_vel = np.zeros(world.dim_p)
        return boundary_list
    def reset_world(self, world):
        """Re-randomize agent states and the puck; re-color landmarks."""
        # random properties for landmarks
        for i, landmark in enumerate(world.landmarks):
            if i > 0:
                landmark.color = np.array([0.7, 0.7, 0.7])
            else:
                landmark.color = np.array([0.1, 0.1, 0.1])
            landmark.index = i
        # set random initial states
        for agent in world.agents:
            agent.state.p_pos = np.random.uniform(-1, +1, world.dim_p)
            agent.state.p_vel = np.zeros(world.dim_p)
            agent.state.c = np.zeros(world.dim_c)
        # landmark 0 is the puck: random position, zero velocity
        world.landmarks[0].state.p_pos = np.random.uniform(-1, +1, world.dim_p)
        world.landmarks[0].state.p_vel = np.zeros(world.dim_p)
    # return all agents of the blue team
    def blue_agents(self, world):
        return [agent for agent in world.agents if not agent.adversary]
    # return all agents of the red team
    def red_agents(self, world):
        return [agent for agent in world.agents if agent.adversary]
    def reward(self, agent, world):
        """Dispatch to the per-team reward function."""
        # Agents are rewarded based on team they belong to
        return self.adversary_reward(agent, world) if agent.adversary else self.agent_reward(agent, world)
    def agent_reward(self, agent, world):
        """Reward for a blue-team agent (placeholder: always 0)."""
        # reward for blue team agent
        return 0.0
    def adversary_reward(self, agent, world):
        """Reward for a red-team agent (placeholder: always 0)."""
        # reward for red team agent
        return 0.0
    def observation(self, agent, world):
        """Observation in this agent's frame: own velocity, landmark offsets,
        movable-landmark velocities, then other agents' offsets and velocities."""
        # get positions/vel of all entities in this agent's reference frame
        entity_pos = []
        entity_vel = []
        for entity in world.landmarks:  # world.entities:
            entity_pos.append(entity.state.p_pos - agent.state.p_pos)
            if entity.movable:
                entity_vel.append(entity.state.p_vel)
        # get positions/vel of all other agents in this agent's reference frame
        other_pos = []
        other_vel = []
        for other in world.agents:
            if other is agent: continue
            other_pos.append(other.state.p_pos - agent.state.p_pos)
            other_vel.append(other.state.p_vel)
        return np.concatenate([agent.state.p_vel] + entity_pos + entity_vel + other_pos + other_vel)
| [
"multiagent.core.Landmark",
"numpy.array",
"numpy.zeros",
"multiagent.core.World",
"numpy.concatenate",
"numpy.random.uniform",
"multiagent.core.Agent"
] | [((188, 195), 'multiagent.core.World', 'World', ([], {}), '()\n', (193, 195), False, 'from multiagent.core import World, Agent, Landmark\n'), ((3273, 3311), 'numpy.random.uniform', 'np.random.uniform', (['(-1)', '(+1)', 'world.dim_p'], {}), '(-1, +1, world.dim_p)\n', (3290, 3311), True, 'import numpy as np\n'), ((3353, 3374), 'numpy.zeros', 'np.zeros', (['world.dim_p'], {}), '(world.dim_p)\n', (3361, 3374), True, 'import numpy as np\n'), ((4795, 4884), 'numpy.concatenate', 'np.concatenate', (['([agent.state.p_vel] + entity_pos + entity_vel + other_pos + other_vel)'], {}), '([agent.state.p_vel] + entity_pos + entity_vel + other_pos +\n other_vel)\n', (4809, 4884), True, 'import numpy as np\n'), ((383, 390), 'multiagent.core.Agent', 'Agent', ([], {}), '()\n', (388, 390), False, 'from multiagent.core import World, Agent, Landmark\n'), ((1043, 1053), 'multiagent.core.Landmark', 'Landmark', ([], {}), '()\n', (1051, 1053), False, 'from multiagent.core import World, Agent, Landmark\n'), ((2521, 2549), 'numpy.array', 'np.array', (['[0.75, 0.75, 0.75]'], {}), '([0.75, 0.75, 0.75])\n', (2529, 2549), True, 'import numpy as np\n'), ((2613, 2634), 'numpy.zeros', 'np.zeros', (['world.dim_p'], {}), '(world.dim_p)\n', (2621, 2634), True, 'import numpy as np\n'), ((3089, 3127), 'numpy.random.uniform', 'np.random.uniform', (['(-1)', '(+1)', 'world.dim_p'], {}), '(-1, +1, world.dim_p)\n', (3106, 3127), True, 'import numpy as np\n'), ((3160, 3181), 'numpy.zeros', 'np.zeros', (['world.dim_p'], {}), '(world.dim_p)\n', (3168, 3181), True, 'import numpy as np\n'), ((3210, 3231), 'numpy.zeros', 'np.zeros', (['world.dim_c'], {}), '(world.dim_c)\n', (3218, 3231), True, 'import numpy as np\n'), ((678, 706), 'numpy.array', 'np.array', (['[0.75, 0.25, 0.25]'], {}), '([0.75, 0.25, 0.25])\n', (686, 706), True, 'import numpy as np\n'), ((795, 823), 'numpy.array', 'np.array', (['[0.25, 0.25, 0.75]'], {}), '([0.25, 0.25, 0.75])\n', (803, 823), True, 'import numpy as np\n'), ((1328, 1355), 
'numpy.array', 'np.array', (['goal_posts[i - 1]'], {}), '(goal_posts[i - 1])\n', (1336, 1355), True, 'import numpy as np\n'), ((1393, 1414), 'numpy.zeros', 'np.zeros', (['world.dim_p'], {}), '(world.dim_p)\n', (1401, 1414), True, 'import numpy as np\n'), ((1974, 1984), 'multiagent.core.Landmark', 'Landmark', ([], {}), '()\n', (1982, 1984), False, 'from multiagent.core import World, Agent, Landmark\n'), ((2017, 2058), 'numpy.array', 'np.array', (['[x_pos, -1 + i * landmark_size]'], {}), '([x_pos, -1 + i * landmark_size])\n', (2025, 2058), True, 'import numpy as np\n'), ((2199, 2209), 'multiagent.core.Landmark', 'Landmark', ([], {}), '()\n', (2207, 2209), False, 'from multiagent.core import World, Agent, Landmark\n'), ((2242, 2283), 'numpy.array', 'np.array', (['[-1 + i * landmark_size, y_pos]'], {}), '([-1 + i * landmark_size, y_pos])\n', (2250, 2283), True, 'import numpy as np\n'), ((2852, 2877), 'numpy.array', 'np.array', (['[0.7, 0.7, 0.7]'], {}), '([0.7, 0.7, 0.7])\n', (2860, 2877), True, 'import numpy as np\n'), ((2929, 2954), 'numpy.array', 'np.array', (['[0.1, 0.1, 0.1]'], {}), '([0.1, 0.1, 0.1])\n', (2937, 2954), True, 'import numpy as np\n')] |
# Imports
import numpy as np
# Single to double frame
# Pairs up images two at a time, returning for each pair an array with two frames (frame A and frame B).
#
# Input: 5 images with step 1.
# Output: 4 double-framed images.
# FrameA: 1 2 3 4
# FrameB: 2 3 4 5
#
# Input: 8 images with step 3.
# Output: 5 doubled-framed images.
# FrameA: 1 2 3 4 5
# FrameB: 4 5 6 7 8
#
# This function also crops the image according to the provided Region of Interest (ROI), that must be passed as:
# ROI = [X-start X-end Y-start Y-end], for example: [1 100 1 50].
#
# Output:
# Array with the following dimensions: 0 - Image; 1 - Frame; 2 - Height (Y); 3 - Width (X).
def single_to_double_frame(images, step=1, roi=None):
    """Pair consecutive frames into double-frame samples for PIV-style analysis.

    Args:
        images: array of shape (N, H, W); frame pairs are (images[i], images[i+step]).
        step: separation between frame A and frame B (see examples above).
        roi: optional region of interest [X-start, X-end, Y-start, Y-end],
            1-based inclusive, e.g. [1, 100, 1, 50]; out-of-range bounds are clamped.

    Returns:
        Array of shape (N - step, 2, H', W'): axis 0 image pair, axis 1 frame
        (A=0, B=1), then height (Y) and width (X).
    """
    total_images = images.shape[0]
    frame_pairs = []
    for idx in range(total_images - step):
        frame_a = images[idx]
        # BUG FIX: frame B was always idx+1 regardless of `step`, contradicting
        # the documented pairing (e.g. step=3 should pair 1-4, 2-5, ...).
        frame_b = images[idx + step]
        if roi is not None and len(roi) == 4:
            size_y, size_x = frame_a.shape
            min_x, max_x = max(0, roi[0] - 1), min(roi[1], size_x)
            # BUG FIX: the Y upper bound was clamped with size_x instead of size_y.
            min_y, max_y = max(0, roi[2] - 1), min(roi[3], size_y)
            frame_a = np.array(frame_a[min_y:max_y, min_x:max_x])
            frame_b = np.array(frame_b[min_y:max_y, min_x:max_x])
        frame_pairs.append([frame_a, frame_b])
    return np.array(frame_pairs)
| [
"numpy.array"
] | [((1479, 1509), 'numpy.array', 'np.array', (['images_double_framed'], {}), '(images_double_framed)\n', (1487, 1509), True, 'import numpy as np\n'), ((1273, 1324), 'numpy.array', 'np.array', (['double_frame[0][min_y:max_y, min_x:max_x]'], {}), '(double_frame[0][min_y:max_y, min_x:max_x])\n', (1281, 1324), True, 'import numpy as np\n'), ((1355, 1406), 'numpy.array', 'np.array', (['double_frame[1][min_y:max_y, min_x:max_x]'], {}), '(double_frame[1][min_y:max_y, min_x:max_x])\n', (1363, 1406), True, 'import numpy as np\n')] |
import os
import time
from dataclasses import dataclass, field
from enum import Enum
from typing import List, Optional, Union

import torch
from filelock import FileLock
from loguru import logger
from transformers import (
    BartTokenizer,
    BartTokenizerFast,
    PreTrainedTokenizer,
    RobertaTokenizer,
    RobertaTokenizerFast,
    XLMRobertaTokenizer,
)
from transformers.data.datasets import GlueDataset
from transformers.data.datasets import GlueDataTrainingArguments
from transformers.data.processors.glue import glue_convert_examples_to_features
from transformers.data.processors.utils import InputFeatures

from ..processors.seq_clf import seq_clf_output_modes, seq_clf_processors, seq_clf_tasks_num_labels
class Split(Enum):
    """Dataset split selector; values name the split used for cache files
    and example loading in `SeqClfDataset`."""
    train = 'train'
    dev = 'dev'
    test = 'test'
class SeqClfDataset(GlueDataset):
    """GLUE-style dataset whose task registry points at sequence-classification
    (protein) tasks rather than the stock GLUE tasks.

    Why this class even exists?
    `class GlueDataset(Dataset)` has a constructor `def __init__()` with
    `processor = glue_processors[args.task_name]()`, however I want to expand
    `glue_processors` with protein clf task names. The line
    `processor = glue_processors[args.task_name]()` in the parent class doesn't
    accommodate this.
    """
    args: GlueDataTrainingArguments
    output_mode: str
    features: List[InputFeatures]

    def __init__(
        self,
        args: GlueDataTrainingArguments,
        tokenizer: PreTrainedTokenizer,
        limit_length: Optional[int] = None,
        mode: Union[str, Split] = Split.train,
        cache_dir: Optional[str] = None,
    ):
        """Load features from the cache or featurize the raw split.

        Args:
            args: data dir / task name / max-seq-length configuration.
            tokenizer: tokenizer used for featurization (and cache naming).
            limit_length: optionally keep only the first N examples.
            mode: which split to load ('train' | 'dev' | 'test' or a Split).
            cache_dir: cache location; defaults to args.data_dir.

        Raises:
            KeyError: if `mode` is a string that is not a valid split name.
        """
        self.args = args
        self.processor = seq_clf_processors[args.task_name]()
        self.output_mode = seq_clf_output_modes[args.task_name]
        if isinstance(mode, str):
            try:
                mode = Split[mode]
            except KeyError:
                raise KeyError('mode is not a valid split name')
        # Cache name encodes split, tokenizer class, max length and task so
        # incompatible feature sets never collide.
        cached_features_file = os.path.join(
            cache_dir if cache_dir is not None else args.data_dir,
            'cached_{}_{}_{}_{}'.format(
                mode.value, tokenizer.__class__.__name__, str(args.max_seq_length), args.task_name,
            ),
        )
        label_list = self.processor.get_labels()
        # BUG FIX: BartTokenizer/BartTokenizerFast are referenced here but were
        # never imported, so any mnli task raised NameError at this line.
        if args.task_name in ['mnli', 'mnli-mm'] and tokenizer.__class__ in (
            RobertaTokenizer,
            RobertaTokenizerFast,
            XLMRobertaTokenizer,
            BartTokenizer,
            BartTokenizerFast,
        ):
            # HACK(label indices are swapped in RoBERTa pretrained model)
            label_list[1], label_list[2] = label_list[2], label_list[1]
        self.label_list = label_list

        # Make sure only the first process in distributed training processes
        # the dataset, and the others will use the cache.
        lock_path = cached_features_file + '.lock'
        with FileLock(lock_path):
            if os.path.exists(cached_features_file) and not args.overwrite_cache:
                start = time.time()
                self.features = torch.load(cached_features_file)
                # BUG FIX: loguru formats messages with str.format-style braces,
                # so the old '%.3f' placeholder was logged literally.
                logger.info(
                    f'Loading features from cached file {cached_features_file} '
                    f'[took {time.time() - start:.3f} s]'
                )
            else:
                logger.info(f'Creating features from dataset file at {args.data_dir}')
                if mode == Split.dev:
                    examples = self.processor.get_dev_examples(args.data_dir)
                elif mode == Split.test:
                    examples = self.processor.get_test_examples(args.data_dir)
                else:
                    examples = self.processor.get_train_examples(args.data_dir)
                if limit_length is not None:
                    examples = examples[:limit_length]
                # Load a data file into a list of ``InputFeatures``
                self.features = glue_convert_examples_to_features(
                    examples,
                    tokenizer,
                    max_length=args.max_seq_length,
                    label_list=label_list,
                    output_mode=self.output_mode,
                )
                start = time.time()
                torch.save(self.features, cached_features_file)
                # ^ This seems to take a lot of time so I want to investigate
                # why and how we can improve.
                logger.info(
                    f'Saving features into cached file {cached_features_file} '
                    f'[took {time.time() - start:.3f} s]'
                )

    def __len__(self):
        """Number of featurized examples."""
        return len(self.features)

    def __getitem__(self, i) -> InputFeatures:
        """Return the i-th featurized example."""
        return self.features[i]

    def get_labels(self):
        """Label list for the task (possibly swapped for RoBERTa-family MNLI)."""
        return self.label_list
return self.label_list
| [
"os.path.exists",
"loguru.logger.info",
"torch.load",
"filelock.FileLock",
"torch.save",
"transformers.data.processors.glue.glue_convert_examples_to_features",
"time.time"
] | [((2821, 2840), 'filelock.FileLock', 'FileLock', (['lock_path'], {}), '(lock_path)\n', (2829, 2840), False, 'from filelock import FileLock\n'), ((2858, 2894), 'os.path.exists', 'os.path.exists', (['cached_features_file'], {}), '(cached_features_file)\n', (2872, 2894), False, 'import os\n'), ((2949, 2960), 'time.time', 'time.time', ([], {}), '()\n', (2958, 2960), False, 'import time\n'), ((2993, 3025), 'torch.load', 'torch.load', (['cached_features_file'], {}), '(cached_features_file)\n', (3003, 3025), False, 'import torch\n'), ((3222, 3292), 'loguru.logger.info', 'logger.info', (['f"""Creating features from dataset file at {args.data_dir}"""'], {}), "(f'Creating features from dataset file at {args.data_dir}')\n", (3233, 3292), False, 'from loguru import logger\n'), ((3833, 3977), 'transformers.data.processors.glue.glue_convert_examples_to_features', 'glue_convert_examples_to_features', (['examples', 'tokenizer'], {'max_length': 'args.max_seq_length', 'label_list': 'label_list', 'output_mode': 'self.output_mode'}), '(examples, tokenizer, max_length=args.\n max_seq_length, label_list=label_list, output_mode=self.output_mode)\n', (3866, 3977), False, 'from transformers.data.processors.glue import glue_convert_examples_to_features\n'), ((4116, 4127), 'time.time', 'time.time', ([], {}), '()\n', (4125, 4127), False, 'import time\n'), ((4144, 4191), 'torch.save', 'torch.save', (['self.features', 'cached_features_file'], {}), '(self.features, cached_features_file)\n', (4154, 4191), False, 'import torch\n'), ((3150, 3161), 'time.time', 'time.time', ([], {}), '()\n', (3159, 3161), False, 'import time\n'), ((4422, 4433), 'time.time', 'time.time', ([], {}), '()\n', (4431, 4433), False, 'import time\n')] |
#! /usr/bin/env python
"""
This script parses and cleans up a provided Flow Cytometry Standard (fcs) file
and saves it as a Comma Separated Value (csv).
"""
import os
import re
import numpy as np
import pandas as pd
import optparse
import fcsparser
# #########################################################################
def main():
    """Command-line entry point: convert .fcs files to cleaned .csv files.

    Reads options from sys.argv, collects the requested .fcs file(s),
    optionally restricts them to the requested channels, and writes one
    .csv per input — either next to the input or into --output.

    Raises:
        ValueError: when no input is given, or when writing into a
            non-empty output directory is declined.
    """
    # Initialize the option parser
    parser = optparse.OptionParser()
    parser.add_option('-i', '--input_file', dest='filename', metavar='filename',
                      help='name of single file to be processed.')
    parser.add_option('-d', '--directory', dest='inputdir',
                      help='name of input directory to be processed')
    parser.add_option('-p', '--pattern', dest='pattern',
                      help='filename pattern to parse files.')
    parser.add_option('-o', '--output', dest='out',
                      help='name of output directory')
    parser.add_option('-c', '--channel', action='append', dest='channels',
                      help='individual channels to extract. Each channel must have its own -c flag.')
    parser.add_option('-v', '--verbose', action='store_true', dest='verbose',
                      default=False, help='print progress to stdout')
    parser.add_option('-f', '--force', action='store_true', dest='force',
                      default=False,
                      help='force saving of files to output directory if needed.')
    ops, args = parser.parse_args()

    # Collect the input files.
    # BUG FIX: the original used bitwise `&` and `== None` for this check.
    if ops.inputdir is None and ops.filename is None:
        raise ValueError('no input directory/file provided! Please indicate '
                         'the input directory that contains the fcs files')
    files = []
    if ops.inputdir is not None:
        usr_files = np.array(os.listdir(ops.inputdir))
        # Use the pattern to identify all of the files.
        files_idx = np.array([ops.pattern in f for f in usr_files])
        for f in usr_files[files_idx]:
            files.append('%s/%s' % (ops.inputdir, f))
    else:
        files.append(ops.filename)

    # Decide whether writing into the output directory is allowed.
    # BUG FIX: `cont` could previously be unbound (mkdir branch), the emptiness
    # test compared len(...) != None (always True), and raw_input is Python 2.
    cont = 'y'
    if ops.out is not None:
        if not os.path.isdir(ops.out):
            os.mkdir(ops.out)
            print("Made new ouptut directory %s. I hope that's okay..." % ops.out)
        elif os.listdir(ops.out) and not ops.force:
            cont = input('Output directory is not empty! Continue? [y/n]: ')

    # Convert each fcs file in turn.
    for f in files:
        if not f.endswith('.fcs'):
            continue
        meta, data = fcsparser.parse(f)
        # Keep only the requested channels, if any were given.
        if ops.channels is not None:
            data = data.loc[:, ops.channels]
        # BUG FIX: anchor and escape the extension so '.fcs' inside a name
        # (or '.' matching any char) cannot corrupt the output name.
        filename = re.sub(r'\.fcs$', '.csv', f)
        if ops.out is None:
            data.to_csv(filename, index=False)
            if ops.verbose:
                print(f + ' -> ' + filename)
        else:
            # Strip the directory component before saving into --output.
            find_split = filename.rsplit('/', 1)
            if len(find_split) != 1:
                filename = find_split[1]
            if cont.lower() == 'y':
                data.to_csv(ops.out + '/' + filename, index=False)
                if ops.verbose:
                    print(f + ' -> ' + ops.out + '/' + filename)
            else:
                raise ValueError('output directory is not empty.')
# Run the converter only when invoked as a script, not on import.
if __name__ == '__main__':
    main()
    print('thank you -- come again')
| [
"os.listdir",
"optparse.OptionParser",
"numpy.array",
"os.path.isdir",
"fcsparser.parse",
"os.mkdir",
"re.sub"
] | [((386, 409), 'optparse.OptionParser', 'optparse.OptionParser', ([], {}), '()\n', (407, 409), False, 'import optparse\n'), ((1906, 1955), 'numpy.array', 'np.array', (['[(ops.pattern in f) for f in usr_files]'], {}), '([(ops.pattern in f) for f in usr_files])\n', (1914, 1955), True, 'import numpy as np\n'), ((1804, 1828), 'os.listdir', 'os.listdir', (['ops.inputdir'], {}), '(ops.inputdir)\n', (1814, 1828), False, 'import os\n'), ((2281, 2303), 'os.path.isdir', 'os.path.isdir', (['ops.out'], {}), '(ops.out)\n', (2294, 2303), False, 'import os\n'), ((2326, 2343), 'os.mkdir', 'os.mkdir', (['ops.out'], {}), '(ops.out)\n', (2334, 2343), False, 'import os\n'), ((2824, 2842), 'fcsparser.parse', 'fcsparser.parse', (['f'], {}), '(f)\n', (2839, 2842), False, 'import fcsparser\n'), ((3076, 3101), 're.sub', 're.sub', (['""".fcs"""', '""".csv"""', 'f'], {}), "('.fcs', '.csv', f)\n", (3082, 3101), False, 'import re\n'), ((2443, 2462), 'os.listdir', 'os.listdir', (['ops.out'], {}), '(ops.out)\n', (2453, 2462), False, 'import os\n'), ((3627, 3646), 'os.listdir', 'os.listdir', (['ops.out'], {}), '(ops.out)\n', (3637, 3646), False, 'import os\n')] |
import requests
import configparser
from email_validator import validate_email, EmailNotValidError
from mailchimp import OnCampusJobList
import email_notifier
import groupme_bot
# Module-level configuration: config.ini is read once at import time.
config = configparser.ConfigParser()
config.read('config.ini')
# Section holding the Google reCAPTCHA credentials.
google_config = config['GOOGLE']
def is_valid_recaptcha(recaptcha_response) -> bool:
    """Verify a Google reCAPTCHA response token with the siteverify API.

    Returns True only when Google answers 200 with a positive 'success'
    flag; returns False for network failures, non-200 responses, or a
    negative/malformed verification payload.
    """
    request_url = 'https://www.google.com/recaptcha/api/siteverify'
    verification_data = {
        'secret': google_config['RECAPTCHA_SECRET_KEY'],
        'response': recaptcha_response
    }
    try:
        # Bounded timeout so a hung verification call cannot block the caller.
        response = requests.post(request_url, data=verification_data, timeout=10)
    except requests.RequestException:
        return False
    if response.status_code != 200:
        return False
    # .get() guards against a payload missing the 'success' key.
    return bool(response.json().get('success', False))
def is_valid_email(email) -> bool:
    """Return True when `email` passes email_validator's checks."""
    try:
        validate_email(email)
    except EmailNotValidError:
        return False
    return True
def add_email_subscriber(new_email_subscriber):
    """Subscribe an address to the on-campus job list and announce it.

    The welcome email is best-effort: failures are reported to the GroupMe
    channel and printed, never propagated to the caller.
    """
    mailing_list = OnCampusJobList()
    mailing_list.add_list_member(new_email_subscriber)
    groupme_bot.send_message("We just got a new subscriber, my dudes!")
    # send welcome message for new subscribers. We don't want to send welcome
    # message to existing user
    try:
        email_notifier.send_welcome_message(new_email_subscriber)
    except Exception as exc:
        groupme_bot.send_message("Oops, there was a failure on sending the welcome email")
        print(exc)
| [
"requests.post",
"configparser.ConfigParser",
"groupme_bot.send_message",
"mailchimp.OnCampusJobList",
"email_notifier.send_welcome_message",
"email_validator.validate_email"
] | [((188, 215), 'configparser.ConfigParser', 'configparser.ConfigParser', ([], {}), '()\n', (213, 215), False, 'import configparser\n'), ((541, 591), 'requests.post', 'requests.post', (['request_url'], {'data': 'verification_data'}), '(request_url, data=verification_data)\n', (554, 591), False, 'import requests\n'), ((922, 939), 'mailchimp.OnCampusJobList', 'OnCampusJobList', ([], {}), '()\n', (937, 939), False, 'from mailchimp import OnCampusJobList\n'), ((998, 1065), 'groupme_bot.send_message', 'groupme_bot.send_message', (['"""We just got a new subscriber, my dudes!"""'], {}), "('We just got a new subscriber, my dudes!')\n", (1022, 1065), False, 'import groupme_bot\n'), ((755, 776), 'email_validator.validate_email', 'validate_email', (['email'], {}), '(email)\n', (769, 776), False, 'from email_validator import validate_email, EmailNotValidError\n'), ((1190, 1247), 'email_notifier.send_welcome_message', 'email_notifier.send_welcome_message', (['new_email_subscriber'], {}), '(new_email_subscriber)\n', (1225, 1247), False, 'import email_notifier\n'), ((1284, 1371), 'groupme_bot.send_message', 'groupme_bot.send_message', (['"""Oops, there was a failure on sending the welcome email"""'], {}), "(\n 'Oops, there was a failure on sending the welcome email')\n", (1308, 1371), False, 'import groupme_bot\n')] |
import logging
import os
from datetime import datetime
from enum import Enum
from typing import Optional
from pydantic import BaseModel, Field, HttpUrl, SecretStr, ValidationError
from ..DataGoKr import DataGoKr
# logging
# NOTE(review): getLogger(__file__) keys the logger by file path; the usual
# convention is getLogger(__name__) — confirm before changing (logger names
# are user-visible in log output).
logger = logging.getLogger(__file__)
# debug only
# Service key taken from the environment; None when KMA_API_KEY is unset.
KMA_API_KEY = os.getenv("KMA_API_KEY")
################################################################################
# Types
################################################################################
# (Type)
class DataType(str, Enum):
    """Response payload format; the service currently supports JSON only."""
    # Only JSON Available yet
    json = "JSON"
# (Type)
class VilageFcstVersionFtype(str, Enum):
    """File-type selector for the getFcstVersion endpoint."""
    ODAM = "ODAM"
    VSRT = "VSRT"
    SHRT = "SHRT"
################################################################################
# [Abstract] Abstract for VilageFcst
################################################################################
class VilageFcstInfo(DataGoKr):
    """Shared base for the KMA VilageFcstInfoService 2.0 endpoints."""
    __version__ = "2.0"
    baseUrl: HttpUrl = "http://apis.data.go.kr/1360000/VilageFcstInfoService_2.0"
    dataType: Optional[DataType] = "JSON"  # Only JSON available yet.
    # NOTE(review): None when KMA_API_KEY is unset — confirm DataGoKr/pydantic
    # validation behavior in that case.
    serviceKey: str = KMA_API_KEY
################################################################################
# [API] Ultra-short-range current conditions (nowcast) UltraSrtNcst
################################################################################
# Output Model
class UltraSrtNcstModel(BaseModel):
    """One ultra-short-range observation record (category code -> obsrValue).

    Field comments note the API's declared width/type for each category code.
    """
    baseDate: str
    baseTime: str
    T1H: Optional[float]  # 10 decimal
    RN1: Optional[str]  # 8 code
    UUU: Optional[float]  # 12 float
    VVV: Optional[float]  # 12 float
    REH: Optional[int]  # 8 int
    PTY: Optional[int]  # 4 code
    VEC: Optional[float]  # 10 decimal
    WSD: Optional[float]  # 10 decimal
# API
class UltraSrtNcst(VilageFcstInfo):
    """getUltraSrtNcst — ultra-short-range current conditions for one grid cell."""
    __RecordModel__ = UltraSrtNcstModel
    __index_names__ = None
    __key_name__ = "category"
    __value_name__ = "obsrValue"

    route: str = "getUltraSrtNcst"
    # BUG FIX: the default was datetime.now() evaluated once at import time,
    # so long-running processes kept querying a stale date. default_factory
    # computes "today" at each instantiation instead.
    base_date: str = Field(default_factory=lambda: datetime.now().strftime("%Y%m%d"))
    base_time: str = "0500"
    nx: int = 64
    ny: int = 118
################################################################################
# [API] Ultra-short-range forecast UltraSrtFcst
################################################################################
# Output Model
class UltraSrtFcstModel(BaseModel):
    """One ultra-short-range forecast record (category code -> fcstValue).

    Field comments note the API's declared width/type for each category code.
    """
    baseDate: str
    baseTime: str
    fcstDate: str
    fcstTime: str
    T1H: Optional[float]  # 10 decimal
    RN1: Optional[str]  # 8 code
    SKY: Optional[int]  # 4 code
    UUU: Optional[float]  # 12 float
    VVV: Optional[float]  # 12 float
    REH: Optional[int]  # 8 int
    PTY: Optional[int]  # 4 code
    LGT: Optional[str]  # 4 code
    VEC: Optional[float]  # 10 decimal
    WSD: Optional[float]  # 10 decimal
# API
class UltraSrtFcst(VilageFcstInfo):
    """getUltraSrtFcst — ultra-short-range forecast, indexed by forecast date/time."""
    __RecordModel__ = UltraSrtFcstModel
    __index_names__ = ["fcstDate", "fcstTime"]
    __key_name__ = "category"
    __value_name__ = "fcstValue"

    route: str = "getUltraSrtFcst"
    # BUG FIX: the default was datetime.now() evaluated once at import time,
    # so long-running processes kept querying a stale date. default_factory
    # computes "today" at each instantiation instead.
    base_date: str = Field(default_factory=lambda: datetime.now().strftime("%Y%m%d"))
    base_time: str = "0500"
    nx: int = 64
    ny: int = 118
################################################################################
# [API] Short-term (village) forecast VilageFcst
################################################################################
# Output Model
class VilageFcstModel(BaseModel):
    """One short-term forecast record (category code -> fcstValue).

    Field comments note the API's declared width/type for each category code.
    """
    baseDate: str
    baseTime: str
    fcstDate: str
    fcstTime: str
    POP: Optional[int]  # 8 int
    PTY: Optional[int]  # 4 code
    PCP: Optional[str]  # 8 code
    REH: Optional[int]  # 8 int
    SNO: Optional[str]  # 8 code
    SKY: Optional[int]  # 4 code
    TMP: Optional[float]  # 10 decimal
    TMN: Optional[float]  # 10 decimal
    TMX: Optional[float]  # 10 decimal
    UUU: Optional[float]  # 12 float
    VVV: Optional[float]  # 12 float
    WAV: Optional[float]  # 8 int
    VEC: Optional[float]  # 10 decimal
    WSD: Optional[float]  # 10 decimal
# API
class VilageFcst(VilageFcstInfo):
    """API binding for the short-term village forecast endpoint (getVilageFcst)."""
    __RecordModel__ = VilageFcstModel  # row model used to parse response records
    __index_names__ = ["fcstDate", "fcstTime"]  # columns identifying one forecast slot
    __key_name__ = "category"
    __value_name__ = "fcstValue"
    route: str = "getVilageFcst"
    # NOTE(review): evaluated once at import time, not per request — confirm intended
    base_date: str = datetime.now().strftime("%Y%m%d")
    base_time: str = "0500"
    nx: int = 64  # default grid x coordinate
    ny: int = 118  # default grid y coordinate
################################################################################
# [API] 단기예보 수치모델 버전
################################################################################
# Output Model
class VilageFcstVersion(BaseModel):
    """Record model for the forecast-version endpoint (getFcstVersion) response."""
    # NOTE(review): this name is rebound by the API class defined later in the
    # file; the API class still captures this model via __RecordModel__ because
    # the lookup happens before the rebinding. A distinct name (e.g.
    # VilageFcstVersionModel) would be less fragile.
    filetype: VilageFcstVersionFtype
    version: str
# API
class VilageFcstVersion(VilageFcstInfo):
    """API binding for the forecast numerical-model version endpoint (getFcstVersion)."""
    # NOTE(review): reuses the name of the record model defined above and
    # shadows it from here on; __RecordModel__ resolves to the model because the
    # name is read before this class statement rebinds it. Consider renaming.
    __RecordModel__ = VilageFcstVersion
    route: str = "getFcstVersion"
    ftype: VilageFcstVersionFtype = "ODAM"
    # NOTE(review): evaluated once at import time, not per request — confirm intended
    basedatetime: str = datetime.now().strftime("%Y%m%d0800")
| [
"logging.getLogger",
"datetime.datetime.now",
"os.getenv"
] | [((234, 261), 'logging.getLogger', 'logging.getLogger', (['__file__'], {}), '(__file__)\n', (251, 261), False, 'import logging\n'), ((290, 314), 'os.getenv', 'os.getenv', (['"""KMA_API_KEY"""'], {}), "('KMA_API_KEY')\n", (299, 314), False, 'import os\n'), ((1920, 1934), 'datetime.datetime.now', 'datetime.now', ([], {}), '()\n', (1932, 1934), False, 'from datetime import datetime\n'), ((2938, 2952), 'datetime.datetime.now', 'datetime.now', ([], {}), '()\n', (2950, 2952), False, 'from datetime import datetime\n'), ((4089, 4103), 'datetime.datetime.now', 'datetime.now', ([], {}), '()\n', (4101, 4103), False, 'from datetime import datetime\n'), ((4666, 4680), 'datetime.datetime.now', 'datetime.now', ([], {}), '()\n', (4678, 4680), False, 'from datetime import datetime\n')] |
from traits.api import HasTraits, Bool, Enum, List, Str
from numpy import array, cos, sin
class ElementalRotationDefinition(HasTraits):
    '''
    A definition of an elemental rotation and its angle's name
    '''
    angle_name = Str("undefined angle")  # human-readable name of the rotation angle
    axis = Enum('around_x', 'around_y', 'around_z')  # axis the elemental rotation is about
    # True if the angle is measured as a clockwise rotation of the camera system
    isClockwiseCameraSystemRotation = Bool(False)
class TaitBryanAnglesDefinition(HasTraits):
    '''
    Tait-Bryan angle rotations are defined by three rotation angles around
    the x,y & z-axis.
    The resulting rotation will be different according to
    1. The order in which the rotations are applied
    2. The rotation direction (clockwise vs. counter-clockwise)
    '''
    # Elemental rotations listed in the order they are applied (first applied first).
    angles_in_order_applied = List(ElementalRotationDefinition)
def angles_yaw_pitch_roll():
    '''
    Returns a definition of the "Yaw, Pitch, Roll" Tait-Bryan angles set widespread in aerospace applications.
    '''
    definition = TaitBryanAnglesDefinition()
    # Elemental rotations in application order: roll (x) first, then pitch (y), then yaw (z).
    ordering = (("Roll", 'around_x'), ("Pitch", 'around_y'), ("Yaw", 'around_z'))
    for name, rotation_axis in ordering:
        definition.angles_in_order_applied.append(
            ElementalRotationDefinition(angle_name=name,
                                        axis=rotation_axis,
                                        isClockwiseCameraSystemRotation=False))
    return definition
def angles_pix4d_omega_phi_kappa():
    '''
    Returns a definition of the "Omega, Phi, Kappa" Tait-Bryan angles set used by pix4d.
    '''
    definition = TaitBryanAnglesDefinition()
    # Elemental rotations in application order: kappa (z) first, then phi (y), then omega (x).
    ordering = (("Kappa", 'around_z'), ("Phi", 'around_y'), ("Omega", 'around_x'))
    for name, rotation_axis in ordering:
        definition.angles_in_order_applied.append(
            ElementalRotationDefinition(angle_name=name,
                                        axis=rotation_axis,
                                        isClockwiseCameraSystemRotation=False))
    return definition
def camera_to_world_rotation_around_x(cc_angle = 0):
    '''
    Compute a rotation matrix that is used to transform
    a point in camera coordinates to a point in world coordinates
    when the camera(system) rotates counter-clockwise.
    (Seeing the camera(system) as fixed, the rotation
    would transform points clockwise around its x axis)
    '''
    c = cos(cc_angle)
    s = sin(cc_angle)
    return array([[1., 0., 0.],
                  [0., c, -s],
                  [0., s, c]])
def camera_to_world_rotation_around_y(cc_angle = 0):
    '''
    Compute a rotation matrix that is used to transform
    a point in camera coordinates to a point in world coordinates
    when the camera(system) rotates counter-clockwise.
    (Seeing the camera(system) as fixed, the rotation
    would transform points clockwise around its y axis)
    '''
    # Fix: the original docstring said "x axis" (copy-paste); this is the y-axis rotation.
    c = cos(cc_angle)
    s = sin(cc_angle)
    return array([[c, 0., s],
                  [0., 1., 0.],
                  [-s, 0., c]])
def camera_to_world_rotation_around_z(cc_angle = 0):
    '''
    Compute a rotation matrix that is used to transform
    a point in camera coordinates to a point in world coordinates
    when the camera(system) rotates counter-clockwise.
    (Seeing the camera(system) as fixed, the rotation
    would transform points clockwise around its z axis)
    '''
    # Fix: the original docstring said "x axis" (copy-paste); this is the z-axis rotation.
    c = cos(cc_angle)
    s = sin(cc_angle)
    return array([[c, -s, 0.],
                  [s, c, 0.],
                  [0., 0., 1.]])
def world_angle(angle, world_axis):
    '''
    Correction on the angle for possibly inverted axes
    due to the world system definition (w.r.t. the mayavi world system)
    '''
    inverted = world_axis in ('Down', 'West', 'South')
    return -angle if inverted else angle
def elemental_rotation(angle_and_definition, worldsystem):
    '''
    Returns an elemental rotation matrix that is used to transform
    a point in camera coordinates to a point in world coordinates
    given an euler angle and its definition.
    '''
    angle, definition = angle_and_definition
    if definition.isClockwiseCameraSystemRotation:
        # The matrix builders expect a counter-clockwise angle.
        angle = -angle
    # Map each elemental axis to its matrix builder and the matching
    # world-system axis attribute (looked up lazily via getattr).
    builders = {
        'around_x': (camera_to_world_rotation_around_x, 'x_axis'),
        'around_y': (camera_to_world_rotation_around_y, 'y_axis'),
        'around_z': (camera_to_world_rotation_around_z, 'z_axis'),
    }
    if definition.axis in builders:
        build, axis_attr = builders[definition.axis]
        return build(world_angle(angle, getattr(worldsystem, axis_attr)))
def camera_to_world_rotation_matrix(first_angle_and_definition,
                                    second_angle_and_definition,
                                    last_angle_and_definition,
                                    world_system):
    '''
    Compute a rotation matrix that is used to transform
    a point in camera coordinates to a point in world coordinates
    given Tait-Bryan angles and their definition.
    Note: Matrices application order is opposite to reading order
    '''
    r_first = elemental_rotation(first_angle_and_definition, world_system)
    r_second = elemental_rotation(second_angle_and_definition, world_system)
    r_last = elemental_rotation(last_angle_and_definition, world_system)
    # Matrix product is written right-to-left: the first-applied rotation
    # sits rightmost.
    return r_last.dot(r_second).dot(r_first)
| [
"traits.api.Enum",
"traits.api.Str",
"numpy.cos",
"numpy.sin",
"traits.api.Bool",
"traits.api.List"
] | [((234, 256), 'traits.api.Str', 'Str', (['"""undefined angle"""'], {}), "('undefined angle')\n", (237, 256), False, 'from traits.api import HasTraits, Bool, Enum, List, Str\n'), ((268, 308), 'traits.api.Enum', 'Enum', (['"""around_x"""', '"""around_y"""', '"""around_z"""'], {}), "('around_x', 'around_y', 'around_z')\n", (272, 308), False, 'from traits.api import HasTraits, Bool, Enum, List, Str\n'), ((347, 358), 'traits.api.Bool', 'Bool', (['(False)'], {}), '(False)\n', (351, 358), False, 'from traits.api import HasTraits, Bool, Enum, List, Str\n'), ((722, 755), 'traits.api.List', 'List', (['ElementalRotationDefinition'], {}), '(ElementalRotationDefinition)\n', (726, 755), False, 'from traits.api import HasTraits, Bool, Enum, List, Str\n'), ((2700, 2713), 'numpy.cos', 'cos', (['cc_angle'], {}), '(cc_angle)\n', (2703, 2713), False, 'from numpy import array, cos, sin\n'), ((2758, 2771), 'numpy.sin', 'sin', (['cc_angle'], {}), '(cc_angle)\n', (2761, 2771), False, 'from numpy import array, cos, sin\n'), ((2774, 2787), 'numpy.cos', 'cos', (['cc_angle'], {}), '(cc_angle)\n', (2777, 2787), False, 'from numpy import array, cos, sin\n'), ((3170, 3183), 'numpy.cos', 'cos', (['cc_angle'], {}), '(cc_angle)\n', (3173, 3183), False, 'from numpy import array, cos, sin\n'), ((3192, 3205), 'numpy.sin', 'sin', (['cc_angle'], {}), '(cc_angle)\n', (3195, 3205), False, 'from numpy import array, cos, sin\n'), ((3295, 3308), 'numpy.cos', 'cos', (['cc_angle'], {}), '(cc_angle)\n', (3298, 3308), False, 'from numpy import array, cos, sin\n'), ((3690, 3703), 'numpy.cos', 'cos', (['cc_angle'], {}), '(cc_angle)\n', (3693, 3703), False, 'from numpy import array, cos, sin\n'), ((3748, 3761), 'numpy.sin', 'sin', (['cc_angle'], {}), '(cc_angle)\n', (3751, 3761), False, 'from numpy import array, cos, sin\n'), ((3766, 3779), 'numpy.cos', 'cos', (['cc_angle'], {}), '(cc_angle)\n', (3769, 3779), False, 'from numpy import array, cos, sin\n'), ((2717, 2730), 'numpy.sin', 'sin', (['cc_angle'], {}), 
'(cc_angle)\n', (2720, 2730), False, 'from numpy import array, cos, sin\n'), ((3274, 3287), 'numpy.sin', 'sin', (['cc_angle'], {}), '(cc_angle)\n', (3277, 3287), False, 'from numpy import array, cos, sin\n'), ((3709, 3722), 'numpy.sin', 'sin', (['cc_angle'], {}), '(cc_angle)\n', (3712, 3722), False, 'from numpy import array, cos, sin\n')] |
#!/usr/bin/env python3
import unittest
from textwrap import dedent
from datetime import timedelta
from parse import parse_block, TranslationShift
class TestTranslationShift(unittest.TestCase):
    """Unit tests for TranslationShift equality semantics."""
    def test_eq(self):
        # Equal when name and language both match; unequal otherwise.
        reference = TranslationShift('name', 'lang')
        self.assertEqual(reference, TranslationShift('name', 'lang'))
        self.assertNotEqual(reference, TranslationShift('anonther_name', 'lang'))
        self.assertNotEqual(reference, TranslationShift('name', 'anonther_lang'))
class TestParseBlock(unittest.TestCase):
    """Unit tests for parse_block() on shift-plan text blocks."""
    def test_simple(self):
        """
        Test that a well-formed block yields language, room, duration, title,
        speakers, fahrplan link and all translation shifts.
        """
        result = parse_block(dedent("""
            #1
            [de] 11:00 +00:30, Adams
            Opening Event
            rufus, rixx
            Fahrplan: https://fahrplan.events.ccc.de/congress/2018/Fahrplan/events/9985.html
            Slides (if available): https://speakers.c3lingo.org/talks/15f4e5c5-40e1-4c73-8da0-4cc2a773ab13/
            → en: waffle, simplysaym, sirenensang
            → fr: informancer, ironic, yann0u
        """))
        self.assertEqual(result.language, 'de')
        self.assertEqual(result.room, 'Adams')
        self.assertEqual(result.duration, timedelta(hours=0, minutes=30))
        self.assertEqual(result.title, 'Opening Event')
        self.assertEqual(result.speakers, ['rufus', 'rixx'])
        self.assertEqual(result.fahrplan, 'https://fahrplan.events.ccc.de/congress/2018/Fahrplan/events/9985.html')
        self.assertEqual(result.translation_shifts, [
            TranslationShift('waffle', 'en', result),
            TranslationShift('simplysaym', 'en', result),
            TranslationShift('sirenensang', 'en', result),
            TranslationShift('informancer', 'fr', result),
            TranslationShift('ironic', 'fr', result),
            TranslationShift('yann0u', 'fr', result),
        ])
    def test_notes(self):
        """
        Test that notes and parenthetical stuff inside the shift assignments is stripped out
        as much as possible
        """
        # The 'gsw' line contains only parenthesized reservations, so it
        # contributes no shifts at all.
        result = parse_block(dedent("""
            #31
            [de] 18:50 +01:00, Borg
            "Das ist mir nicht erinnerlich." − Der NSU-Komplex heute
            <NAME> (NSU-Watch)
            Fahrplan: https://fahrplan.events.ccc.de/congress/2018/Fahrplan/events/9766.html
            Slides (if available): https://speakers.c3lingo.org/talks/a12d17e9-3758-4fa0-b612-0c6ba22ea773/
            → en: tr1 (note), (foo) tr2
            → fr: tr3 – yay!
            → gsw: (reservation), (another one) , (never mind me)
        """))
        self.assertEqual(result.translation_shifts, [
            TranslationShift('tr1', 'en', result),
            TranslationShift('tr2', 'en', result),
            TranslationShift('tr3', 'fr', result),
        ])
    def test_trailing_comma(self):
        """
        Test that trailing commas don't cause trouble
        """
        result = parse_block(dedent("""
            #31
            [de] 18:50 +01:00, Borg
            "Das ist mir nicht erinnerlich." − Der NSU-Komplex heute
            <NAME> (NSU-Watch)
            Fahrplan: https://fahrplan.events.ccc.de/congress/2018/Fahrplan/events/9766.html
            Slides (if available): https://speakers.c3lingo.org/talks/a12d17e9-3758-4fa0-b612-0c6ba22ea773/
            → en: tr1, tr2,
        """))
        self.assertEqual(result.translation_shifts, [
            TranslationShift('tr1', 'en', result),
            TranslationShift('tr2', 'en', result),
        ])
if __name__ == '__main__':
    unittest.main()
| [
"unittest.main",
"textwrap.dedent",
"datetime.timedelta",
"parse.TranslationShift"
] | [((3436, 3451), 'unittest.main', 'unittest.main', ([], {}), '()\n', (3449, 3451), False, 'import unittest\n'), ((244, 276), 'parse.TranslationShift', 'TranslationShift', (['"""name"""', '"""lang"""'], {}), "('name', 'lang')\n", (260, 276), False, 'from parse import parse_block, TranslationShift\n'), ((278, 310), 'parse.TranslationShift', 'TranslationShift', (['"""name"""', '"""lang"""'], {}), "('name', 'lang')\n", (294, 310), False, 'from parse import parse_block, TranslationShift\n'), ((340, 372), 'parse.TranslationShift', 'TranslationShift', (['"""name"""', '"""lang"""'], {}), "('name', 'lang')\n", (356, 372), False, 'from parse import parse_block, TranslationShift\n'), ((374, 415), 'parse.TranslationShift', 'TranslationShift', (['"""anonther_name"""', '"""lang"""'], {}), "('anonther_name', 'lang')\n", (390, 415), False, 'from parse import parse_block, TranslationShift\n'), ((445, 477), 'parse.TranslationShift', 'TranslationShift', (['"""name"""', '"""lang"""'], {}), "('name', 'lang')\n", (461, 477), False, 'from parse import parse_block, TranslationShift\n'), ((479, 520), 'parse.TranslationShift', 'TranslationShift', (['"""name"""', '"""anonther_lang"""'], {}), "('name', 'anonther_lang')\n", (495, 520), False, 'from parse import parse_block, TranslationShift\n'), ((621, 1021), 'textwrap.dedent', 'dedent', (['"""\n #1\n [de] 11:00 +00:30, Adams\n Opening Event\n rufus, rixx\n Fahrplan: https://fahrplan.events.ccc.de/congress/2018/Fahrplan/events/9985.html\n Slides (if available): https://speakers.c3lingo.org/talks/15f4e5c5-40e1-4c73-8da0-4cc2a773ab13/\n → en: waffle, simplysaym, sirenensang\n → fr: informancer, ironic, yann0u\n """'], {}), '(\n """\n #1\n [de] 11:00 +00:30, Adams\n Opening Event\n rufus, rixx\n Fahrplan: https://fahrplan.events.ccc.de/congress/2018/Fahrplan/events/9985.html\n Slides (if available): https://speakers.c3lingo.org/talks/15f4e5c5-40e1-4c73-8da0-4cc2a773ab13/\n → en: waffle, simplysaym, sirenensang\n → fr: informancer, ironic, 
yann0u\n """\n )\n', (627, 1021), False, 'from textwrap import dedent\n'), ((1150, 1180), 'datetime.timedelta', 'timedelta', ([], {'hours': '(0)', 'minutes': '(30)'}), '(hours=0, minutes=30)\n', (1159, 1180), False, 'from datetime import timedelta\n'), ((2019, 2504), 'textwrap.dedent', 'dedent', (['"""\n #31\n [de] 18:50 +01:00, Borg\n "Das ist mir nicht erinnerlich." − Der NSU-Komplex heute\n <NAME> (NSU-Watch)\n Fahrplan: https://fahrplan.events.ccc.de/congress/2018/Fahrplan/events/9766.html\n Slides (if available): https://speakers.c3lingo.org/talks/a12d17e9-3758-4fa0-b612-0c6ba22ea773/\n → en: tr1 (note), (foo) tr2\n → fr: tr3 – yay!\n → gsw: (reservation), (another one) , (never mind me)\n """'], {}), '(\n """\n #31\n [de] 18:50 +01:00, Borg\n "Das ist mir nicht erinnerlich." − Der NSU-Komplex heute\n <NAME> (NSU-Watch)\n Fahrplan: https://fahrplan.events.ccc.de/congress/2018/Fahrplan/events/9766.html\n Slides (if available): https://speakers.c3lingo.org/talks/a12d17e9-3758-4fa0-b612-0c6ba22ea773/\n → en: tr1 (note), (foo) tr2\n → fr: tr3 – yay!\n → gsw: (reservation), (another one) , (never mind me)\n """\n )\n', (2025, 2504), False, 'from textwrap import dedent\n'), ((2857, 3243), 'textwrap.dedent', 'dedent', (['"""\n #31\n [de] 18:50 +01:00, Borg\n "Das ist mir nicht erinnerlich." − Der NSU-Komplex heute\n <NAME> (NSU-Watch)\n Fahrplan: https://fahrplan.events.ccc.de/congress/2018/Fahrplan/events/9766.html\n Slides (if available): https://speakers.c3lingo.org/talks/a12d17e9-3758-4fa0-b612-0c6ba22ea773/\n → en: tr1, tr2,\n """'], {}), '(\n """\n #31\n [de] 18:50 +01:00, Borg\n "Das ist mir nicht erinnerlich." 
− Der NSU-Komplex heute\n <NAME> (NSU-Watch)\n Fahrplan: https://fahrplan.events.ccc.de/congress/2018/Fahrplan/events/9766.html\n Slides (if available): https://speakers.c3lingo.org/talks/a12d17e9-3758-4fa0-b612-0c6ba22ea773/\n → en: tr1, tr2,\n """\n )\n', (2863, 3243), False, 'from textwrap import dedent\n'), ((1481, 1521), 'parse.TranslationShift', 'TranslationShift', (['"""waffle"""', '"""en"""', 'result'], {}), "('waffle', 'en', result)\n", (1497, 1521), False, 'from parse import parse_block, TranslationShift\n'), ((1535, 1579), 'parse.TranslationShift', 'TranslationShift', (['"""simplysaym"""', '"""en"""', 'result'], {}), "('simplysaym', 'en', result)\n", (1551, 1579), False, 'from parse import parse_block, TranslationShift\n'), ((1593, 1638), 'parse.TranslationShift', 'TranslationShift', (['"""sirenensang"""', '"""en"""', 'result'], {}), "('sirenensang', 'en', result)\n", (1609, 1638), False, 'from parse import parse_block, TranslationShift\n'), ((1652, 1697), 'parse.TranslationShift', 'TranslationShift', (['"""informancer"""', '"""fr"""', 'result'], {}), "('informancer', 'fr', result)\n", (1668, 1697), False, 'from parse import parse_block, TranslationShift\n'), ((1711, 1751), 'parse.TranslationShift', 'TranslationShift', (['"""ironic"""', '"""fr"""', 'result'], {}), "('ironic', 'fr', result)\n", (1727, 1751), False, 'from parse import parse_block, TranslationShift\n'), ((1765, 1805), 'parse.TranslationShift', 'TranslationShift', (['"""yann0u"""', '"""fr"""', 'result'], {}), "('yann0u', 'fr', result)\n", (1781, 1805), False, 'from parse import parse_block, TranslationShift\n'), ((2562, 2599), 'parse.TranslationShift', 'TranslationShift', (['"""tr1"""', '"""en"""', 'result'], {}), "('tr1', 'en', result)\n", (2578, 2599), False, 'from parse import parse_block, TranslationShift\n'), ((2613, 2650), 'parse.TranslationShift', 'TranslationShift', (['"""tr2"""', '"""en"""', 'result'], {}), "('tr2', 'en', result)\n", (2629, 2650), False, 'from parse import 
parse_block, TranslationShift\n'), ((2664, 2701), 'parse.TranslationShift', 'TranslationShift', (['"""tr3"""', '"""fr"""', 'result'], {}), "('tr3', 'fr', result)\n", (2680, 2701), False, 'from parse import parse_block, TranslationShift\n'), ((3301, 3338), 'parse.TranslationShift', 'TranslationShift', (['"""tr1"""', '"""en"""', 'result'], {}), "('tr1', 'en', result)\n", (3317, 3338), False, 'from parse import parse_block, TranslationShift\n'), ((3352, 3389), 'parse.TranslationShift', 'TranslationShift', (['"""tr2"""', '"""en"""', 'result'], {}), "('tr2', 'en', result)\n", (3368, 3389), False, 'from parse import parse_block, TranslationShift\n')] |
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.contrib import admin
from .models import Shortener
@admin.register(Shortener)
class ShortenerAdmin(admin.ModelAdmin):
    """Admin configuration for Shortener entries."""
    list_display = ('id', 'short_url', 'link_url', 'status', 'created')
    fields = ('short_url', 'link_url', 'status', 'created', 'modified')
    readonly_fields = ('short_url', 'created', 'modified')

    def save_formset(self, request, form, formset, change):
        """Strip trailing whitespace from each inline link_url before saving.

        Bug fix: ``formset.save(commit=False)`` returns a *list* of unsaved
        instances; the original code set ``link_url`` on the list itself
        (an AttributeError at runtime) and never saved the instances.
        """
        instances = formset.save(commit=False)
        for instance in instances:
            instance.link_url = instance.link_url.rstrip()
            instance.save()
        # With commit=False, deletions must be applied explicitly too.
        for obj in formset.deleted_objects:
            obj.delete()
        formset.save_m2m()
| [
"django.contrib.admin.register"
] | [((132, 157), 'django.contrib.admin.register', 'admin.register', (['Shortener'], {}), '(Shortener)\n', (146, 157), False, 'from django.contrib import admin\n')] |
import os
import ipaddress
import numpy as np
import pandas as pd
import datetime
import boto3
import gzip
import json
from signal_processing import signalProcess
# S3 bucket and key prefixes are injected through the Lambda environment.
BUCKET_NAME = os.environ.get("BUCKET_NAME", None)  # bucket holding flow logs and findings
VPC_FLOW_LOGS_PATH = os.environ.get("VPC_FLOW_LOGS_PATH", None)  # key prefix of raw VPC flow logs
FINDINGS_PATH = os.environ.get("FINDINGS_PATH", None)  # key prefix where findings are written
TMP_DOWNLOAD_DIR = "/tmp/s3_download"  # local scratch directory for downloaded log files
# Column layout of one VPC flow-log line, plus a leading "date" column that is
# dropped again in load_data() when present.
FLOW_COLUMNS = [
    "date",
    "version",
    "account-id",
    "interface-id",
    "srcaddr",
    "dstaddr",
    "srcport",
    "dstport",
    "protocol",
    "packets",
    "bytes",
    "start",
    "end",
    "action",
    "log-status",
]
def cloud_sniper_beaconing_detection(event, context):
    """Lambda entry point: load VPC flow logs from S3, detect beaconing
    conversations, enrich and persist the findings, and return them.

    ``event`` and ``context`` follow the AWS Lambda handler signature but
    are not used here; all configuration comes from environment variables.
    """
    bucket_name = BUCKET_NAME
    vpc_flow_logs_path = VPC_FLOW_LOGS_PATH
    findings_path = FINDINGS_PATH
    df = load_data(bucket_name, vpc_flow_logs_path)
    print(f"Number of raw records: {len(df.index)}")
    # version and account-id are treated as constant across the whole log set;
    # grab them from the first row before the columns are dropped/filtered.
    version = df.version.iloc[0]  # constant
    account_id = df["account-id"].iloc[0]  # constant
    df = filter_format_data(df)
    print(f"Number of records after filtering missing data: {len(df.index)}")
    df = sort_data(df)
    print(f"Number of records after filtering by time: {len(df.index)}")
    df = filter_useless_data(df)
    print(f"Number of records after filtering by port: {len(df.index)}")
    df = filter_unfrequent_data(df)
    print(f"Number of records after filtering unfrequent: {len(df.index)}")
    res = find_beacons(df)
    # Static enrichment fields merged into every finding dict.
    new_fields = {
        "hits": "",
        "cloud.provider": "aws",
        "event.type": "beaconing",
        "cloud.account.name": "",
        "interface.vpc.id": "",
        "protocol": "",
        "version": version,
        "cloud.account.id": account_id,
    }
    # map() is used for its side effect: update each finding in place.
    list(map(lambda x: x.update(new_fields), res))
    print(f"Result: {res}")
    save_results(bucket_name, findings_path, res)
    return res
def load_data(s3_bucket, s3_vpc_flow_logs_path):
    """Download all .log/.log.gz flow-log objects under the given S3 prefix
    into TMP_DOWNLOAD_DIR and parse them into one DataFrame.

    The first line of every file (the header) is skipped. Returns a frame
    with FLOW_COLUMNS minus "date" when the files carry a date column,
    otherwise FLOW_COLUMNS[1:] directly.
    """
    s3 = boto3.resource('s3')
    bucket = s3.Bucket(name=s3_bucket)
    # Normalize the prefix: no leading slash, exactly one trailing slash.
    prefix = s3_vpc_flow_logs_path
    if prefix.startswith("/"):
        prefix = prefix[1:]
    if not prefix.endswith("/"):
        prefix += "/"
    if not os.path.exists(TMP_DOWNLOAD_DIR):
        os.mkdir(TMP_DOWNLOAD_DIR)
    # Download each matching object under a zero-padded sequential name so the
    # later sorted() pass processes files in listing order.
    for i, s3_file_obj in enumerate(bucket.objects.filter(Prefix=prefix)):
        if s3_file_obj.key.endswith(".log.gz"):
            extension = "log.gz"
        elif s3_file_obj.key.endswith(".log"):
            extension = "log"
        else:
            continue
        bucket.download_file(s3_file_obj.key,
                             TMP_DOWNLOAD_DIR + "/%06d" % i + "." + extension)
    data = []
    for fname in sorted(os.listdir(TMP_DOWNLOAD_DIR)):
        # gzip.open yields bytes, plain open yields str — remember which.
        if fname.endswith(".log.gz"):
            open_ = gzip.open
            decode = True
        elif fname.endswith(".log"):
            open_ = open
            decode = False
        else:
            continue
        with open_(os.path.join(TMP_DOWNLOAD_DIR, fname), 'r') as fd:
            first_line = True
            for line in fd:
                if first_line:
                    # Skip the per-file header line.
                    first_line = False
                    continue
                if decode:
                    line = line.decode("utf-8").strip().split(" ")
                else:
                    line = line.strip().split(" ")
                data.append(line)
    if data and (len(data[0]) == len(FLOW_COLUMNS)):
        # Files include the leading date column — parse then drop it.
        df = pd.DataFrame(data, columns=FLOW_COLUMNS)
        df.drop(['date'], axis=1, inplace=True)
    else:
        df = pd.DataFrame(data, columns=FLOW_COLUMNS[1:])
    return df
def filter_format_data(df):
    """Drop flow records without both addresses and coerce numeric columns.

    Removes rows whose srcaddr/dstaddr is the "-" placeholder, drops the
    unused "version" and "srcport" columns, replaces remaining "-"
    placeholders with NaN and converts the numeric columns to numbers.
    """
    df = df[df.srcaddr != "-"]
    df = df[df.dstaddr != "-"]
    df = df.drop(["version", "srcport"], axis=1)
    # Fix: the original ran this identical replace twice; once is enough.
    df = df.replace("-", np.nan)
    df[["dstport", "protocol", "packets", "bytes", "start", "end"]] = \
        df[["dstport", "protocol", "packets", "bytes", "start", "end"]] \
        .apply(pd.to_numeric)
    return df
def sort_data(df):
    """Derive a 'datetime' column from the epoch-seconds 'start' column and
    return the frame in chronological order (datetime restored as a column)."""
    df['datetime'] = pd.to_datetime(df.start, unit='s')
    # TODO: should we process just the last hours?
    return df.set_index('datetime').sort_index().reset_index(level=0)
def filter_useless_data(df):
    """Keep only flows that could plausibly be beaconing:

    * srcaddr must be a private address
    * dstport must be <= 1024 and not 123 (NTP)
    """
    if df.empty:
        return df
    private_src = df.srcaddr.map(lambda addr: ipaddress.ip_address(addr).is_private)
    df = df[private_src]
    return df[(df.dstport <= 1024) & (df.dstport != 123)]
def filter_unfrequent_data(df):
    """Keep only (srcaddr, dstaddr, dstport) conversations that appear in at
    least 6 snippets; rarer pairs cannot exhibit a usable period."""
    frequent = df.groupby(["srcaddr", "dstaddr", "dstport"]).filter(
        lambda g: g.shape[0] >= 6)
    # NOTE(review): reset_index(level=0) on the default RangeIndex copies it
    # into an 'index' column — confirm downstream code expects that.
    return frequent.reset_index(level=0)
def find_beacons(df):
    """Run periodicity detection per (srcaddr, dstaddr, dstport) conversation
    and return one finding dict for every conversation with a detected period.

    Timestamps are formatted as ISO-8601 with millisecond precision and a
    literal 'Z' suffix.
    """
    res = []
    time_fmt = "%Y-%m-%dT%H:%M:%S.%f"
    groups = df.groupby(["srcaddr", "dstaddr", "dstport"])
    data_in = {
        "data": {},
        "time": {}
    }
    for (srcaddr, dstaddr, port), traffic in groups:
        k = (srcaddr, dstaddr, port)
        data_in["data"][k] = traffic.bytes
        data_in["time"][k] = traffic.datetime
    lrner = signalProcess(data_in, options_in=None)
    output = lrner.getPrimaryPeriods()
    for (srcaddr, dstaddr, port) in output["powers"]:
        if output["powers"][(srcaddr, dstaddr, port)][0] is not None:
            # Fix: bind k to the *current* conversation before using it; the
            # original printed the previous iteration's key (and would raise
            # NameError if the first loop never ran).
            k = (srcaddr, dstaddr, port)
            print(data_in["time"][k])
            start_time = data_in["time"][k].iloc[0].strftime(time_fmt)[:-3] + 'Z'
            end_time = data_in["time"][k].iloc[-1].strftime(time_fmt)[:-3] + 'Z'
            res.append({
                "source.ip": srcaddr,
                "destination.ip": dstaddr,
                "destination.port": int(port),
                "timestamp": start_time,
                "event.end": end_time,
                "event.start": start_time
            })
    return res
def save_results(bucket_name, findings_path, res):
    """Serialize the findings list as JSON and upload it to
    s3://<bucket_name>/<findings_path>/beaconing_detection_<timestamp>.json."""
    now = datetime.datetime.now().strftime("%Y%m%d_%H%M%S")
    s3_resource = boto3.resource('s3')
    bucket = s3_resource.Bucket(name=bucket_name)
    # Normalize the prefix: no leading or trailing slash.
    if findings_path.startswith("/"):
        findings_path = findings_path[1:]
    if findings_path.endswith("/"):
        findings_path = findings_path[:-1]
    (bucket.Object(key=f"{findings_path}/beaconing_detection_{now}.json")
        .put(Body=bytes(json.dumps(res).encode('UTF-8'))))
# Local entry point for running the detection outside Lambda.
if __name__ == "__main__":
    print(json.dumps(cloud_sniper_beaconing_detection(None, None), indent=4))
| [
"os.path.exists",
"os.listdir",
"json.dumps",
"os.environ.get",
"os.path.join",
"signal_processing.signalProcess",
"boto3.resource",
"datetime.datetime.now",
"os.mkdir",
"pandas.DataFrame",
"ipaddress.ip_address",
"pandas.to_datetime"
] | [((179, 214), 'os.environ.get', 'os.environ.get', (['"""BUCKET_NAME"""', 'None'], {}), "('BUCKET_NAME', None)\n", (193, 214), False, 'import os\n'), ((236, 278), 'os.environ.get', 'os.environ.get', (['"""VPC_FLOW_LOGS_PATH"""', 'None'], {}), "('VPC_FLOW_LOGS_PATH', None)\n", (250, 278), False, 'import os\n'), ((295, 332), 'os.environ.get', 'os.environ.get', (['"""FINDINGS_PATH"""', 'None'], {}), "('FINDINGS_PATH', None)\n", (309, 332), False, 'import os\n'), ((1912, 1932), 'boto3.resource', 'boto3.resource', (['"""s3"""'], {}), "('s3')\n", (1926, 1932), False, 'import boto3\n'), ((3998, 4032), 'pandas.to_datetime', 'pd.to_datetime', (['df.start'], {'unit': '"""s"""'}), "(df.start, unit='s')\n", (4012, 4032), True, 'import pandas as pd\n'), ((5128, 5167), 'signal_processing.signalProcess', 'signalProcess', (['data_in'], {'options_in': 'None'}), '(data_in, options_in=None)\n', (5141, 5167), False, 'from signal_processing import signalProcess\n'), ((6010, 6030), 'boto3.resource', 'boto3.resource', (['"""s3"""'], {}), "('s3')\n", (6024, 6030), False, 'import boto3\n'), ((2134, 2166), 'os.path.exists', 'os.path.exists', (['TMP_DOWNLOAD_DIR'], {}), '(TMP_DOWNLOAD_DIR)\n', (2148, 2166), False, 'import os\n'), ((2176, 2202), 'os.mkdir', 'os.mkdir', (['TMP_DOWNLOAD_DIR'], {}), '(TMP_DOWNLOAD_DIR)\n', (2184, 2202), False, 'import os\n'), ((2635, 2663), 'os.listdir', 'os.listdir', (['TMP_DOWNLOAD_DIR'], {}), '(TMP_DOWNLOAD_DIR)\n', (2645, 2663), False, 'import os\n'), ((3379, 3419), 'pandas.DataFrame', 'pd.DataFrame', (['data'], {'columns': 'FLOW_COLUMNS'}), '(data, columns=FLOW_COLUMNS)\n', (3391, 3419), True, 'import pandas as pd\n'), ((3491, 3535), 'pandas.DataFrame', 'pd.DataFrame', (['data'], {'columns': 'FLOW_COLUMNS[1:]'}), '(data, columns=FLOW_COLUMNS[1:])\n', (3503, 3535), True, 'import pandas as pd\n'), ((5941, 5964), 'datetime.datetime.now', 'datetime.datetime.now', ([], {}), '()\n', (5962, 5964), False, 'import datetime\n'), ((2903, 2940), 'os.path.join', 
'os.path.join', (['TMP_DOWNLOAD_DIR', 'fname'], {}), '(TMP_DOWNLOAD_DIR, fname)\n', (2915, 2940), False, 'import os\n'), ((4373, 4396), 'ipaddress.ip_address', 'ipaddress.ip_address', (['x'], {}), '(x)\n', (4393, 4396), False, 'import ipaddress\n'), ((6342, 6357), 'json.dumps', 'json.dumps', (['res'], {}), '(res)\n', (6352, 6357), False, 'import json\n')] |
import argparse
import json
# Command-line interface: paths of the original text/train/valid files and of
# the converted outputs (defaults are relative to the working directory).
parser = argparse.ArgumentParser()
parser.add_argument("--text", type=str, help="path to original text file")
parser.add_argument("--train", type=str, help="path to original training data file")
parser.add_argument("--valid", type=str, help="path to original validation data file")
parser.add_argument("--converted_text", type=str, default="Qdesc.txt", help="path to converted text file")
parser.add_argument("--converted_train", type=str, default="train.txt", help="path to converted training file")
parser.add_argument("--converted_valid", type=str, default="valid.txt", help="path to converted validation file")
if __name__=='__main__':
    args = parser.parse_args()
    Qid={} #Entity to id (line number in the description file)
    Pid={} #Relation to id
    def getNum(s):
        # Numeric part of a Q/P identifier, e.g. "Q42" -> 42.
        return int(s[1:])
    # Pass 1: copy descriptions of entities Q0..Q1000 to the converted text
    # file and record each kept entity's line number in Qid.
    with open(args.text, "r") as fin:
        with open(args.converted_text, "w") as fout:
            lines = fin.readlines()
            Cnt=0
            for idx, line in enumerate(lines):
                data = line.split('\t')
                assert len(data) >= 2
                assert data[0].startswith('Q')
                desc = '\t'.join(data[1:]).strip()
                if getNum(data[0])>1000:
                    continue
                fout.write(desc+"\n")
                Qid[data[0]] = Cnt#idx
                Cnt+=1
    def convert_triples(inFile, outFile):
        # Rewrite "<Q> <P> <Q>" triples as numeric ids, skipping any triple
        # touching an entity above Q1000; relations get ids on first sight.
        with open(inFile, "r") as fin:
            with open(outFile, "w") as fout:
                lines = fin.readlines()
                for line in lines:
                    data = line.strip().split('\t')
                    assert len(data) == 3
                    if getNum(data[0])>1000 or getNum(data[2]) > 1000:
                        continue
                    if data[1] not in Pid:
                        Pid[data[1]] = len(Pid)
                    fout.write("%d %d %d\n"%(Qid[data[0]], Pid[data[1]], Qid[data[2]]))
    convert_triples(args.train, args.converted_train)
    convert_triples(args.valid, args.converted_valid)
| [
"argparse.ArgumentParser"
] | [((38, 63), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {}), '()\n', (61, 63), False, 'import argparse\n')] |
#!/usr/bin/env python3
#
# Copyright (c) 2019 <NAME> and contributors.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""The test_webframe module covers the webframe module."""
from typing import List
from typing import TYPE_CHECKING
from typing import Tuple
from typing import cast
import traceback
import unittest
import webframe
if TYPE_CHECKING:
# pylint: disable=no-name-in-module,import-error,unused-import
from wsgiref.types import StartResponse # noqa: F401
class TestHandleException(unittest.TestCase):
    """Tests handle_exception()."""
    def test_happy(self) -> None:
        """Tests the happy path."""
        environ = {"PATH_INFO": "/"}
        def start_response(status: str, response_headers: List[Tuple[str, str]]) -> None:
            # handle_exception() must report a 500 with an HTML body.
            self.assertTrue(status.startswith("500"))
            headers = dict(response_headers)
            self.assertEqual(headers["Content-type"], "text/html; charset=utf-8")
        try:
            int("a")
        # pylint: disable=broad-except
        except Exception:
            writer = cast('StartResponse', start_response)
            status, response_headers, data = webframe.handle_exception(environ, traceback.format_exc())
            writer(status, response_headers)
            self.assertTrue(data)
            self.assertIn("ValueError", data.decode('utf-8'))
            return
        self.fail()

if __name__ == '__main__':
    unittest.main()
| [
"unittest.main",
"traceback.format_exc",
"typing.cast"
] | [((1517, 1532), 'unittest.main', 'unittest.main', ([], {}), '()\n', (1530, 1532), False, 'import unittest\n'), ((1150, 1187), 'typing.cast', 'cast', (['"""StartResponse"""', 'start_response'], {}), "('StartResponse', start_response)\n", (1154, 1187), False, 'from typing import cast\n'), ((1259, 1281), 'traceback.format_exc', 'traceback.format_exc', ([], {}), '()\n', (1279, 1281), False, 'import traceback\n')] |
from functools import reduce
def bfs(root, points):
    """Flood-fill from *root* and return the size of its basin.

    A basin is the set of 4-connected cells whose height is not 9; *points*
    is expected to carry a sentinel border of 9s (built by main()), so no
    bounds checks are needed.  *root* is an (i, j) = (column, row) pair.
    """
    # deque gives O(1) pops from the left; the original `queue = queue[1:]`
    # copied the whole list on every dequeue (O(n) per step).
    from collections import deque
    queue = deque([root])
    visited = {root}
    basin_size = 1
    while queue:
        i, j = queue.popleft()
        for n in ((i - 1, j), (i + 1, j), (i, j - 1), (i, j + 1)):
            if n not in visited and points[n[1]][n[0]] != 9:
                basin_size += 1
                queue.append(n)
                visited.add(n)
    return basin_size
def main():
    """Read the height map from input.txt, report the total low-point risk
    and the product of the three largest basin sizes."""
    # Build the grid with a sentinel border of 9s so neighbour lookups never
    # go out of bounds.
    points = [[9]]
    with open('input.txt') as f:
        for line in f.readlines():
            points.append([9] + list(map(int, list(line.strip()))) + [9])
    points[0] = [9] * len(points[1])
    # Top and bottom sentinel rows share the same list object (read-only, so
    # the aliasing is harmless).
    points.append(points[0])
    size = (len(points[0]), len(points))
    low_points, basin_sizes = [], []
    for j in range(1, size[1] - 1):
        for i in range(1, size[0] - 1):
            # The four orthogonal neighbours of (i, j).
            ps = [points[j - 1][i], points[j + 1][i],
                  points[j][i - 1], points[j][i + 1]]
            # i=i, j=j default-binds the current loop values into the lambda.
            if all(map(lambda p, i=i, j=j: points[j][i] < p, ps)):
                low_points.append(points[j][i])
                basin_sizes.append(bfs((i, j), points))
    basin_sizes.sort()
    basins_prod = reduce(lambda x, y: x * y, basin_sizes[-3:], 1)
    print(f'Risk of low points (1): { sum(low_points) + len(low_points) }')
    print(f'Product of three largest basins (2): { basins_prod }')

if __name__ == '__main__':
    main()
| [
"functools.reduce"
] | [((1197, 1244), 'functools.reduce', 'reduce', (['(lambda x, y: x * y)', 'basin_sizes[-3:]', '(1)'], {}), '(lambda x, y: x * y, basin_sizes[-3:], 1)\n', (1203, 1244), False, 'from functools import reduce\n')] |
from django.urls import path
from . import views
# URL routes for the lists app: create a list, view a list, list a user's lists.
urlpatterns = [
    path('new', views.new_list, name='new_list'),
    path('<list_id>/', views.view_list, name='view_list'),
    path('users/<email>/', views.my_lists, name='my_lists'),
]
"django.urls.path"
] | [((70, 114), 'django.urls.path', 'path', (['"""new"""', 'views.new_list'], {'name': '"""new_list"""'}), "('new', views.new_list, name='new_list')\n", (74, 114), False, 'from django.urls import path\n'), ((120, 173), 'django.urls.path', 'path', (['"""<list_id>/"""', 'views.view_list'], {'name': '"""view_list"""'}), "('<list_id>/', views.view_list, name='view_list')\n", (124, 173), False, 'from django.urls import path\n'), ((179, 234), 'django.urls.path', 'path', (['"""users/<email>/"""', 'views.my_lists'], {'name': '"""my_lists"""'}), "('users/<email>/', views.my_lists, name='my_lists')\n", (183, 234), False, 'from django.urls import path\n')] |
import discord
import random
from asd import *
from mtgsdk import Card
from mtgsdk import Set
from mtgsdk import Type
from mtgsdk import Supertype
from mtgsdk import Subtype
from mtgsdk import Changelog
client=discord.Client()
@client.event
async def on_ready():
print('logged in as')
print(client.user.name)
print(client.user.id)
print('-----')
@client.event
async def on_message(message):
if message.content.startswith("(debug 124)"):
x = message.content.replace("(debug 124)","")
await client.change_presence(status=discord.Status.online, activity=discord.Game(x))
elif message.content == "<@630330114605056011>":
name = "**{}**".format(message.mentions[0].name)
command1 = "**~Jesus**"
Helpmessage = """
**Thanks for adding me to {}**!
***Description***
Hello my child, I shall share images of myself with you if you call upon me using my command.
***Commands***
{}
***Support The Creator***
Please support the creator by sharing me to other servers using the following link:
https://discordapp.com/api/oauth2/authorize?client_id=630330114605056011&permissions=0&scope=bot
or through the following links.
-<:patreon:630306170791395348> https://www.patreon.com/b9king
-<:paypal:630306883105849354> https://www.paypal.com/paypalme2/b9king
Or you can visit him here: https://benignking.xyz :heart:
""".format(message.guild.name,command1)
embed=discord.Embed(title="", url="https://www.patreon.com/b9king", description= Helpmessage, color=0x00ffff)
embed.set_thumbnail(url= message.mentions[0].avatar_url)
await message.channel.send(embed=embed)
#_________________________________________________________________
#__________________MTG CARD INFO__________________________________
if message.content.startswith("~Price"):
message.content = message.content.replace("~Price ","")
x = cardPrice(message.content)
prices = ""
for i in x["prices"]:
prices += i + "\n"
#
embed=discord.Embed(title= x["name"], description = prices , color=0x746A69)
embed.set_thumbnail(url= "http://www.manaleak.com/mtguk/files/2012/12/mtg-money.jpg")
await message.channel.send(embed=embed)
if message.content.startswith("~Card "):
await message.channel.send("I'm working on it")
message.content = message.content.replace("~Card ", "")
cards = Card.where(name=message.content).all()
color = {"White" : 0xd2d2c1, "Green" : 0x008000, "Black" : 0x000000 , "Red" : 0xff0000 , "Blue" : 0x0080c0 , "None" : 0x808080}
if len(cards[0].colors) > 0:
c = cards[0].colors[0]
else:
c = "None"
cost = ""
manas = { "9" : "<:9_:549923808560021505>", "8" : "<:8_:549923808291848194>", "7" : "<:7_:549923035289878558>", "6" : "<:6_:549923035294072847>", "5" : "<:5_:549923035289878548>", "4" : "<:4_:549923035302592512>", "3" : "<:3_:549923035486879744>", "2" : "<:2_:549923035298136104>", "1" : "<:1_:549923035306655744>", "0" : "<:0_:549923035998715914>","X" : "<:x_:549923035382022145>","W" : "<:White:549913967762341888>","B" : "<:Black:549911363607199754>","U" : "<:Blue:549913606347816961>","R" : "<:Red:549910969149816842>","G" : "<:Green:549911048925347850>"}
if cards[0].mana_cost != None:
for i in cards[0].mana_cost:
if i in manas:
cost = cost + manas[i]
desc = cards[0].text
desc = desc.replace("{W}","<:White:549913967762341888>")
desc = desc.replace("{B}","<:Black:549911363607199754>")
desc = desc.replace("{U}","<:Blue:549913606347816961>")
desc = desc.replace("{R}", "<:Red:549910969149816842>")
desc = desc.replace("{G}","<:Green:549911048925347850>")
desc = desc.replace("{X}","<:x_:549923035382022145>")
desc = desc.replace("{9}" , "<:9_:549923808560021505>")
desc = desc.replace("{8}" , "<:8_:549923808291848194>")
desc = desc.replace("{7}" , "<:7_:549923035289878558>")
desc = desc.replace("{6}" , "<:6_:549923035294072847>")
desc = desc.replace("{5}" , "<:5_:549923035289878548>")
desc = desc.replace("{4}" , "<:4_:549923035302592512>")
desc = desc.replace("{3}" , "<:3_:549923035486879744>")
desc = desc.replace("{2}" , "<:2_:549923035298136104>")
desc = desc.replace("{1}" , "<:1_:549923035306655744>")
desc = desc.replace("{C}" , "<:1_:549923035306655744>")
desc = desc.replace("{0}" , "<:0_:549923035998715914>")
desc = desc.replace("{T}" , "<:tap:549932371806388259>")
desc = desc.replace("{1}" , "<:1_:549923035306655744>")
embed = discord.Embed(
title = cards[0].name + " " + cost,
colour = color[c]
)
legal = ""
for i in cards[0].legalities:
legal = legal + i["format"] + " : "
if i["legality"] == "Legal":
legal = legal + "✅"
else:
legal = legal + "🚫"
legal = legal + "\n"
rules = ""
for i in cards[0].rulings:
rules = rules + i["date"] + " : " + i["text"]
rules = rules + "\n"
embed.set_image(url = cards[0].image_url) #card picture
embed.add_field(name = "Text", value = desc, inline = False) #copy pasta for card info
if cards[0].flavor != None:
embed.add_field(name = "Flavor", value = cards[0].flavor, inline = False) #copy pasta for card info
if cards[0].power != None:
embed.add_field(name = "Power/Toughness", value = cards[0].power + "/" + cards[0].toughness , inline = False) #copy pasta for card info
if cards[0].type != None:
embed.add_field(name = "Type", value = cards[0].type , inline = False) #copy pasta for card info
if cards[0].set_name != None:
embed.add_field(name = "Set", value = cards[0].set_name, inline = False) #copy pasta for card info
if cards[0].rarity != None:
embed.add_field(name = "Rarity", value = cards[0].rarity, inline = False) #copy pasta for card info
if cards[0].loyalty != None:
embed.add_field(name = "Loyalty", value = cards[0].loyalty, inline = False) #copy pasta for card info
if cards[0].artist != None:
embed.add_field(name = "Artist", value = cards[0].artist, inline = False) #copy pasta for card info
if cards[0].variations != None:
embed.add_field(name = "Variations", value = cards[0].variations, inline = False) #copy pasta for card info
if cards[0].release_date != None:
embed.add_field(name = "Release Date", value = cards[0].release_date, inline = False) #copy pasta for card info
if rules != None and len(rules) != 0:
embed.add_field(name = "RULINGS", value = rules, inline = False) #copy pasta for card info
if cards[0].legalities != None:
embed.add_field(name = "Legalities", value = legal , inline = False) #copy pasta for card info
embed.set_footer(text="Coded by : B9king", icon_url="https://cdn6.aptoide.com/imgs/8/2/3/823240ba13a239948950f78f38b1f1d9_icon.png?w=256") #My credit
await message.channel.send(embed=embed)
client.run('<KEY>') | [
"discord.Client",
"discord.Embed",
"mtgsdk.Card.where",
"discord.Game"
] | [((226, 242), 'discord.Client', 'discord.Client', ([], {}), '()\n', (240, 242), False, 'import discord\n'), ((2357, 2422), 'discord.Embed', 'discord.Embed', ([], {'title': "x['name']", 'description': 'prices', 'color': '(7629417)'}), "(title=x['name'], description=prices, color=7629417)\n", (2370, 2422), False, 'import discord\n'), ((5175, 5239), 'discord.Embed', 'discord.Embed', ([], {'title': "(cards[0].name + ' ' + cost)", 'colour': 'color[c]'}), "(title=cards[0].name + ' ' + cost, colour=color[c])\n", (5188, 5239), False, 'import discord\n'), ((1635, 1739), 'discord.Embed', 'discord.Embed', ([], {'title': '""""""', 'url': '"""https://www.patreon.com/b9king"""', 'description': 'Helpmessage', 'color': '(65535)'}), "(title='', url='https://www.patreon.com/b9king', description=\n Helpmessage, color=65535)\n", (1648, 1739), False, 'import discord\n'), ((2801, 2833), 'mtgsdk.Card.where', 'Card.where', ([], {'name': 'message.content'}), '(name=message.content)\n', (2811, 2833), False, 'from mtgsdk import Card\n'), ((630, 645), 'discord.Game', 'discord.Game', (['x'], {}), '(x)\n', (642, 645), False, 'import discord\n')] |
#!/usr/bin/env python3
import datetime
import json
import loadPath # Adds the project path.
import linkograph.labels as llabels
import linkograph.linkoCreate as llinkoCreate
import ontologyExtraction as oe
import os
import sys
def print_usage():
print("usage:", sys.argv[0], "<JSON commands filename> ...")
def bulk_transform(method):
for session_filename in sys.argv[1:]:
#########################
# create input linkograph
#########################
label_rules = open("abstraction.json", "r")
labeler = llabels.Labeler(json.load(label_rules))
label_rules.close()
commands = open(session_filename, "r")
json_commands = json.load(commands)
commands.close()
labeled = labeler.labelCommands(json_commands, "NoLabel")
llabels.writeLabelsToJsonFile(labeled, "labeled.json")
ontology_file = open("ontology.json", "r")
inv_labeling_file = open("labeled.json", "r")
lg = llinkoCreate.createLinko(json.load(inv_labeling_file), json.load(ontology_file))
inv_labeling_file.close()
ontology_file.close()
##################################
# transform linkograph to ontology
##################################
if 0 == method:
extracted_ontology = oe.simple_lg_to_ontology(lg)
elif 1 == method:
extracted_ontology = oe.threshold_lg_to_ontology(lg)
else:
print("unknown method:", method)
#########
# cleanup
#########
os.remove("labeled.json")
if "__main__" == __name__:
if 2 > len(sys.argv):
print_usage()
exit()
simple_start_datetime = datetime.datetime.now()
bulk_transform(0)
simple_end_datetime = datetime.datetime.now()
bulk_transform(1)
threshold_end_datetime = datetime.datetime.now()
simple_run_datetime = simple_end_datetime - simple_start_datetime
threshold_run_datetime = threshold_end_datetime - simple_end_datetime
print("simple run time:", simple_run_datetime)
print("threshold run time:", threshold_run_datetime)
| [
"ontologyExtraction.simple_lg_to_ontology",
"datetime.datetime.now",
"ontologyExtraction.threshold_lg_to_ontology",
"json.load",
"linkograph.labels.writeLabelsToJsonFile",
"os.remove"
] | [((1715, 1738), 'datetime.datetime.now', 'datetime.datetime.now', ([], {}), '()\n', (1736, 1738), False, 'import datetime\n'), ((1789, 1812), 'datetime.datetime.now', 'datetime.datetime.now', ([], {}), '()\n', (1810, 1812), False, 'import datetime\n'), ((1866, 1889), 'datetime.datetime.now', 'datetime.datetime.now', ([], {}), '()\n', (1887, 1889), False, 'import datetime\n'), ((700, 719), 'json.load', 'json.load', (['commands'], {}), '(commands)\n', (709, 719), False, 'import json\n'), ((819, 873), 'linkograph.labels.writeLabelsToJsonFile', 'llabels.writeLabelsToJsonFile', (['labeled', '"""labeled.json"""'], {}), "(labeled, 'labeled.json')\n", (848, 873), True, 'import linkograph.labels as llabels\n'), ((1568, 1593), 'os.remove', 'os.remove', (['"""labeled.json"""'], {}), "('labeled.json')\n", (1577, 1593), False, 'import os\n'), ((577, 599), 'json.load', 'json.load', (['label_rules'], {}), '(label_rules)\n', (586, 599), False, 'import json\n'), ((1017, 1045), 'json.load', 'json.load', (['inv_labeling_file'], {}), '(inv_labeling_file)\n', (1026, 1045), False, 'import json\n'), ((1047, 1071), 'json.load', 'json.load', (['ontology_file'], {}), '(ontology_file)\n', (1056, 1071), False, 'import json\n'), ((1325, 1353), 'ontologyExtraction.simple_lg_to_ontology', 'oe.simple_lg_to_ontology', (['lg'], {}), '(lg)\n', (1349, 1353), True, 'import ontologyExtraction as oe\n'), ((1413, 1444), 'ontologyExtraction.threshold_lg_to_ontology', 'oe.threshold_lg_to_ontology', (['lg'], {}), '(lg)\n', (1440, 1444), True, 'import ontologyExtraction as oe\n')] |
import torch
from torch.utils.data import Dataset, DataLoader
from torch.distributions.multivariate_normal import MultivariateNormal
import numpy as np
from tqdm import tqdm
import random
def get_rotation(theta):
rad = np.radians(theta)
c, s = np.cos(rad), np.sin(rad)
R = np.array([[c, -s],
[s, c]])
return R
class CircleDataset(Dataset):
def __init__(self, n_samples, n_centers=9, sigma=0.1, ysigma=0.01, include_zero=True,
target_label=1., seed = None, radius=1.):
super().__init__()
if seed != None:
random.seed(seed)
np.random.seed(seed)
torch.manual_seed(seed)
torch.backends.cudnn.deterministic = True
self.include_zero = include_zero
self.nus = []
if include_zero:
self.nus.append(torch.zeros(2))
self.sigma = sigma
self.ysigma = ysigma
self.radius = radius
for i in range(n_centers-include_zero):
R = get_rotation(i*360/(n_centers-include_zero))
self.nus.append(torch.tensor([radius, 0] @ R, dtype=torch.float))
classes = torch.multinomial(torch.ones(n_centers), n_samples,
replacement=True)
data = []
target = []
for i in range(n_centers):
n_samples_class = torch.sum(classes == i)
if n_samples_class == 0:
continue
dist = MultivariateNormal(self.nus[i],
torch.eye(2)*sigma**2)
data.append(dist.sample([n_samples_class.item()]))
enc = torch.full((n_samples_class, n_centers), -target_label)
enc[:, i] = target_label
target.append(enc + ysigma * torch.randn(n_samples_class)[:, None])
self.data = torch.cat(data).float()
self.target = torch.cat(target).float()
def __getitem__(self, idx):
return self.data[idx], self.target[idx]
def __len__(self):
return self.data.shape[0]
def gaussian_sampler_2d(gaussian_center, cov_matrix):
mu_distr = MultivariateNormal(gaussian_center, cov_matrix)
return mu_distr
def gaussian_data_sampling(gaussian_center, cov_matrix, data_num, seed = None):
if seed is not None:
random.seed(seed)
np.random.seed(seed)
torch.manual_seed(seed)
torch.backends.cudnn.deterministic = True
sampler = gaussian_sampler_2d(gaussian_center, cov_matrix)
data = sampler.sample(sample_shape=torch.Size([data_num]))
return data
def gaussian_mixture_data_sampling(centers, cov_matrix, data_num, seed = None, device = None):
if seed is not None:
random.seed(seed)
np.random.seed(seed)
torch.manual_seed(seed)
torch.backends.cudnn.deterministic = True
index_to_choice = np.random.randint(centers.shape[0], size = data_num)
data_clusters = gaussian_data_sampling(centers[index_to_choice[0]], cov_matrix, 1)
for i in range(1, data_num):
cur_data = gaussian_data_sampling(centers[index_to_choice[i]], cov_matrix, 1)
data_clusters = torch.cat((data_clusters, cur_data), 0)
return data_clusters
def model_1d(data):
real_labels = torch.sin(12*data) + 0.66*torch.cos(25*data) + 3
return real_labels
def noise_labels_model(real_labels, sigma_noise, seed = None):
loc = 0. # mean zero
scale = 1.
normal = torch.distributions.Normal(loc, scale) # create a normal distribution object
if seed is not None:
random.seed(seed)
np.random.seed(seed)
torch.manual_seed(seed)
torch.backends.cudnn.deterministic = True
x = normal.rsample([real_labels.shape[0]])
real_labels = real_labels + x*sigma_noise
return real_labels
def get_sample_regression(n_samples, noise = 0.1, seed = 42):
"""
Returns (x_train, y_train), (x_true, y_true)
"""
gaussian_centers = torch.Tensor([[-1.0/(2**0.5)], [1.0/(2**0.5)]])
data_num = n_samples
data_sigma_noise = noise
sigma = 0.01
init_cov_matrix = torch.eye(1)
cov_matrix_default = sigma*init_cov_matrix
data_1d = gaussian_mixture_data_sampling(gaussian_centers,
cov_matrix_default,
data_num,
seed)
real_labels = model_1d(data_1d[:, 0])
noise_labels = noise_labels_model(real_labels,
sigma_noise = data_sigma_noise,
seed = seed).reshape((real_labels.shape[0], 1))
range_for_real_labels = torch.linspace(-1, 1, steps = 1000)
real_labels_range = model_1d(range_for_real_labels)
# data, range_for_real_labels, real_labels, noise_labels,
return (data_1d[:, 0], noise_labels[:, 0]), (range_for_real_labels, real_labels_range) | [
"numpy.radians",
"torch.sin",
"numpy.array",
"torch.cos",
"torch.sum",
"numpy.sin",
"torch.eye",
"numpy.random.seed",
"torch.randn",
"torch.distributions.Normal",
"torch.Tensor",
"numpy.cos",
"torch.Size",
"torch.cat",
"torch.manual_seed",
"torch.full",
"random.seed",
"torch.tensor... | [((224, 241), 'numpy.radians', 'np.radians', (['theta'], {}), '(theta)\n', (234, 241), True, 'import numpy as np\n'), ((286, 313), 'numpy.array', 'np.array', (['[[c, -s], [s, c]]'], {}), '([[c, -s], [s, c]])\n', (294, 313), True, 'import numpy as np\n'), ((2140, 2187), 'torch.distributions.multivariate_normal.MultivariateNormal', 'MultivariateNormal', (['gaussian_center', 'cov_matrix'], {}), '(gaussian_center, cov_matrix)\n', (2158, 2187), False, 'from torch.distributions.multivariate_normal import MultivariateNormal\n'), ((2878, 2928), 'numpy.random.randint', 'np.random.randint', (['centers.shape[0]'], {'size': 'data_num'}), '(centers.shape[0], size=data_num)\n', (2895, 2928), True, 'import numpy as np\n'), ((3457, 3495), 'torch.distributions.Normal', 'torch.distributions.Normal', (['loc', 'scale'], {}), '(loc, scale)\n', (3483, 3495), False, 'import torch\n'), ((3965, 4016), 'torch.Tensor', 'torch.Tensor', (['[[-1.0 / 2 ** 0.5], [1.0 / 2 ** 0.5]]'], {}), '([[-1.0 / 2 ** 0.5], [1.0 / 2 ** 0.5]])\n', (3977, 4016), False, 'import torch\n'), ((4106, 4118), 'torch.eye', 'torch.eye', (['(1)'], {}), '(1)\n', (4115, 4118), False, 'import torch\n'), ((4677, 4710), 'torch.linspace', 'torch.linspace', (['(-1)', '(1)'], {'steps': '(1000)'}), '(-1, 1, steps=1000)\n', (4691, 4710), False, 'import torch\n'), ((253, 264), 'numpy.cos', 'np.cos', (['rad'], {}), '(rad)\n', (259, 264), True, 'import numpy as np\n'), ((266, 277), 'numpy.sin', 'np.sin', (['rad'], {}), '(rad)\n', (272, 277), True, 'import numpy as np\n'), ((2322, 2339), 'random.seed', 'random.seed', (['seed'], {}), '(seed)\n', (2333, 2339), False, 'import random\n'), ((2348, 2368), 'numpy.random.seed', 'np.random.seed', (['seed'], {}), '(seed)\n', (2362, 2368), True, 'import numpy as np\n'), ((2377, 2400), 'torch.manual_seed', 'torch.manual_seed', (['seed'], {}), '(seed)\n', (2394, 2400), False, 'import torch\n'), ((2727, 2744), 'random.seed', 'random.seed', (['seed'], {}), '(seed)\n', (2738, 2744), 
False, 'import random\n'), ((2753, 2773), 'numpy.random.seed', 'np.random.seed', (['seed'], {}), '(seed)\n', (2767, 2773), True, 'import numpy as np\n'), ((2782, 2805), 'torch.manual_seed', 'torch.manual_seed', (['seed'], {}), '(seed)\n', (2799, 2805), False, 'import torch\n'), ((3161, 3200), 'torch.cat', 'torch.cat', (['(data_clusters, cur_data)', '(0)'], {}), '((data_clusters, cur_data), 0)\n', (3170, 3200), False, 'import torch\n'), ((3567, 3584), 'random.seed', 'random.seed', (['seed'], {}), '(seed)\n', (3578, 3584), False, 'import random\n'), ((3593, 3613), 'numpy.random.seed', 'np.random.seed', (['seed'], {}), '(seed)\n', (3607, 3613), True, 'import numpy as np\n'), ((3622, 3645), 'torch.manual_seed', 'torch.manual_seed', (['seed'], {}), '(seed)\n', (3639, 3645), False, 'import torch\n'), ((592, 609), 'random.seed', 'random.seed', (['seed'], {}), '(seed)\n', (603, 609), False, 'import random\n'), ((622, 642), 'numpy.random.seed', 'np.random.seed', (['seed'], {}), '(seed)\n', (636, 642), True, 'import numpy as np\n'), ((655, 678), 'torch.manual_seed', 'torch.manual_seed', (['seed'], {}), '(seed)\n', (672, 678), False, 'import torch\n'), ((1173, 1194), 'torch.ones', 'torch.ones', (['n_centers'], {}), '(n_centers)\n', (1183, 1194), False, 'import torch\n'), ((1374, 1397), 'torch.sum', 'torch.sum', (['(classes == i)'], {}), '(classes == i)\n', (1383, 1397), False, 'import torch\n'), ((1654, 1709), 'torch.full', 'torch.full', (['(n_samples_class, n_centers)', '(-target_label)'], {}), '((n_samples_class, n_centers), -target_label)\n', (1664, 1709), False, 'import torch\n'), ((2553, 2575), 'torch.Size', 'torch.Size', (['[data_num]'], {}), '([data_num])\n', (2563, 2575), False, 'import torch\n'), ((3266, 3286), 'torch.sin', 'torch.sin', (['(12 * data)'], {}), '(12 * data)\n', (3275, 3286), False, 'import torch\n'), ((849, 863), 'torch.zeros', 'torch.zeros', (['(2)'], {}), '(2)\n', (860, 863), False, 'import torch\n'), ((1087, 1135), 'torch.tensor', 'torch.tensor', 
(['([radius, 0] @ R)'], {'dtype': 'torch.float'}), '([radius, 0] @ R, dtype=torch.float)\n', (1099, 1135), False, 'import torch\n'), ((1847, 1862), 'torch.cat', 'torch.cat', (['data'], {}), '(data)\n', (1856, 1862), False, 'import torch\n'), ((1893, 1910), 'torch.cat', 'torch.cat', (['target'], {}), '(target)\n', (1902, 1910), False, 'import torch\n'), ((3292, 3312), 'torch.cos', 'torch.cos', (['(25 * data)'], {}), '(25 * data)\n', (3301, 3312), False, 'import torch\n'), ((1550, 1562), 'torch.eye', 'torch.eye', (['(2)'], {}), '(2)\n', (1559, 1562), False, 'import torch\n'), ((1788, 1816), 'torch.randn', 'torch.randn', (['n_samples_class'], {}), '(n_samples_class)\n', (1799, 1816), False, 'import torch\n')] |
import datetime
import typing
from src import core
from src.todo import domain, adapter
__all__ = ("SqlAlchemyTodoService",)
class SqlAlchemyTodoService(domain.TodoService):
def __init__(self, /, uow: core.SqlAlchemyUnitOfWork):
self._uow = uow
def all(self, /, user_id: int) -> typing.List[domain.Todo]:
with self._uow:
return self._repo.all(user_id)
def add_todo(self, *, user_id: int, todo: domain.Todo) -> domain.Todo:
with self._uow:
new_todo = self._repo.add(user_id=user_id, item=todo)
self._uow.commit()
return new_todo
def delete_todo(self, *, user_id: int, todo_id: int) -> None:
with self._uow:
self._repo.remove(user_id=user_id, item_id=todo_id)
self._uow.commit()
def get_by_id(self, *, user_id: int, todo_id: int) -> typing.Optional[domain.Todo]:
assert todo_id > 0, f"Todo id values should be positive, but got {todo_id!r}."
with self._uow:
todo = self._repo.get_by_id(user_id=user_id, todo_id=todo_id)
if todo and todo.user_id == user_id:
return todo
else:
raise core.exception.AuthException("Todo belongs to another user")
def get_current_todos(
self,
*,
user_id: int,
category: str,
today: datetime.date = datetime.date.today(),
) -> typing.List[domain.Todo]:
return [
todo
for todo in self.all(user_id)
if todo.display(today) and todo.category == category
]
def get_todos_completed_today(
self, *, user_id: int, today: datetime.date = datetime.date.today()
) -> typing.List[domain.Todo]:
return [todo for todo in self.all(user_id) if todo.date_completed == today]
def mark_complete(self, *, user_id: int, todo_id: int) -> None:
with self._uow:
self._repo.mark_completed(user_id=user_id, item_id=todo_id)
self._uow.commit()
def update_todo(self, *, user_id: int, todo: domain.Todo) -> domain.Todo:
with self._uow:
updated_todo = self._repo.update(user_id=user_id, item=todo)
self._uow.commit()
return updated_todo
@property
def _repo(self) -> domain.TodoRepository:
return adapter.SqlAlchemyTodoRepository(self._uow.session) | [
"datetime.date.today",
"src.core.exception.AuthException",
"src.todo.adapter.SqlAlchemyTodoRepository"
] | [((1385, 1406), 'datetime.date.today', 'datetime.date.today', ([], {}), '()\n', (1404, 1406), False, 'import datetime\n'), ((1684, 1705), 'datetime.date.today', 'datetime.date.today', ([], {}), '()\n', (1703, 1705), False, 'import datetime\n'), ((2336, 2387), 'src.todo.adapter.SqlAlchemyTodoRepository', 'adapter.SqlAlchemyTodoRepository', (['self._uow.session'], {}), '(self._uow.session)\n', (2368, 2387), False, 'from src.todo import domain, adapter\n'), ((1195, 1255), 'src.core.exception.AuthException', 'core.exception.AuthException', (['"""Todo belongs to another user"""'], {}), "('Todo belongs to another user')\n", (1223, 1255), False, 'from src import core\n')] |
# -*- coding: utf-8 -*-
"""
Created on Sat May 23 11:28:30 2020
@author: rener
"""
import numpy as np
import pandas as pd
import os
from datetime import date
import time
import sys
dir_path = os.path.dirname(os.path.realpath(__file__))
os.chdir(dir_path)
#%% For the various companies we have data going back differently far.
#
#
frames=[]
for file in os.listdir('Stocks'):
frames.append(
pd.read_csv('Stocks/' +file,index_col=0))
# For the various companies we have data going back differently far.
# So there is decision to make: We could discard look for the shortest
# available timeseries, and trim all other datasets to the same length.
# But then whenever we compute a covariance for two longer datasets
# we will not use all available information.
# So we only trim every pair in the covariance computing function.
df=pd.concat(frames)
# Add column with Estimated Average of the day
df['EstAvg'] = df[['open','high','low','close']].apply(np.mean,axis=1)
df.to_csv('fulltable.csv')
#%%
pivot = df.pivot(columns = 'symbol', values = 'EstAvg')
# Note that we are taking the symbols from the Pivot Table.
# This is the case, because when the Alphavantage API does not give
# us a dataset for some symbol, it does not appear in the pivot table,
# so we avoid a Key Error.
symbols = pivot.columns
# Next we initialize an 'empty' dataframe, and start filling it.
CovMatrix = pd.DataFrame(index=symbols,columns=symbols)
#%%
def covariance(a,b):
return np.mean((a-np.mean(a)*(b-np.mean(b))))
for col in CovMatrix:
for row in CovMatrix.index:
CovMatrix[row][col]=covariance(pivot[row], pivot[col])
| [
"numpy.mean",
"os.listdir",
"pandas.read_csv",
"os.chdir",
"os.path.realpath",
"pandas.DataFrame",
"pandas.concat"
] | [((238, 256), 'os.chdir', 'os.chdir', (['dir_path'], {}), '(dir_path)\n', (246, 256), False, 'import os\n'), ((362, 382), 'os.listdir', 'os.listdir', (['"""Stocks"""'], {}), "('Stocks')\n", (372, 382), False, 'import os\n'), ((856, 873), 'pandas.concat', 'pd.concat', (['frames'], {}), '(frames)\n', (865, 873), True, 'import pandas as pd\n'), ((1411, 1455), 'pandas.DataFrame', 'pd.DataFrame', ([], {'index': 'symbols', 'columns': 'symbols'}), '(index=symbols, columns=symbols)\n', (1423, 1455), True, 'import pandas as pd\n'), ((210, 236), 'os.path.realpath', 'os.path.realpath', (['__file__'], {}), '(__file__)\n', (226, 236), False, 'import os\n'), ((411, 453), 'pandas.read_csv', 'pd.read_csv', (["('Stocks/' + file)"], {'index_col': '(0)'}), "('Stocks/' + file, index_col=0)\n", (422, 453), True, 'import pandas as pd\n'), ((1503, 1513), 'numpy.mean', 'np.mean', (['a'], {}), '(a)\n', (1510, 1513), True, 'import numpy as np\n'), ((1517, 1527), 'numpy.mean', 'np.mean', (['b'], {}), '(b)\n', (1524, 1527), True, 'import numpy as np\n')] |
import bs4
import urllib
from base_online_scraper import base_online_scraper as scraper
BASE_URL = 'http://catalog.northeastern.edu'
INITIAL_PATH = '/course-descriptions/'
fp = urllib.urlopen(BASE_URL + INITIAL_PATH)
soup = bs4.BeautifulSoup(fp, 'lxml')
nav_menu = soup.find("div", {"id": "atozindex"}).find_all('a', href=True)
scraper = scraper('')
for a in nav_menu:
scraper.url = BASE_URL + a['href']
scraper.scrape()
| [
"bs4.BeautifulSoup",
"base_online_scraper.base_online_scraper.scrape",
"urllib.urlopen",
"base_online_scraper.base_online_scraper"
] | [((179, 218), 'urllib.urlopen', 'urllib.urlopen', (['(BASE_URL + INITIAL_PATH)'], {}), '(BASE_URL + INITIAL_PATH)\n', (193, 218), False, 'import urllib\n'), ((226, 255), 'bs4.BeautifulSoup', 'bs4.BeautifulSoup', (['fp', '"""lxml"""'], {}), "(fp, 'lxml')\n", (243, 255), False, 'import bs4\n'), ((341, 352), 'base_online_scraper.base_online_scraper', 'scraper', (['""""""'], {}), "('')\n", (348, 352), True, 'from base_online_scraper import base_online_scraper as scraper\n'), ((416, 432), 'base_online_scraper.base_online_scraper.scrape', 'scraper.scrape', ([], {}), '()\n', (430, 432), True, 'from base_online_scraper import base_online_scraper as scraper\n')] |
import pandas as pd
import numpy as np
import re
import spacy
import string
import sklearn
from html.parser import HTMLParser
from joblib import load
import tensorflow as tf
nlp = spacy.load('en_core_web_sm')
tfidf = load('tfidf.joblib')
wordlist = load('wordlist.joblib')
tf.keras.backend.clear_session()
model = tf.keras.models.load_model('simple_nn.h5')
model._make_predict_function()
graph = tf.get_default_graph()
class MLStripper(HTMLParser):
def __init__(self):
super().__init__()
self.reset()
self.fed = []
def handle_data(self, d):
self.fed.append(d)
def get_data(self):
return ' '.join(self.fed)
def strip_tags(html):
s = MLStripper()
s.feed(html)
return s.get_data()
def preprocess(text):
"""cleans a single text input."""
text = strip_tags(text)
text = re.sub('\s+', ' ', text)
text = text.lower()
text = str(re.sub('[{}]'.format(string.punctuation), '', text))
text = text.strip()
return text
def vectorize(text):
"""vectorizes a single text input."""
tokens = nlp.tokenizer(text)
lemmas = [' '.join([token.lemma_ for token in tokens])]
vec = tfidf.transform(lemmas).todense()
df = pd.DataFrame(vec, columns=tfidf.get_feature_names())
return(df[wordlist])
def model_predict(input):
global graph
with graph.as_default():
output = model.predict(input)
return output
def model_predict_single(text):
"""applies cleaning and vectorizing to a single text input, produces a prediction."""
text = preprocess(text)
predicted = np.float64(model.predict([vectorize(
text)])[0][0])
return predicted
def model_predict_df(df_in):
"""applies cleaning and vectorizing to a dataframe, produces a Series of predictions."""
df = df_in.copy()
df['clean_text'] = df['text'].apply(preprocess)
X_test = df['clean_text'].copy()
X_test = X_test.apply(nlp.tokenizer)
X_test = X_test.apply(lambda x: [token.lemma_ for token in x])
X_vec = tfidf.transform(X_test.astype(str))
X_vec_frame = pd.DataFrame(X_vec.todense(), columns=tfidf.get_feature_names())
X_vec_frame = X_vec_frame[wordlist]
X_pred = model_predict(X_vec_frame)
X_pred.shape = (9970)
model_pred = pd.Series(data=X_pred, name='model_output')
return model_pred
| [
"pandas.Series",
"spacy.load",
"tensorflow.keras.models.load_model",
"joblib.load",
"re.sub",
"tensorflow.keras.backend.clear_session",
"tensorflow.get_default_graph"
] | [((183, 211), 'spacy.load', 'spacy.load', (['"""en_core_web_sm"""'], {}), "('en_core_web_sm')\n", (193, 211), False, 'import spacy\n'), ((220, 240), 'joblib.load', 'load', (['"""tfidf.joblib"""'], {}), "('tfidf.joblib')\n", (224, 240), False, 'from joblib import load\n'), ((252, 275), 'joblib.load', 'load', (['"""wordlist.joblib"""'], {}), "('wordlist.joblib')\n", (256, 275), False, 'from joblib import load\n'), ((277, 309), 'tensorflow.keras.backend.clear_session', 'tf.keras.backend.clear_session', ([], {}), '()\n', (307, 309), True, 'import tensorflow as tf\n'), ((318, 360), 'tensorflow.keras.models.load_model', 'tf.keras.models.load_model', (['"""simple_nn.h5"""'], {}), "('simple_nn.h5')\n", (344, 360), True, 'import tensorflow as tf\n'), ((400, 422), 'tensorflow.get_default_graph', 'tf.get_default_graph', ([], {}), '()\n', (420, 422), True, 'import tensorflow as tf\n'), ((848, 873), 're.sub', 're.sub', (['"""\\\\s+"""', '""" """', 'text'], {}), "('\\\\s+', ' ', text)\n", (854, 873), False, 'import re\n'), ((2276, 2319), 'pandas.Series', 'pd.Series', ([], {'data': 'X_pred', 'name': '"""model_output"""'}), "(data=X_pred, name='model_output')\n", (2285, 2319), True, 'import pandas as pd\n')] |
from lit.fields.base import Field
from lit.fields.base import TextType
class TextField(Field):
sql_type = TextType()
py_type = str
| [
"lit.fields.base.TextType"
] | [((111, 121), 'lit.fields.base.TextType', 'TextType', ([], {}), '()\n', (119, 121), False, 'from lit.fields.base import TextType\n')] |
from datetime import datetime
from typing import Any
from typing import Dict
from typing import Iterable
from typing import Optional
from typing import Type
from evidently.analyzers.base_analyzer import Analyzer
from evidently.analyzers.cat_target_drift_analyzer import CatTargetDriftAnalyzer
from evidently.model_profile.sections.base_profile_section import ProfileSection
class CatTargetDriftProfileSection(ProfileSection):
def part_id(self) -> str:
return 'cat_target_drift'
def __init__(self) -> None:
super().__init__()
self.analyzers_types = [CatTargetDriftAnalyzer]
self._result = None
def analyzers(self) -> Iterable[Type[Analyzer]]:
return self.analyzers_types
def calculate(self, reference_data, current_data, column_mapping, analyzers_results) -> None:
result = CatTargetDriftAnalyzer.get_results(analyzers_results)
result_json: Dict[str, Any] = result.columns.as_dict()
result_json['metrics'] = {}
if result.target_metrics:
result_json['metrics']['target_name'] = result.target_metrics.column_name
result_json['metrics']['target_type'] = 'cat'
result_json['metrics']['target_drift'] = result.target_metrics.drift
if result.prediction_metrics:
result_json['metrics']['prediction_name'] = result.prediction_metrics.column_name
result_json['metrics']['prediction_type'] = 'cat'
result_json['metrics']['prediction_drift'] = result.prediction_metrics.drift
self._result = {
'name': self.part_id(),
'datetime': str(datetime.now()),
'data': result_json
}
def get_results(self) -> Optional[dict]:
return self._result
| [
"datetime.datetime.now",
"evidently.analyzers.cat_target_drift_analyzer.CatTargetDriftAnalyzer.get_results"
] | [((843, 896), 'evidently.analyzers.cat_target_drift_analyzer.CatTargetDriftAnalyzer.get_results', 'CatTargetDriftAnalyzer.get_results', (['analyzers_results'], {}), '(analyzers_results)\n', (877, 896), False, 'from evidently.analyzers.cat_target_drift_analyzer import CatTargetDriftAnalyzer\n'), ((1630, 1644), 'datetime.datetime.now', 'datetime.now', ([], {}), '()\n', (1642, 1644), False, 'from datetime import datetime\n')] |
import re, logging
from django import forms
from django.forms import ModelForm
from django.utils.translation import ugettext as _
from django.contrib.localflavor.us.forms import USStateSelect,\
USPhoneNumberField
from models import Preference, ShippingWeight, ShippingPrice, ShippingItem, TaxState, DnsShop, EmailNotification
from preferences.models import ShopPolicies
from auth.models import User
from users.models import Profile
class GeneralPreferenceForm(ModelForm):
email = forms.EmailField(required=False)
phone = USPhoneNumberField(required=False)
class Meta:
model = Preference
fields = ['name_store', 'email', 'phone']
class ProfileForm(ModelForm):
state = forms.CharField(widget=USStateSelect)
class Meta:
model = Profile
fields = ['street_address', 'zip', 'city', 'state', 'country', ]
def clean_zip(self):
zip = self.cleaned_data.get("zip", "")
if zip.strip() == "": raise forms.ValidationError("Zip is a required field.")
if not (re.match("[0-9]{5}(-[0-9]{4})?$", zip)): raise forms.ValidationError("Invalid Zip code. Valid formats are XXXXX or XXXXX-XXXX")
return zip
def clean_country(self):
country = self.cleaned_data.get("country", "")
if country.strip() == "": raise forms.ValidationError("Country is a required field.")
return country
def clean_street_address(self):
street = self.cleaned_data.get("street_address", "")
if street.strip() == "": raise forms.ValidationError("Street is a required field.")
return street
def clean_city(self):
city = self.cleaned_data.get("city", "")
if city.strip() == "": raise forms.ValidationError("City is a required field.")
return city
class TaxesPreferenceForm(ModelForm):
class Meta:
model = Preference
fields = ['taxes_same_state_store', 'taxes_to_shipping_fees']
class TaxStateForm(ModelForm):
#state = forms.CharField(widget=USStateSelect)
tax = forms.DecimalField(help_text=_("Enter a state tax rate number (between 1 and 100)"))
class Meta:
model = TaxState
exclude = ['shop']
def __init__(self, shop, *args, ** kwargs):
self.shop = shop
super(TaxStateForm, self).__init__(*args, ** kwargs)
def clean_state(self):
state = self.cleaned_data['state']
try:
TaxState.objects.get(shop=self.shop, state=state)
except TaxState.DoesNotExist:
return state
raise forms.ValidationError(_("A tax for state %s already exists." % state))
def clean_tax(self):
tax = self.cleaned_data['tax']
if tax < 0:
raise forms.ValidationError(_("A tax has to be more or equal 0%"))
elif tax > 100:
raise forms.ValidationError(_("A tax has to be less than 100%"))
return tax
class TaxStateEditForm(ModelForm):
class Meta:
model = TaxState
exclude = ['shop', 'state']
def __init__(self, shop, *args, ** kwargs):
self.shop = shop
super(TaxStateEditForm, self).__init__(*args, ** kwargs)
def clean_tax(self):
tax = self.cleaned_data['tax']
if tax < 0:
raise forms.ValidationError(_("A tax has to be more or equal 0%"))
elif tax > 100:
raise forms.ValidationError(_("A tax has to be less than 100%"))
return tax
class AuctionsPreferenceForm(ModelForm):
class Meta:
model = Preference
fields = ['allow_sessions', 'allow_open_auctions', 'default_days', 'open_auto_extend', 'session_auto_extend']
class DnsShopForm(ModelForm):
class Meta:
model = DnsShop
exclude = ['shop']
def clean_dns(self):
dns = self.cleaned_data['dns']
try:
DnsShop.objects.get(dns=dns)
except DnsShop.DoesNotExist:
return dns
raise forms.ValidationError(_("A shop with that dns already exists."))
class ShippingWeightForm(ModelForm):
class Meta:
model = ShippingWeight
exclude = ['shop']
class ShippingPriceForm(ModelForm):
class Meta:
model = ShippingPrice
exclude = ['shop']
class ShippingItemForm(ModelForm):
class Meta:
model = ShippingItem
exclude = ['shop']
class EmailNotificationForm(ModelForm):
class Meta:
model = EmailNotification
fields = ['subject', 'body']
class ShopPoliciesForm(ModelForm):
class Meta:
model = ShopPolicies
fields = ['refund_policy', 'privacy_policy', 'terms_of_service']
class MarketingForm(ModelForm):
class Meta:
model = Preference
fields = ['google_analytics_account_number']
def clean_google_analytics_account_number(self):
google_analytics_account_number = self.cleaned_data['google_analytics_account_number']
if re.match(r"^\w{2}\-\d{4,8}\-\d$", google_analytics_account_number) is None:
raise forms.ValidationError('Invalid analitycs account number')
return google_analytics_account_number
class UsernameChangeForm(forms.ModelForm):
username = forms.RegexField(label=_("Username"), max_length=30, regex=r'^\w+$',
help_text = _("Required. 30 characters or fewer. Alphanumeric characters only (letters, digits and underscores)."),
error_message = _("This value must contain only letters, numbers and underscores."))
class Meta:
model = User
fields = ['username'] | [
"models.TaxState.objects.get",
"models.DnsShop.objects.get",
"django.forms.CharField",
"django.contrib.localflavor.us.forms.USPhoneNumberField",
"re.match",
"django.forms.ValidationError",
"django.utils.translation.ugettext",
"django.forms.EmailField"
] | [((491, 523), 'django.forms.EmailField', 'forms.EmailField', ([], {'required': '(False)'}), '(required=False)\n', (507, 523), False, 'from django import forms\n'), ((536, 570), 'django.contrib.localflavor.us.forms.USPhoneNumberField', 'USPhoneNumberField', ([], {'required': '(False)'}), '(required=False)\n', (554, 570), False, 'from django.contrib.localflavor.us.forms import USStateSelect, USPhoneNumberField\n'), ((712, 749), 'django.forms.CharField', 'forms.CharField', ([], {'widget': 'USStateSelect'}), '(widget=USStateSelect)\n', (727, 749), False, 'from django import forms\n'), ((981, 1030), 'django.forms.ValidationError', 'forms.ValidationError', (['"""Zip is a required field."""'], {}), "('Zip is a required field.')\n", (1002, 1030), False, 'from django import forms\n'), ((1056, 1094), 're.match', 're.match', (['"""[0-9]{5}(-[0-9]{4})?$"""', 'zip'], {}), "('[0-9]{5}(-[0-9]{4})?$', zip)\n", (1064, 1094), False, 'import re, logging\n'), ((1103, 1188), 'django.forms.ValidationError', 'forms.ValidationError', (['"""Invalid Zip code. Valid formats are XXXXX or XXXXX-XXXX"""'], {}), "('Invalid Zip code. 
Valid formats are XXXXX or XXXXX-XXXX'\n )\n", (1124, 1188), False, 'from django import forms\n'), ((1335, 1388), 'django.forms.ValidationError', 'forms.ValidationError', (['"""Country is a required field."""'], {}), "('Country is a required field.')\n", (1356, 1388), False, 'from django import forms\n'), ((1553, 1605), 'django.forms.ValidationError', 'forms.ValidationError', (['"""Street is a required field."""'], {}), "('Street is a required field.')\n", (1574, 1605), False, 'from django import forms\n'), ((1745, 1795), 'django.forms.ValidationError', 'forms.ValidationError', (['"""City is a required field."""'], {}), "('City is a required field.')\n", (1766, 1795), False, 'from django import forms\n'), ((2091, 2145), 'django.utils.translation.ugettext', '_', (['"""Enter a state tax rate number (between 1 and 100)"""'], {}), "('Enter a state tax rate number (between 1 and 100)')\n", (2092, 2145), True, 'from django.utils.translation import ugettext as _\n'), ((2481, 2530), 'models.TaxState.objects.get', 'TaxState.objects.get', ([], {'shop': 'self.shop', 'state': 'state'}), '(shop=self.shop, state=state)\n', (2501, 2530), False, 'from models import Preference, ShippingWeight, ShippingPrice, ShippingItem, TaxState, DnsShop, EmailNotification\n'), ((2630, 2677), 'django.utils.translation.ugettext', '_', (["('A tax for state %s already exists.' % state)"], {}), "('A tax for state %s already exists.' 
% state)\n", (2631, 2677), True, 'from django.utils.translation import ugettext as _\n'), ((3963, 3991), 'models.DnsShop.objects.get', 'DnsShop.objects.get', ([], {'dns': 'dns'}), '(dns=dns)\n', (3982, 3991), False, 'from models import Preference, ShippingWeight, ShippingPrice, ShippingItem, TaxState, DnsShop, EmailNotification\n'), ((4088, 4129), 'django.utils.translation.ugettext', '_', (['"""A shop with that dns already exists."""'], {}), "('A shop with that dns already exists.')\n", (4089, 4129), True, 'from django.utils.translation import ugettext as _\n'), ((5086, 5156), 're.match', 're.match', (['"""^\\\\w{2}\\\\-\\\\d{4,8}\\\\-\\\\d$"""', 'google_analytics_account_number'], {}), "('^\\\\w{2}\\\\-\\\\d{4,8}\\\\-\\\\d$', google_analytics_account_number)\n", (5094, 5156), False, 'import re, logging\n'), ((5180, 5237), 'django.forms.ValidationError', 'forms.ValidationError', (['"""Invalid analitycs account number"""'], {}), "('Invalid analitycs account number')\n", (5201, 5237), False, 'from django import forms\n'), ((5385, 5398), 'django.utils.translation.ugettext', '_', (['"""Username"""'], {}), "('Username')\n", (5386, 5398), True, 'from django.utils.translation import ugettext as _\n'), ((5451, 5558), 'django.utils.translation.ugettext', '_', (['"""Required. 30 characters or fewer. Alphanumeric characters only (letters, digits and underscores)."""'], {}), "('Required. 30 characters or fewer. 
Alphanumeric characters only (letters, digits and underscores).'\n )\n", (5452, 5558), True, 'from django.utils.translation import ugettext as _\n'), ((5579, 5646), 'django.utils.translation.ugettext', '_', (['"""This value must contain only letters, numbers and underscores."""'], {}), "('This value must contain only letters, numbers and underscores.')\n", (5580, 5646), True, 'from django.utils.translation import ugettext as _\n'), ((2813, 2850), 'django.utils.translation.ugettext', '_', (['"""A tax has to be more or equal 0%"""'], {}), "('A tax has to be more or equal 0%')\n", (2814, 2850), True, 'from django.utils.translation import ugettext as _\n'), ((3400, 3437), 'django.utils.translation.ugettext', '_', (['"""A tax has to be more or equal 0%"""'], {}), "('A tax has to be more or equal 0%')\n", (3401, 3437), True, 'from django.utils.translation import ugettext as _\n'), ((2916, 2951), 'django.utils.translation.ugettext', '_', (['"""A tax has to be less than 100%"""'], {}), "('A tax has to be less than 100%')\n", (2917, 2951), True, 'from django.utils.translation import ugettext as _\n'), ((3503, 3538), 'django.utils.translation.ugettext', '_', (['"""A tax has to be less than 100%"""'], {}), "('A tax has to be less than 100%')\n", (3504, 3538), True, 'from django.utils.translation import ugettext as _\n')] |
# This Source Code Form is subject to the terms of the Mozilla Public
# License, v. 2.0. If a copy of the MPL was not distributed with this
# file, You can obtain one at https://www.mozilla.org/en-US/MPL/2.0/.
import warnings
from unittest import TestCase
import pytest
def pytest_addoption(parser):
parser.addoption(
'--count',
action='store',
default=1,
type=int,
help='Number of times to repeat each test')
parser.addoption(
'--repeat-scope',
action='store',
default='function',
type=str,
choices=('function', 'class', 'module', 'session'),
help='Scope for repeating tests')
parser.addoption(
'--ignore_repeat_mark',
action='store_true',
default=False,
help='ignore repeat mark if set')
def pytest_configure(config):
config.addinivalue_line(
'markers',
'repeat(n): run the given test function `n` times.')
class UnexpectedError(Exception):
pass
@pytest.fixture
def __pytest_repeat_step_number(request):
marker = request.node.get_closest_marker("repeat")
count = marker and marker.args[0] or request.config.option.count
if count > 1:
try:
return request.param
except AttributeError:
if issubclass(request.cls, TestCase):
warnings.warn(
"Repeating unittest class tests not supported")
else:
raise UnexpectedError(
"This call couldn't work with pytest-repeat. "
"Please consider raising an issue with your usage.")
@pytest.hookimpl(trylast=True)
def pytest_generate_tests(metafunc):
count = metafunc.config.option.count
ignore_repeat_mark = metafunc.config.option.ignore_repeat_mark
m = metafunc.definition.get_closest_marker('repeat')
if ignore_repeat_mark:
pass
else:
if m is not None:
count = int(m.args[0])
if count > 1:
metafunc.fixturenames.append("__pytest_repeat_step_number")
def make_progress_id(i, n=count):
return '{0}-{1}'.format(i + 1, n)
scope = metafunc.config.option.repeat_scope
metafunc.parametrize(
'__pytest_repeat_step_number',
range(count),
indirect=True,
ids=make_progress_id,
scope=scope
)
| [
"warnings.warn",
"pytest.hookimpl"
] | [((1646, 1675), 'pytest.hookimpl', 'pytest.hookimpl', ([], {'trylast': '(True)'}), '(trylast=True)\n', (1661, 1675), False, 'import pytest\n'), ((1363, 1424), 'warnings.warn', 'warnings.warn', (['"""Repeating unittest class tests not supported"""'], {}), "('Repeating unittest class tests not supported')\n", (1376, 1424), False, 'import warnings\n')] |
__copyright__ = "Copyright (C) 2019 <NAME>"
__license__ = """
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
THE SOFTWARE.
"""
import numpy as np
import loopy as lp
from pystella.field import Field, index_fields
from pystella.elementwise import ElementWiseMap
from pymbolic import var
from pymbolic.primitives import Subscript, Variable
__doc__ = """
.. currentmodule:: pystella.step
.. autoclass:: Stepper
.. currentmodule:: pystella
Low-storage Runge-Kutta methods
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
.. currentmodule:: pystella.step
.. autoclass:: LowStorageRKStepper
.. currentmodule:: pystella
.. autoclass:: LowStorageRK54
.. autoclass:: LowStorageRK3Williamson
.. autoclass:: LowStorageRK3Inhomogeneous
.. autoclass:: LowStorageRK3SSP
Classical Runge-Kutta methods
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
"Classical" Runge-Kutta methods are also implemented, though are not recommended
over the low-storage methods above.
.. currentmodule:: pystella.step
.. autoclass:: RungeKuttaStepper
.. currentmodule:: pystella
.. autoclass:: RungeKutta4
.. autoclass:: RungeKutta3SSP
.. autoclass:: RungeKutta3Heun
.. autoclass:: RungeKutta3Nystrom
.. autoclass:: RungeKutta3Ralston
.. autoclass:: RungeKutta2Midpoint
.. autoclass:: RungeKutta2Ralston
"""
class Stepper:
"""
The base class for time steppers, with no implementation of a particular time
stepper.
:arg input: May be one of the following:
* a :class:`dict` whose values represent the right-hand side
of the ODEs to solve, i.e., `(key, value)` pairs corresponding to
:math:`(y, f)` such that
.. math::
\\frac{\\mathrm{d} y}{\\mathrm{d} t} = f,
where :math:`f` is an arbitrary function of kernel data.
Both keys and values must be :mod:`pymbolic` expressions.
* a :class:`~pystella.Sector`. In this case, the right-hand side
dictionary will be obtained from :attr:`~pystella.Sector.rhs_dict`.
* a :class:`list` of :class:`~pystella.Sector`\\ s. In this case,
the input obtained from each :class:`~pystella.Sector`
(as described above) will be combined.
The following keyword arguments are recognized:
:arg MapKernel: The kernel class which each substep/stage will be an
instance of---i.e., one of :class:`~pystella.ElementWiseMap` or its
subclasses. Defaults to :class:`~pystella.ElementWiseMap`.
:arg dt: A :class:`float` fixing the value of the timestep interval.
Defaults to *None*, in which case it is not fixed at kernel creation.
The remaining arguments are passed to :meth:`MapKernel` for
each substep of the timestepper (i.e., see the documentation of
:class:`~pystella.ElementWiseMap`).
.. automethod:: __call__
.. attribute:: num_stages
The number of substeps/stages per timestep.
.. attribute:: expected_order
The expected convergence order of *global* error, i.e.
:math:`n` such that the global error is :math:`\\mathcal{O}(\\Delta t^n)`.
.. attribute:: num_unknowns
The number of unknown degrees of freedom which are evolved.
"""
num_stages = None
expected_order = None
num_copies = None
def make_steps(self, MapKernel=ElementWiseMap, **kwargs):
raise NotImplementedError
def __init__(self, input, MapKernel=ElementWiseMap, **kwargs):
single_stage = kwargs.pop("single_stage", True)
from pystella import Sector
if isinstance(input, Sector):
self.rhs_dict = input.rhs_dict
elif isinstance(input, list):
self.rhs_dict = dict(i for s in input for i in s.rhs_dict.items())
elif isinstance(input, dict):
self.rhs_dict = input
if not single_stage:
prepend_with = (self.num_copies,)
else:
prepend_with = None
args = kwargs.pop("args", [...])
args = args + [lp.ValueArg("dt")]
from pystella import get_field_args
inferred_args = get_field_args(self.rhs_dict, prepend_with=prepend_with)
from pystella.elementwise import append_new_args
self.args = append_new_args(args, inferred_args)
dt = kwargs.pop("dt", None)
fixed_parameters = kwargs.pop("fixed_parameters", dict())
if dt is not None:
fixed_parameters.update(dict(dt=dt))
self.num_unknowns = len(self.rhs_dict.keys())
self.steps = self.make_steps(**kwargs, fixed_parameters=fixed_parameters)
def __call__(self, stage, queue=None, **kwargs):
"""
Calls substep/stage ``stage`` (:attr:`steps[stage]`) of the timestepper,
i.e., :func:`pystella.ElementWiseMap.__call__` for the kernel for
substep/stage ``stage``.
:arg stage: The substep/stage of time timestepper to call.
:returns: The :class:`pyopencl.Event` associated with the kernel invocation.
"""
evt, _ = self.steps[stage](queue, **kwargs)
return evt
class RungeKuttaStepper(Stepper):
"""
The base implementation of classical, explicit Runge-Kutta time steppers,
which operate by storing and operating on multiple copies of each unknown
array. Subclasses must provide an implementation of :meth:`step_statements`
which returns a key-value pair implementing a specific substep of the
particular timestepper.
.. warning::
To minimize the required storage per unknown (i.e., number of
temporaries), the implementation of most subclasses overwrite arrays that
are being read as input to compute right-hand sides. This means that any
non-local (stencil-type) operations must be precomputed and cached
*globally* (unless otherwise noted).
:raises ValueError: if the keys of :attr:`rhs_dict` are not
:class:`~pystella.Field`\\ s (or :class:`pymbolic.primitives.Subscript`\\ s
thereof). This is required for :meth:`make_steps` to be able to prepend
unknown arrays' subscripts with the index corresponding to the temporary
storage axis.
"""
def __init__(self, input, **kwargs):
super().__init__(input, single_stage=False, **kwargs)
def step_statements(self, stage, f, dt, rhs):
raise NotImplementedError
def make_steps(self, MapKernel=ElementWiseMap, **kwargs):
rhs = var("rhs")
dt = var("dt")
q = var("q")
fixed_parameters = kwargs.pop("fixed_parameters", dict())
rhs_statements = {rhs[i]: index_fields(value, prepend_with=(q,))
for i, value in enumerate(self.rhs_dict.values())}
steps = []
for stage in range(self.num_stages):
RK_dict = {}
for i, f in enumerate(self.rhs_dict.keys()):
# ensure that key is either a Field or a Subscript of a Field
# so that index_fields can prepend the q index
key_has_field = False
if isinstance(f, Field):
key_has_field = True
elif isinstance(f, Subscript):
if isinstance(f.aggregate, Field):
key_has_field = True
if not key_has_field:
raise ValueError("rhs_dict keys must be Field instances")
statements = self.step_statements(stage, f, dt, rhs[i])
for k, v in statements.items():
RK_dict[k] = v
fixed_parameters.update(q=0 if stage == 0 else 1)
options = lp.Options(enforce_variable_access_ordered="no_check")
step = MapKernel(RK_dict, tmp_instructions=rhs_statements,
args=self.args, **kwargs, options=options,
fixed_parameters=fixed_parameters)
steps.append(step)
return steps
class RungeKutta4(RungeKuttaStepper):
"""
The classical, four-stage, fourth-order Runge-Kutta method.
Requires unknown arrays to have temporary storage axes of length three.
"""
num_stages = 4
expected_order = 4
num_copies = 3
def step_statements(self, stage, f, dt, rhs):
fq = [index_fields(f, prepend_with=(q,)) for q in range(3)]
if stage == 0:
return {fq[1]: fq[0] + dt/2 * rhs,
fq[2]: fq[0] + dt/6 * rhs}
elif stage == 1:
return {fq[1]: fq[0] + dt/2 * rhs,
fq[2]: fq[2] + dt/3 * rhs}
elif stage == 2:
return {fq[1]: fq[0] + dt * rhs,
fq[2]: fq[2] + dt/3 * rhs}
elif stage == 3:
return {fq[0]: fq[2] + dt/6 * rhs}
class RungeKutta3Heun(RungeKuttaStepper):
"""
Heun's three-stage, third-order Runge-Kutta method.
Requires unknown arrays to have temporary storage axes of length three.
"""
num_stages = 3
expected_order = 3
num_copies = 3
def step_statements(self, stage, f, dt, rhs):
fq = [index_fields(f, prepend_with=(q,)) for q in range(3)]
if stage == 0:
return {fq[1]: fq[0] + dt/3 * rhs,
fq[2]: fq[0] + dt/4 * rhs}
elif stage == 1:
return {fq[1]: fq[0] + dt*2/3 * rhs}
elif stage == 2:
return {fq[0]: fq[2] + dt*3/4 * rhs}
class RungeKutta3Nystrom(RungeKuttaStepper):
"""
Nystrom's three-stage, third-order Runge-Kutta method.
Requires unknown arrays to have temporary storage axes of length three.
"""
num_stages = 3
expected_order = 3
num_copies = 3
def step_statements(self, stage, f, dt, rhs):
fq = [index_fields(f, prepend_with=(q,)) for q in range(3)]
if stage == 0:
return {fq[1]: fq[0] + dt*2/3 * rhs,
fq[2]: fq[0] + dt*2/8 * rhs}
elif stage == 1:
return {fq[1]: fq[0] + dt*2/3 * rhs,
fq[2]: fq[2] + dt*3/8 * rhs}
elif stage == 2:
return {fq[0]: fq[2] + dt*3/8 * rhs}
class RungeKutta3Ralston(RungeKuttaStepper):
"""
Ralston's three-stage, third-order Runge-Kutta method.
Requires unknown arrays to have temporary storage axes of length three.
"""
num_stages = 3
expected_order = 3
num_copies = 3
def step_statements(self, stage, f, dt, rhs):
fq = [index_fields(f, prepend_with=(q,)) for q in range(3)]
if stage == 0:
return {fq[1]: fq[0] + dt/2 * rhs,
fq[2]: fq[0] + dt*2/9 * rhs}
elif stage == 1:
return {fq[1]: fq[0] + dt*3/4 * rhs,
fq[2]: fq[2] + dt*1/3 * rhs}
elif stage == 2:
return {fq[0]: fq[2] + dt*4/9 * rhs}
class RungeKutta3SSP(RungeKuttaStepper):
"""
A three-stage, third-order strong-stability preserving Runge-Kutta method.
Requires unknown arrays to have temporary storage axes of length two.
"""
num_stages = 3
expected_order = 3
num_copies = 2
def step_statements(self, stage, f, dt, rhs):
fq = [index_fields(f, prepend_with=(q,)) for q in range(3)]
if stage == 0:
return {fq[1]: fq[0] + dt * rhs}
elif stage == 1:
return {fq[1]: 3/4 * fq[0] + 1/4 * fq[1] + dt/4 * rhs}
elif stage == 2:
return {fq[0]: 1/3 * fq[0] + 2/3 * fq[1] + dt*2/3 * rhs}
class RungeKutta2Midpoint(RungeKuttaStepper):
"""
The "midpoint" method, a two-stage, second-order Runge-Kutta method.
Requires unknown arrays to have temporary storage axes of length two.
Note that right-hand side operations *can* safely involve non-local computations
of unknown arrays for this method.
"""
num_stages = 2
expected_order = 2
num_copies = 2
def step_statements(self, stage, f, dt, rhs):
fq = [index_fields(f, prepend_with=(q,)) for q in range(2)]
if stage == 0:
return {fq[1]: fq[0] + dt/2 * rhs}
elif stage == 1:
return {fq[0]: fq[0] + dt * rhs}
# possible order reduction
class RungeKutta2Heun(RungeKuttaStepper):
num_stages = 2
expected_order = 2
num_copies = 2
def step_statements(self, stage, f, dt, rhs):
fq = [index_fields(f, prepend_with=(q,)) for q in range(2)]
if stage == 0:
return {fq[1]: fq[0] + dt * rhs,
fq[0]: fq[0] + dt/2 * rhs}
elif stage == 1:
return {fq[0]: fq[0] + dt/2 * rhs}
class RungeKutta2Ralston(RungeKuttaStepper):
"""
Ralstons's two-stage, second-order Runge-Kutta method.
Requires unknown arrays to have temporary storage axes of length two.
"""
num_stages = 2
expected_order = 2
num_copies = 2
def step_statements(self, stage, f, dt, rhs):
fq = [index_fields(f, prepend_with=(q,)) for q in range(2)]
if stage == 0:
return {fq[1]: fq[0] + dt*2/3 * rhs,
fq[0]: fq[0] + dt/4 * rhs}
elif stage == 1:
return {fq[0]: fq[0] + dt*3/4 * rhs}
def get_name(expr):
if isinstance(expr, Field):
return get_name(expr.child)
elif isinstance(expr, Subscript):
return get_name(expr.aggregate)
elif isinstance(expr, Variable):
return expr.name
elif isinstance(expr, str):
return expr
def gen_tmp_name(expr, prefix="_", suffix="_tmp"):
name = get_name(expr)
return prefix + name + suffix
def copy_and_rename(expr):
if isinstance(expr, Field):
return expr.copy(child=copy_and_rename(expr.child))
elif isinstance(expr, Subscript):
return Subscript(copy_and_rename(expr.aggregate), expr.index)
elif isinstance(expr, Variable):
return Variable(gen_tmp_name(expr))
elif isinstance(expr, str):
return gen_tmp_name(expr)
class LowStorageRKStepper(Stepper):
"""
The base implementation of low-storage, explicit Runge-Kutta time steppers,
which operate by storing and operating on a single copy of each unknown array,
plus an auxillary temporary array.
The substeps are expressed in a standard form, drawing coefficients from
a subclass's provided values of :attr:`_A`, :attr:`_B`, and :attr:`_C`.
Allocation of the auxillary arrays is handled internally by:
.. automethod:: get_tmp_arrays_like
:meth:`get_tmp_arrays_like` is called the first time
:meth:`__call__` is called, with the result stored in the attribute
:attr:`tmp_arrays`.
These arrays must not be modified between substages of a single timestep,
but may be safely modified in between timesteps.
.. versionchanged:: 2020.2
Auxillary arrays handled internally by :meth:`get_tmp_arrays_like`.
Previously, manual allocation (and passing) of a single temporary
array ``k_tmp`` was required.
"""
_A = []
_B = []
_C = []
tmp_arrays = {}
def make_steps(self, MapKernel=ElementWiseMap, **kwargs):
tmp_arrays = [copy_and_rename(key) for key in self.rhs_dict.keys()]
self.dof_names = {get_name(key) for key in self.rhs_dict.keys()}
rhs_statements = {var(gen_tmp_name(key, suffix=f"_rhs_{i}")): val
for i, (key, val) in enumerate(self.rhs_dict.items())}
steps = []
for stage in range(self.num_stages):
RK_dict = {}
for i, (f, k) in enumerate(zip(self.rhs_dict.keys(), tmp_arrays)):
rhs = var(gen_tmp_name(f, suffix=f"_rhs_{i}"))
RK_dict[k] = self._A[stage] * k + var("dt") * rhs
RK_dict[f] = f + self._B[stage] * k
step = MapKernel(RK_dict, tmp_instructions=rhs_statements,
args=self.args, **kwargs)
steps.append(step)
return steps
def get_tmp_arrays_like(self, **kwargs):
"""
Allocates required temporary arrays matching those passed via keyword.
:returns: A :class:`dict` of named arrays, suitable for passing via
dictionary expansion.
.. versionadded:: 2020.2
"""
tmp_arrays = {}
for name in self.dof_names:
f = kwargs[name]
tmp_name = gen_tmp_name(name)
import pyopencl.array as cla
if isinstance(f, cla.Array):
tmp_arrays[tmp_name] = cla.empty_like(f)
elif isinstance(f, np.ndarray):
tmp_arrays[tmp_name] = np.empty_like(f)
else:
raise ValueError(f"Could not generate tmp array for {f}"
f"of type {type(f)}")
tmp_arrays[tmp_name][...] = 0.
return tmp_arrays
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
for step in self.steps:
step.knl = lp.add_inames_for_unused_hw_axes(step.knl)
def __call__(self, stage, *, queue=None, **kwargs):
if len(self.tmp_arrays) == 0:
self.tmp_arrays = self.get_tmp_arrays_like(**kwargs)
return super().__call__(stage, queue=queue, **kwargs, **self.tmp_arrays)
class LowStorageRK54(LowStorageRKStepper):
"""
A five-stage, fourth-order, low-storage Runge-Kutta method.
See
<NAME>., and <NAME>., Fourth-order-2N-storage
Runge-Kutta schemes, NASA Langley Tech Report TM 109112, 1994
"""
num_stages = 5
expected_order = 4
_A = [
0,
-567301805773 / 1357537059087,
-2404267990393 / 2016746695238,
-3550918686646 / 2091501179385,
-1275806237668 / 842570457699,
]
_B = [
1432997174477 / 9575080441755,
5161836677717 / 13612068292357,
1720146321549 / 2090206949498,
3134564353537 / 4481467310338,
2277821191437 / 14882151754819,
]
_C = [
0,
1432997174477 / 9575080441755,
2526269341429 / 6820363962896,
2006345519317 / 3224310063776,
2802321613138 / 2924317926251,
]
class LowStorageRK144(LowStorageRKStepper):
"""
A 14-stage, fourth-order low-storage Runge-Kutta method optimized for elliptic
stability regions.
See
Niegemann, Jens & Diehl, Richard & <NAME>. (2012). Efficient low-storage
Runge-Kutta schemes with optimized stability regions. J. Comput. Physics. 231.
364-372. 10.1016/j.jcp.2011.09.003.
"""
num_stages = 14
expected_order = 4
_A = [
0,
-0.7188012108672410,
-0.7785331173421570,
-0.0053282796654044,
-0.8552979934029281,
-3.9564138245774565,
-1.5780575380587385,
-2.0837094552574054,
-0.7483334182761610,
-0.7032861106563359,
0.0013917096117681,
-0.0932075369637460,
-0.9514200470875948,
-7.1151571693922548
]
_B = [
0.0367762454319673,
0.3136296607553959,
0.1531848691869027,
0.0030097086818182,
0.3326293790646110,
0.2440251405350864,
0.3718879239592277,
0.6204126221582444,
0.1524043173028741,
0.0760894927419266,
0.0077604214040978,
0.0024647284755382,
0.0780348340049386,
5.5059777270269628
]
_C = [
0,
0.0367762454319673,
0.1249685262725025,
0.2446177702277698,
0.2476149531070420,
0.2969311120382472,
0.3978149645802642,
0.5270854589440328,
0.6981269994175695,
0.8190890835352128,
0.8527059887098624,
0.8604711817462826,
0.8627060376969976,
0.8734213127600976
]
class LowStorageRK134(LowStorageRKStepper):
"""
A 13-stage, fourth-order low-storage Runge-Kutta method optimized for circular
stability regions.
See
Niegemann, Jens & Diehl, Richard & Busch, Kurt. (2012). Efficient low-storage
Runge-Kutta schemes with optimized stability regions. J. Comput. Physics. 231.
364-372. 10.1016/j.jcp.2011.09.003.
"""
num_stages = 13
expected_order = 4
_A = [
0,
0.6160178650170565,
0.4449487060774118,
1.0952033345276178,
1.2256030785959187,
0.2740182222332805,
0.0411952089052647,
0.179708489915356,
1.1771530652064288,
0.4078831463120878,
0.8295636426191777,
4.789597058425229,
0.6606671432964504
]
_B = [
0.0271990297818803,
0.1772488819905108,
0.0378528418949694,
0.6086431830142991,
0.21543139743161,
0.2066152563885843,
0.0415864076069797,
0.0219891884310925,
0.9893081222650993,
0.0063199019859826,
0.3749640721105318,
1.6080235151003195,
0.0961209123818189
]
_C = [
0,
0.0271990297818803,
0.0952594339119365,
0.1266450286591127,
0.1825883045699772,
0.3737511439063931,
0.5301279418422206,
0.5704177433952291,
0.5885784947099155,
0.6160769826246714,
0.6223252334314046,
0.6897593128753419,
0.9126827615920843
]
class LowStorageRK124(LowStorageRKStepper):
"""
A 12-stage, fourth-order low-storage Runge-Kutta method optimized for inviscid
problems.
See
Niegemann, Jens & <NAME> & <NAME>. (2012). Efficient low-storage
Runge-Kutta schemes with optimized stability regions. J. Comput. Physics. 231.
364-372. 10.1016/j.jcp.2011.09.003.
"""
num_stages = 12
expected_order = 4
_A = [
0,
0.0923311242368072,
0.9441056581158819,
4.327127324757639,
2.155777132902607,
0.9770727190189062,
0.7581835342571139,
1.79775254708255,
2.691566797270077,
4.646679896026814,
0.1539613783825189,
0.5943293901830616
]
_B = [
0.0650008435125904,
0.0161459902249842,
0.5758627178358159,
0.1649758848361671,
0.3934619494248182,
0.0443509641602719,
0.2074504268408778,
0.6914247433015102,
0.3766646883450449,
0.0757190350155483,
0.2027862031054088,
0.2167029365631842
]
_C = [
0,
0.0650008435125904,
0.0796560563081853,
0.1620416710085376,
0.2248877362907778,
0.2952293985641261,
0.3318332506149405,
0.4094724050198658,
0.6356954475753369,
0.6806551557645497,
0.714377371241835,
0.9032588871651854,
]
class LowStorageRK3Williamson(LowStorageRKStepper):
    """
    A three-stage, third-order, low-storage Runge-Kutta method.

    See

    Williamson, J. H., Low-storage Runge-Kutta schemes,
    J. Comput. Phys., 35, 48-56, 1980
    """

    num_stages = 3
    expected_order = 3

    # 2N-storage coefficients: update-register scale (_A), RHS scale (_B),
    # and fractional substep times (_C).
    _A = [0, -5/9, -153/128]
    _B = [1/3, 15/16, 8/15]
    _C = [0, 4/9, 15/32]
class LowStorageRK3Inhomogeneous(LowStorageRKStepper):
    """
    A three-stage, third-order, low-storage Runge-Kutta method.
    """

    num_stages = 3
    expected_order = 3

    # 2N-storage coefficients: update-register scale (_A), RHS scale (_B),
    # and fractional substep times (_C).
    _A = [0, -17/32, -32/27]
    _B = [1/4, 8/9, 3/4]
    _C = [0, 15/32, 4/9]
# possible order reduction -- excluded from the public ``all_steppers`` list.
class LowStorageRK3Symmetric(LowStorageRKStepper):
    """
    A three-stage, third-order, low-storage Runge-Kutta method.

    Flagged for possible order reduction; use with care.
    """

    num_stages = 3
    expected_order = 3

    _A = [0, -2/3, -1]
    _B = [1/3, 1, 1/2]
    _C = [0, 1/3, 2/3]
# possible order reduction -- excluded from the public ``all_steppers`` list.
class LowStorageRK3PredictorCorrector(LowStorageRKStepper):
    """
    A three-stage, third-order, low-storage Runge-Kutta method of
    predictor-corrector form.

    Flagged for possible order reduction; use with care.
    """

    num_stages = 3
    expected_order = 3

    _A = [0, -1/4, -4/3]
    _B = [1/2, 2/3, 1/2]
    _C = [0, 1/2, 1]
# Free parameter and intermediate quantities used to derive the coefficients
# of LowStorageRK3SSP below. ``c2`` is the scheme's free node; ``B1``--``B3``
# and ``A2``--``A3`` feed directly into that class's _A/_B/_C tables.
# NOTE(review): the closed-form expressions below are transcribed from a
# derivation not visible here -- verify against the source reference.
c2 = .924574
z1 = np.sqrt(36 * c2**4 + 36 * c2**3 - 135 * c2**2 + 84 * c2 - 12)
z2 = 2 * c2**2 + c2 - 2
z3 = 12 * c2**4 - 18 * c2**3 + 18 * c2**2 - 11 * c2 + 2
z4 = 36 * c2**4 - 36 * c2**3 + 13 * c2**2 - 8 * c2 + 4
z5 = 69 * c2**3 - 62 * c2**2 + 28 * c2 - 8
z6 = 34 * c2**4 - 46 * c2**3 + 34 * c2**2 - 13 * c2 + 2
B1 = c2
B2 = ((12 * c2 * (c2 - 1) * (3 * z2 - z1) - (3 * z2 - z1)**2)
      / (144 * c2 * (3 * c2 - 2) * (c2 - 1)**2))
B3 = (- 24 * (3 * c2 - 2) * (c2 - 1)**2
      / ((3 * z2 - z1)**2 - 12 * c2 * (c2 - 1) * (3 * z2 - z1)))
A2 = ((- z1 * (6 * c2**2 - 4 * c2 + 1) + 3 * z3)
      / ((2 * c2 + 1) * z1 - 3 * (c2 + 2) * (2 * c2 - 1)**2))
A3 = ((- z4 * z1 + 108 * (2 * c2 - 1) * c2**5 - 3 * (2 * c2 - 1) * z5)
      / (24 * z1 * c2 * (c2 - 1)**4 + 72 * c2 * z6 + 72 * c2**6 * (2 * c2 - 13)))
class LowStorageRK3SSP(LowStorageRKStepper):
    """
    A three-stage, third-order, strong-stability preserving, low-storage
    Runge-Kutta method.

    Coefficients are built from the module-level quantities ``A2``, ``A3``
    and ``B1``--``B3`` derived above from the free parameter ``c2``.
    """

    num_stages = 3
    expected_order = 3

    _A = [0, A2, A3]
    _B = [B1, B2, B3]
    _C = [0, B1, B1 + B2 * (A2 + 1)]
# Public registry of steppers (e.g. for parametrized tests).
# NOTE(review): LowStorageRK124, LowStorageRK3Symmetric and
# LowStorageRK3PredictorCorrector are defined in this module but not listed;
# the latter two are flagged "possible order reduction" -- confirm whether
# the omission of LowStorageRK124 is intentional.
all_steppers = [RungeKutta4, RungeKutta3SSP, RungeKutta3Heun, RungeKutta3Nystrom,
                RungeKutta3Ralston, RungeKutta2Midpoint,
                RungeKutta2Ralston, LowStorageRK54, LowStorageRK144,
                LowStorageRK3Williamson, LowStorageRK3Inhomogeneous,
                LowStorageRK3SSP]
| [
"pyopencl.array.empty_like",
"loopy.Options",
"loopy.ValueArg",
"numpy.sqrt",
"pymbolic.var",
"pystella.elementwise.append_new_args",
"numpy.empty_like",
"loopy.add_inames_for_unused_hw_axes",
"pystella.field.index_fields",
"pystella.get_field_args"
] | [((25354, 25421), 'numpy.sqrt', 'np.sqrt', (['(36 * c2 ** 4 + 36 * c2 ** 3 - 135 * c2 ** 2 + 84 * c2 - 12)'], {}), '(36 * c2 ** 4 + 36 * c2 ** 3 - 135 * c2 ** 2 + 84 * c2 - 12)\n', (25361, 25421), True, 'import numpy as np\n'), ((5142, 5198), 'pystella.get_field_args', 'get_field_args', (['self.rhs_dict'], {'prepend_with': 'prepend_with'}), '(self.rhs_dict, prepend_with=prepend_with)\n', (5156, 5198), False, 'from pystella import get_field_args\n'), ((5278, 5314), 'pystella.elementwise.append_new_args', 'append_new_args', (['args', 'inferred_args'], {}), '(args, inferred_args)\n', (5293, 5314), False, 'from pystella.elementwise import append_new_args\n'), ((7538, 7548), 'pymbolic.var', 'var', (['"""rhs"""'], {}), "('rhs')\n", (7541, 7548), False, 'from pymbolic import var\n'), ((7563, 7572), 'pymbolic.var', 'var', (['"""dt"""'], {}), "('dt')\n", (7566, 7572), False, 'from pymbolic import var\n'), ((7586, 7594), 'pymbolic.var', 'var', (['"""q"""'], {}), "('q')\n", (7589, 7594), False, 'from pymbolic import var\n'), ((7699, 7737), 'pystella.field.index_fields', 'index_fields', (['value'], {'prepend_with': '(q,)'}), '(value, prepend_with=(q,))\n', (7711, 7737), False, 'from pystella.field import Field, index_fields\n'), ((8754, 8808), 'loopy.Options', 'lp.Options', ([], {'enforce_variable_access_ordered': '"""no_check"""'}), "(enforce_variable_access_ordered='no_check')\n", (8764, 8808), True, 'import loopy as lp\n'), ((9412, 9446), 'pystella.field.index_fields', 'index_fields', (['f'], {'prepend_with': '(q,)'}), '(f, prepend_with=(q,))\n', (9424, 9446), False, 'from pystella.field import Field, index_fields\n'), ((10237, 10271), 'pystella.field.index_fields', 'index_fields', (['f'], {'prepend_with': '(q,)'}), '(f, prepend_with=(q,))\n', (10249, 10271), False, 'from pystella.field import Field, index_fields\n'), ((10904, 10938), 'pystella.field.index_fields', 'index_fields', (['f'], {'prepend_with': '(q,)'}), '(f, prepend_with=(q,))\n', (10916, 10938), False, 
'from pystella.field import Field, index_fields\n'), ((11625, 11659), 'pystella.field.index_fields', 'index_fields', (['f'], {'prepend_with': '(q,)'}), '(f, prepend_with=(q,))\n', (11637, 11659), False, 'from pystella.field import Field, index_fields\n'), ((12358, 12392), 'pystella.field.index_fields', 'index_fields', (['f'], {'prepend_with': '(q,)'}), '(f, prepend_with=(q,))\n', (12370, 12392), False, 'from pystella.field import Field, index_fields\n'), ((13152, 13186), 'pystella.field.index_fields', 'index_fields', (['f'], {'prepend_with': '(q,)'}), '(f, prepend_with=(q,))\n', (13164, 13186), False, 'from pystella.field import Field, index_fields\n'), ((13559, 13593), 'pystella.field.index_fields', 'index_fields', (['f'], {'prepend_with': '(q,)'}), '(f, prepend_with=(q,))\n', (13571, 13593), False, 'from pystella.field import Field, index_fields\n'), ((14144, 14178), 'pystella.field.index_fields', 'index_fields', (['f'], {'prepend_with': '(q,)'}), '(f, prepend_with=(q,))\n', (14156, 14178), False, 'from pystella.field import Field, index_fields\n'), ((18279, 18321), 'loopy.add_inames_for_unused_hw_axes', 'lp.add_inames_for_unused_hw_axes', (['step.knl'], {}), '(step.knl)\n', (18311, 18321), True, 'import loopy as lp\n'), ((5053, 5070), 'loopy.ValueArg', 'lp.ValueArg', (['"""dt"""'], {}), "('dt')\n", (5064, 5070), True, 'import loopy as lp\n'), ((17793, 17810), 'pyopencl.array.empty_like', 'cla.empty_like', (['f'], {}), '(f)\n', (17807, 17810), True, 'import pyopencl.array as cla\n'), ((17896, 17912), 'numpy.empty_like', 'np.empty_like', (['f'], {}), '(f)\n', (17909, 17912), True, 'import numpy as np\n'), ((16973, 16982), 'pymbolic.var', 'var', (['"""dt"""'], {}), "('dt')\n", (16976, 16982), False, 'from pymbolic import var\n')] |
# python
#
# Copyright 2020 The usbmon-tools Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# SPDX-License-Identifier: Apache-2.0
"""Tests for usbmon.capture.usbpcap."""
import os
from absl.testing import absltest
import usbmon.addresses
import usbmon.pcapng
class TestUsbpcap(absltest.TestCase):

    def test_parse(self):
        """Parses the sample usbpcap capture and checks its contents."""
        capture_path = os.path.join(
            os.path.dirname(os.path.abspath(__file__)),
            "../../../testdata/usbpcap1.pcap",
        )
        session = usbmon.pcapng.parse_file(capture_path)

        self.assertLen(list(session), 498)

        self.assertLen(session.device_descriptors, 1)
        (descriptor,) = session.device_descriptors.values()
        self.assertEqual(
            descriptor.address, usbmon.addresses.DeviceAddress(1, 1)
        )
        self.assertEqual(descriptor.vendor_id, 0x0627)
| [
"os.path.abspath"
] | [((940, 965), 'os.path.abspath', 'os.path.abspath', (['__file__'], {}), '(__file__)\n', (955, 965), False, 'import os\n')] |
# --==[ Screens ]==--
from libqtile import bar
from libqtile.config import Screen
from ..utils.settings import wallpaper
from ..extras.widgets import widgets
screens = [
    # Primary monitor: wallpaper plus a top bar.
    Screen(
        wallpaper = wallpaper,
        wallpaper_mode = 'fill',
        top = bar.Bar(
            # Widget list sourced from extras/widgets.py
            widgets,
            # Bar height in pixels
            20,
            # Background color; the leading "00" alpha makes it fully transparent
            background = "#00000000",
            # Outer gap around the bar: [top, right, bottom, left]
            margin = [9, 15, 0, 15],
            # Bar opacity (1 = opaque; the see-through look comes from
            # the alpha channel in ``background`` above)
            opacity = 1,
        ),
    ),
    # Secondary monitor: wallpaper only, no bar configured yet.
    Screen(
        wallpaper = wallpaper,
        wallpaper_mode = 'fill',
        # top = bar.Bar()
    ),
]
| [
"libqtile.bar.Bar",
"libqtile.config.Screen"
] | [((573, 623), 'libqtile.config.Screen', 'Screen', ([], {'wallpaper': 'wallpaper', 'wallpaper_mode': '"""fill"""'}), "(wallpaper=wallpaper, wallpaper_mode='fill')\n", (579, 623), False, 'from libqtile.config import Screen\n'), ((263, 341), 'libqtile.bar.Bar', 'bar.Bar', (['widgets', '(20)'], {'background': '"""#00000000"""', 'margin': '[9, 15, 0, 15]', 'opacity': '(1)'}), "(widgets, 20, background='#00000000', margin=[9, 15, 0, 15], opacity=1)\n", (270, 341), False, 'from libqtile import bar\n')] |
# Copyright 2017 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for beam_metadata_io."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
# GOOGLE-INITIALIZATION
import apache_beam as beam
from tensorflow_transform.beam.tft_beam_io import beam_metadata_io
from tensorflow_transform.beam.tft_beam_io import test_metadata
from tensorflow_transform.tf_metadata import metadata_io
import unittest
from tensorflow.python.framework import test_util
class BeamMetadataIoTest(test_util.TensorFlowTestCase):

  def testWriteMetadataNonDeferred(self):
    """Writes fully-known metadata with WriteMetadata and reads it back."""
    output_path = self.get_temp_dir()
    with beam.Pipeline() as pipeline:
      _ = (test_metadata.COMPLETE_METADATA
           | beam_metadata_io.WriteMetadata(output_path, pipeline))

    # Round-trip: load from disk and compare against the original.
    round_tripped = metadata_io.read_metadata(output_path)
    self.assertEqual(round_tripped, test_metadata.COMPLETE_METADATA)

  def testWriteMetadataDeferred(self):
    """Combines incomplete metadata with deferred complete metadata on write."""
    output_path = self.get_temp_dir()
    with beam.Pipeline() as pipeline:
      deferred = (
          pipeline
          | 'CreateDeferredMetadata' >> beam.Create(
              [test_metadata.COMPLETE_METADATA]))
      combined = beam_metadata_io.BeamDatasetMetadata(
          test_metadata.INCOMPLETE_METADATA, deferred)
      _ = combined | beam_metadata_io.WriteMetadata(output_path, pipeline)

    # Round-trip: the deferred metadata must have completed the record.
    round_tripped = metadata_io.read_metadata(output_path)
    self.assertEqual(round_tripped, test_metadata.COMPLETE_METADATA)
self.assertEqual(metadata, test_metadata.COMPLETE_METADATA)
if __name__ == '__main__':
  # Run the test suite when executed directly.
  unittest.main()
| [
"tensorflow_transform.beam.tft_beam_io.beam_metadata_io.WriteMetadata",
"tensorflow_transform.tf_metadata.metadata_io.read_metadata",
"tensorflow_transform.beam.tft_beam_io.beam_metadata_io.BeamDatasetMetadata",
"apache_beam.Create",
"unittest.main",
"apache_beam.Pipeline"
] | [((2304, 2319), 'unittest.main', 'unittest.main', ([], {}), '()\n', (2317, 2319), False, 'import unittest\n'), ((1457, 1488), 'tensorflow_transform.tf_metadata.metadata_io.read_metadata', 'metadata_io.read_metadata', (['path'], {}), '(path)\n', (1482, 1488), False, 'from tensorflow_transform.tf_metadata import metadata_io\n'), ((2177, 2208), 'tensorflow_transform.tf_metadata.metadata_io.read_metadata', 'metadata_io.read_metadata', (['path'], {}), '(path)\n', (2202, 2208), False, 'from tensorflow_transform.tf_metadata import metadata_io\n'), ((1220, 1235), 'apache_beam.Pipeline', 'beam.Pipeline', ([], {}), '()\n', (1233, 1235), True, 'import apache_beam as beam\n'), ((1734, 1749), 'apache_beam.Pipeline', 'beam.Pipeline', ([], {}), '()\n', (1747, 1749), True, 'import apache_beam as beam\n'), ((1936, 2030), 'tensorflow_transform.beam.tft_beam_io.beam_metadata_io.BeamDatasetMetadata', 'beam_metadata_io.BeamDatasetMetadata', (['test_metadata.INCOMPLETE_METADATA', 'deferred_metadata'], {}), '(test_metadata.INCOMPLETE_METADATA,\n deferred_metadata)\n', (1972, 2030), False, 'from tensorflow_transform.beam.tft_beam_io import beam_metadata_io\n'), ((1338, 1384), 'tensorflow_transform.beam.tft_beam_io.beam_metadata_io.WriteMetadata', 'beam_metadata_io.WriteMetadata', (['path', 'pipeline'], {}), '(path, pipeline)\n', (1368, 1384), False, 'from tensorflow_transform.beam.tft_beam_io import beam_metadata_io\n'), ((2059, 2105), 'tensorflow_transform.beam.tft_beam_io.beam_metadata_io.WriteMetadata', 'beam_metadata_io.WriteMetadata', (['path', 'pipeline'], {}), '(path, pipeline)\n', (2089, 2105), False, 'from tensorflow_transform.beam.tft_beam_io import beam_metadata_io\n'), ((1861, 1907), 'apache_beam.Create', 'beam.Create', (['[test_metadata.COMPLETE_METADATA]'], {}), '([test_metadata.COMPLETE_METADATA])\n', (1872, 1907), True, 'import apache_beam as beam\n')] |
from django.test import TestCase
from opentech.apply.funds.tests.factories import ApplicationSubmissionFactory
from .factories import ReviewFactory, ReviewOpinionFactory
from ..options import MAYBE, NO, YES
class TestReviewQueryset(TestCase):
    """Tests for the aggregate recommendation over a submission's reviews.

    Each test builds a submission with a particular combination of reviews
    (and, where relevant, opinions on those reviews) and checks the value of
    ``submission.reviews.recommendation()``.
    """

    def assertRecommendation(self, submission, expected):
        # Single helper for the fetch-and-assert pair every test repeated.
        self.assertEqual(submission.reviews.recommendation(), expected)

    def test_reviews_yes(self):
        submission = ApplicationSubmissionFactory()
        ReviewFactory(recommendation_yes=True, submission=submission)
        ReviewFactory(recommendation_yes=True, submission=submission)
        self.assertRecommendation(submission, YES)

    def test_reviews_no(self):
        submission = ApplicationSubmissionFactory()
        ReviewFactory(submission=submission)
        ReviewFactory(submission=submission)
        self.assertRecommendation(submission, NO)

    def test_reviews_maybe(self):
        submission = ApplicationSubmissionFactory()
        ReviewFactory(recommendation_maybe=True, submission=submission)
        ReviewFactory(recommendation_maybe=True, submission=submission)
        self.assertRecommendation(submission, MAYBE)

    def test_reviews_mixed(self):
        # A yes and a no together average out to maybe.
        submission = ApplicationSubmissionFactory()
        ReviewFactory(recommendation_yes=True, submission=submission)
        ReviewFactory(submission=submission)
        self.assertRecommendation(submission, MAYBE)

    def test_review_yes_opinion_agree(self):
        submission = ApplicationSubmissionFactory()
        review = ReviewFactory(recommendation_yes=True, submission=submission)
        ReviewOpinionFactory(review=review, opinion_agree=True)
        self.assertRecommendation(submission, YES)

    def test_review_yes_opinion_disagree(self):
        # A disagreeing opinion downgrades a yes to maybe.
        submission = ApplicationSubmissionFactory()
        review = ReviewFactory(recommendation_yes=True, submission=submission)
        ReviewOpinionFactory(review=review, opinion_disagree=True)
        self.assertRecommendation(submission, MAYBE)

    def test_review_no_opinion_agree(self):
        submission = ApplicationSubmissionFactory()
        review = ReviewFactory(submission=submission)
        ReviewOpinionFactory(review=review, opinion_agree=True)
        self.assertRecommendation(submission, NO)

    def test_review_no_opinion_disagree(self):
        # A disagreeing opinion upgrades a no to maybe.
        submission = ApplicationSubmissionFactory()
        review = ReviewFactory(submission=submission)
        ReviewOpinionFactory(review=review, opinion_disagree=True)
        self.assertRecommendation(submission, MAYBE)

    def test_review_not_all_opinion(self):
        # An agreeing opinion on one of two yes reviews keeps the result yes.
        submission = ApplicationSubmissionFactory()
        ReviewFactory(recommendation_yes=True, submission=submission)
        review = ReviewFactory(recommendation_yes=True, submission=submission)
        ReviewOpinionFactory(review=review, opinion_agree=True)
        self.assertRecommendation(submission, YES)

    def test_review_yes_mixed_opinion(self):
        # Conflicting opinions on the same review produce maybe.
        submission = ApplicationSubmissionFactory()
        review = ReviewFactory(submission=submission)
        ReviewOpinionFactory(review=review, opinion_agree=True)
        ReviewOpinionFactory(review=review, opinion_disagree=True)
        self.assertRecommendation(submission, MAYBE)
| [
"opentech.apply.funds.tests.factories.ApplicationSubmissionFactory"
] | [((299, 329), 'opentech.apply.funds.tests.factories.ApplicationSubmissionFactory', 'ApplicationSubmissionFactory', ([], {}), '()\n', (327, 329), False, 'from opentech.apply.funds.tests.factories import ApplicationSubmissionFactory\n'), ((630, 660), 'opentech.apply.funds.tests.factories.ApplicationSubmissionFactory', 'ApplicationSubmissionFactory', ([], {}), '()\n', (658, 660), False, 'from opentech.apply.funds.tests.factories import ApplicationSubmissionFactory\n'), ((913, 943), 'opentech.apply.funds.tests.factories.ApplicationSubmissionFactory', 'ApplicationSubmissionFactory', ([], {}), '()\n', (941, 943), False, 'from opentech.apply.funds.tests.factories import ApplicationSubmissionFactory\n'), ((1253, 1283), 'opentech.apply.funds.tests.factories.ApplicationSubmissionFactory', 'ApplicationSubmissionFactory', ([], {}), '()\n', (1281, 1283), False, 'from opentech.apply.funds.tests.factories import ApplicationSubmissionFactory\n'), ((1575, 1605), 'opentech.apply.funds.tests.factories.ApplicationSubmissionFactory', 'ApplicationSubmissionFactory', ([], {}), '()\n', (1603, 1605), False, 'from opentech.apply.funds.tests.factories import ApplicationSubmissionFactory\n'), ((1926, 1956), 'opentech.apply.funds.tests.factories.ApplicationSubmissionFactory', 'ApplicationSubmissionFactory', ([], {}), '()\n', (1954, 1956), False, 'from opentech.apply.funds.tests.factories import ApplicationSubmissionFactory\n'), ((2278, 2308), 'opentech.apply.funds.tests.factories.ApplicationSubmissionFactory', 'ApplicationSubmissionFactory', ([], {}), '()\n', (2306, 2308), False, 'from opentech.apply.funds.tests.factories import ApplicationSubmissionFactory\n'), ((2602, 2632), 'opentech.apply.funds.tests.factories.ApplicationSubmissionFactory', 'ApplicationSubmissionFactory', ([], {}), '()\n', (2630, 2632), False, 'from opentech.apply.funds.tests.factories import ApplicationSubmissionFactory\n'), ((2928, 2958), 'opentech.apply.funds.tests.factories.ApplicationSubmissionFactory', 
'ApplicationSubmissionFactory', ([], {}), '()\n', (2956, 2958), False, 'from opentech.apply.funds.tests.factories import ApplicationSubmissionFactory\n'), ((3346, 3376), 'opentech.apply.funds.tests.factories.ApplicationSubmissionFactory', 'ApplicationSubmissionFactory', ([], {}), '()\n', (3374, 3376), False, 'from opentech.apply.funds.tests.factories import ApplicationSubmissionFactory\n')] |
import asyncio
import json
import os
from datetime import datetime, timedelta
import aiohttp
import tweepy
from dateutil.parser import parse
from fpl import FPL, utils
from pymongo import MongoClient
from constants import lineup_markers, twitter_usernames
# Absolute directory of this script (used later to locate the config file).
dirname = os.path.dirname(os.path.realpath(__file__))

# Module-level MongoDB connection; seen lineups live in the ``team_news`` db.
client = MongoClient()
database = client.team_news
def short_name_converter(team_id):
    """Converts a team's ID to their short name.

    IDs 1-20 map to the three-letter club codes; ``None`` maps to ``None``.
    Any other ID raises ``KeyError``.
    """
    short_names = (
        "ARS", "AVL", "BHA", "BUR", "CHE", "CRY", "EVE", "FUL", "LEI", "LEE",
        "LIV", "MCI", "MUN", "NEW", "SHU", "SOU", "TOT", "WBA", "WHU", "WOL",
    )
    mapping = {index: name for index, name in enumerate(short_names, start=1)}
    mapping[None] = None
    return mapping[team_id]
async def get_current_fixtures():
    """Return the current gameweek's fixtures whose team news is due now.

    A fixture is kept when its team-news time lies within +/- 2 minutes of
    the current time.

    NOTE(review): ``datetime.now()`` is naive local time while
    ``team_news_time`` merely has its tzinfo stripped -- confirm both are
    expressed in the same timezone.
    """
    async with aiohttp.ClientSession() as session:
        fpl = FPL(session)
        current_gameweek = await utils.get_current_gameweek(session)
        fixtures = await fpl.get_fixtures_by_gameweek(current_gameweek)

    min_range = timedelta(minutes=2)
    # Chained comparison: team_news_time - 2min < now < team_news_time + 2min.
    return [fixture for fixture in fixtures
            if fixture.team_news_time.replace(tzinfo=None) - min_range <
            datetime.now() <
            fixture.team_news_time.replace(tzinfo=None) + min_range]
def is_new_lineup(fixture_id, team_id):
    """Return True if no lineup is stored yet for this fixture/team pair.

    Queries the module-level MongoDB ``database`` handle. (Idiom fix: the
    original ``if count < 1: return True / return False`` is collapsed into
    a single boolean expression.)
    """
    query = {"fixture_id": fixture_id, "team_id": team_id}
    return database.lineup.count_documents(query) < 1
def add_lineup_to_database(fixture_id, team_id, url):
    """Upsert the lineup image URL for the given fixture and team.

    Bug fix: this is a module-level function, so the MongoDB handle is the
    module-level ``database``; the original referenced ``self.database``,
    which raised NameError at runtime.
    """
    database.lineup.update_one(
        {"fixture_id": fixture_id},
        {"$set": {"fixture_id": fixture_id,
                  "team_id": team_id,
                  "url": url}},
        upsert=True
    )
def lineup_handler(team_id, team_short_name, opponent_id):
    """Scan a team's recent tweets for a lineup announcement and store it.

    Bug fix: the original built ``status_split`` but then looked up
    ``split_status`` (a NameError at runtime); the name is now consistent.

    NOTE(review): ``api`` and ``fixture`` are not defined in this scope --
    ``api`` is created inside ``main`` and ``fixture`` is its loop variable.
    They need to be passed in (or made module-level) for this function to
    run; only the internal naming bug is fixed here.
    """
    team_name = twitter_usernames[team_short_name]
    for status in api.user_timeline(screen_name=team_name,
                                  tweet_mode="extended",
                                  count=3):
        # Normalise so "line-up", "line up" and "lineup" all tokenize alike.
        status_split = status.full_text.lower().replace("-", " ").split()
        for marker in lineup_markers:
            # Markers are word pairs, matched against consecutive tokens.
            if marker in list(zip(status_split, status_split[1:])):
                if "media" not in status.entities:
                    continue

                media = status.entities["media"][0]
                media_url = media["media_url_https"]
                if is_new_lineup(fixture.id, team_id):
                    add_lineup_to_database(fixture.id, team_id, media_url)
                return
async def main(config):
    """Authenticate with Twitter and check each current fixture for lineups.

    Fix: removed the unused ``images_urls`` local.
    """
    auth = tweepy.OAuthHandler(config["CONSUMER_API_KEY"],
                               config["CONSUMER_API_SECRET_KEY"])
    auth.set_access_token(config["ACCESS_TOKEN"],
                          config["ACCESS_TOKEN_SECRET"])
    # NOTE(review): ``api`` is not used here directly, but lineup_handler
    # expects it to be in scope -- see the note on that function.
    api = tweepy.API(auth)

    current_fixtures = await get_current_fixtures()
    for fixture in current_fixtures:
        team_h_short = short_name_converter(fixture.team_h)
        team_a_short = short_name_converter(fixture.team_a)
        lineup_handler(fixture.team_h, team_h_short, fixture.team_a)
        lineup_handler(fixture.team_a, team_a_short, fixture.team_h)
if __name__ == "__main__":
with open(f"{dirname}/../twitter_config.json") as file:
config = json.loads(file.read())
try:
asyncio.run(main(config))
except AttributeError:
loop = asyncio.get_event_loop()
loop.run_until_complete(main(config))
loop.close()
| [
"aiohttp.ClientSession",
"fpl.utils.get_current_gameweek",
"fpl.FPL",
"os.path.realpath",
"datetime.datetime.now",
"tweepy.API",
"pymongo.MongoClient",
"datetime.timedelta",
"asyncio.get_event_loop",
"tweepy.OAuthHandler"
] | [((321, 334), 'pymongo.MongoClient', 'MongoClient', ([], {}), '()\n', (332, 334), False, 'from pymongo import MongoClient\n'), ((284, 310), 'os.path.realpath', 'os.path.realpath', (['__file__'], {}), '(__file__)\n', (300, 310), False, 'import os\n'), ((1178, 1198), 'datetime.timedelta', 'timedelta', ([], {'minutes': '(2)'}), '(minutes=2)\n', (1187, 1198), False, 'from datetime import datetime, timedelta\n'), ((2727, 2814), 'tweepy.OAuthHandler', 'tweepy.OAuthHandler', (["config['CONSUMER_API_KEY']", "config['CONSUMER_API_SECRET_KEY']"], {}), "(config['CONSUMER_API_KEY'], config[\n 'CONSUMER_API_SECRET_KEY'])\n", (2746, 2814), False, 'import tweepy\n'), ((2958, 2974), 'tweepy.API', 'tweepy.API', (['auth'], {}), '(auth)\n', (2968, 2974), False, 'import tweepy\n'), ((957, 980), 'aiohttp.ClientSession', 'aiohttp.ClientSession', ([], {}), '()\n', (978, 980), False, 'import aiohttp\n'), ((1007, 1019), 'fpl.FPL', 'FPL', (['session'], {}), '(session)\n', (1010, 1019), False, 'from fpl import FPL, utils\n'), ((1053, 1088), 'fpl.utils.get_current_gameweek', 'utils.get_current_gameweek', (['session'], {}), '(session)\n', (1079, 1088), False, 'from fpl import FPL, utils\n'), ((1328, 1342), 'datetime.datetime.now', 'datetime.now', ([], {}), '()\n', (1340, 1342), False, 'from datetime import datetime, timedelta\n'), ((3577, 3601), 'asyncio.get_event_loop', 'asyncio.get_event_loop', ([], {}), '()\n', (3599, 3601), False, 'import asyncio\n')] |
# -*- coding: utf-8 -*-
#
# <NAME> 2021 gpSTS
###########################################
###Configuration File######################
###for gpSTS steering of experiments######
###########################################
import os
import numpy as np
from gpsts.NanonisInterface.nanonis_interface import Nanonis
from gpsts.NanonisInterface.data_class import ScanData, SpecCounter, PointList, ImageInfo
from gpsts.NanonisInterface.kernel import kernel_l2
import json
###############################
###Initialize##################
###############################
# Experiment configuration: Nanonis/STM acquisition settings plus the
# neural-network hyperparameters used downstream.
nanonis_config = {
    "Nanonis_Settings": {
        "File": "gpSTSinit",
        "ExperimentName": "Test Out",
        "Version": "0.0.1",
        "ImageStart": "test_img001.sxm",
        "FolderLocation": "C:\\gpSTS\\src\\",
        "DataLocation": "C:\\gpSTS\\src\\data\\",
        "Channel": "Z",
        "ImDirection": "forward",
        "SpectralRange": [-1,1],          # bias sweep range in volts
        "NumSpectralPoints": 1200,
        "Center_Point": [174,34],         # pixel coordinates of the feature
        "Search_Window": 40,              # search window size in pixels
        "Feature_Window": 20,             # tracked-feature window in pixels
        "ScanCurrent": 30e-12,            # imaging setpoint current (A)
        "SpecCurrent": 200e-12,           # spectroscopy setpoint current (A)
        "STSbias": "Bias calc (V)",
        "STSsignal": "Current (A)"
    },
    "Neural_Network": {
        "TrainingPath": "C:\\gpSTS\\src\\train\\",
        "EpochNumber": 2,
        "ClassNumber": 4,
        "LearningRate": 0.001,
        "BatchSizeTrain": 5,
        "BatchSizeVal": 1,
        "BatchSizeTest": 1
    }
}
# Persist the configuration for provenance.
# NOTE(review): this path is relative to the working directory while later
# code uses the absolute FolderLocation -- confirm the script runs from src/.
with open('data/'+str(nanonis_config['Nanonis_Settings']['File'])+'.json','w') as fil:
    json.dump(nanonis_config, fil, sort_keys = True, indent = 4, ensure_ascii = False)
# Populate the shared ScanData object from the configuration above.
Vals = ScanData()
Vals.update_file_info(nanonis_config['Nanonis_Settings']['FolderLocation'],
    nanonis_config['Nanonis_Settings']['ImageStart'], nanonis_config['Nanonis_Settings']['Channel'],
    nanonis_config['Nanonis_Settings']['ImDirection'])
Vals.update_search_conditions(nanonis_config['Nanonis_Settings']['Center_Point'],
    nanonis_config['Nanonis_Settings']['Search_Window'],nanonis_config['Nanonis_Settings']['Feature_Window'],
    nanonis_config['Nanonis_Settings']['SpectralRange'])
fil_path, imfile, channel, imdirection = Vals.get_file_info()

# Read the scan header; fail fast with a helpful message if the image file
# is missing from the data directory.
try:
    imoff, impix, imsize = Nanonis.readheader(fil_path+'data'+'\\',imfile)
except Exception:
    print('Error. Please save '+str(imfile)+' within '+str(fil_path)+'data\\')
    raise  # re-raise with the original traceback (idiomatic; was ``raise e``)
Vals.update_scan_conditions(imoff, impix, imsize)

# Output directories; exist_ok replaces the check-then-create pattern and
# avoids its race condition.
imdirectory = fil_path+'data'+'\\'+'impath'
os.makedirs(imdirectory, exist_ok=True)
datadirectory = fil_path+'data'
os.makedirs(datadirectory, exist_ok=True)
def return_scandata():
    """Return the module-level ScanData instance holding scan/search settings."""
    return Vals
# Module-level spectroscopy counter, capped at 10 acquisitions.
spec_counter = SpecCounter()
spec_counter.update_maxcnt(10)

def return_cnt():
    """Return the module-level SpecCounter instance."""
    return spec_counter
# Module-level list of measurement points recorded so far.
recorded_points = PointList()

def return_pntlist():
    """Return the module-level PointList of recorded measurement points."""
    return recorded_points
# Load the starting scan image and wrap it for downstream consumers.
imout = Nanonis.readimage(fil_path+'data'+'\\'+imfile,channel,imdirection)
current_image = ImageInfo(imout)

def return_image():
    """Return the module-level ImageInfo wrapping the current scan."""
    return current_image

# Plot the full frame, then crop and plot the tracked feature window
# centered on ``center_point``.
Nanonis.sxm_plot(imout,imdirectory,'current',recorded_points.get_list())
center_point, search_window, feature_window, spec_range = Vals.get_search_conditions()
imx1, imx2 = int((center_point[0]-(feature_window/2))), int((center_point[0]+(feature_window/2)))
imy1, imy2 = int((center_point[1]-(feature_window/2))), int((center_point[1]+(feature_window/2)))
imtrack = imout[imx1:imx2,imy1:imy2]
Nanonis.sxm_plot(imtrack,imdirectory,'feature',recorded_points.get_list())
###############################
###General#####################
###############################
from controls import perform_NanonisExp_BiasSpec, perform_experiment_overlap2
from gpsts.NanonisInterface.graph import plot_2d_function
parameters = {
"x1": {
"element interval": [1,int(impix[0][0])],
},
"x2": {
"element interval": [1,int(impix[0][0])],
},
}
###acquisition functions###
def my_ac_func(x, obj):
    """Acquisition function: score candidate points by posterior variance.

    Only the posterior covariance is returned (consistent with the
    "covariance" steering mode configured below). The original also computed
    -- but never used -- the posterior mean, the Shannon information gain
    (``obj.shannon_information_gain(x)["sig"]``) and an upper confidence
    bound (``mean + 3 * sqrt(cov)``); those dead calls have been removed.

    Args:
        x: candidate point(s) in the input space.
        obj: optimizer object exposing ``posterior_covariance``.

    Returns:
        The posterior variance at ``x``.
    """
    return obj.posterior_covariance(x)["v(x)"]
# Gaussian-process model definition consumed by the steering loop: kernel,
# hyperparameter search space, and the callbacks for data acquisition,
# acquisition scoring and plotting.
gaussian_processes = {
    "model_1": {
        "kernel function": kernel_l2,
        "hyperparameters": [1.0,1.0,1.0],
        "hyperparameter bounds": [[1.0,100.0],[0.10,100.0],[0.10,100.0]],
        "input hyper parameters": [1.0,1.0,1.0],
        "output hyper parameters": [1.0],
        "input hyper parameter bounds": [[0.01,1000000.0],[0.01,10.0],[0.01,10.0]],
        "output hyper parameter bounds":[[0.9,1.1]],
        "number of returns": 1,
        "dimensionality of return": 1,
        "variance optimization tolerance": 0.001,
        "adjust optimization threshold": [True,0.1],
        "steering mode": "covariance",   # matches my_ac_func returning variance
        "run function in every iteration": None,
        "data acquisition function": perform_NanonisExp_BiasSpec,
        "acquisition function": my_ac_func,
        "objective function": None,
        "mean function": None,
        "cost function": None,
        "cost update function": None,
        "cost function parameters": {"offset": 10,"slope":2.0},
        "cost function optimization bounds": [[0.0,10.0],[0.0,10.0]],
        "cost optimization chance" : 0.1,
        "plot function": plot_2d_function,
        "acquisition function optimization tolerance": 0.001
    },
}
compute_device = "cpu"
sparse = False
compute_inverse = False
initial_likelihood_optimization_method = "global"
training_dask_client = False
prediction_dask_client = False
likelihood_optimization_tolerance = 1e-12
likelihood_optimization_max_iter = 200
automatic_signal_variance_range_determination = True
acquisition_function_optimization_method = "global"
chance_for_local_acquisition_function_optimization = 0.5
acquisition_function_optimization_population_size = 20
acquisition_function_optimization_max_iter = 20
global_likelihood_optimization_at = [200]
hgdl_likelihood_optimization_at = []
local_likelihood_optimization_at = []
breaking_error = 1e-18
########################################
###Variance Optimization################
########################################
objective_function_optimization_population_size = 20
likelihood_optimization_population_size = 20
number_of_suggested_measurements = 1
########################################
###Computation Parameters###############
########################################
global_kernel_optimization_frequency = 0.2
local_kernel_optimization_frequency = 0.5
gpu_acceleration = False
rank_n_update = [False,0.2]
gp_system_solver = "inv" # "inv", "cg" or "minres"
switch_system_solver_to_after = [True, "cg", 5000]
###############################
###DATA ACQUISITION############
###############################
initial_data_set_size = 1
max_number_of_measurements = 10
#####################################################################
###############END###################################################
#####################################################################
| [
"gpsts.NanonisInterface.data_class.PointList",
"os.path.exists",
"gpsts.NanonisInterface.data_class.SpecCounter",
"numpy.sqrt",
"os.makedirs",
"json.dump",
"gpsts.NanonisInterface.nanonis_interface.Nanonis.readheader",
"gpsts.NanonisInterface.nanonis_interface.Nanonis.readimage",
"gpsts.NanonisInter... | [((1551, 1561), 'gpsts.NanonisInterface.data_class.ScanData', 'ScanData', ([], {}), '()\n', (1559, 1561), False, 'from gpsts.NanonisInterface.data_class import ScanData, SpecCounter, PointList, ImageInfo\n'), ((2616, 2629), 'gpsts.NanonisInterface.data_class.SpecCounter', 'SpecCounter', ([], {}), '()\n', (2627, 2629), False, 'from gpsts.NanonisInterface.data_class import ScanData, SpecCounter, PointList, ImageInfo\n'), ((2721, 2732), 'gpsts.NanonisInterface.data_class.PointList', 'PointList', ([], {}), '()\n', (2730, 2732), False, 'from gpsts.NanonisInterface.data_class import ScanData, SpecCounter, PointList, ImageInfo\n'), ((2790, 2864), 'gpsts.NanonisInterface.nanonis_interface.Nanonis.readimage', 'Nanonis.readimage', (["(fil_path + 'data' + '\\\\' + imfile)", 'channel', 'imdirection'], {}), "(fil_path + 'data' + '\\\\' + imfile, channel, imdirection)\n", (2807, 2864), False, 'from gpsts.NanonisInterface.nanonis_interface import Nanonis\n'), ((2873, 2889), 'gpsts.NanonisInterface.data_class.ImageInfo', 'ImageInfo', (['imout'], {}), '(imout)\n', (2882, 2889), False, 'from gpsts.NanonisInterface.data_class import ScanData, SpecCounter, PointList, ImageInfo\n'), ((1461, 1537), 'json.dump', 'json.dump', (['nanonis_config', 'fil'], {'sort_keys': '(True)', 'indent': '(4)', 'ensure_ascii': '(False)'}), '(nanonis_config, fil, sort_keys=True, indent=4, ensure_ascii=False)\n', (1470, 1537), False, 'import json\n'), ((2139, 2191), 'gpsts.NanonisInterface.nanonis_interface.Nanonis.readheader', 'Nanonis.readheader', (["(fil_path + 'data' + '\\\\')", 'imfile'], {}), "(fil_path + 'data' + '\\\\', imfile)\n", (2157, 2191), False, 'from gpsts.NanonisInterface.nanonis_interface import Nanonis\n'), ((2402, 2429), 'os.path.exists', 'os.path.exists', (['imdirectory'], {}), '(imdirectory)\n', (2416, 2429), False, 'import os\n'), ((2435, 2459), 'os.makedirs', 'os.makedirs', (['imdirectory'], {}), '(imdirectory)\n', (2446, 2459), False, 'import os\n'), ((2499, 
2528), 'os.path.exists', 'os.path.exists', (['datadirectory'], {}), '(datadirectory)\n', (2513, 2528), False, 'import os\n'), ((2534, 2560), 'os.makedirs', 'os.makedirs', (['datadirectory'], {}), '(datadirectory)\n', (2545, 2560), False, 'import os\n'), ((3996, 4008), 'numpy.sqrt', 'np.sqrt', (['cov'], {}), '(cov)\n', (4003, 4008), True, 'import numpy as np\n')] |
"""squeezenet in pytorch
[1] <NAME>, <NAME>, <NAME>, <NAME>
squeezenet: Learning both Weights and Connections for Efficient Neural Networks
https://arxiv.org/abs/1506.02626
"""
import torch
import torch.nn as nn
from .channel_selection import channel_selection
class Fire(nn.Module):
    """SqueezeNet Fire module: a 1x1 "squeeze" stage feeding two parallel
    "expand" branches (1x1 and 3x3 convolutions) whose outputs are
    concatenated along the channel dimension."""

    def __init__(self, in_channel, out_channel, squzee_channel):
        super().__init__()

        half_out = int(out_channel / 2)

        def expand_branch(kernel_size, padding):
            # Both expand branches share the same layer layout and differ
            # only in kernel size / padding.
            return nn.Sequential(
                nn.Conv2d(squzee_channel, half_out, kernel_size, padding=padding),
                nn.BatchNorm2d(half_out),
                channel_selection(half_out),
                nn.ReLU(inplace=True)
            )

        # Submodules are created in the same order as before so parameter
        # initialization (RNG consumption) and state_dict keys are unchanged.
        self.squeeze = nn.Sequential(
            nn.Conv2d(in_channel, squzee_channel, 1),
            nn.BatchNorm2d(squzee_channel),
            channel_selection(squzee_channel),
            nn.ReLU(inplace=True)
        )
        self.expand_1x1 = expand_branch(1, 0)
        self.expand_3x3 = expand_branch(3, 1)

    def forward(self, x):
        squeezed = self.squeeze(x)
        return torch.cat([self.expand_1x1(squeezed), self.expand_3x3(squeezed)], 1)
class SqueezeNet(nn.Module):
    """SqueezeNet with simple bypass: identity shortcuts around fire3,
    fire5 and fire7 (see ``forward``).

    Args:
        dataset: one of 'cifar10', 'cifar100' or 'fer2013'; selects the
            number of output classes and input channels.
        depth: unused; kept so the constructor signature matches the
            other model constructors in this repository.

    Raises:
        ValueError: if ``dataset`` is not a supported name (previously an
            unknown dataset surfaced as a confusing NameError below).
    """

    def __init__(self, dataset='cifar10', depth=0):
        super().__init__()

        if dataset == 'cifar10':
            class_num = 10
            channel_size = 3
        elif dataset == 'cifar100':
            class_num = 100
            channel_size = 3
        elif dataset == 'fer2013':
            class_num = 7
            channel_size = 1  # FER2013 images are single-channel
        else:
            raise ValueError("unsupported dataset: %r" % (dataset,))

        self.stem = nn.Sequential(
            nn.Conv2d(channel_size, 96, 3, padding=1),
            nn.BatchNorm2d(96),
            nn.ReLU(inplace=True),
            nn.MaxPool2d(2, 2)
        )

        self.fire2 = Fire(96, 128, 16)
        self.fire3 = Fire(128, 128, 16)
        self.fire4 = Fire(128, 256, 32)
        self.fire5 = Fire(256, 256, 32)
        self.fire6 = Fire(256, 384, 48)
        self.fire7 = Fire(384, 384, 48)
        self.fire8 = Fire(384, 512, 64)
        self.fire9 = Fire(512, 512, 64)

        self.conv10 = nn.Conv2d(512, class_num, 1)
        self.avg = nn.AdaptiveAvgPool2d(1)
        self.maxpool = nn.MaxPool2d(2, 2)

    def forward(self, x):
        x = self.stem(x)

        f2 = self.fire2(x)
        f3 = self.fire3(f2) + f2    # bypass around fire3
        f4 = self.fire4(f3)
        f4 = self.maxpool(f4)

        f5 = self.fire5(f4) + f4    # bypass around fire5
        f6 = self.fire6(f5)
        f7 = self.fire7(f6) + f6    # bypass around fire7
        f8 = self.fire8(f7)
        f8 = self.maxpool(f8)

        f9 = self.fire9(f8)         # no bypass around fire9 in this variant
        c10 = self.conv10(f9)

        x = self.avg(c10)
        x = x.view(x.size(0), -1)   # flatten to (batch, class_num) logits

        return x
def squeezenet(dataset='cifar10', depth=0):
    """Factory for :class:`SqueezeNet`.

    Fix: ``depth`` used to be accepted and silently dropped; it is now
    forwarded to the constructor (which currently ignores it, so existing
    callers are unaffected).
    """
    return SqueezeNet(dataset=dataset, depth=depth)
| [
"torch.nn.BatchNorm2d",
"torch.nn.ReLU",
"torch.nn.Conv2d",
"torch.nn.MaxPool2d",
"torch.nn.AdaptiveAvgPool2d"
] | [((2292, 2320), 'torch.nn.Conv2d', 'nn.Conv2d', (['(512)', 'class_num', '(1)'], {}), '(512, class_num, 1)\n', (2301, 2320), True, 'import torch.nn as nn\n'), ((2340, 2363), 'torch.nn.AdaptiveAvgPool2d', 'nn.AdaptiveAvgPool2d', (['(1)'], {}), '(1)\n', (2360, 2363), True, 'import torch.nn as nn\n'), ((2387, 2405), 'torch.nn.MaxPool2d', 'nn.MaxPool2d', (['(2)', '(2)'], {}), '(2, 2)\n', (2399, 2405), True, 'import torch.nn as nn\n'), ((442, 482), 'torch.nn.Conv2d', 'nn.Conv2d', (['in_channel', 'squzee_channel', '(1)'], {}), '(in_channel, squzee_channel, 1)\n', (451, 482), True, 'import torch.nn as nn\n'), ((496, 526), 'torch.nn.BatchNorm2d', 'nn.BatchNorm2d', (['squzee_channel'], {}), '(squzee_channel)\n', (510, 526), True, 'import torch.nn as nn\n'), ((587, 608), 'torch.nn.ReLU', 'nn.ReLU', ([], {'inplace': '(True)'}), '(inplace=True)\n', (594, 608), True, 'import torch.nn as nn\n'), ((840, 861), 'torch.nn.ReLU', 'nn.ReLU', ([], {'inplace': '(True)'}), '(inplace=True)\n', (847, 861), True, 'import torch.nn as nn\n'), ((1104, 1125), 'torch.nn.ReLU', 'nn.ReLU', ([], {'inplace': '(True)'}), '(inplace=True)\n', (1111, 1125), True, 'import torch.nn as nn\n'), ((1798, 1839), 'torch.nn.Conv2d', 'nn.Conv2d', (['channel_size', '(96)', '(3)'], {'padding': '(1)'}), '(channel_size, 96, 3, padding=1)\n', (1807, 1839), True, 'import torch.nn as nn\n'), ((1853, 1871), 'torch.nn.BatchNorm2d', 'nn.BatchNorm2d', (['(96)'], {}), '(96)\n', (1867, 1871), True, 'import torch.nn as nn\n'), ((1885, 1906), 'torch.nn.ReLU', 'nn.ReLU', ([], {'inplace': '(True)'}), '(inplace=True)\n', (1892, 1906), True, 'import torch.nn as nn\n'), ((1920, 1938), 'torch.nn.MaxPool2d', 'nn.MaxPool2d', (['(2)', '(2)'], {}), '(2, 2)\n', (1932, 1938), True, 'import torch.nn as nn\n')] |
#-*- coding : utf-8-*-
import os
import json
import yaml
import glob
import jieba
import pickle
import datetime
import argparse
from collections import Counter
from concurrent.futures import ProcessPoolExecutor, Executor, as_completed
def task_one_gram(news):
    """Count character occurrences in one news file.

    Only characters listed in the first/second-level Chinese character
    table are counted; everything else is ignored.  Malformed JSON lines
    are skipped silently."""
    with open('./training/pinyin_table/一二级汉字表.txt', encoding='gbk') as table:
        valid_chars = table.read().replace('\n', '')
    counts = dict.fromkeys(valid_chars, 0)
    for line in open(news, 'r', encoding='gbk', errors='ignore'):
        try:
            record = json.loads(line)
        except:
            continue
        text = record['html'] + record['title']
        for char, occurrences in Counter(text).items():
            if char in counts:
                counts[char] += occurrences
    return counts
def one_gram():
    """Build the unigram character table over the whole corpus (one worker
    process per news file) and pickle the merged table to ./data/1gram.pkl."""
    files = glob.glob('./training/sina_news_gbk/*.txt')
    merged = {}
    with ProcessPoolExecutor() as pool:
        for partial in pool.map(task_one_gram, files):
            for char, count in partial.items():
                merged[char] = merged.get(char, 0) + count
    with open('./data/1gram.pkl', 'wb') as out:
        pickle.dump(merged, out)
def task_two_gram(news):
    """Count character bigrams in one news file.

    In addition to plain bigrams (two consecutive valid characters), the
    synthetic markers 's' and 't' record the start and end of every run of
    valid characters: 's'+char counts run starts, char+'t' counts run ends.
    Validity is "has an entry in the unigram table" (built by task_one_gram,
    so keys are single characters and cannot collide with the markers).

    Returns a tuple (bigram_counts, start_count, end_count).
    """
    gram1 = {}
    gram2 = {}
    cnt_s = 0
    cnt_t = 0
    # Unigram table produced by one_gram(); used as the valid-character set.
    with open('./data/1gram.pkl','rb') as f:
        gram1 = pickle.load(f)
    for line in open(news, 'r', encoding='gbk', errors='ignore'):
        try:
            content = json.loads(line)
        except:
            continue  # skip malformed JSON lines
        info = content['html'] + content['title']
        info_len = len(info)
        for i in range(info_len - 1):
            key = None
            # A run of valid characters starts at i when i is the first
            # index, or when the previous character is invalid.
            if i == 0 and gram1.get(info[0]):
                key = ''.join(['s', info[0]])
                cnt_s += 1
            elif i > 0 and gram1.get(info[i]) and not gram1.get(info[i - 1]):
                key = ''.join(['s', info[i]])
                cnt_s += 1
            # A run ends at i when the next character is invalid, or at the
            # final index (range stops at info_len - 2, hence the special case).
            elif gram1.get(info[i]) and not gram1.get(info[i + 1]):
                key = ''.join([info[i], 't'])
                cnt_t += 1
            elif i == info_len - 2 and gram1.get(info[i + 1]):
                key = ''.join([info[i + 1], 't'])
                cnt_t += 1
            if key:
                gram2[key] = gram2.get(key, 0) + 1
            # Plain bigram: both characters valid.
            if gram1.get(info[i]) and gram1.get(info[i + 1]):
                key = ''.join([info[i], info[i + 1]])
                gram2[key] = gram2.get(key, 0) + 1
    return (gram2, cnt_s, cnt_t)
def two_gram():
    """Aggregate character-bigram counts over the corpus, store the total
    sentence-start ('s') and sentence-end ('t') counts back into the
    unigram table, then pickle both tables."""
    files = glob.glob('./training/sina_news_gbk/*.txt')
    bigrams = {}
    start_total = 0
    end_total = 0
    with ProcessPoolExecutor() as pool:
        for partial, starts, ends in pool.map(task_two_gram, files):
            for key, count in partial.items():
                bigrams[key] = bigrams.get(key, 0) + count
            start_total += starts
            end_total += ends
    # Fold the boundary-marker totals into the unigram table in place.
    with open('./data/1gram.pkl', 'rb') as f:
        unigrams = pickle.load(f)
    unigrams['s'] = start_total
    unigrams['t'] = end_total
    with open('./data/1gram.pkl', 'wb') as f:
        pickle.dump(unigrams, f)
    with open('./data/2gram.pkl', 'wb') as f:
        pickle.dump(bigrams, f)
def task_three_gram(news):
    """Count character trigrams in one news file.

    Mirrors task_two_gram for trigrams: 's'+c1+c2 marks the first valid
    pair of a run of valid characters, c1+c2+'t' marks the last valid
    pair, and c1+c2+c3 is a plain trigram of three valid characters.
    Validity is "has an entry in the unigram table".

    Returns a tuple (trigram_counts, start_count, end_count).
    """
    gram1 = {}
    gram3 = {}
    cnt_s = 0
    cnt_t = 0
    # Unigram table produced by one_gram(); used as the valid-character set.
    with open('./data/1gram.pkl','rb') as f:
        gram1 = pickle.load(f)
    for line in open(news, 'r', encoding='gbk', errors='ignore'):
        try:
            content = json.loads(line)
        except:
            continue  # skip malformed JSON lines
        info = content['html'] + content['title']
        info_len = len(info)
        for i in range(info_len - 2):
            key = ''
            # Run start: the pair (info[i-1], info[i]) (or (info[0], info[1])
            # at the beginning) is the first valid pair after an invalid char.
            if i == 0 and gram1.get(info[0]) and gram1.get(info[1]):
                key = ''.join(['s', info[0], info[1]])
                cnt_s += 1
            elif i > 1 and gram1.get(info[i]) and gram1.get(info[i - 1]) and not gram1.get(info[i - 2]):
                key = ''.join(['s', info[i - 1], info[i]])
                cnt_s += 1
            # Run end: the pair (info[i], info[i+1]) is the last valid pair
            # before an invalid char, or sits at the very end of the text.
            elif gram1.get(info[i]) and gram1.get(info[i + 1]) and not gram1.get(info[i + 2]):
                key = ''.join([info[i], info[i + 1], 't'])
                cnt_t += 1
            elif i == info_len - 3 and gram1.get(info[i + 1]) and gram1.get(info[i + 2]):
                key = ''.join([info[i + 1], info[i + 2], 't'])
                cnt_t += 1
            if key:
                gram3[key] = gram3.get(key, 0) + 1
            # Plain trigram: all three characters valid.
            if gram1.get(info[i]) and gram1.get(info[i + 1]) and gram1.get(info[i + 2]):
                key = ''.join([info[i], info[i + 1], info[i + 2]])
                gram3[key] = gram3.get(key, 0) + 1
    return (gram3, cnt_s, cnt_t)
def three_gram(rank):
    """Aggregate character-trigram counts over the corpus, fold the
    boundary-marker totals into the unigram table, keep only the most
    frequent `rank` fraction of trigrams and pickle the result.

    rank == 1.0 writes 3gram_whole.pkl, anything else writes 3gram.pkl."""
    files = glob.glob('./training/sina_news_gbk/*.txt')
    trigrams = {}
    start_total = 0
    end_total = 0
    with ProcessPoolExecutor() as pool:
        for partial, starts, ends in pool.map(task_three_gram, files):
            for key, count in partial.items():
                trigrams[key] = trigrams.get(key, 0) + count
            start_total += starts
            end_total += ends
    with open('./data/1gram.pkl', 'rb') as f:
        unigrams = pickle.load(f)
    unigrams['s'] = start_total
    unigrams['t'] = end_total
    with open('./data/1gram.pkl', 'wb') as f:
        pickle.dump(unigrams, f)
    # Keep only the `rank` fraction of most frequent trigrams.
    keep = int(len(trigrams) * rank)
    ranked = sorted(trigrams.items(), key=lambda item: item[1], reverse=True)
    trigrams = dict(ranked[:keep])
    target = './data/3gram_whole.pkl' if rank == 1.0 else './data/3gram.pkl'
    with open(target, 'wb') as f:
        pickle.dump(trigrams, f)
def task_one_word(news):
    """Count jieba-segmented words in one news file.

    A word is kept only when every one of its characters has a nonzero
    unigram count and is neither 's' nor 't' (those keys are reserved as
    boundary markers in the unigram table)."""
    with open('./data/1gram.pkl', 'rb') as f:
        char_counts = pickle.load(f)

    def is_valid(word):
        return all(char_counts.get(ch) and ch != 's' and ch != 't' for ch in word)

    word_counts = {}
    for line in open(news, 'r', encoding='gbk', errors='ignore'):
        try:
            record = json.loads(line)
        except:
            continue
        text = record['html'] + record['title']
        for word, occurrences in Counter(jieba.lcut(text, cut_all=False, HMM=True)).items():
            if is_valid(word):
                word_counts[word] = word_counts.get(word, 0) + occurrences
    return word_counts
def one_word():
    """Aggregate per-file word counts from task_one_word over the whole
    corpus and pickle the merged table to ./data/1word.pkl."""
    files = glob.glob('./training/sina_news_gbk/*.txt')
    merged = {}
    with ProcessPoolExecutor() as pool:
        for partial in pool.map(task_one_word, files):
            for word, count in partial.items():
                merged[word] = merged.get(word, 0) + count
    with open('./data/1word.pkl', 'wb') as out:
        pickle.dump(merged, out)
def task_two_word(news):
    """Count word bigrams in one news file.

    Mirrors task_two_gram at word granularity: 's_'+word marks the start
    of a run of valid words, word+'_t' marks the end, and w1+'_'+w2 is a
    plain bigram; '_' separates the two words in the key.  A word is
    valid when it has an entry in the word table built by one_word().

    Returns a tuple (bigram_counts, start_count, end_count).
    """
    word1 = {}
    word2 = {}
    cnt_s = 0
    cnt_t = 0
    # Word-frequency table produced by one_word(); used as the valid-word set.
    with open('./data/1word.pkl','rb') as f:
        word1 = pickle.load(f)
    for line in open(news, 'r', encoding='gbk', errors='ignore'):
        try:
            content = json.loads(line)
        except:
            continue  # skip malformed JSON lines
        info = content['html'] + content['title']
        info_len = len(info)  # NOTE(review): unused in this function
        seg = jieba.lcut(info, cut_all=False, HMM=True)
        # seg = [x for x in _seg if word1.get(x)]
        seg_len = len(seg)
        for i in range(seg_len - 1):
            key = None
            # Run start: first valid word overall, or valid word preceded
            # by an invalid one.
            if i == 0 and word1.get(seg[0]):
                key = ''.join(['s_', seg[0]])
                cnt_s += 1
            elif i > 0 and word1.get(seg[i]) and not word1.get(seg[i - 1]):
                key = ''.join(['s_', seg[i]])
                cnt_s += 1
            # Run end: valid word followed by an invalid one, or the last
            # word of the segmentation (range stops at seg_len - 2).
            elif word1.get(seg[i]) and not word1.get(seg[i + 1]):
                key = ''.join([seg[i], '_t'])
                cnt_t += 1
            elif i == seg_len - 2 and word1.get(seg[i + 1]):
                key = ''.join([seg[i + 1], '_t'])
                cnt_t += 1
            if key:
                word2[key] = word2.get(key, 0) + 1
            # Plain word bigram: both words valid.
            if word1.get(seg[i]) and word1.get(seg[i + 1]):
                key = ''.join([seg[i], '_', seg[i + 1]])
                word2[key] = word2.get(key, 0) + 1
    return (word2, cnt_s, cnt_t)
def two_word(rank):
    """Aggregate word-bigram counts over the corpus, fold the boundary
    totals into the word table, keep only the most frequent `rank`
    fraction of bigrams and pickle the result.

    rank == 1.0 writes 2word_whole.pkl, anything else writes 2word.pkl."""
    files = glob.glob('./training/sina_news_gbk/*.txt')
    bigrams = {}
    start_total = 0
    end_total = 0
    with ProcessPoolExecutor() as pool:
        for partial, starts, ends in pool.map(task_two_word, files):
            for key, count in partial.items():
                bigrams[key] = bigrams.get(key, 0) + count
            start_total += starts
            end_total += ends
    with open('./data/1word.pkl', 'rb') as f:
        words = pickle.load(f)
    words['s'] = start_total
    words['t'] = end_total
    with open('./data/1word.pkl', 'wb') as f:
        pickle.dump(words, f)
    # Keep only the `rank` fraction of most frequent word bigrams.
    keep = int(len(bigrams) * rank)
    ranked = sorted(bigrams.items(), key=lambda item: item[1], reverse=True)
    bigrams = dict(ranked[:keep])
    target = './data/2word_whole.pkl' if rank == 1.0 else './data/2word.pkl'
    with open(target, 'wb') as f:
        pickle.dump(bigrams, f)
# Command-line entry point: builds the n-gram statistics needed by the
# selected model type ('2c' binary char, '3c' ternary char, '2w' binary word).
if __name__ == '__main__':
    parser = argparse.ArgumentParser()
    parser.add_argument("--model_type", default='3c', type=str, choices=['2c', '3c', '2w'], help="Available models")
    parser.add_argument("--rank_3c", default=0.2, type=float, help="Select rank percent of chars data for predicting")
    parser.add_argument("--rank_2w", default=0.25, type=float, help="Select rank percent of words data for predicting")
    args = parser.parse_args()
    print('===> Preprocessing')
    start = datetime.datetime.now()
    if args.model_type == '2c':
        print("Model type: binary char")
        one_gram()
        two_gram()
    elif args.model_type == '3c':
        print("Model type: ternary char")
        one_gram()
        three_gram(rank=args.rank_3c)
    elif args.model_type == '2w':
        print("Model type: binary word")
        # NOTE(review): enable_parallel is POSIX-only in jieba -- confirm
        # this script is not expected to run on Windows.
        jieba.enable_parallel()
        # NOTE(review): one_word() is commented out, so this branch requires
        # a pre-existing ./data/1word.pkl -- confirm that is intentional.
        # one_word()
        two_word(rank=args.rank_2w)
    end = datetime.datetime.now()
    print('Time cost: {}'.format(end -start))
    print('===> Completed!')
    print('-' * 20)
| [
"jieba.lcut",
"json.loads",
"jieba.enable_parallel",
"pickle.dump",
"argparse.ArgumentParser",
"pickle.load",
"collections.Counter",
"datetime.datetime.now",
"concurrent.futures.ProcessPoolExecutor",
"glob.glob"
] | [((859, 902), 'glob.glob', 'glob.glob', (['"""./training/sina_news_gbk/*.txt"""'], {}), "('./training/sina_news_gbk/*.txt')\n", (868, 902), False, 'import glob\n'), ((2687, 2730), 'glob.glob', 'glob.glob', (['"""./training/sina_news_gbk/*.txt"""'], {}), "('./training/sina_news_gbk/*.txt')\n", (2696, 2730), False, 'import glob\n'), ((5157, 5200), 'glob.glob', 'glob.glob', (['"""./training/sina_news_gbk/*.txt"""'], {}), "('./training/sina_news_gbk/*.txt')\n", (5166, 5200), False, 'import glob\n'), ((7321, 7364), 'glob.glob', 'glob.glob', (['"""./training/sina_news_gbk/*.txt"""'], {}), "('./training/sina_news_gbk/*.txt')\n", (7330, 7364), False, 'import glob\n'), ((9246, 9289), 'glob.glob', 'glob.glob', (['"""./training/sina_news_gbk/*.txt"""'], {}), "('./training/sina_news_gbk/*.txt')\n", (9255, 9289), False, 'import glob\n'), ((10676, 10701), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {}), '()\n', (10699, 10701), False, 'import argparse\n'), ((11134, 11157), 'datetime.datetime.now', 'datetime.datetime.now', ([], {}), '()\n', (11155, 11157), False, 'import datetime\n'), ((11576, 11599), 'datetime.datetime.now', 'datetime.datetime.now', ([], {}), '()\n', (11597, 11599), False, 'import datetime\n'), ((679, 692), 'collections.Counter', 'Counter', (['info'], {}), '(info)\n', (686, 692), False, 'from collections import Counter\n'), ((928, 949), 'concurrent.futures.ProcessPoolExecutor', 'ProcessPoolExecutor', ([], {}), '()\n', (947, 949), False, 'from concurrent.futures import ProcessPoolExecutor, Executor, as_completed\n'), ((1178, 1199), 'pickle.dump', 'pickle.dump', (['gram1', 'f'], {}), '(gram1, f)\n', (1189, 1199), False, 'import pickle\n'), ((1502, 1516), 'pickle.load', 'pickle.load', (['f'], {}), '(f)\n', (1513, 1516), False, 'import pickle\n'), ((2798, 2819), 'concurrent.futures.ProcessPoolExecutor', 'ProcessPoolExecutor', ([], {}), '()\n', (2817, 2819), False, 'from concurrent.futures import ProcessPoolExecutor, Executor, as_completed\n'), 
((3130, 3144), 'pickle.load', 'pickle.load', (['f'], {}), '(f)\n', (3141, 3144), False, 'import pickle\n'), ((3245, 3266), 'pickle.dump', 'pickle.dump', (['gram1', 'f'], {}), '(gram1, f)\n', (3256, 3266), False, 'import pickle\n'), ((3472, 3493), 'pickle.dump', 'pickle.dump', (['gram2', 'f'], {}), '(gram2, f)\n', (3483, 3493), False, 'import pickle\n'), ((3785, 3799), 'pickle.load', 'pickle.load', (['f'], {}), '(f)\n', (3796, 3799), False, 'import pickle\n'), ((5268, 5289), 'concurrent.futures.ProcessPoolExecutor', 'ProcessPoolExecutor', ([], {}), '()\n', (5287, 5289), False, 'from concurrent.futures import ProcessPoolExecutor, Executor, as_completed\n'), ((5602, 5616), 'pickle.load', 'pickle.load', (['f'], {}), '(f)\n', (5613, 5616), False, 'import pickle\n'), ((5717, 5738), 'pickle.dump', 'pickle.dump', (['gram1', 'f'], {}), '(gram1, f)\n', (5728, 5738), False, 'import pickle\n'), ((6671, 6685), 'pickle.load', 'pickle.load', (['f'], {}), '(f)\n', (6682, 6685), False, 'import pickle\n'), ((6906, 6947), 'jieba.lcut', 'jieba.lcut', (['info'], {'cut_all': '(False)', 'HMM': '(True)'}), '(info, cut_all=False, HMM=True)\n', (6916, 6947), False, 'import jieba\n'), ((6968, 6980), 'collections.Counter', 'Counter', (['seg'], {}), '(seg)\n', (6975, 6980), False, 'from collections import Counter\n'), ((7390, 7411), 'concurrent.futures.ProcessPoolExecutor', 'ProcessPoolExecutor', ([], {}), '()\n', (7409, 7411), False, 'from concurrent.futures import ProcessPoolExecutor, Executor, as_completed\n'), ((7640, 7661), 'pickle.dump', 'pickle.dump', (['word1', 'f'], {}), '(word1, f)\n', (7651, 7661), False, 'import pickle\n'), ((7952, 7966), 'pickle.load', 'pickle.load', (['f'], {}), '(f)\n', (7963, 7966), False, 'import pickle\n'), ((8216, 8257), 'jieba.lcut', 'jieba.lcut', (['info'], {'cut_all': '(False)', 'HMM': '(True)'}), '(info, cut_all=False, HMM=True)\n', (8226, 8257), False, 'import jieba\n'), ((9357, 9378), 'concurrent.futures.ProcessPoolExecutor', 'ProcessPoolExecutor', 
([], {}), '()\n', (9376, 9378), False, 'from concurrent.futures import ProcessPoolExecutor, Executor, as_completed\n'), ((9689, 9703), 'pickle.load', 'pickle.load', (['f'], {}), '(f)\n', (9700, 9703), False, 'import pickle\n'), ((9804, 9825), 'pickle.dump', 'pickle.dump', (['word1', 'f'], {}), '(word1, f)\n', (9815, 9825), False, 'import pickle\n'), ((555, 571), 'json.loads', 'json.loads', (['line'], {}), '(line)\n', (565, 571), False, 'import json\n'), ((1619, 1635), 'json.loads', 'json.loads', (['line'], {}), '(line)\n', (1629, 1635), False, 'import json\n'), ((3902, 3918), 'json.loads', 'json.loads', (['line'], {}), '(line)\n', (3912, 3918), False, 'import json\n'), ((6127, 6148), 'pickle.dump', 'pickle.dump', (['gram3', 'f'], {}), '(gram3, f)\n', (6138, 6148), False, 'import pickle\n'), ((6378, 6399), 'pickle.dump', 'pickle.dump', (['gram3', 'f'], {}), '(gram3, f)\n', (6389, 6399), False, 'import pickle\n'), ((6788, 6804), 'json.loads', 'json.loads', (['line'], {}), '(line)\n', (6798, 6804), False, 'import json\n'), ((8069, 8085), 'json.loads', 'json.loads', (['line'], {}), '(line)\n', (8079, 8085), False, 'import json\n'), ((10210, 10231), 'pickle.dump', 'pickle.dump', (['word2', 'f'], {}), '(word2, f)\n', (10221, 10231), False, 'import pickle\n'), ((10461, 10482), 'pickle.dump', 'pickle.dump', (['word2', 'f'], {}), '(word2, f)\n', (10472, 10482), False, 'import pickle\n'), ((11485, 11508), 'jieba.enable_parallel', 'jieba.enable_parallel', ([], {}), '()\n', (11506, 11508), False, 'import jieba\n')] |
# AMZ-Driverless
# Copyright (c) 2019 Authors:
# - <NAME> <<EMAIL>>
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
import logging
import yaml
from rbb_tools.simenvs.environment import SimulationEnvironment
class TestSimulationEnvironment(SimulationEnvironment):
    """Dummy simulation environment for exercising the rbb_tools simenv
    pipeline without a real simulator: it writes a fixed output.yaml with
    three fake repetitions plus a fake bag file."""

    def __init__(self, env_config, sim_config, output_dir, tmp_dir):
        super(TestSimulationEnvironment, self).__init__(env_config, sim_config, output_dir, tmp_dir)
        # A 'fail' key in the simulation config controls whether the
        # second repetition is reported as failing (default: it fails).
        self._fail = sim_config.get('fail', True)

    def prepare(self):
        logging.info("TestSimulationEnvironment.prepare()")
        return True

    def simulate(self):
        logging.info("TestSimulationEnvironment.simulate()")
        repetitions = {
            'Test run 1': {
                'bag': None,
                'pass': True,
                'duration': 1.0,
                'results': {"some-result": "good"}
            },
            'Test run 2': {
                'bag': 'missing-bag.bag',
                'pass': not self._fail,
                'duration': 1.0,
                'results': {"some-result": "bad"}
            },
            'Test run 3': {
                'bag': 'bag.bag',
                'pass': True,
                'duration': 1.0,
                'results': {"some-result": "this one has a bag"}
            }
        }
        report = {
            'title': "TestSimulationEnvironment",
            'repetitions': repetitions
        }
        with open(self._output_dir + "/output.yaml", 'w') as stream:
            yaml.safe_dump(report, stream, default_flow_style=False)
        # Write a ~24 KB placeholder standing in for a recorded rosbag.
        with open(self._output_dir + "/bag.bag", 'w') as bag:
            bag.writelines("THIS IS A FAKE ROSBAG \n" for _ in range(1024))
        return True

    def clean(self):
        logging.info("TestSimulationEnvironment.clean()")
# Module-level alias: presumably the attribute name the rbb_tools simenv
# loader looks up when importing an environment module -- confirm against
# the loader before renaming.
environment = TestSimulationEnvironment
| [
"yaml.safe_dump",
"logging.info"
] | [((1588, 1639), 'logging.info', 'logging.info', (['"""TestSimulationEnvironment.prepare()"""'], {}), "('TestSimulationEnvironment.prepare()')\n", (1600, 1639), False, 'import logging\n'), ((1693, 1745), 'logging.info', 'logging.info', (['"""TestSimulationEnvironment.simulate()"""'], {}), "('TestSimulationEnvironment.simulate()')\n", (1705, 1745), False, 'import logging\n'), ((2876, 2925), 'logging.info', 'logging.info', (['"""TestSimulationEnvironment.clean()"""'], {}), "('TestSimulationEnvironment.clean()')\n", (2888, 2925), False, 'import logging\n'), ((2621, 2677), 'yaml.safe_dump', 'yaml.safe_dump', (['output_file', 'f'], {'default_flow_style': '(False)'}), '(output_file, f, default_flow_style=False)\n', (2635, 2677), False, 'import yaml\n')] |
import os
from django.conf import settings
def get_location(directory, path, current="", parser=None):
"""Returns a tuple (directory, path)
params:
- directory: [Directory] Directory containing the currently parsed file
- path: [str] Path to the file needed
- current: [str] Current position relative to directory
returns:
Return a tuple (directory_name, path)
raises:
- SyntaxError if a directory is given but the path after ':' isn't absolute or if '~\' is
used outside repository.
- FileNotFoundError is either the library or the file does not exists."""
if ':' in path: # Relative to a library
lib, path = path.split(':')
if lib.isdigit():
raise SyntaxError("Library's name cannot be an integer")
if not path.startswith('/'):
raise SyntaxError("Syntax Error (path after ':' must be absolute)")
path = path[1:]
absolute = os.path.join(settings.FILEBROWSER_ROOT, lib)
if not os.path.isdir(absolute):
raise FileNotFoundError("Library '%s' does not exists" % lib)
absolute = os.path.join(absolute, path)
if not os.path.isfile(absolute):
raise FileNotFoundError("File '%s' does not exists in library '%s'" % (path, lib))
return lib, os.path.normpath(path)
if path.startswith('/'):
path = path[1:]
absolute = os.path.join(directory.root, path)
if not os.path.isfile(absolute):
for lib in [i for i in os.listdir(settings.FILEBROWSER_ROOT) if
i != settings.HOME]: # pragma: no cover
absolute = os.path.join(settings.FILEBROWSER_ROOT, lib, path)
if os.path.isfile(absolute):
return lib, path
raise FileNotFoundError("File '%s' does not exist" % path)
return directory.name, os.path.normpath(path)
if path.startswith('~/'): # Relative to user's home
path = path[2:]
absolute = os.path.join(directory.root, path)
if not os.path.isfile(absolute):
raise FileNotFoundError("File '%s' does not exists" % path)
return directory.name, os.path.normpath(path)
# Relative to current file
absolute = os.path.join(directory.root, current, path)
if not os.path.isfile(absolute):
raise FileNotFoundError("File '%s' does not exists" % path)
return directory.name, os.path.normpath(os.path.join(current, path))
def extends_dict(target, source):
    """Recursively merge *source* into *target* in place and return *target*.

    Keys missing from *target* are copied over.  For keys present in both:
    dict values are merged recursively, list values from *source* are
    appended to the list in *target*, and any other conflict keeps the
    value already in *target*.

    Fix: membership tests now use ``isinstance`` instead of ``type(...) is``
    so dict/list subclasses (OrderedDict, defaultdict, ...) are merged
    rather than silently skipped.
    """
    for key, value in source.items():
        if key not in target:
            target[key] = value
        elif isinstance(target[key], dict):
            extends_dict(target[key], value)
        elif isinstance(target[key], list):
            target[key] += value
    return target
def displayed_path(path):
    """Return *path* relative to FILEBROWSER_ROOT for display purposes.

    The root prefix is stripped and a leading all-digit component (a
    numeric user directory) is displayed as 'home'.

    Fix: a path that reduces to the root itself (no components left) now
    returns '' instead of raising IndexError on ``p[0]``.
    """
    relative = path.replace(settings.FILEBROWSER_ROOT, '')
    parts = [part for part in relative.split('/') if part]
    if not parts:
        return ''
    if parts[0].isdigit():
        parts[0] = 'home'
    return os.path.join(*parts)
| [
"os.listdir",
"os.path.join",
"os.path.isfile",
"os.path.normpath",
"os.path.isdir"
] | [((2379, 2422), 'os.path.join', 'os.path.join', (['directory.root', 'current', 'path'], {}), '(directory.root, current, path)\n', (2391, 2422), False, 'import os\n'), ((3200, 3216), 'os.path.join', 'os.path.join', (['*p'], {}), '(*p)\n', (3212, 3216), False, 'import os\n'), ((1047, 1091), 'os.path.join', 'os.path.join', (['settings.FILEBROWSER_ROOT', 'lib'], {}), '(settings.FILEBROWSER_ROOT, lib)\n', (1059, 1091), False, 'import os\n'), ((1225, 1253), 'os.path.join', 'os.path.join', (['absolute', 'path'], {}), '(absolute, path)\n', (1237, 1253), False, 'import os\n'), ((1510, 1544), 'os.path.join', 'os.path.join', (['directory.root', 'path'], {}), '(directory.root, path)\n', (1522, 1544), False, 'import os\n'), ((2117, 2151), 'os.path.join', 'os.path.join', (['directory.root', 'path'], {}), '(directory.root, path)\n', (2129, 2151), False, 'import os\n'), ((2434, 2458), 'os.path.isfile', 'os.path.isfile', (['absolute'], {}), '(absolute)\n', (2448, 2458), False, 'import os\n'), ((1107, 1130), 'os.path.isdir', 'os.path.isdir', (['absolute'], {}), '(absolute)\n', (1120, 1130), False, 'import os\n'), ((1269, 1293), 'os.path.isfile', 'os.path.isfile', (['absolute'], {}), '(absolute)\n', (1283, 1293), False, 'import os\n'), ((1410, 1432), 'os.path.normpath', 'os.path.normpath', (['path'], {}), '(path)\n', (1426, 1432), False, 'import os\n'), ((1560, 1584), 'os.path.isfile', 'os.path.isfile', (['absolute'], {}), '(absolute)\n', (1574, 1584), False, 'import os\n'), ((1989, 2011), 'os.path.normpath', 'os.path.normpath', (['path'], {}), '(path)\n', (2005, 2011), False, 'import os\n'), ((2167, 2191), 'os.path.isfile', 'os.path.isfile', (['absolute'], {}), '(absolute)\n', (2181, 2191), False, 'import os\n'), ((2305, 2327), 'os.path.normpath', 'os.path.normpath', (['path'], {}), '(path)\n', (2321, 2327), False, 'import os\n'), ((2572, 2599), 'os.path.join', 'os.path.join', (['current', 'path'], {}), '(current, path)\n', (2584, 2599), False, 'import os\n'), ((1754, 1804), 
'os.path.join', 'os.path.join', (['settings.FILEBROWSER_ROOT', 'lib', 'path'], {}), '(settings.FILEBROWSER_ROOT, lib, path)\n', (1766, 1804), False, 'import os\n'), ((1824, 1848), 'os.path.isfile', 'os.path.isfile', (['absolute'], {}), '(absolute)\n', (1838, 1848), False, 'import os\n'), ((1621, 1658), 'os.listdir', 'os.listdir', (['settings.FILEBROWSER_ROOT'], {}), '(settings.FILEBROWSER_ROOT)\n', (1631, 1658), False, 'import os\n')] |
"""
:Description: PasteScript Template to generate a GitHub hosted python package.
Let you set the package name, a one line description, the Licence (support
GPL, LGPL, AGPL and BSD - GPLv3 by default) and the author name, email and
organisation variables::
paster create -t gh_package <project name>
.. note::
The default author name and email variables are the ones set with
git-config::
git config --global user.name "<NAME>"
git config --global user.email <EMAIL>
The result::
<project name>/
docs/
source/
_static
_templates/
conf.py
index.rst
src/
<package name>/
__init__.py
support-files/
.gitignore
bootstrap.py
LICENCE
MANIFEST.in
pavement.py
README.rst
setup.cfg
* <project name>/pavement.py is the paver configuration file. All the setuptools
tasks are available with paver. Paver make the creation of of new task easy.
See `paver documentation <http://www.blueskyonmars.com/projects/paver/>`_
for more details::
paver paverdocs
* <project name>/src contain your package.
* <project name>/docs/source/ will contains your documentation source. conf.py
is Sphinx' configuration file.
Check `Sphinx' documentation <http://sphinx.pocoo.org/>`_ for more details.
.. note::
The version number, the project name and author name(s) are set in
``pavement.py`` and shared with ``docs/source/conf.py``.
However licence and copyright information are hard coded into ``LICENCE``,
``pavement.py``, ``docs/source/conf`` and ``src/<package>/__init__.py``.
"""
from datetime import date
import os
from paste.script.templates import var
from paste.script.templates import Template
from git import Git
YEAR = date.today().year
LICENCE_HEADER = """%(description)s
Copyright (c) %(year)s, %(author)s
All rights reserved.
"""
GPL = """
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU%(gpl_type)s General Public License as published by
the Free Software Foundation, either version %(gpl_version)s of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU%(gpl_type)s General Public License for more details.
You should have received a copy of the GNU%(gpl_type)s General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.
"""
BSD = """
Redistribution and use in source and binary forms, with or without modification,
are permitted provided that the following conditions are met:
* Redistributions of source code must retain the above copyright notice,
this list of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the above copyright notice,
this list of conditions and the following disclaimer in the documentation
and/or other materials provided with the distribution.
* Neither the name of the %(org)s nor the names of its contributors
may be used to endorse or promote products derived from this software
without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""
# Seed the template defaults from the user's global git configuration;
# fall back to the login name when git has no user.name set.  The email
# default stays empty when git has no user.email.
DEFAULT_NAME = Git(os.getcwd()).config(
    'user.name', with_exceptions=False).strip()
DEFAULT_NAME = DEFAULT_NAME or os.getlogin()
DEFAULT_EMAIL = Git(os.getcwd()).config(
    'user.email', with_exceptions=False).strip()
class GithubTemplate(Template):
    """Paver/PasteScript template for a GitHub-hosted Python package.

    Declares the interactive template variables (package name,
    description, licence, author, ...) and derives extra variables
    (licence text, year, GPL flavour) before rendering.
    """
    _template_dir = 'tmpl/gh'
    summary = ("A basic layout for project hosted on GitHub "
               "and managed with Paver")
    use_cheetah = True
    vars = [
        var('package', 'The package contained',
            default='example'),
        var('description',
            'One-line description of the package',
            default='<On-line description>'),
        var('licence',
            'package licence - GPLv2/GPLv3/LGPLv2/LGPLv3/AGPLv3/BSD',
            default='GPLv3'),
        var('author', 'Author name', DEFAULT_NAME),
        var('author_email', 'Author email', DEFAULT_EMAIL),
        var('org', 'Organisation name - for licence.',
            default='<Organisation>'),
    ]

    def check_vars(self, vars, command):
        """
        Reset the package variable in interactive mode so that the project
        and package names can be entered separately (GitHub and Python
        have different restrictions on names).
        """
        # _deleted_once guards against deleting the var twice if
        # check_vars is re-entered for the same command.
        if not command.options.no_interactive and \
                not hasattr(command, '_deleted_once'):
            del vars['package']
            command._deleted_once = True
        return Template.check_vars(self, vars, command)

    def pre(self, command, output_dir, vars):
        """
        Set extra template variables before rendering:

        * "year": current year.
        * "gitignore": literal ".gitignore" file name.
        * "licence_body": full licence notice for the package.
        * "gpl_type"/"gpl_version": GPL flavour markers for GPL licences.
        """
        vars['year'] = YEAR
        vars['gitignore'] = '.gitignore'
        licence = vars.get('licence')
        vars['licence_body'] = ''
        vars['gpl_type'] = ''
        vars['gpl_version'] = ''
        if licence:
            if licence == 'BSD':
                licence_tmpl = BSD
            elif licence == 'LGPLv2':
                vars['gpl_type'] = ' Lesser'
                vars['gpl_version'] = '2'
                vars['licence'] = 'LGPLv2'
                licence_tmpl = GPL
            elif licence == 'LGPLv3':
                vars['gpl_type'] = ' Lesser'
                vars['gpl_version'] = '3'
                vars['licence'] = 'LGPLv3'
                licence_tmpl = GPL
            elif licence == 'AGPLv3':
                vars['gpl_type'] = ' Affero'
                vars['gpl_version'] = '3'
                vars['licence'] = 'AGPLv3'
                licence_tmpl = GPL
            elif licence == 'GPLv2':
                vars['gpl_type'] = ''
                vars['gpl_version'] = '2'
                vars['licence'] = 'GPLv2'
                licence_tmpl = GPL
            else:
                # Catch-all branch (covers the default 'GPLv3' and any
                # unrecognised value).  NOTE(review): the licence label is
                # rewritten to plain 'GPL' here -- confirm this relabelling
                # is intended for 'GPLv3'.
                vars['gpl_type'] = ''
                vars['gpl_version'] = '3'
                vars['licence'] = 'GPL'
                licence_tmpl = GPL
vars['licence_body'] = (LICENCE_HEADER + licence_tmpl) % vars | [
"paste.script.templates.var",
"os.getcwd",
"os.getlogin",
"paste.script.templates.Template.check_vars",
"datetime.date.today"
] | [((1886, 1898), 'datetime.date.today', 'date.today', ([], {}), '()\n', (1896, 1898), False, 'from datetime import date\n'), ((4273, 4286), 'os.getlogin', 'os.getlogin', ([], {}), '()\n', (4284, 4286), False, 'import os\n'), ((4643, 4701), 'paste.script.templates.var', 'var', (['"""package"""', '"""The package contained"""'], {'default': '"""example"""'}), "('package', 'The package contained', default='example')\n", (4646, 4701), False, 'from paste.script.templates import var\n'), ((4723, 4818), 'paste.script.templates.var', 'var', (['"""description"""', '"""One-line description of the package"""'], {'default': '"""<On-line description>"""'}), "('description', 'One-line description of the package', default=\n '<On-line description>')\n", (4726, 4818), False, 'from paste.script.templates import var\n'), ((4847, 4940), 'paste.script.templates.var', 'var', (['"""licence"""', '"""package licence - GPLv2/GPLv3/LGPLv2/LGPLv3/AGPLv3/BSD"""'], {'default': '"""GPLv3"""'}), "('licence', 'package licence - GPLv2/GPLv3/LGPLv2/LGPLv3/AGPLv3/BSD',\n default='GPLv3')\n", (4850, 4940), False, 'from paste.script.templates import var\n'), ((4970, 5012), 'paste.script.templates.var', 'var', (['"""author"""', '"""Author name"""', 'DEFAULT_NAME'], {}), "('author', 'Author name', DEFAULT_NAME)\n", (4973, 5012), False, 'from paste.script.templates import var\n'), ((5022, 5072), 'paste.script.templates.var', 'var', (['"""author_email"""', '"""Author email"""', 'DEFAULT_EMAIL'], {}), "('author_email', 'Author email', DEFAULT_EMAIL)\n", (5025, 5072), False, 'from paste.script.templates import var\n'), ((5082, 5154), 'paste.script.templates.var', 'var', (['"""org"""', '"""Organisation name - for licence."""'], {'default': '"""<Organisation>"""'}), "('org', 'Organisation name - for licence.', default='<Organisation>')\n", (5085, 5154), False, 'from paste.script.templates import var\n'), ((5612, 5652), 'paste.script.templates.Template.check_vars', 'Template.check_vars', (['self', 'vars', 
'command'], {}), '(self, vars, command)\n', (5631, 5652), False, 'from paste.script.templates import Template\n'), ((4173, 4184), 'os.getcwd', 'os.getcwd', ([], {}), '()\n', (4182, 4184), False, 'import os\n'), ((4308, 4319), 'os.getcwd', 'os.getcwd', ([], {}), '()\n', (4317, 4319), False, 'import os\n')] |
# NOTE(review): standalone demo script.  Despite the "huawei" variable
# names it talks to Cisco's public IOS-XE NETCONF sandbox (device_params
# 'iosxe') and dumps the interface configuration.
from ncclient import manager
from xml.dom import minidom
import xmltodict
# Connection parameters; '<PASSWORD>' is a scrubbed placeholder -- supply
# real credentials before running.
huaweiautomation = {'address':'ios-xe-mgmt-latest.cisco.com',
'netconf_port': 10000, 'username': 'developer', 'password': '<PASSWORD>'}
# Open the NETCONF session; hostkey_verify=False skips SSH host-key checks.
huawei_manager = manager.connect(host = huaweiautomation["address"], port = huaweiautomation["netconf_port"], username = huaweiautomation["username"],
password = huaweiautomation["password"], device_params = {'name': 'iosxe'}, hostkey_verify = False)
# Subtree filter: an empty <interface> element matches every interface.
filter_Interfaces= """
<filter>
<interfaces xmlns="urn:ietf:params:xml:ns:yang:ietf-interfaces">
<interface>
</interface>
</interfaces>
</filter>
"""
# For any interface
huawei_get_interfaces = huawei_manager.get_config('running', filter_Interfaces).xml
# Pretty-print the raw XML reply.
xml_pretty = minidom.parseString(huawei_get_interfaces)
print ("Interfaces en XML format")
print ("#"*100)
print (xml_pretty.toprettyxml(indent=" "))
# Parse the same reply into a nested dict for programmatic access.
xml_to_dict_general = xmltodict.parse(huawei_get_interfaces)
print ("#"*100)
print ("Extraer todas las interfaces ")
# Print the name of every interface found in the reply.
for x in xml_to_dict_general['rpc-reply']['data']['interfaces']['interface']:
    print (x['name'])
print ("#"*100)
#print ("Estatus....")
#print ("")
#huawei_manager.connected
# Check the server capabilities (left disabled):
#for capability in huawei_manager.server_capabilities:
#    print (capability)
"xml.dom.minidom.parseString",
"ncclient.manager.connect",
"xmltodict.parse"
] | [((230, 465), 'ncclient.manager.connect', 'manager.connect', ([], {'host': "huaweiautomation['address']", 'port': "huaweiautomation['netconf_port']", 'username': "huaweiautomation['username']", 'password': "huaweiautomation['password']", 'device_params': "{'name': 'iosxe'}", 'hostkey_verify': '(False)'}), "(host=huaweiautomation['address'], port=huaweiautomation[\n 'netconf_port'], username=huaweiautomation['username'], password=\n huaweiautomation['password'], device_params={'name': 'iosxe'},\n hostkey_verify=False)\n", (245, 465), False, 'from ncclient import manager\n'), ((745, 787), 'xml.dom.minidom.parseString', 'minidom.parseString', (['huawei_get_interfaces'], {}), '(huawei_get_interfaces)\n', (764, 787), False, 'from xml.dom import minidom\n'), ((908, 946), 'xmltodict.parse', 'xmltodict.parse', (['huawei_get_interfaces'], {}), '(huawei_get_interfaces)\n', (923, 946), False, 'import xmltodict\n')] |
import numpy as np
from numpy import array
from numpy.linalg import det
from numpy.linalg import matrix_rank
from numpy.linalg import solve
"""
*** remember the following useful tools***
from numpy import transpose
from numpy import dot
from numpy import argmax
from numpy import abs
from numpy.linalg import eig
from numpy.linalg import inv
"""
def linear_equations(matrix, vector) -> array:
    """
    Solve a system of linear equations via the Rouché-Capelli test.

    The ranks of the coefficient matrix and of the augmented matrix are
    compared: equal full ranks give a unique solution, equal deficient
    ranks give infinitely many solutions, different ranks give none.

    :param matrix: matrix of coefficients (assumed square for the
        unique-solution path, since numpy's ``solve`` requires it)
    :param vector: vector of constant terms
    :return: the unique solution, the reduced matrix (if requested
        interactively) or None
    """
    # NOTE: the old docstring carried doctests that did not match the
    # actual printed output; they were removed rather than left failing.
    augmented = np.c_[matrix, vector]
    rank_A = matrix_rank(matrix)
    rank_augmented = matrix_rank(augmented)
    if rank_A == rank_augmented:
        if rank_A == len(matrix):
            # BUG FIX: the solution used to be computed twice (once for
            # the print and once for the return); solve once and reuse.
            solution = solve(matrix, vector)
            print(f'\n The system has a single unique solution.\n {solution}\n ')
            return solution
        else:
            print('\n The system has infinitely many solutions. \n')
            # BUG FIX: the prompt said "gauss_elimination elimination",
            # an artefact of a renaming gone wrong.
            if input('Do you want the matrix after the Gauss elimination? [y/n]\n') == 'y':
                S = gauss_elimination(augmented)
                print(S)
                return S
    else:
        print('\n The system has no solution.\n')
        return None
# esercizio 2
def linear_dependence(matrix: array) -> int:
    """
    Answer the question "are these (row) vectors linearly independent?".

    :param matrix: matrix with the vectors as rows
    :return: the number of linearly independent vectors (the rank)
    """
    rank = matrix_rank(matrix)
    if rank == matrix.shape[0]:
        print('The vectors are linearly independents')
    else:
        print(f'The vectors are linearly dependents and only {rank} of them are linearly independents')
        # BUG FIX: the prompt said "gauss_elimination elimination",
        # an artefact of a renaming gone wrong.
        if input('Do you want the matrix after the Gauss elimination? [y/n]\n') == 'y':
            S = gauss_elimination(matrix)
            print(S)
    return rank
# esercizio3
def cartesian_representation_line(vec_1: np.array, vec_2: np.array, type: int = 1) -> None:
    """
    Print the parametric (cartesian) representation of a line.

    :param vec_1: first point on the line
    :param vec_2: second point (type = 1) or direction vector (type = 0)
    :param type: switches between two-points and point-plus-direction input
    """
    # With two points, the direction is the difference of the points.
    direction = vec_2 - vec_1 if type else vec_2
    for axis, (point, step) in enumerate(zip(vec_1, direction), start=1):
        print(f' x_{axis} = {point} + {step}t')
    return None
def gauss_elimination(matrix) -> np.array:
    """
    Reduce a matrix to its reduced row echelon form (Gauss-Jordan).

    :param matrix: generic matrix
    :return: the reduced row echelon form as a numpy array
    """
    import sympy
    # rref() returns (reduced matrix, tuple of pivot columns); only the
    # reduced matrix is of interest here.
    reduced, _pivots = sympy.Matrix(matrix).rref()
    return np.array(reduced)
def conic_section_classification(coeff: list) -> None:
    """
    Classify a conic section from its general-equation coefficients.

    For the equation  A x^2 + B xy + C y^2 + D x + E y + F = 0  the
    argument is the list  coeff = [A, B, C, D, E, F].  The classification
    is printed; nothing is returned.

    :param coeff: coefficients [A, B, C, D, E, F] of the conic equation
    """
    # Symmetric matrix of the conic:
    #     [[A,   B/2, D/2],
    #      [B/2, C,   E/2],
    #      [D/2, E/2, F  ]]
    # BUG FIX: the (3,1) entry was coeff[3] instead of coeff[3] / 2, which
    # made the matrix asymmetric and broke the rank test for degenerate
    # conics whenever D != 0.
    A = array([[coeff[0], coeff[1] / 2, coeff[3] / 2],
               [coeff[1] / 2, coeff[2], coeff[4] / 2],
               [coeff[3] / 2, coeff[4] / 2, coeff[5]]])
    rank = matrix_rank(A)
    if rank == 3:
        # The determinant of the 2x2 quadratic-part minor decides the type;
        # 1e-09 is the tolerance for finite float precision.
        d = det(A[:2, :2])
        if d > 1e-09:
            print('This conic section is an ellipse')
        elif d < -1e-09:
            print('This conic section is a hyperbola')
        else:
            print('This conic section is a parabola')
    elif rank == 2:
        print('This conic section is a degenerate conic, ', end="")
        d = det(A[:2, :2])
        if d > 1e-09:
            print('in particular we have one point')
        elif d < -1e-09:
            print('in particular we have two incident lines')
        else:
            print('in particular we have two parallel lines')
    else:
        print('This conic section is a degenerate conic, in particular we have two coincident lines')
    return None
| [
"numpy.linalg.matrix_rank",
"numpy.linalg.solve",
"sympy.Matrix",
"numpy.linalg.det",
"numpy.array"
] | [((811, 830), 'numpy.linalg.matrix_rank', 'matrix_rank', (['matrix'], {}), '(matrix)\n', (822, 830), False, 'from numpy.linalg import matrix_rank\n'), ((844, 858), 'numpy.linalg.matrix_rank', 'matrix_rank', (['B'], {}), '(B)\n', (855, 858), False, 'from numpy.linalg import matrix_rank\n'), ((1689, 1708), 'numpy.linalg.matrix_rank', 'matrix_rank', (['matrix'], {}), '(matrix)\n', (1700, 1708), False, 'from numpy.linalg import matrix_rank\n'), ((3262, 3390), 'numpy.array', 'array', (['[[coeff[0], coeff[1] / 2, coeff[3] / 2], [coeff[1] / 2, coeff[2], coeff[4] /\n 2], [coeff[3], coeff[4] / 2, coeff[5]]]'], {}), '([[coeff[0], coeff[1] / 2, coeff[3] / 2], [coeff[1] / 2, coeff[2], \n coeff[4] / 2], [coeff[3], coeff[4] / 2, coeff[5]]])\n', (3267, 3390), False, 'from numpy import array\n'), ((3412, 3426), 'numpy.linalg.matrix_rank', 'matrix_rank', (['A'], {}), '(A)\n', (3423, 3426), False, 'from numpy.linalg import matrix_rank\n'), ((3457, 3471), 'numpy.linalg.det', 'det', (['A[:2, :2]'], {}), '(A[:2, :2])\n', (3460, 3471), False, 'from numpy.linalg import det\n'), ((1032, 1053), 'numpy.linalg.solve', 'solve', (['matrix', 'vector'], {}), '(matrix, vector)\n', (1037, 1053), False, 'from numpy.linalg import solve\n'), ((3907, 3921), 'numpy.linalg.det', 'det', (['A[:2, :2]'], {}), '(A[:2, :2])\n', (3910, 3921), False, 'from numpy.linalg import det\n'), ((2885, 2905), 'sympy.Matrix', 'sympy.Matrix', (['matrix'], {}), '(matrix)\n', (2897, 2905), False, 'import sympy\n'), ((985, 1006), 'numpy.linalg.solve', 'solve', (['matrix', 'vector'], {}), '(matrix, vector)\n', (990, 1006), False, 'from numpy.linalg import solve\n')] |
#Chapter 4 - Creating and Manipulating your own Databases
# NOTE(review): DataCamp-style exercise snippets.  Names such as `metadata`,
# `engine`, `connection`, `census`, `state_fact`, `flat_census`, `csv_reader`,
# `update`, `func` and `and_` are assumed to be pre-defined by the exercise
# environment -- this file is not runnable on its own.
#*******************************************************************************************#
#Creating Tables with SQLAlchemy
# Import Table, Column, String, Integer, Float, Boolean from sqlalchemy
from sqlalchemy import Table, Column, String, Integer, Float, Boolean
# Define a new table with a name, count, amount, and valid column: data
data = Table('data', metadata,
Column('name', String(255)),
Column('count', Integer()),
Column('amount', Float()),
Column('valid', Boolean())
)
# Use the metadata to create the table
metadata.create_all(engine)
# Print table details
print(repr(data))
#*******************************************************************************************#
#Constraints and Data Defaults
# Import Table, Column, String, Integer, Float, Boolean from sqlalchemy
from sqlalchemy import Table, Column, String, Integer, Float, Boolean
# Define a new table with a name, count, amount, and valid column: data
data = Table('data', metadata,
Column('name', String(255), unique=True),
Column('count', Integer(), default=1),
Column('amount', Float()),
Column('valid', Boolean(), default=False)
)
# Use the metadata to create the table
metadata.create_all(engine)
# Print the table details
print(repr(metadata.tables['data']))
#*******************************************************************************************#
#Inserting a single row with an insert() statement
# Import insert from sqlalchemy
from sqlalchemy import insert, select
# Build an insert statement to insert a record into the data table: stmt
stmt = insert(data).values(name='Anna', count=1, amount=1000.00, valid=True)
# Execute the statement via the connection: results
results = connection.execute(stmt)
# Print result rowcount
print(results.rowcount)
# Build a select statement to validate the insert
stmt = select([data]).where(data.columns.name == 'Anna')
# Print the result of executing the query.
print(connection.execute(stmt).first())
#*******************************************************************************************#
#Inserting Multiple Records at Once
# Build a list of dictionaries: values_list
values_list = [
{'name': 'Anna', 'count': 1, 'amount': 1000.00, 'valid': True},
{'name': 'Taylor', 'count': 1, 'amount': 750.00, 'valid': False}
]
# Build an insert statement for the data table: stmt
stmt = insert(data)
# Execute stmt with the values_list: results
results = connection.execute(stmt, values_list)
# Print rowcount
print(results.rowcount)
#*******************************************************************************************#
#Loading a CSV into a Table
# Create a insert statement for census: stmt
stmt = insert(census)
# Create an empty list and zeroed row count: values_list, total_rowcount
values_list = []
total_rowcount = 0
# Enumerate the rows of csv_reader
for idx, row in enumerate(csv_reader):
    # NOTE(review): `data` is re-bound here from the Table above to a plain
    # dict holding one CSV row.
    #create data and append to values_list
    data = {'state': row[0], 'sex': row[1], 'age': row[2], 'pop2000': row[3],
'pop2008': row[4]}
    values_list.append(data)
    # Flush the batch to the database every 51 rows.
    if idx % 51 == 0:
        results = connection.execute(stmt, values_list)
        total_rowcount += results.rowcount
        values_list = []
# Print total rowcount
print(total_rowcount)
#*******************************************************************************************#
#Updating individual records
# Build a select statement: select_stmt
select_stmt = select([state_fact]).where(state_fact.columns.name == 'New York')
# Print the results of executing the select_stmt
print(connection.execute(select_stmt).fetchall())
# Build a statement to update the fips_state to 36: stmt
stmt = update(state_fact).values(fips_state=36)
# Append a where clause to limit it to records for New York state
stmt = stmt.where(state_fact.columns.name == 'New York')
# Execute the statement: results
results = connection.execute(stmt)
# Print rowcount
print(results.rowcount)
# Execute the select_stmt again to view the changes
print(connection.execute(select_stmt).fetchall())
#*******************************************************************************************#
#Updating Multiple Records
#
# Build a statement to update the notes to 'The Wild West': stmt
stmt = update(state_fact).values(notes='The Wild West')
# Append a where clause to match the West census region records
stmt = stmt.where(state_fact.columns.census_region_name == 'West')
# Execute the statement: results
results = connection.execute(stmt)
# Print rowcount
print(results.rowcount)
#*******************************************************************************************#
## Correlated Updates
# Build a statement to select name from state_fact: stmt
fips_stmt = select([state_fact.columns.name])
# Append a where clause to Match the fips_state to flat_census fips_code
fips_stmt = fips_stmt.where(
state_fact.columns.fips_state == flat_census.columns.fips_code)
# Build an update statement to set the name to fips_stmt: update_stmt
update_stmt = update(flat_census).values(state_name=fips_stmt)
# Execute update_stmt: results
results = connection.execute(update_stmt)
# Print rowcount
print(results.rowcount)
#*******************************************************************************************#
#Deleting all the records from a table
# Import delete, select
from sqlalchemy import delete, select
# Build a statement to empty the census table: stmt
stmt = delete(census)
# Execute the statement: results
results = connection.execute(stmt)
# Print affected rowcount
print(results.rowcount)
# Build a statement to select all records from the census table
stmt = select([census])
# Print the results of executing the statement to verify there are no rows
print(connection.execute(stmt).fetchall())
#*******************************************************************************************#
## Deleting specific records
# Build a statement to count records using the sex column for Men ('M') age 36: stmt
stmt = select([func.count(census.columns.sex)]).where(
and_(census.columns.sex == 'M',
census.columns.age == 36)
)
# Execute the select statement and use the scalar() fetch method to save the record count
to_delete = connection.execute(stmt).scalar()
# Build a statement to delete records from the census table: stmt_del
stmt_del = delete(census)
# Append a where clause to target Men ('M') age 36
stmt_del = stmt_del.where(
and_(census.columns.sex == 'M',
census.columns.age == 36)
)
# Execute the statement: results
results = connection.execute(stmt_del)
# Print affected rowcount and to_delete record count, make sure they match
print(results.rowcount, to_delete)
#*******************************************************************************************#
#Deleting a Table Completely
#
# Drop the state_fact tables
state_fact.drop(engine)
# Check to see if state_fact exists
print(state_fact.exists(engine))
# Drop all tables
metadata.drop_all(engine)
# Check to see if census exists
print(census.exists(engine))
#*******************************************************************************************#
| [
"sqlalchemy.Float",
"sqlalchemy.Boolean",
"sqlalchemy.insert",
"sqlalchemy.Integer",
"sqlalchemy.delete",
"sqlalchemy.String",
"sqlalchemy.select"
] | [((2838, 2850), 'sqlalchemy.insert', 'insert', (['data'], {}), '(data)\n', (2844, 2850), False, 'from sqlalchemy import insert, select\n'), ((3226, 3240), 'sqlalchemy.insert', 'insert', (['census'], {}), '(census)\n', (3232, 3240), False, 'from sqlalchemy import insert, select\n'), ((5517, 5550), 'sqlalchemy.select', 'select', (['[state_fact.columns.name]'], {}), '([state_fact.columns.name])\n', (5523, 5550), False, 'from sqlalchemy import delete, select\n'), ((6251, 6265), 'sqlalchemy.delete', 'delete', (['census'], {}), '(census)\n', (6257, 6265), False, 'from sqlalchemy import delete, select\n'), ((6467, 6483), 'sqlalchemy.select', 'select', (['[census]'], {}), '([census])\n', (6473, 6483), False, 'from sqlalchemy import delete, select\n'), ((7180, 7194), 'sqlalchemy.delete', 'delete', (['census'], {}), '(census)\n', (7186, 7194), False, 'from sqlalchemy import delete, select\n'), ((524, 535), 'sqlalchemy.String', 'String', (['(255)'], {}), '(255)\n', (530, 535), False, 'from sqlalchemy import Table, Column, String, Integer, Float, Boolean\n'), ((568, 577), 'sqlalchemy.Integer', 'Integer', ([], {}), '()\n', (575, 577), False, 'from sqlalchemy import Table, Column, String, Integer, Float, Boolean\n'), ((611, 618), 'sqlalchemy.Float', 'Float', ([], {}), '()\n', (616, 618), False, 'from sqlalchemy import Table, Column, String, Integer, Float, Boolean\n'), ((651, 660), 'sqlalchemy.Boolean', 'Boolean', ([], {}), '()\n', (658, 660), False, 'from sqlalchemy import Table, Column, String, Integer, Float, Boolean\n'), ((1242, 1253), 'sqlalchemy.String', 'String', (['(255)'], {}), '(255)\n', (1248, 1253), False, 'from sqlalchemy import Table, Column, String, Integer, Float, Boolean\n'), ((1299, 1308), 'sqlalchemy.Integer', 'Integer', ([], {}), '()\n', (1306, 1308), False, 'from sqlalchemy import Table, Column, String, Integer, Float, Boolean\n'), ((1353, 1360), 'sqlalchemy.Float', 'Float', ([], {}), '()\n', (1358, 1360), False, 'from sqlalchemy import Table, Column, 
String, Integer, Float, Boolean\n'), ((1393, 1402), 'sqlalchemy.Boolean', 'Boolean', ([], {}), '()\n', (1400, 1402), False, 'from sqlalchemy import Table, Column, String, Integer, Float, Boolean\n'), ((1918, 1930), 'sqlalchemy.insert', 'insert', (['data'], {}), '(data)\n', (1924, 1930), False, 'from sqlalchemy import insert, select\n'), ((2192, 2206), 'sqlalchemy.select', 'select', (['[data]'], {}), '([data])\n', (2198, 2206), False, 'from sqlalchemy import delete, select\n'), ((4096, 4116), 'sqlalchemy.select', 'select', (['[state_fact]'], {}), '([state_fact])\n', (4102, 4116), False, 'from sqlalchemy import delete, select\n')] |
# -*- coding: utf-8 -*-
from __future__ import absolute_import
from __future__ import division, print_function, unicode_literals
from time import strftime, gmtime
from email.header import make_header
from email.mime.text import MIMEText
from email.mime.multipart import MIMEMultipart
from .utils import strip_tags, format_email_address
from .attachment import Attachment
from .compat import unicode_compatible, to_unicode, to_string, PY3
@unicode_compatible
class PlainMessage(object):
    """Wrapper around the data of an e-mail message with a plain-text body.

    Stores sender, subject, content, recipients (To/Cc/Bcc) and attachments,
    and builds the corresponding MIME payload on demand via :attr:`payload`.
    """
    _PREAMBLE_TEXT = "This is a multi-part message in MIME format."

    def __init__(self, sender, subject, content, charset="utf-8"):
        """
        :param sender: sender address, formatted via ``format_email_address``
        :param subject: message subject
        :param content: plain-text body
        :param charset: charset used for headers, body and attachments
        """
        self._sender = format_email_address(sender)
        self._charset = to_string(charset)
        self._content = to_unicode(content)
        self._subject = to_unicode(subject)
        self._attachments = []
        self._recipients = {"To": [], "Cc": [], "Bcc": []}

    @property
    def sender(self):
        """Formatted sender address."""
        return self._sender

    @property
    def subject(self):
        """Message subject (unicode)."""
        return self._subject

    @property
    def recipients(self):
        """All recipients (To + Cc + Bcc) as a frozenset."""
        to = self._recipients["To"]
        cc = self._recipients["Cc"]
        bcc = self._recipients["Bcc"]
        return frozenset(to + cc + bcc)

    def add_recipients(self, *recipients):
        """Add "To" recipients, ignoring addresses already registered."""
        recipients = self._unique_recipients(recipients)
        self._recipients["To"].extend(recipients)

    def add_recipients_cc(self, *recipients):
        """Add "Cc" recipients, ignoring addresses already registered."""
        recipients = self._unique_recipients(recipients)
        self._recipients["Cc"].extend(recipients)

    def add_recipients_bcc(self, *recipients):
        """Add "Bcc" recipients, ignoring addresses already registered."""
        recipients = self._unique_recipients(recipients)
        self._recipients["Bcc"].extend(recipients)

    def _unique_recipients(self, recipients):
        """Format the given addresses and drop those already registered."""
        recipients = map(format_email_address, recipients)
        return frozenset(recipients) - self.recipients

    @property
    def content(self):
        """Plain-text body (unicode)."""
        return self._content

    @property
    def payload(self):
        """Build the MIME payload of the message.

        Returns a single-part payload when there are no attachments and a
        multipart/mixed payload otherwise.
        """
        payload = self._build_content_payload(self._content)
        if self._attachments:
            content_payload = payload
            payload = MIMEMultipart("mixed")
            payload.attach(content_payload)
            payload.preamble = self._PREAMBLE_TEXT
            for attachment in self._attachments:
                payload.attach(attachment.payload)
        # BUG FIX: headers (From/Subject/Date/recipients) used to be set
        # only inside the attachments branch, so a message without
        # attachments was returned without any of them.  Set them
        # unconditionally.
        payload = self._set_payload_headers(payload)
        return payload

    def _build_content_payload(self, content):
        """Encode the body into a text/plain MIME part."""
        return MIMEText(content.encode(self._charset), "plain", self._charset)

    def _set_payload_headers(self, payload):
        """Set recipient, From, Subject and Date headers on *payload*."""
        for copy_type, recipients in self._recipients.items():
            for recipient in recipients:
                payload[copy_type] = self._make_header(recipient)
        payload["From"] = self._make_header(self._sender)
        payload["Subject"] = self._make_header(self._subject)
        payload["Date"] = strftime("%a, %d %b %Y %H:%M:%S %z", gmtime())
        return payload

    def _make_header(self, value):
        """Wrap *value* into a MIME header encoded with the message charset."""
        return make_header([(self._to_string(value), self._charset)])

    def _to_string(self, value):
        # On Python 2 header values have to be byte strings in the charset.
        if PY3:
            return value
        else:
            return value.encode(self._charset)

    def attach(self, file, charset=None, mimetype=None):
        """Attach *file* to the message and return the Attachment object."""
        if charset is None:
            charset = self._charset
        attachment = Attachment(file, charset, mimetype)
        self._attachments.append(attachment)
        return attachment

    if PY3:
        def __str__(self):
            return self.payload.as_string()
    else:
        def __bytes__(self):
            return self.payload.as_string()

    def __repr__(self):
        return to_string("<PlainMessage: %s>" % self.subject)
class HtmlMessage(PlainMessage):
    """Simple wrapper for data of e-mail message with HTML content."""
    def _build_content_payload(self, content):
        # Build a multipart/alternative payload: a plain-text fallback
        # (tags stripped) plus the original HTML part.
        content = content.encode(self._charset)
        # NOTE(review): from here on `content` is bytes; both strip_tags()
        # and MIMEText() receive bytes, which MIMEText does not accept on
        # Python 3 -- verify this path under the Python 3 build.
        payload = MIMEMultipart("alternative", charset=self._charset)
        text_alternative = MIMEText(strip_tags(content), "plain", self._charset)
        payload.attach(text_alternative)
        html_alternative = MIMEText(content, "html", self._charset)
        payload.attach(html_alternative)
        return payload
| [
"email.mime.multipart.MIMEMultipart",
"time.gmtime",
"email.mime.text.MIMEText"
] | [((3988, 4039), 'email.mime.multipart.MIMEMultipart', 'MIMEMultipart', (['"""alternative"""'], {'charset': 'self._charset'}), "('alternative', charset=self._charset)\n", (4001, 4039), False, 'from email.mime.multipart import MIMEMultipart\n'), ((4191, 4231), 'email.mime.text.MIMEText', 'MIMEText', (['content', '"""html"""', 'self._charset'], {}), "(content, 'html', self._charset)\n", (4199, 4231), False, 'from email.mime.text import MIMEText\n'), ((2172, 2194), 'email.mime.multipart.MIMEMultipart', 'MIMEMultipart', (['"""mixed"""'], {}), "('mixed')\n", (2185, 2194), False, 'from email.mime.multipart import MIMEMultipart\n'), ((2987, 2995), 'time.gmtime', 'gmtime', ([], {}), '()\n', (2993, 2995), False, 'from time import strftime, gmtime\n')] |
"""
Module to execute the simulation for a given instance.
"""
""" import packages """
import logging
from importlib import import_module
import numpy.random as rdm
import copy
import numpy as np
""" import project configurations """
import configurations.settings_simulation as config
""" import project libraries """
import modules.data.datamgm as dtm
from modules.simulation.entities import Tram, Stop, Passengers, CargoRequest, write_entities_log, init_entities_log
# Global logger
logger = dtm.initialise_logger(__name__)
"""
GLOBAL VARIABLES
----------------
- These variables must be resetted after every simulation run
"""
#: now Simulation Clock
now = -1
#: last_now Last event
last_now = 0
#:event_queue Event queue
event_queue = []
#:trams List of running trams
trams = []
#:stops List of stops
stops = []
#:cargo List of cargo
cargo = []
#:updates List of updates
updates = set()
#:numEvents Number of total events
numEvents = 0
def reset_variables():
    """
    Reset all module-level simulation state so a new run starts clean.
    """
    global now, last_now, numEvents, trams, stops, event_queue, cargo, updates
    now = -1
    last_now = 0
    numEvents = 0
    # NOTE(review): only the first tram is reset -- presumably Tram.reset()
    # clears class-level counters shared by all instances; confirm in the
    # entities module before changing.
    if trams:
        trams[0].reset()
    trams.clear()
    for stop in stops:
        stop.reset()
    stops.clear()
    event_queue.clear()
    Passengers.reset()
    # NOTE(review): same pattern as trams -- assumed to reset class state.
    if cargo:
        cargo[0].reset()
    cargo.clear()
    updates.clear()
"""
SIMULATION LOGGING
------------------
- Simluation log (Text File): Includes all information about the events in the simulation
- Entities Log (csv file): Includes the relevant data information of single entities
"""
# "Simulation Log": What does in a single simulation run happen? (Descriptive)
sim_log = logging.getLogger("simulation")
# "Entities Log": How do the variables change during one simulation run?
ent_log = logging.getLogger("entities")
"""
SIMULATION METHODS
------------------
"""
def run(instance, passengerData, seed=False, index_child_seed=False):
    """
    Execute one simulation run for the given instance.

    :param instance: path to the instance file
    :param passengerData: path to the passenger data file
    :param seed: numpy SeedSequence to replicate the simulation
        (False draws a fresh one)
    :param index_child_seed: index of the child of the global seed
        sequence, logged for replicability
    """
    # Used global variables
    global inst, now, last_now, event_queue, numEvents
    # Initialise the random generator: reuse the given seed sequence or
    # draw a fresh one; keep its entropy for replicability.
    if seed:
        entropy = seed.entropy
    else:
        seed = rdm.SeedSequence()
        entropy = seed.entropy
    # Import instance (from .py-file)
    inst = dtm.import_instance(instance)
    # Initialize the simulation
    passenger = initialize(seed, passengerData)
    # Run the event loop
    running = True
    while running:
        # sort the upcoming events according to the time they occur
        event_queue = sorted(event_queue,key = lambda i: i['time'])
        if event_queue:
            # A new timestamp: flush the entity logs for the previous one.
            if event_queue[0]['time'] != now:
                if now >= 0:
                    status(now)
                    for entity in updates:
                        if entity == "passenger":
                            entity = passenger
                        entity.last_event = now
                        write_entities_log(entity,now)
                    updates.clear()
                last_now = now
                now = event_queue[0]['time']
                sim_log.info("\n-----------------------------------------------------------------------------------")
                sim_log.info(f"Events at {now}:")
                sim_log.info("***")
            next_event()
            numEvents+= 1
            event_queue.pop(0)
        # No more events
        else:
            last_time_period(inst.numPeriods-1,passenger)
            running = False
    # Save values for replicability
    sim_log.info(f"\nentropy:\n{entropy}\n")
    # BUG FIX: the child-seed index was logged as `entropy` (copy-paste),
    # making the replication information useless.
    sim_log.info(f"index_child_seed:\n{index_child_seed}\n")
    # Reset after simulation run
    reset_variables()
# Initialisation
def initialize(seed, passengerData):
    """
    Initialise a simulation run: create the stops, the passenger state,
    the tram and cargo start events, and fill the event queue.

    :param seed: numpy SeedSequence used to spawn per-entity child seeds
    :param passengerData: path to the passenger data file
    :type passengerData: string or path
    :return: global passenger object tracking the number of passengers
    :rtype: Passengers object
    """
    global event_queue
    sim_log.info("Initialisation...\n--------------------------------------")
    # Create one child seed sequence per entity group
    seeds = seed.spawn(10)
    # Entities Log
    init_entities_log()
    # initialize stops (stop 0 is flagged with an extra True argument)
    for s in range(inst.numStops):
        distance_to = {"Stop": inst.stops_distance[s],"Customer": [0]}
        distance_from = {"Stop": [inst.stops_distance[j][s] for j in range(inst.numStops)], "Customer": [0]}
        if s == 0:
            stops.append(Stop(distance_to,distance_from,True))
        else:
            stops.append(Stop(distance_to,distance_from))
    pas = dtm.import_instance(passengerData)
    # Initialize passengers: each random component either keeps its data
    # or is replaced by a child seed when randomisation is switched on.
    passenger_seeds = seeds[0].spawn(6)
    if config.random_passenger_arrival:
        arriving = pas.arriving_intensity
        config.random_passenger_arrival = passenger_seeds[0]
    else:
        arriving = pas.passenger_arriving
    # instantiate passenger arrival events (event id 6) for non-zero entries
    nonzero = np.nonzero(arriving)
    for i in range(len(nonzero[0])):
        p = nonzero[0][i]
        s = nonzero[1][i]
        create_event(p, 6, [s])
    if config.random_passenger_boarding:
        config.random_passenger_boarding = passenger_seeds[1]
    if config.random_passenger_alighting:
        # BUG FIX: this branch overwrote the *boarding* seed instead of
        # the alighting one (copy-paste error).
        config.random_passenger_alighting = passenger_seeds[2]
    if config.random_passenger_changing:
        config.random_passenger_changing = passenger_seeds[3]
    if config.random_boarding_time:
        config.random_boarding_time = passenger_seeds[4]
    if config.random_alighting_time:
        config.random_alighting_time = passenger_seeds[5]
    # Global passenger variables
    passenger = Passengers(
        # passenger arrival
        random_arrival = config.random_passenger_arrival,
        arriving_passengers = arriving,
        arriving_passengers_cum = pas.passenger_arriving_acc,
        # passenger boarding
        random_boarding = config.random_passenger_boarding,
        boarding_rate = [1 for tram in range(inst.numTrams)],
        # passenger alighting
        random_alighting = config.random_passenger_alighting,
        alighting_rate = pas.passenger_allighting_rate,
        # passenger changing
        random_changing = config.random_passenger_changing,
        changing_rate = [0 for tram in range(inst.numStops)],
        # time
        random_boarding_time = config.random_boarding_time,
        random_alighting_time = config.random_alighting_time,
        service_time = inst.passenger_service_time_board,
        service_time_alight = inst.passenger_service_time_alight,
    )
    # Initialize the starting times of each tram (event id 1)
    tram_seeds = seeds[1].spawn(inst.numTrams)
    for t in range(inst.numTrams):
        sim_log.info(f"Tram {t} will start at {inst.tram_time_arrival[t][0]}.")
        Tram.numTotal += 1
        create_event(inst.tram_time_arrival[t][0],1,[t,tram_seeds[t]])
    # Initialize the cargo release events (event id 5)
    cargo_seeds = seeds[2].spawn(inst.numCargo)
    for c in range(inst.numCargo):
        sim_log.info(f"Cargo request {c} will start at {inst.cargo_release[c]}.")
        create_event(inst.cargo_release[c],5,[c,cargo_seeds[c]])
    # sort the event queue according to the time
    event_queue = sorted(event_queue,key = lambda i: i['time'])
    sim_log.info("\n-----------------------------------------------------------------------------------\n")
    return passenger
def last_time_period(time,passenger):
    """Flush the simulation log for the final simulated period.

    Records the status of every tram, stop and cargo request together with
    the global passenger statistics at the end of the horizon.

    :param time: last period
    :type time: float
    :param passenger: passenger object
    :type passenger: Passengers object
    """
    # Dump the current state of all entities into the simulation log.
    status(time)
    for tram in trams:
        write_entities_log(tram, time)
    for platform in stops:
        write_entities_log(platform, time)
    write_entities_log(passenger, time)
    # Cargo requests update their delay estimate before being logged.
    for request in cargo:
        request.estimate_delay(time)
        write_entities_log(request, time)
def status(time):
    """Add the status of all entities to the simulation log.

    :param time: Time of update
    :type time: float
    """
    global updates
    sim_log.info("\n*~* Status *~*")
    # Log each tram; append a snapshot only while the number of stored
    # snapshots is still below the tram's `stopped` counter.
    for tram in trams:
        tram.info()
        if len(tram.sequences) < tram.stopped:
            snapshot = {"time": time, "cargo": tram.cargosize,
                        "passengers": tram.passengers, "delay": tram.delay}
            tram.sequences.append(snapshot)
    # Stops follow the same pattern but carry no delay entry.
    for platform in stops:
        platform.info()
        if len(platform.sequences) < platform.stopped:
            snapshot = {"time": time, "cargo": platform.cargosize,
                        "passengers": platform.passengers}
            platform.sequences.append(snapshot)
    # Class-level summaries.
    CargoRequest.info()
    Passengers.info()
"""
METHODS FOR HANDLING EVENTS
---------------------------
"""
def create_event(t,event_id,par):
    """Append a new event to the global event queue.

    Events whose (rounded-up) time falls at or beyond ``inst.numPeriods``
    are silently discarded, i.e. only events inside the time horizon are
    scheduled.

    :param t: time
    :type t: float
    :param event_id: event id
    :type event_id: int
    :param par: event parameters
    :type par: list
    """
    if np.ceil(t) < inst.numPeriods:
        new_event = {"time": t, "id": event_id, "par": par}
        event_queue.append(new_event)
def next_event():
    """Execute the next event in the event queue.

    Peeks at the first (earliest) queue entry and dispatches on its event
    id; the entry is not removed here.
    """
    head = event_queue[0]
    event_id = head["id"]
    par = head["par"]
    if event_id == 1:
        # Starting a new tram
        starting_tram(par[0], seed=par[1])
    elif event_id == 2:
        # Tram reaches stop (but does not enter yet)
        tram_reaches_stop(par[0])
    elif event_id == 3:
        # Tram enters stop
        tram_entering_stop(par[0])
    elif event_id == 4:
        # Tram leaves stop (and next tram can enter this stop)
        tram_leaves_stop(par[0])
    elif event_id == 5:
        # Cargo is released
        starting_cargo(par[0], seed=par[1])
    elif event_id == 6:
        # Update passengers
        passenger_update(par[0])
"""
EVENT METHODS
-----------------------------------
"""
def starting_tram(index,seed):
    """
    Event no. 1: Starting a tram

    Builds the tram for the given schedule slot and either sends it to its
    first stop (when operating) or only records it in the log.

    :param index: Index of the tram
    :type index: int
    :param seed: Seed for replicability
    :type seed: int
    """
    global updates
    tram_id = len(trams)
    if config.random_travel_time:
        config.random_travel_time = seed
    # The three original Tram(...) calls differed only in the two capacity
    # arguments; determine those per operating scheme, then build once.
    # "SV": passengers and cargo share vehicles.
    if inst.scheme == "SV":
        capacity_passenger = inst.tram_capacity - inst.tram_capacity_min_cargo
        capacity_cargo = inst.tram_capacity - inst.tram_capacity_min_passenger
    # "SI": passengers and cargo have dedicated vehicles.
    elif inst.scheme == "SI":
        if index in inst.cargo_tram_assignment:
            # cargo tram
            capacity_passenger = 0
            capacity_cargo = inst.tram_capacity_cargo
        else:
            # passenger tram
            capacity_passenger = inst.tram_capacity
            capacity_cargo = 0
    if inst.scheme in ("SV", "SI"):
        trams.append(Tram(
            tour = inst.tram_tour[index],
            capacity_passenger = capacity_passenger,
            capacity_cargo = capacity_cargo,
            capacity_total = inst.tram_capacity,
            schedule_arrival = inst.tram_time_arrival[index],
            schedule_departure = inst.tram_time_departure[index],
            speed = inst.tram_speed,
            # Simulation deterministic by default
            random_travel_time = config.random_travel_time,
            travel_deviation = config.tram_travel_deviation,
            max_service = inst.tram_max_service
            )
        )
    tram = trams[-1]
    if tram.is_operating:
        tram_reaches_stop(tram_id)
    else:
        updates.add(tram)
def tram_reaches_stop(tram_id):
    """Event no. 2: Tram reaches stop. It either queues up or enters the stop.

    :param tram_id: tram id
    :type tram_id: int
    """
    global now
    tram = trams[tram_id]
    tram.reach_next_location(now)
    platform = stops[tram.tour[tram.position]]
    # Enter immediately when the platform queue allows it; otherwise the
    # tram remains queued and only its log entry is updated.
    if platform.check_queue(tram):
        tram_entering_stop(tram_id)
    else:
        updates.add(tram)
def tram_entering_stop(tram_id):
    """
    Event no. 3: Tram enters the platform of the stop.

    Handles passenger boarding/alighting (if the tram transports
    passengers), then cargo unloading and loading (if it transports cargo),
    updating the tram's leaving time after each step and scheduling the
    departure event (event id 4) for the final leaving time.

    :param tram_id: tram id
    :type tram_id: int
    """
    global now, updates
    tram = trams[tram_id]
    stop=stops[tram.tour[tram.position]]
    tram.enter_next_stop(stop,now)
    boarding_time = 0
    alighting_time = 0
    # Update passengers
    if tram.passenger_transport:
        boarding_time, alighting_time = passenger_update(stop.index,True,True)
    # Compute leaving time with passengers only
    leaving_time = tram.compute_leaving_time(now,boarding_time,alighting_time)
    # NOTE(review): new_leaving_time is assigned below but never read in
    # this function; the final leaving time is taken from tram.leaving_time.
    new_leaving_time = False
    if tram.cargo_transport:
        # unloading
        # Iterate a shallow copy so requests can modify tram.cargoload
        # during unload without disturbing the iteration.
        tram_cargoload = copy.copy(tram.cargoload)
        for c in tram_cargoload:
            request = cargo[c]
            if request.end_stop == stop.index:
                unloading_time = request.unload(tram,stop,now)
                new_leaving_time = tram.compute_leaving_time(now,unloading_time=unloading_time)
                updates.add(request)
        # NOTE(review): clears only the local copy, not tram.cargoload —
        # effectively a no-op; confirm the intent.
        tram_cargoload.clear()
        # loading
        # Same copy-then-iterate pattern for requests waiting at the stop.
        stop_cargoload = copy.copy(stop.cargoload)
        for c in stop_cargoload:
            request = cargo[c]
            if request.assigned_vehicle == tram.index:
                loading_time = request.load(tram,stop)
                new_leaving_time = tram.compute_leaving_time(now,loading_time=loading_time)
                updates.add(request)
        # NOTE(review): clears only the local copy — see note above.
        stop_cargoload.clear()
    updates.add(tram)
    # Schedule the departure event for the (possibly updated) leaving time.
    create_event(tram.leaving_time, 4, [tram_id])
    return updates
def tram_leaves_stop(tram_id):
    """Event no. 4: Tram leaves the stop.

    :param tram_id: tram id
    :type tram_id: int
    """
    global now
    tram = trams[tram_id]
    current_stop = stops[tram.tour[tram.position]]
    # Only act when the tram is still scheduled to leave right now; a
    # mismatch means the leaving time was updated after this event was
    # created, so the event is stale.
    if tram.leaving_time != now:
        return
    travel_time = tram.leave_location(current_stop, now)
    updates.add(tram)
    updates.add(current_stop)
    # Schedule the arrival at the next location while the tram operates.
    if tram.is_operating:
        create_event(now + travel_time, 2, [tram_id])
    # Let the next queued tram (if any) enter the now-free platform.
    waiting_tram = current_stop.next_tram_in_queue(tram)
    if waiting_tram >= 0:
        create_event(now + inst.min_time_next_tram, 3, [waiting_tram])
def starting_cargo(index,seed):
    """Event no. 5: New cargo request arrives.

    Creates the request from the instance data and, if the assigned tram
    is already waiting at the depot stop, loads it immediately and
    reschedules the tram's departure.

    :param index: cargo index
    :type index: int
    :param seed: seed for randomisation
    :type seed: int
    """
    global now, updates, trams
    # Build the new cargo request from the instance data.
    new_request = CargoRequest(
        release = inst.cargo_release[index],
        deadline = inst.cargo_station_deadline[index],
        end_stop = inst.cargo_station_destination[index],
        assigned_vehicle = inst.cargo_tram_assignment[index],
        stop = stops[0],
        service_time = inst.cargo_service_time_load,
        service_time_unload = inst.cargo_service_time_unload,
        size = inst.cargo_size,
        random_service_time = seed,
    )
    cargo.append(new_request)
    depot = stops[new_request.start_stop]
    # Record the new state of the stop and the request in the log.
    updates.add(depot)
    updates.add(new_request)
    # If the assigned vehicle is currently at the depot, load right away.
    if depot.current_tram == new_request.assigned_vehicle:
        tram = trams[new_request.assigned_vehicle]
        # Update the current loading and leaving time of the tram.
        loading_time = new_request.load(tram, depot)
        leaving_time = tram.compute_leaving_time(now, loading_time = loading_time)
        updates.add(tram)
        # A changed leaving time requires a new departure event.
        if leaving_time:
            create_event(leaving_time, 4, [tram.index])
def passenger_update(stop_id,recent_tram_arrival = False, consider_tram=False):
    """
    Event no. 6: New passengers arrive and/or alight and board a vehicle

    :param stop_id: Index of the stop
    :type stop_id: int
    :param recent_tram_arrival: New arrival of tram (True) or update while tram is waiting (False)?, defaults to False
    :type recent_tram_arrival: bool, optional
    :param consider_tram: Consider boarding and alighting process (True) or only arrival (False), defaults to False
    :type consider_tram: bool, optional
    :return: boarding and alighting time
    :rtype: tuple
    """
    global now, updates
    stop = stops[stop_id]
    # Only look at the tram on the platform when boarding/alighting should
    # be considered; tram_id == -1 disables the whole tram branch below.
    if consider_tram:
        tram_id = stop.current_tram
    else:
        tram_id = -1
    # Update arriving passengers
    Passengers.arrival(now,stop)
    boarding_time = 0
    alighting_time = 0
    # if currently a tram waits at the platform
    if tram_id >= 0:
        tram = trams[tram_id]
        # Skip the update when the tram is already leaving this instant
        # (unless it just arrived).
        if recent_tram_arrival or tram.leaving_time != now:
            if recent_tram_arrival:
                # compute number and time for alighting passengers
                alighting_passengers, alighting_time = Passengers.alighting(stop,tram,now)
            # compute number and time for boarding passengers
            # NOTE(review): boarding_passengers is never used afterwards.
            boarding_passengers, boarding_time = Passengers.boarding(stop,tram,now)
            if recent_tram_arrival:
                # compute number and time for changing passengers
                # NOTE(review): changing_passengers is never used afterwards.
                changing_passengers = Passengers.changing(stop,alighting_passengers,now)
            # Update leaving time
            # Only while the tram is waiting; on a fresh arrival the caller
            # (tram_entering_stop) computes the leaving time itself from
            # the returned boarding/alighting times.
            if not recent_tram_arrival:
                leaving_time = tram.compute_leaving_time(now,boarding_time,alighting_time, 0, 0)
                updates.add(tram)
                #write_entities_log(tram,now)
                # Did the leaving time change?
                if leaving_time:
                    create_event(leaving_time, 4, [tram_id])
    #next_arrival = Passengers.compute_next_arrival_time(now,stop,tram)
    #if next_arrival:
        # create new event (for passengers that may arrive before the current tram leaves)
        #create_event(next_arrival, 6, [stop_id])
    updates.add(stop)
    updates.add("passenger")
    return boarding_time, alighting_time
| [
"logging.getLogger",
"modules.simulation.entities.Tram",
"modules.simulation.entities.Passengers.reset",
"modules.simulation.entities.Passengers.boarding",
"modules.simulation.entities.Passengers.alighting",
"modules.simulation.entities.Stop",
"modules.data.datamgm.initialise_logger",
"modules.simulat... | [((506, 537), 'modules.data.datamgm.initialise_logger', 'dtm.initialise_logger', (['__name__'], {}), '(__name__)\n', (527, 537), True, 'import modules.data.datamgm as dtm\n'), ((1746, 1777), 'logging.getLogger', 'logging.getLogger', (['"""simulation"""'], {}), "('simulation')\n", (1763, 1777), False, 'import logging\n'), ((1863, 1892), 'logging.getLogger', 'logging.getLogger', (['"""entities"""'], {}), "('entities')\n", (1880, 1892), False, 'import logging\n'), ((1335, 1353), 'modules.simulation.entities.Passengers.reset', 'Passengers.reset', ([], {}), '()\n', (1351, 1353), False, 'from modules.simulation.entities import Tram, Stop, Passengers, CargoRequest, write_entities_log, init_entities_log\n'), ((2674, 2703), 'modules.data.datamgm.import_instance', 'dtm.import_instance', (['instance'], {}), '(instance)\n', (2693, 2703), True, 'import modules.data.datamgm as dtm\n'), ((4872, 4891), 'modules.simulation.entities.init_entities_log', 'init_entities_log', ([], {}), '()\n', (4889, 4891), False, 'from modules.simulation.entities import Tram, Stop, Passengers, CargoRequest, write_entities_log, init_entities_log\n'), ((5355, 5389), 'modules.data.datamgm.import_instance', 'dtm.import_instance', (['passengerData'], {}), '(passengerData)\n', (5374, 5389), True, 'import modules.data.datamgm as dtm\n'), ((8672, 8707), 'modules.simulation.entities.write_entities_log', 'write_entities_log', (['passenger', 'time'], {}), '(passenger, time)\n', (8690, 8707), False, 'from modules.simulation.entities import Tram, Stop, Passengers, CargoRequest, write_entities_log, init_entities_log\n'), ((9408, 9427), 'modules.simulation.entities.CargoRequest.info', 'CargoRequest.info', ([], {}), '()\n', (9425, 9427), False, 'from modules.simulation.entities import Tram, Stop, Passengers, CargoRequest, write_entities_log, init_entities_log\n'), ((9441, 9458), 'modules.simulation.entities.Passengers.info', 'Passengers.info', ([], {}), '()\n', (9456, 9458), False, 'from 
modules.simulation.entities import Tram, Stop, Passengers, CargoRequest, write_entities_log, init_entities_log\n'), ((19199, 19228), 'modules.simulation.entities.Passengers.arrival', 'Passengers.arrival', (['now', 'stop'], {}), '(now, stop)\n', (19217, 19228), False, 'from modules.simulation.entities import Tram, Stop, Passengers, CargoRequest, write_entities_log, init_entities_log\n'), ((2565, 2583), 'numpy.random.SeedSequence', 'rdm.SeedSequence', ([], {}), '()\n', (2581, 2583), True, 'import numpy.random as rdm\n'), ((5734, 5754), 'numpy.nonzero', 'np.nonzero', (['arriving'], {}), '(arriving)\n', (5744, 5754), True, 'import numpy as np\n'), ((8575, 8602), 'modules.simulation.entities.write_entities_log', 'write_entities_log', (['t', 'time'], {}), '(t, time)\n', (8593, 8602), False, 'from modules.simulation.entities import Tram, Stop, Passengers, CargoRequest, write_entities_log, init_entities_log\n'), ((8636, 8663), 'modules.simulation.entities.write_entities_log', 'write_entities_log', (['s', 'time'], {}), '(s, time)\n', (8654, 8663), False, 'from modules.simulation.entities import Tram, Stop, Passengers, CargoRequest, write_entities_log, init_entities_log\n'), ((8772, 8799), 'modules.simulation.entities.write_entities_log', 'write_entities_log', (['c', 'time'], {}), '(c, time)\n', (8790, 8799), False, 'from modules.simulation.entities import Tram, Stop, Passengers, CargoRequest, write_entities_log, init_entities_log\n'), ((9852, 9862), 'numpy.ceil', 'np.ceil', (['t'], {}), '(t)\n', (9859, 9862), True, 'import numpy as np\n'), ((15133, 15158), 'copy.copy', 'copy.copy', (['tram.cargoload'], {}), '(tram.cargoload)\n', (15142, 15158), False, 'import copy\n'), ((15608, 15633), 'copy.copy', 'copy.copy', (['stop.cargoload'], {}), '(stop.cargoload)\n', (15617, 15633), False, 'import copy\n'), ((17054, 17424), 'modules.simulation.entities.CargoRequest', 'CargoRequest', ([], {'release': 'inst.cargo_release[index]', 'deadline': 'inst.cargo_station_deadline[index]', 
'end_stop': 'inst.cargo_station_destination[index]', 'assigned_vehicle': 'inst.cargo_tram_assignment[index]', 'stop': 'stops[0]', 'service_time': 'inst.cargo_service_time_load', 'service_time_unload': 'inst.cargo_service_time_unload', 'size': 'inst.cargo_size', 'random_service_time': 'seed'}), '(release=inst.cargo_release[index], deadline=inst.\n cargo_station_deadline[index], end_stop=inst.cargo_station_destination[\n index], assigned_vehicle=inst.cargo_tram_assignment[index], stop=stops[\n 0], service_time=inst.cargo_service_time_load, service_time_unload=inst\n .cargo_service_time_unload, size=inst.cargo_size, random_service_time=seed)\n', (17066, 17424), False, 'from modules.simulation.entities import Tram, Stop, Passengers, CargoRequest, write_entities_log, init_entities_log\n'), ((11590, 12076), 'modules.simulation.entities.Tram', 'Tram', ([], {'tour': 'inst.tram_tour[index]', 'capacity_passenger': '(inst.tram_capacity - inst.tram_capacity_min_cargo)', 'capacity_cargo': '(inst.tram_capacity - inst.tram_capacity_min_passenger)', 'capacity_total': 'inst.tram_capacity', 'schedule_arrival': 'inst.tram_time_arrival[index]', 'schedule_departure': 'inst.tram_time_departure[index]', 'speed': 'inst.tram_speed', 'random_travel_time': 'config.random_travel_time', 'travel_deviation': 'config.tram_travel_deviation', 'max_service': 'inst.tram_max_service'}), '(tour=inst.tram_tour[index], capacity_passenger=inst.tram_capacity -\n inst.tram_capacity_min_cargo, capacity_cargo=inst.tram_capacity - inst.\n tram_capacity_min_passenger, capacity_total=inst.tram_capacity,\n schedule_arrival=inst.tram_time_arrival[index], schedule_departure=inst\n .tram_time_departure[index], speed=inst.tram_speed, random_travel_time=\n config.random_travel_time, travel_deviation=config.\n tram_travel_deviation, max_service=inst.tram_max_service)\n', (11594, 12076), False, 'from modules.simulation.entities import Tram, Stop, Passengers, CargoRequest, write_entities_log, init_entities_log\n'), 
((19796, 19832), 'modules.simulation.entities.Passengers.boarding', 'Passengers.boarding', (['stop', 'tram', 'now'], {}), '(stop, tram, now)\n', (19815, 19832), False, 'from modules.simulation.entities import Tram, Stop, Passengers, CargoRequest, write_entities_log, init_entities_log\n'), ((5230, 5268), 'modules.simulation.entities.Stop', 'Stop', (['distance_to', 'distance_from', '(True)'], {}), '(distance_to, distance_from, True)\n', (5234, 5268), False, 'from modules.simulation.entities import Tram, Stop, Passengers, CargoRequest, write_entities_log, init_entities_log\n'), ((5307, 5339), 'modules.simulation.entities.Stop', 'Stop', (['distance_to', 'distance_from'], {}), '(distance_to, distance_from)\n', (5311, 5339), False, 'from modules.simulation.entities import Tram, Stop, Passengers, CargoRequest, write_entities_log, init_entities_log\n'), ((19632, 19669), 'modules.simulation.entities.Passengers.alighting', 'Passengers.alighting', (['stop', 'tram', 'now'], {}), '(stop, tram, now)\n', (19652, 19669), False, 'from modules.simulation.entities import Tram, Stop, Passengers, CargoRequest, write_entities_log, init_entities_log\n'), ((19989, 20041), 'modules.simulation.entities.Passengers.changing', 'Passengers.changing', (['stop', 'alighting_passengers', 'now'], {}), '(stop, alighting_passengers, now)\n', (20008, 20041), False, 'from modules.simulation.entities import Tram, Stop, Passengers, CargoRequest, write_entities_log, init_entities_log\n'), ((3371, 3402), 'modules.simulation.entities.write_entities_log', 'write_entities_log', (['entity', 'now'], {}), '(entity, now)\n', (3389, 3402), False, 'from modules.simulation.entities import Tram, Stop, Passengers, CargoRequest, write_entities_log, init_entities_log\n'), ((12460, 12865), 'modules.simulation.entities.Tram', 'Tram', ([], {'tour': 'inst.tram_tour[index]', 'capacity_passenger': '(0)', 'capacity_cargo': 'inst.tram_capacity_cargo', 'capacity_total': 'inst.tram_capacity', 'schedule_arrival': 
'inst.tram_time_arrival[index]', 'schedule_departure': 'inst.tram_time_departure[index]', 'speed': 'inst.tram_speed', 'random_travel_time': 'config.random_travel_time', 'travel_deviation': 'config.tram_travel_deviation', 'max_service': 'inst.tram_max_service'}), '(tour=inst.tram_tour[index], capacity_passenger=0, capacity_cargo=inst.\n tram_capacity_cargo, capacity_total=inst.tram_capacity,\n schedule_arrival=inst.tram_time_arrival[index], schedule_departure=inst\n .tram_time_departure[index], speed=inst.tram_speed, random_travel_time=\n config.random_travel_time, travel_deviation=config.\n tram_travel_deviation, max_service=inst.tram_max_service)\n', (12464, 12865), False, 'from modules.simulation.entities import Tram, Stop, Passengers, CargoRequest, write_entities_log, init_entities_log\n'), ((13191, 13590), 'modules.simulation.entities.Tram', 'Tram', ([], {'tour': 'inst.tram_tour[index]', 'capacity_passenger': 'inst.tram_capacity', 'capacity_cargo': '(0)', 'capacity_total': 'inst.tram_capacity', 'schedule_arrival': 'inst.tram_time_arrival[index]', 'schedule_departure': 'inst.tram_time_departure[index]', 'speed': 'inst.tram_speed', 'random_travel_time': 'config.random_travel_time', 'travel_deviation': 'config.tram_travel_deviation', 'max_service': 'inst.tram_max_service'}), '(tour=inst.tram_tour[index], capacity_passenger=inst.tram_capacity,\n capacity_cargo=0, capacity_total=inst.tram_capacity, schedule_arrival=\n inst.tram_time_arrival[index], schedule_departure=inst.\n tram_time_departure[index], speed=inst.tram_speed, random_travel_time=\n config.random_travel_time, travel_deviation=config.\n tram_travel_deviation, max_service=inst.tram_max_service)\n', (13195, 13590), False, 'from modules.simulation.entities import Tram, Stop, Passengers, CargoRequest, write_entities_log, init_entities_log\n')] |
# -*- mode: python; encoding: utf-8 -*-
#
# Copyright 2012 <NAME>, Opera Software ASA
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may not
# use this file except in compliance with the License. You may obtain a copy of
# the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations under
# the License.
import page
import htmlutils
import gitutils
import request
from page.parameters import Optional, ReviewId
class RebaseTrackingReview(page.Page):
    """Page for rebasing a review that tracks a remote branch.

    Registered as "rebasetrackingreview"; all parameters except the review
    id are optional and, when all are present, trigger the confirmation
    view for an already-fetched rebase candidate.
    """
    def __init__(self):
        super(RebaseTrackingReview, self).__init__("rebasetrackingreview",
                                                   { "review": ReviewId,
                                                     "newbranch": Optional(str),
                                                     "upstream": Optional(str),
                                                     "newhead": Optional(str),
                                                     "newupstream": Optional(str) },
                                                   RebaseTrackingReview.Handler)
    class Handler(page.Page.Handler):
        """Request handler that renders the rebase-tracking-review page."""
        def __init__(self, review, newbranch=None, upstream=None, newhead=None, newupstream=None):
            super(RebaseTrackingReview.Handler, self).__init__(review)
            # Optional parameters: when all four are set, generateContent()
            # renders the confirmation view instead of the selection form.
            self.newbranch = newbranch
            self.upstream = upstream
            self.newhead = newhead
            self.newupstream = newupstream
        def generateHeader(self):
            # Page-specific stylesheet and scripts.
            self.document.addExternalStylesheet("resource/rebasetrackingreview.css")
            self.document.addExternalScript("resource/autocomplete.js")
            self.document.addExternalScript("resource/rebasetrackingreview.js")
        def generateContent(self):
            """Render either the confirmation view (all optional parameters
            present) or the branch-selection form."""
            trackedbranch = self.review.getTrackedBranch(self.db)
            if not trackedbranch:
                # NOTE(review): raises request.DisplayMessage here but
                # page.utils.DisplayMessage below — confirm both are valid.
                raise request.DisplayMessage("Not supported!", "The review r/%d is not tracking a remote branch." % self.review.id)
            # Expose repository, review and tracked-branch data to the
            # client-side script.
            self.document.addInternalScript(self.review.repository.getJS())
            self.document.addInternalScript(self.review.getJS())
            self.document.addInternalScript("var trackedbranch = { remote: %s, name: %s };"
                                            % (htmlutils.jsify(trackedbranch.remote),
                                               htmlutils.jsify(trackedbranch.name)))
            table = page.utils.PaleYellowTable(self.body, "Rebase tracking review")
            def renderRemote(target):
                target.span("value", id="remote").text(trackedbranch.remote)
            def renderCurrentBranch(target):
                target.span("value", id="currentbranch").text("refs/heads/" + trackedbranch.name)
            table.addItem("Remote", renderRemote)
            table.addItem("Current branch", renderCurrentBranch)
            table.addSeparator()
            # Confirmation view: the candidate branch has been fetched and
            # all of its coordinates were passed back as parameters.
            if self.newbranch is not None and self.upstream is not None and self.newhead is not None and self.newupstream is not None:
                import log.html
                import log.commitset
                # Commits on the new branch but not reachable from the new
                # upstream.
                sha1s = self.review.repository.revlist(included=[self.newhead], excluded=[self.newupstream])
                new_commits = log.commitset.CommitSet(gitutils.Commit.fromSHA1(self.db, self.review.repository, sha1) for sha1 in sha1s)
                new_heads = new_commits.getHeads()
                if len(new_heads) != 1:
                    raise page.utils.DisplayMessage("Invalid commit-set!", "Multiple heads. (This ought to be impossible...)")
                new_upstreams = new_commits.getFilteredTails(self.review.repository)
                if len(new_upstreams) != 1:
                    raise page.utils.DisplayMessage("Invalid commit-set!", "Multiple upstreams. (This ought to be impossible...)")
                new_head = new_heads.pop()
                new_upstream_sha1 = new_upstreams.pop()
                old_commits = log.commitset.CommitSet(self.review.branch.commits)
                old_upstreams = old_commits.getFilteredTails(self.review.repository)
                if len(old_upstreams) != 1:
                    raise page.utils.DisplayMessage("Rebase not supported!", "The review has mulitple upstreams and can't be rebased.")
                # Classify the rebase: same upstream => history rewrite;
                # otherwise a move, fast-forward when the old upstream is an
                # ancestor of the new one.
                if len(old_upstreams) == 1 and new_upstream_sha1 in old_upstreams:
                    # This appears to be a history rewrite.
                    new_upstream = None
                    new_upstream_sha1 = None
                    rebase_type = "history"
                else:
                    old_upstream = gitutils.Commit.fromSHA1(self.db, self.review.repository, old_upstreams.pop())
                    new_upstream = gitutils.Commit.fromSHA1(self.db, self.review.repository, new_upstream_sha1)
                    if old_upstream.isAncestorOf(new_upstream):
                        rebase_type = "move:ff"
                    else:
                        rebase_type = "move"
                # Hand the classification over to the client-side checks.
                self.document.addInternalScript("var check = { rebase_type: %s, old_head_sha1: %s, new_head_sha1: %s, new_upstream_sha1: %s, new_trackedbranch: %s };"
                                                % (htmlutils.jsify(rebase_type),
                                                   htmlutils.jsify(self.review.branch.head.sha1),
                                                   htmlutils.jsify(new_head.sha1),
                                                   htmlutils.jsify(new_upstream_sha1),
                                                   htmlutils.jsify(self.newbranch[len("refs/heads/"):])))
                def renderNewBranch(target):
                    target.span("value", id="newbranch").text(self.newbranch)
                    target.text(" @ ")
                    target.span("value").text(new_head.sha1[:8] + " " + new_head.niceSummary())
                def renderUpstream(target):
                    target.span("value", id="upstream").text(self.upstream)
                    target.text(" @ ")
                    target.span("value").text(new_upstream.sha1[:8] + " " + new_upstream.niceSummary())
                table.addItem("New branch", renderNewBranch)
                # new_upstream is None for a history rewrite.
                if new_upstream:
                    table.addItem("New upstream", renderUpstream)
                table.addSeparator()
                # Status rows start as "N/A"; filled in by the client-side
                # script.
                def renderMergeStatus(target):
                    target.a("status", id="status_merge").text("N/A")
                def renderConflictsStatus(target):
                    target.a("status", id="status_conflicts").text("N/A")
                def renderHistoryRewriteStatus(target):
                    target.a("status", id="status_historyrewrite").text("N/A")
                table.addSection("Status")
                if rebase_type == "history":
                    table.addItem("History rewrite", renderHistoryRewriteStatus)
                else:
                    if rebase_type == "move:ff":
                        table.addItem("Merge", renderMergeStatus)
                    table.addItem("Conflicts", renderConflictsStatus)
                def renderRebaseReview(target):
                    target.button(id="rebasereview", onclick="rebaseReview();", disabled="disabled").text("Rebase Review")
                table.addSeparator()
                table.addCentered(renderRebaseReview)
                log.html.render(self.db, self.body, "Rebased commits", commits=list(new_commits))
            else:
                # Selection form: list candidate branches on the remote.
                # Site customization hooks are optional.
                try:
                    from customization.branches import getRebasedBranchPattern
                except ImportError:
                    def getRebasedBranchPattern(branch_name): return None
                pattern = getRebasedBranchPattern(trackedbranch.name)
                try:
                    from customization.branches import isRebasedBranchCandidate
                except ImportError:
                    isRebasedBranchCandidate = None
                if pattern or isRebasedBranchCandidate:
                    candidates = [name[len("refs/heads/"):]
                                  for sha1, name in gitutils.Repository.lsremote(trackedbranch.remote, pattern=pattern, include_heads=True)
                                  if name.startswith("refs/heads/")]
                    if isRebasedBranchCandidate is not None:
                        def isCandidate(name):
                            return isRebasedBranchCandidate(trackedbranch.name, name)
                        # NOTE(review): relies on Python 2 filter() returning
                        # a list; len()/indexing below break on Python 3.
                        candidates = filter(isCandidate, candidates)
                else:
                    candidates = []
                # Several candidates: dropdown with an "Edit" escape hatch;
                # zero or one candidate: free-text input with a default.
                if len(candidates) > 1:
                    def renderCandidates(target):
                        target.text("refs/heads/")
                        dropdown = target.select(id="newbranch")
                        for name in candidates:
                            dropdown.option(value=name).text(name)
                    table.addItem("New branch", renderCandidates,
                                  buttons=[("Edit", "editNewBranch(this);")])
                else:
                    if len(candidates) == 1:
                        default_value = candidates[0]
                    else:
                        default_value = trackedbranch.name
                    def renderEdit(target):
                        target.text("refs/heads/")
                        target.input(id="newbranch", value=default_value)
                    table.addItem("New branch", renderEdit)
                def renderUpstreamInput(target):
                    target.input(id="upstream", value="refs/heads/master")
                table.addItem("Upstream", renderUpstreamInput)
                def renderFetchBranch(target):
                    target.button(onclick="fetchBranch();").text("Fetch Branch")
                table.addSeparator()
                table.addCentered(renderFetchBranch)
| [
"customization.branches.isRebasedBranchCandidate",
"gitutils.Repository.lsremote",
"page.utils.DisplayMessage",
"page.parameters.Optional",
"customization.branches.getRebasedBranchPattern",
"gitutils.Commit.fromSHA1",
"page.utils.PaleYellowTable",
"htmlutils.jsify",
"request.DisplayMessage"
] | [((2689, 2752), 'page.utils.PaleYellowTable', 'page.utils.PaleYellowTable', (['self.body', '"""Rebase tracking review"""'], {}), "(self.body, 'Rebase tracking review')\n", (2715, 2752), False, 'import page\n'), ((1019, 1032), 'page.parameters.Optional', 'Optional', (['str'], {}), '(str)\n', (1027, 1032), False, 'from page.parameters import Optional, ReviewId\n'), ((1099, 1112), 'page.parameters.Optional', 'Optional', (['str'], {}), '(str)\n', (1107, 1112), False, 'from page.parameters import Optional, ReviewId\n'), ((1178, 1191), 'page.parameters.Optional', 'Optional', (['str'], {}), '(str)\n', (1186, 1191), False, 'from page.parameters import Optional, ReviewId\n'), ((1261, 1274), 'page.parameters.Optional', 'Optional', (['str'], {}), '(str)\n', (1269, 1274), False, 'from page.parameters import Optional, ReviewId\n'), ((2153, 2267), 'request.DisplayMessage', 'request.DisplayMessage', (['"""Not supported!"""', "('The review r/%d is not tracking a remote branch.' % self.review.id)"], {}), "('Not supported!', \n 'The review r/%d is not tracking a remote branch.' % self.review.id)\n", (2175, 2267), False, 'import request\n'), ((7967, 8010), 'customization.branches.getRebasedBranchPattern', 'getRebasedBranchPattern', (['trackedbranch.name'], {}), '(trackedbranch.name)\n', (7990, 8010), False, 'from customization.branches import getRebasedBranchPattern\n'), ((3731, 3836), 'page.utils.DisplayMessage', 'page.utils.DisplayMessage', (['"""Invalid commit-set!"""', '"""Multiple heads. (This ought to be impossible...)"""'], {}), "('Invalid commit-set!',\n 'Multiple heads. (This ought to be impossible...)')\n", (3756, 3836), False, 'import page\n'), ((3988, 4097), 'page.utils.DisplayMessage', 'page.utils.DisplayMessage', (['"""Invalid commit-set!"""', '"""Multiple upstreams. (This ought to be impossible...)"""'], {}), "('Invalid commit-set!',\n 'Multiple upstreams. 
(This ought to be impossible...)')\n", (4013, 4097), False, 'import page\n'), ((4433, 4546), 'page.utils.DisplayMessage', 'page.utils.DisplayMessage', (['"""Rebase not supported!"""', '"""The review has mulitple upstreams and can\'t be rebased."""'], {}), '(\'Rebase not supported!\',\n "The review has mulitple upstreams and can\'t be rebased.")\n', (4458, 4546), False, 'import page\n'), ((4987, 5063), 'gitutils.Commit.fromSHA1', 'gitutils.Commit.fromSHA1', (['self.db', 'self.review.repository', 'new_upstream_sha1'], {}), '(self.db, self.review.repository, new_upstream_sha1)\n', (5011, 5063), False, 'import gitutils\n'), ((2544, 2581), 'htmlutils.jsify', 'htmlutils.jsify', (['trackedbranch.remote'], {}), '(trackedbranch.remote)\n', (2559, 2581), False, 'import htmlutils\n'), ((2630, 2665), 'htmlutils.jsify', 'htmlutils.jsify', (['trackedbranch.name'], {}), '(trackedbranch.name)\n', (2645, 2665), False, 'import htmlutils\n'), ((3530, 3593), 'gitutils.Commit.fromSHA1', 'gitutils.Commit.fromSHA1', (['self.db', 'self.review.repository', 'sha1'], {}), '(self.db, self.review.repository, sha1)\n', (3554, 3593), False, 'import gitutils\n'), ((5467, 5495), 'htmlutils.jsify', 'htmlutils.jsify', (['rebase_type'], {}), '(rebase_type)\n', (5482, 5495), False, 'import htmlutils\n'), ((5548, 5593), 'htmlutils.jsify', 'htmlutils.jsify', (['self.review.branch.head.sha1'], {}), '(self.review.branch.head.sha1)\n', (5563, 5593), False, 'import htmlutils\n'), ((5646, 5676), 'htmlutils.jsify', 'htmlutils.jsify', (['new_head.sha1'], {}), '(new_head.sha1)\n', (5661, 5676), False, 'import htmlutils\n'), ((5729, 5763), 'htmlutils.jsify', 'htmlutils.jsify', (['new_upstream_sha1'], {}), '(new_upstream_sha1)\n', (5744, 5763), False, 'import htmlutils\n'), ((8370, 8461), 'gitutils.Repository.lsremote', 'gitutils.Repository.lsremote', (['trackedbranch.remote'], {'pattern': 'pattern', 'include_heads': '(True)'}), '(trackedbranch.remote, pattern=pattern,\n include_heads=True)\n', (8398, 8461), 
False, 'import gitutils\n'), ((8671, 8721), 'customization.branches.isRebasedBranchCandidate', 'isRebasedBranchCandidate', (['trackedbranch.name', 'name'], {}), '(trackedbranch.name, name)\n', (8695, 8721), False, 'from customization.branches import isRebasedBranchCandidate\n')] |
#!/usr/bin/env python3
import sys
import ujson as json
import json as json_orig
import traceback
import re
import argparse
import os.path
import operator
import requests
from threading import Thread
from queue import Queue
from requests.packages.urllib3.exceptions import InsecureRequestWarning
# Targets are scanned over HTTPS with verify=False, so silence the noise.
requests.packages.urllib3.disable_warnings(InsecureRequestWarning)
parser = argparse.ArgumentParser()
parser.add_argument('-f', "--file", help='Input JSON file', required=True)
parser.add_argument('-i', "--index-of", help='Show all with Index Of /', action="store_true")
parser.add_argument('-e', "--index-of-extended", help='Extract and show directory listing', action="store_true")
parser.add_argument('-r', "--recursive", help='Recursive directory listing', action="store_true")
parser.add_argument("-c", "--cn", help='Output TLS Cert Common Names', action="store_true")
parser.add_argument('-s', "--summary", help='Output summary', action="store_true")
parser.add_argument("--no-header", help='Suppress header', action="store_true")
args = parser.parse_args()
if not args.no_header:
    print("==============================================")
    print("| zdata v0.33c3 - A zmap JSON Output Utility |")
    print("==============================================")
file = args.file
if not os.path.isfile(file):
    exit('Error: Input file not found')
# Apache-style directory-index anchors; the "[^\?]" skips the sort-order
# links (?C=N;O=D etc.) that mod_autoindex emits.
regex_indexof_links_all = re.compile(r'<a href="[^\?]', re.MULTILINE)
regex_indexof_links_path = re.compile(r'<a href="([^\?]+?)"', re.MULTILINE)
# Module-level accumulators, filled by process_entry() and the worker threads.
line_count = 0
line_count_with_data = 0
status_codes = {}
tls_count = 0
listing_indexof = {}
listing_cn = {}
listing_directory = {}
# Work queue feeding the directory-listing worker threads; bounded so the
# reader blocks instead of buffering the whole input file.
concurrent = 200
q = Queue(concurrent * 2)
def doWork():
    """Worker-thread loop: pull (host, url, body) jobs off the global queue
    and store the parsed directory listing in listing_directory[host]."""
    # One session per worker, re-used across jobs, instead of a fresh
    # Session per job: keeps connection pooling and avoids allocations.
    requests_session = requests.Session()
    requests_session.headers['User-Agent'] = 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_10_1) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/39.0.2171.95 Safari/537.36'
    while True:
        host, url, content = q.get()
        try:
            listing_directory[host] = indexof_extended(url, content, requests_session)
        except Exception:
            # A worker must never die: an escaped exception would leave the
            # task unacknowledged and hang any later q.join().
            traceback.print_exc()
        finally:
            q.task_done()
# Spawn the worker pool; daemon threads so the process can exit even if
# workers are still blocked on q.get().
for i in range(concurrent):
    t = Thread(target=doWork)
    t.daemon = True
    t.start()
def indexof_extended(url, content, sess, level=0):
    """Parse an Apache-style 'Index of' page into a nested dict.

    Values are 'FILE', 'DIRECTORY', 'DIRECTORY_WITHOUT_INDEX',
    'DIRECTORY_SUBFOLDER_LOAD_ERROR', 'RECURSION_LIMIT', or a nested dict
    when --recursive is set and the subfolder itself has an index.
    """
    # The original declared `global requests_session`, a name that never
    # exists at module level -- removed; the session comes in via `sess`.
    level += 1
    if level > 5:
        return 'RECURSION_LIMIT'
    folder = {}
    folder_links = regex_indexof_links_path.findall(content)
    # Drop the parent-directory / root navigation anchors.
    for nav_link in ('../', '/'):
        try:
            folder_links.remove(nav_link)
        except ValueError:
            pass
    for link in folder_links:
        if not link.endswith('/'):
            folder[link] = 'FILE'
            continue
        if not args.recursive:
            folder[link] = 'DIRECTORY'
            continue
        subfolder_url = "{}{}".format(url, link)
        try:
            r = sess.get(subfolder_url, verify=False, timeout=2)
        except Exception:
            folder[link] = 'DIRECTORY_SUBFOLDER_LOAD_ERROR'
            continue
        subfolder_content = r.text
        if 'Index of' in subfolder_content:
            folder[link] = indexof_extended(subfolder_url, subfolder_content, sess, level)
        else:
            folder[link] = 'DIRECTORY_WITHOUT_INDEX'
    return folder
def process_entry(line):
    """Parse one zgrab JSON result line and update the module-level counters
    and listings (status codes, TLS common names, 'Index of' hosts)."""
    global q, line_count, line_count_with_data, status_codes, tls_count, listing_indexof, listing_cn
    line_count += 1
    try:
        result = json.loads(line)
    except Exception:
        # Malformed line: report and bail out.  The original fell through
        # with `result` unbound and crashed with a NameError just below.
        traceback.print_exc()
        return
    if 'data' not in result:
        return
    line_count_with_data += 1
    status_code = 0
    try:
        status_code = result['data']['http']['response']['status_code']
    except KeyError:
        pass
    status_codes[status_code] = status_codes.get(status_code, 0) + 1
    cn = "n/a"
    tls = False
    url = "/"
    host = None
    try:
        host = result['data']['http']['response']['request']['url']['host']
        schema = result['data']['http']['response']['request']['url']['scheme']
        url = "{}://{}/".format(schema, host)
        if 'tls_handshake' in result['data']['http']['response']['request']:
            tls_count += 1
            tls = True
            cert = result['data']['http']['response']['request']['tls_handshake']['server_certificates']['certificate']
            try:
                cn = cert['parsed']['subject']['common_name'][0].encode('latin-1')
            except Exception:
                # Non-latin-1 CN (or missing field): keep the raw value; a
                # KeyError here re-raises and is swallowed below.
                cn = cert['parsed']['subject']['common_name'][0]
    except KeyError:
        pass
    if tls and args.cn:
        listing_cn[host] = cn
    if host is None:
        # Without a host there is nothing to key the listings on
        # (the original raised NameError in this case).
        return
    try:
        content = result['data']['http']['response']['body']
    except KeyError:
        return
    try:
        if 'Index of /' in content:
            match = regex_indexof_links_all.findall(content)
            listing_indexof[host] = len(match)
            if args.index_of_extended:
                # Hand the expensive recursive parsing to the worker pool.
                q.put([host, url, content])
    except Exception:
        traceback.print_exc()
# Stream the input file line by line (one JSON result per line).
with open(file) as f:
    for line in f:
        process_entry(line)
if args.cn:
    for host in listing_cn:
        print("{} -> {}".format(host, listing_cn[host]))
def print_folder_structure(structure, level=0):
level += 1
indent = ' ' * 4 * (level)
for key in structure:
value = structure[key]
if isinstance(value, dict):
print(indent + key)
print_folder_structure(value, level)
else:
print(indent + key)
if args.index_of:
    if args.index_of_extended:
        # Wait for the worker threads to drain the queue; otherwise
        # listing_directory may still be missing entries when we print.
        q.join()
    # Hosts with the largest directory indexes first.
    sort = sorted(listing_indexof.items(), key=operator.itemgetter(1), reverse=True)
    for entry in sort:
        print("{} has index ({})".format(entry[0], entry[1]))
        if args.index_of_extended and entry[0] in listing_directory:
            print_folder_structure(listing_directory[entry[0]])
if args.summary:
    print("=====================================")
    print("Line Count: %s" % line_count)
    print("Line Count with data: %s" % line_count_with_data)
    print("TLS count: %s" % tls_count)
    print("=====================================")
    for status_code in status_codes:
        print("Status Code {: >3}: {: >8} responses".format(status_code, status_codes[status_code]))
| [
"requests.packages.urllib3.disable_warnings",
"requests.Session",
"argparse.ArgumentParser",
"re.compile",
"operator.itemgetter",
"ujson.loads",
"threading.Thread",
"queue.Queue",
"traceback.print_exc"
] | [((296, 362), 'requests.packages.urllib3.disable_warnings', 'requests.packages.urllib3.disable_warnings', (['InsecureRequestWarning'], {}), '(InsecureRequestWarning)\n', (338, 362), False, 'import requests\n'), ((373, 398), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {}), '()\n', (396, 398), False, 'import argparse\n'), ((1372, 1415), 're.compile', 're.compile', (['"""<a href="[^\\\\?]"""', 're.MULTILINE'], {}), '(\'<a href="[^\\\\?]\', re.MULTILINE)\n', (1382, 1415), False, 'import re\n'), ((1443, 1491), 're.compile', 're.compile', (['"""<a href="([^\\\\?]+?)\\""""', 're.MULTILINE'], {}), '(\'<a href="([^\\\\?]+?)"\', re.MULTILINE)\n', (1453, 1491), False, 'import re\n'), ((1648, 1669), 'queue.Queue', 'Queue', (['(concurrent * 2)'], {}), '(concurrent * 2)\n', (1653, 1669), False, 'from queue import Queue\n'), ((2158, 2179), 'threading.Thread', 'Thread', ([], {'target': 'doWork'}), '(target=doWork)\n', (2164, 2179), False, 'from threading import Thread\n'), ((1824, 1842), 'requests.Session', 'requests.Session', ([], {}), '()\n', (1840, 1842), False, 'import requests\n'), ((3341, 3357), 'ujson.loads', 'json.loads', (['line'], {}), '(line)\n', (3351, 3357), True, 'import ujson as json\n'), ((3376, 3397), 'traceback.print_exc', 'traceback.print_exc', ([], {}), '()\n', (3395, 3397), False, 'import traceback\n'), ((5838, 5860), 'operator.itemgetter', 'operator.itemgetter', (['(1)'], {}), '(1)\n', (5857, 5860), False, 'import operator\n'), ((5313, 5334), 'traceback.print_exc', 'traceback.print_exc', ([], {}), '()\n', (5332, 5334), False, 'import traceback\n')] |
# MIT licensed
# Copyright (c) 2020 lilydjwg <<EMAIL>>, et al.
# Copyright (c) 2017 <NAME> <<EMAIL>>, et al.
from flaky import flaky
import pytest
# Every test in this module runs under asyncio and requires real network access.
pytestmark = [pytest.mark.asyncio, pytest.mark.needs_net]
@flaky(max_runs=10)
async def test_debianpkg(get_version):
    """The plain debianpkg source reports the full Debian version string."""
    config = {
        "source": "debianpkg",
    }
    result = await get_version("sigrok-firmware-fx2lafw", config)
    assert result == "0.1.7-1"
@flaky(max_runs=10)
async def test_debianpkg_strip_release(get_version):
    """With strip_release set, the Debian revision suffix is dropped."""
    config = {
        "source": "debianpkg",
        "strip_release": 1,
    }
    result = await get_version("sigrok-firmware-fx2lafw", config)
    assert result == "0.1.7"
@flaky(max_runs=10)
async def test_debianpkg_suite(get_version):
    """Pinning the suite to buster yields that suite's (older) version."""
    config = {
        "source": "debianpkg",
        "suite": "buster",
    }
    result = await get_version("sigrok-firmware-fx2lafw", config)
    assert result == "0.1.6-1"
| [
"flaky.flaky"
] | [((208, 226), 'flaky.flaky', 'flaky', ([], {'max_runs': '(10)'}), '(max_runs=10)\n', (213, 226), False, 'from flaky import flaky\n'), ((377, 395), 'flaky.flaky', 'flaky', ([], {'max_runs': '(10)'}), '(max_runs=10)\n', (382, 395), False, 'from flaky import flaky\n'), ((586, 604), 'flaky.flaky', 'flaky', ([], {'max_runs': '(10)'}), '(max_runs=10)\n', (591, 604), False, 'from flaky import flaky\n')] |
import numpy as np
import matplotlib.pyplot as plt
from scipy import linalg as la
def PCA(dat, center=False, percentage=0.8):
    """Principal component analysis via thin SVD.

    Parameters
    ----------
    dat : (M, N) ndarray
        Data matrix, one observation per row.  Modified in place when
        ``center`` is True.
    center : bool
        Subtract the column means before decomposing.
    percentage : float
        Fraction of total variance the retained components must explain.

    Returns
    -------
    (normalized_eigenvalues, V_k, SIGMA_k, X_k) where k is the number of
    components needed to reach ``percentage`` explained variance:
    the full spectrum of normalized eigenvalues, the first k right singular
    vectors, the k-by-k singular-value matrix, and the projected data.
    """
    M, N = dat.shape
    if center:
        mu = np.mean(dat, 0)
        dat -= mu
    U, L, Vh = la.svd(dat, full_matrices=False)
    V = Vh.T.conjugate()
    SIGMA = np.diag(L)
    X = U.dot(SIGMA)
    # Squared singular values are (unnormalized) eigenvalues of dat^T dat.
    Lam = L ** 2
    normalized_eigenvalues = Lam / Lam.sum(dtype=float)
    # Cumulative explained variance.  The original built this with a Python
    # loop over ``xrange`` -- a NameError on Python 3 and O(N^2) besides.
    csum = np.cumsum(normalized_eigenvalues)
    # First index whose cumulative variance reaches the threshold, plus one.
    # (If percentage > 1 the original raised ValueError; we keep 1 component.)
    n_components = int(np.argmax(csum >= percentage)) + 1
    return (normalized_eigenvalues,
            V[:, 0:n_components],
            SIGMA[0:n_components, 0:n_components],
            X[:, 0:n_components])
def scree(normalized_eigenvalues):
    """Draw a scree plot (explained-variance per component) and return it."""
    figure = plt.figure()
    # Blue line plus blue circular markers over the same series.
    for fmt in ('b-', 'bo'):
        plt.plot(normalized_eigenvalues, fmt)
    plt.xlabel("Principal Components")
    plt.ylabel("Percentage of Variance")
    return figure
| [
"numpy.mean",
"matplotlib.pyplot.ylabel",
"matplotlib.pyplot.xlabel",
"matplotlib.pyplot.plot",
"numpy.diag",
"matplotlib.pyplot.figure",
"scipy.linalg.svd"
] | [((225, 257), 'scipy.linalg.svd', 'la.svd', (['dat'], {'full_matrices': '(False)'}), '(dat, full_matrices=False)\n', (231, 257), True, 'from scipy import linalg as la\n'), ((300, 310), 'numpy.diag', 'np.diag', (['L'], {}), '(L)\n', (307, 310), True, 'import numpy as np\n'), ((738, 750), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (748, 750), True, 'import matplotlib.pyplot as plt\n'), ((755, 823), 'matplotlib.pyplot.plot', 'plt.plot', (['normalized_eigenvalues', '"""b-"""', 'normalized_eigenvalues', '"""bo"""'], {}), "(normalized_eigenvalues, 'b-', normalized_eigenvalues, 'bo')\n", (763, 823), True, 'import matplotlib.pyplot as plt\n'), ((827, 861), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""Principal Components"""'], {}), "('Principal Components')\n", (837, 861), True, 'import matplotlib.pyplot as plt\n'), ((866, 902), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Percentage of Variance"""'], {}), "('Percentage of Variance')\n", (876, 902), True, 'import matplotlib.pyplot as plt\n'), ((176, 191), 'numpy.mean', 'np.mean', (['dat', '(0)'], {}), '(dat, 0)\n', (183, 191), True, 'import numpy as np\n')] |
# --------------------------------------------------------
# Licensed under The MIT License [see LICENSE for details]
# --------------------------------------------------------
import os
import torch
import torch.nn.functional as F
import numpy as np
from core import networks
from core.utils import *
from core.loss import *
import IPython
import time
class Agent(object):
    """
    A general agent class
    """
    def __init__(self, num_inputs, action_space, args, name):
        # `args` is a dict of hyper-parameters; every entry becomes an
        # instance attribute (e.g. self.num_input_extra, self.env_name).
        for key, val in args.items():
            setattr(self, key, val)
        self.name = name
        self.device = "cuda"
        self.update_step = 1
        self.init_step = 1
        self.action_dim = action_space.shape[0]
        # Pure behavior cloning ("BC") is the only critic-free variant.
        self.has_critic = self.name != "BC"
        self.action_space = action_space
        self.num_inputs = num_inputs + self.num_input_extra
        self.traj_feat = None
        self.latent_sample = None
        self.test_mode = False
        self.use_debug_latent = False
        self.gaddpg_pred = 0.
        if has_check(self, 'traj_goal_mutual_conditioned') :
            self.num_inputs += self.policy_traj_latent_size
        self.policy, self.policy_optim, self.policy_scheduler, self.policy_target = get_policy_class('GaussianPolicy', self)
    def unpack_batch(
        self,
        state,
        point_state=None,
        vis=False,
        gt_goal=None,
        val=False,
        grasp_set=None,
        vis_image=False,
        repeat=False,
        traj_latent=None,
        separate=True
    ):
        """
        Extract features from point cloud input.

        Converts list/ndarray inputs to CUDA tensors, then runs the shared
        state feature extractor.  Returns a two-element [feature, extra]
        list (the second entry is None when the extractor returns a single
        tensor).
        """
        if type(point_state) is list or type(point_state) is np.ndarray:
            point_state = torch.cuda.FloatTensor(point_state )
        if type(state) is list or type(state) is np.ndarray:
            state = torch.cuda.FloatTensor(state)
        state_feature, network_input = self.state_feature_extractor(
            point_state,
            feature_2=val,
            traj_latent=traj_latent,
            train=not self.test_mode)
        # Normalize the extractor output to a [feature, extra] pair.
        if len(state_feature) != 2 or type(state_feature) is torch.Tensor: state_feature = [state_feature, None]
        return state_feature
    def gaddpg_step(self, state, remain_timestep, curr_joint ):
        """ use GADDPG to forward pass """
        state = select_target_point(state)
        # Clamp the remaining-step horizon into [1, 25] for the GADDPG policy.
        gaddpg_remain_step = max(min(remain_timestep + 1, 25), 1)
        return self.gaddpg.select_action(state, remain_timestep=gaddpg_remain_step, curr_joint=curr_joint)
    @torch.no_grad()
    def batch_select_action(
        self,
        state,
        actions=None,
        goal_state=None,
        vis=False,
        remain_timestep=0,
        repeat=False,
        curr_joint=None,
        gt_traj=None,
        sample_num=None
    ):
        """
        run policy forward pass in batch simulation

        state is expected as [(point_state, image_state), ...]; returns
        (action, traj, extra_pred, aux_pred) as numpy arrays (traj is None).
        """
        self.set_mode(True)
        traj = None
        # NOTE(review): only the first 7 joint values (arm DOF, presumably) are used.
        curr_joint_th = torch.cuda.FloatTensor(curr_joint)[:, :7]
        img_state = torch.cuda.FloatTensor(state[0][1])
        point_state = torch.cuda.FloatTensor(state[0][0])
        timestep = remain_timestep
        self.timestep = timestep
        agent = self
        feature, extra = agent.extract_feature( img_state,
                                                point_state,
                                                time_batch=timestep,
                                                goal_batch=goal_state,
                                                vis=vis,
                                                value=False,
                                                train=False,
                                                repeat=repeat,
                                                curr_joint=curr_joint_th )
        actions = agent.policy.sample(feature)
        action = actions[0].detach().cpu().numpy()
        extra_pred = actions[1].detach().cpu().numpy()
        action_sample = actions[2].detach().cpu().numpy()
        aux_pred = actions[3].detach().cpu().numpy()
        return action, traj, extra_pred, aux_pred
    @torch.no_grad()
    def select_action(
        self,
        state,
        actions=None,
        goal_state=None,
        vis=False,
        remain_timestep=0,
        repeat=False,
        curr_joint=None,
        gt_traj=None,
        sample_num=None
    ):
        """
        policy output in test time

        Single-environment variant of batch_select_action.  May delegate to
        the critic-based action selection (multi-sample MPC) or sample a
        latent trajectory first when a trajectory sampler is trained.
        Returns (action, traj, extra_pred, aux_pred).
        """
        self.set_mode(True)
        multi_sample = has_check(self, 'multi_traj_sample') and gt_traj is None
        if multi_sample and hasattr(self, 'critic') and self.train_traj_sampler and self.critic_mpc:
            return self.critic_select_action(state, remain_timestep, curr_joint, vis=vis)
        if self.name == 'DQN_HRL' and gt_traj is None and vis:
            return self.critic_select_action(state, remain_timestep, curr_joint, vis=vis)
        curr_joint_th = torch.Tensor([curr_joint.flatten()]).float().cuda()[:, :7]
        img_state = torch.cuda.FloatTensor(state[0][1])[None]
        point_state = torch.cuda.FloatTensor(state[0][0])[None]
        timestep = torch.cuda.FloatTensor([remain_timestep])
        self.timestep = timestep
        if has_check(self, 'train_traj_sampler') and gt_traj is None and has_check(self, 'train_traj_feature'):
            if multi_sample: # multiple traj samples
                traj = self.select_traj(img_state,
                                        point_state.repeat((self.test_traj_num, 1, 1)),
                                        goal_state,
                                        vis=vis,
                                        remain_timestep=remain_timestep,
                                        curr_joint=curr_joint_th.repeat((self.test_traj_num, 1)))
                timestep = torch.Tensor([remain_timestep]).float().cuda()
                # Keep only the first sampled trajectory latent.
                opt_idx = 0
                self.traj_feat = self.traj_feat[[opt_idx]]
            else:
                traj = self.select_traj(img_state, point_state, goal_state,
                                        vis=vis, remain_timestep=remain_timestep,
                                        curr_joint=curr_joint_th )
        else:
            traj = None
        # policy
        feature, extra = self.extract_feature( img_state,
                                               point_state,
                                               time_batch=timestep,
                                               goal_batch=goal_state,
                                               value=False,
                                               train=False,
                                               repeat=repeat,
                                               curr_joint=curr_joint_th[:,:7] )
        if self.name == 'DQN_HRL' and vis and hasattr(self, 'sampler_traj_feat'):
            self.compute_critic_value( img_state, point_state, timestep, curr_joint_th, goal_state)
        actions = self.policy.sample(feature)
        action = actions[0].detach().cpu().numpy()[0]
        extra_pred = actions[1].detach().cpu().numpy()[0]
        action_sample = actions[2].detach().cpu().numpy()[0]
        aux_pred = actions[3].detach().cpu().numpy()[0]
        return action, traj, extra_pred, aux_pred
    def update_parameters(self, batch_data, updates, k):
        """
        To be inherited
        """
        return {}
    def compute_loss(self):
        """
        compute loss for policy and trajectory embedding

        Sums every attribute whose name ends with 'loss' (and is not a
        critic loss) as declared in self.loss_info.
        """
        self.policy_grasp_aux_loss = goal_pred_loss(self.aux_pred[self.target_goal_reward_mask, :7], self.target_grasp_batch[self.target_goal_reward_mask, :7] )
        self.bc_loss = traj_action_loss(self, self.pi, self.traj_expert_action_batch, self.target_expert_mask)
        return sum([getattr(self, name) for name in self.loss_info if name.endswith('loss') and not name.startswith('critic')])
    def prepare_data(self, batch_data):
        """
        load batch data dictionary and compute extra data

        Every key of batch_data becomes a CUDA float tensor attribute;
        the rest of this method derives the masks and index tensors used
        by compute_loss / update_parameters.
        """
        update_step = self.update_step - self.init_step
        self.loss_info = list(get_loss_info_dict().keys())
        # Zero-initialize every loss so compute_loss can sum them blindly.
        for name in self.loss_info:
            setattr(self, name, torch.zeros(1, device=torch.device('cuda')))
        for k, v in batch_data.items():
            setattr(self, k, torch.cuda.FloatTensor(v))
        self.traj_time_batch = self.traj_idx_batch[:, 1, None]
        self.cont_traj_inbatch_index = self.traj_idx_batch[:, 0].cuda().long()
        self.traj_feat = None
        # Episodes with positive return / flagged as expert demonstrations.
        self.reward_mask = (self.return_batch > 0).view(-1)
        self.expert_mask = (self.expert_flag_batch >= 1).view(-1)
        self.expert_reward_mask = self.reward_mask * (self.expert_flag_batch >= 1).squeeze()
        self.perturb_flag_batch = self.perturb_flag_batch.bool()
        self.traj_expert_reward_mask = self.expert_reward_mask[self.cont_traj_inbatch_index]
        self.train_traj_idx_batch = self.cont_traj_inbatch_index
        self.sparsify_sim_traj_time_batch = self.sparsify_sim_traj_idx_batch[:, 1, None]
        self.sparsify_sim_cont_traj_inbatch_index = self.sparsify_sim_traj_idx_batch[:, 0].cuda().long()
        self.sparsify_sim_traj_expert_reward_mask = self.expert_reward_mask[self.sparsify_sim_cont_traj_inbatch_index]
        self.goal_reward_mask = torch.ones_like(self.time_batch).bool()
        self.traj_goal_reward_mask = torch.ones_like(self.traj_integer_time_batch).bool()
        # target_* tensors are indexed per-step when full_traj_embedding is
        # on, otherwise per-episode.
        self.target_grasp_batch = self.traj_goal_batch[:, :7] if self.full_traj_embedding else self.goal_batch[:, :7]
        self.target_goal_reward_mask = self.goal_reward_mask[self.cont_traj_inbatch_index] if self.full_traj_embedding else self.goal_reward_mask
        self.target_reward_mask = self.reward_mask[self.cont_traj_inbatch_index] if self.full_traj_embedding else self.reward_mask
        self.target_return = self.return_batch[self.cont_traj_inbatch_index] if self.full_traj_embedding else self.return_batch
        self.target_expert_mask = self.expert_mask[self.cont_traj_inbatch_index] if self.full_traj_embedding else self.expert_mask
        self.target_gaddpg_batch = (self.gaddpg_batch * self.reward_mask)
        self.target_expert_reward_mask = self.traj_expert_reward_mask if self.full_traj_embedding else self.expert_reward_mask
        self.next_time_batch = self.time_batch - 1
        self.next_traj_time_batch = self.traj_integer_time_batch - 1
        self.target_reward_batch = self.traj_reward_batch if self.full_traj_embedding else self.reward_batch
        self.target_mask_batch = self.traj_mask_batch if self.full_traj_embedding else self.mask_batch
    def log_stat(self):
        """
        log grad and param statistics for tensorboard
        """
        self.policy_grad = module_max_gradient(self.policy)
        self.feat_grad = module_max_gradient(self.state_feature_extractor.module.encoder)
        self.feat_param = module_max_param(self.state_feature_extractor.module.encoder)
        self.val_feat_grad = module_max_gradient(self.state_feature_extractor.module.value_encoder)
        self.val_feat_param = module_max_param(self.state_feature_extractor.module.value_encoder)
        self.policy_param = module_max_param(self.policy)
        self.reward_mask_num = self.reward_mask.float().sum()
        self.max_traj_sample_len = torch.unique(self.cont_traj_inbatch_index, return_counts=True)[1].max()
        self.traj_num = len(self.reward_mask)
        self.train_batch_size = len(self.target_expert_reward_mask)
        if hasattr(self, 'traj_feature_extractor'):
            self.traj_grad = module_max_gradient(self.traj_feature_extractor)
            self.traj_param = module_max_param(self.traj_feature_extractor)
        if hasattr(self, 'sampler_gaussian'):
            self.sampler_mean = self.sampler_gaussian[0].mean().item()
            self.sampler_logsigma = self.sampler_gaussian[1].mean().item()
        if self.train_traj_sampler and hasattr(self, 'sampler_traj_feat'):
            self.traj_sampler_grad = module_max_gradient(self.traj_feature_sampler)
            self.traj_sampler_param = module_max_param(self.traj_feature_sampler)
        if self.has_critic:
            self.value_mean, self.value_mean_2 = self.qf1.mean(), self.qf2.mean()
            self.target_mean = self.next_q_value.mean()
            self.return_mean = self.traj_return_batch.mean()
            self.value_min, self.value_max = self.qf1.min(), self.qf1.max()
            self.expert_reward_mask_num = self.expert_reward_mask.sum()
            self.goal_reward_mask_num = self.goal_reward_mask.sum()
            self.reward_mask_num = self.reward_mask.sum()
            self.return_min, self.return_max = self.return_batch.min(), self.return_batch.max()
            self.critic_grad = module_max_gradient(self.critic)
            self.critic_param = module_max_param(self.critic)
    def set_mode(self, test):
        """
        set training or test mode for network
        """
        self.test_mode = test
        if not test:
            self.state_feature_extractor.train()
            self.policy.train()
            if hasattr(self, "critic"):
                self.critic.train()
                self.critic_optim.zero_grad()
                self.state_feat_val_encoder_optim.zero_grad()
            if hasattr(self, 'traj_feature_extractor'):
                if self.train_traj_feature and not self.fix_traj_feature:
                    self.traj_feature_extractor.train()
                else:
                    self.traj_feature_extractor.eval()
                if self.train_traj_sampler:
                    self.traj_feature_sampler.train()
        else:
            # NOTE(review): torch.no_grad() used as a bare call has no effect
            # outside a `with` block -- eval() below is what matters here.
            torch.no_grad()
            self.policy.eval()
            self.state_feature_extractor.eval()
            if hasattr(self, "critic"): self.critic.eval()
            if hasattr(self, "traj_feature_extractor"): self.traj_feature_extractor.eval()
            if hasattr(self, "traj_feature_sampler"): self.traj_feature_sampler.eval()
    def setup_feature_extractor(self, net_dict, test_time=False):
        """
        Load networks

        net_dict maps names to {"net", "opt", "scheduler", ...} entries
        produced elsewhere; missing trajectory networks fall back to the
        state feature extractor.
        """
        if "traj_feature_extractor" in net_dict:
            self.traj_feature_extractor = net_dict["traj_feature_extractor"]["net"]
            self.traj_feature_extractor_opt = net_dict["traj_feature_extractor"]["opt"]
            self.traj_feature_extractor_sch = net_dict["traj_feature_extractor"]["scheduler"]
        else:
            self.traj_feature_extractor = net_dict["state_feature_extractor"]["net"]
        if 'traj_feature_sampler' in net_dict:
            self.traj_feature_sampler = net_dict["traj_feature_sampler"]["net"]
            self.traj_feature_sampler_opt = net_dict["traj_feature_sampler"]["opt"]
            self.traj_feature_sampler_sch = net_dict["traj_feature_sampler"]["scheduler"]
        self.state_feature_extractor = net_dict["state_feature_extractor"]["net"]
        self.state_feature_extractor_optim = net_dict["state_feature_extractor"]["opt"]
        self.state_feature_extractor_scheduler = net_dict["state_feature_extractor"]["scheduler"]
        self.state_feat_encoder_optim = net_dict["state_feature_extractor"][ "encoder_opt" ]
        self.state_feat_encoder_scheduler = net_dict["state_feature_extractor"][ "encoder_scheduler" ]
        self.state_feat_val_encoder_optim = net_dict["state_feature_extractor"][ "val_encoder_opt" ]
        self.state_feat_val_encoder_scheduler = net_dict["state_feature_extractor"][ "val_encoder_scheduler" ]
        self.test_time = test_time
    def get_mix_ratio(self, update_step):
        """
        Get a mixed schedule for supervised learning and RL
        """
        # Pick the schedule stage from how many milestones have passed.
        idx = int((self.update_step > np.array(self.mix_milestones)).sum())
        mix_policy_ratio = get_valid_index(self.mix_policy_ratio_list, idx)
        mix_policy_ratio = min(mix_policy_ratio, self.ddpg_coefficients[4])
        mix_value_ratio = get_valid_index(self.mix_value_ratio_list, idx)
        mix_value_ratio = min(mix_value_ratio, self.ddpg_coefficients[3])
        return mix_value_ratio, mix_policy_ratio
    def get_lr(self):
        """
        Get network learning rates

        Returns a dict of the current learning rate per optimizer.
        """
        lrs = {
            "policy_lr": self.policy_optim.param_groups[0]["lr"],
            "feature_lr": self.state_feature_extractor_optim.param_groups[0]["lr"],
        }
        if self.train_traj_feature:
            lrs["traj_feature_lr"] = self.traj_feature_extractor_opt.param_groups[0]["lr"]
        if self.train_traj_sampler:
            lrs["traj_sampler_lr"] = self.traj_feature_sampler_opt.param_groups[0]["lr"]
        if hasattr(self, 'critic_optim'):
            lrs["value_lr"] = self.critic_optim.param_groups[0]["lr"]
            lrs["val_feat_lr"] = self.state_feat_val_encoder_optim.param_groups[0]["lr"]
        headers = ["network", "learning rate"]
        data = [(name, lr) for name, lr in lrs.items()]
        return lrs
    def optimize(self, loss, update_step):
        """
        Backward loss and update optimizer
        """
        self.state_feat_encoder_optim.zero_grad()
        self.policy_optim.zero_grad()
        if self.train_traj_feature:
            self.traj_feature_extractor_opt.zero_grad()
        if self.train_traj_sampler:
            self.traj_feature_sampler_opt.zero_grad()
        # retain_graph is needed when the sampler re-uses this graph later.
        loss.backward(retain_graph=self.re_sampler_step)
        self.policy_optim.step()
        if self.train_feature:
            self.state_feat_encoder_optim.step()
        if self.train_traj_feature:
            self.traj_feature_extractor_opt.step()
        if self.train_traj_sampler:
            self.traj_feature_sampler_opt.step()
    def step_scheduler(self, step=None):
        """
        Update network scheduler
        """
        if self.train_traj_sampler:
            self.traj_feature_sampler_sch.step()
        if self.train_traj_feature:
            self.traj_feature_extractor_sch.step()
        if hasattr(self, "critic"):
            self.critic_scheduler.step()
        if hasattr(self, "policy"):
            self.policy_scheduler.step()
        if self.train_feature or self.train_value_feature:
            self.state_feature_extractor_scheduler.step()
            self.state_feat_encoder_scheduler.step()
        if self.train_value_feature and hasattr(self, 'state_feat_val_encoder_scheduler'):
            self.state_feat_val_encoder_scheduler.step()
    def save_model(
        self,
        step,
        output_dir="",
        surfix="latest",
        actor_path=None,
        critic_path=None,
        traj_feat_path=None,
        state_feat_path=None,
    ):
        """
        save model

        Writes one checkpoint file per sub-network (policy, critic,
        trajectory feature/sampler, state feature extractor), each with its
        optimizer and scheduler state.
        """
        if not os.path.exists(output_dir):
            os.makedirs(output_dir)
        actor_path, critic_path, traj_feat_path, traj_sampler_path, state_feat_path = get_model_path(output_dir,
                                                        self.name, self.env_name, surfix)
        print("Saving models to {} and {}".format(actor_path, critic_path))
        if hasattr(self, "policy"):
            torch.save(
                {
                    "net": self.policy.state_dict(),
                    "opt": self.policy_optim.state_dict(),
                    "sch": self.policy_scheduler.state_dict(),
                },
                actor_path,
            )
        if hasattr(self, "critic"):
            torch.save(
                {
                    "net": self.critic.state_dict(),
                    "opt": self.critic_optim.state_dict(),
                    "sch": self.critic_scheduler.state_dict(),
                },
                critic_path,
            )
        if hasattr(self, 'traj_feature_extractor_opt'):
            torch.save(
                {
                    "net": self.traj_feature_extractor.state_dict(),
                    "opt": self.traj_feature_extractor_opt.state_dict(),
                    "sch": self.traj_feature_extractor_sch.state_dict(),
                },
                traj_feat_path,
            )
        if hasattr(self, 'traj_feature_sampler_opt'):
            torch.save(
                {
                    "net": self.traj_feature_sampler.state_dict(),
                    "opt": self.traj_feature_sampler_opt.state_dict(),
                    "sch": self.traj_feature_sampler_sch.state_dict(),
                },
                traj_sampler_path,
            )
        torch.save(
            {
                "net": self.state_feature_extractor.state_dict(),
                "opt": self.state_feature_extractor_optim.state_dict(),
                "encoder_opt": self.state_feat_encoder_optim.state_dict(),
                "sch": self.state_feature_extractor_scheduler.state_dict(),
                "encoder_sch": self.state_feat_encoder_scheduler.state_dict(),
                "val_encoder_opt": self.state_feat_val_encoder_optim.state_dict(),
                "val_encoder_sch": self.state_feat_val_encoder_scheduler.state_dict(),
                "step": step,
            },
            state_feat_path,
        )
    def load_model(
        self, output_dir, surfix="latest", set_init_step=False, reinit_value_feat=False
    ):
        """
        Load saved model

        Restores each sub-network checkpoint that exists on disk and syncs
        the target networks.  Returns the saved update step (0 when no
        state-feature checkpoint was found).
        """
        actor_path, critic_path, traj_feat_path, traj_sampler_path, state_feat_path = get_model_path(output_dir,
                                                        self.name, self.env_name, surfix)
        if hasattr(self, "policy") and os.path.exists(actor_path):
            net_dict = torch.load(actor_path)
            self.policy.load_state_dict(net_dict["net"])
            self.policy_optim.load_state_dict(net_dict["opt"])
            self.policy_scheduler.load_state_dict(net_dict["sch"])
            if self.reinit_optim and set_init_step:
                # Restart the policy LR schedule from reinit_lr.
                for g in self.policy_optim.param_groups:
                    g["lr"] = self.reinit_lr
                self.policy_scheduler = torch.optim.lr_scheduler.MultiStepLR(
                    self.policy_optim, milestones=self.policy_milestones, gamma=0.5 )
                self.policy_scheduler.initial_lr = self.reinit_lr
                self.policy_scheduler.base_lrs[0] = self.reinit_lr
                print("reinit policy optim")
            print("load policy weight: {:.3f} from {} !!!!".format(module_max_param(self.policy), actor_path))
            hard_update(self.policy_target, self.policy, self.tau)
        if hasattr(self, "critic") and os.path.exists(critic_path):
            net_dict = torch.load(critic_path)
            self.critic.load_state_dict(net_dict["net"])
            self.critic_optim.load_state_dict(net_dict["opt"])
            self.critic_scheduler.load_state_dict(net_dict["sch"])
            print("load critic weight: {:.3f} !!!!".format(module_max_param(self.critic)))
            hard_update(self.critic_target, self.critic, self.tau)
        if hasattr(self, 'traj_feature_extractor') and os.path.exists(traj_feat_path):
            net_dict = torch.load(traj_feat_path)
            self.traj_feature_extractor.load_state_dict(net_dict["net"], strict=False)
            print('load traj feature weight: {:.3f} from {} !!!!'.format(module_max_param(self.traj_feature_extractor), traj_feat_path))
            # Optimizer/scheduler state may be missing from older checkpoints.
            try:
                self.traj_feature_extractor_opt.load_state_dict(net_dict["opt"])
                self.traj_feature_extractor_sch.load_state_dict(net_dict["sch"])
            except:
                pass
        if hasattr(self, 'train_traj_sampler') and os.path.exists(traj_sampler_path):
            net_dict = torch.load(traj_sampler_path)
            self.traj_feature_sampler.load_state_dict(net_dict["net"], strict=False)
            print('load traj sampler weight: {:.3f} from {} !!!!'.format(module_max_param(self.traj_feature_sampler), traj_sampler_path))
            try:
                self.traj_feature_sampler_opt.load_state_dict(net_dict["opt"])
                self.traj_feature_sampler_sch.load_state_dict(net_dict["sch"])
            except:
                pass
        if os.path.exists(state_feat_path):
            net_dict = torch.load(state_feat_path)
            if has_check(self, 'reinit_feat_opt'):
                # Skip the value branch weights when reinitializing.
                self.state_feature_extractor.load_state_dict(dict([(n, p) for n, p in net_dict["net"].items() if 'value' not in n ]),strict=False)
            else:
                self.state_feature_extractor.load_state_dict(net_dict["net"] )
            self.state_feature_extractor_optim.load_state_dict(net_dict["opt"])
            self.state_feature_extractor_scheduler.load_state_dict( net_dict["sch"] )
            self.state_feat_encoder_optim.load_state_dict( net_dict["encoder_opt"]  )
            self.state_feat_encoder_scheduler.load_state_dict( net_dict["encoder_sch"] )
            if not has_check(self, 'reinit_feat_opt'):
                self.state_feat_val_encoder_optim.load_state_dict(
                    net_dict["val_encoder_opt"] )
                self.state_feat_val_encoder_scheduler.load_state_dict(
                    net_dict["val_encoder_sch"] )
            print(
                "load feature weight: {} !!!! from: {} step :{}".format(
                    module_max_param(self.state_feature_extractor), state_feat_path, net_dict["step"]))
            self.update_step = net_dict["step"]
            self.init_step = self.update_step
            return self.update_step
        return 0
| [
"os.path.exists",
"torch.ones_like",
"torch.unique",
"torch.cuda.FloatTensor",
"torch.optim.lr_scheduler.MultiStepLR",
"os.makedirs",
"torch.load",
"torch.Tensor",
"numpy.array",
"torch.no_grad",
"torch.device"
] | [((2639, 2654), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (2652, 2654), False, 'import torch\n'), ((4199, 4214), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (4212, 4214), False, 'import torch\n'), ((3116, 3151), 'torch.cuda.FloatTensor', 'torch.cuda.FloatTensor', (['state[0][1]'], {}), '(state[0][1])\n', (3138, 3151), False, 'import torch\n'), ((3174, 3209), 'torch.cuda.FloatTensor', 'torch.cuda.FloatTensor', (['state[0][0]'], {}), '(state[0][0])\n', (3196, 3209), False, 'import torch\n'), ((5202, 5243), 'torch.cuda.FloatTensor', 'torch.cuda.FloatTensor', (['[remain_timestep]'], {}), '([remain_timestep])\n', (5224, 5243), False, 'import torch\n'), ((24157, 24188), 'os.path.exists', 'os.path.exists', (['state_feat_path'], {}), '(state_feat_path)\n', (24171, 24188), False, 'import os\n'), ((1706, 1741), 'torch.cuda.FloatTensor', 'torch.cuda.FloatTensor', (['point_state'], {}), '(point_state)\n', (1728, 1741), False, 'import torch\n'), ((1824, 1853), 'torch.cuda.FloatTensor', 'torch.cuda.FloatTensor', (['state'], {}), '(state)\n', (1846, 1853), False, 'import torch\n'), ((3054, 3088), 'torch.cuda.FloatTensor', 'torch.cuda.FloatTensor', (['curr_joint'], {}), '(curr_joint)\n', (3076, 3088), False, 'import torch\n'), ((5077, 5112), 'torch.cuda.FloatTensor', 'torch.cuda.FloatTensor', (['state[0][1]'], {}), '(state[0][1])\n', (5099, 5112), False, 'import torch\n'), ((5141, 5176), 'torch.cuda.FloatTensor', 'torch.cuda.FloatTensor', (['state[0][0]'], {}), '(state[0][0])\n', (5163, 5176), False, 'import torch\n'), ((13765, 13780), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (13778, 13780), False, 'import torch\n'), ((18770, 18796), 'os.path.exists', 'os.path.exists', (['output_dir'], {}), '(output_dir)\n', (18784, 18796), False, 'import os\n'), ((18810, 18833), 'os.makedirs', 'os.makedirs', (['output_dir'], {}), '(output_dir)\n', (18821, 18833), False, 'import os\n'), ((21583, 21609), 'os.path.exists', 'os.path.exists', (['actor_path'], {}), 
'(actor_path)\n', (21597, 21609), False, 'import os\n'), ((21634, 21656), 'torch.load', 'torch.load', (['actor_path'], {}), '(actor_path)\n', (21644, 21656), False, 'import torch\n'), ((22560, 22587), 'os.path.exists', 'os.path.exists', (['critic_path'], {}), '(critic_path)\n', (22574, 22587), False, 'import os\n'), ((22612, 22635), 'torch.load', 'torch.load', (['critic_path'], {}), '(critic_path)\n', (22622, 22635), False, 'import torch\n'), ((23039, 23069), 'os.path.exists', 'os.path.exists', (['traj_feat_path'], {}), '(traj_feat_path)\n', (23053, 23069), False, 'import os\n'), ((23094, 23120), 'torch.load', 'torch.load', (['traj_feat_path'], {}), '(traj_feat_path)\n', (23104, 23120), False, 'import torch\n'), ((23618, 23651), 'os.path.exists', 'os.path.exists', (['traj_sampler_path'], {}), '(traj_sampler_path)\n', (23632, 23651), False, 'import os\n'), ((23676, 23705), 'torch.load', 'torch.load', (['traj_sampler_path'], {}), '(traj_sampler_path)\n', (23686, 23705), False, 'import torch\n'), ((24213, 24240), 'torch.load', 'torch.load', (['state_feat_path'], {}), '(state_feat_path)\n', (24223, 24240), False, 'import torch\n'), ((8394, 8419), 'torch.cuda.FloatTensor', 'torch.cuda.FloatTensor', (['v'], {}), '(v)\n', (8416, 8419), False, 'import torch\n'), ((9385, 9417), 'torch.ones_like', 'torch.ones_like', (['self.time_batch'], {}), '(self.time_batch)\n', (9400, 9417), False, 'import torch\n'), ((9462, 9507), 'torch.ones_like', 'torch.ones_like', (['self.traj_integer_time_batch'], {}), '(self.traj_integer_time_batch)\n', (9477, 9507), False, 'import torch\n'), ((22039, 22145), 'torch.optim.lr_scheduler.MultiStepLR', 'torch.optim.lr_scheduler.MultiStepLR', (['self.policy_optim'], {'milestones': 'self.policy_milestones', 'gamma': '(0.5)'}), '(self.policy_optim, milestones=self.\n policy_milestones, gamma=0.5)\n', (22075, 22145), False, 'import torch\n'), ((11399, 11461), 'torch.unique', 'torch.unique', (['self.cont_traj_inbatch_index'], {'return_counts': '(True)'}), 
'(self.cont_traj_inbatch_index, return_counts=True)\n', (11411, 11461), False, 'import torch\n'), ((8301, 8321), 'torch.device', 'torch.device', (['"""cuda"""'], {}), "('cuda')\n", (8313, 8321), False, 'import torch\n'), ((15804, 15833), 'numpy.array', 'np.array', (['self.mix_milestones'], {}), '(self.mix_milestones)\n', (15812, 15833), True, 'import numpy as np\n'), ((5863, 5894), 'torch.Tensor', 'torch.Tensor', (['[remain_timestep]'], {}), '([remain_timestep])\n', (5875, 5894), False, 'import torch\n')] |
import gym
import datetime
import os
import numpy as np
from agent import DeepQAgent
def main():
    """Train a DeepQAgent on LunarLander-v2 and periodically save checkpoints."""
    environment = gym.make("LunarLander-v2")
    timestamp = '{:%Y-%m-%d-%H:%M}'.format(datetime.datetime.now())
    output_dir = "LunarLander-v2/{}/models".format(timestamp)
    if not os.path.exists(output_dir):
        os.makedirs(output_dir)
    episode_count = 500
    # 8 values in [0, 1]
    observation_size = environment.observation_space.shape[0]
    # 0, 1, 2, 3
    n_actions = environment.action_space.n
    agent = DeepQAgent(observation_size, n_actions, model=2)
    batch_size = 32
    for episode in range(episode_count):
        observation = np.reshape(environment.reset(), [1, observation_size])
        done = False
        step = 0
        total_reward = 0
        # Play one episode until the environment signals termination.
        while not done:
            environment.render()
            # The agent picks an action (possibly random, depending on the
            # current exploration rate / epsilon).
            action = agent.act(observation)
            successor, reward, done, _ = environment.step(action)
            total_reward += reward
            successor = np.reshape(successor, [1, observation_size])
            # Store the transition for experience replay.
            agent.memorize(observation, action, reward, successor, done)
            observation = successor
            if done:
                print("episode: {}/{}, time: {}, total_reward: {}"
                      .format(episode, episode_count - 1, step, total_reward))
            step += 1
            # Start replay training once more than one batch is buffered.
            if len(agent.memory) > batch_size:
                agent.train(batch_size)
        # Save model after training
        if episode % batch_size == 1:
            agent.save(output_dir + "/model_" + str(episode) + ".hdf5")
# Script entry point: run the training loop when executed directly.
if __name__ == "__main__":
    main()
| [
"os.path.exists",
"numpy.reshape",
"os.makedirs",
"agent.DeepQAgent",
"datetime.datetime.now",
"gym.make"
] | [((110, 136), 'gym.make', 'gym.make', (['"""LunarLander-v2"""'], {}), "('LunarLander-v2')\n", (118, 136), False, 'import gym\n'), ((487, 531), 'agent.DeepQAgent', 'DeepQAgent', (['state_size', 'action_size'], {'model': '(2)'}), '(state_size, action_size, model=2)\n', (497, 531), False, 'from agent import DeepQAgent\n'), ((181, 204), 'datetime.datetime.now', 'datetime.datetime.now', ([], {}), '()\n', (202, 204), False, 'import datetime\n'), ((274, 295), 'os.path.exists', 'os.path.exists', (['o_dir'], {}), '(o_dir)\n', (288, 295), False, 'import os\n'), ((305, 323), 'os.makedirs', 'os.makedirs', (['o_dir'], {}), '(o_dir)\n', (316, 323), False, 'import os\n'), ((637, 671), 'numpy.reshape', 'np.reshape', (['state', '[1, state_size]'], {}), '(state, [1, state_size])\n', (647, 671), True, 'import numpy as np\n'), ((1212, 1251), 'numpy.reshape', 'np.reshape', (['next_state', '[1, state_size]'], {}), '(next_state, [1, state_size])\n', (1222, 1251), True, 'import numpy as np\n')] |
from io import StringIO
from typing import List
import os
import csv
import re
import pytest
from django.core.management import call_command
from django.core.management.base import CommandError
from django.core.files.temp import NamedTemporaryFile
import accounts.models
from supply_chains.management.commands.ingest_csv import (
MODEL_GOV_DEPT,
MODEL_SUPPLY_CHAIN,
MODEL_STRAT_ACTION,
MODEL_STRAT_ACTION_UPDATE,
)
from supply_chains.test.factories import (
SupplyChainFactory,
StrategicActionFactory,
StrategicActionUpdateFactory,
GovDepartmentFactory,
)
# Every test in this module needs access to the Django test database.
pytestmark = pytest.mark.django_db
class TestExtractCSV:
    """Tests for the ``extract_csv`` management command.

    Each test dumps one model type to a per-test temporary CSV file via the
    command, reads the file back and checks the exported rows.
    """

    DUMP_CMD = "extract_csv"

    def setup_method(self):
        # delete=False so the command can reopen the file by name.
        self.data_file = NamedTemporaryFile(suffix=".csv", delete=False)

    def teardown_method(self):
        os.remove(self.data_file.name)

    def load_csv(self) -> List:
        """Read the dumped CSV back as a list of header-keyed dict rows."""
        with open(self.data_file.name) as f:
            reader = csv.DictReader(f)
            rows = list(reader)
        return rows

    def invoke_dump(self, *args):
        """Invoke the management command, returning its captured stdout."""
        with StringIO() as status:
            call_command(self.DUMP_CMD, *args, stdout=status)
            return status.getvalue()

    def test_dump_accounts_data(self):
        # Arrange (local names spelled correctly; was "trade_domian")
        trade_domain = "dosac.gov.uk"
        trade_name = "DOSAC"
        hmrc_domain = "hmrc.gov.uk"
        hmrc_name = "HMRC"
        GovDepartmentFactory(email_domains=[trade_domain], name=trade_name)
        GovDepartmentFactory(email_domains=[hmrc_domain], name=hmrc_name)
        # Act
        self.invoke_dump(MODEL_GOV_DEPT, self.data_file.name)
        rows = self.load_csv()
        # Assert
        assert len(rows) == 3
        lookup = {x["name"]: x for x in rows}
        assert (
            lookup[trade_name]["name"] == trade_name
            and lookup[trade_name]["email_domain_0"] == trade_domain
        )
        assert (
            lookup[hmrc_name]["name"] == hmrc_name
            and lookup[hmrc_name]["email_domain_0"] == hmrc_domain
        )

    def test_dump_accounts_data_multi_domain(self):
        # Arrange (was "trade_domians")
        trade_domains = "dosac.gov.uk", "analogue.dosac.gov.uk"
        trade_name = "DOSAC"
        GovDepartmentFactory(email_domains=trade_domains, name=trade_name)
        # Act
        self.invoke_dump(MODEL_GOV_DEPT, self.data_file.name)
        rows = self.load_csv()
        # Assert: each extra domain becomes its own indexed column.
        assert len(rows) == 2
        assert all(k in rows[0] for k in ("email_domain_0", "email_domain_1"))

    def test_dump_accounts_no_data(self):
        # Arrange
        accounts.models.GovDepartment.objects.all().delete()
        # Act
        self.invoke_dump(MODEL_GOV_DEPT, self.data_file.name)
        # Assert: nothing to export -> file exists but is empty (no header row).
        assert os.path.exists(self.data_file.name)
        assert os.stat(self.data_file.name).st_size == 0

    def test_dump_sc_data(self):
        # Arrange
        SupplyChainFactory()
        # Act
        self.invoke_dump(MODEL_SUPPLY_CHAIN, self.data_file.name)
        rows = self.load_csv()
        # Assert (plain pattern string; the f-prefix had no placeholders)
        assert len(rows) == 1
        assert re.match("Product ", rows[0]["name"])

    def test_dump_sc_data_multiple(self):
        # Arrange
        SupplyChainFactory.create_batch(5)
        # Act
        self.invoke_dump(MODEL_SUPPLY_CHAIN, self.data_file.name)
        rows = self.load_csv()
        # Assert
        assert len(rows) == 5
        names = [x["name"] for x in rows]
        assert all(x.startswith("Product ") for x in names)
        # Every exported row carries a unique id.
        ids = [x["id"] for x in rows]
        assert len(ids) == len(set(ids))

    def test_dump_sa_data(self):
        # Arrange
        sc = SupplyChainFactory()
        StrategicActionFactory(supply_chain=sc)
        # Act
        self.invoke_dump(MODEL_STRAT_ACTION, self.data_file.name)
        rows = self.load_csv()
        # Assert
        assert len(rows) == 1
        assert re.match("Strategic action ", rows[0]["name"])
        assert rows[0]["supply_chain"] == str(sc.id)

    def test_dump_sa_data_multiple(self):
        # Arrange
        exp_sc_ids = []
        for _ in range(4):
            sc = SupplyChainFactory()
            StrategicActionFactory(supply_chain=sc)
            exp_sc_ids.append(str(sc.id))
        # Act
        self.invoke_dump(MODEL_STRAT_ACTION, self.data_file.name)
        rows = self.load_csv()
        # Assert
        assert len(rows) == 4
        ids = [x["id"] for x in rows]
        assert len(ids) == len(set(ids))
        # The exported foreign keys cover exactly the created supply chains.
        sc_ids = [x["supply_chain"] for x in rows]
        assert sorted(sc_ids) == sorted(exp_sc_ids)
        names = [x["name"] for x in rows]
        assert all(x.startswith("Strategic action ") for x in names)

    def test_dump_sau_data(self):
        # Arrange
        sc = SupplyChainFactory()
        sa = StrategicActionFactory(supply_chain=sc)
        StrategicActionUpdateFactory(supply_chain=sc, strategic_action=sa)
        # Act
        self.invoke_dump(MODEL_STRAT_ACTION_UPDATE, self.data_file.name)
        rows = self.load_csv()
        # Assert
        assert len(rows) == 1
        assert rows[0]["supply_chain"] == str(sc.id)
        assert rows[0]["strategic_action"] == str(sa.id)

    def test_dump_sau_data_multiple(self):
        # Arrange
        exp_sc_ids = []
        exp_sa_ids = []
        for _ in range(4):
            sc = SupplyChainFactory()
            sa = StrategicActionFactory(supply_chain=sc)
            StrategicActionUpdateFactory(supply_chain=sc, strategic_action=sa)
            exp_sc_ids.append(str(sc.id))
            exp_sa_ids.append(str(sa.id))
        # Act
        self.invoke_dump(MODEL_STRAT_ACTION_UPDATE, self.data_file.name)
        rows = self.load_csv()
        # Assert
        assert len(rows) == 4
        ids = [x["id"] for x in rows]
        assert len(ids) == len(set(ids))
        sc_ids = [x["supply_chain"] for x in rows]
        assert sorted(sc_ids) == sorted(exp_sc_ids)
        sa_ids = [x["strategic_action"] for x in rows]
        assert sorted(sa_ids) == sorted(exp_sa_ids)

    def test_dump_inv_model(self):
        # Arrange
        inv_model = "hello world"
        # Act / Assert: unknown model names are rejected with a CommandError.
        with pytest.raises(CommandError, match=f"Unknown model {inv_model}"):
            self.invoke_dump(inv_model, self.data_file.name)
| [
"os.path.exists",
"csv.DictReader",
"supply_chains.test.factories.StrategicActionFactory",
"supply_chains.test.factories.SupplyChainFactory",
"django.core.files.temp.NamedTemporaryFile",
"django.core.management.call_command",
"supply_chains.test.factories.StrategicActionUpdateFactory",
"re.match",
"... | [((734, 781), 'django.core.files.temp.NamedTemporaryFile', 'NamedTemporaryFile', ([], {'suffix': '""".csv"""', 'delete': '(False)'}), "(suffix='.csv', delete=False)\n", (752, 781), False, 'from django.core.files.temp import NamedTemporaryFile\n'), ((822, 852), 'os.remove', 'os.remove', (['self.data_file.name'], {}), '(self.data_file.name)\n', (831, 852), False, 'import os\n'), ((1388, 1455), 'supply_chains.test.factories.GovDepartmentFactory', 'GovDepartmentFactory', ([], {'email_domains': '[trade_domian]', 'name': 'trade_name'}), '(email_domains=[trade_domian], name=trade_name)\n', (1408, 1455), False, 'from supply_chains.test.factories import SupplyChainFactory, StrategicActionFactory, StrategicActionUpdateFactory, GovDepartmentFactory\n'), ((1464, 1529), 'supply_chains.test.factories.GovDepartmentFactory', 'GovDepartmentFactory', ([], {'email_domains': '[hmrc_domain]', 'name': 'hmrc_name'}), '(email_domains=[hmrc_domain], name=hmrc_name)\n', (1484, 1529), False, 'from supply_chains.test.factories import SupplyChainFactory, StrategicActionFactory, StrategicActionUpdateFactory, GovDepartmentFactory\n'), ((2199, 2265), 'supply_chains.test.factories.GovDepartmentFactory', 'GovDepartmentFactory', ([], {'email_domains': 'trade_domians', 'name': 'trade_name'}), '(email_domains=trade_domians, name=trade_name)\n', (2219, 2265), False, 'from supply_chains.test.factories import SupplyChainFactory, StrategicActionFactory, StrategicActionUpdateFactory, GovDepartmentFactory\n'), ((2732, 2767), 'os.path.exists', 'os.path.exists', (['self.data_file.name'], {}), '(self.data_file.name)\n', (2746, 2767), False, 'import os\n'), ((2885, 2905), 'supply_chains.test.factories.SupplyChainFactory', 'SupplyChainFactory', ([], {}), '()\n', (2903, 2905), False, 'from supply_chains.test.factories import SupplyChainFactory, StrategicActionFactory, StrategicActionUpdateFactory, GovDepartmentFactory\n'), ((3081, 3119), 're.match', 're.match', (['f"""Product """', "rows[0]['name']"], {}), 
"(f'Product ', rows[0]['name'])\n", (3089, 3119), False, 'import re\n'), ((3189, 3223), 'supply_chains.test.factories.SupplyChainFactory.create_batch', 'SupplyChainFactory.create_batch', (['(5)'], {}), '(5)\n', (3220, 3223), False, 'from supply_chains.test.factories import SupplyChainFactory, StrategicActionFactory, StrategicActionUpdateFactory, GovDepartmentFactory\n'), ((3634, 3654), 'supply_chains.test.factories.SupplyChainFactory', 'SupplyChainFactory', ([], {}), '()\n', (3652, 3654), False, 'from supply_chains.test.factories import SupplyChainFactory, StrategicActionFactory, StrategicActionUpdateFactory, GovDepartmentFactory\n'), ((3663, 3702), 'supply_chains.test.factories.StrategicActionFactory', 'StrategicActionFactory', ([], {'supply_chain': 'sc'}), '(supply_chain=sc)\n', (3685, 3702), False, 'from supply_chains.test.factories import SupplyChainFactory, StrategicActionFactory, StrategicActionUpdateFactory, GovDepartmentFactory\n'), ((3878, 3925), 're.match', 're.match', (['f"""Strategic action """', "rows[0]['name']"], {}), "(f'Strategic action ', rows[0]['name'])\n", (3886, 3925), False, 'import re\n'), ((4780, 4800), 'supply_chains.test.factories.SupplyChainFactory', 'SupplyChainFactory', ([], {}), '()\n', (4798, 4800), False, 'from supply_chains.test.factories import SupplyChainFactory, StrategicActionFactory, StrategicActionUpdateFactory, GovDepartmentFactory\n'), ((4814, 4853), 'supply_chains.test.factories.StrategicActionFactory', 'StrategicActionFactory', ([], {'supply_chain': 'sc'}), '(supply_chain=sc)\n', (4836, 4853), False, 'from supply_chains.test.factories import SupplyChainFactory, StrategicActionFactory, StrategicActionUpdateFactory, GovDepartmentFactory\n'), ((4862, 4928), 'supply_chains.test.factories.StrategicActionUpdateFactory', 'StrategicActionUpdateFactory', ([], {'supply_chain': 'sc', 'strategic_action': 'sa'}), '(supply_chain=sc, strategic_action=sa)\n', (4890, 4928), False, 'from supply_chains.test.factories import 
SupplyChainFactory, StrategicActionFactory, StrategicActionUpdateFactory, GovDepartmentFactory\n'), ((952, 969), 'csv.DictReader', 'csv.DictReader', (['f'], {}), '(f)\n', (966, 969), False, 'import csv\n'), ((1071, 1081), 'io.StringIO', 'StringIO', ([], {}), '()\n', (1079, 1081), False, 'from io import StringIO\n'), ((1105, 1154), 'django.core.management.call_command', 'call_command', (['self.DUMP_CMD', '*args'], {'stdout': 'status'}), '(self.DUMP_CMD, *args, stdout=status)\n', (1117, 1154), False, 'from django.core.management import call_command\n'), ((4112, 4132), 'supply_chains.test.factories.SupplyChainFactory', 'SupplyChainFactory', ([], {}), '()\n', (4130, 4132), False, 'from supply_chains.test.factories import SupplyChainFactory, StrategicActionFactory, StrategicActionUpdateFactory, GovDepartmentFactory\n'), ((4145, 4184), 'supply_chains.test.factories.StrategicActionFactory', 'StrategicActionFactory', ([], {'supply_chain': 'sc'}), '(supply_chain=sc)\n', (4167, 4184), False, 'from supply_chains.test.factories import SupplyChainFactory, StrategicActionFactory, StrategicActionUpdateFactory, GovDepartmentFactory\n'), ((5368, 5388), 'supply_chains.test.factories.SupplyChainFactory', 'SupplyChainFactory', ([], {}), '()\n', (5386, 5388), False, 'from supply_chains.test.factories import SupplyChainFactory, StrategicActionFactory, StrategicActionUpdateFactory, GovDepartmentFactory\n'), ((5406, 5445), 'supply_chains.test.factories.StrategicActionFactory', 'StrategicActionFactory', ([], {'supply_chain': 'sc'}), '(supply_chain=sc)\n', (5428, 5445), False, 'from supply_chains.test.factories import SupplyChainFactory, StrategicActionFactory, StrategicActionUpdateFactory, GovDepartmentFactory\n'), ((5458, 5524), 'supply_chains.test.factories.StrategicActionUpdateFactory', 'StrategicActionUpdateFactory', ([], {'supply_chain': 'sc', 'strategic_action': 'sa'}), '(supply_chain=sc, strategic_action=sa)\n', (5486, 5524), False, 'from supply_chains.test.factories import 
SupplyChainFactory, StrategicActionFactory, StrategicActionUpdateFactory, GovDepartmentFactory\n'), ((6259, 6322), 'pytest.raises', 'pytest.raises', (['CommandError'], {'match': 'f"""Unknown model {inv_model}"""'}), "(CommandError, match=f'Unknown model {inv_model}')\n", (6272, 6322), False, 'import pytest\n'), ((2783, 2811), 'os.stat', 'os.stat', (['self.data_file.name'], {}), '(self.data_file.name)\n', (2790, 2811), False, 'import os\n')] |
from bootstrap.lib.options import Options
from bootstrap.lib.logger import Logger
from .extract_engine import ExtractEngine
from .predict_engine import PredictEngine
def factory():
    """Instantiate the engine selected by the ``engine.name`` option.

    Returns:
        ExtractEngine or PredictEngine, depending on the configured name.

    Raises:
        ValueError: if ``engine.name`` is not a recognized engine type.
    """
    # Look the option block up once instead of re-reading Options() per branch.
    engine_opt = Options()['engine']
    engine_name = engine_opt['name']
    if engine_name == 'extract':
        engine = ExtractEngine()
    elif engine_name == 'predict':
        # vid_id is optional; PredictEngine applies its own default when absent.
        engine = PredictEngine(vid_id=engine_opt.get('vid_id', None))
    else:
        # Was a bare ``raise ValueError`` -- name the offending value.
        raise ValueError("Unknown engine name: {}".format(engine_name))
    return engine
"bootstrap.lib.options.Options"
] | [((195, 204), 'bootstrap.lib.options.Options', 'Options', ([], {}), '()\n', (202, 204), False, 'from bootstrap.lib.options import Options\n'), ((338, 347), 'bootstrap.lib.options.Options', 'Options', ([], {}), '()\n', (345, 347), False, 'from bootstrap.lib.options import Options\n'), ((281, 290), 'bootstrap.lib.options.Options', 'Options', ([], {}), '()\n', (288, 290), False, 'from bootstrap.lib.options import Options\n')] |
#!/usr/bin/python3
## Tommy
from botbase import *
# Cumulative cases: "…eit Ende Februar 2020 … insgesamt <n>" (the RKI clause is optional).
_aachen_c = re.compile(r"eit Ende Februar 2020 (?:wurden beim Robert.Koch.Institut \(RKI\) )?insgesamt ([0-9.]+)")
# Reported deaths: "Die Zahl der gemeldeten Todesfälle liegt bei <n>".
_aachen_d = re.compile(r"Die Zahl der gemeldeten Todesfälle liegt bei ([0-9.]+)")
# Currently active cases: "Aktuell sind <n> Menschen … nachgewiesen".
_aachen_a = re.compile(r"Aktuell sind ([0-9.]+) Menschen nachgewiesen")
def aachen(sheets):
    """Scrape the Aachen corona page and push today's numbers to the sheet.

    Computes cases (c), deaths (d) and, when the page lists active cases,
    recovered (g = c - d - active).

    Raises:
        NotYetAvailableException: when the page still shows yesterday's date.
    """
    import locale
    # German month names are needed so strftime matches the page's date header.
    locale.setlocale(locale.LC_TIME, "de_DE.UTF-8")
    soup = get_soup("https://www.aachen.de/DE/stadt_buerger/notfall_informationen/corona/aktuelles/index.html")
    header = next(p.get_text() for p in soup.find_all(["p", "h2"]) if "Zahlen zum Infektionsge" in p.get_text())
    if not today().strftime("%e. %B %Y") in header:
        raise NotYetAvailableException("Aachen noch alt: " + header[24:])
    content = soup.get_text()
    c = force_int(_aachen_c.search(content).group(1))
    d = force_int(_aachen_d.search(content).group(1))
    # Run the "active cases" regex once (was searched twice); without a match
    # the recovered count cannot be derived.
    active_match = _aachen_a.search(content)
    g = (c - d - force_int(active_match.group(1))) if active_match else None
    com = "Bot ohne G" if g is None else "Bot"
    update(sheets, 5334, c=c, d=d, g=g, sig="Bot", comment=com, ignore_delta=True)
    return True
# Register the scraper with the shared scheduler; the numeric arguments are the
# Task parameters from botbase (window bounds, retry interval) plus sheet id 5334.
schedule.append(Task(10, 15, 17, 50, 600, aachen, 5334))
# Manual one-off run when executed directly.
if __name__ == '__main__': aachen(googlesheets())
| [
"locale.setlocale"
] | [((363, 410), 'locale.setlocale', 'locale.setlocale', (['locale.LC_TIME', '"""de_DE.UTF-8"""'], {}), "(locale.LC_TIME, 'de_DE.UTF-8')\n", (379, 410), False, 'import locale\n')] |
from mach_utils import *
import logging
from argparse import ArgumentParser
from fc_network import FCNetwork
import tqdm
from dataset import XCDataset,XCDataset_massive
import json
from typing import Dict, List
from trim_labels import get_discard_set
from xclib.evaluation import xc_metrics
from xclib.data import data_utils
from torchnet import meter
import time
def get_args():
    """Define and parse the command-line interface of the evaluation script."""
    parser = ArgumentParser()
    parser.add_argument("--model", "-m", dest="model", type=str, required=True,
                        help="Path to the model config yaml file.")
    parser.add_argument("--dataset", "-d", dest="dataset", type=str, required=True,
                        help="Path to the data config yaml file.")
    parser.add_argument("--gpus", "-g", dest="gpus", type=str, required=False, default="0",
                        help="A string that specifies which GPU you want to use, split by comma. Eg 0,1")
    parser.add_argument("--cost", "-c", dest="cost", type=str, required=False, default="",
                        help="Use cost-sensitive model or not. Should be in [hashed, original]. "
                             "Default empty string, which indicates that no cost-sensitive is used.")
    parser.add_argument("--type", "-t", dest="type", type=str, required=False, default="all",
                        help="""Evaluation type. Should be 'all'(default) and/or 'trim_eval', split by comma. Eg. 'all,trim_eval'. If it is 'trim_eval', the rate parameter should be specified.
        'all': Evaluate normally. If the 'trimmed' field in data config file is true, the code will automatically map the rest of the labels back to the orginal ones.
        'trim_eval': Trim labels when evaluating. The scores with tail labels will be set to 0 in order not to predict these ones. This checks how much tail labels affect final evaluation metrics. Plus it will evaluate average precision on tail and head labels only.
        """)
    parser.add_argument("--rate", "-r", dest="rate", type=str, required=False, default="0.1",
                        help="""If evaluation needs trimming, this parameter specifies how many labels will be trimmed, decided by cumsum.
        Should be a string containing trimming rates split by comma. Eg '0.1,0.2'. Default '0.1'.""")
    parser.add_argument("--batch_size", "-bs", dest="bs", type=int, required=False, default="32",
                        help="""Evaluation batch size.""")
    return parser.parse_args()
def get_inv_hash(counts, inv_mapping, j):
    """Map a hashed bucket index back to the original labels it contains.

    :param counts: prefix-offset array; bucket ``j`` owns the slice
        ``inv_mapping[counts[j]:counts[j + 1]]``.
    :param inv_mapping: flat array of original label ids grouped by bucket.
    :param j: bucket index in ``[0, b)``; can be a tensor.
    :return: the original labels hashed into bucket ``j``.
    """
    lo, hi = counts[j], counts[j + 1]
    return inv_mapping[lo:hi]
def single_rep(data_cfg, model_cfg, r):
    """Evaluate repetition ``r``: load its checkpoint into the shared model and
    return ``(ground truth, scores reordered into original label order)``.

    NOTE(review): relies on module-level globals ``a``, ``label_path``,
    ``model`` and ``test_loader`` being initialized by the main script.
    """
    a.__dict__['rep'] = r
    rep_model_dir = get_model_dir(data_cfg, model_cfg, a)
    # Hash mapping for this repetition (hashed bucket <-> original labels).
    counts, label_mapping, inv_mapping = get_label_hash(label_path, r)
    label_mapping = torch.from_numpy(label_mapping)
    # Checkpoint selection: an explicit "pretrained" path wins over the
    # best-model file inside this repetition's model directory.
    default_ckpt = os.path.join(rep_model_dir, model_cfg["best_file"])
    ckpt_path = model_cfg["pretrained"] if model_cfg["pretrained"] else default_ckpt
    if not os.path.exists(ckpt_path):
        raise FileNotFoundError(
            "Model {} does not exist.".format(ckpt_path))
    meta_info = torch.load(ckpt_path)
    model.load_state_dict(meta_info['model'])
    # gt holds original labels; p holds hashed scores, reindexed on return.
    gt, p, _, _ = compute_scores(model, test_loader)
    return gt, p[:, label_mapping]
def map_trimmed_back(scores, data_dir, prefix, ori_labels):
    """Scatter scores over a trimmed label set back into the original space.

    Args:
        scores: ``(num_instances, num_trimmed_labels)`` score matrix.
        data_dir: directory containing ``<prefix>_meta.json``.
        prefix: dataset prefix used to locate the mapping file.
        ori_labels: number of labels in the original (untrimmed) space.

    Returns:
        ``(num_instances, ori_labels)`` ndarray; columns of labels that were
        trimmed away stay zero.
    """
    mapping_file = os.path.join(data_dir, prefix + "_meta.json")
    with open(mapping_file, 'r') as f:
        trim_mapping: Dict = json.load(f)
    # meta.json maps original label id -> [trimmed id, ...]; invert it so
    # trimmed column i can be scattered back to its original column.
    reverse_mapping = {v[0]: int(k) for k, v in trim_mapping.items()}
    # Column order follows the trimmed index (sorted keys of the inverse map).
    original_cols = [reverse_mapping[k] for k in sorted(reverse_mapping.keys())]
    num_ins = scores.shape[0]
    ori_scores = np.zeros([num_ins, ori_labels])
    # Plain NumPy integer fancy indexing -- no need to round-trip the index
    # list through a torch tensor as the original did.
    ori_scores[:, original_cols] = scores
    return ori_scores
def sanity_check(a):
    """Validate the parsed CLI arguments.

    The main script splits ``--type`` on commas and accepts several modes at
    once (e.g. ``all,trim_eval``), so validate each mode individually instead
    of the whole string (the old whole-string check rejected legal values).

    Args:
        a: parsed argparse namespace.

    Raises:
        AssertionError: if any requested evaluation type is unknown.
    """
    valid = {'all', 'trim_eval', 'only_tail'}
    assert all(t in valid for t in a.type.split(',')), \
        "Unknown evaluation type in %r" % a.type
# Script entry point: evaluate an ensemble of R hashed-label models on the test
# set and report precision / nDCG plus their propensity-scored variants.
if __name__ == "__main__":
    a = get_args()
    gpus = [int(i) for i in a.gpus.split(",")]
    # Load the yaml configs for the dataset and the model.
    data_cfg = get_config(a.dataset)
    model_cfg = get_config(a.model)
    log_file = data_cfg['prefix'] + "_eval.log"
    model_dir = os.path.join(model_cfg["model_dir"], data_cfg["prefix"])
    # Log both to a file next to the models and to the console.
    logging.basicConfig(level = logging.INFO,
                        format = '%(asctime)s %(levelname)-8s %(message)s', datefmt = '%Y-%m-%d %H:%M:%S',
                        handlers = [
                            logging.FileHandler(
                                os.path.join(model_dir, log_file)),
                            logging.StreamHandler()
                        ])
    cuda = torch.cuda.is_available()
    R = model_cfg['r']  # number of hashed repetitions in the ensemble
    b = model_cfg['b']  # number of hash buckets (per-model output size)
    num_labels = data_cfg["num_labels"]
    ori_dim = data_cfg['ori_dim']
    dest_dim = model_cfg['dest_dim']
    name = data_cfg['name']
    prefix = data_cfg['prefix']
    record_dir = data_cfg["record_dir"]
    data_dir = os.path.join("data", name)
    K = model_cfg['at_k']
    feat_path = os.path.join(record_dir, "_".join([prefix, str(ori_dim), str(dest_dim)]))
    # load dataset
    test_file = os.path.join(data_dir, prefix + "_test.txt")
    # this will take a lot of space!!!!!!
    test_set = XCDataset_massive(test_file, 0, data_cfg, model_cfg, 'te')
    # test_sets = [XCDataset(test_file, r, data_cfg, model_cfg, 'te') for r in range(R)]
    test_loader = torch.utils.data.DataLoader(
        test_set, batch_size = a.bs)
    # construct model
    layers = [dest_dim] + model_cfg['hidden'] + [b]
    model = FCNetwork(layers)
    model = torch.nn.DataParallel(model, device_ids=gpus)
    if cuda:
        model = model.cuda()
    label_path = os.path.join(record_dir, "_".join(
        [prefix, str(num_labels), str(b), str(R)]))  # Bibtex_159_100_32
    pred_avg_meter = AverageMeter()
    gt = None
    logging.info("Evaluating config %s" % (a.model))
    logging.info("Dataset config %s" % (a.dataset))
    if a.cost:
        logging.info("Evaluating cost-sensitive method: %s" % (a.cost))
    # get inverse propensity
    _, labels, _, _, _ = data_utils.read_data(test_file)
    inv_propen = xc_metrics.compute_inv_propesity(labels, model_cfg["ps_A"], model_cfg["ps_B"])
    gts = []
    scaled_eval_flags = []
    eval_flags = []
    ps_eval_flags = []
    map_meter = meter.mAPMeter()
    for i, data in enumerate(tqdm.tqdm(test_loader)):
        print(i, 'th data')
        # NOTE(review): the meter is re-created per batch, so it averages the
        # R repetition scores for this batch only.
        pred_avg_meter = AverageMeter()
        X, gt = data
        bs = X.shape[0]
        for r in range(R):
            print("REP", r, end = '\t')
            x = X
            feat_mapping = get_feat_hash(feat_path, r)
            if model_cfg['is_feat_hash']:
                # Remap sparse feature indices through this repetition's hash.
                x = x.coalesce()
                ind = x.indices()
                v = x.values()
                ind[1] = torch.from_numpy(feat_mapping[ind[1]])
                x = torch.sparse_coo_tensor(ind, values = v, size = (bs, dest_dim))
            else:
                pass
            x = x.to_dense()
            if cuda:
                x = x.cuda()
            # load model
            a.__dict__['rep'] = r
            model_dir = get_model_dir(data_cfg, model_cfg, a)
            # load mapping
            counts, label_mapping, inv_mapping = get_label_hash(label_path, r)
            label_mapping = torch.from_numpy(label_mapping)
            # load models
            best_param = os.path.join(model_dir, model_cfg["best_file"])
            preload_path = model_cfg["pretrained"] if model_cfg["pretrained"] else best_param
            if os.path.exists(preload_path):
                start= time.perf_counter()
                if cuda:
                    meta_info = torch.load(preload_path)
                else:
                    # No GPU available: remap tensors onto the CPU on load.
                    meta_info = torch.load(
                        preload_path, map_location=lambda storage, loc: storage)
                model.load_state_dict(meta_info['model'])
                end = time.perf_counter()
                # logging.info("Load model time: %.3f s." % (end - start))
            else:
                raise FileNotFoundError(
                    "Model {} does not exist.".format(preload_path))
            # the r_th output
            start = time.perf_counter()
            model.eval()
            with torch.no_grad():
                out = model(x)
                out = torch.sigmoid(out)
            # Reorder the hashed outputs back into original label order, then
            # fold them into the per-batch ensemble average.
            out = out.detach().cpu().numpy()[:, label_mapping]
            pred_avg_meter.update(out, 1)
            end = time.perf_counter()
            # logging.info("Single model running time: %.3f s." % (end - start))
        start=time.perf_counter()
        # Convert this batch's ground truth to a scipy COO matrix.
        if gt.is_sparse:
            gt = gt.coalesce()
            gt = scipy.sparse.coo_matrix((gt.values().cpu().numpy(),
                                      gt.indices().cpu().numpy()),
                                     shape = (bs, num_labels))
        else:
            gt = scipy.sparse.coo_matrix(gt.cpu().numpy())
        # only a batch of eval flags
        scores = pred_avg_meter.avg
        # map_meter.add(scores, gt.todense())
        indices, true_labels, ps_indices, inv_psp = xc_metrics. \
            _setup_metric(scores, gt, inv_propen)
        eval_flag = xc_metrics._eval_flags(indices, true_labels, None)
        ps_eval_flag = xc_metrics._eval_flags(ps_indices, true_labels, inv_psp)
        # gts.append(gt)
        scaled_eval_flag = np.multiply(inv_psp[indices], eval_flag)
        eval_flags.append(eval_flag)
        ps_eval_flags.append(ps_eval_flag)
        scaled_eval_flags.append(scaled_eval_flag)
        end = time.perf_counter()
        logging.info("Eval collection time: %.3f s." % (end - start))
    # eval all
    # gts = np.concatenate(gts)
    scaled_eval_flags = np.concatenate(scaled_eval_flags)
    eval_flags = np.concatenate(eval_flags)
    ps_eval_flags = np.concatenate(ps_eval_flags)
    ndcg_denominator = np.cumsum(
        1 / np.log2(np.arange(1, num_labels + 1) + 1))
    _total_pos = np.asarray(
        labels.sum(axis = 1),
        dtype = np.int32)
    n = ndcg_denominator[_total_pos - 1]
    prec = xc_metrics._precision(eval_flags, K)
    ndcg = xc_metrics._ndcg(eval_flags, n, K)
    # Propensity-scored metrics are normalized by the best achievable score.
    PSprec = xc_metrics._precision(scaled_eval_flags, K) / xc_metrics._precision(ps_eval_flags, K)
    PSnDCG = xc_metrics._ndcg(scaled_eval_flags, n, K) / xc_metrics._ndcg(ps_eval_flags, n, K)
    d = {
        "prec": prec,
        "ndcg": ndcg,
        "psp": PSprec,
        "psn": PSnDCG,
        "mAP": [map_meter.value()]
    }
    log_eval_results(d)
    # map trimmed labels back to original ones
    # scores = pred_avg_meter.avg
    # types = a.type.split(',')
    # if 'all' in types:
    #     if data_cfg['trimmed']:
    #         # if use trim_eval or only_tail, data_cfg['trimmed'] should be false
    #         scores = map_trimmed_back(
    #             scores, data_dir, prefix, data_cfg['ori_labels'])
    #
    #     if gt is None:
    #         raise Exception("You must have at least one model.")
    #     else:
    #         # Sum of avg is larger than 1 -> that is the feature, no problem
    #         d = evaluate_scores(gt, scores, model_cfg)
    #         log_eval_results(d)
    #
    # if 'trim_eval' in types or 'only_tail' in types:
    #     # find tail labels using training set.
    #     filepath = 'data/{n1}/{n1}_train.txt'.format(n1 = name)
    #     print(filepath)
    #     rate = [float(f) for f in a.rate.split(',')]
    #     discard_sets, count_np = get_discard_set(filepath, 'cumsum', rate)
    #     all_label_set = set(range(num_labels))
    #     rest_labels = [all_label_set - d for d in discard_sets]
    #     if 'trim_eval' in types:
    #         for r, dis_set, rest in zip(rate, discard_sets, rest_labels):
    #             logging.info(
    #                 "Evaluate when trimming off {num_dis} labels (cumsum rate: {rate:.2f}%%, actual rate: {r2:.2f}%%)".format(
    #                     num_dis = len(dis_set), rate = r * 100, r2 = len(dis_set) / num_labels * 100))
    #             dis_list = sorted(list(dis_set))
    #             rest_list = sorted(list(rest))
    #             new_score = np.copy(scores)
    #             new_score[:, dis_list] = 0
    #             log_eval_results(evaluate_scores(gt, new_score, model_cfg))
    #
    #         # eval on head and tail labels, using original scores
    #         ap = APMeter()
    #         ap.add(scores, gt.todense())
    #         logging.info("AP of tail labels and head labels: %.2f, %.2f.\n" % (
    #             ap.value()[dis_list].mean() * 100, ap.value()[rest_list].mean() * 100))
| [
"xclib.evaluation.xc_metrics._eval_flags",
"logging.StreamHandler",
"argparse.ArgumentParser",
"xclib.data.data_utils.read_data",
"tqdm.tqdm",
"xclib.evaluation.xc_metrics._ndcg",
"time.perf_counter",
"torchnet.meter.mAPMeter",
"xclib.evaluation.xc_metrics._setup_metric",
"fc_network.FCNetwork",
... | [((390, 406), 'argparse.ArgumentParser', 'ArgumentParser', ([], {}), '()\n', (404, 406), False, 'from argparse import ArgumentParser\n'), ((5506, 5564), 'dataset.XCDataset_massive', 'XCDataset_massive', (['test_file', '(0)', 'data_cfg', 'model_cfg', '"""te"""'], {}), "(test_file, 0, data_cfg, model_cfg, 'te')\n", (5523, 5564), False, 'from dataset import XCDataset, XCDataset_massive\n'), ((5824, 5841), 'fc_network.FCNetwork', 'FCNetwork', (['layers'], {}), '(layers)\n', (5833, 5841), False, 'from fc_network import FCNetwork\n'), ((6126, 6172), 'logging.info', 'logging.info', (["('Evaluating config %s' % a.model)"], {}), "('Evaluating config %s' % a.model)\n", (6138, 6172), False, 'import logging\n'), ((6179, 6224), 'logging.info', 'logging.info', (["('Dataset config %s' % a.dataset)"], {}), "('Dataset config %s' % a.dataset)\n", (6191, 6224), False, 'import logging\n'), ((6378, 6409), 'xclib.data.data_utils.read_data', 'data_utils.read_data', (['test_file'], {}), '(test_file)\n', (6398, 6409), False, 'from xclib.data import data_utils\n'), ((6427, 6505), 'xclib.evaluation.xc_metrics.compute_inv_propesity', 'xc_metrics.compute_inv_propesity', (['labels', "model_cfg['ps_A']", "model_cfg['ps_B']"], {}), "(labels, model_cfg['ps_A'], model_cfg['ps_B'])\n", (6459, 6505), False, 'from xclib.evaluation import xc_metrics\n'), ((6605, 6621), 'torchnet.meter.mAPMeter', 'meter.mAPMeter', ([], {}), '()\n', (6619, 6621), False, 'from torchnet import meter\n'), ((10435, 10471), 'xclib.evaluation.xc_metrics._precision', 'xc_metrics._precision', (['eval_flags', 'K'], {}), '(eval_flags, K)\n', (10456, 10471), False, 'from xclib.evaluation import xc_metrics\n'), ((10483, 10517), 'xclib.evaluation.xc_metrics._ndcg', 'xc_metrics._ndcg', (['eval_flags', 'n', 'K'], {}), '(eval_flags, n, K)\n', (10499, 10517), False, 'from xclib.evaluation import xc_metrics\n'), ((3776, 3788), 'json.load', 'json.load', (['f'], {}), '(f)\n', (3785, 3788), False, 'import json\n'), ((6250, 6311), 
'logging.info', 'logging.info', (["('Evaluating cost-sensitive method: %s' % a.cost)"], {}), "('Evaluating cost-sensitive method: %s' % a.cost)\n", (6262, 6311), False, 'import logging\n'), ((6656, 6678), 'tqdm.tqdm', 'tqdm.tqdm', (['test_loader'], {}), '(test_loader)\n', (6665, 6678), False, 'import tqdm\n'), ((8894, 8913), 'time.perf_counter', 'time.perf_counter', ([], {}), '()\n', (8911, 8913), False, 'import time\n'), ((9443, 9491), 'xclib.evaluation.xc_metrics._setup_metric', 'xc_metrics._setup_metric', (['scores', 'gt', 'inv_propen'], {}), '(scores, gt, inv_propen)\n', (9467, 9491), False, 'from xclib.evaluation import xc_metrics\n'), ((9527, 9577), 'xclib.evaluation.xc_metrics._eval_flags', 'xc_metrics._eval_flags', (['indices', 'true_labels', 'None'], {}), '(indices, true_labels, None)\n', (9549, 9577), False, 'from xclib.evaluation import xc_metrics\n'), ((9601, 9657), 'xclib.evaluation.xc_metrics._eval_flags', 'xc_metrics._eval_flags', (['ps_indices', 'true_labels', 'inv_psp'], {}), '(ps_indices, true_labels, inv_psp)\n', (9623, 9657), False, 'from xclib.evaluation import xc_metrics\n'), ((9905, 9924), 'time.perf_counter', 'time.perf_counter', ([], {}), '()\n', (9922, 9924), False, 'import time\n'), ((9933, 9994), 'logging.info', 'logging.info', (["('Eval collection time: %.3f s.' % (end - start))"], {}), "('Eval collection time: %.3f s.' 
% (end - start))\n", (9945, 9994), False, 'import logging\n'), ((10536, 10579), 'xclib.evaluation.xc_metrics._precision', 'xc_metrics._precision', (['scaled_eval_flags', 'K'], {}), '(scaled_eval_flags, K)\n', (10557, 10579), False, 'from xclib.evaluation import xc_metrics\n'), ((10582, 10621), 'xclib.evaluation.xc_metrics._precision', 'xc_metrics._precision', (['ps_eval_flags', 'K'], {}), '(ps_eval_flags, K)\n', (10603, 10621), False, 'from xclib.evaluation import xc_metrics\n'), ((10635, 10676), 'xclib.evaluation.xc_metrics._ndcg', 'xc_metrics._ndcg', (['scaled_eval_flags', 'n', 'K'], {}), '(scaled_eval_flags, n, K)\n', (10651, 10676), False, 'from xclib.evaluation import xc_metrics\n'), ((10679, 10716), 'xclib.evaluation.xc_metrics._ndcg', 'xc_metrics._ndcg', (['ps_eval_flags', 'n', 'K'], {}), '(ps_eval_flags, n, K)\n', (10695, 10716), False, 'from xclib.evaluation import xc_metrics\n'), ((8491, 8510), 'time.perf_counter', 'time.perf_counter', ([], {}), '()\n', (8508, 8510), False, 'import time\n'), ((8766, 8785), 'time.perf_counter', 'time.perf_counter', ([], {}), '()\n', (8783, 8785), False, 'import time\n'), ((4856, 4879), 'logging.StreamHandler', 'logging.StreamHandler', ([], {}), '()\n', (4877, 4879), False, 'import logging\n'), ((7888, 7907), 'time.perf_counter', 'time.perf_counter', ([], {}), '()\n', (7905, 7907), False, 'import time\n'), ((8217, 8236), 'time.perf_counter', 'time.perf_counter', ([], {}), '()\n', (8234, 8236), False, 'import time\n')] |
import glob
import os
import sys
from sgfmill.sgfmill import sgf
import global_vars_go as gvg
import loader
import utils
import board3d as go_board
import numpy as np
# Evaluate a trained Go move-prediction model against SGF game records.
# Games are loaded in batches of lb_size to bound memory usage; for every
# position the model's argmax move is compared to the human move actually
# played, and a running accuracy is printed per batch.
kifuPath = "./kifu"
num_games = gvg.num_games
from_game = gvg.from_test_games
lb_size = 250.  # games per loading batch (float so batch math stays float)
correct = 0
total = 0
num_lb = int((num_games-1)/lb_size) + 1 # Number of loading batches
model = loader.load_model_from_file(gvg.nn_type)
for lb in range(num_lb):
    games = []
    print("Loading game data...")
    i = 0
    # Select only the files whose index falls inside this batch's window.
    # NOTE(review): the window upper bound adds from_game to a limit already
    # clamped to num_games — confirm this slices the intended game range.
    for filename in glob.glob(os.path.join(kifuPath, "*.sgf")):
        load_limit = min((lb+1) * lb_size, num_games)
        if from_game + (lb) * lb_size <= i < from_game + load_limit:
            with open(filename, "rb") as f:
                games.append(sgf.Sgf_game.from_bytes(f.read()))
        i += 1
    print("Done loading {} games".format(len(games)))
    print("Being data processing...")
    train_boards = []
    train_next_moves = []
    # Replay each game, recording (encoded board, one-hot next move) pairs.
    for game_index in range(len(games)):
        board = go_board.setup_board(games[game_index])
        for node in games[game_index].get_main_sequence():
            board = go_board.switch_player_perspec(board) # Changes player perspective, black becomes white and vice versa
            node_move = node.get_move()[1]
            if node_move is not None:
                train_boards.append(go_board.get_encoded_board(board))
                next_move = np.zeros(gvg.board_size * gvg.board_size).reshape(gvg.board_size, gvg.board_size)
                next_move[node_move[0], node_move[1]] = gvg.filled # y = an array in the form [board_x_position, board_y_position]
                train_next_moves.append(next_move.reshape(gvg.board_size * gvg.board_size))
                board = go_board.make_move(board, node_move, gvg.bot_channel, gvg.player_channel) # Update board with new move
                if board is None:
                    print("ERROR! Illegal move, {}, while training".format(node_move))
    print("Finished data processing...")
    print("Begin testing...")
    # Score the model: a prediction counts as correct only when its argmax
    # matches the recorded human move exactly.
    for i in range(len(train_boards)):
        pred = np.asarray(model.predict(train_boards[i].reshape(1, gvg.board_size, gvg.board_size, gvg.enc_board_channels))) \
            .reshape(gvg.board_size * gvg.board_size)
        if pred.argmax() == train_next_moves[i].argmax():
            correct += 1
        total += 1
    # NOTE(review): divides by total — raises ZeroDivisionError if a batch
    # produced no positions.
    print("Accuracy: {}".format(correct/total))
print("Finished testing")
| [
"board3d.make_move",
"os.path.join",
"board3d.get_encoded_board",
"board3d.setup_board",
"numpy.zeros",
"board3d.switch_player_perspec",
"loader.load_model_from_file"
] | [((380, 420), 'loader.load_model_from_file', 'loader.load_model_from_file', (['gvg.nn_type'], {}), '(gvg.nn_type)\n', (407, 420), False, 'import loader\n'), ((544, 575), 'os.path.join', 'os.path.join', (['kifuPath', '"""*.sgf"""'], {}), "(kifuPath, '*.sgf')\n", (556, 575), False, 'import os\n'), ((1036, 1075), 'board3d.setup_board', 'go_board.setup_board', (['games[game_index]'], {}), '(games[game_index])\n', (1056, 1075), True, 'import board3d as go_board\n'), ((1157, 1194), 'board3d.switch_player_perspec', 'go_board.switch_player_perspec', (['board'], {}), '(board)\n', (1187, 1194), True, 'import board3d as go_board\n'), ((1780, 1853), 'board3d.make_move', 'go_board.make_move', (['board', 'node_move', 'gvg.bot_channel', 'gvg.player_channel'], {}), '(board, node_move, gvg.bot_channel, gvg.player_channel)\n', (1798, 1853), True, 'import board3d as go_board\n'), ((1382, 1415), 'board3d.get_encoded_board', 'go_board.get_encoded_board', (['board'], {}), '(board)\n', (1408, 1415), True, 'import board3d as go_board\n'), ((1446, 1487), 'numpy.zeros', 'np.zeros', (['(gvg.board_size * gvg.board_size)'], {}), '(gvg.board_size * gvg.board_size)\n', (1454, 1487), True, 'import numpy as np\n')] |
from typing import cast
import pandas as pd
import os
import numpy as np
import glob
from sklearn.model_selection import train_test_split
def prepare_data(random_state, path):
    """Load sensor CSV files from the given directories and split train/test.

    Parameters
    ----------
    random_state : int
        Seed forwarded to sklearn's ``train_test_split`` for reproducibility.
    path : iterable of str
        Directories containing the sensor ``.sgf``/CSV files; any file named
        ``README.txt`` is skipped.

    Returns
    -------
    tuple
        ``(x_train, x_test, y_train, y_test)`` where ``X`` is every column
        except the trailing ``Label`` column.
    """
    # Column schema shared by every data file (no header row in the files).
    cols = ['Time', 'Acceler_Front', 'Acceler_Vert', 'Acceler_later',
            'Id_sensor', 'RSSI', 'Phase', 'Frequency', 'Label']
    frames = []
    for directory in path:
        for name in os.listdir(directory):
            # README.txt is documentation, not sensor data.
            if name != 'README.txt':
                frames.append(
                    pd.read_csv(os.path.join(directory, name), sep=',', names=cols))
    # pd.concat already returns a DataFrame; no extra wrapping/copy needed.
    df = pd.concat(frames, ignore_index=False)
    df.reset_index(inplace=True)
    df.drop(columns=['index'], inplace=True)
    df.Id_sensor = df.Id_sensor.astype('float64')
    X = df.iloc[:, :-1]
    y = df.Label
    x_train, x_test, y_train, y_test = train_test_split(X, y, random_state=random_state)
    return x_train, x_test, y_train, y_test
| [
"os.listdir",
"sklearn.model_selection.train_test_split",
"os.path.join",
"pandas.DataFrame",
"pandas.concat"
] | [((364, 378), 'pandas.DataFrame', 'pd.DataFrame', ([], {}), '()\n', (376, 378), True, 'import pandas as pd\n'), ((910, 945), 'pandas.concat', 'pd.concat', (['tst1'], {'ignore_index': '(False)'}), '(tst1, ignore_index=False)\n', (919, 945), True, 'import pandas as pd\n'), ((954, 969), 'pandas.DataFrame', 'pd.DataFrame', (['A'], {}), '(A)\n', (966, 969), True, 'import pandas as pd\n'), ((1205, 1254), 'sklearn.model_selection.train_test_split', 'train_test_split', (['X', 'y'], {'random_state': 'random_state'}), '(X, y, random_state=random_state)\n', (1221, 1254), False, 'from sklearn.model_selection import train_test_split\n'), ((417, 434), 'os.listdir', 'os.listdir', (['path2'], {}), '(path2)\n', (427, 434), False, 'import os\n'), ((777, 799), 'os.path.join', 'os.path.join', (['path2', 'i'], {}), '(path2, i)\n', (789, 799), False, 'import os\n')] |
import os
import numpy as np
from allennlp.predictors import Predictor
from isanlp.annotation_rst import DiscourseUnit
from symbol_map import SYMBOL_MAP
class AllenNLPSegmenter:
    """Splits text into elementary discourse units (EDUs) using an AllenNLP
    sequence-tagging model: tokens tagged 'U-S' are treated as EDU left
    boundaries, with post-processing to repair sentence starts and forbid
    single-token EDUs.
    """
    def __init__(self, model_dir_path, cuda_device=-1):
        """
        :param model_dir_path: directory containing segmenter_neural/model.tar.gz
        :param cuda_device: CUDA device index for the predictor, -1 for CPU
        """
        self._model_path = os.path.join(model_dir_path, 'segmenter_neural', 'model.tar.gz')
        self._cuda_device = cuda_device
        self.predictor = Predictor.from_path(self._model_path, cuda_device=self._cuda_device)
        # Tag the model emits for a token that opens a new segment.
        self._separator = 'U-S'
        self._symbol_map = SYMBOL_MAP
    def __call__(self, annot_text, annot_tokens, annot_sentences, annot_lemma, annot_postag, annot_synt_dep_tree,
                 start_id=0):
        """Segment annotated text; unused annotation layers are accepted to
        match the shared isanlp pipeline interface.

        :return: list of DiscourseUnit, ids numbered from start_id
        """
        return self._build_discourse_units(annot_text, annot_tokens,
                                           self._predict(annot_tokens, annot_sentences), start_id)
    def _predict(self, tokens, sentences):
        """
        :return: numbers of tokens predicted as EDU left boundaries
        """
        _sentences = []
        # Rebuild each sentence's surface text (empty sentences are dropped).
        for sentence in sentences:
            text = ' '.join([self._prepare_token(token.text) for token in tokens[sentence.begin:sentence.end]]).strip()
            if text:
                _sentences.append(text)
        predictions = self.predictor.predict_batch_json([{'sentence': sentence} for sentence in _sentences])
        result = []
        for i, prediction in enumerate(predictions):
            # Boolean mask over the sentence's tokens: True = EDU boundary.
            pred = np.array(prediction['tags'][:sentences[i].end - sentences[i].begin]) == self._separator
            # The first token in a sentence is a separator
            # if it is not a point in a list
            # (e.g. previous sentence was a list number like "3.")
            if len(pred) > 0:
                if i > 0:
                    if predictions[i - 1]['words'][1] == '.' and predictions[i - 1]['words'][0] in "0123456789":
                        pred[0] = False
                    else:
                        pred[0] = True
                # No single-token EDUs
                for j, token in enumerate(pred[:-1]):
                    if token and pred[j + 1]:
                        if j == 0:
                            pred[j + 1] = False
                        else:
                            pred[j] = False
                result += list(pred)
        # Return the flat token indices where a new EDU begins.
        return np.argwhere(np.array(result) == True)[:, 0]
    def _build_discourse_units(self, text, tokens, numbers, start_id):
        """
        :param text: original text
        :param list tokens: isanlp.annotation.Token
        :param numbers: positions of tokens predicted as EDU left boundaries (beginners)
        :return: list of DiscourseUnit
        """
        edus = []
        if numbers.shape[0]:
            # One unit per pair of consecutive boundaries.
            for i in range(0, len(numbers) - 1):
                new_edu = DiscourseUnit(start_id + i,
                                          start=tokens[numbers[i]].begin,
                                          end=tokens[numbers[i + 1]].begin - 1,
                                          text=text[tokens[numbers[i]].begin:tokens[numbers[i + 1]].begin],
                                          relation='elementary',
                                          nuclearity='_')
                edus.append(new_edu)
            # With a single boundary the loop above never ran, so `i` would be
            # unbound; force it so the final unit gets id start_id + 0.
            if numbers.shape[0] == 1:
                i = -1
            # Final unit: from the last boundary to the end of the text.
            new_edu = DiscourseUnit(start_id + i + 1,
                                      start=tokens[numbers[-1]].begin,
                                      end=tokens[-1].end,
                                      text=text[tokens[numbers[-1]].begin:tokens[-1].end],
                                      relation='elementary',
                                      nuclearity='_')
            edus.append(new_edu)
        return edus
    def _prepare_token(self, token):
        # Normalize characters the model was not trained on, and collapse
        # URLs to a single placeholder token.
        for key, value in self._symbol_map.items():
            token = token.replace(key, value)
        for keyword in ['www', 'http']:
            if keyword in token:
                return '_html_'
        return token
| [
"allennlp.predictors.Predictor.from_path",
"numpy.array",
"os.path.join",
"isanlp.annotation_rst.DiscourseUnit"
] | [((265, 329), 'os.path.join', 'os.path.join', (['model_dir_path', '"""segmenter_neural"""', '"""model.tar.gz"""'], {}), "(model_dir_path, 'segmenter_neural', 'model.tar.gz')\n", (277, 329), False, 'import os\n'), ((395, 463), 'allennlp.predictors.Predictor.from_path', 'Predictor.from_path', (['self._model_path'], {'cuda_device': 'self._cuda_device'}), '(self._model_path, cuda_device=self._cuda_device)\n', (414, 463), False, 'from allennlp.predictors import Predictor\n'), ((3207, 3393), 'isanlp.annotation_rst.DiscourseUnit', 'DiscourseUnit', (['(start_id + i + 1)'], {'start': 'tokens[numbers[-1]].begin', 'end': 'tokens[-1].end', 'text': 'text[tokens[numbers[-1]].begin:tokens[-1].end]', 'relation': '"""elementary"""', 'nuclearity': '"""_"""'}), "(start_id + i + 1, start=tokens[numbers[-1]].begin, end=tokens\n [-1].end, text=text[tokens[numbers[-1]].begin:tokens[-1].end], relation\n ='elementary', nuclearity='_')\n", (3220, 3393), False, 'from isanlp.annotation_rst import DiscourseUnit\n'), ((1425, 1493), 'numpy.array', 'np.array', (["prediction['tags'][:sentences[i].end - sentences[i].begin]"], {}), "(prediction['tags'][:sentences[i].end - sentences[i].begin])\n", (1433, 1493), True, 'import numpy as np\n'), ((2682, 2894), 'isanlp.annotation_rst.DiscourseUnit', 'DiscourseUnit', (['(start_id + i)'], {'start': 'tokens[numbers[i]].begin', 'end': '(tokens[numbers[i + 1]].begin - 1)', 'text': 'text[tokens[numbers[i]].begin:tokens[numbers[i + 1]].begin]', 'relation': '"""elementary"""', 'nuclearity': '"""_"""'}), "(start_id + i, start=tokens[numbers[i]].begin, end=tokens[\n numbers[i + 1]].begin - 1, text=text[tokens[numbers[i]].begin:tokens[\n numbers[i + 1]].begin], relation='elementary', nuclearity='_')\n", (2695, 2894), False, 'from isanlp.annotation_rst import DiscourseUnit\n'), ((2215, 2231), 'numpy.array', 'np.array', (['result'], {}), '(result)\n', (2223, 2231), True, 'import numpy as np\n')] |
# -*- coding: utf-8 -*-
# Copyright 2015 Mirantis, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import functools
from nailgun import consts
from nailgun.test.base import EnvironmentManager
from nailgun.test.performance import base
class NodeGroupOperationsLoadTest(base.BaseUnitLoadTestCase):
    """Timing tests for the node-group collection REST handlers."""

    @classmethod
    def setUpClass(cls):
        """Create a neutron/GRE cluster, one extra node group and test nodes."""
        super(NodeGroupOperationsLoadTest, cls).setUpClass()
        cls.env = EnvironmentManager(app=cls.app, session=cls.db)
        cls.env.upload_fixtures(cls.fixtures)
        cls.cluster = cls.env.create_cluster(
            api=False,
            net_provider=consts.CLUSTER_NET_PROVIDERS.neutron,
            net_segment_type=consts.NEUTRON_SEGMENT_TYPES.gre,
        )
        cls.group = cls.env.create_node_group()
        cls.env.create_nodes(cls.NODES_NUM, cluster_id=cls.cluster['id'])

    @base.evaluate_unit_performance
    def test_node_group_collection_retrieve(self):
        """Time a GET against the node-group collection handler."""
        request = functools.partial(self.get_handler,
                                    'NodeGroupCollectionHandler')
        self.check_time_exec(request)

    @base.evaluate_unit_performance
    def test_node_group_collection_create(self):
        """Time a POST that creates a node group in the test cluster."""
        payload = {
            'cluster_id': self.cluster.id,
            'name': 'test_group',
        }
        request = functools.partial(self.post_handler,
                                    'NodeGroupCollectionHandler',
                                    payload)
        self.check_time_exec(request)
| [
"nailgun.test.base.EnvironmentManager",
"functools.partial"
] | [((957, 1004), 'nailgun.test.base.EnvironmentManager', 'EnvironmentManager', ([], {'app': 'cls.app', 'session': 'cls.db'}), '(app=cls.app, session=cls.db)\n', (975, 1004), False, 'from nailgun.test.base import EnvironmentManager\n'), ((1482, 1547), 'functools.partial', 'functools.partial', (['self.get_handler', '"""NodeGroupCollectionHandler"""'], {}), "(self.get_handler, 'NodeGroupCollectionHandler')\n", (1499, 1547), False, 'import functools\n'), ((1720, 1846), 'functools.partial', 'functools.partial', (['self.post_handler', '"""NodeGroupCollectionHandler"""', "{'cluster_id': self.cluster.id, 'name': 'test_group'}"], {}), "(self.post_handler, 'NodeGroupCollectionHandler', {\n 'cluster_id': self.cluster.id, 'name': 'test_group'})\n", (1737, 1846), False, 'import functools\n')] |
import magic
import os
import random
import string
from ahye.settings import LOCAL_UPLOADS_DIR
def generate_filename(image_data, detect_extension=True):
    """Return a random 8-character file name (plus extension) that does not
    collide with any file already present in LOCAL_UPLOADS_DIR.

    When detect_extension is true the extension is sniffed from the image
    bytes via libmagic; otherwise '.png' is assumed.
    """
    pool = string.ascii_letters + string.digits
    while True:
        candidate = ''.join(random.sample(pool, 8))
        if detect_extension:
            candidate += get_file_extension(image_data)
        else:
            candidate += '.png'
        if not os.path.exists(os.path.join(LOCAL_UPLOADS_DIR, candidate)):
            return candidate
def get_file_extension(image_data):
    """Map libmagic's description of *image_data* to a file extension.

    Previously this returned None for anything other than JPEG/GIF/PNG,
    which made generate_filename() crash with a TypeError when
    concatenating; unknown types now default to '.png', matching the
    detect_extension=False fallback.
    """
    s = magic.from_buffer(image_data)
    if s.startswith('JPEG'):
        return '.jpg'
    if s.startswith('GIF'):
        return '.gif'
    if s.startswith('PNG'):
        return '.png'
    # Unknown/unsupported type: fall back instead of returning None.
    return '.png'
def guess_file_extension(url):
    """ Used by the image mirroring service """
    # Check substrings in priority order; default to '.jpg' when nothing
    # recognizable appears in the URL.
    lowered = url.lower()
    candidates = (
        ('.jpg', '.jpg'),
        ('.jpeg', '.jpg'),
        ('.gif', '.gif'),
        ('.png', '.png'),
        ('.svg', '.svg'),
    )
    for needle, extension in candidates:
        if needle in lowered:
            return extension
    return '.jpg'
| [
"magic.from_buffer",
"random.sample",
"os.path.join"
] | [((547, 576), 'magic.from_buffer', 'magic.from_buffer', (['image_data'], {}), '(image_data)\n', (564, 576), False, 'import magic\n'), ((263, 302), 'os.path.join', 'os.path.join', (['LOCAL_UPLOADS_DIR', 'retval'], {}), '(LOCAL_UPLOADS_DIR, retval)\n', (275, 302), False, 'import os\n'), ((331, 357), 'random.sample', 'random.sample', (['alphanum', '(8)'], {}), '(alphanum, 8)\n', (344, 357), False, 'import random\n')] |
import json
import boto3
from elasticsearch import Elasticsearch, RequestsHttpConnection
from requests_aws4auth import AWS4Auth
from boto3.dynamodb.conditions import Key
# DynamoDB table holding one profile item per registered user.
user_table = 'user-profile'
dynamodb = boto3.resource('dynamodb')
table = dynamodb.Table(user_table)
# Cognito client used to resolve the caller from an access token.
cognito = boto3.client('cognito-idp')
# Amazon Elasticsearch Service endpoint details for the tutor index.
region = 'us-east-1'
service = 'es'
host = 'search-ccfinalsearcht-jdyfz3ale3zufejmvivdts3lea.us-east-1.es.amazonaws.com'
# SigV4 signing auth built from the Lambda execution role's credentials.
credentials = boto3.Session().get_credentials()
awsauth = AWS4Auth(credentials.access_key, credentials.secret_key, region, service, session_token=credentials.token)
def lambda_handler(event, context):
    """Register or update a user profile, mirroring tutors into Elasticsearch.

    Expects an access_token header (validated against Cognito) and a JSON
    body; when body['isRegister'] is true the whole body is stored as a new
    DynamoDB item, otherwise the existing item is updated field by field.
    Tutors are additionally indexed/updated in the 'tutors' ES index;
    non-tutors are removed from it.
    """
    access_token = event['headers']['access_token']
    # Resolve the caller's identity from the Cognito access token.
    try:
        resp = cognito.get_user(
            AccessToken=access_token,
        )
    except:
        # NOTE(review): bare except also swallows unrelated failures
        # (throttling, network) and reports them as a login error.
        return {
            'statusCode': 500,
            'body': json.dumps('Error in your login'),
            "headers": {
                "Content-Type": "application/json",
                "Access-Control-Allow-Origin": "*",
                "Access-Control-Allow-Headers": "*"
            }
        }
    user = {i['Name']:i['Value'] for i in resp['UserAttributes']}
    # The user's email doubles as the DynamoDB partition key and ES doc id.
    user_id = user['email']
    update_expression = 'set '
    expression_dict = {}
    event['body'] = json.loads(event['body'])
    if event['body']['isRegister']:
        # First registration: store the whole body (minus the flag) as-is.
        info = {}
        for k in event['body']:
            if k != 'isRegister':
                info[k] = event['body'][k]
        table.put_item(Item = info)
    else:
        # Build "set k1=:val0, k2=:val1, ..." from the remaining body fields.
        for i in enumerate(event['body'].items()):
            idx = i[0]
            k = i[1][0]
            v = i[1][1]
            if k == 'user_id' or k=='isRegister':
                continue
            update = k+'=:val'+str(idx)+", "
            update_expression += update
            expression_dict[":val"+str(idx)] = v
        # NOTE(review): assumes at least one updatable field; an empty body
        # would leave a malformed expression after the trim below.
        update_expression = update_expression[:-2] # delete the last ", " in the expression
        response = table.update_item(
            Key={
                'user_id': user_id
            },
            UpdateExpression=update_expression,
            ExpressionAttributeValues=expression_dict,
            ReturnValues="UPDATED_NEW"
        )
    es = Elasticsearch(
        hosts = [{'host': host, 'port': 443}],
        http_auth = awsauth,
        use_ssl = True,
        verify_certs = True,
        connection_class = RequestsHttpConnection
    )
    # Keep the searchable tutor index in sync with the profile.
    # NOTE(review): assumes the body always carries 'tutor' (and, for tutors,
    # degree/tags/school/major/names) -- confirm against the frontend payload.
    if event['body']["tutor"]:
        if es.exists(index="tutors",id=user_id):
            es.update(index='tutors',doc_type='_doc',id=user_id,
            body={"doc": {"degree":event['body']["degree"],
            "first_name": event['body']['first_name'], "last_name": event['body']['last_name'],
            "tags": event['body']['tags'],"school":event['body']["school"],"major":event['body']["major"]}})
        else:
            es.index(index="tutors",doc_type="_doc",id=user_id,body={
                "degree":event['body']["degree"],
                "tags": event['body']['tags'],
                "school":event['body']["school"],
                "major":event['body']["major"],
                "last_name": event['body']['last_name'],
                "first_name": event['body']['first_name']
            })
    else:
        # Caller is no longer a tutor: drop them from the search index.
        if es.exists(index="tutors",id=user_id):
            es.delete(index="tutors", id=user_id)
    return {
        'statusCode': 200,
        'body': json.dumps("successfully update/register your account"),
        "headers": {
            "Content-Type": "application/json",
            "Access-Control-Allow-Origin": "*",
            "Access-Control-Allow-Headers": "*"
        }
    }
| [
"json.loads",
"boto3.client",
"requests_aws4auth.AWS4Auth",
"elasticsearch.Elasticsearch",
"boto3.Session",
"json.dumps",
"boto3.resource"
] | [((210, 236), 'boto3.resource', 'boto3.resource', (['"""dynamodb"""'], {}), "('dynamodb')\n", (224, 236), False, 'import boto3\n'), ((282, 309), 'boto3.client', 'boto3.client', (['"""cognito-idp"""'], {}), "('cognito-idp')\n", (294, 309), False, 'import boto3\n'), ((489, 599), 'requests_aws4auth.AWS4Auth', 'AWS4Auth', (['credentials.access_key', 'credentials.secret_key', 'region', 'service'], {'session_token': 'credentials.token'}), '(credentials.access_key, credentials.secret_key, region, service,\n session_token=credentials.token)\n', (497, 599), False, 'from requests_aws4auth import AWS4Auth\n'), ((1255, 1280), 'json.loads', 'json.loads', (["event['body']"], {}), "(event['body'])\n", (1265, 1280), False, 'import json\n'), ((2205, 2352), 'elasticsearch.Elasticsearch', 'Elasticsearch', ([], {'hosts': "[{'host': host, 'port': 443}]", 'http_auth': 'awsauth', 'use_ssl': '(True)', 'verify_certs': '(True)', 'connection_class': 'RequestsHttpConnection'}), "(hosts=[{'host': host, 'port': 443}], http_auth=awsauth,\n use_ssl=True, verify_certs=True, connection_class=RequestsHttpConnection)\n", (2218, 2352), False, 'from elasticsearch import Elasticsearch, RequestsHttpConnection\n'), ((445, 460), 'boto3.Session', 'boto3.Session', ([], {}), '()\n', (458, 460), False, 'import boto3\n'), ((3428, 3483), 'json.dumps', 'json.dumps', (['"""successfully update/register your account"""'], {}), "('successfully update/register your account')\n", (3438, 3483), False, 'import json\n'), ((856, 889), 'json.dumps', 'json.dumps', (['"""Error in your login"""'], {}), "('Error in your login')\n", (866, 889), False, 'import json\n')] |
import crypt
import io
import json
import logging
import re
import requests
import uuid
import yaml
from flask import current_app as app
from base64 import b64encode
from cryptography.hazmat.primitives import serialization
from cryptography.hazmat.primitives.asymmetric import rsa
from cryptography.hazmat.backends import default_backend
from docutils.core import publish_parts
from ipaddress import IPv4Network
from jinja2 import Environment, meta
from pygerrit2 import GerritRestAPI, HTTPBasicAuth
from requests import HTTPError
from os import urandom
from operations_api import exceptions
from operations_api.app import cache
log = logging.getLogger('operations_api')
####################################
# GET CONTEXT FROM REMOTE LOCATION #
####################################
# Custom Jinja2 filters
def subnet(subnet, host_ip):
    """
    Jinja2 filter: return the host address at 1-based index *host_ip*
    inside *subnet*.

    A bare network address (no '/') is treated as a /24.  Returns '' for
    an empty subnet, an explanatory message when the index is out of
    range, and falls back to the bare network part when the subnet cannot
    be parsed.

    Example:

    {{ '192.168.1.0/24'|subnet(1) }}  ->  192.168.1.1
    """
    if not subnet:
        return ""
    if '/' not in subnet:
        subnet = str(subnet) + '/24'
    try:
        hosts = list(IPv4Network(str(subnet)).hosts())
        result = str(hosts[int(host_ip) - 1])
    except IndexError:
        result = "Host index is out of range of available addresses"
    except Exception:
        result = subnet.split('/')[0]
    return result
def netmask(subnet):
    """
    Jinja2 filter: return the dotted-quad netmask of *subnet*.

    A bare network address (no '/') is treated as a /24.  Returns '' for
    an empty subnet and an explanatory message when parsing fails.

    Example:

    {{ '192.168.1.0/24'|netmask }}  ->  255.255.255.0
    """
    if not subnet:
        return ""
    if '/' not in subnet:
        subnet = str(subnet) + '/24'
    try:
        return str(IPv4Network(str(subnet)).netmask)
    except Exception:
        return "Cannot determine network mask"
def generate_password(length):
    """
    Jinja2 filter: generate a random alphanumeric password of exactly
    *length* characters.

    Bug fix: the previous implementation returned every character of the
    base64 expansion of ``length`` random bytes, i.e. roughly
    ``4/3 * length`` characters, so the documented contract ("password of
    defined length") did not hold.  The result is now truncated to the
    requested length.

    Example:

    {{ 32|generate_password }}  ->  a 32-character string
    """
    chars = "aAbBcCdDeEfFgGhHiIjJkKlLmMnNpPqQrRsStTuUvVwWxXyYzZ1234567890"
    # b64encode(urandom(n)) always yields >= n characters, so slicing to
    # `length` is safe for any non-negative length.
    encoded = b64encode(urandom(length)).decode('utf-8')
    return "".join(chars[ord(c) % len(chars)] for c in encoded)[:length]
def hash_password(password):
    """
    Hash *password* with SHA-512 crypt ($6$) and a fresh random
    8-character salt.  A falsy password yields ''.

    NOTE(review): the ``crypt`` module is POSIX-only and was deprecated in
    Python 3.11 (removed in 3.13); consider migrating to passlib or a
    hashlib-based scheme.

    Example output:

    $6$AbCdEfGh$...
    """
    # Salt alphabet: letters only; each base64 char of 8 random bytes is
    # folded onto it by modulo.
    chars = "aAbBcCdDeEfFgGhHiIjJkKlLmMnNpPqQrRsStTuUvVwWxXyYzZ"
    salt_str = "".join(chars[ord(c) % len(chars)] for c in b64encode(urandom(8)).decode('utf-8'))
    salt = "$6$%s$" % salt_str
    pw_hash = ''
    if password:
        pw_hash = crypt.crypt(password, salt)
    return pw_hash
# (name, callable) pairs registered as Jinja2 filters in
# FormTemplateCollector.render().
CUSTOM_FILTERS = [
    ('subnet', subnet),
    ('generate_password', generate_password),
    ('hash_password', hash_password),
    ('netmask', netmask)
]
def generate_ssh_keypair(seed=None):
    """Return a (private_key, public_key) PEM/OpenSSH string pair.

    Without a seed, an empty pair is returned.  With a seed, a 2048-bit RSA
    pair is generated once and cached for an hour under the seed, so
    repeated template renders with the same seed see the same keys.

    NOTE(review): the private key is stored in the shared application
    cache -- confirm that cache backend is acceptable for key material.
    """
    if not seed:
        private_key_str = ""
        public_key_str = ""
    else:
        private_key_cache = 'private_key_' + str(seed)
        public_key_cache = 'public_key_' + str(seed)
        cached_private_key = cache.get(private_key_cache)
        cached_public_key = cache.get(public_key_cache)
        if cached_private_key and cached_public_key:
            # Cache hit: reuse the previously generated pair.
            private_key_str = cached_private_key
            public_key_str = cached_public_key
        else:
            private_key_obj = rsa.generate_private_key(
                backend=default_backend(),
                public_exponent=65537,
                key_size=2048
            )
            public_key_obj = private_key_obj.public_key()
            public_key = public_key_obj.public_bytes(
                serialization.Encoding.OpenSSH, serialization.PublicFormat.OpenSSH)
            private_key = private_key_obj.private_bytes(
                encoding=serialization.Encoding.PEM,
                format=serialization.PrivateFormat.TraditionalOpenSSL,
                encryption_algorithm=serialization.NoEncryption())
            private_key_str = private_key.decode('utf-8')
            public_key_str = public_key.decode('utf-8')
            # Cache both halves for one hour.
            cache.set(private_key_cache, private_key_str, 3600)
            cache.set(public_key_cache, public_key_str, 3600)
    return (private_key_str, public_key_str)
def generate_uuid():
    """Jinja2 global: return a fresh random (version 4) UUID object."""
    return uuid.uuid4()
# (name, callable) pairs installed as Jinja2 globals in
# FormTemplateCollector.render().
CUSTOM_FUNCTIONS = [
    ('generate_ssh_keypair', generate_ssh_keypair),
    ('generate_uuid', generate_uuid)
]
# docutils settings used by FormTemplateCollector._render_doc when turning
# embedded reStructuredText 'doc' fields into HTML fragments.
DOCUTILS_RENDERER_SETTINGS = {
    'initial_header_level': 2,
    # important, to have even lone titles stay in the html fragment:
    'doctitle_xform': False,
    # we also disable the promotion of lone subsection title to a subtitle:
    'sectsubtitle_xform': False,
    'file_insertion_enabled': False, # SECURITY MEASURE (file hacking)
    'raw_enabled': False, # SECURITY MEASURE (script tag)
    'report_level': 2, # report warnings and above, by default
}
# Decorators
def requires(attributes):
    """Method decorator factory: verify that every named instance attribute
    is set (truthy) before the wrapped method runs.

    attributes: list of attribute-name strings.  Raises
    exceptions.ImproperlyConfigured naming the first unset attribute.
    """
    def decorator(f):
        def guarded(self, *args):
            for attr in attributes:
                if not getattr(self, attr):
                    msg = ('Configuration key MODELFORM_{} is '
                           'required with remote {}').format(attr.upper(), self.remote)
                    raise exceptions.ImproperlyConfigured(msg)
            return f(self, *args)
        return guarded
    return decorator
# Template Collector
class FormTemplateCollector(object):
    '''
    Fetches a Jinja2-templated model-form definition from a configured
    remote (github / http / gerrit / localfs), renders it with the custom
    filters and globals defined in this module, and returns the parsed
    YAML structure.  All settings can be passed as keyword arguments and
    fall back to the corresponding MODELFORM_* Flask config keys.
    '''
    def __init__(self, *args, **kwargs):
        self.url = kwargs.get('url', app.config.get('MODELFORM_URL', None))
        self.path = kwargs.get('path', app.config.get('MODELFORM_PATH', None))
        self.remote = kwargs.get('remote', app.config.get('MODELFORM_REMOTE', None))
        self.username = kwargs.get('username', app.config.get('MODELFORM_USERNAME', None))
        self.password = kwargs.get('password', app.config.get('MODELFORM_PASSWORD', None))
        self.token = kwargs.get('token', app.config.get('MODELFORM_TOKEN', None))
        self.versions = kwargs.get('versions', app.config.get('MODELFORM_VERSIONS', []))
        self.project_name = kwargs.get('project_name', app.config.get('MODELFORM_PROJECT_NAME', None))
        self.file_name = kwargs.get('file_name', app.config.get('MODELFORM_FILE_NAME', None))
        self.version_filter = kwargs.get('version_filter', app.config.get('MODELFORM_VERSION_FILTER', None))
        self.version_map = kwargs.get('version_map', app.config.get('MODELFORM_VERSION_MAP', {}))
        # Per-remote dispatch table: how to fetch the template text and how
        # to enumerate the available versions.
        self.collectors = {
            'github': {
                'template_collector': self._github_collector,
                'version_collector': self._static_version_collector
            },
            'http': {
                'template_collector': self._http_collector,
                'version_collector': self._static_version_collector
            },
            'gerrit': {
                'template_collector': self._gerrit_collector,
                'version_collector': self._gerrit_version_collector
            },
            'localfs': {
                'template_collector': self._localfs_collector,
                'version_collector': self._static_version_collector
            }
        }
        if not self.remote or (self.remote and self.remote not in self.collectors):
            collectors = list(self.collectors.keys())
            msg = ('Configuration key MODELFORM_REMOTE is '
                   'required, possible values are: {}').format(', '.join(collectors))
            raise exceptions.ImproperlyConfigured(msg)
    # GERRIT
    def _gerrit_get(self, endpoint_url):
        """GET a Gerrit REST endpoint with basic auth; log and re-raise on failure."""
        auth = HTTPBasicAuth(self.username, self.password)
        rest = GerritRestAPI(url=self.url, auth=auth)
        try:
            response_body = rest.get(endpoint_url)
        except HTTPError as e:
            msg = "Failed to get response from Gerrit URL %s: %s" % (endpoint_url, str(e))
            log.error(msg)
            raise exceptions.HTTPError
        return response_body
    @requires(['username', 'password', 'url', 'project_name', 'file_name'])
    def _gerrit_collector(self, version=None):
        """Fetch the template file content from Gerrit, cached for one hour.

        With no version the file is taken from the master branch; with a
        version (possibly a display name from version_map) it is resolved
        to a commit revision first.
        """
        cache_key = 'workflow_context'
        endpoint_url = '/projects/%s/branches/master/files/%s/content' % (self.project_name, self.file_name)
        if version:
            versions = self._gerrit_get_versions()
            # Translate a mapped display name back to the raw tag name.
            if version in self.version_map.values():
                version = [v[0] for v in self.version_map.items() if v[1] == version][0]
            revision = versions.get(version)
            cache_key = 'workflow_context_%s' % revision
            endpoint_url = '/projects/%s/commits/%s/files/%s/content' % (
                self.project_name, revision, self.file_name)
        cached_ctx = cache.get(cache_key)
        if cached_ctx:
            return cached_ctx
        ctx = self._gerrit_get(endpoint_url)
        cache.set(cache_key, ctx, 3600)
        return ctx
    def _gerrit_get_versions(self):
        """Return {tag_or_'master': revision} from Gerrit, cached for one hour."""
        cache_key = 'workflow_versions_%s_%s' % (self.url, self.project_name)
        cached_versions = cache.get(cache_key)
        if cached_versions:
            return cached_versions
        tags_endpoint_url = '/projects/%s/tags/' % self.project_name
        master_endpoint_url = '/projects/%s/branches/master/' % self.project_name
        tags = self._gerrit_get(tags_endpoint_url)
        master = self._gerrit_get(master_endpoint_url)
        self.versions = {}
        for tag in tags:
            key = tag['ref'].replace('refs/tags/', '')
            self.versions[key] = tag['revision']
        self.versions['master'] = master['revision']
        cache.set(cache_key, self.versions, 3600)
        return self.versions
    def _gerrit_version_collector(self):
        """Return the list of known Gerrit version names (tags + 'master')."""
        versions = self._gerrit_get_versions()
        return list(versions.keys())
    # GITHUB
    @requires(['url', 'token'])
    def _github_collector(self, version=None):
        """Fetch the raw template file from the GitHub API, cached for one hour.

        On any HTTP error the (logged) result is an empty string; the
        `version` argument is accepted for interface parity but unused here.
        """
        session = requests.Session()
        cached_ctx = cache.get('workflow_context')
        if cached_ctx:
            return cached_ctx
        session.headers.update({'Accept': 'application/vnd.github.v3.raw'})
        session.headers.update({'Authorization': 'token ' + str(self.token)})
        response = session.get(self.url)
        if response.status_code >= 300:
            # GitHub error payloads carry the reason under 'message'.
            try:
                response_json = json.loads(str(response.text))
                response_text = response_json['message']
            except Exception:
                response_text = response.text
            msg = "Could not get remote file from Github:\nSTATUS CODE: %s\nRESPONSE:\n%s" % (
                str(response.status_code), response_text)
            log.error(msg)
            ctx = ""
        else:
            ctx = response.text
        cache.set('workflow_context', ctx, 3600)
        return ctx
    # HTTP
    @requires(['url'])
    def _http_collector(self, version=None):
        """Fetch the template over plain HTTP(S), cached for one hour.

        Basic auth is used when both username and password are configured;
        errors are logged and yield an empty string.
        """
        session = requests.Session()
        cached_ctx = cache.get('workflow_context')
        if cached_ctx:
            return cached_ctx
        if self.username and self.password:
            response = session.get(self.url, auth=(self.username, self.password))
        else:
            response = session.get(self.url)
        if response.status_code >= 300:
            msg = "Could not get remote file from HTTP URL %s:\nSTATUS CODE: %s\nRESPONSE:\n%s" % (
                self.url, str(response.status_code), response.text)
            log.error(msg)
            ctx = ""
        else:
            ctx = response.text
        cache.set('workflow_context', ctx, 3600)
        return ctx
    # LOCALFS
    @requires(['path'])
    def _localfs_collector(self, version=None):
        """Read the template from the local filesystem; errors yield ''."""
        try:
            with io.open(self.path, 'r') as file_handle:
                ctx = file_handle.read()
        except Exception as e:
            msg = "Could not read file %s: %s" % (self.path, repr(e))
            log.error(msg)
            ctx = ""
        return ctx
    def _static_version_collector(self):
        """Return the statically configured version list (MODELFORM_VERSIONS)."""
        return self.versions
    # PRIVATE
    def _collect_template(self, version=None):
        """Fetch the raw template text via the configured remote's collector.

        An unknown version is logged and silently replaced by the default.
        """
        if version:
            versions = self.list_versions()
            if version not in versions:
                log.warning('Selected version %s not available, using default. Available versions: %s' % (
                    version, versions))
                version = None
        collector = self.collectors.get(self.remote, {}).get('template_collector')
        return collector(version)
    def _render_doc(self, value, header_level=None, report_level=None):
        """Render a reStructuredText string to an HTML fragment.

        Falls back to returning the original text if docutils fails.
        """
        settings_overrides = DOCUTILS_RENDERER_SETTINGS.copy()
        if header_level is not None: # starts from 1
            settings_overrides["initial_header_level"] = header_level
        if report_level is not None: # starts from 1 too
            settings_overrides["report_level"] = report_level
        try:
            parts = publish_parts(source=value.encode('utf-8'),
                                  writer_name="html4css1",
                                  settings_overrides=settings_overrides)
            # Slice presumably strips the enclosing <div ...> wrapper that
            # html4css1 adds around the body -- verify against docutils output.
            trimmed_parts = parts['html_body'][23:-8]
        except Exception as e:
            # return original .rst if HTML rendering failed
            trimmed_parts = value
            log.exception(e)
        return trimmed_parts
    def _update_template(self, obj):
        """ Traverse rendered template and render all rst documentation into HTML.

        Recursively rewrites every dict's 'doc' value in place.
        """
        if isinstance(obj, dict):
            if 'doc' in obj:
                obj['doc'] = self._render_doc(obj['doc'])
            return {k: self._update_template(v) for k, v in obj.items()}
        elif isinstance(obj, list):
            return [self._update_template(elem) for elem in obj]
        else:
            return obj
    # PUBLIC
    def list_versions(self):
        """Return the sorted, filtered, display-mapped list of versions."""
        collector = self.collectors.get(self.remote, {}).get('version_collector')
        versions = collector()
        # filter versions by configured regular expression
        if self.version_filter:
            regex = re.compile(self.version_filter)
            versions = list(filter(regex.search, versions))
        # replace version names by names configured in version map
        for idx, version in enumerate(versions):
            if version in self.version_map:
                versions[idx] = self.version_map[version]
        return sorted(versions)
    def render(self, version=None):
        """Fetch, render and YAML-parse the template for *version*.

        Undeclared template variables default to ''.  Any rendering or
        parsing failure is logged and yields {}.
        """
        context = {}
        env = Environment()
        for fltr in CUSTOM_FILTERS:
            env.filters[fltr[0]] = fltr[1]
        for fnc in CUSTOM_FUNCTIONS:
            env.globals[fnc[0]] = fnc[1]
        source_context = self._collect_template(version)
        tmpl = env.from_string(source_context)
        parsed_source = env.parse(source_context)
        for key in meta.find_undeclared_variables(parsed_source):
            if key not in env.globals:
                context[key] = ''
        try:
            # NOTE(review): yaml.load without an explicit Loader can execute
            # arbitrary constructors -- only safe if templates are trusted.
            rendered = yaml.load(tmpl.render(context))
            self._update_template(rendered)
        except Exception as e:
            rendered = {}
            log.exception(e)
        return rendered
| [
"logging.getLogger",
"operations_api.app.cache.get",
"requests.Session",
"jinja2.Environment",
"re.compile",
"os.urandom",
"jinja2.meta.find_undeclared_variables",
"pygerrit2.HTTPBasicAuth",
"uuid.uuid4",
"pygerrit2.GerritRestAPI",
"operations_api.exceptions.ImproperlyConfigured",
"io.open",
... | [((639, 674), 'logging.getLogger', 'logging.getLogger', (['"""operations_api"""'], {}), "('operations_api')\n", (656, 674), False, 'import logging\n'), ((4752, 4764), 'uuid.uuid4', 'uuid.uuid4', ([], {}), '()\n', (4762, 4764), False, 'import uuid\n'), ((3087, 3114), 'crypt.crypt', 'crypt.crypt', (['password', 'salt'], {}), '(password, salt)\n', (3098, 3114), False, 'import crypt\n'), ((3551, 3579), 'operations_api.app.cache.get', 'cache.get', (['private_key_cache'], {}), '(private_key_cache)\n', (3560, 3579), False, 'from operations_api.app import cache\n'), ((3608, 3635), 'operations_api.app.cache.get', 'cache.get', (['public_key_cache'], {}), '(public_key_cache)\n', (3617, 3635), False, 'from operations_api.app import cache\n'), ((8231, 8274), 'pygerrit2.HTTPBasicAuth', 'HTTPBasicAuth', (['self.username', 'self.password'], {}), '(self.username, self.password)\n', (8244, 8274), False, 'from pygerrit2 import GerritRestAPI, HTTPBasicAuth\n'), ((8290, 8328), 'pygerrit2.GerritRestAPI', 'GerritRestAPI', ([], {'url': 'self.url', 'auth': 'auth'}), '(url=self.url, auth=auth)\n', (8303, 8328), False, 'from pygerrit2 import GerritRestAPI, HTTPBasicAuth\n'), ((9354, 9374), 'operations_api.app.cache.get', 'cache.get', (['cache_key'], {}), '(cache_key)\n', (9363, 9374), False, 'from operations_api.app import cache\n'), ((9482, 9513), 'operations_api.app.cache.set', 'cache.set', (['cache_key', 'ctx', '(3600)'], {}), '(cache_key, ctx, 3600)\n', (9491, 9513), False, 'from operations_api.app import cache\n'), ((9674, 9694), 'operations_api.app.cache.get', 'cache.get', (['cache_key'], {}), '(cache_key)\n', (9683, 9694), False, 'from operations_api.app import cache\n'), ((10236, 10277), 'operations_api.app.cache.set', 'cache.set', (['cache_key', 'self.versions', '(3600)'], {}), '(cache_key, self.versions, 3600)\n', (10245, 10277), False, 'from operations_api.app import cache\n'), ((10544, 10562), 'requests.Session', 'requests.Session', ([], {}), '()\n', (10560, 10562), False, 
'import requests\n'), ((10585, 10614), 'operations_api.app.cache.get', 'cache.get', (['"""workflow_context"""'], {}), "('workflow_context')\n", (10594, 10614), False, 'from operations_api.app import cache\n'), ((11373, 11413), 'operations_api.app.cache.set', 'cache.set', (['"""workflow_context"""', 'ctx', '(3600)'], {}), "('workflow_context', ctx, 3600)\n", (11382, 11413), False, 'from operations_api.app import cache\n'), ((11532, 11550), 'requests.Session', 'requests.Session', ([], {}), '()\n', (11548, 11550), False, 'import requests\n'), ((11573, 11602), 'operations_api.app.cache.get', 'cache.get', (['"""workflow_context"""'], {}), "('workflow_context')\n", (11582, 11602), False, 'from operations_api.app import cache\n'), ((12154, 12194), 'operations_api.app.cache.set', 'cache.set', (['"""workflow_context"""', 'ctx', '(3600)'], {}), "('workflow_context', ctx, 3600)\n", (12163, 12194), False, 'from operations_api.app import cache\n'), ((15092, 15105), 'jinja2.Environment', 'Environment', ([], {}), '()\n', (15103, 15105), False, 'from jinja2 import Environment, meta\n'), ((15438, 15483), 'jinja2.meta.find_undeclared_variables', 'meta.find_undeclared_variables', (['parsed_source'], {}), '(parsed_source)\n', (15468, 15483), False, 'from jinja2 import Environment, meta\n'), ((4558, 4609), 'operations_api.app.cache.set', 'cache.set', (['private_key_cache', 'private_key_str', '(3600)'], {}), '(private_key_cache, private_key_str, 3600)\n', (4567, 4609), False, 'from operations_api.app import cache\n'), ((4622, 4671), 'operations_api.app.cache.set', 'cache.set', (['public_key_cache', 'public_key_str', '(3600)'], {}), '(public_key_cache, public_key_str, 3600)\n', (4631, 4671), False, 'from operations_api.app import cache\n'), ((6151, 6188), 'flask.current_app.config.get', 'app.config.get', (['"""MODELFORM_URL"""', 'None'], {}), "('MODELFORM_URL', None)\n", (6165, 6188), True, 'from flask import current_app as app\n'), ((6229, 6267), 'flask.current_app.config.get', 
'app.config.get', (['"""MODELFORM_PATH"""', 'None'], {}), "('MODELFORM_PATH', None)\n", (6243, 6267), True, 'from flask import current_app as app\n'), ((6312, 6352), 'flask.current_app.config.get', 'app.config.get', (['"""MODELFORM_REMOTE"""', 'None'], {}), "('MODELFORM_REMOTE', None)\n", (6326, 6352), True, 'from flask import current_app as app\n'), ((6401, 6443), 'flask.current_app.config.get', 'app.config.get', (['"""MODELFORM_USERNAME"""', 'None'], {}), "('MODELFORM_USERNAME', None)\n", (6415, 6443), True, 'from flask import current_app as app\n'), ((6492, 6534), 'flask.current_app.config.get', 'app.config.get', (['"""MODELFORM_PASSWORD"""', 'None'], {}), "('MODELFORM_PASSWORD', None)\n", (6506, 6534), True, 'from flask import current_app as app\n'), ((6577, 6616), 'flask.current_app.config.get', 'app.config.get', (['"""MODELFORM_TOKEN"""', 'None'], {}), "('MODELFORM_TOKEN', None)\n", (6591, 6616), True, 'from flask import current_app as app\n'), ((6665, 6705), 'flask.current_app.config.get', 'app.config.get', (['"""MODELFORM_VERSIONS"""', '[]'], {}), "('MODELFORM_VERSIONS', [])\n", (6679, 6705), True, 'from flask import current_app as app\n'), ((6762, 6808), 'flask.current_app.config.get', 'app.config.get', (['"""MODELFORM_PROJECT_NAME"""', 'None'], {}), "('MODELFORM_PROJECT_NAME', None)\n", (6776, 6808), True, 'from flask import current_app as app\n'), ((6859, 6902), 'flask.current_app.config.get', 'app.config.get', (['"""MODELFORM_FILE_NAME"""', 'None'], {}), "('MODELFORM_FILE_NAME', None)\n", (6873, 6902), True, 'from flask import current_app as app\n'), ((6963, 7011), 'flask.current_app.config.get', 'app.config.get', (['"""MODELFORM_VERSION_FILTER"""', 'None'], {}), "('MODELFORM_VERSION_FILTER', None)\n", (6977, 7011), True, 'from flask import current_app as app\n'), ((7066, 7109), 'flask.current_app.config.get', 'app.config.get', (['"""MODELFORM_VERSION_MAP"""', '{}'], {}), "('MODELFORM_VERSION_MAP', {})\n", (7080, 7109), True, 'from flask import 
current_app as app\n'), ((8124, 8160), 'operations_api.exceptions.ImproperlyConfigured', 'exceptions.ImproperlyConfigured', (['msg'], {}), '(msg)\n', (8155, 8160), False, 'from operations_api import exceptions\n'), ((14676, 14707), 're.compile', 're.compile', (['self.version_filter'], {}), '(self.version_filter)\n', (14686, 14707), False, 'import re\n'), ((12332, 12355), 'io.open', 'io.open', (['self.path', '"""r"""'], {}), "(self.path, 'r')\n", (12339, 12355), False, 'import io\n'), ((3881, 3898), 'cryptography.hazmat.backends.default_backend', 'default_backend', ([], {}), '()\n', (3896, 3898), False, 'from cryptography.hazmat.backends import default_backend\n'), ((4400, 4428), 'cryptography.hazmat.primitives.serialization.NoEncryption', 'serialization.NoEncryption', ([], {}), '()\n', (4426, 4428), False, 'from cryptography.hazmat.primitives import serialization\n'), ((5853, 5889), 'operations_api.exceptions.ImproperlyConfigured', 'exceptions.ImproperlyConfigured', (['msg'], {}), '(msg)\n', (5884, 5889), False, 'from operations_api import exceptions\n'), ((2478, 2493), 'os.urandom', 'urandom', (['length'], {}), '(length)\n', (2485, 2493), False, 'from os import urandom\n'), ((2975, 2985), 'os.urandom', 'urandom', (['(8)'], {}), '(8)\n', (2982, 2985), False, 'from os import urandom\n')] |
#!/usr/bin/env python3
# Query a 2-D cumulative-sum table over the half-open box [x1, x2) x [y1, y2).
def ac2(s, x1, x2, y1, y2):
    """Return the sum of the original grid over rows [x1, x2) and columns
    [y1, y2), given its 2-D prefix-sum table *s* (inclusion-exclusion)."""
    return s[x2][y2] - s[x2][y1] - s[x1][y2] + s[x1][y1]
import numpy as np

# Read the header (n, k) and the stone descriptions from stdin.
_, *d = open(0)
n, k = map(int, _.split())

# Fold every stone into a single 2k x 2k fundamental tile; a white stone is
# shifted by k rows so both colors map onto one pattern.
B = np.zeros((2*k, 2*k))
for e in d:
    *z, c = e.split()
    x, y = map(int, z)
    # BUG FIX: the color flag is the last token ``c``, not the coordinate
    # list ``z`` -- ``z == "W"`` compared a list to a string, always False.
    B[x % (2*k)][(y + k * (c == "W")) % (2*k)] += 1

# BUG FIX: ndarray.cumsum returns a new array (it is NOT in-place), so the
# results must be assigned back for the prefix sums to take effect.
B = B.cumsum(axis=0)
B = B.cumsum(axis=1)
B = np.tile(B, (2, 2))
print(B)
# NOTE: unfinished draft (original comment "書きかけ" = work in progress)
"numpy.tile",
"numpy.zeros"
] | [((211, 235), 'numpy.zeros', 'np.zeros', (['(2 * k, 2 * k)'], {}), '((2 * k, 2 * k))\n', (219, 235), True, 'import numpy as np\n'), ((383, 401), 'numpy.tile', 'np.tile', (['B', '(2, 2)'], {}), '(B, (2, 2))\n', (390, 401), True, 'import numpy as np\n')] |
from __future__ import print_function, absolute_import, division # makes these scripts backward compatible with python 2.6 and 2.7
# pyKratos imports
from .Element import Element
# Other imports
import numpy as np
class TriangleElement(Element):
    """A 3-node (linear) triangular element for 2-D meshes."""

    def __init__(self, elem_id, nodes):
        """Validate and store the element id and its three nodes.

        Raises if the node count is not exactly 3 or any node has a
        negative Id.
        """
        super(TriangleElement, self).__init__(elem_id, nodes)
        if len(self.GetNodes()) != 3:
            raise Exception("wrong number of nodes! should be 3!")
        if any(node.Id < 0 for node in self.GetNodes()):
            raise Exception("node with Id smaller than 0 found")

    def ShapeFunctions(self, order=1):
        '''Evaluate shape functions, their derivatives and integration weights
        at the Gauss points of this triangle.

        The optional ``order`` selects the integration rule:
        order 1 -> one Gauss point, order 2 -> three Gauss points.

        Returns [Ncontainer, derivatives, weights] where
        Ncontainer[gauss][i] is the shape function of node i at that point,
        derivatives[gauss][i, k] is the derivative of node i, component k,
        and weights[gauss] already includes the Jacobian determinant.'''
        a = self.nodes[0].coordinates
        b = self.nodes[1].coordinates
        c = self.nodes[2].coordinates
        x10 = b[0] - a[0]
        y10 = b[1] - a[1]
        x20 = c[0] - a[0]
        y20 = c[1] - a[1]
        # Determinant of the Jacobian (twice the triangle area).
        detJ = x10 * y20 - y10 * x20

        # Cartesian derivatives of the three linear shape functions
        # (constant over the element).
        DN_DX = np.array([[y10 - y20, x20 - x10],
                          [y20, -x20],
                          [-y10, x10]], dtype=float) / detJ

        if order == 1:  # single-point rule at the centroid
            third = 1.0 / 3.0
            Ncontainer = [np.array([third, third, third])]
            weights = [0.5 * detJ]
            derivatives = [DN_DX]
        elif order == 2:  # three-point rule at the edge midpoints (area coords)
            sixth = 1.0 / 6.0
            two_thirds = 2.0 / 3.0
            Ncontainer = [np.array([sixth, sixth, two_thirds]),
                          np.array([sixth, two_thirds, sixth]),
                          np.array([two_thirds, sixth, sixth])]
            weights = [sixth * detJ, sixth * detJ, sixth * detJ]
            derivatives = [DN_DX, DN_DX, DN_DX]
        else:
            raise Exception("integration order not implemented")

        return [Ncontainer, derivatives, weights]
| [
"numpy.array",
"numpy.zeros"
] | [((1609, 1638), 'numpy.zeros', 'np.zeros', (['(3, 2)'], {'dtype': 'float'}), '((3, 2), dtype=float)\n', (1617, 1638), True, 'import numpy as np\n'), ((1958, 2001), 'numpy.array', 'np.array', (['[one_third, one_third, one_third]'], {}), '([one_third, one_third, one_third])\n', (1966, 2001), True, 'import numpy as np\n'), ((2256, 2297), 'numpy.array', 'np.array', (['[one_sixt, one_sixt, two_third]'], {}), '([one_sixt, one_sixt, two_third])\n', (2264, 2297), True, 'import numpy as np\n'), ((2329, 2370), 'numpy.array', 'np.array', (['[one_sixt, two_third, one_sixt]'], {}), '([one_sixt, two_third, one_sixt])\n', (2337, 2370), True, 'import numpy as np\n'), ((2402, 2443), 'numpy.array', 'np.array', (['[two_third, one_sixt, one_sixt]'], {}), '([two_third, one_sixt, one_sixt])\n', (2410, 2443), True, 'import numpy as np\n')] |
from collections import defaultdict
from ...account.models import Address, CustomerEvent, User
from ..core.dataloaders import DataLoader
class AddressByIdLoader(DataLoader):
    """Batch-load Address rows by primary key, preserving request order."""

    context_key = "address_by_id"

    def batch_load(self, keys):
        # in_bulk issues a single query; missing ids resolve to None so the
        # result list stays aligned with ``keys``.
        fetched = Address.objects.in_bulk(keys)
        return [fetched.get(pk) for pk in keys]
class UserByUserIdLoader(DataLoader):
    """Batch-load User rows by primary key, preserving request order."""

    context_key = "user_by_id"

    def batch_load(self, keys):
        users = User.objects.in_bulk(keys)
        # Unknown ids map to None, keeping positions aligned with ``keys``.
        return list(map(users.get, keys))
class CustomerEventsByUserLoader(DataLoader):
    """Batch-load each user's CustomerEvents; one list per requested user id."""

    context_key = "customer_events_by_user"

    def batch_load(self, keys):
        grouped = defaultdict(list)
        for event in CustomerEvent.objects.filter(user_id__in=keys):
            grouped[event.user_id].append(event)
        # Users without events get an empty list, not None.
        return [grouped.get(user_id, []) for user_id in keys]
| [
"collections.defaultdict"
] | [((791, 808), 'collections.defaultdict', 'defaultdict', (['list'], {}), '(list)\n', (802, 808), False, 'from collections import defaultdict\n')] |
##############################################################################
#
# Copyright (c) 2009 Zope Foundation and Contributors.
# All Rights Reserved.
#
# This software is subject to the provisions of the Zope Public License,
# Version 2.1 (ZPL). A copy of the ZPL should accompany this distribution.
# THIS SOFTWARE IS PROVIDED "AS IS" AND ANY AND ALL EXPRESS OR IMPLIED
# WARRANTIES ARE DISCLAIMED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF TITLE, MERCHANTABILITY, AGAINST INFRINGEMENT, AND FITNESS
# FOR A PARTICULAR PURPOSE.
#
##############################################################################
"""Login Form
"""
from zope.authentication.interfaces import IUnauthenticatedPrincipal
class LoginForm(object):
    """Mix-in class implementing the login form logic."""

    context = None
    request = None
    unauthenticated = None
    camefrom = None

    def __call__(self):
        request = self.request
        self.unauthenticated = IUnauthenticatedPrincipal.providedBy(
            request.principal)

        origin = request.get('camefrom')
        if isinstance(origin, list):
            # Since python 2.6, a parameter supplied more than once arrives
            # as a list; keep only the first occurrence.
            origin = origin[0]
        self.camefrom = origin

        if not self.unauthenticated and 'SUBMIT' in request:
            # Authenticated via form submission: bounce back to the origin.
            request.response.redirect(origin or '.')
            return ''

        # Render the login template.
        return self.index()
| [
"zope.authentication.interfaces.IUnauthenticatedPrincipal.providedBy"
] | [((1011, 1058), 'zope.authentication.interfaces.IUnauthenticatedPrincipal.providedBy', 'IUnauthenticatedPrincipal.providedBy', (['principal'], {}), '(principal)\n', (1047, 1058), False, 'from zope.authentication.interfaces import IUnauthenticatedPrincipal\n')] |
import vtk
import numpy as np
class LinearSubdivisionFilter:
	"""Linearly subdivide every triangle of a vtkPolyData.

	Each input triangle is split into NumberOfSubdivisions**2 congruent
	sub-triangles by linear interpolation (no smoothing), the pieces are
	appended together, and duplicate points along shared edges are merged
	with vtkCleanPolyData.

	Mirrors the vtkAlgorithm usage pattern:
	SetInputData(), optionally SetNumberOfSubdivisions(), Update(),
	then GetOutput().
	"""
	# NOTE(review): class attributes serve as per-instance defaults; the
	# setters rebind them on the instance, so instances do not interfere.
	InputData = None
	Output = None
	NumberOfSubdivisions = 1
	def SetInputData(self, polydata):
		"""Set the vtkPolyData (triangle mesh) to subdivide."""
		self.InputData = polydata
	def GetOutput(self):
		"""Return the subdivided vtkPolyData (None before Update())."""
		return self.Output
	def SetNumberOfSubdivisions (self, subdivisions):
		"""Set how many segments each triangle edge is split into."""
		self.NumberOfSubdivisions = subdivisions
	def Update(self):
		"""Run the filter; afterwards GetOutput() holds the result."""
		self.GenerateData()
	def GenerateData(self):
		"""Perform the subdivision. Does nothing if no input was set."""
		if self.InputData:
			inputpolydata = self.InputData
			subdivisionlevel = self.NumberOfSubdivisions
			inputpolydata_points = inputpolydata.GetPoints()
			appendpoly = vtk.vtkAppendPolyData()
			# Iterate over the cells in the polydata.
			# The idea is to linearly divide every cell according to the subdivision level.
			for cellid in range(inputpolydata.GetNumberOfCells()):
				idlist = vtk.vtkIdList()
				inputpolydata.GetCellPoints(cellid, idlist)
				# For every cell we create a new polydata, i.e. the big triangle with the interpolated triangles inside.
				subdiv_poly = vtk.vtkPolyData()
				subdiv_points = vtk.vtkPoints()
				subdiv_cellarray = vtk.vtkCellArray()
				if(idlist.GetNumberOfIds() != 3):
					raise Exception("Only triangle meshes are supported. Convert your mesh to triangles!", idlist.GetNumberOfIds())
				# Get the triangle's corner points from the current cell.
				p1 = np.array(inputpolydata_points.GetPoint(idlist.GetId(0)))
				p2 = np.array(inputpolydata_points.GetPoint(idlist.GetId(1)))
				p3 = np.array(inputpolydata_points.GetPoint(idlist.GetId(2)))
				# Edge step vectors: one subdivision step along edges p1->p2 and p1->p3.
				dp12 = (p2 - p1)/subdivisionlevel
				dp13 = (p3 - p1)/subdivisionlevel
				# Interpolate the lattice points row by row (row s13 holds
				# subdivisionlevel + 1 - s13 points, appended in order).
				for s13 in range(0, subdivisionlevel + 1):
					for s12 in range(0, subdivisionlevel + 1 - s13):
						interp = p1 + s12*dp12 + s13*dp13
						subdiv_points.InsertNextPoint(interp[0], interp[1], interp[2])
				# Using the interpolated points, create the cells (triangles).
				# id1/id2 are consecutive points on the current row; id3/id4 sit
				# above them on the next row, which holds one point fewer.
				id1 = -1
				for s13 in range(0, subdivisionlevel):
					id1 += 1
					for s12 in range(0, subdivisionlevel - s13):
						id2 = id1 + 1
						id3 = id1 + subdivisionlevel + 1 - s13
						id4 = id3 + 1
						# "Upward" triangle (id1, id2, id3).
						triangle = vtk.vtkTriangle()
						triangle.GetPointIds().SetId(0, id1);
						triangle.GetPointIds().SetId(1, id2);
						triangle.GetPointIds().SetId(2, id3);
						subdiv_cellarray.InsertNextCell(triangle)
						if s12 < subdivisionlevel - s13 - 1:
							# "Downward" triangle (id2, id4, id3) filling the gap;
							# skipped at the row's last column where none exists.
							triangle = vtk.vtkTriangle()
							triangle.GetPointIds().SetId(0, id2);
							triangle.GetPointIds().SetId(1, id4);
							triangle.GetPointIds().SetId(2, id3);
							subdiv_cellarray.InsertNextCell(triangle)
						id1 += 1
				# Set all the interpolated points and generated cells on the polydata.
				subdiv_poly.SetPoints(subdiv_points)
				subdiv_poly.SetPolys(subdiv_cellarray)
				# Append the current subdivided triangle to the append filter.
				appendpoly.AddInputData(subdiv_poly)
			# All interpolated triangles now form a single polydata.
			appendpoly.Update()
			# Remove duplicate points (every shared triangle edge repeats its points).
			cleanpoly = vtk.vtkCleanPolyData()
			cleanpoly.SetInputData(appendpoly.GetOutput())
			cleanpoly.Update()
			# Store the subdivided polydata for GetOutput().
			self.Output = cleanpoly.GetOutput()
"vtk.vtkIdList",
"vtk.vtkTriangle",
"vtk.vtkAppendPolyData",
"vtk.vtkPolyData",
"vtk.vtkCellArray",
"vtk.vtkPoints",
"vtk.vtkCleanPolyData"
] | [((567, 590), 'vtk.vtkAppendPolyData', 'vtk.vtkAppendPolyData', ([], {}), '()\n', (588, 590), False, 'import vtk\n'), ((3151, 3173), 'vtk.vtkCleanPolyData', 'vtk.vtkCleanPolyData', ([], {}), '()\n', (3171, 3173), False, 'import vtk\n'), ((789, 804), 'vtk.vtkIdList', 'vtk.vtkIdList', ([], {}), '()\n', (802, 804), False, 'import vtk\n'), ((984, 1001), 'vtk.vtkPolyData', 'vtk.vtkPolyData', ([], {}), '()\n', (999, 1001), False, 'import vtk\n'), ((1022, 1037), 'vtk.vtkPoints', 'vtk.vtkPoints', ([], {}), '()\n', (1035, 1037), False, 'import vtk\n'), ((1061, 1079), 'vtk.vtkCellArray', 'vtk.vtkCellArray', ([], {}), '()\n', (1077, 1079), False, 'import vtk\n'), ((2169, 2186), 'vtk.vtkTriangle', 'vtk.vtkTriangle', ([], {}), '()\n', (2184, 2186), False, 'import vtk\n'), ((2430, 2447), 'vtk.vtkTriangle', 'vtk.vtkTriangle', ([], {}), '()\n', (2445, 2447), False, 'import vtk\n')] |
#!/usr/bin/env python
# hsslms.py
#
# This provides a command line interface for the pyhsslms.py
# implementation of HSS/LMS Hash-based Signatures as defined
# in RFC 8554.
#
#
# Copyright (c) 2020-2021, Vigil Security, LLC
# All rights reserved.
#
# Redistribution and use, with or without modification, are permitted
# provided that the following conditions are met:
#
# (1) Redistributions must retain the above copyright notice, this
# list of conditions, and the following disclaimer.
#
# (2) Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following
# disclaimer in the documentation and/or other materials provided
# with the distribution.
#
# (3) Neither the name of the Vigil Security, LLC nor the names of the
# contributors to this code may be used to endorse or promote any
# products derived from this software without specific prior written
# permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
# FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
# COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
# BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS
# OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) REGARDLESS OF THE
# CAUSE AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY
# WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
import sys
import os.path
import argparse
import pyhsslms
from .__init__ import __version__ as VERSION
def usage(name):
    """Print the command-line usage summary for program *name*, then exit.

    Always terminates the process with exit status 1, so it can serve both
    '--help' requests and argument-error paths.
    """
    cmd_name = os.path.basename(name)
    print("commands:")
    print(cmd_name + " genkey <keyname> [<genparms>]")
    print("    creates <keyname>.prv and <keyname>.pub")
    print("  ")
    print(cmd_name + " sign <keyname> <filename>")
    print("    updates <keyname>.prv and makes the signature in <filename>.sig")
    print("  ")
    print(cmd_name + " verify <keyname> <filename>")
    print("    verifies the signature in <filename>.sig with <keyname>.pub")
    print("  ")
    print(cmd_name + " showprv <keyname>")
    print("    display <keyname>.prv")
    print("  ")
    print(cmd_name + " showpub <keyname>")
    print("    display <keyname>.pub")
    print("  ")
    print(cmd_name + " showsig <filename>")
    print("    display <filename>.sig")
    print("  ")
    print("optional <genparms> for the genkey command:")
    print("  -l LEVELS, --levels LEVELS")
    # typo fix: was "heirarchy"
    print("     Number of levels in HSS hierarchy")
    print("  -s LMS_TYPE, --lms LMS_TYPE")
    print("     Height of the LMS trees")
    print("  -w LMOTS_TYPE, --lmots LMOTS_TYPE")
    print("     Winternitz number")
    print("  -a HASH_ALG, --alg HASH_ALG")
    print("     Hash algorithm (sha256 or shake)")
    print("  ")
    print("optional command arguments:")
    print("  -h, --help")
    print("     Provides this information")
    print("  -v, --version")
    # typo fix: was "Provids"
    print("     Provides the program version number")
    sys.exit(1)
def _genkey_types(arg_list):
    """Parse the optional genkey parameters from *arg_list*.

    Returns a (levels, lms_type, lmots_type) tuple.  With an empty list the
    defaults reproduce the historical behaviour: 2 levels,
    lms_sha256_m32_h5 and lmots_sha256_n32_w8.  Invalid input makes
    argparse print an error and exit.
    """
    parser = argparse.ArgumentParser()
    parser.add_argument('-l', '--levels', dest='levels', default=2,
        type=int, choices=[1, 2, 3, 4, 5, 6, 7, 8],
        metavar='LEVELS', help='Number of levels in HSS hierarchy')
    parser.add_argument('-s', '--lms', dest='lms', default=5,
        type=int, choices=[5, 10, 15, 20, 25],
        metavar='LMS_TYPE', help='Height of the LMS trees')
    parser.add_argument('-w', '--lmots', dest='lmots', default=8,
        type=int, choices=[1, 2, 4, 8],
        metavar='LMOTS_TYPE', help='Winternitz number')
    parser.add_argument('-a', '--alg', dest='alg', default='sha256',
        type=str, choices=['sha256', 'shake'],
        metavar='HASH_ALG', help='Hash algorithm (sha256 or shake)')
    # BUG FIX: this option used type=str with integer choices, so every
    # explicit "-t" value was rejected ('32' not in [32, 24]) and the str
    # default '32' compared unequal to int 32, silently selecting the
    # 24-byte truncation branch.  Parse it as an int instead.
    parser.add_argument('-t', '--trunc', dest='trunc', default=32,
        type=int, choices=[32, 24],
        metavar='TRUNC', help='Hash algorithm truncation size')
    args = parser.parse_args(arg_list)
    # The pyhsslms constants follow a strict naming scheme (for example
    # lms_sha256_m32_h10, lmots_shake_n24_w4), so build the names directly
    # instead of enumerating every combination in if-chains.
    lms_type = getattr(
        pyhsslms, 'lms_%s_m%d_h%d' % (args.alg, args.trunc, args.lms))
    lmots_type = getattr(
        pyhsslms, 'lmots_%s_n%d_w%d' % (args.alg, args.trunc, args.lmots))
    return args.levels, lms_type, lmots_type


def main():
    """
    Command line interface for pyhsslms.py.
    """
    cmds = ['genkey', 'keygen', 'sign', 'verify',
            'showprv', 'showpub', 'showsig',
            '--version', '-v', 'version', '--help', '-h', 'help']
    if len(sys.argv) < 2 or sys.argv[1] not in cmds:
        print("error: first argument must be a command")
        usage(sys.argv[0])  # never returns
    if sys.argv[1] == 'help' or '--help' in sys.argv or '-h' in sys.argv:
        usage(sys.argv[0])  # never returns
    if sys.argv[1] == 'version' or '--version' in sys.argv or '-v' in sys.argv:
        print(os.path.basename(sys.argv[0]) + " " + VERSION)
        sys.exit(1)  # historical behaviour: version also exits with 1
    if sys.argv[1] in ['genkey', 'keygen']:
        if len(sys.argv) < 3:
            print("error: second argument must be a keyname")
            usage(sys.argv[0])
        keyname = sys.argv[2]
        # An empty option list yields exactly the old hard-wired defaults.
        levels, lms_type, lmots_type = _genkey_types(sys.argv[3:])
        pyhsslms.HssLmsPrivateKey.genkey(keyname, levels=levels,
            lms_type=lms_type, lmots_type=lmots_type)
    if sys.argv[1] == 'sign':
        if len(sys.argv) < 3:
            print("error: second argument must be a keyname")
            usage(sys.argv[0])
        if len(sys.argv) < 4:
            print("error: third argument must be a file name")
            usage(sys.argv[0])
        keyname = sys.argv[2]
        filename = sys.argv[3]
        print("Signing " + filename + " ...")
        prv = pyhsslms.HssLmsPrivateKey(keyname)
        if prv.signFile(filename):
            print(" ... Success. Signature saved in " + filename + ".sig")
        else:
            print(" ... Failed!")
    if sys.argv[1] == 'verify':
        if len(sys.argv) < 3:
            print("error: second argument must be a keyname")
            usage(sys.argv[0])
        if len(sys.argv) < 4:
            print("error: third argument must be a file name")
            usage(sys.argv[0])
        keyname = sys.argv[2]
        filename = sys.argv[3]
        pub = pyhsslms.HssLmsPublicKey(keyname)
        if pub.verifyFile(filename):
            print("Signature in " + filename + ".sig is valid.")
        else:
            print("Signature verification failed!")
    if sys.argv[1] == 'showprv':
        if len(sys.argv) < 3:
            print("error: second argument must be a keyname")
            usage(sys.argv[0])
        keyname = sys.argv[2]
        prv = pyhsslms.HssLmsPrivateKey(keyname)
        print("Private Key: " + keyname + ".prv")
        print(prv.hss_prv.prettyPrint())
    if sys.argv[1] == 'showpub':
        if len(sys.argv) < 3:
            print("error: second argument must be a keyname")
            usage(sys.argv[0])
        keyname = sys.argv[2]
        pub = pyhsslms.HssLmsPublicKey(keyname)
        print("Public Key: " + keyname + ".pub")
        print(pub.hss_pub.prettyPrint())
    if sys.argv[1] == 'showsig':
        if len(sys.argv) < 3:
            print("error: second argument must be a file name")
            usage(sys.argv[0])
        filename = sys.argv[2]
        sig = pyhsslms.HssLmsSignature(filename)
        print("Signature: " + filename + ".sig")
        print(sig.hss_sig.prettyPrint())
| [
"pyhsslms.HssLmsPrivateKey",
"argparse.ArgumentParser",
"pyhsslms.HssLmsSignature",
"pyhsslms.HssLmsPublicKey",
"pyhsslms.HssLmsPrivateKey.genkey",
"sys.exit"
] | [((3467, 3478), 'sys.exit', 'sys.exit', (['(1)'], {}), '(1)\n', (3475, 3478), False, 'import sys\n'), ((3864, 3875), 'sys.exit', 'sys.exit', (['(1)'], {}), '(1)\n', (3872, 3875), False, 'import sys\n'), ((3986, 3997), 'sys.exit', 'sys.exit', (['(1)'], {}), '(1)\n', (3994, 3997), False, 'import sys\n'), ((4148, 4159), 'sys.exit', 'sys.exit', (['(1)'], {}), '(1)\n', (4156, 4159), False, 'import sys\n'), ((8785, 8887), 'pyhsslms.HssLmsPrivateKey.genkey', 'pyhsslms.HssLmsPrivateKey.genkey', (['keyname'], {'levels': 'levels', 'lms_type': 'lms_type', 'lmots_type': 'lmots_type'}), '(keyname, levels=levels, lms_type=lms_type,\n lmots_type=lmots_type)\n', (8817, 8887), False, 'import pyhsslms\n'), ((9345, 9379), 'pyhsslms.HssLmsPrivateKey', 'pyhsslms.HssLmsPrivateKey', (['keyname'], {}), '(keyname)\n', (9370, 9379), False, 'import pyhsslms\n'), ((9947, 9980), 'pyhsslms.HssLmsPublicKey', 'pyhsslms.HssLmsPublicKey', (['keyname'], {}), '(keyname)\n', (9971, 9980), False, 'import pyhsslms\n'), ((10384, 10418), 'pyhsslms.HssLmsPrivateKey', 'pyhsslms.HssLmsPrivateKey', (['keyname'], {}), '(keyname)\n', (10409, 10418), False, 'import pyhsslms\n'), ((10736, 10769), 'pyhsslms.HssLmsPublicKey', 'pyhsslms.HssLmsPublicKey', (['keyname'], {}), '(keyname)\n', (10760, 10769), False, 'import pyhsslms\n'), ((11089, 11123), 'pyhsslms.HssLmsSignature', 'pyhsslms.HssLmsSignature', (['filename'], {}), '(filename)\n', (11113, 11123), False, 'import pyhsslms\n'), ((4340, 4351), 'sys.exit', 'sys.exit', (['(1)'], {}), '(1)\n', (4348, 4351), False, 'import sys\n'), ((4549, 4574), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {}), '()\n', (4572, 4574), False, 'import argparse\n'), ((9062, 9073), 'sys.exit', 'sys.exit', (['(1)'], {}), '(1)\n', (9070, 9073), False, 'import sys\n'), ((9211, 9222), 'sys.exit', 'sys.exit', (['(1)'], {}), '(1)\n', (9219, 9222), False, 'import sys\n'), ((9710, 9721), 'sys.exit', 'sys.exit', (['(1)'], {}), '(1)\n', (9718, 9721), False, 'import sys\n'), 
((9859, 9870), 'sys.exit', 'sys.exit', (['(1)'], {}), '(1)\n', (9867, 9870), False, 'import sys\n'), ((10327, 10338), 'sys.exit', 'sys.exit', (['(1)'], {}), '(1)\n', (10335, 10338), False, 'import sys\n'), ((10679, 10690), 'sys.exit', 'sys.exit', (['(1)'], {}), '(1)\n', (10687, 10690), False, 'import sys\n'), ((11031, 11042), 'sys.exit', 'sys.exit', (['(1)'], {}), '(1)\n', (11039, 11042), False, 'import sys\n')] |
"""Methods for parsing the unicode spec, and retrieving a list of Emoji and Modifiers.
Note that the model of 'Emoji' here isn't sufficiently general to represent everything in the spec -
a visual / user-facing emoji could be, for example, a super complicated Zero-Width-Join sequence. I
wanted to go in favor of ease-of-use instead of comprehensiveness here, though, so there are some
emoji that aren't represented.
An important part of this module is emoji_unicode_11_manual_supplement.py. This is a manual
interpretation of a lot of the data in the emoji-zwj-sequences.txt file, based on my reading of The
Spec.
"""
import os
from collections import defaultdict
from typing import Dict, Iterable, List, NamedTuple, Optional, Set, Tuple
import emoji.emoji_unicode_11_manual_supplement as supplement
from emoji.core import Emoji, GenderMode, Modifier
class EmojiData(NamedTuple):
    """The parsed result of the data source: every Emoji plus every Modifier."""
    # All emoji found in the data (skin-tone modifiers excluded).
    emojis: List[Emoji]
    # All skin-tone modifier characters.
    modifiers: List[Modifier]
# A Unicode code point, as defined by the Unicode spec (e.g. 0x1F600). This is
# just an int; the alias only exists to document signatures more precisely.
_CodePoint = int
class _CodePointInfo(NamedTuple):
    """Facts accumulated about one code point from the unicode data file."""
    # Unicode classes the code point belongs to (e.g. 'Emoji').
    classes: Set[str]
    # Trailing data-file comments seen for the code point (debugging aid).
    comments: Set[str]
def _make_cpi() -> _CodePointInfo:
    """Return a fresh, empty record (defaultdict factory for the codepoint table)."""
    return _CodePointInfo(classes=set(), comments=set())
def _load_codepoints(data_directory: str) -> Dict[_CodePoint, _CodePointInfo]:
    """Build a table mapping every code point mentioned in the unicode data
    specification to the classes and comments recorded for it.
    """
    table: Dict[_CodePoint, _CodePointInfo] = defaultdict(_make_cpi)
    for spec, spec_class, note in _scan_codepoints_file(data_directory):
        # "AAAA..BBBB" denotes an inclusive range; a bare "AAAA" is itself.
        endpoints = spec.split('..')
        first = int(endpoints[0], 16)
        last = int(endpoints[-1], 16)
        # last + 1 because the spec's ranges are inclusive.
        for cp in range(first, last + 1):
            table[cp].classes.add(spec_class)
            if note:
                table[cp].comments.add(note)
    return table
def _scan_codepoints_file(data_directory: str) -> Iterable[Tuple[str, str, Optional[str]]]:
    """Yield one tuple per data-bearing line of emoji-data.txt:

    - codepoint or range of codepoints (e.g. "2139", "2194..2199")
    - unicode class
    - any trailing comment on that line (useful for debugging)
    """
    # NOTE: deliberately not the csv module -- lines carry '#' comments at
    # the end that we want to preserve, so we strip and split by hand.
    with open(os.path.join(data_directory, 'emoji-data.txt'), 'r') as handle:
        for raw_line in handle:
            data, note = _remove_comment(raw_line)
            if not data:
                continue  # comment-only line
            spec, spec_class = (piece.strip() for piece in data.split(';'))
            yield spec, spec_class, note
def _remove_comment(line: str) -> Tuple[str, Optional[str]]:
"""Returns: [data-part of line] [comment]"""
vals = line.split('#', maxsplit=1)
if len(vals) == 1:
# There is no comment if there is one element
return vals[0].strip(), None
else:
return vals[0].strip(), vals[1].strip()
def _get_gender_mode(codepoint: _CodePoint) -> GenderMode:
    """Look up how this code point supports gendering, per the manual supplement."""
    if codepoint in supplement.SUPPORTS_OBJECT_FORMAT_GENDERING:
        return GenderMode.OBJECT_FORMAT
    if codepoint in supplement.SUPPORTS_SIGN_FORMAT_GENDERING:
        return GenderMode.SIGN_FORMAT
    return GenderMode.NONE
def load_emoji_and_modifiers() -> EmojiData:
    """Collect all Emoji and all skin-tone Modifiers from the data source."""
    emojis: List[Emoji] = []
    modifiers: List[Modifier] = []
    for codepoint, info in _load_codepoints('datasources/emoji-unicode-11/').items():
        if (info.classes & {'Emoji', 'Emoji_Component'}) == {'Emoji'}:
            modifiable = 'Emoji_Modifier_Base' in info.classes
            defaults_to_text = 'Emoji_Presentation' not in info.classes
            gender_mode = _get_gender_mode(codepoint)
            if gender_mode == GenderMode.OBJECT_FORMAT:
                # The non-gendered case has a different meaning from the gendered
                # cases, so add both a GenderMode.NONE Emoji and an
                # OBJECT_FORMAT one. The gendered cases are always modifiable
                # (by manual examination of the spec).
                emojis.append(Emoji(codepoint, defaults_to_text, modifiable, GenderMode.NONE))
                emojis.append(Emoji(codepoint, defaults_to_text, True, GenderMode.OBJECT_FORMAT))
            else:
                emojis.append(Emoji(codepoint, defaults_to_text, modifiable, gender_mode))
        elif {'Emoji', 'Emoji_Modifier'} <= info.classes:
            # It's a skin-tone modifier.
            modifiers.append(chr(codepoint))
        # Anything else (components, non-emoji) is intentionally ignored.
    return EmojiData(emojis, modifiers)
| [
"os.path.join",
"emoji.core.Emoji",
"collections.defaultdict"
] | [((1547, 1569), 'collections.defaultdict', 'defaultdict', (['_make_cpi'], {}), '(_make_cpi)\n', (1558, 1569), False, 'from collections import defaultdict\n'), ((2493, 2539), 'os.path.join', 'os.path.join', (['data_directory', '"""emoji-data.txt"""'], {}), "(data_directory, 'emoji-data.txt')\n", (2505, 2539), False, 'import os\n'), ((4739, 4794), 'emoji.core.Emoji', 'Emoji', (['k', 'defaults_to_text', 'modifiable', 'GenderMode.NONE'], {}), '(k, defaults_to_text, modifiable, GenderMode.NONE)\n', (4744, 4794), False, 'from emoji.core import Emoji, GenderMode, Modifier\n'), ((4826, 4884), 'emoji.core.Emoji', 'Emoji', (['k', 'defaults_to_text', '(True)', 'GenderMode.OBJECT_FORMAT'], {}), '(k, defaults_to_text, True, GenderMode.OBJECT_FORMAT)\n', (4831, 4884), False, 'from emoji.core import Emoji, GenderMode, Modifier\n'), ((4934, 4985), 'emoji.core.Emoji', 'Emoji', (['k', 'defaults_to_text', 'modifiable', 'gender_mode'], {}), '(k, defaults_to_text, modifiable, gender_mode)\n', (4939, 4985), False, 'from emoji.core import Emoji, GenderMode, Modifier\n')] |
# -*- coding: utf-8 -*-
from setuptools import setup
setup(
name='superpose3d',
packages=['superpose3d'],
description='Diamond\'s 1988 rotational superposition algorithm (+scale tranforms)',
long_description='''Register 3-D point clouds using rotation, translation, and scale transformations.
## Usage
```
def Superpose3D(X, # <-- Nx3 array of coords for the "frozen" point cloud
x, # <-- Nx3 array of coords for the "mobile" point cloud
# ---- optional arguments: ----
w = None, # optional weights for the calculation of RMSD
allow_rescale=False, # attempt to rescale mobile point cloud?
report_quaternion=False) # report rotation angle and axis?
```
Superpose3D() takes two ordered lists (or numpy arrays) of xyz coordinates
(*of the same length*, **N**) representing points in a point cloud
(**X** and **x**). Treating them as rigid objects,
"Superpose3D()" attempts to superimpose them using **rotations**,
**translations**, and (optionally) **scale** transformations in order
to minimize the root-mean-squared-distance (RMSD) between corresponding
points from either point cloud, where RMSD is defined as:
```
RMSD = sqrt( (Σ_n w[n] * Σ_i |X[n][i] - (Σ_j c*R[i][j]*x[n][j] + T[i])|^2) / (Σ_n w[n]) )
```
If *w=None*, equal weights are used. In that case:
```
RMSD = sqrt( (Σ_n Σ_i |X[n][i] - (Σ_j c*R[i][j]*x[n][j] + T[i])|^2) / N )
```
...where:
```
R = a rotation matrix (a 3x3 numpy array representing the rotation. |R|=1)
T = a translation vector (a 1-D numpy array containing x,y,z displacements)
c = a scalar (a number, 1 by default)
```
This function returns a 4-tuple containing the optimal values of:
```
(RMSD, R, T, c)
```
If the rotation angle and axis are needed, then set the *report_quaternion*
argument to *True*. In that case, the function will return this 4-tuple instead:
```
(RMSD, q, T, c)
```
...where *q* is the
[quaternion corresponding to rotation *R*](https://en.wikipedia.org/wiki/Quaternions_and_spatial_rotation),
from which the rotation angle and rotation axis can be easily determined.
This function implements a more general variant of the method from this paper:
R. Diamond, (1988)
"A Note on the Rotational Superposition Problem",
Acta Cryst. A44, pp. 211-216.
This version has been augmented slightly to support scale transformations. (I.E. multiplication by scalars. This can be useful for the registration of two different annotated volumetric 3-D images of the same object taken at different magnifications.)
Note that if you enable scale transformations (i.e. if *allow_rescale=True*), you should be wary if the function returns a negative **c** value. Negative **c** values correspond to inversions (reflections). For this reason, if you are using this function to compare the conformations of molecules, you should probably set *allow_rescale=False*. This will prevent matching a molecule with its stereoisomer.
Note: A C++ version of this repository is available at
https://github.com/jewettaij/superpose3d_cpp
''',
long_description_content_type='text/markdown',
author='<NAME>',
author_email='<EMAIL>',
url='https://github.com/jewettaij/superpose3d',
download_url='https://github.com/jewettaij/superpose3d/archive/v1.0.1.zip',
version='1.0.1',
install_requires=[
'numpy',
],
keywords=['registration', '3d', 'structure-comparison', 'molecular-structure',
'clem'],
license='MIT',
classifiers=['Development Status :: 5 - Production/Stable',
'License :: OSI Approved :: MIT License',
'Environment :: Console',
'Operating System :: MacOS :: MacOS X',
'Operating System :: POSIX :: Linux',
'Operating System :: Microsoft :: Windows',
'Programming Language :: Python',
'Programming Language :: Python :: 2.7',
'Programming Language :: Python :: 3.4',
'Programming Language :: Python :: 3.8',
'Topic :: Scientific/Engineering'],
zip_safe=True,
include_package_data=True
)
| [
"setuptools.setup"
] | [((55, 4049), 'setuptools.setup', 'setup', ([], {'name': '"""superpose3d"""', 'packages': "['superpose3d']", 'description': '"""Diamond\'s 1988 rotational superposition algorithm (+scale tranforms)"""', 'long_description': '"""Register 3-D point clouds using rotation, translation, and scale transformations.\n\n## Usage\n\n```\ndef Superpose3D(X, # <-- Nx3 array of coords for the "frozen" point cloud\n x, # <-- Nx3 array of coords for the "mobile" point cloud\n # ---- optional arguments: ----\n w = None, # optional weights for the calculation of RMSD\n allow_rescale=False, # attempt to rescale mobile point cloud?\n report_quaternion=False) # report rotation angle and axis?\n```\n\nSuperpose3D() takes two ordered lists (or numpy arrays) of xyz coordinates\n(*of the same length*, **N**) representing points in a point cloud\n(**X** and **x**). Treating them as rigid objects,\n"Superpose3D()" attempts to superimpose them using **rotations**,\n**translations**, and (optionally) **scale** transformations in order\nto minimize the root-mean-squared-distance (RMSD) between corresponding\npoints from either point cloud, where RMSD is defined as:\n```\n RMSD = sqrt( (Σ_n w[n] * Σ_i |X[n][i] - (Σ_j c*R[i][j]*x[n][j] + T[i])|^2) / (Σ_n w[n]) )\n```\nIf *w=None*, equal weights are used. In that case:\n```\n RMSD = sqrt( (Σ_n Σ_i |X[n][i] - (Σ_j c*R[i][j]*x[n][j] + T[i])|^2) / N )\n```\n...where:\n```\n R = a rotation matrix (a 3x3 numpy array representing the rotation. |R|=1)\n T = a translation vector (a 1-D numpy array containing x,y,z displacements)\n c = a scalar (a number, 1 by default)\n```\nThis function returns a 4-tuple containing the optimal values of:\n```\n (RMSD, R, T, c)\n```\nIf the rotation angle and axis are needed, then set the *report_quaternion*\nargument to *True*. 
In that case, the function will return this 4-tuple instead:\n```\n (RMSD, q, T, c)\n```\n ...where *q* is the\n[quaternion corresponding to rotation *R*](https://en.wikipedia.org/wiki/Quaternions_and_spatial_rotation),\nfrom which the rotation angle and rotation axis can be easily determined.\n\nThis function implements a more general variant of the method from this paper:\nR. Diamond, (1988)\n"A Note on the Rotational Superposition Problem",\n Acta Cryst. A44, pp. 211-216.\n\nThis version has been augmented slightly to support scale transformations. (I.E. multiplication by scalars. This can be useful for the registration of two different annotated volumetric 3-D images of the same object taken at different magnifications.)\n\nNote that if you enable scale transformations (i.e. if *allow_rescale=True*), you should be wary if the function returns a negative **c** value. Negative **c** values correspond to inversions (reflections). For this reason, if you are using this function to compare the conformations of molecules, you should probably set *allow_rescale=False*. 
This will prevent matching a molecule with its stereoisomer.\n\nNote: A C++ version of this repository is available at\nhttps://github.com/jewettaij/superpose3d_cpp\n"""', 'long_description_content_type': '"""text/markdown"""', 'author': '"""<NAME>"""', 'author_email': '"""<EMAIL>"""', 'url': '"""https://github.com/jewettaij/superpose3d"""', 'download_url': '"""https://github.com/jewettaij/superpose3d/archive/v1.0.1.zip"""', 'version': '"""1.0.1"""', 'install_requires': "['numpy']", 'keywords': "['registration', '3d', 'structure-comparison', 'molecular-structure', 'clem']", 'license': '"""MIT"""', 'classifiers': "['Development Status :: 5 - Production/Stable',\n 'License :: OSI Approved :: MIT License', 'Environment :: Console',\n 'Operating System :: MacOS :: MacOS X',\n 'Operating System :: POSIX :: Linux',\n 'Operating System :: Microsoft :: Windows',\n 'Programming Language :: Python',\n 'Programming Language :: Python :: 2.7',\n 'Programming Language :: Python :: 3.4',\n 'Programming Language :: Python :: 3.8', 'Topic :: Scientific/Engineering']", 'zip_safe': '(True)', 'include_package_data': '(True)'}), '(name=\'superpose3d\', packages=[\'superpose3d\'], description=\n "Diamond\'s 1988 rotational superposition algorithm (+scale tranforms)",\n long_description=\n """Register 3-D point clouds using rotation, translation, and scale transformations.\n\n## Usage\n\n```\ndef Superpose3D(X, # <-- Nx3 array of coords for the "frozen" point cloud\n x, # <-- Nx3 array of coords for the "mobile" point cloud\n # ---- optional arguments: ----\n w = None, # optional weights for the calculation of RMSD\n allow_rescale=False, # attempt to rescale mobile point cloud?\n report_quaternion=False) # report rotation angle and axis?\n```\n\nSuperpose3D() takes two ordered lists (or numpy arrays) of xyz coordinates\n(*of the same length*, **N**) representing points in a point cloud\n(**X** and **x**). 
Treating them as rigid objects,\n"Superpose3D()" attempts to superimpose them using **rotations**,\n**translations**, and (optionally) **scale** transformations in order\nto minimize the root-mean-squared-distance (RMSD) between corresponding\npoints from either point cloud, where RMSD is defined as:\n```\n RMSD = sqrt( (Σ_n w[n] * Σ_i |X[n][i] - (Σ_j c*R[i][j]*x[n][j] + T[i])|^2) / (Σ_n w[n]) )\n```\nIf *w=None*, equal weights are used. In that case:\n```\n RMSD = sqrt( (Σ_n Σ_i |X[n][i] - (Σ_j c*R[i][j]*x[n][j] + T[i])|^2) / N )\n```\n...where:\n```\n R = a rotation matrix (a 3x3 numpy array representing the rotation. |R|=1)\n T = a translation vector (a 1-D numpy array containing x,y,z displacements)\n c = a scalar (a number, 1 by default)\n```\nThis function returns a 4-tuple containing the optimal values of:\n```\n (RMSD, R, T, c)\n```\nIf the rotation angle and axis are needed, then set the *report_quaternion*\nargument to *True*. In that case, the function will return this 4-tuple instead:\n```\n (RMSD, q, T, c)\n```\n ...where *q* is the\n[quaternion corresponding to rotation *R*](https://en.wikipedia.org/wiki/Quaternions_and_spatial_rotation),\nfrom which the rotation angle and rotation axis can be easily determined.\n\nThis function implements a more general variant of the method from this paper:\nR. Diamond, (1988)\n"A Note on the Rotational Superposition Problem",\n Acta Cryst. A44, pp. 211-216.\n\nThis version has been augmented slightly to support scale transformations. (I.E. multiplication by scalars. This can be useful for the registration of two different annotated volumetric 3-D images of the same object taken at different magnifications.)\n\nNote that if you enable scale transformations (i.e. if *allow_rescale=True*), you should be wary if the function returns a negative **c** value. Negative **c** values correspond to inversions (reflections). 
For this reason, if you are using this function to compare the conformations of molecules, you should probably set *allow_rescale=False*. This will prevent matching a molecule with its stereoisomer.\n\nNote: A C++ version of this repository is available at\nhttps://github.com/jewettaij/superpose3d_cpp\n"""\n , long_description_content_type=\'text/markdown\', author=\'<NAME>\',\n author_email=\'<EMAIL>\', url=\'https://github.com/jewettaij/superpose3d\',\n download_url=\n \'https://github.com/jewettaij/superpose3d/archive/v1.0.1.zip\', version=\n \'1.0.1\', install_requires=[\'numpy\'], keywords=[\'registration\', \'3d\',\n \'structure-comparison\', \'molecular-structure\', \'clem\'], license=\'MIT\',\n classifiers=[\'Development Status :: 5 - Production/Stable\',\n \'License :: OSI Approved :: MIT License\', \'Environment :: Console\',\n \'Operating System :: MacOS :: MacOS X\',\n \'Operating System :: POSIX :: Linux\',\n \'Operating System :: Microsoft :: Windows\',\n \'Programming Language :: Python\',\n \'Programming Language :: Python :: 2.7\',\n \'Programming Language :: Python :: 3.4\',\n \'Programming Language :: Python :: 3.8\',\n \'Topic :: Scientific/Engineering\'], zip_safe=True, include_package_data\n =True)\n', (60, 4049), False, 'from setuptools import setup\n')] |
import os
import tempfile
from glob import glob
import json
from collections import OrderedDict
import numpy as np
from .adding_features import adding_no_features
def iterate_json_data(filepath,
                      columns_to_keep=None,
                      feature_adder=adding_no_features,
                      data_filter=lambda datum: True,
                      missing_val_default=None):
    """Yield datums parsed from a JSON-lines file, one per input line.

    Each line is JSON-decoded, passed through *feature_adder*, and skipped
    when *data_filter* returns False.  When *columns_to_keep* is given, only
    those columns are yielded (in order); a column whose value is None is
    replaced by its entry in *missing_val_default*, if any.

    Fixes over the original: the input file is closed deterministically via
    a ``with`` block (it previously leaked on early generator disposal or
    exceptions), and the mutable ``{}`` default argument is replaced by
    ``None`` (behavior unchanged for callers).
    """
    defaults = {} if missing_val_default is None else missing_val_default
    with open(filepath, 'r') as inputfile:
        for line in inputfile:
            datum = feature_adder(json.loads(line))
            if not data_filter(datum):
                continue
            if columns_to_keep is None:
                yield OrderedDict(datum)
                continue
            filtered_datum = OrderedDict()
            for column in columns_to_keep:
                value = datum[column]
                # Substitute the configured default for explicit JSON nulls.
                if value is None and column in defaults:
                    value = defaults[column]
                filtered_datum[column] = value
            yield filtered_datum
def iterate_json_files_directory(dir,
                                 columns_to_keep=None,
                                 feature_adder=adding_no_features,
                                 data_filter=lambda datum: True,
                                 missing_val_default={}
                                 ):
    """Yield datums from every ``*.json`` file found directly in *dir*.

    Files are processed by :func:`iterate_json_data` with the same keyword
    arguments; progress information is printed before iteration starts.
    """
    columns_label = ', '.join(columns_to_keep) if columns_to_keep is not None else 'ALL'
    print('\tReading {}'.format(dir))
    print('\tColumns: {}'.format(columns_label))
    pattern = os.path.join(dir, '*.json')
    for json_path in glob(pattern):
        yield from iterate_json_data(json_path,
                                     columns_to_keep=columns_to_keep,
                                     feature_adder=feature_adder,
                                     data_filter=data_filter,
                                     missing_val_default=missing_val_default)
def process_data(traindatafilepath, qual_features, binary_features, quant_features,
                 target_label,
                 feature_adder=adding_no_features,
                 nb_lines_per_tempfile=10000,
                 data_filter=lambda datum: True,
                 missing_val_default={},
                 filename_fmt='data_{0:09d}.json'):
    """Shard the training data into JSON-lines temp files of bounded size.

    Reads *traindatafilepath* via :func:`iterate_json_data`, keeping only the
    feature columns plus *target_label*, and writes the surviving datums into
    numbered files (*filename_fmt*) of at most *nb_lines_per_tempfile* lines
    inside a fresh temporary directory.

    Returns ``(tempdir, nbdata)`` — the ``TemporaryDirectory`` handle (caller
    must keep it alive, its files vanish when it is garbage-collected) and
    the number of datums written.

    Bug fix: the original unconditionally called ``tmpfile.close()`` and
    crashed with ``AttributeError`` when the input produced zero datums
    (``tmpfile`` was still ``None``); the close is now guarded.
    """
    tempdir = tempfile.TemporaryDirectory()
    fileid = 0
    tmpfile = None
    nbdata = 0
    columns = qual_features + binary_features + quant_features + [target_label]
    for i, datum in enumerate(iterate_json_data(traindatafilepath,
                                                columns_to_keep=columns,
                                                feature_adder=feature_adder,
                                                data_filter=data_filter,
                                                missing_val_default=missing_val_default)):
        if i % nb_lines_per_tempfile == 0:
            # Start a new shard; close the previous one first.
            if tmpfile is not None:
                tmpfile.close()
            tmpfile = open(os.path.join(tempdir.name, filename_fmt.format(fileid)), 'w')
            fileid += 1
            print('\tRead {} lines...'.format(i))
        nbdata += 1
        tmpfile.write(json.dumps(datum) + '\n')
    if tmpfile is not None:  # guard: no input datums -> nothing was opened
        tmpfile.close()
    return tempdir, nbdata
def assign_partitions(nbdata, cv_nfold, heldout_fraction, seed=None):
    """Randomly label each of *nbdata* records with a cross-validation fold.

    Every record receives either ``-1`` (held-out set, with probability
    *heldout_fraction*) or a fold index in ``0..cv_nfold-1`` (remaining
    probability mass split evenly).  Passing *seed* makes the assignment
    reproducible.
    """
    if seed is not None:
        np.random.seed(seed)
    fold_probability = (1 - heldout_fraction) / cv_nfold
    labels = list(range(-1, cv_nfold))  # -1 marks the hold-out set
    probabilities = [heldout_fraction] + [fold_probability] * cv_nfold
    return np.random.choice(labels, p=probabilities, size=nbdata)
| [
"tempfile.TemporaryDirectory",
"json.loads",
"collections.OrderedDict",
"json.dumps",
"os.path.join",
"numpy.random.seed"
] | [((2255, 2284), 'tempfile.TemporaryDirectory', 'tempfile.TemporaryDirectory', ([], {}), '()\n', (2282, 2284), False, 'import tempfile\n'), ((480, 496), 'json.loads', 'json.loads', (['line'], {}), '(line)\n', (490, 496), False, 'import json\n'), ((1497, 1524), 'os.path.join', 'os.path.join', (['dir', '"""*.json"""'], {}), "(dir, '*.json')\n", (1509, 1524), False, 'import os\n'), ((3259, 3279), 'numpy.random.seed', 'np.random.seed', (['seed'], {}), '(seed)\n', (3273, 3279), True, 'import numpy as np\n'), ((659, 672), 'collections.OrderedDict', 'OrderedDict', ([], {}), '()\n', (670, 672), False, 'from collections import OrderedDict\n'), ((992, 1010), 'collections.OrderedDict', 'OrderedDict', (['datum'], {}), '(datum)\n', (1003, 1010), False, 'from collections import OrderedDict\n'), ((3083, 3100), 'json.dumps', 'json.dumps', (['datum'], {}), '(datum)\n', (3093, 3100), False, 'import json\n')] |
# -*- coding: utf-8 -*-
###########
# IMPORTS #
###########
# Standard
from os.path import (
abspath as _os_abspath,
dirname as _os_dirname,
isfile as _os_isfile,
join as _os_join
)
from json import (
load as _json_load
)
#############
# CONSTANTS #
#############
# Sentinel strings used inside the JSON fixture files (strict JSON has no
# literal NaN/Infinity), mapped to the float values they stand for.
_replacements = [
    ('NaN', float('nan')),
    ('-Infinity', float('-inf')),
    ('Infinity', float('inf'))
]
###########
# CACHING #
###########
# Per-module fixture cache: test name -> parsed fixture (or None when no
# fixture file exists), so each fixtures_<name>.json is read at most once.
_fixtures = {}
#############
# FUNCTIONS #
#############
def _sanitize_fixture_recursive(element, replacements):
if isinstance(element, dict):
return {key: _sanitize_fixture_recursive(value, replacements) for key, value in element.items()}
if isinstance(element, list):
return [_sanitize_fixture_recursive(item, replacements) for item in element]
for replacement in replacements:
if element == replacement[0]:
return replacement[1]
return element
def _parse_fixture_dictionary(fixture, fixture_names, subtest_name):
    """Build pytest parametrization (values, ids) from a dict-shaped fixture.

    Looks up the '<subtest>_data' key in *fixture*; its value may be a
    single case dict or a list of case dicts.  Collection is all-or-nothing:
    unless every case supplies all of *fixture_names*, ([], []) is returned.
    """
    values = []
    ids = []
    expected_args = len(fixture_names)
    # Fixture key for this test, e.g. test_foo -> 'foo_data'.
    subtest_reference = f'{subtest_name.replace("test_", "")}_data'
    if subtest_reference in fixture:
        fixture_data = fixture[subtest_reference]
        if isinstance(fixture_data, dict):
            # Single case: gather the requested fixture values in order.
            values_current = tuple(fixture_data[fixture_name] for fixture_name in fixture_names if fixture_name in fixture_data)
            if len(values_current) == expected_args:
                values.append(values_current)
                ids.append(f'{subtest_name}')
        elif isinstance(fixture_data, list):
            for index, case in enumerate(fixture_data):
                # Prefer an explicit 'id'; fall back to a 1-based index.
                case_id = f'_{case["id"]}' if 'id' in case else f' #{str(index + 1)}'
                values_current = tuple(case[fixture_name] for fixture_name in fixture_names if fixture_name in case)
                if len(values_current) == expected_args:
                    values.append(values_current)
                    ids.append(f'{subtest_name}{case_id}')
            # All-or-nothing: discard everything if any case was incomplete.
            if len(values) != len(fixture_data):
                values = []
                ids = []
    return values, ids
def _parse_fixture_list(fixture, fixture_names, subtest_name):
    """Build pytest parametrization (values, ids) from a list-shaped fixture.

    Two layouts are supported: nested (a case embeds a '<subtest>_data' list
    of sub-cases whose values extend the case-level values) and flat (each
    case directly carries the fixture values).  Collection is all-or-nothing:
    if any case contributes nothing, ([], []) is returned.
    """
    values = []
    ids = []
    expected_args = len(fixture_names)
    # Fixture key for this test, e.g. test_foo -> 'foo_data'.
    subtest_reference = f'{subtest_name.replace("test_", "")}_data'
    if any(subtest_reference in case for case in fixture):
        # Nested layout: flag each case that yields at least one sub-case.
        flags = [False] * len(fixture)
        for index_case, case in enumerate(fixture):
            if subtest_reference in case:
                case_id = case['id'] if 'id' in case else f' #{str(index_case + 1)}'
                # Case-level values are shared by every sub-case below.
                case_values = tuple(case[fixture_name] for fixture_name in fixture_names if fixture_name in case)
                for index_subcase, subcase in enumerate(case[subtest_reference]):
                    values_current = case_values + tuple(subcase[fixture_name] for fixture_name in fixture_names if fixture_name in subcase)
                    if len(values_current) == expected_args:
                        values.append(values_current)
                        ids.append(f'{subtest_name} {case_id}-{str(index_subcase + 1)}')
                        flags[index_case] = True
        # NOTE(review): cases lacking the nested key never set their flag, so
        # a mixed nested/flat fixture always discards everything here.
        if not all(flags):
            values = []
            ids = []
    else:
        # Flat layout: one parametrized invocation per case.
        for index, case in enumerate(fixture):
            case_id = case['id'] if 'id' in case else f' #{str(index + 1)}'
            values_current = tuple(case[fixture_name] for fixture_name in fixture_names if fixture_name in case)
            if len(values_current) == expected_args:
                values.append(values_current)
                ids.append(f'{subtest_name} {case_id}')
        # All-or-nothing: discard everything if any case was incomplete.
        if len(values) != len(fixture):
            values = []
            ids = []
    return values, ids
#########
# SETUP #
#########
def pytest_configure(config):
    """Register warning filters and the custom 'slow' marker with pytest."""
    ini_values = (
        ('filterwarnings', 'ignore::DeprecationWarning'),
        ('filterwarnings', 'ignore::PendingDeprecationWarning'),
        ('filterwarnings', 'ignore::matplotlib.cbook.mplDeprecation'),
        ('markers', 'slow: mark tests as slow (exclude them with \'-m "not slow"\').'),
    )
    for name, line in ini_values:
        config.addinivalue_line(name, line)
def pytest_generate_tests(metafunc):
    """Auto-parametrize tests from fixtures/fixtures_<module>.json, if any.

    The fixture file is loaded (and sanitized via _replacements) once per
    test module and cached in the module-level _fixtures dict.
    """
    module = metafunc.module.__name__
    func = metafunc.definition.name
    mark = metafunc.definition.get_closest_marker('parametrize')
    names = metafunc.fixturenames
    # Module names look like 'test_<name>'; extract '<name>'.
    test_index = module.find('_') + 1
    test_name = module[test_index:]
    if test_name not in _fixtures:
        base_directory = _os_abspath(_os_dirname(__file__))
        fixtures_file = _os_join(base_directory, f'fixtures/fixtures_{test_name}.json')
        if not _os_isfile(fixtures_file):
            # Cache the miss too, so the filesystem is probed only once.
            _fixtures[test_name] = None
        else:
            with open(fixtures_file, 'r') as file:
                fixture = _json_load(file)
                # Restore NaN/Infinity floats from their JSON sentinels.
                fixture = _sanitize_fixture_recursive(fixture, _replacements)
                _fixtures[test_name] = fixture
    fixture = _fixtures[test_name]
    values = []
    ids = []
    # Only parametrize tests that request fixtures, are not already
    # parametrized explicitly, and have a non-empty fixture available.
    if len(names) > 0 and mark is None and fixture is not None and len(fixture) > 0:
        if isinstance(fixture, dict):
            values, ids = _parse_fixture_dictionary(fixture, names, func)
        elif isinstance(fixture, list):
            values, ids = _parse_fixture_list(fixture, names, func)
    metafunc.parametrize(names, values, False, ids)
| [
"os.path.isfile",
"os.path.dirname",
"os.path.join",
"json.load"
] | [((4664, 4727), 'os.path.join', '_os_join', (['base_directory', 'f"""fixtures/fixtures_{test_name}.json"""'], {}), "(base_directory, f'fixtures/fixtures_{test_name}.json')\n", (4672, 4727), True, 'from os.path import abspath as _os_abspath, dirname as _os_dirname, isfile as _os_isfile, join as _os_join\n'), ((4617, 4638), 'os.path.dirname', '_os_dirname', (['__file__'], {}), '(__file__)\n', (4628, 4638), True, 'from os.path import abspath as _os_abspath, dirname as _os_dirname, isfile as _os_isfile, join as _os_join\n'), ((4744, 4769), 'os.path.isfile', '_os_isfile', (['fixtures_file'], {}), '(fixtures_file)\n', (4754, 4769), True, 'from os.path import abspath as _os_abspath, dirname as _os_dirname, isfile as _os_isfile, join as _os_join\n'), ((4903, 4919), 'json.load', '_json_load', (['file'], {}), '(file)\n', (4913, 4919), True, 'from json import load as _json_load\n')] |
import hashlib
import hmac
import requests
import time
from functools import reduce
class CryptoMKT(object):
    """Minimal client for the CryptoMarket (cryptomkt.com) REST API v1.

    Public endpoints (market, ticker, book, trades) need no credentials.
    Private endpoints (balance, orders) require api_key/api_secret and are
    signed with HMAC-SHA384 over 'timestamp/version/endpoint' plus the
    request body (see get_headers).
    """
    BASE_URL = 'https://api.cryptomkt.com'
    API_VERSION = 'v1'
    ENDPOINT_BALANCE = 'balance'
    ENDPOINT_BOOK = 'book'
    ENDPOINT_MARKETS = 'market'
    ENDPOINT_TICKER = 'ticker'
    ENDPOINT_TRADES = 'trades'
    def __init__(self, api_key=None, api_secret=None):
        # Credentials are optional; only private endpoints need them.
        self.api_key = api_key
        self.api_secret = api_secret
    def check_has_tokens(self):
        """Raise InvalidTokensException unless both credentials are set."""
        if self.api_key is None:
            raise InvalidTokensException('API Key is required')
        if self.api_secret is None:
            raise InvalidTokensException('API Secret is required')
    def get_headers(self, endpoint, body):
        """Build the authentication headers for a signed request.

        The signature is HMAC-SHA384 of 'timestamp/v1/endpoint{body}',
        keyed with the API secret; *body* is the concatenated payload
        values (empty string for GETs).
        """
        timestamp = str(time.time())
        payload = '{timestamp}/{version}/{endpoint}{body}'.format(
            timestamp=timestamp,
            version=self.API_VERSION,
            endpoint=endpoint,
            body=body
        )
        signature = hmac.new(
            self.api_secret.encode(),
            payload.encode(),
            hashlib.sha384
        ).hexdigest()
        return {
            'X-MKT-APIKEY': self.api_key,
            'X-MKT-SIGNATURE': signature,
            'X-MKT-TIMESTAMP': timestamp
        }
    def get(self, endpoint, params=None, headers=None):
        """Issue a GET to *endpoint* and return the decoded JSON response."""
        return requests.get(
            '{}/{}/{}'.format(self.BASE_URL, self.API_VERSION, endpoint),
            params=params,
            headers=headers
        ).json()
    def private_get(self, endpoint, params=None):
        """Issue a signed GET (the signature covers an empty body)."""
        self.check_has_tokens()
        headers = self.get_headers(endpoint, '')
        return self.get(endpoint, params=params, headers=headers)
    def post(self, endpoint, payload):
        """Issue a signed POST; the signature covers the payload values
        concatenated in key-sorted order."""
        self.check_has_tokens()
        body = [
            str(p[1]) for p in sorted(payload.items(), key=lambda p: p[0])
        ]
        headers = self.get_headers(endpoint, reduce(str.__add__, body))
        return requests.post(
            '{}/{}/{}'.format(self.BASE_URL, self.API_VERSION, endpoint),
            data=payload,
            headers=headers
        ).json()
    def markets(self):
        """Return the 'market' endpoint response (available pairs)."""
        return self.get(self.ENDPOINT_MARKETS)
    def ticker(self):
        """Return the 'ticker' endpoint response."""
        return self.get(self.ENDPOINT_TICKER)
    def book(self, market, order_type, page=0, limit=20):
        """Return one side of the order book for *market*, paginated.

        # presumably order_type is 'buy' or 'sell' -- confirm with API docs
        """
        params = {
            'market': market,
            'type': order_type,
            'page': page,
            'limit': limit
        }
        return self.get(self.ENDPOINT_BOOK, params=params)
    def trades(self, market, start=None, end=None, page=0, limit=20):
        """Return trades for *market*, optionally bounded by start/end."""
        params = {
            'market': market,
            'page': page,
            'limit': limit
        }
        if start is not None:
            params['start'] = start
        if end is not None:
            params['end'] = end
        return self.get(self.ENDPOINT_TRADES, params=params)
    def balance(self):
        """Return the account balance (requires credentials)."""
        return self.private_get(self.ENDPOINT_BALANCE)
    @property
    def orders(self):
        """Order-management sub-API bound to this client instance."""
        return CryptoMKTOrdersAPI(self)
class CryptoMKTOrdersAPI(object):
    """Order-management endpoints of the CryptoMarket API.

    Thin wrapper that assembles the request parameters for each orders/*
    endpoint and delegates the HTTP call to the parent API wrapper.
    """
    ENDPOINT_ACTIVE = 'orders/active'
    ENDPOINT_CANCEL = 'orders/cancel'
    ENDPOINT_CREATE = 'orders/create'
    ENDPOINT_EXECUTED = 'orders/executed'
    ENDPOINT_STATUS = 'orders/status'
    def __init__(self, api_wrapper):
        self.api = api_wrapper
    def active(self, market, page=0, limit=20):
        """Query the orders/active endpoint for *market* (paginated)."""
        query = dict(market=market, page=page, limit=limit)
        return self.api.private_get(self.ENDPOINT_ACTIVE, query)
    def executed(self, market, page=0, limit=20):
        """Query the orders/executed endpoint for *market* (paginated)."""
        query = dict(market=market, page=page, limit=limit)
        return self.api.private_get(self.ENDPOINT_EXECUTED, query)
    def create(self, market, type, amount, price):
        """Submit a new order of the given type, amount and price."""
        payload = dict(market=market, type=type, amount=amount, price=price)
        return self.api.post(self.ENDPOINT_CREATE, payload)
    def cancel(self, order_id):
        """Cancel the order identified by *order_id*."""
        return self.api.post(self.ENDPOINT_CANCEL, {'id': order_id})
    def status(self, order_id):
        """Fetch the current state of the order identified by *order_id*."""
        return self.api.private_get(self.ENDPOINT_STATUS, {'id': order_id})
class InvalidTokensException(Exception):
    """Raised when a private API call is attempted without credentials."""
| [
"functools.reduce",
"time.time"
] | [((762, 773), 'time.time', 'time.time', ([], {}), '()\n', (771, 773), False, 'import time\n'), ((1928, 1953), 'functools.reduce', 'reduce', (['str.__add__', 'body'], {}), '(str.__add__, body)\n', (1934, 1953), False, 'from functools import reduce\n')] |
# -*- coding: utf-8 -*-
"""
Combination of
http://scipy-central.org/item/52/1/zplane-function
and
http://www.dsprelated.com/showcode/244.php
with my own modifications
"""
# Copyright (c) 2011 <NAME>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
# The following is derived from the slides presented by
# <NAME> for CS506/606 "Special Topics: Speech Signal Processing"
# CSLU / OHSU, Spring Term 2011.
import numpy as np
import matplotlib.pyplot as plt
from matplotlib import patches
from matplotlib.pyplot import axvline, axhline
from collections import defaultdict
def zplane(z, p, filename=None):
    """Plot the complex z-plane given zeros and poles.

    Parameters
    ----------
    z : array-like of complex
        Zeros of the transfer function.
    p : array-like of complex
        Poles of the transfer function.
    filename : str, optional
        When given, the figure is saved there instead of shown.

    Fixes over the original: a leftover debug ``print(d)`` is removed, and
    the copy-pasted duplicate-annotation loops for poles and zeros are
    factored into a single helper.
    """
    # get a figure/plot
    ax = plt.subplot(2, 2, 1)
    # TODO: should just inherit whatever subplot it's called in?
    # Add unit circle and zero axes
    unit_circle = patches.Circle((0, 0), radius=1, fill=False,
                                 color='black', ls='solid', alpha=0.1)
    ax.add_patch(unit_circle)
    axvline(0, color='0.7')
    axhline(0, color='0.7')
    # Plot the poles and set marker properties
    poles = plt.plot(p.real, p.imag, 'x', markersize=9, alpha=0.5)
    # Plot the zeros and set marker properties
    zeros = plt.plot(z.real, z.imag, 'o', markersize=9,
                     color='none', alpha=0.5,
                     markeredgecolor=poles[0].get_color(),  # same color as poles
                     )
    # Scale axes to fit
    r = 1.5 * np.amax(np.concatenate((abs(z), abs(p), [1])))
    plt.axis('scaled')
    plt.axis([-r, r, -r, r])
    # If there are multiple poles or zeros at the same point, put a
    # superscript next to them.
    # TODO: can this be made to self-update when zoomed?
    _annotate_multiplicity(ax, poles[0])
    _annotate_multiplicity(ax, zeros[0])
    if filename is None:
        plt.show()
    else:
        plt.savefig(filename)
        print('Pole-zero plot saved to ' + str(filename))


def _annotate_multiplicity(ax, line):
    """Annotate coincident markers of *line* with a superscript count.

    Duplicates are detected by identical (rounded) pixel coordinates —
    hacky, but matches the original behavior.
    """
    xy_pixels = ax.transData.transform(np.vstack(line.get_data()).T)
    counts = defaultdict(int)
    coords = {}
    for xy in xy_pixels:
        # dict keys are rounded ints for matching, but the stored coords
        # stay floats so the text stays accurate while zooming.
        key = tuple(np.rint(xy).astype('int'))
        counts[key] += 1
        coords[key] = xy
    for key, count in counts.items():
        if count > 1:
            x, y = ax.transData.inverted().transform(coords[key])
            plt.text(x, y,
                     r' ${}^{' + str(count) + '}$',
                     fontsize=13,
                     )
print( 'Pole-zero plot saved to ' + str(filename))
if __name__ == "__main__":
    # Demo: design a band-pass Butterworth filter and show its pole-zero
    # diagram, impulse response, frequency response and phase.
    from scipy.signal import (freqz, butter, bessel, cheby1, cheby2, ellip,
        tf2zpk, zpk2tf, lfilter, buttap, bilinear, cheb2ord, cheb2ap
        )
    from numpy import asarray, tan, array, pi, arange, cos, log10, unwrap, angle
    from matplotlib.pyplot import (stem, title, grid, show, plot, xlabel,
                                   ylabel, subplot, xscale, figure, xlim,
                                   margins)
    # # Cosine function
    # omega = pi/4
    # b = array([1.0, -cos(omega)])
    # a = array([1, -2*cos(omega), 1.0])
    # 2nd-order band-pass with normalized band edges 0.06-0.7.
    b, a = butter(2, [0.06, 0.7], 'bandpass')
    # Get the poles and zeros
    z, p, k = tf2zpk(b, a)
    # Create zero-pole plot
    figure(figsize=(16, 9))
    subplot(2, 2, 1)
    zplane(z, p)
    grid(True, color='0.9', linestyle='-', which='both', axis='both')
    title('Poles and zeros')
    # Display zeros, poles and gain
    print( str(len(z)) + " zeros: " + str(z))
    print( str(len(p)) + " poles: " + str(p))
    print( "gain: " + str(k))
    # Impulse response (filter a unit impulse over 20 samples)
    index = arange(0,20)
    u = 1.0*(index==0)
    y = lfilter(b, a, u)
    subplot(2, 2, 3)
    stem(index,y)
    title('Impulse response')
    margins(0, 0.1)
    grid(True, color='0.9', linestyle='-', which='both', axis='both')
    # NOTE(review): show() blocks until the window is closed; it is called
    # after each panel here, so the remaining panels appear incrementally.
    show()
    # Frequency response
    w, h = freqz(b, a)
    subplot(2, 2, 2)
    plot(w/pi, 20*log10(abs(h)))
    xscale('log')
    title('Frequency response')
    xlabel('Normalized frequency')
    ylabel('Amplitude [dB]')
    margins(0, 0.1)
    grid(True, color = '0.7', linestyle='-', which='major', axis='both')
    grid(True, color = '0.9', linestyle='-', which='minor', axis='both')
    show()
    # Phase (unwrapped, in degrees)
    subplot(2, 2, 4)
    plot(w/pi, 180/pi * unwrap(angle(h)))
    xscale('log')
    xlabel('Normalized frequency')
    ylabel('Phase [degrees]')
    grid(True, color = '0.7', linestyle='-', which='major')
    grid(True, color = '0.9', linestyle='-', which='minor')
show() | [
"matplotlib.pyplot.grid",
"matplotlib.pyplot.ylabel",
"matplotlib.pyplot.margins",
"numpy.arange",
"matplotlib.pyplot.xlabel",
"matplotlib.pyplot.plot",
"matplotlib.pyplot.axhline",
"numpy.rint",
"scipy.signal.tf2zpk",
"matplotlib.pyplot.axis",
"matplotlib.patches.Circle",
"matplotlib.pyplot.s... | [((1321, 1341), 'matplotlib.pyplot.subplot', 'plt.subplot', (['(2)', '(2)', '(1)'], {}), '(2, 2, 1)\n', (1332, 1341), True, 'import matplotlib.pyplot as plt\n'), ((1466, 1552), 'matplotlib.patches.Circle', 'patches.Circle', (['(0, 0)'], {'radius': '(1)', 'fill': '(False)', 'color': '"""black"""', 'ls': '"""solid"""', 'alpha': '(0.1)'}), "((0, 0), radius=1, fill=False, color='black', ls='solid',\n alpha=0.1)\n", (1480, 1552), False, 'from matplotlib import patches\n'), ((1615, 1638), 'matplotlib.pyplot.axvline', 'axvline', (['(0)'], {'color': '"""0.7"""'}), "(0, color='0.7')\n", (1622, 1638), False, 'from matplotlib.pyplot import axvline, axhline\n'), ((1643, 1666), 'matplotlib.pyplot.axhline', 'axhline', (['(0)'], {'color': '"""0.7"""'}), "(0, color='0.7')\n", (1650, 1666), False, 'from matplotlib.pyplot import axvline, axhline\n'), ((1731, 1785), 'matplotlib.pyplot.plot', 'plt.plot', (['p.real', 'p.imag', '"""x"""'], {'markersize': '(9)', 'alpha': '(0.5)'}), "(p.real, p.imag, 'x', markersize=9, alpha=0.5)\n", (1739, 1785), True, 'import matplotlib.pyplot as plt\n'), ((2112, 2130), 'matplotlib.pyplot.axis', 'plt.axis', (['"""scaled"""'], {}), "('scaled')\n", (2120, 2130), True, 'import matplotlib.pyplot as plt\n'), ((2135, 2159), 'matplotlib.pyplot.axis', 'plt.axis', (['[-r, r, -r, r]'], {}), '([-r, r, -r, r])\n', (2143, 2159), True, 'import matplotlib.pyplot as plt\n'), ((2818, 2834), 'collections.defaultdict', 'defaultdict', (['int'], {}), '(int)\n', (2829, 2834), False, 'from collections import defaultdict\n'), ((2848, 2866), 'collections.defaultdict', 'defaultdict', (['tuple'], {}), '(tuple)\n', (2859, 2866), False, 'from collections import defaultdict\n'), ((3281, 3297), 'collections.defaultdict', 'defaultdict', (['int'], {}), '(int)\n', (3292, 3297), False, 'from collections import defaultdict\n'), ((3311, 3329), 'collections.defaultdict', 'defaultdict', (['tuple'], {}), '(tuple)\n', (3322, 3329), False, 'from collections import 
defaultdict\n'), ((4496, 4530), 'scipy.signal.butter', 'butter', (['(2)', '[0.06, 0.7]', '"""bandpass"""'], {}), "(2, [0.06, 0.7], 'bandpass')\n", (4502, 4530), False, 'from scipy.signal import freqz, butter, bessel, cheby1, cheby2, ellip, tf2zpk, zpk2tf, lfilter, buttap, bilinear, cheb2ord, cheb2ap\n'), ((4576, 4588), 'scipy.signal.tf2zpk', 'tf2zpk', (['b', 'a'], {}), '(b, a)\n', (4582, 4588), False, 'from scipy.signal import freqz, butter, bessel, cheby1, cheby2, ellip, tf2zpk, zpk2tf, lfilter, buttap, bilinear, cheb2ord, cheb2ap\n'), ((4622, 4645), 'matplotlib.pyplot.figure', 'figure', ([], {'figsize': '(16, 9)'}), '(figsize=(16, 9))\n', (4628, 4645), False, 'from matplotlib.pyplot import stem, title, grid, show, plot, xlabel, ylabel, subplot, xscale, figure, xlim, margins\n'), ((4650, 4666), 'matplotlib.pyplot.subplot', 'subplot', (['(2)', '(2)', '(1)'], {}), '(2, 2, 1)\n', (4657, 4666), False, 'from matplotlib.pyplot import stem, title, grid, show, plot, xlabel, ylabel, subplot, xscale, figure, xlim, margins\n'), ((4688, 4753), 'matplotlib.pyplot.grid', 'grid', (['(True)'], {'color': '"""0.9"""', 'linestyle': '"""-"""', 'which': '"""both"""', 'axis': '"""both"""'}), "(True, color='0.9', linestyle='-', which='both', axis='both')\n", (4692, 4753), False, 'from matplotlib.pyplot import stem, title, grid, show, plot, xlabel, ylabel, subplot, xscale, figure, xlim, margins\n'), ((4758, 4782), 'matplotlib.pyplot.title', 'title', (['"""Poles and zeros"""'], {}), "('Poles and zeros')\n", (4763, 4782), False, 'from matplotlib.pyplot import stem, title, grid, show, plot, xlabel, ylabel, subplot, xscale, figure, xlim, margins\n'), ((4986, 4999), 'numpy.arange', 'arange', (['(0)', '(20)'], {}), '(0, 20)\n', (4992, 4999), False, 'from numpy import asarray, tan, array, pi, arange, cos, log10, unwrap, angle\n'), ((5030, 5046), 'scipy.signal.lfilter', 'lfilter', (['b', 'a', 'u'], {}), '(b, a, u)\n', (5037, 5046), False, 'from scipy.signal import freqz, butter, bessel, cheby1, 
cheby2, ellip, tf2zpk, zpk2tf, lfilter, buttap, bilinear, cheb2ord, cheb2ap\n'), ((5051, 5067), 'matplotlib.pyplot.subplot', 'subplot', (['(2)', '(2)', '(3)'], {}), '(2, 2, 3)\n', (5058, 5067), False, 'from matplotlib.pyplot import stem, title, grid, show, plot, xlabel, ylabel, subplot, xscale, figure, xlim, margins\n'), ((5072, 5086), 'matplotlib.pyplot.stem', 'stem', (['index', 'y'], {}), '(index, y)\n', (5076, 5086), False, 'from matplotlib.pyplot import stem, title, grid, show, plot, xlabel, ylabel, subplot, xscale, figure, xlim, margins\n'), ((5090, 5115), 'matplotlib.pyplot.title', 'title', (['"""Impulse response"""'], {}), "('Impulse response')\n", (5095, 5115), False, 'from matplotlib.pyplot import stem, title, grid, show, plot, xlabel, ylabel, subplot, xscale, figure, xlim, margins\n'), ((5120, 5135), 'matplotlib.pyplot.margins', 'margins', (['(0)', '(0.1)'], {}), '(0, 0.1)\n', (5127, 5135), False, 'from matplotlib.pyplot import stem, title, grid, show, plot, xlabel, ylabel, subplot, xscale, figure, xlim, margins\n'), ((5140, 5205), 'matplotlib.pyplot.grid', 'grid', (['(True)'], {'color': '"""0.9"""', 'linestyle': '"""-"""', 'which': '"""both"""', 'axis': '"""both"""'}), "(True, color='0.9', linestyle='-', which='both', axis='both')\n", (5144, 5205), False, 'from matplotlib.pyplot import stem, title, grid, show, plot, xlabel, ylabel, subplot, xscale, figure, xlim, margins\n'), ((5210, 5216), 'matplotlib.pyplot.show', 'show', ([], {}), '()\n', (5214, 5216), False, 'from matplotlib.pyplot import stem, title, grid, show, plot, xlabel, ylabel, subplot, xscale, figure, xlim, margins\n'), ((5258, 5269), 'scipy.signal.freqz', 'freqz', (['b', 'a'], {}), '(b, a)\n', (5263, 5269), False, 'from scipy.signal import freqz, butter, bessel, cheby1, cheby2, ellip, tf2zpk, zpk2tf, lfilter, buttap, bilinear, cheb2ord, cheb2ap\n'), ((5274, 5290), 'matplotlib.pyplot.subplot', 'subplot', (['(2)', '(2)', '(2)'], {}), '(2, 2, 2)\n', (5281, 5290), False, 'from matplotlib.pyplot 
import stem, title, grid, show, plot, xlabel, ylabel, subplot, xscale, figure, xlim, margins\n'), ((5328, 5341), 'matplotlib.pyplot.xscale', 'xscale', (['"""log"""'], {}), "('log')\n", (5334, 5341), False, 'from matplotlib.pyplot import stem, title, grid, show, plot, xlabel, ylabel, subplot, xscale, figure, xlim, margins\n'), ((5347, 5374), 'matplotlib.pyplot.title', 'title', (['"""Frequency response"""'], {}), "('Frequency response')\n", (5352, 5374), False, 'from matplotlib.pyplot import stem, title, grid, show, plot, xlabel, ylabel, subplot, xscale, figure, xlim, margins\n'), ((5379, 5409), 'matplotlib.pyplot.xlabel', 'xlabel', (['"""Normalized frequency"""'], {}), "('Normalized frequency')\n", (5385, 5409), False, 'from matplotlib.pyplot import stem, title, grid, show, plot, xlabel, ylabel, subplot, xscale, figure, xlim, margins\n'), ((5414, 5438), 'matplotlib.pyplot.ylabel', 'ylabel', (['"""Amplitude [dB]"""'], {}), "('Amplitude [dB]')\n", (5420, 5438), False, 'from matplotlib.pyplot import stem, title, grid, show, plot, xlabel, ylabel, subplot, xscale, figure, xlim, margins\n'), ((5443, 5458), 'matplotlib.pyplot.margins', 'margins', (['(0)', '(0.1)'], {}), '(0, 0.1)\n', (5450, 5458), False, 'from matplotlib.pyplot import stem, title, grid, show, plot, xlabel, ylabel, subplot, xscale, figure, xlim, margins\n'), ((5463, 5529), 'matplotlib.pyplot.grid', 'grid', (['(True)'], {'color': '"""0.7"""', 'linestyle': '"""-"""', 'which': '"""major"""', 'axis': '"""both"""'}), "(True, color='0.7', linestyle='-', which='major', axis='both')\n", (5467, 5529), False, 'from matplotlib.pyplot import stem, title, grid, show, plot, xlabel, ylabel, subplot, xscale, figure, xlim, margins\n'), ((5536, 5602), 'matplotlib.pyplot.grid', 'grid', (['(True)'], {'color': '"""0.9"""', 'linestyle': '"""-"""', 'which': '"""minor"""', 'axis': '"""both"""'}), "(True, color='0.9', linestyle='-', which='minor', axis='both')\n", (5540, 5602), False, 'from matplotlib.pyplot import stem, title, 
grid, show, plot, xlabel, ylabel, subplot, xscale, figure, xlim, margins\n'), ((5609, 5615), 'matplotlib.pyplot.show', 'show', ([], {}), '()\n', (5613, 5615), False, 'from matplotlib.pyplot import stem, title, grid, show, plot, xlabel, ylabel, subplot, xscale, figure, xlim, margins\n'), ((5633, 5649), 'matplotlib.pyplot.subplot', 'subplot', (['(2)', '(2)', '(4)'], {}), '(2, 2, 4)\n', (5640, 5649), False, 'from matplotlib.pyplot import stem, title, grid, show, plot, xlabel, ylabel, subplot, xscale, figure, xlim, margins\n'), ((5696, 5709), 'matplotlib.pyplot.xscale', 'xscale', (['"""log"""'], {}), "('log')\n", (5702, 5709), False, 'from matplotlib.pyplot import stem, title, grid, show, plot, xlabel, ylabel, subplot, xscale, figure, xlim, margins\n'), ((5714, 5744), 'matplotlib.pyplot.xlabel', 'xlabel', (['"""Normalized frequency"""'], {}), "('Normalized frequency')\n", (5720, 5744), False, 'from matplotlib.pyplot import stem, title, grid, show, plot, xlabel, ylabel, subplot, xscale, figure, xlim, margins\n'), ((5749, 5774), 'matplotlib.pyplot.ylabel', 'ylabel', (['"""Phase [degrees]"""'], {}), "('Phase [degrees]')\n", (5755, 5774), False, 'from matplotlib.pyplot import stem, title, grid, show, plot, xlabel, ylabel, subplot, xscale, figure, xlim, margins\n'), ((5779, 5832), 'matplotlib.pyplot.grid', 'grid', (['(True)'], {'color': '"""0.7"""', 'linestyle': '"""-"""', 'which': '"""major"""'}), "(True, color='0.7', linestyle='-', which='major')\n", (5783, 5832), False, 'from matplotlib.pyplot import stem, title, grid, show, plot, xlabel, ylabel, subplot, xscale, figure, xlim, margins\n'), ((5839, 5892), 'matplotlib.pyplot.grid', 'grid', (['(True)'], {'color': '"""0.9"""', 'linestyle': '"""-"""', 'which': '"""minor"""'}), "(True, color='0.9', linestyle='-', which='minor')\n", (5843, 5892), False, 'from matplotlib.pyplot import stem, title, grid, show, plot, xlabel, ylabel, subplot, xscale, figure, xlim, margins\n'), ((5899, 5905), 'matplotlib.pyplot.show', 'show', ([], 
{}), '()\n', (5903, 5905), False, 'from matplotlib.pyplot import stem, title, grid, show, plot, xlabel, ylabel, subplot, xscale, figure, xlim, margins\n'), ((3747, 3757), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (3755, 3757), True, 'import matplotlib.pyplot as plt\n'), ((3776, 3797), 'matplotlib.pyplot.savefig', 'plt.savefig', (['filename'], {}), '(filename)\n', (3787, 3797), True, 'import matplotlib.pyplot as plt\n'), ((5681, 5689), 'numpy.angle', 'angle', (['h'], {}), '(h)\n', (5686, 5689), False, 'from numpy import asarray, tan, array, pi, arange, cos, log10, unwrap, angle\n'), ((2911, 2922), 'numpy.rint', 'np.rint', (['xy'], {}), '(xy)\n', (2918, 2922), True, 'import numpy as np\n'), ((3374, 3385), 'numpy.rint', 'np.rint', (['xy'], {}), '(xy)\n', (3381, 3385), True, 'import numpy as np\n')] |
import string
import math
from codestat_token import Token
from codestat_tokenizer import Tokenizer
from token_builders import (
InvalidTokenBuilder,
NullTokenBuilder,
WhitespaceTokenBuilder,
NewlineTokenBuilder,
EscapedStringTokenBuilder,
PrefixedStringTokenBuilder,
IntegerTokenBuilder,
IntegerExponentTokenBuilder,
PrefixedIntegerTokenBuilder,
SuffixedIntegerTokenBuilder,
RealTokenBuilder,
IdentifierTokenBuilder,
CaseInsensitiveListTokenBuilder,
CaseSensitiveListTokenBuilder,
LeadToEndOfLineTokenBuilder,
SingleCharacterTokenBuilder
)
from assembly_token_builders import (
LabelTokenBuilder,
AssemblyCommentTokenBuilder,
MultilineCommentTokenBuilder,
HashQuoteCharTokenBuilder
)
from examiner import Examiner
class AssemblyExaminer(Examiner):
@staticmethod
def __escape_z__():
InvalidTokenBuilder.__escape_z__()
WhitespaceTokenBuilder.__escape_z__()
NewlineTokenBuilder.__escape_z__()
EscapedStringTokenBuilder.__escape_z__()
PrefixedStringTokenBuilder.__escape_z__()
IntegerTokenBuilder.__escape_z__()
IntegerExponentTokenBuilder.__escape_z__()
PrefixedIntegerTokenBuilder.__escape_z__()
SuffixedIntegerTokenBuilder.__escape_z__()
RealTokenBuilder.__escape_z__()
IdentifierTokenBuilder.__escape_z__()
CaseInsensitiveListTokenBuilder.__escape_z__()
CaseSensitiveListTokenBuilder.__escape_z__()
LeadToEndOfLineTokenBuilder.__escape_z__()
SingleCharacterTokenBuilder.__escape_z__()
LabelTokenBuilder.__escape_z__()
AssemblyCommentTokenBuilder.__escape_z__()
MultilineCommentTokenBuilder.__escape_z__()
HashQuoteCharTokenBuilder.__escape_z__()
return 'Escape ?Z'
def __init__(self, code, tab_size, processor):
super().__init__()
self.newlines_important = 'always'
operand_types = []
whitespace_tb = WhitespaceTokenBuilder()
newline_tb = NewlineTokenBuilder()
comment_tb = LeadToEndOfLineTokenBuilder(';', True, 'comment')
if processor in ['pdp-8']:
comment_tb = LeadToEndOfLineTokenBuilder('/', True, 'comment')
comment_2_tb = NullTokenBuilder()
if processor in ['1802']:
comment_2_tb = LeadToEndOfLineTokenBuilder('..', True, 'comment')
line_comment_star_tb = AssemblyCommentTokenBuilder('*')
line_comment_hash_tb = NullTokenBuilder()
if processor in ['68000']:
line_comment_hash_tb = AssemblyCommentTokenBuilder('#')
stmt_separator_tb = NullTokenBuilder()
if processor in ['pdp-8']:
stmt_separator_tb = SingleCharacterTokenBuilder(';', 'statement separator', False)
integer_tb = IntegerTokenBuilder("'")
integer_exponent_tb = IntegerExponentTokenBuilder("'")
integer_1_tb = NullTokenBuilder()
integer_2_tb = NullTokenBuilder()
prefixed_integer_tb = PrefixedIntegerTokenBuilder('#', True, '0123456789')
if processor in ['pdp-11']:
integer_1_tb = SuffixedIntegerTokenBuilder('$', True, '0123456789')
if processor in ['z80']:
integer_1_tb = SuffixedIntegerTokenBuilder('O', True, '0123456789')
integer_2_tb = SuffixedIntegerTokenBuilder('D', True, '0123456789')
hex_integer_1_tb = PrefixedIntegerTokenBuilder('&', True, '0123456789abcdefABCDEF')
hex_integer_2_tb = SuffixedIntegerTokenBuilder('h', False, '0123456789abcdefABCDEF')
hex_integer_3_tb = PrefixedIntegerTokenBuilder('$', True, '0123456789abcdefABCDEF')
hex_integer_4_tb = PrefixedIntegerTokenBuilder('#$', True, '0123456789abcdefABCDEF')
hash_quote_value_tb = NullTokenBuilder()
if processor in ['pdp-11']:
hash_quote_value_tb = HashQuoteCharTokenBuilder()
operand_types.append('number')
leads = '_.$@#'
extras = '_.$@#'
identifier_tb = IdentifierTokenBuilder(leads, extras)
operand_types.append('identifier')
label_tb = LabelTokenBuilder(leads, extras, ':')
quotes = ['"', "'", "’"]
string_tb = EscapedStringTokenBuilder(quotes, 0)
operand_types.append('string')
known_operators = [
'+', '-', '*', '/', '&', '|', '=', '??', '#', '@', "'", '!'
]
self.unary_operators = [
'+', '-', '??', '#', '@', "'"
]
self.postfix_operators = ['+']
groupers = ['(', ')', ',', '[', ']', '<', '>', ':']
group_starts = ['(', '[', ',', '<']
group_ends = [')', ']', '>']
group_mids = [',', ':']
groupers_tb = CaseInsensitiveListTokenBuilder(groupers, 'group', False)
known_operator_tb = CaseSensitiveListTokenBuilder(known_operators, 'operator', False)
preprocessors = [
'if', 'ifne', 'ifeq',
'else', 'endif', 'endc',
'error'
]
preprocessors_68000 = [
'MACRO', 'ENDM'
]
preprocessors_8080 = [
'MACRO', 'ENDM'
]
preprocessors_8086 = [
'ELSE', 'ELSEIF', 'ELSEIF2', 'ENDM', 'EXITM',
'FOR', 'FORC',
'GOTO',
'IF', 'IF2', 'IFB', 'IFNB', 'IFDEF', 'IFNDEF',
'IFDIF', 'IFDIF[[I]]', 'IFE', 'IFIDN', 'IFIDN[[I]]',
'LOCAL',
'MACRO',
'PURGE',
'.BREAK', '.CONTINUE',
'.ELSE', '.ELSEIF', '.ENDIF',
'.ERR', '.ERR2', '.ERRB', '.ERRDEF',
'.ERRDIF', '.ERRDIF[[I]]]', '.ERRE', '.ERRIDN', '.ERRIDN[[I]]',
'.ERRNB', '.ERRNDEF', '.ERRNZ', '.EXIT',
'.IF',
'.REPEAT', '.UNTIL', '.UNTILCXZ',
'.WHILE'
]
if processor in ['68000']:
preprocessors += preprocessors_68000
if processor in ['8080']:
preprocessors += preprocessors_8080
if processor in ['8086']:
preprocessors += preprocessors_8086
preprocessor_tb = CaseInsensitiveListTokenBuilder(preprocessors, 'preprocessor', False)
directives = [
'DB', 'DW', 'DS',
'EJECT', 'END', 'EQU', 'EXTRN',
'INCLUDE',
'NAME',
'ORG',
'PAGE',
'SECTION', 'SEGMENT', 'START', 'SUBTITLE',
'TEXT'
]
directives_6502 = [
'DFB', 'DFW'
]
directives_6800 = [
'CPU',
'NAM'
]
directives_68000 = [
'=',
'EVEN',
'ODD'
]
directives_8080 = [
'ASEG',
'CPU',
'LOCAL',
'TITLE',
'.8080', '.8086', '.6800', '.6502', ".386",
]
directives_z80 = [
'DEFB', 'DEFS', 'DEFW'
]
directives_8086 = [
'=',
'ABSOLUTE', 'ALIAS', 'ALIGN', 'AS', 'ASSUME', 'AT',
'BITS', 'BYTE',
'COMM', 'COMMON', 'CPU', 'CSEG',
'DEFAULT', 'DSEG', 'DWORD',
'ECHO', 'ENDP', 'ENDS', 'EVEN', 'EXTERNDEF',
'FWORD', 'FORMAT',
'GLOBAL', 'GROUP',
'INCLUDELIB', 'INS86', 'INVOKE',
'LABEL',
'MMWORD',
'OPTION',
'POPCONTEXT', 'PROC', 'PROTO', 'PUBLIC', 'PUSHCONTEXT',
'SEGMENT'
'QWORD',
'REAL4', 'REAL8', 'REAL10', 'RECORD',
'STRUCT',
'TEXTEQU', 'TBYTE', 'TYPEDEF',
'WORD',
'SBYTE', 'SDWORD', 'SWORD',
'SECT', 'SECTION', 'SEGMENT', 'STATIC'
'UNION', 'USE16', 'USE32', 'USE64',
'VIRTUAL',
'XMMWORD', 'YMMWORD',
'.386', '.386P', '.387', '.486', '.486P', '.586', '.586P',
'.686', '.686P', '.K3D',
'.ALLOCSTACK', '.ALPHA',
'.CODE', '.CONST', '.CREF',
'.DATA', '.DATA?', '.DOSSEG',
'.ENDW', '.ENDPROLOG',
'.FARDATA', '.FARDATA?', '.FPO',
'.LIST', '.LISTALL', '.LISTIF', '.LISTMACRO', '.LISTMACROALL',
'.MODEL', '.MMX',
'.NOCREF', '.NOLIST', '.NOLISTIF', '.NOLISTMACRO',
'.PUSHFRAME', '.PUSHREG',
'.RADIX',
'.SAFESEH', '.SALL', '.SAVEREG', '.SAVEXMM128', '.STACK', '.STARTUP',
'.SEQ', '.SETFRAME',
'.TFCOND',
'.XLIST', '.XMM',
]
directives_80386 = [
'ALIGN',
'BITS',
'GLOBAL',
'PROC',
'SECTION',
'RESB', 'RESD',
'.386',
'.CODE',
'.DATA',
'.MODEL',
'.TEXT',
'%INCLUDE',
]
directives_pdp8 = [
'='
]
directives_pdp11 = [
'=',
'BYTE',
'WORD',
'.odd', '.even', '.blkb', '.blkw', '.byte', '.word',
'.ascii', '.asciz', '.end', '.hex', '.radix',
'.ident', '.if', '.ift', '.endc', '.psect', '.mcall',
'.macro', '.endm', '.restore', '.print', '.error',
'.list', '.nlist'
]
if processor in ['6502']:
directives += directives_6502
if processor in ['6800']:
directives += directives_6800
if processor in ['68000']:
directives += directives_68000
if processor in ['8080']:
directives += directives_8080
if processor in ['z80']:
directives += directives_z80
if processor in ['8086']:
directives += directives_8086
if processor in ['80386']:
directives += directives_80386
if processor in ['pdp-8']:
directives += directives_pdp8
if processor in ['pdp-11']:
directives += directives_pdp11
directive_tb = CaseInsensitiveListTokenBuilder(directives, 'directive', False)
title_directive_tb = LeadToEndOfLineTokenBuilder('TITLE', False, 'directive')
title_directive_2_tb = LeadToEndOfLineTokenBuilder('.TITLE', False, 'directive')
subtitle_directive_tb = LeadToEndOfLineTokenBuilder('SUBTTL', False, 'directive')
subtitle_directive_2_tb = LeadToEndOfLineTokenBuilder('.SUBTTL', False, 'directive')
subtitle_directive_3_tb = LeadToEndOfLineTokenBuilder('.SBTTL', False, 'directive')
include_directive_tb = LeadToEndOfLineTokenBuilder('INCLUDE', False, 'directive')
include_directive_2_tb = LeadToEndOfLineTokenBuilder('.INCLUDE', False, 'directive')
multiline_comment_tb = MultilineCommentTokenBuilder()
opcodes_1802 = [
'IDL', 'LDN', 'INC', 'DEC', 'BR', 'BO', 'BZ', 'BDF', 'BPZ', 'BGE',
'B1', 'B2', 'B3', 'B4', 'SKP', 'NBR', 'BNO', 'BNZ', 'BNF', 'BM', 'BL',
'BN1', 'BN2', 'BN3', 'BN4', 'LDA', 'STR', 'IRX', 'OUT', 'INP',
'RET', 'DIS', 'LDXA', 'STXD', 'ADC', 'SDB', 'SHRC', 'RSHR', 'SMB',
'SAV', 'MARK', 'REQ', 'SEQ', 'ADCI', 'SDBI', 'SHLC', 'RSHL', 'SMBI',
'GLO', 'GHI', 'PLO', 'PHI', 'LBO', 'LBZ', 'LBDF', 'NOP', 'LSNO',
'LSNZ', 'LSNF', 'LSKP', 'NLBR', 'LBNQ', 'LBNZ', 'LBNF', 'LSIE', 'LSQ',
'LSZ', 'LSDF', 'SEP', 'SEX', 'LDX', 'OR', 'AND', 'XOR', 'ADD', 'SD',
'SHR', 'SM', 'LDI', 'ORI', 'ANI', 'XRI', 'ADI', 'SDI', 'SHL', 'SMI'
]
registers_1802 = []
opcodes_6502 = [
'ADC', 'AND', 'ASL', 'AST',
'BCC', 'BCS', 'BEQ', 'BIT', 'BMI', 'BNE', 'BPL', 'BRK', 'BVC', 'BVS',
'CLC', 'CLD', 'CLI', 'CLV', 'CMP', 'CPR', 'CPX', 'CPY',
'DEC', 'DEX', 'DEY',
'EOR',
'INC', 'INX', 'INY',
'JMP', 'JSR',
'LDA', 'LDX', 'LDY', 'LSR',
'NOP',
'ORA',
'PHA', 'PHP', 'PLA', 'PLP',
'ROL', 'ROR', 'RTI', 'RTS',
'SBC', 'SEC', 'SED', 'SEI', 'STA', 'STX', 'STY',
'TAX', 'TAY', 'TSX', 'TXA', 'TXS', 'TYA'
]
registers_6502 = ['A', 'X', 'Y', 'P', 'S']
opcodes_6800 = [
'ABA', 'ADC', 'ADCA', 'ADCB', 'ADD', 'AND', 'ASL', 'ASR',
'BCC', 'BCS', 'BEQ', 'BGE', 'BGT', 'BHI', 'BIT', 'BLE', 'BLS', 'BLT', 'BMI', 'BNE', 'BPL', 'BRA', 'BSR', 'BVC', 'BVS',
'CBA', 'CLC', 'CLI', 'CLR', 'CLRA', 'CLRB', 'CLV', 'CMP', 'COM', 'CPX',
'DAA', 'DEC', 'DES', 'DEX',
'EOR', 'EORA', 'EROB',
'INC', 'INS', 'INX',
'JMP', 'JSR',
'LDA', 'LDAA', 'LDAB', 'LDS', 'LDX', 'LSR',
'NEG', 'NOP',
'ORA',
'PSH', 'PUL',
'ROL', 'ROR', 'RTI', 'RTS',
'SBA', 'SBC', 'SEC', 'SEI', 'SEV', 'STA', 'STAA', 'STAB', 'STS', 'STX', 'SUB', 'SWI',
'TAB', 'TAP', 'TBA', 'TPA', 'TST', 'TSX', 'TXS',
'WAI'
]
registers_6800 = ['A', 'B', 'IX', 'PC', 'SP']
opcodes_68000 = [
'AND', 'ANDI', 'EOR', 'EORI', 'NOT', 'OR', 'ORI', 'CLR',
'BCHG', 'BCLR', 'BSET', 'BTST', 'EXT', 'EXTB',
'MOVE', 'MOVEA', 'MOVEM', 'MOVEP', 'MOVEQ',
'CMP', 'CMPA', 'CMPI', 'CMPM', 'CMP2',
'LEA', 'PEA', 'TAS', 'CHK',
'ADD', 'ADDA', 'ADDI', 'ADDQ', 'ADDX',
'SUB', 'SUBA', 'SUBI', 'SUBQ', 'SUBX',
'MULS', 'MULU', 'DIVS', 'DIVU', 'NEG', 'NEGX',
'ASL', 'ASR', 'LSL', 'LSR', 'ROL', 'ROR', 'ROXL', 'ROXR',
'DBCC', 'SWAP', 'TST',
'ANDB', 'ANDIB', 'EORB', 'EORIB', 'NOTB', 'ORB', 'ORIB', 'CLRB',
'BCHGB', 'BCLRB', 'BSETB', 'BTSTB', 'EXTB', 'EXTBB',
'MOVEB', 'MOVEAB', 'MOVEMB', 'MOVEPB', 'MOVEQB',
'CMPB', 'CMPAB', 'CMPIB', 'CMPMB', 'CMP2B',
'LEAB', 'PEAB', 'TASB', 'CHKB',
'ADDB', 'ADDAB', 'ADDIB', 'ADDQB', 'ADDXB',
'SUBB', 'SUBAB', 'SUBIB', 'SUBQB', 'SUBXB',
'MULSB', 'MULUB', 'DIVSB', 'DIVUB', 'NEGB', 'NEGXB',
'ASLB', 'ASRB', 'LSLB', 'LSRB', 'ROLB', 'RORB', 'ROXLB', 'ROXRB',
'DBCCB', 'SWAPB', 'TSTB',
'ANDW', 'ANDIW', 'EORW', 'EORIW', 'NOTW', 'ORW', 'ORIW', 'CLRW',
'BCHGW', 'BCLRW', 'BSETW', 'BTSTW', 'EXTW', 'EXTBW',
'MOVEW', 'MOVEAW', 'MOVEMW', 'MOVEPW', 'MOVEQW',
'CMPW', 'CMPAW', 'CMPIW', 'CMPMW', 'CMP2W',
'LEAW', 'PEAW', 'TASW', 'CHKW',
'ADDW', 'ADDAW', 'ADDIW', 'ADDQW', 'ADDXW',
'SUBW', 'SUBAW', 'SUBIW', 'SUBQW', 'SUBXW',
'MULSW', 'MULUW', 'DIVSW', 'DIVUW', 'NEGW', 'NEGXW',
'ASLW', 'ASRW', 'LSLW', 'LSRW', 'ROLW', 'RORW', 'ROXLW', 'ROXRW',
'DBCCW', 'SWAPW', 'TSTW',
'ANDL', 'ANDIL', 'EORL', 'EORIL', 'NOTL', 'ORL', 'ORIL', 'CLRL',
'BCHGL', 'BCLRL', 'BSETL', 'BTSTL', 'EXTL', 'EXTBL',
'MOVEL', 'MOVEAL', 'MOVEML', 'MOVEPL', 'MOVEQL',
'CMPL', 'CMPAL', 'CMPIL', 'CMPML', 'CMP2L',
'LEAL', 'PEAL', 'TASL', 'CHKL',
'ADDL', 'ADDAL', 'ADDIL', 'ADDQL', 'ADDXL',
'SUBL', 'SUBAL' 'SUBIL', 'SUBQL', 'SUBXL',
'MULSL', 'MULUL', 'DIVSL', 'DIVUL', 'NEGL', 'NEGXL',
'ASLL', 'ASRL', 'LSLL', 'LSRL', 'ROLL', 'RORL', 'ROXLL', 'ROXRL',
'DBCCL', 'SWAPL', 'TSTL',
'ABCD', 'NBCD', 'PACK', 'SBCD', 'UNPK',
'BSR', 'BRA', 'BT', 'BF',
'BEQ', 'BNE', 'BLS', 'BLT', 'BLE', 'BGT', 'BGE',
'BCC', 'BCS', 'BPL', 'BMI', 'BHI', 'BVC', 'BVS',
'BSRS', 'BRAS', 'BEQS', 'BNES', 'BLSS', 'BLTS', 'BLES', 'BGTS', 'BGES',
'BCCS', 'BCSS', 'BPLS', 'BMIS', 'BHIS', 'BVCS', 'BVSS',
'DBSR', 'DBRA', 'DBT', 'DBF',
'DBEQ', 'DBNE', 'DBLS', 'DBLT', 'DBLE', 'DBGT', 'DBGE',
'DBCC', 'DBCS', 'DBPL', 'DBMI', 'DBHI', 'DBVC', 'DBVS',
'JSR', 'JMP',
'TRAP', 'HALT', 'STOP',
'RTD', 'RTE', 'RTR', 'RTS',
'TRAP', 'HALT', 'STOP', 'NOP', 'MOVE16', 'EXG',
'BFCHG', 'BFCLR', 'BFEXTS', 'BFEXTU', 'BFFFO', 'BFINS', 'BFSET', 'BFTST',
'FNOP', 'FABS', 'FACOS', 'FASIN', 'FATAN', 'FCOS', 'FCOSH', 'FETOX',
'FETOXM1', 'FGETMAN', 'FINT', 'FINTRZ', 'FLOGN', 'FLOGNP1', 'FLOG10',
'FLOG2', 'FNEG', 'FSIN', 'FSINH', 'FSQRT', 'FTAN', 'FTANH',
'FTENTOX', 'FTWOTOX', 'FTST',
'DSB', 'DSW', 'DSL', 'DCB', 'DCW', 'DCL',
'AND.B', 'ANDI.B', 'EOR.B', 'EORI.B', 'NOT.B', 'OR.B', 'ORI.B', 'CLR.B',
'BCHG.B', 'BCLR.B', 'BSET.B', 'BTST.B', 'EXT.B', 'EXTB.B',
'MOVE.B', 'MOVEA.B', 'MOVEM.B', 'MOVEP.B', 'MOVEQ.B',
'CMP.B', 'CMPA.B', 'CMPI.B', 'CMPM.B', 'CMP2.B',
'LEA.B', 'PEA.B', 'TAS.B', 'CHK.B',
'ADD.B', 'ADDA.B', 'ADDI.B', 'ADDQ.B', 'ADDX.B',
'SUB.B', 'SUBA.B', 'SUBI.B', 'SUBQ.B', 'SUBX.B',
'MULS.B', 'MULU.B', 'DIVS.B', 'DIVU.B', 'NEG.B', 'NEGX.B',
'ASL.B', 'ASR.B', 'LSL.B', 'LSR.B', 'ROL.B', 'ROR.B', 'ROXL.B', 'ROXR.B',
'DBCC.B', 'SWAP.B', 'TST.B',
'AND.W', 'ANDI.W', 'EOR.W', 'EORI.W', 'NOT.W', 'OR.W', 'ORI.W', 'CLR.W',
'BCHG.W', 'BCLR.W', 'BSET.W', 'BTST.W', 'EXT.W', 'EXTB.W',
'MOVE.W', 'MOVEA.W', 'MOVEM.W', 'MOVEP.W', 'MOVEQ.W',
'CMP.W', 'CMPA.W', 'CMPI.W', 'CMPM.W', 'CMP2.W',
'LEA.W', 'PEA.W', 'TAS.W', 'CHK.W',
'ADD.W', 'ADDA.W', 'ADDI.W', 'ADDQ.W', 'ADDX.W',
'SUB.W', 'SUBA.W', 'SUBI.W', 'SUBQ.W', 'SUBX.W',
'MULS.W', 'MULU.W', 'DIVS.W', 'DIVU.W', 'NEG.W', 'NEGX.W',
'ASL.W', 'ASR.W', 'LSL.W', 'LSR.W', 'ROL.W', 'ROR.W', 'ROXL.W', 'ROXR.W',
'DBCC.W', 'SWAP.W', 'TST.W',
'AND.L', 'ANDI.L', 'EOR.L', 'EORI.L', 'NOT.L', 'OR.L', 'ORI.L', 'CLR.L',
'BCHG.L', 'BCLR.L', 'BSET.L', 'BTST.L', 'EXT.L', 'EXTB.L',
'MOVE.L', 'MOVEA.L', 'MOVEM.L', 'MOVEP.L', 'MOVEQ.L',
'CMP.L', 'CMPA.L', 'CMPI.L', 'CMPM.L', 'CMP2.L',
'LEA.L', 'PEA.L', 'TAS.L', 'CHK.L',
'ADD.L', 'ADDA.L', 'ADDI.L', 'ADDQ.L', 'ADDX.L',
'SUB.L', 'SUBA.L', 'SUBI.L', 'SUBQ.L', 'SUBX.L',
'MULS.L', 'MULU.L', 'DIVS.L', 'DIVU.L', 'NEG.L', 'NEGX.L',
'ASL.L', 'ASR.L', 'LSL.L', 'LSR.L', 'ROL.L', 'ROR.L', 'ROXL.L', 'ROXR.L',
'DBCC.L', 'SWAP.L', 'TST.L',
'BSR.S', 'BRA.S', 'BT.S', 'BF.S',
'BEQ.S', 'BNE.S', 'BLS.S', 'BLT.S', 'BLE.S', 'BGT.S', 'BGE.S',
'BCC.S', 'BCS.S', 'BPL.S', 'BMI.S', 'BHI.S', 'BVC.S', 'BVS.S',
'DS.B', 'DS.W', 'DS.L', 'DC.B', 'DC.W', 'DC.L'
]
registers_68000 = [
'D0', 'D1', 'D2', 'D3', 'D4', 'D5', 'D6', 'D7',
'A0', 'A1', 'A2', 'A3', 'A4', 'A5', 'A6', 'A7',
'FP0', 'FP1', 'FP2', 'FP3', 'FP4', 'FP5', 'FP6', 'FP7',
'PC', 'SR'
]
opcodes_8080 = [
'ACI', 'ADC', 'ADD', 'ADI', 'ANA', 'ANI',
'CALL', 'CC', 'CM', 'CMA', 'CMC', 'CMP', 'CNC', 'CNZ', 'CP', 'CPE', 'CPI',
'CPO', 'CZ',
'DAA', 'DAD', 'DCR', 'DCX', 'DI',
'EI',
'HLT',
'IN', 'INR', 'INX',
'JC', 'JM', 'JMP', 'JNC', 'JNZ', 'JP', 'JPE', 'JPO', 'JZ',
'LDAX', 'LHLD', 'LXI',
'MOV', 'MVI',
'NOP',
'ORA', 'ORI', 'OUT',
'PCHL', 'POP', 'PUSH',
'RAL', 'RAR', 'RC', 'RIM', 'RLC', 'RET', 'RM', 'RNC', 'RNZ', 'RP', 'RPE',
'RPO', 'RRC', 'RST', 'RZ ',
'SBB', 'SBI', 'SHLD', 'SIM', 'SPHL', 'STA', 'STC', 'STAX', 'SUB', 'SUI',
'XCHG', 'XRA', 'XRI', 'XTHL',
]
registers_8080 = [
'A', 'B', 'C', 'D', 'E', 'H', 'L', 'M', 'PSW', 'F'
]
opcodes_z80 = [
'ADC', 'ADD', 'AND',
'BIT',
'CALL', 'CCF', 'CP', 'CPD', 'CPDR', 'CPI', 'CPIR', 'CPL',
'DAA', 'DEC', 'DI', 'DJNZ',
'EI', 'EX', 'EXX',
'HALT',
'IM', 'IN', 'INC', 'IND', 'INDR', 'INI', 'INIR',
'JP', 'JR',
'LD', 'LDD', 'LDDR', 'LDI', 'LDIR',
'NEG', 'NOP',
'OR', 'OTDR', 'OTIR', 'OUT', 'OUTD', 'OUTI',
'POP', 'PUSH',
'RES', 'RET', 'RETI', 'RETN', 'RL', 'RLA', 'RLC', 'RLCA', 'RLD',
'RR', 'RRA', 'RRC', 'RRCA', 'RRD', 'RST',
'SBC', 'SCF', 'SET', 'SLA', 'SRA', 'SRL', 'SUB',
'XOR'
]
registers_z80 = [
'A', 'B', 'C', 'D', 'E', 'H', 'L', 'F', 'AF', 'BC', 'DE', 'HL',
"A'", "B'", "C'", "D'", "E'", "H'", "L'", "AF'", "F'", "BC'", "DE'", "HL'",
'IX', 'IY', 'PSW', 'M'
]
opcodes_8086 = [
'AAA', 'AAD', 'AAM', 'AAS', 'ADC', 'ADD', 'AND',
'CALL', 'CBW', 'CLC', 'CLD', 'CLI', 'CMC', 'CMP', 'CMPS', 'CMPSB', 'CMPW', 'CMPXCHG', 'CWD',
'DAA', 'DAS', 'DEC', 'DIV',
'ESC',
'FWAIT',
'F2XM1', 'FABS', 'FADD', 'FADDP', 'FBLD', 'FBSTP', 'FCHS', 'FCLEX', 'FCOM', 'FCOMP',
'FCOMPP', 'FCOS', 'FDECSTP', 'FDISI', 'FDIV', 'FDIVP', 'FDIVR', 'FDIVRP',
'FENI', 'FFREE', 'FIADD', 'FICOM', 'FICOMP', 'FIDIV', 'FIDIVR', 'FILD',
'FIMUL', 'FINCSTP', 'FINIT', 'FIST', 'FISTP', 'FISUB', 'FISUBR', 'FLD', 'FLD1',
'FLDCW', 'FLDENV', 'FLDL2E', 'FLDL2T', 'FLDLG2', 'FLDLN2', 'FLDPI',
'FLDZ', 'FMUL', 'FMULP', 'FNCLEX', 'FNDISI', 'FNENI', 'FNINIT', 'FNOP', 'FNSAVE',
'FNSTCW', 'FNSTENV', 'FNSTSW', 'FPATAN', 'FPREM', 'FPREM1', 'FPTAN', 'FRNDINT',
'FRSTOR', 'FSAVE', 'FSCALE', 'FSETPM', 'FSIN', 'FSINCOS', 'FSQRT', 'FST', 'FSTCW',
'FSTENV', 'FSTP', 'FSTSW', 'FSUB', 'FSUBP', 'FSUBRP', 'FTST', 'FUCOM', 'FUCOMP',
'FUCOMPP', 'FXAM', 'FXCH', 'FXTRACT', 'FYL2X', 'FYL2XP1',
'HLT',
'IDIV', 'IMUL', 'IN', 'INC', 'INT', 'INTO', 'INVD', 'IRET', 'IRETD',
'JA', 'JAE', 'JB', 'JBE', 'JC', 'JCXZ', 'JE', 'JECXZ', 'JG', 'JGE', 'JL', 'JLE', 'JMP', 'JNA', 'JNAE', 'JNB', 'JNBE', 'JNC', 'JNE', 'JNG', 'JNGE', 'JNL', 'JNLE', 'JNO', 'JNP', 'JNS', 'JO', 'JP', 'JPE', 'JPO', 'JNZ', 'JS', 'JZ',
'LAHF', 'LAR', 'LDS', 'LEA', 'LES', 'LOCK', 'LODS', 'LODSB', 'LODSW', 'LOOP', 'LOOPE', 'LOOPNE', 'LOOPNZ', 'LOOPZ',
'MOV', 'MOVS', 'MOVSB', 'MOVSW', 'MUL',
'NEG', 'NOP', 'NOT',
'OR', 'OUT',
'POP', 'POPF', 'POPFD', 'PUSH', 'PUSHF', 'PUSHFD',
'RCL', 'RCR', 'REP', 'REPE', 'REPNE', 'REPNZ', 'REPZ', 'RET', 'RETF', 'ROL', 'ROR',
'SAHF', 'SAL', 'SAR', 'SBB', 'SCAS', 'SCASB', 'SCASW', 'SHL', 'SHR', 'STC', 'STD', 'STI', 'STOS', 'STOSB', 'STOSW', 'SUB',
'TEST',
'WAIT', 'WBINVD',
'XCHG', 'XLAT', 'XLATB', 'XOR',
]
registers_8086 = [
'AL', 'AH', 'BL', 'BH', 'CL', 'CH', 'DL', 'DH',
'AX', 'BX', 'CX', 'DX', 'CS', 'DS', 'SS', 'ES',
'IP', 'SI', 'DI', 'BP', 'SP', 'FLAGS'
]
opcodes_80186 = [
'BOUND',
'ENTER',
'INS',
'LEAVE',
'OUTS',
'POPA', 'POPAD', 'PUSHA', 'PUSHAD'
]
opcodes_80286 = [
'ARPL',
'CLTS',
'LGDT', 'LIDT', 'LLDT', 'LMSW', 'LSL', 'LSS',
'SGDT', 'SIDT', 'SLDT', 'SMSW', 'STR',
'VERR', 'VERW'
]
registers_80286 = [
'TR'
]
opcodes_80386 = [
'BSF', 'BSR', 'BT', 'BTC', 'BTR', 'BTS',
'CDQ', 'CWDE',
'LFS', 'LGS', 'LSS',
'MOVSX', 'MOVZX',
'SETAE', 'SETB', 'SETC', 'SETNAE', 'SETNB', 'SETNE', 'SETNZ', 'SETG', 'SETGE', 'SETL', 'SETLE', 'SETNC', 'SETNG', 'SETNGE', 'SETNL', 'SETNLE', 'SETNO', 'SETNP', 'SETNS', 'SETE', 'SETO', 'SETP', 'SETPE', 'SETPO', 'SETS', 'SETZ',
'SHLD', 'SHRD'
]
registers_80386 = [
'EAX', 'EBX', 'ECX', 'EDX', 'ESI', 'EDI', 'EBP', 'ESP',
'FS', 'GS', 'EFLAGS'
]
opcodes_80486 = [
'BSWAP',
'INVPLG'
]
opcodes_pdp8 = [
'AND', 'TAD', 'ISZ', 'DCA', 'JMS', 'JMP',
'CDF', 'CIF', 'RDF', 'RIF', 'RIB', 'RMF',
'CLA', 'CLL', 'CMA', 'CML', 'IAC', 'RAR', 'RAL', 'RTR', 'RTL', 'BSW',
'SMA', 'SZA', 'SNL', 'SPA', 'SNA', 'SZL', 'OSR', 'HLT', 'MQA', 'MQL',
'SEL', 'LCD', 'XDR', 'STR', 'SER', 'SDN', 'INTR', 'INIT',
'DILC', 'DICD', 'DISD', 'DILX', 'DILY', 'DIXY', 'DILE', 'DIRE',
'RCSF', 'RCRA', 'RCRB', 'RCNO', 'RCRC', 'RCNI', 'RCSD', 'RCSE',
'RCRD', 'RCSI', 'RCTF',
'RPE', 'RSF', 'RRB', 'RFC', 'PCE', 'PSF', 'PCF', 'PPC', 'PLS',
'KCF', 'KSF', 'KCC', 'KRS', 'KIE', 'KRB', 'TFL', 'TSF', 'TCF',
'TPC', 'TSK', 'TLS'
]
opcodes_pdp11 = [
'CLR', 'CLRB', 'COM', 'COMB', 'INC', 'INCB', 'DEC', 'DECB', 'NEG', 'NEGB',
'NOP', 'TST', 'TSTB', 'TSTSET', 'WRTLCK', 'ASR', 'ASRB', 'ASL', 'ASLB',
'ROR', 'RORB', 'ROL', 'ROLB', 'SWAB', 'ADC', 'ADCB', 'SBC', 'SBCB', 'SXT',
'MOV', 'MOVB', 'ADD', 'SUB', 'CMP', 'CMPB', 'ASH', 'ASHC',
'MUL', 'DIV', 'BIT', 'BITB', 'BIC', 'BICB', 'BIS', 'BISB',
'XOR', 'CLR', 'CLRB', 'BR', 'BNE', 'BPL', 'BEQ', 'BMI', 'BVC',
'BVS', 'BCC', 'BCS', 'BGE', 'BLT', 'BGT', 'BLE', 'SOB', 'BHI',
'BLOS', 'BHIS', 'BLO',
'JMP', 'JSR', 'RTS', 'MARK', 'EMT', 'TRAP', 'BPT', 'IOT', 'CSM',
'RTI', 'RTT', 'HALT', 'WAIT', 'RESET',
'MTPD', 'MTPI', 'MFPD', 'MTPS', 'MFPS', 'MFPT',
'CLC', 'CLV', 'CLZ', 'CLN', 'CCC', 'SEC', 'SEV', 'SEZ', 'SEN', 'SCC',
'FADD', 'FSUB', 'FMUL', 'FDIV',
'DIV', 'MUL'
]
registers_pdp11 = [
'r0', 'r1', 'r2', 'r3', 'r4', 'r5', 'r6', 'r7'
]
opcodes = []
registers = []
if processor in ['1802']:
opcodes += opcodes_1802
registers += registers_1802
if processor in ['6502']:
opcodes += opcodes_6502
registers += registers_6502
if processor in ['6800']:
opcodes += opcodes_6800
registers += registers_6800
if processor in ['68000']:
opcodes += opcodes_68000
registers += registers_68000
if processor in ['8080']:
opcodes += opcodes_8080
registers += registers_8080
if processor in ['z80']:
opcodes += opcodes_z80
registers += registers_z80
if processor in ['8086', '80186', '80286', '80386', '80486']:
opcodes += opcodes_8086
registers += registers_8086
if processor in ['80286', '80386', '80486']:
opcodes += opcodes_80186
opcodes += opcodes_80286
registers += registers_80286
if processor in ['80386', '80486']:
opcodes += opcodes_80386
registers += registers_80386
if processor in ['80486']:
opcodes += opcodes_80486
if processor in ['pdp-8']:
opcodes += opcodes_pdp8
# registers += registers_pdp8
if processor in ['pdp-11']:
opcodes += opcodes_pdp11
registers += registers_pdp11
opcode_tb = CaseInsensitiveListTokenBuilder(opcodes, 'keyword', False)
register_tb = CaseInsensitiveListTokenBuilder(registers, 'register', True)
values = ['*', '$', '.']
values_tb = CaseSensitiveListTokenBuilder(values, 'value', True)
operand_types.append('value')
invalid_token_builder = InvalidTokenBuilder()
tokenbuilders = [
newline_tb,
whitespace_tb,
stmt_separator_tb,
integer_tb,
integer_exponent_tb,
integer_1_tb,
integer_2_tb,
prefixed_integer_tb,
hex_integer_1_tb,
hex_integer_2_tb,
hex_integer_3_tb,
hex_integer_4_tb,
hash_quote_value_tb,
values_tb,
groupers_tb,
register_tb,
opcode_tb,
directive_tb,
title_directive_tb,
title_directive_2_tb,
subtitle_directive_tb,
subtitle_directive_2_tb,
subtitle_directive_3_tb,
include_directive_tb,
include_directive_2_tb,
multiline_comment_tb,
preprocessor_tb,
identifier_tb,
label_tb,
string_tb,
comment_tb,
comment_2_tb,
line_comment_star_tb,
line_comment_hash_tb,
known_operator_tb,
self.unknown_operator_tb,
invalid_token_builder
]
opcode_tokenbuilders = [
opcode_tb,
directive_tb,
title_directive_tb,
subtitle_directive_tb,
include_directive_tb,
preprocessor_tb,
invalid_token_builder
]
args_tokenbuilders = [
integer_tb,
integer_exponent_tb,
hex_integer_1_tb,
hex_integer_2_tb,
hex_integer_3_tb,
hex_integer_4_tb,
values_tb,
groupers_tb,
known_operator_tb,
register_tb,
identifier_tb,
label_tb,
string_tb,
comment_tb,
line_comment_star_tb,
line_comment_hash_tb,
self.unknown_operator_tb,
invalid_token_builder
]
tokenizer = Tokenizer(tokenbuilders)
opcode_tokenizer = Tokenizer(opcode_tokenbuilders)
args_tokenizer = Tokenizer(args_tokenbuilders)
# tokenize as free-format
tokens_free = tokenizer.tokenize(code)
tokens_free = Examiner.combine_adjacent_identical_tokens(tokens_free, 'invalid operator')
tokens_free = Examiner.combine_adjacent_identical_tokens(tokens_free, 'invalid')
tokens_free = Examiner.combine_identifier_colon(tokens_free, ['newline'], [], [])
tokens_free = Tokenizer.combine_number_and_adjacent_identifier(tokens_free)
tokens_free = Examiner.convert_values_to_operators(tokens_free, known_operators)
self.tokens = tokens_free
self.convert_asm_identifiers_to_labels()
self.convert_asm_keywords_to_operators()
self.convert_asm_keywords_to_identifiers()
self.calc_statistics()
statistics_free = self.statistics
self.statistics = {}
self.calc_confidences(operand_types, group_starts, group_mids, group_ends, None)
self.calc_line_length_confidence(code, self.max_expected_line)
confidences_free = self.confidences
self.confidences = {}
errors_free = self.errors
self.errors = []
if processor in ['pdp-8', 'pdp-11']:
# do not try space-format, it never exists for these processors
tokens_space = []
statistics_space = {}
confidences_space = {}
errors_space = []
else:
# tokenize as space-format
opcode_extras = '.&=,()+-*/'
label_leads = '.&$@#'
label_mids = '.&$#@_'
label_ends = ':'
comment_leads = '*;'
line_comment_leads = ''
use_line_id = False
tokens_space, indents = Tokenizer.tokenize_asm_code(code, tab_size, opcode_tokenizer, opcode_extras, args_tokenizer, label_leads, label_mids, label_ends, comment_leads, line_comment_leads, use_line_id)
tokens_space = Examiner.combine_adjacent_identical_tokens(tokens_space, 'invalid operator')
tokens_space = Examiner.combine_adjacent_identical_tokens(tokens_space, 'invalid')
tokens_space = Examiner.combine_identifier_colon(tokens_space, ['newline'], [], [])
tokens_space = Tokenizer.combine_number_and_adjacent_identifier(tokens_space)
tokens_space = Examiner.convert_values_to_operators(tokens_space, known_operators)
self.tokens = tokens_space
self.convert_asm_identifiers_to_labels()
self.calc_statistics()
statistics_space = self.statistics
self.statistics = {}
self.calc_confidences(operand_types, group_starts, group_mids, group_ends, indents)
self.calc_line_length_confidence(code, self.max_expected_line)
confidences_space = self.confidences
self.confidences = {}
errors_space = self.errors
self.errors = []
# compute confidence for free-format and spaced-format
confidence_free = 1.0
if len(confidences_free) == 0:
confidence_free = 0.0
else:
for key in confidences_free:
factor = confidences_free[key]
confidence_free *= factor
confidence_space = 1.0
if len(confidences_space) == 0:
confidence_space = 0.0
else:
for key in confidences_space:
factor = confidences_space[key]
confidence_space *= factor
# select the better of free-format and spaced-format
if confidence_space > confidence_free:
self.tokens = tokens_space
self.statistics = statistics_space
self.confidences = confidences_space
self.errors = errors_space
else:
self.tokens = tokens_free
self.statistics = statistics_free
self.confidences = confidences_free
self.errors = errors_free
# combine numbers followed by identfiers to identifiers
@staticmethod
def combine_number_and_adjacent_identifier(tokens):
new_list = []
new_token = None
for token in tokens:
if token.group == 'identifier' and \
new_token is not None and new_token.group == 'number':
new_token = Token(new_token.text + token.text, 'identifier', True)
else:
if new_token is not None:
new_list.append(new_token)
new_token = token
if new_token is not None:
new_list.append(new_token)
return new_list
| [
"token_builders.IntegerTokenBuilder.__escape_z__",
"token_builders.WhitespaceTokenBuilder",
"token_builders.PrefixedStringTokenBuilder.__escape_z__",
"token_builders.CaseSensitiveListTokenBuilder",
"token_builders.InvalidTokenBuilder",
"codestat_tokenizer.Tokenizer.combine_number_and_adjacent_identifier",... | [((833, 867), 'token_builders.InvalidTokenBuilder.__escape_z__', 'InvalidTokenBuilder.__escape_z__', ([], {}), '()\n', (865, 867), False, 'from token_builders import InvalidTokenBuilder, NullTokenBuilder, WhitespaceTokenBuilder, NewlineTokenBuilder, EscapedStringTokenBuilder, PrefixedStringTokenBuilder, IntegerTokenBuilder, IntegerExponentTokenBuilder, PrefixedIntegerTokenBuilder, SuffixedIntegerTokenBuilder, RealTokenBuilder, IdentifierTokenBuilder, CaseInsensitiveListTokenBuilder, CaseSensitiveListTokenBuilder, LeadToEndOfLineTokenBuilder, SingleCharacterTokenBuilder\n'), ((872, 909), 'token_builders.WhitespaceTokenBuilder.__escape_z__', 'WhitespaceTokenBuilder.__escape_z__', ([], {}), '()\n', (907, 909), False, 'from token_builders import InvalidTokenBuilder, NullTokenBuilder, WhitespaceTokenBuilder, NewlineTokenBuilder, EscapedStringTokenBuilder, PrefixedStringTokenBuilder, IntegerTokenBuilder, IntegerExponentTokenBuilder, PrefixedIntegerTokenBuilder, SuffixedIntegerTokenBuilder, RealTokenBuilder, IdentifierTokenBuilder, CaseInsensitiveListTokenBuilder, CaseSensitiveListTokenBuilder, LeadToEndOfLineTokenBuilder, SingleCharacterTokenBuilder\n'), ((914, 948), 'token_builders.NewlineTokenBuilder.__escape_z__', 'NewlineTokenBuilder.__escape_z__', ([], {}), '()\n', (946, 948), False, 'from token_builders import InvalidTokenBuilder, NullTokenBuilder, WhitespaceTokenBuilder, NewlineTokenBuilder, EscapedStringTokenBuilder, PrefixedStringTokenBuilder, IntegerTokenBuilder, IntegerExponentTokenBuilder, PrefixedIntegerTokenBuilder, SuffixedIntegerTokenBuilder, RealTokenBuilder, IdentifierTokenBuilder, CaseInsensitiveListTokenBuilder, CaseSensitiveListTokenBuilder, LeadToEndOfLineTokenBuilder, SingleCharacterTokenBuilder\n'), ((953, 993), 'token_builders.EscapedStringTokenBuilder.__escape_z__', 'EscapedStringTokenBuilder.__escape_z__', ([], {}), '()\n', (991, 993), False, 'from token_builders import 
InvalidTokenBuilder, NullTokenBuilder, WhitespaceTokenBuilder, NewlineTokenBuilder, EscapedStringTokenBuilder, PrefixedStringTokenBuilder, IntegerTokenBuilder, IntegerExponentTokenBuilder, PrefixedIntegerTokenBuilder, SuffixedIntegerTokenBuilder, RealTokenBuilder, IdentifierTokenBuilder, CaseInsensitiveListTokenBuilder, CaseSensitiveListTokenBuilder, LeadToEndOfLineTokenBuilder, SingleCharacterTokenBuilder\n'), ((998, 1039), 'token_builders.PrefixedStringTokenBuilder.__escape_z__', 'PrefixedStringTokenBuilder.__escape_z__', ([], {}), '()\n', (1037, 1039), False, 'from token_builders import InvalidTokenBuilder, NullTokenBuilder, WhitespaceTokenBuilder, NewlineTokenBuilder, EscapedStringTokenBuilder, PrefixedStringTokenBuilder, IntegerTokenBuilder, IntegerExponentTokenBuilder, PrefixedIntegerTokenBuilder, SuffixedIntegerTokenBuilder, RealTokenBuilder, IdentifierTokenBuilder, CaseInsensitiveListTokenBuilder, CaseSensitiveListTokenBuilder, LeadToEndOfLineTokenBuilder, SingleCharacterTokenBuilder\n'), ((1044, 1078), 'token_builders.IntegerTokenBuilder.__escape_z__', 'IntegerTokenBuilder.__escape_z__', ([], {}), '()\n', (1076, 1078), False, 'from token_builders import InvalidTokenBuilder, NullTokenBuilder, WhitespaceTokenBuilder, NewlineTokenBuilder, EscapedStringTokenBuilder, PrefixedStringTokenBuilder, IntegerTokenBuilder, IntegerExponentTokenBuilder, PrefixedIntegerTokenBuilder, SuffixedIntegerTokenBuilder, RealTokenBuilder, IdentifierTokenBuilder, CaseInsensitiveListTokenBuilder, CaseSensitiveListTokenBuilder, LeadToEndOfLineTokenBuilder, SingleCharacterTokenBuilder\n'), ((1083, 1125), 'token_builders.IntegerExponentTokenBuilder.__escape_z__', 'IntegerExponentTokenBuilder.__escape_z__', ([], {}), '()\n', (1123, 1125), False, 'from token_builders import InvalidTokenBuilder, NullTokenBuilder, WhitespaceTokenBuilder, NewlineTokenBuilder, EscapedStringTokenBuilder, PrefixedStringTokenBuilder, IntegerTokenBuilder, IntegerExponentTokenBuilder, PrefixedIntegerTokenBuilder, 
SuffixedIntegerTokenBuilder, RealTokenBuilder, IdentifierTokenBuilder, CaseInsensitiveListTokenBuilder, CaseSensitiveListTokenBuilder, LeadToEndOfLineTokenBuilder, SingleCharacterTokenBuilder\n'), ((1130, 1172), 'token_builders.PrefixedIntegerTokenBuilder.__escape_z__', 'PrefixedIntegerTokenBuilder.__escape_z__', ([], {}), '()\n', (1170, 1172), False, 'from token_builders import InvalidTokenBuilder, NullTokenBuilder, WhitespaceTokenBuilder, NewlineTokenBuilder, EscapedStringTokenBuilder, PrefixedStringTokenBuilder, IntegerTokenBuilder, IntegerExponentTokenBuilder, PrefixedIntegerTokenBuilder, SuffixedIntegerTokenBuilder, RealTokenBuilder, IdentifierTokenBuilder, CaseInsensitiveListTokenBuilder, CaseSensitiveListTokenBuilder, LeadToEndOfLineTokenBuilder, SingleCharacterTokenBuilder\n'), ((1177, 1219), 'token_builders.SuffixedIntegerTokenBuilder.__escape_z__', 'SuffixedIntegerTokenBuilder.__escape_z__', ([], {}), '()\n', (1217, 1219), False, 'from token_builders import InvalidTokenBuilder, NullTokenBuilder, WhitespaceTokenBuilder, NewlineTokenBuilder, EscapedStringTokenBuilder, PrefixedStringTokenBuilder, IntegerTokenBuilder, IntegerExponentTokenBuilder, PrefixedIntegerTokenBuilder, SuffixedIntegerTokenBuilder, RealTokenBuilder, IdentifierTokenBuilder, CaseInsensitiveListTokenBuilder, CaseSensitiveListTokenBuilder, LeadToEndOfLineTokenBuilder, SingleCharacterTokenBuilder\n'), ((1224, 1255), 'token_builders.RealTokenBuilder.__escape_z__', 'RealTokenBuilder.__escape_z__', ([], {}), '()\n', (1253, 1255), False, 'from token_builders import InvalidTokenBuilder, NullTokenBuilder, WhitespaceTokenBuilder, NewlineTokenBuilder, EscapedStringTokenBuilder, PrefixedStringTokenBuilder, IntegerTokenBuilder, IntegerExponentTokenBuilder, PrefixedIntegerTokenBuilder, SuffixedIntegerTokenBuilder, RealTokenBuilder, IdentifierTokenBuilder, CaseInsensitiveListTokenBuilder, CaseSensitiveListTokenBuilder, LeadToEndOfLineTokenBuilder, SingleCharacterTokenBuilder\n'), ((1260, 1297), 
'token_builders.IdentifierTokenBuilder.__escape_z__', 'IdentifierTokenBuilder.__escape_z__', ([], {}), '()\n', (1295, 1297), False, 'from token_builders import InvalidTokenBuilder, NullTokenBuilder, WhitespaceTokenBuilder, NewlineTokenBuilder, EscapedStringTokenBuilder, PrefixedStringTokenBuilder, IntegerTokenBuilder, IntegerExponentTokenBuilder, PrefixedIntegerTokenBuilder, SuffixedIntegerTokenBuilder, RealTokenBuilder, IdentifierTokenBuilder, CaseInsensitiveListTokenBuilder, CaseSensitiveListTokenBuilder, LeadToEndOfLineTokenBuilder, SingleCharacterTokenBuilder\n'), ((1302, 1348), 'token_builders.CaseInsensitiveListTokenBuilder.__escape_z__', 'CaseInsensitiveListTokenBuilder.__escape_z__', ([], {}), '()\n', (1346, 1348), False, 'from token_builders import InvalidTokenBuilder, NullTokenBuilder, WhitespaceTokenBuilder, NewlineTokenBuilder, EscapedStringTokenBuilder, PrefixedStringTokenBuilder, IntegerTokenBuilder, IntegerExponentTokenBuilder, PrefixedIntegerTokenBuilder, SuffixedIntegerTokenBuilder, RealTokenBuilder, IdentifierTokenBuilder, CaseInsensitiveListTokenBuilder, CaseSensitiveListTokenBuilder, LeadToEndOfLineTokenBuilder, SingleCharacterTokenBuilder\n'), ((1353, 1397), 'token_builders.CaseSensitiveListTokenBuilder.__escape_z__', 'CaseSensitiveListTokenBuilder.__escape_z__', ([], {}), '()\n', (1395, 1397), False, 'from token_builders import InvalidTokenBuilder, NullTokenBuilder, WhitespaceTokenBuilder, NewlineTokenBuilder, EscapedStringTokenBuilder, PrefixedStringTokenBuilder, IntegerTokenBuilder, IntegerExponentTokenBuilder, PrefixedIntegerTokenBuilder, SuffixedIntegerTokenBuilder, RealTokenBuilder, IdentifierTokenBuilder, CaseInsensitiveListTokenBuilder, CaseSensitiveListTokenBuilder, LeadToEndOfLineTokenBuilder, SingleCharacterTokenBuilder\n'), ((1402, 1444), 'token_builders.LeadToEndOfLineTokenBuilder.__escape_z__', 'LeadToEndOfLineTokenBuilder.__escape_z__', ([], {}), '()\n', (1442, 1444), False, 'from token_builders import InvalidTokenBuilder, 
NullTokenBuilder, WhitespaceTokenBuilder, NewlineTokenBuilder, EscapedStringTokenBuilder, PrefixedStringTokenBuilder, IntegerTokenBuilder, IntegerExponentTokenBuilder, PrefixedIntegerTokenBuilder, SuffixedIntegerTokenBuilder, RealTokenBuilder, IdentifierTokenBuilder, CaseInsensitiveListTokenBuilder, CaseSensitiveListTokenBuilder, LeadToEndOfLineTokenBuilder, SingleCharacterTokenBuilder\n'), ((1449, 1491), 'token_builders.SingleCharacterTokenBuilder.__escape_z__', 'SingleCharacterTokenBuilder.__escape_z__', ([], {}), '()\n', (1489, 1491), False, 'from token_builders import InvalidTokenBuilder, NullTokenBuilder, WhitespaceTokenBuilder, NewlineTokenBuilder, EscapedStringTokenBuilder, PrefixedStringTokenBuilder, IntegerTokenBuilder, IntegerExponentTokenBuilder, PrefixedIntegerTokenBuilder, SuffixedIntegerTokenBuilder, RealTokenBuilder, IdentifierTokenBuilder, CaseInsensitiveListTokenBuilder, CaseSensitiveListTokenBuilder, LeadToEndOfLineTokenBuilder, SingleCharacterTokenBuilder\n'), ((1496, 1528), 'assembly_token_builders.LabelTokenBuilder.__escape_z__', 'LabelTokenBuilder.__escape_z__', ([], {}), '()\n', (1526, 1528), False, 'from assembly_token_builders import LabelTokenBuilder, AssemblyCommentTokenBuilder, MultilineCommentTokenBuilder, HashQuoteCharTokenBuilder\n'), ((1533, 1575), 'assembly_token_builders.AssemblyCommentTokenBuilder.__escape_z__', 'AssemblyCommentTokenBuilder.__escape_z__', ([], {}), '()\n', (1573, 1575), False, 'from assembly_token_builders import LabelTokenBuilder, AssemblyCommentTokenBuilder, MultilineCommentTokenBuilder, HashQuoteCharTokenBuilder\n'), ((1580, 1623), 'assembly_token_builders.MultilineCommentTokenBuilder.__escape_z__', 'MultilineCommentTokenBuilder.__escape_z__', ([], {}), '()\n', (1621, 1623), False, 'from assembly_token_builders import LabelTokenBuilder, AssemblyCommentTokenBuilder, MultilineCommentTokenBuilder, HashQuoteCharTokenBuilder\n'), ((1628, 1668), 'assembly_token_builders.HashQuoteCharTokenBuilder.__escape_z__', 
'HashQuoteCharTokenBuilder.__escape_z__', ([], {}), '()\n', (1666, 1668), False, 'from assembly_token_builders import LabelTokenBuilder, AssemblyCommentTokenBuilder, MultilineCommentTokenBuilder, HashQuoteCharTokenBuilder\n'), ((1851, 1875), 'token_builders.WhitespaceTokenBuilder', 'WhitespaceTokenBuilder', ([], {}), '()\n', (1873, 1875), False, 'from token_builders import InvalidTokenBuilder, NullTokenBuilder, WhitespaceTokenBuilder, NewlineTokenBuilder, EscapedStringTokenBuilder, PrefixedStringTokenBuilder, IntegerTokenBuilder, IntegerExponentTokenBuilder, PrefixedIntegerTokenBuilder, SuffixedIntegerTokenBuilder, RealTokenBuilder, IdentifierTokenBuilder, CaseInsensitiveListTokenBuilder, CaseSensitiveListTokenBuilder, LeadToEndOfLineTokenBuilder, SingleCharacterTokenBuilder\n'), ((1893, 1914), 'token_builders.NewlineTokenBuilder', 'NewlineTokenBuilder', ([], {}), '()\n', (1912, 1914), False, 'from token_builders import InvalidTokenBuilder, NullTokenBuilder, WhitespaceTokenBuilder, NewlineTokenBuilder, EscapedStringTokenBuilder, PrefixedStringTokenBuilder, IntegerTokenBuilder, IntegerExponentTokenBuilder, PrefixedIntegerTokenBuilder, SuffixedIntegerTokenBuilder, RealTokenBuilder, IdentifierTokenBuilder, CaseInsensitiveListTokenBuilder, CaseSensitiveListTokenBuilder, LeadToEndOfLineTokenBuilder, SingleCharacterTokenBuilder\n'), ((1933, 1982), 'token_builders.LeadToEndOfLineTokenBuilder', 'LeadToEndOfLineTokenBuilder', (['""";"""', '(True)', '"""comment"""'], {}), "(';', True, 'comment')\n", (1960, 1982), False, 'from token_builders import InvalidTokenBuilder, NullTokenBuilder, WhitespaceTokenBuilder, NewlineTokenBuilder, EscapedStringTokenBuilder, PrefixedStringTokenBuilder, IntegerTokenBuilder, IntegerExponentTokenBuilder, PrefixedIntegerTokenBuilder, SuffixedIntegerTokenBuilder, RealTokenBuilder, IdentifierTokenBuilder, CaseInsensitiveListTokenBuilder, CaseSensitiveListTokenBuilder, LeadToEndOfLineTokenBuilder, SingleCharacterTokenBuilder\n'), ((2104, 2122), 
'token_builders.NullTokenBuilder', 'NullTokenBuilder', ([], {}), '()\n', (2120, 2122), False, 'from token_builders import InvalidTokenBuilder, NullTokenBuilder, WhitespaceTokenBuilder, NewlineTokenBuilder, EscapedStringTokenBuilder, PrefixedStringTokenBuilder, IntegerTokenBuilder, IntegerExponentTokenBuilder, PrefixedIntegerTokenBuilder, SuffixedIntegerTokenBuilder, RealTokenBuilder, IdentifierTokenBuilder, CaseInsensitiveListTokenBuilder, CaseSensitiveListTokenBuilder, LeadToEndOfLineTokenBuilder, SingleCharacterTokenBuilder\n'), ((2254, 2286), 'assembly_token_builders.AssemblyCommentTokenBuilder', 'AssemblyCommentTokenBuilder', (['"""*"""'], {}), "('*')\n", (2281, 2286), False, 'from assembly_token_builders import LabelTokenBuilder, AssemblyCommentTokenBuilder, MultilineCommentTokenBuilder, HashQuoteCharTokenBuilder\n'), ((2314, 2332), 'token_builders.NullTokenBuilder', 'NullTokenBuilder', ([], {}), '()\n', (2330, 2332), False, 'from token_builders import InvalidTokenBuilder, NullTokenBuilder, WhitespaceTokenBuilder, NewlineTokenBuilder, EscapedStringTokenBuilder, PrefixedStringTokenBuilder, IntegerTokenBuilder, IntegerExponentTokenBuilder, PrefixedIntegerTokenBuilder, SuffixedIntegerTokenBuilder, RealTokenBuilder, IdentifierTokenBuilder, CaseInsensitiveListTokenBuilder, CaseSensitiveListTokenBuilder, LeadToEndOfLineTokenBuilder, SingleCharacterTokenBuilder\n'), ((2452, 2470), 'token_builders.NullTokenBuilder', 'NullTokenBuilder', ([], {}), '()\n', (2468, 2470), False, 'from token_builders import InvalidTokenBuilder, NullTokenBuilder, WhitespaceTokenBuilder, NewlineTokenBuilder, EscapedStringTokenBuilder, PrefixedStringTokenBuilder, IntegerTokenBuilder, IntegerExponentTokenBuilder, PrefixedIntegerTokenBuilder, SuffixedIntegerTokenBuilder, RealTokenBuilder, IdentifierTokenBuilder, CaseInsensitiveListTokenBuilder, CaseSensitiveListTokenBuilder, LeadToEndOfLineTokenBuilder, SingleCharacterTokenBuilder\n'), ((2610, 2634), 'token_builders.IntegerTokenBuilder', 
'IntegerTokenBuilder', (['"""\'"""'], {}), '("\'")\n', (2629, 2634), False, 'from token_builders import InvalidTokenBuilder, NullTokenBuilder, WhitespaceTokenBuilder, NewlineTokenBuilder, EscapedStringTokenBuilder, PrefixedStringTokenBuilder, IntegerTokenBuilder, IntegerExponentTokenBuilder, PrefixedIntegerTokenBuilder, SuffixedIntegerTokenBuilder, RealTokenBuilder, IdentifierTokenBuilder, CaseInsensitiveListTokenBuilder, CaseSensitiveListTokenBuilder, LeadToEndOfLineTokenBuilder, SingleCharacterTokenBuilder\n'), ((2661, 2693), 'token_builders.IntegerExponentTokenBuilder', 'IntegerExponentTokenBuilder', (['"""\'"""'], {}), '("\'")\n', (2688, 2693), False, 'from token_builders import InvalidTokenBuilder, NullTokenBuilder, WhitespaceTokenBuilder, NewlineTokenBuilder, EscapedStringTokenBuilder, PrefixedStringTokenBuilder, IntegerTokenBuilder, IntegerExponentTokenBuilder, PrefixedIntegerTokenBuilder, SuffixedIntegerTokenBuilder, RealTokenBuilder, IdentifierTokenBuilder, CaseInsensitiveListTokenBuilder, CaseSensitiveListTokenBuilder, LeadToEndOfLineTokenBuilder, SingleCharacterTokenBuilder\n'), ((2713, 2731), 'token_builders.NullTokenBuilder', 'NullTokenBuilder', ([], {}), '()\n', (2729, 2731), False, 'from token_builders import InvalidTokenBuilder, NullTokenBuilder, WhitespaceTokenBuilder, NewlineTokenBuilder, EscapedStringTokenBuilder, PrefixedStringTokenBuilder, IntegerTokenBuilder, IntegerExponentTokenBuilder, PrefixedIntegerTokenBuilder, SuffixedIntegerTokenBuilder, RealTokenBuilder, IdentifierTokenBuilder, CaseInsensitiveListTokenBuilder, CaseSensitiveListTokenBuilder, LeadToEndOfLineTokenBuilder, SingleCharacterTokenBuilder\n'), ((2751, 2769), 'token_builders.NullTokenBuilder', 'NullTokenBuilder', ([], {}), '()\n', (2767, 2769), False, 'from token_builders import InvalidTokenBuilder, NullTokenBuilder, WhitespaceTokenBuilder, NewlineTokenBuilder, EscapedStringTokenBuilder, PrefixedStringTokenBuilder, IntegerTokenBuilder, IntegerExponentTokenBuilder, 
PrefixedIntegerTokenBuilder, SuffixedIntegerTokenBuilder, RealTokenBuilder, IdentifierTokenBuilder, CaseInsensitiveListTokenBuilder, CaseSensitiveListTokenBuilder, LeadToEndOfLineTokenBuilder, SingleCharacterTokenBuilder\n'), ((2796, 2848), 'token_builders.PrefixedIntegerTokenBuilder', 'PrefixedIntegerTokenBuilder', (['"""#"""', '(True)', '"""0123456789"""'], {}), "('#', True, '0123456789')\n", (2823, 2848), False, 'from token_builders import InvalidTokenBuilder, NullTokenBuilder, WhitespaceTokenBuilder, NewlineTokenBuilder, EscapedStringTokenBuilder, PrefixedStringTokenBuilder, IntegerTokenBuilder, IntegerExponentTokenBuilder, PrefixedIntegerTokenBuilder, SuffixedIntegerTokenBuilder, RealTokenBuilder, IdentifierTokenBuilder, CaseInsensitiveListTokenBuilder, CaseSensitiveListTokenBuilder, LeadToEndOfLineTokenBuilder, SingleCharacterTokenBuilder\n'), ((3156, 3220), 'token_builders.PrefixedIntegerTokenBuilder', 'PrefixedIntegerTokenBuilder', (['"""&"""', '(True)', '"""0123456789abcdefABCDEF"""'], {}), "('&', True, '0123456789abcdefABCDEF')\n", (3183, 3220), False, 'from token_builders import InvalidTokenBuilder, NullTokenBuilder, WhitespaceTokenBuilder, NewlineTokenBuilder, EscapedStringTokenBuilder, PrefixedStringTokenBuilder, IntegerTokenBuilder, IntegerExponentTokenBuilder, PrefixedIntegerTokenBuilder, SuffixedIntegerTokenBuilder, RealTokenBuilder, IdentifierTokenBuilder, CaseInsensitiveListTokenBuilder, CaseSensitiveListTokenBuilder, LeadToEndOfLineTokenBuilder, SingleCharacterTokenBuilder\n'), ((3244, 3309), 'token_builders.SuffixedIntegerTokenBuilder', 'SuffixedIntegerTokenBuilder', (['"""h"""', '(False)', '"""0123456789abcdefABCDEF"""'], {}), "('h', False, '0123456789abcdefABCDEF')\n", (3271, 3309), False, 'from token_builders import InvalidTokenBuilder, NullTokenBuilder, WhitespaceTokenBuilder, NewlineTokenBuilder, EscapedStringTokenBuilder, PrefixedStringTokenBuilder, IntegerTokenBuilder, IntegerExponentTokenBuilder, PrefixedIntegerTokenBuilder, 
SuffixedIntegerTokenBuilder, RealTokenBuilder, IdentifierTokenBuilder, CaseInsensitiveListTokenBuilder, CaseSensitiveListTokenBuilder, LeadToEndOfLineTokenBuilder, SingleCharacterTokenBuilder\n'), ((3333, 3397), 'token_builders.PrefixedIntegerTokenBuilder', 'PrefixedIntegerTokenBuilder', (['"""$"""', '(True)', '"""0123456789abcdefABCDEF"""'], {}), "('$', True, '0123456789abcdefABCDEF')\n", (3360, 3397), False, 'from token_builders import InvalidTokenBuilder, NullTokenBuilder, WhitespaceTokenBuilder, NewlineTokenBuilder, EscapedStringTokenBuilder, PrefixedStringTokenBuilder, IntegerTokenBuilder, IntegerExponentTokenBuilder, PrefixedIntegerTokenBuilder, SuffixedIntegerTokenBuilder, RealTokenBuilder, IdentifierTokenBuilder, CaseInsensitiveListTokenBuilder, CaseSensitiveListTokenBuilder, LeadToEndOfLineTokenBuilder, SingleCharacterTokenBuilder\n'), ((3421, 3486), 'token_builders.PrefixedIntegerTokenBuilder', 'PrefixedIntegerTokenBuilder', (['"""#$"""', '(True)', '"""0123456789abcdefABCDEF"""'], {}), "('#$', True, '0123456789abcdefABCDEF')\n", (3448, 3486), False, 'from token_builders import InvalidTokenBuilder, NullTokenBuilder, WhitespaceTokenBuilder, NewlineTokenBuilder, EscapedStringTokenBuilder, PrefixedStringTokenBuilder, IntegerTokenBuilder, IntegerExponentTokenBuilder, PrefixedIntegerTokenBuilder, SuffixedIntegerTokenBuilder, RealTokenBuilder, IdentifierTokenBuilder, CaseInsensitiveListTokenBuilder, CaseSensitiveListTokenBuilder, LeadToEndOfLineTokenBuilder, SingleCharacterTokenBuilder\n'), ((3514, 3532), 'token_builders.NullTokenBuilder', 'NullTokenBuilder', ([], {}), '()\n', (3530, 3532), False, 'from token_builders import InvalidTokenBuilder, NullTokenBuilder, WhitespaceTokenBuilder, NewlineTokenBuilder, EscapedStringTokenBuilder, PrefixedStringTokenBuilder, IntegerTokenBuilder, IntegerExponentTokenBuilder, PrefixedIntegerTokenBuilder, SuffixedIntegerTokenBuilder, RealTokenBuilder, IdentifierTokenBuilder, CaseInsensitiveListTokenBuilder, 
CaseSensitiveListTokenBuilder, LeadToEndOfLineTokenBuilder, SingleCharacterTokenBuilder\n'), ((3720, 3757), 'token_builders.IdentifierTokenBuilder', 'IdentifierTokenBuilder', (['leads', 'extras'], {}), '(leads, extras)\n', (3742, 3757), False, 'from token_builders import InvalidTokenBuilder, NullTokenBuilder, WhitespaceTokenBuilder, NewlineTokenBuilder, EscapedStringTokenBuilder, PrefixedStringTokenBuilder, IntegerTokenBuilder, IntegerExponentTokenBuilder, PrefixedIntegerTokenBuilder, SuffixedIntegerTokenBuilder, RealTokenBuilder, IdentifierTokenBuilder, CaseInsensitiveListTokenBuilder, CaseSensitiveListTokenBuilder, LeadToEndOfLineTokenBuilder, SingleCharacterTokenBuilder\n'), ((3813, 3850), 'assembly_token_builders.LabelTokenBuilder', 'LabelTokenBuilder', (['leads', 'extras', '""":"""'], {}), "(leads, extras, ':')\n", (3830, 3850), False, 'from assembly_token_builders import LabelTokenBuilder, AssemblyCommentTokenBuilder, MultilineCommentTokenBuilder, HashQuoteCharTokenBuilder\n'), ((3897, 3933), 'token_builders.EscapedStringTokenBuilder', 'EscapedStringTokenBuilder', (['quotes', '(0)'], {}), '(quotes, 0)\n', (3922, 3933), False, 'from token_builders import InvalidTokenBuilder, NullTokenBuilder, WhitespaceTokenBuilder, NewlineTokenBuilder, EscapedStringTokenBuilder, PrefixedStringTokenBuilder, IntegerTokenBuilder, IntegerExponentTokenBuilder, PrefixedIntegerTokenBuilder, SuffixedIntegerTokenBuilder, RealTokenBuilder, IdentifierTokenBuilder, CaseInsensitiveListTokenBuilder, CaseSensitiveListTokenBuilder, LeadToEndOfLineTokenBuilder, SingleCharacterTokenBuilder\n'), ((4351, 4408), 'token_builders.CaseInsensitiveListTokenBuilder', 'CaseInsensitiveListTokenBuilder', (['groupers', '"""group"""', '(False)'], {}), "(groupers, 'group', False)\n", (4382, 4408), False, 'from token_builders import InvalidTokenBuilder, NullTokenBuilder, WhitespaceTokenBuilder, NewlineTokenBuilder, EscapedStringTokenBuilder, PrefixedStringTokenBuilder, IntegerTokenBuilder, 
IntegerExponentTokenBuilder, PrefixedIntegerTokenBuilder, SuffixedIntegerTokenBuilder, RealTokenBuilder, IdentifierTokenBuilder, CaseInsensitiveListTokenBuilder, CaseSensitiveListTokenBuilder, LeadToEndOfLineTokenBuilder, SingleCharacterTokenBuilder\n'), ((4434, 4499), 'token_builders.CaseSensitiveListTokenBuilder', 'CaseSensitiveListTokenBuilder', (['known_operators', '"""operator"""', '(False)'], {}), "(known_operators, 'operator', False)\n", (4463, 4499), False, 'from token_builders import InvalidTokenBuilder, NullTokenBuilder, WhitespaceTokenBuilder, NewlineTokenBuilder, EscapedStringTokenBuilder, PrefixedStringTokenBuilder, IntegerTokenBuilder, IntegerExponentTokenBuilder, PrefixedIntegerTokenBuilder, SuffixedIntegerTokenBuilder, RealTokenBuilder, IdentifierTokenBuilder, CaseInsensitiveListTokenBuilder, CaseSensitiveListTokenBuilder, LeadToEndOfLineTokenBuilder, SingleCharacterTokenBuilder\n'), ((5530, 5599), 'token_builders.CaseInsensitiveListTokenBuilder', 'CaseInsensitiveListTokenBuilder', (['preprocessors', '"""preprocessor"""', '(False)'], {}), "(preprocessors, 'preprocessor', False)\n", (5561, 5599), False, 'from token_builders import InvalidTokenBuilder, NullTokenBuilder, WhitespaceTokenBuilder, NewlineTokenBuilder, EscapedStringTokenBuilder, PrefixedStringTokenBuilder, IntegerTokenBuilder, IntegerExponentTokenBuilder, PrefixedIntegerTokenBuilder, SuffixedIntegerTokenBuilder, RealTokenBuilder, IdentifierTokenBuilder, CaseInsensitiveListTokenBuilder, CaseSensitiveListTokenBuilder, LeadToEndOfLineTokenBuilder, SingleCharacterTokenBuilder\n'), ((8755, 8818), 'token_builders.CaseInsensitiveListTokenBuilder', 'CaseInsensitiveListTokenBuilder', (['directives', '"""directive"""', '(False)'], {}), "(directives, 'directive', False)\n", (8786, 8818), False, 'from token_builders import InvalidTokenBuilder, NullTokenBuilder, WhitespaceTokenBuilder, NewlineTokenBuilder, EscapedStringTokenBuilder, PrefixedStringTokenBuilder, IntegerTokenBuilder, 
IntegerExponentTokenBuilder, PrefixedIntegerTokenBuilder, SuffixedIntegerTokenBuilder, RealTokenBuilder, IdentifierTokenBuilder, CaseInsensitiveListTokenBuilder, CaseSensitiveListTokenBuilder, LeadToEndOfLineTokenBuilder, SingleCharacterTokenBuilder\n'), ((8845, 8901), 'token_builders.LeadToEndOfLineTokenBuilder', 'LeadToEndOfLineTokenBuilder', (['"""TITLE"""', '(False)', '"""directive"""'], {}), "('TITLE', False, 'directive')\n", (8872, 8901), False, 'from token_builders import InvalidTokenBuilder, NullTokenBuilder, WhitespaceTokenBuilder, NewlineTokenBuilder, EscapedStringTokenBuilder, PrefixedStringTokenBuilder, IntegerTokenBuilder, IntegerExponentTokenBuilder, PrefixedIntegerTokenBuilder, SuffixedIntegerTokenBuilder, RealTokenBuilder, IdentifierTokenBuilder, CaseInsensitiveListTokenBuilder, CaseSensitiveListTokenBuilder, LeadToEndOfLineTokenBuilder, SingleCharacterTokenBuilder\n'), ((8929, 8986), 'token_builders.LeadToEndOfLineTokenBuilder', 'LeadToEndOfLineTokenBuilder', (['""".TITLE"""', '(False)', '"""directive"""'], {}), "('.TITLE', False, 'directive')\n", (8956, 8986), False, 'from token_builders import InvalidTokenBuilder, NullTokenBuilder, WhitespaceTokenBuilder, NewlineTokenBuilder, EscapedStringTokenBuilder, PrefixedStringTokenBuilder, IntegerTokenBuilder, IntegerExponentTokenBuilder, PrefixedIntegerTokenBuilder, SuffixedIntegerTokenBuilder, RealTokenBuilder, IdentifierTokenBuilder, CaseInsensitiveListTokenBuilder, CaseSensitiveListTokenBuilder, LeadToEndOfLineTokenBuilder, SingleCharacterTokenBuilder\n'), ((9015, 9072), 'token_builders.LeadToEndOfLineTokenBuilder', 'LeadToEndOfLineTokenBuilder', (['"""SUBTTL"""', '(False)', '"""directive"""'], {}), "('SUBTTL', False, 'directive')\n", (9042, 9072), False, 'from token_builders import InvalidTokenBuilder, NullTokenBuilder, WhitespaceTokenBuilder, NewlineTokenBuilder, EscapedStringTokenBuilder, PrefixedStringTokenBuilder, IntegerTokenBuilder, IntegerExponentTokenBuilder, PrefixedIntegerTokenBuilder, 
SuffixedIntegerTokenBuilder, RealTokenBuilder, IdentifierTokenBuilder, CaseInsensitiveListTokenBuilder, CaseSensitiveListTokenBuilder, LeadToEndOfLineTokenBuilder, SingleCharacterTokenBuilder\n'), ((9103, 9161), 'token_builders.LeadToEndOfLineTokenBuilder', 'LeadToEndOfLineTokenBuilder', (['""".SUBTTL"""', '(False)', '"""directive"""'], {}), "('.SUBTTL', False, 'directive')\n", (9130, 9161), False, 'from token_builders import InvalidTokenBuilder, NullTokenBuilder, WhitespaceTokenBuilder, NewlineTokenBuilder, EscapedStringTokenBuilder, PrefixedStringTokenBuilder, IntegerTokenBuilder, IntegerExponentTokenBuilder, PrefixedIntegerTokenBuilder, SuffixedIntegerTokenBuilder, RealTokenBuilder, IdentifierTokenBuilder, CaseInsensitiveListTokenBuilder, CaseSensitiveListTokenBuilder, LeadToEndOfLineTokenBuilder, SingleCharacterTokenBuilder\n'), ((9192, 9249), 'token_builders.LeadToEndOfLineTokenBuilder', 'LeadToEndOfLineTokenBuilder', (['""".SBTTL"""', '(False)', '"""directive"""'], {}), "('.SBTTL', False, 'directive')\n", (9219, 9249), False, 'from token_builders import InvalidTokenBuilder, NullTokenBuilder, WhitespaceTokenBuilder, NewlineTokenBuilder, EscapedStringTokenBuilder, PrefixedStringTokenBuilder, IntegerTokenBuilder, IntegerExponentTokenBuilder, PrefixedIntegerTokenBuilder, SuffixedIntegerTokenBuilder, RealTokenBuilder, IdentifierTokenBuilder, CaseInsensitiveListTokenBuilder, CaseSensitiveListTokenBuilder, LeadToEndOfLineTokenBuilder, SingleCharacterTokenBuilder\n'), ((9277, 9335), 'token_builders.LeadToEndOfLineTokenBuilder', 'LeadToEndOfLineTokenBuilder', (['"""INCLUDE"""', '(False)', '"""directive"""'], {}), "('INCLUDE', False, 'directive')\n", (9304, 9335), False, 'from token_builders import InvalidTokenBuilder, NullTokenBuilder, WhitespaceTokenBuilder, NewlineTokenBuilder, EscapedStringTokenBuilder, PrefixedStringTokenBuilder, IntegerTokenBuilder, IntegerExponentTokenBuilder, PrefixedIntegerTokenBuilder, SuffixedIntegerTokenBuilder, RealTokenBuilder, 
IdentifierTokenBuilder, CaseInsensitiveListTokenBuilder, CaseSensitiveListTokenBuilder, LeadToEndOfLineTokenBuilder, SingleCharacterTokenBuilder\n'), ((9365, 9424), 'token_builders.LeadToEndOfLineTokenBuilder', 'LeadToEndOfLineTokenBuilder', (['""".INCLUDE"""', '(False)', '"""directive"""'], {}), "('.INCLUDE', False, 'directive')\n", (9392, 9424), False, 'from token_builders import InvalidTokenBuilder, NullTokenBuilder, WhitespaceTokenBuilder, NewlineTokenBuilder, EscapedStringTokenBuilder, PrefixedStringTokenBuilder, IntegerTokenBuilder, IntegerExponentTokenBuilder, PrefixedIntegerTokenBuilder, SuffixedIntegerTokenBuilder, RealTokenBuilder, IdentifierTokenBuilder, CaseInsensitiveListTokenBuilder, CaseSensitiveListTokenBuilder, LeadToEndOfLineTokenBuilder, SingleCharacterTokenBuilder\n'), ((9453, 9483), 'assembly_token_builders.MultilineCommentTokenBuilder', 'MultilineCommentTokenBuilder', ([], {}), '()\n', (9481, 9483), False, 'from assembly_token_builders import LabelTokenBuilder, AssemblyCommentTokenBuilder, MultilineCommentTokenBuilder, HashQuoteCharTokenBuilder\n'), ((24417, 24475), 'token_builders.CaseInsensitiveListTokenBuilder', 'CaseInsensitiveListTokenBuilder', (['opcodes', '"""keyword"""', '(False)'], {}), "(opcodes, 'keyword', False)\n", (24448, 24475), False, 'from token_builders import InvalidTokenBuilder, NullTokenBuilder, WhitespaceTokenBuilder, NewlineTokenBuilder, EscapedStringTokenBuilder, PrefixedStringTokenBuilder, IntegerTokenBuilder, IntegerExponentTokenBuilder, PrefixedIntegerTokenBuilder, SuffixedIntegerTokenBuilder, RealTokenBuilder, IdentifierTokenBuilder, CaseInsensitiveListTokenBuilder, CaseSensitiveListTokenBuilder, LeadToEndOfLineTokenBuilder, SingleCharacterTokenBuilder\n'), ((24494, 24554), 'token_builders.CaseInsensitiveListTokenBuilder', 'CaseInsensitiveListTokenBuilder', (['registers', '"""register"""', '(True)'], {}), "(registers, 'register', True)\n", (24525, 24554), False, 'from token_builders import InvalidTokenBuilder, 
NullTokenBuilder, WhitespaceTokenBuilder, NewlineTokenBuilder, EscapedStringTokenBuilder, PrefixedStringTokenBuilder, IntegerTokenBuilder, IntegerExponentTokenBuilder, PrefixedIntegerTokenBuilder, SuffixedIntegerTokenBuilder, RealTokenBuilder, IdentifierTokenBuilder, CaseInsensitiveListTokenBuilder, CaseSensitiveListTokenBuilder, LeadToEndOfLineTokenBuilder, SingleCharacterTokenBuilder\n'), ((24602, 24654), 'token_builders.CaseSensitiveListTokenBuilder', 'CaseSensitiveListTokenBuilder', (['values', '"""value"""', '(True)'], {}), "(values, 'value', True)\n", (24631, 24654), False, 'from token_builders import InvalidTokenBuilder, NullTokenBuilder, WhitespaceTokenBuilder, NewlineTokenBuilder, EscapedStringTokenBuilder, PrefixedStringTokenBuilder, IntegerTokenBuilder, IntegerExponentTokenBuilder, PrefixedIntegerTokenBuilder, SuffixedIntegerTokenBuilder, RealTokenBuilder, IdentifierTokenBuilder, CaseInsensitiveListTokenBuilder, CaseSensitiveListTokenBuilder, LeadToEndOfLineTokenBuilder, SingleCharacterTokenBuilder\n'), ((24718, 24739), 'token_builders.InvalidTokenBuilder', 'InvalidTokenBuilder', ([], {}), '()\n', (24737, 24739), False, 'from token_builders import InvalidTokenBuilder, NullTokenBuilder, WhitespaceTokenBuilder, NewlineTokenBuilder, EscapedStringTokenBuilder, PrefixedStringTokenBuilder, IntegerTokenBuilder, IntegerExponentTokenBuilder, PrefixedIntegerTokenBuilder, SuffixedIntegerTokenBuilder, RealTokenBuilder, IdentifierTokenBuilder, CaseInsensitiveListTokenBuilder, CaseSensitiveListTokenBuilder, LeadToEndOfLineTokenBuilder, SingleCharacterTokenBuilder\n'), ((26314, 26338), 'codestat_tokenizer.Tokenizer', 'Tokenizer', (['tokenbuilders'], {}), '(tokenbuilders)\n', (26323, 26338), False, 'from codestat_tokenizer import Tokenizer\n'), ((26362, 26393), 'codestat_tokenizer.Tokenizer', 'Tokenizer', (['opcode_tokenbuilders'], {}), '(opcode_tokenbuilders)\n', (26371, 26393), False, 'from codestat_tokenizer import Tokenizer\n'), ((26415, 26444), 
'codestat_tokenizer.Tokenizer', 'Tokenizer', (['args_tokenbuilders'], {}), '(args_tokenbuilders)\n', (26424, 26444), False, 'from codestat_tokenizer import Tokenizer\n'), ((26537, 26612), 'examiner.Examiner.combine_adjacent_identical_tokens', 'Examiner.combine_adjacent_identical_tokens', (['tokens_free', '"""invalid operator"""'], {}), "(tokens_free, 'invalid operator')\n", (26579, 26612), False, 'from examiner import Examiner\n'), ((26631, 26697), 'examiner.Examiner.combine_adjacent_identical_tokens', 'Examiner.combine_adjacent_identical_tokens', (['tokens_free', '"""invalid"""'], {}), "(tokens_free, 'invalid')\n", (26673, 26697), False, 'from examiner import Examiner\n'), ((26716, 26783), 'examiner.Examiner.combine_identifier_colon', 'Examiner.combine_identifier_colon', (['tokens_free', "['newline']", '[]', '[]'], {}), "(tokens_free, ['newline'], [], [])\n", (26749, 26783), False, 'from examiner import Examiner\n'), ((26802, 26863), 'codestat_tokenizer.Tokenizer.combine_number_and_adjacent_identifier', 'Tokenizer.combine_number_and_adjacent_identifier', (['tokens_free'], {}), '(tokens_free)\n', (26850, 26863), False, 'from codestat_tokenizer import Tokenizer\n'), ((26882, 26948), 'examiner.Examiner.convert_values_to_operators', 'Examiner.convert_values_to_operators', (['tokens_free', 'known_operators'], {}), '(tokens_free, known_operators)\n', (26918, 26948), False, 'from examiner import Examiner\n'), ((2034, 2083), 'token_builders.LeadToEndOfLineTokenBuilder', 'LeadToEndOfLineTokenBuilder', (['"""/"""', '(True)', '"""comment"""'], {}), "('/', True, 'comment')\n", (2061, 2083), False, 'from token_builders import InvalidTokenBuilder, NullTokenBuilder, WhitespaceTokenBuilder, NewlineTokenBuilder, EscapedStringTokenBuilder, PrefixedStringTokenBuilder, IntegerTokenBuilder, IntegerExponentTokenBuilder, PrefixedIntegerTokenBuilder, SuffixedIntegerTokenBuilder, RealTokenBuilder, IdentifierTokenBuilder, CaseInsensitiveListTokenBuilder, CaseSensitiveListTokenBuilder, 
LeadToEndOfLineTokenBuilder, SingleCharacterTokenBuilder\n'), ((2175, 2225), 'token_builders.LeadToEndOfLineTokenBuilder', 'LeadToEndOfLineTokenBuilder', (['""".."""', '(True)', '"""comment"""'], {}), "('..', True, 'comment')\n", (2202, 2225), False, 'from token_builders import InvalidTokenBuilder, NullTokenBuilder, WhitespaceTokenBuilder, NewlineTokenBuilder, EscapedStringTokenBuilder, PrefixedStringTokenBuilder, IntegerTokenBuilder, IntegerExponentTokenBuilder, PrefixedIntegerTokenBuilder, SuffixedIntegerTokenBuilder, RealTokenBuilder, IdentifierTokenBuilder, CaseInsensitiveListTokenBuilder, CaseSensitiveListTokenBuilder, LeadToEndOfLineTokenBuilder, SingleCharacterTokenBuilder\n'), ((2394, 2426), 'assembly_token_builders.AssemblyCommentTokenBuilder', 'AssemblyCommentTokenBuilder', (['"""#"""'], {}), "('#')\n", (2421, 2426), False, 'from assembly_token_builders import LabelTokenBuilder, AssemblyCommentTokenBuilder, MultilineCommentTokenBuilder, HashQuoteCharTokenBuilder\n'), ((2529, 2591), 'token_builders.SingleCharacterTokenBuilder', 'SingleCharacterTokenBuilder', (['""";"""', '"""statement separator"""', '(False)'], {}), "(';', 'statement separator', False)\n", (2556, 2591), False, 'from token_builders import InvalidTokenBuilder, NullTokenBuilder, WhitespaceTokenBuilder, NewlineTokenBuilder, EscapedStringTokenBuilder, PrefixedStringTokenBuilder, IntegerTokenBuilder, IntegerExponentTokenBuilder, PrefixedIntegerTokenBuilder, SuffixedIntegerTokenBuilder, RealTokenBuilder, IdentifierTokenBuilder, CaseInsensitiveListTokenBuilder, CaseSensitiveListTokenBuilder, LeadToEndOfLineTokenBuilder, SingleCharacterTokenBuilder\n'), ((2902, 2954), 'token_builders.SuffixedIntegerTokenBuilder', 'SuffixedIntegerTokenBuilder', (['"""$"""', '(True)', '"""0123456789"""'], {}), "('$', True, '0123456789')\n", (2929, 2954), False, 'from token_builders import InvalidTokenBuilder, NullTokenBuilder, WhitespaceTokenBuilder, NewlineTokenBuilder, EscapedStringTokenBuilder, 
PrefixedStringTokenBuilder, IntegerTokenBuilder, IntegerExponentTokenBuilder, PrefixedIntegerTokenBuilder, SuffixedIntegerTokenBuilder, RealTokenBuilder, IdentifierTokenBuilder, CaseInsensitiveListTokenBuilder, CaseSensitiveListTokenBuilder, LeadToEndOfLineTokenBuilder, SingleCharacterTokenBuilder\n'), ((3005, 3057), 'token_builders.SuffixedIntegerTokenBuilder', 'SuffixedIntegerTokenBuilder', (['"""O"""', '(True)', '"""0123456789"""'], {}), "('O', True, '0123456789')\n", (3032, 3057), False, 'from token_builders import InvalidTokenBuilder, NullTokenBuilder, WhitespaceTokenBuilder, NewlineTokenBuilder, EscapedStringTokenBuilder, PrefixedStringTokenBuilder, IntegerTokenBuilder, IntegerExponentTokenBuilder, PrefixedIntegerTokenBuilder, SuffixedIntegerTokenBuilder, RealTokenBuilder, IdentifierTokenBuilder, CaseInsensitiveListTokenBuilder, CaseSensitiveListTokenBuilder, LeadToEndOfLineTokenBuilder, SingleCharacterTokenBuilder\n'), ((3079, 3131), 'token_builders.SuffixedIntegerTokenBuilder', 'SuffixedIntegerTokenBuilder', (['"""D"""', '(True)', '"""0123456789"""'], {}), "('D', True, '0123456789')\n", (3106, 3131), False, 'from token_builders import InvalidTokenBuilder, NullTokenBuilder, WhitespaceTokenBuilder, NewlineTokenBuilder, EscapedStringTokenBuilder, PrefixedStringTokenBuilder, IntegerTokenBuilder, IntegerExponentTokenBuilder, PrefixedIntegerTokenBuilder, SuffixedIntegerTokenBuilder, RealTokenBuilder, IdentifierTokenBuilder, CaseInsensitiveListTokenBuilder, CaseSensitiveListTokenBuilder, LeadToEndOfLineTokenBuilder, SingleCharacterTokenBuilder\n'), ((3594, 3621), 'assembly_token_builders.HashQuoteCharTokenBuilder', 'HashQuoteCharTokenBuilder', ([], {}), '()\n', (3619, 3621), False, 'from assembly_token_builders import LabelTokenBuilder, AssemblyCommentTokenBuilder, MultilineCommentTokenBuilder, HashQuoteCharTokenBuilder\n'), ((27964, 28149), 'codestat_tokenizer.Tokenizer.tokenize_asm_code', 'Tokenizer.tokenize_asm_code', (['code', 'tab_size', 'opcode_tokenizer', 
'opcode_extras', 'args_tokenizer', 'label_leads', 'label_mids', 'label_ends', 'comment_leads', 'line_comment_leads', 'use_line_id'], {}), '(code, tab_size, opcode_tokenizer, opcode_extras,\n args_tokenizer, label_leads, label_mids, label_ends, comment_leads,\n line_comment_leads, use_line_id)\n', (27991, 28149), False, 'from codestat_tokenizer import Tokenizer\n'), ((28163, 28239), 'examiner.Examiner.combine_adjacent_identical_tokens', 'Examiner.combine_adjacent_identical_tokens', (['tokens_space', '"""invalid operator"""'], {}), "(tokens_space, 'invalid operator')\n", (28205, 28239), False, 'from examiner import Examiner\n'), ((28261, 28328), 'examiner.Examiner.combine_adjacent_identical_tokens', 'Examiner.combine_adjacent_identical_tokens', (['tokens_space', '"""invalid"""'], {}), "(tokens_space, 'invalid')\n", (28303, 28328), False, 'from examiner import Examiner\n'), ((28350, 28418), 'examiner.Examiner.combine_identifier_colon', 'Examiner.combine_identifier_colon', (['tokens_space', "['newline']", '[]', '[]'], {}), "(tokens_space, ['newline'], [], [])\n", (28383, 28418), False, 'from examiner import Examiner\n'), ((28440, 28502), 'codestat_tokenizer.Tokenizer.combine_number_and_adjacent_identifier', 'Tokenizer.combine_number_and_adjacent_identifier', (['tokens_space'], {}), '(tokens_space)\n', (28488, 28502), False, 'from codestat_tokenizer import Tokenizer\n'), ((28524, 28591), 'examiner.Examiner.convert_values_to_operators', 'Examiner.convert_values_to_operators', (['tokens_space', 'known_operators'], {}), '(tokens_space, known_operators)\n', (28560, 28591), False, 'from examiner import Examiner\n'), ((30269, 30323), 'codestat_token.Token', 'Token', (['(new_token.text + token.text)', '"""identifier"""', '(True)'], {}), "(new_token.text + token.text, 'identifier', True)\n", (30274, 30323), False, 'from codestat_token import Token\n')] |
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.contrib import admin
from .models import (
BaseModel
)
# Models exposed through the default Django admin site.
classes = [BaseModel]

for model_class in classes:
    admin.site.register(model_class)
| [
"django.contrib.admin.site.register"
] | [((184, 206), 'django.contrib.admin.site.register', 'admin.site.register', (['c'], {}), '(c)\n', (203, 206), False, 'from django.contrib import admin\n')] |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
__author__ = 'ipetrash'
from PyQt5.QtWidgets import QWidget, QFormLayout, QComboBox, QSpinBox, QCheckBox
from PyQt5.QtCore import QSettings, pyqtSignal
from common import IMAGE_HASH_ALGO, DEFAULT_IMAGE_HASH_ALGO, DEFAULT_IMAGE_HASH_MAX_SCORE
class SearchForSimilarSettingsWidget(QWidget):
    """Settings panel for the similar-image search: hash algorithm,
    maximum score, and whether matching items should be marked."""

    # Emitted with the checkbox state when "mark matching" is toggled.
    about_mark_matching = pyqtSignal(bool)

    def __init__(self):
        super().__init__()

        self.setWindowTitle("Search for similar")

        self.cb_algo = QComboBox()
        self.cb_algo.addItems(IMAGE_HASH_ALGO)

        self.sb_max_score = QSpinBox()

        self.cb_mark_matching = QCheckBox()
        self.cb_mark_matching.clicked.connect(self.about_mark_matching)

        form = QFormLayout()
        form.addRow("Hash algo:", self.cb_algo)
        form.addRow("Max score:", self.sb_max_score)
        form.addRow("Mark matching:", self.cb_mark_matching)
        self.setLayout(form)

    def read_settings(self, ini: QSettings):
        """Restore widget state from *ini* (stored under this class's name)."""
        ini.beginGroup(self.__class__.__name__)
        algo = ini.value('algo', DEFAULT_IMAGE_HASH_ALGO)
        max_score = int(ini.value('max_score', DEFAULT_IMAGE_HASH_MAX_SCORE))
        # QSettings persists booleans as the strings 'true'/'false'.
        mark_matching = ini.value('mark_matching', 'true') == 'true'
        ini.endGroup()

        self.cb_algo.setCurrentText(algo)
        self.sb_max_score.setValue(max_score)
        self.cb_mark_matching.setChecked(mark_matching)

    def write_settings(self, ini: QSettings):
        """Persist widget state into *ini* (under this class's name)."""
        ini.beginGroup(self.__class__.__name__)
        ini.setValue('algo', self.cb_algo.currentText())
        ini.setValue('max_score', self.sb_max_score.value())
        ini.setValue('mark_matching', self.cb_mark_matching.isChecked())
        ini.endGroup()
| [
"PyQt5.QtCore.pyqtSignal",
"PyQt5.QtWidgets.QSpinBox",
"PyQt5.QtWidgets.QComboBox",
"PyQt5.QtWidgets.QFormLayout",
"PyQt5.QtWidgets.QCheckBox"
] | [((368, 384), 'PyQt5.QtCore.pyqtSignal', 'pyqtSignal', (['bool'], {}), '(bool)\n', (378, 384), False, 'from PyQt5.QtCore import QSettings, pyqtSignal\n'), ((512, 523), 'PyQt5.QtWidgets.QComboBox', 'QComboBox', ([], {}), '()\n', (521, 523), False, 'from PyQt5.QtWidgets import QWidget, QFormLayout, QComboBox, QSpinBox, QCheckBox\n'), ((600, 610), 'PyQt5.QtWidgets.QSpinBox', 'QSpinBox', ([], {}), '()\n', (608, 610), False, 'from PyQt5.QtWidgets import QWidget, QFormLayout, QComboBox, QSpinBox, QCheckBox\n'), ((644, 655), 'PyQt5.QtWidgets.QCheckBox', 'QCheckBox', ([], {}), '()\n', (653, 655), False, 'from PyQt5.QtWidgets import QWidget, QFormLayout, QComboBox, QSpinBox, QCheckBox\n'), ((746, 759), 'PyQt5.QtWidgets.QFormLayout', 'QFormLayout', ([], {}), '()\n', (757, 759), False, 'from PyQt5.QtWidgets import QWidget, QFormLayout, QComboBox, QSpinBox, QCheckBox\n')] |
from flask import Blueprint
# Blueprint grouping the authentication views; templates are resolved
# from this package's local ``templates`` directory.
auth_blueprint = Blueprint(
    'auth_blueprint', __name__, template_folder='templates',
)
| [
"flask.Blueprint"
] | [((47, 113), 'flask.Blueprint', 'Blueprint', (['"""auth_blueprint"""', '__name__'], {'template_folder': '"""templates"""'}), "('auth_blueprint', __name__, template_folder='templates')\n", (56, 113), False, 'from flask import Blueprint\n')] |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Copyright (c) 2017~2999 - cologler <<EMAIL>>
# ----------
#
# ----------
from pytest import raises
from singletonify import singleton
def test_base():
    """A singleton class is lazily initialized and always yields one instance."""
    @singleton()
    class Single:
        pass

    assert not Single._is_init()
    assert Single() is Single()
    assert Single._is_init()
def test_with_args():
    """Keyword arguments given to the decorator are forwarded to __init__."""
    @singleton(x='s')
    class Holder:
        def __init__(self, x):
            self.x = x

    instance = Holder()
    assert instance is Holder()
    assert instance.x == 's'
def test_instance_check():
    """isinstance still recognizes instances of a singleton class."""
    @singleton()
    class Lone:
        pass

    obj = Lone()
    assert isinstance(obj, Lone)
def test_subclass_check():
    """A singleton class remains a subclass of its (plain) base."""
    class Base:
        pass

    @singleton()
    class Derived(Base):
        pass

    assert issubclass(Derived, Base)
def test_multi_apply():
    """Independently decorated classes keep independent instances."""
    @singleton()
    class First:
        pass

    @singleton()
    class Second:
        pass

    assert First() is First()
    assert Second() is Second()
    assert First() is not Second()
def test_with_slots():
    """__slots__ on the decorated class is respected (no __dict__)."""
    @singleton()
    class Plain:
        pass

    @singleton()
    class Slotted:
        __slots__ = ('buffer', )

    assert hasattr(Plain(), '__dict__')
    assert not hasattr(Slotted(), '__dict__')
def test_inherit():
    """Only the decorated subclass is a singleton; the base stays normal."""
    class Base:
        pass

    @singleton()
    class Derived(Base):
        pass

    assert Derived() is Derived()
    assert Base() is not Base()
    assert Derived() is not Base()
    assert type(Derived()) is Derived
    assert isinstance(Derived(), Derived)
def test_inherit_from_singleton():
    """Subclassing an existing singleton class must raise TypeError."""
    @singleton()
    class Base:
        pass

    with raises(TypeError, match='cannot inherit from a singleton class'):
        @singleton()
        class Derived(Base):
            pass
| [
"pytest.raises",
"singletonify.singleton"
] | [((210, 221), 'singletonify.singleton', 'singleton', ([], {}), '()\n', (219, 221), False, 'from singletonify import singleton\n'), ((350, 366), 'singletonify.singleton', 'singleton', ([], {'x': '"""s"""'}), "(x='s')\n", (359, 366), False, 'from singletonify import singleton\n'), ((513, 524), 'singletonify.singleton', 'singleton', ([], {}), '()\n', (522, 524), False, 'from singletonify import singleton\n'), ((641, 652), 'singletonify.singleton', 'singleton', ([], {}), '()\n', (650, 652), False, 'from singletonify import singleton\n'), ((741, 752), 'singletonify.singleton', 'singleton', ([], {}), '()\n', (750, 752), False, 'from singletonify import singleton\n'), ((785, 796), 'singletonify.singleton', 'singleton', ([], {}), '()\n', (794, 796), False, 'from singletonify import singleton\n'), ((923, 934), 'singletonify.singleton', 'singleton', ([], {}), '()\n', (932, 934), False, 'from singletonify import singleton\n'), ((967, 978), 'singletonify.singleton', 'singleton', ([], {}), '()\n', (976, 978), False, 'from singletonify import singleton\n'), ((1155, 1166), 'singletonify.singleton', 'singleton', ([], {}), '()\n', (1164, 1166), False, 'from singletonify import singleton\n'), ((1368, 1379), 'singletonify.singleton', 'singleton', ([], {}), '()\n', (1377, 1379), False, 'from singletonify import singleton\n'), ((1437, 1501), 'pytest.raises', 'raises', (['TypeError'], {'match': '"""cannot inherit from a singleton class"""'}), "(TypeError, match='cannot inherit from a singleton class')\n", (1443, 1501), False, 'from pytest import raises\n'), ((1512, 1523), 'singletonify.singleton', 'singleton', ([], {}), '()\n', (1521, 1523), False, 'from singletonify import singleton\n')] |
from flask import Flask, request, jsonify
from ..common import app, db, getJson
from .model import User
from .method import *
@app.route("/api/user/register", methods=["POST"])
def register():
json = getJson()
username = json['username']
password = json['password']
userId = addUser(username, password)
if userId != 0:
return jsonify({"status": "ok", "username": username, "user id": userId})
else:
return jsonify({"status": "error", "username": username, "message": username + " is exist"})
@app.route("/api/user/login", methods=["GET", "POST"])
def login():
json = getJson()
username = json['username']
password = json['password']
if checkPassword(username, password):
return jsonify({"status": "ok", "username": username})
else:
return jsonify({"status": "error", "username": username, "message": "Auth failed"})
| [
"flask.jsonify"
] | [((355, 421), 'flask.jsonify', 'jsonify', (["{'status': 'ok', 'username': username, 'user id': userId}"], {}), "({'status': 'ok', 'username': username, 'user id': userId})\n", (362, 421), False, 'from flask import Flask, request, jsonify\n'), ((447, 536), 'flask.jsonify', 'jsonify', (["{'status': 'error', 'username': username, 'message': username + ' is exist'}"], {}), "({'status': 'error', 'username': username, 'message': username +\n ' is exist'})\n", (454, 536), False, 'from flask import Flask, request, jsonify\n'), ((744, 791), 'flask.jsonify', 'jsonify', (["{'status': 'ok', 'username': username}"], {}), "({'status': 'ok', 'username': username})\n", (751, 791), False, 'from flask import Flask, request, jsonify\n'), ((817, 893), 'flask.jsonify', 'jsonify', (["{'status': 'error', 'username': username, 'message': 'Auth failed'}"], {}), "({'status': 'error', 'username': username, 'message': 'Auth failed'})\n", (824, 893), False, 'from flask import Flask, request, jsonify\n')] |
import subprocess
import pytest
import os
import json
def test_call_generate_promoter_terminator():
    """End-to-end check: run the generator script and validate its GFF output."""
    # The original referenced ``tools.get_sequence_hash`` without ever
    # importing ``tools``, which raised NameError at run time.
    import tools

    print('')
    process_result = subprocess.run(
        ['python', 'generate_promoter_terminator.py', './test/1.gff.json', '500', '200'],
        capture_output=True)
    assert process_result.returncode == 0

    # The script prints a JSON "result" object as its last output line.
    result_line = process_result.stdout.decode().splitlines()[-1]
    result_obj = json.loads(result_line)
    assert result_obj['type'] == 'result'
    file_url = result_obj['data']['files'][0]['url']
    assert file_url

    with open(os.path.join('test', '1.gff.json')) as fp:
        src_gff = json.load(fp)
    with open(os.path.join('results', file_url)) as fp:
        dst_gff = json.load(fp)

    try:
        # Promoters/terminators were added, so the output must have grown.
        assert len(dst_gff['records']) > len(src_gff['records'])
        # Every record must carry a hash consistent with its coordinates.
        for record in dst_gff['records']:
            assert 'sequenceHash' in record
            assert record['sequenceHash'] == tools.get_sequence_hash(
                dst_gff, record['chrName'], record['start'],
                record['end'], record['strand'])
    finally:
        # Remove the generated file even if an assertion above fails.
        os.remove(os.path.join('results', file_url))
| [
"json.load",
"json.loads",
"subprocess.run",
"os.path.join"
] | [((136, 257), 'subprocess.run', 'subprocess.run', (["['python', 'generate_promoter_terminator.py', './test/1.gff.json', '500', '200'\n ]"], {'capture_output': '(True)'}), "(['python', 'generate_promoter_terminator.py',\n './test/1.gff.json', '500', '200'], capture_output=True)\n", (150, 257), False, 'import subprocess\n'), ((403, 426), 'json.loads', 'json.loads', (['result_line'], {}), '(result_line)\n', (413, 426), False, 'import json\n'), ((618, 631), 'json.load', 'json.load', (['fp'], {}), '(fp)\n', (627, 631), False, 'import json\n'), ((706, 719), 'json.load', 'json.load', (['fp'], {}), '(fp)\n', (715, 719), False, 'import json\n'), ((1052, 1085), 'os.path.join', 'os.path.join', (['"""results"""', 'file_url'], {}), "('results', file_url)\n", (1064, 1085), False, 'import os\n'), ((557, 591), 'os.path.join', 'os.path.join', (['"""test"""', '"""1.gff.json"""'], {}), "('test', '1.gff.json')\n", (569, 591), False, 'import os\n'), ((646, 679), 'os.path.join', 'os.path.join', (['"""results"""', 'file_url'], {}), "('results', file_url)\n", (658, 679), False, 'import os\n')] |
"""
VF: Validation Functions (for Python dicts.)
Copyright (c) 2020 Polydojo, Inc.
SOFTWARE LICENSING
------------------
The software is released "AS IS" under the MIT License,
WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED. Kindly
see LICENSE.txt for more details.
NO TRADEMARK RIGHTS
-------------------
The above software licensing terms DO NOT grant any right in the
trademarks, service marks, brand names or logos of Polydojo, Inc.
""";
import functools;
import re;
__version__ = "0.0.2"; # Req'd by flit.
############################################################
# SIMPLE: ##################################################
############################################################
# PEP 8 (E731): use `def` instead of assigning lambdas, so the functions
# get proper names in tracebacks and can carry docstrings.
def identity (x):
    "Returns `x` unchanged."
    return x

def truthy (x):
    "Returns True if `x` is truthy."
    return bool(x)

def falsy (x):
    "Returns True if `x` is falsy."
    return not x

def noneIs (x):
    "Returns True if `x` is None."
    return x is None
############################################################
# CHECKER MAKERS: ##########################################
############################################################
def typeIs (typ): # <-- TODO: Intro truthy option.
    "Makes `func (x)` for checking `type(x) is typ`."
    def checker (x):
        return type(x) is typ
    return checker
def instanceOf (*typs):
    "Makes `func (x)` for checking `isinstance(x, typs)`."
    def checker (x):
        return isinstance(x, typs)
    return checker
def typeIn (*typs):
    "Makes `func (x)` for checking `type(x) in typs`."
    def checker (x):
        return type(x) in typs
    return checker
def patternIs (pattern):
    "Makes `func (s)` for checking `s` against `pattern`."
    if type(pattern) is str:
        # Uncompiled pattern: delegate compilation to `re.match` per call.
        def strChecker (s):
            return bool(re.match(pattern, s))
        return strChecker
    if type(pattern) is re.Pattern:
        def compiledChecker (s):
            return bool(pattern.match(s))
        return compiledChecker
    raise ValueError("Expected `pattern` to be of type "
        "`str` or `re.Pattern`, not: %r" % (pattern,)
    );
def allOf (*fns):
    "Makes `func (x)` for checking `all(fn(x) for fn in fns)`."
    def checker (x):
        return all(fn(x) for fn in fns)
    return checker
def anyOf (*fns):
    "Makes `func (x)` for checking `any(fn(x) for fn in fns)`."
    def checker (x):
        return any(fn(x) for fn in fns)
    return checker
def listOf (fn, minLen=0):
    "Makes `func (li)` for checking `all(fn(x) for x in li)`."
    def checker (li):
        if not isinstance(li, list):
            return False
        if len(li) < minLen:
            return False
        return all(map(fn, li))
    return checker
############################################################
# DICT VALIDATION: #########################################
############################################################
class BadSchemaError (ValueError):
    "Raised when a schema itself is malformed."

class ValidationError (ValueError):
    "Raised when a dict fails validation against its schema."
def _validateSchemaItself (schema):
"Ensures that `schema` itself is valid.";
if not isinstance(schema, dict):
raise BadSchemaError("Not an instance of `dict`.");
for key, rhsFn in schema.items():
if not callable(rhsFn):
raise BadSchemaError(
"Non-callable value against key: %r" % (key,),
);
return True;
def dictOf (schema, extraKeysOk=False):
"Makes `func (d)` for VALIDATING `d` against `schema`.";
assert _validateSchemaItself(schema);
def validateFn (d):
if not isinstance(d, dict):
raise ValidationError(
"Expected dict-like object, not: %r" % (d,),
);
dKeySet = set(d.keys());
sKeySet = set(schema.keys());
if not dKeySet.issuperset(sKeySet):
raise ValidationError("Dict-like object is missing " +
"required keys: {}".format(sKeySet - dKeySet) #+
);
if (not extraKeysOk) and (dKeySet != sKeySet):
raise ValidationError("Dict-like object has " +
"excess keys: {}".format(dKeySet - sKeySet) #+
);
for key, rhsFn in schema.items():
assert callable(rhsFn);
if not rhsFn(d[key]):
raise ValidationError(
("Against key: %r\n" % (key,)) +
("Unexpected value: %r" % (d[key],)) #+
);
return True;
return validateFn;
# End ######################################################
| [
"re.match"
] | [((1528, 1548), 're.match', 're.match', (['pattern', 's'], {}), '(pattern, s)\n', (1536, 1548), False, 'import re\n')] |
from flask_wtf import FlaskForm
from wtforms import StringField, PasswordField, validators
from wtforms.fields.html5 import EmailField
class RegisterForm(FlaskForm):
    """Sign-up form: name fields, e-mail, and a double-entry password."""
    firstname = StringField(u"Firstname", [validators.Length(min=2, max=256)])
    # Consistency fix: use the same capitalized ``Length`` validator as
    # ``firstname`` (``validators.length`` is only a lowercase alias).
    lastname = StringField(u"Lastname", [validators.Length(min=2, max=256)])
    email = EmailField(u"Email", [validators.InputRequired(), validators.Email()])
    # ``EqualTo('passwordb')`` forces both password entries to match.
    password = PasswordField(u"Password", [validators.InputRequired(), validators.EqualTo('passwordb')])
    passwordb = PasswordField(u"Confirm password")

    class Meta:
        # CSRF protection is disabled for this form.
        csrf = False
class LoginForm(FlaskForm):
    """Sign-in form: e-mail plus password."""
    # NOTE(review): uses StringField where RegisterForm uses EmailField --
    # confirm the login input is intentionally not rendered as type="email".
    email = StringField(u"Email", [validators.InputRequired(), validators.Email()])
password = PasswordField(u"Password", [validators.InputRequired()]) | [
"wtforms.validators.Email",
"wtforms.PasswordField",
"wtforms.validators.EqualTo",
"wtforms.validators.length",
"wtforms.validators.Length",
"wtforms.validators.InputRequired"
] | [((527, 561), 'wtforms.PasswordField', 'PasswordField', (['u"""Confirm password"""'], {}), "(u'Confirm password')\n", (540, 561), False, 'from wtforms import StringField, PasswordField, validators\n'), ((210, 243), 'wtforms.validators.Length', 'validators.Length', ([], {'min': '(2)', 'max': '(256)'}), '(min=2, max=256)\n', (227, 243), False, 'from wtforms import StringField, PasswordField, validators\n'), ((287, 320), 'wtforms.validators.length', 'validators.length', ([], {'min': '(2)', 'max': '(256)'}), '(min=2, max=256)\n', (304, 320), False, 'from wtforms import StringField, PasswordField, validators\n'), ((357, 383), 'wtforms.validators.InputRequired', 'validators.InputRequired', ([], {}), '()\n', (381, 383), False, 'from wtforms import StringField, PasswordField, validators\n'), ((385, 403), 'wtforms.validators.Email', 'validators.Email', ([], {}), '()\n', (401, 403), False, 'from wtforms import StringField, PasswordField, validators\n'), ((449, 475), 'wtforms.validators.InputRequired', 'validators.InputRequired', ([], {}), '()\n', (473, 475), False, 'from wtforms import StringField, PasswordField, validators\n'), ((477, 508), 'wtforms.validators.EqualTo', 'validators.EqualTo', (['"""passwordb"""'], {}), "('passwordb')\n", (495, 508), False, 'from wtforms import StringField, PasswordField, validators\n'), ((664, 690), 'wtforms.validators.InputRequired', 'validators.InputRequired', ([], {}), '()\n', (688, 690), False, 'from wtforms import StringField, PasswordField, validators\n'), ((692, 710), 'wtforms.validators.Email', 'validators.Email', ([], {}), '()\n', (708, 710), False, 'from wtforms import StringField, PasswordField, validators\n'), ((764, 790), 'wtforms.validators.InputRequired', 'validators.InputRequired', ([], {}), '()\n', (788, 790), False, 'from wtforms import StringField, PasswordField, validators\n')] |
from setuptools import setup, find_packages
version = '0.0.1'
# Read the long description up front so the file handle is closed promptly;
# the original bare ``open('README.rst').read()`` leaked the handle.
with open('README.rst') as readme_file:
    long_description = readme_file.read()

setup(name='mvvm',
      version=version,
      description='Model-View-ViewModel framework for Python, based on Wx',
      long_description=long_description,
      classifiers=[
          'Development Status :: 2 - Pre-Alpha',
          'Intended Audience :: Developers',
          'License :: OSI Approved :: MIT License',
          'Programming Language :: Python :: 2.7',
          'Topic :: Software Development :: Libraries :: Application Frameworks',
          'Topic :: Software Development :: User Interfaces',
          'Topic :: Utilities',
      ],
      author='<NAME>',
      author_email='<EMAIL>',
      url='http://github.com/Bouke/mvvm',
      license='MIT',
      packages=find_packages(),
      zip_safe=False,
      )
| [
"setuptools.find_packages"
] | [((765, 780), 'setuptools.find_packages', 'find_packages', ([], {}), '()\n', (778, 780), False, 'from setuptools import setup, find_packages\n')] |
"""
Corporation management paths
"""
from datetime import datetime
from typing import Dict, Iterator, List, Optional
from fastapi import APIRouter, Depends
import pydantic as pdt
from sni.esi.scope import EsiScope, esi_scope_set_to_hex
from sni.esi.token import tracking_status, TrackingStatus
from sni.uac.clearance import assert_has_clearance
from sni.uac.token import (
create_state_code,
from_authotization_header_nondyn,
Token,
)
from sni.user.models import Alliance, Corporation, User
from sni.user.user import ensure_corporation
from .user import GetUserShortOut
router = APIRouter()
class GetAllianceShortOut(pdt.BaseModel):
    """
    Short alliance description
    """

    alliance_id: int
    alliance_name: str

    @staticmethod
    def from_record(alliance: Alliance) -> "GetAllianceShortOut":
        """
        Converts an instance of :class:`sni.user.models.Alliance` to
        :class:`sni.api.routers.alliance.GetAllianceShortOut`
        """
        fields = {
            "alliance_id": alliance.alliance_id,
            "alliance_name": alliance.alliance_name,
        }
        return GetAllianceShortOut(**fields)
class GetCorporationOut(pdt.BaseModel):
    """
    Corporation data
    """

    alliance: Optional[GetAllianceShortOut]
    authorized_to_login: Optional[bool]
    ceo: GetUserShortOut
    corporation_id: int
    corporation_name: str
    cumulated_mandatory_esi_scopes: List[EsiScope]
    mandatory_esi_scopes: List[EsiScope]
    ticker: str
    updated_on: datetime

    @staticmethod
    def from_record(corporation: Corporation) -> "GetCorporationOut":
        """
        Converts an instance of :class:`sni.user.models.Corporation` to
        :class:`sni.api.routers.corporation.GetCorporationOut`
        """
        # A corporation may not belong to any alliance.
        alliance_out = None
        if corporation.alliance is not None:
            alliance_out = GetAllianceShortOut.from_record(corporation.alliance)
        return GetCorporationOut(
            alliance=alliance_out,
            authorized_to_login=corporation.authorized_to_login,
            ceo=GetUserShortOut.from_record(corporation.ceo),
            corporation_id=corporation.corporation_id,
            corporation_name=corporation.corporation_name,
            cumulated_mandatory_esi_scopes=list(
                corporation.cumulated_mandatory_esi_scopes()
            ),
            mandatory_esi_scopes=corporation.mandatory_esi_scopes,
            ticker=corporation.ticker,
            updated_on=corporation.updated_on,
        )
class PostCorporationGuestOut(pdt.BaseModel):
    """
    Model for ``POST /corporation/{corporation_id}/guest`` responses.
    """

    state_code: str
class GetCorporationShortOut(pdt.BaseModel):
    """
    Short corporation description
    """

    corporation_id: int
    corporation_name: str

    @staticmethod
    def from_record(corporation: Corporation) -> "GetCorporationShortOut":
        """
        Converts an instance of :class:`sni.user.models.Corporation` to
        :class:`sni.api.routers.corporation.GetCorporationShortOut`
        """
        fields = {
            "corporation_id": corporation.corporation_id,
            "corporation_name": corporation.corporation_name,
        }
        return GetCorporationShortOut(**fields)
class GetTrackingOut(pdt.BaseModel):
    """
    Represents a corporation tracking response.
    """

    invalid_refresh_token: List[GetUserShortOut] = []
    no_refresh_token: List[GetUserShortOut] = []
    valid_refresh_token: List[GetUserShortOut] = []

    @staticmethod
    def from_user_iterator(iterator: Iterator[User]) -> "GetTrackingOut":
        """
        Creates a tracking response from a user iterator. See
        :meth:`sni.esi.token.tracking_status`
        """
        result = GetTrackingOut()
        # Map each tracking status onto the response list it feeds.
        buckets: Dict[int, List[GetUserShortOut]] = {
            TrackingStatus.HAS_NO_REFRESH_TOKEN: result.no_refresh_token,
            TrackingStatus.ONLY_HAS_INVALID_REFRESH_TOKEN: result.invalid_refresh_token,
            TrackingStatus.HAS_A_VALID_REFRESH_TOKEN: result.valid_refresh_token,
        }
        for usr in iterator:
            buckets[tracking_status(usr)].append(GetUserShortOut.from_record(usr))
        return result
class PutCorporationIn(pdt.BaseModel):
    """
    Model for ``PUT /corporation/{corporation_id}`` requests
    """

    # Written to the corporation as-is by the endpoint (even when ``None``).
    authorized_to_login: Optional[bool]
    # Only applied by the endpoint when not ``None``.
    mandatory_esi_scopes: Optional[List[EsiScope]]
@router.get(
    "",
    response_model=List[GetCorporationShortOut],
    summary="Get the list of corporations",
)
def get_corporations(tkn: Token = Depends(from_authotization_header_nondyn),):
    """
    Gets the list of corporations registered in this instance. Requires a
    clearance level of 0 or more.
    """
    assert_has_clearance(tkn.owner, "sni.read_corporation")
    # Only corporations with an id of at least 2000000 are listed.
    query = Corporation.objects(corporation_id__gte=2000000)
    return [
        GetCorporationShortOut.from_record(record)
        for record in query.order_by("corporation_name")
    ]
@router.get(
    "/{corporation_id}",
    response_model=GetCorporationOut,
    summary="Get informations about a corporation",
)
def get_corporation(
    corporation_id: int,
    tkn: Token = Depends(from_authotization_header_nondyn),
):
    """
    Get informations about a corporation. Note that this corporation must be
    registered on SNI
    """
    assert_has_clearance(tkn.owner, "sni.read_corporation")
    record = Corporation.objects(corporation_id=corporation_id).get()
    return GetCorporationOut.from_record(record)
@router.post(
    "/{corporation_id}",
    response_model=GetCorporationOut,
    summary="Manually fetch a corporation from the ESI",
)
def post_corporation(
    corporation_id: int,
    tkn: Token = Depends(from_authotization_header_nondyn),
):
    """
    Manually fetches a corporation from the ESI. Requires a clearance level of
    8 or more.
    """
    assert_has_clearance(tkn.owner, "sni.fetch_corporation")
    record = ensure_corporation(corporation_id)
    return GetCorporationOut.from_record(record)
@router.put(
    "/{corporation_id}",
    response_model=GetCorporationOut,
    summary="Modify a corporation registered on SNI",
)
def put_corporation(
    corporation_id: int,
    data: PutCorporationIn,
    tkn: Token = Depends(from_authotization_header_nondyn),
):
    """
    Modify a corporation registered on SNI. Note that it does not modify it on
    an ESI level. Requires a clearance level of 2 or more.
    """
    # Fetch first: the clearance check needs the corporation's CEO.
    corporation: Corporation = Corporation.objects(
        corporation_id=corporation_id
    ).get()
    assert_has_clearance(tkn.owner, "sni.update_corporation", corporation.ceo)
    # ``authorized_to_login`` is applied unconditionally (it may be None);
    # mandatory ESI scopes are only overwritten when explicitly provided.
    corporation.authorized_to_login = data.authorized_to_login
    if data.mandatory_esi_scopes is not None:
        corporation.mandatory_esi_scopes = data.mandatory_esi_scopes
    corporation.save()
    return GetCorporationOut.from_record(corporation)
@router.delete(
    "/{corporation_id}/guest/{character_id}",
    summary="Deletes a corporation guest",
)
def delete_corporation_guest(
    corporation_id: int,
    character_id: int,
    tkn: Token = Depends(from_authotization_header_nondyn),
):
    """
    Deletes a corporation guest
    """
    corporation: Corporation = Corporation.objects(
        corporation_id=corporation_id
    ).get()
    assert_has_clearance(
        tkn.owner, "sni.delete_corporation_guest", corporation.ceo
    )
    # Guests are users of this corporation with a negative clearance level.
    User.objects(
        character_id=character_id,
        clearance_level__lt=0,
        corporation=corporation,
    ).get().delete()
@router.get(
    "/{corporation_id}/guest",
    response_model=List[GetUserShortOut],
    summary="Corporation guests",
)
def get_corporation_guests(
    corporation_id: int,
    tkn: Token = Depends(from_authotization_header_nondyn),
):
    """
    Returns the list of guests in this corporation.
    """
    corporation: Corporation = Corporation.objects(
        corporation_id=corporation_id
    ).get()
    assert_has_clearance(
        tkn.owner, "sni.read_corporation_guests", corporation.ceo
    )
    guests = []
    for guest in corporation.guest_iterator():
        guests.append(GetUserShortOut.from_record(guest))
    return guests
@router.post(
    "/{corporation_id}/guest",
    response_model=PostCorporationGuestOut,
    summary="Creates a state code for a new guest to this corporation",
)
def post_corporation_guest(
    corporation_id: int,
    tkn: Token = Depends(from_authotization_header_nondyn),
):
    """
    Creates a state code for a new guest to this corporation. The user then has
    to login with this state code to be considered a guest.
    """
    corporation: Corporation = Corporation.objects(
        corporation_id=corporation_id
    ).get()
    assert_has_clearance(
        tkn.owner, "sni.create_corporation_guest", corporation.ceo
    )
    # The state code is prefixed with the corporation's cumulated mandatory
    # ESI scopes, hex-encoded.
    scope_prefix = esi_scope_set_to_hex(
        corporation.cumulated_mandatory_esi_scopes()
    )
    state_code = create_state_code(
        tkn.parent,
        inviting_corporation=corporation,
        code_prefix=scope_prefix,
    )
    return PostCorporationGuestOut(state_code=str(state_code.uuid))
@router.get(
    "/{corporation_id}/tracking",
    response_model=GetTrackingOut,
    summary="Corporation tracking",
)
def get_corporation_tracking(
    corporation_id: int,
    tkn: Token = Depends(from_authotization_header_nondyn),
):
    """
    Reports which members (of a given corporation) have a valid refresh token
    attached to them, and which do not. Requires a clearance level of 1 and
    having authority over this corporation.
    """
    corporation: Corporation = Corporation.objects(
        corporation_id=corporation_id
    ).get()
    assert_has_clearance(tkn.owner, "sni.track_corporation", corporation.ceo)
    members = corporation.user_iterator()
    return GetTrackingOut.from_user_iterator(members)
| [
"sni.uac.clearance.assert_has_clearance",
"sni.user.models.Corporation.objects",
"sni.user.user.ensure_corporation",
"fastapi.APIRouter",
"sni.user.models.User.objects",
"fastapi.Depends",
"sni.esi.token.tracking_status"
] | [((596, 607), 'fastapi.APIRouter', 'APIRouter', ([], {}), '()\n', (605, 607), False, 'from fastapi import APIRouter, Depends\n'), ((4525, 4566), 'fastapi.Depends', 'Depends', (['from_authotization_header_nondyn'], {}), '(from_authotization_header_nondyn)\n', (4532, 4566), False, 'from fastapi import APIRouter, Depends\n'), ((4698, 4753), 'sni.uac.clearance.assert_has_clearance', 'assert_has_clearance', (['tkn.owner', '"""sni.read_corporation"""'], {}), "(tkn.owner, 'sni.read_corporation')\n", (4718, 4753), False, 'from sni.uac.clearance import assert_has_clearance\n'), ((5151, 5192), 'fastapi.Depends', 'Depends', (['from_authotization_header_nondyn'], {}), '(from_authotization_header_nondyn)\n', (5158, 5192), False, 'from fastapi import APIRouter, Depends\n'), ((5316, 5371), 'sni.uac.clearance.assert_has_clearance', 'assert_has_clearance', (['tkn.owner', '"""sni.read_corporation"""'], {}), "(tkn.owner, 'sni.read_corporation')\n", (5336, 5371), False, 'from sni.uac.clearance import assert_has_clearance\n'), ((5703, 5744), 'fastapi.Depends', 'Depends', (['from_authotization_header_nondyn'], {}), '(from_authotization_header_nondyn)\n', (5710, 5744), False, 'from fastapi import APIRouter, Depends\n'), ((5863, 5919), 'sni.uac.clearance.assert_has_clearance', 'assert_has_clearance', (['tkn.owner', '"""sni.fetch_corporation"""'], {}), "(tkn.owner, 'sni.fetch_corporation')\n", (5883, 5919), False, 'from sni.uac.clearance import assert_has_clearance\n'), ((5938, 5972), 'sni.user.user.ensure_corporation', 'ensure_corporation', (['corporation_id'], {}), '(corporation_id)\n', (5956, 5972), False, 'from sni.user.user import ensure_corporation\n'), ((6252, 6293), 'fastapi.Depends', 'Depends', (['from_authotization_header_nondyn'], {}), '(from_authotization_header_nondyn)\n', (6259, 6293), False, 'from fastapi import APIRouter, Depends\n'), ((6558, 6632), 'sni.uac.clearance.assert_has_clearance', 'assert_has_clearance', (['tkn.owner', '"""sni.update_corporation"""', 
'corporation.ceo'], {}), "(tkn.owner, 'sni.update_corporation', corporation.ceo)\n", (6578, 6632), False, 'from sni.uac.clearance import assert_has_clearance\n'), ((7092, 7133), 'fastapi.Depends', 'Depends', (['from_authotization_header_nondyn'], {}), '(from_authotization_header_nondyn)\n', (7099, 7133), False, 'from fastapi import APIRouter, Depends\n'), ((7292, 7377), 'sni.uac.clearance.assert_has_clearance', 'assert_has_clearance', (['tkn.owner', '"""sni.delete_corporation_guest"""', 'corporation.ceo'], {}), "(tkn.owner, 'sni.delete_corporation_guest', corporation.ceo\n )\n", (7312, 7377), False, 'from sni.uac.clearance import assert_has_clearance\n'), ((7743, 7784), 'fastapi.Depends', 'Depends', (['from_authotization_header_nondyn'], {}), '(from_authotization_header_nondyn)\n', (7750, 7784), False, 'from fastapi import APIRouter, Depends\n'), ((7963, 8042), 'sni.uac.clearance.assert_has_clearance', 'assert_has_clearance', (['tkn.owner', '"""sni.read_corporation_guests"""', 'corporation.ceo'], {}), "(tkn.owner, 'sni.read_corporation_guests', corporation.ceo)\n", (7983, 8042), False, 'from sni.uac.clearance import assert_has_clearance\n'), ((8404, 8445), 'fastapi.Depends', 'Depends', (['from_authotization_header_nondyn'], {}), '(from_authotization_header_nondyn)\n', (8411, 8445), False, 'from fastapi import APIRouter, Depends\n'), ((8712, 8797), 'sni.uac.clearance.assert_has_clearance', 'assert_has_clearance', (['tkn.owner', '"""sni.create_corporation_guest"""', 'corporation.ceo'], {}), "(tkn.owner, 'sni.create_corporation_guest', corporation.ceo\n )\n", (8732, 8797), False, 'from sni.uac.clearance import assert_has_clearance\n'), ((9283, 9324), 'fastapi.Depends', 'Depends', (['from_authotization_header_nondyn'], {}), '(from_authotization_header_nondyn)\n', (9290, 9324), False, 'from fastapi import APIRouter, Depends\n'), ((9648, 9721), 'sni.uac.clearance.assert_has_clearance', 'assert_has_clearance', (['tkn.owner', '"""sni.track_corporation"""', 
'corporation.ceo'], {}), "(tkn.owner, 'sni.track_corporation', corporation.ceo)\n", (9668, 9721), False, 'from sni.uac.clearance import assert_has_clearance\n'), ((4053, 4073), 'sni.esi.token.tracking_status', 'tracking_status', (['usr'], {}), '(usr)\n', (4068, 4073), False, 'from sni.esi.token import tracking_status, TrackingStatus\n'), ((5390, 5440), 'sni.user.models.Corporation.objects', 'Corporation.objects', ([], {'corporation_id': 'corporation_id'}), '(corporation_id=corporation_id)\n', (5409, 5440), False, 'from sni.user.models import Alliance, Corporation, User\n'), ((6483, 6533), 'sni.user.models.Corporation.objects', 'Corporation.objects', ([], {'corporation_id': 'corporation_id'}), '(corporation_id=corporation_id)\n', (6502, 6533), False, 'from sni.user.models import Alliance, Corporation, User\n'), ((7217, 7267), 'sni.user.models.Corporation.objects', 'Corporation.objects', ([], {'corporation_id': 'corporation_id'}), '(corporation_id=corporation_id)\n', (7236, 7267), False, 'from sni.user.models import Alliance, Corporation, User\n'), ((7405, 7497), 'sni.user.models.User.objects', 'User.objects', ([], {'character_id': 'character_id', 'clearance_level__lt': '(0)', 'corporation': 'corporation'}), '(character_id=character_id, clearance_level__lt=0, corporation=\n corporation)\n', (7417, 7497), False, 'from sni.user.models import Alliance, Corporation, User\n'), ((7888, 7938), 'sni.user.models.Corporation.objects', 'Corporation.objects', ([], {'corporation_id': 'corporation_id'}), '(corporation_id=corporation_id)\n', (7907, 7938), False, 'from sni.user.models import Alliance, Corporation, User\n'), ((8637, 8687), 'sni.user.models.Corporation.objects', 'Corporation.objects', ([], {'corporation_id': 'corporation_id'}), '(corporation_id=corporation_id)\n', (8656, 8687), False, 'from sni.user.models import Alliance, Corporation, User\n'), ((9573, 9623), 'sni.user.models.Corporation.objects', 'Corporation.objects', ([], {'corporation_id': 'corporation_id'}), 
'(corporation_id=corporation_id)\n', (9592, 9623), False, 'from sni.user.models import Alliance, Corporation, User\n'), ((4850, 4898), 'sni.user.models.Corporation.objects', 'Corporation.objects', ([], {'corporation_id__gte': '(2000000)'}), '(corporation_id__gte=2000000)\n', (4869, 4898), False, 'from sni.user.models import Alliance, Corporation, User\n')] |
import requests
import json
from .Graph import Graph
from .FactSheets import FactSheets
from .Users import Users
from .Metrics import Metrics
from .Polls import Polls
class LeanIX:
    """Client for the LeanIX REST/GraphQL APIs.

    Authenticates on construction with the given API token and exposes the
    individual API areas as attributes:
        .graph, .factsheets, .users, .metrics, .polls
    """

    def __init__(self, api_token="", workspaceid="", baseurl="https://us.leanix.net/"):
        """Authenticate to LeanIX and build the sub-API helper objects.

        :param api_token: LeanIX technical-user API token.
        :param workspaceid: ID of the target workspace.
        :param baseurl: Regional LeanIX instance, e.g. "https://us.leanix.net/".
        """
        self.__api_token = api_token
        self.workspaceid = workspaceid
        # Normalise the base URL so it always ends with a single "/".
        self.baseurl = baseurl if baseurl.endswith("/") else baseurl + "/"
        self.auth()
        self.graph = Graph(self)
        self.factsheets = FactSheets(self)
        self.users = Users(self)
        self.metrics = Metrics(self)
        self.polls = Polls(self)

    def __repr__(self):
        return f"LeanIX Object for {self.workspaceid}"

    def auth(self):
        """Authenticate using the stored API token and cache the auth header."""
        # BUG FIX: self.baseurl is guaranteed to end with "/", so the original
        # f"{self.baseurl}/services/..." produced a double slash in the URL.
        auth_url = f"{self.baseurl}services/mtm/v1/oauth2/token"
        response = requests.post(auth_url, auth=('apitoken', self.__api_token),
                                 data={'grant_type': 'client_credentials'})
        response.raise_for_status()
        self._access_token = response.json()['access_token']
        self._auth_header = 'Bearer ' + self._access_token
        self.header = {'Authorization': self._auth_header,
                       "Content-Type": "application/json"}

    def _sendrequest(self, method, parameters=None, data=None, verb="get"):
        """Send an authenticated request to a LeanIX service endpoint.

        :param method: API path relative to the base URL.
        :param parameters: optional dict of query parameters (never mutated).
        :param data: optional JSON-serialisable request body (POST only).
        :param verb: "get" — paginated; returns the accumulated 'data' rows —
                     or "post" — returns the raw Response object.
        :raises ValueError: on an unsupported HTTP verb.
        """
        api_url = f'{self.baseurl}{method}'
        verb = verb.lower()
        if verb == "get":
            # Work on a copy so pagination never mutates the caller's dict
            # (the original advanced parameters['page'] in place and crashed
            # with TypeError when parameters was None but pagination was needed).
            params = dict(parameters) if parameters else {}
            jresp = requests.get(api_url, headers=self.header, params=params).json()
            allrows = jresp['data']
            # Keep fetching pages until every reported row has been collected.
            while jresp['total'] > len(allrows):
                params['page'] = params.get('page', 1) + 1
                jresp = requests.get(api_url, headers=self.header, params=params).json()
                allrows += jresp['data']
            return allrows
        if verb == "post":
            return requests.post(api_url, headers=self.header,
                                 data=json.dumps(data), params=parameters)
        # The original silently returned [] for typo'd verbs; fail loudly instead.
        raise ValueError(f"Unsupported HTTP verb: {verb!r}")
| [
"json.dumps",
"requests.post",
"requests.get"
] | [((1231, 1339), 'requests.post', 'requests.post', (['auth_url'], {'auth': "('apitoken', self.__api_token)", 'data': "{'grant_type': 'client_credentials'}"}), "(auth_url, auth=('apitoken', self.__api_token), data={\n 'grant_type': 'client_credentials'})\n", (1244, 1339), False, 'import requests\n'), ((1821, 1882), 'requests.get', 'requests.get', (['api_url'], {'headers': 'self.header', 'params': 'parameters'}), '(api_url, headers=self.header, params=parameters)\n', (1833, 1882), False, 'import requests\n'), ((2372, 2388), 'json.dumps', 'json.dumps', (['data'], {}), '(data)\n', (2382, 2388), False, 'import json\n'), ((2193, 2254), 'requests.get', 'requests.get', (['api_url'], {'headers': 'self.header', 'params': 'parameters'}), '(api_url, headers=self.header, params=parameters)\n', (2205, 2254), False, 'import requests\n')] |
import os
import numpy as np
from constants import DATABASE_FILE_NAME, PLAYER_ONE, PLAYER_TWO, POSITION_TO_DATABASE
from Agents.random import Random
from othello import Othello
import multiprocessing as mp
class Database:
    """Persistence layer for Othello field-win statistics.

    self._db_data is a 3-dim numpy array of shape (60, 9, 3):
        axis 0: the 60 possible turn numbers
        axis 1: the 9 board-field categories
        axis 2: [won games of player 1, won games of player 2, total games]
    """

    def __init__(self):
        """Load the statistics from disk, creating a fresh database if missing."""
        # Create an all-zero database file on first use.
        if not os.path.isfile(DATABASE_FILE_NAME):
            self._create_new_database()
        # The file stores 60 stacked (9, 3) matrices; restore the 3-dim shape.
        csv = np.loadtxt(DATABASE_FILE_NAME, delimiter=';', dtype='int64')
        self._db_data = csv.reshape((60, 9, 3))

    def _create_new_database(self):
        """Reset all stored statistics to zero and persist the empty database."""
        self._db_data = np.zeros(shape=(60, 9, 3), dtype='int64')
        self.store_database()

    def store_database(self):
        """Write the in-memory statistics back to the database file."""
        with open(DATABASE_FILE_NAME, 'w') as outfile:
            # Serialise the 3-dim array as 60 consecutive 2-dim blocks.
            for turn_matrix in self._db_data:
                np.savetxt(outfile, turn_matrix, fmt='%d', delimiter=';')

    def get_change_of_winning(self, move, turn_nr, current_player):
        """Return the historical chance of winning for a move at a turn number.

        :param move: a pair <row, column> from the available moves
        :param turn_nr: the current turn number (0-59)
        :param current_player: PLAYER_ONE or PLAYER_TWO
        :return: win ratio in [0, 1]; 0 when no games are recorded yet
        """
        # Map the board position onto its statistics category.
        category = POSITION_TO_DATABASE[move]
        won_pl1, won_pl2, total_played = self._db_data[turn_nr][category]
        # Avoid division by zero when no games were recorded for this field.
        if total_played == 0:
            return 0
        if current_player == PLAYER_ONE:
            return won_pl1 / total_played
        return won_pl2 / total_played

    def update_field_stat(self, turn_nr, field_type, winner):
        """Record one played move in the statistics.

        :param turn_nr: turn number of the move
        :param field_type: field category of the move (database index)
        :param winner: winner of the whole game (PLAYER_ONE / PLAYER_TWO);
                       any other value counts the game as a draw
        """
        won_pl1, won_pl2, total_played = self._db_data[turn_nr][field_type]
        if winner == PLAYER_ONE:
            won_pl1 += 1
        elif winner == PLAYER_TWO:
            won_pl2 += 1
        # Draws only increase the total game counter.
        self._db_data[turn_nr][field_type] = (won_pl1, won_pl2, total_played + 1)

    def update_fields_stats_for_single_game(self, moves, winner):
        """Update the statistics for every move taken in one finished game.

        :param moves: list of taken moves in play order
        :param winner: PLAYER_ONE or PLAYER_TWO
        """
        # BUG FIX: the original iterated `for turn_nr in enumerate(moves)`,
        # so turn_nr was an (index, move) tuple and `moves[turn_nr]` raised
        # TypeError on every call.
        for turn_nr, move in enumerate(moves):
            field_type = POSITION_TO_DATABASE[move]
            self.update_field_stat(turn_nr, field_type, winner)

    @staticmethod
    def _play_n_random_games(count):
        """Play `count` random games and return their outcomes.

        :param count: number of games to play
        :return: list of pairs <taken moves, winner of that game>
        """
        stats = []
        for game_no in range(count):
            # Progress output every 100 games.
            if game_no % 100 == 0:
                print(f"Game No: {game_no}")
            game = Othello()
            game.init_game()
            while not game.game_is_over():
                game.play_position(Random.get_move(game))
            stats.append((game.get_taken_mv(), game.get_winner()))
        return stats

    def train_db_multi_threaded(self, count):
        """Play `count` random games in parallel and update the statistics.

        :param count: total number of games to play
        """
        number_of_processes = mp.cpu_count()
        pool = mp.Pool()
        # Distribute `count` as evenly as possible so exactly `count` games
        # are played in total (the original dropped count % cpu_count games).
        base, extra = divmod(count, number_of_processes)
        chunk_sizes = [base + (1 if i < extra else 0)
                       for i in range(number_of_processes)]
        async_results = [pool.apply_async(self._play_n_random_games, args=(chunk,))
                         for chunk in chunk_sizes if chunk]
        # Merge every worker's results into the shared statistics sequentially.
        for result in async_results:
            for moves, winner in result.get():
                self.update_fields_stats_for_single_game(moves, winner)
        pool.close()
        pool.join()
# Module-level singleton instance; importing this module loads (or creates)
# the statistics database file from disk as a side effect.
db = Database()
| [
"othello.Othello",
"Agents.random.Random.get_move",
"multiprocessing.cpu_count",
"os.path.isfile",
"numpy.zeros",
"multiprocessing.Pool",
"numpy.savetxt",
"numpy.loadtxt"
] | [((765, 825), 'numpy.loadtxt', 'np.loadtxt', (['DATABASE_FILE_NAME'], {'delimiter': '""";"""', 'dtype': '"""int64"""'}), "(DATABASE_FILE_NAME, delimiter=';', dtype='int64')\n", (775, 825), True, 'import numpy as np\n'), ((1042, 1083), 'numpy.zeros', 'np.zeros', ([], {'shape': '(60, 9, 3)', 'dtype': '"""int64"""'}), "(shape=(60, 9, 3), dtype='int64')\n", (1050, 1083), True, 'import numpy as np\n'), ((4957, 4971), 'multiprocessing.cpu_count', 'mp.cpu_count', ([], {}), '()\n', (4969, 4971), True, 'import multiprocessing as mp\n'), ((4987, 4996), 'multiprocessing.Pool', 'mp.Pool', ([], {}), '()\n', (4994, 4996), True, 'import multiprocessing as mp\n'), ((627, 661), 'os.path.isfile', 'os.path.isfile', (['DATABASE_FILE_NAME'], {}), '(DATABASE_FILE_NAME)\n', (641, 661), False, 'import os\n'), ((4253, 4262), 'othello.Othello', 'Othello', ([], {}), '()\n', (4260, 4262), False, 'from othello import Othello\n'), ((1485, 1534), 'numpy.savetxt', 'np.savetxt', (['outfile', 'row'], {'fmt': '"""%d"""', 'delimiter': '""";"""'}), "(outfile, row, fmt='%d', delimiter=';')\n", (1495, 1534), True, 'import numpy as np\n'), ((4391, 4409), 'Agents.random.Random.get_move', 'Random.get_move', (['g'], {}), '(g)\n', (4406, 4409), False, 'from Agents.random import Random\n')] |