code
stringlengths 22
1.05M
| apis
listlengths 1
3.31k
| extract_api
stringlengths 75
3.25M
|
|---|---|---|
import regex
from src.extractor.constants import separators, prefixes
from src.extractor.models.ExtractedData import ExtractedList
from src.extractor.models.RegexHandler import RegexHandler
from src.utils.conversion_utils import parse_postal_code
from src.utils.io_utils import load_regex
from src.utils.number_conversion_utils import japanese_container_dict
# Raw pattern text for postal codes, loaded from the bundled template file.
POSTAL_CODE_REGEX_STRING = load_regex(regex_file_name="postal_code.regexp")
# Compile the pattern; the keyword arguments presumably substitute named
# placeholders in the template (separators, prefix, kanji digits).
# NOTE(review): plain `regex.compile` does not document arbitrary keyword
# substitution — confirm the template/compile contract used by this project.
# The "seperator_*" spelling matches the template's placeholder names; do
# not "fix" it here without changing the template too.
POSTAL_CODE_REGEX = regex.compile(
    POSTAL_CODE_REGEX_STRING,
    seperator_postal_code=separators["postal_code_numbers"],
    prefix_postal_code=prefixes["postal_code"],
    seperator_space=separators["blank"],
    kanji_0to9=japanese_container_dict["0to9"],
    separator_postal_code_kanji=separators["postal_code_kanji"]
)
# Maps named regex groups to converters applied to the raw matched text:
# the string form is kept verbatim, the value form is parsed to a number.
POSTAL_CODE_REGEX_IDENTIFIERS = {
    "postal_code_string": lambda raw_value: raw_value,
    "postal_code_value": parse_postal_code
}
def extract_all_postal_codes(target_string: str) -> ExtractedList:
    """Find every postal-code occurrence in *target_string*.

    Delegates the matching to a ``RegexHandler`` configured with the
    module-level compiled postal-code pattern and its conversion callbacks.
    """
    handler = RegexHandler(
        compiled_regex=POSTAL_CODE_REGEX,
        regex_identifiers=POSTAL_CODE_REGEX_IDENTIFIERS,
    )
    return handler.search_string(target_string=target_string)
|
[
"src.extractor.models.RegexHandler.RegexHandler",
"regex.compile",
"src.utils.io_utils.load_regex"
] |
[((388, 436), 'src.utils.io_utils.load_regex', 'load_regex', ([], {'regex_file_name': '"""postal_code.regexp"""'}), "(regex_file_name='postal_code.regexp')\n", (398, 436), False, 'from src.utils.io_utils import load_regex\n'), ((457, 753), 'regex.compile', 'regex.compile', (['POSTAL_CODE_REGEX_STRING'], {'seperator_postal_code': "separators['postal_code_numbers']", 'prefix_postal_code': "prefixes['postal_code']", 'seperator_space': "separators['blank']", 'kanji_0to9': "japanese_container_dict['0to9']", 'separator_postal_code_kanji': "separators['postal_code_kanji']"}), "(POSTAL_CODE_REGEX_STRING, seperator_postal_code=separators[\n 'postal_code_numbers'], prefix_postal_code=prefixes['postal_code'],\n seperator_space=separators['blank'], kanji_0to9=japanese_container_dict\n ['0to9'], separator_postal_code_kanji=separators['postal_code_kanji'])\n", (470, 753), False, 'import regex\n'), ((985, 1085), 'src.extractor.models.RegexHandler.RegexHandler', 'RegexHandler', ([], {'compiled_regex': 'POSTAL_CODE_REGEX', 'regex_identifiers': 'POSTAL_CODE_REGEX_IDENTIFIERS'}), '(compiled_regex=POSTAL_CODE_REGEX, regex_identifiers=\n POSTAL_CODE_REGEX_IDENTIFIERS)\n', (997, 1085), False, 'from src.extractor.models.RegexHandler import RegexHandler\n')]
|
"""
This compares force from drag on a 2 inch projectile at a variety of
velocities using a variety of formulas.
"""
import math
import sys
import ballistics
# Reference drag force (ounces-force) on the projectile at each velocity
# (ft/s), transcribed from the cited historical table.
Hutton = {  # Hutton, 1812, Vol. III, p. 318
    5: 0.006,
    10: 0.026,
    15: 0.058,
    20: 0.103,
    25: 0.163,
    30: 0.237,
    40: 0.427,
    50: 0.676,
    100: 2.78,
    200: 11.34,
    300: 25.8,
    400: 46.5,
    500: 74.4,
    600: 110.4,
    700: 156.0,
    800: 212.0,
    900: 280.3,
    1000: 362.1,
    1100: 456.9,
    1200: 564.4,
    1300: 683.3,
    1400: 811.5,
    1500: 947.1,
    1600: 1086.9,
    1700: 1228.4,
    1800: 1368.6,
    1900: 1505.7,
    2000: 1637.8,
}
# Drag formulas to compare; 'hutton' comes from the table above, the rest
# are computed by the ballistics package.
methods = ['hutton', 'miller', 'collins', 'henderson', 'morrison',
           'adjusted']
# results[velocity] -> {method_name: force_in_ozf, 'Mn': ..., 'Re': ...}
results = {}
for vel in Hutton:
    results[vel] = {'hutton': Hutton[vel]}
for method in methods[1:]:
    for vel in sorted(Hutton):
        state = {
            'vy': 0,
            'vx': ballistics.convert_units('%d ft/s' % vel),
            'diam': ballistics.convert_units('2 in'),
            'material': 'iron',
            'settings': {'drag_method': method},
        }
        # Presumably fills state['mass'] (and related fields) from the
        # material and diameter — TODO confirm against the ballistics API.
        ballistics.determine_material(state)
        # [0] takes the x-component; vy is 0 so drag acts along x only.
        acc = ballistics.acceleration_from_drag(state)[0]
        accgrav = -ballistics.acceleration_from_gravity(state)
        # Force expressed in kilograms-force (mass * a / g), then ounces.
        kgforce = state['mass'] * acc / accgrav
        ozforce = ballistics.convert_units(kgforce, to='oz')
        results[vel][method] = ozforce
        # NOTE(review): Mn/Re are overwritten on every method pass, so the
        # stored values come from the *last* method only — confirm they are
        # method-independent.
        results[vel]['Mn'] = state['drag_data']['Mn']
        results[vel]['Re'] = state['drag_data']['Re']
# Header row: one column per drag method, plus Mach number and log10 of
# the Reynolds number at the end.
sys.stdout.write('Velocity ')
for method in methods:
    sys.stdout.write(' %9s' % method.capitalize()[:9])
sys.stdout.write(' Mn ^Re\n')
# One data row per velocity, in ascending order.
for vel in sorted(Hutton):
    sys.stdout.write('%4d ft/s' % vel)
    for method in methods:
        sys.stdout.write(' %8.3f' % results[vel][method])
    sys.stdout.write(' ozf %4.2f %3.1f\n' % (
        results[vel]['Mn'], math.log10(results[vel]['Re'])))
|
[
"sys.stdout.write",
"ballistics.determine_material",
"ballistics.acceleration_from_drag",
"ballistics.convert_units",
"math.log10",
"ballistics.acceleration_from_gravity"
] |
[((1553, 1582), 'sys.stdout.write', 'sys.stdout.write', (['"""Velocity """'], {}), "('Velocity ')\n", (1569, 1582), False, 'import sys\n'), ((1661, 1696), 'sys.stdout.write', 'sys.stdout.write', (['""" Mn ^Re\n"""'], {}), "(' Mn ^Re\\n')\n", (1677, 1696), False, 'import sys\n'), ((1728, 1762), 'sys.stdout.write', 'sys.stdout.write', (["('%4d ft/s' % vel)"], {}), "('%4d ft/s' % vel)\n", (1744, 1762), False, 'import sys\n'), ((1137, 1173), 'ballistics.determine_material', 'ballistics.determine_material', (['state'], {}), '(state)\n', (1166, 1173), False, 'import ballistics\n'), ((1363, 1405), 'ballistics.convert_units', 'ballistics.convert_units', (['kgforce'], {'to': '"""oz"""'}), "(kgforce, to='oz')\n", (1387, 1405), False, 'import ballistics\n'), ((1798, 1848), 'sys.stdout.write', 'sys.stdout.write', (["(' %8.3f' % results[vel][method])"], {}), "(' %8.3f' % results[vel][method])\n", (1814, 1848), False, 'import sys\n'), ((940, 981), 'ballistics.convert_units', 'ballistics.convert_units', (["('%d ft/s' % vel)"], {}), "('%d ft/s' % vel)\n", (964, 981), False, 'import ballistics\n'), ((1003, 1035), 'ballistics.convert_units', 'ballistics.convert_units', (['"""2 in"""'], {}), "('2 in')\n", (1027, 1035), False, 'import ballistics\n'), ((1188, 1228), 'ballistics.acceleration_from_drag', 'ballistics.acceleration_from_drag', (['state'], {}), '(state)\n', (1221, 1228), False, 'import ballistics\n'), ((1252, 1295), 'ballistics.acceleration_from_gravity', 'ballistics.acceleration_from_gravity', (['state'], {}), '(state)\n', (1288, 1295), False, 'import ballistics\n'), ((1923, 1953), 'math.log10', 'math.log10', (["results[vel]['Re']"], {}), "(results[vel]['Re'])\n", (1933, 1953), False, 'import math\n')]
|
"""
Script to delete datasource
It is a handy script to delete datasource, while creating and new importers
The script only takes one argument the name of the API that needs to be deleted
Importers can be deleted:
python manage.py remove -id <Name-of-the-importer>
Name of the importer can be found:
python manage.py remove -d True
"""
import os, sys
sys.path.append(os.path.abspath(os.path.join(os.path.dirname(__file__), '..')))
from app import create_app
from db import db
from models.api import API
from models.sensor import Sensor
from models.sensor_attribute import SensorAttribute
from models.attributes import Attributes
from models.attribute_data import ModelClass
from flask_script import Command, Option
class DropDatasource(Command):
    """Flask-Script command that deletes an API datasource and all of its
    dependent rows: sensors, sensor-attribute links, attributes, and each
    attribute's dedicated data table."""

    def __init__(self, id=None, datasources=False):
        # id: name of the API/importer to delete (set via --api_id/-id).
        # datasources: when truthy, list available importer names instead.
        self.datasources = datasources
        self.id = id

    def get_options(self):
        """Declare the command-line options for `manage.py remove`."""
        return [
            Option('--api_id', '-id', dest='id', default=self.id),
            Option('--datasources', '-d', dest='datasources', default=self.datasources)
        ]

    def run(self, id, datasources):
        """Entry point invoked by Flask-Script with the parsed options."""
        apis = API.get_all()
        # Map API name -> API row for lookup by the user-supplied name.
        _dict = {}
        for a in apis:
            _dict[a.name] = a
            if datasources:
                # -d flag: just list every importer name.
                print(a.name)
        if id is None:
            return
        # NOTE(review): an unknown name raises KeyError here rather than
        # printing a friendly message — confirm that is acceptable.
        self.drop_datasource(_dict[id].id)
        db.session.delete(_dict[id])
        # Single commit covers the deletes queued in drop_datasource too.
        db.session.commit()
        print('Dropped Datasource for API: ', id)

    def drop_datasource(self, id):
        """Queue deletion of every sensor, attribute link, attribute, and
        attribute data table belonging to the API with primary key `id`.

        Deletes are added to the session only; the caller commits.
        """
        sensors = Sensor.query.filter_by(a_id = id).all()
        sensor_list = []
        for s in sensors:
            sensor_list.append(s.id)
            db.session.delete(s)
        sensor_attributes = db.session.query(SensorAttribute)\
            .filter(SensorAttribute.s_id.in_((sensor_list)))\
            .all()
        # Collect distinct attribute ids before deleting the link rows.
        attribute_ids = set()
        for sa in sensor_attributes:
            attribute_ids.add(sa.a_id)
            db.session.delete(sa)
        attributes = db.session.query(Attributes)\
            .filter(Attributes.id.in_((attribute_ids))).all()
        for attribute in attributes:
            # Drop the attribute's dedicated data table, then its row.
            model = ModelClass(attribute.table_name.lower())
            model.__table__.drop(db.engine)
            db.session.delete(attribute)
|
[
"models.sensor.Sensor.query.filter_by",
"models.sensor_attribute.SensorAttribute.s_id.in_",
"models.api.API.get_all",
"os.path.dirname",
"db.db.session.delete",
"models.attributes.Attributes.id.in_",
"db.db.session.query",
"db.db.session.commit",
"flask_script.Option"
] |
[((1072, 1085), 'models.api.API.get_all', 'API.get_all', ([], {}), '()\n', (1083, 1085), False, 'from models.api import API\n'), ((1242, 1270), 'db.db.session.delete', 'db.session.delete', (['_dict[id]'], {}), '(_dict[id])\n', (1259, 1270), False, 'from db import db\n'), ((1273, 1292), 'db.db.session.commit', 'db.session.commit', ([], {}), '()\n', (1290, 1292), False, 'from db import db\n'), ((403, 428), 'os.path.dirname', 'os.path.dirname', (['__file__'], {}), '(__file__)\n', (418, 428), False, 'import os, sys\n'), ((892, 945), 'flask_script.Option', 'Option', (['"""--api_id"""', '"""-id"""'], {'dest': '"""id"""', 'default': 'self.id'}), "('--api_id', '-id', dest='id', default=self.id)\n", (898, 945), False, 'from flask_script import Command, Option\n'), ((949, 1024), 'flask_script.Option', 'Option', (['"""--datasources"""', '"""-d"""'], {'dest': '"""datasources"""', 'default': 'self.datasources'}), "('--datasources', '-d', dest='datasources', default=self.datasources)\n", (955, 1024), False, 'from flask_script import Command, Option\n'), ((1492, 1512), 'db.db.session.delete', 'db.session.delete', (['s'], {}), '(s)\n', (1509, 1512), False, 'from db import db\n'), ((1733, 1754), 'db.db.session.delete', 'db.session.delete', (['sa'], {}), '(sa)\n', (1750, 1754), False, 'from db import db\n'), ((1981, 2009), 'db.db.session.delete', 'db.session.delete', (['attribute'], {}), '(attribute)\n', (1998, 2009), False, 'from db import db\n'), ((1382, 1413), 'models.sensor.Sensor.query.filter_by', 'Sensor.query.filter_by', ([], {'a_id': 'id'}), '(a_id=id)\n', (1404, 1413), False, 'from models.sensor import Sensor\n'), ((1588, 1625), 'models.sensor_attribute.SensorAttribute.s_id.in_', 'SensorAttribute.s_id.in_', (['sensor_list'], {}), '(sensor_list)\n', (1612, 1625), False, 'from models.sensor_attribute import SensorAttribute\n'), ((1816, 1848), 'models.attributes.Attributes.id.in_', 'Attributes.id.in_', (['attribute_ids'], {}), '(attribute_ids)\n', (1833, 1848), False, 'from 
models.attributes import Attributes\n'), ((1538, 1571), 'db.db.session.query', 'db.session.query', (['SensorAttribute'], {}), '(SensorAttribute)\n', (1554, 1571), False, 'from db import db\n'), ((1772, 1800), 'db.db.session.query', 'db.session.query', (['Attributes'], {}), '(Attributes)\n', (1788, 1800), False, 'from db import db\n')]
|
from django.shortcuts import render,redirect
from django.http import HttpResponse,Http404
from .models import Image,Location,Category
# Create your views here.
from django import forms
from django.http import HttpResponse
from cloudinary.forms import cl_init_js_callbacks
from .forms import PhotoForm
def upload(request):
    """Render the Cloudinary upload page; persist a posted photo if valid."""
    context = {"backend_form": PhotoForm()}
    if request.method == 'POST':
        form = PhotoForm(request.POST, request.FILES)
        # Expose the (possibly unsaved) instance to the template either way.
        context['posted'] = form.instance
        if form.is_valid():
            form.save()
    return render(request, 'upload.html', context)
def photos(request):
    """Display all images together with the location and category filters."""
    context = {
        "images": Image.get_images(),
        "locations": Location.objects.all(),
        "categories": Category.objects.all(),
    }
    return render(request, 'all-photos/all_photos.html', context)
def photos_by_location(request, location_id):
    """Display only the images taken at the given location."""
    matching = Image.filter_by_location(location_id)
    return render(request, 'all-photos/location.html', {"images": matching})
def photos_by_category(request, category_id):
    """Display only the images belonging to the given category."""
    matching = Image.filter_by_category(category_id)
    return render(request, 'all-photos/category.html', {"images": matching})
def search_images(request):
    """Search images by the `photo` GET parameter, or prompt if absent."""
    category = request.GET.get("photo")
    if category:
        searched_images = Image.search_image(category)
        return render(
            request,
            'all-photos/search.html',
            {"message": f"{category}", "photos": searched_images},
        )
    message = "You have not searched for any picture"
    return render(request, 'all-photos/search.html', {"message": message})
|
[
"django.shortcuts.render"
] |
[((551, 590), 'django.shortcuts.render', 'render', (['request', '"""upload.html"""', 'context'], {}), "(request, 'upload.html', context)\n", (557, 590), False, 'from django.shortcuts import render, redirect\n'), ((734, 853), 'django.shortcuts.render', 'render', (['request', '"""all-photos/all_photos.html"""', "{'images': images, 'locations': locations, 'categories': categories}"], {}), "(request, 'all-photos/all_photos.html', {'images': images,\n 'locations': locations, 'categories': categories})\n", (740, 853), False, 'from django.shortcuts import render, redirect\n'), ((954, 1017), 'django.shortcuts.render', 'render', (['request', '"""all-photos/location.html"""', "{'images': images}"], {}), "(request, 'all-photos/location.html', {'images': images})\n", (960, 1017), False, 'from django.shortcuts import render, redirect\n'), ((1126, 1189), 'django.shortcuts.render', 'render', (['request', '"""all-photos/category.html"""', "{'images': images}"], {}), "(request, 'all-photos/category.html', {'images': images})\n", (1132, 1189), False, 'from django.shortcuts import render, redirect\n'), ((1420, 1514), 'django.shortcuts.render', 'render', (['request', '"""all-photos/search.html"""', "{'message': message, 'photos': searched_images}"], {}), "(request, 'all-photos/search.html', {'message': message, 'photos':\n searched_images})\n", (1426, 1514), False, 'from django.shortcuts import render, redirect\n'), ((1592, 1655), 'django.shortcuts.render', 'render', (['request', '"""all-photos/search.html"""', "{'message': message}"], {}), "(request, 'all-photos/search.html', {'message': message})\n", (1598, 1655), False, 'from django.shortcuts import render, redirect\n')]
|
import http.server
import socketserver
# TCP port the file server listens on.
PORT = 8000
# Found this at
# https://stackoverflow.com/questions/39801718/how-to-run-a-http-server-which-serve-a-specific-path
#
# Change the base directory for the simple server by intercepting the constructor
# for SimpleHTTPRequestHandler
# Directory (relative to the working directory) whose files are served.
DIRECTORY = "web"
class Handler(http.server.SimpleHTTPRequestHandler):
    """Request handler that serves files rooted at DIRECTORY, not the CWD."""

    def __init__(self, *args, **kwargs):
        # Pin the serving root; every other argument passes through unchanged.
        super().__init__(*args, directory=DIRECTORY, **kwargs)
# Serve until interrupted; TCPServer as a context manager guarantees the
# listening socket is closed on exit.
with socketserver.TCPServer(("", PORT), Handler) as httpd:
    print("serving at port", PORT)
    httpd.serve_forever()
|
[
"socketserver.TCPServer"
] |
[((586, 629), 'socketserver.TCPServer', 'socketserver.TCPServer', (["('', PORT)", 'Handler'], {}), "(('', PORT), Handler)\n", (608, 629), False, 'import socketserver\n')]
|
import numpy as np
import random as rd
import pandas as pd
# Load the metadata table (semicolon-separated). A raw string keeps the
# Windows-style backslashes in the path literal.
data = pd.read_csv(r'Assets\MetaData\Metadata1.csv', sep=';')
print(data)
x = data["Cost"]
print(x)
# Overwrite columns 1..16 with random costs in [0, 400] wherever a value
# is present.
for i in range(1, 17):
    x = data[data.columns[i]]
    # BUGFIX: range(0, len(x) - 1) skipped the final row; cover them all.
    for j in range(len(x)):
        # BUGFIX: pandas represents missing values as NaN, which
        # `is not None` does not catch; pd.notna() handles None and NaN.
        if pd.notna(x[j]):
            x[j] = rd.randint(0, 400)
    print(x)
|
[
"pandas.read_csv",
"random.randint"
] |
[((70, 125), 'pandas.read_csv', 'pd.read_csv', (['"""Assets\\\\MetaData\\\\Metadata1.csv"""'], {'sep': '""";"""'}), "('Assets\\\\MetaData\\\\Metadata1.csv', sep=';')\n", (81, 125), True, 'import pandas as pd\n'), ((298, 316), 'random.randint', 'rd.randint', (['(0)', '(400)'], {}), '(0, 400)\n', (308, 316), True, 'import random as rd\n')]
|
"""Config flow for roon integration."""
import asyncio
import logging
from roon import RoonApi
import voluptuous as vol
from homeassistant import config_entries, core, exceptions
from homeassistant.const import CONF_API_KEY, CONF_HOST
from .const import ( # pylint: disable=unused-import
AUTHENTICATE_TIMEOUT,
DEFAULT_NAME,
DOMAIN,
ROON_APPINFO,
)
# Module-level logger for this config flow.
_LOGGER = logging.getLogger(__name__)
# Schema of the initial form: the user supplies only the roon host.
DATA_SCHEMA = vol.Schema({"host": str})
# Overall authentication timeout, in seconds, polled in
# AUTHENTICATE_TIMEOUT increments.
TIMEOUT = 120
class RoonHub:
    """Interact with roon during config flow."""

    def __init__(self, host):
        """Initialize."""
        self._host = host

    async def authenticate(self, hass) -> bool:
        """Test if we can authenticate with the host.

        NOTE(review): despite the ``-> bool`` annotation this returns the
        roon auth token (or None on timeout), and the caller depends on
        that value — consider correcting the annotation.
        """
        token = None
        secs = 0
        # blocking_init=False: construction returns immediately; the token
        # presumably appears once the user approves the extension in roon.
        roonapi = RoonApi(ROON_APPINFO, None, self._host, blocking_init=False)
        # Poll for the token until it shows up or TIMEOUT seconds elapse.
        while secs < TIMEOUT:
            token = roonapi.token
            secs += AUTHENTICATE_TIMEOUT
            if token:
                break
            await asyncio.sleep(AUTHENTICATE_TIMEOUT)
        # Re-read in case the token arrived during the final sleep.
        token = roonapi.token
        roonapi.stop()
        return token
async def authenticate(hass: core.HomeAssistant, host):
    """Connect and authenticate home assistant.

    Returns the config-entry data dict on success; raises InvalidAuth when
    no token was obtained.
    """
    token = await RoonHub(host).authenticate(hass)
    if token is None:
        raise InvalidAuth
    return {CONF_HOST: host, CONF_API_KEY: token}
class ConfigFlow(config_entries.ConfigFlow, domain=DOMAIN):
    """Handle a config flow for roon."""

    VERSION = 1
    CONNECTION_CLASS = config_entries.CONN_CLASS_LOCAL_PUSH

    def __init__(self):
        """Initialize the Roon flow."""
        # Captured in step "user", consumed in step "link".
        self._host = None

    async def async_step_user(self, user_input=None):
        """Handle getting host details from the user."""
        errors = {}
        if user_input is not None:
            self._host = user_input["host"]
            # Reject hosts that already have a config entry.
            existing = {
                entry.data[CONF_HOST] for entry in self._async_current_entries()
            }
            if self._host in existing:
                errors["base"] = "duplicate_entry"
                return self.async_show_form(step_id="user", errors=errors)
            return await self.async_step_link()
        return self.async_show_form(
            step_id="user", data_schema=DATA_SCHEMA, errors=errors
        )

    async def async_step_link(self, user_input=None):
        """Handle linking and authenticating with the roon server."""
        errors = {}
        if user_input is not None:
            try:
                info = await authenticate(self.hass, self._host)
            except InvalidAuth:
                errors["base"] = "invalid_auth"
            except Exception:  # pylint: disable=broad-except
                _LOGGER.exception("Unexpected exception")
                errors["base"] = "unknown"
            else:
                # Success: store host + token in the new config entry.
                return self.async_create_entry(title=DEFAULT_NAME, data=info)
        return self.async_show_form(step_id="link", errors=errors)
class InvalidAuth(exceptions.HomeAssistantError):
    """Error to indicate there is invalid auth (no token was obtained)."""
|
[
"voluptuous.Schema",
"roon.RoonApi",
"asyncio.sleep",
"logging.getLogger"
] |
[((379, 406), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (396, 406), False, 'import logging\n'), ((422, 447), 'voluptuous.Schema', 'vol.Schema', (["{'host': str}"], {}), "({'host': str})\n", (432, 447), True, 'import voluptuous as vol\n'), ((774, 834), 'roon.RoonApi', 'RoonApi', (['ROON_APPINFO', 'None', 'self._host'], {'blocking_init': '(False)'}), '(ROON_APPINFO, None, self._host, blocking_init=False)\n', (781, 834), False, 'from roon import RoonApi\n'), ((1002, 1037), 'asyncio.sleep', 'asyncio.sleep', (['AUTHENTICATE_TIMEOUT'], {}), '(AUTHENTICATE_TIMEOUT)\n', (1015, 1037), False, 'import asyncio\n')]
|
import os
from abc import abstractmethod
from typing import Tuple, Type, Dict, Optional, Any
from enn_zoo.griddly.wrapper import GriddlyEnv
from entity_gym.environment import ActionSpace, ObsSpace, Entity, CategoricalActionSpace
from entity_gym.environment import Environment
from griddly import GymWrapper, gd
# Absolute directory of this module; used to locate the bundled YAML env descriptions.
init_path = os.path.dirname(os.path.realpath(__file__))
def generate_obs_space(env: Any) -> ObsSpace:
    """Build the entity-gym observation space for a Griddly environment.

    Every concrete object exposes x/y/z position, orientation, owning
    player id, and its per-object variables.  A synthetic "__global__"
    entity carries the global variables and global actions, since Griddly
    has no real global entity.
    """
    global_variables = env.game.get_global_variable_names()
    per_object_vars = env.game.get_object_variable_map()
    entities = {"__global__": Entity(global_variables)}
    for object_name in env.object_names:
        entities[object_name] = Entity(
            ["x", "y", "z", "orientation", "player_id", *per_object_vars[object_name]]
        )
    return ObsSpace(entities)
def generate_action_space(env: Any) -> Dict[str, ActionSpace]:
    """Build the entity-gym action space from a Griddly env's input mappings.

    Internal-only Griddly actions are skipped; every exposed action gets a
    categorical space whose first label is "NOP" (Griddly action ID 0),
    followed by the mapped action descriptions in ID order.
    """
    spaces: Dict[str, ActionSpace] = {}
    for action_name, action_mapping in env.action_input_mappings.items():
        # Internal actions are not part of the player-facing space.
        if action_mapping["Internal"] == True:
            continue
        input_mappings = action_mapping["InputMappings"]
        labels = ["NOP"]  # Action ID 0 is always NOP in Griddly.
        labels.extend(
            input_mappings[str(action_id)]["Description"]
            for action_id in range(1, len(input_mappings) + 1)
        )
        spaces[action_name] = CategoricalActionSpace(labels)
    return spaces
def create_env(
    yaml_file: str,
    image_path: Optional[str] = None,
    shader_path: Optional[str] = None,
    level: int = 0,
) -> Type[GriddlyEnv]:
    """
    In order to fit the API for the Environment, we need to pre-load the environment from the yaml and then pass in
    observation space, action space and the instantiated GymWrapper
    """
    # Probe instance: build the env once only to derive its observation and
    # action spaces, then close it.
    env = GymWrapper(
        yaml_file=yaml_file, image_path=image_path, shader_path=shader_path, level=level
    )
    env.reset()
    action_space = generate_action_space(env)
    observation_space = generate_obs_space(env)
    env.close()

    # Concrete subclass closing over the probed spaces and env configuration.
    class InstantiatedGriddlyEnv(GriddlyEnv):
        @classmethod
        def _griddly_env(cls) -> Any:
            # A fresh wrapper per call, with observers configured for
            # entity-gym use (no per-player observer, block-2D global view).
            return GymWrapper(
                yaml_file=yaml_file,
                image_path=image_path,
                shader_path=shader_path,
                player_observer_type=gd.ObserverType.NONE,
                global_observer_type=gd.ObserverType.BLOCK_2D,
                level=level,
            )

        @classmethod
        def obs_space(cls) -> ObsSpace:
            return observation_space

        @classmethod
        def action_space(cls) -> Dict[str, ActionSpace]:
            return action_space

    return InstantiatedGriddlyEnv
# Registry of bundled Griddly environments: name -> (yaml path, level).
_CLUSTERS_YAML = os.path.join(init_path, "env_descriptions/clusters.yaml")
GRIDDLY_ENVS: Dict[str, Tuple[str, int]] = {
    f"GDY-Clusters-{level}": (_CLUSTERS_YAML, level) for level in range(5)
}
|
[
"os.path.realpath",
"entity_gym.environment.CategoricalActionSpace",
"entity_gym.environment.ObsSpace",
"entity_gym.environment.Entity",
"os.path.join",
"griddly.GymWrapper"
] |
[((341, 367), 'os.path.realpath', 'os.path.realpath', (['__file__'], {}), '(__file__)\n', (357, 367), False, 'import os\n'), ((938, 953), 'entity_gym.environment.ObsSpace', 'ObsSpace', (['space'], {}), '(space)\n', (946, 953), False, 'from entity_gym.environment import ActionSpace, ObsSpace, Entity, CategoricalActionSpace\n'), ((2078, 2175), 'griddly.GymWrapper', 'GymWrapper', ([], {'yaml_file': 'yaml_file', 'image_path': 'image_path', 'shader_path': 'shader_path', 'level': 'level'}), '(yaml_file=yaml_file, image_path=image_path, shader_path=\n shader_path, level=level)\n', (2088, 2175), False, 'from griddly import GymWrapper, gd\n'), ((742, 766), 'entity_gym.environment.Entity', 'Entity', (['global_variables'], {}), '(global_variables)\n', (748, 766), False, 'from entity_gym.environment import ActionSpace, ObsSpace, Entity, CategoricalActionSpace\n'), ((824, 903), 'entity_gym.environment.Entity', 'Entity', (["['x', 'y', 'z', 'orientation', 'player_id', *object_variable_map[name]]"], {}), "(['x', 'y', 'z', 'orientation', 'player_id', *object_variable_map[name]])\n", (830, 903), False, 'from entity_gym.environment import ActionSpace, ObsSpace, Entity, CategoricalActionSpace\n'), ((1652, 1683), 'entity_gym.environment.CategoricalActionSpace', 'CategoricalActionSpace', (['actions'], {}), '(actions)\n', (1674, 1683), False, 'from entity_gym.environment import ActionSpace, ObsSpace, Entity, CategoricalActionSpace\n'), ((3045, 3102), 'os.path.join', 'os.path.join', (['init_path', '"""env_descriptions/clusters.yaml"""'], {}), "(init_path, 'env_descriptions/clusters.yaml')\n", (3057, 3102), False, 'import os\n'), ((3131, 3188), 'os.path.join', 'os.path.join', (['init_path', '"""env_descriptions/clusters.yaml"""'], {}), "(init_path, 'env_descriptions/clusters.yaml')\n", (3143, 3188), False, 'import os\n'), ((3217, 3274), 'os.path.join', 'os.path.join', (['init_path', '"""env_descriptions/clusters.yaml"""'], {}), "(init_path, 'env_descriptions/clusters.yaml')\n", (3229, 
3274), False, 'import os\n'), ((3303, 3360), 'os.path.join', 'os.path.join', (['init_path', '"""env_descriptions/clusters.yaml"""'], {}), "(init_path, 'env_descriptions/clusters.yaml')\n", (3315, 3360), False, 'import os\n'), ((3389, 3446), 'os.path.join', 'os.path.join', (['init_path', '"""env_descriptions/clusters.yaml"""'], {}), "(init_path, 'env_descriptions/clusters.yaml')\n", (3401, 3446), False, 'import os\n'), ((2436, 2627), 'griddly.GymWrapper', 'GymWrapper', ([], {'yaml_file': 'yaml_file', 'image_path': 'image_path', 'shader_path': 'shader_path', 'player_observer_type': 'gd.ObserverType.NONE', 'global_observer_type': 'gd.ObserverType.BLOCK_2D', 'level': 'level'}), '(yaml_file=yaml_file, image_path=image_path, shader_path=\n shader_path, player_observer_type=gd.ObserverType.NONE,\n global_observer_type=gd.ObserverType.BLOCK_2D, level=level)\n', (2446, 2627), False, 'from griddly import GymWrapper, gd\n')]
|
# Copyright (c) 2020 wngfra
# Use of this source code is governed by the Apache-2.0 license, see LICENSE
import socket
from collections import deque
import numpy as np
import rclpy
from rclpy.node import Node
from tactile_interfaces.msg import TactileSignal
from tactile_interfaces.srv import ChangeState
# Numeric node states (as sent over the ChangeState service) mapped to
# human-readable names.
STATE_LIST = {0: "calibration", 1: "recording",
              50: "standby", 99: "termination"}
class TactileSignalPublisher(Node):
    """
    A node class for tactile signal publisher.
    The node receives tactile signals in bytes via UDP and converts the data to array and publish to ROS2 network.
    Runtime node state switch is implemented.
    """

    def __init__(self):
        super().__init__("tactile_publisher")
        # Parameters are set via ROS2 parameter server.
        self.declare_parameters(
            namespace="",
            parameters=[
                ("ip", "0.0.0.0"),  # for container host net
                ("port", 10240),
                ("buffer_size", 96),
            ],
        )
        ip = str(self.get_parameter("ip").value)
        port = int(self.get_parameter("port").value)
        buffer_size = int(self.get_parameter("buffer_size").value)
        # Open UDP socket and bind the port
        self.sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
        self.sock.bind((ip, port))
        # Start in state 0 ("calibration"); see STATE_LIST for the mapping.
        self.node_state = 0
        # Data buffer for calibration
        self.buffer = deque(maxlen=buffer_size)
        # Baseline computed during calibration; 16 channels.
        self.reference_value = np.zeros(16)
        # Create the publisher and service host
        self.publisher = self.create_publisher(
            TactileSignal, "tactile_signals", 10)
        self.service = self.create_service(
            ChangeState,
            "tactile_publisher/change_state",
            self.change_node_state_callback,
        )
        # Publisher rate 0.03s
        self.timer = self.create_timer(0.03, self.timer_callback)
        # self.get_logger().info("Node started in state: calibration")

    def timer_callback(self):
        """Read one UDP datagram, then calibrate or publish per node state."""
        # NOTE(review): recvfrom() blocks until a datagram arrives, so a
        # silent sensor stalls this timer callback — confirm acceptable.
        data, addr = self.sock.recvfrom(256)
        # Decode consecutive big-endian 2-byte words into an int32 array.
        values = np.array(
            [int.from_bytes(data[i: i + 2], "big")
             for i in range(0, len(data), 2)],
            dtype=np.int32,
        )
        try:
            if self.node_state == 0:  # calibration state
                self.buffer.append(values)
                # Once the buffer is full, compute the average values as reference
                if len(self.buffer) == self.buffer.maxlen:
                    self.reference_value = np.mean(
                        self.buffer, axis=0, dtype=np.int32)
                    self.node_state = 1  # Change to recording state
                    self.get_logger().info("Calibration finished!")
            elif self.node_state == 1:  # recording state
                if len(self.buffer) < self.buffer.maxlen:
                    self.get_logger().warn("Calibration unfinished!")
                # Publish baseline-corrected values.
                values -= self.reference_value
                # Prepare TactileSignal message
                msg = TactileSignal()
                msg.addr = addr[0] + ":" + str(addr[1])
                msg.header.frame_id = "world"
                msg.header.stamp = self.get_clock().now().to_msg()
                # NOTE(review): assigning a numpy int32 array / np.mean
                # result to the message fields assumes the .msg definition
                # accepts them — confirm the field types.
                msg.data = values
                msg.mean = np.mean(values)
                self.publisher.publish(msg)
            elif self.node_state == 50:  # standby state
                pass
            elif self.node_state == 99:  # termination state
                self.get_logger().warn("Tactile publisher terminated.")
                self.destroy_node()
        except Exception as error:
            # Log and keep the timer alive rather than crashing the node.
            self.get_logger().error(str(error))

    def change_node_state_callback(self, request, response):
        """Service callback: switch the node to the requested state."""
        if request.transition != self.node_state and request.transition in STATE_LIST.keys():
            self.node_state = request.transition
            response.success = True
            response.info = "OK"
            self.get_logger().info(
                "Changed to state: {}".format(
                    STATE_LIST[self.node_state])
            )
            if self.node_state == 0:
                # Re-entering calibration: discard stale samples.
                self.buffer.clear()
        else:
            # NOTE(review): raising propagates out of the service callback
            # instead of returning success=False — confirm this is intended.
            raise Exception("Node state cannot be changed!")
        return response
def main(args=None):
    """Initialise rclpy and spin the tactile publisher until shutdown."""
    rclpy.init(args=args)
    node = TactileSignalPublisher()
    rclpy.spin(node)
    rclpy.shutdown()


if __name__ == "__main__":
    main()
|
[
"rclpy.spin",
"rclpy.init",
"socket.socket",
"numpy.zeros",
"rclpy.shutdown",
"numpy.mean",
"tactile_interfaces.msg.TactileSignal",
"collections.deque"
] |
[((4306, 4327), 'rclpy.init', 'rclpy.init', ([], {'args': 'args'}), '(args=args)\n', (4316, 4327), False, 'import rclpy\n'), ((4367, 4382), 'rclpy.spin', 'rclpy.spin', (['pub'], {}), '(pub)\n', (4377, 4382), False, 'import rclpy\n'), ((4387, 4403), 'rclpy.shutdown', 'rclpy.shutdown', ([], {}), '()\n', (4401, 4403), False, 'import rclpy\n'), ((1270, 1318), 'socket.socket', 'socket.socket', (['socket.AF_INET', 'socket.SOCK_DGRAM'], {}), '(socket.AF_INET, socket.SOCK_DGRAM)\n', (1283, 1318), False, 'import socket\n'), ((1443, 1468), 'collections.deque', 'deque', ([], {'maxlen': 'buffer_size'}), '(maxlen=buffer_size)\n', (1448, 1468), False, 'from collections import deque\n'), ((1500, 1512), 'numpy.zeros', 'np.zeros', (['(16)'], {}), '(16)\n', (1508, 1512), True, 'import numpy as np\n'), ((2539, 2583), 'numpy.mean', 'np.mean', (['self.buffer'], {'axis': '(0)', 'dtype': 'np.int32'}), '(self.buffer, axis=0, dtype=np.int32)\n', (2546, 2583), True, 'import numpy as np\n'), ((3050, 3065), 'tactile_interfaces.msg.TactileSignal', 'TactileSignal', ([], {}), '()\n', (3063, 3065), False, 'from tactile_interfaces.msg import TactileSignal\n'), ((3296, 3311), 'numpy.mean', 'np.mean', (['values'], {}), '(values)\n', (3303, 3311), True, 'import numpy as np\n')]
|
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
from enum import Enum, EnumMeta
from six import with_metaclass
class _CaseInsensitiveEnumMeta(EnumMeta):
def __getitem__(self, name):
return super().__getitem__(name.upper())
def __getattr__(cls, name):
"""Return the enum member matching `name`
We use __getattr__ instead of descriptors or inserting into the enum
class' __dict__ in order to support `name` and `value` being both
properties for enum members (which live in the class' __dict__) and
enum members themselves.
"""
try:
return cls._member_map_[name.upper()]
except KeyError:
raise AttributeError(name)
class AppliedScopeType(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)):
    """Type of the Applied Scope: single subscription or shared.
    """

    SINGLE = "Single"
    SHARED = "Shared"
class AqiStateType(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)):
    """The Auto quota increase feature state - enabled: feature is enabled, disabled: feature is
    disabled.
    """

    ENABLED = "enabled"
    DISABLED = "disabled"
class CalculateExchangeOperationResultStatus(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)):
    """Status of the operation.
    """

    SUCCEEDED = "Succeeded"
    FAILED = "Failed"
    CANCELLED = "Cancelled"
    PENDING = "Pending"
class ContactMethodType(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)):
    """The contact method - Email: Contact using provided email, Phone: contact using provided phone
    number.
    """

    EMAIL = "Email"
    PHONE = "Phone"
class ErrorResponseCode(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)):
    """Error codes that can appear in an error response.
    """
    NOT_SPECIFIED = "NotSpecified"
    INTERNAL_SERVER_ERROR = "InternalServerError"
    SERVER_TIMEOUT = "ServerTimeout"
    AUTHORIZATION_FAILED = "AuthorizationFailed"
    BAD_REQUEST = "BadRequest"
    CLIENT_CERTIFICATE_THUMBPRINT_NOT_SET = "ClientCertificateThumbprintNotSet"
    INVALID_REQUEST_CONTENT = "InvalidRequestContent"
    OPERATION_FAILED = "OperationFailed"
    HTTP_METHOD_NOT_SUPPORTED = "HttpMethodNotSupported"
    INVALID_REQUEST_URI = "InvalidRequestUri"
    MISSING_TENANT_ID = "MissingTenantId"
    INVALID_TENANT_ID = "InvalidTenantId"
    INVALID_RESERVATION_ORDER_ID = "InvalidReservationOrderId"
    INVALID_RESERVATION_ID = "InvalidReservationId"
    RESERVATION_ID_NOT_IN_RESERVATION_ORDER = "ReservationIdNotInReservationOrder"
    RESERVATION_ORDER_NOT_FOUND = "ReservationOrderNotFound"
    INVALID_SUBSCRIPTION_ID = "InvalidSubscriptionId"
    INVALID_ACCESS_TOKEN = "InvalidAccessToken"
    INVALID_LOCATION_ID = "InvalidLocationId"
    UNAUTHENTICATED_REQUESTS_THROTTLED = "UnauthenticatedRequestsThrottled"
    INVALID_HEALTH_CHECK_TYPE = "InvalidHealthCheckType"
    FORBIDDEN = "Forbidden"
    BILLING_SCOPE_ID_CANNOT_BE_CHANGED = "BillingScopeIdCannotBeChanged"
    APPLIED_SCOPES_NOT_ASSOCIATED_WITH_COMMERCE_ACCOUNT = "AppliedScopesNotAssociatedWithCommerceAccount"
    PATCH_VALUES_SAME_AS_EXISTING = "PatchValuesSameAsExisting"
    ROLE_ASSIGNMENT_CREATION_FAILED = "RoleAssignmentCreationFailed"
    RESERVATION_ORDER_CREATION_FAILED = "ReservationOrderCreationFailed"
    RESERVATION_ORDER_NOT_ENABLED = "ReservationOrderNotEnabled"
    CAPACITY_UPDATE_SCOPES_FAILED = "CapacityUpdateScopesFailed"
    UNSUPPORTED_RESERVATION_TERM = "UnsupportedReservationTerm"
    RESERVATION_ORDER_ID_ALREADY_EXISTS = "ReservationOrderIdAlreadyExists"
    RISK_CHECK_FAILED = "RiskCheckFailed"
    CREATE_QUOTE_FAILED = "CreateQuoteFailed"
    ACTIVATE_QUOTE_FAILED = "ActivateQuoteFailed"
    NONSUPPORTED_ACCOUNT_ID = "NonsupportedAccountId"
    PAYMENT_INSTRUMENT_NOT_FOUND = "PaymentInstrumentNotFound"
    MISSING_APPLIED_SCOPES_FOR_SINGLE = "MissingAppliedScopesForSingle"
    NO_VALID_RESERVATIONS_TO_RE_RATE = "NoValidReservationsToReRate"
    RE_RATE_ONLY_ALLOWED_FOR_EA = "ReRateOnlyAllowedForEA"
    OPERATION_CANNOT_BE_PERFORMED_IN_CURRENT_STATE = "OperationCannotBePerformedInCurrentState"
    INVALID_SINGLE_APPLIED_SCOPES_COUNT = "InvalidSingleAppliedScopesCount"
    INVALID_FULFILLMENT_REQUEST_PARAMETERS = "InvalidFulfillmentRequestParameters"
    NOT_SUPPORTED_COUNTRY = "NotSupportedCountry"
    INVALID_REFUND_QUANTITY = "InvalidRefundQuantity"
    PURCHASE_ERROR = "PurchaseError"
    BILLING_CUSTOMER_INPUT_ERROR = "BillingCustomerInputError"
    BILLING_PAYMENT_INSTRUMENT_SOFT_ERROR = "BillingPaymentInstrumentSoftError"
    BILLING_PAYMENT_INSTRUMENT_HARD_ERROR = "BillingPaymentInstrumentHardError"
    BILLING_TRANSIENT_ERROR = "BillingTransientError"
    BILLING_ERROR = "BillingError"
    FULFILLMENT_CONFIGURATION_ERROR = "FulfillmentConfigurationError"
    FULFILLMENT_OUT_OF_STOCK_ERROR = "FulfillmentOutOfStockError"
    FULFILLMENT_TRANSIENT_ERROR = "FulfillmentTransientError"
    FULFILLMENT_ERROR = "FulfillmentError"
    CALCULATE_PRICE_FAILED = "CalculatePriceFailed"
class ExchangeOperationResultStatus(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)):
    """Status of the exchange operation.
    """
    SUCCEEDED = "Succeeded"
    FAILED = "Failed"
    CANCELLED = "Cancelled"
    PENDING_REFUNDS = "PendingRefunds"
    PENDING_PURCHASES = "PendingPurchases"
class InstanceFlexibility(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)):
    """Turning this on will apply the reservation discount to other VMs in the same VM size group.
    Only specify for VirtualMachines reserved resource type.
    """
    ON = "On"
    OFF = "Off"
class OperationStatus(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)):
    """Status of the individual operation.
    """
    SUCCEEDED = "Succeeded"
    FAILED = "Failed"
    CANCELLED = "Cancelled"
    PENDING = "Pending"
class PaymentStatus(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)):
    """Describes whether the payment is completed, failed, cancelled or scheduled in the future.
    """
    SUCCEEDED = "Succeeded"
    FAILED = "Failed"
    SCHEDULED = "Scheduled"
    CANCELLED = "Cancelled"
class QuotaRequestState(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)):
    """The quota request status.
    """
    ACCEPTED = "Accepted"
    INVALID = "Invalid"
    SUCCEEDED = "Succeeded"
    FAILED = "Failed"
    IN_PROGRESS = "InProgress"
class ReservationBillingPlan(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)):
    """Represent the billing plans.
    """
    UPFRONT = "Upfront"
    MONTHLY = "Monthly"
class ReservationStatusCode(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)):
    """Status codes of a Reservation.
    """
    NONE = "None"
    PENDING = "Pending"
    ACTIVE = "Active"
    PURCHASE_ERROR = "PurchaseError"
    PAYMENT_INSTRUMENT_ERROR = "PaymentInstrumentError"
    SPLIT = "Split"
    MERGED = "Merged"
    EXPIRED = "Expired"
    SUCCEEDED = "Succeeded"
class ReservationTerm(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)):
    """Represent the term of Reservation.

    Values are ISO 8601 duration strings (one year / three years).
    """
    P1_Y = "P1Y"
    P3_Y = "P3Y"
class ReservedResourceType(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)):
    """The type of the resource that is being reserved.
    """
    VIRTUAL_MACHINES = "VirtualMachines"
    SQL_DATABASES = "SqlDatabases"
    SUSE_LINUX = "SuseLinux"
    COSMOS_DB = "CosmosDb"
    RED_HAT = "RedHat"
    SQL_DATA_WAREHOUSE = "SqlDataWarehouse"
    V_MWARE_CLOUD_SIMPLE = "VMwareCloudSimple"
    RED_HAT_OSA = "RedHatOsa"
    DATABRICKS = "Databricks"
    APP_SERVICE = "AppService"
    MANAGED_DISK = "ManagedDisk"
    BLOCK_BLOB = "BlockBlob"
    REDIS_CACHE = "RedisCache"
    AZURE_DATA_EXPLORER = "AzureDataExplorer"
    MY_SQL = "MySql"
    MARIA_DB = "MariaDb"
    POSTGRE_SQL = "PostgreSql"
    DEDICATED_HOST = "DedicatedHost"
    SAP_HANA = "SapHana"
    SQL_AZURE_HYBRID_BENEFIT = "SqlAzureHybridBenefit"
class ResourceType(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)):
    """The resource types.
    """
    STANDARD = "standard"
    DEDICATED = "dedicated"
    LOW_PRIORITY = "lowPriority"
    SHARED = "shared"
    SERVICE_SPECIFIC = "serviceSpecific"
class SeverityType(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)):
    """The Severity types.
    """
    CRITICAL = "Critical"
    MODERATE = "Moderate"
    MINIMAL = "Minimal"
class SupportContactType(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)):
    """The support contact types.
    """
    EMAIL = "email"
    PHONE = "phone"
    CHAT = "chat"
|
[
"six.with_metaclass"
] |
[((1163, 1214), 'six.with_metaclass', 'with_metaclass', (['_CaseInsensitiveEnumMeta', 'str', 'Enum'], {}), '(_CaseInsensitiveEnumMeta, str, Enum)\n', (1177, 1214), False, 'from six import with_metaclass\n'), ((1324, 1375), 'six.with_metaclass', 'with_metaclass', (['_CaseInsensitiveEnumMeta', 'str', 'Enum'], {}), '(_CaseInsensitiveEnumMeta, str, Enum)\n', (1338, 1375), False, 'from six import with_metaclass\n'), ((1594, 1645), 'six.with_metaclass', 'with_metaclass', (['_CaseInsensitiveEnumMeta', 'str', 'Enum'], {}), '(_CaseInsensitiveEnumMeta, str, Enum)\n', (1608, 1645), False, 'from six import with_metaclass\n'), ((1816, 1867), 'six.with_metaclass', 'with_metaclass', (['_CaseInsensitiveEnumMeta', 'str', 'Enum'], {}), '(_CaseInsensitiveEnumMeta, str, Enum)\n', (1830, 1867), False, 'from six import with_metaclass\n'), ((2057, 2108), 'six.with_metaclass', 'with_metaclass', (['_CaseInsensitiveEnumMeta', 'str', 'Enum'], {}), '(_CaseInsensitiveEnumMeta, str, Enum)\n', (2071, 2108), False, 'from six import with_metaclass\n'), ((5392, 5443), 'six.with_metaclass', 'with_metaclass', (['_CaseInsensitiveEnumMeta', 'str', 'Enum'], {}), '(_CaseInsensitiveEnumMeta, str, Enum)\n', (5406, 5443), False, 'from six import with_metaclass\n'), ((5674, 5725), 'six.with_metaclass', 'with_metaclass', (['_CaseInsensitiveEnumMeta', 'str', 'Enum'], {}), '(_CaseInsensitiveEnumMeta, str, Enum)\n', (5688, 5725), False, 'from six import with_metaclass\n'), ((5950, 6001), 'six.with_metaclass', 'with_metaclass', (['_CaseInsensitiveEnumMeta', 'str', 'Enum'], {}), '(_CaseInsensitiveEnumMeta, str, Enum)\n', (5964, 6001), False, 'from six import with_metaclass\n'), ((6179, 6230), 'six.with_metaclass', 'with_metaclass', (['_CaseInsensitiveEnumMeta', 'str', 'Enum'], {}), '(_CaseInsensitiveEnumMeta, str, Enum)\n', (6193, 6230), False, 'from six import with_metaclass\n'), ((6470, 6521), 'six.with_metaclass', 'with_metaclass', (['_CaseInsensitiveEnumMeta', 'str', 'Enum'], {}), '(_CaseInsensitiveEnumMeta, 
str, Enum)\n', (6484, 6521), False, 'from six import with_metaclass\n'), ((6727, 6778), 'six.with_metaclass', 'with_metaclass', (['_CaseInsensitiveEnumMeta', 'str', 'Enum'], {}), '(_CaseInsensitiveEnumMeta, str, Enum)\n', (6741, 6778), False, 'from six import with_metaclass\n'), ((6903, 6954), 'six.with_metaclass', 'with_metaclass', (['_CaseInsensitiveEnumMeta', 'str', 'Enum'], {}), '(_CaseInsensitiveEnumMeta, str, Enum)\n', (6917, 6954), False, 'from six import with_metaclass\n'), ((7232, 7283), 'six.with_metaclass', 'with_metaclass', (['_CaseInsensitiveEnumMeta', 'str', 'Enum'], {}), '(_CaseInsensitiveEnumMeta, str, Enum)\n', (7246, 7283), False, 'from six import with_metaclass\n'), ((7399, 7450), 'six.with_metaclass', 'with_metaclass', (['_CaseInsensitiveEnumMeta', 'str', 'Enum'], {}), '(_CaseInsensitiveEnumMeta, str, Enum)\n', (7413, 7450), False, 'from six import with_metaclass\n'), ((8208, 8259), 'six.with_metaclass', 'with_metaclass', (['_CaseInsensitiveEnumMeta', 'str', 'Enum'], {}), '(_CaseInsensitiveEnumMeta, str, Enum)\n', (8222, 8259), False, 'from six import with_metaclass\n'), ((8468, 8519), 'six.with_metaclass', 'with_metaclass', (['_CaseInsensitiveEnumMeta', 'str', 'Enum'], {}), '(_CaseInsensitiveEnumMeta, str, Enum)\n', (8482, 8519), False, 'from six import with_metaclass\n'), ((8660, 8711), 'six.with_metaclass', 'with_metaclass', (['_CaseInsensitiveEnumMeta', 'str', 'Enum'], {}), '(_CaseInsensitiveEnumMeta, str, Enum)\n', (8674, 8711), False, 'from six import with_metaclass\n')]
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Copyright (c) 2019 <NAME>
This software is released under the MIT License.
See the LICENSE file in the project root for more information.
"""
import numpy as np
import os
import six.moves.urllib as urllib
import sys
import tarfile
import tensorflow as tf
import zipfile
import collections
import random
import time
import cv2
from distutils.version import StrictVersion
from collections import defaultdict
from io import StringIO
from PIL import Image
from collections import defaultdict
from datetime import datetime as dt
# This is needed since the notebook is stored in the object_detection folder.
from object_detection.utils import ops as utils_ops
if StrictVersion(tf.__version__) < StrictVersion('1.9.0'):
raise ImportError('Please upgrade your TensorFlow installation to v1.9.* or later!')
sys.path.append("/home/pi/models/research/object_detection")
from utils import label_map_util
from utils import visualization_utils as vis_util
MODEL_NAME = '/home/pi/models/research/object_detection/ssdlite_mobilenet_v2_coco_2018_05_09'
# Path to frozen detection graph. This is the actual model that is used for the object detection.
PATH_TO_FROZEN_GRAPH = os.path.join(MODEL_NAME, 'frozen_inference_graph.pb')
# List of the strings that is used to add correct label for each box.
PATH_TO_LABELS = os.path.join('/home/pi/models/research/object_detection/data', 'mscoco_label_map.pbtxt')
def load_image_into_numpy_array2(image):
    """Return *image* (PIL image or any array-like) as a uint8 numpy array."""
    array = np.asarray(image)
    return array.astype(np.uint8)
def main():
    """Run live object detection on the default webcam.

    Loads the frozen TF1 inference graph, then loops: capture a frame,
    run the detection session, draw boxes/labels and FPS, and display
    the result until 'q' is pressed.
    """
    WINDOW_NAME = 'Tensorflow object detection'
    freq = cv2.getTickFrequency()
    # Show a placeholder window right away so the user gets feedback
    # while the (slow) model load happens.
    cv2.namedWindow(WINDOW_NAME)
    cv2.moveWindow(WINDOW_NAME, 100, 200)
    image = np.zeros((480, 640, 3), np.uint8)
    # Fixed typo in the on-screen message ('Loadg' -> 'Loading').
    cv2.putText(image, 'Loading ...', (80, 200), cv2.FONT_HERSHEY_SIMPLEX, 1, (0, 0, 255), 1, cv2.LINE_AA)
    cv2.imshow(WINDOW_NAME, image)
    # Pump the GUI event loop so the placeholder actually renders.
    for _ in range(20):
        cv2.waitKey(10)
    # Load a (frozen) Tensorflow model into memory.
    print('Load a (frozen) Tensorflow model into memory.')
    detection_graph = tf.Graph()
    with detection_graph.as_default():
        od_graph_def = tf.GraphDef()
        with tf.gfile.GFile(PATH_TO_FROZEN_GRAPH, 'rb') as fid:
            serialized_graph = fid.read()
            od_graph_def.ParseFromString(serialized_graph)
            tf.import_graph_def(od_graph_def, name='')
    # Load the label map (class id -> display name).
    print('Loading label map.')
    category_index = label_map_util.create_category_index_from_labelmap(PATH_TO_LABELS, use_display_name=True)
    with detection_graph.as_default():
        with tf.Session() as sess:
            # Get handles to input and output tensors
            print('Get handles to input and output tensors.')
            ops = tf.get_default_graph().get_operations()
            all_tensor_names = {output.name for op in ops for output in op.outputs}
            tensor_dict = {}
            for key in ['num_detections', 'detection_boxes', 'detection_scores', 'detection_classes', 'detection_masks']:
                tensor_name = key + ':0'
                if tensor_name in all_tensor_names:
                    tensor_dict[key] = tf.get_default_graph().get_tensor_by_name(tensor_name)
            if 'detection_masks' in tensor_dict:
                # The following processing is only for single image
                detection_boxes = tf.squeeze(tensor_dict['detection_boxes'], [0])
                detection_masks = tf.squeeze(tensor_dict['detection_masks'], [0])
                # Reframe is required to translate mask from box coordinates to image coordinates and fit the image size.
                real_num_detection = tf.cast(tensor_dict['num_detections'][0], tf.int32)
                detection_boxes = tf.slice(detection_boxes, [0, 0], [real_num_detection, -1])
                detection_masks = tf.slice(detection_masks, [0, 0, 0], [real_num_detection, -1, -1])
                detection_masks_reframed = utils_ops.reframe_box_masks_to_image_masks(
                    detection_masks, detection_boxes, image.shape[0], image.shape[1])
                detection_masks_reframed = tf.cast(
                    tf.greater(detection_masks_reframed, 0.5), tf.uint8)
                # Follow the convention by adding back the batch dimension
                tensor_dict['detection_masks'] = tf.expand_dims(detection_masks_reframed, 0)
            image_tensor = tf.get_default_graph().get_tensor_by_name('image_tensor:0')
            # Start VideoCapture.
            print('Start VideoCapture.')
            cap = cv2.VideoCapture(0)
            cap.set(3, 640)  # frame width
            cap.set(4, 480)  # frame height
            # Run inference
            while True:
                # Capture frame-by-frame
                ret, frame = cap.read()
                # bgr -> rgb: OpenCV captures BGR, the model expects RGB.
                image_np = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)
                # Expand dimensions since the model expects images to have shape: [1, None, None, 3]
                image_np_expanded = np.expand_dims(image_np, axis=0)
                start_time = cv2.getTickCount()
                # Inference. Reuse the already-expanded batch instead of
                # expanding the same frame a second time (previous code
                # computed image_np_expanded and then never used it).
                print('sess.run in.')
                output_dict = sess.run(tensor_dict, feed_dict={image_tensor: image_np_expanded})
                print('sess.run out.')
                end_time = cv2.getTickCount()
                # all outputs are float32 numpy arrays, so convert types as appropriate
                output_dict['num_detections'] = int(output_dict['num_detections'][0])
                output_dict['detection_classes'] = output_dict['detection_classes'][0].astype(np.uint8)
                output_dict['detection_boxes'] = output_dict['detection_boxes'][0]
                output_dict['detection_scores'] = output_dict['detection_scores'][0]
                if 'detection_masks' in output_dict:
                    output_dict['detection_masks'] = output_dict['detection_masks'][0]
                # Visualization of the results of a detection.
                vis_util.visualize_boxes_and_labels_on_image_array(
                    frame,
                    output_dict['detection_boxes'],
                    output_dict['detection_classes'],
                    output_dict['detection_scores'],
                    category_index,
                    instance_masks=output_dict.get('detection_masks'),
                    use_normalized_coordinates=True,
                    line_thickness=4)
                # Draw FPS (tick counts divided by tick frequency give seconds).
                frame_rate = 1 / ((end_time - start_time) / freq)
                cv2.putText(frame, "FPS: {0:.2f}".format(frame_rate),
                            (30, 50), cv2.FONT_HERSHEY_SIMPLEX, 1, (255, 255, 0), 2, cv2.LINE_AA)
                # Display the resulting frame
                cv2.imshow(WINDOW_NAME, frame)
                if cv2.waitKey(10) & 0xFF == ord('q'):
                    break
            # Drain a few frames before releasing so the capture shuts down cleanly.
            for _ in range(10):
                cap.read()
            # When everything done, release the windows
            cap.release()
    cv2.destroyAllWindows()
# Script entry point: run the webcam detection loop.
if __name__ == '__main__':
    main()
|
[
"distutils.version.StrictVersion",
"cv2.getTickCount",
"tensorflow.get_default_graph",
"cv2.imshow",
"os.path.join",
"tensorflow.greater",
"sys.path.append",
"cv2.getTickFrequency",
"cv2.cvtColor",
"tensorflow.cast",
"tensorflow.squeeze",
"tensorflow.GraphDef",
"cv2.destroyAllWindows",
"cv2.waitKey",
"numpy.asarray",
"tensorflow.Session",
"tensorflow.gfile.GFile",
"tensorflow.Graph",
"tensorflow.import_graph_def",
"tensorflow.expand_dims",
"cv2.putText",
"numpy.zeros",
"numpy.expand_dims",
"utils.label_map_util.create_category_index_from_labelmap",
"cv2.VideoCapture",
"object_detection.utils.ops.reframe_box_masks_to_image_masks",
"tensorflow.slice",
"cv2.moveWindow",
"cv2.namedWindow"
] |
[((873, 933), 'sys.path.append', 'sys.path.append', (['"""/home/pi/models/research/object_detection"""'], {}), "('/home/pi/models/research/object_detection')\n", (888, 933), False, 'import sys\n'), ((1234, 1287), 'os.path.join', 'os.path.join', (['MODEL_NAME', '"""frozen_inference_graph.pb"""'], {}), "(MODEL_NAME, 'frozen_inference_graph.pb')\n", (1246, 1287), False, 'import os\n'), ((1376, 1468), 'os.path.join', 'os.path.join', (['"""/home/pi/models/research/object_detection/data"""', '"""mscoco_label_map.pbtxt"""'], {}), "('/home/pi/models/research/object_detection/data',\n 'mscoco_label_map.pbtxt')\n", (1388, 1468), False, 'import os\n'), ((727, 756), 'distutils.version.StrictVersion', 'StrictVersion', (['tf.__version__'], {}), '(tf.__version__)\n', (740, 756), False, 'from distutils.version import StrictVersion\n'), ((759, 781), 'distutils.version.StrictVersion', 'StrictVersion', (['"""1.9.0"""'], {}), "('1.9.0')\n", (772, 781), False, 'from distutils.version import StrictVersion\n'), ((1625, 1647), 'cv2.getTickFrequency', 'cv2.getTickFrequency', ([], {}), '()\n', (1645, 1647), False, 'import cv2\n'), ((1653, 1681), 'cv2.namedWindow', 'cv2.namedWindow', (['WINDOW_NAME'], {}), '(WINDOW_NAME)\n', (1668, 1681), False, 'import cv2\n'), ((1686, 1723), 'cv2.moveWindow', 'cv2.moveWindow', (['WINDOW_NAME', '(100)', '(200)'], {}), '(WINDOW_NAME, 100, 200)\n', (1700, 1723), False, 'import cv2\n'), ((1737, 1770), 'numpy.zeros', 'np.zeros', (['(480, 640, 3)', 'np.uint8'], {}), '((480, 640, 3), np.uint8)\n', (1745, 1770), True, 'import numpy as np\n'), ((1775, 1879), 'cv2.putText', 'cv2.putText', (['image', '"""Loadg ..."""', '(80, 200)', 'cv2.FONT_HERSHEY_SIMPLEX', '(1)', '(0, 0, 255)', '(1)', 'cv2.LINE_AA'], {}), "(image, 'Loadg ...', (80, 200), cv2.FONT_HERSHEY_SIMPLEX, 1, (0,\n 0, 255), 1, cv2.LINE_AA)\n", (1786, 1879), False, 'import cv2\n'), ((1880, 1910), 'cv2.imshow', 'cv2.imshow', (['WINDOW_NAME', 'image'], {}), '(WINDOW_NAME, image)\n', (1890, 1910), False, 
'import cv2\n'), ((2093, 2103), 'tensorflow.Graph', 'tf.Graph', ([], {}), '()\n', (2101, 2103), True, 'import tensorflow as tf\n'), ((2478, 2571), 'utils.label_map_util.create_category_index_from_labelmap', 'label_map_util.create_category_index_from_labelmap', (['PATH_TO_LABELS'], {'use_display_name': '(True)'}), '(PATH_TO_LABELS,\n use_display_name=True)\n', (2528, 2571), False, 'from utils import label_map_util\n'), ((7394, 7417), 'cv2.destroyAllWindows', 'cv2.destroyAllWindows', ([], {}), '()\n', (7415, 7417), False, 'import cv2\n'), ((1943, 1958), 'cv2.waitKey', 'cv2.waitKey', (['(10)'], {}), '(10)\n', (1954, 1958), False, 'import cv2\n'), ((2166, 2179), 'tensorflow.GraphDef', 'tf.GraphDef', ([], {}), '()\n', (2177, 2179), True, 'import tensorflow as tf\n'), ((1518, 1535), 'numpy.asarray', 'np.asarray', (['image'], {}), '(image)\n', (1528, 1535), True, 'import numpy as np\n'), ((2193, 2235), 'tensorflow.gfile.GFile', 'tf.gfile.GFile', (['PATH_TO_FROZEN_GRAPH', '"""rb"""'], {}), "(PATH_TO_FROZEN_GRAPH, 'rb')\n", (2207, 2235), True, 'import tensorflow as tf\n'), ((2357, 2399), 'tensorflow.import_graph_def', 'tf.import_graph_def', (['od_graph_def'], {'name': '""""""'}), "(od_graph_def, name='')\n", (2376, 2399), True, 'import tensorflow as tf\n'), ((2622, 2634), 'tensorflow.Session', 'tf.Session', ([], {}), '()\n', (2632, 2634), True, 'import tensorflow as tf\n'), ((4575, 4594), 'cv2.VideoCapture', 'cv2.VideoCapture', (['(0)'], {}), '(0)\n', (4591, 4594), False, 'import cv2\n'), ((3391, 3438), 'tensorflow.squeeze', 'tf.squeeze', (["tensor_dict['detection_boxes']", '[0]'], {}), "(tensor_dict['detection_boxes'], [0])\n", (3401, 3438), True, 'import tensorflow as tf\n'), ((3473, 3520), 'tensorflow.squeeze', 'tf.squeeze', (["tensor_dict['detection_masks']", '[0]'], {}), "(tensor_dict['detection_masks'], [0])\n", (3483, 3520), True, 'import tensorflow as tf\n'), ((3680, 3731), 'tensorflow.cast', 'tf.cast', (["tensor_dict['num_detections'][0]", 'tf.int32'], {}), 
"(tensor_dict['num_detections'][0], tf.int32)\n", (3687, 3731), True, 'import tensorflow as tf\n'), ((3766, 3825), 'tensorflow.slice', 'tf.slice', (['detection_boxes', '[0, 0]', '[real_num_detection, -1]'], {}), '(detection_boxes, [0, 0], [real_num_detection, -1])\n', (3774, 3825), True, 'import tensorflow as tf\n'), ((3860, 3926), 'tensorflow.slice', 'tf.slice', (['detection_masks', '[0, 0, 0]', '[real_num_detection, -1, -1]'], {}), '(detection_masks, [0, 0, 0], [real_num_detection, -1, -1])\n', (3868, 3926), True, 'import tensorflow as tf\n'), ((3970, 4082), 'object_detection.utils.ops.reframe_box_masks_to_image_masks', 'utils_ops.reframe_box_masks_to_image_masks', (['detection_masks', 'detection_boxes', 'image.shape[0]', 'image.shape[1]'], {}), '(detection_masks, detection_boxes,\n image.shape[0], image.shape[1])\n', (4012, 4082), True, 'from object_detection.utils import ops as utils_ops\n'), ((4349, 4392), 'tensorflow.expand_dims', 'tf.expand_dims', (['detection_masks_reframed', '(0)'], {}), '(detection_masks_reframed, 0)\n', (4363, 4392), True, 'import tensorflow as tf\n'), ((5075, 5113), 'cv2.cvtColor', 'cv2.cvtColor', (['frame', 'cv2.COLOR_BGR2RGB'], {}), '(frame, cv2.COLOR_BGR2RGB)\n', (5087, 5113), False, 'import cv2\n'), ((5252, 5284), 'numpy.expand_dims', 'np.expand_dims', (['image_np'], {'axis': '(0)'}), '(image_np, axis=0)\n', (5266, 5284), True, 'import numpy as np\n'), ((5315, 5333), 'cv2.getTickCount', 'cv2.getTickCount', ([], {}), '()\n', (5331, 5333), False, 'import cv2\n'), ((5575, 5593), 'cv2.getTickCount', 'cv2.getTickCount', ([], {}), '()\n', (5591, 5593), False, 'import cv2\n'), ((7050, 7080), 'cv2.imshow', 'cv2.imshow', (['WINDOW_NAME', 'frame'], {}), '(WINDOW_NAME, frame)\n', (7060, 7080), False, 'import cv2\n'), ((2778, 2800), 'tensorflow.get_default_graph', 'tf.get_default_graph', ([], {}), '()\n', (2798, 2800), True, 'import tensorflow as tf\n'), ((4172, 4213), 'tensorflow.greater', 'tf.greater', (['detection_masks_reframed', '(0.5)'], 
{}), '(detection_masks_reframed, 0.5)\n', (4182, 4213), True, 'import tensorflow as tf\n'), ((4421, 4443), 'tensorflow.get_default_graph', 'tf.get_default_graph', ([], {}), '()\n', (4441, 4443), True, 'import tensorflow as tf\n'), ((7180, 7195), 'cv2.waitKey', 'cv2.waitKey', (['(10)'], {}), '(10)\n', (7191, 7195), False, 'import cv2\n'), ((3185, 3207), 'tensorflow.get_default_graph', 'tf.get_default_graph', ([], {}), '()\n', (3205, 3207), True, 'import tensorflow as tf\n'), ((5478, 5505), 'numpy.expand_dims', 'np.expand_dims', (['image_np', '(0)'], {}), '(image_np, 0)\n', (5492, 5505), True, 'import numpy as np\n')]
|
# Copyright 2021 AIPlan4EU project
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import upf
from upf.shortcuts import *
from upf.test import TestCase, main
from upf.test.examples import get_example_problems
class TestProblem(TestCase):
def setUp(self):
TestCase.setUp(self)
self.problems = get_example_problems()
def test_problem_kind(self):
problem_kind = ProblemKind()
self.assertFalse(problem_kind.has_discrete_time())
self.assertFalse(problem_kind.has_continuous_time())
problem_kind.set_time('DISCRETE_TIME')
self.assertTrue(problem_kind.has_discrete_time())
problem_kind.set_time('CONTINUOUS_TIME')
self.assertTrue(problem_kind.has_continuous_time())
def test_basic(self):
problem = self.problems['basic'].problem
x = problem.fluent('x')
self.assertEqual(x.name(), 'x')
self.assertEqual(str(x), 'bool x')
self.assertEqual(x.arity(), 0)
self.assertTrue(x.type().is_bool_type())
a = problem.action('a')
self.assertEqual(a.name, 'a')
self.assertEqual(len(a.preconditions()), 1)
self.assertEqual(len(a.effects()), 1)
a_str = str(a)
self.assertIn('action a', a_str)
self.assertIn('preconditions', a_str)
self.assertIn('not x', a_str)
self.assertIn('effects', a_str)
self.assertIn('x := true', a_str)
self.assertEqual(problem.name, 'basic')
self.assertEqual(len(problem.fluents()), 1)
self.assertEqual(len(problem.actions()), 1)
self.assertTrue(problem.initial_value(x) is not None)
self.assertEqual(len(problem.goals()), 1)
problem_str = str(problem)
self.assertIn('fluents', problem_str)
self.assertIn('actions', problem_str)
self.assertIn('initial values', problem_str)
self.assertIn('goals', problem_str)
def test_basic_conditional(self):
problem = self.problems['basic_conditional'].problem
x = problem.fluent('x')
self.assertEqual(x.name(), 'x')
self.assertEqual(str(x), 'bool x')
self.assertEqual(x.arity(), 0)
self.assertTrue(x.type().is_bool_type())
y = problem.fluent('y')
self.assertEqual(y.name(), 'y')
self.assertEqual(str(y), 'bool y')
self.assertEqual(y.arity(), 0)
self.assertTrue(y.type().is_bool_type())
a_x = problem.action('a_x')
self.assertEqual(a_x.name, 'a_x')
self.assertEqual(len(a_x.preconditions()), 1)
self.assertEqual(len(a_x.effects()), 1)
ax_str = str(a_x)
self.assertIn('action a_x', ax_str)
self.assertIn('preconditions', ax_str)
self.assertIn('not x', ax_str)
self.assertIn('effects', ax_str)
self.assertIn('if y then x := true', ax_str)
a_y = problem.action('a_y')
self.assertEqual(a_y.name, 'a_y')
self.assertEqual(len(a_y.preconditions()), 1)
self.assertEqual(len(a_y.effects()), 1)
ay_str = str(a_y)
self.assertIn('action a_y', ay_str)
self.assertIn('preconditions', ay_str)
self.assertIn('not y', ay_str)
self.assertIn('effects', ay_str)
self.assertIn('y := true', ay_str)
self.assertEqual(problem.name, 'basic_conditional')
self.assertEqual(len(problem.fluents()), 2)
self.assertEqual(len(problem.actions()), 2)
self.assertTrue(problem.initial_value(x) is not None)
self.assertTrue(problem.initial_value(y) is not None)
self.assertEqual(len(problem.goals()), 1)
problem_str = str(problem)
self.assertIn('fluents', problem_str)
self.assertIn('actions', problem_str)
self.assertIn('initial values', problem_str)
self.assertIn('goals', problem_str)
def test_robot(self):
problem = self.problems['robot'].problem
Location = problem.user_type('Location')
self.assertTrue(Location.is_user_type())
self.assertEqual(Location.name(), 'Location')
self.assertEqual(str(Location), 'Location')
robot_at = problem.fluent('robot_at')
self.assertEqual(robot_at.name(), 'robot_at')
self.assertEqual(str(robot_at), 'bool robot_at[Location]')
self.assertEqual(robot_at.arity(), 1)
self.assertEqual(robot_at.signature(), [Location])
self.assertTrue(robot_at.type().is_bool_type())
battery_charge = problem.fluent('battery_charge')
self.assertEqual(battery_charge.name(), 'battery_charge')
self.assertEqual(str(battery_charge), 'real[0, 100] battery_charge')
self.assertEqual(battery_charge.arity(), 0)
self.assertTrue(battery_charge.type().is_real_type())
move = problem.action('move')
l_from = move.parameter('l_from')
l_to = move.parameter('l_to')
self.assertEqual(move.name, 'move')
self.assertEqual(len(move.parameters()), 2)
self.assertEqual(l_from.name(), 'l_from')
self.assertEqual(l_from.type(), Location)
self.assertEqual(l_to.name(), 'l_to')
self.assertEqual(l_to.type(), Location)
self.assertEqual(len(move.preconditions()), 4)
self.assertEqual(len(move.effects()), 3)
move_str = str(move)
self.assertTrue('action move(Location l_from, Location l_to)' in move_str)
self.assertTrue('preconditions' in move_str)
self.assertTrue('10 <= battery_charge' in move_str)
self.assertTrue('not (l_from == l_to)' in move_str)
self.assertTrue('robot_at(l_from)' in move_str)
self.assertTrue('not robot_at(l_to)' in move_str)
self.assertTrue('effects' in move_str)
self.assertTrue('robot_at(l_from) := false' in move_str)
self.assertTrue('robot_at(l_to) := true' in move_str)
self.assertTrue('battery_charge := (battery_charge - 10)' in move_str)
l1 = problem.object('l1')
l2 = problem.object('l2')
self.assertEqual(l1.name(), 'l1')
self.assertEqual(str(l1), 'l1')
self.assertEqual(l1.type(), Location)
self.assertEqual(l2.name(), 'l2')
self.assertEqual(str(l2), 'l2')
self.assertEqual(l2.type(), Location)
self.assertEqual(problem.name, 'robot')
self.assertEqual(len(problem.fluents()), 2)
self.assertEqual(problem.fluent('robot_at'), robot_at)
self.assertEqual(problem.fluent('battery_charge'), battery_charge)
self.assertEqual(len(problem.user_types()), 1)
self.assertEqual(problem.user_type('Location'), Location)
self.assertEqual(len(problem.objects(Location)), 2)
self.assertEqual(problem.objects(Location), [l1, l2])
self.assertEqual(len(problem.actions()), 1)
self.assertEqual(problem.action('move'), move)
self.assertTrue(problem.initial_value(robot_at(l1)) is not None)
self.assertTrue(problem.initial_value(robot_at(l2)) is not None)
self.assertTrue(problem.initial_value(battery_charge) is not None)
self.assertEqual(len(problem.goals()), 1)
problem_str = str(problem)
self.assertTrue('types' in problem_str)
self.assertTrue('fluents' in problem_str)
self.assertTrue('actions' in problem_str)
self.assertTrue('objects' in problem_str)
self.assertTrue('initial values' in problem_str)
self.assertTrue('goals' in problem_str)
def test_robot_loader(self):
problem = self.problems['robot_loader'].problem
Location = problem.user_type('Location')
self.assertTrue(Location.is_user_type())
self.assertEqual(Location.name(), 'Location')
robot_at = problem.fluent('robot_at')
self.assertEqual(robot_at.name(), 'robot_at')
self.assertEqual(robot_at.arity(), 1)
self.assertEqual(robot_at.signature(), [Location])
self.assertTrue(robot_at.type().is_bool_type())
cargo_at = problem.fluent('cargo_at')
self.assertEqual(cargo_at.name(), 'cargo_at')
self.assertEqual(cargo_at.arity(), 1)
self.assertEqual(cargo_at.signature(), [Location])
self.assertTrue(cargo_at.type().is_bool_type())
cargo_mounted = problem.fluent('cargo_mounted')
self.assertEqual(cargo_mounted.name(), 'cargo_mounted')
self.assertEqual(cargo_mounted.arity(), 0)
self.assertTrue(cargo_mounted.type().is_bool_type())
move = problem.action('move')
l_from = move.parameter('l_from')
l_to = move.parameter('l_to')
self.assertEqual(move.name, 'move')
self.assertEqual(len(move.parameters()), 2)
self.assertEqual(l_from.name(), 'l_from')
self.assertEqual(l_from.type(), Location)
self.assertEqual(l_to.name(), 'l_to')
self.assertEqual(l_to.type(), Location)
self.assertEqual(len(move.preconditions()), 3)
self.assertEqual(len(move.effects()), 2)
load = problem.action('load')
loc = load.parameter('loc')
self.assertEqual(load.name, 'load')
self.assertEqual(len(load.parameters()), 1)
self.assertEqual(loc.name(), 'loc')
self.assertEqual(loc.type(), Location)
self.assertEqual(len(load.preconditions()), 3)
self.assertEqual(len(load.effects()), 2)
unload = problem.action('unload')
loc = unload.parameter('loc')
self.assertEqual(unload.name, 'unload')
self.assertEqual(len(unload.parameters()), 1)
self.assertEqual(loc.name(), 'loc')
self.assertEqual(loc.type(), Location)
self.assertEqual(len(unload.preconditions()), 3)
self.assertEqual(len(unload.effects()), 2)
l1 = problem.object('l1')
l2 = problem.object('l2')
self.assertEqual(l1.name(), 'l1')
self.assertEqual(l1.type(), Location)
self.assertEqual(l2.name(), 'l2')
self.assertEqual(l2.type(), Location)
self.assertEqual(problem.name, 'robot_loader')
self.assertEqual(len(problem.fluents()), 3)
self.assertEqual(problem.fluent('robot_at'), robot_at)
self.assertEqual(problem.fluent('cargo_at'), cargo_at)
self.assertEqual(problem.fluent('cargo_mounted'), cargo_mounted)
self.assertEqual(len(problem.user_types()), 1)
self.assertEqual(problem.user_type('Location'), Location)
self.assertEqual(len(problem.objects(Location)), 2)
self.assertEqual(problem.objects(Location), [l1, l2])
self.assertEqual(len(problem.actions()), 3)
self.assertEqual(problem.action('move'), move)
self.assertEqual(problem.action('load'), load)
self.assertEqual(problem.action('unload'), unload)
self.assertTrue(problem.initial_value(robot_at(l1)) is not None)
self.assertTrue(problem.initial_value(robot_at(l2)) is not None)
self.assertTrue(problem.initial_value(cargo_at(l1)) is not None)
self.assertTrue(problem.initial_value(cargo_at(l2)) is not None)
self.assertTrue(problem.initial_value(cargo_mounted) is not None)
self.assertEqual(len(problem.goals()), 1)
def test_robot_loader_adv(self):
problem = self.problems['robot_loader_adv'].problem
Location = problem.user_type('Location')
self.assertTrue(Location.is_user_type())
self.assertEqual(Location.name(), 'Location')
Robot = problem.user_type('Robot')
self.assertTrue(Robot.is_user_type())
self.assertEqual(Robot.name(), 'Robot')
Container = problem.user_type('Container')
self.assertTrue(Container.is_user_type())
self.assertEqual(Container.name(), 'Container')
robot_at = problem.fluent('robot_at')
self.assertEqual(robot_at.name(), 'robot_at')
self.assertEqual(robot_at.arity(), 2)
self.assertEqual(robot_at.signature(), [Robot, Location])
self.assertTrue(robot_at.type().is_bool_type())
cargo_at = problem.fluent('cargo_at')
self.assertEqual(cargo_at.name(), 'cargo_at')
self.assertEqual(cargo_at.arity(), 2)
self.assertEqual(cargo_at.signature(), [Container, Location])
self.assertTrue(cargo_at.type().is_bool_type())
cargo_mounted = problem.fluent('cargo_mounted')
self.assertEqual(cargo_mounted.name(), 'cargo_mounted')
self.assertEqual(cargo_mounted.arity(), 2)
self.assertEqual(cargo_mounted.signature(), [Container, Robot])
self.assertTrue(cargo_mounted.type().is_bool_type())
move = problem.action('move')
l_from = move.parameter('l_from')
l_to = move.parameter('l_to')
r = move.parameter('r')
self.assertEqual(move.name, 'move')
self.assertEqual(len(move.parameters()), 3)
self.assertEqual(l_from.name(), 'l_from')
self.assertEqual(l_from.type(), Location)
self.assertEqual(l_to.name(), 'l_to')
self.assertEqual(l_to.type(), Location)
self.assertEqual(r.name(), 'r')
self.assertEqual(r.type(), Robot)
self.assertEqual(len(move.preconditions()), 3)
self.assertEqual(len(move.effects()), 2)
load = problem.action('load')
loc = load.parameter('loc')
r = load.parameter('r')
c = load.parameter('c')
self.assertEqual(load.name, 'load')
self.assertEqual(len(load.parameters()), 3)
self.assertEqual(loc.name(), 'loc')
self.assertEqual(loc.type(), Location)
self.assertEqual(r.name(), 'r')
self.assertEqual(r.type(), Robot)
self.assertEqual(c.name(), 'c')
self.assertEqual(c.type(), Container)
self.assertEqual(len(load.preconditions()), 3)
self.assertEqual(len(load.effects()), 2)
unload = problem.action('unload')
loc = unload.parameter('loc')
r = unload.parameter('r')
c = unload.parameter('c')
self.assertEqual(unload.name, 'unload')
self.assertEqual(len(unload.parameters()), 3)
self.assertEqual(loc.name(), 'loc')
self.assertEqual(loc.type(), Location)
self.assertEqual(r.name(), 'r')
self.assertEqual(r.type(), Robot)
self.assertEqual(c.name(), 'c')
self.assertEqual(c.type(), Container)
self.assertEqual(len(unload.preconditions()), 3)
self.assertEqual(len(unload.effects()), 2)
l1 = problem.object('l1')
l2 = problem.object('l2')
l3 = problem.object('l3')
r1 = problem.object('r1')
c1 = problem.object('c1')
self.assertEqual(l1.name(), 'l1')
self.assertEqual(l1.type(), Location)
self.assertEqual(l2.name(), 'l2')
self.assertEqual(l2.type(), Location)
self.assertEqual(l3.name(), 'l3')
self.assertEqual(l3.type(), Location)
self.assertEqual(r1.name(), 'r1')
self.assertEqual(r1.type(), Robot)
self.assertEqual(c1.name(), 'c1')
self.assertEqual(c1.type(), Container)
self.assertEqual(problem.name, 'robot_loader_adv')
self.assertEqual(len(problem.fluents()), 3)
self.assertEqual(problem.fluent('robot_at'), robot_at)
self.assertEqual(problem.fluent('cargo_at'), cargo_at)
self.assertEqual(problem.fluent('cargo_mounted'), cargo_mounted)
self.assertEqual(len(problem.user_types()), 3)
self.assertEqual(problem.user_type('Location'), Location)
self.assertEqual(len(problem.objects(Location)), 3)
self.assertEqual(problem.objects(Location), [l1, l2, l3])
self.assertEqual(problem.user_type('Robot'), Robot)
self.assertEqual(len(problem.objects(Robot)), 1)
self.assertEqual(problem.objects(Robot), [r1])
self.assertEqual(problem.user_type('Container'), Container)
self.assertEqual(len(problem.objects(Container)), 1)
self.assertEqual(problem.objects(Container), [c1])
self.assertEqual(len(problem.actions()), 3)
self.assertEqual(problem.action('move'), move)
self.assertEqual(problem.action('load'), load)
self.assertEqual(problem.action('unload'), unload)
self.assertTrue(problem.initial_value(robot_at(r1, l1)) is not None)
self.assertTrue(problem.initial_value(robot_at(r1, l2)) is not None)
self.assertTrue(problem.initial_value(robot_at(r1, l3)) is not None)
self.assertTrue(problem.initial_value(cargo_at(c1, l1)) is not None)
self.assertTrue(problem.initial_value(cargo_at(c1, l2)) is not None)
self.assertTrue(problem.initial_value(cargo_at(c1, l3)) is not None)
self.assertTrue(problem.initial_value(cargo_mounted(c1, r1)) is not None)
self.assertEqual(len(problem.goals()), 2)
def test_fluents_defaults(self):
Location = UserType('Location')
robot_at = Fluent('robot_at', BoolType(), [Location])
distance = Fluent('distance', RealType(), [Location, Location])
N = 10
locations = [Object(f'l{i}', Location) for i in range(N)]
problem = Problem('robot')
problem.add_fluent(robot_at, default_initial_value=False)
problem.add_fluent(distance, default_initial_value=Fraction(-1))
problem.add_objects(locations)
problem.set_initial_value(robot_at(locations[0]), True)
for i in range(N-1):
problem.set_initial_value(distance(locations[i], locations[i+1]), Fraction(10))
self.assertEqual(problem.initial_value(robot_at(locations[0])), TRUE())
for i in range(1, N):
self.assertEqual(problem.initial_value(robot_at(locations[i])), FALSE())
for i in range(N):
for j in range(N):
if j == i+1:
self.assertEqual(problem.initial_value(distance(locations[i], locations[j])),
Real(Fraction(10)))
else:
self.assertEqual(problem.initial_value(distance(locations[i], locations[j])),
Real(Fraction(-1)))
def test_problem_defaults(self):
Location = UserType('Location')
robot_at = Fluent('robot_at', BoolType(), [Location])
distance = Fluent('distance', IntType(), [Location, Location])
cost = Fluent('cost', IntType(), [Location, Location])
N = 10
locations = [Object(f'l{i}', Location) for i in range(N)]
problem = Problem('robot', initial_defaults={IntType(): 0})
problem.add_fluent(robot_at, default_initial_value=False)
problem.add_fluent(distance, default_initial_value=-1)
problem.add_fluent(cost)
problem.add_objects(locations)
problem.set_initial_value(robot_at(locations[0]), True)
for i in range(N-1):
problem.set_initial_value(distance(locations[i], locations[i+1]), 10)
problem.set_initial_value(cost(locations[i], locations[i+1]), 100)
self.assertEqual(problem.initial_value(robot_at(locations[0])), TRUE())
for i in range(1, N):
self.assertEqual(problem.initial_value(robot_at(locations[i])), FALSE())
for i in range(N):
for j in range(N):
if j == i+1:
self.assertEqual(problem.initial_value(distance(locations[i], locations[j])), Int(10))
self.assertEqual(problem.initial_value(cost(locations[i], locations[j])), Int(100))
else:
self.assertEqual(problem.initial_value(distance(locations[i], locations[j])), Int(-1))
self.assertEqual(problem.initial_value(cost(locations[i], locations[j])), Int(0))
if __name__ == "__main__":
main()
|
[
"upf.test.main",
"upf.test.TestCase.setUp",
"upf.test.examples.get_example_problems"
] |
[((20166, 20172), 'upf.test.main', 'main', ([], {}), '()\n', (20170, 20172), False, 'from upf.test import TestCase, main\n'), ((768, 788), 'upf.test.TestCase.setUp', 'TestCase.setUp', (['self'], {}), '(self)\n', (782, 788), False, 'from upf.test import TestCase, main\n'), ((813, 835), 'upf.test.examples.get_example_problems', 'get_example_problems', ([], {}), '()\n', (833, 835), False, 'from upf.test.examples import get_example_problems\n')]
|
import sys
import os
from cliff.app import App
from cliff.commandmanager import CommandManager
from hvac_cli.version import __version__
DEFAULT_VAULT_ADDR = 'http://127.0.0.1:8200'
class HvacApp(App):
def __init__(self):
super(HvacApp, self).__init__(
description="""
hvac-cli is CLI to Hashicorp Vault with additional features.
It does not support extensions that are not available
as Free Software such as namespaces, Sentinel, Policy Overrides
or Multi-factor Authentication (MFA).
""",
version=__version__,
command_manager=CommandManager('hvac_cli'),
deferred_help=True,
)
def build_option_parser(self, description, version, argparse_kwargs=None):
parser = super().build_option_parser(description, version, argparse_kwargs)
self.set_parser_arguments(parser)
return parser
@staticmethod
def set_parser_arguments(parser):
parser.add_argument(
'--dry-run',
action='store_true',
help='Show what would be done but do nothing'
)
parser.add_argument(
'--token',
required=False,
default=os.getenv('VAULT_TOKEN'),
help=('Vault token. It will be prompted interactively if unset. '
'This can also be specified via the VAULT_TOKEN environment variable.')
)
parser.add_argument(
'--address', '--agent-address',
default=os.getenv('VAULT_AGENT_ADDR', os.getenv('VAULT_ADDR', DEFAULT_VAULT_ADDR)),
required=False,
dest='address',
help=('Address of the Vault server or the Vault agent. '
'--agent-address was introduced with vault 1.1.0. '
'This can also be specified via the VAULT_ADDR '
'or the VAULT_AGENT_ADDR environment variable. '
'If both VAULT_AGENT_ADDR and VAULT_ADDR are in the environment '
'VAULT_AGENT_ADDR has precedence')
)
parser.add_argument(
'--tls-skip-verify',
action='store_true',
default=True if os.getenv('VAULT_SKIP_VERIFY', False) else False,
required=False,
help=('Disable verification of TLS certificates. Using this option is highly '
'discouraged and decreases the security of data transmissions to and from '
'the Vault server. The default is false. '
'This can also be specified via the VAULT_SKIP_VERIFY environment variable.')
)
parser.add_argument(
'--ca-cert',
default=os.getenv('VAULT_CACERT'),
required=False,
help=('Path on the local disk to a single PEM-encoded CA certificate to verify '
'the Vault server\'s SSL certificate. '
'This can also be specified via the VAULT_CACERT environment variable. ')
)
parser.add_argument(
'--client-cert',
default=os.getenv('VAULT_CLIENT_CERT'),
required=False,
help=('Path on the local disk to a single PEM-encoded CA certificate to use '
'for TLS authentication to the Vault server. If this flag is specified, '
'--client-key is also required. '
'This can also be specified via the VAULT_CLIENT_CERT environment variable.')
)
parser.add_argument(
'--client-key',
default=os.getenv('VAULT_CLIENT_KEY'),
required=False,
help=('Path on the local disk to a single PEM-encoded private key matching the '
'client certificate from -client-cert. '
'This can also be specified via the VAULT_CLIENT_KEY environment variable.')
)
def main(argv=sys.argv[1:]):
myapp = HvacApp()
return myapp.run(argv)
|
[
"cliff.commandmanager.CommandManager",
"os.getenv"
] |
[((641, 667), 'cliff.commandmanager.CommandManager', 'CommandManager', (['"""hvac_cli"""'], {}), "('hvac_cli')\n", (655, 667), False, 'from cliff.commandmanager import CommandManager\n'), ((1255, 1279), 'os.getenv', 'os.getenv', (['"""VAULT_TOKEN"""'], {}), "('VAULT_TOKEN')\n", (1264, 1279), False, 'import os\n'), ((2731, 2756), 'os.getenv', 'os.getenv', (['"""VAULT_CACERT"""'], {}), "('VAULT_CACERT')\n", (2740, 2756), False, 'import os\n'), ((3117, 3147), 'os.getenv', 'os.getenv', (['"""VAULT_CLIENT_CERT"""'], {}), "('VAULT_CLIENT_CERT')\n", (3126, 3147), False, 'import os\n'), ((3594, 3623), 'os.getenv', 'os.getenv', (['"""VAULT_CLIENT_KEY"""'], {}), "('VAULT_CLIENT_KEY')\n", (3603, 3623), False, 'import os\n'), ((1582, 1625), 'os.getenv', 'os.getenv', (['"""VAULT_ADDR"""', 'DEFAULT_VAULT_ADDR'], {}), "('VAULT_ADDR', DEFAULT_VAULT_ADDR)\n", (1591, 1625), False, 'import os\n'), ((2227, 2264), 'os.getenv', 'os.getenv', (['"""VAULT_SKIP_VERIFY"""', '(False)'], {}), "('VAULT_SKIP_VERIFY', False)\n", (2236, 2264), False, 'import os\n')]
|
QGRAIN_VERSION = "0.3.4.2"
import os
QGRAIN_ROOT_PATH = os.path.dirname(__file__)
from enum import Enum, unique
@unique
class DistributionType(Enum):
Customized = "Customized"
Nonparametric = "NonParametric"
Normal = "Normal"
Weibull = "Weibull"
SkewNormal = "SkewNormal"
@unique
class FittingState(Enum):
NotStarted = 0
Fitting = 1
Failed = 2
Succeeded = 4
|
[
"os.path.dirname"
] |
[((58, 83), 'os.path.dirname', 'os.path.dirname', (['__file__'], {}), '(__file__)\n', (73, 83), False, 'import os\n')]
|
import vcs
x=vcs.init()
b=x.createboxfill()
b.boxfill_type="custom"
b.levels=[10.0, 21.42857142857143, 32.85714285714286, 44.28571428571429, 55.71428571428572, 67.14285714285715, 78.57142857142858, 90.00000000000001]
assert(abs(b.levels[0])<1.e19 and b.ext_1 is False and b.ext_2 is False)
b.ext_1=False
assert(abs(b.levels[0])<1.e19 and b.ext_1 is False and b.ext_2 is False)
|
[
"vcs.init"
] |
[((13, 23), 'vcs.init', 'vcs.init', ([], {}), '()\n', (21, 23), False, 'import vcs\n')]
|
"""
https://softeer.ai/practice/info.do?eventIdx=1&psProblemId=581
택배 마스터 광우
큐, 순열
"""
import sys
input = sys.stdin.readline
import itertools
N, M, K = map(int, input().split())
arr = map(int, input().split())
result = 987654321
def dojob(k, p, m):
global result
part_sum = 0
s = 0
i = 0
while True:
if part_sum > result: return
if k == 0: break
if s + p[i] <= m:
s += p[i]
else:
part_sum += s
s = p[i]
k -= 1
i = (i + 1) % N
if part_sum < result:
result = part_sum
def solve():
for p in itertools.permutations(arr):
dojob(K, p, M)
print(result)
solve()
|
[
"itertools.permutations"
] |
[((620, 647), 'itertools.permutations', 'itertools.permutations', (['arr'], {}), '(arr)\n', (642, 647), False, 'import itertools\n')]
|
# -*- encoding: utf-8 -*-
#from flask_sqlalchemy import SQLAlchemy
#db = SQLAlchemy()
from sqlalchemy import Column, Integer, Text
from database import db
from database import Base
class Paste(Base):
__tablename__ = 'paste'
id = Column(Integer, primary_key = True)
#hash = db.Column(db.String(32), unique = True)
data = Column(Text())
def __init__(self):
return None
def __repr__(self):
return '<Paste %r>' % (self.id)
#return "<Paste :%s, %s>" % (self.id, self.data)
def get(self, id):
return Paste.query.filter_by(id=id).first()
def get_id(self):
return self.id
def get_data(self):
return self.data
def put(self, data):
self.data = data
db.add(self)
db.commit()
return self.id
|
[
"database.db.add",
"sqlalchemy.Text",
"sqlalchemy.Column",
"database.db.commit"
] |
[((239, 272), 'sqlalchemy.Column', 'Column', (['Integer'], {'primary_key': '(True)'}), '(Integer, primary_key=True)\n', (245, 272), False, 'from sqlalchemy import Column, Integer, Text\n'), ((345, 351), 'sqlalchemy.Text', 'Text', ([], {}), '()\n', (349, 351), False, 'from sqlalchemy import Column, Integer, Text\n'), ((753, 765), 'database.db.add', 'db.add', (['self'], {}), '(self)\n', (759, 765), False, 'from database import db\n'), ((774, 785), 'database.db.commit', 'db.commit', ([], {}), '()\n', (783, 785), False, 'from database import db\n')]
|
from __future__ import unicode_literals
import json
import time
class Event(object):
"""Base class for a websocket 'event'."""
__slots__ = ['received_time']
def __init__(self):
self.received_time = time.time()
def __repr__(self):
return "{}()".format(self.__class__.__name__)
@classmethod
def _summarize_bytes(cls, data, max_len=24):
"""Avoid spamming logs by truncating byte strings in repr."""
if len(data) > max_len:
return "{!r} + {} bytes".format(
data[:max_len],
len(data) - max_len
)
return repr(data)
@classmethod
def _summarize_text(cls, text, max_len=24):
"""Avoid spamming logs by truncating text."""
if len(text) > max_len:
return "{!r} + {} chars".format(
text[:max_len],
len(text) - max_len
)
return repr(text)
class Poll(Event):
"""A generated poll event."""
name = 'poll'
class Connecting(Event):
"""
Generated prior to establishing a websocket connection to a server.
:param url: The websocket URL the websocket is connecting to.
"""
__slots__ = ['url']
name = 'connecting'
def __init__(self, url):
self.url = url
super(Connecting, self).__init__()
def __repr__(self):
return "{}(url='{}')".format(self.__class__.__name__, self.url)
class ConnectFail(Event):
"""
Generate when Lomond was unable to connect to a Websocket server.
:param reason: A short description of the reason for the
failure.
:type reason: str
"""
__slots__ = ['reason']
name = 'connect_fail'
def __init__(self, reason):
self.reason = reason
super(ConnectFail, self).__init__()
def __repr__(self):
return "{}(reason='{}')".format(
self.__class__.__name__,
self.reason,
)
class Connected(Event):
"""Generated when Lomond has connected to a server but not yet
negotiated the websocket upgrade.
:param str url: The websocket URL connected to.
:param str proxy: The proxy URL connected to (or None).
"""
__slots__ = ['url', 'proxy']
name = 'connected'
def __init__(self, url, proxy=None):
self.url = url
self.proxy = proxy
super(Connected, self).__init__()
def __repr__(self):
_class = self.__class__.__name__
return (
"{}(url='{}')".format(_class, self.url)
if self.proxy is None else
"{}(url='{}', proxy='{}')".format(
_class, self.url, self.proxy
)
)
class Rejected(Event):
"""Server rejected WS connection."""
__slots__ = ['response', 'reason']
name = 'rejected'
def __init__(self, response, reason):
"""
Generated when Lomond is connected to the server, but the
websocket upgrade failed.
:param response: The response returned by the server.
:param str reason: A description of why the connection was
rejects.
"""
self.response = response
self.reason = reason
super(Rejected, self).__init__()
def __repr__(self):
return "{}(response={!r}, reason='{}')".format(
self.__class__.__name__,
self.response,
self.reason
)
class Ready(Event):
"""Generated when Lomond has connected to the server,
and successfully negotiated the websocket upgrade.
:param response: A :class:`~lomond.response.Response` object.
:param str protocol: A websocket protocol or ``None`` if no protocol
was supplied.
:param set extensions: A set of negotiated websocket extensions.
Currently only the ``'permessage-deflate'`` extension is supported.
"""
__slots__ = ['response', 'protocol', 'extensions']
name = 'ready'
def __init__(self, response, protocol, extensions):
self.response = response
self.protocol = protocol
self.extensions = extensions
super(Ready, self).__init__()
def __repr__(self):
return '{}(response={!r}, protocol={!r}, extensions={!r})'.format(
self.__class__.__name__,
self.response,
self.protocol,
self.extensions
)
class ProtocolError(Event):
"""Generated when the server deviates from the protocol.
:param str error: A description of the error.
:param bool critical: Indicates if the error is considered
'critical'. If ``True``, Lomond will disconnect immediately.
If ``False``, Lomond will send a close message to the server.
"""
__slots__ = ['error', 'critical']
name = 'protocol_error'
def __init__(self, error, critical):
self.error = error
self.critical = critical
super(ProtocolError, self).__init__()
def __repr__(self):
return "{}(error='{}', critical={!r})".format(
self.__class__.__name__,
self.error,
self.critical
)
class Unresponsive(Event):
"""The server has not responding to pings within `ping_timeout`
seconds.
Will be followed by a :class:`~lomond.events.Disconnected` event.
"""
name = 'unresponsive'
class Disconnected(Event):
"""Generated when a websocket connection has
been dropped.
:param str reason: A description of why the websocket was closed.
:param bool graceful: Flag indicating if the connection was dropped
gracefully (`True`), or disconnected due to a socket failure
(`False`) or other problem.
"""
__slots__ = ['graceful', 'reason']
name = 'disconnected'
def __init__(self, reason='closed', graceful=False):
self.reason = reason
self.graceful = graceful
super(Disconnected, self).__init__()
def __repr__(self):
return "{}(reason='{}', graceful={!r})".format(
self.__class__.__name__,
self.reason,
self.graceful
)
class Closed(Event):
"""Generated when the websocket was closed. The websocket may no
longer send packets after this event has been received. This event
will be followed by :class:`~lomond.events.Disconnected`.
:param code: The closed code returned from the server.
:param str reason: An optional description why the websocket was
closed, as returned from the server.
"""
__slots__ = ['code', 'reason']
name = 'closed'
def __init__(self, code, reason):
self.code = code
self.reason = reason
super(Closed, self).__init__()
def __repr__(self):
return '{}(code={!r}, reason={!r})'.format(
self.__class__.__name__,
self.code,
self.reason,
)
class Closing(Event):
"""Generated when the server is closing the connection.
No more messages will be received from the server, but you may still
send messages while handling this event. A
:class:`~lomond.events.Disconnected` event should be generated
shortly after this event.
:param code: The closed code returned from the server.
:param str reason: An optional description why the websocket was
closed, as returned from the server.
"""
__slots__ = ['code', 'reason']
name = 'closing'
def __init__(self, code, reason):
self.code = code
self.reason = reason
super(Closing, self).__init__()
def __repr__(self):
return '{}(code={!r}, reason={!r})'.format(
self.__class__.__name__,
self.code,
self.reason,
)
class UnknownMessage(Event):
"""
An application message was received, with an unknown opcode.
"""
__slots__ = ['message']
name = 'unknown'
def __init__(self, message):
self.message = message
super(UnknownMessage, self).__init__()
class Ping(Event):
"""Generated when Lomond received a ping packet from the server.
:param bytes data: Ping payload data.
"""
__slots__ = ['data']
name = 'ping'
def __init__(self, data):
self.data = data
super(Ping, self).__init__()
def __repr__(self):
return "{}(data={!r})".format(self.__class__.__name__, self.data)
class Pong(Event):
"""Generated when Lomond receives a pong packet from the server.
:param bytes data: The pong payload data.
"""
__slots__ = ['data']
name = 'pong'
def __init__(self, data):
self.data = data
super(Pong, self).__init__()
def __repr__(self):
return "{}(data={!r})".format(self.__class__.__name__, self.data)
class Text(Event):
"""Generated when Lomond receives a text message from the server.
:param str text: The text payload.
"""
__slots__ = ['text', '_json']
name = 'text'
def __init__(self, text):
self.text = text
self._json = None
super(Text, self).__init__()
@property
def json(self):
"""Text decoded as JSON.
Calls ``json.loads`` to decode the ``text`` attribute, and may
throw the same exceptions if the text is not valid json.
"""
if self._json is None:
self._json = json.loads(self.text)
return self._json
def __repr__(self):
return "{}(text={})".format(
self.__class__.__name__,
self._summarize_text(self.text)
)
class Binary(Event):
"""Generated when Lomond receives a binary message from the server.
:param bytes data: The binary payload.
"""
__slots__ = ['data']
name = 'binary'
def __init__(self, data):
self.data = data
super(Binary, self).__init__()
def __repr__(self):
return "{}(data={})".format(
self.__class__.__name__,
self._summarize_bytes(self.data)
)
class BackOff(Event):
"""Generated when a persistent connection has to wait before re-
attempting a connection.
:param float delay: The delay (in seconds) before Lomond will re-
attempt to connect.
"""
__slots__ = ['delay']
name = 'back_off'
def __init__(self, delay):
self.delay = delay
super(BackOff, self).__init__()
def __repr__(self):
return "{}(delay={:0.1f})".format(
self.__class__.__name__,
self.delay
)
|
[
"json.loads",
"time.time"
] |
[((222, 233), 'time.time', 'time.time', ([], {}), '()\n', (231, 233), False, 'import time\n'), ((9315, 9336), 'json.loads', 'json.loads', (['self.text'], {}), '(self.text)\n', (9325, 9336), False, 'import json\n')]
|
"""Retrive Tweets, word embeddings, and populate DB"""
import tweepy
import spacy
from .models import DB, Tweet, User
from os import getenv
TWITTER_API_KEY = getenv('TWITTER_API_KEY')
TWITTER_API_KEY_SECRET = getenv('TWITTER_API_KEY_SECRET')
TWITTER_AUTH = tweepy.OAuthHandler(TWITTER_API_KEY, TWITTER_API_KEY_SECRET)
TWITTER = tweepy.API(TWITTER_AUTH)
nlp = spacy.load('my_model')
def vectorize_tweet(tweet_text):
return nlp(tweet_text).vector
def add_or_update_user(username):
try:
"""Allows us to add/update users to our DB"""
twitter_user = TWITTER.get_user(username)
db_user = (User.query.get(twitter_user.id)) or User(id=twitter_user.id, name=username)
DB.session.add(db_user)
tweets = twitter_user.timeline(
count=200, exclude_replies=True,
include_rts=False, tweet_mode='extended'
)
if tweets:
db_user.newest_tweet_id = tweets[0].id
for tweet in tweets:
vectorize_tweet = vectorize_tweet(tweet.full_text)
db_tweet = Tweet(
id=tweet.id, text=tweet.full_text,
vect = vectorize_tweet
)
db_user.tweets.append(db_tweet)
DB.session.add(db_tweet)
except Exception as e:
print('Error Processing: {}: {}'.format(username, e))
raise e
else:
DB.session.commit()
def insert_example_users():
# using our functions to add two users
add_or_update_user('elonmusk')
add_or_update_user('jackblack')
|
[
"tweepy.OAuthHandler",
"tweepy.API",
"os.getenv",
"spacy.load"
] |
[((160, 185), 'os.getenv', 'getenv', (['"""TWITTER_API_KEY"""'], {}), "('TWITTER_API_KEY')\n", (166, 185), False, 'from os import getenv\n'), ((211, 243), 'os.getenv', 'getenv', (['"""TWITTER_API_KEY_SECRET"""'], {}), "('TWITTER_API_KEY_SECRET')\n", (217, 243), False, 'from os import getenv\n'), ((260, 320), 'tweepy.OAuthHandler', 'tweepy.OAuthHandler', (['TWITTER_API_KEY', 'TWITTER_API_KEY_SECRET'], {}), '(TWITTER_API_KEY, TWITTER_API_KEY_SECRET)\n', (279, 320), False, 'import tweepy\n'), ((331, 355), 'tweepy.API', 'tweepy.API', (['TWITTER_AUTH'], {}), '(TWITTER_AUTH)\n', (341, 355), False, 'import tweepy\n'), ((363, 385), 'spacy.load', 'spacy.load', (['"""my_model"""'], {}), "('my_model')\n", (373, 385), False, 'import spacy\n')]
|
import os
from tests.checks import checks
from tests.harness import harness
from scripts import terminal
check_dir = os.path.dirname(os.path.abspath(__file__))
check_suite = checks.CheckSuite()
folds = [3]
inf_folds = [3]
incs = [1]
FLT_BIN_WIDTH=13
FLT_MAX_EXP=128
FLT_BIG_EXP=13
FLT_SMALL_EXP=-12
FLT_MIN_EXP=-125
FLT_MANT_DIG=24
FLT_ONES = 0
for i in range(FLT_MANT_DIG):
FLT_ONES += 2.0 ** -i
DBL_BIN_WIDTH=41
DBL_MAX_EXP=1024
DBL_BIG_EXP=27
DBL_SMALL_EXP=-27
DBL_MIN_EXP=-1021
DBL_MANT_DIG=53
DBL_ONES = 0
for i in range(DBL_MANT_DIG):
DBL_ONES += 2.0 ** -i
check_suite.add_checks([checks.ValidateInternalDSCALETest(),\
checks.ValidateInternalSSCALETest()],\
["N", "incX"],\
[[4], [1]])
check_suite.add_checks([checks.ValidateInternalUFPTest(),\
checks.ValidateInternalUFPFTest()],\
["N", "incX"],\
[[10], [1, 2, 4]])
check_suite.add_checks([checks.ValidateInternalDINDEXTest(),\
checks.ValidateInternalSINDEXTest(),\
checks.ValidateInternalDMINDEXTest(),\
checks.ValidateInternalSMINDEXTest()],\
["N", "incX"],\
[[4], [1]])
check_suite.add_checks([checks.ValidateInternalDAMAXTest(),\
checks.ValidateInternalZAMAXTest(),\
checks.ValidateInternalSAMAXTest(),\
checks.ValidateInternalCAMAXTest()],\
["N", "incX"],\
[[4095], [1, 2, 4]])
check_suite.add_checks([checks.ValidateInternalRDSUMTest(),\
checks.ValidateInternalDBDBADDTest(),\
checks.ValidateInternalDIDADDTest(),\
checks.ValidateInternalDIDDEPOSITTest(),\
checks.ValidateInternalRSSUMTest(),\
checks.ValidateInternalSBSBADDTest(),\
checks.ValidateInternalSISADDTest(),\
checks.ValidateInternalSISDEPOSITTest(),\
],\
["N", "fold", "incX", "RealScaleX", "FillX"],\
[[4095], folds, incs, [1.0, -1.0],\
["constant",\
"mountain",\
"+big",\
"++big",\
"+-big",\
"sine"]])
check_suite.add_checks([checks.ValidateInternalRZSUMTest(),\
checks.ValidateInternalZBZBADDTest(),\
checks.ValidateInternalZIZADDTest(),\
checks.ValidateInternalZIZDEPOSITTest(),\
checks.ValidateInternalRCSUMTest(),\
checks.ValidateInternalCBCBADDTest(),\
checks.ValidateInternalCICADDTest(),\
checks.ValidateInternalCICDEPOSITTest()],\
["N", "fold", "incX", "RealScaleX", "ImagScaleX", "FillX"],\
[[4095], folds, incs, [-1.0, 0.0, 1.0], [-1.0, 0.0, 1.0],\
["constant",\
"mountain",\
"+big",\
"++big",\
"+-big",\
"sine"]])
check_suite.add_checks([checks.ValidateInternalRDNRM2Test(),\
checks.ValidateInternalRDASUMTest(),\
checks.ValidateInternalRSNRM2Test(),\
checks.ValidateInternalRSASUMTest(),\
],\
["N", "fold", "incX", "RealScaleX", "FillX"],\
[[4095], folds, incs, [1.0, -1.0],\
["constant",\
"+big",\
"++big",\
"+-big"]])
check_suite.add_checks([checks.ValidateInternalRDZNRM2Test(),\
checks.ValidateInternalRDZASUMTest(),\
checks.ValidateInternalRSCNRM2Test(),\
checks.ValidateInternalRSCASUMTest(),\
],\
["N", "fold", "incX", ("RealScaleX", "ImagScaleX"), "FillX"],\
[[4095], folds, incs, [-1.0, 0.0, 1.0], [-1.0, 0.0, 1.0],\
["constant",\
"+big",\
"++big",\
"+-big"]])
check_suite.add_checks([checks.ValidateInternalRDDOTTest(),\
checks.ValidateInternalRSDOTTest(),\
],\
["N", "fold", "incX", "RealScaleX", "RealScaleY", "FillX", "FillY"],\
[[4095], folds, incs, [1.0, -1.0], [1.0, -1.0],\
["constant",\
"+big",\
"++big",\
"+-big"],\
["constant",\
"+big",\
"++big",\
"+-big"]])
check_suite.add_checks([checks.ValidateInternalRDDOTTest(),\
checks.ValidateInternalRSDOTTest(),\
],\
["N", "fold", "incX", "RealScaleX", "RealScaleY", ("FillX", "FillY")],\
[[4095], folds, incs, [1.0, -1.0], [1.0, -1.0],\
[("constant", "sine"),\
("sine", "constant")]])
check_suite.add_checks([checks.ValidateInternalRDDOTTest(),\
checks.ValidateInternalRSDOTTest(),\
],\
["N", "fold", "incX", "RealScaleX", "RealScaleY", ("FillX", "FillY")],\
[[4095], folds, incs, [1.0, -1.0], [1.0, -1.0],\
[("constant", "mountain"),\
("mountain", "constant")]])
check_suite.add_checks([checks.ValidateInternalRZDOTUTest(),\
checks.ValidateInternalRZDOTCTest(),\
checks.ValidateInternalRCDOTUTest(),\
checks.ValidateInternalRCDOTCTest(),\
],\
["N", "fold", "incX", "RealScaleX", "ImagScaleX", "RealScaleY", "ImagScaleY", "FillX", "FillY"],\
[[4095], folds, incs, [-1.0, 0.0, 1.0], [-1.0, 0.0, 1.0], [-1.0, 0.0, 1.0], [-1.0, 0.0, 1.0],\
["constant",\
"+big",\
"++big",\
"+-big"],\
["constant",\
"+big",\
"++big",\
"+-big"]])
check_suite.add_checks([checks.ValidateInternalRZDOTUTest(),\
checks.ValidateInternalRZDOTCTest(),\
checks.ValidateInternalRCDOTUTest(),\
checks.ValidateInternalRCDOTCTest(),\
],\
["N", "fold", "incX", "RealScaleX", "ImagScaleX", "RealScaleY", "ImagScaleY", ("FillX", "FillY")],\
[[4095], folds, incs, [-1.0, 0.0, 1.0], [-1.0, 0.0, 1.0], [-1.0, 0.0, 1.0], [-1.0, 0.0, 1.0],\
[("constant", "sine"),\
("sine", "constant")]])
# Complex dot products where one operand is a "mountain" wave and the other
# is constant (both orderings), over all sign combinations of the scales.
check_suite.add_checks(
    [checks.ValidateInternalRZDOTUTest(),
     checks.ValidateInternalRZDOTCTest(),
     checks.ValidateInternalRCDOTUTest(),
     checks.ValidateInternalRCDOTCTest()],
    ["N", "fold", "incX", "RealScaleX", "ImagScaleX", "RealScaleY", "ImagScaleY", ("FillX", "FillY")],
    [[4095], folds, incs, [-1.0, 0.0, 1.0], [-1.0, 0.0, 1.0], [-1.0, 0.0, 1.0], [-1.0, 0.0, 1.0],
     [("constant", "mountain"),
      ("mountain", "constant")]])

# Real-valued reductions and accumulator primitives fed non-finite data
# (infinity and NaN patterns) to check their propagation behavior.
check_suite.add_checks(
    [checks.ValidateInternalRDSUMTest(),
     checks.ValidateInternalRDASUMTest(),
     checks.ValidateInternalRDNRM2Test(),
     checks.ValidateInternalDBDBADDTest(),
     checks.ValidateInternalDIDADDTest(),
     checks.ValidateInternalDIDDEPOSITTest(),
     checks.ValidateInternalRSSUMTest(),
     checks.ValidateInternalRSASUMTest(),
     checks.ValidateInternalRSNRM2Test(),
     checks.ValidateInternalSBSBADDTest(),
     checks.ValidateInternalSISADDTest(),
     checks.ValidateInternalSISDEPOSITTest()],
    ["N", "fold", "incX", "RealScaleX", "FillX"],
    [[255], inf_folds, incs, [1.0, -1.0],
     ["+inf", "++inf", "+-inf", "nan", "+inf_nan", "++inf_nan", "+-inf_nan"]])

# Complex-valued counterparts of the non-finite input checks.
check_suite.add_checks(
    [checks.ValidateInternalRZSUMTest(),
     checks.ValidateInternalRDZASUMTest(),
     checks.ValidateInternalRDZNRM2Test(),
     checks.ValidateInternalZBZBADDTest(),
     checks.ValidateInternalZIZADDTest(),
     checks.ValidateInternalZIZDEPOSITTest(),
     checks.ValidateInternalRCSUMTest(),
     checks.ValidateInternalRSCASUMTest(),
     checks.ValidateInternalRSCNRM2Test(),
     checks.ValidateInternalCBCBADDTest(),
     checks.ValidateInternalCICADDTest(),
     checks.ValidateInternalCICDEPOSITTest()],
    ["N", "fold", "incX", "RealScaleX", "ImagScaleX", "FillX"],
    [[255], inf_folds, incs, [-1.0, 0.0, 1.0], [-1.0, 0.0, 1.0],
     ["+inf", "++inf", "+-inf", "nan", "+inf_nan", "++inf_nan", "+-inf_nan"]])

# Real dot products with non-finite data in either or both operands.
check_suite.add_checks(
    [checks.ValidateInternalRDDOTTest(),
     checks.ValidateInternalRSDOTTest()],
    ["N", "fold", "incX", "RealScaleX", "RealScaleY", "FillX", "FillY"],
    [[255], inf_folds, incs, [1.0, -1.0], [1.0, -1.0],
     ["constant", "+inf", "++inf", "+-inf", "nan", "+inf_nan", "++inf_nan", "+-inf_nan"],
     ["constant", "+inf", "++inf", "+-inf", "nan", "+inf_nan", "++inf_nan", "+-inf_nan"]])
# Complex dot products with non-finite data in either or both operands.
check_suite.add_checks(
    [checks.ValidateInternalRZDOTUTest(),
     checks.ValidateInternalRZDOTCTest(),
     checks.ValidateInternalRCDOTUTest(),
     checks.ValidateInternalRCDOTCTest()],
    ["N", "fold", "incX", "RealScaleX", "ImagScaleX", "RealScaleY", "ImagScaleY", "FillX", "FillY"],
    [[255], inf_folds, incs, [-1.0, 0.0, 1.0], [-1.0, 0.0, 1.0], [-1.0, 0.0, 1.0], [-1.0, 0.0, 1.0],
     ["constant", "+inf", "++inf", "+-inf", "nan", "+inf_nan", "++inf_nan", "+-inf_nan"],
     ["constant", "+inf", "++inf", "+-inf", "nan", "+inf_nan", "++inf_nan", "+-inf_nan"]])

# XBLAS-based dot product validation over many sizes, strides and
# normalization settings.
check_suite.add_checks(
    [checks.ValidateXBLASRDDOTTest(),
     checks.ValidateXBLASRZDOTUTest(),
     checks.ValidateXBLASRZDOTCTest(),
     checks.ValidateXBLASRSDOTTest(),
     checks.ValidateXBLASRCDOTUTest(),
     checks.ValidateXBLASRCDOTCTest()],
    ["N", "incX", "incY", "norm"],
    [[1, 2, 3, 4, 5, 6, 7, 8, 15, 16, 63, 64, 4095, 4096], [1, 2, 4], [1, 2, 4], [-1, 0, 1]])

# Verify* checks on constant data with RealScaleX = 0 and an explicit
# blocking factor B.
check_suite.add_checks(
    [checks.VerifyRDSUMTest(),
     checks.VerifyRDASUMTest(),
     checks.VerifyDBDBADDTest(),
     checks.VerifyDIDADDTest(),
     checks.VerifyDIDDEPOSITTest(),
     checks.VerifyRZSUMTest(),
     checks.VerifyRDZASUMTest(),
     checks.VerifyZBZBADDTest(),
     checks.VerifyZIZADDTest(),
     checks.VerifyZIZDEPOSITTest(),
     checks.VerifyRSSUMTest(),
     checks.VerifyRSASUMTest(),
     checks.VerifySBSBADDTest(),
     checks.VerifySISADDTest(),
     checks.VerifySISDEPOSITTest(),
     checks.VerifyRCSUMTest(),
     checks.VerifyRSCASUMTest(),
     checks.VerifyCBCBADDTest(),
     checks.VerifyCICADDTest(),
     checks.VerifyCICDEPOSITTest()],
    ["N", "fold", "B", "incX", "RealScaleX", "FillX"],
    [[4095], folds, [256], incs, [0],
     ["constant"]])

# Verify* checks (including NRM2/SSQ variants) on random, sine and
# growing-magnitude fills.
check_suite.add_checks(
    [checks.VerifyRDSUMTest(),
     checks.VerifyRDASUMTest(),
     checks.VerifyRDNRM2Test(),
     checks.VerifyDIDSSQTest(),
     checks.VerifyDBDBADDTest(),
     checks.VerifyDIDADDTest(),
     checks.VerifyDIDDEPOSITTest(),
     checks.VerifyRZSUMTest(),
     checks.VerifyRDZASUMTest(),
     checks.VerifyRDZNRM2Test(),
     checks.VerifyDIZSSQTest(),
     checks.VerifyZBZBADDTest(),
     checks.VerifyZIZADDTest(),
     checks.VerifyZIZDEPOSITTest(),
     checks.VerifyRSSUMTest(),
     checks.VerifyRSASUMTest(),
     checks.VerifyRSNRM2Test(),
     checks.VerifySISSSQTest(),
     checks.VerifySBSBADDTest(),
     checks.VerifySISADDTest(),
     checks.VerifySISDEPOSITTest(),
     checks.VerifyRCSUMTest(),
     checks.VerifyRSCASUMTest(),
     checks.VerifyRSCNRM2Test(),
     checks.VerifySICSSQTest(),
     checks.VerifyCBCBADDTest(),
     checks.VerifyCICADDTest(),
     checks.VerifyCICDEPOSITTest()],
    ["N", "fold", "B", "incX", "FillX"],
    [[4095], folds, [256], incs,
     ["rand", "rand+(rand-1)", "sine", "small+grow*big"]])

# Verify* checks for the dot products over the same fill patterns, applied
# independently to X and Y.
check_suite.add_checks(
    [checks.VerifyRDDOTTest(),
     checks.VerifyRZDOTUTest(),
     checks.VerifyRZDOTCTest(),
     checks.VerifyRSDOTTest(),
     checks.VerifyRCDOTUTest(),
     checks.VerifyRCDOTCTest()],
    ["N", "fold", "incX", "incY", "FillX", "FillY"],
    [[4095], folds, incs, incs,
     ["rand", "rand+(rand-1)", "sine", "small+grow*big"],
     ["rand", "rand+(rand-1)", "sine", "small+grow*big"]])
# Sweep the scale exponent i across the double-precision bin width (plus two)
# so every bin boundary of the binned accumulator is exercised.
for i in range(DBL_BIN_WIDTH + 2):
    # Constant fills at magnitude DBL_ONES + 2**i.
    check_suite.add_checks(
        [checks.ValidateInternalRDSUMTest(),
         checks.ValidateInternalDBDBADDTest(),
         checks.ValidateInternalDIDADDTest(),
         checks.ValidateInternalDIDDEPOSITTest(),
         checks.ValidateInternalRDASUMTest(),
         checks.ValidateInternalRDNRM2Test(),
         checks.ValidateInternalRDDOTTest()],
        ["N", "fold", "incX", "RealScaleX", "FillX", "FillY"],
        [[8192], folds, incs, [DBL_ONES + 2 ** i],
         ["constant"],
         ["constant"]])
    # Near-overflow / near-underflow scalings with "big" fill variants in X.
    check_suite.add_checks(
        [checks.ValidateInternalRDSUMTest(),
         checks.ValidateInternalDBDBADDTest(),
         checks.ValidateInternalDIDADDTest(),
         checks.ValidateInternalDIDDEPOSITTest(),
         checks.ValidateInternalRDASUMTest(),
         checks.ValidateInternalRDNRM2Test(),
         checks.ValidateInternalRDDOTTest()],
        ["N", "fold", "incX", "RealScaleX", "FillX", "FillY"],
        [[32], folds, incs,
         [1.5 * 2**(DBL_MAX_EXP - DBL_BIG_EXP - 6 - i), 0.75 * 2**(DBL_MIN_EXP - DBL_SMALL_EXP + i)],
         ["constant", "+big", "++big", "+-big"],
         ["constant"]])
    # Same scalings with the "big" fills moved to Y (dot product only).
    check_suite.add_checks(
        [checks.ValidateInternalRDDOTTest()],
        ["N", "fold", "incX", "RealScaleX", "FillX", "FillY"],
        [[32], folds, incs,
         [1.5 * 2**(DBL_MAX_EXP - DBL_BIG_EXP - 6 - i), 0.75 * 2**(DBL_MIN_EXP - DBL_SMALL_EXP + i)],
         ["constant"],
         ["constant", "+big", "++big", "+-big"]])
    # Doubled exponent offsets with "big" fills in both operands.
    check_suite.add_checks(
        [checks.ValidateInternalRDDOTTest()],
        ["N", "fold", "incX", "RealScaleX", "FillX", "FillY"],
        [[32], folds, incs,
         [1.5 * 2**(DBL_MAX_EXP - 2 * DBL_BIG_EXP - 6 - i), 0.75 * 2**(DBL_MIN_EXP - 2 * DBL_SMALL_EXP + i)],
         ["constant", "+big", "++big", "+-big"],
         ["constant", "+big", "++big", "+-big"]])
    # Double-complex counterparts, with the same scalings applied to both the
    # real and imaginary parts.
    check_suite.add_checks(
        [checks.ValidateInternalRZSUMTest(),
         checks.ValidateInternalZBZBADDTest(),
         checks.ValidateInternalZIZADDTest(),
         checks.ValidateInternalZIZDEPOSITTest(),
         checks.ValidateInternalRDZASUMTest(),
         checks.ValidateInternalRDZNRM2Test(),
         checks.ValidateInternalRZDOTUTest(),
         checks.ValidateInternalRZDOTCTest()],
        ["N", "fold", "incX", "RealScaleX", "ImagScaleX", "FillX", "FillY"],
        [[16], folds, incs,
         [1.5 * 2**(DBL_MAX_EXP - DBL_BIG_EXP - 6 - i), 0.75 * 2**(DBL_MIN_EXP - DBL_SMALL_EXP + i)],
         [1.5 * 2**(DBL_MAX_EXP - DBL_BIG_EXP - 6 - i), 0.75 * 2**(DBL_MIN_EXP - DBL_SMALL_EXP + i)],
         ["constant", "+big", "++big", "+-big"],
         ["constant"]])
    # Complex dots with the "big" fills in Y only.
    check_suite.add_checks(
        [checks.ValidateInternalRZDOTUTest(),
         checks.ValidateInternalRZDOTCTest()],
        ["N", "fold", "incX", "RealScaleX", "ImagScaleX", "FillX", "FillY"],
        [[16], folds, incs,
         [1.5 * 2**(DBL_MAX_EXP - DBL_BIG_EXP - 6 - i), 0.75 * 2**(DBL_MIN_EXP - DBL_SMALL_EXP + i)],
         [1.5 * 2**(DBL_MAX_EXP - DBL_BIG_EXP - 6 - i), 0.75 * 2**(DBL_MIN_EXP - DBL_SMALL_EXP + i)],
         ["constant"],
         ["constant", "+big", "++big", "+-big"]])
    # Complex dots with doubled exponent offsets and "big" fills in both.
    check_suite.add_checks(
        [checks.ValidateInternalRZDOTUTest(),
         checks.ValidateInternalRZDOTCTest()],
        ["N", "fold", "incX", "RealScaleX", "ImagScaleX", "FillX", "FillY"],
        [[16], folds, incs,
         [1.5 * 2**(DBL_MAX_EXP - 2 * DBL_BIG_EXP - 6 - i), 0.75 * 2**(DBL_MIN_EXP - 2 * DBL_SMALL_EXP + i)],
         [1.5 * 2**(DBL_MAX_EXP - 2 * DBL_BIG_EXP - 6 - i), 0.75 * 2**(DBL_MIN_EXP - 2 * DBL_SMALL_EXP + i)],
         ["constant", "+big", "++big", "+-big"],
         ["constant", "+big", "++big", "+-big"]])
# Single-precision mirror of the double-precision exponent sweep above.
# NOTE(review): the scale here is FLT_ONES * 2.0 ** i while the double loop
# uses DBL_ONES + 2 ** i — presumably intentional, but worth confirming.
for i in range(FLT_BIN_WIDTH + 2):
    # Constant fills at magnitude FLT_ONES * 2**i.
    check_suite.add_checks(
        [checks.ValidateInternalRSSUMTest(),
         checks.ValidateInternalSBSBADDTest(),
         checks.ValidateInternalSISADDTest(),
         checks.ValidateInternalSISDEPOSITTest(),
         checks.ValidateInternalRSASUMTest(),
         checks.ValidateInternalRSNRM2Test(),
         checks.ValidateInternalRSDOTTest()],
        ["N", "fold", "incX", "RealScaleX", "FillX", "FillY"],
        [[8192], folds, incs, [FLT_ONES * 2.0 ** i],
         ["constant"],
         ["constant"]])
    # Near-overflow / near-underflow scalings with "big" fill variants in X.
    check_suite.add_checks(
        [checks.ValidateInternalRSSUMTest(),
         checks.ValidateInternalSBSBADDTest(),
         checks.ValidateInternalSISADDTest(),
         checks.ValidateInternalSISDEPOSITTest(),
         checks.ValidateInternalRSASUMTest(),
         checks.ValidateInternalRSNRM2Test(),
         checks.ValidateInternalRSDOTTest()],
        ["N", "fold", "incX", "RealScaleX", "FillX", "FillY"],
        [[32], folds, incs,
         [1.5 * 2**(FLT_MAX_EXP - FLT_BIG_EXP - 6 - i), 0.75 * 2**(FLT_MIN_EXP - FLT_SMALL_EXP + i)],
         ["constant", "+big", "++big", "+-big"],
         ["constant"]])
    # Same scalings with the "big" fills moved to Y (dot product only).
    check_suite.add_checks(
        [checks.ValidateInternalRSDOTTest()],
        ["N", "fold", "incX", "RealScaleX", "FillX", "FillY"],
        [[32], folds, incs,
         [1.5 * 2**(FLT_MAX_EXP - FLT_BIG_EXP - 6 - i), 0.75 * 2**(FLT_MIN_EXP - FLT_SMALL_EXP + i)],
         ["constant"],
         ["constant", "+big", "++big", "+-big"]])
    # Doubled exponent offsets with "big" fills in both operands.
    check_suite.add_checks(
        [checks.ValidateInternalRSDOTTest()],
        ["N", "fold", "incX", "RealScaleX", "FillX", "FillY"],
        [[32], folds, incs,
         [1.5 * 2**(FLT_MAX_EXP - 2 * FLT_BIG_EXP - 6 - i), 0.75 * 2**(FLT_MIN_EXP - 2 * FLT_SMALL_EXP + i)],
         ["constant", "+big", "++big", "+-big"],
         ["constant", "+big", "++big", "+-big"]])
    # Single-complex counterparts, with the same scalings applied to both the
    # real and imaginary parts.
    check_suite.add_checks(
        [checks.ValidateInternalRCSUMTest(),
         checks.ValidateInternalCBCBADDTest(),
         checks.ValidateInternalCICADDTest(),
         checks.ValidateInternalCICDEPOSITTest(),
         checks.ValidateInternalRSCASUMTest(),
         checks.ValidateInternalRSCNRM2Test(),
         checks.ValidateInternalRCDOTUTest(),
         checks.ValidateInternalRCDOTCTest()],
        ["N", "fold", "incX", "RealScaleX", "ImagScaleX", "FillX", "FillY"],
        [[16], folds, incs,
         [1.5 * 2**(FLT_MAX_EXP - FLT_BIG_EXP - 6 - i), 0.75 * 2**(FLT_MIN_EXP - FLT_SMALL_EXP + i)],
         [1.5 * 2**(FLT_MAX_EXP - FLT_BIG_EXP - 6 - i), 0.75 * 2**(FLT_MIN_EXP - FLT_SMALL_EXP + i)],
         ["constant", "+big", "++big", "+-big"],
         ["constant"]])
    # Complex dots with the "big" fills in Y only.
    check_suite.add_checks(
        [checks.ValidateInternalRCDOTUTest(),
         checks.ValidateInternalRCDOTCTest()],
        ["N", "fold", "incX", "RealScaleX", "ImagScaleX", "FillX", "FillY"],
        [[16], folds, incs,
         [1.5 * 2**(FLT_MAX_EXP - FLT_BIG_EXP - 6 - i), 0.75 * 2**(FLT_MIN_EXP - FLT_SMALL_EXP + i)],
         [1.5 * 2**(FLT_MAX_EXP - FLT_BIG_EXP - 6 - i), 0.75 * 2**(FLT_MIN_EXP - FLT_SMALL_EXP + i)],
         ["constant"],
         ["constant", "+big", "++big", "+-big"]])
    # Complex dots with doubled exponent offsets and "big" fills in both.
    check_suite.add_checks(
        [checks.ValidateInternalRCDOTUTest(),
         checks.ValidateInternalRCDOTCTest()],
        ["N", "fold", "incX", "RealScaleX", "ImagScaleX", "FillX", "FillY"],
        [[16], folds, incs,
         [1.5 * 2**(FLT_MAX_EXP - 2 * FLT_BIG_EXP - 6 - i), 0.75 * 2**(FLT_MIN_EXP - 2 * FLT_SMALL_EXP + i)],
         [1.5 * 2**(FLT_MAX_EXP - 2 * FLT_BIG_EXP - 6 - i), 0.75 * 2**(FLT_MIN_EXP - 2 * FLT_SMALL_EXP + i)],
         ["constant", "+big", "++big", "+-big"],
         ["constant", "+big", "++big", "+-big"]])
# Single-element (N = 1) inputs scaled near the top of the double-precision
# exponent range, alongside a plain 1.0 scale.
check_suite.add_checks(
    [checks.ValidateInternalDBDBADDTest(),
     checks.ValidateInternalDIDADDTest(),
     checks.ValidateInternalDIDDEPOSITTest(),
     checks.ValidateInternalRDSUMTest(),
     checks.ValidateInternalRDASUMTest(),
     checks.ValidateInternalRDDOTTest(),
     checks.ValidateInternalZBZBADDTest(),
     checks.ValidateInternalZIZADDTest(),
     checks.ValidateInternalZIZDEPOSITTest(),
     checks.ValidateInternalRZSUMTest(),
     checks.ValidateInternalRDZASUMTest(),
     checks.ValidateInternalRZDOTUTest(),
     checks.ValidateInternalRZDOTCTest()],
    ["N", "fold", "incX", "RealScaleX", "ImagScaleX", "FillX"],
    [[1], folds, incs,
     [DBL_ONES * 2 ** (DBL_MAX_EXP - 1), 1.0],
     [DBL_ONES * 2 ** (DBL_MAX_EXP - 1), 1.0],
     ["constant"]])

# Single-precision counterpart of the N = 1 extreme-scale checks.
check_suite.add_checks(
    [checks.ValidateInternalSBSBADDTest(),
     checks.ValidateInternalSISADDTest(),
     checks.ValidateInternalSISDEPOSITTest(),
     checks.ValidateInternalRSSUMTest(),
     checks.ValidateInternalRSASUMTest(),
     checks.ValidateInternalRSDOTTest(),
     checks.ValidateInternalCBCBADDTest(),
     checks.ValidateInternalCICADDTest(),
     checks.ValidateInternalCICDEPOSITTest(),
     checks.ValidateInternalRCSUMTest(),
     checks.ValidateInternalRSCASUMTest(),
     checks.ValidateInternalRCDOTUTest(),
     checks.ValidateInternalRCDOTCTest()],
    ["N", "fold", "incX", "RealScaleX", "ImagScaleX", "FillX"],
    [[1], folds, incs,
     [FLT_ONES * 2 ** (FLT_MAX_EXP - 1), 1.0],
     [FLT_ONES * 2 ** (FLT_MAX_EXP - 1), 1.0],
     ["constant"]])

# GEMV corroboration checks over layouts, transposes, shapes, leading
# dimensions, paired strides, and alpha/beta combinations.
check_suite.add_checks(
    [checks.CorroborateRDGEMVTest(),
     checks.CorroborateRZGEMVTest(),
     checks.CorroborateRSGEMVTest(),
     checks.CorroborateRCGEMVTest()],
    ["O", "T", "M", "N", "lda", ("incX", "incY"), "FillA", "FillX", "FillY",
     ("RealAlpha", "ImagAlpha"), ("RealBeta", "ImagBeta"), "fold"],
    [["RowMajor", "ColMajor"], ["Trans", "NoTrans"], [255, 512], [255, 512],
     [0, -15], list(zip(incs, incs)),
     ["rand"],
     ["rand"],
     ["rand"],
     [(0.0, 0.0), (1.0, 0.0), (2.0, 2.0)],
     [(0.0, 0.0), (1.0, 0.0), (2.0, 2.0)],
     folds])

# GEMM corroboration checks over layouts, both transpose arguments, shapes,
# grouped leading dimensions, and alpha/beta combinations.
check_suite.add_checks(
    [checks.CorroborateRDGEMMTest(),
     checks.CorroborateRZGEMMTest(),
     checks.CorroborateRSGEMMTest(),
     checks.CorroborateRCGEMMTest()],
    ["O", "TransA", "TransB", "M", "N", "K", ("lda", "ldb", "ldc"),
     "FillA", "FillB", "FillC",
     ("RealAlpha", "ImagAlpha"), ("RealBeta", "ImagBeta"), "fold"],
    [["RowMajor", "ColMajor"], ["ConjTrans", "Trans", "NoTrans"],
     ["ConjTrans", "Trans", "NoTrans"], [32, 64], [32, 64], [32, 64],
     [(0, 0, 0), (-63, -63, -63)],
     ["rand"],
     ["rand"],
     ["rand"],
     [(0.0, 0.0), (1.0, 0.0), (2.0, 2.0)],
     [(0.0, 0.0), (1.0, 0.0), (2.0, 2.0)],
     folds])
# Build the "check" harness, register the suite assembled above, and run it.
check_harness = harness.Harness("check")
check_harness.add_suite(check_suite)
check_harness.run()
|
[
"tests.checks.checks.VerifyRDASUMTest",
"tests.checks.checks.ValidateXBLASRCDOTUTest",
"tests.checks.checks.ValidateInternalRZSUMTest",
"tests.checks.checks.ValidateInternalRZDOTUTest",
"tests.checks.checks.CorroborateRDGEMMTest",
"os.path.abspath",
"tests.checks.checks.CorroborateRCGEMVTest",
"tests.checks.checks.ValidateInternalZIZADDTest",
"tests.checks.checks.ValidateInternalZIZDEPOSITTest",
"tests.checks.checks.VerifyCICDEPOSITTest",
"tests.checks.checks.ValidateInternalSSCALETest",
"tests.checks.checks.ValidateInternalRSDOTTest",
"tests.checks.checks.VerifyRSASUMTest",
"tests.checks.checks.ValidateXBLASRDDOTTest",
"tests.checks.checks.VerifyRSCNRM2Test",
"tests.checks.checks.ValidateXBLASRSDOTTest",
"tests.checks.checks.ValidateInternalDBDBADDTest",
"tests.checks.checks.VerifyZIZADDTest",
"tests.checks.checks.ValidateInternalRSCNRM2Test",
"tests.checks.checks.ValidateInternalDSCALETest",
"tests.checks.checks.ValidateInternalRZDOTCTest",
"tests.checks.checks.ValidateInternalRDSUMTest",
"tests.checks.checks.VerifyDIDSSQTest",
"tests.checks.checks.ValidateInternalRCDOTCTest",
"tests.checks.checks.VerifySISADDTest",
"tests.checks.checks.ValidateInternalRDZNRM2Test",
"tests.checks.checks.VerifyZBZBADDTest",
"tests.checks.checks.VerifyRDZASUMTest",
"tests.checks.checks.VerifyRDZNRM2Test",
"tests.checks.checks.VerifyRZDOTUTest",
"tests.checks.checks.VerifySISSSQTest",
"tests.checks.checks.VerifyRCSUMTest",
"tests.checks.checks.ValidateInternalDMINDEXTest",
"tests.checks.checks.ValidateInternalUFPFTest",
"tests.checks.checks.ValidateInternalRCDOTUTest",
"tests.checks.checks.ValidateInternalRSSUMTest",
"tests.checks.checks.VerifyRDSUMTest",
"tests.checks.checks.VerifyCICADDTest",
"tests.checks.checks.ValidateInternalZBZBADDTest",
"tests.checks.checks.ValidateInternalDINDEXTest",
"tests.checks.checks.ValidateInternalSBSBADDTest",
"tests.checks.checks.ValidateXBLASRZDOTCTest",
"tests.checks.checks.ValidateXBLASRCDOTCTest",
"tests.checks.checks.ValidateInternalRDNRM2Test",
"tests.checks.checks.CorroborateRCGEMMTest",
"tests.harness.harness.Harness",
"tests.checks.checks.ValidateInternalDIDDEPOSITTest",
"tests.checks.checks.VerifyRSDOTTest",
"tests.checks.checks.VerifyCBCBADDTest",
"tests.checks.checks.CorroborateRZGEMVTest",
"tests.checks.checks.ValidateInternalDIDADDTest",
"tests.checks.checks.VerifyRCDOTCTest",
"tests.checks.checks.VerifyRSSUMTest",
"tests.checks.checks.CorroborateRSGEMMTest",
"tests.checks.checks.ValidateInternalRSASUMTest",
"tests.checks.checks.ValidateInternalRSCASUMTest",
"tests.checks.checks.VerifySICSSQTest",
"tests.checks.checks.VerifyRZDOTCTest",
"tests.checks.checks.VerifyRSNRM2Test",
"tests.checks.checks.ValidateInternalUFPTest",
"tests.checks.checks.CorroborateRDGEMVTest",
"tests.checks.checks.CorroborateRZGEMMTest",
"tests.checks.checks.ValidateInternalSINDEXTest",
"tests.checks.checks.ValidateInternalZAMAXTest",
"tests.checks.checks.VerifyZIZDEPOSITTest",
"tests.checks.checks.ValidateInternalDAMAXTest",
"tests.checks.checks.ValidateInternalRDZASUMTest",
"tests.checks.checks.VerifyDBDBADDTest",
"tests.checks.checks.VerifyRDNRM2Test",
"tests.checks.checks.VerifySBSBADDTest",
"tests.checks.checks.ValidateInternalCAMAXTest",
"tests.checks.checks.VerifyRCDOTUTest",
"tests.checks.checks.VerifyRDDOTTest",
"tests.checks.checks.VerifySISDEPOSITTest",
"tests.checks.checks.ValidateInternalRSNRM2Test",
"tests.checks.checks.ValidateInternalSAMAXTest",
"tests.checks.checks.ValidateInternalRDDOTTest",
"tests.checks.checks.VerifyDIZSSQTest",
"tests.checks.checks.ValidateInternalSISADDTest",
"tests.checks.checks.ValidateInternalRCSUMTest",
"tests.checks.checks.CheckSuite",
"tests.checks.checks.CorroborateRSGEMVTest",
"tests.checks.checks.VerifyRSCASUMTest",
"tests.checks.checks.ValidateXBLASRZDOTUTest",
"tests.checks.checks.VerifyDIDDEPOSITTest",
"tests.checks.checks.ValidateInternalCICDEPOSITTest",
"tests.checks.checks.ValidateInternalCICADDTest",
"tests.checks.checks.ValidateInternalSMINDEXTest",
"tests.checks.checks.VerifyRZSUMTest",
"tests.checks.checks.ValidateInternalSISDEPOSITTest",
"tests.checks.checks.ValidateInternalRDASUMTest",
"tests.checks.checks.VerifyDIDADDTest",
"tests.checks.checks.ValidateInternalCBCBADDTest"
] |
[((177, 196), 'tests.checks.checks.CheckSuite', 'checks.CheckSuite', ([], {}), '()\n', (194, 196), False, 'from tests.checks import checks\n'), ((31700, 31724), 'tests.harness.harness.Harness', 'harness.Harness', (['"""check"""'], {}), "('check')\n", (31715, 31724), False, 'from tests.harness import harness\n'), ((135, 160), 'os.path.abspath', 'os.path.abspath', (['__file__'], {}), '(__file__)\n', (150, 160), False, 'import os\n'), ((598, 633), 'tests.checks.checks.ValidateInternalDSCALETest', 'checks.ValidateInternalDSCALETest', ([], {}), '()\n', (631, 633), False, 'from tests.checks import checks\n'), ((660, 695), 'tests.checks.checks.ValidateInternalSSCALETest', 'checks.ValidateInternalSSCALETest', ([], {}), '()\n', (693, 695), False, 'from tests.checks import checks\n'), ((798, 830), 'tests.checks.checks.ValidateInternalUFPTest', 'checks.ValidateInternalUFPTest', ([], {}), '()\n', (828, 830), False, 'from tests.checks import checks\n'), ((857, 890), 'tests.checks.checks.ValidateInternalUFPFTest', 'checks.ValidateInternalUFPFTest', ([], {}), '()\n', (888, 890), False, 'from tests.checks import checks\n'), ((1000, 1035), 'tests.checks.checks.ValidateInternalDINDEXTest', 'checks.ValidateInternalDINDEXTest', ([], {}), '()\n', (1033, 1035), False, 'from tests.checks import checks\n'), ((1062, 1097), 'tests.checks.checks.ValidateInternalSINDEXTest', 'checks.ValidateInternalSINDEXTest', ([], {}), '()\n', (1095, 1097), False, 'from tests.checks import checks\n'), ((1124, 1160), 'tests.checks.checks.ValidateInternalDMINDEXTest', 'checks.ValidateInternalDMINDEXTest', ([], {}), '()\n', (1158, 1160), False, 'from tests.checks import checks\n'), ((1187, 1223), 'tests.checks.checks.ValidateInternalSMINDEXTest', 'checks.ValidateInternalSMINDEXTest', ([], {}), '()\n', (1221, 1223), False, 'from tests.checks import checks\n'), ((1326, 1360), 'tests.checks.checks.ValidateInternalDAMAXTest', 'checks.ValidateInternalDAMAXTest', ([], {}), '()\n', (1358, 1360), False, 'from 
tests.checks import checks\n'), ((1387, 1421), 'tests.checks.checks.ValidateInternalZAMAXTest', 'checks.ValidateInternalZAMAXTest', ([], {}), '()\n', (1419, 1421), False, 'from tests.checks import checks\n'), ((1448, 1482), 'tests.checks.checks.ValidateInternalSAMAXTest', 'checks.ValidateInternalSAMAXTest', ([], {}), '()\n', (1480, 1482), False, 'from tests.checks import checks\n'), ((1509, 1543), 'tests.checks.checks.ValidateInternalCAMAXTest', 'checks.ValidateInternalCAMAXTest', ([], {}), '()\n', (1541, 1543), False, 'from tests.checks import checks\n'), ((1655, 1689), 'tests.checks.checks.ValidateInternalRDSUMTest', 'checks.ValidateInternalRDSUMTest', ([], {}), '()\n', (1687, 1689), False, 'from tests.checks import checks\n'), ((1716, 1752), 'tests.checks.checks.ValidateInternalDBDBADDTest', 'checks.ValidateInternalDBDBADDTest', ([], {}), '()\n', (1750, 1752), False, 'from tests.checks import checks\n'), ((1779, 1814), 'tests.checks.checks.ValidateInternalDIDADDTest', 'checks.ValidateInternalDIDADDTest', ([], {}), '()\n', (1812, 1814), False, 'from tests.checks import checks\n'), ((1841, 1880), 'tests.checks.checks.ValidateInternalDIDDEPOSITTest', 'checks.ValidateInternalDIDDEPOSITTest', ([], {}), '()\n', (1878, 1880), False, 'from tests.checks import checks\n'), ((1907, 1941), 'tests.checks.checks.ValidateInternalRSSUMTest', 'checks.ValidateInternalRSSUMTest', ([], {}), '()\n', (1939, 1941), False, 'from tests.checks import checks\n'), ((1968, 2004), 'tests.checks.checks.ValidateInternalSBSBADDTest', 'checks.ValidateInternalSBSBADDTest', ([], {}), '()\n', (2002, 2004), False, 'from tests.checks import checks\n'), ((2031, 2066), 'tests.checks.checks.ValidateInternalSISADDTest', 'checks.ValidateInternalSISADDTest', ([], {}), '()\n', (2064, 2066), False, 'from tests.checks import checks\n'), ((2093, 2132), 'tests.checks.checks.ValidateInternalSISDEPOSITTest', 'checks.ValidateInternalSISDEPOSITTest', ([], {}), '()\n', (2130, 2132), False, 'from tests.checks import 
checks\n'), ((2532, 2566), 'tests.checks.checks.ValidateInternalRZSUMTest', 'checks.ValidateInternalRZSUMTest', ([], {}), '()\n', (2564, 2566), False, 'from tests.checks import checks\n'), ((2593, 2629), 'tests.checks.checks.ValidateInternalZBZBADDTest', 'checks.ValidateInternalZBZBADDTest', ([], {}), '()\n', (2627, 2629), False, 'from tests.checks import checks\n'), ((2656, 2691), 'tests.checks.checks.ValidateInternalZIZADDTest', 'checks.ValidateInternalZIZADDTest', ([], {}), '()\n', (2689, 2691), False, 'from tests.checks import checks\n'), ((2718, 2757), 'tests.checks.checks.ValidateInternalZIZDEPOSITTest', 'checks.ValidateInternalZIZDEPOSITTest', ([], {}), '()\n', (2755, 2757), False, 'from tests.checks import checks\n'), ((2784, 2818), 'tests.checks.checks.ValidateInternalRCSUMTest', 'checks.ValidateInternalRCSUMTest', ([], {}), '()\n', (2816, 2818), False, 'from tests.checks import checks\n'), ((2845, 2881), 'tests.checks.checks.ValidateInternalCBCBADDTest', 'checks.ValidateInternalCBCBADDTest', ([], {}), '()\n', (2879, 2881), False, 'from tests.checks import checks\n'), ((2908, 2943), 'tests.checks.checks.ValidateInternalCICADDTest', 'checks.ValidateInternalCICADDTest', ([], {}), '()\n', (2941, 2943), False, 'from tests.checks import checks\n'), ((2970, 3009), 'tests.checks.checks.ValidateInternalCICDEPOSITTest', 'checks.ValidateInternalCICDEPOSITTest', ([], {}), '()\n', (3007, 3009), False, 'from tests.checks import checks\n'), ((3419, 3454), 'tests.checks.checks.ValidateInternalRDNRM2Test', 'checks.ValidateInternalRDNRM2Test', ([], {}), '()\n', (3452, 3454), False, 'from tests.checks import checks\n'), ((3481, 3516), 'tests.checks.checks.ValidateInternalRDASUMTest', 'checks.ValidateInternalRDASUMTest', ([], {}), '()\n', (3514, 3516), False, 'from tests.checks import checks\n'), ((3543, 3578), 'tests.checks.checks.ValidateInternalRSNRM2Test', 'checks.ValidateInternalRSNRM2Test', ([], {}), '()\n', (3576, 3578), False, 'from tests.checks import checks\n'), 
((3605, 3640), 'tests.checks.checks.ValidateInternalRSASUMTest', 'checks.ValidateInternalRSASUMTest', ([], {}), '()\n', (3638, 3640), False, 'from tests.checks import checks\n'), ((3968, 4004), 'tests.checks.checks.ValidateInternalRDZNRM2Test', 'checks.ValidateInternalRDZNRM2Test', ([], {}), '()\n', (4002, 4004), False, 'from tests.checks import checks\n'), ((4031, 4067), 'tests.checks.checks.ValidateInternalRDZASUMTest', 'checks.ValidateInternalRDZASUMTest', ([], {}), '()\n', (4065, 4067), False, 'from tests.checks import checks\n'), ((4094, 4130), 'tests.checks.checks.ValidateInternalRSCNRM2Test', 'checks.ValidateInternalRSCNRM2Test', ([], {}), '()\n', (4128, 4130), False, 'from tests.checks import checks\n'), ((4157, 4193), 'tests.checks.checks.ValidateInternalRSCASUMTest', 'checks.ValidateInternalRSCASUMTest', ([], {}), '()\n', (4191, 4193), False, 'from tests.checks import checks\n'), ((4561, 4595), 'tests.checks.checks.ValidateInternalRDDOTTest', 'checks.ValidateInternalRDDOTTest', ([], {}), '()\n', (4593, 4595), False, 'from tests.checks import checks\n'), ((4622, 4656), 'tests.checks.checks.ValidateInternalRSDOTTest', 'checks.ValidateInternalRSDOTTest', ([], {}), '()\n', (4654, 4656), False, 'from tests.checks import checks\n'), ((5163, 5197), 'tests.checks.checks.ValidateInternalRDDOTTest', 'checks.ValidateInternalRDDOTTest', ([], {}), '()\n', (5195, 5197), False, 'from tests.checks import checks\n'), ((5224, 5258), 'tests.checks.checks.ValidateInternalRSDOTTest', 'checks.ValidateInternalRSDOTTest', ([], {}), '()\n', (5256, 5258), False, 'from tests.checks import checks\n'), ((5578, 5612), 'tests.checks.checks.ValidateInternalRDDOTTest', 'checks.ValidateInternalRDDOTTest', ([], {}), '()\n', (5610, 5612), False, 'from tests.checks import checks\n'), ((5639, 5673), 'tests.checks.checks.ValidateInternalRSDOTTest', 'checks.ValidateInternalRSDOTTest', ([], {}), '()\n', (5671, 5673), False, 'from tests.checks import checks\n'), ((6001, 6036), 
'tests.checks.checks.ValidateInternalRZDOTUTest', 'checks.ValidateInternalRZDOTUTest', ([], {}), '()\n', (6034, 6036), False, 'from tests.checks import checks\n'), ((6063, 6098), 'tests.checks.checks.ValidateInternalRZDOTCTest', 'checks.ValidateInternalRZDOTCTest', ([], {}), '()\n', (6096, 6098), False, 'from tests.checks import checks\n'), ((6125, 6160), 'tests.checks.checks.ValidateInternalRCDOTUTest', 'checks.ValidateInternalRCDOTUTest', ([], {}), '()\n', (6158, 6160), False, 'from tests.checks import checks\n'), ((6187, 6222), 'tests.checks.checks.ValidateInternalRCDOTCTest', 'checks.ValidateInternalRCDOTCTest', ([], {}), '()\n', (6220, 6222), False, 'from tests.checks import checks\n'), ((6803, 6838), 'tests.checks.checks.ValidateInternalRZDOTUTest', 'checks.ValidateInternalRZDOTUTest', ([], {}), '()\n', (6836, 6838), False, 'from tests.checks import checks\n'), ((6865, 6900), 'tests.checks.checks.ValidateInternalRZDOTCTest', 'checks.ValidateInternalRZDOTCTest', ([], {}), '()\n', (6898, 6900), False, 'from tests.checks import checks\n'), ((6927, 6962), 'tests.checks.checks.ValidateInternalRCDOTUTest', 'checks.ValidateInternalRCDOTUTest', ([], {}), '()\n', (6960, 6962), False, 'from tests.checks import checks\n'), ((6989, 7024), 'tests.checks.checks.ValidateInternalRCDOTCTest', 'checks.ValidateInternalRCDOTCTest', ([], {}), '()\n', (7022, 7024), False, 'from tests.checks import checks\n'), ((7418, 7453), 'tests.checks.checks.ValidateInternalRZDOTUTest', 'checks.ValidateInternalRZDOTUTest', ([], {}), '()\n', (7451, 7453), False, 'from tests.checks import checks\n'), ((7480, 7515), 'tests.checks.checks.ValidateInternalRZDOTCTest', 'checks.ValidateInternalRZDOTCTest', ([], {}), '()\n', (7513, 7515), False, 'from tests.checks import checks\n'), ((7542, 7577), 'tests.checks.checks.ValidateInternalRCDOTUTest', 'checks.ValidateInternalRCDOTUTest', ([], {}), '()\n', (7575, 7577), False, 'from tests.checks import checks\n'), ((7604, 7639), 
'tests.checks.checks.ValidateInternalRCDOTCTest', 'checks.ValidateInternalRCDOTCTest', ([], {}), '()\n', (7637, 7639), False, 'from tests.checks import checks\n'), ((8041, 8075), 'tests.checks.checks.ValidateInternalRDSUMTest', 'checks.ValidateInternalRDSUMTest', ([], {}), '()\n', (8073, 8075), False, 'from tests.checks import checks\n'), ((8102, 8137), 'tests.checks.checks.ValidateInternalRDASUMTest', 'checks.ValidateInternalRDASUMTest', ([], {}), '()\n', (8135, 8137), False, 'from tests.checks import checks\n'), ((8164, 8199), 'tests.checks.checks.ValidateInternalRDNRM2Test', 'checks.ValidateInternalRDNRM2Test', ([], {}), '()\n', (8197, 8199), False, 'from tests.checks import checks\n'), ((8226, 8262), 'tests.checks.checks.ValidateInternalDBDBADDTest', 'checks.ValidateInternalDBDBADDTest', ([], {}), '()\n', (8260, 8262), False, 'from tests.checks import checks\n'), ((8289, 8324), 'tests.checks.checks.ValidateInternalDIDADDTest', 'checks.ValidateInternalDIDADDTest', ([], {}), '()\n', (8322, 8324), False, 'from tests.checks import checks\n'), ((8351, 8390), 'tests.checks.checks.ValidateInternalDIDDEPOSITTest', 'checks.ValidateInternalDIDDEPOSITTest', ([], {}), '()\n', (8388, 8390), False, 'from tests.checks import checks\n'), ((8417, 8451), 'tests.checks.checks.ValidateInternalRSSUMTest', 'checks.ValidateInternalRSSUMTest', ([], {}), '()\n', (8449, 8451), False, 'from tests.checks import checks\n'), ((8478, 8513), 'tests.checks.checks.ValidateInternalRSASUMTest', 'checks.ValidateInternalRSASUMTest', ([], {}), '()\n', (8511, 8513), False, 'from tests.checks import checks\n'), ((8540, 8575), 'tests.checks.checks.ValidateInternalRSNRM2Test', 'checks.ValidateInternalRSNRM2Test', ([], {}), '()\n', (8573, 8575), False, 'from tests.checks import checks\n'), ((8602, 8638), 'tests.checks.checks.ValidateInternalSBSBADDTest', 'checks.ValidateInternalSBSBADDTest', ([], {}), '()\n', (8636, 8638), False, 'from tests.checks import checks\n'), ((8665, 8700), 
'tests.checks.checks.ValidateInternalSISADDTest', 'checks.ValidateInternalSISADDTest', ([], {}), '()\n', (8698, 8700), False, 'from tests.checks import checks\n'), ((8727, 8766), 'tests.checks.checks.ValidateInternalSISDEPOSITTest', 'checks.ValidateInternalSISDEPOSITTest', ([], {}), '()\n', (8764, 8766), False, 'from tests.checks import checks\n'), ((9208, 9242), 'tests.checks.checks.ValidateInternalRZSUMTest', 'checks.ValidateInternalRZSUMTest', ([], {}), '()\n', (9240, 9242), False, 'from tests.checks import checks\n'), ((9269, 9305), 'tests.checks.checks.ValidateInternalRDZASUMTest', 'checks.ValidateInternalRDZASUMTest', ([], {}), '()\n', (9303, 9305), False, 'from tests.checks import checks\n'), ((9332, 9368), 'tests.checks.checks.ValidateInternalRDZNRM2Test', 'checks.ValidateInternalRDZNRM2Test', ([], {}), '()\n', (9366, 9368), False, 'from tests.checks import checks\n'), ((9395, 9431), 'tests.checks.checks.ValidateInternalZBZBADDTest', 'checks.ValidateInternalZBZBADDTest', ([], {}), '()\n', (9429, 9431), False, 'from tests.checks import checks\n'), ((9458, 9493), 'tests.checks.checks.ValidateInternalZIZADDTest', 'checks.ValidateInternalZIZADDTest', ([], {}), '()\n', (9491, 9493), False, 'from tests.checks import checks\n'), ((9520, 9559), 'tests.checks.checks.ValidateInternalZIZDEPOSITTest', 'checks.ValidateInternalZIZDEPOSITTest', ([], {}), '()\n', (9557, 9559), False, 'from tests.checks import checks\n'), ((9586, 9620), 'tests.checks.checks.ValidateInternalRCSUMTest', 'checks.ValidateInternalRCSUMTest', ([], {}), '()\n', (9618, 9620), False, 'from tests.checks import checks\n'), ((9647, 9683), 'tests.checks.checks.ValidateInternalRSCASUMTest', 'checks.ValidateInternalRSCASUMTest', ([], {}), '()\n', (9681, 9683), False, 'from tests.checks import checks\n'), ((9710, 9746), 'tests.checks.checks.ValidateInternalRSCNRM2Test', 'checks.ValidateInternalRSCNRM2Test', ([], {}), '()\n', (9744, 9746), False, 'from tests.checks import checks\n'), ((9773, 9809), 
'tests.checks.checks.ValidateInternalCBCBADDTest', 'checks.ValidateInternalCBCBADDTest', ([], {}), '()\n', (9807, 9809), False, 'from tests.checks import checks\n'), ((9836, 9871), 'tests.checks.checks.ValidateInternalCICADDTest', 'checks.ValidateInternalCICADDTest', ([], {}), '()\n', (9869, 9871), False, 'from tests.checks import checks\n'), ((9898, 9937), 'tests.checks.checks.ValidateInternalCICDEPOSITTest', 'checks.ValidateInternalCICDEPOSITTest', ([], {}), '()\n', (9935, 9937), False, 'from tests.checks import checks\n'), ((10389, 10423), 'tests.checks.checks.ValidateInternalRDDOTTest', 'checks.ValidateInternalRDDOTTest', ([], {}), '()\n', (10421, 10423), False, 'from tests.checks import checks\n'), ((10450, 10484), 'tests.checks.checks.ValidateInternalRSDOTTest', 'checks.ValidateInternalRSDOTTest', ([], {}), '()\n', (10482, 10484), False, 'from tests.checks import checks\n'), ((11292, 11327), 'tests.checks.checks.ValidateInternalRZDOTUTest', 'checks.ValidateInternalRZDOTUTest', ([], {}), '()\n', (11325, 11327), False, 'from tests.checks import checks\n'), ((11354, 11389), 'tests.checks.checks.ValidateInternalRZDOTCTest', 'checks.ValidateInternalRZDOTCTest', ([], {}), '()\n', (11387, 11389), False, 'from tests.checks import checks\n'), ((11416, 11451), 'tests.checks.checks.ValidateInternalRCDOTUTest', 'checks.ValidateInternalRCDOTUTest', ([], {}), '()\n', (11449, 11451), False, 'from tests.checks import checks\n'), ((11478, 11513), 'tests.checks.checks.ValidateInternalRCDOTCTest', 'checks.ValidateInternalRCDOTCTest', ([], {}), '()\n', (11511, 11513), False, 'from tests.checks import checks\n'), ((12394, 12425), 'tests.checks.checks.ValidateXBLASRDDOTTest', 'checks.ValidateXBLASRDDOTTest', ([], {}), '()\n', (12423, 12425), False, 'from tests.checks import checks\n'), ((12452, 12484), 'tests.checks.checks.ValidateXBLASRZDOTUTest', 'checks.ValidateXBLASRZDOTUTest', ([], {}), '()\n', (12482, 12484), False, 'from tests.checks import checks\n'), ((12511, 12543), 
'tests.checks.checks.ValidateXBLASRZDOTCTest', 'checks.ValidateXBLASRZDOTCTest', ([], {}), '()\n', (12541, 12543), False, 'from tests.checks import checks\n'), ((12570, 12601), 'tests.checks.checks.ValidateXBLASRSDOTTest', 'checks.ValidateXBLASRSDOTTest', ([], {}), '()\n', (12599, 12601), False, 'from tests.checks import checks\n'), ((12628, 12660), 'tests.checks.checks.ValidateXBLASRCDOTUTest', 'checks.ValidateXBLASRCDOTUTest', ([], {}), '()\n', (12658, 12660), False, 'from tests.checks import checks\n'), ((12687, 12719), 'tests.checks.checks.ValidateXBLASRCDOTCTest', 'checks.ValidateXBLASRCDOTCTest', ([], {}), '()\n', (12717, 12719), False, 'from tests.checks import checks\n'), ((12917, 12941), 'tests.checks.checks.VerifyRDSUMTest', 'checks.VerifyRDSUMTest', ([], {}), '()\n', (12939, 12941), False, 'from tests.checks import checks\n'), ((12968, 12993), 'tests.checks.checks.VerifyRDASUMTest', 'checks.VerifyRDASUMTest', ([], {}), '()\n', (12991, 12993), False, 'from tests.checks import checks\n'), ((13020, 13046), 'tests.checks.checks.VerifyDBDBADDTest', 'checks.VerifyDBDBADDTest', ([], {}), '()\n', (13044, 13046), False, 'from tests.checks import checks\n'), ((13073, 13098), 'tests.checks.checks.VerifyDIDADDTest', 'checks.VerifyDIDADDTest', ([], {}), '()\n', (13096, 13098), False, 'from tests.checks import checks\n'), ((13125, 13154), 'tests.checks.checks.VerifyDIDDEPOSITTest', 'checks.VerifyDIDDEPOSITTest', ([], {}), '()\n', (13152, 13154), False, 'from tests.checks import checks\n'), ((13181, 13205), 'tests.checks.checks.VerifyRZSUMTest', 'checks.VerifyRZSUMTest', ([], {}), '()\n', (13203, 13205), False, 'from tests.checks import checks\n'), ((13232, 13258), 'tests.checks.checks.VerifyRDZASUMTest', 'checks.VerifyRDZASUMTest', ([], {}), '()\n', (13256, 13258), False, 'from tests.checks import checks\n'), ((13285, 13311), 'tests.checks.checks.VerifyZBZBADDTest', 'checks.VerifyZBZBADDTest', ([], {}), '()\n', (13309, 13311), False, 'from tests.checks import 
checks\n'), ((13338, 13363), 'tests.checks.checks.VerifyZIZADDTest', 'checks.VerifyZIZADDTest', ([], {}), '()\n', (13361, 13363), False, 'from tests.checks import checks\n'), ((13390, 13419), 'tests.checks.checks.VerifyZIZDEPOSITTest', 'checks.VerifyZIZDEPOSITTest', ([], {}), '()\n', (13417, 13419), False, 'from tests.checks import checks\n'), ((13446, 13470), 'tests.checks.checks.VerifyRSSUMTest', 'checks.VerifyRSSUMTest', ([], {}), '()\n', (13468, 13470), False, 'from tests.checks import checks\n'), ((13497, 13522), 'tests.checks.checks.VerifyRSASUMTest', 'checks.VerifyRSASUMTest', ([], {}), '()\n', (13520, 13522), False, 'from tests.checks import checks\n'), ((13549, 13575), 'tests.checks.checks.VerifySBSBADDTest', 'checks.VerifySBSBADDTest', ([], {}), '()\n', (13573, 13575), False, 'from tests.checks import checks\n'), ((13602, 13627), 'tests.checks.checks.VerifySISADDTest', 'checks.VerifySISADDTest', ([], {}), '()\n', (13625, 13627), False, 'from tests.checks import checks\n'), ((13654, 13683), 'tests.checks.checks.VerifySISDEPOSITTest', 'checks.VerifySISDEPOSITTest', ([], {}), '()\n', (13681, 13683), False, 'from tests.checks import checks\n'), ((13710, 13734), 'tests.checks.checks.VerifyRCSUMTest', 'checks.VerifyRCSUMTest', ([], {}), '()\n', (13732, 13734), False, 'from tests.checks import checks\n'), ((13761, 13787), 'tests.checks.checks.VerifyRSCASUMTest', 'checks.VerifyRSCASUMTest', ([], {}), '()\n', (13785, 13787), False, 'from tests.checks import checks\n'), ((13814, 13840), 'tests.checks.checks.VerifyCBCBADDTest', 'checks.VerifyCBCBADDTest', ([], {}), '()\n', (13838, 13840), False, 'from tests.checks import checks\n'), ((13867, 13892), 'tests.checks.checks.VerifyCICADDTest', 'checks.VerifyCICADDTest', ([], {}), '()\n', (13890, 13892), False, 'from tests.checks import checks\n'), ((13919, 13948), 'tests.checks.checks.VerifyCICDEPOSITTest', 'checks.VerifyCICDEPOSITTest', ([], {}), '()\n', (13946, 13948), False, 'from tests.checks import checks\n'), 
((14149, 14173), 'tests.checks.checks.VerifyRDSUMTest', 'checks.VerifyRDSUMTest', ([], {}), '()\n', (14171, 14173), False, 'from tests.checks import checks\n'), ((14200, 14225), 'tests.checks.checks.VerifyRDASUMTest', 'checks.VerifyRDASUMTest', ([], {}), '()\n', (14223, 14225), False, 'from tests.checks import checks\n'), ((14252, 14277), 'tests.checks.checks.VerifyRDNRM2Test', 'checks.VerifyRDNRM2Test', ([], {}), '()\n', (14275, 14277), False, 'from tests.checks import checks\n'), ((14304, 14329), 'tests.checks.checks.VerifyDIDSSQTest', 'checks.VerifyDIDSSQTest', ([], {}), '()\n', (14327, 14329), False, 'from tests.checks import checks\n'), ((14356, 14382), 'tests.checks.checks.VerifyDBDBADDTest', 'checks.VerifyDBDBADDTest', ([], {}), '()\n', (14380, 14382), False, 'from tests.checks import checks\n'), ((14409, 14434), 'tests.checks.checks.VerifyDIDADDTest', 'checks.VerifyDIDADDTest', ([], {}), '()\n', (14432, 14434), False, 'from tests.checks import checks\n'), ((14461, 14490), 'tests.checks.checks.VerifyDIDDEPOSITTest', 'checks.VerifyDIDDEPOSITTest', ([], {}), '()\n', (14488, 14490), False, 'from tests.checks import checks\n'), ((14517, 14541), 'tests.checks.checks.VerifyRZSUMTest', 'checks.VerifyRZSUMTest', ([], {}), '()\n', (14539, 14541), False, 'from tests.checks import checks\n'), ((14568, 14594), 'tests.checks.checks.VerifyRDZASUMTest', 'checks.VerifyRDZASUMTest', ([], {}), '()\n', (14592, 14594), False, 'from tests.checks import checks\n'), ((14621, 14647), 'tests.checks.checks.VerifyRDZNRM2Test', 'checks.VerifyRDZNRM2Test', ([], {}), '()\n', (14645, 14647), False, 'from tests.checks import checks\n'), ((14674, 14699), 'tests.checks.checks.VerifyDIZSSQTest', 'checks.VerifyDIZSSQTest', ([], {}), '()\n', (14697, 14699), False, 'from tests.checks import checks\n'), ((14726, 14752), 'tests.checks.checks.VerifyZBZBADDTest', 'checks.VerifyZBZBADDTest', ([], {}), '()\n', (14750, 14752), False, 'from tests.checks import checks\n'), ((14779, 14804), 
'tests.checks.checks.VerifyZIZADDTest', 'checks.VerifyZIZADDTest', ([], {}), '()\n', (14802, 14804), False, 'from tests.checks import checks\n'), ((14831, 14860), 'tests.checks.checks.VerifyZIZDEPOSITTest', 'checks.VerifyZIZDEPOSITTest', ([], {}), '()\n', (14858, 14860), False, 'from tests.checks import checks\n'), ((14887, 14911), 'tests.checks.checks.VerifyRSSUMTest', 'checks.VerifyRSSUMTest', ([], {}), '()\n', (14909, 14911), False, 'from tests.checks import checks\n'), ((14938, 14963), 'tests.checks.checks.VerifyRSASUMTest', 'checks.VerifyRSASUMTest', ([], {}), '()\n', (14961, 14963), False, 'from tests.checks import checks\n'), ((14990, 15015), 'tests.checks.checks.VerifyRSNRM2Test', 'checks.VerifyRSNRM2Test', ([], {}), '()\n', (15013, 15015), False, 'from tests.checks import checks\n'), ((15042, 15067), 'tests.checks.checks.VerifySISSSQTest', 'checks.VerifySISSSQTest', ([], {}), '()\n', (15065, 15067), False, 'from tests.checks import checks\n'), ((15094, 15120), 'tests.checks.checks.VerifySBSBADDTest', 'checks.VerifySBSBADDTest', ([], {}), '()\n', (15118, 15120), False, 'from tests.checks import checks\n'), ((15147, 15172), 'tests.checks.checks.VerifySISADDTest', 'checks.VerifySISADDTest', ([], {}), '()\n', (15170, 15172), False, 'from tests.checks import checks\n'), ((15199, 15228), 'tests.checks.checks.VerifySISDEPOSITTest', 'checks.VerifySISDEPOSITTest', ([], {}), '()\n', (15226, 15228), False, 'from tests.checks import checks\n'), ((15255, 15279), 'tests.checks.checks.VerifyRCSUMTest', 'checks.VerifyRCSUMTest', ([], {}), '()\n', (15277, 15279), False, 'from tests.checks import checks\n'), ((15306, 15332), 'tests.checks.checks.VerifyRSCASUMTest', 'checks.VerifyRSCASUMTest', ([], {}), '()\n', (15330, 15332), False, 'from tests.checks import checks\n'), ((15359, 15385), 'tests.checks.checks.VerifyRSCNRM2Test', 'checks.VerifyRSCNRM2Test', ([], {}), '()\n', (15383, 15385), False, 'from tests.checks import checks\n'), ((15412, 15437), 
'tests.checks.checks.VerifySICSSQTest', 'checks.VerifySICSSQTest', ([], {}), '()\n', (15435, 15437), False, 'from tests.checks import checks\n'), ((15464, 15490), 'tests.checks.checks.VerifyCBCBADDTest', 'checks.VerifyCBCBADDTest', ([], {}), '()\n', (15488, 15490), False, 'from tests.checks import checks\n'), ((15517, 15542), 'tests.checks.checks.VerifyCICADDTest', 'checks.VerifyCICADDTest', ([], {}), '()\n', (15540, 15542), False, 'from tests.checks import checks\n'), ((15569, 15598), 'tests.checks.checks.VerifyCICDEPOSITTest', 'checks.VerifyCICDEPOSITTest', ([], {}), '()\n', (15596, 15598), False, 'from tests.checks import checks\n'), ((15897, 15921), 'tests.checks.checks.VerifyRDDOTTest', 'checks.VerifyRDDOTTest', ([], {}), '()\n', (15919, 15921), False, 'from tests.checks import checks\n'), ((15948, 15973), 'tests.checks.checks.VerifyRZDOTUTest', 'checks.VerifyRZDOTUTest', ([], {}), '()\n', (15971, 15973), False, 'from tests.checks import checks\n'), ((16000, 16025), 'tests.checks.checks.VerifyRZDOTCTest', 'checks.VerifyRZDOTCTest', ([], {}), '()\n', (16023, 16025), False, 'from tests.checks import checks\n'), ((16052, 16076), 'tests.checks.checks.VerifyRSDOTTest', 'checks.VerifyRSDOTTest', ([], {}), '()\n', (16074, 16076), False, 'from tests.checks import checks\n'), ((16103, 16128), 'tests.checks.checks.VerifyRCDOTUTest', 'checks.VerifyRCDOTUTest', ([], {}), '()\n', (16126, 16128), False, 'from tests.checks import checks\n'), ((16155, 16180), 'tests.checks.checks.VerifyRCDOTCTest', 'checks.VerifyRCDOTCTest', ([], {}), '()\n', (16178, 16180), False, 'from tests.checks import checks\n'), ((27776, 27812), 'tests.checks.checks.ValidateInternalDBDBADDTest', 'checks.ValidateInternalDBDBADDTest', ([], {}), '()\n', (27810, 27812), False, 'from tests.checks import checks\n'), ((27839, 27874), 'tests.checks.checks.ValidateInternalDIDADDTest', 'checks.ValidateInternalDIDADDTest', ([], {}), '()\n', (27872, 27874), False, 'from tests.checks import checks\n'), ((27901, 
27940), 'tests.checks.checks.ValidateInternalDIDDEPOSITTest', 'checks.ValidateInternalDIDDEPOSITTest', ([], {}), '()\n', (27938, 27940), False, 'from tests.checks import checks\n'), ((27967, 28001), 'tests.checks.checks.ValidateInternalRDSUMTest', 'checks.ValidateInternalRDSUMTest', ([], {}), '()\n', (27999, 28001), False, 'from tests.checks import checks\n'), ((28028, 28063), 'tests.checks.checks.ValidateInternalRDASUMTest', 'checks.ValidateInternalRDASUMTest', ([], {}), '()\n', (28061, 28063), False, 'from tests.checks import checks\n'), ((28090, 28124), 'tests.checks.checks.ValidateInternalRDDOTTest', 'checks.ValidateInternalRDDOTTest', ([], {}), '()\n', (28122, 28124), False, 'from tests.checks import checks\n'), ((28151, 28187), 'tests.checks.checks.ValidateInternalZBZBADDTest', 'checks.ValidateInternalZBZBADDTest', ([], {}), '()\n', (28185, 28187), False, 'from tests.checks import checks\n'), ((28214, 28249), 'tests.checks.checks.ValidateInternalZIZADDTest', 'checks.ValidateInternalZIZADDTest', ([], {}), '()\n', (28247, 28249), False, 'from tests.checks import checks\n'), ((28276, 28315), 'tests.checks.checks.ValidateInternalZIZDEPOSITTest', 'checks.ValidateInternalZIZDEPOSITTest', ([], {}), '()\n', (28313, 28315), False, 'from tests.checks import checks\n'), ((28342, 28376), 'tests.checks.checks.ValidateInternalRZSUMTest', 'checks.ValidateInternalRZSUMTest', ([], {}), '()\n', (28374, 28376), False, 'from tests.checks import checks\n'), ((28403, 28439), 'tests.checks.checks.ValidateInternalRDZASUMTest', 'checks.ValidateInternalRDZASUMTest', ([], {}), '()\n', (28437, 28439), False, 'from tests.checks import checks\n'), ((28466, 28501), 'tests.checks.checks.ValidateInternalRZDOTUTest', 'checks.ValidateInternalRZDOTUTest', ([], {}), '()\n', (28499, 28501), False, 'from tests.checks import checks\n'), ((28528, 28563), 'tests.checks.checks.ValidateInternalRZDOTCTest', 'checks.ValidateInternalRZDOTCTest', ([], {}), '()\n', (28561, 28563), False, 'from tests.checks 
import checks\n'), ((28841, 28877), 'tests.checks.checks.ValidateInternalSBSBADDTest', 'checks.ValidateInternalSBSBADDTest', ([], {}), '()\n', (28875, 28877), False, 'from tests.checks import checks\n'), ((28904, 28939), 'tests.checks.checks.ValidateInternalSISADDTest', 'checks.ValidateInternalSISADDTest', ([], {}), '()\n', (28937, 28939), False, 'from tests.checks import checks\n'), ((28966, 29005), 'tests.checks.checks.ValidateInternalSISDEPOSITTest', 'checks.ValidateInternalSISDEPOSITTest', ([], {}), '()\n', (29003, 29005), False, 'from tests.checks import checks\n'), ((29032, 29066), 'tests.checks.checks.ValidateInternalRSSUMTest', 'checks.ValidateInternalRSSUMTest', ([], {}), '()\n', (29064, 29066), False, 'from tests.checks import checks\n'), ((29093, 29128), 'tests.checks.checks.ValidateInternalRSASUMTest', 'checks.ValidateInternalRSASUMTest', ([], {}), '()\n', (29126, 29128), False, 'from tests.checks import checks\n'), ((29155, 29189), 'tests.checks.checks.ValidateInternalRSDOTTest', 'checks.ValidateInternalRSDOTTest', ([], {}), '()\n', (29187, 29189), False, 'from tests.checks import checks\n'), ((29216, 29252), 'tests.checks.checks.ValidateInternalCBCBADDTest', 'checks.ValidateInternalCBCBADDTest', ([], {}), '()\n', (29250, 29252), False, 'from tests.checks import checks\n'), ((29279, 29314), 'tests.checks.checks.ValidateInternalCICADDTest', 'checks.ValidateInternalCICADDTest', ([], {}), '()\n', (29312, 29314), False, 'from tests.checks import checks\n'), ((29341, 29380), 'tests.checks.checks.ValidateInternalCICDEPOSITTest', 'checks.ValidateInternalCICDEPOSITTest', ([], {}), '()\n', (29378, 29380), False, 'from tests.checks import checks\n'), ((29407, 29441), 'tests.checks.checks.ValidateInternalRCSUMTest', 'checks.ValidateInternalRCSUMTest', ([], {}), '()\n', (29439, 29441), False, 'from tests.checks import checks\n'), ((29468, 29504), 'tests.checks.checks.ValidateInternalRSCASUMTest', 'checks.ValidateInternalRSCASUMTest', ([], {}), '()\n', (29502, 
29504), False, 'from tests.checks import checks\n'), ((29531, 29566), 'tests.checks.checks.ValidateInternalRCDOTUTest', 'checks.ValidateInternalRCDOTUTest', ([], {}), '()\n', (29564, 29566), False, 'from tests.checks import checks\n'), ((29593, 29628), 'tests.checks.checks.ValidateInternalRCDOTCTest', 'checks.ValidateInternalRCDOTCTest', ([], {}), '()\n', (29626, 29628), False, 'from tests.checks import checks\n'), ((29906, 29936), 'tests.checks.checks.CorroborateRDGEMVTest', 'checks.CorroborateRDGEMVTest', ([], {}), '()\n', (29934, 29936), False, 'from tests.checks import checks\n'), ((29963, 29993), 'tests.checks.checks.CorroborateRZGEMVTest', 'checks.CorroborateRZGEMVTest', ([], {}), '()\n', (29991, 29993), False, 'from tests.checks import checks\n'), ((30020, 30050), 'tests.checks.checks.CorroborateRSGEMVTest', 'checks.CorroborateRSGEMVTest', ([], {}), '()\n', (30048, 30050), False, 'from tests.checks import checks\n'), ((30077, 30107), 'tests.checks.checks.CorroborateRCGEMVTest', 'checks.CorroborateRCGEMVTest', ([], {}), '()\n', (30105, 30107), False, 'from tests.checks import checks\n'), ((30773, 30803), 'tests.checks.checks.CorroborateRDGEMMTest', 'checks.CorroborateRDGEMMTest', ([], {}), '()\n', (30801, 30803), False, 'from tests.checks import checks\n'), ((30829, 30859), 'tests.checks.checks.CorroborateRZGEMMTest', 'checks.CorroborateRZGEMMTest', ([], {}), '()\n', (30857, 30859), False, 'from tests.checks import checks\n'), ((30886, 30916), 'tests.checks.checks.CorroborateRSGEMMTest', 'checks.CorroborateRSGEMMTest', ([], {}), '()\n', (30914, 30916), False, 'from tests.checks import checks\n'), ((30943, 30973), 'tests.checks.checks.CorroborateRCGEMMTest', 'checks.CorroborateRCGEMMTest', ([], {}), '()\n', (30971, 30973), False, 'from tests.checks import checks\n'), ((16684, 16718), 'tests.checks.checks.ValidateInternalRDSUMTest', 'checks.ValidateInternalRDSUMTest', ([], {}), '()\n', (16716, 16718), False, 'from tests.checks import checks\n'), ((16747, 
16783), 'tests.checks.checks.ValidateInternalDBDBADDTest', 'checks.ValidateInternalDBDBADDTest', ([], {}), '()\n', (16781, 16783), False, 'from tests.checks import checks\n'), ((16812, 16847), 'tests.checks.checks.ValidateInternalDIDADDTest', 'checks.ValidateInternalDIDADDTest', ([], {}), '()\n', (16845, 16847), False, 'from tests.checks import checks\n'), ((16876, 16915), 'tests.checks.checks.ValidateInternalDIDDEPOSITTest', 'checks.ValidateInternalDIDDEPOSITTest', ([], {}), '()\n', (16913, 16915), False, 'from tests.checks import checks\n'), ((16944, 16979), 'tests.checks.checks.ValidateInternalRDASUMTest', 'checks.ValidateInternalRDASUMTest', ([], {}), '()\n', (16977, 16979), False, 'from tests.checks import checks\n'), ((17008, 17043), 'tests.checks.checks.ValidateInternalRDNRM2Test', 'checks.ValidateInternalRDNRM2Test', ([], {}), '()\n', (17041, 17043), False, 'from tests.checks import checks\n'), ((17072, 17106), 'tests.checks.checks.ValidateInternalRDDOTTest', 'checks.ValidateInternalRDDOTTest', ([], {}), '()\n', (17104, 17106), False, 'from tests.checks import checks\n'), ((17398, 17432), 'tests.checks.checks.ValidateInternalRDSUMTest', 'checks.ValidateInternalRDSUMTest', ([], {}), '()\n', (17430, 17432), False, 'from tests.checks import checks\n'), ((17461, 17497), 'tests.checks.checks.ValidateInternalDBDBADDTest', 'checks.ValidateInternalDBDBADDTest', ([], {}), '()\n', (17495, 17497), False, 'from tests.checks import checks\n'), ((17526, 17561), 'tests.checks.checks.ValidateInternalDIDADDTest', 'checks.ValidateInternalDIDADDTest', ([], {}), '()\n', (17559, 17561), False, 'from tests.checks import checks\n'), ((17590, 17629), 'tests.checks.checks.ValidateInternalDIDDEPOSITTest', 'checks.ValidateInternalDIDDEPOSITTest', ([], {}), '()\n', (17627, 17629), False, 'from tests.checks import checks\n'), ((17658, 17693), 'tests.checks.checks.ValidateInternalRDASUMTest', 'checks.ValidateInternalRDASUMTest', ([], {}), '()\n', (17691, 17693), False, 'from 
tests.checks import checks\n'), ((17722, 17757), 'tests.checks.checks.ValidateInternalRDNRM2Test', 'checks.ValidateInternalRDNRM2Test', ([], {}), '()\n', (17755, 17757), False, 'from tests.checks import checks\n'), ((17786, 17820), 'tests.checks.checks.ValidateInternalRDDOTTest', 'checks.ValidateInternalRDDOTTest', ([], {}), '()\n', (17818, 17820), False, 'from tests.checks import checks\n'), ((18318, 18352), 'tests.checks.checks.ValidateInternalRDDOTTest', 'checks.ValidateInternalRDDOTTest', ([], {}), '()\n', (18350, 18352), False, 'from tests.checks import checks\n'), ((18850, 18884), 'tests.checks.checks.ValidateInternalRDDOTTest', 'checks.ValidateInternalRDDOTTest', ([], {}), '()\n', (18882, 18884), False, 'from tests.checks import checks\n'), ((19500, 19534), 'tests.checks.checks.ValidateInternalRZSUMTest', 'checks.ValidateInternalRZSUMTest', ([], {}), '()\n', (19532, 19534), False, 'from tests.checks import checks\n'), ((19563, 19599), 'tests.checks.checks.ValidateInternalZBZBADDTest', 'checks.ValidateInternalZBZBADDTest', ([], {}), '()\n', (19597, 19599), False, 'from tests.checks import checks\n'), ((19628, 19663), 'tests.checks.checks.ValidateInternalZIZADDTest', 'checks.ValidateInternalZIZADDTest', ([], {}), '()\n', (19661, 19663), False, 'from tests.checks import checks\n'), ((19692, 19731), 'tests.checks.checks.ValidateInternalZIZDEPOSITTest', 'checks.ValidateInternalZIZDEPOSITTest', ([], {}), '()\n', (19729, 19731), False, 'from tests.checks import checks\n'), ((19760, 19796), 'tests.checks.checks.ValidateInternalRDZASUMTest', 'checks.ValidateInternalRDZASUMTest', ([], {}), '()\n', (19794, 19796), False, 'from tests.checks import checks\n'), ((19825, 19861), 'tests.checks.checks.ValidateInternalRDZNRM2Test', 'checks.ValidateInternalRDZNRM2Test', ([], {}), '()\n', (19859, 19861), False, 'from tests.checks import checks\n'), ((19890, 19925), 'tests.checks.checks.ValidateInternalRZDOTUTest', 'checks.ValidateInternalRZDOTUTest', ([], {}), '()\n', (19923, 
19925), False, 'from tests.checks import checks\n'), ((19954, 19989), 'tests.checks.checks.ValidateInternalRZDOTCTest', 'checks.ValidateInternalRZDOTCTest', ([], {}), '()\n', (19987, 19989), False, 'from tests.checks import checks\n'), ((20622, 20657), 'tests.checks.checks.ValidateInternalRZDOTUTest', 'checks.ValidateInternalRZDOTUTest', ([], {}), '()\n', (20655, 20657), False, 'from tests.checks import checks\n'), ((20686, 20721), 'tests.checks.checks.ValidateInternalRZDOTCTest', 'checks.ValidateInternalRZDOTCTest', ([], {}), '()\n', (20719, 20721), False, 'from tests.checks import checks\n'), ((21354, 21389), 'tests.checks.checks.ValidateInternalRZDOTUTest', 'checks.ValidateInternalRZDOTUTest', ([], {}), '()\n', (21387, 21389), False, 'from tests.checks import checks\n'), ((21418, 21453), 'tests.checks.checks.ValidateInternalRZDOTCTest', 'checks.ValidateInternalRZDOTCTest', ([], {}), '()\n', (21451, 21453), False, 'from tests.checks import checks\n'), ((22247, 22281), 'tests.checks.checks.ValidateInternalRSSUMTest', 'checks.ValidateInternalRSSUMTest', ([], {}), '()\n', (22279, 22281), False, 'from tests.checks import checks\n'), ((22310, 22346), 'tests.checks.checks.ValidateInternalSBSBADDTest', 'checks.ValidateInternalSBSBADDTest', ([], {}), '()\n', (22344, 22346), False, 'from tests.checks import checks\n'), ((22375, 22410), 'tests.checks.checks.ValidateInternalSISADDTest', 'checks.ValidateInternalSISADDTest', ([], {}), '()\n', (22408, 22410), False, 'from tests.checks import checks\n'), ((22439, 22478), 'tests.checks.checks.ValidateInternalSISDEPOSITTest', 'checks.ValidateInternalSISDEPOSITTest', ([], {}), '()\n', (22476, 22478), False, 'from tests.checks import checks\n'), ((22507, 22542), 'tests.checks.checks.ValidateInternalRSASUMTest', 'checks.ValidateInternalRSASUMTest', ([], {}), '()\n', (22540, 22542), False, 'from tests.checks import checks\n'), ((22571, 22606), 'tests.checks.checks.ValidateInternalRSNRM2Test', 'checks.ValidateInternalRSNRM2Test', ([], 
{}), '()\n', (22604, 22606), False, 'from tests.checks import checks\n'), ((22635, 22669), 'tests.checks.checks.ValidateInternalRSDOTTest', 'checks.ValidateInternalRSDOTTest', ([], {}), '()\n', (22667, 22669), False, 'from tests.checks import checks\n'), ((22964, 22998), 'tests.checks.checks.ValidateInternalRSSUMTest', 'checks.ValidateInternalRSSUMTest', ([], {}), '()\n', (22996, 22998), False, 'from tests.checks import checks\n'), ((23027, 23063), 'tests.checks.checks.ValidateInternalSBSBADDTest', 'checks.ValidateInternalSBSBADDTest', ([], {}), '()\n', (23061, 23063), False, 'from tests.checks import checks\n'), ((23092, 23127), 'tests.checks.checks.ValidateInternalSISADDTest', 'checks.ValidateInternalSISADDTest', ([], {}), '()\n', (23125, 23127), False, 'from tests.checks import checks\n'), ((23156, 23195), 'tests.checks.checks.ValidateInternalSISDEPOSITTest', 'checks.ValidateInternalSISDEPOSITTest', ([], {}), '()\n', (23193, 23195), False, 'from tests.checks import checks\n'), ((23224, 23259), 'tests.checks.checks.ValidateInternalRSASUMTest', 'checks.ValidateInternalRSASUMTest', ([], {}), '()\n', (23257, 23259), False, 'from tests.checks import checks\n'), ((23288, 23323), 'tests.checks.checks.ValidateInternalRSNRM2Test', 'checks.ValidateInternalRSNRM2Test', ([], {}), '()\n', (23321, 23323), False, 'from tests.checks import checks\n'), ((23352, 23386), 'tests.checks.checks.ValidateInternalRSDOTTest', 'checks.ValidateInternalRSDOTTest', ([], {}), '()\n', (23384, 23386), False, 'from tests.checks import checks\n'), ((23884, 23918), 'tests.checks.checks.ValidateInternalRSDOTTest', 'checks.ValidateInternalRSDOTTest', ([], {}), '()\n', (23916, 23918), False, 'from tests.checks import checks\n'), ((24416, 24450), 'tests.checks.checks.ValidateInternalRSDOTTest', 'checks.ValidateInternalRSDOTTest', ([], {}), '()\n', (24448, 24450), False, 'from tests.checks import checks\n'), ((25066, 25100), 'tests.checks.checks.ValidateInternalRCSUMTest', 
'checks.ValidateInternalRCSUMTest', ([], {}), '()\n', (25098, 25100), False, 'from tests.checks import checks\n'), ((25129, 25165), 'tests.checks.checks.ValidateInternalCBCBADDTest', 'checks.ValidateInternalCBCBADDTest', ([], {}), '()\n', (25163, 25165), False, 'from tests.checks import checks\n'), ((25194, 25229), 'tests.checks.checks.ValidateInternalCICADDTest', 'checks.ValidateInternalCICADDTest', ([], {}), '()\n', (25227, 25229), False, 'from tests.checks import checks\n'), ((25258, 25297), 'tests.checks.checks.ValidateInternalCICDEPOSITTest', 'checks.ValidateInternalCICDEPOSITTest', ([], {}), '()\n', (25295, 25297), False, 'from tests.checks import checks\n'), ((25326, 25362), 'tests.checks.checks.ValidateInternalRSCASUMTest', 'checks.ValidateInternalRSCASUMTest', ([], {}), '()\n', (25360, 25362), False, 'from tests.checks import checks\n'), ((25391, 25427), 'tests.checks.checks.ValidateInternalRSCNRM2Test', 'checks.ValidateInternalRSCNRM2Test', ([], {}), '()\n', (25425, 25427), False, 'from tests.checks import checks\n'), ((25456, 25491), 'tests.checks.checks.ValidateInternalRCDOTUTest', 'checks.ValidateInternalRCDOTUTest', ([], {}), '()\n', (25489, 25491), False, 'from tests.checks import checks\n'), ((25520, 25555), 'tests.checks.checks.ValidateInternalRCDOTCTest', 'checks.ValidateInternalRCDOTCTest', ([], {}), '()\n', (25553, 25555), False, 'from tests.checks import checks\n'), ((26188, 26223), 'tests.checks.checks.ValidateInternalRCDOTUTest', 'checks.ValidateInternalRCDOTUTest', ([], {}), '()\n', (26221, 26223), False, 'from tests.checks import checks\n'), ((26252, 26287), 'tests.checks.checks.ValidateInternalRCDOTCTest', 'checks.ValidateInternalRCDOTCTest', ([], {}), '()\n', (26285, 26287), False, 'from tests.checks import checks\n'), ((26920, 26955), 'tests.checks.checks.ValidateInternalRCDOTUTest', 'checks.ValidateInternalRCDOTUTest', ([], {}), '()\n', (26953, 26955), False, 'from tests.checks import checks\n'), ((26984, 27019), 
'tests.checks.checks.ValidateInternalRCDOTCTest', 'checks.ValidateInternalRCDOTCTest', ([], {}), '()\n', (27017, 27019), False, 'from tests.checks import checks\n')]
|
#!/usr/bin/env python
from __future__ import print_function
import time
import traceback
import os
import base64
import random
import sys
import json
import subprocess
from os import walk
from os.path import splitext, join
import io
if sys.version_info >= (3,):
    # Python 3 has no separate ``unicode`` type; install an identity shim so
    # the Python 2-style ``unicode(...)`` calls later in this file keep
    # working unchanged on both interpreter lines.
    def unicode(s):
        return s
from sandbox import get_sandbox
class HeadTail(object):
    """File-like wrapper that forwards writes to an underlying file (which
    may be ``None``) while capturing an excerpt of everything written.

    The first ``max_capture`` characters fill the head buffer; once the
    head is full, all further data is folded into a rolling tail buffer
    that keeps only the most recent ``max_capture`` characters.
    """

    def __init__(self, file, max_capture=510):
        # ``file`` may be None: capture-only mode, nothing is forwarded.
        self.file = file
        self.max_capture = max_capture
        self.capture_head_len = 0
        # u'' literals are unicode on Python 2 and plain str on Python 3,
        # removing the dependency on the module-level unicode() shim.
        self.capture_head = u''
        self.capture_tail = u''

    def write(self, data):
        """Forward ``data`` to the wrapped file and capture an excerpt.

        Returns the number of characters written, matching the text-file
        protocol (the previous implementation returned None; callers that
        ignore the return value are unaffected).
        """
        if self.file:
            self.file.write(data)
        head_room = self.max_capture - self.capture_head_len
        if head_room > 0:
            if len(data) <= head_room:
                # Fits entirely in the head.
                self.capture_head += data
                self.capture_head_len += len(data)
            else:
                # Fill the head, spill the remainder into the rolling tail.
                self.capture_head += data[:head_room]
                self.capture_head_len = self.max_capture
                self.capture_tail = (self.capture_tail +
                                     data[head_room:])[-self.max_capture:]
        else:
            # Head already full: everything rolls through the tail.
            self.capture_tail = (self.capture_tail + data)[-self.max_capture:]
        return len(data)

    def flush(self):
        if self.file:
            self.file.flush()

    def close(self):
        if self.file:
            self.file.close()

    def head(self):
        """Return the first ``max_capture`` characters written so far."""
        return self.capture_head

    def tail(self):
        """Return the most recent characters written after the head filled."""
        return self.capture_tail

    def headtail(self):
        """Return head and tail joined by '\\n..\\n' when both are non-empty."""
        if self.capture_head != u'' and self.capture_tail != u'':
            sep = u'\n..\n'
        else:
            sep = u''
        return self.capture_head + sep + self.capture_tail
def run_game(game, botcmds, options):
# file descriptors for replay and streaming formats
replay_log = options.get('replay_log', None)
stream_log = options.get('stream_log', None)
verbose_log = options.get('verbose_log', None)
debug_log = options.get('debug_log', None)
debug_in_replay = options.get('debug_in_replay', None)
debug_max_length = options.get('debug_max_length', None)
debug_max_count = options.get('debug_max_count', None)
# file descriptors for bots, should be list matching # of bots
input_logs = options.get('input_logs', [None]*len(botcmds))
output_logs = options.get('output_logs', [None]*len(botcmds))
error_logs = options.get('error_logs', [None]*len(botcmds))
capture_errors = options.get('capture_errors', False)
capture_errors_max = options.get('capture_errors_max', 510)
turns = int(options['turns'])
loadtime = float(options['loadtime']) / 1000
turntime = float(options['turntime']) / 1000
strict = options.get('strict', False)
end_wait = options.get('end_wait', 0.0)
location = options.get('location', 'localhost')
game_id = options.get('game_id', 0)
error = ''
bots = []
bot_status = []
bot_turns = []
debug_msgs = [[] for _ in range(len(botcmds))]
debug_msgs_length = [0 for _ in range(len(botcmds))]
debug_msgs_count = [0 for _ in range(len(botcmds))]
debug_msgs_exceeded = [False for _ in range(len(botcmds))]
#helper function to add messages for replay data
    def add_debug_messages(bot_index, turn, level, messages):
        """Record bot debug ``messages`` for inclusion in the replay data.

        Appends ``[turn, level, messages]`` to ``debug_msgs[bot_index]``
        unless the per-bot budget (``debug_max_count`` messages /
        ``debug_max_length`` total characters) has been exhausted, in which
        case a single "limit exceeded" marker is recorded instead and
        mirrored to the bot's error log.  No-op when ``debug_in_replay`` is
        disabled or ``messages`` is empty.  Mutates the closure lists
        ``debug_msgs``, ``debug_msgs_length``, ``debug_msgs_count`` and
        ``debug_msgs_exceeded`` from ``run_game``.
        """
        if (not debug_in_replay) or len(messages) == 0:
            return
        # Update the running totals only while the budget has not yet
        # tripped, so the size bookkeeping stops once the limit is hit.
        if not debug_msgs_exceeded[bot_index]:
            messages_size = sum(map(lambda m: len(m), messages))
            debug_msgs_length[bot_index] += messages_size
            debug_msgs_count[bot_index] += len(messages)
            if (debug_msgs_count[bot_index] > debug_max_count) or (
                debug_msgs_length[bot_index] > debug_max_length):
                # Remember that the limit was exceeded so later calls
                # short-circuit straight to the marker branch below.
                debug_msgs_exceeded[bot_index] = True
        if (debug_msgs_exceeded[bot_index]):
            # The marker is recorded at level 2 (the level used for error
            # messages elsewhere in run_game) on every call past the limit.
            debug_msgs[bot_index].append([turn, 2, ["Exceeded debug messages limit."]])
            if error_logs and error_logs[bot_index]:
                error_logs[bot_index].write("Exceeded debug messages limit.\n")
        else:
            debug_msgs[bot_index].append([turn, level, messages])
if capture_errors:
error_logs = [HeadTail(log, capture_errors_max) for log in error_logs]
try:
# TODO: where did this come from?? do we need it??
for b, bot in enumerate(botcmds):
# this struct is given to us from the playgame.py file
bot_cwd, bot_path, bot_name = bot
# generate the appropriate command from file extension
bot_cmd = generate_cmd(bot_path)
# generate the sandbox from the bot working directory
sandbox = get_sandbox(bot_cwd, protected_files=[bot_path], secure=options.get('secure_jail', None))
if bot_cmd:
sandbox.start(bot_cmd)
bots.append(sandbox)
bot_status.append('alive')
bot_turns.append(0)
# ensure it started
if not sandbox.is_alive:
bot_status[-1] = 'crashed 0'
bot_turns[-1] = 0
if verbose_log:
verbose_log.write('bot %s did not start\n' % bot_name)
game.kill_player(b)
sandbox.pause()
if not bot_cmd:
# couldnt generate bot command - couldnt recognize the language of the code
add_debug_messages(b, 0, 2, ["Couldnt recognize code language. Are you sure code files are correct?"])
if stream_log:
# stream the start info - including non-player info
stream_log.write(game.get_player_start())
stream_log.flush()
if verbose_log:
verbose_log.write('running for %s turns\n' % turns)
for turn in range(turns+1):
if turn == 0:
game.start_game()
# send game state to each player
for b, bot in enumerate(bots):
if game.is_alive(b):
if turn == 0:
start = game.get_player_start(b) + 'ready\n'
bot.write(start)
if input_logs and input_logs[b]:
input_logs[b].write(start)
input_logs[b].flush()
else:
state = 'turn ' + str(turn) + '\n' + game.get_player_state(b) + 'go\n'
bot.write(state)
if input_logs and input_logs[b]:
input_logs[b].write(state)
input_logs[b].flush()
bot_turns[b] = turn
if turn > 0:
if stream_log:
stream_log.write('turn %s\n' % turn)
stream_log.write('score %s\n' % ' '.join([str(s) for s in game.get_scores()]))
stream_log.write(game.get_state())
stream_log.flush()
game.start_turn()
# get moves from each player
if turn == 0:
time_limit = loadtime
elif turn == 1:
time_limit = max([turntime * 10, 1.500])
else:
time_limit = turntime
if options.get('serial', False):
simul_num = int(options['serial']) # int(True) is 1
else:
simul_num = len(bots)
bot_moves = [[] for b in bots]
error_lines = [[] for b in bots]
statuses = [None for b in bots]
bot_list = [(b, bot) for b, bot in enumerate(bots)
if game.is_alive(b)]
#random.shuffle(bot_list)
for group_num in range(0, len(bot_list), simul_num):
pnums, pbots = zip(*bot_list[group_num:group_num + simul_num])
# get the moves from each bot
moves, errors, status = get_moves(game, pbots, pnums,
time_limit, turn)
for p, b in enumerate(pnums):
bot_moves[b] = moves[p]
error_lines[b] = errors[p]
statuses[b] = status[p]
# print debug messages from bots
if debug_log:
for b, moves in enumerate(bot_moves):
bot_name = botcmds[b][2]
messages = []
for move in moves:
if not move.startswith('m'):
# break since messages come only before orders
break
messages.append(base64.b64decode(move.split(' ')[1]))
if messages:
debug_log.write('turn %4d bot %s Debug prints:\n' % (turn, bot_name))
debug_log.write('Debug>> ' + '\nDebug>> '.join(messages)+'\n')
add_debug_messages(b, turn, 0, messages)
# handle any logs that get_moves produced
for b, errors in enumerate(error_lines):
if errors:
if error_logs and error_logs[b]:
error_logs[b].write(unicode('\n').join(errors)+unicode('\n'))
add_debug_messages(b, turn, 2, [unicode('\n').join(errors)+unicode('\n')])
# set status for timeouts and crashes
for b, status in enumerate(statuses):
if status != None:
bot_status[b] = status
bot_turns[b] = turn
# process all moves
bot_alive = [game.is_alive(b) for b in range(len(bots))]
if turn > 0 and not game.game_over():
for b, moves in enumerate(bot_moves):
valid, ignored, invalid = game.do_moves(b, moves)
bot_name = botcmds[b][2]
if output_logs and output_logs[b]:
output_logs[b].write('# turn %s\n' % turn)
if valid:
if output_logs and output_logs[b]:
output_logs[b].write('\n'.join(valid)+'\n')
output_logs[b].flush()
if ignored:
if error_logs and error_logs[b]:
error_logs[b].write('turn %4d bot %s ignored actions:\n' % (turn, bot_name))
error_logs[b].write('\n'.join(ignored)+'\n')
error_logs[b].flush()
if output_logs and output_logs[b]:
output_logs[b].write('\n'.join(ignored)+'\n')
output_logs[b].flush()
add_debug_messages(b, turn, 1, ignored)
if invalid:
if strict:
game.kill_player(b)
bot_status[b] = 'invalid'
bot_turns[b] = turn
if error_logs and error_logs[b]:
error_logs[b].write('turn %4d bot [%s] invalid actions:\n' % (turn, bot_name))
error_logs[b].write('\n'.join(invalid)+'\n')
error_logs[b].flush()
if output_logs and output_logs[b]:
output_logs[b].write('\n'.join(invalid)+'\n')
output_logs[b].flush()
add_debug_messages(b, turn, 1, invalid)
if turn > 0:
game.finish_turn()
# send ending info to eliminated bots
bots_eliminated = []
for b, alive in enumerate(bot_alive):
if alive and not game.is_alive(b):
bots_eliminated.append(b)
for b in bots_eliminated:
if verbose_log:
verbose_log.write('turn %4d bot %s defeated\n' % (turn, bot_name))
if bot_status[b] == 'alive': # could be invalid move
bot_status[b] = 'defeated'
bot_turns[b] = turn
score_line ='score %s\n' % ' '.join([str(s) for s in game.get_scores(b)])
status_line = 'status %s\n' % ' '.join(map(str, game.order_for_player(b, bot_status)))
status_line += 'playerturns %s\n' % ' '.join(map(str, game.order_for_player(b, bot_turns)))
end_line = 'end\nplayers %s\n' % len(bots) + score_line + status_line
state = end_line + game.get_player_state(b) + 'go\n'
bots[b].write(state)
if input_logs and input_logs[b]:
input_logs[b].write(state)
input_logs[b].flush()
if end_wait:
bots[b].resume()
if bots_eliminated and end_wait:
if verbose_log:
verbose_log.write('waiting {0} seconds for bots to process end turn\n'.format(end_wait))
time.sleep(end_wait)
for b in bots_eliminated:
bots[b].kill()
# with verbose log we want to display the following <pirateCount> <islandCount> <Ranking/leading> <scores>
if verbose_log:
stats = game.get_stats()
stat_keys = sorted(stats.keys())
s = 'turn %4d stats: ' % turn
if turn % 50 == 0:
verbose_log.write(' '*len(s))
for key in stat_keys:
values = stats[key]
verbose_log.write(' {0:^{1}}'.format(key, max(len(key), len(str(values)))))
verbose_log.write('\n')
verbose_log.write(s)
for key in stat_keys:
values = stats[key]
if type(values) == list:
values = '[' + ','.join(map(str,values)) + ']'
verbose_log.write(' {0:^{1}}'.format(values, max(len(key), len(str(values)))))
verbose_log.write('\n')
else:
# no verbose log - print progress every 20 turns
if turn % 20 == 0:
turn_prompt = "turn #%d of max %d\n" % (turn,turns)
sys.stdout.write(turn_prompt)
#alive = [game.is_alive(b) for b in range(len(bots))]
#if sum(alive) <= 1:
if game.game_over():
break
# send bots final state and score, output to replay file
game.finish_game()
score_line ='score %s\n' % ' '.join(map(str, game.get_scores()))
status_line = ''
if game.get_winner() and len(game.get_winner()) == 1:
winner = game.get_winner()[0]
winner_line = 'player %s [%s] is the Winner!\n' % (winner + 1, botcmds[winner][2])
else:
winner_line = 'Game finished at a tie - there is no winner'
status_line += winner_line
end_line = 'end\nplayers %s\n' % len(bots) + score_line + status_line
if stream_log:
stream_log.write(end_line)
stream_log.write(game.get_state())
stream_log.flush()
if verbose_log:
verbose_log.write(score_line)
verbose_log.write(status_line)
verbose_log.flush()
else:
sys.stdout.write(score_line)
sys.stdout.write(status_line)
for b, bot in enumerate(bots):
if game.is_alive(b):
score_line ='score %s\n' % ' '.join([str(s) for s in game.get_scores(b)])
status_line = 'status %s\n' % ' '.join(map(str, game.order_for_player(b, bot_status)))
status_line += 'playerturns %s\n' % ' '.join(map(str, game.order_for_player(b, bot_turns)))
end_line = 'end\nplayers %s\n' % len(bots) + score_line + status_line
state = end_line + game.get_player_state(b) + 'go\n'
bot.write(state)
if input_logs and input_logs[b]:
input_logs[b].write(state)
input_logs[b].flush()
except Exception as e:
# TODO: sanitize error output, tracebacks shouldn't be sent to workers
error = traceback.format_exc()
sys.stderr.write('Error Occurred\n')
sys.stderr.write(str(e) + '\n')
if verbose_log:
verbose_log.write(error)
# error = str(e)
finally:
if end_wait:
for bot in bots:
bot.resume()
if verbose_log and end_wait > 1:
verbose_log.write('waiting {0} seconds for bots to process end turn\n'.format(end_wait))
time.sleep(end_wait)
for bot in bots:
if bot.is_alive:
bot.kill()
bot.release()
if error:
game_result = { 'error': error }
else:
scores = game.get_scores()
game_result = {
'challenge': game.__class__.__name__.lower(),
'location': location,
'game_id': game_id,
'status': bot_status,
'playerturns': bot_turns,
'score': scores,
'winner_names': [botcmds[win][2] for win in game.get_winner()],
'rank': [sorted(scores, reverse=True).index(x) for x in scores],
'replayformat': 'json',
'replaydata': game.get_replay(),
'game_length': turn,
'debug_messages': debug_msgs,
}
if capture_errors:
game_result['errors'] = [head.headtail() for head in error_logs]
if replay_log:
json.dump(game_result, replay_log, sort_keys=True)
return game_result
def get_moves(game, bots, bot_nums, time_limit, turn):
    """Collect one turn's worth of moves from every bot, within *time_limit* seconds.

    game       -- game object; used to check liveness and to kill misbehaving bots
    bots       -- sandbox objects (provide read_line/read_error/resume/pause/kill)
    bot_nums   -- player index in *game* for each entry of *bots* (parallel list)
    time_limit -- wall-clock budget in seconds shared by all bots this turn
    turn       -- current turn number (used only in error messages)

    Returns (bot_moves, error_lines, statuses): three lists parallel to *bots*
    holding the move lines each bot sent, the stderr lines captured, and
    'crashed' / 'timeout' / None per bot.

    NOTE: uses the Python-2 ``unicode`` builtin when formatting error lines.
    """
    # bots that are already dead in the game count as "finished" up front
    bot_finished = [not game.is_alive(bot_nums[b]) for b in range(len(bots))]
    bot_moves = [[] for b in bots]
    error_lines = [[] for b in bots]
    statuses = [None for b in bots]
    # resume all bots
    for bot in bots:
        if bot.is_alive:
            bot.resume()
    # don't start timing until the bots are started
    start_time = time.time()
    # loop until received all bots send moves or are dead
    # or when time is up
    while (sum(bot_finished) < len(bot_finished) and
           time.time() - start_time < time_limit):
        time.sleep(0.003)
        for b, bot in enumerate(bots):
            if bot_finished[b]:
                continue # already got bot moves
            if not bot.is_alive:
                # sandbox died mid-turn: drain its stderr, mark crashed,
                # and remove the player from the game
                error_lines[b].append(unicode('turn %4d bot %s crashed') % (turn, bot_nums[b]))
                statuses[b] = 'crashed'
                line = bot.read_error()
                while line != None:
                    error_lines[b].append(line)
                    line = bot.read_error()
                bot_finished[b] = True
                game.kill_player(bot_nums[b])
                continue # bot is dead
            # read a maximum of 100 lines per iteration
            for x in range(100):
                line = bot.read_line()
                if line is None:
                    # still waiting for more data
                    break
                line = line.strip()
                if line.lower() == 'go':
                    bot_finished[b] = True
                    # bot finished sending data for this turn
                    break
                bot_moves[b].append(line)
            for x in range(100):
                line = bot.read_error()
                if line is None:
                    break
                error_lines[b].append(line)
    # pause all bots again
    for bot in bots:
        if bot.is_alive:
            bot.pause()
    # check for any final output from bots
    # (same drain as above, but with hard caps on lines kept)
    for b, bot in enumerate(bots):
        if bot_finished[b]:
            continue # already got bot moves
        if not bot.is_alive:
            error_lines[b].append(unicode('turn %4d bot %s crashed') % (turn, bot_nums[b]))
            statuses[b] = 'crashed'
            line = bot.read_error()
            while line != None:
                error_lines[b].append(line)
                line = bot.read_error()
            bot_finished[b] = True
            game.kill_player(bot_nums[b])
            continue # bot is dead
        line = bot.read_line()
        # cap at 40000 move lines per bot to bound memory
        while line is not None and len(bot_moves[b]) < 40000:
            line = line.strip()
            if line.lower() == 'go':
                bot_finished[b] = True
                # bot finished sending data for this turn
                break
            bot_moves[b].append(line)
            line = bot.read_line()
        line = bot.read_error()
        # cap at 1000 stderr lines per bot
        while line is not None and len(error_lines[b]) < 1000:
            error_lines[b].append(line)
            line = bot.read_error()
    # kill timed out bots
    for b, finished in enumerate(bot_finished):
        if not finished:
            error_lines[b].append(unicode('turn %4d bot %s timed out') % (turn, bot_nums[b]))
            statuses[b] = 'timeout'
            bot = bots[b]
            for x in range(100):
                line = bot.read_error()
                if line is None:
                    break
                error_lines[b].append(line)
            game.kill_player(bot_nums[b])
            bots[b].kill()
    return bot_moves, error_lines, statuses
def get_java_path():
    """Return a path to a usable ``java`` executable.

    On non-Windows platforms ``java`` is assumed to be on PATH.  On Windows
    the standard install folders are scanned for a JDK 1.6/1.7/1.8; if none
    is found we fall back to plain ``'java'`` after printing a warning.
    """
    if os.name != "nt":
        return 'java'
    # TODO: search path as well!
    # TODO: actually run os.system('java -version') to see version
    candidates = []
    for install_root in ("C:\\Program Files\\java", "C:\\Program Files (x86)\\java"):
        if os.path.exists(install_root):
            candidates.extend(
                os.path.join(install_root, entry) for entry in os.listdir(install_root))
    # walk the list back-to-front so a higher version wins
    for candidate in reversed(candidates):
        if 'jdk' in candidate.lower() and any(ver in candidate for ver in ('1.6', '1.7', '1.8')):
            return os.path.join(candidate, "bin", "java.exe")
    print("Cannot find path of Java JDK version 1.6 or over!")
    # we should really quit but since we dont yet search path - first try default
    return 'java'
def get_dot_net_version():
    """Placeholder: .NET/Mono version detection is not implemented yet."""
    # Intentionally a no-op; returns None.
    pass
def select_files(root, files, suffix):
    """
    Return the full paths (joined onto *root*) of the entries of *files*
    whose extension equals *suffix*.

    root   -- directory containing the files
    files  -- iterable of bare file names
    suffix -- extension to keep, including the leading dot (e.g. ``'.py'``)
    """
    # comprehension replaces the manual append loop; also avoids shadowing
    # the builtin name ``file`` (a builtin in Python 2)
    return [join(root, name) for name in files if splitext(name)[1] == suffix]
def build_recursive_dir_tree(path, suffix):
    """
    Recursively walk *path* and collect every file whose extension equals
    *suffix*; per-directory filtering is delegated to ``select_files``.
    """
    matches = []
    for dirpath, _dirnames, filenames in walk(path):
        matches.extend(select_files(dirpath, filenames, suffix))
    return matches
def recognize_language(bot_path):
    """Classify *bot_path* as 'python', 'csh' or 'java' (None if unknown).

    A single file is classified by extension; a directory is classified by
    whichever language has the most source files in it (ties resolved in
    the order java, csh, python).
    """
    # Single-file case: decide purely by extension.
    if not os.path.isdir(bot_path):
        if bot_path.endswith(('.py', '.pyc')):
            return 'python'
        if bot_path.endswith('.cs'):
            return 'csh'
        if bot_path.endswith('.java'):
            return 'java'
        return None
    # Directory case: count source files of each kind, pick the majority.
    counts = [
        ('java', len(build_recursive_dir_tree(bot_path, '.java'))),
        ('csh', len(build_recursive_dir_tree(bot_path, '.cs'))),
        ('python', len(build_recursive_dir_tree(bot_path, '.py'))),
    ]
    best_lang, best_count = max(counts, key=lambda pair: pair[1])
    if best_count == 0:
        return None
    return best_lang
def generate_cmd(bot_path):
    ''' Generates the command to run and returns other information from the filename given '''
    # All runner helpers live next to this module.
    runner_dir = os.path.dirname(__file__)
    csh_runner_path = os.path.join(runner_dir, "cshRunner.exe")
    java_runner_path = os.path.join(runner_dir, "javaRunner.jar")
    python_runner_path = os.path.join(runner_dir, "pythonRunner.py")
    lang = recognize_language(bot_path)
    if lang == 'python':
        return 'python "%s" "%s"' % (python_runner_path, bot_path)
    if lang == 'csh':
        # Run with Mono if Unix. But in the future just receive source code (.cs) and compile on the fly
        if os.name == "nt":
            return '"%s" "%s"' % (csh_runner_path, bot_path)
        return 'mono --debug %s %s' % (csh_runner_path, bot_path)
    if lang == 'java':
        return '"%s" -jar "%s" "%s"' % (get_java_path(), java_runner_path, bot_path)
    # Unrecognized language: report why and return an empty command.
    if os.path.isdir(bot_path):
        sys.stdout.write('Couldnt find code in folder! %s\n' % (bot_path))
    else:
        sys.stdout.write('Unknown file format! %s\nPlease give file that ends with .cs , .java or .py\n' % (bot_path))
    return ''
|
[
"sys.stdout.write",
"json.dump",
"os.path.isdir",
"os.path.dirname",
"os.walk",
"os.path.exists",
"time.sleep",
"time.time",
"traceback.format_exc",
"os.path.splitext",
"sys.stderr.write",
"os.path.join",
"os.listdir"
] |
[((18473, 18484), 'time.time', 'time.time', ([], {}), '()\n', (18482, 18484), False, 'import time\n'), ((21899, 21940), 'os.path.exists', 'os.path.exists', (['"""C:\\\\Program Files\\\\java"""'], {}), "('C:\\\\Program Files\\\\java')\n", (21913, 21940), False, 'import os\n'), ((22057, 22104), 'os.path.exists', 'os.path.exists', (['"""C:\\\\Program Files (x86)\\\\java"""'], {}), "('C:\\\\Program Files (x86)\\\\java')\n", (22071, 22104), False, 'import os\n'), ((23190, 23200), 'os.walk', 'walk', (['path'], {}), '(path)\n', (23194, 23200), False, 'from os import walk\n'), ((17992, 18042), 'json.dump', 'json.dump', (['game_result', 'replay_log'], {'sort_keys': '(True)'}), '(game_result, replay_log, sort_keys=True)\n', (18001, 18042), False, 'import json\n'), ((18684, 18701), 'time.sleep', 'time.sleep', (['(0.003)'], {}), '(0.003)\n', (18694, 18701), False, 'import time\n'), ((22883, 22899), 'os.path.join', 'join', (['root', 'file'], {}), '(root, file)\n', (22887, 22899), False, 'from os.path import splitext, join\n'), ((23418, 23441), 'os.path.isdir', 'os.path.isdir', (['bot_path'], {}), '(bot_path)\n', (23431, 23441), False, 'import os\n'), ((24387, 24412), 'os.path.dirname', 'os.path.dirname', (['__file__'], {}), '(__file__)\n', (24402, 24412), False, 'import os\n'), ((24467, 24492), 'os.path.dirname', 'os.path.dirname', (['__file__'], {}), '(__file__)\n', (24482, 24492), False, 'import os\n'), ((24550, 24575), 'os.path.dirname', 'os.path.dirname', (['__file__'], {}), '(__file__)\n', (24565, 24575), False, 'import os\n'), ((15721, 15749), 'sys.stdout.write', 'sys.stdout.write', (['score_line'], {}), '(score_line)\n', (15737, 15749), False, 'import sys\n'), ((15762, 15791), 'sys.stdout.write', 'sys.stdout.write', (['status_line'], {}), '(status_line)\n', (15778, 15791), False, 'import sys\n'), ((16614, 16636), 'traceback.format_exc', 'traceback.format_exc', ([], {}), '()\n', (16634, 16636), False, 'import traceback\n'), ((16645, 16681), 'sys.stderr.write', 
'sys.stderr.write', (['"""Error Occurred\n"""'], {}), "('Error Occurred\\n')\n", (16661, 16681), False, 'import sys\n'), ((17062, 17082), 'time.sleep', 'time.sleep', (['end_wait'], {}), '(end_wait)\n', (17072, 17082), False, 'import time\n'), ((21960, 22002), 'os.path.join', 'os.path.join', (['"""C:\\\\Program Files\\\\java"""', 'i'], {}), "('C:\\\\Program Files\\\\java', i)\n", (21972, 22002), False, 'import os\n'), ((22124, 22172), 'os.path.join', 'os.path.join', (['"""C:\\\\Program Files (x86)\\\\java"""', 'i'], {}), "('C:\\\\Program Files (x86)\\\\java', i)\n", (22136, 22172), False, 'import os\n'), ((22420, 22457), 'os.path.join', 'os.path.join', (['java', '"""bin"""', '"""java.exe"""'], {}), "(java, 'bin', 'java.exe')\n", (22432, 22457), False, 'import os\n'), ((22914, 22928), 'os.path.splitext', 'splitext', (['file'], {}), '(file)\n', (22922, 22928), False, 'from os.path import splitext, join\n'), ((13371, 13391), 'time.sleep', 'time.sleep', (['end_wait'], {}), '(end_wait)\n', (13381, 13391), False, 'import time\n'), ((18636, 18647), 'time.time', 'time.time', ([], {}), '()\n', (18645, 18647), False, 'import time\n'), ((22011, 22048), 'os.listdir', 'os.listdir', (['"""C:\\\\Program Files\\\\java"""'], {}), "('C:\\\\Program Files\\\\java')\n", (22021, 22048), False, 'import os\n'), ((22181, 22224), 'os.listdir', 'os.listdir', (['"""C:\\\\Program Files (x86)\\\\java"""'], {}), "('C:\\\\Program Files (x86)\\\\java')\n", (22191, 22224), False, 'import os\n'), ((25230, 25253), 'os.path.isdir', 'os.path.isdir', (['bot_path'], {}), '(bot_path)\n', (25243, 25253), False, 'import os\n'), ((14640, 14669), 'sys.stdout.write', 'sys.stdout.write', (['turn_prompt'], {}), '(turn_prompt)\n', (14656, 14669), False, 'import sys\n'), ((25267, 25331), 'sys.stdout.write', 'sys.stdout.write', (["('Couldnt find code in folder! %s\\n' % bot_path)"], {}), "('Couldnt find code in folder! 
%s\\n' % bot_path)\n", (25283, 25331), False, 'import sys\n'), ((25360, 25480), 'sys.stdout.write', 'sys.stdout.write', (['("""Unknown file format! %s\nPlease give file that ends with .cs , .java or .py\n"""\n % bot_path)'], {}), '(\n """Unknown file format! %s\nPlease give file that ends with .cs , .java or .py\n"""\n % bot_path)\n', (25376, 25480), False, 'import sys\n')]
|
#!/usr/bin/env python
"""Check Identity class"""
from matplotlib import pyplot as plt
import numpy as N
from load import ROOT as R
from matplotlib.ticker import MaxNLocator
from gna import constructors as C
from gna.bindings import DataType
from gna.unittest import *
from gna import context
#
# Create the matrix
#
def test_io(opts):
    """Route a numpy matrix through the Identity transformation and check
    that the C++-side output matches the numpy input."""
    print('Test inputs/outputs (Identity)')
    mat = N.arange(12, dtype='d').reshape(3, 4)
    print( 'Input matrix (numpy)' )
    print( mat )
    print()
    #
    # Create transformations
    #
    points = C.Points(mat)
    identity = R.Identity()
    # select the 'identity_gpuargs_h' implementation -- presumably the
    # host-side gpuargs variant; confirm against GNA documentation
    identity.identity.switchFunction('identity_gpuargs_h')
    # bind the points output to the identity input
    points.points.points >> identity.identity.source
    identity.print()
    res = identity.identity.target.data()
    dt = identity.identity.target.datatype()
    assert N.allclose(mat, res), "C++ and Python results doesn't match"
    #
    # Dump
    #
    print( 'Eigen dump (C++)' )
    identity.dump()
    print()
    print( 'Result (C++ Data to numpy)' )
    print( res )
    print()
    print( 'Datatype:', str(dt) )
def gpuargs_make(nsname, mat1, mat2):
    """Build a Dummy transformation plus two Points inputs inside a fresh
    namespace *nsname* holding three fixed dummy parameters.

    Returns (dummy, points1, points2, ns).
    """
    from gna.env import env
    ns = env.globalns(nsname)
    # three fixed parameters the Dummy transformation will reference
    ns.reqparameter('par1', central=1.0, fixed=True, label='Dummy parameter 1')
    ns.reqparameter('par2', central=1.5, fixed=True, label='Dummy parameter 2')
    ns.reqparameter('par3', central=1.01e5, fixed=True, label='Dummy parameter 3')
    ns.printparameters(labels=True)
    points1, points2 = C.Points(mat1), C.Points(mat2)
    # create the Dummy inside the namespace so it picks up par1..par3
    with ns:
        dummy = C.Dummy(4, "dummy", ['par1', 'par2', 'par3'])
    return dummy, points1, points2, ns
@floatcopy(globals(), addname=True)
def test_vars_01_local(opts, function_name):
    """Exercise Dummy with the 'dummy_gpuargs_h_local' implementation:
    out1 must be all zeros and out2 all ones."""
    print('Test inputs/outputs/variables (Dummy)')
    mat1 = N.arange(12, dtype='d').reshape(3, 4)
    mat2 = N.arange(15, dtype='d').reshape(5, 3)
    dummy, points1, points2, ns = gpuargs_make(function_name, mat1, mat2)
    dummy.dummy.switchFunction('dummy_gpuargs_h_local')
    dummy.add_input(points1, 'input1')
    dummy.add_input(points2, 'input2')
    dummy.add_output('out1')
    dummy.add_output('out2')
    dummy.print()
    res1 = dummy.dummy.out1.data()
    res2 = dummy.dummy.out2.data()
    dt1 = dummy.dummy.out1.datatype()
    dt2 = dummy.dummy.out2.datatype()
    assert N.allclose(res1, 0.0), "C++ and Python results doesn't match"
    assert N.allclose(res2, 1.0), "C++ and Python results doesn't match"
    print( 'Result (C++ Data to numpy)' )
    print( res1 )
    print( res2 )
    print()
    print( 'Datatype:', str(dt1) )
    print( 'Datatype:', str(dt2) )
    print('Change 3d variable')
    # changing par3 taints the transformation; re-read out1 to trigger
    # recomputation (the value itself is not asserted here)
    ns['par3'].set(-1.0)
    res1 = dummy.dummy.out1.data()
@floatcopy(globals(), addname=True)
def test_vars_02(opts, function_name):
    """Same checks as test_vars_01_local, but with variables registered
    through a context.manager and the non-local 'dummy_gpuargs_h' variant."""
    print('Test inputs/outputs/variables (Dummy)')
    mat1 = N.arange(12, dtype='d').reshape(3, 4)
    mat2 = N.arange(15, dtype='d').reshape(5, 3)
    with context.manager(100) as manager:
        dummy, points1, points2, ns = gpuargs_make(function_name, mat1, mat2)
        # register all namespace variables with the manager
        manager.setVariables(C.stdvector([par.getVariable() for (name, par) in ns.walknames()]))
    dummy.dummy.switchFunction('dummy_gpuargs_h')
    dummy.add_input(points1, 'input1')
    dummy.add_input(points2, 'input2')
    dummy.add_output('out1')
    dummy.add_output('out2')
    dummy.print()
    res1 = dummy.dummy.out1.data()
    res2 = dummy.dummy.out2.data()
    dt1 = dummy.dummy.out1.datatype()
    dt2 = dummy.dummy.out2.datatype()
    assert N.allclose(res1, 0.0), "C++ and Python results doesn't match"
    assert N.allclose(res2, 1.0), "C++ and Python results doesn't match"
    print( 'Result (C++ Data to numpy)' )
    print( res1 )
    print( res2 )
    print()
    print( 'Datatype:', str(dt1) )
    print( 'Datatype:', str(dt2) )
    print('Change 3d variable')
    # re-read out1 after changing par3 to trigger recomputation
    ns['par3'].set(-1.0)
    res1 = dummy.dummy.out1.data()
if __name__ == "__main__":
from argparse import ArgumentParser
parser = ArgumentParser()
# parser.add_argument('-g', '--gpuargs', action='store_true')
run_unittests(globals(), parser.parse_args())
|
[
"argparse.ArgumentParser",
"numpy.allclose",
"gna.constructors.Points",
"gna.env.env.globalns",
"load.ROOT.Identity",
"numpy.arange",
"gna.constructors.Dummy",
"gna.context.manager"
] |
[((551, 564), 'gna.constructors.Points', 'C.Points', (['mat'], {}), '(mat)\n', (559, 564), True, 'from gna import constructors as C\n'), ((580, 592), 'load.ROOT.Identity', 'R.Identity', ([], {}), '()\n', (590, 592), True, 'from load import ROOT as R\n'), ((829, 849), 'numpy.allclose', 'N.allclose', (['mat', 'res'], {}), '(mat, res)\n', (839, 849), True, 'import numpy as N\n'), ((1161, 1181), 'gna.env.env.globalns', 'env.globalns', (['nsname'], {}), '(nsname)\n', (1173, 1181), False, 'from gna.env import env\n'), ((2316, 2337), 'numpy.allclose', 'N.allclose', (['res1', '(0.0)'], {}), '(res1, 0.0)\n', (2326, 2337), True, 'import numpy as N\n'), ((2389, 2410), 'numpy.allclose', 'N.allclose', (['res2', '(1.0)'], {}), '(res2, 1.0)\n', (2399, 2410), True, 'import numpy as N\n'), ((3516, 3537), 'numpy.allclose', 'N.allclose', (['res1', '(0.0)'], {}), '(res1, 0.0)\n', (3526, 3537), True, 'import numpy as N\n'), ((3589, 3610), 'numpy.allclose', 'N.allclose', (['res2', '(1.0)'], {}), '(res2, 1.0)\n', (3599, 3610), True, 'import numpy as N\n'), ((3987, 4003), 'argparse.ArgumentParser', 'ArgumentParser', ([], {}), '()\n', (4001, 4003), False, 'from argparse import ArgumentParser\n'), ((1491, 1505), 'gna.constructors.Points', 'C.Points', (['mat1'], {}), '(mat1)\n', (1499, 1505), True, 'from gna import constructors as C\n'), ((1507, 1521), 'gna.constructors.Points', 'C.Points', (['mat2'], {}), '(mat2)\n', (1515, 1521), True, 'from gna import constructors as C\n'), ((1551, 1596), 'gna.constructors.Dummy', 'C.Dummy', (['(4)', '"""dummy"""', "['par1', 'par2', 'par3']"], {}), "(4, 'dummy', ['par1', 'par2', 'par3'])\n", (1558, 1596), True, 'from gna import constructors as C\n'), ((2941, 2961), 'gna.context.manager', 'context.manager', (['(100)'], {}), '(100)\n', (2956, 2961), False, 'from gna import context\n'), ((392, 415), 'numpy.arange', 'N.arange', (['(12)'], {'dtype': '"""d"""'}), "(12, dtype='d')\n", (400, 415), True, 'import numpy as N\n'), ((1781, 1804), 'numpy.arange', 
'N.arange', (['(12)'], {'dtype': '"""d"""'}), "(12, dtype='d')\n", (1789, 1804), True, 'import numpy as N\n'), ((1830, 1853), 'numpy.arange', 'N.arange', (['(15)'], {'dtype': '"""d"""'}), "(15, dtype='d')\n", (1838, 1853), True, 'import numpy as N\n'), ((2844, 2867), 'numpy.arange', 'N.arange', (['(12)'], {'dtype': '"""d"""'}), "(12, dtype='d')\n", (2852, 2867), True, 'import numpy as N\n'), ((2893, 2916), 'numpy.arange', 'N.arange', (['(15)'], {'dtype': '"""d"""'}), "(15, dtype='d')\n", (2901, 2916), True, 'import numpy as N\n')]
|
# Copyright (C) 2013 ~ 2016 - <NAME> <<EMAIL>>
# This program is Free Software see LICENSE file for details
import sublime
from ._typing import List
from Default.history_list import get_jump_history_for_view
class ExplorerPanel:
    """
    Creates a panel that can be used to explore nested options sets
    The data structure for the options is as follows:
        Options[
            {
                'title': 'Title Data'
                'details': 'Details Data',
                'location': 'File: {} Line: {} Column: {}',
                'position': 'filepath:line:col',
                'options': [
                    {
                        'title': 'Title Data'
                        'details': 'Details Data',
                        'location': 'File: {} Line: {} Column: {}',
                        'position': 'filepath:line:col',
                        'options': [
                        ]...
                    }
                ]
            }
        ]
    So we can nest as many levels as we want
    """
    def __init__(self, view: sublime.View, options: List) -> None:
        self.options = options
        self.view = view
        self.selected = []  # type: List
        # remember the current selection so it can be restored on cancel
        self.restore_point = view.sel()[0]
    def show(self, cluster: List, forced: bool=False) -> None:
        """Show the quick panel with the given options
        If *cluster* holds a single entry (and *forced* is False) we jump
        straight to it instead of showing a one-item panel.
        """
        if not cluster:
            cluster = self.options
        if len(cluster) == 1 and not forced:
            try:
                Jumper(self.view, cluster[0]['position']).jump()
            except KeyError:
                # no 'position' at this level; a sole nested option may have one
                if len(cluster[0].get('options', [])) == 1 and not forced:
                    Jumper(
                        self.view, cluster[0]['options'][0]['position']).jump()
            return
        self.last_cluster = cluster
        # build quick panel rows: [title, details?, location?]
        quick_panel_options = []
        for data in cluster:
            tmp = [data['title']]
            if 'details' in data:
                tmp.append(data['details'])
            if 'location' in data:
                tmp.append(data['location'])
            quick_panel_options.append(tmp)
        self.view.window().show_quick_panel(
            quick_panel_options,
            on_select=self.on_select,
            on_highlight=lambda index: self.on_select(index, True)
        )
    def on_select(self, index: int, transient: bool=False) -> None:
        """Called when an option is been made in the quick panel
        index == -1 means the panel was cancelled; *transient* is True for
        highlight-only events.
        """
        if index == -1:
            self._restore_view()
            return
        cluster = self.last_cluster
        node = cluster[index]
        if transient and 'options' in node:
            # don't descend into sub-options on mere highlight
            return
        if 'options' in node:
            # descend one level, remembering where to go back to
            self.prev_cluster = self.last_cluster
            opts = node['options'][:]
            opts.insert(0, {'title': '<- Go Back', 'position': 'back'})
            sublime.set_timeout(lambda: self.show(opts), 0)
        else:
            if node['position'] == 'back' and not transient:
                sublime.set_timeout(lambda: self.show(self.prev_cluster), 0)
            elif node['position'] != 'back':
                Jumper(self.view, node['position']).jump(transient)
    def _restore_view(self):
        """Restore the view and location
        """
        sublime.active_window().focus_view(self.view)
        self.view.show(self.restore_point)
        if self.view.sel()[0] != self.restore_point:
            self.view.sel().clear()
            self.view.sel().add(self.restore_point)
class Jumper:
    """Jump to the specified file line and column making an indicator to toggle
    """
    def __init__(self, view: sublime.View, position: str) -> None:
        # position is an encoded 'filepath:line:col' string
        self.position = position
        self.view = view
    def jump(self, transient: bool=False) -> None:
        """Jump to the selection
        Pushes the current selection onto the jump history first so the
        user can navigate back.
        """
        flags = sublime.ENCODED_POSITION
        if transient is True:
            flags |= sublime.TRANSIENT
        get_jump_history_for_view(self.view).push_selection(self.view)
        sublime.active_window().open_file(self.position, flags)
        if not transient:
            self._toggle_indicator()
    def _toggle_indicator(self) -> None:
        """Toggle mark indicator to focus the cursor
        Flashes a bookmark region at the target point three times
        (300ms on / 300ms off).
        """
        # path component is unused here; only line/column matter
        path, line, column = self.position.rsplit(':', 2)
        pt = self.view.text_point(int(line) - 1, int(column))
        region_name = 'anaconda.indicator.{}.{}'.format(
            self.view.id(), line
        )
        for i in range(3):
            delta = 300 * i * 2
            sublime.set_timeout(lambda: self.view.add_regions(
                region_name,
                [sublime.Region(pt, pt)],
                'comment',
                'bookmark',
                sublime.DRAW_EMPTY_AS_OVERWRITE
            ), delta)
            sublime.set_timeout(
                lambda: self.view.erase_regions(region_name),
                delta + 300
            )
|
[
"sublime.active_window",
"sublime.Region",
"Default.history_list.get_jump_history_for_view"
] |
[((3292, 3315), 'sublime.active_window', 'sublime.active_window', ([], {}), '()\n', (3313, 3315), False, 'import sublime\n'), ((3970, 4006), 'Default.history_list.get_jump_history_for_view', 'get_jump_history_for_view', (['self.view'], {}), '(self.view)\n', (3995, 4006), False, 'from Default.history_list import get_jump_history_for_view\n'), ((4041, 4064), 'sublime.active_window', 'sublime.active_window', ([], {}), '()\n', (4062, 4064), False, 'import sublime\n'), ((4657, 4679), 'sublime.Region', 'sublime.Region', (['pt', 'pt'], {}), '(pt, pt)\n', (4671, 4679), False, 'import sublime\n')]
|
from __future__ import annotations
import glob
import os
import uuid
from typing import Callable
import pandas as pd
import pytest
from black import itertools
from pyriksprot import interface, to_speech
from pyriksprot.corpus import tagged as tagged_corpus
from .utility import TAGGED_SOURCE_PATTERN, UTTERANCES_DICTS, create_utterances
# pylint: disable=redefined-outer-name
jj = os.path.join
@pytest.fixture(scope='module')
def utterances() -> list[interface.Utterance]:
    """Module-scoped fixture: sample utterances from utility.create_utterances."""
    return create_utterances()
def test_utterance_text():
    """The ``text`` property joins the utterance paragraphs with newlines."""
    utterance: interface.Utterance = interface.Utterance(u_id="A", speaker_hash="x", who="x", paragraphs=["X", "Y", "C"])
    expected = "X\nY\nC"
    assert utterance.text == expected
def test_utterance_checksumtext():
    """``checksum()`` of a fixed utterance is stable across runs."""
    utterance: interface.Utterance = interface.Utterance(u_id="A", speaker_hash="x", who="x", paragraphs=["X", "Y", "C"])
    checksum = utterance.checksum()
    assert checksum == '6060d006e0494206'
def test_utterances_to_dict():
    # NOTE(review): despite the name, this test exercises
    # to_speech.MergeByWhoSequence clustering; the sibling
    # ``test_utterances_who_sequences`` is the one that checks
    # UtteranceHelper.to_dict. The two names look swapped -- confirm
    # before renaming (renaming changes pytest-visible test ids).
    who_sequences: list[list[interface.Utterance]] = to_speech.MergeByWhoSequence().cluster(None)
    assert who_sequences == []
    who_sequences: list[list[interface.Utterance]] = to_speech.MergeByWhoSequence().cluster([])
    assert who_sequences == []
    # consecutive utterances by the same speaker merge into one sequence;
    # a speaker returning later starts a new sequence
    utterances: list[interface.Utterance] = [
        interface.Utterance(u_id=f'{uuid.uuid4()}', speaker_hash="xa1", who='A'),
        interface.Utterance(u_id=f'{uuid.uuid4()}', speaker_hash="xa1", who='A'),
        interface.Utterance(u_id=f'{uuid.uuid4()}', speaker_hash="xb1", who='B'),
        interface.Utterance(u_id=f'{uuid.uuid4()}', speaker_hash="xb1", who='B'),
        interface.Utterance(u_id=f'{uuid.uuid4()}', speaker_hash="xa2", who='A'),
    ]
    who_sequences: list[list[interface.Utterance]] = to_speech.MergeByWhoSequence().cluster(utterances)
    assert len(who_sequences) == 3
    assert len(who_sequences[0]) == 2
    assert len(who_sequences[1]) == 2
    assert len(who_sequences[2]) == 1
    assert set(x.who for x in who_sequences[0]) == {'A'}
    assert set(x.who for x in who_sequences[1]) == {'B'}
    assert set(x.who for x in who_sequences[2]) == {'A'}
def test_utterances_who_sequences(utterances: list[interface.Utterance]):
    # NOTE(review): name mismatch -- this asserts UtteranceHelper.to_dict
    # output; see the apparently-swapped sibling ``test_utterances_to_dict``.
    data = interface.UtteranceHelper.to_dict(utterances)
    assert data == UTTERANCES_DICTS
def test_utterances_to_csv(utterances: list[interface.Utterance]):
    """A CSV round-trip preserves every utterance attribute."""
    csv_data: str = interface.UtteranceHelper.to_csv(utterances)
    restored = interface.UtteranceHelper.from_csv(csv_data)
    assert [u.__dict__ for u in utterances] == [u.__dict__ for u in restored]
def test_utterances_to_json(utterances: list[interface.Utterance]):
    """A JSON round-trip preserves every utterance attribute."""
    json_data: str = interface.UtteranceHelper.to_json(utterances)
    restored = interface.UtteranceHelper.from_json(json_data)
    assert [u.__dict__ for u in utterances] == [u.__dict__ for u in restored]
def test_utterances_to_pandas(utterances: list[interface.Utterance]):
    """``to_dataframe`` output matches the expected utterance records."""
    frame: pd.DataFrame = interface.UtteranceHelper.to_dataframe(utterances)
    records = frame.reset_index().to_dict(orient='records')
    assert records == UTTERANCES_DICTS
def test_protocol_create(utterances: list[interface.Utterance]):
    """Protocol construction exposes utterances, metadata, text and checksum.

    Fixes: the original asserted ``name`` and ``date`` twice each with
    identical literals, and shadowed the loop variable name ``text`` with
    the utterance objects in the final join.
    """
    protocol: interface.Protocol = interface.Protocol(
        date="1958", name="prot-1958-fake", utterances=utterances, speaker_notes={}
    )
    assert protocol is not None
    assert len(protocol.utterances) == 5
    assert len(protocol) == 5
    assert protocol.name == "prot-1958-fake"
    assert protocol.date == "1958"
    assert protocol.has_text, 'has text'
    assert protocol.checksum() == '7e5112f9db8c8462d89fac08714ce15b432d7733', 'checksum'
    assert protocol.text == '\n'.join(u.text for u in utterances)
def test_protocol_preprocess():
    """``Protocol.preprocess`` applies the callable to every utterance text."""
    fresh_utterances: list[interface.Utterance] = create_utterances()
    protocol: interface.Protocol = interface.Protocol(
        date="1950", name="prot-1958-fake", utterances=fresh_utterances, speaker_notes={}
    )
    constant_text: Callable[[str], str] = lambda _: 'APA'
    protocol.preprocess(preprocess=constant_text)
    assert protocol.text == 'APA\nAPA\nAPA\nAPA\nAPA\nAPA'
def test_protocols_to_items():
    """Smoke test: tagged protocols load and can be segmented by speaker."""
    filenames: list[str] = glob.glob(TAGGED_SOURCE_PATTERN, recursive=True)
    protocols: list[interface.Protocol] = list(tagged_corpus.load_protocols(source=filenames))
    # Building the chain must not raise; segments are produced lazily.
    _ = itertools.chain(
        p.to_segments(content_type=interface.ContentType.Text, segment_level=interface.SegmentLevel.Who)
        for p in protocols
    )
|
[
"pyriksprot.interface.UtteranceHelper.from_json",
"pyriksprot.interface.UtteranceHelper.to_json",
"pyriksprot.to_speech.MergeByWhoSequence",
"pyriksprot.interface.UtteranceHelper.from_csv",
"uuid.uuid4",
"pyriksprot.interface.Protocol",
"pyriksprot.corpus.tagged.load_protocols",
"pytest.fixture",
"pyriksprot.interface.UtteranceHelper.to_dataframe",
"pyriksprot.interface.UtteranceHelper.to_csv",
"glob.glob",
"pyriksprot.interface.UtteranceHelper.to_dict",
"pyriksprot.interface.Utterance"
] |
[((403, 433), 'pytest.fixture', 'pytest.fixture', ([], {'scope': '"""module"""'}), "(scope='module')\n", (417, 433), False, 'import pytest\n'), ((570, 658), 'pyriksprot.interface.Utterance', 'interface.Utterance', ([], {'u_id': '"""A"""', 'speaker_hash': '"""x"""', 'who': '"""x"""', 'paragraphs': "['X', 'Y', 'C']"}), "(u_id='A', speaker_hash='x', who='x', paragraphs=['X',\n 'Y', 'C'])\n", (589, 658), False, 'from pyriksprot import interface, to_speech\n'), ((769, 857), 'pyriksprot.interface.Utterance', 'interface.Utterance', ([], {'u_id': '"""A"""', 'speaker_hash': '"""x"""', 'who': '"""x"""', 'paragraphs': "['X', 'Y', 'C']"}), "(u_id='A', speaker_hash='x', who='x', paragraphs=['X',\n 'Y', 'C'])\n", (788, 857), False, 'from pyriksprot import interface, to_speech\n'), ((2168, 2213), 'pyriksprot.interface.UtteranceHelper.to_dict', 'interface.UtteranceHelper.to_dict', (['utterances'], {}), '(utterances)\n', (2201, 2213), False, 'from pyriksprot import interface, to_speech\n'), ((2336, 2380), 'pyriksprot.interface.UtteranceHelper.to_csv', 'interface.UtteranceHelper.to_csv', (['utterances'], {}), '(utterances)\n', (2368, 2380), False, 'from pyriksprot import interface, to_speech\n'), ((2405, 2445), 'pyriksprot.interface.UtteranceHelper.from_csv', 'interface.UtteranceHelper.from_csv', (['data'], {}), '(data)\n', (2439, 2445), False, 'from pyriksprot import interface, to_speech\n'), ((2620, 2665), 'pyriksprot.interface.UtteranceHelper.to_json', 'interface.UtteranceHelper.to_json', (['utterances'], {}), '(utterances)\n', (2653, 2665), False, 'from pyriksprot import interface, to_speech\n'), ((2690, 2731), 'pyriksprot.interface.UtteranceHelper.from_json', 'interface.UtteranceHelper.from_json', (['data'], {}), '(data)\n', (2725, 2731), False, 'from pyriksprot import interface, to_speech\n'), ((2917, 2967), 'pyriksprot.interface.UtteranceHelper.to_dataframe', 'interface.UtteranceHelper.to_dataframe', (['utterances'], {}), '(utterances)\n', (2955, 2967), False, 'from 
pyriksprot import interface, to_speech\n'), ((3147, 3247), 'pyriksprot.interface.Protocol', 'interface.Protocol', ([], {'date': '"""1958"""', 'name': '"""prot-1958-fake"""', 'utterances': 'utterances', 'speaker_notes': '{}'}), "(date='1958', name='prot-1958-fake', utterances=\n utterances, speaker_notes={})\n", (3165, 3247), False, 'from pyriksprot import interface, to_speech\n'), ((3890, 3990), 'pyriksprot.interface.Protocol', 'interface.Protocol', ([], {'date': '"""1950"""', 'name': '"""prot-1958-fake"""', 'utterances': 'utterances', 'speaker_notes': '{}'}), "(date='1950', name='prot-1958-fake', utterances=\n utterances, speaker_notes={})\n", (3908, 3990), False, 'from pyriksprot import interface, to_speech\n'), ((4224, 4272), 'glob.glob', 'glob.glob', (['TAGGED_SOURCE_PATTERN'], {'recursive': '(True)'}), '(TAGGED_SOURCE_PATTERN, recursive=True)\n', (4233, 4272), False, 'import glob\n'), ((987, 1017), 'pyriksprot.to_speech.MergeByWhoSequence', 'to_speech.MergeByWhoSequence', ([], {}), '()\n', (1015, 1017), False, 'from pyriksprot import interface, to_speech\n'), ((1117, 1147), 'pyriksprot.to_speech.MergeByWhoSequence', 'to_speech.MergeByWhoSequence', ([], {}), '()\n', (1145, 1147), False, 'from pyriksprot import interface, to_speech\n'), ((1708, 1738), 'pyriksprot.to_speech.MergeByWhoSequence', 'to_speech.MergeByWhoSequence', ([], {}), '()\n', (1736, 1738), False, 'from pyriksprot import interface, to_speech\n'), ((4327, 4373), 'pyriksprot.corpus.tagged.load_protocols', 'tagged_corpus.load_protocols', ([], {'source': 'filenames'}), '(source=filenames)\n', (4355, 4373), True, 'from pyriksprot.corpus import tagged as tagged_corpus\n'), ((1274, 1286), 'uuid.uuid4', 'uuid.uuid4', ([], {}), '()\n', (1284, 1286), False, 'import uuid\n'), ((1356, 1368), 'uuid.uuid4', 'uuid.uuid4', ([], {}), '()\n', (1366, 1368), False, 'import uuid\n'), ((1438, 1450), 'uuid.uuid4', 'uuid.uuid4', ([], {}), '()\n', (1448, 1450), False, 'import uuid\n'), ((1520, 1532), 'uuid.uuid4', 
'uuid.uuid4', ([], {}), '()\n', (1530, 1532), False, 'import uuid\n'), ((1602, 1614), 'uuid.uuid4', 'uuid.uuid4', ([], {}), '()\n', (1612, 1614), False, 'import uuid\n')]
|
# CMSSW cmsRun configuration: skim 2010 MinimumBias RECO data on L1 seeds
# and run the IsolatedTracksNxN analyzer (normal and NZS variants).
import FWCore.ParameterSet.Config as cms
process = cms.Process("L1SKIM")
process.load("FWCore.MessageService.MessageLogger_cfi")
# Throttle the framework progress report to once per 100k events.
process.MessageLogger.cerr.FwkReport.reportEvery = 100000
process.options = cms.untracked.PSet( wantSummary = cms.untracked.bool(True) )
####################### configure pool source #############################
process.source = cms.Source("PoolSource",
fileNames =cms.untracked.vstring(
'/store/data/Run2010A/MinimumBias/RECO/Apr21ReReco-v1/0000/08275F4A-5270-E011-9DC3-003048635E02.root'
),
skipEvents = cms.untracked.uint32(0)
)
process.maxEvents = cms.untracked.PSet( input = cms.untracked.int32(100) )
##################### digi-2-raw plus L1 emulation #########################
process.load("Configuration.StandardSequences.Services_cff")
process.load('Configuration.StandardSequences.GeometryRecoDB_cff')
process.load('Configuration.StandardSequences.MagneticField_cff')
#################### Conditions and L1 menu ################################
process.load("Configuration.StandardSequences.FrontierConditions_GlobalTag_cff")
from Configuration.AlCa.autoCond import autoCond
# Pick the Run-1 data global tag automatically.
process.GlobalTag.globaltag=autoCond['run1_data']
############ Skim the events according to the L1 seeds ####################
#select on HLT_HcalNZS_8E29 trigger
import HLTrigger.HLTfilters.hltLevel1GTSeed_cfi
process.skimL1Seeds = HLTrigger.HLTfilters.hltLevel1GTSeed_cfi.hltLevel1GTSeed.clone()
process.skimL1Seeds.L1GtReadoutRecordTag = cms.InputTag("gtDigis")
process.skimL1Seeds.L1GtObjectMapTag = cms.InputTag("hltL1GtObjectMap")
process.skimL1Seeds.L1CollectionsTag = cms.InputTag("l1extraParticles")
process.skimL1Seeds.L1MuonCollectionTag = cms.InputTag("l1extraParticles")
# Accept any single-EG / single-jet / single-tau / single-muon seed or ZeroBias.
process.skimL1Seeds.L1SeedsLogicalExpression = "L1_SingleEG2 OR L1_SingleEG5 OR L1_SingleEG8 OR L1_SingleEG10 OR L1_SingleEG12 OR L1_SingleEG15 OR L1_SingleEG20 OR L1_SingleIsoEG5 OR L1_SingleIsoEG8 OR L1_SingleIsoEG10 OR L1_SingleIsoEG12 OR L1_SingleIsoEG15 OR L1_SingleJet6U OR L1_SingleJet10U OR L1_SingleJet20U OR L1_SingleJet30U OR L1_SingleJet40U OR L1_SingleJet50U OR L1_SingleJet60U OR L1_SingleTauJet10U OR L1_SingleTauJet20U OR L1_SingleTauJet30U OR L1_SingleTauJet50U OR L1_SingleMuOpen OR L1_SingleMu0 OR L1_SingleMu3 OR L1_SingleMu5 OR L1_SingleMu7 OR L1_SingleMu10 OR L1_SingleMu14 OR L1_SingleMu20 OR L1_ZeroBias"
# select on HLT_HcalPhiSym trigger
process.load("HLTrigger.HLTfilters.hltLevel1Activity_cfi")
process.hltLevel1Activity.L1GtReadoutRecordTag = cms.InputTag('gtDigis')
######################## Configure Analyzer ###############################
process.load("RecoLocalCalo.EcalRecAlgos.EcalSeverityLevelESProducer_cfi")
process.load("Calibration.IsolatedParticles.isolatedTracksNxN_cfi")
process.isolatedTracksNxN.Verbosity = cms.untracked.int32( 0 )
process.isolatedTracksNxN.HBHERecHitSource = cms.InputTag("hbhereco")
process.isolatedTracksNxN.L1TriggerAlgoInfo = True
#process.isolatedTracksNxN.DebugL1Info = True
# NZS (non-zero-suppressed) clone reads the MinBias HBHE collection.
process.isolatedTracksNxN_NZS = process.isolatedTracksNxN.clone(
Verbosity = cms.untracked.int32( 0 ),
HBHERecHitSource = cms.InputTag("hbherecoMB"),
L1TriggerAlgoInfo = True
)
process.TFileService = cms.Service("TFileService",
fileName = cms.string('IsolatedTracksNxNData.root')
)
# configure Technical Bits to ensure collision and remove BeamHalo
process.load('L1TriggerConfig.L1GtConfigProducers.L1GtTriggerMaskTechTrigConfig_cff')
process.load('HLTrigger/HLTfilters/hltLevel1GTSeed_cfi')
process.hltLevel1GTSeed.L1TechTriggerSeeding = cms.bool(True)
# Require BPTX coincidence (bit 0) and veto beam-halo bits 36-39.
process.hltLevel1GTSeed.L1SeedsLogicalExpression = cms.string('0 AND NOT (36 OR 37 OR 38 OR 39)')
# filter out scrapping events
process.noScraping= cms.EDFilter("FilterOutScraping",
applyfilter = cms.untracked.bool(True),
debugOn = cms.untracked.bool(False), ## Or 'True' to get some per-event info
numtrack = cms.untracked.uint32(10),
thresh = cms.untracked.double(0.25)
)
# select on primary vertex
process.primaryVertexFilter = cms.EDFilter("GoodVertexFilter",
vertexCollection = cms.InputTag('offlinePrimaryVertices'),
minimumNDOF = cms.uint32(4) ,
maxAbsZ = cms.double(25.0),
maxd0 = cms.double(5.0)
)
#=============================================================================
# define an EndPath to analyze all other path results
process.hltTrigReport = cms.EDAnalyzer( 'HLTrigReport',
HLTriggerResults = cms.InputTag( 'TriggerResults','','HLT')
)
process.load("L1Trigger.GlobalTriggerAnalyzer.l1GtTrigReport_cfi")
process.l1GtTrigReport.L1GtRecordInputTag = 'gtDigis'
process.l1GtTrigReport.PrintVerbosity = 1
#=============================================================================
#### by Benedikt
# Main path: vertex filter -> collision selection -> anti-scraping -> L1 skim -> analyzers.
process.p1 = cms.Path(process.primaryVertexFilter * process.hltLevel1GTSeed * process.noScraping * process.skimL1Seeds *process.isolatedTracksNxN * process.isolatedTracksNxN_NZS)
process.e = cms.EndPath(process.l1GtTrigReport + process.hltTrigReport)
|
[
"FWCore.ParameterSet.Config.string",
"FWCore.ParameterSet.Config.untracked.int32",
"FWCore.ParameterSet.Config.double",
"FWCore.ParameterSet.Config.untracked.vstring",
"FWCore.ParameterSet.Config.untracked.double",
"FWCore.ParameterSet.Config.untracked.bool",
"FWCore.ParameterSet.Config.EndPath",
"FWCore.ParameterSet.Config.Process",
"FWCore.ParameterSet.Config.uint32",
"FWCore.ParameterSet.Config.bool",
"FWCore.ParameterSet.Config.untracked.uint32",
"FWCore.ParameterSet.Config.InputTag",
"FWCore.ParameterSet.Config.Path"
] |
[((52, 73), 'FWCore.ParameterSet.Config.Process', 'cms.Process', (['"""L1SKIM"""'], {}), "('L1SKIM')\n", (63, 73), True, 'import FWCore.ParameterSet.Config as cms\n'), ((1476, 1499), 'FWCore.ParameterSet.Config.InputTag', 'cms.InputTag', (['"""gtDigis"""'], {}), "('gtDigis')\n", (1488, 1499), True, 'import FWCore.ParameterSet.Config as cms\n'), ((1543, 1575), 'FWCore.ParameterSet.Config.InputTag', 'cms.InputTag', (['"""hltL1GtObjectMap"""'], {}), "('hltL1GtObjectMap')\n", (1555, 1575), True, 'import FWCore.ParameterSet.Config as cms\n'), ((1619, 1651), 'FWCore.ParameterSet.Config.InputTag', 'cms.InputTag', (['"""l1extraParticles"""'], {}), "('l1extraParticles')\n", (1631, 1651), True, 'import FWCore.ParameterSet.Config as cms\n'), ((1695, 1727), 'FWCore.ParameterSet.Config.InputTag', 'cms.InputTag', (['"""l1extraParticles"""'], {}), "('l1extraParticles')\n", (1707, 1727), True, 'import FWCore.ParameterSet.Config as cms\n'), ((2502, 2525), 'FWCore.ParameterSet.Config.InputTag', 'cms.InputTag', (['"""gtDigis"""'], {}), "('gtDigis')\n", (2514, 2525), True, 'import FWCore.ParameterSet.Config as cms\n'), ((2785, 2807), 'FWCore.ParameterSet.Config.untracked.int32', 'cms.untracked.int32', (['(0)'], {}), '(0)\n', (2804, 2807), True, 'import FWCore.ParameterSet.Config as cms\n'), ((2855, 2879), 'FWCore.ParameterSet.Config.InputTag', 'cms.InputTag', (['"""hbhereco"""'], {}), "('hbhereco')\n", (2867, 2879), True, 'import FWCore.ParameterSet.Config as cms\n'), ((3605, 3619), 'FWCore.ParameterSet.Config.bool', 'cms.bool', (['(True)'], {}), '(True)\n', (3613, 3619), True, 'import FWCore.ParameterSet.Config as cms\n'), ((3671, 3717), 'FWCore.ParameterSet.Config.string', 'cms.string', (['"""0 AND NOT (36 OR 37 OR 38 OR 39)"""'], {}), "('0 AND NOT (36 OR 37 OR 38 OR 39)')\n", (3681, 3717), True, 'import FWCore.ParameterSet.Config as cms\n'), ((5179, 5355), 'FWCore.ParameterSet.Config.Path', 'cms.Path', (['(process.primaryVertexFilter * process.hltLevel1GTSeed * process.noScraping 
*\n process.skimL1Seeds * process.isolatedTracksNxN * process.\n isolatedTracksNxN_NZS)'], {}), '(process.primaryVertexFilter * process.hltLevel1GTSeed * process.\n noScraping * process.skimL1Seeds * process.isolatedTracksNxN * process.\n isolatedTracksNxN_NZS)\n', (5187, 5355), True, 'import FWCore.ParameterSet.Config as cms\n'), ((5363, 5422), 'FWCore.ParameterSet.Config.EndPath', 'cms.EndPath', (['(process.l1GtTrigReport + process.hltTrigReport)'], {}), '(process.l1GtTrigReport + process.hltTrigReport)\n', (5374, 5422), True, 'import FWCore.ParameterSet.Config as cms\n'), ((242, 266), 'FWCore.ParameterSet.Config.untracked.bool', 'cms.untracked.bool', (['(True)'], {}), '(True)\n', (260, 266), True, 'import FWCore.ParameterSet.Config as cms\n'), ((400, 534), 'FWCore.ParameterSet.Config.untracked.vstring', 'cms.untracked.vstring', (['"""/store/data/Run2010A/MinimumBias/RECO/Apr21ReReco-v1/0000/08275F4A-5270-E011-9DC3-003048635E02.root"""'], {}), "(\n '/store/data/Run2010A/MinimumBias/RECO/Apr21ReReco-v1/0000/08275F4A-5270-E011-9DC3-003048635E02.root'\n )\n", (421, 534), True, 'import FWCore.ParameterSet.Config as cms\n'), ((550, 573), 'FWCore.ParameterSet.Config.untracked.uint32', 'cms.untracked.uint32', (['(0)'], {}), '(0)\n', (570, 573), True, 'import FWCore.ParameterSet.Config as cms\n'), ((624, 648), 'FWCore.ParameterSet.Config.untracked.int32', 'cms.untracked.int32', (['(100)'], {}), '(100)\n', (643, 648), True, 'import FWCore.ParameterSet.Config as cms\n'), ((3059, 3081), 'FWCore.ParameterSet.Config.untracked.int32', 'cms.untracked.int32', (['(0)'], {}), '(0)\n', (3078, 3081), True, 'import FWCore.ParameterSet.Config as cms\n'), ((3108, 3134), 'FWCore.ParameterSet.Config.InputTag', 'cms.InputTag', (['"""hbherecoMB"""'], {}), "('hbherecoMB')\n", (3120, 3134), True, 'import FWCore.ParameterSet.Config as cms\n'), ((3269, 3309), 'FWCore.ParameterSet.Config.string', 'cms.string', (['"""IsolatedTracksNxNData.root"""'], {}), "('IsolatedTracksNxNData.root')\n", 
(3279, 3309), True, 'import FWCore.ParameterSet.Config as cms\n'), ((3850, 3874), 'FWCore.ParameterSet.Config.untracked.bool', 'cms.untracked.bool', (['(True)'], {}), '(True)\n', (3868, 3874), True, 'import FWCore.ParameterSet.Config as cms\n'), ((3923, 3948), 'FWCore.ParameterSet.Config.untracked.bool', 'cms.untracked.bool', (['(False)'], {}), '(False)\n', (3941, 3948), True, 'import FWCore.ParameterSet.Config as cms\n'), ((4037, 4061), 'FWCore.ParameterSet.Config.untracked.uint32', 'cms.untracked.uint32', (['(10)'], {}), '(10)\n', (4057, 4061), True, 'import FWCore.ParameterSet.Config as cms\n'), ((4110, 4136), 'FWCore.ParameterSet.Config.untracked.double', 'cms.untracked.double', (['(0.25)'], {}), '(0.25)\n', (4130, 4136), True, 'import FWCore.ParameterSet.Config as cms\n'), ((4325, 4363), 'FWCore.ParameterSet.Config.InputTag', 'cms.InputTag', (['"""offlinePrimaryVertices"""'], {}), "('offlinePrimaryVertices')\n", (4337, 4363), True, 'import FWCore.ParameterSet.Config as cms\n'), ((4427, 4440), 'FWCore.ParameterSet.Config.uint32', 'cms.uint32', (['(4)'], {}), '(4)\n', (4437, 4440), True, 'import FWCore.ParameterSet.Config as cms\n'), ((4505, 4521), 'FWCore.ParameterSet.Config.double', 'cms.double', (['(25.0)'], {}), '(25.0)\n', (4515, 4521), True, 'import FWCore.ParameterSet.Config as cms\n'), ((4585, 4600), 'FWCore.ParameterSet.Config.double', 'cms.double', (['(5.0)'], {}), '(5.0)\n', (4595, 4600), True, 'import FWCore.ParameterSet.Config as cms\n'), ((4862, 4903), 'FWCore.ParameterSet.Config.InputTag', 'cms.InputTag', (['"""TriggerResults"""', '""""""', '"""HLT"""'], {}), "('TriggerResults', '', 'HLT')\n", (4874, 4903), True, 'import FWCore.ParameterSet.Config as cms\n')]
|
import os
import random
import sys
import pygame
from pygame.locals import *
class Satyr(pygame.sprite.Sprite):
    """A sprite drawn wherever the caller says (e.g. at the mouse position)."""

    def __init__(self, position, ipath, scale=0.5):
        """Load the image at *ipath*, scale it by *scale*, and center it at *position*."""
        super().__init__()
        raw = pygame.image.load(ipath)
        width, height = raw.get_rect(center=position).size
        scaled_size = (int(width * scale), int(height * scale))
        self.image = pygame.transform.scale(raw, scaled_size)
        self.rect = self.image.get_rect(center=position)
        self.rect.center = position

    def draw(self, surface, position):
        """Record *position* as the new center and blit the image there."""
        self.rect.center = position
        surface.blit(self.image, position)
class Image(pygame.sprite.Sprite):
    """A sprite that drifts one pixel per frame and bounces off the window edges."""

    def __init__(self, position, ipath, scale=1.0):
        """Load the image at *ipath*, scale it by *scale*, center it at *position*,
        and pick a random initial direction on each axis."""
        pygame.sprite.Sprite.__init__(self)
        img = pygame.image.load(ipath)
        w, h = img.get_rect().size[0], img.get_rect().size[1]
        w, h = int(w * scale), int(h * scale)
        img = pygame.transform.scale(img, (w, h))
        rect = img.get_rect(center=position)
        self.image = img
        self.rect = rect
        self.rect.center = position
        self.dx = 1 if random.random() < 0.5 else -1
        # BUG FIX: original read "else 1", making the random draw dead code —
        # dy could never start negative. Mirror dx's symmetric choice.
        self.dy = 1 if random.random() < 0.5 else -1

    def draw(self, surface):
        surface.blit(self.image, self.rect.center)

    def update(self, width, height):
        """Advance one step, reversing direction at the window boundaries."""
        x, y = self.rect.center
        if x + self.image.get_rect().size[0] >= width:
            self.dx = -1
        elif x <= 0:
            self.dx = 1
        if y + self.image.get_rect().size[1] >= height:
            self.dy = -1
        elif y <= 0:
            self.dy = 1
        x, y = x + 1 * self.dx, y + 1 * self.dy
        self.rect.center = x, y
def start():
    """Run the demo loop: a ball bounces around while the satyr follows the mouse."""
    pygame.init()
    fps = 30
    width, height = 400, 400
    screen = pygame.display.set_mode((width, height))
    screen.fill((255, 255, 255))
    pygame.display.set_caption('Key Events')
    clock = pygame.time.Clock()
    satyr = Satyr((200, 200), './images/Satyr_01_Idle_000.png', scale=0.25)
    spawn = (random.randint(0, width), random.randint(0, height))
    ball = Image(spawn, './images/ball.png', scale=0.25)
    ball_group = pygame.sprite.Group()
    ball_group.add(ball)
    while True:
        for event in pygame.event.get():
            if event.type == QUIT:
                pygame.quit()
                sys.exit()
        screen.fill((255, 255, 255, 0))
        # Report overlaps without removing the ball from its group.
        if pygame.sprite.spritecollide(satyr, ball_group, False):
            print(f'collided {ball.rect.center}')
        satyr.draw(screen, pygame.mouse.get_pos())
        ball.draw(screen)
        ball.update(width, height)
        pygame.display.update()
        clock.tick(fps)
if __name__ == '__main__':
    # Ask SDL to center the game window before pygame creates the display.
    os.environ['SDL_VIDEO_CENTERED'] = '1'
    start()
|
[
"pygame.quit",
"random.randint",
"pygame.event.get",
"pygame.display.set_mode",
"pygame.init",
"pygame.sprite.Group",
"pygame.transform.scale",
"pygame.display.update",
"random.random",
"pygame.sprite.Sprite.__init__",
"pygame.sprite.spritecollide",
"pygame.image.load",
"pygame.mouse.get_pos",
"pygame.display.set_caption",
"pygame.time.Clock",
"sys.exit"
] |
[((1759, 1772), 'pygame.init', 'pygame.init', ([], {}), '()\n', (1770, 1772), False, 'import pygame\n'), ((1838, 1878), 'pygame.display.set_mode', 'pygame.display.set_mode', (['(width, height)'], {}), '((width, height))\n', (1861, 1878), False, 'import pygame\n'), ((1921, 1961), 'pygame.display.set_caption', 'pygame.display.set_caption', (['"""Key Events"""'], {}), "('Key Events')\n", (1947, 1961), False, 'import pygame\n'), ((1978, 1997), 'pygame.time.Clock', 'pygame.time.Clock', ([], {}), '()\n', (1995, 1997), False, 'import pygame\n'), ((2198, 2219), 'pygame.sprite.Group', 'pygame.sprite.Group', ([], {}), '()\n', (2217, 2219), False, 'import pygame\n'), ((175, 210), 'pygame.sprite.Sprite.__init__', 'pygame.sprite.Sprite.__init__', (['self'], {}), '(self)\n', (204, 210), False, 'import pygame\n'), ((226, 250), 'pygame.image.load', 'pygame.image.load', (['ipath'], {}), '(ipath)\n', (243, 250), False, 'import pygame\n'), ((398, 433), 'pygame.transform.scale', 'pygame.transform.scale', (['img', '(w, h)'], {}), '(img, (w, h))\n', (420, 433), False, 'import pygame\n'), ((783, 818), 'pygame.sprite.Sprite.__init__', 'pygame.sprite.Sprite.__init__', (['self'], {}), '(self)\n', (812, 818), False, 'import pygame\n'), ((834, 858), 'pygame.image.load', 'pygame.image.load', (['ipath'], {}), '(ipath)\n', (851, 858), False, 'import pygame\n'), ((981, 1016), 'pygame.transform.scale', 'pygame.transform.scale', (['img', '(w, h)'], {}), '(img, (w, h))\n', (1003, 1016), False, 'import pygame\n'), ((2283, 2301), 'pygame.event.get', 'pygame.event.get', ([], {}), '()\n', (2299, 2301), False, 'import pygame\n'), ((2453, 2506), 'pygame.sprite.spritecollide', 'pygame.sprite.spritecollide', (['satyr', 'ball_group', '(False)'], {}), '(satyr, ball_group, False)\n', (2480, 2506), False, 'import pygame\n'), ((2690, 2713), 'pygame.display.update', 'pygame.display.update', ([], {}), '()\n', (2711, 2713), False, 'import pygame\n'), ((2094, 2118), 'random.randint', 'random.randint', (['(0)', 
'width'], {}), '(0, width)\n', (2108, 2118), False, 'import random\n'), ((2120, 2145), 'random.randint', 'random.randint', (['(0)', 'height'], {}), '(0, height)\n', (2134, 2145), False, 'import random\n'), ((2591, 2613), 'pygame.mouse.get_pos', 'pygame.mouse.get_pos', ([], {}), '()\n', (2611, 2613), False, 'import pygame\n'), ((1173, 1188), 'random.random', 'random.random', ([], {}), '()\n', (1186, 1188), False, 'import random\n'), ((1226, 1241), 'random.random', 'random.random', ([], {}), '()\n', (1239, 1241), False, 'import random\n'), ((2354, 2367), 'pygame.quit', 'pygame.quit', ([], {}), '()\n', (2365, 2367), False, 'import pygame\n'), ((2384, 2394), 'sys.exit', 'sys.exit', ([], {}), '()\n', (2392, 2394), False, 'import sys\n')]
|
from direct.distributed.DistributedObjectGlobalAI import DistributedObjectGlobalAI
from direct.distributed.PyDatagram import *
from direct.directnotify.DirectNotifyGlobal import directNotify
class GlobalLobbyManagerAI(DistributedObjectGlobalAI):
    """AI-side proxy for the global (UberDOG) lobby manager.

    The ``d_*``/``send*``/``query*`` methods each forward a single field
    update over the wire; their plain counterparts are inbound stubs the
    distributed framework may invoke but which need no AI-side handling.
    """
    notify = directNotify.newCategory('GlobalLobbyManagerAI')
    def announceGenerate(self):
        # On generate, introduce this AI district's lobby manager to the UberDOG.
        DistributedObjectGlobalAI.announceGenerate(self)
        # `simbase` is a process-wide global supplied by the engine, not this file.
        self.sendUpdate('lobbyManagerAIHello', [simbase.air.lobbyManager.doId])
    def sendAddLobby(self, avId, lobbyId):
        # Register a lobby hosted by avId with the global manager.
        self.sendUpdate('addLobby', [avId, lobbyId])
    def queryLobbyForHost(self, hostId):
        # Ask which lobby, if any, hostId currently owns.
        self.sendUpdate('queryLobby', [hostId])
    def d_lobbyStarted(self, lobbyId, shardId, zoneId, hostName):
        # Announce that a lobby's session has started on the given shard/zone.
        self.sendUpdate('lobbyHasStarted', [lobbyId, shardId, zoneId, hostName])
    def lobbyStarted(self, lobbyId, shardId, zoneId, hostName):
        # Inbound stub: no AI-side action required.
        pass
    def d_lobbyDone(self, lobbyId):
        # Announce that a lobby has finished.
        self.sendUpdate('lobbyDone', [lobbyId])
    def lobbyDone(self, lobbyId):
        # Inbound stub: no AI-side action required.
        pass
    def d_toonJoinedLobby(self, lobbyId, avId):
        # Notify that avId joined the lobby.
        self.sendUpdate('toonJoinedLobby', [lobbyId, avId])
    def toonJoinedLobby(self, lobbyId, avId):
        # Inbound stub: no AI-side action required.
        pass
    def d_toonLeftLobby(self, lobbyId, avId):
        # Notify that avId left the lobby.
        self.sendUpdate('toonLeftLobby', [lobbyId, avId])
    def toonLeftLobby(self, lobbyId, avId):
        # Inbound stub: no AI-side action required.
        pass
    def d_requestLobbySlot(self, lobbyId, avId):
        # Request a slot in the lobby for avId.
        self.sendUpdate('requestLobbySlot', [lobbyId, avId])
    def requestLobbySlot(self, lobbyId, avId):
        # Inbound stub: no AI-side action required.
        pass
    def d_allocIds(self, numIds):
        # Ask the global manager to allocate numIds ids.
        self.sendUpdate('allocIds', [numIds])
    def allocIds(self, numIds):
        # Inbound stub: no AI-side action required.
        pass
|
[
"direct.directnotify.DirectNotifyGlobal.directNotify.newCategory",
"direct.distributed.DistributedObjectGlobalAI.DistributedObjectGlobalAI.announceGenerate"
] |
[((260, 308), 'direct.directnotify.DirectNotifyGlobal.directNotify.newCategory', 'directNotify.newCategory', (['"""GlobalLobbyManagerAI"""'], {}), "('GlobalLobbyManagerAI')\n", (284, 308), False, 'from direct.directnotify.DirectNotifyGlobal import directNotify\n'), ((350, 398), 'direct.distributed.DistributedObjectGlobalAI.DistributedObjectGlobalAI.announceGenerate', 'DistributedObjectGlobalAI.announceGenerate', (['self'], {}), '(self)\n', (392, 398), False, 'from direct.distributed.DistributedObjectGlobalAI import DistributedObjectGlobalAI\n')]
|
"""
Combines predictions based on votes by a set of answer files.
"""
import re
from os import listdir
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import seaborn as sns
from matplotlib.ticker import MultipleLocator, FormatStrFormatter
from sklearn.metrics import accuracy_score
from .constants import LABEL
from .preprocessing import get_train_dev_test
def vote(y_true, y_pred, conf):
    """Combine per-model predictions via confidence vote and majority vote.

    Returns ``(acc_conf, acc_major)``: accuracy of the confidence vote and
    of the majority vote against *y_true*.
    """
    # Confidence vote: per sample, take the prediction of the model that
    # was most confident about that sample.
    most_confident = np.argmax(conf, axis=0)
    conf_vote = y_pred.T[np.arange(len(y_pred.T)), most_confident]
    acc_conf = accuracy_score(y_true=y_true, y_pred=conf_vote)

    # Majority vote: average the binary predictions; exact ties fall back
    # to the confidence vote's choice.
    mean_pred = np.mean(y_pred, axis=0)
    tie_mask = np.isclose(mean_pred, 0.5)
    mean_pred[tie_mask] = conf_vote[tie_mask]
    majority = mean_pred >= 0.5
    acc_major = accuracy_score(y_true=y_true, y_pred=majority)
    return acc_conf, acc_major
def plot_axis(df, ax, legend_pos='orig1'):
    """Plot the four accuracy series of *df* onto *ax* with fixed styling.

    legend_pos selects one of the hand-tuned legend placements
    ('orig1', 'orig2', 'alt1', 'alt2'); anything else uses the default.
    """
    df.plot(x=np.arange(1, len(df) + 1), ax=ax, use_index=False, xlim=[-25, len(df) + 25], ylim=[0.5, 0.775],
            style=['-', '-', '-', '-'], lw=1.5,
            yticks=[.5, .525, .55, .575, .6, .625, .65, .675, .7, .725, .75, .775])
    # Thin the 2nd and 4th lines so the vote curves stand out.
    ax.lines[1].set_linewidth(0.9)  # 1.15
    ax.lines[3].set_linewidth(0.9)  # 1.15
    col1 = ax.lines[0].get_color()
    col2 = ax.lines[2].get_color()
    # Lighten the paired lines relative to their partners.
    ax.lines[1].set_color(tuple(1.3*c for c in col1))  # 1.1*
    ax.lines[3].set_color(tuple(1.3*c for c in col2))  # 1.1*
    ax.grid(b=True, which='major', linestyle='-', linewidth=0.85)
    ax.grid(b=True, which='minor', linestyle=':', linewidth=0.75)
    if legend_pos == 'orig1':
        ax.legend(loc='center', bbox_to_anchor=(0.5, 0.365))
    elif legend_pos == 'orig2':
        ax.legend().remove()
    elif legend_pos == 'alt1':
        ax.legend(loc='center', bbox_to_anchor=(0.5, 0.14))
    elif legend_pos == 'alt2':
        ax.legend(loc='lower left', bbox_to_anchor=(0.02, 0))
    else:
        ax.legend()
    ax.set_xlabel('number of models', weight='bold')
    ax.set_ylabel('accuracy', weight='bold')
    # majorLocator_x = MultipleLocator(500)
    # Major ticks every 0.05, minor every 0.025 on the accuracy axis.
    majorLocator_y = MultipleLocator(.05)
    majorFormatter_y = FormatStrFormatter('%.2f')
    minorLocator_y = MultipleLocator(.025)
    # ax.xaxis.set_major_locator(majorLocator_x)
    ax.yaxis.set_major_locator(majorLocator_y)
    ax.yaxis.set_major_formatter(majorFormatter_y)
    ax.yaxis.set_minor_locator(minorLocator_y)
def plot_figure(dfs: list, name, show=True, save=False, legend_pos: list=None, align='h'):
    """Render one axis per dataframe; optionally show and/or save as ``name.pdf``.

    align='h' lays the axes out side by side, anything else stacks them.
    """
    count = len(dfs)
    if legend_pos is None:
        legend_pos = [''] * count
    sns.set(color_codes=True, font_scale=1)
    sns.set_style("whitegrid", {'legend.frameon': True})
    sns.set_palette("deep")
    if align == 'h':
        fig, ax = plt.subplots(ncols=count, figsize=(5 * count, 5), sharey=True)
    else:
        fig, ax = plt.subplots(nrows=count, figsize=(5, 5 * count))
    if count > 1:
        for axis, frame, pos in zip(ax, dfs, legend_pos):
            plot_axis(frame, axis, legend_pos=pos)
        ax[0].set_title('original dataset')
        ax[1].set_title('alternative (randomized) data split')
    else:
        plot_axis(dfs[0], ax, legend_pos=legend_pos[0])
    fig.tight_layout()
    if show:
        plt.show()
    if save:
        fig.savefig(name + '.pdf', bbox_inches='tight')
    plt.close('all')
def build_df(files, y_true):
    """Load per-model probability files into one column-multi-indexed dataframe.

    Columns are grouped under 'acc' (per-model accuracy), 'pred' (binary
    predictions), 'prob' (raw probabilities) and 'conf' (distance from 0.5).
    """
    series = []
    for path in files:
        # Derive a column name from the file name; spaces become zeros.
        col_name = path[-48:-4].replace(' ', '0')
        series.append(pd.Series(np.load(path).flatten(), name=col_name))
    probs_df = pd.DataFrame(series)
    preds_df = probs_df >= 0.5
    confs_df = (probs_df - 0.5).abs()
    accs_ser = preds_df.apply(lambda row: accuracy_score(y_true=y_true, y_pred=row), axis=1)
    return pd.concat([accs_ser, preds_df, probs_df, confs_df], axis=1,
                     keys=['acc', 'pred', 'prob', 'conf'])
def main():
    """Run the full ensemble evaluation for each configured result directory.

    For every entry in *names*: load per-model dev/test probability files,
    sort models by dev accuracy, compute incremental confidence/majority
    votes over the top-i models, and plot the resulting curves.
    """
    # Map of run label -> directory holding probability .npy dumps.
    names = {
        # 'tensorL05con2redo2': '/media/andreas/Linux_Data/hpc-semeval/tensorL05con2redo2/out/',
        'alt_split_odd': '/media/andreas/Linux_Data/hpc-semeval/alt_split_odd_both/',
    }
    _, df_dev_data, df_tst_data = get_train_dev_test(options=dict(alt_split=True))
    dev_true = df_dev_data[LABEL].values.flatten()
    tst_true = df_tst_data[LABEL].values.flatten()
    for k, d in names.items():
        directory = listdir(d)
        # Probability dumps are named 'probabilities-dev...' / 'probabilities-tst...'.
        dev_files = [d + f for f in directory if re.match(r'^probabilities-' + 'dev', f)]
        tst_files = [d + f for f in directory if re.match(r'^probabilities-' + 'tst', f)]
        df_dev = build_df(dev_files, dev_true)
        df_tst = build_df(tst_files, tst_true)
        df = pd.concat([df_dev, df_tst], axis=1, keys=['dev', 'tst'])
        # Rank models by their dev accuracy (best first).
        df = df.sort_values(('dev', 'acc', 0), ascending=False)
        # Optional threshold: keep only models above this dev accuracy (0. disables).
        dev_acc_filter = 0.
        if dev_acc_filter:
            row_filter = df['dev', 'acc', 0] >= dev_acc_filter
            df = df[row_filter.values]
            print('filtered for dev accuracies >=', dev_acc_filter)
        dev_mean = np.mean(df['dev', 'acc', 0].values)
        tst_mean = np.mean(df['tst', 'acc', 0].values)
        dev_preds_np = df['dev', 'pred'].values
        dev_confs_np = df['dev', 'conf'].values
        tst_preds_np = df['tst', 'pred'].values
        tst_confs_np = df['tst', 'conf'].values
        # print more stats
        if False:
            pd.set_option('display.float_format', lambda x: '%.6f' % x)
            print('dev:\n', pd.Series(dev_mean).describe())
            print('test:\n', pd.Series(tst_mean).describe())
        dev_conf_scores = list()
        tst_conf_scores = list()
        dev_major_scores = list()
        tst_major_scores = list()
        # Incremental ensembles: vote over the i best models, i = 1..N.
        for i in range(1, len(df)+1):
            acc_conf_dev, acc_major_dev = vote(dev_true, y_pred=dev_preds_np[:i], conf=dev_confs_np[:i])
            acc_conf_tst, acc_major_tst = vote(tst_true, y_pred=tst_preds_np[:i], conf=tst_confs_np[:i])
            dev_conf_scores.append(acc_conf_dev)
            tst_conf_scores.append(acc_conf_tst)
            dev_major_scores.append(acc_major_dev)
            tst_major_scores.append(acc_major_tst)
        mtrx = {
            # 'dev: confidence vote': dev_conf_scores,
            # 'test: confidence vote': tst_conf_scores,
            'test: mean accuracy': tst_mean,
            'dev: sorted accuracy': df['dev', 'acc', 0],
            'dev: majority vote': dev_major_scores,
            'test: majority vote': tst_major_scores,
            # 'dev: mean accuracy': dev_mean,
        }
        df = pd.DataFrame(mtrx)
        plot_figure([df], k + 'all_')
        # df.to_csv('../out/alt-split_2560.csv', sep='\t')
if __name__ == '__main__':
    # TODO: clean up code or add argument flags
    # Currently re-plots previously exported CSV results instead of running main().
    # main()
    # df1 = pd.read_csv('../out/orig-split.csv', sep='\t')
    df2 = pd.read_csv('../out/alt-split_2560.csv', sep='\t')
    # plot_figure([df1], '../out/orig-split_2', save=True, legend_pos=['orig1'])
    plot_figure([df2], '../out/alt-split_2560', save=True, legend_pos=['alt1'])
    # plot_figure([df1, df2], '../out/ensemble_h_2', save=True, legend_pos=['orig2', 'alt2'], align='h')
    # plot_figure([df1, df2], '../out/ensemble_v_2', save=True, legend_pos=['orig2', 'alt2'], align='v')
|
[
"numpy.load",
"numpy.abs",
"numpy.argmax",
"pandas.read_csv",
"sklearn.metrics.accuracy_score",
"numpy.isclose",
"numpy.mean",
"pandas.set_option",
"pandas.DataFrame",
"matplotlib.pyplot.close",
"matplotlib.ticker.FormatStrFormatter",
"matplotlib.ticker.MultipleLocator",
"matplotlib.pyplot.subplots",
"seaborn.set",
"pandas.concat",
"seaborn.set_style",
"matplotlib.pyplot.show",
"re.match",
"pandas.Series",
"seaborn.set_palette",
"os.listdir"
] |
[((463, 486), 'numpy.argmax', 'np.argmax', (['conf'], {'axis': '(0)'}), '(conf, axis=0)\n', (472, 486), True, 'import numpy as np\n'), ((566, 613), 'sklearn.metrics.accuracy_score', 'accuracy_score', ([], {'y_true': 'y_true', 'y_pred': 'conf_vote'}), '(y_true=y_true, y_pred=conf_vote)\n', (580, 613), False, 'from sklearn.metrics import accuracy_score\n'), ((652, 675), 'numpy.mean', 'np.mean', (['y_pred'], {'axis': '(0)'}), '(y_pred, axis=0)\n', (659, 675), True, 'import numpy as np\n'), ((754, 775), 'numpy.isclose', 'np.isclose', (['pred', '(0.5)'], {}), '(pred, 0.5)\n', (764, 775), True, 'import numpy as np\n'), ((848, 890), 'sklearn.metrics.accuracy_score', 'accuracy_score', ([], {'y_true': 'y_true', 'y_pred': 'pred'}), '(y_true=y_true, y_pred=pred)\n', (862, 890), False, 'from sklearn.metrics import accuracy_score\n'), ((2155, 2176), 'matplotlib.ticker.MultipleLocator', 'MultipleLocator', (['(0.05)'], {}), '(0.05)\n', (2170, 2176), False, 'from matplotlib.ticker import MultipleLocator, FormatStrFormatter\n'), ((2199, 2225), 'matplotlib.ticker.FormatStrFormatter', 'FormatStrFormatter', (['"""%.2f"""'], {}), "('%.2f')\n", (2217, 2225), False, 'from matplotlib.ticker import MultipleLocator, FormatStrFormatter\n'), ((2247, 2269), 'matplotlib.ticker.MultipleLocator', 'MultipleLocator', (['(0.025)'], {}), '(0.025)\n', (2262, 2269), False, 'from matplotlib.ticker import MultipleLocator, FormatStrFormatter\n'), ((2646, 2685), 'seaborn.set', 'sns.set', ([], {'color_codes': '(True)', 'font_scale': '(1)'}), '(color_codes=True, font_scale=1)\n', (2653, 2685), True, 'import seaborn as sns\n'), ((2690, 2742), 'seaborn.set_style', 'sns.set_style', (['"""whitegrid"""', "{'legend.frameon': True}"], {}), "('whitegrid', {'legend.frameon': True})\n", (2703, 2742), True, 'import seaborn as sns\n'), ((2747, 2770), 'seaborn.set_palette', 'sns.set_palette', (['"""deep"""'], {}), "('deep')\n", (2762, 2770), True, 'import seaborn as sns\n'), ((3370, 3386), 'matplotlib.pyplot.close', 
'plt.close', (['"""all"""'], {}), "('all')\n", (3379, 3386), True, 'import matplotlib.pyplot as plt\n'), ((3536, 3563), 'pandas.DataFrame', 'pd.DataFrame', (['probs_ser_lst'], {}), '(probs_ser_lst)\n', (3548, 3563), True, 'import pandas as pd\n'), ((3776, 3877), 'pandas.concat', 'pd.concat', (['[accs_ser, preds_df, probs_df, confs_df]'], {'axis': '(1)', 'keys': "['acc', 'pred', 'prob', 'conf']"}), "([accs_ser, preds_df, probs_df, confs_df], axis=1, keys=['acc',\n 'pred', 'prob', 'conf'])\n", (3785, 3877), True, 'import pandas as pd\n'), ((6818, 6868), 'pandas.read_csv', 'pd.read_csv', (['"""../out/alt-split_2560.csv"""'], {'sep': '"""\t"""'}), "('../out/alt-split_2560.csv', sep='\\t')\n", (6829, 6868), True, 'import pandas as pd\n'), ((2811, 2875), 'matplotlib.pyplot.subplots', 'plt.subplots', ([], {'ncols': 'length', 'figsize': '(5 * length, 5)', 'sharey': '(True)'}), '(ncols=length, figsize=(5 * length, 5), sharey=True)\n', (2823, 2875), True, 'import matplotlib.pyplot as plt\n'), ((2902, 2953), 'matplotlib.pyplot.subplots', 'plt.subplots', ([], {'nrows': 'length', 'figsize': '(5, 5 * length)'}), '(nrows=length, figsize=(5, 5 * length))\n', (2914, 2953), True, 'import matplotlib.pyplot as plt\n'), ((3286, 3296), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (3294, 3296), True, 'import matplotlib.pyplot as plt\n'), ((4363, 4373), 'os.listdir', 'listdir', (['d'], {}), '(d)\n', (4370, 4373), False, 'from os import listdir\n'), ((4663, 4719), 'pandas.concat', 'pd.concat', (['[df_dev, df_tst]'], {'axis': '(1)', 'keys': "['dev', 'tst']"}), "([df_dev, df_tst], axis=1, keys=['dev', 'tst'])\n", (4672, 4719), True, 'import pandas as pd\n'), ((5030, 5065), 'numpy.mean', 'np.mean', (["df['dev', 'acc', 0].values"], {}), "(df['dev', 'acc', 0].values)\n", (5037, 5065), True, 'import numpy as np\n'), ((5085, 5120), 'numpy.mean', 'np.mean', (["df['tst', 'acc', 0].values"], {}), "(df['tst', 'acc', 0].values)\n", (5092, 5120), True, 'import numpy as np\n'), ((6543, 6561), 
'pandas.DataFrame', 'pd.DataFrame', (['mtrx'], {}), '(mtrx)\n', (6555, 6561), True, 'import pandas as pd\n'), ((3657, 3672), 'numpy.abs', 'np.abs', (['(x - 0.5)'], {}), '(x - 0.5)\n', (3663, 3672), True, 'import numpy as np\n'), ((3716, 3757), 'sklearn.metrics.accuracy_score', 'accuracy_score', ([], {'y_true': 'y_true', 'y_pred': 'row'}), '(y_true=y_true, y_pred=row)\n', (3730, 3757), False, 'from sklearn.metrics import accuracy_score\n'), ((5372, 5431), 'pandas.set_option', 'pd.set_option', (['"""display.float_format"""', "(lambda x: '%.6f' % x)"], {}), "('display.float_format', lambda x: '%.6f' % x)\n", (5385, 5431), True, 'import pandas as pd\n'), ((4423, 4461), 're.match', 're.match', (["('^probabilities-' + 'dev')", 'f'], {}), "('^probabilities-' + 'dev', f)\n", (4431, 4461), False, 'import re\n'), ((4513, 4551), 're.match', 're.match', (["('^probabilities-' + 'tst')", 'f'], {}), "('^probabilities-' + 'tst', f)\n", (4521, 4551), False, 'import re\n'), ((3449, 3459), 'numpy.load', 'np.load', (['f'], {}), '(f)\n', (3456, 3459), True, 'import numpy as np\n'), ((5460, 5479), 'pandas.Series', 'pd.Series', (['dev_mean'], {}), '(dev_mean)\n', (5469, 5479), True, 'import pandas as pd\n'), ((5521, 5540), 'pandas.Series', 'pd.Series', (['tst_mean'], {}), '(tst_mean)\n', (5530, 5540), True, 'import pandas as pd\n')]
|
import os
import m5
from m5.objects import *
m5.util.addToPath('../common')
spec_dist = os.environ.get('M5_CPU2006', '/dist/m5/cpu2006')
binary_dir = spec_dist
data_dir = binary_dir
current_pid = 100
# 400.perlbench
def perlbench():
process = Process(pid=current_pid)
process.cwd = binary_dir + '400.perlbench/run/run_base_ref_amd64-m64-gcc42-nn.0000/'
process.executable = process.cwd + 'perlbench_base.amd64-m64-gcc42-nn'
process.cmd = [process.executable] + ['-I./lib', 'checkspam.pl', '2500', '5', '25', '11', '150', '1', '1', '1', '1']
return process
#401.bzip2
def bzip2():
global current_pid
process = Process(pid=current_pid)
current_pid = current_pid + 1
process.cwd = binary_dir + '401.bzip2/run/run_base_ref_amd64-m64-gcc42-nn.0000/'
process.executable = process.cwd +'bzip2_base.amd64-m64-gcc42-nn'
data = process.cwd+'input.program'
process.cmd = [process.executable] + [data, '280']
return process
#403.gcc
def gcc():
process = Process()
process.cwd = binary_dir + '403.gcc/run/run_base_ref_amd64-m64-gcc42-nn.0000/'
process.executable = process.cwd +'gcc_base.amd64-m64-gcc42-nn'
data = process.cwd +'166.i'
output = process.cwd +'166.s'
process.cmd = [process.executable] + [data]+['-o',output]
return process
#410.bwaves
def bwaves():
process = Process()
process.cwd = binary_dir + '410.bwaves/run/run_base_ref_amd64-m64-gcc42-nn.0000/'
process.executable = process.cwd +'bwaves_base.amd64-m64-gcc42-nn'
process.cmd = [process.executable]
return process
#416.gamess
def gamess():
prorcess=Process()
prorcess.cwd = binary_dir + '416.gamess/run/run_base_ref_amd64-m64-gcc42-nn.0000/'
prorcess.executable = prorcess.cwd + 'gamess_base.amd64-m64-gcc42-nn'
prorcess.cmd = [prorcess.executable]
prorcess.input= prorcess.cwd + 'cytosine.2.config'
return prorcess
#429.mcf
def mcf():
process = Process()
process.cwd = binary_dir + '429.mcf/run/run_base_ref_amd64-m64-gcc42-nn.0000/'
process.executable = process.cwd +'mcf_base.amd64-m64-gcc42-nn'
data = process.cwd+'inp.in'
process.cmd = [process.executable] + [data]
return process
#433.milc
def milc():
process=Process()
process.cwd = binary_dir + '433.milc/run/run_base_ref_amd64-m64-gcc42-nn.0000/'
process.executable = process.cwd +'milc_base.amd64-m64-gcc42-nn'
stdin=process.cwd +'su3imp.in'
process.cmd = [process.executable]
process.input=stdin
return process
#434.zeusmp
def zeusmp():
process=Process()
process.cwd = binary_dir+'434.zeusmp/run/run_base_ref_amd64-m64-gcc42-nn.0000/'
process.executable = process.cwd + 'zeusmp_base.amd64-m64-gcc42-nn'
process.cmd = [process.executable]
return process
#435.gromacs
def gromacs():
process = Process()
process.cwd = binary_dir+'435.gromacs/run/run_base_ref_amd64-m64-gcc42-nn.0000/'
process.executable = process.cwd +'gromacs_base.amd64-m64-gcc42-nn'
data = process.cwd +'gromacs.tpr'
process.cmd = [process.executable] + ['-silent','-deffnm',data,'-nice','0']
return process
#436.cactusADM
def cactusADM():
process = Process()
process.cwd = binary_dir+'436.cactusADM/run/run_base_ref_amd64-m64-gcc42-nn.0000/'
process.executable = process.cwd +'cactusADM_base.amd64-m64-gcc42-nn'
data = process.cwd+'benchADM.par'
process.cmd = [process.executable] + [data]
return process
# 437.leslie3d
def leslie3d():
process = Process()
process.cwd = binary_dir + '437.leslie3d/run/run_base_ref_amd64-m64-gcc42-nn.0000/'
process.executable = process.cwd + 'leslie3d_base.amd64-m64-gcc42-nn'
process.cmd = [process.executable]
process.input = process.cwd + 'leslie3d.in'
return process
#444.namd
def namd():
process = Process()
process.cwd = binary_dir + '444.namd/run/run_base_ref_amd64-m64-gcc42-nn.0000/'
process.executable = process.cwd +'namd_base.amd64-m64-gcc42-nn'
input= process.cwd +'namd.input'
process.cmd = [process.executable] + ['--input',input,'--iterations','38','--output','namd.out']
return process
#445.gobmk
def gobmk():
process=Process()
process.cwd = binary_dir + '445.gobmk/run/run_base_ref_amd64-m64-gcc42-nn.0000/'
process.executable = process.cwd +'gobmk_base.amd64-m64-gcc42-nn'
stdin= process.cwd +'nngs.tst'
process.cmd = [process.executable]+['--quiet','--mode','gtp']
process.input=stdin
return process
# 447.dealII TODO
#450.soplex
def soplex():
process=Process()
process.cwd = binary_dir + '450.soplex/run/run_base_ref_amd64-m64-gcc42-nn.0000/'
process.executable = process.cwd +'soplex_base.amd64-m64-gcc42-nn'
data= process.cwd +'ref.mps'
process.cmd = [process.executable]+['-m3500',data]
return process
#453.povray
def povray():
process=Process()
process.cwd = binary_dir + '453.povray/run/run_base_ref_amd64-m64-gcc42-nn.0000/'
process.executable = process.cwd +'povray_base.amd64-m64-gcc42-nn'
data = process.cwd +'SPEC-benchmark-ref.ini'
process.cmd = [process.executable]+[data]
return process
#454.calculix
def calculix():
process=Process()
process.cwd = binary_dir + '454.calculix/run/run_base_ref_amd64-m64-gcc42-nn.0000/'
process.executable = process.cwd + 'calculix_base.amd64-m64-gcc42-nn'
data = process.cwd +'hyperviscoplastic'
process.cmd = [process.executable]+['-i',data]
return process
#456.hmmer
def hmmer():
process=Process()
process.cwd = binary_dir + '456.hmmer/run/run_base_ref_amd64-m64-gcc42-nn.0000/'
process.executable = process.cwd +'hmmer_base.amd64-m64-gcc42-nn'
data = process.cwd +'retro.hmm'
process.cmd = [process.executable]+['--fixed', '0', '--mean', '500', '--num', '500000', '--sd', '350', '--seed', '0', data]
return process
#458.sjeng
def sjeng():
process=Process()
process.cwd = binary_dir + '458.sjeng/run/run_base_ref_amd64-m64-gcc42-nn.0000/'
process.executable = process.cwd +'sjeng_base.amd64-m64-gcc42-nn'
data= process.cwd +'ref.txt'
process.cmd = [process.executable]+[data]
return process
#459.GemsFDTD
def GemsFDTD():
process=Process()
process.cwd = binary_dir + '459.GemsFDTD/run/run_base_ref_amd64-m64-gcc42-nn.0000/'
process.executable = process.cwd +'GemsFDTD_base.amd64-m64-gcc42-nn'
process.cmd = [process.executable]
return process
#462.libquantum
def libquantum():
process=Process()
process.cwd = binary_dir + '462.libquantum/run/run_base_ref_amd64-m64-gcc42-nn.0000/'
process.executable = process.cwd +'libquantum_base.amd64-m64-gcc42-nn'
process.cmd = [process.executable],'1397','8'
return process
#464.h264ref
def h264ref():
process=Process()
process.cwd = binary_dir + '464.h264ref/run/run_base_ref_amd64-m64-gcc42-nn.0000/'
process.executable = process.cwd +'h264ref_base.amd64-m64-gcc42-nn'
data = process.cwd + 'foreman_ref_encoder_baseline.cfg'
process.cmd = [process.executable]+['-d',data]
return process
#470.lbm
def lbm():
process=Process()
process.cwd = binary_dir + '470.lbm/run/run_base_ref_amd64-m64-gcc42-nn.0000/'
process.executable = process.cwd +'lbm_base.amd64-m64-gcc42-nn'
data= process.cwd +'100_100_130_ldc.of'
process.cmd = [process.executable]+['3000', 'reference.dat', '0', '0' ,data]
return process
#471.omnetpp
def omnetpp():
process=Process()
process.cwd = binary_dir + '471.omnetpp/run/run_base_ref_amd64-m64-gcc42-nn.0000/'
process.executable = process.cwd +'omnetpp_base.amd64-m64-gcc42-nn'
data=process.cwd +'omnetpp.ini'
process.cmd = [process.executable]+[data]
return process
#473.astar
def astar():
process=Process()
process.cwd = binary_dir + '473.astar/run/run_base_ref_amd64-m64-gcc42-nn.0000/'
process.executable = process.cwd +'astar_base.amd64-m64-gcc42-nn'
process.cmd = [process.executable]+['BigLakes2048.cfg']
return process
#481.wrf
def wrf():
process=Process()
process.cwd = binary_dir + '481.wrf/run/run_base_ref_amd64-m64-gcc42-nn.0000/'
process.executable = process.cwd +'wrf_base.amd64-m64-gcc42-nn'
process.cmd = [process.executable]+['namelist.input']
return process
#482.sphinx3
def sphinx3():
process=Process()
process.cwd = binary_dir + '482.sphinx3/run/run_base_ref_amd64-m64-gcc42-nn.0000/'
process.executable = process.cwd +'sphinx_livepretend_base.amd64-m64-gcc42-nn'
process.cmd = [process.executable]+['ctlfile', '.', 'args.an4']
return process
#483.xalancbmk TODO
#998.specrand
def specrand_i():
process=Process()
process.cwd = binary_dir + '998.specrand/run/run_base_ref_amd64-m64-gcc42-nn.0000/'
process.executable = process.cwd + 'specrand_base.amd64-m64-gcc42-nn'
process.cmd = [process.executable] + ['1255432124','234923']
return process
#999.specrand
def specrand_f():
process=Process()
process.cwd = binary_dir + '999.specrand/run/run_base_ref_amd64-m64-gcc42-nn.0000/'
process.executable = process.cwd +'specrand_base.amd64-m64-gcc42-nn'
process.cmd = [process.executable] + ['1255432124','234923']
return process
|
[
"os.environ.get",
"m5.util.addToPath"
] |
[((47, 77), 'm5.util.addToPath', 'm5.util.addToPath', (['"""../common"""'], {}), "('../common')\n", (64, 77), False, 'import m5\n'), ((91, 139), 'os.environ.get', 'os.environ.get', (['"""M5_CPU2006"""', '"""/dist/m5/cpu2006"""'], {}), "('M5_CPU2006', '/dist/m5/cpu2006')\n", (105, 139), False, 'import os\n')]
|
# import libraries
import os
import argparse
import cudf
import mlflow
# define functions
def main():
# Set the columns and their datatypes for conversion and parsing
cols = ['Flight_Number_Reporting_Airline', 'Year', 'Quarter', 'Month', 'DayOfWeek', 'DOT_ID_Reporting_Airline', 'OriginCityMarketID', 'DestCityMarketID', 'DepTime', 'DepDelay', 'DepDel15', 'ArrTime', 'ArrDelay', 'ArrDel15', 'CRSDepTime', 'CRSArrTime', 'AirTime', 'Distance', 'Reporting_Airline', 'IATA_CODE_Reporting_Airline', 'Origin', 'OriginCityName', 'Dest', 'DestCityName', 'Cancelled']
dtypes = {'Flight_Number_Reporting_Airline': 'float32', 'Year': 'float32', 'Quarter': 'float32', 'Month': 'float32', 'DayOfWeek': 'float32', 'DOT_ID_Reporting_Airline': 'float32', 'OriginCityMarketID': 'float32', 'DestCityMarketID': 'float32', 'DepTime': 'float32', 'DepDelay': 'float32', 'DepDel15': 'int', 'ArrTime': 'float32', 'ArrDelay': 'float32', 'ArrDel15': 'int', 'CRSDepTime': 'float32', 'CRSArrTime': 'float32', 'AirTime': 'float32', 'Distance': 'float32', 'Reporting_Airline': 'str', 'IATA_CODE_Reporting_Airline': 'str', 'Origin': 'str', 'OriginCityName': 'str', 'Dest': 'str', 'DestCityName': 'str', 'Cancelled': 'str'}
categorical_columns = ['Flight_Number_Reporting_Airline', 'DepTime', 'ArrTime', 'CRSDepTime', 'CRSArrTime', 'Reporting_Airline', 'Origin', 'OriginCityName', 'Dest', 'DestCityName', 'Airline']
# Process the full dataset and save it to file
processed_data = process_data(cols, dtypes, categorical_columns)
count_rows = len(processed_data)
mlflow.log_metric("processed rows", count_rows)
processed_data.to_csv('outputs/processed_data.csv', index=False)
# Define a function to process an entire dataset
def process_data(cols, dtypes, categorical_columns):
# Ingest - Read the CSV files into the DataFrame
data = cudf.read_csv('./data/airlines_raw.csv', cols=cols, dtypes=dtypes)[cols] # Read in data, ignoring any column not in cols
carriers = cudf.read_csv('./data/carriers.csv')
airports = cudf.read_csv('./data/airports.csv', usecols=['iata_code', 'latitude_deg', 'longitude_deg', 'elevation_ft'])
# Merge - Combine the external data with the airline data
data = cudf.merge(data, carriers, left_on='IATA_CODE_Reporting_Airline', right_on='Code', how='left')
data = cudf.merge(data, airports, left_on='Dest', right_on='iata_code', how='left')
data = cudf.merge(data, airports, left_on='Origin', right_on='iata_code', how='left')
# Rename - Add clarity to the combined dataset by renaming columns
data = data.rename(columns= { 'latitude_deg_x' : 'dest_lat', 'longitude_deg_x': 'dest_long',
'latitude_deg_y' : 'origin_lat', 'longitude_deg_y': 'origin_long',
'elevation_ft_x' : 'dest_elevation', 'elevation_ft_y' : 'origin_elevation',
'Description' : 'Airline'})
# Remove duplicates columns
data = data.drop(['iata_code_x', 'iata_code_y','IATA_CODE_Reporting_Airline', 'Code'], axis=1)
print(f'Added the following columns/features:\n { set(data.columns).difference(cols) }\n')
print(f'Data currently has {data.shape[0]} rows and {data.shape[1]} columns\n')
# Remove rows missing data
data = data.dropna()
print(f'Dropping rows with missing or NA values, data now has {data.shape[0]} rows and {data.shape[1]} columns\n')
# Encode - Convert human-readable names to corresponding computer-readable integers
encodings, mappings = data['OriginCityName'].factorize() # encode/categorize a sample feature
print("Example encoding:")
numeric_columns = []
for colname in data.columns:
if colname in categorical_columns:
values = data[colname].astype('category').cat.codes.astype('float32')
colname = 'enc_' + colname
data.insert(0, colname, values)
numeric_columns += [colname]
print(list(zip(data['OriginCityName'][0:3].values_host, encodings[0:3])))
# Remove redundant, surrogate, and unwanted columns from the data
remove_cols = set (['Year', 'Cancelled', 'DOT_ID_Reporting_Airline', 'enc_IATA_CODE_Reporting_Airline', 'ArrTime']);
output_columns = list(set(numeric_columns).difference(remove_cols))
# Add back additional columns that are used for data visualization, but not training
output_columns = output_columns + ['OriginCityName', 'DestCityName']
data = data[output_columns]
print(f'Encoded and removed extra columns, data now has {data.shape[0]} rows and {data.shape[1]} columns\n')
print(f'Removed: {remove_cols}')
print(f'Returning: {output_columns}')
return data
# run script
if __name__ == "__main__":
# add space in logs
print("\n\n")
print("*" * 60)
# run main function
main()
# add space in logs
print("*" * 60)
print("\n\n")
|
[
"cudf.read_csv",
"mlflow.log_metric",
"cudf.merge"
] |
[((1574, 1621), 'mlflow.log_metric', 'mlflow.log_metric', (['"""processed rows"""', 'count_rows'], {}), "('processed rows', count_rows)\n", (1591, 1621), False, 'import mlflow\n'), ((2008, 2044), 'cudf.read_csv', 'cudf.read_csv', (['"""./data/carriers.csv"""'], {}), "('./data/carriers.csv')\n", (2021, 2044), False, 'import cudf\n'), ((2060, 2172), 'cudf.read_csv', 'cudf.read_csv', (['"""./data/airports.csv"""'], {'usecols': "['iata_code', 'latitude_deg', 'longitude_deg', 'elevation_ft']"}), "('./data/airports.csv', usecols=['iata_code', 'latitude_deg',\n 'longitude_deg', 'elevation_ft'])\n", (2073, 2172), False, 'import cudf\n'), ((2264, 2363), 'cudf.merge', 'cudf.merge', (['data', 'carriers'], {'left_on': '"""IATA_CODE_Reporting_Airline"""', 'right_on': '"""Code"""', 'how': '"""left"""'}), "(data, carriers, left_on='IATA_CODE_Reporting_Airline', right_on=\n 'Code', how='left')\n", (2274, 2363), False, 'import cudf\n'), ((2370, 2446), 'cudf.merge', 'cudf.merge', (['data', 'airports'], {'left_on': '"""Dest"""', 'right_on': '"""iata_code"""', 'how': '"""left"""'}), "(data, airports, left_on='Dest', right_on='iata_code', how='left')\n", (2380, 2446), False, 'import cudf\n'), ((2458, 2536), 'cudf.merge', 'cudf.merge', (['data', 'airports'], {'left_on': '"""Origin"""', 'right_on': '"""iata_code"""', 'how': '"""left"""'}), "(data, airports, left_on='Origin', right_on='iata_code', how='left')\n", (2468, 2536), False, 'import cudf\n'), ((1872, 1938), 'cudf.read_csv', 'cudf.read_csv', (['"""./data/airlines_raw.csv"""'], {'cols': 'cols', 'dtypes': 'dtypes'}), "('./data/airlines_raw.csv', cols=cols, dtypes=dtypes)\n", (1885, 1938), False, 'import cudf\n')]
|
# Databricks notebook source
import numpy as np
import pandas as pd
from scipy import stats
# COMMAND ----------
# Simulate original ice cream dataset
df = pd.DataFrame()
df['temperature'] = np.random.uniform(60, 80, 1000)
df['number_of_cones_sold'] = np.random.uniform(0, 20, 1000)
flavors = ["Vanilla"] * 300 + ['Chocolate'] * 200 + ['Cookie Dough'] * 300 + ['Coffee'] * 200
np.random.shuffle(flavors)
df['most_popular_ice_cream_flavor'] = flavors
df['number_bowls_sold'] = np.random.uniform(0, 20, 1000)
sorbet = ["Raspberry "] * 250 + ['Lemon'] * 250 + ['Lime'] * 250 + ['Orange'] * 250
np.random.shuffle(sorbet)
df['most_popular_sorbet_flavor'] = sorbet
df['total_store_sales'] = np.random.normal(100, 10, 1000)
df['total_sales_predicted'] = np.random.normal(100, 10, 1000)
# Simulate new ice cream dataset
df2 = pd.DataFrame()
df2['temperature'] = (df['temperature'] - 32) * (5/9) # F -> C
df2['number_of_cones_sold'] = np.random.uniform(0, 20, 1000) #stay same
flavors = ["Vanilla"] * 100 + ['Chocolate'] * 300 + ['Cookie Dough'] * 400 + ['Coffee'] * 200
np.random.shuffle(flavors)
df2['most_popular_ice_cream_flavor'] = flavors
df2['number_bowls_sold'] = np.random.uniform(10, 30, 1000)
sorbet = ["Raspberry "] * 200 + ['Lemon'] * 200 + ['Lime'] * 200 + ['Orange'] * 200 + [None] * 200
np.random.shuffle(sorbet)
df2['most_popular_sorbet_flavor'] = sorbet
df2['total_store_sales'] = np.random.normal(150, 10, 1000) # increased
df2['total_sales_predicted'] = np.random.normal(80, 10, 1000) # decreased
|
[
"pandas.DataFrame",
"numpy.random.uniform",
"numpy.random.normal",
"numpy.random.shuffle"
] |
[((160, 174), 'pandas.DataFrame', 'pd.DataFrame', ([], {}), '()\n', (172, 174), True, 'import pandas as pd\n'), ((195, 226), 'numpy.random.uniform', 'np.random.uniform', (['(60)', '(80)', '(1000)'], {}), '(60, 80, 1000)\n', (212, 226), True, 'import numpy as np\n'), ((256, 286), 'numpy.random.uniform', 'np.random.uniform', (['(0)', '(20)', '(1000)'], {}), '(0, 20, 1000)\n', (273, 286), True, 'import numpy as np\n'), ((382, 408), 'numpy.random.shuffle', 'np.random.shuffle', (['flavors'], {}), '(flavors)\n', (399, 408), True, 'import numpy as np\n'), ((481, 511), 'numpy.random.uniform', 'np.random.uniform', (['(0)', '(20)', '(1000)'], {}), '(0, 20, 1000)\n', (498, 511), True, 'import numpy as np\n'), ((597, 622), 'numpy.random.shuffle', 'np.random.shuffle', (['sorbet'], {}), '(sorbet)\n', (614, 622), True, 'import numpy as np\n'), ((691, 722), 'numpy.random.normal', 'np.random.normal', (['(100)', '(10)', '(1000)'], {}), '(100, 10, 1000)\n', (707, 722), True, 'import numpy as np\n'), ((753, 784), 'numpy.random.normal', 'np.random.normal', (['(100)', '(10)', '(1000)'], {}), '(100, 10, 1000)\n', (769, 784), True, 'import numpy as np\n'), ((825, 839), 'pandas.DataFrame', 'pd.DataFrame', ([], {}), '()\n', (837, 839), True, 'import pandas as pd\n'), ((933, 963), 'numpy.random.uniform', 'np.random.uniform', (['(0)', '(20)', '(1000)'], {}), '(0, 20, 1000)\n', (950, 963), True, 'import numpy as np\n'), ((1069, 1095), 'numpy.random.shuffle', 'np.random.shuffle', (['flavors'], {}), '(flavors)\n', (1086, 1095), True, 'import numpy as np\n'), ((1170, 1201), 'numpy.random.uniform', 'np.random.uniform', (['(10)', '(30)', '(1000)'], {}), '(10, 30, 1000)\n', (1187, 1201), True, 'import numpy as np\n'), ((1301, 1326), 'numpy.random.shuffle', 'np.random.shuffle', (['sorbet'], {}), '(sorbet)\n', (1318, 1326), True, 'import numpy as np\n'), ((1397, 1428), 'numpy.random.normal', 'np.random.normal', (['(150)', '(10)', '(1000)'], {}), '(150, 10, 1000)\n', (1413, 1428), True, 'import numpy 
as np\n'), ((1472, 1502), 'numpy.random.normal', 'np.random.normal', (['(80)', '(10)', '(1000)'], {}), '(80, 10, 1000)\n', (1488, 1502), True, 'import numpy as np\n')]
|
# -*- coding: utf-8 -*-
from PyQt5.QtWidgets import QVBoxLayout, QDialog, QLineEdit, QHBoxLayout, QPushButton, QProgressDialog
from PyQt5.QtGui import QIcon
from PyQt5.QtWidgets import QMessageBox
from PyQt5.QtCore import pyqtSignal, Qt
from network import Server, Client
import logging
import socket
class ConnectionWindow(QDialog):
got_connection = pyqtSignal()
def __init__(self):
super().__init__()
self.setWindowFlags(self.windowFlags() ^ Qt.WindowContextHelpButtonHint)
self.setWindowIcon(QIcon(':/graphics\internet.png'))
self.setWindowTitle("Multiplayer")
self.resize(400, 300)
grid = QVBoxLayout()
grid.addLayout(self.client())
grid.addLayout(self.server())
grid.addStretch(1)
self.setLayout(grid)
self.waiting_window = None
self.connection = None
def server(self):
self.server_ip_address = QLineEdit(socket.gethostbyname(socket.gethostname()))
self.server_port = QLineEdit("25565")
self.server_port.setMaximumWidth(80)
self.server_password = QLineEdit("password")
host_button = QPushButton("Host")
host_button.clicked.connect(self.host_button_clicked)
hbox = QHBoxLayout()
hbox.addWidget(self.server_ip_address)
hbox.addWidget(self.server_port)
hbox.addWidget(self.server_password)
hbox.addWidget(host_button)
return hbox
def host_button_clicked(self):
if self.connection:
return
self.connection = Server()
self.waiting_window = QProgressDialog("Waiting for network...", "Cancel", 0, 0)
self.waiting_window.setWindowTitle("Waiting")
self.waiting_window.setWindowIcon(QIcon(':/graphics\internet.png'))
self.waiting_window.setWindowFlags(self.waiting_window.windowFlags() ^ Qt.WindowContextHelpButtonHint)
self.connection.got_connection.connect(self.waiting_window.deleteLater)
self.connection.got_connection.connect(self.got_connection)
self.connection.got_connection.connect(self.deleteLater)
self.connection.connection_error.connect(self.connection_error)
self.connection.connection_error.connect(self.waiting_window.deleteLater)
self.waiting_window.canceled.connect(self.connection.close)
self.connection.start(self.server_ip_address.text(), self.server_port.text(), self.server_password.text())
self.waiting_window.exec()
def client(self):
self.client_ip_address = QLineEdit(socket.gethostbyname(socket.gethostname()))
self.client_port = QLineEdit("25565")
self.client_port.setMaximumWidth(80)
self.client_password = QLineEdit("password")
connect_button = QPushButton("Connect")
connect_button.clicked.connect(self.connect_button_clicked)
hbox = QHBoxLayout()
hbox.addWidget(self.client_ip_address)
hbox.addWidget(self.client_port)
hbox.addWidget(self.client_password)
hbox.addWidget(connect_button)
return hbox
def connect_button_clicked(self):
if self.connection:
return
self.connection = Client()
self.waiting_window = QProgressDialog("Waiting for server...", "Cancel", 0, 0)
self.waiting_window.setWindowTitle("Connecting")
self.waiting_window.setWindowFlags(self.waiting_window.windowFlags() ^ Qt.WindowContextHelpButtonHint)
self.waiting_window.setWindowIcon(QIcon(':/graphics\internet.png'))
self.connection.got_connection.connect(self.waiting_window.close)
self.connection.got_connection.connect(self.got_connection)
self.connection.got_connection.connect(self.deleteLater)
self.connection.connection_error.connect(self.connection_error)
self.connection.connection_error.connect(self.waiting_window.deleteLater)
self.connection.start(self.client_ip_address.text(), self.client_port.text(), self.client_password.text())
self.waiting_window.exec()
def connection_error(self, err):
QMessageBox.warning(self, 'Connection Error', " "+err+" ")
|
[
"PyQt5.QtCore.pyqtSignal",
"PyQt5.QtWidgets.QProgressDialog",
"PyQt5.QtGui.QIcon",
"network.Client",
"PyQt5.QtWidgets.QLineEdit",
"PyQt5.QtWidgets.QHBoxLayout",
"PyQt5.QtWidgets.QPushButton",
"PyQt5.QtWidgets.QMessageBox.warning",
"PyQt5.QtWidgets.QVBoxLayout",
"socket.gethostname",
"network.Server"
] |
[((359, 371), 'PyQt5.QtCore.pyqtSignal', 'pyqtSignal', ([], {}), '()\n', (369, 371), False, 'from PyQt5.QtCore import pyqtSignal, Qt\n'), ((655, 668), 'PyQt5.QtWidgets.QVBoxLayout', 'QVBoxLayout', ([], {}), '()\n', (666, 668), False, 'from PyQt5.QtWidgets import QVBoxLayout, QDialog, QLineEdit, QHBoxLayout, QPushButton, QProgressDialog\n'), ((1006, 1024), 'PyQt5.QtWidgets.QLineEdit', 'QLineEdit', (['"""25565"""'], {}), "('25565')\n", (1015, 1024), False, 'from PyQt5.QtWidgets import QVBoxLayout, QDialog, QLineEdit, QHBoxLayout, QPushButton, QProgressDialog\n'), ((1102, 1123), 'PyQt5.QtWidgets.QLineEdit', 'QLineEdit', (['"""password"""'], {}), "('password')\n", (1111, 1123), False, 'from PyQt5.QtWidgets import QVBoxLayout, QDialog, QLineEdit, QHBoxLayout, QPushButton, QProgressDialog\n'), ((1147, 1166), 'PyQt5.QtWidgets.QPushButton', 'QPushButton', (['"""Host"""'], {}), "('Host')\n", (1158, 1166), False, 'from PyQt5.QtWidgets import QVBoxLayout, QDialog, QLineEdit, QHBoxLayout, QPushButton, QProgressDialog\n'), ((1246, 1259), 'PyQt5.QtWidgets.QHBoxLayout', 'QHBoxLayout', ([], {}), '()\n', (1257, 1259), False, 'from PyQt5.QtWidgets import QVBoxLayout, QDialog, QLineEdit, QHBoxLayout, QPushButton, QProgressDialog\n'), ((1558, 1566), 'network.Server', 'Server', ([], {}), '()\n', (1564, 1566), False, 'from network import Server, Client\n'), ((1598, 1655), 'PyQt5.QtWidgets.QProgressDialog', 'QProgressDialog', (['"""Waiting for network..."""', '"""Cancel"""', '(0)', '(0)'], {}), "('Waiting for network...', 'Cancel', 0, 0)\n", (1613, 1655), False, 'from PyQt5.QtWidgets import QVBoxLayout, QDialog, QLineEdit, QHBoxLayout, QPushButton, QProgressDialog\n'), ((2624, 2642), 'PyQt5.QtWidgets.QLineEdit', 'QLineEdit', (['"""25565"""'], {}), "('25565')\n", (2633, 2642), False, 'from PyQt5.QtWidgets import QVBoxLayout, QDialog, QLineEdit, QHBoxLayout, QPushButton, QProgressDialog\n'), ((2720, 2741), 'PyQt5.QtWidgets.QLineEdit', 'QLineEdit', (['"""password"""'], {}), 
"('password')\n", (2729, 2741), False, 'from PyQt5.QtWidgets import QVBoxLayout, QDialog, QLineEdit, QHBoxLayout, QPushButton, QProgressDialog\n'), ((2768, 2790), 'PyQt5.QtWidgets.QPushButton', 'QPushButton', (['"""Connect"""'], {}), "('Connect')\n", (2779, 2790), False, 'from PyQt5.QtWidgets import QVBoxLayout, QDialog, QLineEdit, QHBoxLayout, QPushButton, QProgressDialog\n'), ((2876, 2889), 'PyQt5.QtWidgets.QHBoxLayout', 'QHBoxLayout', ([], {}), '()\n', (2887, 2889), False, 'from PyQt5.QtWidgets import QVBoxLayout, QDialog, QLineEdit, QHBoxLayout, QPushButton, QProgressDialog\n'), ((3194, 3202), 'network.Client', 'Client', ([], {}), '()\n', (3200, 3202), False, 'from network import Server, Client\n'), ((3234, 3290), 'PyQt5.QtWidgets.QProgressDialog', 'QProgressDialog', (['"""Waiting for server..."""', '"""Cancel"""', '(0)', '(0)'], {}), "('Waiting for server...', 'Cancel', 0, 0)\n", (3249, 3290), False, 'from PyQt5.QtWidgets import QVBoxLayout, QDialog, QLineEdit, QHBoxLayout, QPushButton, QProgressDialog\n'), ((4095, 4167), 'PyQt5.QtWidgets.QMessageBox.warning', 'QMessageBox.warning', (['self', '"""Connection Error"""', "(' ' + err + ' ')"], {}), "(self, 'Connection Error', ' ' + err + ' ')\n", (4114, 4167), False, 'from PyQt5.QtWidgets import QMessageBox\n'), ((532, 565), 'PyQt5.QtGui.QIcon', 'QIcon', (['""":/graphics\\\\internet.png"""'], {}), "(':/graphics\\\\internet.png')\n", (537, 565), False, 'from PyQt5.QtGui import QIcon\n'), ((1752, 1785), 'PyQt5.QtGui.QIcon', 'QIcon', (['""":/graphics\\\\internet.png"""'], {}), "(':/graphics\\\\internet.png')\n", (1757, 1785), False, 'from PyQt5.QtGui import QIcon\n'), ((3501, 3534), 'PyQt5.QtGui.QIcon', 'QIcon', (['""":/graphics\\\\internet.png"""'], {}), "(':/graphics\\\\internet.png')\n", (3506, 3534), False, 'from PyQt5.QtGui import QIcon\n'), ((955, 975), 'socket.gethostname', 'socket.gethostname', ([], {}), '()\n', (973, 975), False, 'import socket\n'), ((2573, 2593), 'socket.gethostname', 'socket.gethostname', 
([], {}), '()\n', (2591, 2593), False, 'import socket\n')]
|
from __future__ import division
__author__ = '<NAME>'
import numpy as np
from scipy.stats import norm
import string
import bottleneck as bn
import math
# paa tranformation, window = incoming data, string_length = length of outcoming data
class sax():
def process(self, window, output_length, sax_vocab):
sax = to_sax(to_paa(normalize(window),output_length),sax_vocab)
#return vocabToCoordinates(len(window),output_length,sax[0],4)
return vocabToCoordinates(output_length,output_length,sax[0],sax_vocab)
def getConfigurationParams(self):
return {"output_length":"100","sax_vocab":"4"}
def normalize(data):
data2 = np.array(data)
data2 = data2 - (np.mean(data))
data2 = data2 /data2.std()
return data2
def to_paa(data,string_length):
data = np.array_split(data, string_length)
return [np.mean(section) for section in data]
def gen_breakpoints(symbol_count):
breakpoints = norm.ppf(np.linspace(1. / symbol_count, 1 - 1. / symbol_count, symbol_count - 1))
breakpoints = np.concatenate((breakpoints, np.array([np.Inf])))
return breakpoints
def to_sax(data,symbol_count):
breakpoints = gen_breakpoints(symbol_count)
locations = [np.where(breakpoints > section_mean)[0][0] for section_mean in data]
return [''.join([string.ascii_letters[ind] for ind in locations])]
def vocabToCoordinates(time_window, phrase_length, phrases, symbol_count):
breakpoints = gen_breakpoints(symbol_count)
newCutlines = breakpoints.tolist()
max_value = breakpoints[symbol_count - 2] + ((breakpoints[symbol_count - 2] - breakpoints[symbol_count - 3]) * 2)
# HERE IS SOMETHING WRONG // ONLY IN VISUALISATION
min_value = breakpoints[0] - ((breakpoints[1] - breakpoints[0]) * 2)
infi = newCutlines.pop()
newCutlines.append(max_value)
newCutlines.append(infi)
newCutlines.insert(0, min_value)
#newCutlines.insert(0,-np.Inf)
co1 = time_window / float(phrase_length)
g = 0
retList = []
for s in phrases:
if s is "#":
for i in range(int(co1)):
retList.append(np.NaN)
g+=1
else:
for i in range(int(co1)):
retList.append(newCutlines[ord(s) - 97])
g+=1
#print co1,time_window,phrase_length,g,len(phrases)
return retList
def convertSaxBackToContinious(string_length, symbol_count, data):
points, phrases = norm(data,string_length, symbol_count)
retList = vocabToCoordinates(data, string_length, phrases, points, symbol_count)
#print phrases[0]
return retList
def saxDistance(w1, w2,original_length,symbol_count):
if len(w1) != len(w2):
raise Exception("not equal string length")
string_length=len(w1)
dist = 0
for (l, k) in zip(w1, w2):
dist += saxDistanceLetter(l, k,symbol_count)
result = np.sqrt(dist) * np.sqrt(np.divide(original_length, string_length))
return result
def saxDistanceLetter(w1, w2, symbol_count):
n1 = ord(w1) - 97
n2 = ord(w2) - 97
lookupTable= createLookup(symbol_count,gen_breakpoints(symbol_count))
if n1 > symbol_count:
raise Exception(" letter not in Dictionary " + w1)
if n2 > symbol_count:
raise Exception(" letter not in Dictionary " + w2)
return lookupTable[n1][n2]
def createLookup(symbol_count, breakpoints):
return make_matrix(symbol_count, symbol_count, breakpoints)
def make_list(row, size, breakpoints):
mylist = []
for i in range(size):
i = i + 1
if abs(row - i) <= 1:
mylist.append(0)
else:
v = breakpoints[(max(row, i) - 2)] - breakpoints[min(row, i) - 1]
mylist.append(v)
return mylist
def make_matrix(rows, cols, breakpoints):
matrix = []
for i in range(rows):
i = i + 1
matrix.append(make_list(i, cols, breakpoints))
return matrix
|
[
"numpy.divide",
"scipy.stats.norm",
"numpy.mean",
"numpy.array",
"numpy.where",
"numpy.linspace",
"numpy.array_split",
"numpy.sqrt"
] |
[((661, 675), 'numpy.array', 'np.array', (['data'], {}), '(data)\n', (669, 675), True, 'import numpy as np\n'), ((804, 839), 'numpy.array_split', 'np.array_split', (['data', 'string_length'], {}), '(data, string_length)\n', (818, 839), True, 'import numpy as np\n'), ((2446, 2485), 'scipy.stats.norm', 'norm', (['data', 'string_length', 'symbol_count'], {}), '(data, string_length, symbol_count)\n', (2450, 2485), False, 'from scipy.stats import norm\n'), ((697, 710), 'numpy.mean', 'np.mean', (['data'], {}), '(data)\n', (704, 710), True, 'import numpy as np\n'), ((853, 869), 'numpy.mean', 'np.mean', (['section'], {}), '(section)\n', (860, 869), True, 'import numpy as np\n'), ((955, 1028), 'numpy.linspace', 'np.linspace', (['(1.0 / symbol_count)', '(1 - 1.0 / symbol_count)', '(symbol_count - 1)'], {}), '(1.0 / symbol_count, 1 - 1.0 / symbol_count, symbol_count - 1)\n', (966, 1028), True, 'import numpy as np\n'), ((2880, 2893), 'numpy.sqrt', 'np.sqrt', (['dist'], {}), '(dist)\n', (2887, 2893), True, 'import numpy as np\n'), ((1075, 1093), 'numpy.array', 'np.array', (['[np.Inf]'], {}), '([np.Inf])\n', (1083, 1093), True, 'import numpy as np\n'), ((2904, 2945), 'numpy.divide', 'np.divide', (['original_length', 'string_length'], {}), '(original_length, string_length)\n', (2913, 2945), True, 'import numpy as np\n'), ((1218, 1254), 'numpy.where', 'np.where', (['(breakpoints > section_mean)'], {}), '(breakpoints > section_mean)\n', (1226, 1254), True, 'import numpy as np\n')]
|
import komand
from .schema import GetFileInput, GetFileOutput
# Custom imports below
from komand_get_url.util.utils import Utils
class GetFile(komand.Action):
    """Komand action that downloads a file by URL, with ETag/Last-Modified caching."""

    def __init__(self):
        super(self.__class__, self).__init__(
            name='get_file',
            description='Download a file by URL',
            input=GetFileInput(),
            output=GetFileOutput())

    def run(self, params={}):
        """Download the file at ``params['url']``, serving from the on-disk
        cache when the remote copy is unchanged.

        Optional params: ``checksum`` (integrity check), ``timeout`` (seconds,
        default 60) and ``is_verify`` (TLS verification, default True).
        Raises Exception when the download or checksum fails.
        """
        utils = Utils(action=self)
        url = params.get('url')
        checksum = params.get('checksum')
        tout = params.get('timeout', 60)
        is_verify = params.get('is_verify', True)
        # Check for supported url prefix
        utils.validate_url(url)
        meta = utils.hash_url(url)
        cache_file = '/var/cache/' + meta['file']
        # Attempt to retrieve headers from a past request so the server can
        # answer "not modified" instead of resending the body.
        headers = {}
        if komand.helper.check_cachefile(meta['metafile']):
            headers = utils.check_url_meta_file(meta)
        # Download file (conditional request via ETag / Last-Modified)
        urlobj = komand.helper.open_url(
            url, timeout=tout, verify=is_verify,
            If_None_Match=headers.get('etag', ''),
            If_Modified_Since=headers.get('last-modified', ''))
        if urlobj:
            contents = urlobj.read()
            # Optional integrity check of file
            if checksum:
                if not komand.helper.check_hashes(contents, checksum):
                    self.logger.error('GetFile: File Checksum Failed')
                    raise Exception('GetURL Failed')
            # Write etag and last modified to cache
            utils.create_url_meta_file(meta, urlobj)
            # Write URL file contents to cache
            f = komand.helper.open_cachefile(cache_file)
            f.write(contents)
            f.close()
            # Only 2xx responses are success.  BUGFIX: the original tested
            # `code >= 200 or code <= 299`, which is true for every status.
            if 200 <= urlobj.code <= 299:
                f = komand.helper.encode_string(contents)
                if f:
                    return {'file': f, 'status_code': urlobj.code or 200}
        # When the download fails or file is not modified
        if urlobj is None:
            # Attempt to return file from cache if available
            self.logger.info('GetURL: File not modified: %s', url)
            if komand.helper.check_cachefile(cache_file):
                f = komand.helper.encode_file(cache_file)
                self.logger.info('GetURL: File returned from cache: %s', cache_file)
                # NOTE(review): this branch returns the payload under 'bytes'
                # while the fresh-download branch uses 'file' -- confirm
                # against GetFileOutput's schema before changing.
                return {'bytes': f, 'status_code': 200}
        # If file hasn't been returned then we fail
        self.logger.info('GetURL: Download failed for %s', url)
        raise Exception('GetURL Failed')

    def test(self, params={}):
        """Connectivity self-test required by the Komand plugin framework."""
        url = 'https://www.google.com'
        komand.helper.check_url(url)
        return {}
|
[
"komand.helper.open_cachefile",
"komand.helper.check_cachefile",
"komand_get_url.util.utils.Utils",
"komand.helper.check_hashes",
"komand.helper.check_url",
"komand.helper.encode_file",
"komand.helper.encode_string"
] |
[((428, 446), 'komand_get_url.util.utils.Utils', 'Utils', ([], {'action': 'self'}), '(action=self)\n', (433, 446), False, 'from komand_get_url.util.utils import Utils\n'), ((861, 908), 'komand.helper.check_cachefile', 'komand.helper.check_cachefile', (["meta['metafile']"], {}), "(meta['metafile'])\n", (890, 908), False, 'import komand\n'), ((2763, 2791), 'komand.helper.check_url', 'komand.helper.check_url', (['url'], {}), '(url)\n', (2786, 2791), False, 'import komand\n'), ((1689, 1729), 'komand.helper.open_cachefile', 'komand.helper.open_cachefile', (['cache_file'], {}), '(cache_file)\n', (1717, 1729), False, 'import komand\n'), ((2284, 2325), 'komand.helper.check_cachefile', 'komand.helper.check_cachefile', (['cache_file'], {}), '(cache_file)\n', (2313, 2325), False, 'import komand\n'), ((1921, 1958), 'komand.helper.encode_string', 'komand.helper.encode_string', (['contents'], {}), '(contents)\n', (1948, 1958), False, 'import komand\n'), ((2347, 2384), 'komand.helper.encode_file', 'komand.helper.encode_file', (['cache_file'], {}), '(cache_file)\n', (2372, 2384), False, 'import komand\n'), ((1347, 1393), 'komand.helper.check_hashes', 'komand.helper.check_hashes', (['contents', 'checksum'], {}), '(contents, checksum)\n', (1373, 1393), False, 'import komand\n')]
|
import tempfile
import os
from PIL import Image
from django.contrib.auth import get_user_model
from django.test import TestCase
from django.urls import reverse
from rest_framework import status
from rest_framework.test import APIClient
from core.models import Recipe, Tag, Ingredient
from recipe.serializers import RecipeSerializer, RecipeDetailSerializer
# List/create endpoint for recipes; per-recipe URLs are built by the helpers below.
RECIPE_URL = reverse('recipe:recipe-list')
def image_upload_url(recipe_id):
    """Return the image-upload URL for the recipe with the given id."""
    upload_url = reverse('recipe:recipe-upload-image', args=[recipe_id])
    return upload_url
def detail_url(recipe_id):
    """Return the detail URL for the recipe with the given id."""
    url = reverse('recipe:recipe-detail', args=[recipe_id])
    return url
def sample_tag(user, name="default tag"):
    """Create and return a Tag owned by ``user``."""
    tag = Tag.objects.create(user=user, name=name)
    return tag
def sample_ingredient(user, name="default ingredient"):
    """Create and return an Ingredient owned by ``user``."""
    ingredient = Ingredient.objects.create(user=user, name=name)
    return ingredient
def sample_recipe(user, **params):
    """Create and return a sample recipe; keyword args override the defaults."""
    fields = {
        'title': 'Sample Recipe',
        'time_minutes': 10,
        'price': 5.00,
    }
    # Later keys win, so caller-supplied params override the defaults.
    return Recipe.objects.create(user=user, **{**fields, **params})
class PublicRecipeAPITests(TestCase):
    """Tests for the recipe API without authentication."""

    def setUp(self):
        self.client = APIClient()

    def test_auth_required(self):
        """An unauthenticated request to the list endpoint is rejected."""
        res = self.client.get(RECIPE_URL)
        self.assertEqual(res.status_code, status.HTTP_401_UNAUTHORIZED)
class PrivateRecipeAPITest(TestCase):
    """Test authed recipe endpoints"""
    def setUp(self):
        # Every test in this class runs as this authenticated user.
        self.client = APIClient()
        self.user = get_user_model().objects.create_user(
            '<EMAIL>',
            'pestPW'
        )
        self.client.force_authenticate(self.user)
    def test_retrieve_recipes(self):
        """Listing returns the user's recipes, serialized, newest first."""
        sample_recipe(user=self.user)
        sample_recipe(user=self.user, title="Another Recipe")
        response = self.client.get(RECIPE_URL)
        recipes = Recipe.objects.all().order_by('-id')
        serializer = RecipeSerializer(recipes, many=True)
        self.assertEqual(response.status_code, status.HTTP_200_OK)
        self.assertEqual(response.data, serializer.data)
    def test_recipes_limited_to_user(self):
        """test that only authed user recipes returned"""
        user_2 = get_user_model().objects.create_user(
            '<EMAIL>',
            'otherPW'
        )
        # Two recipes belong to the other user, one to the authed user.
        sample_recipe(user=user_2)
        sample_recipe(user=user_2, title="Another Recipe")
        sample_recipe(user=self.user)
        response = self.client.get(RECIPE_URL)
        recipes = Recipe.objects.filter(user=self.user)
        serializer = RecipeSerializer(recipes, many=True)
        self.assertEqual(response.status_code, status.HTTP_200_OK)
        self.assertEqual(len(response.data), 1)
        self.assertEqual(response.data, serializer.data)
    def test_view_recipe_detail(self):
        """test viewing recipe detail"""
        recipe = sample_recipe(user=self.user)
        recipe.tags.add(sample_tag(user=self.user))
        recipe.ingredients.add(sample_ingredient(user=self.user))
        url = detail_url(recipe.id)
        response = self.client.get(url)
        serialiser = RecipeDetailSerializer(recipe)
        self.assertEqual(response.data, serialiser.data)
    def test_create_basic_recipe(self):
        """Test creating recipe"""
        payload = {
            'title': 'Choco Cheese',
            'time_minutes': 10,
            'price': 20
        }
        response = self.client.post(RECIPE_URL, payload)
        self.assertEqual(response.status_code, status.HTTP_201_CREATED)
        recipe = Recipe.objects.get(id=response.data['id'])
        # Every field in the payload should be persisted on the model.
        for k, v in payload.items():
            self.assertEqual(v, getattr(recipe, k))
    def test_create_recipe_with_tags(self):
        """Test creating a recipe with tags"""
        tag_1 = sample_tag(user=self.user, name='Vegan')
        tag_2 = sample_tag(user=self.user, name="Dessert")
        payload = {
            'title': 'Cheesecake',
            'tags': [tag_1.id, tag_2.id],
            'time_minutes': 60,
            'price': 20.00
        }
        response = self.client.post(RECIPE_URL, payload)
        self.assertEqual(response.status_code, status.HTTP_201_CREATED)
        recipe = Recipe.objects.get(id=response.data['id'])
        tags = recipe.tags.all()
        self.assertEqual(tags.count(), 2)
        self.assertIn(tag_1, tags)
        self.assertIn(tag_2, tags)
    def test_create_recipe_with_ingredients(self):
        """Test creating recipe with ingredients"""
        ingredient1 = sample_ingredient(user=self.user, name='Ingredient 1')
        ingredient2 = sample_ingredient(user=self.user, name='Ingredient 2')
        payload = {
            'title': 'Test recipe with ingredients',
            'ingredients': [ingredient1.id, ingredient2.id],
            'time_minutes': 45,
            'price': 15.00
        }
        res = self.client.post(RECIPE_URL, payload)
        self.assertEqual(res.status_code, status.HTTP_201_CREATED)
        recipe = Recipe.objects.get(id=res.data['id'])
        ingredients = recipe.ingredients.all()
        self.assertEqual(ingredients.count(), 2)
        self.assertIn(ingredient1, ingredients)
        self.assertIn(ingredient2, ingredients)
    def test_partial_update_recipe(self):
        """Test updating a recipe with patch"""
        recipe = sample_recipe(user=self.user)
        recipe.tags.add(sample_tag(user=self.user))
        new_tag = sample_tag(user=self.user, name='Curry')
        payload = {'title': 'Chicken tikka', 'tags': [new_tag.id]}
        url = detail_url(recipe.id)
        self.client.patch(url, payload)
        # PATCH mutates the row in place, so reload from the database.
        recipe.refresh_from_db()
        self.assertEqual(recipe.title, payload['title'])
        tags = recipe.tags.all()
        # The supplied tag list replaces the original tag association.
        self.assertEqual(len(tags), 1)
        self.assertIn(new_tag, tags)
    def test_full_update_recipe(self):
        """Test updating a recipe with put"""
        recipe = sample_recipe(user=self.user)
        recipe.tags.add(sample_tag(user=self.user))
        payload = {
            'title': 'Spaghetti carbonara',
            'time_minutes': 25,
            'price': 5.00
        }
        url = detail_url(recipe.id)
        self.client.put(url, payload)
        recipe.refresh_from_db()
        self.assertEqual(recipe.title, payload['title'])
        self.assertEqual(recipe.time_minutes, payload['time_minutes'])
        self.assertEqual(recipe.price, payload['price'])
        tags = recipe.tags.all()
        # PUT omitted 'tags', so the existing tag association is cleared.
        self.assertEqual(len(tags), 0)
class RecipeImageUploadTests(TestCase):
    """Tests for the recipe image-upload endpoint."""

    def setUp(self):
        self.client = APIClient()
        self.user = get_user_model().objects.create_user(
            '<EMAIL>',
            'testPW'
        )
        self.client.force_authenticate(self.user)
        self.recipe = sample_recipe(user=self.user)

    def tearDown(self):
        # Remove the uploaded file so test runs don't accumulate images.
        self.recipe.image.delete()

    def test_upload_image_to_recipe(self):
        """Uploading a valid JPEG stores the image and returns 200."""
        url = image_upload_url(self.recipe.id)
        # NamedTemporaryFile is removed automatically when the context exits.
        with tempfile.NamedTemporaryFile(suffix='.jpg') as image_file:
            Image.new('RGB', (10, 10)).save(image_file, format='JPEG')
            image_file.seek(0)  # rewind so the upload reads from the start
            res = self.client.post(url, {'image': image_file}, format='multipart')
        self.recipe.refresh_from_db()
        self.assertEqual(res.status_code, status.HTTP_200_OK)
        self.assertIn('image', res.data)
        self.assertTrue(os.path.exists(self.recipe.image.path))

    def test_upload_image_bad_request(self):
        """Posting a non-image payload is rejected with 400."""
        url = image_upload_url(self.recipe.id)
        res = self.client.post(url, {'image': 'notimage'}, format='multipart')
        self.assertEqual(res.status_code, status.HTTP_400_BAD_REQUEST)
|
[
"core.models.Recipe.objects.filter",
"core.models.Tag.objects.create",
"tempfile.NamedTemporaryFile",
"PIL.Image.new",
"core.models.Recipe.objects.create",
"core.models.Recipe.objects.all",
"recipe.serializers.RecipeDetailSerializer",
"core.models.Recipe.objects.get",
"os.path.exists",
"django.contrib.auth.get_user_model",
"django.urls.reverse",
"core.models.Ingredient.objects.create",
"recipe.serializers.RecipeSerializer",
"rest_framework.test.APIClient"
] |
[((376, 405), 'django.urls.reverse', 'reverse', (['"""recipe:recipe-list"""'], {}), "('recipe:recipe-list')\n", (383, 405), False, 'from django.urls import reverse\n'), ((482, 537), 'django.urls.reverse', 'reverse', (['"""recipe:recipe-upload-image"""'], {'args': '[recipe_id]'}), "('recipe:recipe-upload-image', args=[recipe_id])\n", (489, 537), False, 'from django.urls import reverse\n'), ((613, 662), 'django.urls.reverse', 'reverse', (['"""recipe:recipe-detail"""'], {'args': '[recipe_id]'}), "('recipe:recipe-detail', args=[recipe_id])\n", (620, 662), False, 'from django.urls import reverse\n'), ((746, 786), 'core.models.Tag.objects.create', 'Tag.objects.create', ([], {'user': 'user', 'name': 'name'}), '(user=user, name=name)\n', (764, 786), False, 'from core.models import Recipe, Tag, Ingredient\n'), ((884, 931), 'core.models.Ingredient.objects.create', 'Ingredient.objects.create', ([], {'user': 'user', 'name': 'name'}), '(user=user, name=name)\n', (909, 931), False, 'from core.models import Recipe, Tag, Ingredient\n'), ((1161, 1205), 'core.models.Recipe.objects.create', 'Recipe.objects.create', ([], {'user': 'user'}), '(user=user, **defaults)\n', (1182, 1205), False, 'from core.models import Recipe, Tag, Ingredient\n'), ((1333, 1344), 'rest_framework.test.APIClient', 'APIClient', ([], {}), '()\n', (1342, 1344), False, 'from rest_framework.test import APIClient\n'), ((1670, 1681), 'rest_framework.test.APIClient', 'APIClient', ([], {}), '()\n', (1679, 1681), False, 'from rest_framework.test import APIClient\n'), ((2107, 2143), 'recipe.serializers.RecipeSerializer', 'RecipeSerializer', (['recipes'], {'many': '(True)'}), '(recipes, many=True)\n', (2123, 2143), False, 'from recipe.serializers import RecipeSerializer, RecipeDetailSerializer\n'), ((2682, 2719), 'core.models.Recipe.objects.filter', 'Recipe.objects.filter', ([], {'user': 'self.user'}), '(user=self.user)\n', (2703, 2719), False, 'from core.models import Recipe, Tag, Ingredient\n'), ((2741, 2777), 
'recipe.serializers.RecipeSerializer', 'RecipeSerializer', (['recipes'], {'many': '(True)'}), '(recipes, many=True)\n', (2757, 2777), False, 'from recipe.serializers import RecipeSerializer, RecipeDetailSerializer\n'), ((3299, 3329), 'recipe.serializers.RecipeDetailSerializer', 'RecipeDetailSerializer', (['recipe'], {}), '(recipe)\n', (3321, 3329), False, 'from recipe.serializers import RecipeSerializer, RecipeDetailSerializer\n'), ((3735, 3777), 'core.models.Recipe.objects.get', 'Recipe.objects.get', ([], {'id': "response.data['id']"}), "(id=response.data['id'])\n", (3753, 3777), False, 'from core.models import Recipe, Tag, Ingredient\n'), ((4393, 4435), 'core.models.Recipe.objects.get', 'Recipe.objects.get', ([], {'id': "response.data['id']"}), "(id=response.data['id'])\n", (4411, 4435), False, 'from core.models import Recipe, Tag, Ingredient\n'), ((5181, 5218), 'core.models.Recipe.objects.get', 'Recipe.objects.get', ([], {'id': "res.data['id']"}), "(id=res.data['id'])\n", (5199, 5218), False, 'from core.models import Recipe, Tag, Ingredient\n'), ((6789, 6800), 'rest_framework.test.APIClient', 'APIClient', ([], {}), '()\n', (6798, 6800), False, 'from rest_framework.test import APIClient\n'), ((7366, 7408), 'tempfile.NamedTemporaryFile', 'tempfile.NamedTemporaryFile', ([], {'suffix': '""".jpg"""'}), "(suffix='.jpg')\n", (7393, 7408), False, 'import tempfile\n'), ((7435, 7461), 'PIL.Image.new', 'Image.new', (['"""RGB"""', '(10, 10)'], {}), "('RGB', (10, 10))\n", (7444, 7461), False, 'from PIL import Image\n'), ((7828, 7866), 'os.path.exists', 'os.path.exists', (['self.recipe.image.path'], {}), '(self.recipe.image.path)\n', (7842, 7866), False, 'import os\n'), ((2049, 2069), 'core.models.Recipe.objects.all', 'Recipe.objects.all', ([], {}), '()\n', (2067, 2069), False, 'from core.models import Recipe, Tag, Ingredient\n'), ((1702, 1718), 'django.contrib.auth.get_user_model', 'get_user_model', ([], {}), '()\n', (1716, 1718), False, 'from django.contrib.auth import 
get_user_model\n'), ((2389, 2405), 'django.contrib.auth.get_user_model', 'get_user_model', ([], {}), '()\n', (2403, 2405), False, 'from django.contrib.auth import get_user_model\n'), ((6821, 6837), 'django.contrib.auth.get_user_model', 'get_user_model', ([], {}), '()\n', (6835, 6837), False, 'from django.contrib.auth import get_user_model\n')]
|
# Kivy must be configured before any other kivy module is imported,
# hence the isort:skip markers keeping these lines at the very top.
from kivy.config import Config # isort:skip
Config.set("input", "mouse", "mouse,multitouch_on_demand") # isort:skip # no red dots on right click
ICON = "img/icon.png"
Config.set("kivy", "window_icon", ICON) # isort:skip # set icon before Window is imported
import signal
import os
import sys
import threading
import traceback
from queue import Queue
from kivy.app import App
from kivy.core.clipboard import Clipboard
from kivy.storage.jsonstore import JsonStore
from kivy.uix.popup import Popup
from core.ai import ai_move
from core.common import OUTPUT_INFO, OUTPUT_ERROR, OUTPUT_DEBUG, OUTPUT_EXTRA_DEBUG, OUTPUT_KATAGO_STDERR
from core.engine import KataGoEngine
from core.game import Game, IllegalMoveException, KaTrainSGF
from core.sgf_parser import Move, ParseError
from gui import *
class KaTrainGui(BoxLayout):
    """Top level class responsible for tying everything together"""
    def __init__(self, **kwargs):
        super(KaTrainGui, self).__init__(**kwargs)
        self.debug_level = 0
        self.engine = None
        self.game = None
        self.new_game_popup = None
        self.fileselect_popup = None
        self.config_popup = None
        # Callable handed to components that want to log through this GUI.
        self.logger = lambda message, level=OUTPUT_INFO: self.log(message, level)
        self._load_config()
        self.debug_level = self.config("debug/level", OUTPUT_INFO)
        self.controls.ai_mode_groups["W"].values = self.controls.ai_mode_groups["B"].values = list(self.config("ai").keys())
        # All game mutations are funneled through this queue; see _message_loop_thread.
        self.message_queue = Queue()
        self._keyboard = Window.request_keyboard(None, self, "")
        self._keyboard.bind(on_key_down=self._on_keyboard_down)
    def log(self, message, level=OUTPUT_INFO):
        """Print/route a log message; KataGo stderr also updates the status bar."""
        if level == OUTPUT_KATAGO_STDERR:
            if "starting" in message.lower():
                self.controls.set_status(f"KataGo engine starting...")
            if message.startswith("Tuning"):
                self.controls.set_status(f"KataGo is tuning settings for first startup, please wait." + message)
            if "ready" in message.lower():
                self.controls.set_status(f"KataGo engine ready.")
            print(f"[KG:STDERR]{message.strip()}")
        elif level == OUTPUT_ERROR:
            self.controls.set_status(f"ERROR: {message}")
            print(f"ERROR: {message}")
        elif self.debug_level >= level:
            print(message)
    def _load_config(self):
        """Load config.json (or the file given as argv[1]); exit on failure."""
        base_path = getattr(sys, "_MEIPASS", os.getcwd()) # for pyinstaller
        config_file = sys.argv[1] if len(sys.argv) > 1 else os.path.join(base_path, "config.json")
        try:
            self.log(f"Using config file {config_file}", OUTPUT_INFO)
            self._config_store = JsonStore(config_file, indent=4)
            self._config = dict(self._config_store)
        except Exception as e:
            self.log(f"Failed to load config {config_file}: {e}", OUTPUT_ERROR)
            sys.exit(1)
    def save_config(self):
        """Write the in-memory config dict back to the JsonStore on disk."""
        for k, v in self._config.items():
            self._config_store.put(k, **v)
    def config(self, setting, default=None):
        """Look up a config value; 'cat/key' addresses a key inside a category."""
        try:
            if "/" in setting:
                cat, key = setting.split("/")
                return self._config[cat].get(key, default)
            else:
                return self._config[setting]
        except KeyError:
            self.log(f"Missing configuration option {setting}", OUTPUT_ERROR)
    def start(self):
        """Start the engine and the message loop; no-op if already started."""
        if self.engine:
            return
        self.board_gui.trainer_config = self.config("trainer")
        self.board_gui.ui_config = self.config("board_ui")
        self.engine = KataGoEngine(self, self.config("engine"))
        threading.Thread(target=self._message_loop_thread, daemon=True).start()
        self._do_new_game()
    def update_state(self, redraw_board=False): # is called after every message and on receiving analyses and config changes
        """Sync widgets (prisoners, engine status dot, board) with game state."""
        # AI and Trainer/auto-undo handlers
        cn = self.game.current_node
        auto_undo = cn.player and "undo" in self.controls.player_mode(cn.player)
        if auto_undo and cn.analysis_ready and cn.parent and cn.parent.analysis_ready and not cn.children and not self.game.ended:
            self.game.analyze_undo(cn, self.config("trainer")) # not via message loop
        if cn.analysis_ready and "ai" in self.controls.player_mode(cn.next_player).lower() and not cn.children and not self.game.ended and not (auto_undo and cn.auto_undo is None):
            self._do_ai_move(cn) # cn mismatch stops this if undo fired. avoid message loop here or fires repeatedly.
        # Handle prisoners and next player display
        prisoners = self.game.prisoner_count
        top, bot = self.board_controls.black_prisoners.__self__, self.board_controls.white_prisoners.__self__ # no weakref
        if self.game.next_player == "W":
            top, bot = bot, top
        self.board_controls.mid_circles_container.clear_widgets()
        self.board_controls.mid_circles_container.add_widget(bot)
        self.board_controls.mid_circles_container.add_widget(top)
        self.board_controls.black_prisoners.text = str(prisoners["W"])
        self.board_controls.white_prisoners.text = str(prisoners["B"])
        # update engine status dot
        if not self.engine or not self.engine.katago_process or self.engine.katago_process.poll() is not None:
            self.board_controls.engine_status_col = self.config("board_ui/engine_down_col")
        elif len(self.engine.queries) >= 4:
            self.board_controls.engine_status_col = self.config("board_ui/engine_busy_col")
        elif len(self.engine.queries) >= 2:
            self.board_controls.engine_status_col = self.config("board_ui/engine_little_busy_col")
        elif len(self.engine.queries) == 0:
            self.board_controls.engine_status_col = self.config("board_ui/engine_ready_col")
        else:
            self.board_controls.engine_status_col = self.config("board_ui/engine_almost_done_col")
        # redraw
        if redraw_board:
            Clock.schedule_once(self.board_gui.draw_board, -1)
        self.board_gui.redraw_board_contents_trigger()
        self.controls.update_evaluation()
    def _message_loop_thread(self):
        """Daemon loop dispatching queued messages to the matching _do_* method."""
        while True:
            game, msg, *args = self.message_queue.get()
            try:
                self.log(f"Message Loop Received {msg}: {args} for Game {game}", OUTPUT_EXTRA_DEBUG)
                # Drop messages queued for a game that has since been replaced.
                if game != self.game.game_id:
                    self.log(f"Message skipped as it is outdated (current game is {self.game.game_id}", OUTPUT_EXTRA_DEBUG)
                    continue
                getattr(self, f"_do_{msg.replace('-','_')}")(*args)
                self.update_state()
            except Exception as e:
                self.log(f"Exception in processing message {msg} {args}: {e}", OUTPUT_ERROR)
                traceback.print_exc()
    def __call__(self, message, *args):
        """Queue a message (e.g. self('undo', 2)) for the message loop thread."""
        if self.game:
            self.message_queue.put([self.game.game_id, message, *args])
    def _do_new_game(self, move_tree=None, analyze_fast=False):
        """Start a new game, optionally from a loaded SGF move tree."""
        self.engine.on_new_game() # clear queries
        self.game = Game(self, self.engine, self.config("game"), move_tree=move_tree, analyze_fast=analyze_fast)
        self.controls.select_mode("analyze" if move_tree and len(move_tree.nodes_in_tree) > 1 else "play")
        self.controls.graph.initialize_from_game(self.game.root)
        self.update_state(redraw_board=True)
    def _do_ai_move(self, node=None):
        """Generate an AI move for the current node (skipped if node is stale)."""
        if node is None or self.game.current_node == node:
            mode = self.controls.ai_mode(self.game.current_node.next_player)
            settings = self.config(f"ai/{mode}")
            if settings:
                ai_move(self.game, mode, settings)
    def _do_undo(self, n_times=1):
        self.game.undo(n_times)
    def _do_redo(self, n_times=1):
        self.game.redo(n_times)
    def _do_switch_branch(self, direction):
        self.game.switch_branch(direction)
    def _do_play(self, coords):
        """Play a move at coords (None = pass); illegal moves set the status bar."""
        try:
            self.game.play(Move(coords, player=self.game.next_player))
        except IllegalMoveException as e:
            self.controls.set_status(f"Illegal Move: {str(e)}")
    def _do_analyze_extra(self, mode):
        self.game.analyze_extra(mode)
    def _do_analyze_sgf_popup(self):
        """Show the load-SGF popup (created lazily on first use)."""
        if not self.fileselect_popup:
            self.fileselect_popup = Popup(title="Double Click SGF file to analyze", size_hint=(0.8, 0.8)).__self__
            popup_contents = LoadSGFPopup()
            self.fileselect_popup.add_widget(popup_contents)
            popup_contents.filesel.path = os.path.abspath(os.path.expanduser(self.config("sgf/sgf_load")))
            def readfile(files, _mouse):
                # Callback for the file chooser: parse and load the chosen SGF.
                self.fileselect_popup.dismiss()
                try:
                    move_tree = KaTrainSGF.parse_file(files[0])
                except ParseError as e:
                    self.log(f"Failed to load SGF. Parse Error: {e}", OUTPUT_ERROR)
                    return
                self._do_new_game(move_tree=move_tree, analyze_fast=popup_contents.fast.active)
                if not popup_contents.rewind.active:
                    self.game.redo(999)
            popup_contents.filesel.on_submit = readfile
        self.fileselect_popup.open()
    def _do_new_game_popup(self):
        """Show the new-game popup (created lazily on first use)."""
        if not self.new_game_popup:
            self.new_game_popup = Popup(title="New Game", size_hint=(0.5, 0.6)).__self__
            popup_contents = NewGamePopup(self, self.new_game_popup, {k: v[0] for k, v in self.game.root.properties.items() if len(v) == 1})
            self.new_game_popup.add_widget(popup_contents)
        self.new_game_popup.open()
    def _do_config_popup(self):
        """Show the settings popup (created lazily on first use)."""
        if not self.config_popup:
            self.config_popup = Popup(title="Edit Settings", size_hint=(0.9, 0.9)).__self__
            popup_contents = ConfigPopup(self, self.config_popup, dict(self._config), ignore_cats=("trainer", "ai"))
            self.config_popup.add_widget(popup_contents)
        self.config_popup.open()
    def _do_output_sgf(self):
        """Fill in missing player names and write the game to an SGF file."""
        for pl in Move.PLAYERS:
            if not self.game.root.get_property(f"P{pl}"):
                _, model_file = os.path.split(self.engine.config["model"])
                self.game.root.set_property(
                    f"P{pl}", f"AI {self.controls.ai_mode(pl)} (KataGo { os.path.splitext(model_file)[0]})" if "ai" in self.controls.player_mode(pl) else "Player"
                )
        msg = self.game.write_sgf(
            self.config("sgf/sgf_save"),
            trainer_config=self.config("trainer"),
            save_feedback=self.config("sgf/save_feedback"),
            eval_thresholds=self.config("trainer/eval_thresholds"),
        )
        self.log(msg, OUTPUT_INFO)
        self.controls.set_status(msg)
    def load_sgf_from_clipboard(self):
        """Parse SGF text from the clipboard and load it as a new game."""
        clipboard = Clipboard.paste()
        if not clipboard:
            self.controls.set_status(f"Ctrl-V pressed but clipboard is empty.")
            return
        try:
            move_tree = KaTrainSGF.parse(clipboard)
        except Exception as e:
            self.controls.set_status(f"Failed to imported game from clipboard: {e}\nClipboard contents: {clipboard[:50]}...")
            return
        move_tree.nodes_in_tree[-1].analyze(self.engine, analyze_fast=False) # speed up result for looking at end of game
        self._do_new_game(move_tree=move_tree, analyze_fast=True)
        self("redo", 999)
        self.log("Imported game from clipboard.", OUTPUT_INFO)
    def on_touch_up(self, touch):
        """Scroll wheel over the board navigates the move history."""
        if self.board_gui.collide_point(*touch.pos) or self.board_controls.collide_point(*touch.pos):
            if touch.button == "scrollup":
                self("redo")
            elif touch.button == "scrolldown":
                self("undo")
        return super().on_touch_up(touch)
    def _on_keyboard_down(self, _keyboard, keycode, _text, modifiers):
        """Dispatch keyboard shortcuts (widgets to trigger or messages to queue)."""
        if isinstance(App.get_running_app().root_window.children[0], Popup):
            return # if in new game or load, don't allow keyboard shortcuts
        shortcuts = {
            "q": self.controls.show_children,
            "w": self.controls.eval,
            "e": self.controls.hints,
            "r": self.controls.ownership,
            "t": self.controls.policy,
            "enter": ("ai-move",),
            "a": self.controls.analyze_extra,
            "s": self.controls.analyze_equalize,
            "d": self.controls.analyze_sweep,
            "right": ("switch-branch", 1),
            "left": ("switch-branch", -1),
        }
        if keycode[1] in shortcuts.keys():
            shortcut = shortcuts[keycode[1]]
            if isinstance(shortcut, Widget):
                shortcut.trigger_action(duration=0)
            else:
                self(*shortcut)
        elif keycode[1] == "tab":
            self.controls.switch_mode()
        elif keycode[1] == "spacebar":
            self("play", None) # pass
        elif keycode[1] in ["`", "~", "p"]:
            self.controls_box.hidden = not self.controls_box.hidden
        elif keycode[1] in ["up", "z"]:
            self("undo", 1 + ("shift" in modifiers) * 9 + ("ctrl" in modifiers) * 999)
        elif keycode[1] in ["down", "x"]:
            self("redo", 1 + ("shift" in modifiers) * 9 + ("ctrl" in modifiers) * 999)
        elif keycode[1] == "n" and "ctrl" in modifiers:
            self("new-game-popup")
        elif keycode[1] == "l" and "ctrl" in modifiers:
            self("analyze-sgf-popup")
        elif keycode[1] == "s" and "ctrl" in modifiers:
            self("output-sgf")
        elif keycode[1] == "c" and "ctrl" in modifiers:
            Clipboard.copy(self.game.root.sgf())
            self.controls.set_status("Copied SGF to clipboard.")
        elif keycode[1] == "v" and "ctrl" in modifiers:
            self.load_sgf_from_clipboard()
        return True
class KaTrainApp(App):
    """Kivy application wrapper: builds the GUI and handles shutdown signals."""
    gui = ObjectProperty(None)

    def build(self):
        self.icon = ICON # how you're supposed to set an icon
        self.gui = KaTrainGui()
        print(self.get_application_icon())
        Window.bind(on_request_close=self.on_request_close)
        return self.gui

    def on_start(self):
        self.gui.start()

    def on_request_close(self, *args):
        """Shut down the KataGo engine cleanly when the window closes."""
        if getattr(self, "gui", None) and self.gui.engine:
            self.gui.engine.shutdown()

    def signal_handler(self, *args):
        """SIGINT handler: dump per-thread tracebacks in debug mode, then exit."""
        if self.gui.debug_level >= OUTPUT_DEBUG:
            print("TRACEBACKS")
            for threadId, stack in sys._current_frames().items():
                print(f"\n# ThreadID: {threadId}")
                for filename, lineno, name, line in traceback.extract_stack(stack):
                    # BUGFIX: the file name was printed as a literal "(unknown)"
                    # even though extract_stack provides it; use the variable.
                    print(f'\tFile: "{filename}", line {lineno}, in {name}')
                    if line:
                        print(f"\t\t{line.strip()}")
        self.on_request_close()
        sys.exit(0)
if __name__ == "__main__":
    app = KaTrainApp()
    # Ctrl-C in the terminal goes through the handler so the engine shuts down.
    signal.signal(signal.SIGINT, app.signal_handler)
    try:
        app.run()
    except Exception:
        # Make sure the KataGo subprocess is stopped before re-raising.
        app.on_request_close()
        raise
|
[
"kivy.config.Config.set",
"core.game.KaTrainSGF.parse",
"os.path.join",
"traceback.print_exc",
"sys._current_frames",
"threading.Thread",
"core.game.KaTrainSGF.parse_file",
"core.ai.ai_move",
"core.sgf_parser.Move",
"signal.signal",
"queue.Queue",
"sys.exit",
"kivy.uix.popup.Popup",
"os.getcwd",
"kivy.storage.jsonstore.JsonStore",
"traceback.extract_stack",
"kivy.app.App.get_running_app",
"os.path.splitext",
"kivy.core.clipboard.Clipboard.paste",
"os.path.split"
] |
[((46, 104), 'kivy.config.Config.set', 'Config.set', (['"""input"""', '"""mouse"""', '"""mouse,multitouch_on_demand"""'], {}), "('input', 'mouse', 'mouse,multitouch_on_demand')\n", (56, 104), False, 'from kivy.config import Config\n'), ((171, 210), 'kivy.config.Config.set', 'Config.set', (['"""kivy"""', '"""window_icon"""', 'ICON'], {}), "('kivy', 'window_icon', ICON)\n", (181, 210), False, 'from kivy.config import Config\n'), ((14950, 14998), 'signal.signal', 'signal.signal', (['signal.SIGINT', 'app.signal_handler'], {}), '(signal.SIGINT, app.signal_handler)\n', (14963, 14998), False, 'import signal\n'), ((1503, 1510), 'queue.Queue', 'Queue', ([], {}), '()\n', (1508, 1510), False, 'from queue import Queue\n'), ((10849, 10866), 'kivy.core.clipboard.Clipboard.paste', 'Clipboard.paste', ([], {}), '()\n', (10864, 10866), False, 'from kivy.core.clipboard import Clipboard\n'), ((14882, 14893), 'sys.exit', 'sys.exit', (['(0)'], {}), '(0)\n', (14890, 14893), False, 'import sys\n'), ((2440, 2451), 'os.getcwd', 'os.getcwd', ([], {}), '()\n', (2449, 2451), False, 'import os\n'), ((2532, 2570), 'os.path.join', 'os.path.join', (['base_path', '"""config.json"""'], {}), "(base_path, 'config.json')\n", (2544, 2570), False, 'import os\n'), ((2687, 2719), 'kivy.storage.jsonstore.JsonStore', 'JsonStore', (['config_file'], {'indent': '(4)'}), '(config_file, indent=4)\n', (2696, 2719), False, 'from kivy.storage.jsonstore import JsonStore\n'), ((11029, 11056), 'core.game.KaTrainSGF.parse', 'KaTrainSGF.parse', (['clipboard'], {}), '(clipboard)\n', (11045, 11056), False, 'from core.game import Game, IllegalMoveException, KaTrainSGF\n'), ((2895, 2906), 'sys.exit', 'sys.exit', (['(1)'], {}), '(1)\n', (2903, 2906), False, 'import sys\n'), ((3640, 3703), 'threading.Thread', 'threading.Thread', ([], {'target': 'self._message_loop_thread', 'daemon': '(True)'}), '(target=self._message_loop_thread, daemon=True)\n', (3656, 3703), False, 'import threading\n'), ((7696, 7730), 'core.ai.ai_move', 
'ai_move', (['self.game', 'mode', 'settings'], {}), '(self.game, mode, settings)\n', (7703, 7730), False, 'from core.ai import ai_move\n'), ((8028, 8070), 'core.sgf_parser.Move', 'Move', (['coords'], {'player': 'self.game.next_player'}), '(coords, player=self.game.next_player)\n', (8032, 8070), False, 'from core.sgf_parser import Move, ParseError\n'), ((8368, 8437), 'kivy.uix.popup.Popup', 'Popup', ([], {'title': '"""Double Click SGF file to analyze"""', 'size_hint': '(0.8, 0.8)'}), "(title='Double Click SGF file to analyze', size_hint=(0.8, 0.8))\n", (8373, 8437), False, 'from kivy.uix.popup import Popup\n'), ((9373, 9418), 'kivy.uix.popup.Popup', 'Popup', ([], {'title': '"""New Game"""', 'size_hint': '(0.5, 0.6)'}), "(title='New Game', size_hint=(0.5, 0.6))\n", (9378, 9418), False, 'from kivy.uix.popup import Popup\n'), ((9762, 9812), 'kivy.uix.popup.Popup', 'Popup', ([], {'title': '"""Edit Settings"""', 'size_hint': '(0.9, 0.9)'}), "(title='Edit Settings', size_hint=(0.9, 0.9))\n", (9767, 9812), False, 'from kivy.uix.popup import Popup\n'), ((10182, 10224), 'os.path.split', 'os.path.split', (["self.engine.config['model']"], {}), "(self.engine.config['model'])\n", (10195, 10224), False, 'import os\n'), ((14653, 14683), 'traceback.extract_stack', 'traceback.extract_stack', (['stack'], {}), '(stack)\n', (14676, 14683), False, 'import traceback\n'), ((6828, 6849), 'traceback.print_exc', 'traceback.print_exc', ([], {}), '()\n', (6847, 6849), False, 'import traceback\n'), ((8802, 8833), 'core.game.KaTrainSGF.parse_file', 'KaTrainSGF.parse_file', (['files[0]'], {}), '(files[0])\n', (8823, 8833), False, 'from core.game import Game, IllegalMoveException, KaTrainSGF\n'), ((14519, 14540), 'sys._current_frames', 'sys._current_frames', ([], {}), '()\n', (14538, 14540), False, 'import sys\n'), ((11932, 11953), 'kivy.app.App.get_running_app', 'App.get_running_app', ([], {}), '()\n', (11951, 11953), False, 'from kivy.app import App\n'), ((10343, 10371), 'os.path.splitext', 
'os.path.splitext', (['model_file'], {}), '(model_file)\n', (10359, 10371), False, 'import os\n')]
|
# Copyright 2020 The Pigweed Authors
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may not
# use this file except in compliance with the License. You may obtain a copy of
# the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations under
# the License.
"""Decodes arguments and formats tokenized messages.
The decode(format_string, encoded_arguments) function provides a simple way to
format a string with encoded arguments. The FormatString class may also be used.
Missing, truncated, or otherwise corrupted arguments are handled and displayed
in the resulting string with an error message.
"""
from datetime import datetime
import re
import struct
from typing import Iterable, List, NamedTuple, Match, Sequence, Tuple
def zigzag_decode(value: int) -> int:
    """ZigZag decode function from protobuf's wire_format module.

    Even encoded values map to non-negative ``value // 2``; odd values map
    to the corresponding negative integer.
    """
    # Branchless form of the classic ZigZag decode: XOR with -1 (all ones)
    # when the low bit is set, which is equivalent to bitwise NOT.
    return (value >> 1) ^ -(value & 1)
class FormatSpec:
    """Represents a single format specifier parsed from a printf-style string.

    Wraps the regex match for one ``%`` conversion and precomputes
    ``compatible``, a Python %-formatting string used to render the decoded
    argument.
    """

    # Regular expression for finding format specifiers.
    FORMAT_SPEC = re.compile(r'%(?:(?P<flags>[+\- #0]*\d*(?:\.\d+)?)'
                             r'(?P<length>hh|h|ll|l|j|z|t|L)?'
                             r'(?P<type>[csdioxXufFeEaAgGnp])|%)')

    # Conversions to make format strings Python compatible. Python's
    # %-formatting accepts (and ignores) the h, l, and L length modifiers
    # but rejects the C99 modifiers hh, ll, j, z, and t.
    _UNSUPPORTED_LENGTH = frozenset(['hh', 'll', 'j', 'z', 't'])
    _REMAP_TYPE = {'a': 'f', 'A': 'F'}  # render hex floats as decimal floats

    # Conversion specifiers by type; n is not supported.
    _SIGNED_INT = 'di'
    _UNSIGNED_INT = frozenset('oxXup')
    _FLOATING_POINT = frozenset('fFeEaAgG')

    # Floats are encoded as 4-byte little-endian IEEE 754 values.
    _PACKED_FLOAT = struct.Struct('<f')

    @classmethod
    def from_string(cls, format_specifier: str):
        """Creates a FormatSpec from a str with a single format specifier.

        Raises:
            ValueError: if the string is not exactly one format specifier.
        """
        match = cls.FORMAT_SPEC.fullmatch(format_specifier)
        if not match:
            raise ValueError(
                '{!r} is not a valid single format specifier'.format(
                    format_specifier))
        return cls(match)

    def __init__(self, re_match: Match):
        """Constructs a FormatSpec from an re.Match object for FORMAT_SPEC."""
        self.match = re_match
        self.specifier: str = self.match.group()
        self.flags: str = self.match.group('flags') or ''
        self.length: str = self.match.group('length') or ''
        # If there is no type, the format spec is %%.
        self.type: str = self.match.group('type') or '%'
        # %p prints as 0xFEEDBEEF; other specs may need length/type switched
        if self.type == 'p':
            self.compatible = '0x%08X'
        else:
            self.compatible = ''.join([
                '%', self.flags,
                # BUG FIX: both branches of this conditional used to be '',
                # which silently dropped every length modifier. Only the
                # C99 modifiers Python rejects are removed; h/l/L are kept
                # since Python accepts (and ignores) them.
                '' if self.length in self._UNSUPPORTED_LENGTH else self.length,
                self._REMAP_TYPE.get(self.type, self.type)
            ])

    def decode(self, encoded_arg: bytes) -> 'DecodedArg':
        """Decodes the provided data according to this format specifier."""
        if self.type == '%':  # literal %
            return DecodedArg(self, (),
                              b'')  # Use () as the value for % formatting.
        if self.type == 's':  # string
            return self._decode_string(encoded_arg)
        if self.type == 'c':  # character
            return self._decode_char(encoded_arg)
        if self.type in self._SIGNED_INT:
            return self._decode_signed_integer(encoded_arg)
        if self.type in self._UNSIGNED_INT:
            return self._decode_unsigned_integer(encoded_arg)
        if self.type in self._FLOATING_POINT:
            return self._decode_float(encoded_arg)
        # Unsupported specifier (e.g. %n)
        return DecodedArg(
            self, None, b'', DecodedArg.DECODE_ERROR,
            'Unsupported conversion specifier "{}"'.format(self.type))

    def _decode_signed_integer(self, encoded: bytes) -> 'DecodedArg':
        """Decodes a ZigZag-encoded, varint-packed signed integer."""
        if not encoded:
            return DecodedArg.missing(self)
        count = 0
        result = 0
        shift = 0
        for byte in encoded:
            count += 1
            result |= (byte & 0x7f) << shift
            if not byte & 0x80:  # a clear high bit marks the final byte
                return DecodedArg(self, zigzag_decode(result), encoded[:count])
            shift += 7
            if shift >= 64:  # varints encode at most 64 bits of payload
                break
        return DecodedArg(self, None, encoded[:count], DecodedArg.DECODE_ERROR,
                          'Unterminated variable-length integer')

    def _decode_unsigned_integer(self, encoded: bytes) -> 'DecodedArg':
        """Decodes an unsigned integer (same wire format as signed)."""
        arg = self._decode_signed_integer(encoded)
        # Since ZigZag encoding is used, unsigned integers must be masked off
        # to their original bit length.
        if arg.value is not None:
            arg.value &= (1 << self.size_bits()) - 1
        return arg

    def _decode_float(self, encoded: bytes) -> 'DecodedArg':
        """Decodes a 4-byte little-endian IEEE 754 float."""
        if len(encoded) < 4:
            return DecodedArg.missing(self)
        return DecodedArg(self,
                          self._PACKED_FLOAT.unpack_from(encoded)[0],
                          encoded[:4])

    def _decode_string(self, encoded: bytes) -> 'DecodedArg':
        """Reads a unicode string from the encoded data."""
        if not encoded:
            return DecodedArg.missing(self)
        # First byte: low 7 bits hold the length; the high bit marks a
        # string that was truncated during encoding.
        size_and_status = encoded[0]
        status = DecodedArg.OK
        if size_and_status & 0x80:
            status |= DecodedArg.TRUNCATED
            size_and_status &= 0x7f
        raw_data = encoded[0:size_and_status + 1]
        data = raw_data[1:]
        if len(data) < size_and_status:
            status |= DecodedArg.DECODE_ERROR
        try:
            decoded = data.decode()
        except UnicodeDecodeError as err:
            return DecodedArg(self,
                              repr(bytes(data)).lstrip('b'), raw_data,
                              status | DecodedArg.DECODE_ERROR, err)
        return DecodedArg(self, decoded, raw_data, status)

    def _decode_char(self, encoded: bytes) -> 'DecodedArg':
        """Reads an integer from the data, then converts it to a string."""
        arg = self._decode_signed_integer(encoded)
        if arg.ok():
            try:
                arg.value = chr(arg.value)
            except (OverflowError, ValueError) as err:
                arg.error = err
                arg.status |= DecodedArg.DECODE_ERROR
        return arg

    def size_bits(self) -> int:
        """Size of the argument in bits; 0 for strings."""
        if self.type == 's':
            return 0
        # TODO(hepler): 64-bit targets likely have 64-bit l, j, z, and t.
        return 64 if self.length in ['ll', 'j'] else 32

    def __str__(self) -> str:
        return self.specifier
class DecodedArg:
    """A decoded argument, ready to be formatted into the output string."""

    # Status flags for a decoded argument. These values should match the
    # DecodingStatus enum in pw_tokenizer/internal/decode.h.
    OK = 0  # decoding was successful
    MISSING = 1  # the argument was not present in the data
    TRUNCATED = 2  # the argument was truncated during encoding
    DECODE_ERROR = 4  # an error occurred while decoding the argument
    SKIPPED = 8  # argument was skipped due to a previous error

    @classmethod
    def missing(cls, specifier: FormatSpec):
        """Creates a DecodedArg for an argument absent from the data."""
        return cls(specifier, None, b'', cls.MISSING)

    def __init__(self,
                 specifier: FormatSpec,
                 value,
                 raw_data: bytes,
                 status: int = OK,
                 error=None):
        self.specifier = specifier  # the FormatSpec this argument fills
        self.value = value  # the decoded value, or None if decoding failed
        # Keep an independent copy of the exact bytes this argument consumed.
        self.raw_data = bytes(raw_data)
        self._status = status
        self.error = error

    def ok(self) -> bool:
        """The argument was decoded without errors."""
        return self.status in (self.OK, self.TRUNCATED)

    @property
    def status(self) -> int:
        return self._status

    @status.setter
    def status(self, status: int):
        # %% is a literal percent: it always renders normally, so error
        # flags are never allowed to stick to it.
        if self.specifier.type == '%':
            self._status = self.OK
        else:
            self._status = status

    def format(self) -> str:
        """Returns this argument formatted, with inline error handling."""
        if self.status == self.TRUNCATED:
            # Truncated strings render with a [...] marker appended.
            return self.specifier.compatible % (self.value + '[...]')
        if self.ok():
            try:
                return self.specifier.compatible % self.value
            except (OverflowError, TypeError, ValueError) as err:
                self.status |= self.DECODE_ERROR
                self.error = err

        if self.status & self.SKIPPED:
            label = 'SKIPPED'
        elif self.status == self.MISSING:
            label = 'MISSING'
        elif self.status & self.DECODE_ERROR:
            label = 'ERROR'
        else:
            raise AssertionError('Unhandled DecodedArg status {:x}!'.format(
                self.status))

        message = '{} {}'.format(self.specifier, label)
        if self.value is None or not str(self.value):
            return '<[{}]>'.format(message)
        return '<[{} ({})]>'.format(message, self.value)

    def __str__(self) -> str:
        return self.format()

    def __repr__(self) -> str:
        return f'DecodedArg({self})'
def parse_format_specifiers(format_string: str) -> Iterable[FormatSpec]:
    """Returns a lazy iterable of FormatSpecs, one per specifier found."""
    matches = FormatSpec.FORMAT_SPEC.finditer(format_string)
    return (FormatSpec(match) for match in matches)
class FormattedString(NamedTuple):
    """A decoded result: the formatted text, its args, and leftover bytes."""
    value: str
    args: Sequence[DecodedArg]
    remaining: bytes

    def ok(self) -> bool:
        """Arg data decoded successfully and all expected args were found."""
        if self.remaining:
            return False
        return all(arg.ok() for arg in self.args)

    def score(self, date_removed: datetime = None) -> tuple:
        """Returns a key for sorting by how successful a decode was.

        Decoded strings are sorted by whether they
          1. decoded all bytes for all arguments without errors,
          2. decoded all data,
          3. have the fewest decoding errors,
          4. decoded the most arguments successfully, or
          5. have the most recent removal date, if they were removed.

        This must match the collision resolution logic in detokenize.cc.
        To format a list of FormattedStrings from most to least successful,
        use sort(key=FormattedString.score, reverse=True).
        """
        failures = sum(1 for arg in self.args if not arg.ok())
        return (
            self.ok(),  # decoded all data and every expected arg was found
            not self.remaining,  # no undecoded bytes left over
            -failures,  # fewer decoding errors rank higher
            len(self.args),  # more decoded arguments rank higher
            date_removed or datetime.max)  # most recently present
class FormatString:
    """Represents a printf-style format string.

    Splits the string into literal segments and format specifiers once, then
    reuses that structure to decode and substitute encoded arguments.
    """
    def __init__(self, format_string: str):
        """Parses format specifiers in the format string."""
        self.format_string = format_string
        self.specifiers = tuple(parse_format_specifiers(self.format_string))
        # List of non-specifier string pieces with room for formatted arguments.
        self._segments = self._parse_string_segments()
    def _parse_string_segments(self) -> List:
        """Splits the format string by format specifiers.

        Returns a list where even indices hold the literal text between
        specifiers and odd indices are placeholders (None) that format()
        later fills with formatted argument values.
        """
        if not self.specifiers:
            return [self.format_string]
        spec_spans = [spec.match.span() for spec in self.specifiers]
        # Start with the part of the format string up to the first specifier.
        string_pieces = [self.format_string[:spec_spans[0][0]]]
        # Collect the literal text between each pair of adjacent specifiers.
        for ((_, end1), (start2, _)) in zip(spec_spans[:-1], spec_spans[1:]):
            string_pieces.append(self.format_string[end1:start2])
        # Append the format string segment after the last format specifier.
        string_pieces.append(self.format_string[spec_spans[-1][1]:])
        # Make a list with spots for the replacements between the string pieces.
        # len(string_pieces) == len(self.specifiers) + 1, so pieces and
        # placeholders alternate, starting and ending with a literal piece.
        segments: List = [None] * (len(string_pieces) + len(self.specifiers))
        segments[::2] = string_pieces
        return segments
    def decode(self, encoded: bytes) -> Tuple[Sequence[DecodedArg], bytes]:
        """Decodes arguments according to the format string.

        Args:
            encoded: bytes; the encoded arguments

        Returns:
            tuple with the decoded arguments and any unparsed data
        """
        decoded_args = []
        fatal_error = False
        index = 0  # offset into `encoded` of the next argument to decode
        for spec in self.specifiers:
            arg = spec.decode(encoded[index:])
            if fatal_error:
                # After an error is encountered, continue to attempt to parse
                # arguments, but mark them all as SKIPPED. If an error occurs,
                # it's impossible to know if subsequent arguments are valid.
                arg.status |= DecodedArg.SKIPPED
            elif not arg.ok():
                fatal_error = True
            decoded_args.append(arg)
            index += len(arg.raw_data)
        return tuple(decoded_args), encoded[index:]
    def format(self,
               encoded_args: bytes,
               show_errors: bool = False) -> FormattedString:
        """Decodes arguments and formats the string with them.

        Note: this mutates self._segments in place before joining, so this
        object is not safe for concurrent format() calls.

        Args:
            encoded_args: the arguments to decode and format the string with
            show_errors: if True, an error message is used in place of the %
                conversion specifier when an argument fails to decode

        Returns:
            tuple with the formatted string, decoded arguments, and remaining data
        """
        # Insert formatted arguments in place of each format specifier.
        args, remaining = self.decode(encoded_args)
        if show_errors:
            self._segments[1::2] = (arg.format() for arg in args)
        else:
            # On failure, fall back to showing the original specifier text.
            self._segments[1::2] = (arg.format()
                                    if arg.ok() else arg.specifier.specifier
                                    for arg in args)
        return FormattedString(''.join(self._segments), args, remaining)
def decode(format_string: str,
           encoded_arguments: bytes,
           show_errors: bool = False) -> str:
    """Decodes arguments and formats them with the provided format string.

    Args:
        format_string: the printf-style format string
        encoded_arguments: encoded arguments with which to format
            format_string; must exclude the 4-byte string token
        show_errors: if True, an error message is used in place of the %
            conversion specifier when an argument fails to decode

    Returns:
        the printf-style formatted string
    """
    formatted = FormatString(format_string).format(encoded_arguments,
                                                   show_errors)
    return formatted.value
|
[
"struct.Struct",
"re.compile"
] |
[((1414, 1542), 're.compile', 're.compile', (['"""%(?:(?P<flags>[+\\\\- #0]*\\\\d*(?:\\\\.\\\\d+)?)(?P<length>hh|h|ll|l|j|z|t|L)?(?P<type>[csdioxXufFeEaAgGnp])|%)"""'], {}), "(\n '%(?:(?P<flags>[+\\\\- #0]*\\\\d*(?:\\\\.\\\\d+)?)(?P<length>hh|h|ll|l|j|z|t|L)?(?P<type>[csdioxXufFeEaAgGnp])|%)'\n )\n", (1424, 1542), False, 'import re\n'), ((1946, 1965), 'struct.Struct', 'struct.Struct', (['"""<f"""'], {}), "('<f')\n", (1959, 1965), False, 'import struct\n')]
|
#!/usr/bin/env python
import unittest
from pyspark.sql import SparkSession
from mmtfPyspark.datasets import dbPtmDataset as pm
from mmtfPyspark.datasets.dbPtmDataset import PtmType
class DbPtmDatasetTest(unittest.TestCase):
    """Smoke tests for the dbPtm post-translational-modification datasets."""

    def setUp(self):
        # One local Spark session per test; tearDown stops it.
        builder = SparkSession.builder.master("local[*]")
        self.spark = builder.appName("DbPtmDatasetTest").getOrCreate()

    def test1(self):
        dataset = pm.download_ptm_dataset(PtmType.S_LINKEDGLYCOSYLATION)
        self.assertGreater(dataset.count(), 4)

    def test2(self):
        dataset = pm.get_ptm_dataset()
        self.assertGreater(dataset.count(), 900000)

    def tearDown(self):
        self.spark.stop()
if __name__ == '__main__':
    # Run the test cases above through the unittest CLI entry point.
    unittest.main()
|
[
"unittest.main",
"mmtfPyspark.datasets.dbPtmDataset.get_ptm_dataset",
"mmtfPyspark.datasets.dbPtmDataset.download_ptm_dataset",
"pyspark.sql.SparkSession.builder.master"
] |
[((703, 718), 'unittest.main', 'unittest.main', ([], {}), '()\n', (716, 718), False, 'import unittest\n'), ((419, 473), 'mmtfPyspark.datasets.dbPtmDataset.download_ptm_dataset', 'pm.download_ptm_dataset', (['PtmType.S_LINKEDGLYCOSYLATION'], {}), '(PtmType.S_LINKEDGLYCOSYLATION)\n', (442, 473), True, 'from mmtfPyspark.datasets import dbPtmDataset as pm\n'), ((551, 571), 'mmtfPyspark.datasets.dbPtmDataset.get_ptm_dataset', 'pm.get_ptm_dataset', ([], {}), '()\n', (569, 571), True, 'from mmtfPyspark.datasets import dbPtmDataset as pm\n'), ((272, 311), 'pyspark.sql.SparkSession.builder.master', 'SparkSession.builder.master', (['"""local[*]"""'], {}), "('local[*]')\n", (299, 311), False, 'from pyspark.sql import SparkSession\n')]
|
import numpy as np
from astropy.table import Table
import glob
# For each MIST EEP track grid, collect every track's initial mass (first
# row, second column) and final-row age (last row, first column), then write
# one CSV summary table per grid.
models = ['MIST_v1.2_feh_m4.00_afe_p0.0_vvcrit0.0_EEPS',
          'MIST_v1.2_feh_m4.00_afe_p0.0_vvcrit0.4_EEPS',
          'MIST_v1.2_feh_p0.00_afe_p0.0_vvcrit0.0_EEPS',
          'MIST_v1.2_feh_p0.00_afe_p0.0_vvcrit0.4_EEPS',
          'MIST_v1.2_feh_p0.50_afe_p0.0_vvcrit0.0_EEPS',
          'MIST_v1.2_feh_p0.50_afe_p0.0_vvcrit0.4_EEPS']
for model in models:
    print(model)
    initial_mass = []
    ms_age = []
    for track_file in glob.glob(model + '/*.txt'):
        # ndmin=2 keeps the result two-dimensional even for a single-row
        # file, where plain loadtxt would return a 1-D array and
        # table[:, 0] / table[0, 1] would raise IndexError.
        table = np.loadtxt(track_file, ndmin=2)
        initial_mass.append(table[0, 1])
        # Last row, first column — presumably the track's final age;
        # confirm against the MIST EEP track file format.
        ms_age.append(table[-1, 0])
    summary = Table()
    summary['initial_mass'] = initial_mass
    summary['ms_age'] = ms_age
    summary.write(model + '_sum.csv')
|
[
"astropy.table.Table",
"numpy.loadtxt",
"glob.glob"
] |
[((820, 827), 'astropy.table.Table', 'Table', ([], {}), '()\n', (825, 827), False, 'from astropy.table import Table\n'), ((640, 667), 'glob.glob', 'glob.glob', (["(model + '/*.txt')"], {}), "(model + '/*.txt')\n", (649, 667), False, 'import glob\n'), ((684, 700), 'numpy.loadtxt', 'np.loadtxt', (['file'], {}), '(file)\n', (694, 700), True, 'import numpy as np\n')]
|
import json
import csv
import pandas as pd
import numpy as np
import seaborn as sns
import matplotlib.pyplot as plt
import itertools
import os
import shutil
def get_dtype_groups(data_types):
    """Partition a dtype -> value mapping into float, object, and int groups.

    Args:
        data_types: mapping (or pandas Series) whose keys are numpy dtypes;
            each matching value is appended to the corresponding group.

    Returns:
        (float_values, object_values, int_values) lists.
    """
    float_dtypes = (np.dtype('float64'), np.dtype('float32'))
    int_dtypes = (np.dtype('int64'), np.dtype('int32'), np.dtype('int16'))
    float_group = []
    object_group = []
    int_group = []
    for dtype, value in data_types.items():
        if dtype in float_dtypes:
            float_group.append(value)
        if dtype == np.dtype('O'):
            object_group.append(value)
        if dtype in int_dtypes:
            int_group.append(value)
    return float_group, object_group, int_group
def findsubsets(s, n):
    """Return every ordered length-n arrangement of the elements of s.

    NOTE(review): despite the name, this returns permutations (ordered),
    not subsets; callers rely on the ordered behavior.
    """
    return [*itertools.permutations(s, n)]
def plot_3d(data, headers, data_types, filename):
    """Save 3-D scatter plots for triples of numeric columns of ``data``.

    Recreates ``saved_plots/<filename>_Misc_Plots/scatter_3dPlots`` and
    writes one PNG per ordered triple drawn from up to four float/int
    columns (as grouped by get_dtype_groups).

    Args:
        data: column-indexable data (e.g. a DataFrame): data[col] -> values
        headers: unused; kept for interface compatibility
        data_types: dtype -> column-list mapping passed to get_dtype_groups
        filename: base name used to build the output directory
    """
    dirpath = 'saved_plots/{}_Misc_Plots'.format(filename)
    sub_folders = ['scatter_3dPlots']
    # Start from a clean output tree.
    if os.path.exists(dirpath) and os.path.isdir(dirpath):
        shutil.rmtree(dirpath)
    os.makedirs(dirpath)
    for folder in sub_folders:
        # BUG FIX: the old code tested/removed the bare folder name relative
        # to the current working directory, which could delete an unrelated
        # directory; operate on the path inside dirpath instead.
        subdir = os.path.join(dirpath, folder)
        if os.path.isdir(subdir):
            shutil.rmtree(subdir)
        os.makedirs(subdir)

    float_unis, object_unis, int_unis = get_dtype_groups(data_types)
    palette = itertools.cycle(sns.color_palette())

    # BUG FIX: pairs_3d used to be left undefined when there were no float
    # columns, causing a NameError later; default to "nothing to plot".
    pairs_3d = []
    if len(float_unis) > 0:
        if len(int_unis) > 0:
            cols = float_unis[0] + int_unis[0]
        else:
            cols = float_unis[0]
        # Cap at four columns so the number of plots stays manageable.
        pairs_3d = findsubsets(cols[:4], 3)

    for triple in pairs_3d:
        fig = None
        try:
            fig = plt.figure()
            ax = plt.axes(projection='3d')
            ax.scatter(data[triple[0]], data[triple[1]],
                       data[triple[2]], color=next(palette))
            ax.legend()
            ax.set_xlabel(triple[0], fontsize=20)
            ax.set_ylabel(triple[1], fontsize=20)
            ax.set_zlabel(triple[2], fontsize=20, rotation=0)
            fig.set_size_inches(18.5, 10.5)
            plt.savefig('./{}/scatter_3dPlots/{}_{}_{}_set.png'.format(
                dirpath, triple[0], triple[1], triple[2]))
        except Exception as e:
            # BUG FIX: the old handler referenced the loop variable even when
            # it could be unbound; per-iteration handling also lets the
            # remaining plots proceed after one failure.
            print(e)
            print('error occured while plotting {} columns.'.format(triple))
        finally:
            # Close each figure so repeated calls do not accumulate figures.
            if fig is not None:
                plt.close(fig)
def plot_groupby(data, headers, data_types, filename):
    """Save bar plots of column means grouped by each categorical column.

    For each object-dtype column with at most 30 distinct values, plots
    data.groupby(col).mean() as a bar chart into
    ``saved_plots/<filename>_Misc_Plots`` (directory must already exist,
    e.g. created by plot_3d).

    Args:
        data: pandas DataFrame
        headers: unused; kept for interface compatibility
        data_types: dtype -> column-list mapping passed to get_dtype_groups
        filename: base name used to build the output directory
    """
    dirpath = 'saved_plots/{}_Misc_Plots'.format(filename)
    float_unis, object_unis, int_unis = get_dtype_groups(data_types)
    if len(object_unis) == 0:
        return
    for j in object_unis[0]:
        try:
            # BUG FIX: check cardinality before doing any grouping/plotting
            # work, instead of after drawing the chart.
            if len(pd.unique(data[j])) > 30:
                continue
            df = data.groupby(j).mean()
            fig, ax = plt.subplots()
            # BUG FIX: draw onto the created axes; previously df.plot()
            # opened its own figure, so set_size_inches resized an empty,
            # leaked figure instead of the one being saved.
            df.plot(kind='bar', ax=ax)
            fig.set_size_inches(18.5, 10.5)
            plt.savefig('./{}/groupby_{}_bar_plot.png'.format(dirpath, j))
            plt.close(fig)
        except Exception as e:
            print(e)
            print('error occured while plotting groupby by {} column.'.format(j))
|
[
"os.makedirs",
"matplotlib.pyplot.axes",
"os.path.isdir",
"itertools.permutations",
"numpy.dtype",
"os.path.exists",
"pandas.unique",
"matplotlib.pyplot.figure",
"seaborn.color_palette",
"shutil.rmtree",
"matplotlib.pyplot.subplots"
] |
[((933, 953), 'os.makedirs', 'os.makedirs', (['dirpath'], {}), '(dirpath)\n', (944, 953), False, 'import os\n'), ((1107, 1119), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (1117, 1119), True, 'import matplotlib.pyplot as plt\n'), ((1129, 1154), 'matplotlib.pyplot.axes', 'plt.axes', ([], {'projection': '"""3d"""'}), "(projection='3d')\n", (1137, 1154), True, 'import matplotlib.pyplot as plt\n'), ((658, 686), 'itertools.permutations', 'itertools.permutations', (['s', 'n'], {}), '(s, n)\n', (680, 686), False, 'import itertools\n'), ((845, 868), 'os.path.exists', 'os.path.exists', (['dirpath'], {}), '(dirpath)\n', (859, 868), False, 'import os\n'), ((873, 895), 'os.path.isdir', 'os.path.isdir', (['dirpath'], {}), '(dirpath)\n', (886, 895), False, 'import os\n'), ((905, 927), 'shutil.rmtree', 'shutil.rmtree', (['dirpath'], {}), '(dirpath)\n', (918, 927), False, 'import shutil\n'), ((1069, 1099), 'os.makedirs', 'os.makedirs', (["(dirpath + '/' + i)"], {}), "(dirpath + '/' + i)\n", (1080, 1099), False, 'import os\n'), ((1255, 1274), 'seaborn.color_palette', 'sns.color_palette', ([], {}), '()\n', (1272, 1274), True, 'import seaborn as sns\n'), ((403, 416), 'numpy.dtype', 'np.dtype', (['"""O"""'], {}), "('O')\n", (411, 416), True, 'import numpy as np\n'), ((991, 1008), 'os.path.exists', 'os.path.exists', (['i'], {}), '(i)\n', (1005, 1008), False, 'import os\n'), ((1013, 1029), 'os.path.isdir', 'os.path.isdir', (['i'], {}), '(i)\n', (1026, 1029), False, 'import os\n'), ((1043, 1059), 'shutil.rmtree', 'shutil.rmtree', (['i'], {}), '(i)\n', (1056, 1059), False, 'import shutil\n'), ((305, 324), 'numpy.dtype', 'np.dtype', (['"""float64"""'], {}), "('float64')\n", (313, 324), True, 'import numpy as np\n'), ((333, 352), 'numpy.dtype', 'np.dtype', (['"""float32"""'], {}), "('float32')\n", (341, 352), True, 'import numpy as np\n'), ((468, 485), 'numpy.dtype', 'np.dtype', (['"""int64"""'], {}), "('int64')\n", (476, 485), True, 'import numpy as np\n'), ((494, 511), 
'numpy.dtype', 'np.dtype', (['"""int32"""'], {}), "('int32')\n", (502, 511), True, 'import numpy as np\n'), ((520, 537), 'numpy.dtype', 'np.dtype', (['"""int16"""'], {}), "('int16')\n", (528, 537), True, 'import numpy as np\n'), ((1706, 1718), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (1716, 1718), True, 'import matplotlib.pyplot as plt\n'), ((1740, 1765), 'matplotlib.pyplot.axes', 'plt.axes', ([], {'projection': '"""3d"""'}), "(projection='3d')\n", (1748, 1765), True, 'import matplotlib.pyplot as plt\n'), ((2744, 2758), 'matplotlib.pyplot.subplots', 'plt.subplots', ([], {}), '()\n', (2756, 2758), True, 'import matplotlib.pyplot as plt\n'), ((2879, 2897), 'pandas.unique', 'pd.unique', (['data[j]'], {}), '(data[j])\n', (2888, 2897), True, 'import pandas as pd\n')]
|
#!/usr/bin/env python
# encoding: utf-8
#
# Copyright SAS Institute
#
# Licensed under the Apache License, Version 2.0 (the License);
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import os
from dlpy.sequential import Sequential
from dlpy.layers import InputLayer, Conv2d, BN, Pooling, Concat, OutputLayer, GlobalAveragePooling2D
from dlpy.blocks import DenseNetBlock
from .application_utils import get_layer_options, input_layer_options
from dlpy.model import Model
from dlpy.utils import DLPyError
from dlpy.network import extract_input_layer, extract_output_layer, extract_conv_layer
def DenseNet(conn, model_table='DenseNet', n_classes=None, conv_channel=16, growth_rate=12, n_blocks=4,
             n_cells=4, n_channels=3, width=32, height=32, scale=1, random_flip=None, random_crop=None,
             offsets=(85, 111, 139), random_mutation=None):
    '''
    Generates a deep learning model with the DenseNet architecture.

    Parameters
    ----------
    conn : CAS
        Specifies the connection of the CAS connection.
    model_table : string
        Specifies the name of CAS table to store the model.
    n_classes : int, optional
        Specifies the number of classes. If None is assigned, the model will
        automatically detect the number of classes based on the training set.
        Default: None
    conv_channel : int, optional
        Specifies the number of filters of the first convolution layer.
        Default: 16
    growth_rate : int, optional
        Specifies the growth rate of convolution layers.
        Default: 12
    n_blocks : int, optional
        Specifies the number of DenseNet blocks.
        Default: 4
    n_cells : int, optional
        Specifies the number of dense connection for each DenseNet block.
        Default: 4
    n_channels : int, optional
        Specifies the number of the channels (i.e., depth) of the input layer.
        Default: 3
    width : int, optional
        Specifies the width of the input layer.
        Default: 32
    height : int, optional
        Specifies the height of the input layer.
        Default: 32
    scale : double, optional
        Specifies a scaling factor to be applied to each pixel intensity values.
        Default: 1
    random_flip : string, optional
        Specifies how to flip the data in the input layer when image data is
        used. Approximately half of the input data is subject to flipping.
        Valid Values: 'h', 'hv', 'v', 'none'
    random_crop : string, optional
        Specifies how to crop the data in the input layer when image data is
        used. Images are cropped to the values that are specified in the width
        and height parameters. Only the images with one or both dimensions
        that are larger than those sizes are cropped.
        Valid Values: 'none', 'unique', 'randomresized', 'resizethencrop'
    offsets : double or iter-of-doubles, optional
        Specifies an offset for each channel in the input data. The final input
        data is set after applying scaling and subtracting the specified offsets.
        Default: (85, 111, 139)
    random_mutation : string, optional
        Specifies how to apply data augmentations/mutations to the data in the input layer.
        Valid Values: 'none', 'random'

    Returns
    -------
    :class:`Sequential`

    References
    ----------
    https://arxiv.org/pdf/1608.06993.pdf

    '''
    # Make sure the deepLearn action set is available on the CAS server.
    conn.retrieve('loadactionset', _messagelevel='error', actionset='deeplearn')

    # get all the parms passed in (captured by name so the input-layer
    # options can be extracted from them below)
    parameters = locals()

    channel_in = conv_channel  # number of channel of transition conv layer
    model = Sequential(conn=conn, model_table=model_table)
    # get the input parameters
    input_parameters = get_layer_options(input_layer_options, parameters)
    model.add(InputLayer(**input_parameters))
    # Top layers
    model.add(Conv2d(conv_channel, width=3, act='identity', include_bias=False, stride=1))
    for i in range(n_blocks):
        model.add(DenseNetBlock(n_cells=n_cells, kernel_size=3, n_filter=growth_rate, stride=1))
        # transition block
        # Each block adds growth_rate channels per cell, so track the
        # running channel count for the transition convolution.
        channel_in += (growth_rate * n_cells)
        model.add(BN(act='relu'))
        # No transition conv/pool after the final block; it feeds the
        # global-average-pooling head directly.
        if i != (n_blocks - 1):
            model.add(Conv2d(channel_in, width=3, act='identity', include_bias=False, stride=1))
            model.add(Pooling(width=2, height=2, pool='mean'))
    model.add(GlobalAveragePooling2D())
    model.add(OutputLayer(act='softmax', n=n_classes))
    return model
def DenseNet121(conn, model_table='DENSENET121', n_classes=1000, conv_channel=64, growth_rate=32,
                n_cells=(6, 12, 24, 16), n_channels=3, reduction=0.5, width=224, height=224, scale=1,
                random_flip=None, random_crop=None, offsets=(103.939, 116.779, 123.68), random_mutation=None):
    '''
    Generates a deep learning model with the DenseNet121 architecture.

    Parameters
    ----------
    conn : CAS
        Specifies the connection of the CAS connection.
    model_table : string
        Specifies the name of CAS table to store the model.
    n_classes : int, optional
        Specifies the number of classes. If None is assigned, the model will
        automatically detect the number of classes based on the training set.
        Default: 1000
    conv_channel : int, optional
        Specifies the number of filters of the first convolution layer.
        Default: 64
    growth_rate : int, optional
        Specifies the growth rate of convolution layers.
        Default: 32
    n_cells : iter-of-ints length=4, optional
        Specifies the number of dense connection for each DenseNet block.
        Default: (6, 12, 24, 16)
    reduction : double, optional
        Specifies the factor of transition blocks.
        Default: 0.5
    n_channels : int, optional
        Specifies the number of the channels (i.e., depth) of the input layer.
        Default: 3.
    width : int, optional
        Specifies the width of the input layer.
        Default: 224.
    height : int, optional
        Specifies the height of the input layer.
        Default: 224.
    scale : double, optional
        Specifies a scaling factor to be applied to each pixel intensity values.
        Default: 1.
    random_flip : string, optional
        Specifies how to flip the data in the input layer when image data is
        used. Approximately half of the input data is subject to flipping.
        Valid Values: 'h', 'hv', 'v', 'none'
    random_crop : string, optional
        Specifies how to crop the data in the input layer when image data is
        used. Images are cropped to the values that are specified in the width
        and height parameters. Only the images with one or both dimensions
        that are larger than those sizes are cropped.
        Valid Values: 'none', 'unique', 'randomresized', 'resizethencrop'
    offsets : double or iter-of-doubles, optional
        Specifies an offset for each channel in the input data. The final input
        data is set after applying scaling and subtracting the specified offsets.
        Default: (103.939, 116.779, 123.68)
    random_mutation : string, optional
        Specifies how to apply data augmentations/mutations to the data in the input layer.
        Valid Values: 'none', 'random'

    Returns
    -------
    :class:`Sequential`

    References
    ----------
    https://arxiv.org/pdf/1608.06993.pdf

    '''
    # NOTE: n_cells defaults to a tuple rather than a list to avoid the
    # shared-mutable-default-argument pitfall; it is only read (len/index),
    # so list arguments keep working unchanged.
    conn.retrieve('loadactionset', _messagelevel='error', actionset='deeplearn')

    # get all the parms passed in (by name, for input-layer option extraction)
    parameters = locals()

    n_blocks = len(n_cells)
    model = Sequential(conn=conn, model_table=model_table)
    # get the input parameters
    input_parameters = get_layer_options(input_layer_options, parameters)
    model.add(InputLayer(**input_parameters))
    # Top layers
    model.add(Conv2d(conv_channel, width=7, act='identity', include_bias=False, stride=2))
    model.add(BN(act='relu'))
    src_layer = Pooling(width=3, height=3, stride=2, padding=1, pool='max')
    model.add(src_layer)
    for i in range(n_blocks):
        for _ in range(n_cells[i]):
            # Dense cell: 1x1 bottleneck conv (4 * growth_rate filters)
            # followed by a 3x3 conv producing growth_rate filters.
            model.add(BN(act='relu'))
            model.add(Conv2d(n_filters=growth_rate * 4, width=1, act='identity', stride=1, include_bias=False))
            model.add(BN(act='relu'))
            src_layer2 = Conv2d(n_filters=growth_rate, width=3, act='identity', stride=1, include_bias=False)
            model.add(src_layer2)
            # Concatenate the new features with everything produced so far;
            # each cell grows the channel count by growth_rate.
            src_layer = Concat(act='identity', src_layers=[src_layer, src_layer2])
            model.add(src_layer)
            conv_channel += growth_rate
        if i != (n_blocks - 1):
            # transition block: compress channels by `reduction`, then halve
            # the spatial resolution with mean pooling.
            conv_channel = int(conv_channel * reduction)
            model.add(BN(act='relu'))
            model.add(Conv2d(n_filters=conv_channel, width=1, act='identity', stride=1, include_bias=False))
            src_layer = Pooling(width=2, height=2, stride=2, pool='mean')
            model.add(src_layer)
    model.add(BN(act='identity'))
    # Bottom Layers
    model.add(GlobalAveragePooling2D())
    model.add(OutputLayer(act='softmax', n=n_classes))
    return model
def DenseNet121_ONNX(conn, model_file, n_classes=1000, width=224, height=224,
                     offsets=(255*0.406, 255*0.456, 255*0.485), norm_stds=(255*0.225, 255*0.224, 255*0.229),
                     random_flip=None, random_crop=None, random_mutation=None, include_top=False):
    """
    Generates a deep learning model with the DenseNet121_ONNX architecture.

    The model architecture and pre-trained weights is generated from DenseNet121 ONNX trained on ImageNet dataset.
    The model file and the weights file can be downloaded from https://support.sas.com/documentation/prod-p/vdmml/zip/.
    To learn more information about the model and pre-processing.
    Please go to the websites: https://github.com/onnx/models/tree/master/vision/classification/densenet-121.

    Parameters
    ----------
    conn : CAS
        Specifies the CAS connection object.
    model_file : string
        Specifies the absolute server-side path of the model table file.
        The model table file can be downloaded from https://support.sas.com/documentation/prod-p/vdmml/zip/.
    n_classes : int, optional
        Specifies the number of classes.
        Default: 1000
    width : int, optional
        Specifies the width of the input layer.
        Default: 224
    height : int, optional
        Specifies the height of the input layer.
        Default: 224
    offsets : double or iter-of-doubles, optional
        Specifies an offset for each channel in the input data. The final input
        data is set after applying scaling and subtracting the specified offsets.
        The channel order is BGR.
        Default: (255*0.406, 255*0.456, 255*0.485)
    norm_stds : double or iter-of-doubles, optional
        Specifies a standard deviation for each channel in the input data.
        The final input data is normalized with specified means and standard deviations.
        The channel order is BGR.
        Default: (255*0.225, 255*0.224, 255*0.229)
    random_flip : string, optional
        Specifies how to flip the data in the input layer when image data is
        used. Approximately half of the input data is subject to flipping.
        Valid Values: 'h', 'hv', 'v', 'none'
    random_crop : string, optional
        Specifies how to crop the data in the input layer when image data is
        used. Images are cropped to the values that are specified in the width
        and height parameters. Only the images with one or both dimensions
        that are larger than those sizes are cropped.
        Valid Values: 'none', 'unique', 'randomresized', 'resizethencrop'
    random_mutation : string, optional
        Specifies how to apply data augmentations/mutations to the data in the input layer.
        Valid Values: 'none', 'random'
    include_top : bool, optional
        Specifies whether to include pre-trained weights of the top layers (i.e., the FC layers)
        Default: False

    """
    parameters = locals()
    input_parameters = get_layer_options(input_layer_options, parameters)
    # load model and model weights
    model = Model.from_sashdat(conn, path = model_file)
    # check if a user points to a correct model: a valid DenseNet121_ONNX
    # SASHDAT is expected to contain exactly 307 layer rows.
    if model.summary.shape[0] != 307:
        raise DLPyError("The model file doesn't point to a valid DenseNet121_ONNX model. "
                        "Please check the SASHDAT file.")
    # extract input layer config (layer ID 0 is the input layer)
    model_table_df = conn.CASTable(**model.model_table).to_frame()
    input_layer_df = model_table_df[model_table_df['_DLLayerID_'] == 0]
    input_layer = extract_input_layer(input_layer_df)
    input_layer_config = input_layer.config
    # overlay the user-supplied input options on top of the stored config
    input_layer_config.update(input_parameters)
    # replace the input layer in the layer list, preserving its name
    model.layers[0] = InputLayer(**input_layer_config, name=model.layers[0].name)
    # warn (but continue) if the companion weights table was not loaded
    if not conn.tableexists(model.model_weights.name).exists:
        weights_file_path = os.path.join(os.path.dirname(model_file), model.model_name + '_weights.sashdat')
        print('WARNING: Model weights is not attached '
              'since system cannot find a weights file located at {}'.format(weights_file_path))
    if include_top:
        # Pre-trained FC weights only exist for the original 1000 classes.
        if n_classes != 1000:
            raise DLPyError("If include_top is enabled, n_classes has to be 1000.")
    else:
        # since the output layer is non fully connected layer,
        # we need to modify the convolution right before the output. The number of filter is set to n_classes.
        # Layer ID 305 is that final convolution; 306 is the output layer.
        conv_layer_df = model_table_df[model_table_df['_DLLayerID_'] == 305]
        conv_layer = extract_conv_layer(conv_layer_df)
        conv_layer_config = conv_layer.config
        # update the conv layer config with the requested class count
        conv_layer_config.update({'n_filters': n_classes})
        # replace the conv layer, keeping its name and source layer
        model.layers[-2] = Conv2d(**conv_layer_config,
                                  name=model.layers[-2].name, src_layers=model.layers[-3])
        # overwrite n_classes in output layer
        out_layer_df = model_table_df[model_table_df['_DLLayerID_'] == 306]
        out_layer = extract_output_layer(out_layer_df)
        out_layer_config = out_layer.config
        # update the output layer config with the requested class count
        out_layer_config.update({'n': n_classes})
        # replace the output layer, keeping its name and source layer
        model.layers[-1] = OutputLayer(**out_layer_config,
                                       name = model.layers[-1].name, src_layers=model.layers[-2])
        # remove top weights (everything from the modified conv onward)
        model.model_weights.append_where('_LayerID_<305')
        model._retrieve_('table.partition', table=model.model_weights,
                         casout=dict(replace=True, name=model.model_weights.name))
        model.set_weights(model.model_weights.name)
    # recompile the whole network according to the new layer list
    model.compile()
    return model
|
[
"dlpy.network.extract_conv_layer",
"dlpy.utils.DLPyError",
"dlpy.sequential.Sequential",
"dlpy.network.extract_input_layer",
"dlpy.layers.Concat",
"dlpy.layers.OutputLayer",
"os.path.dirname",
"dlpy.layers.Conv2d",
"dlpy.network.extract_output_layer",
"dlpy.layers.InputLayer",
"dlpy.layers.GlobalAveragePooling2D",
"dlpy.layers.Pooling",
"dlpy.model.Model.from_sashdat",
"dlpy.layers.BN",
"dlpy.blocks.DenseNetBlock"
] |
[((4077, 4123), 'dlpy.sequential.Sequential', 'Sequential', ([], {'conn': 'conn', 'model_table': 'model_table'}), '(conn=conn, model_table=model_table)\n', (4087, 4123), False, 'from dlpy.sequential import Sequential\n'), ((8023, 8069), 'dlpy.sequential.Sequential', 'Sequential', ([], {'conn': 'conn', 'model_table': 'model_table'}), '(conn=conn, model_table=model_table)\n', (8033, 8069), False, 'from dlpy.sequential import Sequential\n'), ((8377, 8436), 'dlpy.layers.Pooling', 'Pooling', ([], {'width': '(3)', 'height': '(3)', 'stride': '(2)', 'padding': '(1)', 'pool': '"""max"""'}), "(width=3, height=3, stride=2, padding=1, pool='max')\n", (8384, 8436), False, 'from dlpy.layers import InputLayer, Conv2d, BN, Pooling, Concat, OutputLayer, GlobalAveragePooling2D\n'), ((12636, 12677), 'dlpy.model.Model.from_sashdat', 'Model.from_sashdat', (['conn'], {'path': 'model_file'}), '(conn, path=model_file)\n', (12654, 12677), False, 'from dlpy.model import Model\n'), ((13106, 13141), 'dlpy.network.extract_input_layer', 'extract_input_layer', (['input_layer_df'], {}), '(input_layer_df)\n', (13125, 13141), False, 'from dlpy.network import extract_input_layer, extract_output_layer, extract_conv_layer\n'), ((13316, 13375), 'dlpy.layers.InputLayer', 'InputLayer', ([], {'name': 'model.layers[0].name'}), '(**input_layer_config, name=model.layers[0].name)\n', (13326, 13375), False, 'from dlpy.layers import InputLayer, Conv2d, BN, Pooling, Concat, OutputLayer, GlobalAveragePooling2D\n'), ((4244, 4274), 'dlpy.layers.InputLayer', 'InputLayer', ([], {}), '(**input_parameters)\n', (4254, 4274), False, 'from dlpy.layers import InputLayer, Conv2d, BN, Pooling, Concat, OutputLayer, GlobalAveragePooling2D\n'), ((4308, 4383), 'dlpy.layers.Conv2d', 'Conv2d', (['conv_channel'], {'width': '(3)', 'act': '"""identity"""', 'include_bias': '(False)', 'stride': '(1)'}), "(conv_channel, width=3, act='identity', include_bias=False, stride=1)\n", (4314, 4383), False, 'from dlpy.layers import InputLayer, 
Conv2d, BN, Pooling, Concat, OutputLayer, GlobalAveragePooling2D\n'), ((4827, 4851), 'dlpy.layers.GlobalAveragePooling2D', 'GlobalAveragePooling2D', ([], {}), '()\n', (4849, 4851), False, 'from dlpy.layers import InputLayer, Conv2d, BN, Pooling, Concat, OutputLayer, GlobalAveragePooling2D\n'), ((4868, 4907), 'dlpy.layers.OutputLayer', 'OutputLayer', ([], {'act': '"""softmax"""', 'n': 'n_classes'}), "(act='softmax', n=n_classes)\n", (4879, 4907), False, 'from dlpy.layers import InputLayer, Conv2d, BN, Pooling, Concat, OutputLayer, GlobalAveragePooling2D\n'), ((8190, 8220), 'dlpy.layers.InputLayer', 'InputLayer', ([], {}), '(**input_parameters)\n', (8200, 8220), False, 'from dlpy.layers import InputLayer, Conv2d, BN, Pooling, Concat, OutputLayer, GlobalAveragePooling2D\n'), ((8254, 8329), 'dlpy.layers.Conv2d', 'Conv2d', (['conv_channel'], {'width': '(7)', 'act': '"""identity"""', 'include_bias': '(False)', 'stride': '(2)'}), "(conv_channel, width=7, act='identity', include_bias=False, stride=2)\n", (8260, 8329), False, 'from dlpy.layers import InputLayer, Conv2d, BN, Pooling, Concat, OutputLayer, GlobalAveragePooling2D\n'), ((8345, 8359), 'dlpy.layers.BN', 'BN', ([], {'act': '"""relu"""'}), "(act='relu')\n", (8347, 8359), False, 'from dlpy.layers import InputLayer, Conv2d, BN, Pooling, Concat, OutputLayer, GlobalAveragePooling2D\n'), ((9413, 9431), 'dlpy.layers.BN', 'BN', ([], {'act': '"""identity"""'}), "(act='identity')\n", (9415, 9431), False, 'from dlpy.layers import InputLayer, Conv2d, BN, Pooling, Concat, OutputLayer, GlobalAveragePooling2D\n'), ((9467, 9491), 'dlpy.layers.GlobalAveragePooling2D', 'GlobalAveragePooling2D', ([], {}), '()\n', (9489, 9491), False, 'from dlpy.layers import InputLayer, Conv2d, BN, Pooling, Concat, OutputLayer, GlobalAveragePooling2D\n'), ((9508, 9547), 'dlpy.layers.OutputLayer', 'OutputLayer', ([], {'act': '"""softmax"""', 'n': 'n_classes'}), "(act='softmax', n=n_classes)\n", (9519, 9547), False, 'from dlpy.layers import InputLayer, 
Conv2d, BN, Pooling, Concat, OutputLayer, GlobalAveragePooling2D\n'), ((12781, 12898), 'dlpy.utils.DLPyError', 'DLPyError', (['"""The model file doesn\'t point to a valid DenseNet121_ONNX model. Please check the SASHDAT file."""'], {}), '(\n "The model file doesn\'t point to a valid DenseNet121_ONNX model. Please check the SASHDAT file."\n )\n', (12790, 12898), False, 'from dlpy.utils import DLPyError\n'), ((14163, 14196), 'dlpy.network.extract_conv_layer', 'extract_conv_layer', (['conv_layer_df'], {}), '(conv_layer_df)\n', (14181, 14196), False, 'from dlpy.network import extract_input_layer, extract_output_layer, extract_conv_layer\n'), ((14397, 14486), 'dlpy.layers.Conv2d', 'Conv2d', ([], {'name': 'model.layers[-2].name', 'src_layers': 'model.layers[-3]'}), '(**conv_layer_config, name=model.layers[-2].name, src_layers=model.\n layers[-3])\n', (14403, 14486), False, 'from dlpy.layers import InputLayer, Conv2d, BN, Pooling, Concat, OutputLayer, GlobalAveragePooling2D\n'), ((14659, 14693), 'dlpy.network.extract_output_layer', 'extract_output_layer', (['out_layer_df'], {}), '(out_layer_df)\n', (14679, 14693), False, 'from dlpy.network import extract_input_layer, extract_output_layer, extract_conv_layer\n'), ((14883, 14976), 'dlpy.layers.OutputLayer', 'OutputLayer', ([], {'name': 'model.layers[-1].name', 'src_layers': 'model.layers[-2]'}), '(**out_layer_config, name=model.layers[-1].name, src_layers=\n model.layers[-2])\n', (14894, 14976), False, 'from dlpy.layers import InputLayer, Conv2d, BN, Pooling, Concat, OutputLayer, GlobalAveragePooling2D\n'), ((4434, 4511), 'dlpy.blocks.DenseNetBlock', 'DenseNetBlock', ([], {'n_cells': 'n_cells', 'kernel_size': '(3)', 'n_filter': 'growth_rate', 'stride': '(1)'}), '(n_cells=n_cells, kernel_size=3, n_filter=growth_rate, stride=1)\n', (4447, 4511), False, 'from dlpy.blocks import DenseNetBlock\n'), ((4604, 4618), 'dlpy.layers.BN', 'BN', ([], {'act': '"""relu"""'}), "(act='relu')\n", (4606, 4618), False, 'from dlpy.layers import 
InputLayer, Conv2d, BN, Pooling, Concat, OutputLayer, GlobalAveragePooling2D\n'), ((8744, 8832), 'dlpy.layers.Conv2d', 'Conv2d', ([], {'n_filters': 'growth_rate', 'width': '(3)', 'act': '"""identity"""', 'stride': '(1)', 'include_bias': '(False)'}), "(n_filters=growth_rate, width=3, act='identity', stride=1,\n include_bias=False)\n", (8750, 8832), False, 'from dlpy.layers import InputLayer, Conv2d, BN, Pooling, Concat, OutputLayer, GlobalAveragePooling2D\n'), ((8888, 8946), 'dlpy.layers.Concat', 'Concat', ([], {'act': '"""identity"""', 'src_layers': '[src_layer, src_layer2]'}), "(act='identity', src_layers=[src_layer, src_layer2])\n", (8894, 8946), False, 'from dlpy.layers import InputLayer, Conv2d, BN, Pooling, Concat, OutputLayer, GlobalAveragePooling2D\n'), ((9314, 9363), 'dlpy.layers.Pooling', 'Pooling', ([], {'width': '(2)', 'height': '(2)', 'stride': '(2)', 'pool': '"""mean"""'}), "(width=2, height=2, stride=2, pool='mean')\n", (9321, 9363), False, 'from dlpy.layers import InputLayer, Conv2d, BN, Pooling, Concat, OutputLayer, GlobalAveragePooling2D\n'), ((13525, 13552), 'os.path.dirname', 'os.path.dirname', (['model_file'], {}), '(model_file)\n', (13540, 13552), False, 'import os\n'), ((13815, 13880), 'dlpy.utils.DLPyError', 'DLPyError', (['"""If include_top is enabled, n_classes has to be 1000."""'], {}), "('If include_top is enabled, n_classes has to be 1000.')\n", (13824, 13880), False, 'from dlpy.utils import DLPyError\n'), ((4674, 4747), 'dlpy.layers.Conv2d', 'Conv2d', (['channel_in'], {'width': '(3)', 'act': '"""identity"""', 'include_bias': '(False)', 'stride': '(1)'}), "(channel_in, width=3, act='identity', include_bias=False, stride=1)\n", (4680, 4747), False, 'from dlpy.layers import InputLayer, Conv2d, BN, Pooling, Concat, OutputLayer, GlobalAveragePooling2D\n'), ((4771, 4810), 'dlpy.layers.Pooling', 'Pooling', ([], {'width': '(2)', 'height': '(2)', 'pool': '"""mean"""'}), "(width=2, height=2, pool='mean')\n", (4778, 4810), False, 'from dlpy.layers 
import InputLayer, Conv2d, BN, Pooling, Concat, OutputLayer, GlobalAveragePooling2D\n'), ((8552, 8566), 'dlpy.layers.BN', 'BN', ([], {'act': '"""relu"""'}), "(act='relu')\n", (8554, 8566), False, 'from dlpy.layers import InputLayer, Conv2d, BN, Pooling, Concat, OutputLayer, GlobalAveragePooling2D\n'), ((8590, 8682), 'dlpy.layers.Conv2d', 'Conv2d', ([], {'n_filters': '(growth_rate * 4)', 'width': '(1)', 'act': '"""identity"""', 'stride': '(1)', 'include_bias': '(False)'}), "(n_filters=growth_rate * 4, width=1, act='identity', stride=1,\n include_bias=False)\n", (8596, 8682), False, 'from dlpy.layers import InputLayer, Conv2d, BN, Pooling, Concat, OutputLayer, GlobalAveragePooling2D\n'), ((8703, 8717), 'dlpy.layers.BN', 'BN', ([], {'act': '"""relu"""'}), "(act='relu')\n", (8705, 8717), False, 'from dlpy.layers import InputLayer, Conv2d, BN, Pooling, Concat, OutputLayer, GlobalAveragePooling2D\n'), ((9165, 9179), 'dlpy.layers.BN', 'BN', ([], {'act': '"""relu"""'}), "(act='relu')\n", (9167, 9179), False, 'from dlpy.layers import InputLayer, Conv2d, BN, Pooling, Concat, OutputLayer, GlobalAveragePooling2D\n'), ((9203, 9292), 'dlpy.layers.Conv2d', 'Conv2d', ([], {'n_filters': 'conv_channel', 'width': '(1)', 'act': '"""identity"""', 'stride': '(1)', 'include_bias': '(False)'}), "(n_filters=conv_channel, width=1, act='identity', stride=1,\n include_bias=False)\n", (9209, 9292), False, 'from dlpy.layers import InputLayer, Conv2d, BN, Pooling, Concat, OutputLayer, GlobalAveragePooling2D\n')]
|
#!/usr/bin/env python3
import matplotlib
import matplotlib.pyplot as plt
from matplotlib.gridspec import GridSpec
import numpy as np
import sys
import operator
import bisect
import os
class Scale:
def __init__(self, prefix, mult):
self.prefix = prefix
self.mult = float(mult)
def __call__(self):
return self.mult
def __repr__(self):
return self.prefix
Unit = Scale("", 1)
K = Scale("K", 1000)
Ki = Scale("Ki", 1024)
Mi = Scale("Mi", 1<<20)
Gi = Scale("Gi", 1<<30)
G = Scale("G", 1e9)
class PlotHelper:
def __init__(self, numPlots, scale=1, columns=None):
self.numPlots = numPlots
if columns:
self.columns = columns
else:
self.columns = 2 if numPlots > 4 else 1
self.rows = int((numPlots+0.5)/self.columns)
# You may need: sudo pip3 install --upgrade matplotlib
self.fig = plt.figure(#constrained_layout=True,
figsize = (scale*7*self.columns, scale*self.rows*2))
self.gridspec = GridSpec(self.rows, self.columns)
#self.fig, self.axes = plt.subplots(rows, columns, figsize=())
#self.axes = self.axes.transpose().flatten()
plt.subplots_adjust(left=0.06, right=0.94, hspace=0.6, top=0.95, bottom=0.05);
self.nextAxisSlot = 0
def nextAxis(self, depth=1):
startSpot = self.nextAxisSlot
self.nextAxisSlot += depth
col = int(startSpot / self.rows)
row = int(startSpot % self.rows)
endRow = row + depth
return self.fig.add_subplot(self.gridspec[row:endRow, col])
def save(self, figname):
#plt.tight_layout()
plt.savefig(figname)
class LambdaTrace:
"""Wrap a trace in a function."""
def __init__(self, lam, units):
self.lam = lam
self.units = units
def __getitem__(self, opn):
return self.lam(opn)
class StackedTraces:
"""Sum a set of traces."""
def __init__(self, traces):
self.traces = traces
self.units = traces[0].units
def __getitem__(self, opn):
return sum([tr[opn] for tr in self.traces])
def plotVsKop(ax, exp, lam, debug=False):
# ax: which axis to apply the x-label to
# lam(opn): compute a y value for a given opn value
# returns xs,ys suitable to be passed to plt.plot
ax.set_xlabel("op num (K)")
ax.set_xlim(left = 0, right=exp.op_max/K())
xs = []
ys = []
for opn in exp.sortedOpns:
try:
x = opn/K()
y = lam(opn)
if x!=None and y != None:
xs.append(x)
ys.append(y)
elif debug:
print (x, y)
except KeyError:
if debug: raise
else: pass
except IndexError:
if debug: raise
else: pass
assert None not in xs
assert None not in ys
return xs,ys
def windowedPair(ax, num_trace, denom_trace, scale=Unit, window=100*K()):
ax.set_ylabel("%s%s/%s" % (scale, num_trace.units, denom_trace.units))
def val(opn):
opnBefore = opn - window
#if opnBefore < 0: return None
try:
num = num_trace[opn] - num_trace[opnBefore]
denom = denom_trace[opn] - denom_trace[opnBefore]
except TypeError: # None because some opn isn't defined
return None
if denom == 0:
return None
rate = num/scale()/denom
return rate
return val
def singleTrace(ax, trace, scale=Unit):
ax.set_ylabel("%s%s" % (scale, trace.units))
def lam(opn):
try:
return trace[opn]/scale()
except TypeError: # None because trace undefined at opn
return None
return lam
def set_xlim(ax, experiments):
xlim_right = 0
for exp in experiments:
xlim_right = max(xlim_right, exp.op_max/K())
ax.set_xlim(left = 0, right=xlim_right)
resistor_spectrum_ = ["black", "brown", "red", "orange", "green", "indigo", "blue", "violet"]
# same colors as in the aws automation console
spectrum_ = ["red", "yellow", "green", "cyan", "blue", "magenta",
"#800000", "#808000", "#008000", "#008080", "#000080", "#800080"]
def spectrum(idx):
return spectrum_[idx % len(spectrum_)]
def plotThroughput(ax, experiments):
ax.set_title("op throughput")
a2 = ax.twinx()
a2.set_ylabel("s")
for expi in range(len(experiments)):
exp = experiments[expi]
line, = ax.plot(*plotVsKop(ax, exp, windowedPair(ax, exp.operation, exp.elapsed, scale=K)), color=spectrum(expi))
line.set_label(exp.nickname + " tput")
ax.plot(*plotVsKop(ax, exp, windowedPair(ax, exp.operation, exp.elapsed, window=1000*K(), scale=K)), color=spectrum(expi), linestyle="dotted")
def elapsedTime(opn):
return exp.elapsed[opn]
line, = a2.plot(*plotVsKop(ax, exp, elapsedTime), color=spectrum(expi))
line.set_label(exp.nickname + " rate")
ax.legend(loc="upper left")
ax.set_yscale("log")
ax.set_ylim(bottom=0.1)
ax.grid(which="major", color="black")
ax.grid(which="minor", color="#dddddd")
set_xlim(ax, experiments)
a2.legend(loc="lower left")
for exp in experiments[:1]:
for phase,opn in exp.phase_starts.items():
#print (phase,opn,opn/K())
ax.text(opn/K(), ax.get_ylim()[0], phase)
def plotManyForeach(ax, experiments, plotOneFunc):
for i in range(len(experiments)):
exp = experiments[i]
plotkwargs = {"color": spectrum(i)}
plotOneFunc(exp, plotkwargs)
def plotMany(ax, experiments, plotOneFunc):
"""plotMany with some standard axes adjustments for op x axis"""
plotManyForeach(ax, experiments, plotOneFunc)
ax.set_ylim(bottom=0)
set_xlim(ax, experiments)
ax.legend()
def plotGrandUnifiedMemory(ax, experiments):
ax.set_title("Grand Unified Memory")
linestyles=["solid", "dashed", "dotted", "-."]
coloridx = [0]
def plotOneExp(exp, plotkwargs):
labelidx = [0]
plotkwargs["color"] = spectrum(coloridx[0])
is_first_exp = coloridx[0]==0
coloridx[0] += 1
def plotWithLabel(lam, exp_nick, lbl, always=False):
plotkwargs["linestyle"] = linestyles[labelidx[0] % len(linestyles)]
#print("using color %s for label %s" % (plotkwargs["color"], lbl))
labelidx[0] += 1
xs,ys = plotVsKop(ax, exp, lam)
if len(xs)==0:
# don't clutter legendspace
return
line, = ax.plot(xs, ys, **plotkwargs)
if is_first_exp or always:
line.set_label(exp_nick + lbl + (" %.2f%sB" % (ys[-1], Gi.prefix)))
plotWithLabel(singleTrace(ax, exp.os_map_total, scale=Gi),
exp.nickname, " OS mem")
# plotWithLabel(singleTrace(ax, exp.os_map_heap, scale=Gi),
# exp.nickname, " OS heap")
plotWithLabel(singleTrace(ax, exp.cgroups_memory_usage_bytes, scale=Gi),
exp.nickname, " cgroups-usage", always=True)
# malloc & jemalloc
plotWithLabel(singleTrace(ax, exp.jem_mapped, scale=Gi),
exp.nickname, " jem mapped")
# plotWithLabel(singleTrace(ax, exp.jem_active, scale=Gi),
# exp.nickname, " jem active")
plotWithLabel(singleTrace(ax, exp.jem_allocated, scale=Gi),
exp.nickname, " jem alloc")
mallocLam = singleTrace(ax, exp.microscopes["total"].getTrace("open_byte"), scale=Gi) if "total" in exp.microscopes else lambda opn: None
plotWithLabel(mallocLam, exp.nickname, " malloc")
# "underlying" view: measured in C++ below Dafny but above malloc
plotWithLabel(singleTrace(ax, exp.kvl_underlying, scale=Gi),
exp.nickname, " underlying")
# internal views, stacked
traceNames = ["bucket-message-bytes", "bucket-key-bytes", "pivot-key-bytes"]
def StackFor(count):
return [exp.accum[n] for n in traceNames[:count+1]]
# Just plot the sum of internal stuff
try:
stackedTraces = StackedTraces(StackFor(len(traceNames)))
plotWithLabel(singleTrace(ax, stackedTraces, scale=Gi),
exp.nickname, " internal-accum-bytes", always=True)
except: pass
for i in range(len(experiments)):
exp = experiments[i]
plotOneExp(exp, {"linestyle": linestyles[i % len(linestyles)]})
ax.legend()
set_xlim(ax, experiments)
def plotRocksIo(ax, experiments):
ax.set_title("rocks io")
window = 10*K()
def plotOneExp(exp, plotkwargs):
hit_ratio = LambdaTrace(lambda opn: exp.rocks_io_hits[opn]/exp.rocks_io_reads[opn], "frac")
line, = ax.plot(*plotVsKop(ax, exp, windowedPair(ax, exp.rocks_io_hits, exp.rocks_io_reads, window=window)), **plotkwargs)
line.set_label(exp.nickname + " rio_ratio")
line, = ax.plot(*plotVsKop(ax, exp, windowedPair(ax, exp.rocks_io_hits, exp.rocks_io_reads, window=100*window)), linestyle="dotted", **plotkwargs)
# line, = ax.plot(*plotVsKop(ax, exp, windowedPair(ax, exp.rocks_io_reads, exp.operation, window=window)))
# line.set_label("rio_access")
miss_pages = LambdaTrace(lambda opn: (exp.rocks_io_reads[opn] - exp.rocks_io_hits[opn]), "pages")
line, = ax.plot(*plotVsKop(ax, exp, windowedPair(ax, miss_pages, exp.operation, scale=Unit, window=100*K())), **plotkwargs)
line.set_label(exp.nickname + " miss_per_opn (%s)" % miss_pages.units)
line, = ax.plot(*plotVsKop(ax, exp, windowedPair(ax, miss_pages, exp.operation, scale=Unit, window=1000*K())), linestyle="dotted", **plotkwargs)
plotMany(ax, experiments, plotOneExp)
def plotCpuTime(ax, experiments):
ax.set_title("CPU time")
def plotOneExp(exp, plotkwargs):
ticksPerSecond = os.sysconf(os.sysconf_names['SC_CLK_TCK'])
user_sec = LambdaTrace(lambda opn: exp.utime[opn]/ticksPerSecond, "s")
sys_sec = LambdaTrace(lambda opn: exp.stime[opn]/ticksPerSecond, "s")
#print("ticksPerSecond", ticksPerSecond)
line, = ax.plot(*plotVsKop(ax, exp, windowedPair(ax, user_sec, exp.elapsed)), **plotkwargs)
line.set_label(exp.nickname+" user")
line, = ax.plot(*plotVsKop(ax, exp, windowedPair(ax, sys_sec, exp.elapsed)), **plotkwargs, linestyle="dotted")
line.set_label(exp.nickname+" sys")
plotMany(ax, experiments, plotOneExp)
def plotProcIoBytes(ax, experiments):
ax.set_title("proc io bytes")
def plotOneExp(exp, plotkwargs):
window = 1000*K()
line, = ax.plot(*plotVsKop(ax, exp, windowedPair(ax, exp.procio_read_bytes, exp.operation, scale=Ki, window=window)), **plotkwargs)
line.set_label(exp.nickname + " read")
line, = ax.plot(*plotVsKop(ax, exp, windowedPair(ax, exp.procio_write_bytes, exp.operation, scale=Ki, window=window)), linestyle="dotted", **plotkwargs)
line.set_label(exp.nickname + " write")
plotMany(ax, experiments, plotOneExp)
ax.grid(which="major", color="#dddddd")
def plotIoLatencyCdf(ax, experiments):
ax.set_title("io latency")
ax.set_yscale("log")
# retrieve from metadata?
assumeProcCyclesPerSec = 2.2*G()
def plotOneExpAt(exp, plotkwargs, opn):
for cdf_src,label,linestyle in (
(exp.iolatency_read, "read", "-"),
(exp.iolatency_write, "write", "dotted")):
cdf = cdf_src[opn]
if cdf==None: continue
line, = ax.plot([cycles/assumeProcCyclesPerSec*K() for cycles in cdf.xs], cdf.ys, linestyle=linestyle, **plotkwargs)
line.set_label("%s %s @%dKop" % (exp.nickname, label, opn/K()))
def plotOneExp(exp, plotkwargs):
try: pass #print(exp.nickname, exp.iolatency_read.sortedKeys())
except: pass
# plotOneExpAt(exp, plotkwargs, 500000)
#print(plotkwargs)
plotOneExpAt(exp, plotkwargs, 8000000)
# plotOneExpAt(exp, plotkwargs, 2700000)
plotManyForeach(ax, experiments, plotOneExp)
ax.set_xlabel("ms assuming clock %.1f%sHz" % (assumeProcCyclesPerSec/G(), G))
ax.legend()
def plotSlowIos(ax, experiments):
threshTraces = set()
for exp in experiments:
try: threshTraces.add(exp.slow_thresh)
except IndexError: pass
try:
threshValues = set([t[t.sortedKeys()[0]] for t in threshTraces if not t.empty()])
descr = str(list(threshValues)[0]) if len(threshValues)==1 else str(threshValues)
ax.set_title("slow ios (thresh %s %s)" % (
descr, list(threshTraces)[0].units))
except: raise
window = 10*K()
def plotOneExp(exp, plotkwargs):
line, = ax.plot(*plotVsKop(ax, exp, windowedPair(ax, exp.slow_reads, exp.operation, window=window)), **plotkwargs)
print(exp.nickname, len(exp.slow_reads.data))
if not exp.slow_reads.empty():
line.set_label(exp.nickname + " reads")
line, = ax.plot(*plotVsKop(ax, exp, windowedPair(ax, exp.slow_writes, exp.operation, window=window)), linestyle="dotted", **plotkwargs)
if not exp.slow_writes.empty():
line.set_label(exp.nickname + " writes")
plotManyForeach(ax, experiments, plotOneExp)
ax.legend()
ax.grid(which="major", color="#dddddd")
set_xlim(ax, experiments)
ax.set_ylim(top=1)
def plotCacheStats(ax, experiments):
ax.set_title("cache stats")
def plotOneExp(exp, plotkwargs):
line, = ax.plot(*plotVsKop(ax, exp, windowedPair(ax, exp.writeback_stalls, exp.operation, window=10*K())), **plotkwargs)
line.set_label(exp.nickname + " stalls")
plotManyForeach(ax, experiments, plotOneExp)
ax.legend()
ax.grid(which="major", color="#dddddd")
set_xlim(ax, experiments)
|
[
"os.sysconf",
"matplotlib.pyplot.figure",
"matplotlib.pyplot.subplots_adjust",
"matplotlib.gridspec.GridSpec",
"matplotlib.pyplot.savefig"
] |
[((898, 967), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(scale * 7 * self.columns, scale * self.rows * 2)'}), '(figsize=(scale * 7 * self.columns, scale * self.rows * 2))\n', (908, 967), True, 'import matplotlib.pyplot as plt\n'), ((1032, 1065), 'matplotlib.gridspec.GridSpec', 'GridSpec', (['self.rows', 'self.columns'], {}), '(self.rows, self.columns)\n', (1040, 1065), False, 'from matplotlib.gridspec import GridSpec\n'), ((1198, 1275), 'matplotlib.pyplot.subplots_adjust', 'plt.subplots_adjust', ([], {'left': '(0.06)', 'right': '(0.94)', 'hspace': '(0.6)', 'top': '(0.95)', 'bottom': '(0.05)'}), '(left=0.06, right=0.94, hspace=0.6, top=0.95, bottom=0.05)\n', (1217, 1275), True, 'import matplotlib.pyplot as plt\n'), ((1660, 1680), 'matplotlib.pyplot.savefig', 'plt.savefig', (['figname'], {}), '(figname)\n', (1671, 1680), True, 'import matplotlib.pyplot as plt\n'), ((9869, 9911), 'os.sysconf', 'os.sysconf', (["os.sysconf_names['SC_CLK_TCK']"], {}), "(os.sysconf_names['SC_CLK_TCK'])\n", (9879, 9911), False, 'import os\n')]
|
from django.contrib import admin
from .models import Author, Genre, Artwork, Painting, Book, Media
class AuthorAdmin(admin.ModelAdmin):
list_display = ('id', 'name', )
list_display_links = ('id', 'name', )
search_fields = ('name', )
class GenreAdmin(admin.ModelAdmin):
list_display = ('id', 'title', 'artwork_type')
list_display_links = ('id', 'title', )
list_filter = ('artwork_type',)
search_fields = ('title', )
class ArtworkAdmin(admin.ModelAdmin):
list_display = ('id', 'title', 'type', 'genre', 'author', 'pub_year', 'price')
list_display_links = ('id', 'title', )
search_fields = ('title', )
class PaintingAdmin(admin.ModelAdmin):
list_display = ('artwork', 'height', 'width', 'paint')
list_display_links = ('artwork',)
class BookAdmin(admin.ModelAdmin):
list_display = ('artwork', 'pages', 'cover')
list_display_links = ('artwork',)
class MediaAdmin(admin.ModelAdmin):
list_display = ('artwork', 'media_type', 'duration')
list_display_links = ('artwork',)
admin.site.register(Author, AuthorAdmin)
admin.site.register(Genre, GenreAdmin)
admin.site.register(Artwork, ArtworkAdmin)
admin.site.register(Painting, PaintingAdmin)
admin.site.register(Book, BookAdmin)
admin.site.register(Media, MediaAdmin)
|
[
"django.contrib.admin.site.register"
] |
[((1043, 1083), 'django.contrib.admin.site.register', 'admin.site.register', (['Author', 'AuthorAdmin'], {}), '(Author, AuthorAdmin)\n', (1062, 1083), False, 'from django.contrib import admin\n'), ((1084, 1122), 'django.contrib.admin.site.register', 'admin.site.register', (['Genre', 'GenreAdmin'], {}), '(Genre, GenreAdmin)\n', (1103, 1122), False, 'from django.contrib import admin\n'), ((1123, 1165), 'django.contrib.admin.site.register', 'admin.site.register', (['Artwork', 'ArtworkAdmin'], {}), '(Artwork, ArtworkAdmin)\n', (1142, 1165), False, 'from django.contrib import admin\n'), ((1166, 1210), 'django.contrib.admin.site.register', 'admin.site.register', (['Painting', 'PaintingAdmin'], {}), '(Painting, PaintingAdmin)\n', (1185, 1210), False, 'from django.contrib import admin\n'), ((1211, 1247), 'django.contrib.admin.site.register', 'admin.site.register', (['Book', 'BookAdmin'], {}), '(Book, BookAdmin)\n', (1230, 1247), False, 'from django.contrib import admin\n'), ((1248, 1286), 'django.contrib.admin.site.register', 'admin.site.register', (['Media', 'MediaAdmin'], {}), '(Media, MediaAdmin)\n', (1267, 1286), False, 'from django.contrib import admin\n')]
|
import re
import json
from urllib.parse import parse_qs, urlparse
from json import JSONDecodeError
from django.template.defaultfilters import truncatechars
from django.utils.encoding import force_text
from security.config import settings
from security.utils import remove_nul_from_string
def is_base_collection(v):
return isinstance(v, (list, tuple, set))
def get_headers(request):
regex = re.compile('^HTTP_')
return dict((regex.sub('', header), value) for (header, value)
in request.META.items() if header.startswith('HTTP_'))
def regex_sub_groups_global(pattern, repl, string):
"""
Globally replace all groups inside pattern with `repl`.
If `pattern` doesn't have groups the whole match is replaced.
"""
for search in reversed(list(re.finditer(pattern, string))):
for i in range(len(search.groups()), 0 if search.groups() else -1, -1):
start, end = search.span(i)
string = string[:start] + repl + string[end:]
return string
def flat_params(params):
return {
k: v[0] if is_base_collection(v) and len(v) == 1 else v
for k, v in params.items()
}
def list_params(params):
return {
k: list(v) if is_base_collection(v) else [v]
for k, v in params.items()
}
def get_logged_params(url):
return flat_params(parse_qs(urlparse(url).query))
def hide_sensitive_data_body(content):
if settings.HIDE_SENSITIVE_DATA:
for pattern in settings.HIDE_SENSITIVE_DATA_PATTERNS.get('BODY', ()):
content = regex_sub_groups_global(pattern, settings.SENSITIVE_DATA_REPLACEMENT, content)
return content
def hide_sensitive_data_headers(headers):
if settings.HIDE_SENSITIVE_DATA:
headers = dict(headers)
for pattern in settings.HIDE_SENSITIVE_DATA_PATTERNS.get('HEADERS', ()):
for header_name, header in headers.items():
if re.match(pattern, header_name, re.IGNORECASE):
headers[header_name] = settings.SENSITIVE_DATA_REPLACEMENT
return headers
def hide_sensitive_data_queries(queries):
if settings.HIDE_SENSITIVE_DATA:
queries = dict(queries)
for pattern in settings.HIDE_SENSITIVE_DATA_PATTERNS.get('QUERIES', ()):
for query_name, query in queries.items():
if re.match(pattern, query_name, re.IGNORECASE):
queries[query_name] = (
len(query) * [settings.SENSITIVE_DATA_REPLACEMENT] if is_base_collection(query)
else settings.SENSITIVE_DATA_REPLACEMENT
)
return queries
def truncate_json_data(data):
if isinstance(data, dict):
return {key: truncate_json_data(val) for key, val in data.items()}
elif isinstance(data, list):
return [truncate_json_data(val) for val in data]
elif isinstance(data, str):
return truncatechars(data, settings.LOG_JSON_STRING_LENGTH)
else:
return data
def truncate_body(content, max_length):
content = force_text(content, errors='replace')
if len(content) > max_length:
try:
json_content = json.loads(content)
return (
json.dumps(truncate_json_data(json_content))
if isinstance(json_content, (dict, list)) and settings.LOG_JSON_STRING_LENGTH is not None
else content[:max_length + 1]
)
except JSONDecodeError:
return content[:max_length + 1]
else:
return content
def clean_body(body, max_length):
if body is None:
return body
body = force_text(body, errors='replace')
cleaned_body = truncatechars(
truncate_body(body, max_length), max_length + len(settings.SENSITIVE_DATA_REPLACEMENT)
) if max_length is not None else str(body)
cleaned_body = hide_sensitive_data_body(remove_nul_from_string(cleaned_body)) if cleaned_body else cleaned_body
cleaned_body = truncatechars(cleaned_body, max_length) if max_length else cleaned_body
return cleaned_body
def clean_json(data):
return {remove_nul_from_string(k): remove_nul_from_string(v) if isinstance(v, str) else v for k, v in data.items()}
def clean_headers(headers):
return hide_sensitive_data_headers(clean_json(headers)) if headers else headers
def clean_queries(queries):
return hide_sensitive_data_queries(clean_json(queries)) if queries else queries
def log_input_request_with_data(request, related_objects=None, slug=None, extra_data=None):
input_request_logger = getattr(request, 'input_request_logger', None)
if not input_request_logger:
return False
if related_objects:
input_request_logger.add_related_objects(*related_objects)
if slug:
input_request_logger.set_slug(slug)
if extra_data:
input_request_logger.update_extra_data(extra_data)
return True
|
[
"json.loads",
"re.finditer",
"security.config.settings.HIDE_SENSITIVE_DATA_PATTERNS.get",
"re.match",
"django.template.defaultfilters.truncatechars",
"security.utils.remove_nul_from_string",
"django.utils.encoding.force_text",
"urllib.parse.urlparse",
"re.compile"
] |
[((406, 426), 're.compile', 're.compile', (['"""^HTTP_"""'], {}), "('^HTTP_')\n", (416, 426), False, 'import re\n'), ((3055, 3092), 'django.utils.encoding.force_text', 'force_text', (['content'], {'errors': '"""replace"""'}), "(content, errors='replace')\n", (3065, 3092), False, 'from django.utils.encoding import force_text\n'), ((3632, 3666), 'django.utils.encoding.force_text', 'force_text', (['body'], {'errors': '"""replace"""'}), "(body, errors='replace')\n", (3642, 3666), False, 'from django.utils.encoding import force_text\n'), ((1485, 1538), 'security.config.settings.HIDE_SENSITIVE_DATA_PATTERNS.get', 'settings.HIDE_SENSITIVE_DATA_PATTERNS.get', (['"""BODY"""', '()'], {}), "('BODY', ())\n", (1526, 1538), False, 'from security.config import settings\n'), ((1796, 1852), 'security.config.settings.HIDE_SENSITIVE_DATA_PATTERNS.get', 'settings.HIDE_SENSITIVE_DATA_PATTERNS.get', (['"""HEADERS"""', '()'], {}), "('HEADERS', ())\n", (1837, 1852), False, 'from security.config import settings\n'), ((2210, 2266), 'security.config.settings.HIDE_SENSITIVE_DATA_PATTERNS.get', 'settings.HIDE_SENSITIVE_DATA_PATTERNS.get', (['"""QUERIES"""', '()'], {}), "('QUERIES', ())\n", (2251, 2266), False, 'from security.config import settings\n'), ((3978, 4017), 'django.template.defaultfilters.truncatechars', 'truncatechars', (['cleaned_body', 'max_length'], {}), '(cleaned_body, max_length)\n', (3991, 4017), False, 'from django.template.defaultfilters import truncatechars\n'), ((4110, 4135), 'security.utils.remove_nul_from_string', 'remove_nul_from_string', (['k'], {}), '(k)\n', (4132, 4135), False, 'from security.utils import remove_nul_from_string\n'), ((793, 821), 're.finditer', 're.finditer', (['pattern', 'string'], {}), '(pattern, string)\n', (804, 821), False, 'import re\n'), ((3167, 3186), 'json.loads', 'json.loads', (['content'], {}), '(content)\n', (3177, 3186), False, 'import json\n'), ((3887, 3923), 'security.utils.remove_nul_from_string', 'remove_nul_from_string', 
(['cleaned_body'], {}), '(cleaned_body)\n', (3909, 3923), False, 'from security.utils import remove_nul_from_string\n'), ((4137, 4162), 'security.utils.remove_nul_from_string', 'remove_nul_from_string', (['v'], {}), '(v)\n', (4159, 4162), False, 'from security.utils import remove_nul_from_string\n'), ((1362, 1375), 'urllib.parse.urlparse', 'urlparse', (['url'], {}), '(url)\n', (1370, 1375), False, 'from urllib.parse import parse_qs, urlparse\n'), ((1929, 1974), 're.match', 're.match', (['pattern', 'header_name', 're.IGNORECASE'], {}), '(pattern, header_name, re.IGNORECASE)\n', (1937, 1974), False, 'import re\n'), ((2341, 2385), 're.match', 're.match', (['pattern', 'query_name', 're.IGNORECASE'], {}), '(pattern, query_name, re.IGNORECASE)\n', (2349, 2385), False, 'import re\n'), ((2916, 2968), 'django.template.defaultfilters.truncatechars', 'truncatechars', (['data', 'settings.LOG_JSON_STRING_LENGTH'], {}), '(data, settings.LOG_JSON_STRING_LENGTH)\n', (2929, 2968), False, 'from django.template.defaultfilters import truncatechars\n')]
|
#!/usr/bin/env python
'''
Example code for
k-point spin-restricted periodic MP2 calculation using the staggered mesh method
Author: <NAME> (<EMAIL>)
Reference: Staggered Mesh Method for Correlation Energy Calculations of Solids: Second-Order
Møller–Plesset Perturbation Theory, J. Chem. Theory Comput. 2021, 17, 8, 4733-4745
'''
from pyscf.pbc.mp.kmp2_stagger import KMP2_stagger
from pyscf.pbc import df, gto, scf, mp
'''
Hydrogen dimer
'''
cell = gto.Cell()
cell.pseudo = 'gth-pade'
cell.basis = 'gth-szv'
cell.ke_cutoff=100
cell.atom='''
H 3.00 3.00 2.10
H 3.00 3.00 3.90
'''
cell.a = '''
6.0 0.0 0.0
0.0 6.0 0.0
0.0 0.0 6.0
'''
cell.unit = 'B'
cell.verbose = 4
cell.build()
# HF calculation using FFTDF
nks_mf = [2,2,2]
kpts = cell.make_kpts(nks_mf, with_gamma_point=True)
kmf = scf.KRHF(cell, kpts, exxdiv='ewald')
ehf = kmf.kernel()
# staggered mesh KMP2 calculation using two submeshes of size [1,1,1] in kmf.kpts
kmp = KMP2_stagger(kmf, flag_submesh=True)
emp2 = kmp.kernel()
assert((abs(emp2 - -0.0160902544091997))<1e-5)
# staggered mesh KMP2 calculation using two meshes of size [2,2,2], one of them is kmf.kpts
kmp = KMP2_stagger(kmf, flag_submesh=False)
emp2 = kmp.kernel()
assert((abs(emp2 - -0.0140289970302513))<1e-5)
# standard KMP2 calculation
kmp = mp.KMP2(kmf)
emp2, _ = kmp.kernel()
assert((abs(emp2 - -0.0143904878990777))<1e-5)
# HF calculation using GDF
nks_mf = [2,2,2]
kpts = cell.make_kpts(nks_mf, with_gamma_point=True)
kmf = scf.KRHF(cell, kpts, exxdiv='ewald')
gdf = df.GDF(cell, kpts).build()
kmf.with_df = gdf
ehf = kmf.kernel()
# staggered mesh KMP2 calculation using two submeshes of size [1,1,1] in kmf.kpts
kmp = KMP2_stagger(kmf, flag_submesh=True)
emp2 = kmp.kernel()
assert((abs(emp2 - -0.0158364523431071))<1e-5)
# staggered mesh KMP2 calculation using two meshes of size [2,2,2], one of them is kmf.kpts
kmp = KMP2_stagger(kmf, flag_submesh=False)
emp2 = kmp.kernel()
assert((abs(emp2 - -0.0140280303691396))<1e-5)
# standard KMP2 calculation
kmp = mp.KMP2(kmf)
emp2, _ = kmp.kernel()
assert((abs(emp2 - -0.0141829343769316))<1e-5)
'''
Diamond system
'''
cell = gto.Cell()
cell.pseudo = 'gth-pade'
cell.basis = 'gth-szv'
cell.ke_cutoff=100
cell.atom='''
C 0. 0. 0.
C 1.26349729, 0.7294805 , 0.51582061
'''
cell.a = '''
2.52699457, 0. , 0.
1.26349729, 2.18844149, 0.
1.26349729, 0.7294805 , 2.06328243
'''
cell.unit = 'angstrom'
cell.verbose = 4
cell.build()
# HF calculation using FFTDF
nks_mf = [2,2,2]
kpts = cell.make_kpts(nks_mf, with_gamma_point=True)
kmf = scf.KRHF(cell, kpts, exxdiv='ewald')
ehf = kmf.kernel()
# staggered mesh KMP2 calculation using two submeshes of size [1,1,1] in kmf.kpts
kmp = KMP2_stagger(kmf, flag_submesh=True)
emp2 = kmp.kernel()
assert((abs(emp2 - -0.156289981810986))<1e-5)
# staggered mesh KMP2 calculation using two meshes of size [2,2,2], one of them is kmf.kpts
kmp = KMP2_stagger(kmf, flag_submesh=False)
emp2 = kmp.kernel()
assert((abs(emp2 - -0.105454107635884))<1e-5)
# standard KMP2 calculation
kmp = mp.KMP2(kmf)
emp2, _ = kmp.kernel()
assert((abs(emp2 - -0.095517731535516))<1e-5)
# HF calculation using GDF
nks_mf = [2,2,2]
kpts = cell.make_kpts(nks_mf, with_gamma_point=True)
kmf = scf.KRHF(cell, kpts, exxdiv='ewald')
gdf = df.GDF(cell, kpts).build()
kmf.with_df = gdf
ehf = kmf.kernel()
# staggered mesh KMP2 calculation using two submeshes of size [1,1,1] in kmf.kpts
kmp = KMP2_stagger(kmf, flag_submesh=True)
emp2 = kmp.kernel()
assert((abs(emp2 - -0.154923152683604))<1e-5)
# staggered mesh KMP2 calculation using two meshes of size [2,2,2], one of them is kmf.kpts
kmp = KMP2_stagger(kmf, flag_submesh=False)
emp2 = kmp.kernel()
assert((abs(emp2 - -0.105421948003715))<1e-5)
# standard KMP2 calculation
kmp = mp.KMP2(kmf)
emp2, _ = kmp.kernel()
assert((abs(emp2 - -0.0952009565805345))<1e-5)
|
[
"pyscf.pbc.scf.KRHF",
"pyscf.pbc.df.GDF",
"pyscf.pbc.mp.KMP2",
"pyscf.pbc.mp.kmp2_stagger.KMP2_stagger",
"pyscf.pbc.gto.Cell"
] |
[((462, 472), 'pyscf.pbc.gto.Cell', 'gto.Cell', ([], {}), '()\n', (470, 472), False, 'from pyscf.pbc import df, gto, scf, mp\n'), ((848, 884), 'pyscf.pbc.scf.KRHF', 'scf.KRHF', (['cell', 'kpts'], {'exxdiv': '"""ewald"""'}), "(cell, kpts, exxdiv='ewald')\n", (856, 884), False, 'from pyscf.pbc import df, gto, scf, mp\n'), ((995, 1031), 'pyscf.pbc.mp.kmp2_stagger.KMP2_stagger', 'KMP2_stagger', (['kmf'], {'flag_submesh': '(True)'}), '(kmf, flag_submesh=True)\n', (1007, 1031), False, 'from pyscf.pbc.mp.kmp2_stagger import KMP2_stagger\n'), ((1200, 1237), 'pyscf.pbc.mp.kmp2_stagger.KMP2_stagger', 'KMP2_stagger', (['kmf'], {'flag_submesh': '(False)'}), '(kmf, flag_submesh=False)\n', (1212, 1237), False, 'from pyscf.pbc.mp.kmp2_stagger import KMP2_stagger\n'), ((1343, 1355), 'pyscf.pbc.mp.KMP2', 'mp.KMP2', (['kmf'], {}), '(kmf)\n', (1350, 1355), False, 'from pyscf.pbc import df, gto, scf, mp\n'), ((1533, 1569), 'pyscf.pbc.scf.KRHF', 'scf.KRHF', (['cell', 'kpts'], {'exxdiv': '"""ewald"""'}), "(cell, kpts, exxdiv='ewald')\n", (1541, 1569), False, 'from pyscf.pbc import df, gto, scf, mp\n'), ((1731, 1767), 'pyscf.pbc.mp.kmp2_stagger.KMP2_stagger', 'KMP2_stagger', (['kmf'], {'flag_submesh': '(True)'}), '(kmf, flag_submesh=True)\n', (1743, 1767), False, 'from pyscf.pbc.mp.kmp2_stagger import KMP2_stagger\n'), ((1936, 1973), 'pyscf.pbc.mp.kmp2_stagger.KMP2_stagger', 'KMP2_stagger', (['kmf'], {'flag_submesh': '(False)'}), '(kmf, flag_submesh=False)\n', (1948, 1973), False, 'from pyscf.pbc.mp.kmp2_stagger import KMP2_stagger\n'), ((2079, 2091), 'pyscf.pbc.mp.KMP2', 'mp.KMP2', (['kmf'], {}), '(kmf)\n', (2086, 2091), False, 'from pyscf.pbc import df, gto, scf, mp\n'), ((2197, 2207), 'pyscf.pbc.gto.Cell', 'gto.Cell', ([], {}), '()\n', (2205, 2207), False, 'from pyscf.pbc import df, gto, scf, mp\n'), ((2655, 2691), 'pyscf.pbc.scf.KRHF', 'scf.KRHF', (['cell', 'kpts'], {'exxdiv': '"""ewald"""'}), "(cell, kpts, exxdiv='ewald')\n", (2663, 2691), False, 'from pyscf.pbc import df, gto, scf, 
mp\n'), ((2802, 2838), 'pyscf.pbc.mp.kmp2_stagger.KMP2_stagger', 'KMP2_stagger', (['kmf'], {'flag_submesh': '(True)'}), '(kmf, flag_submesh=True)\n', (2814, 2838), False, 'from pyscf.pbc.mp.kmp2_stagger import KMP2_stagger\n'), ((3006, 3043), 'pyscf.pbc.mp.kmp2_stagger.KMP2_stagger', 'KMP2_stagger', (['kmf'], {'flag_submesh': '(False)'}), '(kmf, flag_submesh=False)\n', (3018, 3043), False, 'from pyscf.pbc.mp.kmp2_stagger import KMP2_stagger\n'), ((3148, 3160), 'pyscf.pbc.mp.KMP2', 'mp.KMP2', (['kmf'], {}), '(kmf)\n', (3155, 3160), False, 'from pyscf.pbc import df, gto, scf, mp\n'), ((3338, 3374), 'pyscf.pbc.scf.KRHF', 'scf.KRHF', (['cell', 'kpts'], {'exxdiv': '"""ewald"""'}), "(cell, kpts, exxdiv='ewald')\n", (3346, 3374), False, 'from pyscf.pbc import df, gto, scf, mp\n'), ((3536, 3572), 'pyscf.pbc.mp.kmp2_stagger.KMP2_stagger', 'KMP2_stagger', (['kmf'], {'flag_submesh': '(True)'}), '(kmf, flag_submesh=True)\n', (3548, 3572), False, 'from pyscf.pbc.mp.kmp2_stagger import KMP2_stagger\n'), ((3740, 3777), 'pyscf.pbc.mp.kmp2_stagger.KMP2_stagger', 'KMP2_stagger', (['kmf'], {'flag_submesh': '(False)'}), '(kmf, flag_submesh=False)\n', (3752, 3777), False, 'from pyscf.pbc.mp.kmp2_stagger import KMP2_stagger\n'), ((3881, 3893), 'pyscf.pbc.mp.KMP2', 'mp.KMP2', (['kmf'], {}), '(kmf)\n', (3888, 3893), False, 'from pyscf.pbc import df, gto, scf, mp\n'), ((1576, 1594), 'pyscf.pbc.df.GDF', 'df.GDF', (['cell', 'kpts'], {}), '(cell, kpts)\n', (1582, 1594), False, 'from pyscf.pbc import df, gto, scf, mp\n'), ((3381, 3399), 'pyscf.pbc.df.GDF', 'df.GDF', (['cell', 'kpts'], {}), '(cell, kpts)\n', (3387, 3399), False, 'from pyscf.pbc import df, gto, scf, mp\n')]
|
try:
from django.utils.deprecation import MiddlewareMixin as MIDDLEWARE_BASE_CLASS
except ImportError:
MIDDLEWARE_BASE_CLASS = object
from django.contrib.auth import views as auth_views
from django.utils.decorators import method_decorator
from .decorators import watch_login
class FailedLoginMiddleware(MIDDLEWARE_BASE_CLASS):
    """Record failed login attempts by wrapping Django's login entry point
    with the ``watch_login`` decorator.
    """

    # Guard so the auth view is only monkey-patched once; the middleware may
    # be instantiated repeatedly, and re-patching would record each failed
    # attempt multiple times.
    patched = False

    def __init__(self, *args, **kwargs):
        super(FailedLoginMiddleware, self).__init__(*args, **kwargs)
        if FailedLoginMiddleware.patched:
            return
        login_watcher = watch_login()
        try:
            # Django 1.11 turned the `login` function view into the
            # `LoginView` class-based view.
            from django.contrib.auth.views import LoginView
        except ImportError:  # Django < 1.11
            auth_views.login = login_watcher(auth_views.login)
        else:
            LoginView.dispatch = method_decorator(login_watcher)(LoginView.dispatch)
        FailedLoginMiddleware.patched = True
|
[
"django.utils.decorators.method_decorator"
] |
[((969, 1000), 'django.utils.decorators.method_decorator', 'method_decorator', (['our_decorator'], {}), '(our_decorator)\n', (985, 1000), False, 'from django.utils.decorators import method_decorator\n')]
|
# Copyright (c) Facebook, Inc. and its affiliates.
# TODO: Once internal torchvision transforms become stable either in torchvision
# or in pytorchvideo, move to use those transforms.
import random
import mmf.datasets.processors.functional as F
import torch
from mmf.common.registry import registry
from mmf.datasets.processors import BaseProcessor
@registry.register_processor("video_random_crop")
class VideoRandomCrop(BaseProcessor):
    """Crop a video clip at a random spatial location to a fixed size."""

    def __init__(self, *args, size=None, **kwargs):
        super().__init__()
        if size is None:
            raise TypeError("Parameter 'size' is required")
        self.size = size

    @staticmethod
    def get_params(vid, output_size):
        """Pick a random top-left corner for a crop of ``output_size``."""
        height, width = vid.shape[-2:]
        crop_h, crop_w = output_size
        # Nothing to randomize when the clip already has the target size.
        if width == crop_w and height == crop_h:
            return 0, 0, height, width
        top = random.randint(0, height - crop_h)
        left = random.randint(0, width - crop_w)
        return top, left, crop_h, crop_w

    def __call__(self, vid):
        top, left, crop_h, crop_w = self.get_params(vid, self.size)
        return F.video_crop(vid, top, left, crop_h, crop_w)
@registry.register_processor("video_center_crop")
class VideoCenterCrop(BaseProcessor):
    """Crop a video clip at its center to a fixed spatial size."""

    def __init__(self, *args, size=None, **kwargs):
        super().__init__()
        if size is None:
            raise TypeError("Parameter 'size' is required")
        self.size = size

    def __call__(self, vid):
        target = self.size
        return F.video_center_crop(vid, target)
@registry.register_processor("video_resize")
class VideoResize(BaseProcessor):
    """Resize a video clip to the given spatial size."""

    def __init__(self, *args, size=None, **kwargs):
        # Call the base initializer for consistency with the other video
        # processors in this module (it was previously skipped here).
        super().__init__()
        if size is None:
            raise TypeError("Parameter 'size' is required")
        self.size = size

    def __call__(self, vid):
        return F.video_resize(vid, self.size)
@registry.register_processor("video_to_tensor")
class VideoToTensor(BaseProcessor):
    """Convert a video clip to a normalized float tensor
    (delegates to ``F.video_to_normalized_float_tensor``).
    """

    def __init__(self, *args, **kwargs):
        # Removed a stray `pass` that followed this call.
        super().__init__()

    def __call__(self, vid):
        return F.video_to_normalized_float_tensor(vid)
@registry.register_processor("video_normalize")
class VideoNormalize(BaseProcessor):
    """Normalize a video clip with the given mean and std
    (delegates to ``F.video_normalize``).
    """

    def __init__(self, mean=None, std=None, **kwargs):
        super().__init__()
        # Both statistics are needed by F.video_normalize. The check
        # previously used `and`, which let a single missing value through.
        if mean is None or std is None:
            raise TypeError("'mean' and 'std' params are required")
        self.mean = mean
        self.std = std

    def __call__(self, vid):
        return F.video_normalize(vid, self.mean, self.std)
@registry.register_processor("video_random_horizontal_flip")
class VideoRandomHorizontalFlip(BaseProcessor):
    """Horizontally flip a video clip with probability ``p``."""

    def __init__(self, p=0.5, **kwargs):
        super().__init__()
        self.p = p

    def __call__(self, vid):
        should_flip = random.random() < self.p
        return F.video_hflip(vid) if should_flip else vid
@registry.register_processor("video_pad")
class Pad(BaseProcessor):
    """Pad a video clip with a constant fill value."""

    def __init__(self, padding=None, fill=0, **kwargs):
        super().__init__()
        if padding is None:
            raise TypeError("Parameter 'padding' is required")
        self.padding = padding
        self.fill = fill

    def __call__(self, vid):
        pad_spec, fill_value = self.padding, self.fill
        return F.video_pad(vid, pad_spec, fill_value)
@registry.register_processor("truncate_or_pad")
class TruncateOrPad(BaseProcessor):
    """Truncate or zero-pad a sample along its second dimension so that it
    has ``output_size`` elements.
    """

    # truncate or add 0 until the desired output size
    def __init__(self, output_size=None, **kwargs):
        super().__init__()
        if output_size is None:
            raise TypeError("Parameter 'output_size' is required")
        assert isinstance(output_size, (int, tuple))
        self.output_size = output_size

    def __call__(self, sample):
        # Assumes sample is a 2-D tensor of shape (1, length) — TODO confirm
        # against callers.
        if sample.shape[1] >= self.output_size:
            # Long enough: return only the first output_size entries (1-D).
            return sample[0, : self.output_size]
        else:
            # NOTE(review): sample[0, :] is 1-D while torch.zeros(1, n) is
            # 2-D; torch.cat over tensors of different ranks raises, so this
            # padding branch looks broken as written — verify with callers.
            return torch.cat(
                (sample[0, :], torch.zeros(1, self.output_size - sample.shape[1])),
                axis=1,
            )
|
[
"mmf.datasets.processors.functional.video_hflip",
"random.randint",
"mmf.datasets.processors.functional.video_to_normalized_float_tensor",
"mmf.datasets.processors.functional.video_resize",
"torch.zeros",
"mmf.datasets.processors.functional.video_crop",
"mmf.common.registry.registry.register_processor",
"random.random",
"mmf.datasets.processors.functional.video_pad",
"mmf.datasets.processors.functional.video_center_crop",
"mmf.datasets.processors.functional.video_normalize"
] |
[((354, 402), 'mmf.common.registry.registry.register_processor', 'registry.register_processor', (['"""video_random_crop"""'], {}), "('video_random_crop')\n", (381, 402), False, 'from mmf.common.registry import registry\n'), ((1113, 1161), 'mmf.common.registry.registry.register_processor', 'registry.register_processor', (['"""video_center_crop"""'], {}), "('video_center_crop')\n", (1140, 1161), False, 'from mmf.common.registry import registry\n'), ((1473, 1516), 'mmf.common.registry.registry.register_processor', 'registry.register_processor', (['"""video_resize"""'], {}), "('video_resize')\n", (1500, 1516), False, 'from mmf.common.registry import registry\n'), ((1792, 1838), 'mmf.common.registry.registry.register_processor', 'registry.register_processor', (['"""video_to_tensor"""'], {}), "('video_to_tensor')\n", (1819, 1838), False, 'from mmf.common.registry import registry\n'), ((2044, 2090), 'mmf.common.registry.registry.register_processor', 'registry.register_processor', (['"""video_normalize"""'], {}), "('video_normalize')\n", (2071, 2090), False, 'from mmf.common.registry import registry\n'), ((2459, 2518), 'mmf.common.registry.registry.register_processor', 'registry.register_processor', (['"""video_random_horizontal_flip"""'], {}), "('video_random_horizontal_flip')\n", (2486, 2518), False, 'from mmf.common.registry import registry\n'), ((2781, 2821), 'mmf.common.registry.registry.register_processor', 'registry.register_processor', (['"""video_pad"""'], {}), "('video_pad')\n", (2808, 2821), False, 'from mmf.common.registry import registry\n'), ((3168, 3214), 'mmf.common.registry.registry.register_processor', 'registry.register_processor', (['"""truncate_or_pad"""'], {}), "('truncate_or_pad')\n", (3195, 3214), False, 'from mmf.common.registry import registry\n'), ((890, 915), 'random.randint', 'random.randint', (['(0)', '(h - th)'], {}), '(0, h - th)\n', (904, 915), False, 'import random\n'), ((928, 953), 'random.randint', 'random.randint', (['(0)', '(w - tw)'], 
{}), '(0, w - tw)\n', (942, 953), False, 'import random\n'), ((1080, 1109), 'mmf.datasets.processors.functional.video_crop', 'F.video_crop', (['vid', 'i', 'j', 'h', 'w'], {}), '(vid, i, j, h, w)\n', (1092, 1109), True, 'import mmf.datasets.processors.functional as F\n'), ((1434, 1469), 'mmf.datasets.processors.functional.video_center_crop', 'F.video_center_crop', (['vid', 'self.size'], {}), '(vid, self.size)\n', (1453, 1469), True, 'import mmf.datasets.processors.functional as F\n'), ((1758, 1788), 'mmf.datasets.processors.functional.video_resize', 'F.video_resize', (['vid', 'self.size'], {}), '(vid, self.size)\n', (1772, 1788), True, 'import mmf.datasets.processors.functional as F\n'), ((2001, 2040), 'mmf.datasets.processors.functional.video_to_normalized_float_tensor', 'F.video_to_normalized_float_tensor', (['vid'], {}), '(vid)\n', (2035, 2040), True, 'import mmf.datasets.processors.functional as F\n'), ((2412, 2455), 'mmf.datasets.processors.functional.video_normalize', 'F.video_normalize', (['vid', 'self.mean', 'self.std'], {}), '(vid, self.mean, self.std)\n', (2429, 2455), True, 'import mmf.datasets.processors.functional as F\n'), ((3123, 3164), 'mmf.datasets.processors.functional.video_pad', 'F.video_pad', (['vid', 'self.padding', 'self.fill'], {}), '(vid, self.padding, self.fill)\n', (3134, 3164), True, 'import mmf.datasets.processors.functional as F\n'), ((2695, 2710), 'random.random', 'random.random', ([], {}), '()\n', (2708, 2710), False, 'import random\n'), ((2740, 2758), 'mmf.datasets.processors.functional.video_hflip', 'F.video_hflip', (['vid'], {}), '(vid)\n', (2753, 2758), True, 'import mmf.datasets.processors.functional as F\n'), ((3780, 3830), 'torch.zeros', 'torch.zeros', (['(1)', '(self.output_size - sample.shape[1])'], {}), '(1, self.output_size - sample.shape[1])\n', (3791, 3830), False, 'import torch\n')]
|
import math
import os
from datetime import timedelta, datetime
import unicodedata
from typing import List
import re
import pandas as pd
from dateutil import parser
# timedelta/datetime
def strtotimedelta(s: str):
    """Parse a duration string of space-separated tokens into a timedelta.

    Recognized token suffixes: m/minute, h/hour, d/day, wk/week,
    mo/month (= 4 weeks), y/year (= 48 weeks), s/second.
    The literal string 'max' is returned unchanged.
    """
    conversions = (
        ("m", "minute", lambda n: timedelta(minutes=n)),
        ("h", "hour", lambda n: timedelta(hours=n)),
        ("d", "day", lambda n: timedelta(days=n)),
        ("wk", "week", lambda n: timedelta(weeks=n)),
        ("mo", "month", lambda n: timedelta(weeks=n * 4)),
        ("y", "year", lambda n: timedelta(weeks=n * 48)),
        ("s", "second", lambda n: timedelta(seconds=n)),
    )
    total = timedelta()
    for token in s.split():
        # Split the token into its numeric prefix and alphabetic suffix.
        suffix = token.lstrip('0123456789')
        prefix = token[:len(token) - len(suffix)]
        amount = int(prefix) if prefix else 0
        for abbrev, word, build in conversions:
            if suffix.lower() == abbrev or word.casefold() in suffix.casefold():
                total += build(amount)
                break
        else:
            # No unit matched: pass 'max' through verbatim.
            if s.lower() == "max":
                return s
    return total
def get_yahoo_intervals():
    """Return the interval strings accepted by the Yahoo API, ordered from
    shortest to longest duration."""
    return '1M 2M 5M 15M 30M 60M 1h 90M 1d 5d 1wk 1mo 3mo'.split()
def strtoyahootimestr(s: str):
    """XM X minutes, XH X hours, Xd X days, Xw X weeks, Xm X months, all separated by a space
    Interval closest to '1m,2m,5m,15m,30m,60m,90m,1h,1d,5d,1wk,1mo,3mo' will be chosen.

    Binary-searches the duration-ordered list from get_yahoo_intervals()
    for the interval closest to strtotimedelta(s), then compares the two
    final candidates and returns the closer one.
    """
    interval = get_yahoo_intervals()
    idx, prev_idx = len(interval) // 2, 0
    left, right = 0, len(interval)
    chosen_interval = strtotimedelta(s)
    while not (right - left) < 2:
        prev_idx = idx
        diff = strtotimedelta(interval[idx]) - chosen_interval
        # Check if chosen interval is smaller or greater than measured interval, move boundaries accordingly
        if diff > timedelta(0):
            right = idx
            # NOTE(review): midpoints are computed as (idx + left) // 2 and
            # (idx + right) // 2 rather than the usual (left + right) // 2 —
            # verify the search converges for all inputs.
            idx = (idx + left) // 2
        elif diff == timedelta(0):
            # Exact match — return immediately.
            return interval[idx]
        else:
            left = idx
            idx = (idx + right) // 2
    # Compare which is better
    diff1 = strtotimedelta(interval[idx]) - chosen_interval
    diff2 = strtotimedelta(interval[prev_idx]) - chosen_interval
    if diff1 > timedelta(0):
        # interval_1 is larger than chosen interval, so interval_2 is smaller (diff2 is negative)
        if diff1 + diff2 > timedelta(0):
            # diff1 is larger than diff2, and so chosen interval is closer to interval_2
            idx = prev_idx
    else:
        # interval_1 is smaller than chosen interval, so interval_2 is bigger (diff2 is positive, diff1 is negative)
        if diff1 + diff2 < timedelta(0):
            # diff2 (positive) is not large enough to compensate for diff1 and so chosen interval is closer to interval_2
            idx = prev_idx
    return interval[idx]
def checkifyahootimestr(s: str):
    """Return True when *s* has the '<digits><letters>' shape of a Yahoo
    interval string, e.g. '5d' or '1mo'."""
    return bool(re.match(r"^\d+[a-zA-Z]+$", s))
def timedeltatoyahootimestr(_interval: timedelta):
    """XM X minutes, XH X hours, Xd X days, Xw X weeks, Xm X months, all separated by a space
    Interval closest to '1m,2m,5m,15m,30m,60m,90m,1h,1d,5d,1wk,1mo,3mo' will be chosen.

    Same search as strtoyahootimestr, but takes a timedelta directly.
    NOTE: the candidate list here is lower-case ('1m', ...), whereas
    strtoyahootimestr/get_yahoo_intervals use capital-M minutes, so callers
    receive different casing.
    """
    interval = ['1m', '2m', '5m', '15m', '30m', '60m', '1h', '90m', '1d', '5d', '1wk', '1mo', '3mo']
    idx, prev_idx = len(interval) // 2, 0
    left, right = 0, len(interval)
    chosen_interval = _interval
    while not (right - left) < 2:
        prev_idx = idx
        diff = strtotimedelta(interval[idx]) - chosen_interval
        # Check if chosen interval is smaller or greater than measured interval, move boundaries accordingly
        if diff > timedelta(0):
            right = idx
            # NOTE(review): midpoints use (idx + left) // 2 / (idx + right) // 2
            # rather than the usual (left + right) // 2 — verify convergence.
            idx = (idx + left) // 2
        elif diff == timedelta(0):
            # Exact match — return immediately.
            return interval[idx]
        else:
            left = idx
            idx = (idx + right) // 2
    # Compare which is better
    diff1 = strtotimedelta(interval[idx]) - chosen_interval
    diff2 = strtotimedelta(interval[prev_idx]) - chosen_interval
    if diff1 > timedelta(0):
        # interval_1 is larger than chosen interval, so interval_2 is smaller (diff2 is negative)
        if diff1 + diff2 > timedelta(0):
            # diff1 is larger than diff2, and so chosen interval is closer to interval_2
            idx = prev_idx
    else:
        # interval_1 is smaller than chosen interval, so interval_2 is bigger (diff2 is positive, diff1 is negative)
        if diff1 + diff2 < timedelta(0):
            # diff2 (positive) is not large enough to compensate for diff1 and so chosen interval is closer to interval_2
            idx = prev_idx
    return interval[idx]
def timedeltatosigstr(s: timedelta):
    """Render only the most significant unit of *s*, without spaces:
    days ('d'), hours ('h'), minutes ('M') or seconds ('s')."""
    if s.days > 0:
        return f"{s.days}d"
    secs = s.seconds
    if secs >= 60 * 60:
        return f"{secs // (60 * 60)}h"
    if secs > 60:
        return f"{secs // 60}M"
    return f"{secs}s"
def yahoolimitperiod(period: timedelta, interval: str):
    """Divides period into smaller chunks depending on the interval. Outputs new_period, n_loop"""
    n_loop = 1
    # NOTE(review): loop_period is assigned but never used.
    loop_period = period
    # Longest period Yahoo serves for each intraday interval.
    min_dict = {
        '1M': '7d',
        '2M': '7d',
        '5M': '7d',
        '15M': '60d',
        '30M': '60d',
        '60M': '60d',
        '90M': '60d',
        '1h': '60d',
    }
    eff_interval = '1M'
    # Pick the largest key interval that is still strictly shorter than the
    # requested interval (candidate diffs are negative); default to '1M'.
    diff = strtotimedelta(eff_interval) - strtotimedelta(interval)
    for key in min_dict.keys():
        _diff = strtotimedelta(key) - strtotimedelta(interval)
        if timedelta() > _diff > diff:
            diff = _diff
            eff_interval = key
    max_period = strtotimedelta(min_dict[eff_interval])
    if period > max_period:
        # Period too long for one request: split into n_loop equal chunks.
        n_loop = math.ceil(period / max_period)
        eff_period = period / n_loop
        return eff_period, n_loop
    return period, 1
def yahoolimitperiod_leftover(period: timedelta, interval: str):
    """Divides period into smaller defined chunks, depending on the interval.
    Outputs new_period, n_loop and period_leftover"""
    # Longest period Yahoo serves for each interval.
    min_dict = {
        '1m': '7d',
        '2m': '7d',
        '5m': '7d',
        '15m': '60d',
        '30m': '100d',
        '60m': '365d',
        '1h': '365d',
        '90m': '365d',
        '1d': '1000d',
    }
    eff_interval = '1m'
    # Pick the largest key interval that is still strictly shorter than the
    # requested interval (candidate diffs are negative); default to '1m'.
    diff = strtotimedelta(eff_interval) - strtotimedelta(interval)
    for key in min_dict.keys():
        _diff = strtotimedelta(key) - strtotimedelta(interval)
        if timedelta() > _diff > diff:
            diff = _diff
            eff_interval = key
    max_period = strtotimedelta(min_dict[eff_interval])
    if period > max_period:
        # Full chunks of max_period, plus whatever remains.
        n_loop = math.floor(period / max_period)
        leftover = period - max_period * n_loop
        # Never request less than a single interval's worth of data.
        if leftover < strtotimedelta(interval):
            leftover = strtotimedelta(interval)
        return max_period, n_loop, leftover
    return period, 1, timedelta(0)
def strtodatetime(s: str) -> datetime:
    """Parse a date/time string into a datetime via dateutil's permissive parser.

    Accepted forms include, e.g.:
        2022-02-23
        2022-02-23 09:30:00-05:00   (date, hh:mm:ss and timezone offset)
    """
    return parser.parse(s)
def check_if_valid_timestr(s: str):
    """True when *s* starts with digits and does not end with a digit,
    e.g. '5d' (equivalent to is_lnumber(s) and is_not_rnumber(s))."""
    starts_numeric = s != s.lstrip('0123456789')
    ends_non_numeric = s == s.rstrip('0123456789')
    return starts_numeric and ends_non_numeric
# String manipulation
def drsplit(s: str):
    """Split a token like '15mo' into (numeric prefix, alphabetic rest).

    A missing numeric prefix yields 0, e.g. 'abc' -> (0, 'abc').
    """
    rest = s.lstrip('0123456789')
    prefix = s[:len(s) - len(rest)]
    # prefix contains only ASCII digits (possibly none), so int() is safe.
    number = int(prefix) if prefix else 0
    return number, rest
def is_lnumber(s: str):
    """True when *s* begins with at least one ASCII digit."""
    return s != s.lstrip('0123456789')


def is_not_rnumber(s: str):
    """True when *s* does not end with an ASCII digit."""
    return s == s.rstrip('0123456789')


def is_datetime(v):
    """True when *v* is a datetime instance."""
    return isinstance(v, datetime)


def is_datetimestring(s):
    """Stub: datetime-string detection is not implemented yet."""
    return False  # todo
# Names/File Names
def normify_name(s: str):
    """Drop every space character from *s*."""
    return ''.join(s.split(' '))


def snake_to_proper_case(s: str):
    """'to_proper_case' -> 'TO PROPER CASE' (parts are fully upper-cased,
    despite the function's name)."""
    return ' '.join(part.upper() for part in s.split('_'))


def remove_special_char(s: str):
    """Drop every underscore from *s*."""
    return ''.join(s.split('_'))


def to_camel_case(s: str):
    """Stub: camel-case conversion is not implemented yet; returns *s*."""
    return s
# Try
def try_int(s: str) -> int:
    """Parse *s* as an int, returning 0 for None or unparsable input."""
    try:
        if s is None:
            return 0
        return int(s)
    except ValueError:
        return 0


def try_float(s: str) -> float:
    """Parse *s* as a float, returning 0 for None or unparsable input."""
    try:
        if s is None:
            return 0
        return float(s)
    except ValueError:
        return 0


def try_key(dict: {}, key: str):
    """Return dict[key], or "-" when the key is absent.

    Bug fix: the lookup previously used the literal string 'key' instead of
    the *key* argument.
    """
    if key in dict:
        return dict[key]
    else:
        return "-"


def try_divide(n1, n2):
    """Divide n1 by n2, returning math.inf on division by zero."""
    if n2 == 0:
        return math.inf
    return n1 / n2


def try_max(list):
    """max() that returns 0 for an empty sequence."""
    if len(list) < 1:
        return 0
    return max(list)


def try_min(list):
    """min() that returns 0 for an empty sequence."""
    if len(list) < 1:
        return 0
    return min(list)


def try_mean(list):
    """Mean of the non-None entries; 0 for an empty or all-None list.

    Bug fix: None entries previously decremented the count by 0, so they
    still inflated the denominator; they are now excluded properly.
    """
    if len(list) < 1:
        return 0
    t, l = 0, len(list)
    for i in list:
        if i is None:
            l -= 1
            continue
        t += i
    if l == 0:
        # All entries were None; avoid dividing by zero.
        return 0
    return try_divide(t, l)


def try_width(list):
    """Spread (max - min) of the non-None entries; 0 when there are none.

    Bug fix: the running max/min previously started at 0/inf, which gave a
    wrong width for lists whose values are all negative.
    """
    values = [x for x in list if x is not None]
    if not values:
        return 0
    return max(values) - min(values)


def try_sgn(n1):
    """Sign of n1 as +/-1.0, or 0 when n1 is 0 or unparsable."""
    n1 = try_float(n1)
    if n1:
        return math.copysign(1, n1)
    return 0


def in_range(n1, n2=[0, 1]):
    """True when n1 lies strictly inside the open interval (n2[0], n2[1]).

    Note: falsy n1 (0 or an unparsable string) always yields False — kept
    for backward compatibility.
    """
    n1 = try_float(n1)
    if n1 and len(n2) >= 2:
        if n2[0] < n1 < n2[1]:
            return True
    return False


def in_std_range(n1, avg, stdev, order=1):
    """True when n1 is within *order* standard deviations of *avg*."""
    return in_range(n1, [avg - order * stdev, avg + order * stdev])
# XVar
def pip_conversion(currency_pair: str):
    """Pip size for a currency pair: 1/100 when the pair contains both
    'USD' and 'JPY', else 1/10000."""
    is_usd_jpy = 'USD' in currency_pair and 'JPY' in currency_pair
    return 1 / 100 if is_usd_jpy else 1 / 10000


def leverage_to_float(lev: str):
    """Input: 'int1:int2. Output: int2/int1'"""
    pieces = lev.split(':')
    if len(pieces) != 2:
        return 0
    try:
        denominator = int(pieces[0])
    except ValueError:
        denominator = 0
    try:
        numerator = int(pieces[1])
    except ValueError:
        numerator = 0
    if denominator and numerator:
        return numerator / denominator
    return 0


def get_sim_speed(s: str):
    """Numeric prefix of a simulation-speed token, e.g. '4x' -> 4."""
    suffix = s.lstrip('0123456789')
    prefix = s[:len(s) - len(suffix)]
    return int(prefix) if prefix else 0
# Instrument Type
def get_instrument_type(symbol: str):
    """Classify *symbol* into an instrument type.

    Currently everything is reported as "Forex"; other types are a TODO.
    """
    # todo future
    if symbol in ['CAD=X']:
        return "Forex"
    return "Forex"


def craft_instrument_filename(sym: str, interval: str, period: str):
    """Build the canonical data filename '<sym>-<interval>-<period>.csv'."""
    return F'{sym}-{interval}-{period}.csv'


def get_instrument_from_filename(s: str):
    """Invert craft_instrument_filename (minus the extension).

    Returns (sym, interval, period); ('', '', '') when *s* has fewer than
    three '-'-separated parts. Symbols may themselves contain '-', so the
    symbol is re-joined from every part before the trailing interval and
    period.
    """
    parts = s.split('-')
    l = len(parts) - 2
    if l < 1:
        return "", "", ""
    period = parts[-1]
    interval = parts[-2]
    # Bug fix: previously joined parts[0:l - 1], which dropped the final
    # symbol component (and produced '' for simple one-part symbols).
    sym = "-".join(parts[0:l])
    return sym, interval, period


def craft_test_filename(ta_name: str, ivar_name: str, ds_names: List[str]):
    """Test name can be set by user. This is an auto-generated filename"""
    return F'{ta_name}-{ivar_name}'
def get_size_bytes(bytes, suffix="B"):
    """
    Scale a byte count to a human-readable string.

    e.g:
    1253656 => '1.20MB'
    1253656678 => '1.17GB'
    """
    step = 1024
    for unit_prefix in ("", "K", "M", "G", "T", "P"):
        if bytes < step:
            return f"{bytes:.2f}{unit_prefix}{suffix}"
        bytes /= step
def to_dataname(s, interval, period):
    """Build the data identifier '<symbol>-<interval>-<significant period>'."""
    return '-'.join([s, interval, timedeltatosigstr(period)])


def from_dataname(s: str):
    """Split '<symbol>-<interval>-<period>' into a 3-tuple; returns
    ('Str_Error', '', '') for malformed input."""
    pieces = s.split('-')
    if len(pieces) < 3:
        return ('Str_Error', '', '')
    return (pieces[0], pieces[1], pieces[2])
def get_file_name(s: str):
    """Return *s* without its final extension (directory part is kept)."""
    root, _ext = os.path.splitext(s)
    return root
# Test
def get_test_name(s: str):
    """Strip the file extension from a test filename.

    '.csv' is handled explicitly; otherwise the final '.'-separated segment
    is dropped. Names without a '.' are returned unchanged.
    """
    if s.endswith('.csv'):
        return s[0:-4]
    s_arr = s.split('.')
    if len(s_arr) == 1:
        return s
    # Bug fix: previously sliced s_arr[0:-2], which dropped two segments and
    # returned '' for simple names such as 'name.json'.
    return '.'.join(s_arr[0:-1])
# data = yf.download( # or pdr.get_data_yahoo(...
# # tickers list or string as well
# tickers = "SPY AAPL MSFT",
#
# # use "period" instead of start/end
# # valid periods: 1d,5d,1mo,3mo,6mo,1y,2y,5y,10y,ytd,max
# # (optional, default is '1mo')
# period = "ytd",
#
# # fetch data by interval (including intraday if period < 60 days)
# # valid intervals: 1m,2m,5m,15m,30m,60m,90m,1h,1d,5d,1wk,1mo,3mo
# # (optional, default is '1d')
# interval = "1m",
#
# # group by ticker (to access via data['SPY'])
# # (optional, default is 'column')
# group_by = 'ticker',
#
# # adjust all OHLC automatically
# # (optional, default is False)
# auto_adjust = True,
#
# # download pre/post regular market hours data
# # (optional, default is False)
# prepost = True,
#
# # use threads for mass downloading? (True/False/Integer)
# # (optional, default is True)
# threads = True,
#
# # proxy URL scheme use use when downloading?
# # (optional, default is None)
# proxy = None
# )
|
[
"dateutil.parser.parse",
"math.ceil",
"math.floor",
"re.match",
"math.copysign",
"datetime.timedelta",
"os.path.splitext"
] |
[((323, 334), 'datetime.timedelta', 'timedelta', ([], {}), '()\n', (332, 334), False, 'from datetime import timedelta, datetime\n'), ((3105, 3135), 're.match', 're.match', (['"""^\\\\d+[a-zA-Z]+$"""', 's'], {}), "('^\\\\d+[a-zA-Z]+$', s)\n", (3113, 3135), False, 'import re\n'), ((7350, 7365), 'dateutil.parser.parse', 'parser.parse', (['s'], {}), '(s)\n', (7362, 7365), False, 'from dateutil import parser\n'), ((2451, 2463), 'datetime.timedelta', 'timedelta', (['(0)'], {}), '(0)\n', (2460, 2463), False, 'from datetime import timedelta, datetime\n'), ((4257, 4269), 'datetime.timedelta', 'timedelta', (['(0)'], {}), '(0)\n', (4266, 4269), False, 'from datetime import timedelta, datetime\n'), ((6020, 6050), 'math.ceil', 'math.ceil', (['(period / max_period)'], {}), '(period / max_period)\n', (6029, 6050), False, 'import math\n'), ((6947, 6978), 'math.floor', 'math.floor', (['(period / max_period)'], {}), '(period / max_period)\n', (6957, 6978), False, 'import math\n'), ((7189, 7201), 'datetime.timedelta', 'timedelta', (['(0)'], {}), '(0)\n', (7198, 7201), False, 'from datetime import timedelta, datetime\n'), ((9594, 9614), 'math.copysign', 'math.copysign', (['(1)', 'n1'], {}), '(1, n1)\n', (9607, 9614), False, 'import math\n'), ((11731, 11750), 'os.path.splitext', 'os.path.splitext', (['s'], {}), '(s)\n', (11747, 11750), False, 'import os\n'), ((2064, 2076), 'datetime.timedelta', 'timedelta', (['(0)'], {}), '(0)\n', (2073, 2076), False, 'from datetime import timedelta, datetime\n'), ((2590, 2602), 'datetime.timedelta', 'timedelta', (['(0)'], {}), '(0)\n', (2599, 2602), False, 'from datetime import timedelta, datetime\n'), ((2874, 2886), 'datetime.timedelta', 'timedelta', (['(0)'], {}), '(0)\n', (2883, 2886), False, 'from datetime import timedelta, datetime\n'), ((3870, 3882), 'datetime.timedelta', 'timedelta', (['(0)'], {}), '(0)\n', (3879, 3882), False, 'from datetime import timedelta, datetime\n'), ((4396, 4408), 'datetime.timedelta', 'timedelta', (['(0)'], {}), 
'(0)\n', (4405, 4408), False, 'from datetime import timedelta, datetime\n'), ((4680, 4692), 'datetime.timedelta', 'timedelta', (['(0)'], {}), '(0)\n', (4689, 4692), False, 'from datetime import timedelta, datetime\n'), ((5833, 5844), 'datetime.timedelta', 'timedelta', ([], {}), '()\n', (5842, 5844), False, 'from datetime import timedelta, datetime\n'), ((6760, 6771), 'datetime.timedelta', 'timedelta', ([], {}), '()\n', (6769, 6771), False, 'from datetime import timedelta, datetime\n'), ((568, 588), 'datetime.timedelta', 'timedelta', ([], {'minutes': 'd'}), '(minutes=d)\n', (577, 588), False, 'from datetime import timedelta, datetime\n'), ((2159, 2171), 'datetime.timedelta', 'timedelta', (['(0)'], {}), '(0)\n', (2168, 2171), False, 'from datetime import timedelta, datetime\n'), ((3965, 3977), 'datetime.timedelta', 'timedelta', (['(0)'], {}), '(0)\n', (3974, 3977), False, 'from datetime import timedelta, datetime\n'), ((674, 692), 'datetime.timedelta', 'timedelta', ([], {'hours': 'd'}), '(hours=d)\n', (683, 692), False, 'from datetime import timedelta, datetime\n'), ((777, 794), 'datetime.timedelta', 'timedelta', ([], {'days': 'd'}), '(days=d)\n', (786, 794), False, 'from datetime import timedelta, datetime\n'), ((881, 899), 'datetime.timedelta', 'timedelta', ([], {'weeks': 'd'}), '(weeks=d)\n', (890, 899), False, 'from datetime import timedelta, datetime\n'), ((987, 1009), 'datetime.timedelta', 'timedelta', ([], {'weeks': '(d * 4)'}), '(weeks=d * 4)\n', (996, 1009), False, 'from datetime import timedelta, datetime\n'), ((1095, 1118), 'datetime.timedelta', 'timedelta', ([], {'weeks': '(d * 48)'}), '(weeks=d * 48)\n', (1104, 1118), False, 'from datetime import timedelta, datetime\n'), ((1206, 1226), 'datetime.timedelta', 'timedelta', ([], {'seconds': 'd'}), '(seconds=d)\n', (1215, 1226), False, 'from datetime import timedelta, datetime\n')]
|
"""Test Keysmith."""
import subprocess
import sys
import keysmith
def test_script():
    """Test a full run when directly invoking."""
    cmd = [sys.executable, keysmith.__file__]
    subprocess.check_call(cmd)


def test_python_m():
    """Test python -m."""
    proc = subprocess.run([sys.executable, '-m', keysmith.__name__])
    assert proc.returncode == 0


def test_main_stats():
    """Test a full run with statistics."""
    exit_code = keysmith.main(['--stats'])
    assert exit_code == 0


def test_main_population():
    """Test a population file that does not exist."""
    exit_code = keysmith.main(['--population', 'nonexistent'])
    assert exit_code == 1
|
[
"subprocess.run",
"keysmith.main",
"subprocess.check_call"
] |
[((143, 201), 'subprocess.check_call', 'subprocess.check_call', (['[sys.executable, keysmith.__file__]'], {}), '([sys.executable, keysmith.__file__])\n', (164, 201), False, 'import subprocess\n'), ((437, 463), 'keysmith.main', 'keysmith.main', (["['--stats']"], {}), "(['--stats'])\n", (450, 463), False, 'import keysmith\n'), ((564, 610), 'keysmith.main', 'keysmith.main', (["['--population', 'nonexistent']"], {}), "(['--population', 'nonexistent'])\n", (577, 610), False, 'import keysmith\n'), ((318, 341), 'subprocess.run', 'subprocess.run', (['command'], {}), '(command)\n', (332, 341), False, 'import subprocess\n')]
|
import numpy as np
from deepen.activation import relu, relu_backward, sigmoid, sigmoid_backward
def initialize_params(layer_dims):
    """Create and initialize the params of an L-layer neural network.

    Weights are drawn from a standard normal and scaled by the square
    root of the fan-in; biases start at zero.

    Parameters
    ----------
    layer_dims : list or tuple of int
        The number of neurons in each layer of the network.

    Returns
    -------
    params : dict of {str: ndarray}
        Initialized parameters for each layer, l, of the L-layer network.

        Wl : ndarray
            Weights matrix of shape (`layer_dims[l]`, `layer_dims[l-1]`).
        bl : ndarray
            Biases vector of shape (`layer_dims[l]`, 1).
    """
    params = {}
    # Pair each layer size with its predecessor: (fan_in, fan_out).
    layer_pairs = zip(layer_dims, layer_dims[1:])
    for layer, (fan_in, fan_out) in enumerate(layer_pairs, start=1):
        params['W' + str(layer)] = (
            np.random.randn(fan_out, fan_in) / np.sqrt(fan_in)
        )
        params['b' + str(layer)] = np.zeros((fan_out, 1))
    return params
def linear_forward(A, W, b):
    """Calculate the linear part of forward propagation for the current layer.

    .. math:: $$Z^{[l]} = W^{[l]}A^{[l-1]} +b^{[l]}, where $A^{[0]} = X$

    Parameters
    ----------
    A : ndarray
        Activations from the previous layer, of shape (size of previous
        layer, number of examples).
    W : ndarray
        Weights matrix of shape (size of current layer, size of previous layer).
    b : ndarray
        Bias vector of shape (size of current layer, 1).

    Returns
    -------
    Z : ndarray
        Pre-activation values of shape (size of current layer, number of
        examples).
    cache : tuple of ndarray
        The inputs `(A, W, b)`, kept for the backward pass.
    """
    # `@` is the 2-D matrix product; `b` broadcasts across the examples axis.
    pre_activation = W @ A + b
    return pre_activation, (A, W, b)
def layer_forward(A_prev, W, b, activation):
    """Compute forward propagation for a single layer.
    Parameters
    ----------
    A_prev : ndarray
        Activations from the previous layer of shape (size of previous layer,
        number of examples).
    W : ndarray
        Weights matrix of shape (size of current layer, size of previous layer).
    b : ndarray
        Bias vector of shape (size of the current layer, 1).
    activation : str {"sigmoid", "relu"}
        Activation function to be used in this layer.
    Returns
    -------
    A : ndarray
        Output of the activation function of shape (size of current layer,
        number of examples).
    cache : tuple of (tuple of ndarray, ndarray)
        Stored for computing the backward pass efficiently.

        linear_cache : tuple of ndarray
            Stores `cache` returned by `linear_forward()`.
        activation_cache : ndarray
            Stores `Z` returned by 'linear_forward()`.
    Raises
    ------
    ValueError
        If `activation` is not "sigmoid" or "relu".
    """
    Z, linear_cache = linear_forward(A_prev, W, b)
    if activation == "sigmoid":
        A, activation_cache = sigmoid(Z)
    elif activation == "relu":
        A, activation_cache = relu(Z)
    else:
        # Fail fast with a clear message instead of the NameError that an
        # unrecognized activation would otherwise trigger below.
        raise ValueError(
            "activation must be 'sigmoid' or 'relu', got %r" % (activation,)
        )
    cache = (linear_cache, activation_cache)
    return A, cache
def model_forward(X, parameters):
    """Compute forward propagation for [LINEAR->RELU]*(L-1) -> [LINEAR->SIGMOID].
    Parameters
    ----------
    X : ndarray
        Input data of shape (input size, number of examples)
    parameters : dict of {str: ndarray}
        Output of initialize_parameters_deep()
    Returns
    -------
    Y_hat : ndarray
        Vector of prediction probabilities of shape (1, number of
        examples).
    caches : list of (tuple of (tuple of ndarray, ndarray))
        The L `cache` results from `layer_forward()`.
    """
    caches = []
    # Each layer contributes a W and a b, so the layer count is half the
    # number of parameter entries.
    num_layers = len(parameters) // 2
    activations = X
    # Hidden layers 1..L-1 all use ReLU.
    for layer in range(1, num_layers):
        activations, layer_cache = layer_forward(
            activations,
            parameters["W" + str(layer)],
            parameters["b" + str(layer)],
            "relu"
        )
        caches.append(layer_cache)
    # The output layer uses a sigmoid to produce probabilities.
    Y_hat, final_cache = layer_forward(
        activations,
        parameters["W" + str(num_layers)],
        parameters["b" + str(num_layers)],
        "sigmoid"
    )
    caches.append(final_cache)
    return Y_hat, caches
def compute_cost(Y_hat, Y):
    """Compute the cross-entropy cost.
    .. math:: $$-\frac{1}{m} \sum\limits_{i = 1}^{m} (y^{(i)}\log\left(a^{[L] (i)}\right) + (1-y^{(i)})\log\left(1- a^{[L](i)}\right))
    Parameters
    ----------
    Y_hat : ndarray
        Vector of prediction probabilities from `model_forward()` of shape
        (1, number of examples).
    Y : ndarray
        Vector of true values of shape (1, number of examples).
    Returns
    -------
    cost : list of int
        Cross-entropy cost.
    """
    # Per-example log-likelihood; the mean over examples is the negative cost.
    log_likelihood = Y * np.log(Y_hat) + (1 - Y) * np.log(1 - Y_hat)
    return np.squeeze(-np.mean(log_likelihood))
def linear_backward(dZ, cache):
    """Calculate the linear portion of backward propagation for a single layer.
    Parameters
    ----------
    dZ : ndarray
        Gradient of the cost with respect to the linear output of layer l.
    cache : tuple of ndarray
        Stored `A`, `W`, `b` from `linear_forward()`.
    Returns
    -------
    dA_prev : ndarray
        Gradient of the cost with respect to the activation of the previous
        layer, l-1. Shape of `cache['A']`.
    dW : ndarray
        Gradient of the cost with respect to W for the current layer, l. Shape
        of `cache['W']`.
    db : ndarray
        Gradient of the cost with respect to b for the current layer, l. Shape
        of `cache['b']`.
    """
    A_prev, W, b = cache
    # Number of training examples; gradients are averaged over it.
    num_examples = A_prev.shape[1]
    scale = 1 / num_examples
    dW = scale * (dZ @ A_prev.T)
    db = scale * dZ.sum(axis=1, keepdims=True)
    dA_prev = W.T @ dZ
    return dA_prev, dW, db
def layer_backward(dA, cache, activation):
    """Compute backward propagation for a single layer.
    Parameters
    ----------
    dA: ndarray
        Post-activation gradient for current layer, l.
    cache : tuple of (tuple of ndarray, ndarray)
        Stored `(linear_cache, activation_cache)` from `layer_forward()`.
    activation : str {"relu", "sigmoid"}
        Activation function to be used in this layer.
    Returns
    -------
    dA_prev : ndarray
        Gradient of the cost with respect to the activation of the previous
        layer, l-1. Shape of `cache['A']`.
    dW : ndarray
        Gradient of the cost with respect to W for the current layer, l. Shape
        of `cache['W']`.
    db : ndarray
        Gradient of the cost with respect to b for the current layer, l. Shape
        of `cache['b']`.
    Raises
    ------
    ValueError
        If `activation` is not "relu" or "sigmoid".
    """
    linear_cache, activation_cache = cache
    # Undo the activation first, then the linear step.
    if activation == "relu":
        dZ = relu_backward(dA, activation_cache)
    elif activation == "sigmoid":
        dZ = sigmoid_backward(dA, activation_cache)
    else:
        # Fail fast with a clear message instead of the NameError that an
        # unrecognized activation would otherwise trigger below.
        raise ValueError(
            "activation must be 'relu' or 'sigmoid', got %r" % (activation,)
        )
    dA_prev, dW, db = linear_backward(dZ, linear_cache)
    return dA_prev, dW, db
def model_backward(Y_hat, Y, caches):
    """Compute backward propagation for [LINEAR->RELU]*(L-1) -> [LINEAR->SIGMOID].
    Parameters
    ----------
    Y_hat : ndarray
        Vector of prediction probabilities from `model_forward()` of shape
        (1, number of examples).
    Y : ndarray
        Vector of true values of shape (1, number of examples).
    caches : list of (tuple of (tuple of ndarray, ndarray))
        Stored results of `model_forward()`.
    Returns
    -------
    grads : dict of {str: ndarray}
        Gradients for layer `l` in `range(L-1)`.

        dAl : ndarray
            Gradient of the activations for layer `l`.
        dWl : ndarray
            Gradient of the weights for layer `l`.
        dbl : ndarray
            Gradient of the biases for layer `l`.
    """
    grads = {}
    L = len(caches)
    m = Y_hat.shape[1]  # NOTE(review): m is never used below
    # Ensure Y and Y_hat have identical shapes before elementwise math.
    Y = Y.reshape(Y_hat.shape)
    # Derivative of the cross-entropy cost with respect to Y_hat.
    dY_hat = -(np.divide(Y, Y_hat) - np.divide(1-Y, 1-Y_hat))
    # The final (sigmoid) layer is handled separately from the ReLU layers.
    current_cache = caches[L-1]
    grads["dA" + str(L-1)], grads["dW" + str(L)], grads["db" + str(L)] = (
        layer_backward(dY_hat, current_cache, "sigmoid")
    )
    # Walk backward through the hidden ReLU layers, chaining each layer's
    # dA gradient into the previous one.
    for l in reversed(range(L-1)):
        current_cache = caches[l]
        dA_prev_temp, dW_temp, db_temp = (
            layer_backward(grads["dA" + str(l+1)], current_cache, "relu")
        )
        grads["dA" + str(l)] = dA_prev_temp
        grads["dW" + str(l + 1)] = dW_temp
        grads["db" + str(l + 1)] = db_temp
    return grads
def update_params(params, grads, learning_rate):
    """Update parameters using gradient descent.
    Parameters
    ----------
    params : dict of {str: ndarray}
        Initialized parameters from `intialize_params()`.
    grads : dict of {str: ndarray}
        Gradients from `model_backward()`.
    learning_rate : float in (0, 1)
        Learning rate for the model.
    Returns
    -------
    params : dict of {str: ndarray}
        Updated parameters (the same dict, modified in place).

        `Wl` : ndarray
            Updated weights matrix.
        `bl` : ndarray
            Updated biases vector.
    """
    num_layers = len(params) // 2
    for layer in range(1, num_layers + 1):
        w_key = "W" + str(layer)
        b_key = "b" + str(layer)
        # In-place update so existing references to the arrays stay valid.
        params[w_key] -= learning_rate * grads["d" + w_key]
        params[b_key] -= learning_rate * grads["d" + b_key]
    return params
|
[
"numpy.divide",
"deepen.activation.relu",
"numpy.sum",
"numpy.log",
"numpy.random.randn",
"numpy.zeros",
"deepen.activation.sigmoid",
"numpy.squeeze",
"numpy.dot",
"deepen.activation.relu_backward",
"deepen.activation.sigmoid_backward",
"numpy.sqrt"
] |
[((4728, 4744), 'numpy.squeeze', 'np.squeeze', (['cost'], {}), '(cost)\n', (4738, 4744), True, 'import numpy as np\n'), ((5651, 5666), 'numpy.dot', 'np.dot', (['W.T', 'dZ'], {}), '(W.T, dZ)\n', (5657, 5666), True, 'import numpy as np\n'), ((877, 905), 'numpy.zeros', 'np.zeros', (['(layer_dims[l], 1)'], {}), '((layer_dims[l], 1))\n', (885, 905), True, 'import numpy as np\n'), ((1739, 1751), 'numpy.dot', 'np.dot', (['W', 'A'], {}), '(W, A)\n', (1745, 1751), True, 'import numpy as np\n'), ((2896, 2906), 'deepen.activation.sigmoid', 'sigmoid', (['Z'], {}), '(Z)\n', (2903, 2906), False, 'from deepen.activation import relu, relu_backward, sigmoid, sigmoid_backward\n'), ((5565, 5585), 'numpy.dot', 'np.dot', (['dZ', 'A_prev.T'], {}), '(dZ, A_prev.T)\n', (5571, 5585), True, 'import numpy as np\n'), ((5603, 5636), 'numpy.sum', 'np.sum', (['dZ'], {'axis': '(1)', 'keepdims': '(True)'}), '(dZ, axis=1, keepdims=True)\n', (5609, 5636), True, 'import numpy as np\n'), ((6618, 6653), 'deepen.activation.relu_backward', 'relu_backward', (['dA', 'activation_cache'], {}), '(dA, activation_cache)\n', (6631, 6653), False, 'from deepen.activation import relu, relu_backward, sigmoid, sigmoid_backward\n'), ((761, 810), 'numpy.random.randn', 'np.random.randn', (['layer_dims[l]', 'layer_dims[l - 1]'], {}), '(layer_dims[l], layer_dims[l - 1])\n', (776, 810), True, 'import numpy as np\n'), ((811, 837), 'numpy.sqrt', 'np.sqrt', (['layer_dims[l - 1]'], {}), '(layer_dims[l - 1])\n', (818, 837), True, 'import numpy as np\n'), ((2968, 2975), 'deepen.activation.relu', 'relu', (['Z'], {}), '(Z)\n', (2972, 2975), False, 'from deepen.activation import relu, relu_backward, sigmoid, sigmoid_backward\n'), ((6761, 6799), 'deepen.activation.sigmoid_backward', 'sigmoid_backward', (['dA', 'activation_cache'], {}), '(dA, activation_cache)\n', (6777, 6799), False, 'from deepen.activation import relu, relu_backward, sigmoid, sigmoid_backward\n'), ((7800, 7819), 'numpy.divide', 'np.divide', (['Y', 'Y_hat'], {}), 
'(Y, Y_hat)\n', (7809, 7819), True, 'import numpy as np\n'), ((7822, 7849), 'numpy.divide', 'np.divide', (['(1 - Y)', '(1 - Y_hat)'], {}), '(1 - Y, 1 - Y_hat)\n', (7831, 7849), True, 'import numpy as np\n'), ((4697, 4714), 'numpy.log', 'np.log', (['(1 - Y_hat)'], {}), '(1 - Y_hat)\n', (4703, 4714), True, 'import numpy as np\n'), ((4666, 4679), 'numpy.log', 'np.log', (['Y_hat'], {}), '(Y_hat)\n', (4672, 4679), True, 'import numpy as np\n')]
|
#!E:\py_virtual_env\saas_project\Scripts\python.exe
# -*- coding: utf-8 -*-
from django.template import Library
from web import models
register = Library()
@register.inclusion_tag('inclusion/all_project_list.html')
def all_project_list(request):
    """Render the project list panel.

    Collects the projects created by the current user together with the
    project memberships the user has joined, and hands both to the
    inclusion template.
    """
    created = models.Project.objects.filter(creator=request.tracer.user)
    joined = models.ProjectUser.objects.filter(user=request.tracer.user)
    return {'my': created, 'join': joined, 'request': request}
|
[
"django.template.Library",
"web.models.ProjectUser.objects.filter",
"web.models.Project.objects.filter"
] |
[((146, 155), 'django.template.Library', 'Library', ([], {}), '()\n', (153, 155), False, 'from django.template import Library\n'), ((332, 390), 'web.models.Project.objects.filter', 'models.Project.objects.filter', ([], {'creator': 'request.tracer.user'}), '(creator=request.tracer.user)\n', (361, 390), False, 'from web import models\n'), ((415, 474), 'web.models.ProjectUser.objects.filter', 'models.ProjectUser.objects.filter', ([], {'user': 'request.tracer.user'}), '(user=request.tracer.user)\n', (448, 474), False, 'from web import models\n')]
|
from nmigen import *
from nmigen.cli import main
from nmigen.asserts import *
from enum import Enum, unique, IntEnum
# Two different types of "enums".
class Thing(object):
    """A fake enumerated value.
    This isn't actually an Enum, but is just a bag of constants,
    and not at all Pythonic.
    """
    # Plain integer constants; nothing prevents a caller from passing
    # some other int where a Thing is expected.
    DOG = 0
    CAT = 1
    SQUIRREL = 2
    @classmethod
    def signal(cls):
        """Returns a Signal with the right number of bits for the Enum.
        src_loc_at tells nMigen which stack frame to get the variable
        name from, so the one above this one.
        """
        return Signal.range(0, 3, src_loc_at=1)
@unique
class ConstingEnumThing(Enum):
    """A real Enum.
    In general, we shouldn't care what the actual value of an enum
    is. If you do, you probably don't want an enum but a Const.
    We'll see later that the enum values don't matter. You can auto() them,
    or even use strings.
    """
    DOG = 0
    CAT = 1
    SQUIRREL = 2
    @classmethod
    def signal(cls):
        """Returns a Signal with the right number of bits for the Enum.
        src_loc_at tells nMigen which stack frame to get the variable
        name from, so the one above this one.
        """
        return Signal.range(0, 3, src_loc_at=1)
    @property
    def const(self):
        """Returns a Const with the right number of bits for the Enum.
        Although we use self.value, we'll see later on that we can
        dispense with that.
        """
        return Const(self.value, ConstingEnumThing.signal().shape())
class CatDetector(Elaboratable):
    """Uses Thing, which isn't an Enum.
    This issues a warning because width(Thing.CAT) = 1, and
    width(Thing.DOG) = 2.
    """
    def __init__(self):
        # NOTE(review): self.input is declared but detectCat switches on
        # the comparand passed by elaborate, not on this signal.
        self.input = Thing.signal()
        self.output = Signal()
    def elaborate(self, platform):
        m = Module()
        self.detectCat(m, Thing.CAT)
        return m
    def detectCat(self, m, comparand):
        """Drive output low for DOG/SQUIRREL cases, high otherwise."""
        with m.Switch(comparand):
            with m.Case(Thing.DOG, Thing.SQUIRREL):
                m.d.comb += self.output.eq(0)
            with m.Default():
                m.d.comb += self.output.eq(1)
# This is the way we want things to work:
#
# But, we get an error:
# TypeError: Object '<ConstingEnumThing.CAT: 1>' is not an nMigen value
#
# class EnumCatDetector(Elaboratable):
# def __init__(self):
# self.input = ConstingEnumThing.signal()
# self.output = Signal()
# def elaborate(self, platform):
# m = Module()
# self.detectCat(m, ConstingEnumThing.CAT)
# return m
# def detectCat(self, m, comparand):
# with m.Switch(comparand):
# with m.Case(ConstingEnumThing.DOG,
# ConstingEnumThing.SQUIRREL):
# m.d.comb += self.output.eq(0)
# with m.Default():
# m.d.comb += self.output.eq(1)
class EnumValueCatDetector(Elaboratable):
    """
    We can try to fix the error by using .value on all the enum values. But
    we run into the same width warning.
    """
    def __init__(self):
        self.input = ConstingEnumThing.signal()
        self.output = Signal()
    def elaborate(self, platform):
        m = Module()
        self.detectCat(m, ConstingEnumThing.CAT.value)
        return m
    def detectCat(self, m, comparand):
        """Drive output low for DOG/SQUIRREL values, high otherwise."""
        # .value unwraps the enum members to plain ints for the nMigen DSL.
        with m.Switch(comparand):
            with m.Case(ConstingEnumThing.DOG.value,
                        ConstingEnumThing.SQUIRREL.value):
                m.d.comb += self.output.eq(0)
            with m.Default():
                m.d.comb += self.output.eq(1)
# We can't use the Const version on all the enum values, because:
#
# File "/home/robertbaruch/.local/lib/python3.6/site-packages/nmigen/hdl/dsl.py", line 286, in Case
# switch_data["cases"][new_values] = self._statements
# TypeError: unhashable type: 'Const'
#
# class EnumConstCatDetector(Elaboratable):
# def __init__(self):
# self.input = ConstingEnumThing.signal()
# self.output = Signal()
# def elaborate(self, platform):
# m = Module()
# self.detectCat(m, ConstingEnumThing.CAT.const)
# return m
# def detectCat(self, m, comparand):
# with m.Switch(comparand):
# with m.Case(ConstingEnumThing.DOG.const, ConstingEnumThing.const):
# m.d.comb += self.output.eq(0)
# with m.Default():
# m.d.comb += self.output.eq(1)
class EnumConstCatDetector(Elaboratable):
    """
    This is how you can use the Enum, but it's not great because
    now you have to remember when to use .value and when to use .const.
    And you still have to care about the actual numeric values of the Enum
    values, which isn't great.
    """
    def __init__(self):
        self.input = ConstingEnumThing.signal()
        self.output = Signal()
    def elaborate(self, platform):
        m = Module()
        # .const (a correctly-sized Const) works for the Switch subject...
        self.detectCat(m, ConstingEnumThing.CAT.const)
        return m
    def detectCat(self, m, comparand):
        """Drive output low for DOG/SQUIRREL values, high otherwise."""
        with m.Switch(comparand):
            # ...but Case arms still need the raw .value ints.
            with m.Case(ConstingEnumThing.DOG.value,
                        ConstingEnumThing.SQUIRREL.value):
                m.d.comb += self.output.eq(0)
            with m.Default():
                m.d.comb += self.output.eq(1)
def enumToConst(v):
    """Converts an Enum value to a Const of the correct size for the Enum."""
    assert (isinstance(v, Enum))
    # Size the Const from a throwaway Signal wide enough for all members.
    shape = Signal.range(0, len(type(v))).shape()
    return Const(enumToValue(v), shape)
def enumToValue(v):
    """Converts an Enum value to a Value of the correct size for the Enum.
    Note that we do not care what the actual numeric values of the Enum
    values are. All we care about is that they are unique, start from 0,
    and monotonically increase by 1.
    O(N), but there's no reason you can't memoize a hashtable.
    """
    assert (isinstance(v, Enum))
    # An enum iterates its members in definition order, so the member's
    # position in that iteration is its canonical index.
    for position, member in enumerate(type(v)):
        if member is v:
            return position
def enumToSignal(t):
    """Converts an Enum type to a Signal of the correct size for the Enum.
    This could be useful if the Signal constructor accepts an Enum.
    """
    assert (issubclass(t, Enum))
    member_count = len(t)
    # src_loc_at=1 makes nMigen name the signal after the caller's variable.
    return Signal.range(0, member_count, src_loc_at=1)
class IdealEnumCatDetector(Elaboratable):
    """
    This is the ideal, if we imagine that Switch accepts Enum values and calls
    enumToConst() on them, Case accepts Enum values and calls enumToValue()
    on them, and Signal() accepts Enum classes and calls enumToSignal() on them.
    """
    def __init__(self):
        self.input = enumToSignal(ConstingEnumThing)
        self.output = Signal()
    def elaborate(self, platform):
        m = Module()
        self.detectCat(m, ConstingEnumThing.CAT)
        return m
    def detectCat(self, m, comparand):
        """Drive output low for DOG/SQUIRREL values, high otherwise."""
        # Helpers translate the enum members, so no .value/.const juggling.
        with m.Switch(enumToConst(comparand)):
            with m.Case(
                    enumToValue(ConstingEnumThing.DOG),
                    enumToValue(ConstingEnumThing.SQUIRREL)):
                m.d.comb += self.output.eq(0)
            with m.Default():
                m.d.comb += self.output.eq(1)
class OtherUses(Elaboratable):
    """Shows other places enum values appear in nMigen expressions."""
    def __init__(self):
        self.input = enumToSignal(ConstingEnumThing)
        self.output1 = Signal()
        self.output2 = enumToSignal(ConstingEnumThing)
        self.output3 = Signal()
    def elaborate(self, platform):
        m = Module()
        # Equality comparison against an enum value via enumToValue().
        with m.If(self.input == enumToValue(ConstingEnumThing.CAT)):
            m.d.comb += self.output1.eq(1)
        with m.Else():
            m.d.comb += self.output1.eq(0)
        # These should be disallowed, as should any math on enum values
        # except equality/inequality comparison.
        m.d.comb += self.output2.eq((self.input < 1) | (1 + self.input))
        # matches should allow Enums.
        m.d.comb += self.output3.eq(
            self.input.matches(enumToValue(ConstingEnumThing.CAT)))
        return m
class IntThing(IntEnum):
    """An enumerated value based on IntEnum.
    """
    DOG = 0
    CAT = 1
    SQUIRREL = 2
    @classmethod
    def signal(cls):
        """Returns a Signal with the right number of bits for the Enum.
        src_loc_at tells nMigen which stack frame to get the variable
        name from, so the one above this one.
        """
        return Signal.range(0, 3, src_loc_at=1)
    @property
    def const(self):
        """Returns a Const with the right number of bits for the Enum.
        Although we use self.value, we'll see later on that we can
        dispense with that.
        """
        # Bug fix: size the Const from this class's own signal rather than
        # ConstingEnumThing's (a copy-paste slip; the widths happen to match
        # today, but would silently diverge if either enum changed).
        return Const(self.value, IntThing.signal().shape())
class IntEnumCatDetector(Elaboratable):
    """CatDetector using IntEnum.
    You can dispense with .value, but you must still use .const.
    """
    def __init__(self):
        self.input = IntThing.signal()
        self.output = Signal()
    def elaborate(self, platform):
        m = Module()
        # The Switch subject still needs an explicit Const...
        self.detectCat(m, IntThing.CAT.const)
        return m
    def detectCat(self, m, comparand):
        """Drive output low for DOG/SQUIRREL values, high otherwise."""
        # ...but IntEnum members work directly as Case arms.
        with m.Switch(comparand):
            with m.Case(IntThing.DOG, IntThing.SQUIRREL):
                m.d.comb += self.output.eq(0)
            with m.Default():
                m.d.comb += self.output.eq(1)
if __name__ == "__main__":
    # Instantiate every working detector variant and expose all of their
    # ports to the nMigen CLI as one combined design.
    meow = CatDetector()
    # meow2 = EnumCatDetector()
    meow3 = EnumValueCatDetector()
    meow4 = EnumConstCatDetector()
    meow5 = IdealEnumCatDetector()
    meow6 = OtherUses()
    meow7 = IntEnumCatDetector()
    m = Module()
    m.submodules.meow = meow
    # m.submodules.meow2 = meow2
    m.submodules.meow3 = meow3
    m.submodules.meow4 = meow4
    m.submodules.meow5 = meow5
    m.submodules.meow6 = meow6
    m.submodules.meow7 = meow7
    main(
        m,
        ports=[
            meow.input,
            meow.output,
            # meow2.input, meow2.output,
            meow3.input,
            meow3.output,
            meow4.input,
            meow4.output,
            meow5.input,
            meow5.output,
            meow6.input,
            meow6.output1,
            meow6.output2,
            meow6.output3,
            meow7.input,
            meow7.output
        ])
|
[
"nmigen.cli.main"
] |
[((10042, 10258), 'nmigen.cli.main', 'main', (['m'], {'ports': '[meow.input, meow.output, meow3.input, meow3.output, meow4.input, meow4.\n output, meow5.input, meow5.output, meow6.input, meow6.output1, meow6.\n output2, meow6.output3, meow7.input, meow7.output]'}), '(m, ports=[meow.input, meow.output, meow3.input, meow3.output, meow4.\n input, meow4.output, meow5.input, meow5.output, meow6.input, meow6.\n output1, meow6.output2, meow6.output3, meow7.input, meow7.output])\n', (10046, 10258), False, 'from nmigen.cli import main\n')]
|
#!/usr/bin/env python
# -*- coding: utf8 -*-
"""
The MetadataWizard(pymdwizard) software was developed by the
U.S. Geological Survey Fort Collins Science Center.
See: https://github.com/usgs/fort-pymdwizard for current project source code
See: https://usgs.github.io/fort-pymdwizard/ for current user documentation
See: https://github.com/usgs/fort-pymdwizard/tree/master/examples
for examples of use in other scripts
License: Creative Commons Attribution 4.0 International (CC BY 4.0)
http://creativecommons.org/licenses/by/4.0/
PURPOSE
------------------------------------------------------------------------------
The widget for the main metadata root item.
This is the container for an FGDC record without the application wrapper,
menu bar, etc.
SCRIPT DEPENDENCIES
------------------------------------------------------------------------------
This script is part of the pymdwizard package and is not intended to be
used independently. All pymdwizard package requirements are needed.
See imports section for external packages used in this script as well as
inter-package dependencies
U.S. GEOLOGICAL SURVEY DISCLAIMER
------------------------------------------------------------------------------
This software has been approved for release by the U.S. Geological Survey
(USGS). Although the software has been subjected to rigorous review,
the USGS reserves the right to update the software as needed pursuant to
further analysis and review. No warranty, expressed or implied, is made by
the USGS or the U.S. Government as to the functionality of the software and
related material nor shall the fact of release constitute any such warranty.
Furthermore, the software is released on condition that neither the USGS nor
the U.S. Government shall be held liable for any damages resulting from
its authorized or unauthorized use.
Any use of trade, product or firm names is for descriptive purposes only and
does not imply endorsement by the U.S. Geological Survey.
Although this information product, for the most part, is in the public domain,
it also contains copyrighted material as noted in the text. Permission to
reproduce copyrighted items for other than personal use must be secured from
the copyright owner.
------------------------------------------------------------------------------
"""
from lxml import etree
from PyQt5.QtGui import QPainter
from PyQt5.QtGui import QPixmap
from PyQt5.QtWidgets import QWidget
from PyQt5.QtCore import QTimeLine
from pymdwizard.core import utils
from pymdwizard.core import xml_utils
from pymdwizard.gui.wiz_widget import WizardWidget
from pymdwizard.gui.ui_files import UI_MetadataRoot
from pymdwizard.gui.IDInfo import IdInfo
from pymdwizard.gui.spatial_tab import SpatialTab
from pymdwizard.gui.EA import EA
from pymdwizard.gui.DataQuality import DataQuality
from pymdwizard.gui.metainfo import MetaInfo
from pymdwizard.gui.distinfo import DistInfo
class MetadataRoot(WizardWidget):
    """Top-level widget holding a complete FGDC metadata record.

    Hosts the six main FGDC sections (idinfo, dataqual, spatial, eainfo,
    distinfo, metainfo) in a stacked layout, handles switching between
    them, toggling the optional sections, switching between the FGDC and
    BDP schema variants, and serializing the record to and from XML.
    """
    drag_label = "Metadata <metadata>"
    acceptable_tags = ["abstract"]
    ui_class = UI_MetadataRoot.Ui_metadata_root
    def __init__(self, parent=None):
        # Default to the BDP schema variant; switch_schema() can change it.
        self.schema = "bdp"
        super(self.__class__, self).__init__(parent=parent)
        # The optional top-level sections all start out enabled.
        self.use_dataqual = True
        self.use_spatial = True
        self.use_eainfo = True
        self.use_distinfo = True
    def build_ui(self):
        """
        Build and modify this widget's GUI
        Returns
        -------
        None
        """
        self.ui = self.ui_class()
        self.ui.setupUi(self)
        self.setup_dragdrop(self, enable=True)
        # One child widget per top-level FGDC section, each added to its
        # corresponding page of the stacked layout.
        self.idinfo = IdInfo(root_widget=self, parent=self)
        self.ui.page_idinfo.layout().addWidget(self.idinfo)
        self.dataqual = DataQuality()
        self.ui.page_dataqual.layout().addWidget(self.dataqual)
        self.spatial_tab = SpatialTab(root_widget=self)
        self.ui.page_spatial.layout().addWidget(self.spatial_tab)
        self.eainfo = EA()
        self.ui.page_eainfo.layout().addWidget(self.eainfo)
        self.metainfo = MetaInfo(root_widget=self)
        self.ui.page_metainfo.layout().addWidget(self.metainfo)
        self.distinfo = DistInfo(root_widget=self)
        self.ui.page_distinfo.layout().addWidget(self.distinfo)
    def connect_events(self):
        """
        Connect the appropriate GUI components with the corresponding functions
        Returns
        -------
        None
        """
        # All six section header buttons funnel into the same handler,
        # which inspects the sender to decide which page to display.
        self.ui.idinfo_button.pressed.connect(self.section_changed)
        self.ui.dataquality_button.pressed.connect(self.section_changed)
        self.ui.spatial_button.pressed.connect(self.section_changed)
        self.ui.eainfo_button.pressed.connect(self.section_changed)
        self.ui.distinfo_button.pressed.connect(self.section_changed)
        self.ui.metainfo_button.pressed.connect(self.section_changed)
    def section_changed(self):
        """
        The event which switches the currently displayed main section
        when a user clicks on one of the top level section header buttons.
        Returns
        -------
        None
        """
        # Map the clicked button's object name to its stacked-widget index.
        button_name = self.sender().objectName()
        index_lookup = {
            "idinfo_button": 0,
            "dataquality_button": 1,
            "spatial_button": 2,
            "eainfo_button": 3,
            "distinfo_button": 4,
            "metainfo_button": 5,
        }
        new_index = index_lookup[button_name]
        self.switch_section(which_index=new_index)
    def switch_section(self, which_index):
        """
        Sub function that does the actual switching, creating a fader widget,
        etc.
        Parameters
        ----------
        which_index : int
            The index of the section to display
        Returns
        -------
        The widget at `which_index` that is now displayed.
        """
        # Keep the checked state of the header buttons in sync with the
        # page being displayed.
        if which_index == 0:
            self.ui.idinfo_button.setChecked(True)
        elif which_index == 1:
            self.ui.dataquality_button.setChecked(True)
        elif which_index == 2:
            self.ui.spatial_button.setChecked(True)
        elif which_index == 3:
            self.ui.eainfo_button.setChecked(True)
        elif which_index == 4:
            self.ui.distinfo_button.setChecked(True)
        elif which_index == 5:
            self.ui.metainfo_button.setChecked(True)
        old_widget = self.ui.fgdc_metadata.currentWidget()
        new_widget = self.ui.fgdc_metadata.widget(which_index)
        # Animate the transition between the old and new section pages.
        FaderWidget(old_widget, new_widget)
        self.ui.fgdc_metadata.setCurrentIndex(which_index)
        return new_widget
    def switch_schema(self, schema):
        """
        Switch the displayed schema between straight FGDC and BDP
        Parameters
        ----------
        schema : str
        Returns
        -------
        None
        """
        self.schema = schema
        # Only idinfo and the spatial tab differ between the two schemas.
        self.idinfo.switch_schema(schema)
        self.spatial_tab.switch_schema(schema)
    def use_section(self, which, value):
        """
        Enable or disable optional top level sections
        Parameters
        ----------
        which : str
            Which section to change: ['dataqual', 'spatial', 'eainfo',
            'distinfo']
        value : bool
            Whether to enable (True) or disable (False)
        Returns
        -------
        None
        """
        if which == "dataqual":
            self.use_dataqual = value
            self.dataqual.setVisible(value)
        if which == "spatial":
            self.use_spatial = value
            self.spatial_tab.setVisible(value)
        if which == "eainfo":
            self.use_eainfo = value
            self.eainfo.setVisible(value)
        if which == "distinfo":
            self.use_distinfo = value
            self.distinfo.setVisible(value)
    def to_xml(self):
        """Serialize the record to an FGDC `<metadata>` XML node.

        idinfo and metainfo are always written; the optional sections are
        written only when enabled (and, for spdoinfo/spref/eainfo, only
        when they actually have content).

        Returns
        -------
        XML Element
            The assembled `<metadata>` element.
        """
        metadata_node = xml_utils.xml_node(tag="metadata")
        idinfo = self.idinfo.to_xml()
        metadata_node.append(idinfo)
        if self.use_dataqual:
            dataqual = self.dataqual.to_xml()
            metadata_node.append(dataqual)
        if self.spatial_tab.spdoinfo.has_content() and self.use_spatial:
            spdoinfo = self.spatial_tab.spdoinfo.to_xml()
            metadata_node.append(spdoinfo)
        if self.spatial_tab.spref.has_content() and self.use_spatial:
            spref = self.spatial_tab.spref.to_xml()
            metadata_node.append(spref)
        if self.eainfo.has_content() and self.use_eainfo:
            eainfo = self.eainfo.to_xml()
            metadata_node.append(eainfo)
        if self.use_distinfo:
            distinfo = self.distinfo.to_xml()
            metadata_node.append(distinfo)
        metainfo = self.metainfo.to_xml()
        metadata_node.append(metainfo)
        return metadata_node
    def from_xml(self, metadata_element):
        """Populate every section widget from an FGDC `<metadata>` element.

        Parameters
        ----------
        metadata_element : XML Element
            Either a full `<metadata>` record or a single section element.
        """
        self.populate_section(metadata_element, "spdoinfo", self.spatial_tab.spdoinfo)
        self.populate_section(metadata_element, "spref", self.spatial_tab.spref)
        self.populate_section(metadata_element, "idinfo", self.idinfo)
        self.populate_section(metadata_element, "dataqual", self.dataqual)
        self.populate_section(metadata_element, "eainfo", self.eainfo)
        self.populate_section(metadata_element, "distinfo", self.distinfo)
        self.populate_section(metadata_element, "metainfo", self.metainfo)
    def populate_section(self, metadata_element, section_name, widget):
        """
        Since the content of top level sections might contain items that
        need to go to separate top level items, this function handles the
        divvying up of sub-content.
        Parameters
        ----------
        metadata_element : XML Element
        section_name : Section tag to populate
        widget : The section widget
        Returns
        -------
        True when `metadata_element` is a single element whose tag does
        not match `section_name`; otherwise None.
        """
        # When handed a single element (rather than a full record), only
        # populate the widget if that element's tag matches this section.
        just_this_one = type(metadata_element) == etree._Element
        if just_this_one and metadata_element.tag == section_name:
            section = metadata_element
        elif just_this_one:
            return True
        else:
            section = xml_utils.search_xpath(metadata_element, section_name)
        if section is not None:
            widget.from_xml(section)
        elif not just_this_one:
            # The section is absent from the record: reset the widget.
            widget.clear_widget()
class FaderWidget(QWidget):
    """
    A QWidget that allows for fading in and out on display.
    """
    def __init__(self, old_widget, new_widget):
        """Overlay a snapshot of `old_widget` on `new_widget` and fade it out.

        Parameters
        ----------
        old_widget : QWidget
            The widget being hidden; rendered once into a pixmap.
        new_widget : QWidget
            The widget being revealed; this fader is created as its child.
        """
        QWidget.__init__(self, new_widget)
        self.old_pixmap = QPixmap(new_widget.size())
        old_widget.render(self.old_pixmap)
        self.pixmap_opacity = 1.0
        self.timeline = QTimeLine()
        self.timeline.valueChanged.connect(self.animate)
        self.timeline.finished.connect(self.close)
        self.timeline.setDuration(450)  # fade duration in milliseconds
        self.timeline.start()
        self.resize(new_widget.size())
        self.show()
    def paintEvent(self, event):
        # Paint the old widget's snapshot at the current (fading) opacity.
        painter = QPainter()
        painter.begin(self)
        painter.setOpacity(self.pixmap_opacity)
        painter.drawPixmap(0, 0, self.old_pixmap)
        painter.end()
    def animate(self, value):
        # The timeline value runs 0 -> 1, so opacity runs 1 -> 0.
        self.pixmap_opacity = 1.0 - value
        self.repaint()
if __name__ == "__main__":
    # Launch this widget standalone for manual testing.
    utils.launch_widget(MetadataRoot, "MetadataRoot testing")
|
[
"PyQt5.QtGui.QPainter",
"pymdwizard.gui.distinfo.DistInfo",
"PyQt5.QtCore.QTimeLine",
"pymdwizard.gui.DataQuality.DataQuality",
"pymdwizard.gui.IDInfo.IdInfo",
"pymdwizard.gui.spatial_tab.SpatialTab",
"pymdwizard.core.xml_utils.xml_node",
"pymdwizard.gui.EA.EA",
"pymdwizard.gui.metainfo.MetaInfo",
"PyQt5.QtWidgets.QWidget.__init__",
"pymdwizard.core.utils.launch_widget",
"pymdwizard.core.xml_utils.search_xpath"
] |
[((11272, 11329), 'pymdwizard.core.utils.launch_widget', 'utils.launch_widget', (['MetadataRoot', '"""MetadataRoot testing"""'], {}), "(MetadataRoot, 'MetadataRoot testing')\n", (11291, 11329), False, 'from pymdwizard.core import utils\n'), ((3654, 3691), 'pymdwizard.gui.IDInfo.IdInfo', 'IdInfo', ([], {'root_widget': 'self', 'parent': 'self'}), '(root_widget=self, parent=self)\n', (3660, 3691), False, 'from pymdwizard.gui.IDInfo import IdInfo\n'), ((3777, 3790), 'pymdwizard.gui.DataQuality.DataQuality', 'DataQuality', ([], {}), '()\n', (3788, 3790), False, 'from pymdwizard.gui.DataQuality import DataQuality\n'), ((3883, 3911), 'pymdwizard.gui.spatial_tab.SpatialTab', 'SpatialTab', ([], {'root_widget': 'self'}), '(root_widget=self)\n', (3893, 3911), False, 'from pymdwizard.gui.spatial_tab import SpatialTab\n'), ((4001, 4005), 'pymdwizard.gui.EA.EA', 'EA', ([], {}), '()\n', (4003, 4005), False, 'from pymdwizard.gui.EA import EA\n'), ((4091, 4117), 'pymdwizard.gui.metainfo.MetaInfo', 'MetaInfo', ([], {'root_widget': 'self'}), '(root_widget=self)\n', (4099, 4117), False, 'from pymdwizard.gui.metainfo import MetaInfo\n'), ((4207, 4233), 'pymdwizard.gui.distinfo.DistInfo', 'DistInfo', ([], {'root_widget': 'self'}), '(root_widget=self)\n', (4215, 4233), False, 'from pymdwizard.gui.distinfo import DistInfo\n'), ((7874, 7908), 'pymdwizard.core.xml_utils.xml_node', 'xml_utils.xml_node', ([], {'tag': '"""metadata"""'}), "(tag='metadata')\n", (7892, 7908), False, 'from pymdwizard.core import xml_utils\n'), ((10491, 10525), 'PyQt5.QtWidgets.QWidget.__init__', 'QWidget.__init__', (['self', 'new_widget'], {}), '(self, new_widget)\n', (10507, 10525), False, 'from PyQt5.QtWidgets import QWidget\n'), ((10682, 10693), 'PyQt5.QtCore.QTimeLine', 'QTimeLine', ([], {}), '()\n', (10691, 10693), False, 'from PyQt5.QtCore import QTimeLine\n'), ((10984, 10994), 'PyQt5.QtGui.QPainter', 'QPainter', ([], {}), '()\n', (10992, 10994), False, 'from PyQt5.QtGui import QPainter\n'), ((10136, 10190), 
'pymdwizard.core.xml_utils.search_xpath', 'xml_utils.search_xpath', (['metadata_element', 'section_name'], {}), '(metadata_element, section_name)\n', (10158, 10190), False, 'from pymdwizard.core import xml_utils\n')]
|
import uuid
import pytest
from selenium.common.exceptions import TimeoutException
from skyportal.tests import api
def enter_comment_text(driver, comment_text):
comment_xpath = "//div[contains(@data-testid, 'individual-spectrum-id_')]//textarea[@name='text']"
comment_box = driver.wait_for_xpath(comment_xpath)
driver.click_xpath(comment_xpath)
comment_box.send_keys(comment_text)
def add_comment(driver, comment_text):
enter_comment_text(driver, comment_text)
driver.click_xpath(
"//div[contains(@data-testid, 'individual-spectrum-id_')]//*[@name='submitCommentButton']"
)
def add_comment_and_wait_for_display(driver, comment_text):
add_comment(driver, comment_text)
try:
driver.wait_for_xpath(f'//p[text()="{comment_text}"]', timeout=20)
except TimeoutException:
driver.refresh()
driver.wait_for_xpath(f'//p[text()="{comment_text}"]')
@pytest.mark.flaky(reruns=2)
def test_comments(driver, user, public_source):
driver.get(f"/become_user/{user.id}")
comment_text = str(uuid.uuid4())
# now test the Manage Data page
driver.get(f"/manage_data/{public_source.id}")
# little triangle you push to expand the table
driver.click_xpath("//*[@id='expandable-button']")
add_comment_and_wait_for_display(driver, comment_text)
# Make sure individual spectra comments appear on the Source page
driver.get(f"/source/{public_source.id}")
driver.wait_for_xpath(f'//p[contains(text(), "{comment_text}")]')
def test_annotations(
driver, user, annotation_token, upload_data_token, public_source, lris
):
driver.get(f"/become_user/{user.id}")
annotation_data = str(uuid.uuid4())
status, data = api(
'POST',
'spectrum',
data={
'obj_id': str(public_source.id),
'observed_at': '2021-11-02 12:00:00',
'instrument_id': lris.id,
'wavelengths': [664, 665, 666],
'fluxes': [234.2, 232.1, 235.3],
},
token=upload_data_token,
)
assert status == 200
assert data['status'] == 'success'
spectrum_id = data["data"]["id"]
status, data = api(
'POST',
f'spectra/{spectrum_id}/annotations',
data={
'origin': 'kowalski',
'data': {'useful_info': annotation_data},
},
token=annotation_token,
)
assert status == 200
# ----> now test the Manage Data page <----
driver.get(f"/manage_data/{public_source.id}")
# need to filter out only the new spectrum we've added
# open the filter menu
driver.click_xpath(
"//*[@data-testid='spectrum-div']//button[@data-testid='Filter Table-iconButton']"
)
# click the filter on ID button
driver.click_xpath("//div[@id='mui-component-select-id']", scroll_parent=True)
# choose the one we've added based on ID
driver.click_xpath(f"//li[@data-value='{spectrum_id}']", scroll_parent=True)
# close the filter menu
driver.click_xpath("//*[contains(@class, 'filterClose')]")
# push the little triangle to expand the table
driver.click_xpath("//*[@data-testid='spectrum-div']//*[@id='expandable-button']")
driver.wait_for_xpath(f'//div[text()="{annotation_data}"]')
# ----> now go to the source page <----
driver.get(f"/source/{public_source.id}")
driver.wait_for_xpath('//div[text()="Spectrum Obs. at"]')
# filter once more for only this spectrum
driver.click_xpath(
"//*[@id='annotations-content']//button[@data-testid='Filter Table-iconButton']"
)
# click the filter on ID button
driver.click_xpath(
"//div[@id='mui-component-select-observed_at']", scroll_parent=True
)
# choose the one we've added based on ID
driver.click_xpath("//li[@data-value='2021-11-02.5']", scroll_parent=True)
# close the filter menu
driver.click_xpath("//*[contains(@class, 'filterClose')]")
driver.wait_for_xpath('//div[text()="2021-11-02.5"]')
driver.wait_for_xpath(f'//div[text()="{annotation_data}"]')
|
[
"uuid.uuid4",
"skyportal.tests.api",
"pytest.mark.flaky"
] |
[((920, 947), 'pytest.mark.flaky', 'pytest.mark.flaky', ([], {'reruns': '(2)'}), '(reruns=2)\n', (937, 947), False, 'import pytest\n'), ((2173, 2326), 'skyportal.tests.api', 'api', (['"""POST"""', 'f"""spectra/{spectrum_id}/annotations"""'], {'data': "{'origin': 'kowalski', 'data': {'useful_info': annotation_data}}", 'token': 'annotation_token'}), "('POST', f'spectra/{spectrum_id}/annotations', data={'origin':\n 'kowalski', 'data': {'useful_info': annotation_data}}, token=\n annotation_token)\n", (2176, 2326), False, 'from skyportal.tests import api\n'), ((1063, 1075), 'uuid.uuid4', 'uuid.uuid4', ([], {}), '()\n', (1073, 1075), False, 'import uuid\n'), ((1690, 1702), 'uuid.uuid4', 'uuid.uuid4', ([], {}), '()\n', (1700, 1702), False, 'import uuid\n')]
|
import string
import pytest
from pysj import md5, sha1, sha256, uuid
from pysj.crypto import ALPHABET
@pytest.mark.parametrize(
"hash_algorithm, digest",
[
("sha256", "9f86d081884c7d659a2feaa0c55ad015a3bf4f1b2b0b822cd15d6c15b0f00a08"),
("sha1", "a94a8fe5ccb19ba61c4c0873d391e987982fbbd3"),
("md5", "098f6bcd4621d373cade4e832627b4f6"),
],
)
def test_hashing(hash_algorithm, digest):
assert eval(f"{hash_algorithm}('test')") == digest
def test_uuid_generation():
assert len(uuid()) == 36 and all([x in string.hexdigits + "-" for x in uuid()])
def test_uuid_alpha_generation():
assert 20 < len(uuid("alpha")) < 24 and all([x in ALPHABET for x in uuid("alpha")])
def test_uuid_int_generation():
assert isinstance(uuid("int"), int)
def test_force_mutable():
assert sha256([1, 2, 3], force_mutable=True) == sha256("[1, 2, 3]")
def test_force_mutable_error_when_flag_is_false():
with pytest.raises(AttributeError):
assert sha256([1, 2, 3])
|
[
"pytest.raises",
"pytest.mark.parametrize",
"pysj.uuid",
"pysj.sha256"
] |
[((107, 350), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""hash_algorithm, digest"""', "[('sha256',\n '9f86d081884c7d659a2feaa0c55ad015a3bf4f1b2b0b822cd15d6c15b0f00a08'), (\n 'sha1', 'a94a8fe5ccb19ba61c4c0873d391e987982fbbd3'), ('md5',\n '098f6bcd4621d373cade4e832627b4f6')]"], {}), "('hash_algorithm, digest', [('sha256',\n '9f86d081884c7d659a2feaa0c55ad015a3bf4f1b2b0b822cd15d6c15b0f00a08'), (\n 'sha1', 'a94a8fe5ccb19ba61c4c0873d391e987982fbbd3'), ('md5',\n '098f6bcd4621d373cade4e832627b4f6')])\n", (130, 350), False, 'import pytest\n'), ((771, 782), 'pysj.uuid', 'uuid', (['"""int"""'], {}), "('int')\n", (775, 782), False, 'from pysj import md5, sha1, sha256, uuid\n'), ((828, 865), 'pysj.sha256', 'sha256', (['[1, 2, 3]'], {'force_mutable': '(True)'}), '([1, 2, 3], force_mutable=True)\n', (834, 865), False, 'from pysj import md5, sha1, sha256, uuid\n'), ((869, 888), 'pysj.sha256', 'sha256', (['"""[1, 2, 3]"""'], {}), "('[1, 2, 3]')\n", (875, 888), False, 'from pysj import md5, sha1, sha256, uuid\n'), ((951, 980), 'pytest.raises', 'pytest.raises', (['AttributeError'], {}), '(AttributeError)\n', (964, 980), False, 'import pytest\n'), ((997, 1014), 'pysj.sha256', 'sha256', (['[1, 2, 3]'], {}), '([1, 2, 3])\n', (1003, 1014), False, 'from pysj import md5, sha1, sha256, uuid\n'), ((522, 528), 'pysj.uuid', 'uuid', ([], {}), '()\n', (526, 528), False, 'from pysj import md5, sha1, sha256, uuid\n'), ((647, 660), 'pysj.uuid', 'uuid', (['"""alpha"""'], {}), "('alpha')\n", (651, 660), False, 'from pysj import md5, sha1, sha256, uuid\n'), ((582, 588), 'pysj.uuid', 'uuid', ([], {}), '()\n', (586, 588), False, 'from pysj import md5, sha1, sha256, uuid\n'), ((699, 712), 'pysj.uuid', 'uuid', (['"""alpha"""'], {}), "('alpha')\n", (703, 712), False, 'from pysj import md5, sha1, sha256, uuid\n')]
|
#!/usr/bin/env python3
"""Tetrix
This is a Qt6 version of the classic Tetrix game.
The object of the game is to stack pieces dropped from the top of the playing
area so that they fill entire rows at the bottom of the playing area.
When a row is filled, all the blocks on that row are removed, the player earns
a number of points, and the peices above are moved down to occupy that row. If
more that one row is filled, the blocks on each row are removed, and the player
earns extra points.
The LEFT cursors cursor key moves the current piece one space to the left. the
RIGHT cursor key moves it one space to the right, the UP cursor key rotates the
piece counter-clockwise by 90 degrees, and the DOWN cursor key rotates the piece
clockwise by 90 degrees.
To avoid waiting for a piece to fall to the bottom of the board, press D to
immediately move the piece down by one row, or press the SPACE key to drop
it as close to the bottom of the board as possible.
This example example shows how a simple game can be created using only three
classes.
- The *TetrixWindow* class is used to display the payer's score, number of
lives, and information about the next piece to appear.
- The *TetrixBoard* class contains the next game logic, handles keyboard input,
and displays the pieces on the playing area.
- The *TetrixPiece* class contains information about each piece.
In this approach, the *TetrixBoard* clss is the most complex class, since it
handle the game logic and rendering. One benefit of this is that the
*TetrixWindow* and *TeTrixPiece* class are very simple and contains only a
minimum of code.
Credit: Most of the code here is based on official PySide6 example of Tetrix
code.
Note: This code is used for learning Qt6 Programming.
"""
import random
from enum import IntEnum, auto
from unittest import result
from PySide6 import QtCore
from PySide6 import QtGui
from PySide6 import QtWidgets
class ShapeEnum(IntEnum):
NO_SHAPE = 0
Z_SHAPE = 1
S_SHAPE = 2
LINE_SHAPE = 3
T_SHAPE = 4
SQUARE_SHAPE = 5
L_SHAPE = 6
MIRRORED_L_SHAPE = 7
class TetrixWindow(QtWidgets.QWidget):
"""TetrixWindow."""
def __init__(self, root):
super(TetrixWindow, self).__init__()
self.board = TetrixBoard()
nxtPieceLabel = QtWidgets.QLabel()
nxtPieceLabel.setFrameStyle(
QtWidgets.QFrame.Box | QtWidgets.QFrame.Raised)
nxtPieceLabel.setAlignment(QtCore.Qt.AlignCenter)
self.board.setNextPieceLabel(nxtPieceLabel)
scoreLcd = QtWidgets.QLCDNumber(5)
scoreLcd.setSegmentStyle(QtWidgets.QLCDNumber.Filled)
levelLcd = QtWidgets.QLCDNumber(2)
levelLcd.setSegmentStyle(QtWidgets.QLCDNumber.Filled)
linesLcd = QtWidgets.QLCDNumber(5)
linesLcd.setSegmentStyle(QtWidgets.QLCDNumber.Filled)
startBtn = QtWidgets.QPushButton("&Start")
startBtn.setFocusPolicy(QtCore.Qt.NoFocus)
quitBtn = QtWidgets.QPushButton("&Quit")
quitBtn.setFocusPolicy(QtCore.Qt.NoFocus)
pauseBtn = QtWidgets.QPushButton("&Pause")
pauseBtn.setFocusPolicy(QtCore.Qt.NoFocus)
startBtn.clicked.connect(self.board.start)
pauseBtn.clicked.connect(self.board.pause)
quitBtn.clicked.connect(root.quit)
self.board.scoreChanged.connect(scoreLcd.display)
self.board.levelChanged.connect(levelLcd.display)
self.board.linesRemovedChanged.connect(linesLcd.display)
layout = QtWidgets.QGridLayout()
layout.addWidget(self.createLabel("NEXT"), 0, 0)
layout.addWidget(nxtPieceLabel, 1, 0)
layout.addWidget(self.createLabel("LEVEL"), 2, 0)
layout.addWidget(levelLcd, 3, 0)
layout.addWidget(startBtn, 4, 0)
layout.addWidget(self.board, 0, 1, 6, 1)
layout.addWidget(self.createLabel("SCORE"), 0, 2)
layout.addWidget(scoreLcd, 1, 2)
layout.addWidget(self.createLabel("LINES REMOVED"), 2, 2)
layout.addWidget(linesLcd, 3, 2)
layout.addWidget(quitBtn, 4, 2)
layout.addWidget(pauseBtn, 5, 2)
self.setLayout(layout)
self.setWindowTitle("Tetrix")
self.resize(550, 370)
def createLabel(self, text):
label = QtWidgets.QLabel(text)
label.setAlignment(QtCore.Qt.AlignCenter | QtCore.Qt.AlignBottom)
return label
class TetrixBoard(QtWidgets.QFrame):
"""TetrixBoard."""
BOARD_WIDTH = 10
BOARD_HEIGHT = 22
scoreChanged = QtCore.Signal(int)
levelChanged = QtCore.Signal(int)
linesRemovedChanged = QtCore.Signal(int)
def __init__(self, parent=None):
super(TetrixBoard, self).__init__(parent)
self.timer = QtCore.QBasicTimer()
self.nxtPieceLabel = None
self.isWaitingAfterLine = False
self.curPiece = TetrixPiece()
self.nxtPiece = TetrixPiece()
self.curx = 0
self.cury = 0
self.numLinesRemoved = 0
self.numPiecesDropped = 0
self.score = 0
self.level = 0
self.board = None
self.setFrameStyle(QtWidgets.QFrame.Panel | QtWidgets.QFrame.Sunken)
self.setFocusPolicy(QtCore.Qt.StrongFocus)
self.isStarted = False
self.isPaused = False
self.clearBoard()
self.nxtPiece.setRandomShape()
def shapeAt(self, x, y):
return self.board[(y * TetrixBoard.BOARD_WIDTH) + x]
def setShapeAt(self, x, y, shape: ShapeEnum):
self.board[(y * TetrixBoard.BOARD_WIDTH) + x] = shape
def timeoutTime(self):
return 1000 / (1 + self.level)
def squareWidth(self):
return self.contentsRect().width() / TetrixBoard.BOARD_WIDTH
def squareHeight(self):
return self.contentsRect().height() / TetrixBoard.BOARD_HEIGHT
def setNextPieceLabel(self, label):
self.nxtPieceLabel = label
def sizeHint(self):
return QtCore.QSize(TetrixBoard.BOARD_WIDTH * 15 + self.frameWidth()*2,
TetrixBoard.BOARD_HEIGHT*15 + self.frameWidth()*2)
def minimumSizeHint(self):
return QtCore.QSize(TetrixBoard.BOARD_WIDTH*15 + self.frameWidth()*2,
TetrixBoard.BOARD_WIDTH * 5 + self.frameWidth()*2)
def start(self):
if self.isPaused:
return
self.isStarted = True
self.isWaitingAfterLine = False
self.numLinesRemoved = 0
self.numPiecesDropped = 0
self.score = 0
self.level = 1
self.clearBoard()
self.linesRemovedChanged.emit(self.numLinesRemoved)
self.scoreChanged.emit(self.score)
self.levelChanged.emit(self.level)
self.newPiece()
self.timer.start(self.timeoutTime(), self)
def pause(self):
if not self.isStarted:
return
self.isPaused = not self.isPaused
if self.isPaused:
self.timer.stop()
else:
self.timer.start(self.timeoutTime(), self)
self.update()
def paintEvent(self, event):
super(TetrixBoard, self).paintEvent(event)
painter = QtGui.QPainter(self)
rect = self.contentsRect()
if self.isPaused:
painter.drawText(rect, QtCore.Qt.AlignCenter, "Pause")
return
boardTop = rect.bottom() - TetrixBoard.BOARD_HEIGHT*self.squareHeight()
for i in range(TetrixBoard.BOARD_HEIGHT):
for j in range(TetrixBoard.BOARD_WIDTH):
shape = self.shapeAt(j, TetrixBoard.BOARD_HEIGHT-i-1)
if shape != ShapeEnum.NO_SHAPE:
self.drawSquare(
painter, rect().left() + j*self.squareWidth(),
boardTop+i*self.squareHeight(), shape)
if self.curPiece.shape() != ShapeEnum.NO_SHAPE:
for i in range(4):
x = self.curx + self.curPiece.xcoord(i)
y = self.cury - self.curPiece.ycoord(i)
self.drawSquare(painter, rect.left() + x *self.squareWidth(),
boardTop+(TetrixBoard.BOARD_HEIGHT-y-1)*self.squareHeight(),
self.curPiece.shape())
def keyPressEvent(self, event):
if (not self.isStarted or self.isPaused or
self.curPiece.shape() == ShapeEnum.NO_SHAPE):
super(TetrixBoard, self).keyPressEvent(event)
return
key = event.key()
if key == QtCore.Qt.Key_Left:
self.tryMove(self.curPiece, self.curx-1, self.cury)
elif key == QtCore.Qt.Key_Right:
self.tryMove(self.curPiece, self.curx+1, self.cury)
elif key == QtCore.Qt.Key_Down:
self.tryMove(self.curPiece.rotatedRight(), self.curx, self.cury)
elif key == QtCore.Qt.Key_Up:
self.tryMove(self.curPiece.rotatedLeft(), self.curx, self.cury)
elif key == QtCore.Qt.Key_Space:
self.dropDown()
elif key == QtCore.Qt.Key_D:
self.oneLineDown()
else:
super(TetrixBoard, self).keyPressEvent(event)
def timerEvent(self, event):
if event.timerId() == self.timer.timerId():
if self.isWaitingAfterLine:
self.isWaitingAfterLine = False
self.newPiece()
self.timer.start(self.timeoutTime(), self)
else:
self.oneLineDown()
else:
super(TetrixBoard, self).timerEvent(event)
def clearBoard(self):
self.board = [ShapeEnum.NO_SHAPE for i in
range(TetrixBoard.BOARD_HEIGHT*TetrixBoard.BOARD_WIDTH)]
def dropDown(self):
dropHeight = 0
newy = self.cury
while newy:
if not self.tryMove(self.curPiece, self.cury, newy - 1):
break
newy -= 1
dropHeight += 1
self.pieceDropped(dropHeight)
def oneLineDown(self):
if not self.tryMove(self.curPiece, self.curx, self.cury-1):
self.pieceDropped(0)
def pieceDropped(self, dropHeight):
for i in range(4):
x = self.curx + self.curPiece.xcoord(i)
y = self.cury - self.curPiece.ycoord(i)
self.setShapeAt(x, y, self.curPiece.shape())
self.numPiecesDropped += 1
if self.numPiecesDropped % 25 == 0:
self.level += 1
self.timer.start(self.timeoutTime(), self)
self.levelChanged.emit(self.level)
self.score += dropHeight + 7
self.scoreChanged.emit(self.score)
self.removeFullLines()
def removeFullLines(self):
numFullLines = 0
for i in range(TetrixBoard.BOARD_HEIGHT -1, -1, -1):
lineIsFull = True
for j in range(TetrixBoard.BOARD_WIDTH):
if self.shapeAt(j, i) == ShapeEnum.NO_SHAPE:
lineIsFull = False
break
if lineIsFull:
numFullLines += 1
for k in range(TetrixBoard.BOARD_HEIGHT-1):
for j in range(TetrixBoard.BOARD_WIDTH):
self.setShapeAt(j, k, self.shapeAt(j, k+1))
for j in range(TetrixBoard.BOARD_WIDTH):
self.setShapeAt(j, TetrixBoard.BOARD_HEIGHT-1,
ShapeEnum.NO_SHAPE)
if numFullLines > 0:
self.numLinesRemoved += numFullLines
self.score += 10 * numFullLines
self.linesRemovedChanged.emit(self.numLinesRemoved)
self.scoreChanged.emit(self.score)
self.timer.start(500, self)
self.isWaitingAfterLine = True
self.curPiece.setShape(ShapeEnum.NO_SHAPE)
self.update()
def newPiece(self):
self.curPiece = self.nxtPiece
self.nxtPiece.setRandomShape()
self.showNextPiece()
self.curx = (TetrixBoard.BOARD_WIDTH // 2) + 1
self.cury = TetrixBoard.BOARD_HEIGHT - 1 + self.curPiece.ymin()
if not self.tryMove(self.curPiece, self.curx, self.cury):
self.curPiece.setShape(ShapeEnum.NO_SHAPE)
self.timer.stop()
self.isStarted = False
def showNextPiece(self):
if self.nxtPieceLabel is not None:
return
dx = self.nxtPiece.xmax() - self.nxtPiece.xmin() + 1
dy = self.nxtPiece.ymax() - self.nxtPiece.ymin() + 1
pixmap = QtGui.QPixmap(
dx*self.squareWidth(), dy*self.squareHeight())
painter = QtGui.QPainter(pixmap)
painter.fillRect(
pixmap.rect(), self.nxtPieceLabel.palette().background())
for i in range(4):
x = self.nxtPiece.xcoord(i) - self.nxtPiece.xmin()
y = self.nxtPiece.ycoord(i) - self.nxtPiece.ymin()
self.drawSquare(painter, x*self.squareWidth(),
y*self.squareHeight(), self.nxtPiece.shape())
self.nxtPieceLabel.setPixmap(pixmap) # FIXME
def tryMove(self, newPiece, newX, newY):
for i in range(4):
x = newX + newPiece.xcoord(i)
y = newY - newPiece.ycoord(i)
if (x < 0 or x >= TetrixBoard.BOARD_WIDTH or y < 0 or
y >= TetrixBoard.BOARD_HEIGHT):
return False
if self.shapeAt(x, y) != ShapeEnum.NO_SHAPE:
return False
self.curPiece = newPiece
self.curx = newX
self.cury = newY
self.update()
return True
def drawSquare(self, painter: QtGui.QPainter, x, y, shape: ShapeEnum):
COLOR_TABLE = [
0x000000, 0xCC6666, 0x66CC66, 0x6666CC,
0xCCCC66, 0xCC66CC, 0x66CCCC, 0xDAAA00
]
color = QtGui.QColor(COLOR_TABLE[shape.value])
painter.fillRect(x+1, y+1, self.squareWidth()-2,
self.squareHeight()-2, color)
painter.setPen(color.lighter())
painter.drawLine(x, y+self.squareHeight()-1, x, y)
painter.drawLine(x, y, x+self.squareWidth()-1, y)
painter.setPen(color.darker())
painter.drawLine(x+1, y+self.squareHeight()-1,
x+self.squareWidth()-1, y+self.squareHeight()-1)
painter.drawLine(x+self.squareWidth()-1, y+self.squareHeight()-1,
x+self.squareWidth()-1, y+1)
class TetrixPiece:
COORDS_TABLES = (
((0, 0), (0, 0), (0, 0), ( 0, 0)),
((0, -1), (0, 0), (-1, 0), (-1, 1)),
((0, -1), (0, 0), ( 1, 0), ( 1, 1)),
((0, -1), (0, 0), ( 0, 1), ( 0, 2)),
((-1, 0), (0, 0), ( 1, 0), ( 0, 1)),
(( 0, 0), (1, 0), ( 0, 1), ( 1, 1)),
((-1, -1), (0, -1), ( 0, 0), ( 0, 1)),
(( 1, -1), (0, -1), ( 0, 0), ( 0, 1))
)
def __init__(self):
self.coords = [[0, 0] for _ in range(4)]
self.pieceShape = ShapeEnum.NO_SHAPE
self.setShape(ShapeEnum.NO_SHAPE)
def shape(self):
return self.pieceShape
def setShape(self, shape: ShapeEnum):
table = TetrixPiece.COORDS_TABLES[shape.value]
for i in range(4):
for j in range(2):
self.coords[i][j] = table[i][j]
self.pieceShape = shape
def setRandomShape(self):
SHAPES = [
ShapeEnum.L_SHAPE, ShapeEnum.LINE_SHAPE, ShapeEnum.MIRRORED_L_SHAPE,
ShapeEnum.S_SHAPE, ShapeEnum.SQUARE_SHAPE, ShapeEnum.T_SHAPE,
ShapeEnum.Z_SHAPE
]
shape = random.choice(SHAPES)
self.setShape(shape)
def xcoord(self, index):
return self.coords[index][0]
def ycoord(self, index):
return self.coords[index][1]
def setXCoord(self, index, x):
self.coords[index][0] = x
def setYCoord(self, index, y):
self.coords[index][1] = y
def xmin(self):
vmin = self.coords[0][0]
for i in range(4):
vmin = min(vmin, self.coords[i][0])
return vmin
def xmax(self):
vmax = self.coords[0][0]
for i in range(4):
vmax = max(vmax, self.coords[i][0])
return vmax
def ymin(self):
vmin = self.coords[0][1]
for i in range(4):
vmin = min(vmin, self.coords[i][1])
return vmin
def ymax(self):
vmax = self.coords[0][1]
for i in range(4):
vmax = max(vmax, self.coords[i][1])
return vmax
def rotatedLeft(self):
if self.pieceShape == ShapeEnum.SQUARE_SHAPE:
return self
result = TetrixPiece()
result.pieceShape = self.pieceShape
for i in range(4):
result.setXCoord(i, self.ycoord(i))
result.setYCoord(i, -self.xcoord(i))
return result
def rotatedRight(self):
if self.pieceShape == ShapeEnum.SQUARE_SHAPE:
return self
result = TetrixPiece()
result.pieceShape = self.pieceShape
for i in range(4):
result.setXCoord(i, -self.ycoord(i))
result.setYCoord(i, self.xcoord(i))
return result
if __name__ == "__main__":
import sys
import time
app = QtWidgets.QApplication(sys.argv)
tetrix = TetrixWindow()
tetrix.show()
random.seed(time.time())
sys.exit(app.exec())
|
[
"PySide6.QtGui.QPainter",
"PySide6.QtCore.QBasicTimer",
"PySide6.QtGui.QColor",
"random.choice",
"PySide6.QtWidgets.QPushButton",
"PySide6.QtWidgets.QLabel",
"time.time",
"PySide6.QtWidgets.QApplication",
"PySide6.QtCore.Signal",
"PySide6.QtWidgets.QGridLayout",
"PySide6.QtWidgets.QLCDNumber"
] |
[((4476, 4494), 'PySide6.QtCore.Signal', 'QtCore.Signal', (['int'], {}), '(int)\n', (4489, 4494), False, 'from PySide6 import QtCore\n'), ((4514, 4532), 'PySide6.QtCore.Signal', 'QtCore.Signal', (['int'], {}), '(int)\n', (4527, 4532), False, 'from PySide6 import QtCore\n'), ((4559, 4577), 'PySide6.QtCore.Signal', 'QtCore.Signal', (['int'], {}), '(int)\n', (4572, 4577), False, 'from PySide6 import QtCore\n'), ((16948, 16980), 'PySide6.QtWidgets.QApplication', 'QtWidgets.QApplication', (['sys.argv'], {}), '(sys.argv)\n', (16970, 16980), False, 'from PySide6 import QtWidgets\n'), ((2289, 2307), 'PySide6.QtWidgets.QLabel', 'QtWidgets.QLabel', ([], {}), '()\n', (2305, 2307), False, 'from PySide6 import QtWidgets\n'), ((2535, 2558), 'PySide6.QtWidgets.QLCDNumber', 'QtWidgets.QLCDNumber', (['(5)'], {}), '(5)\n', (2555, 2558), False, 'from PySide6 import QtWidgets\n'), ((2640, 2663), 'PySide6.QtWidgets.QLCDNumber', 'QtWidgets.QLCDNumber', (['(2)'], {}), '(2)\n', (2660, 2663), False, 'from PySide6 import QtWidgets\n'), ((2745, 2768), 'PySide6.QtWidgets.QLCDNumber', 'QtWidgets.QLCDNumber', (['(5)'], {}), '(5)\n', (2765, 2768), False, 'from PySide6 import QtWidgets\n'), ((2851, 2882), 'PySide6.QtWidgets.QPushButton', 'QtWidgets.QPushButton', (['"""&Start"""'], {}), "('&Start')\n", (2872, 2882), False, 'from PySide6 import QtWidgets\n'), ((2952, 2982), 'PySide6.QtWidgets.QPushButton', 'QtWidgets.QPushButton', (['"""&Quit"""'], {}), "('&Quit')\n", (2973, 2982), False, 'from PySide6 import QtWidgets\n'), ((3052, 3083), 'PySide6.QtWidgets.QPushButton', 'QtWidgets.QPushButton', (['"""&Pause"""'], {}), "('&Pause')\n", (3073, 3083), False, 'from PySide6 import QtWidgets\n'), ((3480, 3503), 'PySide6.QtWidgets.QGridLayout', 'QtWidgets.QGridLayout', ([], {}), '()\n', (3501, 3503), False, 'from PySide6 import QtWidgets\n'), ((4233, 4255), 'PySide6.QtWidgets.QLabel', 'QtWidgets.QLabel', (['text'], {}), '(text)\n', (4249, 4255), False, 'from PySide6 import QtWidgets\n'), ((4688, 4708), 
'PySide6.QtCore.QBasicTimer', 'QtCore.QBasicTimer', ([], {}), '()\n', (4706, 4708), False, 'from PySide6 import QtCore\n'), ((7050, 7070), 'PySide6.QtGui.QPainter', 'QtGui.QPainter', (['self'], {}), '(self)\n', (7064, 7070), False, 'from PySide6 import QtGui\n'), ((12404, 12426), 'PySide6.QtGui.QPainter', 'QtGui.QPainter', (['pixmap'], {}), '(pixmap)\n', (12418, 12426), False, 'from PySide6 import QtGui\n'), ((13596, 13634), 'PySide6.QtGui.QColor', 'QtGui.QColor', (['COLOR_TABLE[shape.value]'], {}), '(COLOR_TABLE[shape.value])\n', (13608, 13634), False, 'from PySide6 import QtGui\n'), ((15299, 15320), 'random.choice', 'random.choice', (['SHAPES'], {}), '(SHAPES)\n', (15312, 15320), False, 'import random\n'), ((17043, 17054), 'time.time', 'time.time', ([], {}), '()\n', (17052, 17054), False, 'import time\n')]
|
import csv
from baseline.base_utils import INTENTION_TAGS_WITH_SPACE
''' Convert .tsv to .csv without header for each intent
Format:
example1 intent1
example2 intent1
...
exampleN intentM
'''
dataset_arr = ['chatbot', 'snips']
dataset_fullname_arr = ['ChatbotCorpus', 'snips']
for tts_stt in ["gtts_witai", "macsay_witai"]:
for dataset, dataset_fullname in zip(dataset_arr, dataset_fullname_arr):
tags = INTENTION_TAGS_WITH_SPACE[dataset_fullname]
for type in ['test', 'train']:
# Data dir path
data_dir_path = "../../data/stterror_data/"
data_dir_path += "{}/{}/{}.tsv".format(dataset.lower(), tts_stt, type)
tsv_file = open(data_dir_path)
reader = csv.reader(tsv_file, delimiter='\t')
# Write csv
results_dir_path = data_dir_path.split('.tsv')[0] + "_semantic_hashing.csv"
file_test = open(results_dir_path, 'wt')
dict_writer = csv.writer(file_test, delimiter='\t')
row_count = 0
sentences, intents = [], []
for row in reader:
if row_count != 0:
dict_writer.writerow([row[0], tags[row[1]]])
row_count += 1
|
[
"csv.reader",
"csv.writer"
] |
[((735, 771), 'csv.reader', 'csv.reader', (['tsv_file'], {'delimiter': '"""\t"""'}), "(tsv_file, delimiter='\\t')\n", (745, 771), False, 'import csv\n'), ((964, 1001), 'csv.writer', 'csv.writer', (['file_test'], {'delimiter': '"""\t"""'}), "(file_test, delimiter='\\t')\n", (974, 1001), False, 'import csv\n')]
|
# Generated by Django 3.0.6 on 2020-05-17 00:52
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('shelf', '0001_initial'),
]
operations = [
migrations.AlterField(
model_name='clipping',
name='kind_of_clipping',
field=models.CharField(choices=[('Highlight', 'Highlight'), ('Note', 'Note'), ('Bookmark', 'Bookmark')], default='Highlight', max_length=10),
),
migrations.AlterField(
model_name='clipping',
name='message',
field=models.TextField(),
),
]
|
[
"django.db.models.CharField",
"django.db.models.TextField"
] |
[((335, 474), 'django.db.models.CharField', 'models.CharField', ([], {'choices': "[('Highlight', 'Highlight'), ('Note', 'Note'), ('Bookmark', 'Bookmark')]", 'default': '"""Highlight"""', 'max_length': '(10)'}), "(choices=[('Highlight', 'Highlight'), ('Note', 'Note'), (\n 'Bookmark', 'Bookmark')], default='Highlight', max_length=10)\n", (351, 474), False, 'from django.db import migrations, models\n'), ((594, 612), 'django.db.models.TextField', 'models.TextField', ([], {}), '()\n', (610, 612), False, 'from django.db import migrations, models\n')]
|
"""
Peasauce - interactive disassembler
Copyright (C) 2012-2017 <NAME>
Licensed using the MIT license.
This file is intended to allow compilation of assembler instructions and
from the compiled binary, the ability to isolate the machine code those
instructions produce.
The targeted assembler is vasm, whose home page is located here:
http://sun.hasenbraten.de/vasm/index.php
As the author uses Windows it is the only platform currently supported.
Anyone wanting to add support for their own platform will need to do the
following steps:
1. Compile vasm for a combination of cpu and syntax.
On Windows with Visual Studio 2015 installed, this is done with the
following steps:
a) Download and extract the vasm source code from the link above.
b) Open a developer command prompt for Visual Studio.
c) In the command window, enter the vasm source code directory.
d) Type a variant of 'nmake -f Makefile.Win32 CPU=m68k SYNTAX=mot'.
e) Observe 'vasm<cpu>_<syntax>_win32.exe' now exists, where <cpu>
and <syntax> are whatever you specified to 'nmake'.
2. At this point you will have an executable. As illustrated in the
previous step, on Windows for the m68k cpu and Motorola syntax,
this will be named 'vasmm68k_mot_win32.exe'. This module currently
only knows to look in a particular directory for files with a name
matching 'vasm*_win32.exe'.
To add support for your platform:
a) Modify FILE_NAME_PREFIX and FILE_NAME_SUFFIX so that only vasm
executables matching your file name will be found.
b) Add some code to look for commands in your path matching the
resulting pattern - maybe call some shell command to do this.
c) Process the results the same way Windows does.
3. Commit the code to your github fork of the project and do a pull
request to the author.
"""
import glob
import logging
import os
import subprocess
import sys
import StringIO
import tempfile
from . import constants
logger = logging.getLogger("tool-assembler-vasm")
def get_top_level_path():
# path of the python script being run.
#path = sys.path[0]
##if not len(path):
path = os.getcwd()
return path
class BaseAssembler(object):
_cpu_id = constants.CPU_UNKNOWN
_syntax_id = constants.ASM_SYNTAX_UNKNOWN
_output_format_id = constants.OUTPUT_FORMAT_UNKNOWN
_option_names = None
_supported_cpus = None
def set_cpu(self, cpu_id):
self._cpu_id = cpu_id
def set_syntax(self, syntax_id):
self._syntax_id = syntax_id
def set_output_format(self, output_format_id):
_lookup_option_value(constants.OPTIONS_FILE_OUTPUT, output_format_id, check=True)
self._output_format_id = output_format_id
def _lookup_option_value(self, key_id, value_id, check=False):
try:
return self._option_names[key_id][value_id]
except KeyError:
if check:
return None
raise
def compile_text(self, text, cpu_id, syntax_id):
""" Take assembly language instructions and return the corresponding machine code. """
assembler_path = self._supported_cpus.get((cpu_id, syntax_id), None)
if assembler_path is None:
cpu_name = constants.get_cpu_name_by_id(cpu_id)
syntax_name = constants.get_syntax_name_by_id(syntax_id)
logger.error("cpu %s and syntax %s not unsupported", cpu_name, syntax_name)
return
# Work out the output file path.
output_path = tempfile.gettempdir()
output_file_name = self._option_names[constants.OPTIONS_STANDARD][constants.OPTION_DEFAULT_FILE_NAME]
output_file_path = os.path.join(output_path, output_file_name)
# Create a temporary file for the text to be assembled.
input_file_path = tempfile.mktemp()
input_file = open(input_file_path, "w")
input_file.write(text)
input_file.close()
LOG_STDOUT = True
LOG_STDERR = True
stdout = tempfile.NamedTemporaryFile()
stderr = tempfile.NamedTemporaryFile()
current_path = os.getcwd()
try:
input_file_path = input_file.name
os.chdir(output_path)
if os.path.exists(output_file_path):
os.remove(output_file_path)
call_args = [ assembler_path, input_file_path ]
option_list = [
(constants.OPTIONS_STANDARD, constants.OPTION_DISABLE_OPTIMISATIONS),
(constants.OPTIONS_FILE_OUTPUT, constants.OUTPUT_FORMAT_BINARY),
(constants.OPTIONS_CPU, cpu_id),
]
for k1, k2 in option_list:
flag_string = self._lookup_option_value(k1, k2, check=True)
if flag_string is not None:
call_args.append(flag_string)
logger.debug("command line arguments: %s", " ".join(call_args[2:]))
result = subprocess.call(call_args, stdout=stdout, stderr=stderr)
if LOG_STDOUT:
stdout.flush()
stdout.seek(0, os.SEEK_SET)
stdout_text = stdout.read()
if result == 0:
ret = open(output_file_path, "rb").read()
logger.debug("success: binary file of size %d bytes", len(ret))
return ret
else:
if LOG_STDERR:
stderr.flush()
stderr.seek(0, os.SEEK_SET)
stderr_text = stderr.read()
logger.error("assembler failure: standard error contents follow")
lines = [ line for line in stderr_text.split(os.linesep) if len(line) ]
for line in lines:
logger.error("assembler failure: %s", line)
else:
logger.error("assembler returned failure result")
finally:
stdout.close()
stderr.close()
os.remove(input_file_path)
os.chdir(current_path)
LOCAL_BINARIES_NAME = "local_binaries"
class Assembler(BaseAssembler):
_option_names = {
constants.OPTIONS_STANDARD: {
constants.OPTION_DISABLE_OPTIMISATIONS: "-no-opt",
constants.OPTION_DEFAULT_FILE_NAME: "a.out",
},
constants.OPTIONS_CPU: {
constants.CPU_MC60000: "-m68000",
constants.CPU_MC60010: "-m68010",
constants.CPU_MC60020: "-m68020",
constants.CPU_MC60030: "-m68030",
constants.CPU_MC60040: "-m68040",
constants.CPU_MC60060: "-m68060",
},
constants.OPTIONS_FILE_OUTPUT: {
constants.OUTPUT_FORMAT_BINARY: "-Fbin",
constants.OUTPUT_FORMAT_ATARIST_TOS: "-Ftos",
constants.OUTPUT_FORMAT_AMIGA_HUNK: "-Fhunk",
},
}
""" These will be populated from the naming of located executables. """
_supported_cpus = {
}
FILE_NAME_PREFIX = "vasm"
FILE_NAME_SUFFIX = "_win32.exe"
def __init__(self):
if os.name != "nt":
logger.warning("vasm only supported on Windows (pull requests accepted)")
return
# A top-level directory in the .
path = get_top_level_path()
local_binaries_path = os.path.join(path, LOCAL_BINARIES_NAME)
if not os.path.exists(local_binaries_path):
logger.warning("Top-level '%s' directory missing (place vasm binaries here)", LOCAL_BINARIES_NAME)
return
pattern_prefix = self.FILE_NAME_PREFIX
# TODO: Handle suffixes for other platforms.
pattern_suffix = self.FILE_NAME_SUFFIX
match_pattern = os.path.join(local_binaries_path, pattern_prefix +"*"+ pattern_suffix)
matches = glob.glob(match_pattern)
if not len(matches):
logger.warning("Unable to locate vasm executables (place vasm binaries in top-level '%s' directory", LOCAL_BINARIES_NAME)
return
for matched_file_path in matches:
matched_dir_path, matched_file_name = os.path.split(matched_file_path)
unique_substring = matched_file_name[len(pattern_prefix):-len(pattern_suffix)]
cpu_name, syntax_name = unique_substring.split("_")
syntax_id = None
if syntax_name == "mot":
syntax_id = constants.ASM_SYNTAX_MOTOROLA
else:
logger.warning("vasm executable '%s' has unknown syntax, skipping..", matched_file_path)
continue
if cpu_name == "m68k" and syntax_id is not None:
self._supported_cpus[(constants.CPU_MC60000, syntax_id)] = matched_file_path
self._supported_cpus[(constants.CPU_MC60010, syntax_id)] = matched_file_path
self._supported_cpus[(constants.CPU_MC60020, syntax_id)] = matched_file_path
self._supported_cpus[(constants.CPU_MC60030, syntax_id)] = matched_file_path
self._supported_cpus[(constants.CPU_MC60040, syntax_id)] = matched_file_path
self._supported_cpus[(constants.CPU_MC60060, syntax_id)] = matched_file_path
logger.debug("Detected %d supported cpu(s) for vasm assembler", len(self._supported_cpus))
|
[
"tempfile.NamedTemporaryFile",
"os.remove",
"os.path.join",
"os.getcwd",
"tempfile.gettempdir",
"os.path.exists",
"subprocess.call",
"glob.glob",
"os.path.split",
"tempfile.mktemp",
"os.chdir",
"logging.getLogger"
] |
[((2201, 2241), 'logging.getLogger', 'logging.getLogger', (['"""tool-assembler-vasm"""'], {}), "('tool-assembler-vasm')\n", (2218, 2241), False, 'import logging\n'), ((2372, 2383), 'os.getcwd', 'os.getcwd', ([], {}), '()\n', (2381, 2383), False, 'import os\n'), ((3741, 3762), 'tempfile.gettempdir', 'tempfile.gettempdir', ([], {}), '()\n', (3760, 3762), False, 'import tempfile\n'), ((3900, 3943), 'os.path.join', 'os.path.join', (['output_path', 'output_file_name'], {}), '(output_path, output_file_name)\n', (3912, 3943), False, 'import os\n'), ((4035, 4052), 'tempfile.mktemp', 'tempfile.mktemp', ([], {}), '()\n', (4050, 4052), False, 'import tempfile\n'), ((4230, 4259), 'tempfile.NamedTemporaryFile', 'tempfile.NamedTemporaryFile', ([], {}), '()\n', (4257, 4259), False, 'import tempfile\n'), ((4277, 4306), 'tempfile.NamedTemporaryFile', 'tempfile.NamedTemporaryFile', ([], {}), '()\n', (4304, 4306), False, 'import tempfile\n'), ((4331, 4342), 'os.getcwd', 'os.getcwd', ([], {}), '()\n', (4340, 4342), False, 'import os\n'), ((7514, 7553), 'os.path.join', 'os.path.join', (['path', 'LOCAL_BINARIES_NAME'], {}), '(path, LOCAL_BINARIES_NAME)\n', (7526, 7553), False, 'import os\n'), ((7908, 7980), 'os.path.join', 'os.path.join', (['local_binaries_path', "(pattern_prefix + '*' + pattern_suffix)"], {}), "(local_binaries_path, pattern_prefix + '*' + pattern_suffix)\n", (7920, 7980), False, 'import os\n'), ((7997, 8021), 'glob.glob', 'glob.glob', (['match_pattern'], {}), '(match_pattern)\n', (8006, 8021), False, 'import glob\n'), ((4415, 4436), 'os.chdir', 'os.chdir', (['output_path'], {}), '(output_path)\n', (4423, 4436), False, 'import os\n'), ((4452, 4484), 'os.path.exists', 'os.path.exists', (['output_file_path'], {}), '(output_file_path)\n', (4466, 4484), False, 'import os\n'), ((5161, 5217), 'subprocess.call', 'subprocess.call', (['call_args'], {'stdout': 'stdout', 'stderr': 'stderr'}), '(call_args, stdout=stdout, stderr=stderr)\n', (5176, 5217), False, 'import 
subprocess\n'), ((6200, 6226), 'os.remove', 'os.remove', (['input_file_path'], {}), '(input_file_path)\n', (6209, 6226), False, 'import os\n'), ((6239, 6261), 'os.chdir', 'os.chdir', (['current_path'], {}), '(current_path)\n', (6247, 6261), False, 'import os\n'), ((7569, 7604), 'os.path.exists', 'os.path.exists', (['local_binaries_path'], {}), '(local_binaries_path)\n', (7583, 7604), False, 'import os\n'), ((8297, 8329), 'os.path.split', 'os.path.split', (['matched_file_path'], {}), '(matched_file_path)\n', (8310, 8329), False, 'import os\n'), ((4502, 4529), 'os.remove', 'os.remove', (['output_file_path'], {}), '(output_file_path)\n', (4511, 4529), False, 'import os\n')]
|
import matplotlib.pyplot as plt
import numpy as np
import random
from collections import namedtuple
def plot_winsratio(
wins: list,
title: str,
start_idx: int = 0,
wsize_mean: int = 100,
wsize_means_mean: int = 1000,
opponent_update_idxs=None,
):
"""Winrate plotting function, plots both a the WR over the last wsize_mean episodes and
a WR mean over the last wsize_means_mean wsize_mean episodes
Args:
wins (list): Wins vector. Contains 0 or 1 for each loss or victory
title (str): Title to use in the plot
start_idx (int, optional): Start for the x labels. Defaults to 0.
wsize_mean (int, optional): Window size to compute the Winrate. Defaults to 100.
wsize_means_mean (int, optional): Window size to compute the mean over the winrates. Defaults to 1000.
opponent_updates_idxs (list, optional): List of indexes where the update of the opponent state dict has happened in self play. Default None.
"""
if len(wins) >= wsize_mean:
# Take 100 episode averages
means = np.cumsum(wins, dtype=float)
means[wsize_mean:] = means[wsize_mean:] - means[:-wsize_mean]
means = means[wsize_mean - 1 :] / wsize_mean
idxs = [i + start_idx + wsize_mean - 1 for i in range(len(means))]
plt.plot(idxs, means, label=f"Running {wsize_mean} average WR")
# Take 20 episode averages of the 100 running average
if len(means) >= wsize_means_mean:
means_mean = np.cumsum(means)
means_mean[wsize_means_mean:] = (
means_mean[wsize_means_mean:] - means_mean[:-wsize_means_mean]
)
means_mean = means_mean[wsize_means_mean - 1 :] / wsize_means_mean
idxs_mean = [
i + start_idx + wsize_mean + wsize_means_mean - 2
for i in range(len(means_mean))
]
plt.plot(
idxs_mean,
means_mean,
label=f"Running {wsize_mean} average WR mean",
)
# add vertical lines for opponent update during self play
if opponent_update_idxs != None:
for x in opponent_update_idxs:
if x >= wsize_mean:
plt.axvline(x=x, c="red")
plt.legend()
plt.title(f"Training {title}")
plt.savefig("imgs/train_ai.png")
plt.close()
def rgb2grayscale(rgb: np.ndarray) -> np.ndarray:
"""Transform RGB image to grayscale
Args:
rgb (np.ndarray): RGB image to transform
Returns:
np.ndarray: Grayscale image
"""
# transform to rgb
r, g, b = rgb[:, :, 0], rgb[:, :, 1], rgb[:, :, 2]
grayscale = 0.2989 * r + 0.5870 * g + 0.1140 * b
return grayscale
Transition = namedtuple("Transition", ("ob", "action", "next_ob", "rew", "done"))
class ReplayMemory(object):
"""
Replay memory used for experience replay.
It stores transitions.
"""
def __init__(
self,
memory_capacity: int,
train_buffer_capacity: int,
test_buffer_capacity: int,
) -> None:
"""Initialization of the replay memory
Args:
memory_capacity (int): Maximum number of elements to fit in the memory
train_buffer_capacity (int): Maximum number of elements to fit in the train buffer
test_buffer_capacity (int): Maximum number of elements to fit in the test buffer
"""
self.memory_capacity = memory_capacity
self.train_buffer_capacity = train_buffer_capacity
self.test_buffer_capacity = test_buffer_capacity
self.memory = []
self.train_buffer = []
self.test_buffer = []
self.memory_position = 0
def push_to_memory(self, *args) -> None:
"""Save a transition to memory"""
if len(self.memory) < self.memory_capacity:
self.memory.append(None)
self.memory[self.memory_position] = Transition(*args)
self.memory_position = (self.memory_position + 1) % self.memory_capacity
def push_to_train_buffer(self, *args) -> None:
"""Save a transition to train buffer"""
self.train_buffer.append(Transition(*args))
if len(self.train_buffer) > self.train_buffer_capacity:
raise Exception("Error: capacity of the train_buffer exceded")
def push_to_test_buffer(self, ob: np.ndarray) -> None:
"""Save an observation to test buffer
Args:
ob (np.ndarray): Observation/state to push into the buffer
"""
self.test_buffer.append(ob)
if len(self.test_buffer) > self.test_buffer_capacity:
raise Exception("Error: capacity of the test_buffer exceded")
def sample(self, batch_size: int) -> np.ndarray:
"""Sample batch_size random elements from memory
Args:
batch_size (int): Number of elements to sample
Returns:
np.ndarray: Sampled elements
"""
return random.sample(self.memory, batch_size)
def __len__(self) -> int:
"""Overwrite of the len function for the object
Returns:
int: Length of the memory
"""
return len(self.memory)
|
[
"matplotlib.pyplot.title",
"matplotlib.pyplot.axvline",
"matplotlib.pyplot.plot",
"random.sample",
"matplotlib.pyplot.close",
"matplotlib.pyplot.legend",
"numpy.cumsum",
"collections.namedtuple",
"matplotlib.pyplot.savefig"
] |
[((2784, 2852), 'collections.namedtuple', 'namedtuple', (['"""Transition"""', "('ob', 'action', 'next_ob', 'rew', 'done')"], {}), "('Transition', ('ob', 'action', 'next_ob', 'rew', 'done'))\n", (2794, 2852), False, 'from collections import namedtuple\n'), ((1078, 1106), 'numpy.cumsum', 'np.cumsum', (['wins'], {'dtype': 'float'}), '(wins, dtype=float)\n', (1087, 1106), True, 'import numpy as np\n'), ((1313, 1376), 'matplotlib.pyplot.plot', 'plt.plot', (['idxs', 'means'], {'label': 'f"""Running {wsize_mean} average WR"""'}), "(idxs, means, label=f'Running {wsize_mean} average WR')\n", (1321, 1376), True, 'import matplotlib.pyplot as plt\n'), ((2293, 2305), 'matplotlib.pyplot.legend', 'plt.legend', ([], {}), '()\n', (2303, 2305), True, 'import matplotlib.pyplot as plt\n'), ((2314, 2344), 'matplotlib.pyplot.title', 'plt.title', (['f"""Training {title}"""'], {}), "(f'Training {title}')\n", (2323, 2344), True, 'import matplotlib.pyplot as plt\n'), ((2353, 2385), 'matplotlib.pyplot.savefig', 'plt.savefig', (['"""imgs/train_ai.png"""'], {}), "('imgs/train_ai.png')\n", (2364, 2385), True, 'import matplotlib.pyplot as plt\n'), ((2394, 2405), 'matplotlib.pyplot.close', 'plt.close', ([], {}), '()\n', (2403, 2405), True, 'import matplotlib.pyplot as plt\n'), ((5006, 5044), 'random.sample', 'random.sample', (['self.memory', 'batch_size'], {}), '(self.memory, batch_size)\n', (5019, 5044), False, 'import random\n'), ((1508, 1524), 'numpy.cumsum', 'np.cumsum', (['means'], {}), '(means)\n', (1517, 1524), True, 'import numpy as np\n'), ((1909, 1987), 'matplotlib.pyplot.plot', 'plt.plot', (['idxs_mean', 'means_mean'], {'label': 'f"""Running {wsize_mean} average WR mean"""'}), "(idxs_mean, means_mean, label=f'Running {wsize_mean} average WR mean')\n", (1917, 1987), True, 'import matplotlib.pyplot as plt\n'), ((2258, 2283), 'matplotlib.pyplot.axvline', 'plt.axvline', ([], {'x': 'x', 'c': '"""red"""'}), "(x=x, c='red')\n", (2269, 2283), True, 'import matplotlib.pyplot as plt\n')]
|
import mp3treesim as mp3
import argparse
def main():
parser = argparse.ArgumentParser(
description='mp3treesim', # TODO: fix this
add_help=True)
parser.add_argument('trees', metavar='TREE', nargs=2,
help='Paths to the trees')
group_mode = parser.add_mutually_exclusive_group()
group_mode.add_argument('-i', action='store_true', default=False,
help='Run MP3-treesim in Intersection mode.')
group_mode.add_argument('-u', action='store_true', default=False,
help='Run MP3-treesim in Union mode.')
group_mode.add_argument('-g', action='store_true', default=False,
help='Run MP3-treesim in Geometric mode.')
args = parser.parse_args()
if args.i:
mode = 'intersection'
elif args.u:
mode = 'union'
elif args.g:
mode = 'geometric'
else:
mode = 'sigmoid'
tree1 = mp3.read_dotfile(args.trees[0])
tree2 = mp3.read_dotfile(args.trees[1])
score = mp3.similarity(tree1, tree2, mode=mode)
print(score)
if __name__ == "__main__":
main()
|
[
"mp3treesim.similarity",
"mp3treesim.read_dotfile",
"argparse.ArgumentParser"
] |
[((68, 132), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'description': '"""mp3treesim"""', 'add_help': '(True)'}), "(description='mp3treesim', add_help=True)\n", (91, 132), False, 'import argparse\n'), ((966, 997), 'mp3treesim.read_dotfile', 'mp3.read_dotfile', (['args.trees[0]'], {}), '(args.trees[0])\n', (982, 997), True, 'import mp3treesim as mp3\n'), ((1010, 1041), 'mp3treesim.read_dotfile', 'mp3.read_dotfile', (['args.trees[1]'], {}), '(args.trees[1])\n', (1026, 1041), True, 'import mp3treesim as mp3\n'), ((1055, 1094), 'mp3treesim.similarity', 'mp3.similarity', (['tree1', 'tree2'], {'mode': 'mode'}), '(tree1, tree2, mode=mode)\n', (1069, 1094), True, 'import mp3treesim as mp3\n')]
|
# Generated by Django 2.2.5 on 2020-07-04 00:22
from django.db import migrations, models
import uuid
class Migration(migrations.Migration):
dependencies = [
('system', '0018_auto_20200703_2354'),
]
operations = [
migrations.AddField(
model_name='role',
name='code',
field=models.CharField(default='', max_length=20),
),
migrations.AlterField(
model_name='userprofile',
name='user_secret',
field=models.CharField(default=uuid.UUID('f51c894d-a532-4ddc-b3e9-2847d4b2e7ae'), max_length=500, verbose_name='用户JWT秘钥'),
),
]
|
[
"django.db.models.CharField",
"uuid.UUID"
] |
[((341, 384), 'django.db.models.CharField', 'models.CharField', ([], {'default': '""""""', 'max_length': '(20)'}), "(default='', max_length=20)\n", (357, 384), False, 'from django.db import migrations, models\n'), ((541, 590), 'uuid.UUID', 'uuid.UUID', (['"""f51c894d-a532-4ddc-b3e9-2847d4b2e7ae"""'], {}), "('f51c894d-a532-4ddc-b3e9-2847d4b2e7ae')\n", (550, 590), False, 'import uuid\n')]
|
from pathlib import Path
_ = lambda x : x
addon_info = {
"addon_name" : "unspokenPy3",
"addon_summary" : _("Unspoken"),
"addon_description" : _("""Removes names label from object like, link, button. It plays different sounds instead of labels."""),
"addon_version" : "0.4",
"addon_author" : u", Sean (ported python3): <EMAIL>, Camlorn (main developer): <EMAIL>, Bryan Smart: NoMail",
"addon_url" : "https://github.com/SeanTolstoyevski/unspokenPy3/releases",
"addon_docFileName" : "readme.html",
"addon_minimumNVDAVersion" : 2019.3,
"addon_lastTestedNVDAVersion" : 2022.3,
"addon_updateChannel" : None,
}
pythonSources = list(Path.cwd().joinpath("addon", "globalPlugins").rglob("*.py"))
i18nSources = pythonSources + ["buildVars.py", "addon\\installTasks.py"]
excludedFiles = []
|
[
"pathlib.Path.cwd"
] |
[((643, 653), 'pathlib.Path.cwd', 'Path.cwd', ([], {}), '()\n', (651, 653), False, 'from pathlib import Path\n')]
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
process_activity_logs.py
Script to process activity_log records.
* Create activity_log records from tentative_activity_log records.
* Delete tentative_activity_log records converted thus.
"""
import sys
import traceback
from optparse import OptionParser
from applications.zcomx.modules.activity_logs import \
ActivityLog, \
CompletedTentativeLogSet, \
MINIMUM_AGE_TO_LOG_IN_SECONDS, \
PageAddedTentativeLogSet, \
TentativeLogSet
from applications.zcomx.modules.logger import set_cli_logging
VERSION = 'Version 0.1'
def man_page():
"""Print manual page-like help"""
print("""
USAGE
process_activity_logs.py [OPTIONS]
OPTIONS
-h, --help
Print a brief help.
--man
Print man page-like help.
-m, --minimum-age
Tentative activity log records must have this minimum age in order
to be processed. Age is in seconds. Default: {m}
-v, --verbose
Print information messages to stdout.
--vv,
More verbose. Print debug messages to stdout.
""".format(m=MINIMUM_AGE_TO_LOG_IN_SECONDS))
def main():
"""Main processing."""
usage = '%prog [options]'
parser = OptionParser(usage=usage, version=VERSION)
parser.add_option(
'--man',
action='store_true', dest='man', default=False,
help='Display manual page-like help and exit.',
)
parser.add_option(
'-m', '--minimum-age', type='int',
dest='minimum_age', default=MINIMUM_AGE_TO_LOG_IN_SECONDS,
help='Minimum age of tentative log to process.',
)
parser.add_option(
'-v', '--verbose',
action='store_true', dest='verbose', default=False,
help='Print messages to stdout.',
)
parser.add_option(
'--vv',
action='store_true', dest='vv', default=False,
help='More verbose.',
)
(options, unused_args) = parser.parse_args()
if options.man:
man_page()
quit(0)
set_cli_logging(LOG, options.verbose, options.vv)
LOG.debug('Starting')
logs = db(db.tentative_activity_log).select(
db.tentative_activity_log.book_id,
groupby=db.tentative_activity_log.book_id,
)
for log in logs:
LOG.debug('Checking book id: %s', log.book_id)
filters = {'book_id': log.book_id}
tentative_log_set = TentativeLogSet.load(filters=filters)
youngest_log = tentative_log_set.youngest()
age = youngest_log.age()
if age.total_seconds() < options.minimum_age:
LOG.debug(
'Tentative log records too young, book_id: %s', log.book_id)
continue
LOG.debug('Logging book id: %s', log.book_id)
log_set_classes = [
PageAddedTentativeLogSet,
CompletedTentativeLogSet,
]
for log_set_class in log_set_classes:
log_set = log_set_class.load(filters=filters)
activity_log_data = log_set.as_activity_log()
if activity_log_data:
activity_log = ActivityLog.from_add(activity_log_data)
LOG.debug(
'Created activity_log action: %s',
activity_log.action
)
for tentative_activity_log in tentative_log_set.tentative_records:
tentative_activity_log.delete()
LOG.debug('Done')
if __name__ == '__main__':
# pylint: disable=broad-except
try:
main()
except SystemExit:
pass
except Exception:
traceback.print_exc(file=sys.stderr)
exit(1)
|
[
"traceback.print_exc",
"optparse.OptionParser",
"applications.zcomx.modules.activity_logs.ActivityLog.from_add",
"applications.zcomx.modules.logger.set_cli_logging",
"applications.zcomx.modules.activity_logs.TentativeLogSet.load"
] |
[((1220, 1262), 'optparse.OptionParser', 'OptionParser', ([], {'usage': 'usage', 'version': 'VERSION'}), '(usage=usage, version=VERSION)\n', (1232, 1262), False, 'from optparse import OptionParser\n'), ((2017, 2066), 'applications.zcomx.modules.logger.set_cli_logging', 'set_cli_logging', (['LOG', 'options.verbose', 'options.vv'], {}), '(LOG, options.verbose, options.vv)\n', (2032, 2066), False, 'from applications.zcomx.modules.logger import set_cli_logging\n'), ((2390, 2427), 'applications.zcomx.modules.activity_logs.TentativeLogSet.load', 'TentativeLogSet.load', ([], {'filters': 'filters'}), '(filters=filters)\n', (2410, 2427), False, 'from applications.zcomx.modules.activity_logs import ActivityLog, CompletedTentativeLogSet, MINIMUM_AGE_TO_LOG_IN_SECONDS, PageAddedTentativeLogSet, TentativeLogSet\n'), ((3561, 3597), 'traceback.print_exc', 'traceback.print_exc', ([], {'file': 'sys.stderr'}), '(file=sys.stderr)\n', (3580, 3597), False, 'import traceback\n'), ((3084, 3123), 'applications.zcomx.modules.activity_logs.ActivityLog.from_add', 'ActivityLog.from_add', (['activity_log_data'], {}), '(activity_log_data)\n', (3104, 3123), False, 'from applications.zcomx.modules.activity_logs import ActivityLog, CompletedTentativeLogSet, MINIMUM_AGE_TO_LOG_IN_SECONDS, PageAddedTentativeLogSet, TentativeLogSet\n')]
|
import sys
import os
from loguru import logger
from config_handler import get_mongo_config
from docker_service import create_archive, get_docker_container
from exceptions import AppError
class MongoImportRunner:
container_dest_dir = "/home"
def __call__(self, source, destination):
container = get_docker_container("mongo")
tar_stream = create_archive(source)
success = container.put_archive(self.container_dest_dir, tar_stream)
if not success:
raise AppError(f"Putting '{source}' file to 'mongo' container was failed")
container_filepath = f"{self.container_dest_dir}/{os.path.basename(source)}"
import_cmd = f"mongoimport --username={{mongo_user}} --password={{<PASSWORD>}} --authenticationDatabase=test_database --host={{mongo_host}} --port={{mongo_port}} --db={{mongo_db}} --collection={destination} --file={container_filepath}"
try:
result = container.exec_run(import_cmd.format(**get_mongo_config()))
if result.exit_code:
raise AppError(result.output.decode('utf-8'))
finally:
rm_result = container.exec_run(f"rm {container_filepath}", user='root')
if rm_result.exit_code:
logger.warning(rm_result.output.decode('utf-8'))
def _get_import_runner(db):
if db == "mongo":
return MongoImportRunner()
@logger.catch(onerror=lambda _: sys.exit(1))
def run(db, source, destination):
import_runner = _get_import_runner(db)
import_runner(source, destination)
|
[
"exceptions.AppError",
"os.path.basename",
"docker_service.create_archive",
"config_handler.get_mongo_config",
"docker_service.get_docker_container",
"sys.exit"
] |
[((315, 344), 'docker_service.get_docker_container', 'get_docker_container', (['"""mongo"""'], {}), "('mongo')\n", (335, 344), False, 'from docker_service import create_archive, get_docker_container\n'), ((366, 388), 'docker_service.create_archive', 'create_archive', (['source'], {}), '(source)\n', (380, 388), False, 'from docker_service import create_archive, get_docker_container\n'), ((508, 576), 'exceptions.AppError', 'AppError', (['f"""Putting \'{source}\' file to \'mongo\' container was failed"""'], {}), '(f"Putting \'{source}\' file to \'mongo\' container was failed")\n', (516, 576), False, 'from exceptions import AppError\n'), ((1418, 1429), 'sys.exit', 'sys.exit', (['(1)'], {}), '(1)\n', (1426, 1429), False, 'import sys\n'), ((635, 659), 'os.path.basename', 'os.path.basename', (['source'], {}), '(source)\n', (651, 659), False, 'import os\n'), ((979, 997), 'config_handler.get_mongo_config', 'get_mongo_config', ([], {}), '()\n', (995, 997), False, 'from config_handler import get_mongo_config\n')]
|
####################################################################################################
### ###
### Functions for graph handling (creation, ...) ###
### Author: <NAME> (EPFL) ###
### Last modified: 19.08.2021 ###
### ###
####################################################################################################
# Import libraries
import networkx as nx
from openbabel import pybel as pb
import matplotlib.pyplot as plt
def make_mol(mol_in, in_type="smi", out_type="mol", from_file=False, name="", make_3d=False, save=None):
"""
Generate a molfile from the SMILES representation of the molecule
Inputs: - mol_in Input molecule
- in_type Type of input
- out_type Type of output
- from_file Whether mol_in is a string (False) or points to a file (True)
- name Name of the molecule
- make_3d Whether to make
- save If set, defines the file to which the molfile should be saved
Output: - mol OBMol object of the molecule
"""
if from_file:
# Read the input file
mol = next(pb.readfile(in_type, mol_in))
else:
# Read the input string
mol = pb.readstring(in_type, mol_in)
# Set molecule name
mol.title = name
# Add implicit hydrogens
mol.addh()
if make_3d:
# Make 3D coordinates
mol.make3D()
else:
# Make 2D coordinates for drawing
mol.make2D()
# Convert the molecule into the output format
pp = mol.write(out_type)
# Save molecule to file
if save is not None:
with open(save, "w") as F:
F.write(pp)
return mol
def get_bonds(mol):
"""
Identify the bonds in a molecule
Input: - mol OBMol object of the molecule
Outputs: - atoms List of atoms in the molecule
- bonds List of bonded atoms for each atom in the molecule
"""
# Get molfile of the molecule
pp = mol.write("mol")
lines = pp.split("\n")
# Get number of atoms
n_atoms = len(mol.atoms)
# Initialize arrays of neighbours
bonds = [[] for _ in mol.atoms]
# Initialize array of atoms
atoms = []
# Parse the atom block
for l in lines[4:n_atoms + 4]:
atoms.append(l.split()[3])
# Parse the bond block
for l in lines[n_atoms + 4:]:
# Detect end of file
if "END" in l or len(l.split()) != 7:
break
bond = [int(li)-1 for li in l.split()[:2]]
# Update the list of bonds
bonds[bond[0]].append(bond[1])
bonds[bond[1]].append(bond[0])
return atoms, bonds
def identify_env(G):
"""
Identify the environment of the central node (index 0) of a graph
Input: - G Input graph
Output: - env environment of the central node in G
"""
# Initialize array of neighbouring elements
nei_elems = []
# Identify all nodes bonded to the central node
for e in G.edges:
if 0 in e:
# Get neighbour atom
if e[0] == 0:
i = e[1]
else:
i = e[0]
# Update array of neighbouring elements
nei_elems.append(G.nodes[i]["elem"])
# Return the environment in string format,
# with neighbours sorted alphabetically
return "-".join(sorted(nei_elems))
def generate_graph(atoms, bonds, i0, max_w, elems=["H", "C", "N", "O", "S"], hetatm="error", hetatm_rep=None):
"""
Generate a graph from atom i0 using the list of atoms and bonds in the molecule
Inputs: - atoms List of atoms in the molecule
- bonds Bonded atoms for each atom in the molecule (by index)
- i0 Index of the central atom in the graph
- max_w Maximum graph depth
- elems Allowed elements in the molecule
- hetatm Behaviour for handling unknown elements:
"error": raise an error
"ignore": ignore the atom
"replace": replace the atom with another element
"replace_and_terminate": replace the atom with another element and
cut all bonds from this atom
- hetatm_rep Dictionary of replacements for unknown elements
(used only with hetatm set to "replace" or "replace_and_terminate")
Outputs: - G Graph generated
- env Environment of the central node
"""
# The maximum depth should be at least one
if max_w < 1:
raise ValueError("max_weight should be at least 1, not {}".format(max_w))
# Initialize graph object
G = nx.Graph()
# Add central node
G.add_node(0, elem=atoms[i0], ind=i0)
# Initialize number of nodes in the graph and atom index of each node
N = G.number_of_nodes()
node_inds = [G.nodes[i]["ind"] for i in range(N)]
# Loop over all nodes
i = 0
while i < N:
# Identify the atoms bonded to that node
for j in bonds[node_inds[i]]:
at = atoms[j]
# Handle invalid elements
if at not in elems:
# Raise an error
if hetatm == "error":
raise ValueError("Invalid element found: {}".format(at))
# Ignore the atom
elif hetatm == "ignore":
continue
# Replace the atom with another element
elif hetatm == "replace":
at = hetatm_rep[at]
# Replace the atom with another element and cut all bonds from this atom
elif hetatm == "replace_and_terminate":
at = hetatm_rep[at]
bonds[j] = []
else:
raise ValueError("Invalid behaviour for unknown elements: {}".format(hetatm))
# If a new node is found, add it to the graph
if j not in node_inds:
G.add_node(N, elem=at, ind=j)
G.add_edge(i, N, w="1")
# If the new node is too far away, remove it
if nx.shortest_path_length(G, source=0, target=N) > max_w:
G.remove_node(N)
# Otherwise, keep it and update the total number of nodes and the atom index of each node
else:
N += 1
node_inds = [G.nodes[i]["ind"] for i in range(N)]
# If the bonde node is already in the graph, just add the edge
else:
G.add_edge(i, node_inds.index(j), w="1")
# Proceed to the next node
i += 1
# Get the environment of the central node
env = identify_env(G)
return G, env
def generate_graphs(atoms, bonds, elem, max_w, elems=["H", "C", "N", "O", "S"], hetatm="error", hetatm_rep=None):
"""
Generate graphs for all atoms of a given element in the molecule
Inputs: - atoms List of atoms in the molecule
- bonds Bonded atoms for each atom in the molecule (by index)
- elem Element for which to construct the graphs
- max_w Maximum graph depth
- elems Allowed elements in the molecule
- hetatm Behaviour for handling unknown elements:
"error": raise an error
"ignore": ignore the atom
"replace": replace the atom with another element
"replace_and_terminate": replace the atom with another element and
cut all bonds from this atom
- hetatm_rep Dictionary of replacements for unknown elements
(used only with hetatm set to "replace" or "replace_and_terminate")
Outputs: - Gs
- envs
"""
# Initialize arrays of graphs and environments
Gs = []
envs = []
# Loop over all atoms
for i, at in enumerate(atoms):
# Identify the atoms for which a graph should be constructed
if at == elem:
# Construct the graph
G, env = generate_graph(atoms, bonds, i, max_w, elems=elems, hetatm=hetatm, hetatm_rep=hetatm_rep)
Gs.append(G)
envs.append(env)
return Gs, envs
def cut_graph(G, w):
"""
Cut a graph down to a given depth
Inputs: - G Input graph
- w Depth to cut to
Output: - cut_G Cut graph
"""
# Copy the initial graph and get the number of nodes
cut_G = G.copy()
N = len(G.nodes)
# Check all nodes
for i in range(N):
# If the depth is greater than w, remove the node
if nx.shortest_path_length(G, source=0, target=i) > w:
cut_G.remove_node(i)
return cut_G
def generate_hash(G):
"""
Generate the Weisfeiler-Lehman hash corresponding to a graph
Input: - G Input graph
Output: - H Hash corresponding to graph G
"""
# Replace the central element by "Y" in order to make sure that the hash correctly identifies the central node
G2 = G.copy()
G2.nodes[0]["elem"] = "Y"
return nx.algorithms.graph_hashing.weisfeiler_lehman_graph_hash(G2, edge_attr="w", node_attr="elem", iterations=5)
def print_graph(G, w, layout="kamada_kawai", base_color="C0", center_color="r", out_color="g", show=True, save=None):
"""
Plot a graph at a given depth
Inputs: - G Networkx graph object
- w Maximum depth to display
- layout Node layout for plotting
- base_color Base color of nodes
- center_color Color of the central node
- out_color Color of nodes at the maximum graph depth
- show Whether the plot should be shown or not
- save If set, defines the file to save the plot to
"""
# Cut the graph to the maximum depth
cut_G = cut_graph(G, w)
# Get the label (element) of each node
labs = nx.get_node_attributes(cut_G, "elem")
# Set node layout
if layout == "kamada_kawai":
# Get the position of each node on the plot
pos = nx.kamada_kawai_layout(cut_G)
elif layout == "circular":
pos = nx.circular_layout(cut_G)
elif layout == "planar":
pos = nx.planar_layout(cut_G)
elif layout == "random":
pos = nx.random_layout(cut_G)
elif layout == "shell":
pos = nx.shell_layout(cut_G)
elif layout == "spring":
pos = nx.spring_layout(cut_G)
elif layout == "spectral":
pos = nx.spectral_layout(cut_G)
elif layout == "spiral":
pos = nx.spiral_layout(cut_G)
else:
raise ValueError("Unknown layout: {}".format(layout))
# Initialize figure handle
f = plt.figure(figsize=(6,5))
ax = f.add_subplot(1,1,1)
# Draw the nodes
nx.draw_networkx_nodes(cut_G, pos, ax=ax, node_color=base_color)
# Draw the central node in red
nx.draw_networkx_nodes(cut_G, pos, nodelist=[0], node_color=center_color, ax=ax)
# Draw the edge nodes in green
edge_nodes = []
for i in range(cut_G.number_of_nodes()):
if nx.shortest_path_length(cut_G, source=0, target=i) == w:
edge_nodes.append(i)
nx.draw_networkx_nodes(cut_G, pos, nodelist=edge_nodes, node_color=out_color, ax=ax)
# Get the covalent bonds and H-bonds
H_e = []
es = []
for i, e in enumerate(cut_G.edges):
if cut_G.edges[e]["w"] == 0:
H_e.append(e)
else:
es.append(e)
# Draw the edges
nx.draw_networkx_edges(cut_G, pos, edgelist=es, ax=ax)
nx.draw_networkx_edges(cut_G, pos, edgelist=H_e, style="dashed", ax=ax)
# Draw the labels on the nodes
nx.draw_networkx_labels(cut_G, pos, labs, ax=ax)
f.tight_layout()
# Show the plot
if show:
plt.show()
# Save the plot
if save:
if save.endswith(".png"):
f.savefig(save, dpi=150)
else:
f.savefig(save)
plt.close()
return
|
[
"openbabel.pybel.readstring",
"matplotlib.pyplot.figure",
"networkx.draw_networkx_nodes",
"networkx.draw_networkx_labels",
"networkx.shell_layout",
"networkx.random_layout",
"matplotlib.pyplot.close",
"networkx.kamada_kawai_layout",
"networkx.spectral_layout",
"networkx.shortest_path_length",
"matplotlib.pyplot.show",
"networkx.algorithms.graph_hashing.weisfeiler_lehman_graph_hash",
"networkx.get_node_attributes",
"networkx.circular_layout",
"openbabel.pybel.readfile",
"networkx.spiral_layout",
"networkx.draw_networkx_edges",
"networkx.planar_layout",
"networkx.spring_layout",
"networkx.Graph"
] |
[((5451, 5461), 'networkx.Graph', 'nx.Graph', ([], {}), '()\n', (5459, 5461), True, 'import networkx as nx\n'), ((10232, 10343), 'networkx.algorithms.graph_hashing.weisfeiler_lehman_graph_hash', 'nx.algorithms.graph_hashing.weisfeiler_lehman_graph_hash', (['G2'], {'edge_attr': '"""w"""', 'node_attr': '"""elem"""', 'iterations': '(5)'}), "(G2, edge_attr='w',\n node_attr='elem', iterations=5)\n", (10288, 10343), True, 'import networkx as nx\n'), ((11138, 11175), 'networkx.get_node_attributes', 'nx.get_node_attributes', (['cut_G', '"""elem"""'], {}), "(cut_G, 'elem')\n", (11160, 11175), True, 'import networkx as nx\n'), ((11915, 11941), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(6, 5)'}), '(figsize=(6, 5))\n', (11925, 11941), True, 'import matplotlib.pyplot as plt\n'), ((11997, 12061), 'networkx.draw_networkx_nodes', 'nx.draw_networkx_nodes', (['cut_G', 'pos'], {'ax': 'ax', 'node_color': 'base_color'}), '(cut_G, pos, ax=ax, node_color=base_color)\n', (12019, 12061), True, 'import networkx as nx\n'), ((12102, 12187), 'networkx.draw_networkx_nodes', 'nx.draw_networkx_nodes', (['cut_G', 'pos'], {'nodelist': '[0]', 'node_color': 'center_color', 'ax': 'ax'}), '(cut_G, pos, nodelist=[0], node_color=center_color, ax=ax\n )\n', (12124, 12187), True, 'import networkx as nx\n'), ((12389, 12478), 'networkx.draw_networkx_nodes', 'nx.draw_networkx_nodes', (['cut_G', 'pos'], {'nodelist': 'edge_nodes', 'node_color': 'out_color', 'ax': 'ax'}), '(cut_G, pos, nodelist=edge_nodes, node_color=\n out_color, ax=ax)\n', (12411, 12478), True, 'import networkx as nx\n'), ((12709, 12763), 'networkx.draw_networkx_edges', 'nx.draw_networkx_edges', (['cut_G', 'pos'], {'edgelist': 'es', 'ax': 'ax'}), '(cut_G, pos, edgelist=es, ax=ax)\n', (12731, 12763), True, 'import networkx as nx\n'), ((12768, 12839), 'networkx.draw_networkx_edges', 'nx.draw_networkx_edges', (['cut_G', 'pos'], {'edgelist': 'H_e', 'style': '"""dashed"""', 'ax': 'ax'}), "(cut_G, pos, edgelist=H_e, style='dashed', 
ax=ax)\n", (12790, 12839), True, 'import networkx as nx\n'), ((12880, 12928), 'networkx.draw_networkx_labels', 'nx.draw_networkx_labels', (['cut_G', 'pos', 'labs'], {'ax': 'ax'}), '(cut_G, pos, labs, ax=ax)\n', (12903, 12928), True, 'import networkx as nx\n'), ((13155, 13166), 'matplotlib.pyplot.close', 'plt.close', ([], {}), '()\n', (13164, 13166), True, 'import matplotlib.pyplot as plt\n'), ((1627, 1657), 'openbabel.pybel.readstring', 'pb.readstring', (['in_type', 'mol_in'], {}), '(in_type, mol_in)\n', (1640, 1657), True, 'from openbabel import pybel as pb\n'), ((11298, 11327), 'networkx.kamada_kawai_layout', 'nx.kamada_kawai_layout', (['cut_G'], {}), '(cut_G)\n', (11320, 11327), True, 'import networkx as nx\n'), ((12993, 13003), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (13001, 13003), True, 'import matplotlib.pyplot as plt\n'), ((1541, 1569), 'openbabel.pybel.readfile', 'pb.readfile', (['in_type', 'mol_in'], {}), '(in_type, mol_in)\n', (1552, 1569), True, 'from openbabel import pybel as pb\n'), ((9759, 9805), 'networkx.shortest_path_length', 'nx.shortest_path_length', (['G'], {'source': '(0)', 'target': 'i'}), '(G, source=0, target=i)\n', (9782, 9805), True, 'import networkx as nx\n'), ((11373, 11398), 'networkx.circular_layout', 'nx.circular_layout', (['cut_G'], {}), '(cut_G)\n', (11391, 11398), True, 'import networkx as nx\n'), ((12295, 12345), 'networkx.shortest_path_length', 'nx.shortest_path_length', (['cut_G'], {'source': '(0)', 'target': 'i'}), '(cut_G, source=0, target=i)\n', (12318, 12345), True, 'import networkx as nx\n'), ((11442, 11465), 'networkx.planar_layout', 'nx.planar_layout', (['cut_G'], {}), '(cut_G)\n', (11458, 11465), True, 'import networkx as nx\n'), ((6968, 7014), 'networkx.shortest_path_length', 'nx.shortest_path_length', (['G'], {'source': '(0)', 'target': 'N'}), '(G, source=0, target=N)\n', (6991, 7014), True, 'import networkx as nx\n'), ((11509, 11532), 'networkx.random_layout', 'nx.random_layout', (['cut_G'], {}), 
'(cut_G)\n', (11525, 11532), True, 'import networkx as nx\n'), ((11575, 11597), 'networkx.shell_layout', 'nx.shell_layout', (['cut_G'], {}), '(cut_G)\n', (11590, 11597), True, 'import networkx as nx\n'), ((11641, 11664), 'networkx.spring_layout', 'nx.spring_layout', (['cut_G'], {}), '(cut_G)\n', (11657, 11664), True, 'import networkx as nx\n'), ((11710, 11735), 'networkx.spectral_layout', 'nx.spectral_layout', (['cut_G'], {}), '(cut_G)\n', (11728, 11735), True, 'import networkx as nx\n'), ((11779, 11802), 'networkx.spiral_layout', 'nx.spiral_layout', (['cut_G'], {}), '(cut_G)\n', (11795, 11802), True, 'import networkx as nx\n')]
|
"""
This module implements all the functionalities required for setting up and
running Fio workloads on the pods.
This module implements few functions
setup(): for setting up fio utility on the pod and any necessary
environmental params.
run(): for running fio on pod on specified mount point
Note: The above mentioned functions will be invoked from Workload.setup()
and Workload.run() methods along with user provided parameters.
"""
import logging
from ocs_ci.ocs import exceptions
log = logging.getLogger(__name__)
DISTROS = {"Debian": "apt-get", "RHEL": "yum"}
def find_distro(io_pod):
    """
    Detect the OS distribution family running inside a pod.

    Detection probes for each known package manager (see ``DISTROS``)
    with ``which``; the first one present decides the distro.

    Args:
        io_pod (Pod): app pod object

    Returns:
        distro (str): 'Debian' or 'RHEL' as of now, or None when no
            known package manager is found
    """
    for name in DISTROS:
        probe = f"which {DISTROS[name]}"
        try:
            io_pod.exec_cmd_on_pod(probe, out_yaml_format=False)
            return name
        except exceptions.CommandFailed:
            log.debug(f"Distro is not {name}")
def setup(**kwargs):
    """
    Install the fio utility on the target pod.

    Args:
        **kwargs (dict): fio setup configuration. Currently only the key
            'pod' (the pod to set up) is used. If we move to
            containerized fio later, a pod.yaml may be passed here too.

    Returns:
        bool: True if setup succeeds else False
    """
    target = kwargs['pod']
    # For first cut doing simple fio install
    distro = find_distro(target)
    installer = DISTROS[distro]
    # Debian-family package indexes must be refreshed before installing.
    if distro == 'Debian':
        target.exec_cmd_on_pod(f'{installer} update', out_yaml_format=False)
    return target.exec_cmd_on_pod(
        f"{installer} -y install fio", out_yaml_format=False
    )
def run(**kwargs):
    """
    Run fio on a pod with parameters taken from kwargs.

    Default parameter list can be found in
    templates/workloads/fio/workload_io.yaml and the caller can update
    the dict as per the requirement.

    Args:
        kwargs (dict): IO params for fio; must contain 'pod', 'type'
            ('fs' or raw block) and 'path' (mount point or device path)

    Result:
        result of command
    """
    io_pod = kwargs.pop('pod')
    st_type = kwargs.pop('type')
    path = kwargs.pop('path')
    options = []
    for key, val in kwargs.items():
        if key == 'filename':
            # Filesystem targets get a file under the mount point; a raw
            # block device is addressed by the device path itself.
            target = f"{path}/{val}" if st_type == 'fs' else path
            options.append(f" --{key}={target}")
        else:
            options.append(f" --{key}={val}")
    fio_cmd = "fio" + "".join(options) + " --output-format=json"
    log.info(f"Running cmd: {fio_cmd}")
    return io_pod.exec_cmd_on_pod(fio_cmd, out_yaml_format=False)
|
[
"logging.getLogger"
] |
[((499, 526), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (516, 526), False, 'import logging\n')]
|
"""Add pending flag
Revision ID: 6458bd3b46dc
Revises: <PASSWORD>
Create Date: 2017-11-26 07:32:09.814699
"""
from alembic import op
import sqlalchemy as sa
# revision identifiers, used by Alembic.
revision = '<KEY>'
down_revision = '<PASSWORD>'
branch_labels = None
depends_on = None
def upgrade():
    """Apply the migration: add a nullable boolean ``pending`` column to ``iou``."""
    # ### commands auto generated by Alembic - please adjust! ###
    pending_column = sa.Column('pending', sa.Boolean(), nullable=True)
    op.add_column('iou', pending_column)
    # ### end Alembic commands ###
def downgrade():
    """Revert the upgrade: drop the ``pending`` column from ``iou`` again."""
    # ### commands auto generated by Alembic - please adjust! ###
    op.drop_column('iou', 'pending')
    # ### end Alembic commands ###
|
[
"alembic.op.drop_column",
"sqlalchemy.Boolean"
] |
[((572, 604), 'alembic.op.drop_column', 'op.drop_column', (['"""iou"""', '"""pending"""'], {}), "('iou', 'pending')\n", (586, 604), False, 'from alembic import op\n'), ((418, 430), 'sqlalchemy.Boolean', 'sa.Boolean', ([], {}), '()\n', (428, 430), True, 'import sqlalchemy as sa\n')]
|
# pylint: disable=C0111,R0903
"""Display HTTP status code
Parameters:
* http__status.label: Prefix label (optional)
* http__status.target: Target to retrieve the HTTP status from
* http__status.expect: Expected HTTP status
contributed by `valkheim <https://github.com/valkheim>`_ - many thanks!
"""
from requests import head
import psutil
import core.module
import core.widget
import core.decorators
class Module(core.module.Module):
    """Bar module that shows the HTTP status code of a target URL.

    Parameters:
        * http__status.label: Prefix label (optional)
        * http__status.target: Target to retrieve the HTTP status from
        * http__status.expect: Expected HTTP status (default "200")
    """

    UNK = "UNK"  # sentinel shown when the request itself fails

    @core.decorators.every(seconds=30)
    def __init__(self, config, theme):
        super().__init__(config, theme, core.widget.Widget(self.output))
        self.__label = self.parameter("label")
        self.__target = self.parameter("target")
        self.__expect = self.parameter("expect", "200")

    def labelize(self, s):
        # Prepend the optional user-supplied label, if configured.
        return s if self.__label is None else "{}: {}".format(self.__label, s)

    def getStatus(self):
        # Issue a HEAD request; any failure (DNS, timeout, ...) maps to UNK.
        try:
            response = head(self.__target)
        except Exception as exc:
            print(exc)
            return self.UNK
        return str(response.status_code)

    def getOutput(self):
        if self.__status == self.__expect:
            return self.labelize(self.__status)
        reason = " != {}".format(self.__expect)
        return self.labelize("{}{}".format(self.__status, reason))

    def output(self, widget):
        return self.__output

    def update(self):
        self.__status = self.getStatus()
        self.__output = self.getOutput()

    def state(self, widget):
        if self.__status == self.UNK:
            return "warning"
        if self.__status != self.__expect:
            return "critical"
        # NOTE(review): falls through to the output text rather than a
        # state name — kept as-is to preserve the original behavior.
        return self.__output
# vim: tabstop=8 expandtab shiftwidth=4 softtabstop=4
|
[
"requests.head"
] |
[((962, 981), 'requests.head', 'head', (['self.__target'], {}), '(self.__target)\n', (966, 981), False, 'from requests import head\n')]
|
"""commandline.
Usage:
commandline.py lookup --lang=<lang> --word=<preword> [--ip=<redis-ip>] [--port=<redis-port>]
commandline.py -h | --help
Options:
-h --help Show this screen.
--lang=<lang> Language of suggested word.
--word=<preword> Pre-word of suggested word.
--ip=<rediis-ip> Ip of redis server (Default: 172.17.0.3)
--port=<rediis-port> Port of redis server (Default: 6379)
"""
import redis
import py_word_suggest
# from config import REDIS_IP
from docopt import docopt
def main():
    """Entry point: parse CLI arguments and run the requested command."""
    arguments = docopt(__doc__, version='commandline 0.0.1')
    # Fall back to the documented defaults when the options are absent.
    rp = arguments['--port'] if arguments['--port'] else 6379
    rs = arguments['--ip'] if arguments['--ip'] else '172.17.0.3'
    r = redis.StrictRedis(host=rs, port=rp, db=0)
    try:
        obj = py_word_suggest.Selector_redis(r)
    except Exception as e:
        print("{e} Fail to connect to: {ip}:{port}".format(e=e, ip=rs, port=rp))
        exit(1)
    if arguments['lookup']:
        # Keys follow the bigram layout: lang:<lang>:gram:2:<pre-word>
        key = 'lang:{l}:gram:2:{w}'.format(l=arguments['--lang'], w=arguments['--word'])
        try:
            fetch = obj.gen_fetchWords(key)
        except Exception as e:
            print("{e}".format(e=e))
            exit(1)
        print("'{w}' has the following suggested words:\n".format(w=arguments['--word']))
        print(list(obj.gen_suggestWord(*fetch)))


if __name__ == "__main__":
    main()
|
[
"redis.StrictRedis",
"py_word_suggest.Selector_redis",
"docopt.docopt"
] |
[((557, 601), 'docopt.docopt', 'docopt', (['__doc__'], {'version': '"""commandline 0.0.1"""'}), "(__doc__, version='commandline 0.0.1')\n", (563, 601), False, 'from docopt import docopt\n'), ((792, 833), 'redis.StrictRedis', 'redis.StrictRedis', ([], {'host': 'rs', 'port': 'rp', 'db': '(0)'}), '(host=rs, port=rp, db=0)\n', (809, 833), False, 'import redis\n'), ((857, 890), 'py_word_suggest.Selector_redis', 'py_word_suggest.Selector_redis', (['r'], {}), '(r)\n', (887, 890), False, 'import py_word_suggest\n')]
|
import torch
import torch.nn as nn
from torch.autograd import Variable
# adapted from pytorch/examples/vae and ethanluoyc/pytorch-vae
class ImageClassifier(nn.Module):
    """Linear classification head on top of a pretrained encoder.

    ``pretrained`` must expose an ``encode(x)`` method returning a tuple
    whose first element is the feature tensor; only that element is used.
    """

    def __init__(self, latent_variable_size, pretrained, nout=2):
        super(ImageClassifier, self).__init__()
        self.latent_variable_size = latent_variable_size
        self.feature_extractor = pretrained
        self.classifier = nn.Linear(latent_variable_size, nout)

    def forward(self, x):
        # Encode, flatten to (batch, latent), then classify.
        features, _ = self.feature_extractor.encode(x)
        flat = features.view(-1, self.latent_variable_size)
        return self.classifier(flat)
class VAE(nn.Module):
    """Convolutional variational autoencoder (DCGAN-style) for square images.

    The encoder halves the spatial resolution five times with stride-2
    convolutions (imsize -> imsize/32), then two linear heads produce the
    posterior mean and log-variance. The decoder mirrors this with
    transposed convolutions and ends in a sigmoid, so reconstructions
    lie in [0, 1].

    Args:
        nc (int): number of image channels.
        ngf (int): base channel width of the decoder.
        ndf (int): base channel width of the encoder.
        latent_variable_size (int): dimensionality of the latent code z.
        imsize (int): input image side length (images are assumed square;
            the fixed 2x2 bottleneck assumes imsize == 64).
        batchnorm (bool): if True, batch-normalize the posterior mean.
    """

    def __init__(self, nc=1, ngf=128, ndf=128, latent_variable_size=128, imsize=64, batchnorm=False):
        super(VAE, self).__init__()

        self.nc = nc
        self.ngf = ngf
        self.ndf = ndf
        self.imsize = imsize
        self.latent_variable_size = latent_variable_size
        self.batchnorm = batchnorm

        self.encoder = nn.Sequential(
            # input is (nc) x 64 x 64
            nn.Conv2d(nc, ndf, 4, 2, 1, bias=False),
            nn.LeakyReLU(0.2, inplace=True),
            # state size. (ndf) x 32 x 32
            nn.Conv2d(ndf, ndf * 2, 4, 2, 1, bias=False),
            nn.BatchNorm2d(ndf * 2),
            nn.LeakyReLU(0.2, inplace=True),
            # state size. (ndf*2) x 16 x 16
            nn.Conv2d(ndf * 2, ndf * 4, 4, 2, 1, bias=False),
            nn.BatchNorm2d(ndf * 4),
            nn.LeakyReLU(0.2, inplace=True),
            # state size. (ndf*4) x 8 x 8
            nn.Conv2d(ndf * 4, ndf * 8, 4, 2, 1, bias=False),
            nn.BatchNorm2d(ndf * 8),
            nn.LeakyReLU(0.2, inplace=True),
            # state size. (ndf*8) x 4 x 4
            nn.Conv2d(ndf * 8, ndf * 8, 4, 2, 1, bias=False),
            nn.BatchNorm2d(ndf * 8),
            nn.LeakyReLU(0.2, inplace=True),
            # state size. (ndf*8) x 2 x 2
        )

        # Posterior mean and log-variance heads over the flattened features.
        self.fc1 = nn.Linear(ndf * 8 * 2 * 2, latent_variable_size)
        self.fc2 = nn.Linear(ndf * 8 * 2 * 2, latent_variable_size)

        # decoder
        self.decoder = nn.Sequential(
            # input is Z, going into a convolution
            # state size. (ngf*8) x 2 x 2
            nn.ConvTranspose2d(ngf * 8, ngf * 8, 4, 2, 1, bias=False),
            nn.BatchNorm2d(ngf * 8),
            nn.LeakyReLU(0.2, inplace=True),
            # state size. (ngf*8) x 4 x 4
            nn.ConvTranspose2d(ngf * 8, ngf * 4, 4, 2, 1, bias=False),
            nn.BatchNorm2d(ngf * 4),
            nn.LeakyReLU(0.2, inplace=True),
            # state size. (ngf*4) x 8 x 8
            nn.ConvTranspose2d(ngf * 4, ngf * 2, 4, 2, 1, bias=False),
            nn.BatchNorm2d(ngf * 2),
            nn.LeakyReLU(0.2, inplace=True),
            # state size. (ngf*2) x 16 x 16
            nn.ConvTranspose2d(ngf * 2, ngf, 4, 2, 1, bias=False),
            nn.BatchNorm2d(ngf),
            nn.LeakyReLU(0.2, inplace=True),
            # state size. (ngf) x 32 x 32
            nn.ConvTranspose2d(ngf, nc, 4, 2, 1, bias=False),
            nn.Sigmoid(),
            # state size. (nc) x 64 x 64
        )

        # Projects z back up to the decoder's (ngf*8) x 2 x 2 input.
        self.d1 = nn.Sequential(
            nn.Linear(latent_variable_size, ngf * 8 * 2 * 2),
            nn.ReLU(inplace=True),
        )

        self.bn_mean = nn.BatchNorm1d(latent_variable_size)

    def encode(self, x):
        """Map an image batch to ``(mu, logvar)`` of the latent posterior."""
        h = self.encoder(x)
        h = h.view(-1, self.ndf * 8 * 2 * 2)
        if self.batchnorm:
            return self.bn_mean(self.fc1(h)), self.fc2(h)
        return self.fc1(h), self.fc2(h)

    def reparametrize(self, mu, logvar):
        """Sample ``z ~ N(mu, exp(logvar))`` via the reparameterization trick."""
        std = logvar.mul(0.5).exp_()
        # FIX: randn_like allocates eps on std's own device/dtype. The old
        # torch.cuda.is_available() branch produced a CUDA eps even when
        # the model lived on the CPU, crashing eps.mul(std); the deprecated
        # Variable wrap was also a no-op and is gone.
        eps = torch.randn_like(std)
        return eps.mul(std).add_(mu)

    def decode(self, z):
        """Reconstruct an image batch from latent codes ``z``."""
        h = self.d1(z)
        h = h.view(-1, self.ngf * 8, 2, 2)
        return self.decoder(h)

    def get_latent_var(self, x):
        """Return a sampled latent code for input ``x`` (no reconstruction)."""
        mu, logvar = self.encode(x.view(-1, self.nc, self.imsize, self.imsize))
        z = self.reparametrize(mu, logvar)
        return z

    def generate(self, z):
        """Decode latent codes ``z`` into images."""
        res = self.decode(z)
        return res

    def forward(self, x):
        """Full pass: returns ``(reconstruction, z, mu, logvar)``."""
        mu, logvar = self.encode(x.view(-1, self.nc, self.imsize, self.imsize))
        z = self.reparametrize(mu, logvar)
        res = self.decode(z)
        return res, z, mu, logvar
class FC_VAE(nn.Module):
    """Fully connected variational autoencoder.

    Args:
        n_input (int): dimensionality of the (flat) input vectors.
        nz (int): dimensionality of the latent code.
        n_hidden (int): width of the hidden layers.
    """

    def __init__(self, n_input, nz, n_hidden=1024):
        super(FC_VAE, self).__init__()
        self.nz = nz
        self.n_input = n_input
        self.n_hidden = n_hidden

        # Shared encoder trunk; fc1/fc2 below read the posterior mean
        # and log-variance from its output.
        self.encoder = nn.Sequential(
            nn.Linear(n_input, n_hidden),
            nn.ReLU(inplace=True),
            nn.BatchNorm1d(n_hidden),
            nn.Linear(n_hidden, n_hidden),
            nn.BatchNorm1d(n_hidden),
            nn.ReLU(inplace=True),
            nn.Linear(n_hidden, n_hidden),
            nn.BatchNorm1d(n_hidden),
            nn.ReLU(inplace=True),
            nn.Linear(n_hidden, n_hidden),
            nn.BatchNorm1d(n_hidden),
            nn.ReLU(inplace=True),
            nn.Linear(n_hidden, n_hidden),
        )

        self.fc1 = nn.Linear(n_hidden, nz)  # posterior mean head
        self.fc2 = nn.Linear(n_hidden, nz)  # posterior log-variance head

        self.decoder = nn.Sequential(
            nn.Linear(nz, n_hidden),
            nn.ReLU(inplace=True),
            nn.BatchNorm1d(n_hidden),
            nn.Linear(n_hidden, n_hidden),
            nn.BatchNorm1d(n_hidden),
            nn.ReLU(inplace=True),
            nn.Linear(n_hidden, n_hidden),
            nn.BatchNorm1d(n_hidden),
            nn.ReLU(inplace=True),
            nn.Linear(n_hidden, n_hidden),
            nn.BatchNorm1d(n_hidden),
            nn.ReLU(inplace=True),
            nn.Linear(n_hidden, n_input),
        )

    def forward(self, x):
        """Full pass: returns ``(reconstruction, z, mu, logvar)``."""
        mu, logvar = self.encode(x)
        z = self.reparametrize(mu, logvar)
        res = self.decode(z)
        return res, z, mu, logvar

    def encode(self, x):
        """Map inputs to ``(mu, logvar)`` of the latent posterior."""
        h = self.encoder(x)
        return self.fc1(h), self.fc2(h)

    def reparametrize(self, mu, logvar):
        """Sample ``z ~ N(mu, exp(logvar))`` via the reparameterization trick."""
        std = logvar.mul(0.5).exp_()
        # FIX: randn_like allocates eps on std's own device/dtype. The old
        # torch.cuda.is_available() branch produced a CUDA eps even when
        # the model lived on the CPU, crashing eps.mul(std); the deprecated
        # Variable wrap was also a no-op and is gone.
        eps = torch.randn_like(std)
        return eps.mul(std).add_(mu)

    def decode(self, z):
        """Reconstruct inputs from latent codes ``z``."""
        return self.decoder(z)

    def get_latent_var(self, x):
        """Return a sampled latent code for input ``x`` (no reconstruction)."""
        mu, logvar = self.encode(x)
        z = self.reparametrize(mu, logvar)
        return z

    def generate(self, z):
        """Decode latent codes ``z`` into reconstructions."""
        res = self.decode(z)
        return res
class FC_Autoencoder(nn.Module):
    """Plain fully connected autoencoder (no variational sampling)."""

    def __init__(self, n_input, nz, n_hidden=512):
        super(FC_Autoencoder, self).__init__()
        self.nz = nz
        self.n_input = n_input
        self.n_hidden = n_hidden
        # Encoder: input projection, three hidden blocks, bottleneck.
        enc_layers = [nn.Linear(n_input, n_hidden),
                      nn.ReLU(inplace=True),
                      nn.BatchNorm1d(n_hidden)]
        for _ in range(3):
            enc_layers += [nn.Linear(n_hidden, n_hidden),
                           nn.BatchNorm1d(n_hidden),
                           nn.ReLU(inplace=True)]
        enc_layers.append(nn.Linear(n_hidden, nz))
        self.encoder = nn.Sequential(*enc_layers)
        # Decoder mirrors the encoder back up to the input width.
        dec_layers = [nn.Linear(nz, n_hidden),
                      nn.ReLU(inplace=True),
                      nn.BatchNorm1d(n_hidden)]
        for _ in range(3):
            dec_layers += [nn.Linear(n_hidden, n_hidden),
                           nn.BatchNorm1d(n_hidden),
                           nn.ReLU(inplace=True)]
        dec_layers.append(nn.Linear(n_hidden, n_input))
        self.decoder = nn.Sequential(*dec_layers)

    def forward(self, x):
        # Encode then reconstruct; both tensors are returned to the caller.
        code = self.encoder(x)
        recon = self.decoder(code)
        return code, recon
class FC_Classifier(nn.Module):
    """MLP discriminator operating on latent-space codes."""

    def __init__(self, nz, n_hidden=1024, n_out=2):
        super(FC_Classifier, self).__init__()
        self.nz = nz
        self.n_hidden = n_hidden
        self.n_out = n_out
        # Three Linear+ReLU blocks followed by the output projection.
        layers = [nn.Linear(nz, n_hidden), nn.ReLU(inplace=True)]
        for _ in range(2):
            layers += [nn.Linear(n_hidden, n_hidden), nn.ReLU(inplace=True)]
        layers.append(nn.Linear(n_hidden, n_out))
        self.net = nn.Sequential(*layers)

    def forward(self, x):
        return self.net(x)
class Simple_Classifier(nn.Module):
    """Single-layer (linear) discriminator over the latent space."""

    def __init__(self, nz, n_out=2):
        super(Simple_Classifier, self).__init__()
        self.nz = nz
        self.n_out = n_out
        # Kept inside a Sequential so state_dict keys stay compatible.
        layers = [nn.Linear(nz, n_out)]
        self.net = nn.Sequential(*layers)

    def forward(self, x):
        return self.net(x)
|
[
"torch.nn.ReLU",
"torch.nn.ConvTranspose2d",
"torch.autograd.Variable",
"torch.nn.Conv2d",
"torch.nn.BatchNorm1d",
"torch.nn.BatchNorm2d",
"torch.cuda.is_available",
"torch.nn.Linear",
"torch.nn.LeakyReLU",
"torch.nn.Sigmoid"
] |
[((412, 449), 'torch.nn.Linear', 'nn.Linear', (['latent_variable_size', 'nout'], {}), '(latent_variable_size, nout)\n', (421, 449), True, 'import torch.nn as nn\n'), ((1948, 1996), 'torch.nn.Linear', 'nn.Linear', (['(ndf * 8 * 2 * 2)', 'latent_variable_size'], {}), '(ndf * 8 * 2 * 2, latent_variable_size)\n', (1957, 1996), True, 'import torch.nn as nn\n'), ((2010, 2058), 'torch.nn.Linear', 'nn.Linear', (['(ndf * 8 * 2 * 2)', 'latent_variable_size'], {}), '(ndf * 8 * 2 * 2, latent_variable_size)\n', (2019, 2058), True, 'import torch.nn as nn\n'), ((3295, 3331), 'torch.nn.BatchNorm1d', 'nn.BatchNorm1d', (['latent_variable_size'], {}), '(latent_variable_size)\n', (3309, 3331), True, 'import torch.nn as nn\n'), ((3660, 3685), 'torch.cuda.is_available', 'torch.cuda.is_available', ([], {}), '()\n', (3683, 3685), False, 'import torch\n'), ((3836, 3849), 'torch.autograd.Variable', 'Variable', (['eps'], {}), '(eps)\n', (3844, 3849), False, 'from torch.autograd import Variable\n'), ((5553, 5576), 'torch.nn.Linear', 'nn.Linear', (['n_hidden', 'nz'], {}), '(n_hidden, nz)\n', (5562, 5576), True, 'import torch.nn as nn\n'), ((5596, 5619), 'torch.nn.Linear', 'nn.Linear', (['n_hidden', 'nz'], {}), '(n_hidden, nz)\n', (5605, 5619), True, 'import torch.nn as nn\n'), ((6836, 6861), 'torch.cuda.is_available', 'torch.cuda.is_available', ([], {}), '()\n', (6859, 6861), False, 'import torch\n'), ((7012, 7025), 'torch.autograd.Variable', 'Variable', (['eps'], {}), '(eps)\n', (7020, 7025), False, 'from torch.autograd import Variable\n'), ((1047, 1086), 'torch.nn.Conv2d', 'nn.Conv2d', (['nc', 'ndf', '(4)', '(2)', '(1)'], {'bias': '(False)'}), '(nc, ndf, 4, 2, 1, bias=False)\n', (1056, 1086), True, 'import torch.nn as nn\n'), ((1100, 1131), 'torch.nn.LeakyReLU', 'nn.LeakyReLU', (['(0.2)'], {'inplace': '(True)'}), '(0.2, inplace=True)\n', (1112, 1131), True, 'import torch.nn as nn\n'), ((1187, 1231), 'torch.nn.Conv2d', 'nn.Conv2d', (['ndf', '(ndf * 2)', '(4)', '(2)', '(1)'], {'bias': 
'(False)'}), '(ndf, ndf * 2, 4, 2, 1, bias=False)\n', (1196, 1231), True, 'import torch.nn as nn\n'), ((1245, 1268), 'torch.nn.BatchNorm2d', 'nn.BatchNorm2d', (['(ndf * 2)'], {}), '(ndf * 2)\n', (1259, 1268), True, 'import torch.nn as nn\n'), ((1282, 1313), 'torch.nn.LeakyReLU', 'nn.LeakyReLU', (['(0.2)'], {'inplace': '(True)'}), '(0.2, inplace=True)\n', (1294, 1313), True, 'import torch.nn as nn\n'), ((1371, 1419), 'torch.nn.Conv2d', 'nn.Conv2d', (['(ndf * 2)', '(ndf * 4)', '(4)', '(2)', '(1)'], {'bias': '(False)'}), '(ndf * 2, ndf * 4, 4, 2, 1, bias=False)\n', (1380, 1419), True, 'import torch.nn as nn\n'), ((1433, 1456), 'torch.nn.BatchNorm2d', 'nn.BatchNorm2d', (['(ndf * 4)'], {}), '(ndf * 4)\n', (1447, 1456), True, 'import torch.nn as nn\n'), ((1470, 1501), 'torch.nn.LeakyReLU', 'nn.LeakyReLU', (['(0.2)'], {'inplace': '(True)'}), '(0.2, inplace=True)\n', (1482, 1501), True, 'import torch.nn as nn\n'), ((1557, 1605), 'torch.nn.Conv2d', 'nn.Conv2d', (['(ndf * 4)', '(ndf * 8)', '(4)', '(2)', '(1)'], {'bias': '(False)'}), '(ndf * 4, ndf * 8, 4, 2, 1, bias=False)\n', (1566, 1605), True, 'import torch.nn as nn\n'), ((1619, 1642), 'torch.nn.BatchNorm2d', 'nn.BatchNorm2d', (['(ndf * 8)'], {}), '(ndf * 8)\n', (1633, 1642), True, 'import torch.nn as nn\n'), ((1656, 1687), 'torch.nn.LeakyReLU', 'nn.LeakyReLU', (['(0.2)'], {'inplace': '(True)'}), '(0.2, inplace=True)\n', (1668, 1687), True, 'import torch.nn as nn\n'), ((1743, 1791), 'torch.nn.Conv2d', 'nn.Conv2d', (['(ndf * 8)', '(ndf * 8)', '(4)', '(2)', '(1)'], {'bias': '(False)'}), '(ndf * 8, ndf * 8, 4, 2, 1, bias=False)\n', (1752, 1791), True, 'import torch.nn as nn\n'), ((1805, 1828), 'torch.nn.BatchNorm2d', 'nn.BatchNorm2d', (['(ndf * 8)'], {}), '(ndf * 8)\n', (1819, 1828), True, 'import torch.nn as nn\n'), ((1842, 1873), 'torch.nn.LeakyReLU', 'nn.LeakyReLU', (['(0.2)'], {'inplace': '(True)'}), '(0.2, inplace=True)\n', (1854, 1873), True, 'import torch.nn as nn\n'), ((2218, 2275), 'torch.nn.ConvTranspose2d', 
'nn.ConvTranspose2d', (['(ngf * 8)', '(ngf * 8)', '(4)', '(2)', '(1)'], {'bias': '(False)'}), '(ngf * 8, ngf * 8, 4, 2, 1, bias=False)\n', (2236, 2275), True, 'import torch.nn as nn\n'), ((2289, 2312), 'torch.nn.BatchNorm2d', 'nn.BatchNorm2d', (['(ngf * 8)'], {}), '(ngf * 8)\n', (2303, 2312), True, 'import torch.nn as nn\n'), ((2326, 2357), 'torch.nn.LeakyReLU', 'nn.LeakyReLU', (['(0.2)'], {'inplace': '(True)'}), '(0.2, inplace=True)\n', (2338, 2357), True, 'import torch.nn as nn\n'), ((2413, 2470), 'torch.nn.ConvTranspose2d', 'nn.ConvTranspose2d', (['(ngf * 8)', '(ngf * 4)', '(4)', '(2)', '(1)'], {'bias': '(False)'}), '(ngf * 8, ngf * 4, 4, 2, 1, bias=False)\n', (2431, 2470), True, 'import torch.nn as nn\n'), ((2484, 2507), 'torch.nn.BatchNorm2d', 'nn.BatchNorm2d', (['(ngf * 4)'], {}), '(ngf * 4)\n', (2498, 2507), True, 'import torch.nn as nn\n'), ((2521, 2552), 'torch.nn.LeakyReLU', 'nn.LeakyReLU', (['(0.2)'], {'inplace': '(True)'}), '(0.2, inplace=True)\n', (2533, 2552), True, 'import torch.nn as nn\n'), ((2608, 2665), 'torch.nn.ConvTranspose2d', 'nn.ConvTranspose2d', (['(ngf * 4)', '(ngf * 2)', '(4)', '(2)', '(1)'], {'bias': '(False)'}), '(ngf * 4, ngf * 2, 4, 2, 1, bias=False)\n', (2626, 2665), True, 'import torch.nn as nn\n'), ((2679, 2702), 'torch.nn.BatchNorm2d', 'nn.BatchNorm2d', (['(ngf * 2)'], {}), '(ngf * 2)\n', (2693, 2702), True, 'import torch.nn as nn\n'), ((2716, 2747), 'torch.nn.LeakyReLU', 'nn.LeakyReLU', (['(0.2)'], {'inplace': '(True)'}), '(0.2, inplace=True)\n', (2728, 2747), True, 'import torch.nn as nn\n'), ((2805, 2858), 'torch.nn.ConvTranspose2d', 'nn.ConvTranspose2d', (['(ngf * 2)', 'ngf', '(4)', '(2)', '(1)'], {'bias': '(False)'}), '(ngf * 2, ngf, 4, 2, 1, bias=False)\n', (2823, 2858), True, 'import torch.nn as nn\n'), ((2876, 2895), 'torch.nn.BatchNorm2d', 'nn.BatchNorm2d', (['ngf'], {}), '(ngf)\n', (2890, 2895), True, 'import torch.nn as nn\n'), ((2909, 2940), 'torch.nn.LeakyReLU', 'nn.LeakyReLU', (['(0.2)'], {'inplace': '(True)'}), 
'(0.2, inplace=True)\n', (2921, 2940), True, 'import torch.nn as nn\n'), ((2996, 3044), 'torch.nn.ConvTranspose2d', 'nn.ConvTranspose2d', (['ngf', 'nc', '(4)', '(2)', '(1)'], {'bias': '(False)'}), '(ngf, nc, 4, 2, 1, bias=False)\n', (3014, 3044), True, 'import torch.nn as nn\n'), ((3067, 3079), 'torch.nn.Sigmoid', 'nn.Sigmoid', ([], {}), '()\n', (3077, 3079), True, 'import torch.nn as nn\n'), ((3179, 3227), 'torch.nn.Linear', 'nn.Linear', (['latent_variable_size', '(ngf * 8 * 2 * 2)'], {}), '(latent_variable_size, ngf * 8 * 2 * 2)\n', (3188, 3227), True, 'import torch.nn as nn\n'), ((3235, 3256), 'torch.nn.ReLU', 'nn.ReLU', ([], {'inplace': '(True)'}), '(inplace=True)\n', (3242, 3256), True, 'import torch.nn as nn\n'), ((4765, 4793), 'torch.nn.Linear', 'nn.Linear', (['n_input', 'n_hidden'], {}), '(n_input, n_hidden)\n', (4774, 4793), True, 'import torch.nn as nn\n'), ((4827, 4848), 'torch.nn.ReLU', 'nn.ReLU', ([], {'inplace': '(True)'}), '(inplace=True)\n', (4834, 4848), True, 'import torch.nn as nn\n'), ((4882, 4906), 'torch.nn.BatchNorm1d', 'nn.BatchNorm1d', (['n_hidden'], {}), '(n_hidden)\n', (4896, 4906), True, 'import torch.nn as nn\n'), ((4940, 4969), 'torch.nn.Linear', 'nn.Linear', (['n_hidden', 'n_hidden'], {}), '(n_hidden, n_hidden)\n', (4949, 4969), True, 'import torch.nn as nn\n'), ((5003, 5027), 'torch.nn.BatchNorm1d', 'nn.BatchNorm1d', (['n_hidden'], {}), '(n_hidden)\n', (5017, 5027), True, 'import torch.nn as nn\n'), ((5061, 5082), 'torch.nn.ReLU', 'nn.ReLU', ([], {'inplace': '(True)'}), '(inplace=True)\n', (5068, 5082), True, 'import torch.nn as nn\n'), ((5116, 5145), 'torch.nn.Linear', 'nn.Linear', (['n_hidden', 'n_hidden'], {}), '(n_hidden, n_hidden)\n', (5125, 5145), True, 'import torch.nn as nn\n'), ((5179, 5203), 'torch.nn.BatchNorm1d', 'nn.BatchNorm1d', (['n_hidden'], {}), '(n_hidden)\n', (5193, 5203), True, 'import torch.nn as nn\n'), ((5237, 5258), 'torch.nn.ReLU', 'nn.ReLU', ([], {'inplace': '(True)'}), '(inplace=True)\n', (5244, 5258), 
True, 'import torch.nn as nn\n'), ((5292, 5321), 'torch.nn.Linear', 'nn.Linear', (['n_hidden', 'n_hidden'], {}), '(n_hidden, n_hidden)\n', (5301, 5321), True, 'import torch.nn as nn\n'), ((5355, 5379), 'torch.nn.BatchNorm1d', 'nn.BatchNorm1d', (['n_hidden'], {}), '(n_hidden)\n', (5369, 5379), True, 'import torch.nn as nn\n'), ((5413, 5434), 'torch.nn.ReLU', 'nn.ReLU', ([], {'inplace': '(True)'}), '(inplace=True)\n', (5420, 5434), True, 'import torch.nn as nn\n'), ((5468, 5497), 'torch.nn.Linear', 'nn.Linear', (['n_hidden', 'n_hidden'], {}), '(n_hidden, n_hidden)\n', (5477, 5497), True, 'import torch.nn as nn\n'), ((5658, 5681), 'torch.nn.Linear', 'nn.Linear', (['nz', 'n_hidden'], {}), '(nz, n_hidden)\n', (5667, 5681), True, 'import torch.nn as nn\n'), ((5720, 5741), 'torch.nn.ReLU', 'nn.ReLU', ([], {'inplace': '(True)'}), '(inplace=True)\n', (5727, 5741), True, 'import torch.nn as nn\n'), ((5780, 5804), 'torch.nn.BatchNorm1d', 'nn.BatchNorm1d', (['n_hidden'], {}), '(n_hidden)\n', (5794, 5804), True, 'import torch.nn as nn\n'), ((5843, 5872), 'torch.nn.Linear', 'nn.Linear', (['n_hidden', 'n_hidden'], {}), '(n_hidden, n_hidden)\n', (5852, 5872), True, 'import torch.nn as nn\n'), ((5911, 5935), 'torch.nn.BatchNorm1d', 'nn.BatchNorm1d', (['n_hidden'], {}), '(n_hidden)\n', (5925, 5935), True, 'import torch.nn as nn\n'), ((5974, 5995), 'torch.nn.ReLU', 'nn.ReLU', ([], {'inplace': '(True)'}), '(inplace=True)\n', (5981, 5995), True, 'import torch.nn as nn\n'), ((6034, 6063), 'torch.nn.Linear', 'nn.Linear', (['n_hidden', 'n_hidden'], {}), '(n_hidden, n_hidden)\n', (6043, 6063), True, 'import torch.nn as nn\n'), ((6102, 6126), 'torch.nn.BatchNorm1d', 'nn.BatchNorm1d', (['n_hidden'], {}), '(n_hidden)\n', (6116, 6126), True, 'import torch.nn as nn\n'), ((6165, 6186), 'torch.nn.ReLU', 'nn.ReLU', ([], {'inplace': '(True)'}), '(inplace=True)\n', (6172, 6186), True, 'import torch.nn as nn\n'), ((6225, 6254), 'torch.nn.Linear', 'nn.Linear', (['n_hidden', 'n_hidden'], {}), 
'(n_hidden, n_hidden)\n', (6234, 6254), True, 'import torch.nn as nn\n'), ((6293, 6317), 'torch.nn.BatchNorm1d', 'nn.BatchNorm1d', (['n_hidden'], {}), '(n_hidden)\n', (6307, 6317), True, 'import torch.nn as nn\n'), ((6356, 6377), 'torch.nn.ReLU', 'nn.ReLU', ([], {'inplace': '(True)'}), '(inplace=True)\n', (6363, 6377), True, 'import torch.nn as nn\n'), ((6416, 6444), 'torch.nn.Linear', 'nn.Linear', (['n_hidden', 'n_input'], {}), '(n_hidden, n_input)\n', (6425, 6444), True, 'import torch.nn as nn\n'), ((7608, 7636), 'torch.nn.Linear', 'nn.Linear', (['n_input', 'n_hidden'], {}), '(n_input, n_hidden)\n', (7617, 7636), True, 'import torch.nn as nn\n'), ((7670, 7691), 'torch.nn.ReLU', 'nn.ReLU', ([], {'inplace': '(True)'}), '(inplace=True)\n', (7677, 7691), True, 'import torch.nn as nn\n'), ((7725, 7749), 'torch.nn.BatchNorm1d', 'nn.BatchNorm1d', (['n_hidden'], {}), '(n_hidden)\n', (7739, 7749), True, 'import torch.nn as nn\n'), ((7783, 7812), 'torch.nn.Linear', 'nn.Linear', (['n_hidden', 'n_hidden'], {}), '(n_hidden, n_hidden)\n', (7792, 7812), True, 'import torch.nn as nn\n'), ((7846, 7870), 'torch.nn.BatchNorm1d', 'nn.BatchNorm1d', (['n_hidden'], {}), '(n_hidden)\n', (7860, 7870), True, 'import torch.nn as nn\n'), ((7904, 7925), 'torch.nn.ReLU', 'nn.ReLU', ([], {'inplace': '(True)'}), '(inplace=True)\n', (7911, 7925), True, 'import torch.nn as nn\n'), ((7959, 7988), 'torch.nn.Linear', 'nn.Linear', (['n_hidden', 'n_hidden'], {}), '(n_hidden, n_hidden)\n', (7968, 7988), True, 'import torch.nn as nn\n'), ((8022, 8046), 'torch.nn.BatchNorm1d', 'nn.BatchNorm1d', (['n_hidden'], {}), '(n_hidden)\n', (8036, 8046), True, 'import torch.nn as nn\n'), ((8080, 8101), 'torch.nn.ReLU', 'nn.ReLU', ([], {'inplace': '(True)'}), '(inplace=True)\n', (8087, 8101), True, 'import torch.nn as nn\n'), ((8135, 8164), 'torch.nn.Linear', 'nn.Linear', (['n_hidden', 'n_hidden'], {}), '(n_hidden, n_hidden)\n', (8144, 8164), True, 'import torch.nn as nn\n'), ((8198, 8222), 'torch.nn.BatchNorm1d', 
'nn.BatchNorm1d', (['n_hidden'], {}), '(n_hidden)\n', (8212, 8222), True, 'import torch.nn as nn\n'), ((8256, 8277), 'torch.nn.ReLU', 'nn.ReLU', ([], {'inplace': '(True)'}), '(inplace=True)\n', (8263, 8277), True, 'import torch.nn as nn\n'), ((8311, 8334), 'torch.nn.Linear', 'nn.Linear', (['n_hidden', 'nz'], {}), '(n_hidden, nz)\n', (8320, 8334), True, 'import torch.nn as nn\n'), ((8408, 8431), 'torch.nn.Linear', 'nn.Linear', (['nz', 'n_hidden'], {}), '(nz, n_hidden)\n', (8417, 8431), True, 'import torch.nn as nn\n'), ((8470, 8491), 'torch.nn.ReLU', 'nn.ReLU', ([], {'inplace': '(True)'}), '(inplace=True)\n', (8477, 8491), True, 'import torch.nn as nn\n'), ((8530, 8554), 'torch.nn.BatchNorm1d', 'nn.BatchNorm1d', (['n_hidden'], {}), '(n_hidden)\n', (8544, 8554), True, 'import torch.nn as nn\n'), ((8593, 8622), 'torch.nn.Linear', 'nn.Linear', (['n_hidden', 'n_hidden'], {}), '(n_hidden, n_hidden)\n', (8602, 8622), True, 'import torch.nn as nn\n'), ((8661, 8685), 'torch.nn.BatchNorm1d', 'nn.BatchNorm1d', (['n_hidden'], {}), '(n_hidden)\n', (8675, 8685), True, 'import torch.nn as nn\n'), ((8724, 8745), 'torch.nn.ReLU', 'nn.ReLU', ([], {'inplace': '(True)'}), '(inplace=True)\n', (8731, 8745), True, 'import torch.nn as nn\n'), ((8784, 8813), 'torch.nn.Linear', 'nn.Linear', (['n_hidden', 'n_hidden'], {}), '(n_hidden, n_hidden)\n', (8793, 8813), True, 'import torch.nn as nn\n'), ((8852, 8876), 'torch.nn.BatchNorm1d', 'nn.BatchNorm1d', (['n_hidden'], {}), '(n_hidden)\n', (8866, 8876), True, 'import torch.nn as nn\n'), ((8915, 8936), 'torch.nn.ReLU', 'nn.ReLU', ([], {'inplace': '(True)'}), '(inplace=True)\n', (8922, 8936), True, 'import torch.nn as nn\n'), ((8975, 9004), 'torch.nn.Linear', 'nn.Linear', (['n_hidden', 'n_hidden'], {}), '(n_hidden, n_hidden)\n', (8984, 9004), True, 'import torch.nn as nn\n'), ((9043, 9067), 'torch.nn.BatchNorm1d', 'nn.BatchNorm1d', (['n_hidden'], {}), '(n_hidden)\n', (9057, 9067), True, 'import torch.nn as nn\n'), ((9106, 9127), 'torch.nn.ReLU', 
'nn.ReLU', ([], {'inplace': '(True)'}), '(inplace=True)\n', (9113, 9127), True, 'import torch.nn as nn\n'), ((9166, 9194), 'torch.nn.Linear', 'nn.Linear', (['n_hidden', 'n_input'], {}), '(n_hidden, n_input)\n', (9175, 9194), True, 'import torch.nn as nn\n'), ((9668, 9691), 'torch.nn.Linear', 'nn.Linear', (['nz', 'n_hidden'], {}), '(nz, n_hidden)\n', (9677, 9691), True, 'import torch.nn as nn\n'), ((9705, 9726), 'torch.nn.ReLU', 'nn.ReLU', ([], {'inplace': '(True)'}), '(inplace=True)\n', (9712, 9726), True, 'import torch.nn as nn\n'), ((9740, 9769), 'torch.nn.Linear', 'nn.Linear', (['n_hidden', 'n_hidden'], {}), '(n_hidden, n_hidden)\n', (9749, 9769), True, 'import torch.nn as nn\n'), ((9783, 9804), 'torch.nn.ReLU', 'nn.ReLU', ([], {'inplace': '(True)'}), '(inplace=True)\n', (9790, 9804), True, 'import torch.nn as nn\n'), ((9978, 10007), 'torch.nn.Linear', 'nn.Linear', (['n_hidden', 'n_hidden'], {}), '(n_hidden, n_hidden)\n', (9987, 10007), True, 'import torch.nn as nn\n'), ((10021, 10042), 'torch.nn.ReLU', 'nn.ReLU', ([], {'inplace': '(True)'}), '(inplace=True)\n', (10028, 10042), True, 'import torch.nn as nn\n'), ((10056, 10082), 'torch.nn.Linear', 'nn.Linear', (['n_hidden', 'n_out'], {}), '(n_hidden, n_out)\n', (10065, 10082), True, 'import torch.nn as nn\n'), ((10402, 10422), 'torch.nn.Linear', 'nn.Linear', (['nz', 'n_out'], {}), '(nz, n_out)\n', (10411, 10422), True, 'import torch.nn as nn\n')]
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
Software: Pre-Processing-01
Description:
    - Processes every file in the neg_ and pos_ directories of the dataset, which
      hold the records already split into individual .TXT files;
    - Converts all text to lowercase characters;
    - Removes numeric characters;
    - Removes special characters;
    - Removes stopwords using a stopword list;
    - Stemming reduces words to their root form;
    - Removes words shorter than 4 characters;
    - Adds labels: "1" positive or "0" negative to each file;
    - Removes generated garbage;
    - Joins all data into a single file. A new training dataset is thus generated
      with n rows ("documents") and 2 columns: the words in the 1st column and
      the label in the 2nd column;
    - Takes the generated training dataset and, based on it, generates a new
      test dataset "without labels".
Register: 21/05/2018
Authors:
    RodriguesFAS;
    "add your names here :)"
Email:
    <<EMAIL>>
"""
import re
import os
import csv
import sys
import nltk
# NOTE(review): reload() and sys.setdefaultencoding() only exist in Python 2;
# this script will not run unchanged under Python 3.
reload(sys)
sys.setdefaultencoding('utf-8')
# Corpus layout (absolute paths on the author's machine).
path_root = '/home/rodriguesfas/Workspace/k-nn/data/'
path_dataset = 'polarity-detection-200-reviews/'
path_neg = 'neg_100/'
path_pos = 'pos_100/'
path_out = 'out/'
file_stopwords = 'stopwords/stopwords.txt'
# Derived input/output paths.
path_list_stopwords = path_root + file_stopwords
files_neg = path_root + path_dataset + path_neg
files_pos = path_root + path_dataset + path_pos
path_train = path_root + path_out + 'generated-polarity-train.csv'
path_test = path_root + path_out + 'generated-polarity-test.csv'
# Class labels written into the generated CSV.
label_pos = '1'
label_neg = '0'
# When True, LOG() prints progress messages.
TEST = True
def LOG(text):
    """Print *text* as a progress message when the module-level TEST flag is on."""
    # Truthiness test instead of the "is True" identity comparison; TEST is a
    # plain boolean flag.
    if TEST:
        print(">> " + text)
def remove_numbers(document):
    """Replace digits (plus '-' and '|') with spaces and strip the result."""
    LOG('Remove numbers..')
    # NOTE(review): inside a character class the '|' is a literal pipe, not
    # alternation, so pipe characters are removed too -- confirm intended.
    without_digits = re.sub('[-|0-9]', ' ', document)
    return without_digits.strip()
def remove_special_characters(document):
    """Strip punctuation, line breaks and redundant whitespace from *document*."""
    LOG('Remove characters special..')
    cleaned = re.sub(r'[-_./?!,`":;=+()<>|@#$%&*^~\']', '', document)  # drop punctuation
    cleaned = "".join(cleaned.splitlines())  # remove line breaks
    cleaned = ' '.join(cleaned.split())      # collapse whitespace runs
    return cleaned.strip()
def clean_document(document):
    """Strip the tuple punctuation left over from str((words, label)) and
    re-insert the ", <label>" separator in front of the label digit."""
    LOG('Cleaning content..')
    # Order matters: ',' is removed *before* the label digits gain their
    # ", 0" / ", 1" prefixes, so the inserted commas survive.
    replacements = (
        ('(', ''), (')', ''), ("'", ''), (',', ''), ('\\n', ''),
        ('0', ', 0'), ('1', ', 1'), ('[', ''), (']', ''),
    )
    for old, new in replacements:
        document = document.replace(old, new)
    return document
def remove_very_small_words(document):
    """Delete every word of 3 characters or fewer and strip the result."""
    shortword_pattern = r'\b\w{1,3}\b'
    return re.sub(shortword_pattern, '', document).strip()
def remove_stopwords(document):
    """Remove stopwords (loaded from path_list_stopwords) from *document*.

    Returns the surviving words joined by single spaces.
    """
    LOG('Removing stopwords..')
    # Read the stopword file once and split it into a set of whole words.
    # The previous code tested membership against the raw file *string*, which
    # also dropped any word that occurred as a substring of some stopword, and
    # it leaked the file handle.
    with open(path_list_stopwords) as handle:
        stopwords = set(handle.read().split())
    kept = [word for word in document.split() if word not in stopwords]
    return " ".join(kept)
def stemmer(document):
    """Stem every word of *document* with NLTK's RSLP (Portuguese) stemmer."""
    LOG('Stemming..')
    # Bind the stemmer object under its own name instead of shadowing this
    # function's name, and build the result with a generator expression.
    rslp = nltk.stem.RSLPStemmer()
    return " ".join(rslp.stem(word) for word in document.split())
def load_document(path, label):
    """Pre-process every .txt file under *path*, tag each document with *label*,
    and append the cleaned "words, label" lines to the training CSV (path_train).
    """
    # "with" guarantees the output file is closed even if processing fails;
    # the original left both handles unclosed.
    with open(path_train, 'a+') as dataset_train:
        for file_name in os.listdir(path):
            LOG('Processing data file: ' + file_name)
            if not file_name.endswith('.txt'):
                continue
            with open(path + file_name) as handle:
                document = handle.read().lower()
            document = remove_numbers(document)
            document = remove_special_characters(str(document))
            document = remove_stopwords(document)
            document = stemmer(document)
            document = remove_very_small_words(str(document))
            LOG('Labeling content..')
            document = str(document), label  # (words, label) tuple
            document = clean_document(str(document))
            dataset_train.write(document + '\n')
    LOG('Dataset Train generated!')
def generater_test(path):
    """Generate the unlabeled test CSV (path_test) from the labeled training
    file at *path* by stripping the ", 0" / ", 1" label suffixes."""
    LOG('Generating dataset from test..')
    # "with" closes the output file even on error (the original leaked it).
    with open(path_test, 'a+') as dataset_test:
        with open(path) as documents:
            for document in documents:
                dataset_test.write(document.replace(', 0', '').replace(', 1', ''))
    LOG('Dataset Test generated!')
def main():
    """Run the full pre-processing pipeline end to end."""
    LOG('Started!')
    for message, files_path, label in (
        ('Processing directory neg_ ...', files_neg, label_neg),
        ('Processing directory pos_ ...', files_pos, label_pos),
    ):
        LOG(message)
        load_document(files_path, label)
    generater_test(path_train)
    LOG('Finalized!')
# Run
# Execute the pipeline only when this file is run as a script.
if __name__ == '__main__':
    main()
|
[
"nltk.stem.RSLPStemmer",
"re.sub",
"os.listdir",
"sys.setdefaultencoding"
] |
[((1239, 1270), 'sys.setdefaultencoding', 'sys.setdefaultencoding', (['"""utf-8"""'], {}), "('utf-8')\n", (1261, 1270), False, 'import sys\n'), ((2086, 2142), 're.sub', 're.sub', (['"""[-_./?!,`":;=+()<>|@#$%&*^~\\\\\']"""', '""""""', 'document'], {}), '(\'[-_./?!,`":;=+()<>|@#$%&*^~\\\\\\\']\', \'\', document)\n', (2092, 2142), False, 'import re\n'), ((2592, 2630), 're.sub', 're.sub', (['"""\\\\b\\\\w{1,3}\\\\b"""', '""""""', 'document'], {}), "('\\\\b\\\\w{1,3}\\\\b', '', document)\n", (2598, 2630), False, 'import re\n'), ((2990, 3013), 'nltk.stem.RSLPStemmer', 'nltk.stem.RSLPStemmer', ([], {}), '()\n', (3011, 3013), False, 'import nltk\n'), ((3229, 3245), 'os.listdir', 'os.listdir', (['path'], {}), '(path)\n', (3239, 3245), False, 'import os\n'), ((1948, 1980), 're.sub', 're.sub', (['"""[-|0-9]"""', '""" """', 'document'], {}), "('[-|0-9]', ' ', document)\n", (1954, 1980), False, 'import re\n')]
|
import torch as th
from copy import deepcopy
def knowledge_transfer(net2: th.nn.Module, old_state_path: str) -> None:
    """Warm-start ``net2`` with the weights of a (smaller) saved model.

    Loads the full model object pickled at ``old_state_path`` and copies as
    much of its state as fits into ``net2``'s parameter tensors; anything
    without an old counterpart is re-initialized with small random noise.
    Mutates ``net2`` in place via ``load_state_dict``.
    """
    print(f"Copied weights from {old_state_path}")
    # th.load here returns the whole pickled model object, not a state dict.
    net1 = th.load(old_state_path)
    old_state = net1.state_dict()
    n_layers_old = net1.transformer.n_layers
    n_head_old = net1.transformer.n_heads
    # Per-head dimensionality of the old and the new model.
    dk_old = net1.transformer.d_model // net1.transformer.n_heads
    dk_new = net2.transformer.d_model // net2.transformer.n_heads
    new_state = net2.state_dict()
    updated_state = deepcopy(new_state)
    for k in new_state:
        if k == "position_encoding" or k == "self_attn_mask":
            # Buffers tied to the new model's geometry: keep as created.
            continue
        elif "self_attn_norm" in k.split(".") or "ffn_norm" in k.split("."):
            # Normalization layers keep their fresh initialization.
            continue
        elif "attn_heads" in k.split("."):
            # Attention-head weights: start from small noise, then overlay the
            # slice of the old heads that maps onto this new head.
            updated_state[k] = th.randn_like(new_state[k]) * 0.001
            weight_name = k.split(".")
            # NOTE(review): assumes the key layout puts the layer index at
            # part 3 and the head index at part 6 -- confirm against the
            # transformer's module naming.
            layer_idx = int(weight_name[3])
            if layer_idx < n_layers_old:
                head_idx = int(weight_name[6])
                # Map the new head's row range [head_idx*dk_new,
                # head_idx*dk_new + dk_new) into old-model coordinates:
                # each entry is (old_head_index, row_offset_in_old_head).
                lst = [
                    (i // dk_old, i % dk_old)
                    for i in (head_idx * dk_new, head_idx * dk_new + dk_new)
                ]
                w = []
                if lst[0][0] == lst[1][0]:
                    # The whole new head lies inside a single old head.
                    if not lst[0][0] < n_head_old:
                        continue
                    weight_name_old = weight_name.copy()
                    weight_name_old[6] = str(lst[0][0])
                    k_old = ".".join(weight_name_old)
                    w.append(old_state[k_old][lst[0][1] : lst[1][1], :])
                else:
                    # The new head spans several old heads: concatenate the
                    # tail of the first, any full middle heads, and the start
                    # of the last -- skipping heads the old model never had.
                    for prev_head_idx in range(lst[0][0], lst[1][0] + 1):
                        if not prev_head_idx < n_head_old:
                            continue
                        weight_name_old = weight_name.copy()
                        weight_name_old[6] = str(prev_head_idx)
                        k_old = ".".join(weight_name_old)
                        if prev_head_idx == lst[0][0]:
                            w_dash = old_state[k_old][lst[0][1] :, :]
                            # print(rng,w_dash.shape)
                            w.append(w_dash)
                        elif prev_head_idx == lst[1][0]:
                            w_dash = old_state[k_old][: lst[1][1], :]
                            # print(rng, w_dash.shape)
                            w.append(w_dash)
                        else:
                            w.append(old_state[k_old])
                if w:
                    final_old_w = th.cat(w)
                    # Copy into the leading slice of each dimension; the noise
                    # initialization above covers the remainder.
                    dice = [slice(dim) for dim in final_old_w.shape]
                    updated_state[k][dice] = final_old_w
        else:
            # Every other parameter: small noise, then overlay the old tensor
            # (if present) into the leading slice of each dimension.
            updated_state[k] = th.randn_like(new_state[k]) * 0.001
            if k in old_state:
                dice = [slice(dim) for dim in old_state[k].shape]
                updated_state[k][dice] = old_state[k]
    net2.load_state_dict(updated_state)
|
[
"copy.deepcopy",
"torch.load",
"torch.cat",
"torch.randn_like"
] |
[((174, 197), 'torch.load', 'th.load', (['old_state_path'], {}), '(old_state_path)\n', (181, 197), True, 'import torch as th\n'), ((507, 526), 'copy.deepcopy', 'deepcopy', (['new_state'], {}), '(new_state)\n', (515, 526), False, 'from copy import deepcopy\n'), ((806, 833), 'torch.randn_like', 'th.randn_like', (['new_state[k]'], {}), '(new_state[k])\n', (819, 833), True, 'import torch as th\n'), ((2734, 2761), 'torch.randn_like', 'th.randn_like', (['new_state[k]'], {}), '(new_state[k])\n', (2747, 2761), True, 'import torch as th\n'), ((2545, 2554), 'torch.cat', 'th.cat', (['w'], {}), '(w)\n', (2551, 2554), True, 'import torch as th\n')]
|
## This Python file uses the following encoding: utf-8
from PyQt5.QtWidgets import *
from mainwindow import MainWindow
from call_standard import StandardPageWindow
import sys
if __name__ == '__main__':
    # Standard Qt bootstrap: create the one QApplication, show the main
    # window, then hand control to the Qt event loop until the app quits.
    app = QApplication(sys.argv)
    mainWindow = MainWindow()
    mainWindow.show()
    sys.exit(app.exec_())
|
[
"mainwindow.MainWindow"
] |
[((254, 266), 'mainwindow.MainWindow', 'MainWindow', ([], {}), '()\n', (264, 266), False, 'from mainwindow import MainWindow\n')]
|
# -*- coding: utf-8 -*-
# Generated by Django 1.9.1 on 2016-01-21 06:31
from __future__ import unicode_literals
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    # Auto-generated initial migration: creates the NavMenu model used to
    # configure navigation (nav bar or footer) around Wagtail pages.
    initial = True
    dependencies = [
        ('wagtailcore', '0023_alter_page_revision_on_delete_behaviour'),
    ]
    operations = [
        migrations.CreateModel(
            name='NavMenu',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('title', models.CharField(max_length=255)),
                ('footer', models.BooleanField(default=False, help_text='Select to display this menu in the footer rather than in the nav bar.')),
                ('order', models.PositiveSmallIntegerField(default=1, help_text='The order that this menu appears. Lower numbers appear first.')),
                ('url', models.ForeignKey(blank=True, help_text='Internal path to specific page', null=True, on_delete=django.db.models.deletion.CASCADE, related_name='+', to='wagtailcore.Page')),
            ],
            options={
                # Footer menus sort after nav-bar menus, then by explicit order.
                'ordering': ('footer', 'order'),
            },
        ),
    ]
|
[
"django.db.models.CharField",
"django.db.models.ForeignKey",
"django.db.models.PositiveSmallIntegerField",
"django.db.models.BooleanField",
"django.db.models.AutoField"
] |
[((473, 566), 'django.db.models.AutoField', 'models.AutoField', ([], {'auto_created': '(True)', 'primary_key': '(True)', 'serialize': '(False)', 'verbose_name': '"""ID"""'}), "(auto_created=True, primary_key=True, serialize=False,\n verbose_name='ID')\n", (489, 566), False, 'from django.db import migrations, models\n'), ((591, 623), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(255)'}), '(max_length=255)\n', (607, 623), False, 'from django.db import migrations, models\n'), ((653, 775), 'django.db.models.BooleanField', 'models.BooleanField', ([], {'default': '(False)', 'help_text': '"""Select to display this menu in the footer rather than in the nav bar."""'}), "(default=False, help_text=\n 'Select to display this menu in the footer rather than in the nav bar.')\n", (672, 775), False, 'from django.db import migrations, models\n'), ((799, 922), 'django.db.models.PositiveSmallIntegerField', 'models.PositiveSmallIntegerField', ([], {'default': '(1)', 'help_text': '"""The order that this menu appears. Lower numbers appear first."""'}), "(default=1, help_text=\n 'The order that this menu appears. Lower numbers appear first.')\n", (831, 922), False, 'from django.db import migrations, models\n'), ((944, 1123), 'django.db.models.ForeignKey', 'models.ForeignKey', ([], {'blank': '(True)', 'help_text': '"""Internal path to specific page"""', 'null': '(True)', 'on_delete': 'django.db.models.deletion.CASCADE', 'related_name': '"""+"""', 'to': '"""wagtailcore.Page"""'}), "(blank=True, help_text='Internal path to specific page',\n null=True, on_delete=django.db.models.deletion.CASCADE, related_name=\n '+', to='wagtailcore.Page')\n", (961, 1123), False, 'from django.db import migrations, models\n')]
|
import copy
import json
import logging
import time
import pytest
import test_utilities
from pytest_utilities import create_new_sifaddr
from pytest_utilities import generate_test_account, generate_minimal_test_account
from test_utilities import EthereumToSifchainTransferRequest
def test_ebrelayer_restart(
        basic_transfer_request: EthereumToSifchainTransferRequest,
        source_ethereum_address: str,
        integration_dir,
):
    """Restarting ebrelayer (keeping its last-block db) must not change a ceth balance."""
    basic_transfer_request.ethereum_address = source_ethereum_address
    request, _credentials = generate_minimal_test_account(
        base_transfer_request=basic_transfer_request,
        target_ceth_balance=10 ** 15
    )

    def ceth_balance():
        return test_utilities.get_sifchain_addr_balance(
            request.sifchain_address, request.sifnodecli_node, "ceth"
        )

    balance_before_restart = ceth_balance()
    logging.info("restart ebrelayer normally, leaving the last block db in place")
    test_utilities.get_shell_output(f"{integration_dir}/sifchain_start_ebrelayer.sh")
    test_utilities.advance_n_ethereum_blocks(test_utilities.n_wait_blocks, request.smart_contracts_dir)
    time.sleep(5)
    assert ceth_balance() == balance_before_restart
@pytest.mark.usefixtures("ensure_relayer_restart")
def test_ethereum_transactions_with_offline_relayer(
        basic_transfer_request: EthereumToSifchainTransferRequest,
        smart_contracts_dir,
        source_ethereum_address,
        bridgebank_address,
        integration_dir,
):
    """Lock transactions sent while ebrelayer is down must be picked up and
    credited once ebrelayer comes back up."""
    logging.info("shut down ebrelayer")
    test_utilities.get_shell_output("pkill -9 ebrelayer || true")
    logging.info("prepare transactions to be sent while ebrelayer is offline")
    amount = 9000
    # Comprehensions instead of list(map(lambda ...)) -- same values, clearer.
    new_addresses = [create_new_sifaddr() for _ in range(3)]
    logging.debug(f"new_addresses: {new_addresses}")
    request: EthereumToSifchainTransferRequest = copy.deepcopy(basic_transfer_request)
    requests = [
        {
            "amount": amount,
            "symbol": test_utilities.NULL_ADDRESS,
            "sifchain_address": addr,
        }
        for addr in new_addresses
    ]
    json_requests = json.dumps(requests)
    logging.info("send ethereum transactions while ebrelayer is offline")
    yarn_result = test_utilities.run_yarn_command(
        " ".join([
            f"yarn --cwd {smart_contracts_dir}",
            "integrationtest:sendBulkLockTx",
            f"--amount {amount}",
            "--symbol eth",
            f"--json_path {request.solidity_json_path}",
            f"--sifchain_address {new_addresses[0]}",
            f"--transactions '{json_requests}'",
            f"--ethereum_address {source_ethereum_address}",
            f"--bridgebank_address {bridgebank_address}"
        ])
    )
    logging.info(f"bulk result: {yarn_result}")
    logging.info("restart ebrelayer")
    test_utilities.get_shell_output(f"{integration_dir}/sifchain_start_ebrelayer.sh")
    test_utilities.advance_n_ethereum_blocks(test_utilities.n_wait_blocks, request.smart_contracts_dir)
    for address in new_addresses:
        # Each receiving account must now exist and hold the transferred ceth.
        test_utilities.wait_for_sif_account(address, basic_transfer_request.sifnodecli_node, 90)
        test_utilities.wait_for_sifchain_addr_balance(address, "ceth", amount, basic_transfer_request.sifnodecli_node, 90)
@pytest.mark.usefixtures("ensure_relayer_restart")
def test_sifchain_transactions_with_offline_relayer(
        basic_transfer_request: EthereumToSifchainTransferRequest,
        rowan_source_integrationtest_env_credentials: test_utilities.SifchaincliCredentials,
        rowan_source_integrationtest_env_transfer_request: EthereumToSifchainTransferRequest,
        rowan_source,
        smart_contracts_dir,
        source_ethereum_address,
        integration_dir,
):
    """Sifchain->ethereum burns submitted while ebrelayer is down must be
    delivered to the ethereum addresses once ebrelayer is restarted."""
    basic_transfer_request.ethereum_address = source_ethereum_address
    # Fund a test account with both ceth and rowan to burn from.
    request, credentials = generate_test_account(
        basic_transfer_request,
        rowan_source_integrationtest_env_transfer_request,
        rowan_source_integrationtest_env_credentials,
        target_ceth_balance=10 ** 19,
        target_rowan_balance=10 ** 19,
    )
    logging.info("shut down ebrelayer")
    test_utilities.get_shell_output(f"pkill -9 ebrelayer || true")
    logging.info("prepare transactions to be sent while ebrelayer is offline")
    amount = 9000
    new_eth_addrs = test_utilities.create_ethereum_addresses(
        smart_contracts_dir,
        basic_transfer_request.ethereum_network,
        2
    )
    request.amount = amount
    request.sifchain_symbol = "ceth"
    request.ethereum_symbol = "eth"
    # Send one sifchain->ethereum transfer per fresh ethereum address while
    # the relayer is offline.
    for a in new_eth_addrs:
        request.ethereum_address = a["address"]
        sifchain_balance = test_utilities.get_sifchain_addr_balance(request.sifchain_address, request.sifnodecli_node,
                                                                 "ceth")
        logging.info(f"sifchain balance is {sifchain_balance}, request is {request}")
        test_utilities.send_from_sifchain_to_ethereum(
            transfer_request=request,
            credentials=credentials
        )
    time.sleep(5)
    logging.info("restart ebrelayer")
    test_utilities.get_shell_output(f"{integration_dir}/sifchain_start_ebrelayer.sh")
    test_utilities.advance_n_ethereum_blocks(test_utilities.n_wait_blocks, request.smart_contracts_dir)
    # After the restart every target ethereum address must receive its eth.
    for a in new_eth_addrs:
        request.ethereum_address = a["address"]
        test_utilities.wait_for_eth_balance(request, amount, 600)
|
[
"pytest_utilities.generate_minimal_test_account",
"copy.deepcopy",
"test_utilities.wait_for_sif_account",
"logging.debug",
"test_utilities.create_ethereum_addresses",
"test_utilities.wait_for_eth_balance",
"test_utilities.get_sifchain_addr_balance",
"test_utilities.advance_n_ethereum_blocks",
"json.dumps",
"time.sleep",
"logging.info",
"pytest_utilities.generate_test_account",
"test_utilities.wait_for_sifchain_addr_balance",
"pytest_utilities.create_new_sifaddr",
"test_utilities.get_shell_output",
"test_utilities.send_from_sifchain_to_ethereum",
"pytest.mark.usefixtures"
] |
[((1261, 1310), 'pytest.mark.usefixtures', 'pytest.mark.usefixtures', (['"""ensure_relayer_restart"""'], {}), "('ensure_relayer_restart')\n", (1284, 1310), False, 'import pytest\n'), ((3291, 3340), 'pytest.mark.usefixtures', 'pytest.mark.usefixtures', (['"""ensure_relayer_restart"""'], {}), "('ensure_relayer_restart')\n", (3314, 3340), False, 'import pytest\n'), ((540, 649), 'pytest_utilities.generate_minimal_test_account', 'generate_minimal_test_account', ([], {'base_transfer_request': 'basic_transfer_request', 'target_ceth_balance': '(10 ** 15)'}), '(base_transfer_request=basic_transfer_request,\n target_ceth_balance=10 ** 15)\n', (569, 649), False, 'from pytest_utilities import generate_test_account, generate_minimal_test_account\n'), ((682, 786), 'test_utilities.get_sifchain_addr_balance', 'test_utilities.get_sifchain_addr_balance', (['request.sifchain_address', 'request.sifnodecli_node', '"""ceth"""'], {}), "(request.sifchain_address, request.\n sifnodecli_node, 'ceth')\n", (722, 786), False, 'import test_utilities\n'), ((786, 864), 'logging.info', 'logging.info', (['"""restart ebrelayer normally, leaving the last block db in place"""'], {}), "('restart ebrelayer normally, leaving the last block db in place')\n", (798, 864), False, 'import logging\n'), ((869, 955), 'test_utilities.get_shell_output', 'test_utilities.get_shell_output', (['f"""{integration_dir}/sifchain_start_ebrelayer.sh"""'], {}), "(\n f'{integration_dir}/sifchain_start_ebrelayer.sh')\n", (900, 955), False, 'import test_utilities\n'), ((955, 1058), 'test_utilities.advance_n_ethereum_blocks', 'test_utilities.advance_n_ethereum_blocks', (['test_utilities.n_wait_blocks', 'request.smart_contracts_dir'], {}), '(test_utilities.n_wait_blocks,\n request.smart_contracts_dir)\n', (995, 1058), False, 'import test_utilities\n'), ((1059, 1072), 'time.sleep', 'time.sleep', (['(5)'], {}), '(5)\n', (1069, 1072), False, 'import time\n'), ((1553, 1588), 'logging.info', 'logging.info', (['"""shut down 
ebrelayer"""'], {}), "('shut down ebrelayer')\n", (1565, 1588), False, 'import logging\n'), ((1593, 1655), 'test_utilities.get_shell_output', 'test_utilities.get_shell_output', (['f"""pkill -9 ebrelayer || true"""'], {}), "(f'pkill -9 ebrelayer || true')\n", (1624, 1655), False, 'import test_utilities\n'), ((1661, 1735), 'logging.info', 'logging.info', (['"""prepare transactions to be sent while ebrelayer is offline"""'], {}), "('prepare transactions to be sent while ebrelayer is offline')\n", (1673, 1735), False, 'import logging\n'), ((1830, 1878), 'logging.debug', 'logging.debug', (['f"""new_addresses: {new_addresses}"""'], {}), "(f'new_addresses: {new_addresses}')\n", (1843, 1878), False, 'import logging\n'), ((1928, 1965), 'copy.deepcopy', 'copy.deepcopy', (['basic_transfer_request'], {}), '(basic_transfer_request)\n', (1941, 1965), False, 'import copy\n'), ((2154, 2174), 'json.dumps', 'json.dumps', (['requests'], {}), '(requests)\n', (2164, 2174), False, 'import json\n'), ((2180, 2249), 'logging.info', 'logging.info', (['"""send ethereum transactions while ebrelayer is offline"""'], {}), "('send ethereum transactions while ebrelayer is offline')\n", (2192, 2249), False, 'import logging\n'), ((2779, 2822), 'logging.info', 'logging.info', (['f"""bulk result: {yarn_result}"""'], {}), "(f'bulk result: {yarn_result}')\n", (2791, 2822), False, 'import logging\n'), ((2828, 2861), 'logging.info', 'logging.info', (['"""restart ebrelayer"""'], {}), "('restart ebrelayer')\n", (2840, 2861), False, 'import logging\n'), ((2866, 2952), 'test_utilities.get_shell_output', 'test_utilities.get_shell_output', (['f"""{integration_dir}/sifchain_start_ebrelayer.sh"""'], {}), "(\n f'{integration_dir}/sifchain_start_ebrelayer.sh')\n", (2897, 2952), False, 'import test_utilities\n'), ((2952, 3055), 'test_utilities.advance_n_ethereum_blocks', 'test_utilities.advance_n_ethereum_blocks', (['test_utilities.n_wait_blocks', 'request.smart_contracts_dir'], {}), 
'(test_utilities.n_wait_blocks,\n request.smart_contracts_dir)\n', (2992, 3055), False, 'import test_utilities\n'), ((3857, 4072), 'pytest_utilities.generate_test_account', 'generate_test_account', (['basic_transfer_request', 'rowan_source_integrationtest_env_transfer_request', 'rowan_source_integrationtest_env_credentials'], {'target_ceth_balance': '(10 ** 19)', 'target_rowan_balance': '(10 ** 19)'}), '(basic_transfer_request,\n rowan_source_integrationtest_env_transfer_request,\n rowan_source_integrationtest_env_credentials, target_ceth_balance=10 **\n 19, target_rowan_balance=10 ** 19)\n', (3878, 4072), False, 'from pytest_utilities import generate_test_account, generate_minimal_test_account\n'), ((4112, 4147), 'logging.info', 'logging.info', (['"""shut down ebrelayer"""'], {}), "('shut down ebrelayer')\n", (4124, 4147), False, 'import logging\n'), ((4152, 4214), 'test_utilities.get_shell_output', 'test_utilities.get_shell_output', (['f"""pkill -9 ebrelayer || true"""'], {}), "(f'pkill -9 ebrelayer || true')\n", (4183, 4214), False, 'import test_utilities\n'), ((4220, 4294), 'logging.info', 'logging.info', (['"""prepare transactions to be sent while ebrelayer is offline"""'], {}), "('prepare transactions to be sent while ebrelayer is offline')\n", (4232, 4294), False, 'import logging\n'), ((4334, 4443), 'test_utilities.create_ethereum_addresses', 'test_utilities.create_ethereum_addresses', (['smart_contracts_dir', 'basic_transfer_request.ethereum_network', '(2)'], {}), '(smart_contracts_dir,\n basic_transfer_request.ethereum_network, 2)\n', (4374, 4443), False, 'import test_utilities\n'), ((5096, 5129), 'logging.info', 'logging.info', (['"""restart ebrelayer"""'], {}), "('restart ebrelayer')\n", (5108, 5129), False, 'import logging\n'), ((5134, 5220), 'test_utilities.get_shell_output', 'test_utilities.get_shell_output', (['f"""{integration_dir}/sifchain_start_ebrelayer.sh"""'], {}), "(\n f'{integration_dir}/sifchain_start_ebrelayer.sh')\n", (5165, 5220), False, 
'import test_utilities\n'), ((5220, 5323), 'test_utilities.advance_n_ethereum_blocks', 'test_utilities.advance_n_ethereum_blocks', (['test_utilities.n_wait_blocks', 'request.smart_contracts_dir'], {}), '(test_utilities.n_wait_blocks,\n request.smart_contracts_dir)\n', (5260, 5323), False, 'import test_utilities\n'), ((1095, 1199), 'test_utilities.get_sifchain_addr_balance', 'test_utilities.get_sifchain_addr_balance', (['request.sifchain_address', 'request.sifnodecli_node', '"""ceth"""'], {}), "(request.sifchain_address, request.\n sifnodecli_node, 'ceth')\n", (1135, 1199), False, 'import test_utilities\n'), ((3088, 3175), 'test_utilities.wait_for_sif_account', 'test_utilities.wait_for_sif_account', (['a', 'basic_transfer_request.sifnodecli_node', '(90)'], {}), '(a, basic_transfer_request.\n sifnodecli_node, 90)\n', (3123, 3175), False, 'import test_utilities\n'), ((3179, 3291), 'test_utilities.wait_for_sifchain_addr_balance', 'test_utilities.wait_for_sifchain_addr_balance', (['a', '"""ceth"""', 'amount', 'basic_transfer_request.sifnodecli_node', '(90)'], {}), "(a, 'ceth', amount,\n basic_transfer_request.sifnodecli_node, 90)\n", (3224, 3291), False, 'import test_utilities\n'), ((4676, 4780), 'test_utilities.get_sifchain_addr_balance', 'test_utilities.get_sifchain_addr_balance', (['request.sifchain_address', 'request.sifnodecli_node', '"""ceth"""'], {}), "(request.sifchain_address, request.\n sifnodecli_node, 'ceth')\n", (4716, 4780), False, 'import test_utilities\n'), ((4852, 4929), 'logging.info', 'logging.info', (['f"""sifchain balance is {sifchain_balance}, request is {request}"""'], {}), "(f'sifchain balance is {sifchain_balance}, request is {request}')\n", (4864, 4929), False, 'import logging\n'), ((4938, 5038), 'test_utilities.send_from_sifchain_to_ethereum', 'test_utilities.send_from_sifchain_to_ethereum', ([], {'transfer_request': 'request', 'credentials': 'credentials'}), '(transfer_request=request,\n credentials=credentials)\n', (4983, 5038), False, 
'import test_utilities\n'), ((5077, 5090), 'time.sleep', 'time.sleep', (['(5)'], {}), '(5)\n', (5087, 5090), False, 'import time\n'), ((5404, 5461), 'test_utilities.wait_for_eth_balance', 'test_utilities.wait_for_eth_balance', (['request', 'amount', '(600)'], {}), '(request, amount, 600)\n', (5439, 5461), False, 'import test_utilities\n'), ((1793, 1813), 'pytest_utilities.create_new_sifaddr', 'create_new_sifaddr', ([], {}), '()\n', (1811, 1813), False, 'from pytest_utilities import create_new_sifaddr\n')]
|
import setuptools
import os, sys
# Make the in-tree package importable so its version can be read below.
sys.path.append(os.getcwd())
import mpdb
# Long description for PyPI, taken verbatim from the README.
# encoding is pinned so the build does not depend on the platform locale.
with open("README.md", "r", encoding="utf-8") as fh:
    long_description = fh.read()
# Build the Cython extension modules when Cython is available; fall back to a
# pure-Python install otherwise.  Catch Exception rather than using a bare
# except so KeyboardInterrupt/SystemExit still propagate.
try:
    from Cython.Build import cythonize
    cem = cythonize("mpdb/eq/*.pyx")
except Exception:
    cem = []
setuptools.setup(
    author="<NAME>",
    author_email="<EMAIL>",
    classifiers=[
        "Programming Language :: Python :: 3",
        "License :: OSI Approved :: BSD License",
        "Operating System :: POSIX :: Linux",
    ],
    description="material property data base",
    include_package_data=True,
    packages=setuptools.find_packages(),
    #packages = ['.'],
    install_requires=[
        'appdirs',
        'tabulate',],
    extras_require={
        "libreOffice":['uno',],
        "coolprop":['CoolProp',],
        "yawsImport":["requests",],
        "scipy": ["scipy",],
        "cython": ["Cython",],
    },
    ext_modules=cem,
    long_description=long_description,
    long_description_content_type="text/markdown",
    name="mpdb",
    url="https://github.com/stuart-nolan/mpdb.git",
    version=mpdb.__version__,
    zip_safe = False
)
# Reference data set used by mpdb:
# https://acp.copernicus.org/articles/15/4399/2015/acp-15-4399-2015-supplement.zip
|
[
"os.getcwd",
"Cython.Build.cythonize",
"setuptools.find_packages"
] |
[((49, 60), 'os.getcwd', 'os.getcwd', ([], {}), '()\n', (58, 60), False, 'import os, sys\n'), ((198, 224), 'Cython.Build.cythonize', 'cythonize', (['"""mpdb/eq/*.pyx"""'], {}), "('mpdb/eq/*.pyx')\n", (207, 224), False, 'from Cython.Build import cythonize\n'), ((573, 599), 'setuptools.find_packages', 'setuptools.find_packages', ([], {}), '()\n', (597, 599), False, 'import setuptools\n')]
|
#!/usr/bin/env python
# Parses the nl80211.h interface and generate appropriate enums and fields
# (value_string) for packet-netlink-nl80211.c
#
# Copyright (c) 2017, <NAME> <<EMAIL>>
# Copyright (c) 2018, <NAME> <<EMAIL>>
#
# Wireshark - Network traffic analyzer
# By Ger<NAME> <<EMAIL>>
# Copyright 1998 Gerald Combs
#
# SPDX-License-Identifier: GPL-2.0-or-later
#
#
# To update the dissector source file, run this from the source directory:
#
# python tools/generate-nl80211-fields.py --update
#
import argparse
import re
import requests
import sys
# Begin of comment, followed by the actual array definition
HEADER = "/* Definitions from linux/nl80211.h {{{ */\n"
FOOTER = "/* }}} */\n"
# Enums to extract from the header file.  Each entry maps an enum name in
# nl80211.h to (field_name, field_type, field_blurb); None entries select the
# defaults that make_hfi() fills in ('Attribute Type' / FT_UINT16 / NULL).
# NOTE: the duplicate 'nl80211_txq_stats' entry present in an earlier revision
# has been removed — duplicate dict keys silently override each other.
EXPORT_ENUMS = {
    # 'enum_name': ('field_name', field_type', 'field_blurb')
    'nl80211_commands': ('Command', 'FT_UINT8', '"Generic Netlink Command"'),
    'nl80211_attrs': (None, None, None),
    'nl80211_iftype': (None, None, None),
    'nl80211_sta_flags': (None, None, None),
    'nl80211_rate_info': (None, None, None),
    'nl80211_sta_bss_param': (None, None, None),
    'nl80211_sta_info': (None, None, None),
    'nl80211_tid_stats': (None, None, None),
    'nl80211_mpath_info': (None, None, None),
    'nl80211_mntr_flags': (None, None, None),
    'nl80211_bss': (None, None, None),
    'nl80211_key_attributes': (None, None, None),
    'nl80211_survey_info': (None, None, None),
    'nl80211_frequency_attr': (None, None, None),
    'nl80211_tx_rate_attributes': (None, None, None),
    'nl80211_attr_cqm': (None, None, None),
    'nl80211_key_default_types': (None, None, None),
    'nl80211_mesh_setup_params': (None, None, None),
    'nl80211_meshconf_params': (None, None, None),
    'nl80211_if_combination_attrs': (None, None, None),
    'nl80211_rekey_data': (None, None, None),
    'nl80211_sta_wme_attr': (None, None, None),
    'nl80211_pmksa_candidate_attr': (None, None, None),
    'nl80211_sched_scan_plan': (None, None, None),
    'nl80211_bss_select_attr': (None, None, None),
    'nl80211_nan_func_attributes': (None, None, None),
    'nl80211_nan_match_attributes': (None, None, None),
    'nl80211_txq_stats': (None, None, None),
    'nl80211_band_attr': (None, None, None),
    'nl80211_bitrate_attr': (None, None, None),
    'nl80211_reg_rule_attr': (None, None, None),
    'nl80211_txq_attr': (None, None, None),
    'nl80211_band_iftype_attr': (None, None, None),
    'nl80211_dfs_state': (None, None, None),
    'nl80211_wmm_rule': (None, None, None),
    'nl80211_sched_scan_match_attr': (None, None, None),
    'nl80211_chan_width': ('Attribute Value', 'FT_UINT32', None),
    'nl80211_channel_type': ('Attribute Value', 'FT_UINT32', None),
    'plink_actions': ('Attribute Value', 'FT_UINT8', None),
    'nl80211_reg_initiator': ('Attribute Value', 'FT_UINT8', None),
    'nl80211_reg_type': ('Attribute Value', 'FT_UINT8', None),
    'nl80211_auth_type': ('Attribute Value', 'FT_UINT32', None),
    'nl80211_key_type': ('Attribute Value', 'FT_UINT32', None),
    'nl80211_bss_status': ('Attribute Value', 'FT_UINT32', None),
    'nl80211_bss_scan_width': ('Attribute Value', 'FT_UINT32', None),
    'nl80211_mfp': ('Attribute Value', 'FT_UINT32', None),
    'nl80211_ps_state': ('Attribute Value', 'FT_UINT32', None),
    'nl80211_tx_power_setting': ('Attribute Value', 'FT_UINT32', None),
    'nl80211_plink_state': ('Attribute Value', 'FT_UINT8', None),
    'nl80211_tdls_operation': ('Attribute Value', 'FT_UINT8', None),
    'nl80211_user_reg_hint_type': ('Attribute Value', 'FT_UINT32', None),
    'nl80211_connect_failed_reason': ('Attribute Value', 'FT_UINT32', None),
    'nl80211_mesh_power_mode': ('Attribute Value', 'FT_UINT32', None),
    'nl80211_acl_policy': ('Attribute Value', 'FT_UINT32', None),
    'nl80211_radar_event': ('Attribute Value', 'FT_UINT32', None),
    'nl80211_crit_proto_id': ('Attribute Value', 'FT_UINT16', None),
    'nl80211_smps_mode': ('Attribute Value', 'FT_UINT8', None),
    'nl80211_sta_p2p_ps_status': ('Attribute Value', 'FT_UINT8', None),
    'nl80211_timeout_reason': ('Attribute Value', 'FT_UINT32', None),
    'nl80211_external_auth_action': ('Attribute Value', 'FT_UINT32', None),
    'nl80211_dfs_regions': ('Attribute Value', 'FT_UINT8', None),
}
# File to be patched
SOURCE_FILE = "epan/dissectors/packet-netlink-nl80211.c"
# URL where the latest version can be found
URL = "https://git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git/plain/include/uapi/linux/nl80211.h"
def make_enum(name, values, expressions, indent):
    """Render a C enum declaration ``enum ws_<name>`` from parsed members.

    Value expressions that reference NL80211 identifiers are rewritten with
    the WS_ prefix so they refer to the generated enum's own members.
    """
    lines = ['enum ws_%s {\n' % name]
    for member, expr in zip(values, expressions):
        if expr and 'NL80211' in expr:
            expr = 'WS_%s' % expr
        suffix = ' = %s' % expr if expr else ''
        lines.append('%sWS_%s%s,\n' % (indent, member, suffix))
    lines.append('};\n')
    return ''.join(lines)
def make_value_string(name, values, indent):
    """Render the value_string array plus its value_string_ext wrapper.

    Entries are left-padded so the string literals line up in one column.
    """
    align = 40
    parts = ['static const value_string ws_%s_vals[] = {\n' % name]
    for value in values:
        entry = ('{ WS_%s,' % value).ljust(align - 1)
        parts.append('%s%s "%s" },\n' % (indent, entry, value))
    parts.append('%s{ 0, NULL }\n' % indent)
    parts.append('};\n')
    parts.append('static value_string_ext ws_%s_vals_ext =' % name)
    parts.append(' VALUE_STRING_EXT_INIT(ws_%s_vals);\n' % name)
    return ''.join(parts)
def make_hfi(name, indent):
    """Render the header_field_info (hfi) definition for one enum.

    Field name, type and blurb come from EXPORT_ENUMS; None entries fall
    back to the attribute-type defaults below.
    """
    (field_name, field_type, field_blurb) = EXPORT_ENUMS.get(name)
    field_abbrev = name

    # Fill in default values
    if not field_name:
        field_name = 'Attribute Type'
    if not field_type:
        field_type = 'FT_UINT16'
    if not field_blurb:
        field_blurb = 'NULL'

    # Special treatment of already existing field names
    rename_fields = {
        'nl80211_attrs': 'nl80211_attr_type',
        'nl80211_commands': 'nl80211_cmd'
    }
    if rename_fields.get(name):
        field_abbrev = rename_fields[name]
    # Drop the leading "nl80211_" namespace from the filter abbreviation.
    # BUG FIX: str.lstrip('nl80211_') strips a *character set*, not a prefix,
    # which mangled names such as nl80211_nan_func_attributes into
    # "an_func_attributes"; remove the prefix explicitly instead.
    prefix = 'nl80211_'
    if field_abbrev.startswith(prefix):
        field_abbrev = field_abbrev[len(prefix):]

    code = 'static header_field_info hfi_%s NETLINK_NL80211_HFI_INIT =\n' % name
    code += indent + '{ "%s", "nl80211.%s", %s, BASE_DEC | BASE_EXT_STRING,\n' % \
        (field_name, field_abbrev, field_type)
    code += indent + '  VALS_EXT_PTR(&ws_%s_vals_ext), 0x00, %s, HFILL };\n' % (name, field_blurb)
    return code
def make_ett_defs(name, indent):
    """Render the subtree (ett) variable definition for one enum.

    ``indent`` is accepted for signature parity with the other make_*
    helpers but is not used here.
    """
    return 'static gint ett_%s = -1;' % name
def make_hfi_init(name, indent):
    """Render one hfi registration entry (a doubly indented array element)."""
    return '%s%s&hfi_%s,' % (indent, indent, name)
def make_ett(name, indent):
    """Render one ett registration entry (a doubly indented array element)."""
    return '%s%s&ett_%s,' % (indent, indent, name)
class EnumStore(object):
    """Collects the member names and value expressions of one C enum."""

    # Matches one enumerator: the name, an optional trailing comment, an
    # optional "= expression", terminated by a comma or end of line.
    __VALUE_PATTERN = re.compile(
        r'\s+?(?P<value>\w+)(?:\ /\*.*?\*\/)?(?:\s*=\s*(?P<expression>.*?))?(?:\s*,|$)',
        re.MULTILINE | re.DOTALL)

    def __init__(self, name, values):
        self.name = name
        self.values = []
        self.expressions = []
        self.active = True
        self.parse_values(values)

    def parse_values(self, values):
        """Extract enumerator names/expressions, stopping at sentinels."""
        for match in self.__VALUE_PATTERN.finditer(values):
            value, expression = match.groups()
            # Sentinel members (NUM_*, *_AFTER_LAST, *_LAST, __*_NUM) mark
            # the end of the real enumerators.  *_AFTER_LAST is covered by
            # the *_LAST suffix test.
            if (value.startswith('NUM_') or value.endswith('_LAST') or
                    (value.startswith('__') and value.endswith('_NUM'))):
                break
            if expression and expression in self.values:
                # Skip aliases of already-seen members
                continue
            self.values.append(value)
            self.expressions.append(expression)

    def finish(self):
        """Return the collected (name, values, expressions) triple."""
        return self.name, self.values, self.expressions
# Matches a full "enum <name> { ... };" definition (comments pre-stripped).
RE_ENUM = re.compile(
    r'enum\s+?(?P<enum>\w+)\s+?\{(?P<values>.*?)\}\;',
    re.MULTILINE | re.DOTALL)
# Matches a single C block comment.
RE_COMMENT = re.compile(r'/\*.*?\*/', re.MULTILINE | re.DOTALL)

def parse_header(content):
    """Parse nl80211.h text and return the enums listed in EXPORT_ENUMS.

    Returns a list of (name, values, expressions) triples as produced by
    EnumStore.finish(), in the order the enums appear in the header.
    """
    # Strip comments first so they cannot confuse the enum regex.
    stripped = re.sub(RE_COMMENT, '', content)
    return [
        EnumStore(m.group('enum'), m.group('values')).finish()
        for m in RE_ENUM.finditer(stripped)
        if m.group('enum') in EXPORT_ENUMS
    ]
def parse_source():
    """
    Reads the source file and tries to split it in the parts before, inside and
    after the block.

    The dissector source contains exactly two generated regions, each
    bracketed by HEADER/FOOTER marker lines.  The file is split into three
    (begin, block, end) triples: the text around the first generated region,
    the text between the two regions, and the text around the second one.
    Raises RuntimeError if the markers are missing or unbalanced.
    """
    begin, block, end = '', '', ''
    parts = []
    # Stages: 1 (before block), 2 (in block, skip), 3 (after block)
    stage = 1
    with open(SOURCE_FILE) as f:
        for line in f:
            # Footer closes the current generated region.
            if line == FOOTER and stage == 2:
                stage = 3 # End of block
            # Accumulate the line into the part matching the current stage.
            if stage == 1:
                begin += line
            elif stage == 2:
                block += line
            elif stage == 3:
                end += line
            # Header lines switch into the generated region *after* being
            # stored, so the markers themselves stay in begin/end.
            if line == HEADER and stage == 1:
                stage = 2 # Begin of block
            if line == HEADER and stage == 3:
                stage = 2 # Begin of next code block
                # Finish the previous triple and start collecting a new one.
                parts.append((begin, block, end))
                begin, block, end = '', '', ''
    parts.append((begin, block, end))
    # Sanity check: we must end after a footer, with three triples total.
    if stage != 3 or len(parts) != 3:
        raise RuntimeError("Could not parse file (in stage %d) (parts %d)" % (stage, len(parts)))
    return parts
# Command-line interface.  Defined at module level so --help is available
# before main() runs any network or file I/O.
parser = argparse.ArgumentParser()
parser.add_argument("--update", action="store_true",
                    help="Update %s as needed instead of writing to stdout" % SOURCE_FILE)
parser.add_argument("--indent", default=" " * 4,
                    help="indentation (use \\t for tabs, default 4 spaces)")
parser.add_argument("header_file", nargs="?", default=URL,
                    help="nl80211.h header file (use - for stdin or a HTTP(S) URL, "
                    "default %(default)s)")
def main():
    """Fetch/read nl80211.h, generate the C code and print or patch it."""
    args = parser.parse_args()
    indent = args.indent.replace("\\t", "\t")
    # The header can come from a URL, from stdin ("-") or from a local file.
    if any(args.header_file.startswith(proto) for proto in ('http:', 'https')):
        r = requests.get(args.header_file)
        r.raise_for_status()
        enums = parse_header(r.text)
    elif args.header_file == "-":
        enums = parse_header(sys.stdin.read())
    else:
        with open(args.header_file) as f:
            enums = parse_header(f.read())
    # Every enum listed in EXPORT_ENUMS must have been found in the header.
    assert len(enums) == len(EXPORT_ENUMS), \
        "Could not parse data, found %d/%d results" % \
        (len(enums), len(EXPORT_ENUMS))
    # Build the three generated regions: definitions + value_strings + hfi +
    # ett definitions (code_top), then the two registration arrays.
    code_enums, code_vals, code_hfi, code_ett_defs, code_hfi_init, code_ett = '', '', '', '', '', ''
    for enum_name, enum_values, expressions in enums:
        code_enums += make_enum(enum_name, enum_values, expressions, indent) + '\n'
        code_vals += make_value_string(enum_name, enum_values, indent) + '\n'
        code_hfi += make_hfi(enum_name, indent) + '\n'
        code_ett_defs += make_ett_defs(enum_name, indent) + '\n'
        code_hfi_init += make_hfi_init(enum_name, indent) + '\n'
        code_ett += make_ett(enum_name, indent) + '\n'
    code_top = code_enums + code_vals + code_hfi + code_ett_defs
    code_top = code_top.rstrip("\n") + "\n"
    code = [code_top, code_hfi_init, code_ett]
    update = False
    if args.update:
        parts = parse_source()
        # Check if file needs update
        for (begin, old_code, end), new_code in zip(parts, code):
            if old_code != new_code:
                update = True
                break
        if not update:
            print("File is up-to-date")
            return
        # Update file: rewrite each region, keeping the surrounding text.
        with open(SOURCE_FILE, "w") as f:
            for (begin, old_code, end), new_code in zip(parts, code):
                f.write(begin)
                f.write(new_code)
                f.write(end)
        print("Updated %s" % SOURCE_FILE)
    else:
        # No --update: dump the generated regions to stdout for inspection.
        for new_code in code:
            print(new_code)

if __name__ == '__main__':
    main()
#
# Editor modelines - https://www.wireshark.org/tools/modelines.html
#
# Local variables:
# c-basic-offset: 4
# tab-width: 8
# indent-tabs-mode: nil
# End:
#
# vi: set shiftwidth=4 tabstop=8 expandtab:
# :indentSize=4:tabSize=8:noTabs=true:
#
|
[
"sys.stdin.read",
"argparse.ArgumentParser",
"requests.get",
"re.sub",
"re.compile"
] |
[((7737, 7834), 're.compile', 're.compile', (['"""enum\\\\s+?(?P<enum>\\\\w+)\\\\s+?\\\\{(?P<values>.*?)\\\\}\\\\;"""', '(re.MULTILINE | re.DOTALL)'], {}), "('enum\\\\s+?(?P<enum>\\\\w+)\\\\s+?\\\\{(?P<values>.*?)\\\\}\\\\;', re.\n MULTILINE | re.DOTALL)\n", (7747, 7834), False, 'import re\n'), ((7847, 7898), 're.compile', 're.compile', (['"""/\\\\*.*?\\\\*/"""', '(re.MULTILINE | re.DOTALL)'], {}), "('/\\\\*.*?\\\\*/', re.MULTILINE | re.DOTALL)\n", (7857, 7898), False, 'import re\n'), ((9304, 9329), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {}), '()\n', (9327, 9329), False, 'import argparse\n'), ((6715, 6850), 're.compile', 're.compile', (['"""\\\\s+?(?P<value>\\\\w+)(?:\\\\ /\\\\*.*?\\\\*\\\\/)?(?:\\\\s*=\\\\s*(?P<expression>.*?))?(?:\\\\s*,|$)"""', '(re.MULTILINE | re.DOTALL)'], {}), "(\n '\\\\s+?(?P<value>\\\\w+)(?:\\\\ /\\\\*.*?\\\\*\\\\/)?(?:\\\\s*=\\\\s*(?P<expression>.*?))?(?:\\\\s*,|$)'\n , re.MULTILINE | re.DOTALL)\n", (6725, 6850), False, 'import re\n'), ((7961, 7992), 're.sub', 're.sub', (['RE_COMMENT', '""""""', 'content'], {}), "(RE_COMMENT, '', content)\n", (7967, 7992), False, 'import re\n'), ((9929, 9959), 'requests.get', 'requests.get', (['args.header_file'], {}), '(args.header_file)\n', (9941, 9959), False, 'import requests\n'), ((10089, 10105), 'sys.stdin.read', 'sys.stdin.read', ([], {}), '()\n', (10103, 10105), False, 'import sys\n')]
|
# Generated by Django 3.0.6 on 2020-06-10 14:43
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    """Make Certificate.user nullable and add the Category model.

    Category is a self-referential tree (``parent`` FK to itself with
    SET_NULL); ``(slug, parent)`` is unique, i.e. slugs are unique per level.
    """

    dependencies = [
        migrations.swappable_dependency(settings.AUTH_USER_MODEL),
        ('core', '0001_initial'),
    ]

    operations = [
        # Allow certificates without an owning user (null=True added).
        migrations.AlterField(
            model_name='certificate',
            name='user',
            field=models.ForeignKey(null=True, on_delete=django.db.models.deletion.CASCADE, related_name='certificates', to=settings.AUTH_USER_MODEL),
        ),
        migrations.CreateModel(
            name='Category',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('name', models.CharField(max_length=255)),
                ('slug', models.SlugField()),
                # Root categories have parent=None; deleting a parent
                # detaches (SET_NULL) rather than cascading to children.
                ('parent', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.SET_NULL, related_name='children', to='core.Category')),
            ],
            options={
                'verbose_name_plural': 'categories',
                'unique_together': {('slug', 'parent')},
            },
        ),
    ]
|
[
"django.db.migrations.swappable_dependency",
"django.db.models.ForeignKey",
"django.db.models.CharField",
"django.db.models.SlugField",
"django.db.models.AutoField"
] |
[((227, 284), 'django.db.migrations.swappable_dependency', 'migrations.swappable_dependency', (['settings.AUTH_USER_MODEL'], {}), '(settings.AUTH_USER_MODEL)\n', (258, 284), False, 'from django.db import migrations, models\n'), ((458, 593), 'django.db.models.ForeignKey', 'models.ForeignKey', ([], {'null': '(True)', 'on_delete': 'django.db.models.deletion.CASCADE', 'related_name': '"""certificates"""', 'to': 'settings.AUTH_USER_MODEL'}), "(null=True, on_delete=django.db.models.deletion.CASCADE,\n related_name='certificates', to=settings.AUTH_USER_MODEL)\n", (475, 593), False, 'from django.db import migrations, models\n'), ((707, 800), 'django.db.models.AutoField', 'models.AutoField', ([], {'auto_created': '(True)', 'primary_key': '(True)', 'serialize': '(False)', 'verbose_name': '"""ID"""'}), "(auto_created=True, primary_key=True, serialize=False,\n verbose_name='ID')\n", (723, 800), False, 'from django.db import migrations, models\n'), ((824, 856), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(255)'}), '(max_length=255)\n', (840, 856), False, 'from django.db import migrations, models\n'), ((884, 902), 'django.db.models.SlugField', 'models.SlugField', ([], {}), '()\n', (900, 902), False, 'from django.db import migrations, models\n'), ((932, 1068), 'django.db.models.ForeignKey', 'models.ForeignKey', ([], {'blank': '(True)', 'null': '(True)', 'on_delete': 'django.db.models.deletion.SET_NULL', 'related_name': '"""children"""', 'to': '"""core.Category"""'}), "(blank=True, null=True, on_delete=django.db.models.\n deletion.SET_NULL, related_name='children', to='core.Category')\n", (949, 1068), False, 'from django.db import migrations, models\n')]
|
import bottle
from genetic import GA
ga = GA()  # shared genetic-algorithm state for the whole app
app = bottle.Bottle()
@app.get('/')
def home():
    """Render the home page from the GA's current data."""
    data = ga.get_data()
    return bottle.template('home.html', **data)
@app.get('/mark-hit/<ident>')
def mark_hit(ident):
    """Record a hit for the individual identified by *ident* (empty response)."""
    ga.mark_hit(ident)
if __name__ == '__main__':
    app.run(debug=True)
|
[
"bottle.template",
"genetic.GA",
"bottle.Bottle"
] |
[((44, 48), 'genetic.GA', 'GA', ([], {}), '()\n', (46, 48), False, 'from genetic import GA\n'), ((55, 70), 'bottle.Bottle', 'bottle.Bottle', ([], {}), '()\n', (68, 70), False, 'import bottle\n'), ((135, 171), 'bottle.template', 'bottle.template', (['"""home.html"""'], {}), "('home.html', **data)\n", (150, 171), False, 'import bottle\n')]
|
from sympy import sympify, Symbol
x = Symbol('x')  # symbolic variable used by every expression in this module

def metodo_nuevo(x_0, f, tol=1e-6, iter_max=100):
    """Derivative-free root finding ('new method').

    Parameters:
        x_0: initial guess.
        f: expression in x (string accepted; converted via sympify).
        tol: positive error tolerance.  Defaults to 1e-6 — the original
             signature had no default, so the example calls in __main__
             (which only pass iter_max) raised TypeError.
        iter_max: maximum number of iterations (default 100).

    Returns [x_k, error, k, D, A]: final approximation, final error, number
    of iterations used, and the iteration-index/error histories.

    Raises ValueError if tol is not positive.
    """
    if tol <= 0:
        raise ValueError('Tolerancia no debe ser cero.')
    f = sympify(f)
    k = 0
    x_k = x_0
    error = tol + 1  # force at least one iteration
    D = []  # iteration indices
    A = []  # error at each iteration
    # Iterate until the tolerance is met or the iteration budget runs out.
    while error > tol and k < iter_max:
        x_k = calc_sgte(x_k, f)
        error = calc_error(x_k, f)
        D.append(k)
        A.append(error)
        k += 1
    return [x_k, error, k, D, A]
def calc_error(x_k, f):
    """Return |f(x_k)|, the residual used as the error estimate."""
    residual = f.subs(x, x_k)
    return abs(residual)
def calc_z(x_k, f):
    """Return the auxiliary point z = x_k + 2*f_k^2 / (f(x_k+f_k) - f(x_k-f_k))."""
    f_k = f.subs(x, x_k)
    forward = f.subs(x, x_k + f_k)
    backward = f.subs(x, x_k - f_k)
    return x_k + 2 * f_k ** 2 / (forward - backward)
def calc_sgte(x_k, f):
    """Compute the next iterate x_{k+1} from x_k.

    Uses the auxiliary point from calc_z() to build the correction term.
    The result is rounded to 6 decimal places.
    """
    f_k = f.subs(x, x_k)
    z = calc_z(x_k, f)  # auxiliary point for the correction term
    den = f.subs(x, x_k + f_k) - f.subs(x, x_k - f_k)
    correction = 2 * f_k * (f.subs(x, z) - f_k) / den
    return round(x_k - correction, 6)
if __name__ == '__main__':
    # Each test supplies: initial guess x0, the equation, a tolerance and a
    # maximum iteration count.  tol is passed explicitly — the original
    # calls omitted it even though metodo_nuevo declares it as a required
    # positional parameter, so every call raised TypeError.
    # Test 1. Example (b)
    result = metodo_nuevo(1.5, 'x^3+4*x^2-10', tol=1e-6, iter_max=7)
    print(result)
    # Test 2. Example (h)
    result = metodo_nuevo(0.7, 'x^2-exp(x)-3*x+2', tol=1e-6, iter_max=7)
    print(result)
    # Test 3. Example (e)
    result = metodo_nuevo(2, 'x^3-10', tol=1e-6, iter_max=7)
    print(result)
    # Test 4. Example (c)
    result = metodo_nuevo(1, 'cos(x)-x', tol=1e-6, iter_max=6)
    print(result)
|
[
"sympy.Symbol",
"sympy.sympify"
] |
[((39, 50), 'sympy.Symbol', 'Symbol', (['"""x"""'], {}), "('x')\n", (45, 50), False, 'from sympy import sympify, Symbol\n'), ((306, 316), 'sympy.sympify', 'sympify', (['f'], {}), '(f)\n', (313, 316), False, 'from sympy import sympify, Symbol\n')]
|
import numpy as np
from matplotlib import pyplot as plt
from matplotlib.lines import Line2D
import matplotlib.ticker as ticker
def movingAverage(x, window):
    """Return the centered moving average of *x* with the given window.

    Near the edges the window is truncated to the available samples.  The
    output array mirrors the input's dtype (zeros_like), so means are cast
    on assignment.
    """
    smoothed = np.zeros_like(x)
    half = (window - 1) // 2
    # For even windows the right half is one sample wider than the left.
    right = half + (2 - (window % 2))
    n = len(x)
    for i in range(n):
        lo = max(0, i - half)
        hi = min(n, i + right)
        smoothed[i] = np.mean(x[lo:hi])
    return smoothed
def computeAverage(x, window, idx):
    """Mean of the slice of *x* ending at *idx* (exclusive).

    The slice starts at idx - window - 1 (at most window + 1 samples),
    clipped at the start of the sequence.
    """
    start = max(0, idx - window - 1)
    return np.mean(x[start:idx])
def plot(predict_values, gt):
    """Plot predicted speeds against ground truth over frame numbers.

    Opens an interactive matplotlib window (plt.show() blocks until it is
    closed).  Removed a dead ``start, end = ax.get_xlim()`` assignment whose
    results were never used.
    """
    fig, ax = plt.subplots()
    ax.plot(np.arange(len(gt)), gt, label='ground truth')
    ax.plot(np.arange(len(predict_values)), np.array(predict_values), label='predict')
    # Tick the speed axis every 5 mph, labels with one decimal place.
    ax.yaxis.set_ticks(np.arange(0, max(gt) + 10, 5.0))
    ax.yaxis.set_major_formatter(ticker.FormatStrFormatter('%0.1f'))
    ax.legend(loc='upper left')
    plt.xlabel('Frame num.')
    plt.ylabel('Speed [mph]')
    # ax.figure.savefig('result.png', bbox_inches='tight')
    plt.show()
|
[
"numpy.zeros_like",
"matplotlib.pyplot.show",
"numpy.mean",
"numpy.array",
"matplotlib.ticker.FormatStrFormatter",
"matplotlib.pyplot.ylabel",
"matplotlib.pyplot.xlabel",
"matplotlib.pyplot.subplots"
] |
[((168, 184), 'numpy.zeros_like', 'np.zeros_like', (['x'], {}), '(x)\n', (181, 184), True, 'import numpy as np\n'), ((458, 481), 'numpy.mean', 'np.mean', (['x[min_idx:idx]'], {}), '(x[min_idx:idx])\n', (465, 481), True, 'import numpy as np\n'), ((527, 541), 'matplotlib.pyplot.subplots', 'plt.subplots', ([], {}), '()\n', (539, 541), True, 'from matplotlib import pyplot as plt\n'), ((879, 903), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""Frame num."""'], {}), "('Frame num.')\n", (889, 903), True, 'from matplotlib import pyplot as plt\n'), ((908, 933), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Speed [mph]"""'], {}), "('Speed [mph]')\n", (918, 933), True, 'from matplotlib import pyplot as plt\n'), ((997, 1007), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (1005, 1007), True, 'from matplotlib import pyplot as plt\n'), ((334, 355), 'numpy.mean', 'np.mean', (['x[idx1:idx2]'], {}), '(x[idx1:idx2])\n', (341, 355), True, 'import numpy as np\n'), ((644, 668), 'numpy.array', 'np.array', (['predict_values'], {}), '(predict_values)\n', (652, 668), True, 'import numpy as np\n'), ((807, 841), 'matplotlib.ticker.FormatStrFormatter', 'ticker.FormatStrFormatter', (['"""%0.1f"""'], {}), "('%0.1f')\n", (832, 841), True, 'import matplotlib.ticker as ticker\n')]
|
#!/usr/bin/env python
# _*_ coding: utf-8 _*_
import os
import argparse
from pymatflow.qe.static import StaticRun
"""
usage:
"""
# Quantum ESPRESSO namelist overrides (&control/&system/&electrons) and the
# bands.x input dict; filled in from the CLI arguments and handed to StaticRun.
control = {}
system = {}
electrons = {}
bands = {}
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument("-d", "--directory", type=str, default="tmp-qe-static",
            help="Directory for the static running.")
    parser.add_argument("-f", "--file", type=str,
            help="The xyz file containing the structure to be simulated.")
    parser.add_argument("--runopt", type=str, default="gen",
            choices=["gen", "run", "genrun"],
            help="Generate or run or both at the same time.")
    parser.add_argument("--auto", type=int, default=3,
            help="auto:0 nothing, 1: copying files to server, 2: copying and executing in remote server, 3: pymatflow used in server with direct submit, in order use auto=1, 2, you must make sure there is a working ~/.pymatflow/server_[pbs|yh].conf")
    # -------------------------------------------------------------------
    # scf related parameters
    # -------------------------------------------------------------------
    parser.add_argument("--ecutwfc", type=int, default=100,
            help="Kinetic energy cutoff for wave functions in unit of Rydberg, default value: 100 Ry")
    parser.add_argument("--ecutrho", type=int, default=None,
            help="Kinetic energy cutoff for charge density and potential in unit of Rydberg, default value: None")
    parser.add_argument("--kpoints-option", type=str, default="crystal_b",
            choices=["automatic", "gamma", "crystal_b"],
            help="Kpoints generation scheme option for band calculation")
    parser.add_argument("--kpoints-mp", type=str, nargs="+",
            default=[1, 1, 1, 0, 0, 0],
            help="Monkhorst-Pack kpoint grid, in format like --kpoints-mp 1 1 1 0 0 0")
    parser.add_argument("--crystal-b", type=str, nargs="+", default=None,
            help="manual input kpath in crystal_b, like --crystal-b '0.000000 0.000000 0.000000 GAMMA 5' '0.500000 0.000000 0.000000 X 5' '0.0000 0.000 0.50000 A |' '0.5 0.5 0.5 R '")
    parser.add_argument("--crystal-b-file", type=str, default='kpath-from-seekpath.txt',
            help="manual input kpath in crystal_b read from the file")
    parser.add_argument("--conv-thr", type=float, default=1.0e-6,
            help="Convergence threshold for SCF calculation.")
    parser.add_argument("--occupations", type=str, default="smearing",
            choices=["smearing", "tetrahedra", "tetrahedra_lin", "tetrahedra_opt", "fixed", "from_input"],
            help="Occupation method for the calculation.")
    parser.add_argument("--smearing", type=str, default="gaussian",
            choices=["gaussian", "methfessel-paxton", "marzari-vanderbilt", "fermi-dirac"],
            help="Smearing type for occupations by smearing, default is gaussian in this script")
    parser.add_argument("--degauss", type=float, default=0.001,
            help="Value of the gaussian spreading (Ry) for brillouin-zone integration in metals.(defualt: 0.001 Ry)")
    parser.add_argument("--vdw-corr", type=str, default="none",
            choices=["dft-d", "dft-d3", "ts", "xdm"],
            help="Type of Van der Waals correction in the calculation")
    parser.add_argument("--nbnd", type=int, default=None,
            help="Number of electronic states (bands) to be calculated")
    # -----------------------------------------
    # bands.x related parameters
    # -----------------------------------------
    parser.add_argument("--lsym", type=str, default=".true.",
            choices=[".true.", ".false."],
            help="set lsym variable in bands.x input.")
    # -----------------------------------------------------------------
    # run params
    # -----------------------------------------------------------------
    parser.add_argument("--mpi", type=str, default="",
            help="MPI command: like 'mpirun -np 4'")
    parser.add_argument("--server", type=str, default="pbs",
            choices=["pbs", "yh"],
            help="type of remote server, can be pbs or yh")
    parser.add_argument("--jobname", type=str, default="qe-band-structure",
            help="jobname on the pbs server")
    parser.add_argument("--nodes", type=int, default=1,
            help="Nodes used in server")
    parser.add_argument("--ppn", type=int, default=32,
            help="ppn of the server")
    # ==========================================================
    # transfer parameters from the arg parser to opt_run setting
    # ==========================================================
    args = parser.parse_args()
    system["occupations"] = args.occupations
    system["smearing"] = args.smearing
    system["degauss"] = args.degauss
    bands["lsym"] = args.lsym
    # --------------------------------------------------------------
    # process crystal_b
    # Each point is parsed into [kx, ky, kz, LABEL, connection], where
    # connection is the number of points to the next label, or '|' to mark
    # a break in the k-path.
    if args.crystal_b != None:
        # crystal_b from script argument args.crystal_b
        crystal_b = []
        for kpoint in args.crystal_b:
            if kpoint.split()[4] != "|":
                crystal_b.append([
                    float(kpoint.split()[0]),
                    float(kpoint.split()[1]),
                    float(kpoint.split()[2]),
                    kpoint.split()[3].upper(),
                    int(kpoint.split()[4]),
                ])
            elif kpoint.split()[4] == "|":
                crystal_b.append([
                    float(kpoint.split()[0]),
                    float(kpoint.split()[1]),
                    float(kpoint.split()[2]),
                    kpoint.split()[3].upper(),
                    "|",
                ])
    elif args.crystal_b == None:
        # crystal_b read from file specified by args.crystal_b_file
        # file is in format like this
        """
        5
        0.0 0.0 0.0 #GAMMA 15
        x.x x.x x.x #XXX |
        x.x x.x x.x #XXX 10
        x.x x.x x.x #XXX 15
        x.x x.x x.x #XXX 20
        """
        # if there is a '|' behind the label it means the path is
        # broken after that point!!!
        crystal_b = []
        with open(args.crystal_b_file, 'r') as fin:
            crystal_b_file = fin.readlines()
            nk = int(crystal_b_file[0])
            for i in range(nk):
                if crystal_b_file[i+1].split("\n")[0].split()[4] != "|":
                    crystal_b.append([
                        float(crystal_b_file[i+1].split()[0]),
                        float(crystal_b_file[i+1].split()[1]),
                        float(crystal_b_file[i+1].split()[2]),
                        crystal_b_file[i+1].split()[3].split("#")[1].upper(),
                        int(crystal_b_file[i+1].split()[4]),
                    ])
                elif crystal_b_file[i+1].split("\n")[0].split()[4] == "|":
                    crystal_b.append([
                        float(crystal_b_file[i+1].split()[0]),
                        float(crystal_b_file[i+1].split()[1]),
                        float(crystal_b_file[i+1].split()[2]),
                        crystal_b_file[i+1].split()[3].split("#")[1].upper(),
                        '|',
                    ])
                else:
                    pass
    # --------------------------------------------------------------------
    # Build and (depending on --runopt/--auto) generate or submit the
    # band-structure calculation.
    task = StaticRun()
    task.get_xyz(args.file)
    task.set_kpoints(kpoints_option=args.kpoints_option, kpoints_mp=args.kpoints_mp, crystal_b=crystal_b)
    task.set_params(control=control, system=system, electrons=electrons)
    task.set_bands(bands_input=bands)
    task.set_run(mpi=args.mpi, server=args.server, jobname=args.jobname, nodes=args.nodes, ppn=args.ppn)
    task.bands(directory=args.directory, runopt=args.runopt, auto=args.auto)
|
[
"pymatflow.qe.static.StaticRun",
"argparse.ArgumentParser"
] |
[((245, 270), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {}), '()\n', (268, 270), False, 'import argparse\n'), ((7538, 7549), 'pymatflow.qe.static.StaticRun', 'StaticRun', ([], {}), '()\n', (7547, 7549), False, 'from pymatflow.qe.static import StaticRun\n')]
|
import pytest
from hypothesis import assume, given
from pfun import compose, identity
from pfun.aio_trampoline import Done
from pfun.hypothesis_strategies import aio_trampolines, anything, unaries
from .monad_test import MonadTest
class TestTrampoline(MonadTest):
@pytest.mark.asyncio
@given(aio_trampolines(anything()))
async def test_right_identity_law(self, trampoline):
assert (await
trampoline.and_then(Done).run()) == (await trampoline.run())
@pytest.mark.asyncio
@given(anything(), unaries(aio_trampolines(anything())))
async def test_left_identity_law(self, value, f):
assert (await Done(value).and_then(f).run()) == (await f(value).run())
@pytest.mark.asyncio
@given(
aio_trampolines(anything()),
unaries(aio_trampolines(anything())),
unaries(aio_trampolines(anything()))
)
async def test_associativity_law(self, trampoline, f, g):
assert (await trampoline.and_then(f).and_then(g).run(
)) == (await trampoline.and_then(lambda x: f(x).and_then(g)).run())
@given(anything())
def test_equality(self, value):
assert Done(value) == Done(value)
@given(anything(), anything())
def test_inequality(self, first, second):
assume(first != second)
assert Done(first) != Done(second)
@pytest.mark.asyncio
@given(anything())
async def test_identity_law(self, value):
assert (await
Done(value).map(identity).run()) == (await Done(value).run())
@pytest.mark.asyncio
@given(unaries(anything()), unaries(anything()), anything())
async def test_composition_law(self, f, g, value):
h = compose(f, g)
assert (await Done(value).map(g).map(f).run()
) == (await Done(value).map(h).run())
|
[
"pfun.aio_trampoline.Done",
"pfun.compose",
"pfun.hypothesis_strategies.anything",
"hypothesis.assume"
] |
[((526, 536), 'pfun.hypothesis_strategies.anything', 'anything', ([], {}), '()\n', (534, 536), False, 'from pfun.hypothesis_strategies import aio_trampolines, anything, unaries\n'), ((1093, 1103), 'pfun.hypothesis_strategies.anything', 'anything', ([], {}), '()\n', (1101, 1103), False, 'from pfun.hypothesis_strategies import aio_trampolines, anything, unaries\n'), ((1273, 1296), 'hypothesis.assume', 'assume', (['(first != second)'], {}), '(first != second)\n', (1279, 1296), False, 'from hypothesis import assume, given\n'), ((1195, 1205), 'pfun.hypothesis_strategies.anything', 'anything', ([], {}), '()\n', (1203, 1205), False, 'from pfun.hypothesis_strategies import aio_trampolines, anything, unaries\n'), ((1207, 1217), 'pfun.hypothesis_strategies.anything', 'anything', ([], {}), '()\n', (1215, 1217), False, 'from pfun.hypothesis_strategies import aio_trampolines, anything, unaries\n'), ((1377, 1387), 'pfun.hypothesis_strategies.anything', 'anything', ([], {}), '()\n', (1385, 1387), False, 'from pfun.hypothesis_strategies import aio_trampolines, anything, unaries\n'), ((1693, 1706), 'pfun.compose', 'compose', (['f', 'g'], {}), '(f, g)\n', (1700, 1706), False, 'from pfun import compose, identity\n'), ((1614, 1624), 'pfun.hypothesis_strategies.anything', 'anything', ([], {}), '()\n', (1622, 1624), False, 'from pfun.hypothesis_strategies import aio_trampolines, anything, unaries\n'), ((320, 330), 'pfun.hypothesis_strategies.anything', 'anything', ([], {}), '()\n', (328, 330), False, 'from pfun.hypothesis_strategies import aio_trampolines, anything, unaries\n'), ((771, 781), 'pfun.hypothesis_strategies.anything', 'anything', ([], {}), '()\n', (779, 781), False, 'from pfun.hypothesis_strategies import aio_trampolines, anything, unaries\n'), ((1156, 1167), 'pfun.aio_trampoline.Done', 'Done', (['value'], {}), '(value)\n', (1160, 1167), False, 'from pfun.aio_trampoline import Done\n'), ((1171, 1182), 'pfun.aio_trampoline.Done', 'Done', (['value'], {}), '(value)\n', (1175, 
1182), False, 'from pfun.aio_trampoline import Done\n'), ((1312, 1323), 'pfun.aio_trampoline.Done', 'Done', (['first'], {}), '(first)\n', (1316, 1323), False, 'from pfun.aio_trampoline import Done\n'), ((1327, 1339), 'pfun.aio_trampoline.Done', 'Done', (['second'], {}), '(second)\n', (1331, 1339), False, 'from pfun.aio_trampoline import Done\n'), ((1580, 1590), 'pfun.hypothesis_strategies.anything', 'anything', ([], {}), '()\n', (1588, 1590), False, 'from pfun.hypothesis_strategies import aio_trampolines, anything, unaries\n'), ((1601, 1611), 'pfun.hypothesis_strategies.anything', 'anything', ([], {}), '()\n', (1609, 1611), False, 'from pfun.hypothesis_strategies import aio_trampolines, anything, unaries\n'), ((562, 572), 'pfun.hypothesis_strategies.anything', 'anything', ([], {}), '()\n', (570, 572), False, 'from pfun.hypothesis_strategies import aio_trampolines, anything, unaries\n'), ((816, 826), 'pfun.hypothesis_strategies.anything', 'anything', ([], {}), '()\n', (824, 826), False, 'from pfun.hypothesis_strategies import aio_trampolines, anything, unaries\n'), ((862, 872), 'pfun.hypothesis_strategies.anything', 'anything', ([], {}), '()\n', (870, 872), False, 'from pfun.hypothesis_strategies import aio_trampolines, anything, unaries\n'), ((1516, 1527), 'pfun.aio_trampoline.Done', 'Done', (['value'], {}), '(value)\n', (1520, 1527), False, 'from pfun.aio_trampoline import Done\n'), ((652, 663), 'pfun.aio_trampoline.Done', 'Done', (['value'], {}), '(value)\n', (656, 663), False, 'from pfun.aio_trampoline import Done\n'), ((1473, 1484), 'pfun.aio_trampoline.Done', 'Done', (['value'], {}), '(value)\n', (1477, 1484), False, 'from pfun.aio_trampoline import Done\n'), ((1789, 1800), 'pfun.aio_trampoline.Done', 'Done', (['value'], {}), '(value)\n', (1793, 1800), False, 'from pfun.aio_trampoline import Done\n'), ((1729, 1740), 'pfun.aio_trampoline.Done', 'Done', (['value'], {}), '(value)\n', (1733, 1740), False, 'from pfun.aio_trampoline import Done\n')]
|
import os
from datetime import datetime
from distutils.util import strtobool
import discord
from discord.ext import commands
from dotenv import load_dotenv
load_dotenv()
LOCKDOWN_ACTIVE = bool(os.getenv('LOCKDOWN_ACTIVE'))
class Administration(commands.Cog):
@commands.command()
@commands.has_permissions(ban_members=True)
async def ban(self, ctx, member : discord.Member, *, reason=None):
action = 'ban'
await member.ban(reason=reason)
await ctx.send(f'{member} has been banned.')
await self.log_action(action)
@commands.command()
@commands.has_permissions(ban_members=True)
async def unban(self, ctx, member):
action = 'unban'
banned_users = await ctx.guid.bans()
member_name, member_discriminator = member.split('#')
for ban_entry in banned_users:
user = ban_entry.user
if(user.name, user.discriminator) == (member_name, member_discriminator):
await ctx.guild.unban(user)
await ctx.send(f'{user} has been unbanned.')
await self.log_action(action)
@commands.command()
@commands.has_permissions(kick_members=True)
async def kick(self, ctx, member : discord.Member, *, reason=None):
action = 'kick'
await member.kick(reason=reason)
await ctx.send(f'{member} has been kicked.')
await self.log_action(action)
@commands.command(aliases=['clean','cls','sweep','purge'])
@commands.has_permissions(manage_messages=True)
async def clear(self, ctx, amount=None):
action = 'clear'
amount = int(amount)
await ctx.channel.purge(limit=amount+1)
await self.log_action(action)
def log_action(self, action):
with open('.\Data\AdminLogs.txt', 'a') as file:
file.write(f'{action} occured at {datetime.now()}\n')
file.close()
@commands.command()
async def whois(self, ctx, member: discord.Member = None):
if not member:
member = ctx.message.author
roles = [role for role in member.roles]
embed = discord.Embed(colour=ctx.author.color, timestamp=ctx.message.created_at,
title=str(member))
embed.set_thumbnail(url=member.avatar_url)
embed.set_footer(text=f"Requested by {ctx.author}")
embed.add_field(name="Display Name:", value=member.display_name)
embed.add_field(name="ID:", value=member.id)
embed.add_field(name="Created Account On:", value=member.created_at.strftime("%a, %#d %B %Y, %I:%M %p UTC"))
embed.add_field(name="Joined Server On:", value=member.joined_at.strftime("%a, %#d %B %Y, %I:%M %p UTC"))
embed.add_field(name="Roles:", value="".join([role.mention for role in roles[1:]]))
embed.add_field(name="Highest Role:", value=member.top_role.mention)
await ctx.send(embed=embed)
#DEBUG WIP LISTENER UNCOMMENT AND SETUP ENVIRONMENT TO LOCKDOWN SERVER
# @commands.Cog.listener()
# async def on_member_join(self, member):
# if LOCKDOWN_ACTIVE == True:
# print(LOCKDOWN_ACTIVE)
# await member.send('Server is still a WIP please try again later')
# await member.kick(reason='Server is still a WIP please try again later')
# print('member kicked')
# else:
# print(f'member not kicked lockdown = {LOCKDOWN_ACTIVE}')
def setup(bot):
bot.add_cog(Administration(bot))
|
[
"discord.ext.commands.command",
"discord.ext.commands.has_permissions",
"dotenv.load_dotenv",
"datetime.datetime.now",
"os.getenv"
] |
[((158, 171), 'dotenv.load_dotenv', 'load_dotenv', ([], {}), '()\n', (169, 171), False, 'from dotenv import load_dotenv\n'), ((196, 224), 'os.getenv', 'os.getenv', (['"""LOCKDOWN_ACTIVE"""'], {}), "('LOCKDOWN_ACTIVE')\n", (205, 224), False, 'import os\n'), ((274, 292), 'discord.ext.commands.command', 'commands.command', ([], {}), '()\n', (290, 292), False, 'from discord.ext import commands\n'), ((298, 340), 'discord.ext.commands.has_permissions', 'commands.has_permissions', ([], {'ban_members': '(True)'}), '(ban_members=True)\n', (322, 340), False, 'from discord.ext import commands\n'), ((572, 590), 'discord.ext.commands.command', 'commands.command', ([], {}), '()\n', (588, 590), False, 'from discord.ext import commands\n'), ((596, 638), 'discord.ext.commands.has_permissions', 'commands.has_permissions', ([], {'ban_members': '(True)'}), '(ban_members=True)\n', (620, 638), False, 'from discord.ext import commands\n'), ((1121, 1139), 'discord.ext.commands.command', 'commands.command', ([], {}), '()\n', (1137, 1139), False, 'from discord.ext import commands\n'), ((1145, 1188), 'discord.ext.commands.has_permissions', 'commands.has_permissions', ([], {'kick_members': '(True)'}), '(kick_members=True)\n', (1169, 1188), False, 'from discord.ext import commands\n'), ((1423, 1483), 'discord.ext.commands.command', 'commands.command', ([], {'aliases': "['clean', 'cls', 'sweep', 'purge']"}), "(aliases=['clean', 'cls', 'sweep', 'purge'])\n", (1439, 1483), False, 'from discord.ext import commands\n'), ((1486, 1532), 'discord.ext.commands.has_permissions', 'commands.has_permissions', ([], {'manage_messages': '(True)'}), '(manage_messages=True)\n', (1510, 1532), False, 'from discord.ext import commands\n'), ((1915, 1933), 'discord.ext.commands.command', 'commands.command', ([], {}), '()\n', (1931, 1933), False, 'from discord.ext import commands\n'), ((1855, 1869), 'datetime.datetime.now', 'datetime.now', ([], {}), '()\n', (1867, 1869), False, 'from datetime import datetime\n')]
|
# SPDX-License-Identifier: Apache-2.0
"""
Tests scikit-normalizer converter.
"""
import unittest
import numpy
from sklearn.preprocessing import Normalizer
from skl2onnx import convert_sklearn
from skl2onnx.common.data_types import (
Int64TensorType, FloatTensorType, DoubleTensorType)
from test_utils import dump_data_and_model, TARGET_OPSET
class TestSklearnNormalizerConverter(unittest.TestCase):
def test_model_normalizer(self):
model = Normalizer(norm="l2")
model_onnx = convert_sklearn(
model, "scikit-learn normalizer",
[("input", Int64TensorType([None, 1]))],
target_opset=TARGET_OPSET)
self.assertTrue(model_onnx is not None)
self.assertTrue(len(model_onnx.graph.node) == 1)
def test_model_normalizer_blackop(self):
model = Normalizer(norm="l2")
model_onnx = convert_sklearn(
model, "scikit-learn normalizer",
[("input", FloatTensorType([None, 3]))],
target_opset=TARGET_OPSET,
black_op={"Normalizer"})
self.assertNotIn('op_type: "Normalizer', str(model_onnx))
dump_data_and_model(
numpy.array([[1, -1, 3], [3, 1, 2]], dtype=numpy.float32),
model, model_onnx,
basename="SklearnNormalizerL1BlackOp-SkipDim1")
def test_model_normalizer_float_l1(self):
model = Normalizer(norm="l1")
model_onnx = convert_sklearn(
model, "scikit-learn normalizer",
[("input", FloatTensorType([None, 3]))],
target_opset=TARGET_OPSET)
self.assertTrue(model_onnx is not None)
self.assertTrue(len(model_onnx.graph.node) == 1)
dump_data_and_model(
numpy.array([[1, -1, 3], [3, 1, 2]], dtype=numpy.float32),
model, model_onnx,
basename="SklearnNormalizerL1-SkipDim1")
def test_model_normalizer_float_l2(self):
model = Normalizer(norm="l2")
model_onnx = convert_sklearn(
model, "scikit-learn normalizer",
[("input", FloatTensorType([None, 3]))],
target_opset=TARGET_OPSET)
self.assertTrue(model_onnx is not None)
self.assertTrue(len(model_onnx.graph.node) == 1)
dump_data_and_model(
numpy.array([[1, -1, 3], [3, 1, 2]], dtype=numpy.float32),
model, model_onnx,
basename="SklearnNormalizerL2-SkipDim1")
def test_model_normalizer_double_l1(self):
model = Normalizer(norm="l1")
model_onnx = convert_sklearn(
model, "scikit-learn normalizer",
[("input", DoubleTensorType([None, 3]))],
target_opset=TARGET_OPSET)
self.assertTrue(model_onnx is not None)
dump_data_and_model(
numpy.array([[1, -1, 3], [3, 1, 2]], dtype=numpy.float64),
model, model_onnx,
basename="SklearnNormalizerL1Double-SkipDim1")
def test_model_normalizer_double_l2(self):
model = Normalizer(norm="l2")
model_onnx = convert_sklearn(
model, "scikit-learn normalizer",
[("input", DoubleTensorType([None, 3]))],
target_opset=TARGET_OPSET)
self.assertTrue(model_onnx is not None)
dump_data_and_model(
numpy.array([[1, -1, 3], [3, 1, 2]], dtype=numpy.float64),
model, model_onnx,
basename="SklearnNormalizerL2Double-SkipDim1")
def test_model_normalizer_float_noshape(self):
model = Normalizer(norm="l2")
model_onnx = convert_sklearn(
model, "scikit-learn normalizer",
[("input", FloatTensorType([]))],
target_opset=TARGET_OPSET)
self.assertTrue(model_onnx is not None)
self.assertTrue(len(model_onnx.graph.node) == 1)
dump_data_and_model(
numpy.array([[1, -1, 3], [3, 1, 2]], dtype=numpy.float32),
model, model_onnx,
basename="SklearnNormalizerL2NoShape-SkipDim1")
if __name__ == "__main__":
unittest.main()
|
[
"unittest.main",
"skl2onnx.common.data_types.DoubleTensorType",
"skl2onnx.common.data_types.Int64TensorType",
"skl2onnx.common.data_types.FloatTensorType",
"numpy.array",
"sklearn.preprocessing.Normalizer"
] |
[((4006, 4021), 'unittest.main', 'unittest.main', ([], {}), '()\n', (4019, 4021), False, 'import unittest\n'), ((459, 480), 'sklearn.preprocessing.Normalizer', 'Normalizer', ([], {'norm': '"""l2"""'}), "(norm='l2')\n", (469, 480), False, 'from sklearn.preprocessing import Normalizer\n'), ((824, 845), 'sklearn.preprocessing.Normalizer', 'Normalizer', ([], {'norm': '"""l2"""'}), "(norm='l2')\n", (834, 845), False, 'from sklearn.preprocessing import Normalizer\n'), ((1379, 1400), 'sklearn.preprocessing.Normalizer', 'Normalizer', ([], {'norm': '"""l1"""'}), "(norm='l1')\n", (1389, 1400), False, 'from sklearn.preprocessing import Normalizer\n'), ((1929, 1950), 'sklearn.preprocessing.Normalizer', 'Normalizer', ([], {'norm': '"""l2"""'}), "(norm='l2')\n", (1939, 1950), False, 'from sklearn.preprocessing import Normalizer\n'), ((2480, 2501), 'sklearn.preprocessing.Normalizer', 'Normalizer', ([], {'norm': '"""l1"""'}), "(norm='l1')\n", (2490, 2501), False, 'from sklearn.preprocessing import Normalizer\n'), ((2981, 3002), 'sklearn.preprocessing.Normalizer', 'Normalizer', ([], {'norm': '"""l2"""'}), "(norm='l2')\n", (2991, 3002), False, 'from sklearn.preprocessing import Normalizer\n'), ((3486, 3507), 'sklearn.preprocessing.Normalizer', 'Normalizer', ([], {'norm': '"""l2"""'}), "(norm='l2')\n", (3496, 3507), False, 'from sklearn.preprocessing import Normalizer\n'), ((1166, 1223), 'numpy.array', 'numpy.array', (['[[1, -1, 3], [3, 1, 2]]'], {'dtype': 'numpy.float32'}), '([[1, -1, 3], [3, 1, 2]], dtype=numpy.float32)\n', (1177, 1223), False, 'import numpy\n'), ((1723, 1780), 'numpy.array', 'numpy.array', (['[[1, -1, 3], [3, 1, 2]]'], {'dtype': 'numpy.float32'}), '([[1, -1, 3], [3, 1, 2]], dtype=numpy.float32)\n', (1734, 1780), False, 'import numpy\n'), ((2273, 2330), 'numpy.array', 'numpy.array', (['[[1, -1, 3], [3, 1, 2]]'], {'dtype': 'numpy.float32'}), '([[1, -1, 3], [3, 1, 2]], dtype=numpy.float32)\n', (2284, 2330), False, 'import numpy\n'), ((2768, 2825), 'numpy.array', 
'numpy.array', (['[[1, -1, 3], [3, 1, 2]]'], {'dtype': 'numpy.float64'}), '([[1, -1, 3], [3, 1, 2]], dtype=numpy.float64)\n', (2779, 2825), False, 'import numpy\n'), ((3269, 3326), 'numpy.array', 'numpy.array', (['[[1, -1, 3], [3, 1, 2]]'], {'dtype': 'numpy.float64'}), '([[1, -1, 3], [3, 1, 2]], dtype=numpy.float64)\n', (3280, 3326), False, 'import numpy\n'), ((3823, 3880), 'numpy.array', 'numpy.array', (['[[1, -1, 3], [3, 1, 2]]'], {'dtype': 'numpy.float32'}), '([[1, -1, 3], [3, 1, 2]], dtype=numpy.float32)\n', (3834, 3880), False, 'import numpy\n'), ((588, 614), 'skl2onnx.common.data_types.Int64TensorType', 'Int64TensorType', (['[None, 1]'], {}), '([None, 1])\n', (603, 614), False, 'from skl2onnx.common.data_types import Int64TensorType, FloatTensorType, DoubleTensorType\n'), ((953, 979), 'skl2onnx.common.data_types.FloatTensorType', 'FloatTensorType', (['[None, 3]'], {}), '([None, 3])\n', (968, 979), False, 'from skl2onnx.common.data_types import Int64TensorType, FloatTensorType, DoubleTensorType\n'), ((1508, 1534), 'skl2onnx.common.data_types.FloatTensorType', 'FloatTensorType', (['[None, 3]'], {}), '([None, 3])\n', (1523, 1534), False, 'from skl2onnx.common.data_types import Int64TensorType, FloatTensorType, DoubleTensorType\n'), ((2058, 2084), 'skl2onnx.common.data_types.FloatTensorType', 'FloatTensorType', (['[None, 3]'], {}), '([None, 3])\n', (2073, 2084), False, 'from skl2onnx.common.data_types import Int64TensorType, FloatTensorType, DoubleTensorType\n'), ((2609, 2636), 'skl2onnx.common.data_types.DoubleTensorType', 'DoubleTensorType', (['[None, 3]'], {}), '([None, 3])\n', (2625, 2636), False, 'from skl2onnx.common.data_types import Int64TensorType, FloatTensorType, DoubleTensorType\n'), ((3110, 3137), 'skl2onnx.common.data_types.DoubleTensorType', 'DoubleTensorType', (['[None, 3]'], {}), '([None, 3])\n', (3126, 3137), False, 'from skl2onnx.common.data_types import Int64TensorType, FloatTensorType, DoubleTensorType\n'), ((3615, 3634), 
'skl2onnx.common.data_types.FloatTensorType', 'FloatTensorType', (['[]'], {}), '([])\n', (3630, 3634), False, 'from skl2onnx.common.data_types import Int64TensorType, FloatTensorType, DoubleTensorType\n')]
|
import random as r
def solve(board_size):
"""Solves the n-queens problem given a board size n
Args:
board_size (int): The size of the n*n board containing n queens.
Returns:
board (int[]): A list where each element corresponds to the row of a queen. It is 1-based.
"""
# Declares the max steps for the minConflicts algorithm.
maxSteps = 20
# Loop until a solution is found.
while True:
# the board is initialized and returned along with a list of conflicting queen positions
board, conflictList = initializeBoard(board_size)
# The range here is the max_steps from the min-conflicts algorithm shown in the PDF.
for i in range(maxSteps):
# Checks to see if the current board is a solution.
if solution(board, board_size):
# if the current board is a solution, then it is returned
return board
# If the current board is not a solution, then a random conflicting Queen is chosen.
var = r.choice(conflictList)
''' The minConflicts algorithm runs and returns the new board. It also takes in var as a parameters and
returns a boolean (conflicting) that determines whether or not var still has any conflicts.'''
board, conflicting = minConflicts(board, board_size, var)
# if var does not have any existing conflicts...
if not conflicting:
#...then clearly var should be removed from the conflict list
conflictList.remove(var)
def initializeBoard(boardSize):
"""Initializes the representation of the chess board.
Args:
boardSize (int): The size of the n*n board
Returns:
board (int[]): The representation of the chess board
conflictList (int[(int, int)]): A list of tuples of type (int, int),
this gets passed on to the minConflicts() function.
"""
# board list is initialized
board = []
# The list of Queens that conflict with each other.
conflictList = []
integerList = list(range(1, boardSize + 1))
integerList2 = list(range(boardSize))
# variable to represent half the size of the board (if the size is odd, it takes the floor as it should)
halfSize = int(boardSize / 2)
"""
The general idea is reducing the problem to a knight's problem. Two knights could take over each other on a 3*2 or
2*3 board on the corner, if we switch the knights to queens, it will show that the queens are not conflicting with
each other on row/column/diagonal. The purpose of this part of algorithm is to repeat this process until the board
has enough queens. The situation will change according to the size of the board, each branch of the if statement
shows a different case.
"""
if boardSize % 6 == 2:
board = [0] * (boardSize)
for i in range(1, halfSize + 1):
index1 = (2 * (i - 1) + halfSize - 1) % boardSize
index2 = boardSize - (index1 + 1)
board[index1] = i
board[index2] = boardSize + 1 - i
elif (boardSize - 1) % 6 == 2:
board = [0] * (boardSize)
for i in range(1, halfSize + 1):
index1 = (2 * (i - 1) + halfSize - 1) % (boardSize - 1)
index2 = boardSize - (index1 + 2)
board[index1] = i
board[index2] = boardSize - i
board[boardSize - 1] = boardSize
else:
for i in range(1, halfSize + 1):
board.append(halfSize + i)
board.append(i)
if boardSize % 2 == 1:
board.append(boardSize)
"""
Randomly picks x Queens to shuffle, creating conflicts.
This shows that our algorithm works, and it works well.
The higher the value of x, the more our algorithm has to work.
We decided to let x = 8 in honour of "the eight queens problem"
"""
for i in range(8):
randomInt = r.choice(integerList)
randomIndex = r.choice(integerList2)
board[randomIndex] = randomInt
conflictList.append((randomInt, randomIndex))
# the board and conflict list are returned
return board, conflictList
def minConflicts(board, boardSize, var):
"""Checks to see if a Queen is conflicting with any other Queens on the board.
Args:
board (int[]) : The representation of our chess board.
boardSize (int) : The size of the n*n chess board.
var ((int,int)) : An element of the conflictList list that initializeBoard() returns.
Returns:
board (int[]) : The representation of our chess board.
conflicting (bool) : Whether the Queen is conflicting with another piece.
"""
# we start out by assuming that the queen in question has conflicts
conflicting = True
# Initializes new lists for conflict detection.
counterRow = [0] * (boardSize + 1)
counterDiagonal1 = [0] * (2 * boardSize + 1)
counterDiagonal2 = [0] * (2 * boardSize + 1)
# The number of conflicts on rows/diagonals are counted.
for i in range(boardSize):
counterRow[board[i]] += 1
counterDiagonal1[board[i] - i + boardSize] += 1
counterDiagonal2[board[i] + i] += 1
# variable initializations
minimalConflictor = boardSize
minimalRow = 0
# Loops through the board to see which queen has the least number of conflicts and what the corresponding row is.
for i in range(1, boardSize + 1):
currentConflictor = counterRow[i]
currentConflictor += counterDiagonal1[i - var[1] + boardSize]
currentConflictor += counterDiagonal2[i + var[1]]
if (currentConflictor < minimalConflictor):
minimalConflictor = currentConflictor
minimalRow = i
# Moves the Queen to the row with minimal conflicts.
board[var[1]] = minimalRow
# Checks to see if there is still a conflict after the move...
if minimalConflictor == 0:
# and if there is not, then that means this queen has no conflicts
conflicting = False
#the board and conflicting boolean are each returned
return board, conflicting
def solution(board, boardSize):
"""Checks to see if the board is a solution.
Args:
board (int[]) : The representation of the chess board.
boardSize (int) : The size of the n*n chess board.
"""
# If there is no board, no solution.
if not board:
return False
"""
The set() function removes duplicates (turns a list into a set).
For a board to be a solution, there needs to be exactly one Queen on every column.
So if the length of the board is the same as the length of the set of the board, that means all elements are unique.
And if all elements are unique, then there must be exactly one Queen on every row.
"""
if len(board) != len(set(board)):
return False
# list declarations
diagonal1 = []
diagonal2 = []
# The hills & dales of each Queen are calculated.
for i in range(0, boardSize):
diagonal1.append(board[i] + i)
diagonal2.append(board[i] - i)
# The diagonals are checked the same way that the rows are checked
if len(diagonal1) != len(set(diagonal1)) or len(diagonal2) != len(set(diagonal2)):
return False
# The solution works if it passed all the previous requirements
return True
|
[
"random.choice"
] |
[((4028, 4049), 'random.choice', 'r.choice', (['integerList'], {}), '(integerList)\n', (4036, 4049), True, 'import random as r\n'), ((4072, 4094), 'random.choice', 'r.choice', (['integerList2'], {}), '(integerList2)\n', (4080, 4094), True, 'import random as r\n'), ((1063, 1085), 'random.choice', 'r.choice', (['conflictList'], {}), '(conflictList)\n', (1071, 1085), True, 'import random as r\n')]
|
import numpy as np
from flask import Flask, session,abort,request, jsonify, render_template,redirect,url_for,flash
import pickle
import pandas as pd
from sklearn.preprocessing import StandardScaler
from sklearn.model_selection import train_test_split
from keras.models import load_model
import os
import stripe
import datetime
import keras
from keras import optimizers
from keras.utils import to_categorical
from keras.models import Model
from keras.layers import Input, Dense
from keras.layers.normalization import BatchNormalization
from keras.layers.core import Dropout, Activation
app = Flask(__name__)
@app.route('/')
def home():
return render_template('index.html')
@app.route('/heartAttack',methods=['POST'])
def heartAttack():
model = load_model('models/heart_disease_model.h5')
int_features = [[int(x) for x in request.form.values()]]
final_features = [np.array(int_features)]
prediction_proba = model.predict(final_features)
prediction = (prediction_proba > 0.5)
return render_template('index.html', prediction_text='THANK YOU FOR YOUR PURCHASE, \n FOR THE DATA YOU ENTERED \n IT IS PREDICTED {} \n THAT THE PATIENT WILL HAVE A STROKE WITHIN \n THE NEXT 10 YEARS.'.format(prediction))
if __name__ == "__main__":
app.run(debug=True, port=8080) #debug=True,host="0.0.0.0",port=50000
|
[
"keras.models.load_model",
"flask.request.form.values",
"flask.Flask",
"numpy.array",
"flask.render_template"
] |
[((610, 625), 'flask.Flask', 'Flask', (['__name__'], {}), '(__name__)\n', (615, 625), False, 'from flask import Flask, session, abort, request, jsonify, render_template, redirect, url_for, flash\n'), ((670, 699), 'flask.render_template', 'render_template', (['"""index.html"""'], {}), "('index.html')\n", (685, 699), False, 'from flask import Flask, session, abort, request, jsonify, render_template, redirect, url_for, flash\n'), ((780, 823), 'keras.models.load_model', 'load_model', (['"""models/heart_disease_model.h5"""'], {}), "('models/heart_disease_model.h5')\n", (790, 823), False, 'from keras.models import load_model\n'), ((909, 931), 'numpy.array', 'np.array', (['int_features'], {}), '(int_features)\n', (917, 931), True, 'import numpy as np\n'), ((862, 883), 'flask.request.form.values', 'request.form.values', ([], {}), '()\n', (881, 883), False, 'from flask import Flask, session, abort, request, jsonify, render_template, redirect, url_for, flash\n')]
|
from numbers import Real
from typing import Union
from hypothesis import given
from symba.base import Expression
from tests.utils import equivalence
from . import strategies
@given(strategies.expressions, strategies.non_zero_reals_or_expressions)
def test_basic(expression: Expression,
real_or_expression: Union[Real, Expression]) -> None:
result = expression / real_or_expression
assert isinstance(result, Expression)
@given(strategies.finite_non_zero_expressions)
def test_self_inverse(expression: Expression) -> None:
assert expression / expression == 1
@given(strategies.finite_non_zero_expressions,
strategies.finite_non_zero_expressions)
def test_commutative_case(first: Expression, second: Expression) -> None:
assert equivalence(first / second == second / first,
abs(first) == abs(second))
@given(strategies.definite_expressions, strategies.unary_reals_or_expressions)
def test_right_neutral_element(expression: Expression,
real_or_expression: Union[Real, Expression]
) -> None:
assert expression / real_or_expression == expression
@given(strategies.finite_expressions, strategies.finite_expressions,
strategies.definite_non_zero_reals_or_expressions)
def test_add_dividend(first: Expression,
second: Expression,
real_or_expression: Expression) -> None:
result = (first + second) / real_or_expression
assert result == ((first / real_or_expression)
+ (second / real_or_expression))
@given(strategies.finite_expressions, strategies.finite_expressions,
strategies.definite_non_zero_reals_or_expressions)
def test_sub_dividend(first: Expression,
second: Expression,
real_or_expression: Expression) -> None:
result = (first - second) / real_or_expression
assert result == ((first / real_or_expression)
- (second / real_or_expression))
@given(strategies.finite_expressions,
strategies.definite_non_zero_reals_or_expressions,
strategies.definite_non_zero_reals_or_expressions)
def test_mul_divisor(expression: Expression,
first_real_or_expression: Union[Real, Expression],
second_real_or_expression: Union[Real, Expression]
) -> None:
result = expression / (first_real_or_expression
* second_real_or_expression)
assert result == ((expression / first_real_or_expression)
/ second_real_or_expression)
|
[
"hypothesis.given"
] |
[((179, 250), 'hypothesis.given', 'given', (['strategies.expressions', 'strategies.non_zero_reals_or_expressions'], {}), '(strategies.expressions, strategies.non_zero_reals_or_expressions)\n', (184, 250), False, 'from hypothesis import given\n'), ((450, 495), 'hypothesis.given', 'given', (['strategies.finite_non_zero_expressions'], {}), '(strategies.finite_non_zero_expressions)\n', (455, 495), False, 'from hypothesis import given\n'), ((594, 684), 'hypothesis.given', 'given', (['strategies.finite_non_zero_expressions', 'strategies.finite_non_zero_expressions'], {}), '(strategies.finite_non_zero_expressions, strategies.\n finite_non_zero_expressions)\n', (599, 684), False, 'from hypothesis import given\n'), ((871, 948), 'hypothesis.given', 'given', (['strategies.definite_expressions', 'strategies.unary_reals_or_expressions'], {}), '(strategies.definite_expressions, strategies.unary_reals_or_expressions)\n', (876, 948), False, 'from hypothesis import given\n'), ((1181, 1303), 'hypothesis.given', 'given', (['strategies.finite_expressions', 'strategies.finite_expressions', 'strategies.definite_non_zero_reals_or_expressions'], {}), '(strategies.finite_expressions, strategies.finite_expressions,\n strategies.definite_non_zero_reals_or_expressions)\n', (1186, 1303), False, 'from hypothesis import given\n'), ((1614, 1736), 'hypothesis.given', 'given', (['strategies.finite_expressions', 'strategies.finite_expressions', 'strategies.definite_non_zero_reals_or_expressions'], {}), '(strategies.finite_expressions, strategies.finite_expressions,\n strategies.definite_non_zero_reals_or_expressions)\n', (1619, 1736), False, 'from hypothesis import given\n'), ((2047, 2195), 'hypothesis.given', 'given', (['strategies.finite_expressions', 'strategies.definite_non_zero_reals_or_expressions', 'strategies.definite_non_zero_reals_or_expressions'], {}), '(strategies.finite_expressions, strategies.\n definite_non_zero_reals_or_expressions, strategies.\n 
definite_non_zero_reals_or_expressions)\n', (2052, 2195), False, 'from hypothesis import given\n')]
|
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
df = pd.read_csv('/home/jordibisbal/WS18-MSc-JordiBisbalAnsaldo--NetworkSlicing/evaluation/experiments/1/forks/forks_pow.csv')
x = np.arange(0.0, 100, 1)
data = df[['T1', 'T2','T3','T4', 'T5','T6','T7', 'T8','T9','T10', 'T11','T12','T13', 'T14','T15','T16', 'T17','T18','T19', 'T20','T21','T21', 'T22','T23','T24', 'T25','T26','T27', 'T28','T29','T30']]
fig, ax = plt.subplots(figsize=(8,5))
ax.errorbar(x, np.log10(data.mean(axis=1)), yerr=np.log10(data.std(axis=1)*1.96/np.sqrt(30)) , fmt='.')
plt.xlabel('# blocks', fontsize=16)
plt.ylabel('log (average # forks ' + '$f_b$)', fontsize=16)
plt.grid(linestyle=':',linewidth=1.5)
# Hide the right and top spines
ax.spines['right'].set_visible(False)
ax.spines['top'].set_visible(False)
plt.tick_params(axis='both', which='major', labelsize=16)
ax.legend(loc=1,prop={'size': 16})
ax.set_xlim(xmin=0, xmax=100)
ax.set_ylim(ymin=-1, ymax=3)
plt.savefig('ev_forks_pow.png')
plt.show()
|
[
"matplotlib.pyplot.show",
"pandas.read_csv",
"numpy.sqrt",
"numpy.arange",
"matplotlib.pyplot.tick_params",
"matplotlib.pyplot.ylabel",
"matplotlib.pyplot.xlabel",
"matplotlib.pyplot.subplots",
"matplotlib.pyplot.savefig",
"matplotlib.pyplot.grid"
] |
[((78, 209), 'pandas.read_csv', 'pd.read_csv', (['"""/home/jordibisbal/WS18-MSc-JordiBisbalAnsaldo--NetworkSlicing/evaluation/experiments/1/forks/forks_pow.csv"""'], {}), "(\n '/home/jordibisbal/WS18-MSc-JordiBisbalAnsaldo--NetworkSlicing/evaluation/experiments/1/forks/forks_pow.csv'\n )\n", (89, 209), True, 'import pandas as pd\n'), ((205, 227), 'numpy.arange', 'np.arange', (['(0.0)', '(100)', '(1)'], {}), '(0.0, 100, 1)\n', (214, 227), True, 'import numpy as np\n'), ((440, 468), 'matplotlib.pyplot.subplots', 'plt.subplots', ([], {'figsize': '(8, 5)'}), '(figsize=(8, 5))\n', (452, 468), True, 'import matplotlib.pyplot as plt\n'), ((575, 610), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""# blocks"""'], {'fontsize': '(16)'}), "('# blocks', fontsize=16)\n", (585, 610), True, 'import matplotlib.pyplot as plt\n'), ((611, 670), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (["('log (average # forks ' + '$f_b$)')"], {'fontsize': '(16)'}), "('log (average # forks ' + '$f_b$)', fontsize=16)\n", (621, 670), True, 'import matplotlib.pyplot as plt\n'), ((671, 709), 'matplotlib.pyplot.grid', 'plt.grid', ([], {'linestyle': '""":"""', 'linewidth': '(1.5)'}), "(linestyle=':', linewidth=1.5)\n", (679, 709), True, 'import matplotlib.pyplot as plt\n'), ((818, 875), 'matplotlib.pyplot.tick_params', 'plt.tick_params', ([], {'axis': '"""both"""', 'which': '"""major"""', 'labelsize': '(16)'}), "(axis='both', which='major', labelsize=16)\n", (833, 875), True, 'import matplotlib.pyplot as plt\n'), ((972, 1003), 'matplotlib.pyplot.savefig', 'plt.savefig', (['"""ev_forks_pow.png"""'], {}), "('ev_forks_pow.png')\n", (983, 1003), True, 'import matplotlib.pyplot as plt\n'), ((1004, 1014), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (1012, 1014), True, 'import matplotlib.pyplot as plt\n'), ((550, 561), 'numpy.sqrt', 'np.sqrt', (['(30)'], {}), '(30)\n', (557, 561), True, 'import numpy as np\n')]
|
from typing import List, Tuple
import time
import pandas as pd
from initial_solution import *
from local_search import *
from graph import *
def algorithm(nodes: List[dict], vehicles: int, clients: int, vehicle_capacity: int, tenure: int, file_writer, iter_max:int=1000, savings:bool=True) -> None:
"""
Aplica o algoritmo de tabu list utilizando ou uma solucao inicial aleatoria ou uma solucao utilizando o metodo de
Clarke Wright de economia.
"""
inicio = time.time()
# Cria matriz de distancia entre as cidades
distances_between_clients = clients_distance(nodes, clients)
# Escolhe a solucao inicial
if savings:
best_sol = corrected_savings(distances_between_clients, nodes, vehicles, clients, vehicle_capacity, 0.1)
else:
best_sol = random_initial_sol(nodes, vehicles, vehicle_capacity)
best_sol_dist, best_feasible_flag = total_distance(distances_between_clients, best_sol, nodes, vehicle_capacity)
current_sol = best_sol.copy()
current_dist = best_sol_dist
current_feasible_flag = best_feasible_flag
# Mostra a rota inicial e a distancia
file_writer.write('-Solucao inicial-\n')
show_routes(distances_between_clients, best_sol, nodes, vehicle_capacity, file_writer)
file_writer.write(f"Distancia total: {best_sol_dist}\n\n")
# Inicializa lista tabu e condicoes de parada
tabu_list = []
tempo = 0
iter_ = 0
sol_dists = []
best_dists = []
while tempo < 300 and iter_ < iter_max:
aux_current_sol, aux_current_dist, tabu_list, aux_current_feasible_flag = best_neighbor(distances_between_clients, current_sol, current_dist, nodes, vehicles, vehicle_capacity, tabu_list, tenure, best_sol_dist, current_feasible_flag)
if aux_current_sol != None:
current_sol, current_dist, current_feasible_flag = aux_current_sol, aux_current_dist, aux_current_feasible_flag
if (current_feasible_flag and not best_feasible_flag) or (current_feasible_flag and current_dist < best_sol_dist) or (not best_feasible_flag and current_dist < best_sol_dist):
best_sol = current_sol.copy()
best_sol_dist = current_dist
best_feasible_flag = current_feasible_flag
iter_ = 0
else:
iter_ += 1
sol_dists.append(aux_current_dist)
fim = time.time()
tempo = fim - inicio
file_writer.write('-Solucao final-\n')
show_routes(distances_between_clients, best_sol, nodes, vehicle_capacity, file_writer)
df = pd.DataFrame(sol_dists)
df.to_csv('log_dists.csv', index=True, index_label="iter")
return tempo, iter_, best_sol_dist
|
[
"pandas.DataFrame",
"time.time"
] |
[((480, 491), 'time.time', 'time.time', ([], {}), '()\n', (489, 491), False, 'import time\n'), ((2603, 2626), 'pandas.DataFrame', 'pd.DataFrame', (['sol_dists'], {}), '(sol_dists)\n', (2615, 2626), True, 'import pandas as pd\n'), ((2417, 2428), 'time.time', 'time.time', ([], {}), '()\n', (2426, 2428), False, 'import time\n')]
|
import pytz
import logging
from .base_records_test import BaseRecordsIntegrationTest
from ..directory_validator import RecordsDirectoryValidator
from records_mover.records import (
RecordsSchema, DelimitedRecordsFormat, ProcessingInstructions
)
import tempfile
import pathlib
import datetime
logger = logging.getLogger(__name__)
class RecordsSaveDataframeIntegrationTest(BaseRecordsIntegrationTest):
def save_and_verify(self, records_format, processing_instructions=None) -> None:
if not self.has_pandas():
logger.warning("Skipping test as we don't have Pandas to save with.")
return
from pandas import DataFrame
if processing_instructions is None:
processing_instructions = ProcessingInstructions()
us_eastern = pytz.timezone('US/Eastern')
df = DataFrame.from_dict([{
'num': 123,
'numstr': '123',
'str': 'foo',
'comma': ',',
'doublequote': '"',
'quotecommaquote': '","',
'newlinestr': ("* SQL unload would generate multiple files (one for each slice/part)\n"
"* Filecat would produce a single data file"),
'date': datetime.date(2000, 1, 1),
'time': datetime.time(0, 0),
'timestamp': datetime.datetime(2000, 1, 2, 12, 34, 56, 789012),
'timestamptz': us_eastern.localize(datetime.datetime(2000, 1, 2, 12, 34, 56, 789012))
}])
records_schema = RecordsSchema.from_dataframe(df,
processing_instructions,
include_index=False)
records_schema = records_schema.refine_from_dataframe(df, processing_instructions)
with tempfile.TemporaryDirectory(prefix='test_records_save_df') as tempdir:
output_url = pathlib.Path(tempdir).resolve().as_uri() + '/'
source = self.records.sources.dataframe(df=df,
records_schema=records_schema,
processing_instructions=processing_instructions)
target = self.records.targets.directory_from_url(output_url,
records_format=records_format)
out = self.records.move(source, target, processing_instructions)
self.verify_records_directory(records_format.format_type,
records_format.variant,
tempdir,
records_format.hints)
return out
def verify_records_directory(self, format_type, variant, tempdir, hints={}) -> None:
validator = RecordsDirectoryValidator(tempdir,
self.resource_name(format_type, variant, hints),
self.engine.name)
validator.validate()
def test_save_with_defaults(self):
hints = {}
self.save_and_verify(records_format=DelimitedRecordsFormat(hints=hints))
def test_save_csv_variant(self):
records_format = DelimitedRecordsFormat(variant='csv')
self.save_and_verify(records_format=records_format)
def test_save_with_no_compression(self):
hints = {
'compression': None,
}
records_format = DelimitedRecordsFormat(hints=hints)
self.save_and_verify(records_format=records_format)
|
[
"tempfile.TemporaryDirectory",
"datetime.date",
"datetime.datetime",
"records_mover.records.DelimitedRecordsFormat",
"pathlib.Path",
"pytz.timezone",
"records_mover.records.RecordsSchema.from_dataframe",
"datetime.time",
"records_mover.records.ProcessingInstructions",
"logging.getLogger"
] |
[((307, 334), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (324, 334), False, 'import logging\n'), ((795, 822), 'pytz.timezone', 'pytz.timezone', (['"""US/Eastern"""'], {}), "('US/Eastern')\n", (808, 822), False, 'import pytz\n'), ((1508, 1586), 'records_mover.records.RecordsSchema.from_dataframe', 'RecordsSchema.from_dataframe', (['df', 'processing_instructions'], {'include_index': '(False)'}), '(df, processing_instructions, include_index=False)\n', (1536, 1586), False, 'from records_mover.records import RecordsSchema, DelimitedRecordsFormat, ProcessingInstructions\n'), ((3238, 3275), 'records_mover.records.DelimitedRecordsFormat', 'DelimitedRecordsFormat', ([], {'variant': '"""csv"""'}), "(variant='csv')\n", (3260, 3275), False, 'from records_mover.records import RecordsSchema, DelimitedRecordsFormat, ProcessingInstructions\n'), ((3468, 3503), 'records_mover.records.DelimitedRecordsFormat', 'DelimitedRecordsFormat', ([], {'hints': 'hints'}), '(hints=hints)\n', (3490, 3503), False, 'from records_mover.records import RecordsSchema, DelimitedRecordsFormat, ProcessingInstructions\n'), ((749, 773), 'records_mover.records.ProcessingInstructions', 'ProcessingInstructions', ([], {}), '()\n', (771, 773), False, 'from records_mover.records import RecordsSchema, DelimitedRecordsFormat, ProcessingInstructions\n'), ((1800, 1858), 'tempfile.TemporaryDirectory', 'tempfile.TemporaryDirectory', ([], {'prefix': '"""test_records_save_df"""'}), "(prefix='test_records_save_df')\n", (1827, 1858), False, 'import tempfile\n'), ((3138, 3173), 'records_mover.records.DelimitedRecordsFormat', 'DelimitedRecordsFormat', ([], {'hints': 'hints'}), '(hints=hints)\n', (3160, 3173), False, 'from records_mover.records import RecordsSchema, DelimitedRecordsFormat, ProcessingInstructions\n'), ((1228, 1253), 'datetime.date', 'datetime.date', (['(2000)', '(1)', '(1)'], {}), '(2000, 1, 1)\n', (1241, 1253), False, 'import datetime\n'), ((1275, 1294), 'datetime.time', 
'datetime.time', (['(0)', '(0)'], {}), '(0, 0)\n', (1288, 1294), False, 'import datetime\n'), ((1321, 1370), 'datetime.datetime', 'datetime.datetime', (['(2000)', '(1)', '(2)', '(12)', '(34)', '(56)', '(789012)'], {}), '(2000, 1, 2, 12, 34, 56, 789012)\n', (1338, 1370), False, 'import datetime\n'), ((1419, 1468), 'datetime.datetime', 'datetime.datetime', (['(2000)', '(1)', '(2)', '(12)', '(34)', '(56)', '(789012)'], {}), '(2000, 1, 2, 12, 34, 56, 789012)\n', (1436, 1468), False, 'import datetime\n'), ((1896, 1917), 'pathlib.Path', 'pathlib.Path', (['tempdir'], {}), '(tempdir)\n', (1908, 1917), False, 'import pathlib\n')]
|
import click
import orjson as json
import yaml
from r2k.cli import cli_utils, logger
from r2k.config import config as _config
@click.command("show")
@cli_utils.config_path_option()
@click.option(
"-j",
"--json",
"is_json",
is_flag=True,
help="When passed the output will be in JSON format (e.g. for use with jq).\n"
"Use with the --no-ansi flag for best results",
)
def config_show(is_json: bool) -> None:
"""Show all the available configuration."""
result = _config.as_dict()
if "password" in result:
result["password"] = "<PASSWORD>"
if is_json:
logger.info(json.dumps(result))
else:
logger.info(yaml.safe_dump(result))
|
[
"yaml.safe_dump",
"click.option",
"click.command",
"r2k.config.config.as_dict",
"orjson.dumps",
"r2k.cli.cli_utils.config_path_option"
] |
[((130, 151), 'click.command', 'click.command', (['"""show"""'], {}), "('show')\n", (143, 151), False, 'import click\n'), ((153, 183), 'r2k.cli.cli_utils.config_path_option', 'cli_utils.config_path_option', ([], {}), '()\n', (181, 183), False, 'from r2k.cli import cli_utils, logger\n'), ((185, 375), 'click.option', 'click.option', (['"""-j"""', '"""--json"""', '"""is_json"""'], {'is_flag': '(True)', 'help': '"""When passed the output will be in JSON format (e.g. for use with jq).\nUse with the --no-ansi flag for best results"""'}), '(\'-j\', \'--json\', \'is_json\', is_flag=True, help=\n """When passed the output will be in JSON format (e.g. for use with jq).\nUse with the --no-ansi flag for best results"""\n )\n', (197, 375), False, 'import click\n'), ((494, 511), 'r2k.config.config.as_dict', '_config.as_dict', ([], {}), '()\n', (509, 511), True, 'from r2k.config import config as _config\n'), ((619, 637), 'orjson.dumps', 'json.dumps', (['result'], {}), '(result)\n', (629, 637), True, 'import orjson as json\n'), ((669, 691), 'yaml.safe_dump', 'yaml.safe_dump', (['result'], {}), '(result)\n', (683, 691), False, 'import yaml\n')]
|
import os
import subprocess
subprocess.call([
'git'
, 'submodule'
, 'foreach'
, 'git'
, 'checkout'
, 'master'
])
subprocess.call([
'git'
, 'submodule'
, 'foreach'
, 'git'
, 'pull'
, 'origin'
, 'master'
])
|
[
"subprocess.call"
] |
[((29, 106), 'subprocess.call', 'subprocess.call', (["['git', 'submodule', 'foreach', 'git', 'checkout', 'master']"], {}), "(['git', 'submodule', 'foreach', 'git', 'checkout', 'master'])\n", (44, 106), False, 'import subprocess\n'), ((127, 214), 'subprocess.call', 'subprocess.call', (["['git', 'submodule', 'foreach', 'git', 'pull', 'origin', 'master']"], {}), "(['git', 'submodule', 'foreach', 'git', 'pull', 'origin',\n 'master'])\n", (142, 214), False, 'import subprocess\n')]
|
import firebase_admin
from firebase_admin import credentials, firestore
class Database:
def __init__(self):
self._conn = None
def connect(self, creds=None, from_file=None):
if not creds and not from_file:
raise Exception("Credentials or service account json file path required to connect with firestore")
if not creds:
creds = credentials.Certificate(from_file)
try:
firebase_admin.initialize_app(creds)
except Exception as e:
if 'The default Firebase app already exists' in str(e):
raise Exception(
'If you want to connect to Firestore from_file, make sure fireorm.connect(from_file=<YOUR FILE>) '
'comes directly after importing FireORM for the first time.')
self._conn = firestore.client()
@property
def conn(self):
if self._conn is None:
firebase_admin.initialize_app()
self._conn = firestore.client()
return self._conn
|
[
"firebase_admin.initialize_app",
"firebase_admin.credentials.Certificate",
"firebase_admin.firestore.client"
] |
[((718, 736), 'firebase_admin.firestore.client', 'firestore.client', ([], {}), '()\n', (734, 736), False, 'from firebase_admin import credentials, firestore\n'), ((345, 379), 'firebase_admin.credentials.Certificate', 'credentials.Certificate', (['from_file'], {}), '(from_file)\n', (368, 379), False, 'from firebase_admin import credentials, firestore\n'), ((390, 426), 'firebase_admin.initialize_app', 'firebase_admin.initialize_app', (['creds'], {}), '(creds)\n', (419, 426), False, 'import firebase_admin\n'), ((794, 825), 'firebase_admin.initialize_app', 'firebase_admin.initialize_app', ([], {}), '()\n', (823, 825), False, 'import firebase_admin\n'), ((842, 860), 'firebase_admin.firestore.client', 'firestore.client', ([], {}), '()\n', (858, 860), False, 'from firebase_admin import credentials, firestore\n')]
|
import pytest
from graphs.graph import Graph, Vertex, Edge
from graph_business_trip.graph_business_trip import business_trip
def test_can_instantiate_Graph():
graph = Graph()
assert graph
assert graph._adjacency_list == {}
def test_can_instantiate_Vertex():
vertex = Vertex()
assert vertex
assert vertex.value == None
def test_can_instantiate_Edge():
vertex1 = Vertex("hello")
edge = Edge(vertex1, weight=50)
assert edge.vertex == vertex1
assert edge.weight == 50
def test_input_metroville_pandora_returns_true(example):
graph = example[0]
metroville = example[3]
pandora = example[1]
assert metroville.value == "metroville"
assert pandora.value == "pandora"
actual = business_trip(graph, [metroville, pandora])
expected = "True, $82"
assert actual == expected
def test_happy_path_multiple_cities_returns_true(example):
graph = example[0]
arendelle = example[2]
monstropolis = example[4]
naboo = example[6]
assert arendelle.value == "arendelle"
assert monstropolis.value == "monstropolis"
assert naboo.value == "naboo"
actual = business_trip(graph, [arendelle, monstropolis, naboo])
expected = "True, $115"
assert actual == expected
def test_unhappy_path_returns_fales(example):
graph = example[0]
naboo = example[6]
pandora = example[1]
assert naboo.value == "naboo"
assert pandora.value == "pandora"
actual = business_trip(graph, [naboo, pandora])
expected = "False, $0"
assert actual == expected
def test_unhappy_path_with_multiple_cities_return_false(example):
graph = example[0]
narnia = example[5]
arendelle = example[2]
naboo = example[6]
assert narnia.value == "narnia"
assert arendelle.value == "arendelle"
assert naboo.value == "naboo"
actual = business_trip(graph, [narnia, arendelle, naboo])
expected = "False, $0"
assert actual == expected
@pytest.fixture
def example():
graph1 = Graph()
pandora = Vertex("pandora")
arendelle = Vertex("arendelle")
metroville = Vertex("metroville")
monstropolis = Vertex("monstropolis")
narnia = Vertex("narnia")
naboo = Vertex("naboo")
graph1.add_node(pandora)
graph1.add_node(arendelle)
graph1.add_node(metroville)
graph1.add_node(monstropolis)
graph1.add_node(narnia)
graph1.add_node(naboo)
graph1.add_edge(pandora, arendelle, 150)
graph1.add_edge(pandora, metroville, 82)
graph1.add_edge(arendelle, pandora, 150)
graph1.add_edge(arendelle, metroville, 99)
graph1.add_edge(arendelle, monstropolis, 42)
graph1.add_edge(metroville, pandora, 82)
graph1.add_edge(metroville, narnia, 37)
graph1.add_edge(metroville, naboo, 26)
graph1.add_edge(metroville, monstropolis, 105)
graph1.add_edge(metroville, arendelle, 99)
graph1.add_edge(monstropolis, arendelle, 42)
graph1.add_edge(monstropolis, metroville, 105)
graph1.add_edge(monstropolis, naboo, 73)
graph1.add_edge(narnia, metroville, 37)
graph1.add_edge(narnia, naboo, 250)
graph1.add_edge(naboo, narnia, 250)
graph1.add_edge(naboo, metroville, 26)
graph1.add_edge(naboo, monstropolis, 73)
return graph1, pandora, arendelle, metroville, monstropolis, narnia, naboo
|
[
"graphs.graph.Edge",
"graphs.graph.Vertex",
"graphs.graph.Graph",
"graph_business_trip.graph_business_trip.business_trip"
] |
[((174, 181), 'graphs.graph.Graph', 'Graph', ([], {}), '()\n', (179, 181), False, 'from graphs.graph import Graph, Vertex, Edge\n'), ((288, 296), 'graphs.graph.Vertex', 'Vertex', ([], {}), '()\n', (294, 296), False, 'from graphs.graph import Graph, Vertex, Edge\n'), ((396, 411), 'graphs.graph.Vertex', 'Vertex', (['"""hello"""'], {}), "('hello')\n", (402, 411), False, 'from graphs.graph import Graph, Vertex, Edge\n'), ((423, 447), 'graphs.graph.Edge', 'Edge', (['vertex1'], {'weight': '(50)'}), '(vertex1, weight=50)\n', (427, 447), False, 'from graphs.graph import Graph, Vertex, Edge\n'), ((741, 784), 'graph_business_trip.graph_business_trip.business_trip', 'business_trip', (['graph', '[metroville, pandora]'], {}), '(graph, [metroville, pandora])\n', (754, 784), False, 'from graph_business_trip.graph_business_trip import business_trip\n'), ((1143, 1197), 'graph_business_trip.graph_business_trip.business_trip', 'business_trip', (['graph', '[arendelle, monstropolis, naboo]'], {}), '(graph, [arendelle, monstropolis, naboo])\n', (1156, 1197), False, 'from graph_business_trip.graph_business_trip import business_trip\n'), ((1460, 1498), 'graph_business_trip.graph_business_trip.business_trip', 'business_trip', (['graph', '[naboo, pandora]'], {}), '(graph, [naboo, pandora])\n', (1473, 1498), False, 'from graph_business_trip.graph_business_trip import business_trip\n'), ((1846, 1894), 'graph_business_trip.graph_business_trip.business_trip', 'business_trip', (['graph', '[narnia, arendelle, naboo]'], {}), '(graph, [narnia, arendelle, naboo])\n', (1859, 1894), False, 'from graph_business_trip.graph_business_trip import business_trip\n'), ((1998, 2005), 'graphs.graph.Graph', 'Graph', ([], {}), '()\n', (2003, 2005), False, 'from graphs.graph import Graph, Vertex, Edge\n'), ((2020, 2037), 'graphs.graph.Vertex', 'Vertex', (['"""pandora"""'], {}), "('pandora')\n", (2026, 2037), False, 'from graphs.graph import Graph, Vertex, Edge\n'), ((2054, 2073), 'graphs.graph.Vertex', 'Vertex', 
(['"""arendelle"""'], {}), "('arendelle')\n", (2060, 2073), False, 'from graphs.graph import Graph, Vertex, Edge\n'), ((2091, 2111), 'graphs.graph.Vertex', 'Vertex', (['"""metroville"""'], {}), "('metroville')\n", (2097, 2111), False, 'from graphs.graph import Graph, Vertex, Edge\n'), ((2131, 2153), 'graphs.graph.Vertex', 'Vertex', (['"""monstropolis"""'], {}), "('monstropolis')\n", (2137, 2153), False, 'from graphs.graph import Graph, Vertex, Edge\n'), ((2167, 2183), 'graphs.graph.Vertex', 'Vertex', (['"""narnia"""'], {}), "('narnia')\n", (2173, 2183), False, 'from graphs.graph import Graph, Vertex, Edge\n'), ((2196, 2211), 'graphs.graph.Vertex', 'Vertex', (['"""naboo"""'], {}), "('naboo')\n", (2202, 2211), False, 'from graphs.graph import Graph, Vertex, Edge\n')]
|
import http.server
import socketserver
import threading
ADDR = "192.168.2.1"
PORT = 80
class UDPHandler(socketserver.DatagramRequestHandler):
def handle(self):
data = self.request[0]
socket = self.request[1]
print ('client send: ', data)
socket.sendto(b'Hello from server!', self.client_address)
with socketserver.TCPServer((ADDR, PORT), http.server.SimpleHTTPRequestHandler) as http:
print("serving HTTP on", ADDR, "at port", PORT)
server_thread = threading.Thread(target=http.serve_forever)
server_thread.daemon = True
server_thread.start()
with socketserver.UDPServer((ADDR, PORT), UDPHandler) as udp:
print("serving UDP on", ADDR, "at port", PORT)
udp.serve_forever()
|
[
"threading.Thread",
"socketserver.TCPServer",
"socketserver.UDPServer"
] |
[((346, 420), 'socketserver.TCPServer', 'socketserver.TCPServer', (['(ADDR, PORT)', 'http.server.SimpleHTTPRequestHandler'], {}), '((ADDR, PORT), http.server.SimpleHTTPRequestHandler)\n', (368, 420), False, 'import socketserver\n'), ((502, 545), 'threading.Thread', 'threading.Thread', ([], {'target': 'http.serve_forever'}), '(target=http.serve_forever)\n', (518, 545), False, 'import threading\n'), ((614, 662), 'socketserver.UDPServer', 'socketserver.UDPServer', (['(ADDR, PORT)', 'UDPHandler'], {}), '((ADDR, PORT), UDPHandler)\n', (636, 662), False, 'import socketserver\n')]
|
import unittest
import array_problems.Solutions as array_problems
import array_problems.Solutions_Two as array_problems_two
import array_problems.Solutions_Three as array_problems_three
import array_problems.Solutions_Four as array_problems_four
import array_problems.Solutions_Five as array_problems_five
class SolutionsTest(unittest.TestCase):
def test_merge(self):
intervals = [[1, 3], [2, 6], [8, 10], [15, 18]]
output = [[1, 6], [8, 10], [15, 18]]
self.assertListEqual(output, array_problems_five.merge(intervals))
intervals = [[1, 4], [4, 5]]
output = [[1, 5]]
self.assertListEqual(output, array_problems_five.merge(intervals))
def test_largest_parameter(self):
input = [
[1, 0, 1, 1, 1],
[1, 0, 1, 1, 1],
[0, 1, 0, 1, 1]]
output = 7
self.assertEqual(output, array_problems_four.largest_parameter(input))
input = [[0, 0, 0, 0, 0, 0, 0],
[0, 1, 0, 1, 1, 1, 0],
[0, 1, 0, 1, 1, 1, 0],
[0, 0, 1, 1, 1, 1, 0],
[0, 0, 0, 0, 0, 0, 0]]
output = 9
self.assertEqual(output, array_problems_four.largest_parameter(input))
def test_max_increase_keep_city_skyline(self):
grid = [
[3, 0, 8, 4],
[2, 4, 5, 7],
[9, 2, 6, 3],
[0, 3, 1, 0]
]
output = 35
self.assertEqual(output, array_problems_four.maxIncreaseKeepingSkyline(grid))
grid = [[0, 0, 0], [0, 0, 0], [0, 0, 0]]
output = 0
self.assertEqual(output, array_problems_four.maxIncreaseKeepingSkyline(grid))
def test_array_pair_sum(self):
nums = [6, 2, 6, 5, 1, 2]
output = 9
self.assertEqual(output, array_problems_five.arrayPairSum(nums))
nums = [1, 4, 3, 2]
output = 4
self.assertEqual(output, array_problems_five.arrayPairSum(nums))
def test_sort_array_by_parity(self):
input = [3, 1, 2, 4]
output = [2, 4, 3, 1]
self.assertEqual(output, array_problems_five.sortArrayByParity(input))
def test_replace_elements(self):
input = [17, 18, 5, 4, 6, 1]
output = [18, 6, 6, 6, 1, -1]
self.assertEqual(output, array_problems_five.replaceElements(input))
def test_count_squares(self):
input = [
[0, 1, 1, 1],
[1, 1, 1, 1],
[0, 1, 1, 1]
]
output = 15
self.assertEqual(output, array_problems_four.countSquares(input))
input = [
[1, 0, 1],
[1, 1, 0],
[1, 1, 0]
]
output = 7
self.assertEqual(output, array_problems_four.countSquares(input))
def test_count_battleships(self):
board = [["X", ".", ".", "X"], [".", ".", ".", "X"], [".", ".", ".", "X"]]
output = 2
self.assertEqual(output, array_problems_four.countBattleships(board))
def test_interval_intersection(self):
firstList = [[0, 2], [5, 10], [13, 23], [24, 25]]
secondList = [[1, 5], [8, 12], [15, 24], [25, 26]]
output = [[1, 2], [5, 5], [8, 10], [15, 23], [24, 24], [25, 25]]
self.assertListEqual(output, array_problems_five.intervalIntersection(firstList, secondList))
def test_permute(self):
nums = [1, 2, 3]
output = [[1, 2, 3], [1, 3, 2], [2, 1, 3], [2, 3, 1], [3, 1, 2], [3, 2, 1]]
self.assertListEqual(output, array_problems_five.permute(nums))
def test_subsets(self):
nums = [1, 2, 3]
output = [[], [1], [2], [1, 2], [3], [1, 3], [2, 3], [1, 2, 3]]
self.assertCountEqual(output, array_problems_five.subsets(nums))
nums = [0]
output = [[], [0]]
self.assertCountEqual(output, array_problems_five.subsets(nums))
def test_cal_points(self):
ops = ["5", "2", "C", "D", "+"]
output = 30
self.assertEqual(output, array_problems_five.calPoints(ops))
ops = ["5", "-2", "4", "C", "D", "9", "+", "+"]
output = 27
self.assertEqual(output, array_problems_five.calPoints(ops))
def test_island_perimeter(self):
grid = [
[0, 1, 0, 0],
[1, 1, 1, 0],
[0, 1, 0, 0],
[1, 1, 0, 0]
]
output = 16
self.assertEqual(output, array_problems_two.islandPerimeter(grid))
grid = [[1]]
output = 4
self.assertEqual(output, array_problems_two.islandPerimeter(grid))
grid = [[1, 0]]
output = 4
self.assertEqual(output, array_problems_two.islandPerimeter(grid))
def test_max_area_of_island(self):
grid = [
[0, 0, 1, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 0, 0, 0],
[0, 1, 1, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 1, 0, 0, 1, 1, 0, 0, 1, 0, 1, 0, 0],
[0, 1, 0, 0, 1, 1, 0, 0, 1, 1, 1, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 1, 1, 0, 0, 0, 0]
]
output = 6
self.assertEqual(output, array_problems_two.maxAreaOfIsland(grid))
grid = [[0, 0, 0, 0, 0, 0, 0, 0]]
output = 0
self.assertEqual(output, array_problems_two.maxAreaOfIsland(grid))
def test_relative_sort(self):
arr1 = [2, 3, 1, 3, 2, 4, 6, 7, 9, 2, 19]
arr2 = [2, 1, 4, 3, 9, 6]
output = [2, 2, 2, 1, 4, 3, 3, 9, 6, 7, 19]
self.assertListEqual(output, array_problems_five.relativeSortArray(arr1, arr2))
def test_intersection(self):
nums1 = [1, 2, 2, 1]
nums2 = [2, 2]
output = [2]
self.assertCountEqual(output, array_problems_five.intersection(nums1, nums2))
nums1 = [4, 9, 5]
nums2 = [9, 4, 9, 8, 4]
output = [9, 4]
self.assertCountEqual(output, array_problems_five.intersection(nums1, nums2))
def test_minimal_fall_path(self):
matrix = [
[2, 1, 3],
[6, 5, 4],
[7, 8, 9]
]
output = 13
self.assertEqual(output, array_problems_five.minFallingPathSum(matrix))
matrix = [
[-19, 57],
[-40, -5]
]
output = -59
self.assertEqual(output, array_problems_five.minFallingPathSum(matrix))
def test_get_max_gold(self):
input = [
[1, 0, 7],
[2, 0, 6],
[3, 4, 5],
[0, 3, 0],
[9, 0, 20]
]
output = 28
self.assertEqual(output, array_problems_four.getMaximumGold(input))
input = [
[0, 6, 0],
[5, 8, 7],
[0, 9, 0]
]
output = 24
self.assertEqual(output, array_problems_four.getMaximumGold(input))
def test_validate_stack_sequence(self):
pushed = [1, 2, 3, 4, 5]
popped = [4, 5, 3, 2, 1]
output = True
self.assertEqual(output, array_problems_four.validateStackSequences(pushed, popped))
pushed = [1, 2, 3, 4, 5]
popped = [4, 3, 5, 1, 2]
output = False
self.assertEqual(output, array_problems_four.validateStackSequences(pushed, popped))
def test_update_board(self):
board = [
["E", "E", "E", "E", "E"],
["E", "E", "M", "E", "E"],
["E", "E", "E", "E", "E"],
["E", "E", "E", "E", "E"]
]
click = [3, 0]
output = [
["B", "1", "E", "1", "B"],
["B", "1", "M", "1", "B"],
["B", "1", "1", "1", "B"],
["B", "B", "B", "B", "B"]
]
self.assertEqual(output, array_problems.minesweeper(board, click))
board = [
["B", "1", "E", "1", "B"],
["B", "1", "M", "1", "B"],
["B", "1", "1", "1", "B"],
["B", "B", "B", "B", "B"]
]
click = [1, 2]
output = [
["B", "1", "E", "1", "B"],
["B", "1", "X", "1", "B"],
["B", "1", "1", "1", "B"],
["B", "B", "B", "B", "B"]
]
self.assertEqual(output, array_problems.minesweeper(board, click))
def test_last_stones(self):
input = [2, 7, 4, 1, 8, 1]
output = 1
self.assertEqual(output, array_problems_five.lastStoneWeight(input))
def test_can_reach(self):
arr = [4, 2, 3, 0, 3, 1, 2]
start = 0
output = True
self.assertEqual(output, array_problems_five.canReach(arr, start))
arr = [3, 0, 2, 1, 2]
start = 2
output = False
self.assertEqual(output, array_problems_five.canReach(arr, start))
def test_longest_ones(self):
nums = [0, 0, 1, 1, 0, 0, 1, 1, 1, 0, 1, 1, 0, 0, 0, 1, 1, 1, 1]
k = 3
output = 10
self.assertEqual(output, array_problems_five.longestOnes(nums, k))
nums = [1, 1, 1, 0, 0, 0, 1, 1, 1, 1, 0]
k = 2
output = 6
self.assertEqual(output, array_problems_five.longestOnes(nums, k))
def test_num_enclaves(self):
grid = [
[0, 0, 0, 0],
[1, 0, 1, 0],
[0, 1, 1, 0],
[0, 0, 0, 0]
]
output = 3
self.assertEqual(output, array_problems.numEnclaves(grid))
def test_min_path_sum(self):
grid = [[1, 3, 1], [1, 5, 1], [4, 2, 1]]
output = 7
self.assertEqual(output, array_problems_four.minPathSum(grid))
grid = [[1, 2, 3], [4, 5, 6]]
output = 12
self.assertEqual(output, array_problems_four.minPathSum(grid))
def test_is_monitonic(self):
nums = [1, 2, 2, 3]
output = True
self.assertEqual(output, array_problems.isMonotonic(nums))
nums = [6, 5, 4, 4]
output = True
self.assertEqual(output, array_problems.isMonotonic(nums))
nums = [1, 3, 2]
output = False
self.assertEqual(output, array_problems.isMonotonic(nums))
def test_flood_fill(self):
image = [
[1, 1, 1],
[1, 1, 0],
[1, 0, 1]
]
sr = 1
sc = 1
newColor = 2
output = [
[2, 2, 2],
[2, 2, 0],
[2, 0, 1]
]
self.assertListEqual(output, array_problems_four.floodFill(image, sr, sc, newColor))
image = [
[0, 0, 0],
[0, 0, 0]
]
sr = 0
sc = 0
newColor = 2
output = [
[2, 2, 2],
[2, 2, 2]
]
self.assertListEqual(output, array_problems_four.floodFill(image, sr, sc, newColor))
def test_circular_queue(self):
my_circular_queue = array_problems.MyCircularDeque(3)
self.assertEqual(True, my_circular_queue.insertLast(1))
self.assertEqual(True, my_circular_queue.insertLast(2))
self.assertEqual(True, my_circular_queue.insertFront(3))
self.assertEqual(False, my_circular_queue.insertFront(4))
self.assertEqual(2, my_circular_queue.getRear())
self.assertEqual(True, my_circular_queue.isFull())
self.assertEqual(True, my_circular_queue.deleteLast())
self.assertEqual(True, my_circular_queue.insertFront(4))
self.assertEqual(4, my_circular_queue.getFront())
def test_min_cost_climbing_stairs(self):
cost = [10, 15, 20]
output = 15
self.assertEqual(output, array_problems_five.minCostClimbingStairs(cost))
cost = [1, 100, 1, 1, 1, 100, 1, 1, 100, 1]
output = 6
self.assertEqual(output, array_problems_five.minCostClimbingStairs(cost))
def test_max_satisfied(self):
customers = [1, 0, 1, 2, 1, 1, 7, 5]
grumpy = [0, 1, 0, 1, 0, 1, 0, 1]
minutes = 3
output = 16
self.assertEqual(output, array_problems_five.maxSatisfied(customers, grumpy, minutes))
def test_max_consecutive(self):
nums = [1, 1, 0, 1, 1, 1]
output = 3
self.assertEqual(output, array_problems_two.findMaxConsecutiveOnes(nums))
nums = [1, 0, 1, 1, 0, 1]
output = 2
self.assertEqual(output, array_problems_two.findMaxConsecutiveOnes(nums))
def test_intersects(self):
nums1 = [1, 2, 2, 1]
nums2 = [2, 2]
output = [2, 2]
self.assertEqual(output, array_problems_four.intersect(nums1, nums2))
nums1 = [4, 9, 5]
nums2 = [9, 4, 9, 8, 4]
output = [4, 9]
self.assertEqual(output, array_problems_four.intersect(nums1, nums2))
def test_max_area(self):
height = [1, 8, 6, 2, 5, 4, 8, 3, 7]
output = 49
self.assertEqual(output, array_problems_five.maxArea(height))
height = [1, 1]
output = 1
self.assertEqual(output, array_problems_five.maxArea(height))
def test_sort_array(self):
nums = [5, 2, 3, 1]
output = [1, 2, 3, 5]
self.assertListEqual(output, array_problems_two.sortArray(nums))
nums = [5, 1, 1, 2, 0, 0]
output = [0, 0, 1, 1, 2, 5]
self.assertListEqual(output, array_problems_two.sortArray(nums))
def test_majority_elements(self):
nums = [2, 2, 1, 1, 1, 2, 2]
output = 2
self.assertEqual(output, array_problems.majorityElement(nums))
nums = [3, 2, 3]
output = 3
self.assertEqual(output, array_problems.majorityElement(nums))
def test_game_of_life(self):
board = [
[0, 1, 0],
[0, 0, 1],
[1, 1, 1],
[0, 0, 0]
]
output = [[0, 0, 0], [1, 0, 1], [0, 1, 1], [0, 1, 0]]
array_problems.gameOfLife(board)
self.assertListEqual(output, board)
board = [
[1, 1],
[1, 0]
]
output = [[1, 1], [1, 1]]
array_problems.gameOfLife(board)
self.assertListEqual(output, board)
def test_move_zeroes(self):
nums = [0, 1, 0, 3, 12]
array_problems.moveZeroes(nums)
output = [1, 3, 12, 0, 0]
self.assertListEqual(output, nums)
nums = [1, 3, 5, 0, 2, 0]
array_problems.moveZeroes(nums)
output = [1, 3, 5, 2, 0, 0]
self.assertListEqual(output, nums)
def test_reorder_log_files(self):
logs = ["dig1 8 1 5 1", "let1 art can", "dig2 3 6", "let2 own kit dig", "let3 art zero"]
output = ["let1 art can", "let3 art zero", "let2 own kit dig", "dig1 8 1 5 1", "dig2 3 6"]
self.assertListEqual(output, array_problems_five.reorderLogFiles(logs))
logs = ["a1 9 2 3 1", "g1 act car", "zo4 4 7", "ab1 off key dog", "a8 act zoo"]
output = ["g1 act car", "a8 act zoo", "ab1 off key dog", "a1 9 2 3 1", "zo4 4 7"]
self.assertListEqual(output, array_problems_five.reorderLogFiles(logs))
def test_sort_colors(self):
nums = [2, 0, 2, 1, 1, 0]
output = [0, 0, 1, 1, 2, 2]
array_problems_two.sortColors(nums)
self.assertListEqual(output, nums)
def test_max_profits(self):
prices = [7, 1, 5, 3, 6, 4]
output = 5
self.assertEqual(output, array_problems_five.maxProfit(prices))
prices = [7, 6, 4, 3, 1]
output = 0
self.assertEqual(output, array_problems_five.maxProfit(prices))
def test_number_of_islands(self):
input = [
["1", "1", "1", "1", "0"],
["1", "1", "0", "1", "0"],
["1", "1", "0", "0", "0"],
["0", "0", "0", "0", "0"]
]
output = 1
self.assertEqual(output, array_problems_two.numIslands(input))
input = [
["1", "1", "0", "0", "0"],
["1", "1", "0", "0", "0"],
["0", "0", "1", "0", "0"],
["0", "0", "0", "1", "1"]
]
output = 3
self.assertEqual(output, array_problems_two.numIslands(input))
def test_rotting_oranges(self):
grid = [
[2, 1, 1],
[1, 1, 0],
[0, 1, 1]
]
output = 4
self.assertEqual(output, array_problems_four.orangesRotting(grid))
grid = [[2, 1, 1], [0, 1, 1], [1, 0, 1]]
output = -1
self.assertEqual(output, array_problems_four.orangesRotting(grid))
grid = [[0, 2]]
output = 0
self.assertEqual(output, array_problems_four.orangesRotting(grid))
def test_remove_elements(self):
nums = [0, 1, 2, 2, 3, 0, 4, 2]
val = 2
output = 5
self.assertEqual(output, array_problems_five.removeElement(nums, val))
nums = [3, 2, 2, 3]
val = 3
output = 2
self.assertEqual(output, array_problems_five.removeElement(nums, val))
def test_find_judge(self):
n = 2
trust = [[1, 2]]
output = 2
self.assertEqual(output, array_problems_five.findJudge(n, trust))
n = 3
trust = [[1, 3], [2, 3]]
output = 3
self.assertEqual(output, array_problems_five.findJudge(n, trust))
n = 3
trust = [[1, 3], [2, 3], [3, 1]]
output = -1
self.assertEqual(output, array_problems_five.findJudge(n, trust))
def test_max_sub_array(self):
nums = [-2, 1, -3, 4, -1, 2, 1, -5, 4]
output = 6
self.assertEqual(output, array_problems_five.maxSubArray(nums))
nums = [1]
output = 1
self.assertEqual(output, array_problems_five.maxSubArray(nums))
nums = [5, 4, -1, 7, 8]
output = 23
self.assertEqual(output, array_problems_five.maxSubArray(nums))
def test_length_of_list(self):
nums = [10, 9, 2, 5, 3, 7, 101, 18]
output = 4
self.assertEqual(output, array_problems_five.lengthOfLIS(nums))
nums = [0, 1, 0, 3, 2, 3]
output = 4
self.assertEqual(output, array_problems_five.lengthOfLIS(nums))
nums = [7, 7, 7, 7, 7, 7, 7]
output = 1
self.assertEqual(output, array_problems_five.lengthOfLIS(nums))
def test_expressive_words(self):
# s = "heeellooo"
# words = ["hello", "hi", "helo"]
# output = 1
# self.assertEqual(output, array_problems.expressiveWords(s, words))
# s = "zzzzzyyyyy"
# words = ["zzyy", "zy", "zyy"]
# output = 3
# self.assertEqual(output, array_problems.expressiveWords(s, words))
pass
def test_rob(self):
nums = [1, 2, 3, 1]
output = 4
self.assertEqual(output, array_problems_five.rob(nums))
nums = [2, 7, 9, 3, 1]
output = 12
self.assertEqual(output, array_problems_five.rob(nums))
def test_merge2(self):
intervals = [[1, 3], [2, 6], [8, 10], [15, 18]]
output = [[1, 6], [8, 10], [15, 18]]
self.assertListEqual(output, array_problems_five.merge_2(intervals))
intervals = [[1, 4], [4, 5]]
output = [[1, 5]]
self.assertListEqual(output, array_problems_five.merge_2(intervals))
def test_maximal_square(self):
matrix = [
["1", "0", "1", "0", "0"],
["1", "0", "1", "1", "1"],
["1", "1", "1", "1", "1"],
["1", "0", "0", "1", "0"]
]
output = 4
self.assertEqual(output, array_problems_four.maximalSquare(matrix))
matrix = [
["0", "1"],
["1", "0"]
]
output = 1
self.assertEqual(output, array_problems_four.maximalSquare(matrix))
matrix = [
["0"]
]
output = 0
self.assertEqual(output, array_problems_four.maximalSquare(matrix))
def test_plus_one(self):
digits = [4, 3, 2, 1]
output = [4, 3, 2, 2]
self.assertListEqual(output, array_problems_five.plusOne(digits))
digits = [0]
output = [1]
self.assertListEqual(output, array_problems_five.plusOne(digits))
digits = [9]
output = [1, 0]
self.assertListEqual(output, array_problems_five.plusOne(digits))
def test_shortest_path_binary_matrix(self):
grid = [
[0, 1],
[1, 0]
]
output = 2
self.assertEqual(output, array_problems_four.shortestPathBinaryMatrix(grid))
grid = [
[0, 0, 0],
[1, 1, 0],
[1, 1, 0]
]
output = 4
self.assertEqual(output, array_problems_four.shortestPathBinaryMatrix(grid))
grid = [[1, 0, 0], [1, 1, 0], [1, 1, 0]]
output = -1
self.assertEqual(output, array_problems_four.shortestPathBinaryMatrix(grid))
def test_exists(self):
board = [["A", "B", "C", "E"], ["S", "F", "C", "S"], ["A", "D", "E", "E"]]
word = "ABCCED"
output = True
self.assertEqual(output, array_problems.exist(board, word))
board = [["A", "B", "C", "E"], ["S", "F", "C", "S"], ["A", "D", "E", "E"]]
word = "SEE"
output = True
self.assertEqual(output, array_problems.exist(board, word))
board = [["A", "B", "C", "E"], ["S", "F", "C", "S"], ["A", "D", "E", "E"]]
word = "ABCB"
output = False
self.assertEqual(output, array_problems.exist(board, word))
def test_can_jump(self):
nums = [2, 3, 1, 1, 4]
output = True
self.assertEqual(output, array_problems_five.canJump(nums))
nums = [3, 2, 1, 0, 4]
output = False
self.assertEqual(output, array_problems_five.canJump(nums))
def test_first_and_last_k(self):
nums = [1, 1, 1, 1, 1, 2, 2, 3, 4, 5, 5, 5, 5, 6]
k = 1
self.assertListEqual([0, 4], array_problems_four.first_and_last_of_k(nums, k))
nums = [1, 1, 1, 1, 1, 2, 2, 3, 4, 5, 5, 5, 5, 6, 11]
k = 10
array_problems.first_and_last_of_k(nums, k)
self.assertListEqual([-1, -1], array_problems_four.first_and_last_of_k(nums, k))
nums = [1, 1, 1, 1, 1, 2, 2, 3, 4, 5, 5, 5, 5, 6]
k = 3
self.assertListEqual([7, 7], array_problems_four.first_and_last_of_k(nums, k))
def test_toeplitze_matrix(self):
matrix = [[1, 2, 3, 4], [5, 1, 2, 3], [9, 5, 1, 2]]
output = True
self.assertEqual(output, array_problems_four.isToeplitzMatrix(matrix))
matrix = [[1, 2], [2, 2]]
output = False
self.assertEqual(output, array_problems_four.isToeplitzMatrix(matrix))
def test_is_alien_sorted(self):
words = ["hello", "leetcode"]
order = "hlabcdefgijkmnopqrstuvwxyz"
output = True
self.assertEqual(output, array_problems_five.isAlienSorted(words, order))
words = ["word", "world", "row"]
order = "worldabcefghijkmnpqstuvxyz"
output = False
self.assertEqual(output, array_problems_five.isAlienSorted(words, order))
words = ["apple", "app"]
order = "abcdefghijklmnopqrstuvwxyz"
output = False
self.assertEqual(output, array_problems_five.isAlienSorted(words, order))
def test_plus_one_large_number(self):
input = [
[9, 9, 9, 9],
[1, 1],
[9, 4, 5, 6],
[9, 0, 0, 0],
[9, 9, 9, 9]
]
output = [3, 8, 4, 6, 5]
self.assertListEqual(output, array_problems_five.plus_one_large_number(input))
def test_can_attend_meetings(self):
input = [[0, 30], [5, 10], [15, 20]]
output = False
self.assertEqual(output, array_problems_five.canAttendMeetings(input))
input = [[7, 10], [2, 4]]
output = True
self.assertEqual(output, array_problems_five.canAttendMeetings(input))
def test_num_meeting_rooms(self):
input = [[0, 30], [5, 10], [15, 20]]
output = 2
self.assertEqual(output, array_problems_five.numberMeetingRooms(input))
def test_meeting_room_conflicts(self):
calendar = [[1, 3], [4, 6], [6, 8], [9, 11], [6, 9], [1, 3], [4, 10]]
rooms = 3
queries = [[1, 9], [2, 6], [7, 9], [3, 5], [3, 9], [2, 4], [7, 10], [5, 9], [3, 10], [9, 10]]
output = [False, True, False, True, False, True, False, False, False, True]
self.assertListEqual(output, array_problems_three.meeting_room_conflicts(calendar, rooms, queries))
def test_pacific_atlantic(self):
heights = [[1, 2, 2, 3, 5], [3, 2, 3, 4, 4], [2, 4, 5, 3, 1], [6, 7, 1, 4, 5], [5, 1, 1, 2, 4]]
output = [[0, 4], [1, 3], [1, 4], [2, 2], [3, 0], [3, 1], [4, 0]]
self.assertCountEqual(output, array_problems.pacificAtlantic(heights))
heights = [[2, 1], [1, 2]]
output = [[0, 0], [0, 1], [1, 0], [1, 1]]
self.assertCountEqual(output, array_problems.pacificAtlantic(heights))
def test_pick_random_weight(self):
pick_weight_random = array_problems_three.PickWeightedRandom([1, 3, 7, 1])
print(pick_weight_random.pickIndex())
print(pick_weight_random.pickIndex())
print(pick_weight_random.pickIndex())
print(pick_weight_random.pickIndex())
print(pick_weight_random.pickIndex())
print(pick_weight_random.pickIndex())
print(pick_weight_random.pickIndex())
print(pick_weight_random.pickIndex())
print(pick_weight_random.pickIndex())
print(pick_weight_random.pickIndex())
print(pick_weight_random.pickIndex())
def test_full_justify(self):
words = ["This", "is", "an", "example", "of", "text", "justification."]
maxWidth = 16
output = [
"This is an",
"example of text",
"justification. "
]
self.assertListEqual(output, array_problems_three.fullJustify(words, maxWidth))
words = ["What", "must", "be", "acknowledgment", "shall", "be"]
maxWidth = 16
output = [
"What must be",
"acknowledgment ",
"shall be "
]
self.assertListEqual(output, array_problems_three.fullJustify(words, maxWidth))
def test_exclusive_time(self):
n = 2
logs = ["0:start:0", "1:start:2", "1:end:5", "0:end:6"]
output = [3, 4]
self.assertListEqual(output, array_problems_three.exclusiveTime(n, logs))
def test_rotate_matrix(self):
matrix = [
[1, 2, 3],
[4, 5, 6],
[7, 8, 9]
]
output = [
[7, 4, 1],
[8, 5, 2],
[9, 6, 3]
]
array_problems_five.rotate(matrix)
self.assertListEqual(output, matrix)
def test_fall_and_crush(self):
input = [['#', '.', '#', '#', '*'],
['#', '.', '.', '#', '#'],
['.', '#', '.', '#', '.'],
['.', '.', '#', '.', '#'],
['#', '*', '.', '.', '.'],
['.', '.', '*', '#', '.']]
output = [['.', '.', '.', '.', '*'],
['.', '.', '.', '.', '.'],
['.', '.', '.', '.', '.'],
['.', '.', '.', '.', '.'],
['.', '.', '.', '#', '#'],
['#', '.', '#', '#', '#']]
self.assertListEqual(output, array_problems_three.fallAndCrush2(input))
def test_number_of_markers_on_road(self):
coordinates = [[4, 7], [-1, 5], [3, 6]]
output = 9
self.assertEqual(output, array_problems_five.number_of_markers_on_road(coordinates))
def test_shortest_bridge(self):
grid = [[0, 1, 0], [0, 0, 0], [0, 0, 1]]
output = 2
self.assertEqual(output, array_problems_three.shortestBridge(grid))
grid = [[1, 1, 1, 1, 1], [1, 0, 0, 0, 1], [1, 0, 1, 0, 1], [1, 0, 0, 0, 1], [1, 1, 1, 1, 1]]
output = 1
self.assertEqual(output, array_problems_three.shortestBridge(grid))
grid = [
[1, 1, 0, 0, 0],
[1, 0, 0, 0, 0],
[1, 0, 0, 0, 0],
[0, 0, 0, 1, 1],
[0, 0, 0, 1, 1]
]
output = 3
self.assertEqual(output, array_problems_three.shortestBridge(grid))
grid = [
[0, 0, 0, 0, 0, 0, 0],
[1, 0, 0, 0, 0, 0, 0],
[1, 0, 0, 0, 0, 0, 0],
[1, 1, 0, 0, 0, 0, 0],
[0, 1, 0, 1, 1, 1, 0],
[0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0]
]
output = 1
self.assertEqual(output, array_problems_three.shortestBridge(grid))
if __name__ == '__main__':
    # Allow running this test module directly (python <file>.py) in addition
    # to discovery via a test runner.
    unittest.main()
|
[
"array_problems.Solutions_Four.validateStackSequences",
"array_problems.Solutions_Four.maxIncreaseKeepingSkyline",
"array_problems.Solutions_Five.intersection",
"array_problems.Solutions_Five.maxSatisfied",
"array_problems.Solutions_Five.permute",
"array_problems.Solutions_Five.findJudge",
"array_problems.Solutions.MyCircularDeque",
"array_problems.Solutions_Four.getMaximumGold",
"array_problems.Solutions_Two.sortArray",
"array_problems.Solutions.isMonotonic",
"array_problems.Solutions_Five.plusOne",
"array_problems.Solutions_Five.lastStoneWeight",
"array_problems.Solutions.pacificAtlantic",
"array_problems.Solutions_Five.rob",
"array_problems.Solutions_Four.isToeplitzMatrix",
"array_problems.Solutions_Five.intervalIntersection",
"array_problems.Solutions_Five.merge",
"array_problems.Solutions_Four.orangesRotting",
"array_problems.Solutions_Three.meeting_room_conflicts",
"array_problems.Solutions_Four.countSquares",
"array_problems.Solutions.first_and_last_of_k",
"array_problems.Solutions_Five.minFallingPathSum",
"unittest.main",
"array_problems.Solutions_Two.numIslands",
"array_problems.Solutions_Five.replaceElements",
"array_problems.Solutions_Five.maxSubArray",
"array_problems.Solutions_Five.canJump",
"array_problems.Solutions_Three.shortestBridge",
"array_problems.Solutions_Five.plus_one_large_number",
"array_problems.Solutions.moveZeroes",
"array_problems.Solutions_Four.largest_parameter",
"array_problems.Solutions_Five.removeElement",
"array_problems.Solutions.exist",
"array_problems.Solutions_Three.exclusiveTime",
"array_problems.Solutions.numEnclaves",
"array_problems.Solutions_Three.fullJustify",
"array_problems.Solutions_Two.findMaxConsecutiveOnes",
"array_problems.Solutions_Five.sortArrayByParity",
"array_problems.Solutions_Four.maximalSquare",
"array_problems.Solutions_Four.first_and_last_of_k",
"array_problems.Solutions.majorityElement",
"array_problems.Solutions_Five.relativeSortArray",
"array_problems.Solutions_Five.maxProfit",
"array_problems.Solutions_Two.maxAreaOfIsland",
"array_problems.Solutions.minesweeper",
"array_problems.Solutions_Five.maxArea",
"array_problems.Solutions_Five.isAlienSorted",
"array_problems.Solutions_Four.shortestPathBinaryMatrix",
"array_problems.Solutions_Five.minCostClimbingStairs",
"array_problems.Solutions_Five.calPoints",
"array_problems.Solutions_Two.islandPerimeter",
"array_problems.Solutions_Five.reorderLogFiles",
"array_problems.Solutions_Five.numberMeetingRooms",
"array_problems.Solutions_Four.floodFill",
"array_problems.Solutions_Five.longestOnes",
"array_problems.Solutions_Five.number_of_markers_on_road",
"array_problems.Solutions_Five.rotate",
"array_problems.Solutions.gameOfLife",
"array_problems.Solutions_Four.countBattleships",
"array_problems.Solutions_Five.subsets",
"array_problems.Solutions_Five.canReach",
"array_problems.Solutions_Five.arrayPairSum",
"array_problems.Solutions_Five.lengthOfLIS",
"array_problems.Solutions_Four.minPathSum",
"array_problems.Solutions_Five.merge_2",
"array_problems.Solutions_Three.fallAndCrush2",
"array_problems.Solutions_Three.PickWeightedRandom",
"array_problems.Solutions_Five.canAttendMeetings",
"array_problems.Solutions_Two.sortColors",
"array_problems.Solutions_Four.intersect"
] |
[((28403, 28418), 'unittest.main', 'unittest.main', ([], {}), '()\n', (28416, 28418), False, 'import unittest\n'), ((10741, 10774), 'array_problems.Solutions.MyCircularDeque', 'array_problems.MyCircularDeque', (['(3)'], {}), '(3)\n', (10771, 10774), True, 'import array_problems.Solutions as array_problems\n'), ((13663, 13695), 'array_problems.Solutions.gameOfLife', 'array_problems.gameOfLife', (['board'], {}), '(board)\n', (13688, 13695), True, 'import array_problems.Solutions as array_problems\n'), ((13849, 13881), 'array_problems.Solutions.gameOfLife', 'array_problems.gameOfLife', (['board'], {}), '(board)\n', (13874, 13881), True, 'import array_problems.Solutions as array_problems\n'), ((13999, 14030), 'array_problems.Solutions.moveZeroes', 'array_problems.moveZeroes', (['nums'], {}), '(nums)\n', (14024, 14030), True, 'import array_problems.Solutions as array_problems\n'), ((14150, 14181), 'array_problems.Solutions.moveZeroes', 'array_problems.moveZeroes', (['nums'], {}), '(nums)\n', (14175, 14181), True, 'import array_problems.Solutions as array_problems\n'), ((14945, 14980), 'array_problems.Solutions_Two.sortColors', 'array_problems_two.sortColors', (['nums'], {}), '(nums)\n', (14974, 14980), True, 'import array_problems.Solutions_Two as array_problems_two\n'), ((21746, 21789), 'array_problems.Solutions.first_and_last_of_k', 'array_problems.first_and_last_of_k', (['nums', 'k'], {}), '(nums, k)\n', (21780, 21789), True, 'import array_problems.Solutions as array_problems\n'), ((24753, 24806), 'array_problems.Solutions_Three.PickWeightedRandom', 'array_problems_three.PickWeightedRandom', (['[1, 3, 7, 1]'], {}), '([1, 3, 7, 1])\n', (24792, 24806), True, 'import array_problems.Solutions_Three as array_problems_three\n'), ((26424, 26458), 'array_problems.Solutions_Five.rotate', 'array_problems_five.rotate', (['matrix'], {}), '(matrix)\n', (26450, 26458), True, 'import array_problems.Solutions_Five as array_problems_five\n'), ((512, 548), 
'array_problems.Solutions_Five.merge', 'array_problems_five.merge', (['intervals'], {}), '(intervals)\n', (537, 548), True, 'import array_problems.Solutions_Five as array_problems_five\n'), ((650, 686), 'array_problems.Solutions_Five.merge', 'array_problems_five.merge', (['intervals'], {}), '(intervals)\n', (675, 686), True, 'import array_problems.Solutions_Five as array_problems_five\n'), ((884, 928), 'array_problems.Solutions_Four.largest_parameter', 'array_problems_four.largest_parameter', (['input'], {}), '(input)\n', (921, 928), True, 'import array_problems.Solutions_Four as array_problems_four\n'), ((1182, 1226), 'array_problems.Solutions_Four.largest_parameter', 'array_problems_four.largest_parameter', (['input'], {}), '(input)\n', (1219, 1226), True, 'import array_problems.Solutions_Four as array_problems_four\n'), ((1463, 1514), 'array_problems.Solutions_Four.maxIncreaseKeepingSkyline', 'array_problems_four.maxIncreaseKeepingSkyline', (['grid'], {}), '(grid)\n', (1508, 1514), True, 'import array_problems.Solutions_Four as array_problems_four\n'), ((1617, 1668), 'array_problems.Solutions_Four.maxIncreaseKeepingSkyline', 'array_problems_four.maxIncreaseKeepingSkyline', (['grid'], {}), '(grid)\n', (1662, 1668), True, 'import array_problems.Solutions_Four as array_problems_four\n'), ((1792, 1830), 'array_problems.Solutions_Five.arrayPairSum', 'array_problems_five.arrayPairSum', (['nums'], {}), '(nums)\n', (1824, 1830), True, 'import array_problems.Solutions_Five as array_problems_five\n'), ((1912, 1950), 'array_problems.Solutions_Five.arrayPairSum', 'array_problems_five.arrayPairSum', (['nums'], {}), '(nums)\n', (1944, 1950), True, 'import array_problems.Solutions_Five as array_problems_five\n'), ((2086, 2130), 'array_problems.Solutions_Five.sortArrayByParity', 'array_problems_five.sortArrayByParity', (['input'], {}), '(input)\n', (2123, 2130), True, 'import array_problems.Solutions_Five as array_problems_five\n'), ((2278, 2320), 
'array_problems.Solutions_Five.replaceElements', 'array_problems_five.replaceElements', (['input'], {}), '(input)\n', (2313, 2320), True, 'import array_problems.Solutions_Five as array_problems_five\n'), ((2515, 2554), 'array_problems.Solutions_Four.countSquares', 'array_problems_four.countSquares', (['input'], {}), '(input)\n', (2547, 2554), True, 'import array_problems.Solutions_Four as array_problems_four\n'), ((2704, 2743), 'array_problems.Solutions_Four.countSquares', 'array_problems_four.countSquares', (['input'], {}), '(input)\n', (2736, 2743), True, 'import array_problems.Solutions_Four as array_problems_four\n'), ((2919, 2962), 'array_problems.Solutions_Four.countBattleships', 'array_problems_four.countBattleships', (['board'], {}), '(board)\n', (2955, 2962), True, 'import array_problems.Solutions_Four as array_problems_four\n'), ((3234, 3297), 'array_problems.Solutions_Five.intervalIntersection', 'array_problems_five.intervalIntersection', (['firstList', 'secondList'], {}), '(firstList, secondList)\n', (3274, 3297), True, 'import array_problems.Solutions_Five as array_problems_five\n'), ((3474, 3507), 'array_problems.Solutions_Five.permute', 'array_problems_five.permute', (['nums'], {}), '(nums)\n', (3501, 3507), True, 'import array_problems.Solutions_Five as array_problems_five\n'), ((3673, 3706), 'array_problems.Solutions_Five.subsets', 'array_problems_five.subsets', (['nums'], {}), '(nums)\n', (3700, 3706), True, 'import array_problems.Solutions_Five as array_problems_five\n'), ((3792, 3825), 'array_problems.Solutions_Five.subsets', 'array_problems_five.subsets', (['nums'], {}), '(nums)\n', (3819, 3825), True, 'import array_problems.Solutions_Five as array_problems_five\n'), ((3952, 3986), 'array_problems.Solutions_Five.calPoints', 'array_problems_five.calPoints', (['ops'], {}), '(ops)\n', (3981, 3986), True, 'import array_problems.Solutions_Five as array_problems_five\n'), ((4097, 4131), 'array_problems.Solutions_Five.calPoints', 
'array_problems_five.calPoints', (['ops'], {}), '(ops)\n', (4126, 4131), True, 'import array_problems.Solutions_Five as array_problems_five\n'), ((4354, 4394), 'array_problems.Solutions_Two.islandPerimeter', 'array_problems_two.islandPerimeter', (['grid'], {}), '(grid)\n', (4388, 4394), True, 'import array_problems.Solutions_Two as array_problems_two\n'), ((4469, 4509), 'array_problems.Solutions_Two.islandPerimeter', 'array_problems_two.islandPerimeter', (['grid'], {}), '(grid)\n', (4503, 4509), True, 'import array_problems.Solutions_Two as array_problems_two\n'), ((4587, 4627), 'array_problems.Solutions_Two.islandPerimeter', 'array_problems_two.islandPerimeter', (['grid'], {}), '(grid)\n', (4621, 4627), True, 'import array_problems.Solutions_Two as array_problems_two\n'), ((5171, 5211), 'array_problems.Solutions_Two.maxAreaOfIsland', 'array_problems_two.maxAreaOfIsland', (['grid'], {}), '(grid)\n', (5205, 5211), True, 'import array_problems.Solutions_Two as array_problems_two\n'), ((5307, 5347), 'array_problems.Solutions_Two.maxAreaOfIsland', 'array_problems_two.maxAreaOfIsland', (['grid'], {}), '(grid)\n', (5341, 5347), True, 'import array_problems.Solutions_Two as array_problems_two\n'), ((5557, 5606), 'array_problems.Solutions_Five.relativeSortArray', 'array_problems_five.relativeSortArray', (['arr1', 'arr2'], {}), '(arr1, arr2)\n', (5594, 5606), True, 'import array_problems.Solutions_Five as array_problems_five\n'), ((5753, 5799), 'array_problems.Solutions_Five.intersection', 'array_problems_five.intersection', (['nums1', 'nums2'], {}), '(nums1, nums2)\n', (5785, 5799), True, 'import array_problems.Solutions_Five as array_problems_five\n'), ((5921, 5967), 'array_problems.Solutions_Five.intersection', 'array_problems_five.intersection', (['nums1', 'nums2'], {}), '(nums1, nums2)\n', (5953, 5967), True, 'import array_problems.Solutions_Five as array_problems_five\n'), ((6158, 6203), 'array_problems.Solutions_Five.minFallingPathSum', 
'array_problems_five.minFallingPathSum', (['matrix'], {}), '(matrix)\n', (6195, 6203), True, 'import array_problems.Solutions_Five as array_problems_five\n'), ((6333, 6378), 'array_problems.Solutions_Five.minFallingPathSum', 'array_problems_five.minFallingPathSum', (['matrix'], {}), '(matrix)\n', (6370, 6378), True, 'import array_problems.Solutions_Five as array_problems_five\n'), ((6610, 6651), 'array_problems.Solutions_Four.getMaximumGold', 'array_problems_four.getMaximumGold', (['input'], {}), '(input)\n', (6644, 6651), True, 'import array_problems.Solutions_Four as array_problems_four\n'), ((6802, 6843), 'array_problems.Solutions_Four.getMaximumGold', 'array_problems_four.getMaximumGold', (['input'], {}), '(input)\n', (6836, 6843), True, 'import array_problems.Solutions_Four as array_problems_four\n'), ((7011, 7069), 'array_problems.Solutions_Four.validateStackSequences', 'array_problems_four.validateStackSequences', (['pushed', 'popped'], {}), '(pushed, popped)\n', (7053, 7069), True, 'import array_problems.Solutions_Four as array_problems_four\n'), ((7193, 7251), 'array_problems.Solutions_Four.validateStackSequences', 'array_problems_four.validateStackSequences', (['pushed', 'popped'], {}), '(pushed, popped)\n', (7235, 7251), True, 'import array_problems.Solutions_Four as array_problems_four\n'), ((7710, 7750), 'array_problems.Solutions.minesweeper', 'array_problems.minesweeper', (['board', 'click'], {}), '(board, click)\n', (7736, 7750), True, 'import array_problems.Solutions as array_problems\n'), ((8175, 8215), 'array_problems.Solutions.minesweeper', 'array_problems.minesweeper', (['board', 'click'], {}), '(board, click)\n', (8201, 8215), True, 'import array_problems.Solutions as array_problems\n'), ((8337, 8379), 'array_problems.Solutions_Five.lastStoneWeight', 'array_problems_five.lastStoneWeight', (['input'], {}), '(input)\n', (8372, 8379), True, 'import array_problems.Solutions_Five as array_problems_five\n'), ((8521, 8561), 
'array_problems.Solutions_Five.canReach', 'array_problems_five.canReach', (['arr', 'start'], {}), '(arr, start)\n', (8549, 8561), True, 'import array_problems.Solutions_Five as array_problems_five\n'), ((8667, 8707), 'array_problems.Solutions_Five.canReach', 'array_problems_five.canReach', (['arr', 'start'], {}), '(arr, start)\n', (8695, 8707), True, 'import array_problems.Solutions_Five as array_problems_five\n'), ((8883, 8923), 'array_problems.Solutions_Five.longestOnes', 'array_problems_five.longestOnes', (['nums', 'k'], {}), '(nums, k)\n', (8914, 8923), True, 'import array_problems.Solutions_Five as array_problems_five\n'), ((9040, 9080), 'array_problems.Solutions_Five.longestOnes', 'array_problems_five.longestOnes', (['nums', 'k'], {}), '(nums, k)\n', (9071, 9080), True, 'import array_problems.Solutions_Five as array_problems_five\n'), ((9298, 9330), 'array_problems.Solutions.numEnclaves', 'array_problems.numEnclaves', (['grid'], {}), '(grid)\n', (9324, 9330), True, 'import array_problems.Solutions as array_problems\n'), ((9467, 9503), 'array_problems.Solutions_Four.minPathSum', 'array_problems_four.minPathSum', (['grid'], {}), '(grid)\n', (9497, 9503), True, 'import array_problems.Solutions_Four as array_problems_four\n'), ((9596, 9632), 'array_problems.Solutions_Four.minPathSum', 'array_problems_four.minPathSum', (['grid'], {}), '(grid)\n', (9626, 9632), True, 'import array_problems.Solutions_Four as array_problems_four\n'), ((9751, 9783), 'array_problems.Solutions.isMonotonic', 'array_problems.isMonotonic', (['nums'], {}), '(nums)\n', (9777, 9783), True, 'import array_problems.Solutions as array_problems\n'), ((9868, 9900), 'array_problems.Solutions.isMonotonic', 'array_problems.isMonotonic', (['nums'], {}), '(nums)\n', (9894, 9900), True, 'import array_problems.Solutions as array_problems\n'), ((9983, 10015), 'array_problems.Solutions.isMonotonic', 'array_problems.isMonotonic', (['nums'], {}), '(nums)\n', (10009, 10015), True, 'import 
array_problems.Solutions as array_problems\n'), ((10330, 10384), 'array_problems.Solutions_Four.floodFill', 'array_problems_four.floodFill', (['image', 'sr', 'sc', 'newColor'], {}), '(image, sr, sc, newColor)\n', (10359, 10384), True, 'import array_problems.Solutions_Four as array_problems_four\n'), ((10621, 10675), 'array_problems.Solutions_Four.floodFill', 'array_problems_four.floodFill', (['image', 'sr', 'sc', 'newColor'], {}), '(image, sr, sc, newColor)\n', (10650, 10675), True, 'import array_problems.Solutions_Four as array_problems_four\n'), ((11463, 11510), 'array_problems.Solutions_Five.minCostClimbingStairs', 'array_problems_five.minCostClimbingStairs', (['cost'], {}), '(cost)\n', (11504, 11510), True, 'import array_problems.Solutions_Five as array_problems_five\n'), ((11616, 11663), 'array_problems.Solutions_Five.minCostClimbingStairs', 'array_problems_five.minCostClimbingStairs', (['cost'], {}), '(cost)\n', (11657, 11663), True, 'import array_problems.Solutions_Five as array_problems_five\n'), ((11860, 11920), 'array_problems.Solutions_Five.maxSatisfied', 'array_problems_five.maxSatisfied', (['customers', 'grumpy', 'minutes'], {}), '(customers, grumpy, minutes)\n', (11892, 11920), True, 'import array_problems.Solutions_Five as array_problems_five\n'), ((12045, 12092), 'array_problems.Solutions_Two.findMaxConsecutiveOnes', 'array_problems_two.findMaxConsecutiveOnes', (['nums'], {}), '(nums)\n', (12086, 12092), True, 'import array_problems.Solutions_Two as array_problems_two\n'), ((12180, 12227), 'array_problems.Solutions_Two.findMaxConsecutiveOnes', 'array_problems_two.findMaxConsecutiveOnes', (['nums'], {}), '(nums)\n', (12221, 12227), True, 'import array_problems.Solutions_Two as array_problems_two\n'), ((12370, 12413), 'array_problems.Solutions_Four.intersect', 'array_problems_four.intersect', (['nums1', 'nums2'], {}), '(nums1, nums2)\n', (12399, 12413), True, 'import array_problems.Solutions_Four as array_problems_four\n'), ((12530, 12573), 
'array_problems.Solutions_Four.intersect', 'array_problems_four.intersect', (['nums1', 'nums2'], {}), '(nums1, nums2)\n', (12559, 12573), True, 'import array_problems.Solutions_Four as array_problems_four\n'), ((12703, 12738), 'array_problems.Solutions_Five.maxArea', 'array_problems_five.maxArea', (['height'], {}), '(height)\n', (12730, 12738), True, 'import array_problems.Solutions_Five as array_problems_five\n'), ((12816, 12851), 'array_problems.Solutions_Five.maxArea', 'array_problems_five.maxArea', (['height'], {}), '(height)\n', (12843, 12851), True, 'import array_problems.Solutions_Five as array_problems_five\n'), ((12980, 13014), 'array_problems.Solutions_Two.sortArray', 'array_problems_two.sortArray', (['nums'], {}), '(nums)\n', (13008, 13014), True, 'import array_problems.Solutions_Two as array_problems_two\n'), ((13123, 13157), 'array_problems.Solutions_Two.sortArray', 'array_problems_two.sortArray', (['nums'], {}), '(nums)\n', (13151, 13157), True, 'import array_problems.Solutions_Two as array_problems_two\n'), ((13287, 13323), 'array_problems.Solutions.majorityElement', 'array_problems.majorityElement', (['nums'], {}), '(nums)\n', (13317, 13323), True, 'import array_problems.Solutions as array_problems\n'), ((13402, 13438), 'array_problems.Solutions.majorityElement', 'array_problems.majorityElement', (['nums'], {}), '(nums)\n', (13432, 13438), True, 'import array_problems.Solutions as array_problems\n'), ((14533, 14574), 'array_problems.Solutions_Five.reorderLogFiles', 'array_problems_five.reorderLogFiles', (['logs'], {}), '(logs)\n', (14568, 14574), True, 'import array_problems.Solutions_Five as array_problems_five\n'), ((14791, 14832), 'array_problems.Solutions_Five.reorderLogFiles', 'array_problems_five.reorderLogFiles', (['logs'], {}), '(logs)\n', (14826, 14832), True, 'import array_problems.Solutions_Five as array_problems_five\n'), ((15145, 15182), 'array_problems.Solutions_Five.maxProfit', 'array_problems_five.maxProfit', (['prices'], {}), 
'(prices)\n', (15174, 15182), True, 'import array_problems.Solutions_Five as array_problems_five\n'), ((15269, 15306), 'array_problems.Solutions_Five.maxProfit', 'array_problems_five.maxProfit', (['prices'], {}), '(prices)\n', (15298, 15306), True, 'import array_problems.Solutions_Five as array_problems_five\n'), ((15582, 15618), 'array_problems.Solutions_Two.numIslands', 'array_problems_two.numIslands', (['input'], {}), '(input)\n', (15611, 15618), True, 'import array_problems.Solutions_Two as array_problems_two\n'), ((15855, 15891), 'array_problems.Solutions_Two.numIslands', 'array_problems_two.numIslands', (['input'], {}), '(input)\n', (15884, 15891), True, 'import array_problems.Solutions_Two as array_problems_two\n'), ((16077, 16117), 'array_problems.Solutions_Four.orangesRotting', 'array_problems_four.orangesRotting', (['grid'], {}), '(grid)\n', (16111, 16117), True, 'import array_problems.Solutions_Four as array_problems_four\n'), ((16221, 16261), 'array_problems.Solutions_Four.orangesRotting', 'array_problems_four.orangesRotting', (['grid'], {}), '(grid)\n', (16255, 16261), True, 'import array_problems.Solutions_Four as array_problems_four\n'), ((16339, 16379), 'array_problems.Solutions_Four.orangesRotting', 'array_problems_four.orangesRotting', (['grid'], {}), '(grid)\n', (16373, 16379), True, 'import array_problems.Solutions_Four as array_problems_four\n'), ((16526, 16570), 'array_problems.Solutions_Five.removeElement', 'array_problems_five.removeElement', (['nums', 'val'], {}), '(nums, val)\n', (16559, 16570), True, 'import array_problems.Solutions_Five as array_problems_five\n'), ((16668, 16712), 'array_problems.Solutions_Five.removeElement', 'array_problems_five.removeElement', (['nums', 'val'], {}), '(nums, val)\n', (16701, 16712), True, 'import array_problems.Solutions_Five as array_problems_five\n'), ((16837, 16876), 'array_problems.Solutions_Five.findJudge', 'array_problems_five.findJudge', (['n', 'trust'], {}), '(n, trust)\n', (16866, 16876), 
True, 'import array_problems.Solutions_Five as array_problems_five\n'), ((16977, 17016), 'array_problems.Solutions_Five.findJudge', 'array_problems_five.findJudge', (['n', 'trust'], {}), '(n, trust)\n', (17006, 17016), True, 'import array_problems.Solutions_Five as array_problems_five\n'), ((17126, 17165), 'array_problems.Solutions_Five.findJudge', 'array_problems_five.findJudge', (['n', 'trust'], {}), '(n, trust)\n', (17155, 17165), True, 'import array_problems.Solutions_Five as array_problems_five\n'), ((17301, 17338), 'array_problems.Solutions_Five.maxSubArray', 'array_problems_five.maxSubArray', (['nums'], {}), '(nums)\n', (17332, 17338), True, 'import array_problems.Solutions_Five as array_problems_five\n'), ((17411, 17448), 'array_problems.Solutions_Five.maxSubArray', 'array_problems_five.maxSubArray', (['nums'], {}), '(nums)\n', (17442, 17448), True, 'import array_problems.Solutions_Five as array_problems_five\n'), ((17535, 17572), 'array_problems.Solutions_Five.maxSubArray', 'array_problems_five.maxSubArray', (['nums'], {}), '(nums)\n', (17566, 17572), True, 'import array_problems.Solutions_Five as array_problems_five\n'), ((17706, 17743), 'array_problems.Solutions_Five.lengthOfLIS', 'array_problems_five.lengthOfLIS', (['nums'], {}), '(nums)\n', (17737, 17743), True, 'import array_problems.Solutions_Five as array_problems_five\n'), ((17831, 17868), 'array_problems.Solutions_Five.lengthOfLIS', 'array_problems_five.lengthOfLIS', (['nums'], {}), '(nums)\n', (17862, 17868), True, 'import array_problems.Solutions_Five as array_problems_five\n'), ((17959, 17996), 'array_problems.Solutions_Five.lengthOfLIS', 'array_problems_five.lengthOfLIS', (['nums'], {}), '(nums)\n', (17990, 17996), True, 'import array_problems.Solutions_Five as array_problems_five\n'), ((18485, 18514), 'array_problems.Solutions_Five.rob', 'array_problems_five.rob', (['nums'], {}), '(nums)\n', (18508, 18514), True, 'import array_problems.Solutions_Five as array_problems_five\n'), ((18600, 
18629), 'array_problems.Solutions_Five.rob', 'array_problems_five.rob', (['nums'], {}), '(nums)\n', (18623, 18629), True, 'import array_problems.Solutions_Five as array_problems_five\n'), ((18797, 18835), 'array_problems.Solutions_Five.merge_2', 'array_problems_five.merge_2', (['intervals'], {}), '(intervals)\n', (18824, 18835), True, 'import array_problems.Solutions_Five as array_problems_five\n'), ((18937, 18975), 'array_problems.Solutions_Five.merge_2', 'array_problems_five.merge_2', (['intervals'], {}), '(intervals)\n', (18964, 18975), True, 'import array_problems.Solutions_Five as array_problems_five\n'), ((19249, 19290), 'array_problems.Solutions_Four.maximalSquare', 'array_problems_four.maximalSquare', (['matrix'], {}), '(matrix)\n', (19282, 19290), True, 'import array_problems.Solutions_Four as array_problems_four\n'), ((19420, 19461), 'array_problems.Solutions_Four.maximalSquare', 'array_problems_four.maximalSquare', (['matrix'], {}), '(matrix)\n', (19453, 19461), True, 'import array_problems.Solutions_Four as array_problems_four\n'), ((19562, 19603), 'array_problems.Solutions_Four.maximalSquare', 'array_problems_four.maximalSquare', (['matrix'], {}), '(matrix)\n', (19595, 19603), True, 'import array_problems.Solutions_Four as array_problems_four\n'), ((19732, 19767), 'array_problems.Solutions_Five.plusOne', 'array_problems_five.plusOne', (['digits'], {}), '(digits)\n', (19759, 19767), True, 'import array_problems.Solutions_Five as array_problems_five\n'), ((19848, 19883), 'array_problems.Solutions_Five.plusOne', 'array_problems_five.plusOne', (['digits'], {}), '(digits)\n', (19875, 19883), True, 'import array_problems.Solutions_Five as array_problems_five\n'), ((19967, 20002), 'array_problems.Solutions_Five.plusOne', 'array_problems_five.plusOne', (['digits'], {}), '(digits)\n', (19994, 20002), True, 'import array_problems.Solutions_Five as array_problems_five\n'), ((20171, 20221), 'array_problems.Solutions_Four.shortestPathBinaryMatrix', 
'array_problems_four.shortestPathBinaryMatrix', (['grid'], {}), '(grid)\n', (20215, 20221), True, 'import array_problems.Solutions_Four as array_problems_four\n'), ((20370, 20420), 'array_problems.Solutions_Four.shortestPathBinaryMatrix', 'array_problems_four.shortestPathBinaryMatrix', (['grid'], {}), '(grid)\n', (20414, 20420), True, 'import array_problems.Solutions_Four as array_problems_four\n'), ((20524, 20574), 'array_problems.Solutions_Four.shortestPathBinaryMatrix', 'array_problems_four.shortestPathBinaryMatrix', (['grid'], {}), '(grid)\n', (20568, 20574), True, 'import array_problems.Solutions_Four as array_problems_four\n'), ((20766, 20799), 'array_problems.Solutions.exist', 'array_problems.exist', (['board', 'word'], {}), '(board, word)\n', (20786, 20799), True, 'import array_problems.Solutions as array_problems\n'), ((20960, 20993), 'array_problems.Solutions.exist', 'array_problems.exist', (['board', 'word'], {}), '(board, word)\n', (20980, 20993), True, 'import array_problems.Solutions as array_problems\n'), ((21156, 21189), 'array_problems.Solutions.exist', 'array_problems.exist', (['board', 'word'], {}), '(board, word)\n', (21176, 21189), True, 'import array_problems.Solutions as array_problems\n'), ((21307, 21340), 'array_problems.Solutions_Five.canJump', 'array_problems_five.canJump', (['nums'], {}), '(nums)\n', (21334, 21340), True, 'import array_problems.Solutions_Five as array_problems_five\n'), ((21429, 21462), 'array_problems.Solutions_Five.canJump', 'array_problems_five.canJump', (['nums'], {}), '(nums)\n', (21456, 21462), True, 'import array_problems.Solutions_Five as array_problems_five\n'), ((21611, 21659), 'array_problems.Solutions_Four.first_and_last_of_k', 'array_problems_four.first_and_last_of_k', (['nums', 'k'], {}), '(nums, k)\n', (21650, 21659), True, 'import array_problems.Solutions_Four as array_problems_four\n'), ((21829, 21877), 'array_problems.Solutions_Four.first_and_last_of_k', 'array_problems_four.first_and_last_of_k', 
(['nums', 'k'], {}), '(nums, k)\n', (21868, 21877), True, 'import array_problems.Solutions_Four as array_problems_four\n'), ((21988, 22036), 'array_problems.Solutions_Four.first_and_last_of_k', 'array_problems_four.first_and_last_of_k', (['nums', 'k'], {}), '(nums, k)\n', (22027, 22036), True, 'import array_problems.Solutions_Four as array_problems_four\n'), ((22191, 22235), 'array_problems.Solutions_Four.isToeplitzMatrix', 'array_problems_four.isToeplitzMatrix', (['matrix'], {}), '(matrix)\n', (22227, 22235), True, 'import array_problems.Solutions_Four as array_problems_four\n'), ((22327, 22371), 'array_problems.Solutions_Four.isToeplitzMatrix', 'array_problems_four.isToeplitzMatrix', (['matrix'], {}), '(matrix)\n', (22363, 22371), True, 'import array_problems.Solutions_Four as array_problems_four\n'), ((22548, 22595), 'array_problems.Solutions_Five.isAlienSorted', 'array_problems_five.isAlienSorted', (['words', 'order'], {}), '(words, order)\n', (22581, 22595), True, 'import array_problems.Solutions_Five as array_problems_five\n'), ((22739, 22786), 'array_problems.Solutions_Five.isAlienSorted', 'array_problems_five.isAlienSorted', (['words', 'order'], {}), '(words, order)\n', (22772, 22786), True, 'import array_problems.Solutions_Five as array_problems_five\n'), ((22922, 22969), 'array_problems.Solutions_Five.isAlienSorted', 'array_problems_five.isAlienSorted', (['words', 'order'], {}), '(words, order)\n', (22955, 22969), True, 'import array_problems.Solutions_Five as array_problems_five\n'), ((23235, 23283), 'array_problems.Solutions_Five.plus_one_large_number', 'array_problems_five.plus_one_large_number', (['input'], {}), '(input)\n', (23276, 23283), True, 'import array_problems.Solutions_Five as array_problems_five\n'), ((23427, 23471), 'array_problems.Solutions_Five.canAttendMeetings', 'array_problems_five.canAttendMeetings', (['input'], {}), '(input)\n', (23464, 23471), True, 'import array_problems.Solutions_Five as array_problems_five\n'), ((23562, 23606), 
'array_problems.Solutions_Five.canAttendMeetings', 'array_problems_five.canAttendMeetings', (['input'], {}), '(input)\n', (23599, 23606), True, 'import array_problems.Solutions_Five as array_problems_five\n'), ((23744, 23789), 'array_problems.Solutions_Five.numberMeetingRooms', 'array_problems_five.numberMeetingRooms', (['input'], {}), '(input)\n', (23782, 23789), True, 'import array_problems.Solutions_Five as array_problems_five\n'), ((24154, 24223), 'array_problems.Solutions_Three.meeting_room_conflicts', 'array_problems_three.meeting_room_conflicts', (['calendar', 'rooms', 'queries'], {}), '(calendar, rooms, queries)\n', (24197, 24223), True, 'import array_problems.Solutions_Three as array_problems_three\n'), ((24479, 24518), 'array_problems.Solutions.pacificAtlantic', 'array_problems.pacificAtlantic', (['heights'], {}), '(heights)\n', (24509, 24518), True, 'import array_problems.Solutions as array_problems\n'), ((24643, 24682), 'array_problems.Solutions.pacificAtlantic', 'array_problems.pacificAtlantic', (['heights'], {}), '(heights)\n', (24673, 24682), True, 'import array_problems.Solutions as array_problems\n'), ((25610, 25659), 'array_problems.Solutions_Three.fullJustify', 'array_problems_three.fullJustify', (['words', 'maxWidth'], {}), '(words, maxWidth)\n', (25642, 25659), True, 'import array_problems.Solutions_Three as array_problems_three\n'), ((25916, 25965), 'array_problems.Solutions_Three.fullJustify', 'array_problems_three.fullJustify', (['words', 'maxWidth'], {}), '(words, maxWidth)\n', (25948, 25965), True, 'import array_problems.Solutions_Three as array_problems_three\n'), ((26142, 26185), 'array_problems.Solutions_Three.exclusiveTime', 'array_problems_three.exclusiveTime', (['n', 'logs'], {}), '(n, logs)\n', (26176, 26185), True, 'import array_problems.Solutions_Three as array_problems_three\n'), ((27111, 27152), 'array_problems.Solutions_Three.fallAndCrush2', 'array_problems_three.fallAndCrush2', (['input'], {}), '(input)\n', (27145, 27152), 
True, 'import array_problems.Solutions_Three as array_problems_three\n'), ((27301, 27359), 'array_problems.Solutions_Five.number_of_markers_on_road', 'array_problems_five.number_of_markers_on_road', (['coordinates'], {}), '(coordinates)\n', (27346, 27359), True, 'import array_problems.Solutions_Five as array_problems_five\n'), ((27499, 27540), 'array_problems.Solutions_Three.shortestBridge', 'array_problems_three.shortestBridge', (['grid'], {}), '(grid)\n', (27534, 27540), True, 'import array_problems.Solutions_Three as array_problems_three\n'), ((27695, 27736), 'array_problems.Solutions_Three.shortestBridge', 'array_problems_three.shortestBridge', (['grid'], {}), '(grid)\n', (27730, 27736), True, 'import array_problems.Solutions_Three as array_problems_three\n'), ((27961, 28002), 'array_problems.Solutions_Three.shortestBridge', 'array_problems_three.shortestBridge', (['grid'], {}), '(grid)\n', (27996, 28002), True, 'import array_problems.Solutions_Three as array_problems_three\n'), ((28327, 28368), 'array_problems.Solutions_Three.shortestBridge', 'array_problems_three.shortestBridge', (['grid'], {}), '(grid)\n', (28362, 28368), True, 'import array_problems.Solutions_Three as array_problems_three\n')]
|
import yaml
def yamldump(data):
    """Print *data* as YAML on stdout.

    ``data`` is either a single object exposing ``to_dict()`` or a list
    of such objects; each item is converted via ``to_dict()`` before
    being serialized with :func:`yaml.dump`.
    """
    if isinstance(data, list):
        payload = [entry.to_dict() for entry in data]
    else:
        payload = data.to_dict()
    print(yaml.dump(payload))
|
[
"yaml.dump"
] |
[((158, 173), 'yaml.dump', 'yaml.dump', (['data'], {}), '(data)\n', (167, 173), False, 'import yaml\n')]
|
import argparse
import sys # We need sys so that we can pass argv to QApplication
import os
import warnings
import pandas as pd
import petab.C as ptc
import pyqtgraph as pg
from PySide6 import QtWidgets, QtCore, QtGui
from PySide6.QtWidgets import (
QVBoxLayout, QComboBox, QWidget, QLabel, QTreeView
)
from petab import core
import petab
from petab.visualize.helper_functions import check_ex_exp_columns
from . import (utils, vis_spec_plot, window_functionality)
from .bar_plot import BarPlot
from .options_window import (OptionMenu, CorrelationOptionMenu,
OverviewPlotWindow)
class MainWindow(QtWidgets.QMainWindow):
    """
    The main window
    Attributes:
        exp_data: PEtab measurement table
        visualization_df: PEtab visualization table
        yaml_dict: Dictionary of the files in the yaml file
        condition_df: PEtab condition table
        observable_df: PEtab observable table
        plot1_widget: pg.GraphicsLayoutWidget containing the main plot
        plot2_widget: pg.GraphicsLayoutWidget containing the correlation plot
        warn_msg: QLabel displaying current warning messages
        popup_tables: List of Popup TableWidget displaying the clicked table
        tree_view: QTreeView of the yaml file
        visu_spec_plots: A list of VisuSpecPlots
        cbox: A dropdown menu for the plots
        current_list_index: List index of the currently displayed plot
        wid: QSplitter between main plot and correlation plot
    """
    def __init__(self, yaml_filename: str = None,
                 simulation_file: pd.DataFrame = None, *args, **kwargs):
        """
        Build all widgets, then optionally load the PEtab problem from
        *yaml_filename* and overlay *simulation_file* on the plots.

        Arguments:
            yaml_filename: Path of a PEtab YAML file (optional)
            simulation_file: Path of a PEtab simulation file (optional)
        """
        super(MainWindow, self).__init__(*args, **kwargs)
        # set the background color to white
        pg.setConfigOption('background', 'w')
        pg.setConfigOption('foreground', 'k')
        pg.setConfigOption("antialias", True)
        self.resize(1000, 600)
        self.setWindowTitle("petabvis")
        # PEtab tables; stay None until a YAML / simulation file is loaded
        self.visualization_df = None
        self.simulation_df = None
        self.condition_df = None
        self.observable_df = None
        self.exp_data = None
        self.yaml_filename = yaml_filename
        self.yaml_dict = None
        self.color_map = utils.generate_color_map("viridis")
        self.vis_spec_plots = []
        self.wid = QtWidgets.QSplitter()
        self.plot1_widget = pg.GraphicsLayoutWidget(show=True)
        self.plot2_widget = pg.GraphicsLayoutWidget(show=False)
        self.overview_plot_window = None
        self.wid.addWidget(self.plot1_widget)
        # plot2_widget will be added to the QSplitter when
        # a simulation file is opened
        self.cbox = QComboBox()  # dropdown menu to select plots
        self.cbox.currentIndexChanged.connect(lambda x: self.index_changed(x))
        self.warn_msg = QLabel("")
        self.warnings = []
        self.warning_counter = {}
        # The new window that pops up to display a table
        self.popup_tables = []
        self.options_window = OptionMenu(window=self,
                                         vis_spec_plots=self.vis_spec_plots)
        self.correlation_options_window = \
            CorrelationOptionMenu(vis_spec_plots=self.vis_spec_plots)
        # option-menu buttons; presumably created by
        # window_functionality.add_option_menu below — None until then
        self.correlation_option_button = None
        self.overview_plot_button = None
        self.tree_view = QTreeView(self)
        self.tree_view.setHeaderHidden(True)
        self.tree_root_node = None
        self.simulation_tree_branch = None
        self.wid.addWidget(self.tree_view)
        self.current_list_index = 0
        # route every Python warning into the GUI warning box
        warnings.showwarning = self.redirect_warning
        window_functionality.add_file_selector(self)
        window_functionality.add_option_menu(self)
        # the layout of the plot-list and message textbox
        lower_layout = QVBoxLayout()
        lower_layout.addWidget(self.cbox)
        lower_layout.addWidget(self.warn_msg)
        lower_widget = QWidget()
        lower_widget.setLayout(lower_layout)
        split_plots_and_warnings = QtWidgets.QSplitter()
        split_plots_and_warnings.setOrientation(QtCore.Qt.Vertical)
        split_plots_and_warnings.addWidget(self.wid)
        split_plots_and_warnings.addWidget(lower_widget)
        layout = QVBoxLayout()
        layout.addWidget(split_plots_and_warnings)
        widget = QWidget()
        widget.setLayout(layout)
        self.setCentralWidget(widget)
        if self.yaml_filename:
            self.read_data_from_yaml_file()
            if simulation_file:
                self.add_and_plot_simulation_file(simulation_file)
            else:
                self.add_plots()
    def read_data_from_yaml_file(self):
        """
        Load the PEtab problem from ``self.yaml_filename`` and populate
        the tree view; warns when no visualization file is listed.
        """
        self.yaml_dict = petab.load_yaml(self.yaml_filename)["problems"][0]
        folder_path = os.path.dirname(self.yaml_filename) + "/"
        if ptc.VISUALIZATION_FILES not in self.yaml_dict:
            self.visualization_df = None
            self.add_warning(
                "The YAML file contains no "
                "visualization file (default plotted)")
        # table_tree_view sets the df attributes of the window
        # equal to the first file of each branch
        # (measurement, visualization, ...)
        window_functionality.table_tree_view(self, folder_path)
    def add_and_plot_simulation_file(self, filename):
        """
        Add the simulation file and plot them.
        Also, add the correlation plot to the window
        and enable correlation plot and overview plot options.
        Arguments:
            filename: Path of the simulation file.
        """
        sim_data = core.get_simulation_df(filename)
        # check columns, and add non-mandatory default columns
        sim_data, _, _ = check_ex_exp_columns(
            sim_data, None, None, None, None, None,
            self.condition_df, sim=True)
        # delete the replicateId column if it gets added to the simulation
        # table but is not in exp_data because it causes problems when
        # splitting the replicates
        if ptc.REPLICATE_ID not in self.exp_data.columns \
                and ptc.REPLICATE_ID in sim_data.columns:
            sim_data.drop(ptc.REPLICATE_ID, axis=1, inplace=True)
        if len(self.yaml_dict[ptc.MEASUREMENT_FILES]) > 1:
            self.add_warning(
                "Not Implemented Error: Loading a simulation file with "
                "multiple measurement files is currently not supported.")
        else:
            self.simulation_df = sim_data
            self.add_plots()
            # insert correlation plot at position 1
            self.wid.insertWidget(1, self.plot2_widget)
            filename = os.path.basename(filename)
            window_functionality.add_simulation_df_to_tree_view(self, filename)
            # add correlation options and overview plot to option menu
            self.correlation_option_button.setVisible(True)
            self.overview_plot_button.setVisible(True)
            self.add_overview_plot_window()
    def add_plots(self):
        """
        Adds the current visuSpecPlots to the main window,
        removes the old ones and updates the
        cbox (dropdown list)
        Returns:
            List of PlotItem
        """
        self.clear_qsplitter()
        self.vis_spec_plots.clear()
        self.options_window.reset_states()
        if self.visualization_df is not None:
            # to keep the order of plots consistent
            # with names from the plot selection
            plot_ids = list(self.visualization_df[ptc.PLOT_ID].unique())
            for plot_id in plot_ids:
                self.create_and_add_vis_plot(plot_id)
        else:  # default plot when no visu_df is provided
            self.create_and_add_vis_plot()
        plots = [vis_spec_plot.get_plot() for vis_spec_plot in
                 self.vis_spec_plots]
        # update the cbox
        self.cbox.clear()
        # calling this method sets the index of the cbox to 0
        # and thus displays the first plot
        utils.add_plotnames_to_cbox(self.exp_data, self.visualization_df,
                                    self.cbox)
        return plots
    def index_changed(self, i: int):
        """
        Changes the displayed plot to the one selected in the dropdown list
        Arguments:
            i: index of the selected plot
        """
        if 0 <= i < len(
                self.vis_spec_plots):  # i is -1 when the cbox is cleared
            self.clear_qsplitter()
            self.plot1_widget.addItem(self.vis_spec_plots[i].get_plot())
            # correlation plot is only shown when simulation data is present
            self.plot2_widget.hide()
            if self.simulation_df is not None:
                self.plot2_widget.show()
                self.plot2_widget.addItem(
                    self.vis_spec_plots[i].correlation_plot)
            self.current_list_index = i
    def keyPressEvent(self, ev):
        """
        Changes the displayed plot by pressing arrow keys
        Arguments:
            ev: key event
        """
        # Exit when pressing ctrl + Q
        ctrl = False
        if ev.modifiers() & QtCore.Qt.ControlModifier:
            ctrl = True
        if ctrl and ev.key() == QtCore.Qt.Key_Q:
            sys.exit()
        # Up/Left select the previous plot, Down/Right the next one;
        # index_changed ignores out-of-range indices
        if ev.key() == QtCore.Qt.Key_Up:
            self.index_changed(self.current_list_index - 1)
        if ev.key() == QtCore.Qt.Key_Down:
            self.index_changed(self.current_list_index + 1)
        if ev.key() == QtCore.Qt.Key_Left:
            self.index_changed(self.current_list_index - 1)
        if ev.key() == QtCore.Qt.Key_Right:
            self.index_changed(self.current_list_index + 1)
    def closeEvent(self, event):
        # terminate the whole application when the main window is closed
        sys.exit()
    def add_warning(self, message: str):
        """
        Adds the message to the warnings box
        Arguments:
            message: The message to display
        """
        # deduplicate: repeated warnings only bump their counter
        if message not in self.warnings:
            self.warnings.append(message)
            self.warning_counter[message] = 1
        else:
            self.warning_counter[message] += 1
        self.warn_msg.setText(self.warnings_to_string())
    def warnings_to_string(self):
        """
        Convert the list of warnings to a string and
        indicate the number of occurences
        Returns:
            Self.warnings as a string
        """
        # NOTE(review): "occured" in the user-facing message is a typo
        # ("occurred"); left unchanged here because it is runtime output
        return "\n".join([warning if self.warning_counter[warning] <= 1
                          else warning + " (occured {} times)".format(
            str(self.warning_counter[warning]))
                          for warning in self.warnings])
    def redirect_warning(self, message, category, filename=None, lineno=None,
                         file=None, line=None):
        """
        Redirect all warning messages and display them in the window.
        Installed as ``warnings.showwarning`` in ``__init__``, hence the
        extra parameters, which are ignored.
        Arguments:
            message: The message of the warning
        """
        print("Warning redirected: " + str(message))
        self.add_warning(str(message))
    def create_and_add_vis_plot(self, plot_id=""):
        """
        Create a vis_spec_plot object based on the given plot_id.
        If no plot_it is provided the default will be plotted.
        Add all the warnings of the vis_plot object to the warning text box.
        The actual plotting happens in the index_changed method
        Arguments:
            plot_id: The plotId of the plot
        """
        # split the measurement df by observable when using default plots
        if self.visualization_df is None:
            observable_ids = list(self.exp_data[ptc.OBSERVABLE_ID].unique())
            for observable_id in observable_ids:
                rows = self.exp_data[ptc.OBSERVABLE_ID] == observable_id
                data = self.exp_data[rows]
                simulation_df = self.simulation_df
                if simulation_df is not None:
                    # restrict the simulation data to the same observable
                    rows = self.simulation_df[ptc.OBSERVABLE_ID]\
                        == observable_id
                    simulation_df = self.simulation_df[rows]
                vis_plot = vis_spec_plot.VisSpecPlot(
                    measurement_df=data, visualization_df=None,
                    condition_df=self.condition_df,
                    simulation_df=simulation_df, plot_id=observable_id,
                    color_map=self.color_map)
                self.vis_spec_plots.append(vis_plot)
                if vis_plot.warnings:
                    self.add_warning(vis_plot.warnings)
        else:
            # reduce the visualization df to the relevant rows (by plotId)
            rows = self.visualization_df[ptc.PLOT_ID] == plot_id
            vis_df = self.visualization_df[rows]
            if ptc.PLOT_TYPE_SIMULATION in vis_df.columns and \
                    vis_df.iloc[0][ptc.PLOT_TYPE_SIMULATION] == ptc.BAR_PLOT:
                bar_plot = BarPlot(measurement_df=self.exp_data,
                                   visualization_df=vis_df,
                                   condition_df=self.condition_df,
                                   simulation_df=self.simulation_df,
                                   plot_id=plot_id)
                # might want to change the name of
                # visu_spec_plots to clarify that
                # it can also include bar plots (maybe to plots?)
                self.vis_spec_plots.append(bar_plot)
            else:
                vis_plot = vis_spec_plot.VisSpecPlot(
                    measurement_df=self.exp_data,
                    visualization_df=vis_df,
                    condition_df=self.condition_df,
                    simulation_df=self.simulation_df, plot_id=plot_id,
                    color_map=self.color_map)
                self.vis_spec_plots.append(vis_plot)
                if vis_plot.warnings:
                    self.add_warning(vis_plot.warnings)
    def clear_qsplitter(self):
        """
        Clear the GraphicsLayoutWidgets for the
        measurement and correlation plot
        """
        self.plot1_widget.clear()
        self.plot2_widget.clear()
    def add_overview_plot_window(self):
        # (re)create the overview plot window from the current data
        self.overview_plot_window = OverviewPlotWindow(self.exp_data,
                                                   self.simulation_df)
def main():
    """Parse command-line options and launch the petabvis main window."""
    parser = argparse.ArgumentParser()
    parser.add_argument("-y", "--YAML", type=str, required=False,
                        help="PEtab YAML file", default=None)
    parser.add_argument("-s", "--simulation", type=str, required=False,
                        help="PEtab simulation file", default=None)
    parsed = parser.parse_args()
    # --simulation defaults to None, so this is simply passed through
    simulation_file = parsed.simulation
    app = QtWidgets.QApplication(sys.argv)
    window = MainWindow(parsed.YAML, simulation_file)
    window.show()
    sys.exit(app.exec_())
if __name__ == '__main__':
main()
|
[
"petab.visualize.helper_functions.check_ex_exp_columns",
"pyqtgraph.setConfigOption",
"petab.core.get_simulation_df",
"argparse.ArgumentParser",
"os.path.basename",
"petab.load_yaml",
"PySide6.QtWidgets.QComboBox",
"os.path.dirname",
"PySide6.QtWidgets.QVBoxLayout",
"PySide6.QtWidgets.QWidget",
"PySide6.QtWidgets.QLabel",
"PySide6.QtWidgets.QTreeView",
"PySide6.QtWidgets.QSplitter",
"PySide6.QtWidgets.QApplication",
"sys.exit",
"pyqtgraph.GraphicsLayoutWidget"
] |
[((14165, 14190), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {}), '()\n', (14188, 14190), False, 'import argparse\n'), ((14612, 14644), 'PySide6.QtWidgets.QApplication', 'QtWidgets.QApplication', (['sys.argv'], {}), '(sys.argv)\n', (14634, 14644), False, 'from PySide6 import QtWidgets, QtCore, QtGui\n'), ((1748, 1785), 'pyqtgraph.setConfigOption', 'pg.setConfigOption', (['"""background"""', '"""w"""'], {}), "('background', 'w')\n", (1766, 1785), True, 'import pyqtgraph as pg\n'), ((1794, 1831), 'pyqtgraph.setConfigOption', 'pg.setConfigOption', (['"""foreground"""', '"""k"""'], {}), "('foreground', 'k')\n", (1812, 1831), True, 'import pyqtgraph as pg\n'), ((1840, 1877), 'pyqtgraph.setConfigOption', 'pg.setConfigOption', (['"""antialias"""', '(True)'], {}), "('antialias', True)\n", (1858, 1877), True, 'import pyqtgraph as pg\n'), ((2303, 2324), 'PySide6.QtWidgets.QSplitter', 'QtWidgets.QSplitter', ([], {}), '()\n', (2322, 2324), False, 'from PySide6 import QtWidgets, QtCore, QtGui\n'), ((2353, 2387), 'pyqtgraph.GraphicsLayoutWidget', 'pg.GraphicsLayoutWidget', ([], {'show': '(True)'}), '(show=True)\n', (2376, 2387), True, 'import pyqtgraph as pg\n'), ((2416, 2451), 'pyqtgraph.GraphicsLayoutWidget', 'pg.GraphicsLayoutWidget', ([], {'show': '(False)'}), '(show=False)\n', (2439, 2451), True, 'import pyqtgraph as pg\n'), ((2656, 2667), 'PySide6.QtWidgets.QComboBox', 'QComboBox', ([], {}), '()\n', (2665, 2667), False, 'from PySide6.QtWidgets import QVBoxLayout, QComboBox, QWidget, QLabel, QTreeView\n'), ((2804, 2814), 'PySide6.QtWidgets.QLabel', 'QLabel', (['""""""'], {}), "('')\n", (2810, 2814), False, 'from PySide6.QtWidgets import QVBoxLayout, QComboBox, QWidget, QLabel, QTreeView\n'), ((3321, 3336), 'PySide6.QtWidgets.QTreeView', 'QTreeView', (['self'], {}), '(self)\n', (3330, 3336), False, 'from PySide6.QtWidgets import QVBoxLayout, QComboBox, QWidget, QLabel, QTreeView\n'), ((3780, 3793), 'PySide6.QtWidgets.QVBoxLayout', 'QVBoxLayout', ([], {}), 
'()\n', (3791, 3793), False, 'from PySide6.QtWidgets import QVBoxLayout, QComboBox, QWidget, QLabel, QTreeView\n'), ((3905, 3914), 'PySide6.QtWidgets.QWidget', 'QWidget', ([], {}), '()\n', (3912, 3914), False, 'from PySide6.QtWidgets import QVBoxLayout, QComboBox, QWidget, QLabel, QTreeView\n'), ((3995, 4016), 'PySide6.QtWidgets.QSplitter', 'QtWidgets.QSplitter', ([], {}), '()\n', (4014, 4016), False, 'from PySide6 import QtWidgets, QtCore, QtGui\n'), ((4213, 4226), 'PySide6.QtWidgets.QVBoxLayout', 'QVBoxLayout', ([], {}), '()\n', (4224, 4226), False, 'from PySide6.QtWidgets import QVBoxLayout, QComboBox, QWidget, QLabel, QTreeView\n'), ((4296, 4305), 'PySide6.QtWidgets.QWidget', 'QWidget', ([], {}), '()\n', (4303, 4305), False, 'from PySide6.QtWidgets import QVBoxLayout, QComboBox, QWidget, QLabel, QTreeView\n'), ((5567, 5599), 'petab.core.get_simulation_df', 'core.get_simulation_df', (['filename'], {}), '(filename)\n', (5589, 5599), False, 'from petab import core\n'), ((5688, 5782), 'petab.visualize.helper_functions.check_ex_exp_columns', 'check_ex_exp_columns', (['sim_data', 'None', 'None', 'None', 'None', 'None', 'self.condition_df'], {'sim': '(True)'}), '(sim_data, None, None, None, None, None, self.\n condition_df, sim=True)\n', (5708, 5782), False, 'from petab.visualize.helper_functions import check_ex_exp_columns\n'), ((9617, 9627), 'sys.exit', 'sys.exit', ([], {}), '()\n', (9625, 9627), False, 'import sys\n'), ((4742, 4777), 'os.path.dirname', 'os.path.dirname', (['self.yaml_filename'], {}), '(self.yaml_filename)\n', (4757, 4777), False, 'import os\n'), ((6621, 6647), 'os.path.basename', 'os.path.basename', (['filename'], {}), '(filename)\n', (6637, 6647), False, 'import os\n'), ((9152, 9162), 'sys.exit', 'sys.exit', ([], {}), '()\n', (9160, 9162), False, 'import sys\n'), ((4669, 4704), 'petab.load_yaml', 'petab.load_yaml', (['self.yaml_filename'], {}), '(self.yaml_filename)\n', (4684, 4704), False, 'import petab\n')]
|