code stringlengths 22 1.05M | apis listlengths 1 3.31k | extract_api stringlengths 75 3.25M |
|---|---|---|
"""
utility for project 9
:author: <NAME> <<EMAIL>>
:license: CC0
"""
def clear():
    """
    Clear the terminal / command-prompt screen.

    Uses ``cls`` on Windows and ``clear`` on Linux / macOS.
    :return: void
    """
    import os
    import sys
    command = 'cls' if sys.platform == 'win32' else 'clear'
    os.system(command)
# Guard: this module is a helper and must not be executed directly.
if __name__ == '__main__':
    raise Exception("please run main py")
| [
"os.system"
] | [((210, 226), 'os.system', 'os.system', (['"""cls"""'], {}), "('cls')\n", (219, 226), False, 'import os\n'), ((259, 277), 'os.system', 'os.system', (['"""clear"""'], {}), "('clear')\n", (268, 277), False, 'import os\n')] |
import os
import argparse
import subprocess
from workflow import Workflow
def get_kubectl_cmd_path():
    """Return the kubectl binary path.

    Precedence: workflow setting ``KUBECTL_CMD_PATH``, then the
    environment variable of the same name, then ``/usr/local/bin/kubectl``.
    """
    configured = Workflow().settings.get("KUBECTL_CMD_PATH")
    if configured:
        return configured
    return os.environ.get("KUBECTL_CMD_PATH", '/usr/local/bin/kubectl')
class KService:
    """Lightweight record describing one Kubernetes resource row."""

    def __init__(self, type, name, age, status):
        # Plain attribute bag; callers pass these positionally.
        self.status = status
        self.age = age
        self.name = name
        self.type = type
def get_args(args):
    """Parse the optional positional ``query`` argument from ``args``."""
    arg_parser = argparse.ArgumentParser()
    arg_parser.add_argument('query', nargs='?', default="")
    return arg_parser.parse_args(args)
def get_pods():
    """
    Return the cluster's pods as ``KService`` records.

    Shells out to ``kubectl get pods``, drops the header row and the
    trailing empty element, then parses each remaining row into
    (name, ready, status, restarts, age) columns.

    Bug fix: the previous bare ``except`` swallowed every error and
    printed the debug leftover "ASd"; malformed rows are now skipped
    explicitly.
    """
    res = []
    # NOTE(review): .read() returns bytes on Python 3, so splitting on the
    # str '\n' assumes Python 2 (typical for legacy Alfred workflows) --
    # confirm the target interpreter.
    pods = subprocess.Popen("%s get pods" % get_kubectl_cmd_path(), shell=True, stdout=subprocess.PIPE).stdout.read().split(
        '\n')[1:-1]
    for pod_str in pods:
        try:
            dep_name, _, status, _, age = " ".join(pod_str.split()).split(' ')
        except ValueError:
            # Row did not have exactly five columns; ignore it.
            continue
        res.append(KService("Pod", dep_name, age, status))
    return res
def get_deployments():
    """Return the cluster's deployments as ``KService`` records."""
    output = subprocess.Popen("%s get deploy" % get_kubectl_cmd_path(), shell=True,
                              stdout=subprocess.PIPE).stdout.read()
    # Drop the header row and the trailing empty element.
    rows = output.split('\n')[1:-1]
    services = []
    for row in rows:
        dep_name, _, current, _, _, age = " ".join(row.split()).split(' ')
        services.append(KService("Deploy", dep_name, age, current))
    return services
def get_replica_sets():
    """
    Return the cluster's replica sets as ``KService`` records.

    Status is rendered as ``desired/current``.

    Bug fix: rows were labelled "Deploy" (copy-paste from
    ``get_deployments``); they are replica sets.
    """
    res = []
    deps = subprocess.Popen("%s get rs" % get_kubectl_cmd_path(), shell=True, stdout=subprocess.PIPE).stdout.read().split(
        '\n')[1:-1]
    for dep_str in deps:
        dep_name, desired, current, _, age = " ".join(dep_str.split()).split(' ')
        res.append(KService("ReplicaSet", dep_name, age, "%s/%s" % (desired, current)))
    return res
def get_services():
    """Collect pods and deployments into one list of ``KService`` rows."""
    return get_pods() + get_deployments()
def search_key_for_service(service):
    """Build the string Alfred's fuzzy filter matches against (the name)."""
    parts = [service.name]
    return u' '.join(parts)
def process_and_feedback(wf, wf_cached_data_key, data_func, icon, include_type_in_arg=False):
    """Fetch (cached) cluster data, filter it by the user query and emit
    Alfred feedback items.

    :param wf: active Workflow instance
    :param wf_cached_data_key: cache key for ``wf.cached_data``
    :param data_func: callable producing the data on cache miss
    :param icon: icon path for the emitted items
    :param include_type_in_arg: prepend the lowercase type to the item arg
    """
    args = get_args(wf.args)
    # Cache results for 60 seconds to keep the workflow snappy.
    data = wf.cached_data(wf_cached_data_key, data_func, max_age=60)
    query = args.query.strip()
    if query:
        data = wf.filter(query, data, key=search_key_for_service, min_score=20)
    for item in data:
        if include_type_in_arg:
            arg = "{type} {name}".format(type=item.type.lower(), name=item.name)
        else:
            arg = item.name
        wf.add_item(title=item.name,
                    subtitle="%s - Age: %s | Extra: %s" % (item.type, item.age, item.status),
                    arg=arg,
                    valid=True,
                    icon=icon)
    wf.send_feedback()
def update_local_path_vars(wf):
    """Persist the path configured via environment variables into the
    workflow settings and confirm to the user."""
    target_key = os.environ.get('set_path_to')
    new_path = os.environ.get('configured_path')
    wf.settings[target_key] = new_path
    wf.settings.save()
    print("Successfully set path to %s with %s" % (target_key, wf.settings[target_key]))
def _report_missing_var(wf, var_name):
print("Missing dashbaord url; use *ksetenv*")
"""
wf.add_item(title="Hit enter to set %s environment variable." % var_name,
arg="setenv",
valid=True)
wf.send_feedback()
"""
| [
"workflow.Workflow",
"os.environ.get",
"argparse.ArgumentParser"
] | [((113, 123), 'workflow.Workflow', 'Workflow', ([], {}), '()\n', (121, 123), False, 'from workflow import Workflow\n'), ((438, 463), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {}), '()\n', (461, 463), False, 'import argparse\n'), ((2703, 2732), 'os.environ.get', 'os.environ.get', (['"""set_path_to"""'], {}), "('set_path_to')\n", (2717, 2732), False, 'import os\n'), ((2755, 2788), 'os.environ.get', 'os.environ.get', (['"""configured_path"""'], {}), "('configured_path')\n", (2769, 2788), False, 'import os\n'), ((174, 234), 'os.environ.get', 'os.environ.get', (['"""KUBECTL_CMD_PATH"""', '"""/usr/local/bin/kubectl"""'], {}), "('KUBECTL_CMD_PATH', '/usr/local/bin/kubectl')\n", (188, 234), False, 'import os\n')] |
# -*- coding: utf-8 -*-
import os
import numpy as np
import sys
import logging
import csv
# Setup logging
# Module-level logger: INFO and above go to the console using a short
# "MM-DD HH:MM: message" format.
logger = logging.getLogger(__name__)
console_handle = logging.StreamHandler()
console_handle.setLevel(logging.INFO)
formatter = logging.Formatter('%(asctime)s: %(message)s', datefmt='%m-%d %H:%M')
console_handle.setFormatter(formatter)
logger.addHandler(console_handle)
class Data:
    """Common class for a list of instances of the class Samples

    Loads per-case rows from a CSV file into ``self.data``, keyed by the
    case id.  Each value is a four-slot list:
    [feature matrix, label array, gene ids, gene symbols].

    Attributes:
        data: dict mapping case id -> [features, labels, gene_ids, gene_names]
        filter_dict: maps a filter index to the CSV score column it names
    """
    # index for each score within a feature vector
    FM_IDX = 0
    CADD_IDX = 1
    GESTALT_IDX = 2
    BOQA_IDX = 3
    PHENO_IDX = 4
    # FEATURE_IDX is for feature vector which contain the above feature score
    # LABEL_IDX is for pathogenic gene label (0, 1)
    # GENE_IDX is for gene symbol
    FEATURE_IDX = 0
    LABEL_IDX = 1
    GENE_IDX = 2
    GENE_NAME_IDX = 3

    def __init__(self):
        self.data = {}
        # Filter dict: index -> CSV column name used by loadData filtering
        self.filter_dict = {0: "feature_score", 1: "cadd_phred_score", 2: "gestalt_score", 3: "boqa_score", 4: "pheno_score"}

    def loadData(self, input_file, filter_field=None):
        """Read ``input_file`` (CSV with header row) into ``self.data``.

        Feature and label lists are converted to numpy arrays at the end.
        Idiom fixes: ``case not in`` instead of ``not case in`` and
        ``is not None`` instead of ``!= None``.

        :param input_file: path to the CSV file
        :param filter_field: optional list; ``filter_field[0]`` indexes
            ``self.filter_dict`` and any case whose pathogenic row
            (label == 1) has 'nan' or '0' in that column is dropped.
        """
        filter_cases = []
        with open(input_file) as csvfile:
            reader = csv.DictReader(csvfile)
            case = ""
            for row in reader:
                case = row["case"]
                if case not in self.data:
                    self.data.update({case: [[], [], [], []]})
                x = self.data[case][self.FEATURE_IDX]
                y = self.data[case][self.LABEL_IDX]
                gene = self.data[case][self.GENE_IDX]
                gene_name = self.data[case][self.GENE_NAME_IDX]
                x.append([row["feature_score"], row["cadd_phred_score"], row["gestalt_score"], row["boqa_score"], row["pheno_score"]])
                y.append(int(row["label"]))
                gene.append(row["gene_id"])
                gene_name.append(row["gene_symbol"])
                # filter the sample which has no the feature we assigned
                if filter_field is not None:
                    if int(row["label"]) == 1:
                        if row[self.filter_dict[filter_field[0]]] == 'nan' or row[self.filter_dict[filter_field[0]]] == '0':
                            logger.debug("%s - %s has no %s score", case, row["gene_symbol"], self.filter_dict[filter_field[0]])
                            filter_cases.append(case)
        # Drop filtered cases; convert the rest to numpy arrays.
        for key in list(self.data):
            if key in filter_cases:
                del self.data[key]
            else:
                self.data[key][self.FEATURE_IDX] = np.array(self.data[key][self.FEATURE_IDX])
                self.data[key][self.LABEL_IDX] = np.array(self.data[key][self.LABEL_IDX])
        logger.info("Input %s: total %d cases", input_file, len(self.data))
| [
"logging.getLogger",
"logging.StreamHandler",
"csv.DictReader",
"logging.Formatter",
"numpy.array"
] | [((124, 151), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (141, 151), False, 'import logging\n'), ((170, 193), 'logging.StreamHandler', 'logging.StreamHandler', ([], {}), '()\n', (191, 193), False, 'import logging\n'), ((246, 314), 'logging.Formatter', 'logging.Formatter', (['"""%(asctime)s: %(message)s"""'], {'datefmt': '"""%m-%d %H:%M"""'}), "('%(asctime)s: %(message)s', datefmt='%m-%d %H:%M')\n", (263, 314), False, 'import logging\n'), ((1457, 1480), 'csv.DictReader', 'csv.DictReader', (['csvfile'], {}), '(csvfile)\n', (1471, 1480), False, 'import csv\n'), ((2951, 2962), 'numpy.array', 'np.array', (['x'], {}), '(x)\n', (2959, 2962), True, 'import numpy as np\n'), ((2988, 2999), 'numpy.array', 'np.array', (['y'], {}), '(y)\n', (2996, 2999), True, 'import numpy as np\n')] |
#!/usr/bin/python
# -*- coding: utf-8 -*-
#
# Copyright: (c) 2017, F5 Networks Inc.
# GNU General Public License v3.0 (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
DOCUMENTATION = r'''
---
module: bigip_device_group_member
short_description: Manages members in a device group
description:
- Manages members in a device group. Members in a device group can only
be added or removed, never updated. This is because the members are
identified by unique name values and changing that name would invalidate
the uniqueness.
version_added: "1.0.0"
options:
name:
description:
- Specifies the name of the device that you want to add to the
device group. Often this will be the hostname of the device.
This member must be trusted by the device already. Trusting
can be done with the C(bigip_device_trust) module and the
C(peer_hostname) option to that module.
type: str
required: True
device_group:
description:
- The device group to which you want to add the member.
type: str
required: True
state:
description:
- When C(present), ensures the device group member exists.
- When C(absent), ensures the device group member is removed.
type: str
choices:
- present
- absent
default: present
extends_documentation_fragment: f5networks.f5_modules.f5
author:
- <NAME> (@caphrim007)
- <NAME> (@wojtek0806)
'''
EXAMPLES = r'''
- name: Add the current device to the "device_trust_group" device group
bigip_device_group_member:
name: "{{ inventory_hostname }}"
device_group: device_trust_group
provider:
password: <PASSWORD>
server: lb.mydomain.com
user: admin
delegate_to: localhost
- name: Add the hosts in the current scope to "device_trust_group"
bigip_device_group_member:
name: "{{ item }}"
device_group: device_trust_group
provider:
password: <PASSWORD>
server: lb.mydomain.com
user: admin
loop: "{{ hostvars.keys() }}"
run_once: true
delegate_to: localhost
'''
RETURN = r'''
# only common fields returned
'''
from datetime import datetime
from ansible.module_utils.basic import AnsibleModule
from ..module_utils.bigip import F5RestClient
from ..module_utils.common import (
F5ModuleError, AnsibleF5Parameters, f5_argument_spec
)
from ..module_utils.icontrol import tmos_version
from ..module_utils.teem import send_teem
class Parameters(AnsibleF5Parameters):
    # This module only adds/removes members, so there is nothing to map,
    # return, send or compare: all metadata collections stay empty.
    api_map = {}
    api_attributes = []
    returnables = []
    updatables = []
class ApiParameters(Parameters):
    # Values as read back from the BIG-IP API; no special handling needed.
    pass
class ModuleParameters(Parameters):
    # Values as supplied by the Ansible task; no special handling needed.
    pass
class Changes(Parameters):
    def to_return(self):
        """Flatten the returnable attributes into a single result dict.

        Dict-valued attributes are merged into the result; every other
        value is stored under its own key.  The result is passed through
        ``_filter_params`` (inherited) to drop empty values.

        Idiom fix: the original wrapped the loop in
        ``try: ... except Exception: raise`` which is a no-op re-raise;
        the pointless guard has been removed.
        """
        result = {}
        for returnable in self.returnables:
            change = getattr(self, returnable)
            if isinstance(change, dict):
                result.update(change)
            else:
                result[returnable] = change
        return self._filter_params(result)
class UsableChanges(Changes):
    # Values in the shape sent to the BIG-IP API; nothing extra needed.
    pass
class ReportableChanges(Changes):
    # Values in the shape reported back to the user; nothing extra needed.
    pass
class Difference(object):
    # Placeholder: members are only added/removed, so no diffing is needed.
    pass
class ModuleManager(object):
    """Implements the add/remove member logic against the BIG-IP REST API."""

    def __init__(self, *args, **kwargs):
        self.module = kwargs.get('module', None)
        self.client = F5RestClient(**self.module.params)
        self.want = Parameters(params=self.module.params)
        self.have = None
        self.changes = Changes()

    def _set_changed_options(self):
        # Copy every non-None returnable from the desired state into
        # self.changes (effectively a no-op here: returnables is empty).
        changed = {}
        for key in Parameters.returnables:
            if getattr(self.want, key) is not None:
                changed[key] = getattr(self.want, key)
        if changed:
            self.changes = Changes(params=changed)

    def _announce_deprecations(self, result):
        # Forward any collected deprecation warnings to Ansible.
        warnings = result.pop('__warnings', [])
        for warning in warnings:
            self.module.deprecate(
                msg=warning['msg'],
                version=warning['version']
            )

    def exec_module(self):
        """Entry point: apply the desired state and build the result dict."""
        start = datetime.now().isoformat()
        version = tmos_version(self.client)
        changed = False
        result = dict()
        state = self.want.state
        if state == "present":
            changed = self.present()
        elif state == "absent":
            changed = self.absent()
        reportable = ReportableChanges(params=self.changes.to_return())
        changes = reportable.to_return()
        result.update(**changes)
        result.update(dict(changed=changed))
        self._announce_deprecations(result)
        # Telemetry (TEEM) call reporting module usage to F5.
        send_teem(start, self.client, self.module, version)
        return result

    def present(self):
        # Ensure membership; True only when a change was made.
        if self.exists():
            return False
        else:
            return self.create()

    def absent(self):
        # Ensure non-membership; True only when a change was made.
        if self.exists():
            return self.remove()
        return False

    def create(self):
        self._set_changed_options()
        if self.module.check_mode:
            return True
        self.create_on_device()
        return True

    def remove(self):
        if self.module.check_mode:
            return True
        self.remove_from_device()
        # Verify the deletion actually took effect on the device.
        if self.exists():
            raise F5ModuleError("Failed to remove the member from the device group.")
        return True

    def exists(self):
        """Return True when the member is present in the device group."""
        errors = [401, 403, 409, 500, 501, 502, 503, 504]
        uri = "https://{0}:{1}/mgmt/tm/cm/device-group/{2}/devices/{3}".format(
            self.client.provider['server'],
            self.client.provider['server_port'],
            self.want.device_group,
            self.want.name
        )
        resp = self.client.api.get(uri)
        try:
            response = resp.json()
        except ValueError as ex:
            raise F5ModuleError(str(ex))
        if resp.status == 404 or 'code' in response and response['code'] == 404:
            return False
        if resp.status in [200, 201] or 'code' in response and response['code'] in [200, 201]:
            return True
        if resp.status in errors or 'code' in response and response['code'] in errors:
            if 'message' in response:
                raise F5ModuleError(response['message'])
            else:
                raise F5ModuleError(resp.content)
        # NOTE(review): any status outside 200/201/404 and the error list
        # falls through and implicitly returns None (treated as "absent")
        # -- confirm this is intended.

    def create_on_device(self):
        params = self.changes.api_params()
        params['name'] = self.want.name
        # NOTE(review): 'partition' is not in the argument spec; presumably
        # resolved by the AnsibleF5Parameters base class -- verify.
        params['partition'] = self.want.partition
        uri = "https://{0}:{1}/mgmt/tm/cm/device-group/{2}/devices/".format(
            self.client.provider['server'],
            self.client.provider['server_port'],
            self.want.device_group
        )
        resp = self.client.api.post(uri, json=params)
        try:
            response = resp.json()
        except ValueError as ex:
            raise F5ModuleError(str(ex))
        if resp.status in [200, 201] or 'code' in response and response['code'] in [200, 201]:
            return True
        raise F5ModuleError(resp.content)

    def remove_from_device(self):
        uri = "https://{0}:{1}/mgmt/tm/cm/device-group/{2}/devices/{3}".format(
            self.client.provider['server'],
            self.client.provider['server_port'],
            self.want.device_group,
            self.want.name
        )
        response = self.client.api.delete(uri)
        if response.status == 200:
            return True
        raise F5ModuleError(response.content)
class ArgumentSpec(object):
    """Builds the module argument spec on top of the common F5 options."""

    def __init__(self):
        self.supports_check_mode = True
        # Module-specific options layered over the shared F5 spec.
        own_spec = dict(
            name=dict(required=True),
            device_group=dict(required=True),
            state=dict(
                default='present',
                choices=['absent', 'present']
            ),
        )
        self.argument_spec = dict(f5_argument_spec)
        self.argument_spec.update(own_spec)
def main():
    """Module entry point: build the spec, run the manager, report back."""
    spec = ArgumentSpec()
    module = AnsibleModule(
        argument_spec=spec.argument_spec,
        supports_check_mode=spec.supports_check_mode
    )
    try:
        results = ModuleManager(module=module).exec_module()
        module.exit_json(**results)
    except F5ModuleError as ex:
        module.fail_json(msg=str(ex))
# Execute only when run directly (i.e. by Ansible), not on import.
if __name__ == '__main__':
    main()
| [
"datetime.datetime.now",
"ansible.module_utils.basic.AnsibleModule"
] | [((8085, 8183), 'ansible.module_utils.basic.AnsibleModule', 'AnsibleModule', ([], {'argument_spec': 'spec.argument_spec', 'supports_check_mode': 'spec.supports_check_mode'}), '(argument_spec=spec.argument_spec, supports_check_mode=spec.\n supports_check_mode)\n', (8098, 8183), False, 'from ansible.module_utils.basic import AnsibleModule\n'), ((4193, 4207), 'datetime.datetime.now', 'datetime.now', ([], {}), '()\n', (4205, 4207), False, 'from datetime import datetime\n')] |
from elasticsearch import TransportError
from sanic import Blueprint
from sanic.request import Request
from sanic.response import HTTPResponse, json
from ..connections import get_client
# Blueprint grouping the REST proxy endpoints.
rest_bp = Blueprint('rest')
def format_es_exception(e: TransportError):
    """Render an Elasticsearch transport error as a JSON HTTP response."""
    payload = {
        "status_code": e.status_code,
        "error": e.error,
        "info": e.info,
    }
    return json(payload)
@rest_bp.route('/query', methods=['POST'])
async def close_index(request: Request) -> HTTPResponse:
    """
    Proxy an arbitrary request to Elasticsearch.

    Expects a JSON body with "method", "path" and "body" keys and forwards
    them verbatim via the ES transport layer.  Transport errors are
    converted into JSON error responses.

    NOTE(review): despite its name this handler performs a generic query,
    not an index close -- consider renaming.
    """
    client = get_client(request)
    body = request.json['body']
    method = request.json['method']
    path = request.json['path']
    try:
        resp = await client.transport.perform_request(method, path, body=body)
    except TransportError as e:
        return format_es_exception(e)
    return json(resp)
| [
"sanic.response.json",
"sanic.Blueprint"
] | [((198, 215), 'sanic.Blueprint', 'Blueprint', (['"""rest"""'], {}), "('rest')\n", (207, 215), False, 'from sanic import Blueprint\n'), ((273, 343), 'sanic.response.json', 'json', (["{'status_code': e.status_code, 'error': e.error, 'info': e.info}"], {}), "({'status_code': e.status_code, 'error': e.error, 'info': e.info})\n", (277, 343), False, 'from sanic.response import HTTPResponse, json\n'), ((782, 792), 'sanic.response.json', 'json', (['resp'], {}), '(resp)\n', (786, 792), False, 'from sanic.response import HTTPResponse, json\n')] |
from argo_dataflow import pipeline, kafka
def handler(msg, context):
    """Prefix the UTF-8 decoded message with "hi! " and re-encode it."""
    greeting = "hi! " + msg.decode("UTF-8")
    return greeting.encode("UTF-8")
if __name__ == '__main__':
    # Assemble the step first: read from input-topic, run `handler` under
    # the name "main", and publish results to output-topic.
    step_def = (kafka('input-topic')
                .code('main', handler)
                .kafka('output-topic'))
    # Build and persist the pipeline definition.
    (pipeline("104-python3-9")
     .owner('argoproj-labs')
     .describe("""This example is of the Python 3.9 handler.

[Learn about handlers](../docs/HANDLERS.md)""")
     .step(step_def)
     .save())
| [
"argo_dataflow.kafka",
"argo_dataflow.pipeline"
] | [((349, 369), 'argo_dataflow.kafka', 'kafka', (['"""input-topic"""'], {}), "('input-topic')\n", (354, 369), False, 'from argo_dataflow import pipeline, kafka\n'), ((163, 188), 'argo_dataflow.pipeline', 'pipeline', (['"""104-python3-9"""'], {}), "('104-python3-9')\n", (171, 188), False, 'from argo_dataflow import pipeline, kafka\n')] |
import asyncio
import discord
from commands import Commands, Guild_Instance, leave, play_search
import os
from pymongo import MongoClient
from dotenv import load_dotenv
# Load credentials from .env before anything reads os.environ.
load_dotenv()
# NOTE(review): the cluster host segment looks garbled ("@dj<EMAIL>");
# confirm the MongoDB connection string before deploying.
CONNECTION_STRING = f"mongodb+srv://{os.environ['mongo_user']}:{os.environ['mongo_pass']}@dj<EMAIL>.mongodb.net/djangur?retryWrites=true&w=majority"
db_client = MongoClient(CONNECTION_STRING)
db = db_client['djangur']
client = discord.Client()
@client.event
async def on_ready():
    # Announce a successful login and show the command prefix in use.
    print(f'Logged in as {client.user}')
    print(os.environ['prefix'])
@client.event
async def on_message(msg):
    """Dispatch chat messages: search-number replies and prefixed commands."""
    # Ignore the bot's own messages to avoid feedback loops.
    if msg.author == client.user:
        return
    # Per-guild state (text channel + DB collection) keyed by guild id.
    ginst = Guild_Instance.by_id(msg.guild.id)
    ginst.tc = msg.channel
    ginst.db = db[str(msg.guild.id)]
    # A bare number while a search is active selects that search result.
    if msg.content.isdigit() and ginst.searching:
        await play_search(msg.content, msg=msg, client=client, ginst=ginst)
    if not msg.content.startswith(os.environ['prefix']):
        return
    # Strip the prefix, then split into command word and argument string.
    no_prefix = msg.content[len(os.environ['prefix']):]
    split = no_prefix.split(' ', 1)
    cmd = split[0]
    args = split[1] if (len(split) == 2) else ''
    if cmd in Commands.command_map:
        await Commands.command_map[cmd].fn(args, msg=msg, client=client, ginst=ginst)
    else:
        await msg.channel.send(f'{cmd}: Command not found.')
@client.event
async def on_voice_state_update(member, before, after):
    """Auto-disconnect watchdog for the bot's own voice sessions.

    NOTE(review): the bot identity is matched by the hard-coded name
    'Tramvai'; confirm this matches the deployed bot account.
    """
    if not member.name == 'Tramvai':
        return
    elif before.channel is None:
        # Bot just joined a channel: poll once per second and trigger the
        # leave command after 600 s of not actively playing audio.
        ginst = Guild_Instance.by_id(after.channel.guild.id)
        voice = after.channel.guild.voice_client
        time = 0
        while True:
            await asyncio.sleep(1)
            time = time + 1
            if voice.is_playing() and not voice.is_paused():
                time = 0
            if time == 600:
                print(await Commands.command_map['leave'].fn(None, None, None, ginst))
            if not voice.is_connected():
                break
    elif before.channel is not None:
        if after.channel is None:
            # Bot was disconnected externally: run the leave cleanup.
            ginst = Guild_Instance.by_id(before.channel.guild.id)
            await Commands.command_map['leave'].fn(None, None, None, ginst)
# Blocking call: starts the bot with the token from the environment.
client.run(os.environ['token'])
| [
"commands.Guild_Instance.by_id",
"commands.play_search",
"dotenv.load_dotenv",
"asyncio.sleep",
"discord.Client",
"pymongo.MongoClient"
] | [((170, 183), 'dotenv.load_dotenv', 'load_dotenv', ([], {}), '()\n', (181, 183), False, 'from dotenv import load_dotenv\n'), ((347, 377), 'pymongo.MongoClient', 'MongoClient', (['CONNECTION_STRING'], {}), '(CONNECTION_STRING)\n', (358, 377), False, 'from pymongo import MongoClient\n'), ((414, 430), 'discord.Client', 'discord.Client', ([], {}), '()\n', (428, 430), False, 'import discord\n'), ((654, 688), 'commands.Guild_Instance.by_id', 'Guild_Instance.by_id', (['msg.guild.id'], {}), '(msg.guild.id)\n', (674, 688), False, 'from commands import Commands, Guild_Instance, leave, play_search\n'), ((818, 879), 'commands.play_search', 'play_search', (['msg.content'], {'msg': 'msg', 'client': 'client', 'ginst': 'ginst'}), '(msg.content, msg=msg, client=client, ginst=ginst)\n', (829, 879), False, 'from commands import Commands, Guild_Instance, leave, play_search\n'), ((1486, 1530), 'commands.Guild_Instance.by_id', 'Guild_Instance.by_id', (['after.channel.guild.id'], {}), '(after.channel.guild.id)\n', (1506, 1530), False, 'from commands import Commands, Guild_Instance, leave, play_search\n'), ((1635, 1651), 'asyncio.sleep', 'asyncio.sleep', (['(1)'], {}), '(1)\n', (1648, 1651), False, 'import asyncio\n'), ((2035, 2080), 'commands.Guild_Instance.by_id', 'Guild_Instance.by_id', (['before.channel.guild.id'], {}), '(before.channel.guild.id)\n', (2055, 2080), False, 'from commands import Commands, Guild_Instance, leave, play_search\n')] |
# coding=utf-8
"""
Script to generate city object.
"""
from __future__ import division
import os
import numpy as np
import pickle
import warnings
import random
import datetime
import shapely.geometry.point as point
import pycity_base.classes.Weather as weath
import pycity_base.classes.demand.SpaceHeating as SpaceHeating
import pycity_base.classes.demand.ElectricalDemand as ElectricalDemand
import pycity_base.classes.demand.Apartment as Apartment
import pycity_base.classes.demand.DomesticHotWater as DomesticHotWater
import pycity_base.classes.demand.Occupancy as occup
import pycity_calc.environments.timer as time
# import pycity_calc.environments.market as price
import pycity_calc.environments.germanmarket as germanmarket
import pycity_calc.environments.environment as env
import pycity_calc.environments.co2emissions as co2
import pycity_calc.buildings.building as build_ex
import pycity_calc.cities.city as city
import pycity_calc.visualization.city_visual as citvis
import pycity_calc.toolbox.modifiers.slp_th_manipulator as slpman
import pycity_calc.toolbox.teaser_usage.teaser_use as tusage
import pycity_calc.toolbox.mc_helpers.user.user_unc_sampling as usunc
try:
import teaser.logic.simulation.VDI_6007.weather as vdiweather
except: # pragma: no cover
msg = 'Could not import teaser.logic.simulation.VDI_6007.weather. ' \
'If you need to use it, install ' \
'it via pip "pip install TEASER". Alternatively, you might have ' \
'run into trouble with XML bindings in TEASER. This can happen ' \
'if you try to re-import TEASER within an active Python console.' \
'Please close the active Python console and open another one. Then' \
' try again. You might also be on the wrong TEASER branch ' \
'(without VDI 6007 core).'
warnings.warn(msg)
def load_data_file_with_spec_demand_data(filename):
    """
    Load and return data from
    .../src/data/BaseData/Specific_Demand_Data/filename.

    The file should hold float (or int) values; other values (e.g.
    strings) are loaded as 'nan'.

    Parameter
    ---------
    filename : str
        String with name of file, e.g. 'district_data.txt'

    Returns
    -------
    dataset : numpy array
        Numpy array with data
    """
    # Climb four directory levels from this file to reach the source root.
    src_path = os.path.abspath(__file__)
    for _ in range(4):
        src_path = os.path.dirname(src_path)
    input_data_path = os.path.join(src_path, 'data', 'BaseData',
                                   'Specific_Demand_Data', filename)
    return np.genfromtxt(input_data_path, delimiter='\t', skip_header=1)
def convert_th_slp_int_and_str(th_slp_int):
    """
    Map a thermal SLP type number to its profile tag string.

    Parameters
    ----------
    th_slp_int : int
        SLP type integer number (0-13); None yields None with a warning

    Returns
    -------
    th_slp_tag : str
        SLP type string

    Annotations
    -----------
    - `HEF` : Single family household
    - `HMF` : Multi family household
    - `GBA` : Bakeries
    - `GBD` : Other services
    - `GBH` : Accomodations
    - `GGA` : Restaurants
    - `GGB` : Gardening
    - `GHA` : Retailers
    - `GHD` : Summed load profile business, trade and services
    - `GKO` : Banks, insurances, public institutions
    - `GMF` : Household similar businesses
    - `GMK` : Automotive
    - `GPD` : Paper and printing
    - `GWA` : Laundries
    """
    if th_slp_int is None:
        warnings.warn('th_slp_int is None. Going to return None.')
        return None
    tag_map = {0: 'HEF', 1: 'HMF', 2: 'GMF', 3: 'GMK', 4: 'GPD',
               5: 'GHA', 6: 'GBD', 7: 'GKO', 8: 'GBH', 9: 'GGA',
               10: 'GBA', 11: 'GWA', 12: 'GGB', 13: 'GHD'}
    return tag_map[th_slp_int]
def convert_el_slp_int_and_str(el_slp_int):
    """
    Map an electrical SLP type number to its profile tag string.

    Parameters
    ----------
    el_slp_int : int
        SLP type integer number (0-10); None yields None with a warning

    Returns
    -------
    el_slp_tag : str
        SLP type string

    Annotations
    -----------
    # 0: H0 : Residential
    # 1: G0 : Commercial
    # 2: G1 : Commercial Mo-Sa 08:00 to 18:00
    # 3: G2 : Commercial, mainly evening hours
    # 4: G3 : Commercial 24 hours
    # 5: G4 : Shop / hairdresser
    # 6: G5 : Backery
    # 7: G6 : Commercial, weekend
    # 8: L0 : Farm
    # 9: L1 : Farm, mainly cattle and milk
    # 10: L2 : Other farming
    """
    if el_slp_int is None:
        warnings.warn('el_slp_int is None. Going to return None.')
        return None
    tag_map = {0: 'H0', 1: 'G0', 2: 'G1', 3: 'G2', 4: 'G3', 5: 'G4',
               6: 'G5', 7: 'G6', 8: 'L0', 9: 'L1', 10: 'L2'}
    return tag_map[el_slp_int]
def convert_method_3_nb_into_str(method_3_nb):
    """
    Map a method-3 number to its string tag.

    Parameters
    ----------
    method_3_nb : int
        Number of method 3 (0-4); None yields None with a warning

    Returns
    -------
    method_3_str : str
        String of method 3
    """
    if method_3_nb is None:
        warnings.warn('method_3_nb is None. Going to return None.')
        return None
    tag_map = {0: 'food_pro', 1: 'metal', 2: 'rest',
               3: 'sports', 4: 'repair'}
    return tag_map[method_3_nb]
def convert_method_4_nb_into_str(method_4_nb):
    """
    Map a method-4 number to its string tag.

    Parameters
    ----------
    method_4_nb : int
        Number of method 4 (0-2); None yields None with a warning

    Returns
    -------
    method_4_str : str
        String of method 4
    """
    if method_4_nb is None:
        warnings.warn('method_4_nb is None. Going to return None.')
        return None
    tag_map = {0: 'metal_1', 1: 'metal_2', 2: 'warehouse'}
    return tag_map[method_4_nb]
def conv_build_type_nb_to_name(build_type):
    """
    Convert a build_type number to its name / explanation.

    Parameters
    ----------
    build_type : int
        Building type number, based on Spec_demands_non_res.txt;
        None yields None with a warning

    Returns
    -------
    build_name : str
        Building name / explanation
    """
    if build_type is None:
        warnings.warn('build_type is None. Going to return None for build_name.')
        return None
    # Lookup table indexed by building-type number (0-45).
    name_map = {
        0: 'Residential',
        1: 'Office (simulation)',
        2: 'Main construction work',
        3: 'Finishing trade construction work',
        4: 'Bank and insurance',
        5: 'Public institution',
        6: 'Non profit organization',
        7: 'Small office buildings',
        8: 'Other services',
        9: 'Metal',
        10: 'Automobile',
        11: 'Wood and timber',
        12: 'Paper',
        13: 'Small retailer for food',
        14: 'Small retailer for non-food',
        15: 'Large retailer for food',
        16: 'Large retailer for non-food',
        17: 'Primary school',
        18: 'School for physically handicapped',
        19: 'High school',
        20: 'Trade school',
        21: 'University',
        22: 'Hotel',
        23: 'Restaurant',
        24: 'Childrens home',
        25: 'Backery',
        26: 'Butcher',
        27: 'Laundry',
        28: 'Farm primary agriculture ',
        29: 'Farm with 10 - 49 cattle units',
        30: 'Farm with 50 - 100 cattle units',
        31: 'Farm with more than 100 cattle units',
        32: 'Gardening',
        33: 'Hospital',
        34: 'Library',
        35: 'Prison',
        36: 'Cinema',
        37: 'Theater',
        38: 'Parish hall',
        39: 'Sports hall',
        40: 'Multi purpose hall',
        41: 'Swimming hall',
        42: 'Club house',
        43: 'Fitness studio',
        44: 'Train station smaller 5000m2',
        45: 'Train station equal to or larger than 5000m2'
    }
    return name_map[build_type]
def constrained_sum_sample_pos(n, total):
    """
    Return a randomly chosen list of n positive integers summing to total.
    Each such list is equally likely to occur.

    Parameters
    ----------
    n : int
        Number of chosen integers
    total : int
        Sum of all entries of result list

    Returns
    -------
    results_list : list (of int)
        List with result integers, which sum up to value 'total'
    """
    # Draw n-1 distinct cut points in (0, total); the gaps between
    # consecutive cut points are the (positive) summands.
    cut_points = sorted(random.sample(range(1, int(total)), int(n - 1)))
    uppers = cut_points + [total]
    lowers = [0] + cut_points
    return [int(upper - lower) for upper, lower in zip(uppers, lowers)]
def redistribute_occ(occ_list):
    """
    Rebalance occupants so every apartment houses between 1 and 5 people.

    Overflow beyond 5 per apartment is first given to empty apartments
    (one each), then used to top remaining apartments up to 5.

    Parameters
    ----------
    occ_list : list (of int)
        Occupants per apartment

    Returns
    -------
    occ_list_new : list
        List holding number of occupants per apartment
    """
    balanced = occ_list[:]
    if sum(balanced) / len(balanced) > 5:  # pragma: no cover
        raise AssertionError(
            'Average number of occupants per apartment is higher than 5.'
            ' This is not valid for usage of Richardson profile generator.')
    # Cap every apartment at 5 and collect the overflow.
    overflow = 0
    for idx, occ in enumerate(balanced):
        if occ > 5:
            overflow += occ - 5
            balanced[idx] = 5
    if overflow == 0:
        return balanced
    # Give each empty apartment one of the overflow occupants.
    for idx in range(len(balanced)):
        if balanced[idx] == 0:
            balanced[idx] = 1
            overflow -= 1
            if overflow == 0:
                return balanced
    # Top the remaining apartments up to 5 with what is left.
    for idx in range(len(balanced)):
        while balanced[idx] < 5:
            balanced[idx] += 1
            overflow -= 1
            if overflow == 0:
                return balanced
    if overflow:  # pragma: no cover
        raise AssertionError('Not all occupants could be distributed.'
                             'Check inputs and/or redistribute_occ() call.')
def generate_environment(timestep=3600,
                         year_timer=2017,
                         year_co2=2017,
                         try_path=None,
                         location=(51.529086, 6.944689),
                         altitude=55,
                         new_try=False):
    """
    Returns environment object. Total number of timesteps is automatically
    generated for one year.

    Parameters
    ----------
    timestep : int
        Timestep in seconds
    year_timer : int, optional
        Chosen year of analysis (default: 2017)
        (influences initial day for profile generation)
    year_co2 : int, optional
        Chose year with specific emission factors (default: 2017)
    try_path : str, optional
        Path to TRY weather file (default: None)
        If set to None, uses default weather TRY file (2010, region 5)
    location : Tuple, optional
        (latitude , longitude) of the simulated system's position,
        (default: (51.529086, 6.944689) for Bottrop, Germany.
    altitude : float, optional
        Altitute of location in m (default: 55 - City of Bottrop)
    new_try : bool, optional
        Defines, if TRY dataset have been generated after 2017 (default: False)
        If False, assumes that TRY dataset has been generated before 2017.
        If True, assumes that TRY dataset has been generated after 2017 and
        belongs to the new TRY classes. This is important for extracting
        the correct values from the TRY dataset!

    Returns
    -------
    environment : object
        Environment object
    """
    # Create environment
    # Timer defines timestep and simulation horizon for the chosen year.
    timer = time.TimerExtended(timestep=timestep, year=year_timer)
    # Weather is read from the (optionally new-format) TRY dataset.
    weather = weath.Weather(timer, useTRY=True, pathTRY=try_path,
                            location=location, altitude=altitude,
                            new_try=new_try)
    # German market prices and year-specific CO2 emission factors.
    market = germanmarket.GermanMarket()
    co2em = co2.Emissions(year=year_co2)
    environment = env.EnvironmentExtended(timer=timer,
                                          weather=weather,
                                          prices=market,
                                          location=location,
                                          co2em=co2em)
    return environment
def generate_res_building_single_zone(environment, net_floor_area,
                                      spec_th_demand,
                                      th_gen_method,
                                      el_gen_method,
                                      annual_el_demand=None,
                                      el_random=False,
                                      use_dhw=False,
                                      dhw_method=1, number_occupants=None,
                                      build_year=None, mod_year=None,
                                      build_type=None, pv_use_area=None,
                                      height_of_floors=None, nb_of_floors=None,
                                      neighbour_buildings=None,
                                      residential_layout=None, attic=None,
                                      cellar=None, construction_type=None,
                                      dormer=None, dhw_volumen=None,
                                      do_normalization=True,
                                      slp_manipulate=True,
                                      curr_central_ahu=None,
                                      dhw_random=False, prev_heat_dev=True,
                                      season_mod=None):
    """
    Function generates and returns extended residential building object
    with single zone.

    Parameters
    ----------
    environment : object
        Environment object
    net_floor_area : float
        Net floor area of building in m2
    spec_th_demand : float
        Specific thermal energy demand in kWh/m2*a
    th_gen_method : int
        Thermal load profile generation method
        1 - Use SLP
        2 - Load Modelica simulation output profile (only residential)
        Method 2 is only used for residential buildings. For non-res.
        buildings, SLPs are generated instead
    el_gen_method : int, optional
        Electrical generation method (default: 1)
        1 - Use SLP
        2 - Generate stochastic load profile (only valid for residential
        building)
    annual_el_demand : float, optional
        Annual electrical energy demand in kWh/a (default: None)
    el_random : bool, optional
        Defines, if random value should be chosen from statistics
        or if average value should be chosen. el_random == True means,
        use random value. (default: False)
    use_dhw : bool, optional
        Boolean to define, if domestic hot water profile should be generated
        (default: False)
        True - Generate dhw profile
    dhw_method : int, optional
        Domestic hot water profile generation method (default: 1)
        1 - Use Annex 42 profile
        2 - Use stochastic profile
    number_occupants : int, optional
        Number of occupants (default: None)
    build_year : int, optional
        Building year of construction (default: None)
    mod_year : int, optional
        Last year of modernization of building (default: None)
    build_type : int, optional
        Building type (default: None)
    pv_use_area : float, optional
        Usable pv area in m2 (default: None)
    height_of_floors : float
        average height of single floor
    nb_of_floors : int
        Number of floors above the ground
    neighbour_buildings : int
        neighbour (default = 0)
        0: no neighbour
        1: one neighbour
        2: two neighbours
    residential_layout : int
        type of floor plan (default = 0)
        0: compact
        1: elongated/complex
    attic : int
        type of attic (default = 0)
        0: flat roof
        1: non heated attic
        2: partly heated attic
        3: heated attic
    cellar : int
        type of cellar (default = 0)
        0: no cellar
        1: non heated cellar
        2: partly heated cellar
        3: heated cellar
    construction_type : str
        construction type (default = "heavy")
        heavy: heavy construction
        light: light construction
    dormer : str
        construction type
        0: no dormer
        1: dormer
    dhw_volumen : float, optional
        Volume of domestic hot water in liter per capita and day
        (default: None).
    do_normalization : bool, optional
        Defines, if stochastic profile (el_gen_method=2) should be
        normalized to given annualDemand value (default: True).
        If set to False, annual el. demand depends on stochastic el. load
        profile generation. If set to True, does normalization with
        annualDemand
    slp_manipulate : bool, optional
        Defines, if thermal space heating SLP profile should be modified
        (default: True). Only used for residential buildings!
        Only relevant, if th_gen_method == 1
        True - Do manipulation
        False - Use original profile
        Sets thermal power to zero in time spaces, where average daily outdoor
        temperature is equal to or larger than 12 °C. Rescales profile to
        original demand value.
    curr_central_ahu : bool, optional
        Defines, if building has air handling unit (AHU)
        (default: False)
    dhw_random : bool, optional
        Defines, if hot water volume per person and day value should be
        randomized by choosing value from gaussian distribution (20 %
        standard deviation) (default: False)
        If True: Randomize value
        If False: Use reference value
    prev_heat_dev : bool, optional
        Defines, if heating devices should be prevented within chosen
        appliances (default: True). If set to True, DESWH, E-INST,
        Electric shower, Storage heaters and Other electric space heating
        are set to zero. Only relevant for el_gen_method == 2
    season_mod : float, optional
        Float to define rescaling factor to rescale annual lighting power curve
        with cosine wave to increase winter usage and decrease summer usage.
        Reference is maximum lighting power (default: None). If set to None,
        do NOT perform rescaling with cosine wave

    Returns
    -------
    extended_building : object
        BuildingExtended object
    """
    assert net_floor_area > 0
    assert spec_th_demand >= 0
    if annual_el_demand is not None:
        assert annual_el_demand >= 0
    else:
        # Without a given demand, occupants are required to derive one
        assert number_occupants is not None
        assert number_occupants > 0

    # Define SLP profiles for residential building with single zone
    th_slp_type = 'HEF'
    el_slp_type = 'H0'

    if number_occupants is not None:
        assert number_occupants > 0
        assert number_occupants <= 5  # Max 5 occupants for stochastic profile

        if el_gen_method == 2 or (dhw_method == 2 and use_dhw == True):
            # Generate occupancy profile (necessary for stochastic, el. or
            # dhw profile)
            occupancy_object = occup.Occupancy(environment,
                                               number_occupants=number_occupants)
        else:  # Generate occupancy object without profile generation
            # Just used to store information about number of occupants
            occupancy_object = occup.Occupancy(environment,
                                               number_occupants=number_occupants,
                                               do_profile=False)
    else:
        occupancy_object = None  # Dummy object to prevent error with
        # apartment usage

        if el_gen_method == 2:
            warnings.warn('Stochastic el. profile cannot be generated ' +
                          'due to missing number of occupants. ' +
                          'SLP is used instead.')
            # Set el_gen_method to 1 (SLP)
            el_gen_method = 1
        elif dhw_method == 2:
            # Fixed missing separating spaces in concatenated message parts
            raise AssertionError('DHW profile cannot be generated ' +
                                 'for residential building without ' +
                                 'occupants (stochastic mode). ' +
                                 'Please check your input file ' +
                                 '(missing number of occupants) ' +
                                 'or disable dhw generation.')

    if (number_occupants is None and dhw_method == 1 and use_dhw == True):
        # Set number of occupants to 2 to enable dhw usage
        number_occupants = 2

    # Create space heating demand
    if th_gen_method == 1:
        # Use SLP
        heat_power_curve = SpaceHeating.SpaceHeating(environment,
                                                     method=1,
                                                     profile_type=th_slp_type,
                                                     livingArea=net_floor_area,
                                                     specificDemand=spec_th_demand)
        if slp_manipulate:  # Do SLP manipulation
            timestep = environment.timer.timeDiscretization
            temp_array = environment.weather.tAmbient
            mod_curve = \
                slpman.slp_th_manipulator(timestep,
                                          th_slp_curve=heat_power_curve.loadcurve,
                                          temp_array=temp_array)
            heat_power_curve.loadcurve = mod_curve
    elif th_gen_method == 2:
        # Use Modelica result profile
        heat_power_curve = SpaceHeating.SpaceHeating(environment,
                                                     method=3,
                                                     livingArea=net_floor_area,
                                                     specificDemand=spec_th_demand)

    # Calculate el. energy demand for apartment, if no el. energy
    # demand is given for whole building to rescale
    if annual_el_demand is None:
        # Generate annual_el_demand_ap
        annual_el_demand = calc_el_dem_ap(nb_occ=number_occupants,
                                          el_random=el_random,
                                          type='sfh')

        print('Annual electrical demand in kWh: ', annual_el_demand)
        if number_occupants is not None:
            print('El. demand per person in kWh: ')
            print(annual_el_demand / number_occupants)
            print()

    # Create electrical power curve
    if el_gen_method == 2:
        if season_mod is not None:
            season_light_mod = True
        else:
            season_light_mod = False

        el_power_curve = ElectricalDemand.ElectricalDemand(environment,
                                                           method=2,
                                                           total_nb_occupants=number_occupants,
                                                           randomizeAppliances=True,
                                                           lightConfiguration=0,
                                                           annualDemand=annual_el_demand,
                                                           occupancy=occupancy_object.occupancy,
                                                           do_normalization=do_normalization,
                                                           prev_heat_dev=prev_heat_dev,
                                                           season_light_mod=season_light_mod,
                                                           light_mod_fac=season_mod)
    else:  # Use el. SLP
        el_power_curve = ElectricalDemand.ElectricalDemand(environment,
                                                           method=1,
                                                           annualDemand=annual_el_demand,
                                                           profileType=el_slp_type)

    # Create domestic hot water demand
    if use_dhw:
        if dhw_volumen is None or dhw_random:
            dhw_kwh = calc_dhw_dem_ap(nb_occ=number_occupants,
                                      dhw_random=dhw_random,
                                      type='sfh')

            # Reconvert kWh/a to Liters per day. This is the exact inverse
            # of calc_dhw_dem_ap (rho_water = 995 kg/m3, c_p = 4182 J/kgK,
            # delta_T = 35 K); water density fixed from 955 to 995 to match.
            dhw_vol_ap = dhw_kwh * 1000 * 3600 * 1000 / (995 * 4182 * 35 * 365)

            # DHW volume per person and day
            dhw_volumen = dhw_vol_ap / number_occupants

        if dhw_method == 1:  # Annex 42
            dhw_power_curve = DomesticHotWater.DomesticHotWater(environment,
                                                                tFlow=60,
                                                                thermal=True,
                                                                method=1,
                                                                # Annex 42
                                                                dailyConsumption=dhw_volumen * number_occupants,
                                                                supplyTemperature=25)
        else:  # Stochastic profile
            dhw_power_curve = DomesticHotWater.DomesticHotWater(environment,
                                                                tFlow=60,
                                                                thermal=True,
                                                                method=2,
                                                                supplyTemperature=25,
                                                                occupancy=occupancy_object.occupancy)

            # Rescale to reference dhw volume (liters per person
            # and day)
            curr_dhw_vol_flow = dhw_power_curve.water
            # Water volume flow in Liter/hour
            curr_volume_year = sum(curr_dhw_vol_flow) * \
                               environment.timer.timeDiscretization / \
                               3600
            curr_vol_day = curr_volume_year / 365
            curr_vol_day_and_person = curr_vol_day / \
                                      occupancy_object.number_occupants
            print('Curr. volume per person and day: ',
                  curr_vol_day_and_person)
            dhw_con_factor = dhw_volumen / curr_vol_day_and_person
            print('Conv. factor of hot water: ', dhw_con_factor)
            print('New volume per person and day: ',
                  curr_vol_day_and_person * dhw_con_factor)
            # Normalize water flow and power load
            dhw_power_curve.water *= dhw_con_factor
            dhw_power_curve.loadcurve *= dhw_con_factor

    # Create apartment
    apartment = Apartment.Apartment(environment, occupancy=occupancy_object,
                                    net_floor_area=net_floor_area)

    # Add demands to apartment
    if th_gen_method == 1 or th_gen_method == 2:
        if use_dhw:
            apartment.addMultipleEntities([heat_power_curve, el_power_curve,
                                           dhw_power_curve])
        else:
            apartment.addMultipleEntities([heat_power_curve, el_power_curve])
    else:
        if use_dhw:
            apartment.addMultipleEntities([el_power_curve,
                                           dhw_power_curve])
        else:
            apartment.addEntity(el_power_curve)

    # Create extended building object
    extended_building = \
        build_ex.BuildingExtended(environment,
                                  build_year=build_year,
                                  mod_year=mod_year,
                                  build_type=build_type,
                                  roof_usabl_pv_area=pv_use_area,
                                  net_floor_area=net_floor_area,
                                  height_of_floors=height_of_floors,
                                  nb_of_floors=nb_of_floors,
                                  neighbour_buildings=neighbour_buildings,
                                  residential_layout=residential_layout,
                                  attic=attic,
                                  cellar=cellar,
                                  construction_type=construction_type,
                                  dormer=dormer,
                                  with_ahu=
                                  curr_central_ahu)

    # Add apartment to extended building
    extended_building.addEntity(entity=apartment)

    return extended_building
def generate_res_building_multi_zone(environment,
                                     net_floor_area,
                                     spec_th_demand,
                                     th_gen_method,
                                     el_gen_method,
                                     nb_of_apartments,
                                     annual_el_demand=None,
                                     el_random=False,
                                     use_dhw=False,
                                     dhw_method=1,
                                     total_number_occupants=None,
                                     build_year=None, mod_year=None,
                                     build_type=None, pv_use_area=None,
                                     height_of_floors=None, nb_of_floors=None,
                                     neighbour_buildings=None,
                                     residential_layout=None, attic=None,
                                     cellar=None, construction_type=None,
                                     dormer=None, dhw_volumen=None,
                                     do_normalization=True,
                                     slp_manipulate=True,
                                     curr_central_ahu=False,
                                     dhw_random=False, prev_heat_dev=True,
                                     season_mod=None):
    """
    Function generates and returns extended residential building object
    with multiple apartments. Occupants are randomly distributed over
    number of apartments.

    Parameters
    ----------
    environment : object
        Environment object
    net_floor_area : float
        Net floor area of building in m2
    spec_th_demand : float
        Specific thermal energy demand in kWh/m2*a
    annual_el_demand : float, optional
        Annual electrical energy demand in kWh/a (default: None)
    el_random : bool, optional
        Defines, if random value should be chosen from statistics
        or if average value should be chosen. el_random == True means,
        use random value. (default: False)
    th_gen_method : int
        Thermal load profile generation method
        1 - Use SLP
        2 - Load Modelica simulation output profile (only residential)
        Method 2 is only used for residential buildings. For non-res.
        buildings, SLPs are generated instead
    el_gen_method : int, optional
        Electrical generation method (default: 1)
        1 - Use SLP
        2 - Generate stochastic load profile (only valid for residential
        building)
    nb_of_apartments : int
        Number of apartments within building
    use_dhw : bool, optional
        Boolean to define, if domestic hot water profile should be generated
        (default: False)
        True - Generate dhw profile
    dhw_method : int, optional
        Domestic hot water profile generation method (default: 1)
        1 - Use Annex 42 profile
        2 - Use stochastic profile
    total_number_occupants : int, optional
        Total number of occupants in all apartments (default: None)
    build_year : int, optional
        Building year of construction (default: None)
    mod_year : int, optional
        Last year of modernization of building (default: None)
    build_type : int, optional
        Building type (default: None)
    pv_use_area : float, optional
        Usable pv area in m2 (default: None)
    height_of_floors : float
        average height of the floors
    nb_of_floors : int
        Number of floors above the ground
    neighbour_buildings : int
        neighbour (default = 0)
        0: no neighbour
        1: one neighbour
        2: two neighbours
    residential_layout : int
        type of floor plan (default = 0)
        0: compact
        1: elongated/complex
    attic : int
        type of attic (default = 0)
        0: flat roof
        1: non heated attic
        2: partly heated attic
        3: heated attic
    cellar : int
        type of cellar (default = 0)
        0: no cellar
        1: non heated cellar
        2: partly heated cellar
        3: heated cellar
    construction_type : str
        construction type (default = "heavy")
        heavy: heavy construction
        light: light construction
    dormer : str
        construction type
        0: no dormer
        1: dormer
    dhw_volumen : float, optional
        Volume of domestic hot water in liter per capita and day
        (default: None).
    do_normalization : bool, optional
        Defines, if stochastic profile (el_gen_method=2) should be
        normalized to given annualDemand value (default: True).
        If set to False, annual el. demand depends on stochastic el. load
        profile generation. If set to True, does normalization with
        annualDemand
    slp_manipulate : bool, optional
        Defines, if thermal space heating SLP profile should be modified
        (default: True). Only used for residential buildings!
        Only relevant, if th_gen_method == 1
        True - Do manipulation
        False - Use original profile
        Sets thermal power to zero in time spaces, where average daily outdoor
        temperature is equal to or larger than 12 °C. Rescales profile to
        original demand value.
    curr_central_ahu : bool, optional
        Defines, if building has air handling unit (AHU)
        (default: False)
    dhw_random : bool, optional
        Defines, if hot water volume per person and day value should be
        randomized by choosing value from gaussian distribution (20 %
        standard deviation) (default: False)
        If True: Randomize value
        If False: Use reference value
    prev_heat_dev : bool, optional
        Defines, if heating devices should be prevented within chosen
        appliances (default: True). If set to True, DESWH, E-INST,
        Electric shower, Storage heaters and Other electric space heating
        are set to zero. Only relevant for el_gen_method == 2
    season_mod : float, optional
        Float to define rescaling factor to rescale annual lighting power curve
        with cosine wave to increase winter usage and decrease summer usage.
        Reference is maximum lighting power (default: None). If set to None,
        do NOT perform rescaling with cosine wave

    Returns
    -------
    extended_building : object
        BuildingExtended object

    Annotation
    ----------
    Raise assertion error when share of occupants per apartment is higher
    than 5 (necessary for stochastic, el. profile generation)
    """
    assert net_floor_area > 0
    assert spec_th_demand >= 0
    if annual_el_demand is not None:
        assert annual_el_demand >= 0

    if total_number_occupants is not None:
        assert total_number_occupants > 0
        assert total_number_occupants / nb_of_apartments <= 5, (
            'Number of occupants per apartment is ' +
            'at least once higher than 5.')

        # Distribute occupants to different apartments
        occupancy_list = constrained_sum_sample_pos(n=nb_of_apartments,
                                                    total=total_number_occupants)

        # While not all values are smaller or equal to 5, return run
        # This while loop might lead to large runtimes for buildings with a
        # large number of apartments (not finding a valid solution, see
        # issue #147). Thus, we add a counter to exit the loop
        count = 0
        while all(i <= 5 for i in occupancy_list) is not True:
            occupancy_list = constrained_sum_sample_pos(n=nb_of_apartments,
                                                        total=total_number_occupants)
            if count == 100000:
                # Take current occupancy_list and redistribute occupants
                # manually until valid distribution is found
                occupancy_list = redistribute_occ(occ_list=occupancy_list)
                # Exit while loop
                break
            count += 1

        print('Current list of occupants per apartment: ', occupancy_list)
    else:
        msg = 'Number of occupants is None for current building!'
        warnings.warn(msg)

    # Define SLP profiles for residential building with multiple zone
    th_slp_type = 'HMF'
    el_slp_type = 'H0'

    # Create extended building object
    extended_building = \
        build_ex.BuildingExtended(environment,
                                  build_year=build_year,
                                  mod_year=mod_year,
                                  build_type=build_type,
                                  roof_usabl_pv_area=pv_use_area,
                                  net_floor_area=net_floor_area,
                                  height_of_floors=height_of_floors,
                                  nb_of_floors=nb_of_floors,
                                  neighbour_buildings=
                                  neighbour_buildings,
                                  residential_layout=
                                  residential_layout,
                                  attic=attic,
                                  cellar=cellar,
                                  construction_type=
                                  construction_type,
                                  dormer=dormer,
                                  with_ahu=curr_central_ahu)

    if annual_el_demand is not None:
        # Distribute el. demand equally to apartments
        annual_el_demand_ap = annual_el_demand / nb_of_apartments
    else:
        annual_el_demand_ap = None

    # Loop over apartments
    # #---------------------------------------------------------------------
    for i in range(int(nb_of_apartments)):

        # Dummy init of number of occupants
        curr_number_occupants = None

        # Check number of occupants
        if total_number_occupants is not None:
            # Get number of occupants
            curr_number_occupants = occupancy_list[i]

            # Generate occupancy profiles for stochastic el. and/or dhw
            if el_gen_method == 2 or (dhw_method == 2 and use_dhw):
                # Generate occupancy profile (necessary for stochastic, el. or
                # dhw profile)
                occupancy_object = occup.Occupancy(environment,
                                                   number_occupants=
                                                   curr_number_occupants)
            else:  # Generate occupancy object without profile
                occupancy_object = occup.Occupancy(environment,
                                                   number_occupants=
                                                   curr_number_occupants,
                                                   do_profile=False)
        else:
            # Fix: initialize occupancy_object to prevent UnboundLocalError
            # at Apartment creation below (consistent with
            # generate_res_building_single_zone)
            occupancy_object = None

            if el_gen_method == 2:
                warnings.warn('Stochastic el. profile cannot be generated ' +
                              'due to missing number of occupants. ' +
                              'SLP is used instead.')
                # Set el_gen_method to 1 (SLP)
                el_gen_method = 1
            elif dhw_method == 2:
                # Fixed missing separating spaces in concatenated message
                raise AssertionError('DHW profile cannot be generated ' +
                                     'for residential building without ' +
                                     'occupants (stochastic mode). ' +
                                     'Please check your input file ' +
                                     '(missing number of occupants) ' +
                                     'or disable dhw generation.')

        if (curr_number_occupants is None and dhw_method == 1 and
                use_dhw == True):
            # If dhw profile should be generated, but current number of
            # occupants is None, number of occupants is samples from
            # occupancy distribution for apartment
            curr_number_occupants = usunc.calc_sampling_occ_per_app(
                nb_samples=1)

        # Assumes equal area share for all apartments
        apartment_area = net_floor_area / nb_of_apartments

        # Create space heating demand (for apartment)
        if th_gen_method == 1:
            # Use SLP
            heat_power_curve = \
                SpaceHeating.SpaceHeating(environment,
                                          method=1,
                                          profile_type=th_slp_type,
                                          livingArea=apartment_area,
                                          specificDemand=spec_th_demand)
            if slp_manipulate:  # Do SLP manipulation
                timestep = environment.timer.timeDiscretization
                temp_array = environment.weather.tAmbient
                mod_curve = \
                    slpman.slp_th_manipulator(timestep,
                                              th_slp_curve=heat_power_curve.loadcurve,
                                              temp_array=temp_array)
                heat_power_curve.loadcurve = mod_curve
        elif th_gen_method == 2:
            # Use Modelica result profile
            heat_power_curve = SpaceHeating.SpaceHeating(environment,
                                                         method=3,
                                                         livingArea=apartment_area,
                                                         specificDemand=spec_th_demand)

        # Calculate el. energy demand for apartment, if no el. energy
        # demand is given for whole building to rescale
        # NOTE(review): the value computed for the first apartment is
        # reused for all subsequent apartments (annual_el_demand_ap is no
        # longer None afterwards) — confirm this is intended
        if annual_el_demand_ap is None:
            # Generate annual_el_demand_ap
            annual_el_demand_ap = calc_el_dem_ap(nb_occ=curr_number_occupants,
                                                 el_random=el_random,
                                                 type='mfh')

        print('Annual el. demand (apartment) in kWh: ', annual_el_demand_ap)
        if curr_number_occupants is not None:
            print('El. demand per person in kWh: ')
            print(annual_el_demand_ap / curr_number_occupants)
            print()

        # Create electrical power curve
        if el_gen_method == 2:
            if season_mod is not None:
                season_light_mod = True
            else:
                season_light_mod = False

            el_power_curve = ElectricalDemand.ElectricalDemand(environment,
                                                               method=2,
                                                               total_nb_occupants=curr_number_occupants,
                                                               randomizeAppliances=True,
                                                               lightConfiguration=0,
                                                               annualDemand=annual_el_demand_ap,
                                                               occupancy=occupancy_object.occupancy,
                                                               do_normalization=do_normalization,
                                                               prev_heat_dev=prev_heat_dev,
                                                               season_light_mod=season_light_mod,
                                                               light_mod_fac=season_mod)
        else:  # Use el. SLP
            el_power_curve = ElectricalDemand.ElectricalDemand(environment,
                                                               method=1,
                                                               annualDemand=annual_el_demand_ap,
                                                               profileType=el_slp_type)

        # Create domestic hot water demand
        if use_dhw:
            if dhw_volumen is None or dhw_random:
                dhw_kwh = calc_dhw_dem_ap(nb_occ=curr_number_occupants,
                                          dhw_random=dhw_random,
                                          type='mfh')

                # Reconvert kWh/a to Liters per day. This is the exact
                # inverse of calc_dhw_dem_ap (rho_water = 995 kg/m3,
                # c_p = 4182 J/kgK, delta_T = 35 K); water density fixed
                # from 955 to 995 to match.
                dhw_vol_ap = dhw_kwh * 1000 * 3600 * 1000 / (
                        995 * 4182 * 35 * 365)

                # DHW volume per person and day
                dhw_volumen = dhw_vol_ap / curr_number_occupants

            if dhw_method == 1:  # Annex 42
                dhw_power_curve = DomesticHotWater.DomesticHotWater(
                    environment,
                    tFlow=60,
                    thermal=True,
                    method=1,
                    # Annex 42
                    dailyConsumption=dhw_volumen * curr_number_occupants,
                    supplyTemperature=25)
            else:  # Stochastic profile
                dhw_power_curve = DomesticHotWater.DomesticHotWater(
                    environment,
                    tFlow=60,
                    thermal=True,
                    method=2,
                    supplyTemperature=25,
                    occupancy=occupancy_object.occupancy)

                # Rescale to reference dhw volume (liters per person
                # and day)
                curr_dhw_vol_flow = dhw_power_curve.water
                # Water volume flow in Liter/hour
                curr_volume_year = sum(curr_dhw_vol_flow) * \
                                   environment.timer.timeDiscretization / \
                                   3600
                curr_vol_day = curr_volume_year / 365
                curr_vol_day_and_person = curr_vol_day / \
                                          occupancy_object.number_occupants
                print('Curr. volume per person and day: ',
                      curr_vol_day_and_person)
                dhw_con_factor = dhw_volumen / curr_vol_day_and_person
                print('Conv. factor of hot water: ', dhw_con_factor)
                print('New volume per person and day: ',
                      curr_vol_day_and_person * dhw_con_factor)
                # Normalize water flow and power load
                dhw_power_curve.water *= dhw_con_factor
                dhw_power_curve.loadcurve *= dhw_con_factor

        # Create apartment
        apartment = Apartment.Apartment(environment,
                                        occupancy=occupancy_object,
                                        net_floor_area=apartment_area)

        # Add demands to apartment
        if th_gen_method == 1 or th_gen_method == 2:
            if use_dhw:
                apartment.addMultipleEntities([heat_power_curve,
                                               el_power_curve,
                                               dhw_power_curve])
            else:
                apartment.addMultipleEntities([heat_power_curve,
                                               el_power_curve])
        else:
            if use_dhw:
                apartment.addMultipleEntities([el_power_curve,
                                               dhw_power_curve])
            else:
                apartment.addEntity(el_power_curve)

        # Add apartment to extended building
        extended_building.addEntity(entity=apartment)

    return extended_building
def generate_nonres_building_single_zone(environment,
                                         net_floor_area, spec_th_demand,
                                         annual_el_demand, th_slp_type,
                                         el_slp_type=None,
                                         build_year=None, mod_year=None,
                                         build_type=None, pv_use_area=None,
                                         method_3_type=None,
                                         method_4_type=None,
                                         height_of_floors=None,
                                         nb_of_floors=None):
    """
    Generate a single-zone, non-residential BuildingExtended object.

    Space heating load is always SLP-based. The electric load source is
    selected in priority order: measured weekly profile (method_3_type),
    measured annual profile (method_4_type), electrical SLP (el_slp_type).

    Parameters
    ----------
    environment : object
        Environment object
    net_floor_area : float
        Net floor area of building in m2
    spec_th_demand : float
        Specific thermal energy demand in kWh/m2*a
    annual_el_demand : float
        Annual electrical energy demand in kWh/a
    th_slp_type : str
        Thermal SLP type (for non-residential buildings)
        - `GBA` : Bakeries
        - `GBD` : Other services
        - `GBH` : Accomodations
        - `GGA` : Restaurants
        - `GGB` : Gardening
        - `GHA` : Retailers
        - `GHD` : Summed load profile business, trade and services
        - `GKO` : Banks, insurances, public institutions
        - `GMF` : Household similar businesses
        - `GMK` : Automotive
        - `GPD` : Paper and printing
        - `GWA` : Laundries
    el_slp_type : str, optional (default: None)
        Electrical SLP type
        - H0 : Household
        - L0 : Farms
        - L1 : Farms with breeding / cattle
        - L2 : Farms without cattle
        - G0 : Business (general)
        - G1 : Business (workingdays 8:00 AM - 6:00 PM)
        - G2 : Business with high loads in the evening
        - G3 : Business (24 hours)
        - G4 : Shops / Barbers
        - G5 : Bakery
        - G6 : Weekend operation
    build_year : int, optional
        Building year of construction (default: None)
    mod_year : int, optional
        Last year of modernization of building (default: None)
    build_type : int, optional
        Building type (default: None)
    pv_use_area : float, optional
        Usable pv area in m2 (default: None)
    method_3_type : str, optional
        Defines type of profile for method=3 (default: None)
        Options:
        - 'food_pro': Food production
        - 'metal': Metal company
        - 'rest': Restaurant (with large cooling load)
        - 'sports': Sports hall
        - 'repair': Repair / metal shop
    method_4_type : str, optional
        Defines type of profile for method=4 (default: None)
        - 'metal_1' : Metal company with smooth profile
        - 'metal_2' : Metal company with fluctuation in profile
        - 'warehouse' : Warehouse
    height_of_floors : float
        Average height of the floors
    nb_of_floors : int
        Number of floors above the ground

    Returns
    -------
    extended_building : object
        BuildingExtended object
    """
    # Input sanity checks; residential-only SLP types are rejected
    assert net_floor_area > 0
    assert spec_th_demand >= 0
    assert annual_el_demand >= 0
    assert th_slp_type != 'HEF', ('HEF thermal slp profile only valid for ' +
                                  'residential buildings.')
    assert th_slp_type != 'HMF', ('HMF thermal slp profile only valid for ' +
                                  'residential buildings.')
    assert el_slp_type != 'H0', ('H0 thermal slp profile only valid for ' +
                                 'residential buildings.')

    # Thermal space heating profile (always SLP for non-residential use)
    space_heat_load = SpaceHeating.SpaceHeating(
        environment,
        method=1,
        profile_type=th_slp_type,
        livingArea=net_floor_area,
        specificDemand=spec_th_demand)

    # Electric load profile: measured weekly profile takes priority,
    # then measured annual profile, with the electrical SLP as fallback
    if method_3_type is not None:
        el_load = ElectricalDemand.ElectricalDemand(
            environment,
            method=3,
            annualDemand=annual_el_demand,
            do_normalization=True,
            method_3_type=method_3_type)
    elif method_4_type is not None:
        el_load = ElectricalDemand.ElectricalDemand(
            environment,
            method=4,
            annualDemand=annual_el_demand,
            do_normalization=True,
            method_4_type=method_4_type)
    else:
        assert el_slp_type is not None, 'el_slp_type is required!'
        el_load = ElectricalDemand.ElectricalDemand(
            environment,
            method=1,
            annualDemand=annual_el_demand,
            profileType=el_slp_type)

    # Single zone: wrap both demands into one apartment object
    zone = Apartment.Apartment(environment)
    zone.addMultipleEntities([space_heat_load, el_load])

    # Assemble the extended building and attach the zone
    extended_building = build_ex.BuildingExtended(
        environment,
        net_floor_area=net_floor_area,
        build_year=build_year,
        mod_year=mod_year,
        build_type=build_type,
        roof_usabl_pv_area=pv_use_area,
        height_of_floors=height_of_floors,
        nb_of_floors=nb_of_floors,
    )
    extended_building.addEntity(entity=zone)

    return extended_building
def get_district_data_from_txt(path, delimiter='\t'):
    """
    Read city district data from a txt file and return it as a 2d array.

    Missing entries (naN) are replaced with Python None. See annotations
    below for the expected column layout.

    Parameters
    ----------
    path : str
        Path to txt file
    delimiter : str, optional
        Defines delimiter for txt file (default: '\t')

    Returns
    -------
    district_data : ndarray
        Numpy 2d-array with city district data (each column represents
        different parameter, see annotations)

    Annotations
    -----------
    File structure
    Columns:
    1:  id (int)
    2:  x in m (float)
    3:  y in m (float)
    4:  building_type (int, e.g. 0 for residential building)
    5:  net floor area in m2 (float)
    6:  Year of construction (int, optional)
    7:  Year of modernization (int, optional)
    8:  Annual (final) thermal energy demand in kWh (float, optional)
    9:  Annual electrical energy demand in kWh (float, optional)
    10: Usable pv roof area in m2 (float, optional)
    11: Number of apartments (int, optional)
    12: Total number of occupants (int, optional)
    13: Number of floors above the ground (int, optional)
    14: Average Height of floors (float, optional)
    15: If building has a central AHU or not (boolean, optional)
    16: Residential layout (int, optional, e.g. 0 for compact)
    17: Neighbour Buildings (int, optional) (0 - free standing)
    (1 - double house) (2 - row house)
    18: Type of attic (int, optional, e.g. 0 for flat roof) (1 - regular roof;
    unheated) (2 - regular roof; partially heated) (3 - regular roof; fully
    heated)
    19: Type of cellar (int, optional, e.g. 1 for non heated cellar)
    (0 - no basement) (1 - non heated) (2 - partially heated) (3 - fully heated)
    20: Dormer (int, optional, 0: no dormer/ 1: dormer)
    21: Construction Type(heavy/light, optional) (0 - heavy; 1 - light)
    22: Method_3_nb (for usage of measured, weekly non-res. el. profile
    (optional)
    23: Method_4_nb (for usage of measured, annual non-res. el. profile
    (optional)
    """
    # Parse the file (header row skipped); missing fields become nan
    raw_data = np.genfromtxt(path, delimiter=delimiter, skip_header=1)

    # Substitute Python None for every nan entry before returning
    return np.where(np.isnan(raw_data), None, raw_data)
def calc_el_dem_ap(nb_occ, el_random, type):
    """
    Return the annual electric energy demand of one apartment in kWh/a
    (residential buildings, only).

    Parameters
    ----------
    nb_occ : int
        Number of occupants
    el_random : bool
        Defines, if random value should be chosen from statistics
        or if average value should be chosen. el_random == True means,
        use random value.
    type : str
        Define residential building type (single family or multi-
        family)
        Options:
        - 'sfh' : Single family house
        - 'mfh' : Multi family house

    Returns
    -------
    el_dem : float
        Electric energy demand per apartment in kWh/a
    """
    assert nb_occ > 0
    assert nb_occ <= 5, 'Number of occupants cannot exceed 5 per ap.'
    assert type in ['sfh', 'mfh']

    if el_random:
        # Draw a single sample from the el. demand statistics
        return usunc.calc_sampling_el_demand_per_apartment(
            nb_samples=1,
            nb_persons=nb_occ,
            type=type)[0]

    # Average annual demands (Stromspiegel 2017, class D, without hot
    # water), keyed by building type and number of occupants
    ref_demands = {'sfh': {1: 2500, 2: 3200, 3: 3900, 4: 4200, 5: 5400},
                   'mfh': {1: 1500, 2: 2200, 3: 2800, 4: 3200, 5: 4000}}
    return ref_demands[type][nb_occ]
def calc_dhw_dem_ap(nb_occ, dhw_random, type, delta_t=35, c_p_water=4182,
                    rho_water=995):
    """
    Return the annual hot water energy demand of one apartment in kWh/a
    (residential buildings, only).

    Parameters
    ----------
    nb_occ : int
        Number of occupants
    dhw_random : bool
        Defines, if random value should be chosen from statistics
        or if average value should be chosen. dhw_random == True means,
        use random value.
    type : str
        Define residential building type (single family or multi-
        family)
        Options:
        - 'sfh' : Single family house
        - 'mfh' : Multi family house
    delta_t : float, optional
        Temperature split of heated up water in Kelvin (default: 35)
    c_p_water : float, optional
        Specific heat capacity of water in J/kgK (default: 4182)
    rho_water : float, optional
        Density of water in kg/m3 (default: 995)

    Returns
    -------
    dhw_dem : float
        Electric energy demand per apartment in kWh/a
    """
    assert nb_occ > 0
    assert nb_occ <= 5, 'Number of occupants cannot exceed 5 per ap.'
    assert type in ['sfh', 'mfh']

    if dhw_random:
        # Sample a daily hot water volume (liters per apartment and day)
        # from the statistics ...
        vol_per_day = usunc.calc_sampling_dhw_per_apartment(
            nb_samples=1,
            nb_persons=nb_occ,
            b_type=type)[0]
        # ... and convert liters/day into kWh/a
        return vol_per_day * 365 * rho_water * c_p_water * delta_t / \
               (1000 * 3600 * 1000)

    # Average annual demands (Stromspiegel 2017, class D), keyed by
    # building type and number of occupants
    ref_demands = {'sfh': {1: 500, 2: 800, 3: 1000, 4: 1300, 5: 1600},
                   'mfh': {1: 500, 2: 900, 3: 1300, 4: 1400, 5: 2000}}
    return ref_demands[type][nb_occ]
def run_city_generator(generation_mode, timestep,
year_timer, year_co2,
location,
th_gen_method,
el_gen_method, district_data, use_dhw=False,
dhw_method=1, try_path=None,
pickle_city_filename=None, do_save=True,
path_save_city=None, eff_factor=0.85,
show_city=False, altitude=55, dhw_volumen=None,
do_normalization=True, slp_manipulate=True,
call_teaser=False, teaser_proj_name='pycity',
do_log=True, log_path=None,
project_name='teaser_project',
air_vent_mode=1, vent_factor=0.5,
t_set_heat=20,
t_set_cool=70,
t_night=16,
vdi_sh_manipulate=False, city_osm=None,
el_random=False, dhw_random=False, prev_heat_dev=True,
season_mod=None, merge_windows=False, new_try=False):
"""
Function generates city district for user defined input. Generated
buildings consist of only one single zone!
Parameters
----------
generation_mode : int
Integer to define method to generate city district
(so far, only csv/txt file import has been implemented)
generation_mode = 0: Load data from csv/txt file (tab seperated)
timestep : int
Timestep in seconds
year_timer : int
Chosen year of analysis
(influences initial day for profile generation)
year_co2 : int, optional
Chose year with specific emission factors
location : Tuple
(latitude, longitude) of the simulated system's position.
th_gen_method : int
Thermal load profile generation method
1 - Use SLP
2 - Load Modelica simulation output profile (only residential)
Method 2 is only used for residential buildings. For non-res.
buildings, SLPs are generated instead
3 - Use TEASER VDI 6007 core to simulate thermal loads‚
el_gen_method : int
Electrical generation method
1 - Use SLP
2 - Generate stochastic load profile (only valid for residential
building). Requires number of occupants.
district_data : ndarray
Numpy 2d-array with city district data (each column represents
different parameter, see annotations)
use_dhw : bool, optional
Defines if domestic hot water profiles should be generated.
(default: False)
dhw_method : int, optional
Defines method for dhw profile generation (default: 1)
Only relevant if use_dhw=True. Options:
- 1: Generate profiles via Annex 42
- 2: Generate stochastic dhw profiles
try_path : str, optional
Path to TRY weather file (default: None)
If set to None, uses default weather TRY file (2010, region 5)
pickle_city_filename : str, optional
Name for file, which should be pickled and saved, if no path is
handed over to save object to(default: None)
do_save : bool, optional
Defines, if city object instance should be saved as pickle file
(default: True)
path_save_city : str, optional
Path to save (pickle and dump) city object instance to (default: None)
If None is used, saves file to .../output/...
eff_factor : float, optional
Efficiency factor of thermal boiler system (default: 0.85)
show_city : bool, optional
Boolean to define if city district should be printed by matplotlib
after generation (default: False)
True: Print results
False: Do not print results
altitude : float, optional
Altitude of location in m (default: 55 - City of Bottrop)
dhw_volumen : float, optional
Volume of domestic hot water in liter per capita and day
(default: None).
do_normalization : bool, optional
Defines, if stochastic profile (el_gen_method=2) should be
normalized to given annualDemand value (default: True).
If set to False, annual el. demand depends on stochastic el. load
profile generation. If set to True, does normalization with
annualDemand
slp_manipulate : bool, optional
Defines, if thermal space heating SLP profile should be modified
(default: True). Only used for residential buildings!
Only relevant, if th_gen_method == 1
True - Do manipulation
False - Use original profile
Sets thermal power to zero in time spaces, where average daily outdoor
temperature is equal to or larger than 12 °C. Rescales profile to
original demand value.
call_teaser : bool, optional
Defines, if teaser should be called to generate typeBuildings
(currently, residential typeBuildings only).
(default: False)
If set to True, generates typeBuildings and add them to building node
as attribute 'type_building'
teaser_proj_name : str, optional
TEASER project name (default: 'pycity'). Only relevant, if call_teaser
is set to True
do_log : bool, optional
Defines, if log file of inputs should be generated (default: True)
log_path : str, optional
Path to log file (default: None). If set to None, saves log to
.../output
air_vent_mode : int
Defines method to generation air exchange rate for VDI 6007 simulation
Options:
0 : Use constant value (vent_factor in 1/h)
1 : Use deterministic, temperature-dependent profile
2 : Use stochastic, user-dependent profile
vent_factor : float, optional
Ventilation rate factor in 1/h (default: 0.5). Only used, if
array_vent_rate is None (otherwise, array_vent_rate array is used)
t_set_heat : float, optional
Heating set temperature in degree Celsius. If temperature drops below
t_set_heat, model is going to be heated up. (default: 20)
(Related to constraints for res. buildings in DIN V 18599)
t_set_cool : float, optional
Cooling set temperature in degree Celsius. If temperature rises above
t_set_cool, model is going to be cooled down. (default: 70)
t_night : float, optional
Night set back temperature in degree Celsius (default: 16)
(Related to constraints for res. buildings in DIN V 18599)
project_name : str, optional
TEASER project name (default: 'teaser_project')
vdi_sh_manipulate : bool, optional
Defines, if VDI 6007 thermal space heating load curve should be
normalized to match given annual space heating demand in kWh
(default: False)
el_random : bool, optional
Defines, if annual, eletrical demand value for normalization of
el. load profile should randomly diverge from reference value
within specific boundaries (default: False).
If False: Use reference value for normalization
If True: Allow generating values that is different from reference value
dhw_random : bool, optional
Defines, if hot water volume per person and day value should be
randomized by choosing value from gaussian distribution (20 %
standard deviation) (default: False)
If True: Randomize value
If False: Use reference value
prev_heat_dev : bool, optional
Defines, if heating devices should be prevented within chosen
appliances (default: True). If set to True, DESWH, E-INST,
Electric shower, Storage heaters and Other electric space heating
are set to zero. Only relevant for el_gen_method == 2
season_mod : float, optional
Float to define rescaling factor to rescale annual lighting power curve
with cosine wave to increase winter usage and decrease summer usage.
Reference is maximum lighting power (default: None). If set to None,
do NOT perform rescaling with cosine wave
merge_windows : bool, optional
Defines TEASER project setting for merge_windows_calc
(default: False). If set to False, merge_windows_calc is set to False.
If True, Windows are merged into wall resistances.
new_try : bool, optional
Defines, if TRY dataset have been generated after 2017 (default: False)
If False, assumes that TRY dataset has been generated before 2017.
If True, assumes that TRY dataset has been generated after 2017 and
belongs to the new TRY classes. This is important for extracting
the correct values from the TRY dataset!
Returns
-------
city_object : object
City object of pycity_calc
Annotations
-----------
Non-residential building loads are automatically generated via SLP
(even if el_gen_method is set to 2). Furthermore, dhw profile generation
is automatically neglected (only valid for residential buildings)
Electrical load profiles of residential buildings without occupants
are automatically generated via SLP (even if el_gen_method is set to 2)
File structure (district_data np.array)
Columns:
1: id (int)
2: x in m (float)
3: y in m (float)
4: building_type (int, e.g. 0 for residential building)
5: net floor area in m2 (float)
6: Year of construction (int, optional)
7: Year of modernization (int, optional)
8: Annual (final) thermal energy demand in kWh (float, optional)
For residential: space heating, only!
For non-residential: Space heating AND hot water! (SLP usage)
9: Annual electrical energy demand in kWh (float, optional)
10: Usable pv roof area in m2 (float, optional)
11: Number of apartments (int, optional)
12: Total number of occupants (int, optional)
13: Number of floors above the ground (int, optional)
14: Average Height of floors (float, optional)
15: If building has a central AHU or not (boolean, optional)
16: Residential layout (int, optional, e.g. 0 for compact)
17: Neighbour Buildings (int, optional); 0 - free standing; 1 - Double house; 2 - Row house;
18: Type of attic (int, optional, e.g. 0 for flat roof); 1 - Roof, non heated; 2 - Roof, partially heated; 3- Roof, fully heated;
19: Type of basement (int, optional, e.g. 1 for non heated basement 0 - No basement; 1 - basement, non heated; 2 - basement, partially heated; 3- basement, fully heated;
20: Dormer (int, optional, 0: no dormer/ 1: dormer)
21: Construction Type(heavy/light, optional) (0 - heavy; 1 - light)
22: Method_3_nb (for usage of measured, weekly non-res. el. profile
(optional) (0 to 4)
23: Method_4_nb (for usage of measured, annual non-res. el. profile
(optional) (0 - 2)
method_3_type : str, optional
Defines type of profile for method=3 (default: None)
Options:
0 - 'food_pro': Food production
1 - 'metal': Metal company
2 - 'rest': Restaurant (with large cooling load)
3 - 'sports': Sports hall
4 - 'repair': Repair / metal shop
method_4_type : str, optional
Defines type of profile for method=4 (default: None)
0 - 'metal_1' : Metal company with smooth profile
1 - 'metal_2' : Metal company with fluctuation in profile
2 - 'warehouse' : Warehouse
"""
assert eff_factor > 0, 'Efficiency factor has to be larger than zero.'
assert eff_factor <= 1, 'Efficiency factor cannot increase value 1.'
if dhw_volumen is not None: # pragma: no cover
assert dhw_volumen >= 0, 'Hot water volume cannot be below zero.'
if generation_mode == 1: # pragma: no cover
assert city_osm is not None, 'Generation mode 1 requires city object!'
if vdi_sh_manipulate is True and th_gen_method == 3: # pragma: no cover
msg = 'Simulated profiles of VDI 6007 call (TEASER --> ' \
'space heating) is going to be normalized with annual thermal' \
' space heating demand values given by user!'
warnings.warn(msg)
if do_log: # pragma: no cover
# Write log file
# ################################################################
# Log file path
if log_path is None:
# If not existing, use default path
this_path = os.path.dirname(os.path.abspath(__file__))
log_path = os.path.join(this_path, 'output', 'city_gen_log.txt')
log_file = open(log_path, mode='w')
log_file.write('PyCity_Calc city_generator.py log file')
log_file.write('\n############## Time and location ##############\n')
log_file.write('Date: ' + str(datetime.datetime.now()) + '\n')
log_file.write('generation_mode: ' + str(generation_mode) + '\n')
log_file.write('timestep in seconds: ' + str(timestep) + '\n')
log_file.write('Year for timer: ' + str(year_timer) + '\n')
log_file.write('Year for CO2 emission factors: '
+ str(year_co2) + '\n')
log_file.write('Location: ' + str(location) + '\n')
log_file.write('altitude: ' + str(altitude) + '\n')
if generation_mode == 0:
log_file.write('Generation mode: csv/txt input, only.\n')
elif generation_mode == 1:
log_file.write('Generation mode: csv/txt plus city osm object.\n')
log_file.write('\n############## Generation methods ##############\n')
log_file.write('th_gen_method: ' + str(th_gen_method) + '\n')
if th_gen_method == 1:
log_file.write('Manipulate SLP: ' + str(slp_manipulate) + '\n')
elif th_gen_method == 3:
log_file.write('t_set_heat: ' + str(t_set_heat) + '\n')
log_file.write('t_set_night: ' + str(t_night) + '\n')
log_file.write('t_set_cool: ' + str(t_set_cool) + '\n')
log_file.write('air_vent_mode: ' + str(air_vent_mode) + '\n')
log_file.write('vent_factor: ' + str(vent_factor) + '\n')
log_file.write('el_gen_method: ' + str(el_gen_method) + '\n')
log_file.write(
'Normalize el. profile: ' + str(do_normalization) + '\n')
log_file.write(
'Do random el. normalization: ' + str(el_random) + '\n')
log_file.write(
'Prevent el. heating devices for el load generation: '
'' + str(prev_heat_dev) + '\n')
log_file.write(
'Rescaling factor lighting power curve to implement seasonal '
'influence: ' + str(season_mod) + '\n')
log_file.write('use_dhw: ' + str(use_dhw) + '\n')
log_file.write('dhw_method: ' + str(dhw_method) + '\n')
log_file.write('dhw_volumen: ' + str(dhw_volumen) + '\n')
log_file.write(
'Do random dhw. normalization: ' + str(dhw_random) + '\n')
log_file.write('\n############## Others ##############\n')
log_file.write('try_path: ' + str(try_path) + '\n')
log_file.write('eff_factor: ' + str(eff_factor) + '\n')
log_file.write('timestep in seconds: ' + str(timestep) + '\n')
log_file.write('call_teaser: ' + str(call_teaser) + '\n')
log_file.write('teaser_proj_name: ' + str(teaser_proj_name) + '\n')
# Log file is closed, after pickle filename has been generated
# (see code below)
if generation_mode == 0 or generation_mode == 1:
# ##################################################################
# Load specific demand files
# Load specific thermal demand input data
spec_th_dem_res_building = load_data_file_with_spec_demand_data(
'RWI_res_building_spec_th_demand.txt')
start_year_column = (spec_th_dem_res_building[:, [0]])
# Reverse
start_year_column = start_year_column[::-1]
"""
Columns:
1. Start year (int)
2. Final year (int)
3. Spec. thermal energy demand in kWh/m2*a (float)
"""
# ##################################################################
# Load specific electrical demand input data
spec_el_dem_res_building = load_data_file_with_spec_demand_data(
'AGEB_res_building_spec_e_demand.txt')
"""
Columns:
1. Start year (int)
2. Final year (int)
3. Spec. thermal energy demand in kWh/m2*a (float)
"""
# ##################################################################
# Load specific electrical demand input data
# (depending on number of occupants)
spec_el_dem_res_building_per_person = \
load_data_file_with_spec_demand_data(
'Stromspiegel2017_spec_el_energy_demand.txt')
"""
Columns:
1. Number of persons (int) ( 1 - 5 SFH and 1 - 5 MFH)
2. Annual electrical demand in kWh/a (float)
3. Specific electrical demand per person in kWh/person*a (float)
"""
# ###################################################################
# Load specific demand data and slp types for
# non residential buildings
spec_dem_and_slp_non_res = load_data_file_with_spec_demand_data(
'Spec_demands_non_res.txt')
"""
Columns:
1. type_id (int)
2. type_name (string) # Currently 'nan', due to expected float
3. Spec. thermal energy demand in kWh/m2*a (float)
4. Spec. electrical energy demand in kWh/m2*a (float)
5. Thermal SLP type (int)
6. Electrical SLP type (int)
"""
# ###################################################################
# Generate city district
# Generate extended environment of pycity_calc
environment = generate_environment(timestep=timestep,
year_timer=year_timer,
year_co2=year_co2,
location=location,
try_path=try_path,
altitude=altitude,
new_try=new_try)
print('Generated environment object.\n')
if generation_mode == 0:
# Generate city object
# ############################################################
city_object = city.City(environment=environment)
print('Generated city object.\n')
else:
# Overwrite city_osm environment
print('Overwrite city_osm.environment with new environment')
city_osm.environment = environment
city_object = city_osm
# Check if district_data only holds one entry for single building
# In this case, has to be processed differently
if district_data.ndim > 1:
multi_data = True
else: # Only one entry (single building)
multi_data = False
# If multi_data is false, loop below is going to be exited with
# a break statement at the end.
# Generate dummy node id and thermal space heating demand dict
dict_id_vdi_sh = {}
# Loop over district_data
# ############################################################
for i in range(len(district_data)):
if multi_data:
# Extract data out of input file
curr_id = int(
district_data[i][0]) # id / primary key of building
curr_x = district_data[i][1] # x-coordinate in m
curr_y = district_data[i][2] # y-coordinate in m
curr_build_type = int(
district_data[i][3]) # building type nb (int)
curr_nfa = district_data[i][4] # Net floor area in m2
curr_build_year = district_data[i][5] # Year of construction
curr_mod_year = district_data[i][
6] # optional (last year of modernization)
curr_th_e_demand = district_data[i][
7] # optional: Final thermal energy demand in kWh
# For residential buildings: Space heating only!
# For non-residential buildings: Space heating AND hot water! (SLP)
curr_el_e_demand = district_data[i][
8] # optional (Annual el. energy demand in kWh)
curr_pv_roof_area = district_data[i][
9] # optional (Usable pv roof area in m2)
curr_nb_of_apartments = district_data[i][
10] # optional (Number of apartments)
curr_nb_of_occupants = district_data[i][
11] # optional (Total number of occupants)
curr_nb_of_floors = district_data[i][
12] # optional (Number of floors above the ground)
curr_avg_height_of_floors = district_data[i][
13] # optional (Average Height of floors)
curr_central_ahu = district_data[i][
14] # optional (If building has a central air handling unit (AHU) or not (boolean))
curr_res_layout = district_data[i][
15] # optional Residential layout (int, optional, e.g. 0 for compact)
curr_nb_of_neighbour_bld = district_data[i][
16] # optional Neighbour Buildings (int, optional)
curr_type_attic = district_data[i][
17] # optional Type of attic (int, optional, e.g. 0 for flat roof);
# 1 - Roof, non heated; 2 - Roof, partially heated; 3- Roof, fully heated;
curr_type_cellar = district_data[i][
18] # optional Type of basement
# (int, optional, e.g. 1 for non heated basement 0 - No basement; 1 - basement, non heated; 2 - basement, partially heated; 3- basement, fully heated;
curr_dormer = district_data[i][
19] # optional Dormer (int, optional, 0: no dormer/ 1: dormer)
curr_construction_type = district_data[i][
20] # optional Construction Type(heavy/light, optional) (0 - heavy; 1 - light)
curr_method_3_nb = district_data[i][
21] # optional Method_3_nb (for usage of measured, weekly non-res. el. profile
curr_method_4_nb = district_data[i][
22] # optional Method_4_nb (for usage of measured, annual non-res. el. profile
else: # Single entry
# Extract data out of input file
curr_id = int(district_data[0]) # id / primary key of building
curr_x = district_data[1] # x-coordinate in m
curr_y = district_data[2] # y-coordinate in m
curr_build_type = int(
district_data[3]) # building type nb (int)
curr_nfa = district_data[4] # Net floor area in m2
curr_build_year = district_data[5] # Year of construction
curr_mod_year = district_data[
6] # optional (last year of modernization)
curr_th_e_demand = district_data[
7] # optional: Final thermal energy demand in kWh
# For residential buildings: Space heating only!
# For non-residential buildings: Space heating AND hot water! (SLP)
curr_el_e_demand = district_data[
8] # optional (Annual el. energy demand in kWh)
curr_pv_roof_area = district_data[
9] # optional (Usable pv roof area in m2)
curr_nb_of_apartments = district_data[
10] # optional (Number of apartments)
curr_nb_of_occupants = district_data[
11] # optional (Total number of occupants)
curr_nb_of_floors = district_data[
12] # optional (Number of floors above the ground)
curr_avg_height_of_floors = district_data[
13] # optional (Average Height of floors)
curr_central_ahu = district_data[
14] # optional (If building has a central air handling unit (AHU) or not (boolean))
curr_res_layout = district_data[
15] # optional Residential layout (int, optional, e.g. 0 for compact)
curr_nb_of_neighbour_bld = district_data[
16] # optional Neighbour Buildings (int, optional)
curr_type_attic = district_data[
17] # optional Type of attic (int, optional, e.g. 0 for flat roof);
# 1 - Roof, non heated; 2 - Roof, partially heated; 3- Roof, fully heated;
curr_type_cellar = district_data[
18] # optional Type of basement
# (int, optional, e.g. 1 for non heated basement 0 - No basement; 1 - basement, non heated; 2 - basement, partially heated; 3- basement, fully heated;
curr_dormer = district_data[
19] # optional Dormer (int, optional, 0: no dormer/ 1: dormer)
curr_construction_type = district_data[
20] # optional Construction Type(heavy/light, optional) (0 - heavy; 1 - light)
curr_method_3_nb = district_data[
21] # optional Method_3_nb (for usage of measured, weekly non-res. el. profile
curr_method_4_nb = district_data[
22] # optional Method_4_nb (for usage of measured, annual non-res. el. profile
print('Process building', curr_id)
print('########################################################')
# Assert functions
# ############################################################
assert curr_build_type >= 0
assert curr_nfa > 0
for m in range(5, 9):
if multi_data:
if district_data[i][m] is not None:
assert district_data[i][m] > 0
else:
if district_data[m] is not None:
assert district_data[m] > 0
if curr_nb_of_apartments is not None:
assert curr_nb_of_apartments > 0
# Convert to int
curr_nb_of_apartments = int(curr_nb_of_apartments)
if curr_nb_of_occupants is not None:
assert curr_nb_of_occupants > 0
# Convert curr_nb_of_occupants from float to int
curr_nb_of_occupants = int(curr_nb_of_occupants)
if (curr_nb_of_occupants is not None
and curr_nb_of_apartments is not None):
assert curr_nb_of_occupants / curr_nb_of_apartments <= 5, (
'Average share of occupants per apartment should ' +
'not exceed 5 persons! (Necessary for stochastic, el.' +
'profile generation.)')
if curr_method_3_nb is not None:
curr_method_3_nb >= 0
if curr_method_4_nb is not None:
curr_method_4_nb >= 0
if curr_build_type == 0 and curr_nb_of_apartments is None: # pragma: no cover
# Define single apartment, if nb of apartments is unknown
msg = 'Building ' + str(curr_id) + ' is residential, but' \
' does not have a number' \
' of apartments. Going' \
' to set nb. to 1.'
warnings.warn(msg)
curr_nb_of_apartments = 1
if (curr_build_type == 0 and curr_nb_of_occupants is None
and use_dhw and dhw_method == 2):
raise AssertionError('DHW profile cannot be generated' +
'for residential building without' +
'occupants (stochastic mode).' +
'Please check your input file ' +
'(missing number of occupants) ' +
'or disable dhw generation.')
# Check if TEASER inputs are defined
if call_teaser or th_gen_method == 3:
if curr_build_type == 0: # Residential
assert curr_nb_of_floors is not None
assert curr_avg_height_of_floors is not None
assert curr_central_ahu is not None
assert curr_res_layout is not None
assert curr_nb_of_neighbour_bld is not None
assert curr_type_attic is not None
assert curr_type_cellar is not None
assert curr_dormer is not None
assert curr_construction_type is not None
if curr_nb_of_floors is not None:
assert curr_nb_of_floors > 0
if curr_avg_height_of_floors is not None:
assert curr_avg_height_of_floors > 0
if curr_central_ahu is not None:
assert 0 <= curr_central_ahu <= 1
if curr_res_layout is not None:
assert 0 <= curr_res_layout <= 1
if curr_nb_of_neighbour_bld is not None:
assert 0 <= curr_nb_of_neighbour_bld <= 2
if curr_type_attic is not None:
assert 0 <= curr_type_attic <= 3
if curr_type_cellar is not None:
assert 0 <= curr_type_cellar <= 3
if curr_dormer is not None:
assert 0 <= curr_dormer <= 1
if curr_construction_type is not None:
assert 0 <= curr_construction_type <= 1
# Check building type (residential or non residential)
# #-------------------------------------------------------------
if curr_build_type == 0: # Is residential
print('Residential building')
# Get spec. net therm. demand value according to last year
# of modernization or build_year
# If year of modernization is defined, use curr_mod_year
if curr_mod_year is not None:
use_year = int(curr_mod_year)
else: # Use year of construction
use_year = int(curr_build_year)
# Get specific, thermal energy demand (based on use_year)
for j in range(len(start_year_column)):
if use_year >= start_year_column[j]:
curr_spec_th_demand = spec_th_dem_res_building[len(
spec_th_dem_res_building) - 1 - j][2]
break
# # Get spec. electr. demand
# if curr_nb_of_occupants is None:
# # USE AGEB values, if no number of occupants is given
# # Set specific demand value in kWh/m2*a
# curr_spec_el_demand = spec_el_dem_res_building[1]
# # Only valid for array like [2012 38.7]
# else:
# # Use Stromspiegel 2017 values
# # Calculate specific electric demand values depending
# # on number of occupants
#
# if curr_nb_of_apartments == 1:
# btype = 'sfh'
# elif curr_nb_of_apartments > 1:
# btype = 'mfh'
#
# # Average occupancy number per apartment
# curr_av_occ_per_app = \
# curr_nb_of_occupants / curr_nb_of_apartments
# print('Average number of occupants per apartment')
# print(round(curr_av_occ_per_app, ndigits=2))
#
# if curr_av_occ_per_app <= 5 and curr_av_occ_per_app > 0:
# # Correctur factor for non-int. av. number of
# # occupants (#19)
#
# # Divide annual el. energy demand with net floor area
# if btype == 'sfh':
# row_idx_low = math.ceil(curr_av_occ_per_app) - 1
# row_idx_high = math.floor(curr_av_occ_per_app) - 1
# elif btype == 'mfh':
# row_idx_low = math.ceil(curr_av_occ_per_app) - 1 \
# + 5
# row_idx_high = math.floor(curr_av_occ_per_app) - 1 \
# + 5
#
# cur_spec_el_dem_per_occ_high = \
# spec_el_dem_res_building_per_person[row_idx_high][2]
# cur_spec_el_dem_per_occ_low = \
# spec_el_dem_res_building_per_person[row_idx_low][2]
#
# print('Chosen reference spec. el. demands per person '
# 'in kWh/a (high and low value):')
# print(cur_spec_el_dem_per_occ_high)
# print(cur_spec_el_dem_per_occ_low)
#
# delta = round(curr_av_occ_per_app, 0) - \
# curr_av_occ_per_app
#
# if delta < 0:
# curr_spec_el_dem_occ = cur_spec_el_dem_per_occ_high + \
# (cur_spec_el_dem_per_occ_high -
# cur_spec_el_dem_per_occ_low) * delta
# elif delta > 0:
# curr_spec_el_dem_occ = cur_spec_el_dem_per_occ_low + \
# (cur_spec_el_dem_per_occ_high -
# cur_spec_el_dem_per_occ_low) * delta
# else:
# curr_spec_el_dem_occ = cur_spec_el_dem_per_occ_high
#
# # print('Calculated spec. el. demand per person in '
# # 'kWh/a:')
# # print(round(curr_spec_el_dem_occ, ndigits=2))
#
# # Specific el. demand per person (dependend on av.
# # number of occupants in each apartment)
# # --> Multiplied with number of occupants
# # --> Total el. energy demand in kWh
# # --> Divided with net floor area
# # --> Spec. el. energy demand in kWh/a
#
# curr_spec_el_demand = \
# curr_spec_el_dem_occ * curr_nb_of_occupants \
# / curr_nfa
#
# # print('Spec. el. energy demand in kWh/m2:')
# # print(curr_spec_el_demand)
#
# else:
# raise AssertionError('Invalid number of occupants')
# if el_random:
# if curr_nb_of_occupants is None:
# # Randomize curr_spec_el_demand with normal distribution
# # with curr_spec_el_demand as mean and 10 % standard dev.
# curr_spec_el_demand = \
# np.random.normal(loc=curr_spec_el_demand,
# scale=0.10 * curr_spec_el_demand)
# else:
# # Randomize rounding up and down of curr_av_occ_per_ap
# if round(curr_av_occ_per_app) > curr_av_occ_per_app:
# # Round up
# delta = round(curr_av_occ_per_app) - \
# curr_av_occ_per_app
# prob_r_up = 1 - delta
# rnb = random.random()
# if rnb < prob_r_up:
# use_occ = math.ceil(curr_av_occ_per_app)
# else:
# use_occ = math.floor(curr_av_occ_per_app)
#
# else:
# # Round down
# delta = curr_av_occ_per_app - \
# round(curr_av_occ_per_app)
# prob_r_down = 1 - delta
# rnb = random.random()
# if rnb < prob_r_down:
# use_occ = math.floor(curr_av_occ_per_app)
# else:
# use_occ = math.ceil(curr_av_occ_per_app)
#
# sample_el_per_app = \
# usunc.calc_sampling_el_demand_per_apartment(nb_samples=1,
# nb_persons=use_occ,
# type=btype)[0]
#
# # Divide sampled el. demand per apartment through
# # number of persons of apartment (according to
# # Stromspiegel 2017) and multiply this value with
# # actual number of persons in building to get
# # new total el. energy demand. Divide this value with
# # net floor area to get specific el. energy demand
# curr_spec_el_demand = \
# (sample_el_per_app / curr_av_occ_per_app) * \
# curr_nb_of_occupants / curr_nfa
# conversion of the construction_type from int to str
if curr_construction_type == 0:
new_curr_construction_type = 'heavy'
elif curr_construction_type == 1:
new_curr_construction_type = 'light'
else:
new_curr_construction_type = 'heavy'
# #-------------------------------------------------------------
else: # Non-residential
print('Non residential')
# Get spec. demands and slp types according to building_type
curr_spec_th_demand = \
spec_dem_and_slp_non_res[curr_build_type - 2][2]
curr_spec_el_demand = \
spec_dem_and_slp_non_res[curr_build_type - 2][3]
curr_th_slp_type = \
spec_dem_and_slp_non_res[curr_build_type - 2][4]
curr_el_slp_type = \
spec_dem_and_slp_non_res[curr_build_type - 2][5]
# Convert slp type integers into strings
curr_th_slp_type = convert_th_slp_int_and_str(curr_th_slp_type)
curr_el_slp_type = convert_el_slp_int_and_str(curr_el_slp_type)
# If curr_el_e_demand is not known, calculate it via spec.
# demand
if curr_el_e_demand is None:
curr_el_e_demand = curr_spec_el_demand * curr_nfa
# #-------------------------------------------------------------
# If curr_th_e_demand is known, recalc spec e. demand
if curr_th_e_demand is not None:
# Calc. spec. net thermal energy demand with efficiency factor
curr_spec_th_demand = eff_factor * curr_th_e_demand / curr_nfa
else:
# Spec. final energy demand is given, recalculate it to
# net thermal energy demand with efficiency factor
curr_spec_th_demand *= eff_factor
# # If curr_el_e_demand is not known, calculate it via spec. demand
# if curr_el_e_demand is None:
# curr_el_e_demand = curr_spec_el_demand * curr_nfa
if th_gen_method == 1 or th_gen_method == 2 or curr_build_type != 0:
print('Used specific thermal demand value in kWh/m2*a:')
print(curr_spec_th_demand)
# #-------------------------------------------------------------
# Generate BuildingExtended object
if curr_build_type == 0: # Residential
if curr_nb_of_apartments > 1: # Multi-family house
building = generate_res_building_multi_zone(environment,
net_floor_area=curr_nfa,
spec_th_demand=curr_spec_th_demand,
annual_el_demand=curr_el_e_demand,
th_gen_method=th_gen_method,
el_gen_method=el_gen_method,
nb_of_apartments=curr_nb_of_apartments,
use_dhw=use_dhw,
dhw_method=dhw_method,
total_number_occupants=curr_nb_of_occupants,
build_year=curr_build_year,
mod_year=curr_mod_year,
build_type=curr_build_type,
pv_use_area=curr_pv_roof_area,
height_of_floors=curr_avg_height_of_floors,
nb_of_floors=curr_nb_of_floors,
neighbour_buildings=curr_nb_of_neighbour_bld,
residential_layout=curr_res_layout,
attic=curr_type_attic,
cellar=curr_type_cellar,
construction_type=new_curr_construction_type,
dormer=curr_dormer,
dhw_volumen=dhw_volumen,
do_normalization=do_normalization,
slp_manipulate=slp_manipulate,
curr_central_ahu=curr_central_ahu,
dhw_random=dhw_random,
prev_heat_dev=prev_heat_dev,
season_mod=season_mod)
elif curr_nb_of_apartments == 1: # Single-family house
building = generate_res_building_single_zone(environment,
net_floor_area=curr_nfa,
spec_th_demand=curr_spec_th_demand,
annual_el_demand=curr_el_e_demand,
th_gen_method=th_gen_method,
el_gen_method=el_gen_method,
use_dhw=use_dhw,
dhw_method=dhw_method,
number_occupants=curr_nb_of_occupants,
build_year=curr_build_year,
mod_year=curr_mod_year,
build_type=curr_build_type,
pv_use_area=curr_pv_roof_area,
height_of_floors=curr_avg_height_of_floors,
nb_of_floors=curr_nb_of_floors,
neighbour_buildings=curr_nb_of_neighbour_bld,
residential_layout=curr_res_layout,
attic=curr_type_attic,
cellar=curr_type_cellar,
construction_type=new_curr_construction_type,
dormer=curr_dormer,
dhw_volumen=dhw_volumen,
do_normalization=do_normalization,
slp_manipulate=slp_manipulate,
curr_central_ahu=curr_central_ahu,
dhw_random=dhw_random,
prev_heat_dev=prev_heat_dev,
season_mod=season_mod)
else:
raise AssertionError('Wrong number of apartments')
else: # Non-residential
method_3_str = None
method_4_str = None
# Convert curr_method numbers, if not None
if curr_method_3_nb is not None:
method_3_str = \
convert_method_3_nb_into_str(int(curr_method_3_nb))
if curr_method_4_nb is not None:
method_4_str = \
convert_method_4_nb_into_str(int(curr_method_4_nb))
building = generate_nonres_building_single_zone(environment,
th_slp_type=curr_th_slp_type,
net_floor_area=curr_nfa,
spec_th_demand=curr_spec_th_demand,
annual_el_demand=curr_el_e_demand,
el_slp_type=curr_el_slp_type,
build_year=curr_build_year,
mod_year=curr_mod_year,
build_type=curr_build_type,
pv_use_area=curr_pv_roof_area,
method_3_type=method_3_str,
method_4_type=method_4_str,
height_of_floors=curr_avg_height_of_floors,
nb_of_floors=curr_nb_of_floors
)
# Generate position shapely point
position = point.Point(curr_x, curr_y)
if generation_mode == 0:
# Add building to city object
id = city_object.add_extended_building(
extended_building=building,
position=position, name=curr_id)
elif generation_mode == 1:
# Add building as entity to corresponding building node
# Positions should be (nearly) equal
assert position.x - city_object.nodes[int(curr_id)][
'position'].x <= 0.1
assert position.y - city_object.nodes[int(curr_id)][
'position'].y <= 0.1
city_object.nodes[int(curr_id)]['entity'] = building
id = curr_id
# Save annual thermal net heat energy demand for space heating
# to dict (used for normalization with VDI 6007 core)
dict_id_vdi_sh[id] = curr_spec_th_demand * curr_nfa
print('Finished processing of building', curr_id)
print('#######################################################')
print()
# If only single building should be processed, break loop
if multi_data is False:
break
# #-------------------------------------------------------------
print('Added all buildings with data to city object.')
# VDI 6007 simulation to generate space heating load curves
# Overwrites existing heat load curves (and annual heat demands)
if th_gen_method == 3:
print('Perform VDI 6007 space heating load simulation for every'
' building')
if el_gen_method == 1:
# Skip usage of occupancy and electrial load profiles
# as internal loads within VDI 6007 core
requ_profiles = False
else:
requ_profiles = True
tusage.calc_and_add_vdi_6007_loads_to_city(city=city_object,
air_vent_mode=air_vent_mode,
vent_factor=vent_factor,
t_set_heat=t_set_heat,
t_set_cool=t_set_cool,
t_night=t_night,
alpha_rad=None,
project_name=project_name,
requ_profiles=requ_profiles)
# Set call_teaser to False, as it is already included
# in calc_and_add_vdi_6007_loads_to_city
call_teaser = False
if vdi_sh_manipulate:
# Normalize VDI 6007 load curves to match given annual
# thermal space heating energy demand
for n in city_object.nodes():
if 'node_type' in city_object.nodes[n]:
# If node_type is building
if city_object.nodes[n]['node_type'] == 'building':
# If entity is kind building
if city_object.nodes[n][
'entity']._kind == 'building':
# Given value (user input)
ann_sh = dict_id_vdi_sh[n]
# Building pointer
curr_b = city_object.nodes[n]['entity']
# Current value on object
curr_sh = curr_b.get_annual_space_heat_demand()
norm_factor = ann_sh / curr_sh
# Do normalization
# Loop over apartments
for apart in curr_b.apartments:
# Normalize apartment space heating load
apart.demandSpaceheating.loadcurve \
*= norm_factor
print('Generation results:')
print('###########################################')
for n in city_object.nodes():
if 'node_type' in city_object.nodes[n]:
if city_object.nodes[n]['node_type'] == 'building':
if 'entity' in city_object.nodes[n]:
if city_object.nodes[n]['entity']._kind == 'building':
print('Results of building: ', n)
print('################################')
print()
curr_b = city_object.nodes[n]['entity']
sh_demand = curr_b.get_annual_space_heat_demand()
el_demand = curr_b.get_annual_el_demand()
dhw_demand = curr_b.get_annual_dhw_demand()
nfa = curr_b.net_floor_area
print('Annual space heating demand in kWh:')
print(sh_demand)
if nfa is not None and nfa != 0:
print(
'Specific space heating demand in kWh/m2:')
print(sh_demand / nfa)
print()
print('Annual electric demand in kWh:')
print(el_demand)
if nfa is not None and nfa != 0:
print('Specific electric demand in kWh/m2:')
print(el_demand / nfa)
nb_occ = curr_b.get_number_of_occupants()
if nb_occ is not None and nb_occ != 0:
print('Specific electric demand in kWh'
' per person and year:')
print(el_demand / nb_occ)
print()
print('Annual hot water demand in kWh:')
print(dhw_demand)
if nfa is not None and nfa != 0:
print('Specific hot water demand in kWh/m2:')
print(dhw_demand / nfa)
volume_year = dhw_demand * 1000 * 3600 / (
4200 * 35)
volume_day = volume_year / 365
if nb_occ is not None and nb_occ != 0:
v_person_day = \
volume_day / nb_occ
print('Hot water volume per person and day:')
print(v_person_day)
print()
# Create and add TEASER type_buildings to every building node
if call_teaser:
# Create TEASER project
project = tusage.create_teaser_project(name=teaser_proj_name,
merge_windows=merge_windows)
# Generate typeBuildings and add to city
tusage.create_teaser_typecity(project=project,
city=city_object,
generate_Output=False)
if do_save: # pragma: no cover
if path_save_city is None:
if pickle_city_filename is None:
msg = 'If path_save_city is None, pickle_city_filename' \
'cannot be None! Instead, filename has to be ' \
'defined to be able to save city object.'
raise AssertionError
this_path = os.path.dirname(os.path.abspath(__file__))
path_save_city = os.path.join(this_path, 'output',
pickle_city_filename)
try:
# Pickle and dump city objects
pickle.dump(city_object, open(path_save_city, 'wb'))
print('Pickled and dumped city object to: ')
print(path_save_city)
except:
warnings.warn('Could not pickle and save city object')
if do_log: # pragma: no cover
if pickle_city_filename is not None:
log_file.write('pickle_city_filename: ' +
str(pickle_city_filename)
+ '\n')
print('Wrote log file to: ' + str(log_path))
# Close log file
log_file.close()
# Visualize city
if show_city: # pragma: no cover
# Plot city district
try:
citvis.plot_city_district(city=city_object,
plot_street=False)
except:
warnings.warn('Could not plot city district.')
return city_object
if __name__ == '__main__':
    this_path = os.path.dirname(os.path.abspath(__file__))

    #  User inputs  #########################################################

    #  Choose generation mode
    #  ######################################################
    #  0 - Use csv/txt input to generate city district
    #  1 - Use csv/txt input file to enrich existing city object, based on
    #      osm call (city object should hold nodes, but no entities; the
    #      city generator adds building, apartment and load entities to
    #      the building nodes)
    generation_mode = 0

    #  Generate environment
    #  ######################################################
    year_timer = 2017
    year_co2 = 2017
    timestep = 3600  # Timestep in seconds
    # location = (51.529086, 6.944689)  # (latitude, longitude) of Bottrop
    location = (50.775346, 6.083887)  # (latitude, longitude) of Aachen
    altitude = 266  # Altitude of location in m (Aachen)

    #  Weather path (None --> use default TRY (region 5, 2010))
    try_path = None
    #  new_try has to be set to True, if you want to use TRY data of 2017
    #  or newer! Else: new_try = False
    new_try = False

    #  Space heating load generation
    #  ######################################################
    #  Thermal generation method
    #  1 - SLP (standardized load profile)
    #  2 - Load and rescale Modelica simulation profile
    #      (generated with TRY region 12, 2010)
    #  3 - VDI 6007 calculation (requires el_gen_method = 2)
    #  For non-residential buildings, SLPs are generated automatically.
    th_gen_method = 3

    #  Manipulate thermal slp to fit to space heating demand?
    #  (only relevant if th_gen_method == 1)
    #  True  - Set thermal power to zero in time spans where the average
    #          daily outdoor temperature is equal to or larger than 12 °C;
    #          rescale profile to the original demand value.
    #  False - Use original profile
    slp_manipulate = False

    #  Manipulate vdi space heating load to be normalized to given annual
    #  net space heating demand in kWh (only relevant if th_gen_method == 3)
    vdi_sh_manipulate = False

    #  Electrical load generation
    #  ######################################################
    #  Choose electric load profile generation method
    #  (1 - SLP; 2 - Stochastic)
    #  Stochastic profiles are only generated for residential buildings
    #  which have a defined number of occupants (otherwise, SLP is used).
    #  If the user defines method_3_nb or method_4_nb within the input file
    #  (only valid for non-residential buildings), the SLP is not used;
    #  instead, the corresponding measurement-based profile is loaded
    #  (see ElectricalDemand.py within pycity).
    el_gen_method = 2

    #  Do normalization of el. load profile
    #  (only relevant for el_gen_method == 2):
    #  rescale el. load profile to expected annual el. demand value in kWh
    do_normalization = True

    #  Randomize electrical demand value (residential buildings, only)
    el_random = True

    #  Prevent usage of electrical heating and hot water devices in
    #  electrical load generation (only relevant if el_gen_method == 2)
    #  True:  Prevent electrical heating device usage in profile generation
    #  False: Include electrical heating devices in el. load generation
    prev_heat_dev = True

    #  Use cosine function to increase winter lighting usage and reduce
    #  summer lighting usage in Richardson el. load profiles.
    #  season_mod is the factor used to rescale the cosine wave with the
    #  lighting power reference (max. lighting power).
    #  None --> do not use cosine wave to estimate seasonal influence
    #  (only relevant if el_gen_method == 2)
    season_mod = 0.3

    #  Hot water profile generation (residential buildings, only)
    #  ######################################################
    #  Generate DHW profiles? (True/False)
    use_dhw = True
    #  DHW generation method? (1 - Annex 42; 2 - Stochastic profiles)
    #  Choice of Annex 42 profiles is NOT recommended for multiple
    #  buildings, as the profile shape stays the same and only the scaling
    #  changes. Stochastic profiles require a defined nb of occupants per
    #  residential building.
    dhw_method = 2
    #  Define dhw volume per person and day (use_dhw=True)
    dhw_volumen = None
    #  Randomize chosen dhw_volume reference value by selecting a new value
    dhw_random = True

    #  Input file names and paths
    #  ######################################################
    filename = 'city_3_buildings.txt'
    # filename = 'city_clust_simple.txt'
    # filename = 'aachen_forsterlinde_mod_6.txt'
    # filename = 'aachen_frankenberg_mod_6.txt'
    # filename = 'aachen_huenefeld_mod_6.txt'
    # filename = 'aachen_kronenberg_mod_8.txt'
    # filename = 'aachen_preusweg_mod_8.txt'
    # filename = 'aachen_tuerme_mod_6.txt'

    #  Output filename
    pickle_city_filename = filename[:-4] + '.pkl'

    #  For generation_mode == 1:
    # city_osm_input = None
    # city_osm_input = 'aachen_forsterlinde_mod_7.pkl'
    city_osm_input = 'aachen_frankenberg_mod_7.pkl'
    # city_osm_input = 'aachen_huenefeld_mod_7.pkl'
    # city_osm_input = 'aachen_kronenberg_mod_7.pkl'
    # city_osm_input = 'aachen_preusweg_mod_7.pkl'
    # city_osm_input = 'aachen_tuerme_mod_7.pkl'

    #  Pickle and dump city object instance?
    do_save = True
    #  Path to save city object instance to (None --> uses .../output/...)
    path_save_city = None

    #  Efficiency factor of thermal energy systems; used to convert input
    #  values (final energy demand) to net energy demand
    eff_factor = 1

    #  For VDI 6007 simulation (th_gen_method == 3)
    #  #####################################
    t_set_heat = 20  # Heating set temperature in degree Celsius
    t_set_night = 16  # Night set back temperature in degree Celsius
    t_set_cool = 70  # Cooling set temperature in degree Celsius
    #  NOTE(review): 70 °C as cooling set point effectively disables active
    #  cooling -- confirm this value is intended.

    #  Air exchange rate (required for th_gen_method = 3 (VDI 6007 sim.))
    #  0 : Use constant value (vent_factor in 1/h)
    #  1 : Use deterministic, temperature-dependent profile
    #  2 : Use stochastic, user-dependent profile
    air_vent_mode = 2
    vent_factor = 0.3  # Constant ventilation rate in 1/h
    #  (only used if air_vent_mode is 0; otherwise, vent_factor is estimated
    #  based on the last year of modernization)

    #  TEASER typebuilding generation
    #  ######################################################
    #  Use TEASER to generate typebuildings? (requires additional
    #  attributes, such as nb_of_floors, net_floor_area, ...)
    call_teaser = False
    teaser_proj_name = filename[:-4]

    #  merge_windows : bool, optional
    #  Defines TEASER project setting for merge_windows_calc
    #  (default: False). If set to False, merge_windows_calc is set to
    #  False. If True, windows are merged into wall resistances.
    merge_windows = False

    txt_path = os.path.join(this_path, 'input', filename)

    if generation_mode == 1:
        path_city_osm_in = os.path.join(this_path, 'input', city_osm_input)

    #  Path for log file
    #  (fixed: removed unused 'log_f_name' alias and redundant str() call)
    log_file_name = 'log_' + filename
    log_f_path = os.path.join(this_path, 'output', log_file_name)

    #  End of user inputs  ################################################

    print('Run city generator for ', filename)

    assert generation_mode in [0, 1]
    if generation_mode == 1:
        assert city_osm_input is not None

    if air_vent_mode == 1 or air_vent_mode == 2:
        assert el_gen_method == 2, 'air_vent_mode 1 and 2 require occupancy' \
                                   ' profiles!'

    #  Load district_data file
    district_data = get_district_data_from_txt(txt_path)

    if generation_mode == 1:
        #  Load city input file
        #  (fixed: use context manager so the file handle is closed)
        with open(path_city_osm_in, mode='rb') as f_osm:
            city_osm = pickle.load(f_osm)
    else:
        #  Dummy value
        city_osm = None

    #  Generate city district
    city = run_city_generator(generation_mode=generation_mode,
                              timestep=timestep,
                              year_timer=year_timer,
                              year_co2=year_co2,
                              location=location,
                              th_gen_method=th_gen_method,
                              el_gen_method=el_gen_method, use_dhw=use_dhw,
                              dhw_method=dhw_method,
                              district_data=district_data,
                              pickle_city_filename=pickle_city_filename,
                              eff_factor=eff_factor, show_city=True,
                              try_path=try_path, altitude=altitude,
                              dhw_volumen=dhw_volumen,
                              do_normalization=do_normalization,
                              slp_manipulate=slp_manipulate,
                              call_teaser=call_teaser,
                              teaser_proj_name=teaser_proj_name,
                              air_vent_mode=air_vent_mode,
                              vent_factor=vent_factor,
                              t_set_heat=t_set_heat,
                              t_set_cool=t_set_cool,
                              t_night=t_set_night,
                              vdi_sh_manipulate=vdi_sh_manipulate,
                              city_osm=city_osm, el_random=el_random,
                              dhw_random=dhw_random,
                              prev_heat_dev=prev_heat_dev,
                              log_path=log_f_path,
                              season_mod=season_mod,
                              merge_windows=merge_windows,
                              new_try=new_try,
                              path_save_city=path_save_city,
                              do_save=do_save)
| [
"pycity_calc.environments.environment.EnvironmentExtended",
"pycity_calc.toolbox.teaser_usage.teaser_use.create_teaser_typecity",
"pycity_base.classes.demand.Occupancy.Occupancy",
"pycity_calc.toolbox.mc_helpers.user.user_unc_sampling.calc_sampling_dhw_per_apartment",
"pycity_calc.toolbox.modifiers.slp_th_m... | [((2465, 2541), 'os.path.join', 'os.path.join', (['src_path', '"""data"""', '"""BaseData"""', '"""Specific_Demand_Data"""', 'filename'], {}), "(src_path, 'data', 'BaseData', 'Specific_Demand_Data', filename)\n", (2477, 2541), False, 'import os\n'), ((2592, 2653), 'numpy.genfromtxt', 'np.genfromtxt', (['input_data_path'], {'delimiter': '"""\t"""', 'skip_header': '(1)'}), "(input_data_path, delimiter='\\t', skip_header=1)\n", (2605, 2653), True, 'import numpy as np\n'), ((13423, 13477), 'pycity_calc.environments.timer.TimerExtended', 'time.TimerExtended', ([], {'timestep': 'timestep', 'year': 'year_timer'}), '(timestep=timestep, year=year_timer)\n', (13441, 13477), True, 'import pycity_calc.environments.timer as time\n'), ((13493, 13603), 'pycity_base.classes.Weather.Weather', 'weath.Weather', (['timer'], {'useTRY': '(True)', 'pathTRY': 'try_path', 'location': 'location', 'altitude': 'altitude', 'new_try': 'new_try'}), '(timer, useTRY=True, pathTRY=try_path, location=location,\n altitude=altitude, new_try=new_try)\n', (13506, 13603), True, 'import pycity_base.classes.Weather as weath\n'), ((13670, 13697), 'pycity_calc.environments.germanmarket.GermanMarket', 'germanmarket.GermanMarket', ([], {}), '()\n', (13695, 13697), True, 'import pycity_calc.environments.germanmarket as germanmarket\n'), ((13710, 13738), 'pycity_calc.environments.co2emissions.Emissions', 'co2.Emissions', ([], {'year': 'year_co2'}), '(year=year_co2)\n', (13723, 13738), True, 'import pycity_calc.environments.co2emissions as co2\n'), ((13758, 13862), 'pycity_calc.environments.environment.EnvironmentExtended', 'env.EnvironmentExtended', ([], {'timer': 'timer', 'weather': 'weather', 'prices': 'market', 'location': 'location', 'co2em': 'co2em'}), '(timer=timer, weather=weather, prices=market,\n location=location, co2em=co2em)\n', (13781, 13862), True, 'import pycity_calc.environments.environment as env\n'), ((28493, 28589), 
'pycity_base.classes.demand.Apartment.Apartment', 'Apartment.Apartment', (['environment'], {'occupancy': 'occupancy_object', 'net_floor_area': 'net_floor_area'}), '(environment, occupancy=occupancy_object, net_floor_area\n =net_floor_area)\n', (28512, 28589), True, 'import pycity_base.classes.demand.Apartment as Apartment\n'), ((29238, 29679), 'pycity_calc.buildings.building.BuildingExtended', 'build_ex.BuildingExtended', (['environment'], {'build_year': 'build_year', 'mod_year': 'mod_year', 'build_type': 'build_type', 'roof_usabl_pv_area': 'pv_use_area', 'net_floor_area': 'net_floor_area', 'height_of_floors': 'height_of_floors', 'nb_of_floors': 'nb_of_floors', 'neighbour_buildings': 'neighbour_buildings', 'residential_layout': 'residential_layout', 'attic': 'attic', 'cellar': 'cellar', 'construction_type': 'construction_type', 'dormer': 'dormer', 'with_ahu': 'curr_central_ahu'}), '(environment, build_year=build_year, mod_year=\n mod_year, build_type=build_type, roof_usabl_pv_area=pv_use_area,\n net_floor_area=net_floor_area, height_of_floors=height_of_floors,\n nb_of_floors=nb_of_floors, neighbour_buildings=neighbour_buildings,\n residential_layout=residential_layout, attic=attic, cellar=cellar,\n construction_type=construction_type, dormer=dormer, with_ahu=\n curr_central_ahu)\n', (29263, 29679), True, 'import pycity_calc.buildings.building as build_ex\n'), ((38773, 39214), 'pycity_calc.buildings.building.BuildingExtended', 'build_ex.BuildingExtended', (['environment'], {'build_year': 'build_year', 'mod_year': 'mod_year', 'build_type': 'build_type', 'roof_usabl_pv_area': 'pv_use_area', 'net_floor_area': 'net_floor_area', 'height_of_floors': 'height_of_floors', 'nb_of_floors': 'nb_of_floors', 'neighbour_buildings': 'neighbour_buildings', 'residential_layout': 'residential_layout', 'attic': 'attic', 'cellar': 'cellar', 'construction_type': 'construction_type', 'dormer': 'dormer', 'with_ahu': 'curr_central_ahu'}), '(environment, build_year=build_year, mod_year=\n 
mod_year, build_type=build_type, roof_usabl_pv_area=pv_use_area,\n net_floor_area=net_floor_area, height_of_floors=height_of_floors,\n nb_of_floors=nb_of_floors, neighbour_buildings=neighbour_buildings,\n residential_layout=residential_layout, attic=attic, cellar=cellar,\n construction_type=construction_type, dormer=dormer, with_ahu=\n curr_central_ahu)\n', (38798, 39214), True, 'import pycity_calc.buildings.building as build_ex\n'), ((53349, 53485), 'pycity_base.classes.demand.SpaceHeating.SpaceHeating', 'SpaceHeating.SpaceHeating', (['environment'], {'method': '(1)', 'profile_type': 'th_slp_type', 'livingArea': 'net_floor_area', 'specificDemand': 'spec_th_demand'}), '(environment, method=1, profile_type=th_slp_type,\n livingArea=net_floor_area, specificDemand=spec_th_demand)\n', (53374, 53485), True, 'import pycity_base.classes.demand.SpaceHeating as SpaceHeating\n'), ((54937, 54969), 'pycity_base.classes.demand.Apartment.Apartment', 'Apartment.Apartment', (['environment'], {}), '(environment)\n', (54956, 54969), True, 'import pycity_base.classes.demand.Apartment as Apartment\n'), ((55136, 55376), 'pycity_calc.buildings.building.BuildingExtended', 'build_ex.BuildingExtended', (['environment'], {'net_floor_area': 'net_floor_area', 'build_year': 'build_year', 'mod_year': 'mod_year', 'build_type': 'build_type', 'roof_usabl_pv_area': 'pv_use_area', 'height_of_floors': 'height_of_floors', 'nb_of_floors': 'nb_of_floors'}), '(environment, net_floor_area=net_floor_area,\n build_year=build_year, mod_year=mod_year, build_type=build_type,\n roof_usabl_pv_area=pv_use_area, height_of_floors=height_of_floors,\n nb_of_floors=nb_of_floors)\n', (55161, 55376), True, 'import pycity_calc.buildings.building as build_ex\n'), ((58033, 58088), 'numpy.genfromtxt', 'np.genfromtxt', (['path'], {'delimiter': 'delimiter', 'skip_header': '(1)'}), '(path, delimiter=delimiter, skip_header=1)\n', (58046, 58088), True, 'import numpy as np\n'), ((127597, 127639), 'os.path.join', 'os.path.join', 
(['this_path', '"""input"""', 'filename'], {}), "(this_path, 'input', filename)\n", (127609, 127639), False, 'import os\n'), ((127843, 127891), 'os.path.join', 'os.path.join', (['this_path', '"""output"""', 'log_file_name'], {}), "(this_path, 'output', log_file_name)\n", (127855, 127891), False, 'import os\n'), ((1823, 1841), 'warnings.warn', 'warnings.warn', (['msg'], {}), '(msg)\n', (1836, 1841), False, 'import warnings\n'), ((3535, 3553), 'warnings.warn', 'warnings.warn', (['msg'], {}), '(msg)\n', (3548, 3553), False, 'import warnings\n'), ((5029, 5047), 'warnings.warn', 'warnings.warn', (['msg'], {}), '(msg)\n', (5042, 5047), False, 'import warnings\n'), ((5938, 5956), 'warnings.warn', 'warnings.warn', (['msg'], {}), '(msg)\n', (5951, 5956), False, 'import warnings\n'), ((6571, 6589), 'warnings.warn', 'warnings.warn', (['msg'], {}), '(msg)\n', (6584, 6589), False, 'import warnings\n'), ((7168, 7186), 'warnings.warn', 'warnings.warn', (['msg'], {}), '(msg)\n', (7181, 7186), False, 'import warnings\n'), ((22535, 22671), 'pycity_base.classes.demand.SpaceHeating.SpaceHeating', 'SpaceHeating.SpaceHeating', (['environment'], {'method': '(1)', 'profile_type': 'th_slp_type', 'livingArea': 'net_floor_area', 'specificDemand': 'spec_th_demand'}), '(environment, method=1, profile_type=th_slp_type,\n livingArea=net_floor_area, specificDemand=spec_th_demand)\n', (22560, 22671), True, 'import pycity_base.classes.demand.SpaceHeating as SpaceHeating\n'), ((24491, 24843), 'pycity_base.classes.demand.ElectricalDemand.ElectricalDemand', 'ElectricalDemand.ElectricalDemand', (['environment'], {'method': '(2)', 'total_nb_occupants': 'number_occupants', 'randomizeAppliances': '(True)', 'lightConfiguration': '(0)', 'annualDemand': 'annual_el_demand', 'occupancy': 'occupancy_object.occupancy', 'do_normalization': 'do_normalization', 'prev_heat_dev': 'prev_heat_dev', 'season_light_mod': 'season_light_mod', 'light_mod_fac': 'season_mod'}), '(environment, method=2, total_nb_occupants\n 
=number_occupants, randomizeAppliances=True, lightConfiguration=0,\n annualDemand=annual_el_demand, occupancy=occupancy_object.occupancy,\n do_normalization=do_normalization, prev_heat_dev=prev_heat_dev,\n season_light_mod=season_light_mod, light_mod_fac=season_mod)\n', (24524, 24843), True, 'import pycity_base.classes.demand.ElectricalDemand as ElectricalDemand\n'), ((25468, 25585), 'pycity_base.classes.demand.ElectricalDemand.ElectricalDemand', 'ElectricalDemand.ElectricalDemand', (['environment'], {'method': '(1)', 'annualDemand': 'annual_el_demand', 'profileType': 'el_slp_type'}), '(environment, method=1, annualDemand=\n annual_el_demand, profileType=el_slp_type)\n', (25501, 25585), True, 'import pycity_base.classes.demand.ElectricalDemand as ElectricalDemand\n'), ((38563, 38581), 'warnings.warn', 'warnings.warn', (['msg'], {}), '(msg)\n', (38576, 38581), False, 'import warnings\n'), ((48543, 48639), 'pycity_base.classes.demand.Apartment.Apartment', 'Apartment.Apartment', (['environment'], {'occupancy': 'occupancy_object', 'net_floor_area': 'apartment_area'}), '(environment, occupancy=occupancy_object, net_floor_area\n =apartment_area)\n', (48562, 48639), True, 'import pycity_base.classes.demand.Apartment as Apartment\n'), ((53752, 53896), 'pycity_base.classes.demand.ElectricalDemand.ElectricalDemand', 'ElectricalDemand.ElectricalDemand', (['environment'], {'method': '(3)', 'annualDemand': 'annual_el_demand', 'do_normalization': '(True)', 'method_3_type': 'method_3_type'}), '(environment, method=3, annualDemand=\n annual_el_demand, do_normalization=True, method_3_type=method_3_type)\n', (53785, 53896), True, 'import pycity_base.classes.demand.ElectricalDemand as ElectricalDemand\n'), ((58165, 58188), 'numpy.isnan', 'np.isnan', (['district_data'], {}), '(district_data)\n', (58173, 58188), True, 'import numpy as np\n'), ((74097, 74115), 'warnings.warn', 'warnings.warn', (['msg'], {}), '(msg)\n', (74110, 74115), False, 'import warnings\n'), ((120436, 120461), 
'os.path.abspath', 'os.path.abspath', (['__file__'], {}), '(__file__)\n', (120451, 120461), False, 'import os\n'), ((127696, 127744), 'os.path.join', 'os.path.join', (['this_path', '"""input"""', 'city_osm_input'], {}), "(this_path, 'input', city_osm_input)\n", (127708, 127744), False, 'import os\n'), ((20962, 21025), 'pycity_base.classes.demand.Occupancy.Occupancy', 'occup.Occupancy', (['environment'], {'number_occupants': 'number_occupants'}), '(environment, number_occupants=number_occupants)\n', (20977, 21025), True, 'import pycity_base.classes.demand.Occupancy as occup\n'), ((21247, 21333), 'pycity_base.classes.demand.Occupancy.Occupancy', 'occup.Occupancy', (['environment'], {'number_occupants': 'number_occupants', 'do_profile': '(False)'}), '(environment, number_occupants=number_occupants, do_profile=\n False)\n', (21262, 21333), True, 'import pycity_base.classes.demand.Occupancy as occup\n'), ((21575, 21705), 'warnings.warn', 'warnings.warn', (["('Stochastic el. profile cannot be generated ' +\n 'due to missing number of occupants. ' + 'SLP is used instead.')"], {}), "('Stochastic el. profile cannot be generated ' +\n 'due to missing number of occupants. 
' + 'SLP is used instead.')\n", (21588, 21705), False, 'import warnings\n'), ((23089, 23192), 'pycity_calc.toolbox.modifiers.slp_th_manipulator.slp_th_manipulator', 'slpman.slp_th_manipulator', (['timestep'], {'th_slp_curve': 'heat_power_curve.loadcurve', 'temp_array': 'temp_array'}), '(timestep, th_slp_curve=heat_power_curve.loadcurve,\n temp_array=temp_array)\n', (23114, 23192), True, 'import pycity_calc.toolbox.modifiers.slp_th_manipulator as slpman\n'), ((23421, 23531), 'pycity_base.classes.demand.SpaceHeating.SpaceHeating', 'SpaceHeating.SpaceHeating', (['environment'], {'method': '(3)', 'livingArea': 'net_floor_area', 'specificDemand': 'spec_th_demand'}), '(environment, method=3, livingArea=net_floor_area,\n specificDemand=spec_th_demand)\n', (23446, 23531), True, 'import pycity_base.classes.demand.SpaceHeating as SpaceHeating\n'), ((26339, 26498), 'pycity_base.classes.demand.DomesticHotWater.DomesticHotWater', 'DomesticHotWater.DomesticHotWater', (['environment'], {'tFlow': '(60)', 'thermal': '(True)', 'method': '(1)', 'dailyConsumption': '(dhw_volumen * number_occupants)', 'supplyTemperature': '(25)'}), '(environment, tFlow=60, thermal=True,\n method=1, dailyConsumption=dhw_volumen * number_occupants,\n supplyTemperature=25)\n', (26372, 26498), True, 'import pycity_base.classes.demand.DomesticHotWater as DomesticHotWater\n'), ((26953, 27097), 'pycity_base.classes.demand.DomesticHotWater.DomesticHotWater', 'DomesticHotWater.DomesticHotWater', (['environment'], {'tFlow': '(60)', 'thermal': '(True)', 'method': '(2)', 'supplyTemperature': '(25)', 'occupancy': 'occupancy_object.occupancy'}), '(environment, tFlow=60, thermal=True,\n method=2, supplyTemperature=25, occupancy=occupancy_object.occupancy)\n', (26986, 27097), True, 'import pycity_base.classes.demand.DomesticHotWater as DomesticHotWater\n'), ((42319, 42364), 'pycity_calc.toolbox.mc_helpers.user.user_unc_sampling.calc_sampling_occ_per_app', 'usunc.calc_sampling_occ_per_app', ([], {'nb_samples': '(1)'}), 
'(nb_samples=1)\n', (42350, 42364), True, 'import pycity_calc.toolbox.mc_helpers.user.user_unc_sampling as usunc\n'), ((42655, 42791), 'pycity_base.classes.demand.SpaceHeating.SpaceHeating', 'SpaceHeating.SpaceHeating', (['environment'], {'method': '(1)', 'profile_type': 'th_slp_type', 'livingArea': 'apartment_area', 'specificDemand': 'spec_th_demand'}), '(environment, method=1, profile_type=th_slp_type,\n livingArea=apartment_area, specificDemand=spec_th_demand)\n', (42680, 42791), True, 'import pycity_base.classes.demand.SpaceHeating as SpaceHeating\n'), ((44737, 45097), 'pycity_base.classes.demand.ElectricalDemand.ElectricalDemand', 'ElectricalDemand.ElectricalDemand', (['environment'], {'method': '(2)', 'total_nb_occupants': 'curr_number_occupants', 'randomizeAppliances': '(True)', 'lightConfiguration': '(0)', 'annualDemand': 'annual_el_demand_ap', 'occupancy': 'occupancy_object.occupancy', 'do_normalization': 'do_normalization', 'prev_heat_dev': 'prev_heat_dev', 'season_light_mod': 'season_light_mod', 'light_mod_fac': 'season_mod'}), '(environment, method=2, total_nb_occupants\n =curr_number_occupants, randomizeAppliances=True, lightConfiguration=0,\n annualDemand=annual_el_demand_ap, occupancy=occupancy_object.occupancy,\n do_normalization=do_normalization, prev_heat_dev=prev_heat_dev,\n season_light_mod=season_light_mod, light_mod_fac=season_mod)\n', (44770, 45097), True, 'import pycity_base.classes.demand.ElectricalDemand as ElectricalDemand\n'), ((45769, 45889), 'pycity_base.classes.demand.ElectricalDemand.ElectricalDemand', 'ElectricalDemand.ElectricalDemand', (['environment'], {'method': '(1)', 'annualDemand': 'annual_el_demand_ap', 'profileType': 'el_slp_type'}), '(environment, method=1, annualDemand=\n annual_el_demand_ap, profileType=el_slp_type)\n', (45802, 45889), True, 'import pycity_base.classes.demand.ElectricalDemand as ElectricalDemand\n'), ((54152, 54296), 'pycity_base.classes.demand.ElectricalDemand.ElectricalDemand', 
'ElectricalDemand.ElectricalDemand', (['environment'], {'method': '(4)', 'annualDemand': 'annual_el_demand', 'do_normalization': '(True)', 'method_4_type': 'method_4_type'}), '(environment, method=4, annualDemand=\n annual_el_demand, do_normalization=True, method_4_type=method_4_type)\n', (54185, 54296), True, 'import pycity_base.classes.demand.ElectricalDemand as ElectricalDemand\n'), ((54646, 54763), 'pycity_base.classes.demand.ElectricalDemand.ElectricalDemand', 'ElectricalDemand.ElectricalDemand', (['environment'], {'method': '(1)', 'annualDemand': 'annual_el_demand', 'profileType': 'el_slp_type'}), '(environment, method=1, annualDemand=\n annual_el_demand, profileType=el_slp_type)\n', (54679, 54763), True, 'import pycity_base.classes.demand.ElectricalDemand as ElectricalDemand\n'), ((59165, 59256), 'pycity_calc.toolbox.mc_helpers.user.user_unc_sampling.calc_sampling_el_demand_per_apartment', 'usunc.calc_sampling_el_demand_per_apartment', ([], {'nb_samples': '(1)', 'nb_persons': 'nb_occ', 'type': 'type'}), '(nb_samples=1, nb_persons=nb_occ,\n type=type)\n', (59208, 59256), True, 'import pycity_calc.toolbox.mc_helpers.user.user_unc_sampling as usunc\n'), ((61186, 61273), 'pycity_calc.toolbox.mc_helpers.user.user_unc_sampling.calc_sampling_dhw_per_apartment', 'usunc.calc_sampling_dhw_per_apartment', ([], {'nb_samples': '(1)', 'nb_persons': 'nb_occ', 'b_type': 'type'}), '(nb_samples=1, nb_persons=nb_occ,\n b_type=type)\n', (61223, 61273), True, 'import pycity_calc.toolbox.mc_helpers.user.user_unc_sampling as usunc\n'), ((74447, 74500), 'os.path.join', 'os.path.join', (['this_path', '"""output"""', '"""city_gen_log.txt"""'], {}), "(this_path, 'output', 'city_gen_log.txt')\n", (74459, 74500), False, 'import os\n'), ((80450, 80484), 'pycity_calc.cities.city.City', 'city.City', ([], {'environment': 'environment'}), '(environment=environment)\n', (80459, 80484), True, 'import pycity_calc.cities.city as city\n'), ((111236, 111263), 'shapely.geometry.point.Point', 
'point.Point', (['curr_x', 'curr_y'], {}), '(curr_x, curr_y)\n', (111247, 111263), True, 'import shapely.geometry.point as point\n'), ((113182, 113445), 'pycity_calc.toolbox.teaser_usage.teaser_use.calc_and_add_vdi_6007_loads_to_city', 'tusage.calc_and_add_vdi_6007_loads_to_city', ([], {'city': 'city_object', 'air_vent_mode': 'air_vent_mode', 'vent_factor': 'vent_factor', 't_set_heat': 't_set_heat', 't_set_cool': 't_set_cool', 't_night': 't_night', 'alpha_rad': 'None', 'project_name': 'project_name', 'requ_profiles': 'requ_profiles'}), '(city=city_object, air_vent_mode=\n air_vent_mode, vent_factor=vent_factor, t_set_heat=t_set_heat,\n t_set_cool=t_set_cool, t_night=t_night, alpha_rad=None, project_name=\n project_name, requ_profiles=requ_profiles)\n', (113224, 113445), True, 'import pycity_calc.toolbox.teaser_usage.teaser_use as tusage\n'), ((118374, 118459), 'pycity_calc.toolbox.teaser_usage.teaser_use.create_teaser_project', 'tusage.create_teaser_project', ([], {'name': 'teaser_proj_name', 'merge_windows': 'merge_windows'}), '(name=teaser_proj_name, merge_windows=merge_windows\n )\n', (118402, 118459), True, 'import pycity_calc.toolbox.teaser_usage.teaser_use as tusage\n'), ((118573, 118664), 'pycity_calc.toolbox.teaser_usage.teaser_use.create_teaser_typecity', 'tusage.create_teaser_typecity', ([], {'project': 'project', 'city': 'city_object', 'generate_Output': '(False)'}), '(project=project, city=city_object,\n generate_Output=False)\n', (118602, 118664), True, 'import pycity_calc.toolbox.teaser_usage.teaser_use as tusage\n'), ((40675, 40743), 'pycity_base.classes.demand.Occupancy.Occupancy', 'occup.Occupancy', (['environment'], {'number_occupants': 'curr_number_occupants'}), '(environment, number_occupants=curr_number_occupants)\n', (40690, 40743), True, 'import pycity_base.classes.demand.Occupancy as occup\n'), ((40946, 41036), 'pycity_base.classes.demand.Occupancy.Occupancy', 'occup.Occupancy', (['environment'], {'number_occupants': 'curr_number_occupants', 
'do_profile': '(False)'}), '(environment, number_occupants=curr_number_occupants,\n do_profile=False)\n', (40961, 41036), True, 'import pycity_base.classes.demand.Occupancy as occup\n'), ((41252, 41382), 'warnings.warn', 'warnings.warn', (["('Stochastic el. profile cannot be generated ' +\n 'due to missing number of occupants. ' + 'SLP is used instead.')"], {}), "('Stochastic el. profile cannot be generated ' +\n 'due to missing number of occupants. ' + 'SLP is used instead.')\n", (41265, 41382), False, 'import warnings\n'), ((43185, 43288), 'pycity_calc.toolbox.modifiers.slp_th_manipulator.slp_th_manipulator', 'slpman.slp_th_manipulator', (['timestep'], {'th_slp_curve': 'heat_power_curve.loadcurve', 'temp_array': 'temp_array'}), '(timestep, th_slp_curve=heat_power_curve.loadcurve,\n temp_array=temp_array)\n', (43210, 43288), True, 'import pycity_calc.toolbox.modifiers.slp_th_manipulator as slpman\n'), ((43541, 43651), 'pycity_base.classes.demand.SpaceHeating.SpaceHeating', 'SpaceHeating.SpaceHeating', (['environment'], {'method': '(3)', 'livingArea': 'apartment_area', 'specificDemand': 'spec_th_demand'}), '(environment, method=3, livingArea=apartment_area,\n specificDemand=spec_th_demand)\n', (43566, 43651), True, 'import pycity_base.classes.demand.SpaceHeating as SpaceHeating\n'), ((46734, 46898), 'pycity_base.classes.demand.DomesticHotWater.DomesticHotWater', 'DomesticHotWater.DomesticHotWater', (['environment'], {'tFlow': '(60)', 'thermal': '(True)', 'method': '(1)', 'dailyConsumption': '(dhw_volumen * curr_number_occupants)', 'supplyTemperature': '(25)'}), '(environment, tFlow=60, thermal=True,\n method=1, dailyConsumption=dhw_volumen * curr_number_occupants,\n supplyTemperature=25)\n', (46767, 46898), True, 'import pycity_base.classes.demand.DomesticHotWater as DomesticHotWater\n'), ((47118, 47262), 'pycity_base.classes.demand.DomesticHotWater.DomesticHotWater', 'DomesticHotWater.DomesticHotWater', (['environment'], {'tFlow': '(60)', 'thermal': '(True)', 
'method': '(2)', 'supplyTemperature': '(25)', 'occupancy': 'occupancy_object.occupancy'}), '(environment, tFlow=60, thermal=True,\n method=2, supplyTemperature=25, occupancy=occupancy_object.occupancy)\n', (47151, 47262), True, 'import pycity_base.classes.demand.DomesticHotWater as DomesticHotWater\n'), ((74397, 74422), 'os.path.abspath', 'os.path.abspath', (['__file__'], {}), '(__file__)\n', (74412, 74422), False, 'import os\n'), ((89838, 89856), 'warnings.warn', 'warnings.warn', (['msg'], {}), '(msg)\n', (89851, 89856), False, 'import warnings\n'), ((119241, 119296), 'os.path.join', 'os.path.join', (['this_path', '"""output"""', 'pickle_city_filename'], {}), "(this_path, 'output', pickle_city_filename)\n", (119253, 119296), False, 'import os\n'), ((120163, 120225), 'pycity_calc.visualization.city_visual.plot_city_district', 'citvis.plot_city_district', ([], {'city': 'city_object', 'plot_street': '(False)'}), '(city=city_object, plot_street=False)\n', (120188, 120225), True, 'import pycity_calc.visualization.city_visual as citvis\n'), ((2400, 2425), 'os.path.abspath', 'os.path.abspath', (['__file__'], {}), '(__file__)\n', (2415, 2425), False, 'import os\n'), ((119181, 119206), 'os.path.abspath', 'os.path.abspath', (['__file__'], {}), '(__file__)\n', (119196, 119206), False, 'import os\n'), ((119613, 119667), 'warnings.warn', 'warnings.warn', (['"""Could not pickle and save city object"""'], {}), "('Could not pickle and save city object')\n", (119626, 119667), False, 'import warnings\n'), ((120304, 120350), 'warnings.warn', 'warnings.warn', (['"""Could not plot city district."""'], {}), "('Could not plot city district.')\n", (120317, 120350), False, 'import warnings\n'), ((74728, 74751), 'datetime.datetime.now', 'datetime.datetime.now', ([], {}), '()\n', (74749, 74751), False, 'import datetime\n')] |
# <NAME>, March 2020
# Common code for PyTorch implementation of Copy-Pasting GAN
import copy
import itertools
import matplotlib.pyplot as plt
import numpy as np
import os, platform, time
import torch
import torch.nn as nn
import torch.nn.functional as F
import torchvision
import torchvision.transforms as transforms
from PIL import Image, ImageDraw
from torch.utils.data import Dataset
from tqdm import tqdm
def read_image_robust(img_path, monochromatic=False):
    '''
    Load an image from disk without ever raising; returns (image, success).
    success is False for unreadable/corrupt files, images with negative
    strides, or (when monochromatic is False) non-RGB images. When
    monochromatic is True, a trailing singleton channel axis is added so the
    array always has shape (H, W, 1).
    '''
    try:
        image = np.array(Image.open(img_path)).copy()  # always uint8
    except IOError:
        # Unreadable or corrupt file
        return None, False
    if np.any(np.array(image.strides) < 0):
        # Negative stride survived the copy; reject
        return image, False
    if monochromatic:
        # One channel <=> only one ground truth
        return image[:, :, np.newaxis], True
    if image.ndim != 3 or image.shape[2] != 3:
        # Not an RGB image
        return image, False
    return image, True
def paint_squares(image, noisy=False, channels=10):
    '''
    Stamp 1-4 random 9x9 squares onto a copy of `image` to create an
    artificial foreground, together with one per-object ground-truth mask
    channel. Returns (foreground, masks, object_count); `image` itself is
    left untouched.
    '''
    height, width = image.shape[0], image.shape[1]
    canvas = image.copy()  # keep the original background intact
    num_objects = np.random.randint(1, 5)  # [1, 4] inclusive
    masks = np.zeros((height, width, channels), dtype=np.uint8)
    side = 9
    for obj in range(num_objects):
        # Top-left corner chosen so the square always fits inside the frame
        left = np.random.randint(0, width - side + 1)
        top = np.random.randint(0, height - side + 1)
        masks[top:top + side, left:left + side, obj] = 255
        if noisy:
            # Every pixel gets an independent fully saturated (extremal) color
            canvas[top:top + side, left:left + side] = np.random.choice([0, 255], (side, side, 3))
        else:
            # One fixed (not necessarily saturated) color for the whole square
            canvas[top:top + side, left:left + side] = np.random.randint(0, 256, 3)
    return canvas, masks, num_objects
def create_random_gfake_mask(width, height):
    '''
    Generate a random filled-polygon mask of shape (height, width), uint8
    with values {0, 255}. A center point and 4-6 vertices are sampled in
    polar coordinates around it, then rasterized with PIL. See Appendix D
    of the Copy-Pasting GAN paper for the procedure this follows.
    '''
    # Polygon center, kept away from the border (both coords in [0.1, 0.9])
    x0, y0 = np.random.rand(2) * 0.8 + 0.1
    num_verts = np.random.randint(4, 7)
    # TODO possible improvement: allow up to more vertices?
    # TODO possible improvement: encourage convex (currently many "sharp" objects)
    # Radii in [0.1, 0.5] (relative units)
    radii = np.random.rand(num_verts) * 0.4 + 0.1
    # radii = np.random.rand(num_verts) * 0.8 + 0.2 # TODO: not very clear from paper
    # Sorted angles so the vertices wind around the center in order
    angles = np.sort(np.random.rand(num_verts)) * 2.0 * np.pi
    poly_polar = list(zip(radii, angles))
    # Convert relative polar coordinates to absolute pixel coordinates
    poly_cart = [(int(width * (x0 + r * np.cos(a)) / 1),
                  int(height * (y0 + r * np.sin(a)) / 1)) for (r, a) in poly_polar]
    # poly_cart = [(x1, y1), (x2, y2), ...]
    img = Image.new('L', (width, height), 0)
    ImageDraw.Draw(img).polygon(poly_cart, outline=1, fill=255)
    mask = np.array(img, dtype='uint8')
    assert(mask.shape == (height, width))
    return mask
def apply_border_zero(masks):
    '''
    Zero out the one-pixel outer border of the trailing two (spatial)
    dimensions of `masks`. Works in place and also returns `masks`.
    Generalized from the original ndim-2/3/4 special cases: Ellipsis
    indexing targets the last two axes, so any layout such as (H, W),
    (B, H, W) or (B, C, H, W) — and any higher rank — is supported.
    Works for both numpy arrays and torch tensors.

    Raises:
        Exception: if `masks` has fewer than two dimensions.
    '''
    ndim = len(masks.shape)
    if ndim < 2:
        raise Exception('Mask has too few dimensions')
    # `...` spans all leading (batch/channel) axes; the explicit indices
    # always address the last two (spatial) axes.
    masks[..., 0, :] = 0
    masks[..., -1, :] = 0
    masks[..., :, 0] = 0
    masks[..., :, -1] = 0
    return masks
def copy_paste(fores, masks, backs, border_zero=True):
    '''
    Alpha-composite foregrounds onto backgrounds with (soft) masks:
        out = m * fore + (1 - m) * back.
    If border_zero is True, the one-pixel outer border of the mask is
    forced to zero first (via apply_border_zero). The input masks are not
    modified.
    '''
    # TODO possible improvement: poisson blending
    blend = masks.clone()
    if border_zero:
        blend = apply_border_zero(blend)
    inverse = 1.0 - blend
    return blend * fores + inverse * backs
class MyCopyPasteDataset(Dataset):
    '''
    Custom dataset class with foreground, background, and optional mask folders as image sources.
    Only one object may appear per image, since the object count is not kept track of.
    Returns irrelevant foreground anti-shortcuts as well. Enforces color (RGB) images.
    Each item is a dict with keys: fore, back, irrel, object_cnt, gfake_mask,
    comp_gfake, and — only when mask_dir was given — mask.
    '''
    def __init__(self, fore_dir, back_dir, mask_dir=None, rand_horz_flip=True, post_resize=-1, center_crop=False):
        '''
        fore_dir: directory with foreground images.
        back_dir: directory with background images (independent file set).
        mask_dir: optional directory with ground truth masks; file stems must
            match the foreground stems, with either .jpg or .png extension.
        rand_horz_flip: randomly mirror images horizontally (fore and mask
            are flipped together so they stay aligned).
        post_resize: if > 0, resize loaded images to this size.
        center_crop: if True, resize the short side and take a square center
            crop; otherwise resize both dimensions (possibly distorting).
        '''
        self.fore_dir = fore_dir
        self.back_dir = back_dir
        self.rand_horz_flip = rand_horz_flip
        if post_resize <= 0:
            self.post_tf = transforms.ToTensor() # converts [0, 255] to [0.0, 1.0]
        elif center_crop:
            # Resize + square center crop
            self.post_tf = transforms.Compose([
                transforms.ToPILImage(),
                transforms.Resize(post_resize),
                transforms.CenterCrop(post_resize),
                transforms.ToTensor()
            ])
        else:
            # Resize both dimensions, possibly distorting the images
            self.post_tf = transforms.Compose([
                transforms.ToPILImage(),
                transforms.Resize((post_resize, post_resize)),
                transforms.ToTensor()
            ])
        self.has_masks = (mask_dir is not None)
        # Load all file paths; file names must be the same across all 2 or 3 given directories
        # self.all_fore_files = []
        # self.all_mask_files = []
        # self.all_back_files = []
        # for fn in os.listdir(fore_dir):
        #     fore_fp = os.path.join(fore_dir, fn)
        #     if os.path.isfile(fore_fp):
        #         back_fp = os.path.join(back_dir, fn)
        #         assert(os.path.isfile(back_fp))
        #         self.all_fore_files.append(fore_fp)
        #         self.all_back_files.append(back_fp)
        #         if self.has_masks:
        #             mask_fp = os.path.join(mask_dir, fn)
        #             assert(os.path.isfile(mask_fp))
        #             self.all_mask_files.append(mask_fp)
        # Load all file paths; file names must be the same across foreground and segmentation masks
        self.all_fore_files = []
        self.all_mask_files = []
        self.all_back_files = []
        for fn in os.listdir(fore_dir):
            fore_fp = os.path.join(fore_dir, fn)
            self.all_fore_files.append(fore_fp)
            if self.has_masks:
                # Accept either a .jpg or a .png mask with the same stem
                mask_fp_jpg = os.path.join(mask_dir, fn[:-4] + '.jpg')
                mask_fp_png = os.path.join(mask_dir, fn[:-4] + '.png')
                if os.path.isfile(mask_fp_jpg):
                    self.all_mask_files.append(mask_fp_jpg)
                elif os.path.isfile(mask_fp_png):
                    self.all_mask_files.append(mask_fp_png)
                else:
                    raise Exception('No matching mask file found for ' + fore_fp)
        for fn in os.listdir(back_dir):
            back_fp = os.path.join(back_dir, fn)
            self.all_back_files.append(back_fp)
        self.fore_count = len(self.all_fore_files)
        self.back_count = len(self.all_back_files)
        print('Image file count: ' + str(self.fore_count) + ' foreground, ' + str(self.back_count) + ' background, has masks: ' + str(self.has_masks))
    def __len__(self):
        # Epoch length is defined by the number of foreground images
        return self.fore_count
    def __getitem__(self, idx):
        '''
        Sample a random (foreground [+ mask], background, irrelevant) triplet,
        apply optional flips and the post transform, and build a grounded-fake
        composite. NOTE: sampling is random — `idx` only seeds the RNG, so the
        same index does not deterministically map to the same files.
        '''
        # Force randomness (especially if num_workers > 0)
        np.random.seed(idx + int((time.time() * 654321) % 123456))
        # Read random pair of images from file system
        success = False
        while not(success):
            file_idx = np.random.choice(self.fore_count)
            fp = self.all_fore_files[file_idx]
            fore, success = read_image_robust(fp)
            if not(success):
                continue
            if self.has_masks:
                fp = self.all_mask_files[file_idx]
                mask, success = read_image_robust(fp, monochromatic=True)
                assert(success) # must match fore
                # mask = ((mask > 0) * 255.0).astype('uint8') # convert soft masks to hard
            else:
                mask = None
        # Read random background image
        success = False
        while not(success):
            file_idx2 = np.random.choice(self.back_count)
            fp = self.all_back_files[file_idx2]
            back, success = read_image_robust(fp)
        # Read irrelevant foreground image
        success = False
        while not(success):
            file_idx3 = np.random.choice(self.fore_count)
            if file_idx3 == file_idx:
                continue # try again, cannot pick same image
            fp = self.all_fore_files[file_idx3]
            irrel, success = read_image_robust(fp)
        # Transform foregrounds (+ masks) and backgrounds
        # NOTE: identical random choices must be made for some images
        if self.rand_horz_flip:
            if np.random.rand() < 0.5:
                fore = fore[:, ::-1, :].copy()
                if self.has_masks:
                    mask = mask[:, ::-1, :].copy()
            if np.random.rand() < 0.5:
                irrel = irrel[:, ::-1, :].copy()
            if np.random.rand() < 0.5:
                back = back[:, ::-1, :].copy()
        fore = self.post_tf(fore)
        irrel = self.post_tf(irrel)
        back = self.post_tf(back)
        if self.has_masks:
            mask = self.post_tf(mask)
        # Verify sizes
        assert(fore.shape[1:] == irrel.shape[1:])
        assert(fore.shape[1:] == back.shape[1:])
        if self.has_masks:
            assert(fore.shape[1:] == mask.shape[1:])
        # Create grounded fake mask and composite
        width, height = fore.shape[2], fore.shape[1] # fore is (C, H, W)
        gfake_mask = self.post_tf(create_random_gfake_mask(width, height))
        comp_gfake = copy_paste(fore, gfake_mask, back)
        # Construct dictionary; object count is unknown
        result = {'fore': fore, 'back': back, 'irrel': irrel, 'object_cnt': 1, 'gfake_mask': gfake_mask, 'comp_gfake': comp_gfake}
        if self.has_masks:
            result['mask'] = mask # don't set None, otherwise crash
        return result
class MySquaresDataset(Dataset):
    '''
    Custom dataset class with just a collection of background images as source.
    One or more artificial objects are painted to create a foreground, keeping track of object count.
    Returns irrelevant foreground anti-shortcuts as well. Enforces color (RGB) images.
    Each item is a dict with keys: fore, back, irrel, mask, object_cnt,
    gfake_mask, comp_gfake.
    '''
    def __init__(self, back_dir, rand_horz_flip=True, noisy=False, max_objects=10):
        '''
        back_dir: directory with background images (also used as the base for
            the painted foregrounds).
        rand_horz_flip: randomly mirror images horizontally (fore and masks
            are flipped together so they stay aligned).
        noisy: if True, squares are painted with per-pixel extremal colors
            instead of one flat color (see paint_squares).
        max_objects: number of mask channels; upper bound on painted objects.
        '''
        self.back_dir = back_dir
        self.rand_horz_flip = rand_horz_flip
        self.post_tf = transforms.ToTensor() # converts [0, 255] to [0.0, 1.0]
        self.noisy = noisy
        self.max_objects = max_objects
        # Load all file paths; file names must be the same across all 2 or 3 given directories
        self.all_back_files = []
        for fn in os.listdir(back_dir):
            back_fp = os.path.join(back_dir, fn)
            self.all_back_files.append(back_fp)
        self.file_count = len(self.all_back_files)
        print('Image file count: ' + str(self.file_count) + ', noisy: ' + str(self.noisy) + ', max objects: ' + str(self.max_objects))
    def __len__(self):
        return self.file_count
    def __getitem__(self, idx):
        '''
        Sample three distinct background images, paint random squares on two
        of them (relevant + irrelevant foreground), and build a grounded-fake
        composite. NOTE: sampling is random; `idx` is effectively unused.
        '''
        # Read a random triplet (relevant + background + irrelevant) of non-overlapping backgrounds from file system
        success = False
        while not(success):
            file_idx = np.random.choice(self.file_count)
            fp = self.all_back_files[file_idx]
            fore, success = read_image_robust(fp)
        success = False
        while not(success):
            file_idx2 = np.random.choice(self.file_count)
            if file_idx2 == file_idx:
                continue # try again, cannot pick same image
            fp = self.all_back_files[file_idx2]
            back, success = read_image_robust(fp)
        success = False
        while not(success):
            file_idx3 = np.random.choice(self.file_count)
            if file_idx3 == file_idx or file_idx3 == file_idx2:
                continue # try again, cannot pick same image
            fp = self.all_back_files[file_idx3]
            irrel, success = read_image_robust(fp)
        # Create corresponding foregrounds and masks; leave actual background unchanged
        fore, masks, object_cnt = paint_squares(fore, noisy=self.noisy, channels=self.max_objects)
        irrel, _, _ = paint_squares(irrel, noisy=self.noisy, channels=self.max_objects)
        # Transform foregrounds (+ masks) and backgrounds
        # NOTE: identical random choices must be made for some images
        if self.rand_horz_flip:
            if np.random.rand() < 0.5:
                fore = fore[:, ::-1, :].copy()
                masks = masks[:, ::-1, :].copy()
            if np.random.rand() < 0.5:
                irrel = irrel[:, ::-1, :].copy()
            if np.random.rand() < 0.5:
                back = back[:, ::-1, :].copy()
        fore = self.post_tf(fore)
        masks = self.post_tf(masks)
        irrel = self.post_tf(irrel)
        back = self.post_tf(back)
        # Create grounded fake mask and composite
        width, height = fore.shape[2], fore.shape[1] # fore is (C, H, W)
        gfake_mask = self.post_tf(create_random_gfake_mask(width, height))
        comp_gfake = copy_paste(fore, gfake_mask, back)
        # Construct dictionary
        result = {'fore': fore, 'back': back, 'irrel': irrel, 'mask': masks, 'object_cnt': object_cnt, 'gfake_mask': gfake_mask, 'comp_gfake': comp_gfake}
        return result
| [
"torchvision.transforms.CenterCrop",
"os.listdir",
"PIL.Image.open",
"numpy.random.rand",
"torchvision.transforms.ToPILImage",
"numpy.random.choice",
"PIL.Image.new",
"numpy.sin",
"os.path.join",
"os.path.isfile",
"numpy.array",
"numpy.random.randint",
"numpy.zeros",
"PIL.ImageDraw.Draw",
... | [((1671, 1694), 'numpy.random.randint', 'np.random.randint', (['(1)', '(5)'], {}), '(1, 5)\n', (1688, 1694), True, 'import numpy as np\n'), ((1726, 1777), 'numpy.zeros', 'np.zeros', (['(height, width, channels)'], {'dtype': 'np.uint8'}), '((height, width, channels), dtype=np.uint8)\n', (1734, 1777), True, 'import numpy as np\n'), ((2562, 2585), 'numpy.random.randint', 'np.random.randint', (['(4)', '(7)'], {}), '(4, 7)\n', (2579, 2585), True, 'import numpy as np\n'), ((3164, 3198), 'PIL.Image.new', 'Image.new', (['"""L"""', '(width, height)', '(0)'], {}), "('L', (width, height), 0)\n", (3173, 3198), False, 'from PIL import Image, ImageDraw\n'), ((3274, 3302), 'numpy.array', 'np.array', (['img'], {'dtype': '"""uint8"""'}), "(img, dtype='uint8')\n", (3282, 3302), True, 'import numpy as np\n'), ((1851, 1889), 'numpy.random.randint', 'np.random.randint', (['(0)', '(width - sq_w + 1)'], {}), '(0, width - sq_w + 1)\n', (1868, 1889), True, 'import numpy as np\n'), ((1903, 1942), 'numpy.random.randint', 'np.random.randint', (['(0)', '(height - sq_h + 1)'], {}), '(0, height - sq_h + 1)\n', (1920, 1942), True, 'import numpy as np\n'), ((6599, 6619), 'os.listdir', 'os.listdir', (['fore_dir'], {}), '(fore_dir)\n', (6609, 6619), False, 'import os, platform, time\n'), ((7232, 7252), 'os.listdir', 'os.listdir', (['back_dir'], {}), '(back_dir)\n', (7242, 7252), False, 'import os, platform, time\n'), ((11077, 11098), 'torchvision.transforms.ToTensor', 'transforms.ToTensor', ([], {}), '()\n', (11096, 11098), True, 'import torchvision.transforms as transforms\n'), ((11346, 11366), 'os.listdir', 'os.listdir', (['back_dir'], {}), '(back_dir)\n', (11356, 11366), False, 'import os, platform, time\n'), ((2152, 2180), 'numpy.random.randint', 'np.random.randint', (['(0)', '(256)', '(3)'], {}), '(0, 256, 3)\n', (2169, 2180), True, 'import numpy as np\n'), ((2345, 2388), 'numpy.random.choice', 'np.random.choice', (['[0, 255]', '(sq_h, sq_w, 3)'], {}), '([0, 255], (sq_h, sq_w, 3))\n', 
(2361, 2388), True, 'import numpy as np\n'), ((2516, 2533), 'numpy.random.rand', 'np.random.rand', (['(2)'], {}), '(2)\n', (2530, 2533), True, 'import numpy as np\n'), ((2741, 2766), 'numpy.random.rand', 'np.random.rand', (['num_verts'], {}), '(num_verts)\n', (2755, 2766), True, 'import numpy as np\n'), ((3203, 3222), 'PIL.ImageDraw.Draw', 'ImageDraw.Draw', (['img'], {}), '(img)\n', (3217, 3222), False, 'from PIL import Image, ImageDraw\n'), ((4921, 4942), 'torchvision.transforms.ToTensor', 'transforms.ToTensor', ([], {}), '()\n', (4940, 4942), True, 'import torchvision.transforms as transforms\n'), ((6643, 6669), 'os.path.join', 'os.path.join', (['fore_dir', 'fn'], {}), '(fore_dir, fn)\n', (6655, 6669), False, 'import os, platform, time\n'), ((7276, 7302), 'os.path.join', 'os.path.join', (['back_dir', 'fn'], {}), '(back_dir, fn)\n', (7288, 7302), False, 'import os, platform, time\n'), ((7971, 8004), 'numpy.random.choice', 'np.random.choice', (['self.fore_count'], {}), '(self.fore_count)\n', (7987, 8004), True, 'import numpy as np\n'), ((8635, 8668), 'numpy.random.choice', 'np.random.choice', (['self.back_count'], {}), '(self.back_count)\n', (8651, 8668), True, 'import numpy as np\n'), ((8887, 8920), 'numpy.random.choice', 'np.random.choice', (['self.fore_count'], {}), '(self.fore_count)\n', (8903, 8920), True, 'import numpy as np\n'), ((11390, 11416), 'os.path.join', 'os.path.join', (['back_dir', 'fn'], {}), '(back_dir, fn)\n', (11402, 11416), False, 'import os, platform, time\n'), ((11946, 11979), 'numpy.random.choice', 'np.random.choice', (['self.file_count'], {}), '(self.file_count)\n', (11962, 11979), True, 'import numpy as np\n'), ((12153, 12186), 'numpy.random.choice', 'np.random.choice', (['self.file_count'], {}), '(self.file_count)\n', (12169, 12186), True, 'import numpy as np\n'), ((12460, 12493), 'numpy.random.choice', 'np.random.choice', (['self.file_count'], {}), '(self.file_count)\n', (12476, 12493), True, 'import numpy as np\n'), ((738, 761), 
'numpy.array', 'np.array', (['image.strides'], {}), '(image.strides)\n', (746, 761), True, 'import numpy as np\n'), ((2886, 2911), 'numpy.random.rand', 'np.random.rand', (['num_verts'], {}), '(num_verts)\n', (2900, 2911), True, 'import numpy as np\n'), ((6779, 6819), 'os.path.join', 'os.path.join', (['mask_dir', "(fn[:-4] + '.jpg')"], {}), "(mask_dir, fn[:-4] + '.jpg')\n", (6791, 6819), False, 'import os, platform, time\n'), ((6850, 6890), 'os.path.join', 'os.path.join', (['mask_dir', "(fn[:-4] + '.png')"], {}), "(mask_dir, fn[:-4] + '.png')\n", (6862, 6890), False, 'import os, platform, time\n'), ((6910, 6937), 'os.path.isfile', 'os.path.isfile', (['mask_fp_jpg'], {}), '(mask_fp_jpg)\n', (6924, 6937), False, 'import os, platform, time\n'), ((9307, 9323), 'numpy.random.rand', 'np.random.rand', ([], {}), '()\n', (9321, 9323), True, 'import numpy as np\n'), ((9479, 9495), 'numpy.random.rand', 'np.random.rand', ([], {}), '()\n', (9493, 9495), True, 'import numpy as np\n'), ((9567, 9583), 'numpy.random.rand', 'np.random.rand', ([], {}), '()\n', (9581, 9583), True, 'import numpy as np\n'), ((13182, 13198), 'numpy.random.rand', 'np.random.rand', ([], {}), '()\n', (13196, 13198), True, 'import numpy as np\n'), ((13317, 13333), 'numpy.random.rand', 'np.random.rand', ([], {}), '()\n', (13331, 13333), True, 'import numpy as np\n'), ((13405, 13421), 'numpy.random.rand', 'np.random.rand', ([], {}), '()\n', (13419, 13421), True, 'import numpy as np\n'), ((653, 673), 'PIL.Image.open', 'Image.open', (['img_path'], {}), '(img_path)\n', (663, 673), False, 'from PIL import Image, ImageDraw\n'), ((7020, 7047), 'os.path.isfile', 'os.path.isfile', (['mask_fp_png'], {}), '(mask_fp_png)\n', (7034, 7047), False, 'import os, platform, time\n'), ((5109, 5132), 'torchvision.transforms.ToPILImage', 'transforms.ToPILImage', ([], {}), '()\n', (5130, 5132), True, 'import torchvision.transforms as transforms\n'), ((5150, 5180), 'torchvision.transforms.Resize', 'transforms.Resize', 
(['post_resize'], {}), '(post_resize)\n', (5167, 5180), True, 'import torchvision.transforms as transforms\n'), ((5198, 5232), 'torchvision.transforms.CenterCrop', 'transforms.CenterCrop', (['post_resize'], {}), '(post_resize)\n', (5219, 5232), True, 'import torchvision.transforms as transforms\n'), ((5250, 5271), 'torchvision.transforms.ToTensor', 'transforms.ToTensor', ([], {}), '()\n', (5269, 5271), True, 'import torchvision.transforms as transforms\n'), ((5434, 5457), 'torchvision.transforms.ToPILImage', 'transforms.ToPILImage', ([], {}), '()\n', (5455, 5457), True, 'import torchvision.transforms as transforms\n'), ((5475, 5520), 'torchvision.transforms.Resize', 'transforms.Resize', (['(post_resize, post_resize)'], {}), '((post_resize, post_resize))\n', (5492, 5520), True, 'import torchvision.transforms as transforms\n'), ((5538, 5559), 'torchvision.transforms.ToTensor', 'transforms.ToTensor', ([], {}), '()\n', (5557, 5559), True, 'import torchvision.transforms as transforms\n'), ((7808, 7819), 'time.time', 'time.time', ([], {}), '()\n', (7817, 7819), False, 'import os, platform, time\n'), ((3009, 3018), 'numpy.cos', 'np.cos', (['a'], {}), '(a)\n', (3015, 3018), True, 'import numpy as np\n'), ((3067, 3076), 'numpy.sin', 'np.sin', (['a'], {}), '(a)\n', (3073, 3076), True, 'import numpy as np\n')] |
# -*- coding: utf-8 -*-
"""Richardson-Extrapolation.ipynb
Automatically generated by Colaboratory.
Original file is located at
https://colab.research.google.com/drive/1oNlSL2Vztk9Fc7tMBgPcL82WGaUuCY-A
Before you turn this problem in, make sure everything runs as expected. First, **restart the kernel** (in the menubar, select Kernel$\rightarrow$Restart) and then **run all cells** (in the menubar, select Cell$\rightarrow$Run All).
Make sure you fill in any place that says `YOUR CODE HERE` or "YOUR ANSWER HERE", as well as your name and collaborators below:
"""
NAME = "<NAME>"
COLLABORATORS = ""
"""---
## CSE330 Lab: Richardson Extrapolation
---
## Instructions
Today's assignment is to:
1. Implement Richardson Extrapolation method using Python
## Richardson Extrapolation:
We used central difference method to calculate derivatives of functions last task. In this task we will use Richardson extrapolation to get a more accurate result.
Let,
$$ D_h = \frac{f(x_1+h) -f(x_1-h)}{2h}\tag{5.1}$$
General Taylor Series formula:
$$ f(x) = f(x_1) + f'(x_1)(x - x_1) + \frac{f''(x_1)}{2}(x - x_1)^2+... $$
Using Taylor's theorem to expand we get,
\begin{align}
f(x_1+h) &= f(x_1) + f^{\prime}(x_1)h + \frac{f^{\prime \prime}(x_1)}{2}h^2 + \frac{f^{\prime \prime \prime}(x_1)}{3!}h^3 + \frac{f^{(4)}(x_1)}{4!}h^4 + \frac{f^{(5)}(x_1)}{5!}h^5 + O(h^6)\tag{5.2} \\
f(x_1-h) &= f(x_1) - f^{\prime}(x_1)h + \frac{f^{\prime \prime}(x_1)}{2}h^2 - \frac{f^{\prime \prime \prime}(x_1)}{3!}h^3 + \frac{f^{(4)}(x_1)}{4!}h^4 - \frac{f^{(5)}(x_1)}{5!}h^5 + O(h^6)\tag{5.3}
\end{align}
Subtracting $5.3$ from $5.2$ we get,
$$ f(x_1+h) - f(x_1-h) = 2f^{\prime}(x_1)h + 2\frac{f^{\prime \prime \prime}(x_1)}{3!}h^3 + 2\frac{f^{(5)}(x_1)}{5!}h^5 + O(h^7)\tag{5.4}$$
So,
\begin{align}
D_h &= \frac{f(x_1+h) - f(x_1-h)}{2h} \\
&= \frac{1}{2h} \left( 2f^{\prime}(x_1)h + 2\frac{f^{\prime \prime \prime}(x_1)}{3!}h^3 + 2\frac{f^{(5)}(x_1)}{5!}h^5 + O(h^7) \right) \\
&= f^{\prime}(x_1) + \frac{f^{\prime \prime \prime}(x_1)}{6}h^2 + \frac{f^{(5)}(x_1)}{120}h^4 + O(h^6) \tag{5.5}
\end{align}
We get our derivative $f'(x)$ plus some error terms of order $>= 2$ Now, we want to bring our error order down to 4.
If we use $h, \text{and} \frac{h}{2}$ as step size in $5.5$, we get,
\begin{align}
D_h &= f^{\prime}(x_1) + f^{\prime \prime \prime}(x_1)\frac{h^2}{6} + f^{(5)}(x_1) \frac{h^4}{120} + O(h^6) \tag{5.6} \\
D_{h/2} &= f^{\prime}(x_1) + f^{\prime \prime \prime}(x_1)\frac{h^2}{2^2 . 6} + f^{(5)}(x_1) \frac{h^4}{2^4 . 120} + O(h^6) \tag{5.7}
\end{align}
Multiplying $5.7$ by $4$ and subtracting from $5.6$ we get,
\begin{align}
D_h - 4D_{h/2} &= -3f^{\prime}(x) + f^{(5)}(x_1) \frac{h^4}{160} + O(h^6)\\
\Longrightarrow D^{(1)}_h = \frac{4D_{h/2} - D_h}{3} &= f^{\prime}(x) - f^{(5)}(x_1) \frac{h^4}{480} + O(h^6) \tag{5.8}
\end{align}
Let's calculate the derivative using $5.8$
### 1. Let's import the necessary headers
"""
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
from numpy.polynomial import Polynomial
"""### 2. Let's create a function named `dh(f, h, x)`
function `dh(f, h, x)` takes three parameters as input: a function `f`, a value `h`, and a set of values `x`. It returns the derivatives of the function at each elements of array `x` using the Central Difference method. This calculates equation $(5.1)$.
"""
def dh(f, h, x):
    '''
    Central-difference slope, eq. (5.1):
        D_h = (f(x + h) - f(x - h)) / (2h)

    Input:
        f: callable (e.g. np.polynomial.Polynomial).
        h: floating point step size.
        x: np.array (or scalar) of evaluation points.
    Output:
        np.array (or scalar) of approximate slopes at x.
    '''
    forward = f(x + h)
    backward = f(x - h)
    return (forward - backward) / (2.0 * h)
"""### 3. Let's create another funtion `dh1(f, h, x)`.
`dh1(f, h, x)` takes the same type of values as `dh(f, h, x)` as input. It calculates the derivative using previously defined `dh(f, h, x)` function and using equation $5.8$ and returns the values.
"""
def dh1(f, h, x):
    '''
    Richardson-extrapolated first derivative, eq. (5.8):
        D1_h = (4 * D_{h/2} - D_h) / 3
    which cancels the O(h^2) error term of the central difference D_h,
    leaving an O(h^4) method.

    Input:
        f: callable (e.g. np.polynomial.Polynomial).
        h: floating point step size.
        x: np.array (or scalar) of evaluation points.
    Output:
        np.array (or scalar) of extrapolated slopes at x.
    '''
    coarse = dh(f, h, x)
    fine = dh(f, h / 2, x)
    return (4 * fine - coarse) / 3
"""### 4. Now let's create the `error(f, hs, x_i)` function
The `error(f, hs, x_i)` function takes a function `f` as input. It also takes a list of different values of h as `hs` and a specific value as `x_i` as input. It calculates the derivatives as point `x_i` using both functions described in **B** and **C**, i.e. `dh` and `dh1`
"""
def error(f, hs, x_i):
    '''
    Compare the plain central-difference derivative (dh) against the
    Richardson-extrapolated one (dh1) at a single point, for several step
    sizes. Prints a DataFrame of the signed errors.

    Input:
        f  : np.polynomial.Polynomial, function to differentiate.
        hs : np.array of step sizes h to evaluate.
        x_i: floating point, single evaluation point.
    Output:
        (diff_error, diff2_error): lists with one entry per h — the signed
        error of the central difference and of the Richardson method.
    '''
    f_prime = f.deriv(1)  # exact first derivative, used as ground truth
    y_actual = f_prime(x_i)
    diff_error = []
    diff2_error = []
    for h in hs:
        # BUG FIX: the original passed the whole `hs` array instead of the
        # current step size `h` (the loop variable was never used), so every
        # list entry was a full array instead of the scalar error for this h.
        diff_error.append(y_actual - dh(f, h, x_i))
        diff2_error.append(y_actual - dh1(f, h, x_i))
    print(pd.DataFrame({"h": hs, "Diff": diff_error, "Diff2": diff2_error}))
    return diff_error, diff2_error
"""### 5. Finally let's run some tests
function to draw the actual function
"""
def draw_graph(f, ax, domain=[-10, 10], label=None):
    '''
    Plot the polynomial `f` over `domain` on axes `ax`, labeling the curve
    with `label` for the legend.

    BUG FIX: the original ignored the `label` argument and hard-coded
    label='Function', so every curve appeared as "Function" in the legend
    even though callers pass 'Derivative', 'actual', etc.
    '''
    data = f.linspace(domain=domain)  # (x, y) samples of the polynomial
    ax.plot(data[0], data[1], label=label)
"""### Draw the polynomial and it's actual derivative function"""
# --- Plot the test polynomial and its analytic derivative ---
fig, ax = plt.subplots()
ax.axhline(y=0, color='k')
p = Polynomial([2.0, 1.0, -6.0, -2.0, 2.5, 1.0])
p_prime = p.deriv(1)  # exact derivative, used as ground truth below
draw_graph(p, ax, [-2.4, 1.5], 'Function')
draw_graph(p_prime, ax, [-2.4, 1.5], 'Derivative')
ax.legend()
"""### Draw the actual derivative and richardson derivative using `h=1` and `h=0.1` as step size."""
# --- Compare the exact derivative against Richardson estimates for two step sizes ---
fig, ax = plt.subplots()
ax.axhline(y=0, color='k')
draw_graph(p_prime, ax, [-2.4, 1.5], 'actual')
h = 1
x = np.linspace(-2.4, 1.5, 50, endpoint=True)
y = dh1(p, h, x)  # Richardson extrapolation with a coarse step
ax.plot(x, y, label='Richardson; h=1')
h = 0.1
x = np.linspace(-2.4, 1.5, 50, endpoint=True)
y = dh1(p, h, x)  # finer step: should hug the exact curve closely
ax.plot(x, y, label='Richardson; h=0.1')
ax.legend()
"""### Draw error-vs-h cuve"""
# --- Error-vs-h curves for both methods at x = 2.0 ---
fig, ax = plt.subplots()
ax.axhline(y=0, color='k')
hs = np.array([1., 0.55, 0.3, .17, 0.1, 0.055, 0.03, 0.017, 0.01])
e1, e2 = error(p, hs, 2.0)  # e1: central difference, e2: Richardson
ax.plot(hs, e1, label='e1')
ax.plot(hs, e2, label='e2')
ax.legend()
| [
"numpy.array",
"numpy.linspace",
"numpy.polynomial.Polynomial",
"pandas.DataFrame",
"matplotlib.pyplot.subplots"
] | [((6212, 6226), 'matplotlib.pyplot.subplots', 'plt.subplots', ([], {}), '()\n', (6224, 6226), True, 'import matplotlib.pyplot as plt\n'), ((6259, 6303), 'numpy.polynomial.Polynomial', 'Polynomial', (['[2.0, 1.0, -6.0, -2.0, 2.5, 1.0]'], {}), '([2.0, 1.0, -6.0, -2.0, 2.5, 1.0])\n', (6269, 6303), False, 'from numpy.polynomial import Polynomial\n'), ((6545, 6559), 'matplotlib.pyplot.subplots', 'plt.subplots', ([], {}), '()\n', (6557, 6559), True, 'import matplotlib.pyplot as plt\n'), ((6646, 6687), 'numpy.linspace', 'np.linspace', (['(-2.4)', '(1.5)', '(50)'], {'endpoint': '(True)'}), '(-2.4, 1.5, 50, endpoint=True)\n', (6657, 6687), True, 'import numpy as np\n'), ((6757, 6798), 'numpy.linspace', 'np.linspace', (['(-2.4)', '(1.5)', '(50)'], {'endpoint': '(True)'}), '(-2.4, 1.5, 50, endpoint=True)\n', (6768, 6798), True, 'import numpy as np\n'), ((6913, 6927), 'matplotlib.pyplot.subplots', 'plt.subplots', ([], {}), '()\n', (6925, 6927), True, 'import matplotlib.pyplot as plt\n'), ((6960, 7023), 'numpy.array', 'np.array', (['[1.0, 0.55, 0.3, 0.17, 0.1, 0.055, 0.03, 0.017, 0.01]'], {}), '([1.0, 0.55, 0.3, 0.17, 0.1, 0.055, 0.03, 0.017, 0.01])\n', (6968, 7023), True, 'import numpy as np\n'), ((5811, 5876), 'pandas.DataFrame', 'pd.DataFrame', (["{'h': hs, 'Diff': diff_error, 'Diff2': diff2_error}"], {}), "({'h': hs, 'Diff': diff_error, 'Diff2': diff2_error})\n", (5823, 5876), True, 'import pandas as pd\n')] |
"""The :mod:`mlshell.pipeline.steps` contains unified pipeline steps."""
import inspect
import mlshell
import numpy as np
import pandas as pd
import sklearn
import sklearn.impute
import sklearn.compose
__all__ = ['Steps']
class Steps(object):
    """Unified pipeline steps.

    Parameters
    ----------
    estimator : :mod:`sklearn` estimator
        Estimator to use in the last step.
        If ``estimator_type=regressor``:
        ``sklearn.compose.TransformedTargetRegressor(regressor=`estimator`)``
        If ``estimator_type=classifier`` and ``th_step=True``:
        ``sklearn.pipeline.Pipeline(steps=[
        ('predict_proba',
        mlshell.model_selection.PredictionTransformer(`estimator`)),
        ('apply_threshold',
        mlshell.model_selection.ThresholdClassifier(threshold=0.5,
        kwargs='auto')),
        ])``
        If ``estimator_type=classifier`` and ``th_step=False``:
        ``sklearn.pipeline.Pipeline(steps=[('classifier', `estimator`)])``
    estimator_type : str {'classifier`, 'regressor'}, optional (default=None)
        Either regression or classification task. If None, get from
        :func:`sklearn.base.is_classifier` on ``estimator``.
    th_step : bool
        If True and ``estimator_type=classifier``: ``mlshell.model_selection.
        ThresholdClassifier`` sub-step added, otherwise ignored.

    Notes
    -----
    Assembling steps in class are made for convenience. Use steps property to
    access after initialization. Only OneHot encoder and imputer steps are
    initially activated.
    By default, 4 parameters await for resolution ('auto'):
    'process_parallel__pipeline_categoric__select_columns__kw_args'
    'process_parallel__pipeline_numeric__select_columns__kw_args'
    'estimate__apply_threshold__threshold'
    'estimate__apply_threshold__params'
    Set corresponding parameters with ``set_params()`` to overwrite default in
    created pipeline or use :class:`mlshell.model_selection.Resolver` .
    'pass_custom' step allows brute force arbitrary parameters in uniform style
    with pipeline hp (as if score contains additional nested loops). Step name
    is hard-coded and could not be changed.
    'apply_threshold' allows grid search classification thresholds as pipeline
    hyper-parameter.
    'estimate' step should be the last.
    """
    _required_parameters = ['estimator', 'estimator_type']
    def __init__(self, estimator, estimator_type=None, th_step=False):
        # Infer the task type from the estimator when not given explicitly.
        if estimator_type is None:
            estimator_type = 'classifier' if sklearn.base.is_classifier(estimator)\
                else 'regressor'
        self._steps = [
            # Hook step for scorer-driven pseudo hyper-parameters (data pass-through).
            ('pass_custom', mlshell.preprocessing.FunctionTransformer(func=self.scorer_kwargs, validate=False, skip=True, kw_args={})),
            # Optional row filtering (e.g. outlier removal); skipped by default.
            ('select_rows', mlshell.preprocessing.FunctionTransformer(func=self.subrows, validate=False, skip=True)),
            # Categoric and numeric columns are processed in parallel branches.
            ('process_parallel', sklearn.pipeline.FeatureUnion(transformer_list=[
                ('pipeline_categoric', sklearn.pipeline.Pipeline(steps=[
                    ('select_columns', mlshell.preprocessing.FunctionTransformer(self.subcolumns, validate=False, skip=False, kw_args='auto')),  # {'indices': dataset.meta['categoric_ind_name']}
                    ('encode_onehot', mlshell.preprocessing.OneHotEncoder(handle_unknown='ignore', categories='auto', sparse=False, drop=None, skip=False)),  # x could be [].
                ])),
                ('pipeline_numeric', sklearn.pipeline.Pipeline(steps=[
                    ('select_columns', mlshell.preprocessing.FunctionTransformer(self.subcolumns, validate=False, skip=False, kw_args='auto')),  # {'indices': dataset.meta['numeric_ind_name']}
                    ('impute', sklearn.pipeline.FeatureUnion([
                        ('indicators', sklearn.impute.MissingIndicator(missing_values=np.nan, error_on_new=False)),
                        ('gaps', sklearn.impute.SimpleImputer(missing_values=np.nan, strategy='constant', fill_value=0, copy=True)),
                    ])),
                    ('transform_normal', mlshell.preprocessing.PowerTransformer(method='yeo-johnson', standardize=False, copy=False, skip=True)),
                    ('scale_row_wise', mlshell.preprocessing.FunctionTransformer(func=None, validate=False, skip=True)),
                    ('scale_column_wise', sklearn.preprocessing.RobustScaler(quantile_range=(0, 100), copy=False)),
                    ('add_polynomial', sklearn.preprocessing.PolynomialFeatures(degree=1, include_bias=False)),  # x => degree=1 => x, x => degree=0 => []
                    ('compose_columns', sklearn.compose.ColumnTransformer([
                        ("discretize", sklearn.preprocessing.KBinsDiscretizer(n_bins=5, encode='onehot-dense', strategy='quantile'), self.bining_mask)], sparse_threshold=0, remainder='passthrough'))
                ])),
            ])),
            # Optional model-based feature selection; skipped by default.
            ('select_columns', sklearn.feature_selection.SelectFromModel(estimator=CustomSelector(estimator_type=estimator_type, verbose=False, skip=True), prefit=False)),
            ('reduce_dimensions', mlshell.decomposition.PCA(random_state=42, skip=True)),
            ('estimate', self.last_step(estimator, estimator_type, th_step=th_step)),
        ]
    def last_step(self, estimator, estimator_type, th_step):
        """Prepare estimator step (the final pipeline element)."""
        if estimator_type == 'regressor':
            last_step =\
                sklearn.compose.TransformedTargetRegressor(regressor=estimator)
        elif estimator_type == 'classifier' and th_step:
            last_step = sklearn.pipeline.Pipeline(steps=[
                ('predict_proba',
                 mlshell.model_selection.PredictionTransformer(
                     estimator)),
                ('apply_threshold',
                 mlshell.model_selection.ThresholdClassifier(
                     params='auto', threshold=None)),
            ])
        elif estimator_type == 'classifier' and not th_step:
            last_step = sklearn.pipeline.Pipeline(steps=[('classifier',
                                               estimator)])
        else:
            raise ValueError(f"Unknown estimator type `{estimator_type}`.")
        # Sanity check: assembled step must agree with the declared task type.
        if sklearn.base.is_classifier(estimator=last_step)\
                ^ (estimator_type == "classifier"):
            raise TypeError(f"{self.__class__.__name__}:"
                            f"{inspect.stack()[0][3]}:"
                            f" wrong estimator type: {last_step}")
        return last_step
    @property
    def steps(self):
        """list : access steps to pass in `sklearn.pipeline.Pipeline` ."""
        return self._steps
    def scorer_kwargs(self, x, **kw_args):
        """Mock function to custom kwargs setting.

        Parameters
        ----------
        x : :class:`numpy.ndarray` or :class:`pandas.DataFrame`
            Features of shape [n_samples, n_features].
        **kw_args : dict
            Step parameters. Could be extracted from pipeline in scorer if
            needed.

        Returns
        -------
        result: :class:`numpy.ndarray` or :class:`pandas.DataFrame`
            Unchanged ``x``.
        """
        return x
    def subcolumns(self, x, **kw_args):
        """Get sub-columns from x.

        Parameters
        ----------
        x : :class:`numpy.ndarray` or :class:`pandas.DataFrame`
            Features of shape [n_samples, n_features].
        **kw_args : dict
            Columns indices to extract: {'indices': array-like}.

        Returns
        -------
        result: :class:`numpy.ndarray` or :class:`pandas.DataFrame`
            Extracted sub-columns of ``x``.
        """
        indices = kw_args['indices']
        # DataFrames are sliced positionally; plain arrays via fancy indexing.
        if isinstance(x, pd.DataFrame):
            return x.iloc[:, indices]
        else:
            return x[:, indices]
    def subrows(self, x):
        """Get rows from x (placeholder, currently a pass-through)."""
        # For example to delete outlier/anomalies.
        return x
    def bining_mask(self, x):
        """Get features indices which need bining (none by default)."""
        # Use slice(0, None) to get all.
        return []
class CustomSelector(sklearn.base.BaseEstimator):
    """Template for a user-defined feature selector.

    Only the pass-through mode (``skip=True``) is implemented: ``fit``
    assigns a unit importance to every feature, so nothing is filtered
    out downstream. ``skip=False`` is reserved for future logic.
    """
    def __init__(self, estimator_type='classifier', verbose=True,
                 skip=False):
        self.skip = skip
        self.verbose = verbose
        self.feature_importances_ = None
        self.estimator_type = estimator_type
        super().__init__()
        # Real selection logic is not written yet; only skip mode works.
        if not self.skip:
            raise NotImplementedError
    def fit(self, x, y):
        # Importance 1 for every column => no feature is dropped.
        uniform = np.full(x.shape[1], fill_value=1)
        if self.skip:
            self.feature_importances_ = uniform
            return self
        # TODO: some logic
        self.feature_importances_ = uniform
        return self
if __name__ == '__main__':
    # Module is import-only; nothing to run as a script.
    pass
| [
"sklearn.preprocessing.PolynomialFeatures",
"mlshell.decomposition.PCA",
"mlshell.preprocessing.FunctionTransformer",
"sklearn.base.is_classifier",
"inspect.stack",
"mlshell.preprocessing.OneHotEncoder",
"sklearn.preprocessing.KBinsDiscretizer",
"mlshell.model_selection.ThresholdClassifier",
"mlshel... | [((8958, 8991), 'numpy.full', 'np.full', (['x.shape[1]'], {'fill_value': '(1)'}), '(x.shape[1], fill_value=1)\n', (8965, 8991), True, 'import numpy as np\n'), ((5627, 5690), 'sklearn.compose.TransformedTargetRegressor', 'sklearn.compose.TransformedTargetRegressor', ([], {'regressor': 'estimator'}), '(regressor=estimator)\n', (5669, 5690), False, 'import sklearn\n'), ((6431, 6478), 'sklearn.base.is_classifier', 'sklearn.base.is_classifier', ([], {'estimator': 'last_step'}), '(estimator=last_step)\n', (6457, 6478), False, 'import sklearn\n'), ((8837, 8870), 'numpy.full', 'np.full', (['x.shape[1]'], {'fill_value': '(1)'}), '(x.shape[1], fill_value=1)\n', (8844, 8870), True, 'import numpy as np\n'), ((2650, 2687), 'sklearn.base.is_classifier', 'sklearn.base.is_classifier', (['estimator'], {}), '(estimator)\n', (2676, 2687), False, 'import sklearn\n'), ((2780, 2890), 'mlshell.preprocessing.FunctionTransformer', 'mlshell.preprocessing.FunctionTransformer', ([], {'func': 'self.scorer_kwargs', 'validate': '(False)', 'skip': '(True)', 'kw_args': '{}'}), '(func=self.scorer_kwargs, validate\n =False, skip=True, kw_args={})\n', (2821, 2890), False, 'import mlshell\n'), ((2921, 3012), 'mlshell.preprocessing.FunctionTransformer', 'mlshell.preprocessing.FunctionTransformer', ([], {'func': 'self.subrows', 'validate': '(False)', 'skip': '(True)'}), '(func=self.subrows, validate=False,\n skip=True)\n', (2962, 3012), False, 'import mlshell\n'), ((5292, 5345), 'mlshell.decomposition.PCA', 'mlshell.decomposition.PCA', ([], {'random_state': '(42)', 'skip': '(True)'}), '(random_state=42, skip=True)\n', (5317, 5345), False, 'import mlshell\n'), ((6210, 6270), 'sklearn.pipeline.Pipeline', 'sklearn.pipeline.Pipeline', ([], {'steps': "[('classifier', estimator)]"}), "(steps=[('classifier', estimator)])\n", (6235, 6270), False, 'import sklearn\n'), ((5860, 5916), 'mlshell.model_selection.PredictionTransformer', 'mlshell.model_selection.PredictionTransformer', (['estimator'], {}), 
'(estimator)\n', (5905, 5916), False, 'import mlshell\n'), ((6000, 6074), 'mlshell.model_selection.ThresholdClassifier', 'mlshell.model_selection.ThresholdClassifier', ([], {'params': '"""auto"""', 'threshold': 'None'}), "(params='auto', threshold=None)\n", (6043, 6074), False, 'import mlshell\n'), ((6621, 6636), 'inspect.stack', 'inspect.stack', ([], {}), '()\n', (6634, 6636), False, 'import inspect\n'), ((3209, 3315), 'mlshell.preprocessing.FunctionTransformer', 'mlshell.preprocessing.FunctionTransformer', (['self.subcolumns'], {'validate': '(False)', 'skip': '(False)', 'kw_args': '"""auto"""'}), "(self.subcolumns, validate=False,\n skip=False, kw_args='auto')\n", (3250, 3315), False, 'import mlshell\n'), ((3408, 3529), 'mlshell.preprocessing.OneHotEncoder', 'mlshell.preprocessing.OneHotEncoder', ([], {'handle_unknown': '"""ignore"""', 'categories': '"""auto"""', 'sparse': '(False)', 'drop': 'None', 'skip': '(False)'}), "(handle_unknown='ignore', categories=\n 'auto', sparse=False, drop=None, skip=False)\n", (3443, 3529), False, 'import mlshell\n'), ((3682, 3788), 'mlshell.preprocessing.FunctionTransformer', 'mlshell.preprocessing.FunctionTransformer', (['self.subcolumns'], {'validate': '(False)', 'skip': '(False)', 'kw_args': '"""auto"""'}), "(self.subcolumns, validate=False,\n skip=False, kw_args='auto')\n", (3723, 3788), False, 'import mlshell\n'), ((4254, 4361), 'mlshell.preprocessing.PowerTransformer', 'mlshell.preprocessing.PowerTransformer', ([], {'method': '"""yeo-johnson"""', 'standardize': '(False)', 'copy': '(False)', 'skip': '(True)'}), "(method='yeo-johnson', standardize=\n False, copy=False, skip=True)\n", (4292, 4361), False, 'import mlshell\n'), ((4402, 4481), 'mlshell.preprocessing.FunctionTransformer', 'mlshell.preprocessing.FunctionTransformer', ([], {'func': 'None', 'validate': '(False)', 'skip': '(True)'}), '(func=None, validate=False, skip=True)\n', (4443, 4481), False, 'import mlshell\n'), ((4527, 4598), 
'sklearn.preprocessing.RobustScaler', 'sklearn.preprocessing.RobustScaler', ([], {'quantile_range': '(0, 100)', 'copy': '(False)'}), '(quantile_range=(0, 100), copy=False)\n', (4561, 4598), False, 'import sklearn\n'), ((4644, 4714), 'sklearn.preprocessing.PolynomialFeatures', 'sklearn.preprocessing.PolynomialFeatures', ([], {'degree': '(1)', 'include_bias': '(False)'}), '(degree=1, include_bias=False)\n', (4684, 4714), False, 'import sklearn\n'), ((3958, 4032), 'sklearn.impute.MissingIndicator', 'sklearn.impute.MissingIndicator', ([], {'missing_values': 'np.nan', 'error_on_new': '(False)'}), '(missing_values=np.nan, error_on_new=False)\n', (3989, 4032), False, 'import sklearn\n'), ((4082, 4183), 'sklearn.impute.SimpleImputer', 'sklearn.impute.SimpleImputer', ([], {'missing_values': 'np.nan', 'strategy': '"""constant"""', 'fill_value': '(0)', 'copy': '(True)'}), "(missing_values=np.nan, strategy='constant',\n fill_value=0, copy=True)\n", (4110, 4183), False, 'import sklearn\n'), ((4886, 4982), 'sklearn.preprocessing.KBinsDiscretizer', 'sklearn.preprocessing.KBinsDiscretizer', ([], {'n_bins': '(5)', 'encode': '"""onehot-dense"""', 'strategy': '"""quantile"""'}), "(n_bins=5, encode='onehot-dense',\n strategy='quantile')\n", (4924, 4982), False, 'import sklearn\n')] |
from datetime import datetime
import peewee
from paste import domain
from . import db
class AbstractRepository(domain.IRepository):
    """Peewee-backed repository mapping domain entities onto DB rows.

    Subclasses define ``_model`` (a peewee model) and ``_entity``
    (the matching domain class).
    """

    _model = NotImplemented
    _entity = NotImplemented

    def count(self):
        """Return how many records are stored."""
        return self._model.count()

    def save(self, entity):
        """Persist *entity*; stamps created_at on the first save."""
        record = _entity_to_model(entity)
        if record.pk is None:
            record.created_at = datetime.utcnow()
        record.updated_at = datetime.utcnow()
        record.save()
        return _model_to_entity(record)

    def get(self, **kw):
        """Fetch one entity by the given filter, or raise domain.DoesNotExist."""
        try:
            return _model_to_entity(self._model.get(**kw))
        except peewee.DoesNotExist:
            raise domain.DoesNotExist('%s: %s' % (self._entity, kw))

    def find(self, page, size, **kw):
        """Return one page of entities, optionally filtered by **kw.

        Entity values in the filter are replaced by their primary keys.
        """
        if kw:
            for key, value in kw.items():
                if isinstance(value, domain.Entity):
                    kw[key] = value.pk
            query = self._model.filter(**kw)
        else:
            query = self._model.select()
        rows = query.paginate(page, size)
        return [_model_to_entity(row) for row in rows]

    def delete(self, entity):
        """Remove the stored record backing *entity*."""
        record = _entity_to_model(entity)
        record.delete_instance()
class UserRepository(AbstractRepository):
    # Repository for User entities backed by the db.User peewee model.
    _model = db.User
    _entity = domain.User
class SnippetRepository(AbstractRepository):
    # Repository for Snippet entities backed by the db.Snippet peewee model.
    _model = db.Snippet
    _entity = domain.Snippet
def _by_object(obj):
    """Map *obj* (entity or model) to its (entity class, model class, fields).

    Dispatch is by class name, so either side of the mapping works.
    Raises NotImplementedError for unknown classes.
    """
    base_fields = ('pk', 'created_at', 'updated_at')
    kind = type(obj).__name__
    if kind == 'User':
        return domain.User, db.User, base_fields + ('name', 'passhash')
    if kind == 'Snippet':
        snippet_fields = base_fields + ('author', 'name', 'syntax', 'raw', 'html')
        return domain.Snippet, db.Snippet, snippet_fields
    raise NotImplementedError
def _entity_to_model(entity):
    """Build a peewee model instance mirroring *entity*.

    Nested entities are flattened to their primary keys (FK columns).
    """
    _, model_cls, fields = _by_object(entity)

    def as_column(value):
        # Foreign keys are stored as raw pk values on the model side.
        return value.pk if isinstance(value, domain.Entity) else value

    attrs = {name: as_column(getattr(entity, name)) for name in fields}
    return model_cls(**attrs)
def _model_to_entity(model):
    """Build a domain entity from a peewee *model* row.

    Related rows are converted recursively into their own entities.
    """
    entity_cls, _, fields = _by_object(model)

    def as_attr(value):
        # FK rows come back as models; turn them into entities too.
        if isinstance(value, db.AbstractModel):
            return _model_to_entity(value)
        return value

    attrs = {name: as_attr(getattr(model, name)) for name in fields}
    return entity_cls(**attrs)
| [
"paste.domain.DoesNotExist",
"datetime.datetime.utcnow"
] | [((429, 446), 'datetime.datetime.utcnow', 'datetime.utcnow', ([], {}), '()\n', (444, 446), False, 'from datetime import datetime\n'), ((383, 400), 'datetime.datetime.utcnow', 'datetime.utcnow', ([], {}), '()\n', (398, 400), False, 'from datetime import datetime\n'), ((660, 710), 'paste.domain.DoesNotExist', 'domain.DoesNotExist', (["('%s: %s' % (self._entity, kw))"], {}), "('%s: %s' % (self._entity, kw))\n", (679, 710), False, 'from paste import domain\n')] |
# Dataset https://archive.ics.uci.edu/ml/datasets/Nursery
import numpy as np
import pandas as pd
from collections import Counter
from sklearn.model_selection import train_test_split, GridSearchCV, StratifiedKFold
from imblearn.metrics import geometric_mean_score
from sklearn.metrics import mean_squared_error, make_scorer, roc_auc_score, log_loss
from imblearn.over_sampling import SMOTE, RandomOverSampler
from imblearn.under_sampling import RandomUnderSampler
from sklearn.preprocessing import OneHotEncoder, LabelBinarizer, LabelEncoder
from sklearn.ensemble import RandomForestClassifier
from racog import RACOG
RS = 334  # global random seed for reproducibility
nurseryurl = 'https://archive.ics.uci.edu/ml/machine-learning-databases/nursery/nursery.data'
# Column names for the UCI Nursery dataset (the CSV ships without a header).
attribute_list = ['parents', 'has_nurs', 'form', 'children',
                  'housing', 'finance', 'social', 'health', 'target']
nursery = pd.read_csv(nurseryurl, header=None, names=attribute_list)
LE = LabelEncoder()
X = nursery.drop('target', axis=1)
y = nursery['target']
# Drop the rare 'recommend' target class from both X and y.
ii = y[y == 'recommend'].index.values
X.drop(ii, inplace=True)
y.drop(ii, inplace=True)
# Integer-encode every categorical feature column in place.
for col in X:
    if X[col].dtype == 'object':
        X[col] = LE.fit_transform(X[col])
X = X.values
LE = LabelEncoder()
y = LE.fit_transform(y)
rf = RandomForestClassifier()
# Fixed hyper-parameters reused for every resampling comparison below.
params = {'class_weight': 'balanced',
          'criterion': 'entropy',
          'max_depth': 15,
          'max_features': 0.9,
          'min_samples_leaf': 11,
          'min_samples_split': 2,
          'min_weight_fraction_leaf': 0,
          'n_estimators': 30}
rf.set_params(**params)
# Scorer wrapping imblearn's multiclass G-mean (not used in the visible code).
gscore = make_scorer(geometric_mean_score, average='multiclass')
def gmean(y_true, y_pred):
    """Multiclass geometric mean of per-class recalls (imblearn)."""
    return geometric_mean_score(y_true, y_pred, average='multiclass')
strf = StratifiedKFold(n_splits=3, shuffle=True, random_state=RS)
count = 0
# For every fold: train on the *smaller* split (deliberate swap below) and
# compare G-mean without resampling vs random over-sampling, SMOTE and RACOG.
for train_index, test_index in strf.split(X, y):
    print(Counter(y[test_index]), Counter(y[train_index]))
    # swap train/test
    X_train, X_test, y_train, y_test = X[test_index], X[train_index], y[test_index], y[train_index]
    rf.set_params(**params)
    rf.fit(X_train, y_train)
    y_pred = rf.predict(X_test)
    print('#####################################################')
    print('Count', count)
    print('')
    print('Without oversampling | Gmean:', gmean(y_test, y_pred))
    # Baseline resampler: duplicate minority samples at random.
    rnd_over = RandomOverSampler(random_state=RS + count)
    X_rndo, y_rndo = rnd_over.fit_sample(X_train, y_train)
    print('')
    rf.fit(X_rndo, y_rndo)
    y_pred = rf.predict(X_test)
    print('Random oversampling | Gmean:', gmean(y_test, y_pred))
    # SMOTE: synthesize minority samples by interpolation.
    smote = SMOTE(random_state=RS + count, kind='regular', k_neighbors=5, m=None,
                  m_neighbors=10, n_jobs=1)
    X_smote, y_smote = smote.fit_sample(X_train, y_train)
    rf.fit(X_smote, y_smote)
    y_pred = rf.predict(X_test)
    print('')
    print('SMOTE oversampling | Gmean:', gmean(y_test, y_pred))
    # RACOG: Gibbs-sampling-based over-sampler for categorical data.
    racog = RACOG(categorical_features='all',
                  warmup_offset=100, lag0=20, n_iter='auto',
                  threshold=10, eps=10E-5, verbose=0, n_jobs=1)
    X_racog, y_racog = racog.fit_sample(X_train, y_train)
    rf.fit(X_racog, y_racog)
    y_pred = rf.predict(X_test)
    print('RACOG oversampling | Gmean:', gmean(y_test, y_pred))
    print('')
    count = count + 1
| [
"racog.RACOG",
"sklearn.preprocessing.LabelEncoder",
"pandas.read_csv",
"imblearn.over_sampling.SMOTE",
"sklearn.metrics.make_scorer",
"sklearn.ensemble.RandomForestClassifier",
"sklearn.model_selection.StratifiedKFold",
"imblearn.metrics.geometric_mean_score",
"imblearn.over_sampling.RandomOverSamp... | [((870, 928), 'pandas.read_csv', 'pd.read_csv', (['nurseryurl'], {'header': 'None', 'names': 'attribute_list'}), '(nurseryurl, header=None, names=attribute_list)\n', (881, 928), True, 'import pandas as pd\n'), ((935, 949), 'sklearn.preprocessing.LabelEncoder', 'LabelEncoder', ([], {}), '()\n', (947, 949), False, 'from sklearn.preprocessing import OneHotEncoder, LabelBinarizer, LabelEncoder\n'), ((1206, 1220), 'sklearn.preprocessing.LabelEncoder', 'LabelEncoder', ([], {}), '()\n', (1218, 1220), False, 'from sklearn.preprocessing import OneHotEncoder, LabelBinarizer, LabelEncoder\n'), ((1251, 1275), 'sklearn.ensemble.RandomForestClassifier', 'RandomForestClassifier', ([], {}), '()\n', (1273, 1275), False, 'from sklearn.ensemble import RandomForestClassifier\n'), ((1579, 1634), 'sklearn.metrics.make_scorer', 'make_scorer', (['geometric_mean_score'], {'average': '"""multiclass"""'}), "(geometric_mean_score, average='multiclass')\n", (1590, 1634), False, 'from sklearn.metrics import mean_squared_error, make_scorer, roc_auc_score, log_loss\n'), ((1743, 1801), 'sklearn.model_selection.StratifiedKFold', 'StratifiedKFold', ([], {'n_splits': '(3)', 'shuffle': '(True)', 'random_state': 'RS'}), '(n_splits=3, shuffle=True, random_state=RS)\n', (1758, 1801), False, 'from sklearn.model_selection import train_test_split, GridSearchCV, StratifiedKFold\n'), ((1675, 1733), 'imblearn.metrics.geometric_mean_score', 'geometric_mean_score', (['y_true', 'y_pred'], {'average': '"""multiclass"""'}), "(y_true, y_pred, average='multiclass')\n", (1695, 1733), False, 'from imblearn.metrics import geometric_mean_score\n'), ((2319, 2361), 'imblearn.over_sampling.RandomOverSampler', 'RandomOverSampler', ([], {'random_state': '(RS + count)'}), '(random_state=RS + count)\n', (2336, 2361), False, 'from imblearn.over_sampling import SMOTE, RandomOverSampler\n'), ((2573, 2672), 'imblearn.over_sampling.SMOTE', 'SMOTE', ([], {'random_state': '(RS + count)', 
'kind': '"""regular"""', 'k_neighbors': '(5)', 'm': 'None', 'm_neighbors': '(10)', 'n_jobs': '(1)'}), "(random_state=RS + count, kind='regular', k_neighbors=5, m=None,\n m_neighbors=10, n_jobs=1)\n", (2578, 2672), False, 'from imblearn.over_sampling import SMOTE, RandomOverSampler\n'), ((2898, 3025), 'racog.RACOG', 'RACOG', ([], {'categorical_features': '"""all"""', 'warmup_offset': '(100)', 'lag0': '(20)', 'n_iter': '"""auto"""', 'threshold': '(10)', 'eps': '(0.0001)', 'verbose': '(0)', 'n_jobs': '(1)'}), "(categorical_features='all', warmup_offset=100, lag0=20, n_iter='auto',\n threshold=10, eps=0.0001, verbose=0, n_jobs=1)\n", (2903, 3025), False, 'from racog import RACOG\n'), ((1871, 1893), 'collections.Counter', 'Counter', (['y[test_index]'], {}), '(y[test_index])\n', (1878, 1893), False, 'from collections import Counter\n'), ((1895, 1918), 'collections.Counter', 'Counter', (['y[train_index]'], {}), '(y[train_index])\n', (1902, 1918), False, 'from collections import Counter\n')] |
import json
from datetime import datetime
import time
from functools import reduce
import boto3
from celery import shared_task
from celery.bin.control import inspect
from django.conf import settings
from comic.container_exec.backends.k8s import K8sJob
from comic.eyra.models import Job, Submission, DataFile, JobInput
@shared_task
def run_job(job_pk):
    """Celery task for running a job.

    Args:
        job_pk: the primary key of the Job object that defines the algorithm run
    """
    job = Job.objects.get(pk=job_pk)
    # Only PENDING jobs may be started; anything else is a double start.
    if job.status != Job.PENDING:
        raise Exception(f"Can't start job with status '{Job.STATUS_CHOICES[job.status][1]}'")
    job.status = Job.STARTED
    job.started = datetime.now()
    job.save()
    job.log = ''
    try:
        with K8sJob(job) as k8s_job:
            k8s_job.run()
            # keep probing until failure or success
            while True:
                s = k8s_job.status()
                # Stream the pod logs into the Job record while polling.
                job.log = k8s_job.get_text_logs()
                job.save()
                if s.failed or s.succeeded:
                    break
                time.sleep(5)  # polling interval
            job.status = Job.SUCCESS if s.succeeded else Job.FAILURE
            job.log = k8s_job.get_text_logs()
    except Exception as e:
        job.status = Job.FAILURE
        job.log += '\n Error in job executor: \n' + str(e)
        raise e
    finally:
        # Always stamp the stop time and persist the final state.
        job.stopped = datetime.now()
        job.save()
    if job.status == Job.FAILURE:
        raise Exception("Job failed")
def create_algorithm_job_for_submission(submission: Submission):
    """Create the algorithm Job for *submission*, plus its output and input.

    Private submissions run against the private test data file; public ones
    against the public test data file.

    Raises:
        Exception: if the submission already has an algorithm job.
    """
    if submission.algorithm_job:
        raise Exception('Job already exists for submission')
    job_output = DataFile.objects.create(
        name='algorithm job output',
    )
    # The file path is keyed by the DataFile's own pk, which only exists
    # after the initial create(), hence the second save.
    job_output.file = f"data_files/{str(job_output.pk)}"
    job_output.save()
    submission.algorithm_job = Job.objects.create(
        output=job_output,
        submission=submission,
        image=submission.image,
    )
    submission.save()
    input_data_file = submission.benchmark.data_set.public_test_data_file
    if submission.is_private:
        input_data_file = submission.benchmark.data_set.private_test_data_file
    # Fix: drop the unused local binding; the JobInput row is the side effect.
    JobInput.objects.create(
        job=submission.algorithm_job,
        name='test_data',
        data_file=input_data_file,
    )
def create_evaluation_job_for_submission(submission: Submission):
    """Create the evaluation Job for *submission*, plus its output and inputs.

    The evaluation job consumes the algorithm job's output together with the
    benchmark's ground truth (private or public, matching the submission's
    visibility).

    Raises:
        Exception: if the submission already has an evaluation job.
    """
    if submission.evaluation_job:
        raise Exception('Job already exists for submission')
    job_output = DataFile.objects.create(
        name='evaluation job output',
    )
    # The file path is keyed by the DataFile's own pk, which only exists
    # after the initial create(), hence the second save.
    job_output.file = f"data_files/{str(job_output.pk)}"
    job_output.save()
    submission.evaluation_job = Job.objects.create(
        output=job_output,
        submission=submission,
        image=submission.benchmark.evaluation_image
    )
    submission.save()
    # Fix: drop the unused local bindings; the JobInput rows are the side effect.
    JobInput.objects.create(
        job=submission.evaluation_job,
        name='algorithm_output',
        data_file=submission.algorithm_job.output,
    )
    ground_truth_data_file = submission.benchmark.data_set.public_ground_truth_data_file
    if submission.is_private:
        ground_truth_data_file = submission.benchmark.data_set.private_ground_truth_data_file
    JobInput.objects.create(
        job=submission.evaluation_job,
        name='ground_truth',
        data_file=ground_truth_data_file,
    )
@shared_task
def run_submission(submission_pk):
    """Run a submission end-to-end: algorithm job, then evaluation job.

    Benchmarks with ``should_evaluate`` disabled are marked successful
    without running anything (they are evaluated externally). Otherwise the
    algorithm job runs first; on failure the evaluation job is marked failed
    and the error re-raised. Finally the evaluation output's 'metrics' value
    is stored on the submission.

    Args:
        submission_pk: primary key of the Submission to run.
    """
    submission: Submission = Submission.objects.get(pk=submission_pk)
    create_algorithm_job_for_submission(submission)
    create_evaluation_job_for_submission(submission)
    if not submission.benchmark.should_evaluate:
        submission.algorithm_job.status = Job.SUCCESS
        submission.algorithm_job.log = 'Ran externally.'
        submission.algorithm_job.save()
        submission.evaluation_job.status = Job.SUCCESS
        submission.evaluation_job.log = 'Ran externally.'
        submission.evaluation_job.save()
        submission.metrics = "Should be set externally."
        return
    try:
        run_job(submission.algorithm_job.pk)
    except Exception as e:
        # Without algorithm output there is nothing to evaluate.
        submission.evaluation_job.status = Job.FAILURE
        submission.evaluation_job.log = 'Cannot evaluate, since the implementation job failed.'
        submission.evaluation_job.save()
        raise e
    run_job(submission.evaluation_job.pk)
    try:
        eval_output = submission.evaluation_job.output.file.read().decode('ascii')
        submission.metrics = json.loads(eval_output)['metrics']
    except:
        # NOTE(review): bare except also hides programming errors here.
        submission.metrics = "Error getting 'metrics' value from evaluation output."
    submission.save()
@shared_task
def autoscale_gpu_node():
    """Scale the EKS GPU node group to 1 while a submission task exists.

    Inspects all Celery workers; if any active, scheduled or reserved task
    is ``run_submission``, the AWS auto-scaling group's desired capacity is
    set to 1, otherwise to 0.
    """
    autoscaling_client = boto3.client(
        'autoscaling',
        region_name=settings.AWS_AUTOSCALING_REGION,
        aws_access_key_id=settings.AWS_AUTOSCALING_ACCESS_KEY_ID,
        aws_secret_access_key=settings.AWS_AUTOSCALING_SECRET_ACCESS_KEY,
    )
    i = inspect()
    # Fix: inspect() calls return {worker: [task, ...]} or None when no
    # worker replies; the original crashed on None (.items()) and on an
    # empty task list (reduce() without an initializer).
    tasks_per_node = []
    for status in (i.active(), i.scheduled(), i.reserved()):
        tasks_per_node.extend((status or {}).values())
    # Flatten; seeding reduce with [] keeps this safe when no tasks exist.
    tasks = reduce(lambda x, y: x + y, tasks_per_node, [])
    task_names = [task['name'] for task in tasks]
    scale_to = 0
    if run_submission.name in task_names:
        scale_to = 1
    print(f"Scaling to {str(scale_to)} GPU nodes.")
    print(autoscaling_client.set_desired_capacity(
        AutoScalingGroupName='terraform-eks-eyra-prod01-gpu',
        DesiredCapacity=scale_to
    ))
@shared_task
def sleep_one_sec():
    """Trivial smoke-test task: sleep one second and return 42."""
    # used for testing basic tasks
    time.sleep(1)
    return 42
| [
"json.loads",
"celery.bin.control.inspect",
"boto3.client",
"functools.reduce",
"time.sleep",
"datetime.datetime.now",
"comic.eyra.models.Job.objects.create",
"comic.eyra.models.Job.objects.get",
"comic.eyra.models.DataFile.objects.create",
"comic.eyra.models.JobInput.objects.create",
"comic.eyr... | [((504, 530), 'comic.eyra.models.Job.objects.get', 'Job.objects.get', ([], {'pk': 'job_pk'}), '(pk=job_pk)\n', (519, 530), False, 'from comic.eyra.models import Job, Submission, DataFile, JobInput\n'), ((707, 721), 'datetime.datetime.now', 'datetime.now', ([], {}), '()\n', (719, 721), False, 'from datetime import datetime\n'), ((1694, 1746), 'comic.eyra.models.DataFile.objects.create', 'DataFile.objects.create', ([], {'name': '"""algorithm job output"""'}), "(name='algorithm job output')\n", (1717, 1746), False, 'from comic.eyra.models import Job, Submission, DataFile, JobInput\n'), ((1873, 1962), 'comic.eyra.models.Job.objects.create', 'Job.objects.create', ([], {'output': 'job_output', 'submission': 'submission', 'image': 'submission.image'}), '(output=job_output, submission=submission, image=\n submission.image)\n', (1891, 1962), False, 'from comic.eyra.models import Job, Submission, DataFile, JobInput\n'), ((2212, 2314), 'comic.eyra.models.JobInput.objects.create', 'JobInput.objects.create', ([], {'job': 'submission.algorithm_job', 'name': '"""test_data"""', 'data_file': 'input_data_file'}), "(job=submission.algorithm_job, name='test_data',\n data_file=input_data_file)\n", (2235, 2314), False, 'from comic.eyra.models import Job, Submission, DataFile, JobInput\n'), ((2523, 2576), 'comic.eyra.models.DataFile.objects.create', 'DataFile.objects.create', ([], {'name': '"""evaluation job output"""'}), "(name='evaluation job output')\n", (2546, 2576), False, 'from comic.eyra.models import Job, Submission, DataFile, JobInput\n'), ((2704, 2814), 'comic.eyra.models.Job.objects.create', 'Job.objects.create', ([], {'output': 'job_output', 'submission': 'submission', 'image': 'submission.benchmark.evaluation_image'}), '(output=job_output, submission=submission, image=\n submission.benchmark.evaluation_image)\n', (2722, 2814), False, 'from comic.eyra.models import Job, Submission, DataFile, JobInput\n'), ((2896, 3023), 
'comic.eyra.models.JobInput.objects.create', 'JobInput.objects.create', ([], {'job': 'submission.evaluation_job', 'name': '"""algorithm_output"""', 'data_file': 'submission.algorithm_job.output'}), "(job=submission.evaluation_job, name=\n 'algorithm_output', data_file=submission.algorithm_job.output)\n", (2919, 3023), False, 'from comic.eyra.models import Job, Submission, DataFile, JobInput\n'), ((3294, 3407), 'comic.eyra.models.JobInput.objects.create', 'JobInput.objects.create', ([], {'job': 'submission.evaluation_job', 'name': '"""ground_truth"""', 'data_file': 'ground_truth_data_file'}), "(job=submission.evaluation_job, name='ground_truth',\n data_file=ground_truth_data_file)\n", (3317, 3407), False, 'from comic.eyra.models import Job, Submission, DataFile, JobInput\n'), ((3515, 3555), 'comic.eyra.models.Submission.objects.get', 'Submission.objects.get', ([], {'pk': 'submission_pk'}), '(pk=submission_pk)\n', (3537, 3555), False, 'from comic.eyra.models import Job, Submission, DataFile, JobInput\n'), ((4764, 4968), 'boto3.client', 'boto3.client', (['"""autoscaling"""'], {'region_name': 'settings.AWS_AUTOSCALING_REGION', 'aws_access_key_id': 'settings.AWS_AUTOSCALING_ACCESS_KEY_ID', 'aws_secret_access_key': 'settings.AWS_AUTOSCALING_SECRET_ACCESS_KEY'}), "('autoscaling', region_name=settings.AWS_AUTOSCALING_REGION,\n aws_access_key_id=settings.AWS_AUTOSCALING_ACCESS_KEY_ID,\n aws_secret_access_key=settings.AWS_AUTOSCALING_SECRET_ACCESS_KEY)\n", (4776, 4968), False, 'import boto3\n'), ((5009, 5018), 'celery.bin.control.inspect', 'inspect', ([], {}), '()\n', (5016, 5018), False, 'from celery.bin.control import inspect\n'), ((5345, 5387), 'functools.reduce', 'reduce', (['(lambda x, y: x + y)', 'tasks_per_node'], {}), '(lambda x, y: x + y, tasks_per_node)\n', (5351, 5387), False, 'from functools import reduce\n'), ((5802, 5815), 'time.sleep', 'time.sleep', (['(1)'], {}), '(1)\n', (5812, 5815), False, 'import time\n'), ((1408, 1422), 'datetime.datetime.now', 
'datetime.now', ([], {}), '()\n', (1420, 1422), False, 'from datetime import datetime\n'), ((777, 788), 'comic.container_exec.backends.k8s.K8sJob', 'K8sJob', (['job'], {}), '(job)\n', (783, 788), False, 'from comic.container_exec.backends.k8s import K8sJob\n'), ((4544, 4567), 'json.loads', 'json.loads', (['eval_output'], {}), '(eval_output)\n', (4554, 4567), False, 'import json\n'), ((1106, 1119), 'time.sleep', 'time.sleep', (['(5)'], {}), '(5)\n', (1116, 1119), False, 'import time\n')] |
import telnetlib
def print_logo(logo=''):
    """Print *logo*, or the built-in ASCII-art banner when none is given.

    Args:
        logo: custom banner text; the default empty string selects the
            built-in banner.
    """
    # Fix: constant renamed from the misspelled LOGO_DAFAULT.
    LOGO_DEFAULT = """\033[93m
   /\ /\
  / \'._ (\_/) _.'/ \
 /_.''._'--('.')--'_.''._\
 | \_ / `;=/ " \=;` \ _/ |
  \/ `\__|`\___/`|__/` \/
   `      \(/|\)/     `
           " ` "
      DAW_Start_By_VLDZ
\033[0m
    """
    if logo != '':
        print(logo)
    else:
        print(LOGO_DEFAULT)
print_logo()
# Connect to the same TCP port on two hosts and mirror every typed command
# to both telnet sessions.
port = int(input('\n PORT:'))
ip_1 = str(input(' Host_1 IP: '))
node_1 = telnetlib.Telnet(ip_1, port)
ip_2 = str(input(' Host_2 IP: '))
node_2 = telnetlib.Telnet(ip_2, port)
while True:
    symbol = str(input('==> '))
    if symbol == 's':
        node_1.write(b's\r\n')
        node_2.write(b's\r\n')
    elif symbol == 'n':
        node_1.write(b'n\r\n')
        node_2.write(b'n\r\n')
    elif symbol == 'b':
        node_1.write(b'b\r\n')
        node_2.write(b'b\r\n')
    else:
        # NOTE(review): unlike the branches above, no trailing CRLF is sent
        # here — confirm whether that is intentional for other commands.
        node_1.write(bytes(str.encode(symbol)))
        node_2.write(bytes(str.encode(symbol)))
"telnetlib.Telnet"
] | [((455, 483), 'telnetlib.Telnet', 'telnetlib.Telnet', (['ip_1', 'port'], {}), '(ip_1, port)\n', (471, 483), False, 'import telnetlib\n'), ((527, 555), 'telnetlib.Telnet', 'telnetlib.Telnet', (['ip_2', 'port'], {}), '(ip_2, port)\n', (543, 555), False, 'import telnetlib\n')] |
#!/usr/bin/python3
"""Download the file at a given URL and save it to a local path.

Usage: script.py <link_to_download_from> <path_to_save_downloaded_file>
"""
import requests
import sys

if __name__ == '__main__':
    if len(sys.argv) != 3:
        print("Error: you should pass 2 arguments: [link_to_download_from] [path_to_save_downloaded_file]")
        exit(1)
    url = sys.argv[1]
    r = requests.get(url, allow_redirects=True)
    # Bug fix: use a context manager so the file handle is flushed and closed
    # deterministically instead of being left to the garbage collector.
    with open(sys.argv[2], 'wb') as f:
        f.write(r.content)
| [
"requests.get"
] | [((256, 295), 'requests.get', 'requests.get', (['url'], {'allow_redirects': '(True)'}), '(url, allow_redirects=True)\n', (268, 295), False, 'import requests\n')] |
# Generated by Django 3.1.14 on 2021-12-14 08:25
import django.db.models.deletion
from django.db import migrations, models
class Migration(migrations.Migration):
    """Alter ProfileSettings.terms_and_conditions into an optional, nullable
    foreign key to a wagtailcore Page."""
    dependencies = [
        ('wagtailcore', '0066_collection_management_permissions'),
        ('profiles', '0001_initial'),
    ]
    operations = [
        migrations.AlterField(
            model_name='profilesettings',
            name='terms_and_conditions',
            # SET_NULL keeps the settings row alive if the referenced page is deleted.
            field=models.ForeignKey(blank=True, help_text='Choose a Terms and Conditions page', null=True, on_delete=django.db.models.deletion.SET_NULL, related_name='+', to='wagtailcore.page'),
        ),
    ]
| [
"django.db.models.ForeignKey"
] | [((450, 635), 'django.db.models.ForeignKey', 'models.ForeignKey', ([], {'blank': '(True)', 'help_text': '"""Choose a Terms and Conditions page"""', 'null': '(True)', 'on_delete': 'django.db.models.deletion.SET_NULL', 'related_name': '"""+"""', 'to': '"""wagtailcore.page"""'}), "(blank=True, help_text=\n 'Choose a Terms and Conditions page', null=True, on_delete=django.db.\n models.deletion.SET_NULL, related_name='+', to='wagtailcore.page')\n", (467, 635), False, 'from django.db import migrations, models\n')] |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import struct
import socket
from netlink import *
NETLINK_ROUTE = 0
NETLINK_UNUSED = 1
NETLINK_USERSOCK = 2
NETLINK_FIREWALL = 3
NETLINK_SOCK_DIAG = 4
NETLINK_NFLOG = 5
NETLINK_XFRM = 6
NETLINK_SELINUX = 7
NETLINK_ISCSI = 8
NETLINK_AUDIT = 9
NETLINK_FIB_LOOKUP = 10
NETLINK_CONNECTOR = 11
NETLINK_NETFILTER = 12
NETLINK_IP6_FW = 13
NETLINK_DNRTMSG = 14
NETLINK_KOBJECT_UEVENT = 15
NETLINK_GENERIC = 16
NETLINK_SCSITRANSPORT = 18
NETLINK_ECRYPTFS = 19
NETLINK_RDMA = 20
NETLINK_CRYPTO = 21
NETLINK_INET_DIAG = NETLINK_SOCK_DIAG
class Connection(object):
    '''
    Thin wrapper around a raw AF_NETLINK socket bound to the kernel.
    Subclasses pick the netlink family (route, generic, ...).
    '''
    def __init__(self, family):
        self.family = family
        self.conn = socket.socket(socket.AF_NETLINK, socket.SOCK_RAW, family)
        # Bump both socket buffers to 64 KiB before binding.
        for buf_opt in (socket.SO_SNDBUF, socket.SO_RCVBUF):
            self.conn.setsockopt(socket.SOL_SOCKET, buf_opt, 65536)
        self.conn.bind((0, 0))
    def send(self, msg):
        '''Write one raw netlink message to the kernel.'''
        self.conn.send(msg)
    def recv(self):
        '''Read a single kernel reply (up to 64 KiB).'''
        return self.conn.recv(65536)
# Genetlink Controller command and attribute values
CTRL_CMD_UNSPEC = 0
CTRL_CMD_NEWFAMILY = 1
CTRL_CMD_DELFAMILY = 2
CTRL_CMD_GETFAMILY = 3
CTRL_CMD_NEWOPS = 4
CTRL_CMD_DELOPS = 5
CTRL_CMD_GETOPS = 6
CTRL_CMD_NEWMCAST_GRP = 7
CTRL_CMD_DELCAST_GRP = 8
CTRL_CMD_GETMCAST_GRP = 9
__CTRL_CMD_MAX = 10
TASKSTATS_GENL_VERSION = 0x1
GENL_HDRLEN = struct.calcsize('BBxx')

class Genlmsg(object):
    '''
    Container for a generic netlink message: a struct genlmsghdr followed by
    the packed attribute payload.

    struct genlmsghdr {
            __u8    cmd;
            __u8    version;
            __u16   reserved;
    };

    Parameters
    ----------
    cmd : int
        Generic netlink command.
    nlattr : Nlattr
        Attribute object; its packed bytes form the payload.
    version : int
        Interface version (defaults to the taskstats version).
    '''
    def __init__(self, cmd, nlattr, version=TASKSTATS_GENL_VERSION):
        self.cmd = cmd
        self.version = version
        self.nlattr = nlattr
        self.payload = nlattr.pack()
        # Total length: generic header plus the attribute's own length.
        self.genlen = GENL_HDRLEN + nlattr.nla_len

    def pack(self):
        '''Return the binary genlmsghdr followed by the attribute payload.'''
        return struct.pack('BBxx', self.cmd, self.version) + self.payload
class Controller(Connection):
    '''
    Controller class that establishes a generic netlink connection with
    family of the supplied 'genl_name'.

    Sends a CTRL_CMD_GETFAMILY request over NETLINK_GENERIC and caches the
    resolved numeric family id in `self.fam_id`.
    NOTE(review): `Nlmsg`, `Nlattr`, `parse_response`, `GENL_ID_CTRL`, the
    CTRL_ATTR_* constants and (apparently) `os` all come from the star
    import of `netlink` -- confirm.
    '''
    def __init__(self, genl_name):
        super(Controller, self).__init__(NETLINK_GENERIC)
        self.genl_name = genl_name
        # Pre-built GETFAMILY request carrying the family-name attribute.
        self.genlhdr = Genlmsg(CTRL_CMD_GETFAMILY, Nlattr(CTRL_ATTR_FAMILY_NAME,
                                                           self.genl_name))
        # Filled in (as a side effect) by parse_response() below.
        self.attrs = dict()
        self.pid = os.getpid()
        self.fam_id = self.get_family_id
    @property
    def get_family_id(self):
        # Ask the kernel controller for the numeric id of our family.
        nlmsg = Nlmsg(GENL_ID_CTRL, self.pid, self.genlhdr).pack()
        self.send(nlmsg)
        family_id_reply = self.recv()
        parse_response(self, family_id_reply)
        # CTRL_ATTR_FAMILY_ID is a native-endian unsigned 32-bit value.
        return struct.unpack('I', self.attrs[CTRL_ATTR_FAMILY_ID])[0]
| [
"struct.calcsize",
"struct.unpack",
"struct.pack",
"socket.socket"
] | [((1686, 1709), 'struct.calcsize', 'struct.calcsize', (['"""BBxx"""'], {}), "('BBxx')\n", (1701, 1709), False, 'import struct\n'), ((932, 989), 'socket.socket', 'socket.socket', (['socket.AF_NETLINK', 'socket.SOCK_RAW', 'family'], {}), '(socket.AF_NETLINK, socket.SOCK_RAW, family)\n', (945, 989), False, 'import socket\n'), ((2693, 2736), 'struct.pack', 'struct.pack', (['"""BBxx"""', 'self.cmd', 'self.version'], {}), "('BBxx', self.cmd, self.version)\n", (2704, 2736), False, 'import struct\n'), ((3560, 3611), 'struct.unpack', 'struct.unpack', (['"""I"""', 'self.attrs[CTRL_ATTR_FAMILY_ID]'], {}), "('I', self.attrs[CTRL_ATTR_FAMILY_ID])\n", (3573, 3611), False, 'import struct\n')] |
# Version: 2022.03.23
"""
**********************************************************************************************************************
* Copyright 2019 Amazon.com, Inc. or its affiliates. All Rights Reserved *
* *
* Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated *
* documentation files (the "Software"), to deal in the Software without restriction, including without limitation *
* the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and *
* to permit persons to whom the Software is furnished to do so. *
* *
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO *
* THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE *
* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF *
* CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS *
* IN THE SOFTWARE. *
**********************************************************************************************************************
"""
import json
import boto3
import os
import logging
logger = logging.getLogger()
logger.setLevel(logging.getLevelName(os.getenv('lambda_logging_level', 'INFO')))
def lambda_handler(event, context):
    """Submit an Amazon Transcribe job for every voicemail recording in the
    incoming S3 event.

    For each record: derive the contact id from the object key, read the
    object tags (per-voicemail settings such as language), build the
    recording URL, and start a transcription job writing into the configured
    transcripts bucket.
    """
    logger.debug(event)

    # Counts every record looked at, successful or not.
    loop_counter = 0

    for recording in event['Records']:
        loop_counter += 1

        # Pull the object key/bucket out of the event record.
        try:
            recording_key = recording['s3']['object']['key']
            recording_name = recording_key.replace('voicemail_recordings/', '')
            contact_id = recording_name.replace('.wav', '')
            recording_bucket = recording['s3']['bucket']['name']
        except Exception as e:
            logger.error(e)
            logger.debug(f'Record {loop_counter} Result: Failed to extract data from event')
            continue

        # Read the object's tags into a plain dict.
        try:
            s3_client = boto3.client('s3')
            object_data = s3_client.get_object_tagging(
                Bucket=recording_bucket,
                Key=recording_key
            )
            loaded_tags = {tag['Key']: tag['Value'] for tag in object_data['TagSet']}
        except Exception as e:
            logger.error(e)
            logger.debug(f'Record {loop_counter} Result: Failed to extract tags from object')
            continue

        # Build a URL that Transcribe can fetch the media from.
        try:
            recording_url = f"https://{recording_bucket}.s3-{recording['awsRegion']}.amazonaws.com/{recording_key}"
        except Exception as e:
            logger.error(e)
            logger.debug(f'Record {loop_counter} Result: Failed to generate recording URL')
            continue

        # Submit the transcription job.
        try:
            transcribe_client = boto3.client('transcribe')
            transcribe_response = transcribe_client.start_transcription_job(
                TranscriptionJobName=contact_id,
                LanguageCode=loaded_tags['vm_lang'],
                MediaFormat='wav',
                Media={
                    'MediaFileUri': recording_url
                },
                OutputBucketName=os.environ['s3_transcripts_bucket']
            )
        except Exception as e:
            logger.error(e)
            logger.debug(f'Record {loop_counter} Result: Transcription job failed')
            continue

        logger.debug(f'Record {loop_counter} Result: Success!')

    return {
        'status': 'complete',
        'result': f'{loop_counter} records processed'
    }
| [
"logging.getLogger",
"boto3.client",
"os.getenv"
] | [((1769, 1788), 'logging.getLogger', 'logging.getLogger', ([], {}), '()\n', (1786, 1788), False, 'import logging\n'), ((1826, 1867), 'os.getenv', 'os.getenv', (['"""lambda_logging_level"""', '"""INFO"""'], {}), "('lambda_logging_level', 'INFO')\n", (1835, 1867), False, 'import os\n'), ((2737, 2755), 'boto3.client', 'boto3.client', (['"""s3"""'], {}), "('s3')\n", (2749, 2755), False, 'import boto3\n'), ((3730, 3756), 'boto3.client', 'boto3.client', (['"""transcribe"""'], {}), "('transcribe')\n", (3742, 3756), False, 'import boto3\n')] |
from __future__ import print_function
import numpy as np
import argparse
import glob
import os
import errno
import math
import cv2
from random import shuffle
from shutil import copyfile
parser = argparse.ArgumentParser(
description="create training/test/validation sets from video list"
)
parser.add_argument("--videoListPath", type=str, help="path to videos", required=True)
parser.add_argument(
"--fpsSingle", type=int, help="fps for single frame processing", default=2
)
parser.add_argument(
"--numRecurrent", type=int, help="how many recurent steps", default=3
)
parser.add_argument(
"--fpsRecurrent", type=int, help="fps for reccurent part", default=24
)
parser.add_argument(
"--chapterTiming",
type=str,
help="start and end timing list for all chapters",
default="timingChapters.txt",
)
parser.add_argument("--name", type=str, help="run name", default="training")
parser.add_argument("--blacklist", type=str, help="ignore video", default="-1")
parser.add_argument(
"--whitelist",
type=str,
help="specifies list of selected videos, if not set all videos are selected",
default="-1",
)
args = parser.parse_args()
def silentremove(filename):
    """Delete *filename*; a missing file is silently ignored, any other
    OS error propagates."""
    try:
        os.remove(filename)
    except FileNotFoundError:
        # Equivalent to the classic errno.ENOENT check: only "no such file"
        # is swallowed, every other OSError subclass still raises.
        pass
def processChapter_cutlist(
    video,
    chap,
    origFramerate,
    timing,
    outputFileSingle,
    cutList,
    numRecurrent,
    fpsRecurrent,
):
    # Walk one chapter's ffmpeg frame log and append image names that are safe
    # for single-frame training: inside the chapter's [start, end] timing
    # window, not on a shot cut, and with enough following frames for the
    # recurrent window.
    videoNameSplit = video.split("/")
    videoName = videoNameSplit[-2]
    imgPathRel = videoName + "/chapter" + str(chap) + "/"
    modFrameFactorSingle = int(round(origFramerate / args.fpsSingle))
    stepRecurrent = int(round(origFramerate / fpsRecurrent))
    numRecurrent = (
        numRecurrent + stepRecurrent * 2
    )  # extra frames in case of flow estimation
    logFilename = video + "log" + str(chap) + ".txt"
    with open(logFilename, "r") as fp:
        with open(outputFileSingle, "a") as ofp_single:
            prevIdx = -1
            # iterate over log list
            for cnt, line in enumerate(fp):
                idx = line.find("pts_time:")
                if idx == -1:
                    continue
                pts_time = float(line[idx + 9 : idx + 9 + 7])
                idx2 = line.find("n:")
                frame_idx = int(line[idx2 + 2 : idx2 + 2 + 5]) + 1
                # use floor here to be on the save side
                if pts_time <= timing[0] or pts_time > math.floor(timing[1]):
                    continue
                # ignore if at cut position
                if pts_time in cutList:
                    continue
                # sequence already processed
                if frame_idx < prevIdx:
                    continue
                largerElemCutList = [
                    x for x in cutList if x > pts_time and x < timing[1]
                ]
                largerElemCutList.append(timing[1])
                cutTimeNext = min(largerElemCutList)
                # NOTE(review): smallerElemCutList is computed but never used below.
                smallerElemCutList = [
                    x for x in cutList if x < pts_time and x > timing[0]
                ]
                smallerElemCutList.append(timing[0])
                seqLength = (cutTimeNext - pts_time) * origFramerate
                # for long sequences jump to some point later in the same sequence
                jump = min(int(seqLength), origFramerate * 4)
                prevIdx = frame_idx + int(jump)
                # ignore if sequence to short
                if seqLength < numRecurrent * stepRecurrent:
                    continue
                imgFilename = {}
                # NOTE(review): `existing` is never set to False, so the guard
                # below can never skip a sequence.
                existing = True
                for ri in range(0, numRecurrent * stepRecurrent):
                    frame_recurr = int(frame_idx + ri + 1)
                    frame_str = str(frame_recurr).zfill(8)
                    if ri % stepRecurrent != 0:
                        continue
                    ri_rec = int(ri / stepRecurrent)
                    imgFilename[ri_rec] = "out" + frame_str
                if existing == False:
                    continue
                for ri in range(stepRecurrent * 2, numRecurrent):
                    if (ri - stepRecurrent * 2) % modFrameFactorSingle == 0:
                        ofp_single.write(imgPathRel + imgFilename[ri] + "\n")
def processShotFile(video, shotFile):
    """Parse an ffprobe shot-detection dump and return the list of cut
    timestamps (seconds) found in it."""
    cutList = []
    numFrames = 0
    with open(video + shotFile, "r") as fp:
        for line in fp:
            marker = line.find("pkt_pts_time=")
            if marker == -1:
                continue
            # Each matching line contributes one cut; the timestamp is the
            # 8 characters following "pkt_pts_time=".
            numFrames = numFrames + 1
            cutList.append(float(line[marker + 13 : marker + 13 + 8]))
    return cutList
def main():
    """Build the training list: for every selected video directory, parse its
    shot cuts and chapter timings, then append usable frame names for
    chapters 2..N-1 to the single-frame training file."""
    videoList = glob.glob(args.videoListPath + "*/")
    origFramerate = 24
    trainingSingleFile = (
        args.videoListPath
        + args.name
        + "_"
        + str(args.fpsSingle)
        + "fpsSingle_"
        + str(args.fpsRecurrent)
        + "fps_"
        + str(args.numRecurrent)
        + "frames"
        + "_single.txt"
    )
    silentremove(trainingSingleFile)
    for video in videoList:
        print(video)
        videoNameSplit = video.split("/")
        videoName = videoNameSplit[-2]
        # NOTE(review): `in` on a string is a substring test -- a video named
        # "ab" also matches a blacklist entry "abc"; confirm this is intended.
        if videoName in args.blacklist:
            print(videoName + " on blacklist")
            continue
        if args.whitelist != "-1" and videoName not in args.whitelist:
            print(videoName + " not on whitelist")
            continue
        print("processing " + videoName)
        cutList = processShotFile(video, "shots.txt")
        print(len(cutList))
        timingList = []
        with open(video + args.chapterTiming, "r") as fp:
            timingListTmp = fp.read().splitlines()
        for timingLine in timingListTmp:
            timingList.append([float(x) for x in timingLine.split(",")])
        chapterList = glob.glob(video + "log*.txt")
        numChapters = len(chapterList)
        # Skip the first and last chapters (intro/outro).
        validChapters = range(2, numChapters)
        trainingSet = validChapters
        for chap in trainingSet:
            processChapter_cutlist(
                video,
                chap,
                origFramerate,
                timingList[chap - 1],
                trainingSingleFile,
                cutList,
                args.numRecurrent,
                args.fpsRecurrent,
            )

# Runs immediately when the module is executed (or imported).
main()
| [
"math.floor",
"glob.glob",
"argparse.ArgumentParser",
"os.remove"
] | [((196, 292), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'description': '"""create training/test/validation sets from video list"""'}), "(description=\n 'create training/test/validation sets from video list')\n", (219, 292), False, 'import argparse\n'), ((4822, 4858), 'glob.glob', 'glob.glob', (["(args.videoListPath + '*/')"], {}), "(args.videoListPath + '*/')\n", (4831, 4858), False, 'import glob\n'), ((1215, 1234), 'os.remove', 'os.remove', (['filename'], {}), '(filename)\n', (1224, 1234), False, 'import os\n'), ((5978, 6007), 'glob.glob', 'glob.glob', (["(video + 'log*.txt')"], {}), "(video + 'log*.txt')\n", (5987, 6007), False, 'import glob\n'), ((2537, 2558), 'math.floor', 'math.floor', (['timing[1]'], {}), '(timing[1])\n', (2547, 2558), False, 'import math\n')] |
import json
import importlib
import os
import unittest
from unittest.mock import Mock
from moto import mock_dynamodb2
import boto3
from lambda_local.context import Context
os.environ['ENVIRONMENT'] = 'TEST'
os.environ['LOG_LEVEL'] = 'CRITICAL'
os.environ['DSA_KEY'] = 'TESTKEY'
class TestLapsed(unittest.TestCase):
    """Tests for lambda_lapsed, which removes ActionNetwork members that were
    present in the previous sync batch but are absent from the current one."""

    @mock_dynamodb2
    def test_in_both_is_noop(self):
        """A member processed in both batches must not be removed."""
        import lambda_lapsed
        from actionnetwork_activist_sync.actionnetwork import ActionNetwork
        from actionnetwork_activist_sync.state_model import State

        State.create_table(billing_mode='PAY_PER_REQUEST')

        # Same member in both the current and the previous batch.
        # (Bug fix: an unused `j_karl = json.dumps(...)` fixture was removed.)
        self.create_karl_state(State, lambda_lapsed.cur_batch, State.PROCESSED)
        self.create_karl_state(State, lambda_lapsed.prev_batch, State.PROCESSED)

        mock_an = Mock(ActionNetwork)
        lambda_lapsed.get_actionnetwork = lambda a: mock_an

        result = lambda_lapsed.lambda_handler({}, Context(5))

        self.assertEqual(result['removed'], 0)
        self.assertEqual(result['cur_count'], 1)
        self.assertEqual(result['prev_count'], 1)

    @mock_dynamodb2
    def test_not_in_cur_but_in_prev_gets_removed(self):
        """A member only in the previous batch triggers a removal call."""
        import lambda_lapsed
        from actionnetwork_activist_sync.actionnetwork import ActionNetwork
        from actionnetwork_activist_sync.state_model import State

        # this lets us make sure the mock gets called
        os.environ['DRY_RUN'] = '0'
        importlib.reload(lambda_lapsed)

        State.create_table(billing_mode='PAY_PER_REQUEST')

        self.create_friedrich_state(State, lambda_lapsed.cur_batch, State.PROCESSED)
        self.create_karl_state(State, lambda_lapsed.prev_batch, State.PROCESSED)

        mock_an = Mock(ActionNetwork)
        mock_an.remove_member_by_email = Mock()
        lambda_lapsed.get_actionnetwork = lambda a: mock_an

        result = lambda_lapsed.lambda_handler({}, Context(5))

        mock_an.remove_member_by_email.assert_called_once_with(
            '<EMAIL>'
        )
        self.assertEqual(result['removed'], 1)
        self.assertEqual(result['cur_count'], 1)
        self.assertEqual(result['prev_count'], 1)

        del os.environ['DRY_RUN']

    def create_karl_state(self, State, batch, status):
        """Insert and return a State row for the 'Karl' fixture member."""
        state = State(
            batch,
            '<EMAIL>',
            raw=json.dumps({
                'Email': '<EMAIL>',
                'firstname': 'Karl',
                'lastname': 'Marx'
            }),
            status=status
        )
        state.save()
        return state

    def create_friedrich_state(self, State, batch, status):
        """Insert and return a State row for the 'Friedrich' fixture member."""
        state = State(
            batch,
            '<EMAIL>',
            raw=json.dumps({
                'Email': '<EMAIL>',
                'firstname': 'Friedrich',
                'lastname': 'Engels'
            }),
            status=status
        )
        state.save()
        return state
| [
"unittest.mock.Mock",
"json.dumps",
"actionnetwork_activist_sync.state_model.State.create_table",
"lambda_local.context.Context",
"importlib.reload"
] | [((555, 605), 'actionnetwork_activist_sync.state_model.State.create_table', 'State.create_table', ([], {'billing_mode': '"""PAY_PER_REQUEST"""'}), "(billing_mode='PAY_PER_REQUEST')\n", (573, 605), False, 'from actionnetwork_activist_sync.state_model import State\n'), ((624, 697), 'json.dumps', 'json.dumps', (["{'Email': '<EMAIL>', 'firstname': 'Karl', 'lastname': 'Marx'}"], {}), "({'Email': '<EMAIL>', 'firstname': 'Karl', 'lastname': 'Marx'})\n", (634, 697), False, 'import json\n'), ((937, 956), 'unittest.mock.Mock', 'Mock', (['ActionNetwork'], {}), '(ActionNetwork)\n', (941, 956), False, 'from unittest.mock import Mock\n'), ((1574, 1605), 'importlib.reload', 'importlib.reload', (['lambda_lapsed'], {}), '(lambda_lapsed)\n', (1590, 1605), False, 'import importlib\n'), ((1615, 1665), 'actionnetwork_activist_sync.state_model.State.create_table', 'State.create_table', ([], {'billing_mode': '"""PAY_PER_REQUEST"""'}), "(billing_mode='PAY_PER_REQUEST')\n", (1633, 1665), False, 'from actionnetwork_activist_sync.state_model import State\n'), ((1852, 1871), 'unittest.mock.Mock', 'Mock', (['ActionNetwork'], {}), '(ActionNetwork)\n', (1856, 1871), False, 'from unittest.mock import Mock\n'), ((1913, 1919), 'unittest.mock.Mock', 'Mock', ([], {}), '()\n', (1917, 1919), False, 'from unittest.mock import Mock\n'), ((1068, 1078), 'lambda_local.context.Context', 'Context', (['(5)'], {}), '(5)\n', (1075, 1078), False, 'from lambda_local.context import Context\n'), ((2031, 2041), 'lambda_local.context.Context', 'Context', (['(5)'], {}), '(5)\n', (2038, 2041), False, 'from lambda_local.context import Context\n'), ((2458, 2531), 'json.dumps', 'json.dumps', (["{'Email': '<EMAIL>', 'firstname': 'Karl', 'lastname': 'Marx'}"], {}), "({'Email': '<EMAIL>', 'firstname': 'Karl', 'lastname': 'Marx'})\n", (2468, 2531), False, 'import json\n'), ((2815, 2900), 'json.dumps', 'json.dumps', (["{'Email': '<EMAIL>', 'firstname': 'Friedrich', 'lastname': 'Engels'}"], {}), "({'Email': '<EMAIL>', 
'firstname': 'Friedrich', 'lastname': 'Engels'}\n )\n", (2825, 2900), False, 'import json\n')] |
"""
This script tests the classes and functions from motif_finder.py.
Parameters
----------
None
Returns
-------
Assertion errors if tests fail
"""
import sys
import random
import pickle
import networkx as nx
from github_analysis.big_cloud_scratch import git_graph
from github_analysis.data_layer import getCommitsByProjectIds
from github_analysis.cluster import get_embedding_clusters
from github_analysis.motif_finder import *
# Module-level fixtures: cluster the project embeddings, build a commit graph
# for cluster 0, and wrap it in a MotifFinder.  NOTE(review): this runs at
# import time and requires the project's data to be available; the hard-coded
# node ids below (355738534, 139371373) presumably exist in that dataset --
# confirm.
clusters = get_embedding_clusters(random_state=0)
projects_cluster = getCommitsByProjectIds(clusters[0])
G = git_graph(projects_cluster)
mf = MotifFinder(G)
# Unit tests
def test_main_output_type():
    pass
def test_sample_initial_node_output_type():
    """Check that MotifFinder.sample_initial_node outputs an integer."""
    assert type(mf.sample_initial_node()) == int
def test_sample_initial_node_output():
    """Check that MotifFinder.sample_initial_node outputs a node in the given graph."""
    assert mf.sample_initial_node() in G
def test_get_random_child_output_type():
    """Check that MotifFinder.get_random_child outputs an integer."""
    assert type(mf.get_random_child(355738534)) == int
def test_get_random_child_no_children():
    """Check that MotifFinder.get_random_child outputs None if there are no children."""
    assert mf.get_random_child(139371373) is None
def test_get_random_child_output():
    """Check that MotifFinder.get_random_child outputs a child of the node its been given."""
    initial_node = mf.sample_initial_node()
    child = mf.get_random_child(initial_node)
    assert child in G.successors(initial_node)
def test_get_sample_motif_bad_input():
    """Check that MotifFinder.get_sample_motif raises an error when not given an integer for the k param."""
    try:
        mf.get_sample_motif('5')
    except TypeError:
        return True
    # Reached only if no TypeError was raised above.
    raise TypeError
def test_get_sample_motif_output_type():
    """Check that MotifFinder.get_sample_motif outputs a networkx directed graph."""
    assert type(mf.get_sample_motif(5)) == nx.classes.digraph.DiGraph
def test_get_sample_motif_output():
    """Check that MotifFinder.get_sample_motif outputs a networkx directed graph that is a subgraph of G."""
    subgraph = mf.get_sample_motif(5)
    for node in subgraph:
        if node in G:
            continue
        else:
            raise ValueError('Subgraph doesnt contain same nodes as graph')
def test_get_motif_samples_bad_input():
    """Check that MotifFinder.get_motif_samples raises an error when not given an integer for the k and num_samples
    param."""
    try:
        mf.get_motif_samples('5', '5')
    except TypeError:
        return True
    raise TypeError
def test_get_motif_samples_output_type():
    """Check that MotifFinder.get_sample_motif outputs a dictionary."""
    assert type(mf.get_motif_samples(5,5)) == dict
def test_get_motifs_by_cluster_output_type():
    assert type(get_motifs_by_cluster(clusters)) == dict
# def test_get_motifs
# def test_get_motifs | [
"github_analysis.big_cloud_scratch.git_graph",
"github_analysis.data_layer.getCommitsByProjectIds",
"github_analysis.cluster.get_embedding_clusters"
] | [((445, 483), 'github_analysis.cluster.get_embedding_clusters', 'get_embedding_clusters', ([], {'random_state': '(0)'}), '(random_state=0)\n', (467, 483), False, 'from github_analysis.cluster import get_embedding_clusters\n'), ((503, 538), 'github_analysis.data_layer.getCommitsByProjectIds', 'getCommitsByProjectIds', (['clusters[0]'], {}), '(clusters[0])\n', (525, 538), False, 'from github_analysis.data_layer import getCommitsByProjectIds\n'), ((543, 570), 'github_analysis.big_cloud_scratch.git_graph', 'git_graph', (['projects_cluster'], {}), '(projects_cluster)\n', (552, 570), False, 'from github_analysis.big_cloud_scratch import git_graph\n')] |
import sys
import psutil
from pyinjector import inject
# Expect exactly: script name, target process name, shared-library path.
if len(sys.argv) != 3:
    print("Usage: python inject.py <process-name> <shared-library>")
    exit()
_, process_name, shared_library = sys.argv
# Scan running processes and inject into the first one whose name matches.
target = None
for candidate in psutil.process_iter():
    if candidate.name() == process_name:
        target = candidate
        break
if target is not None:
    print(f"Found {process_name} - injecting {shared_library} into PID {target.pid}")
    inject(target.pid, shared_library)
    print("Injected successfully")
    exit()
print(f"Unable to find process named {process_name}")
print(f"Unable to find process named {process_name}") | [
"pyinjector.inject",
"psutil.process_iter"
] | [((219, 240), 'psutil.process_iter', 'psutil.process_iter', ([], {}), '()\n', (238, 240), False, 'import psutil\n'), ((380, 415), 'pyinjector.inject', 'inject', (['process.pid', 'shared_library'], {}), '(process.pid, shared_library)\n', (386, 415), False, 'from pyinjector import inject\n')] |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Mon Jul 1 15:10:45 2019
@author: r17935avinash
"""
################################ IMPORT LIBRARIES ###############################################################
import torch
import numpy as np
import pykp.io
import torch.nn as nn
from utils.statistics import RewardStatistics
from utils.time_log import time_since
import time
from sequence_generator import SequenceGenerator
from utils.report import export_train_and_valid_loss, export_train_and_valid_reward
import sys
import logging
import os
from evaluate import evaluate_reward
from pykp.reward import *
import math
EPS = 1e-8
import argparse
import config
import logging
import os
import json
from pykp.io import KeyphraseDataset
from pykp.model import Seq2SeqModel
from torch.optim import Adam
import pykp
from pykp.model import Seq2SeqModel
import train_ml
import train_rl
from utils.time_log import time_since
from utils.data_loader import load_data_and_vocab
from utils.string_helper import convert_list_to_kphs
import time
import numpy as np
import random
from torch import device
from hierarchal_attention_Discriminator import Discriminator
from torch.nn import functional as F
#####################################################################################################
#def Check_Valid_Loss(valid_data_loader,D_model,batch,generator,opt,perturb_std):
##### TUNE HYPERPARAMETERS ##############
## batch_reward_stat, log_selected_token_dist = train_one_batch(batch, generator, optimizer_rl, opt, perturb_std)
#########################################################
def train_one_batch(D_model, one2many_batch, generator, opt, perturb_std):
    """Score one batch with the hierarchical-attention discriminator.

    Samples keyphrases from the generator for each source document, encodes
    both the ground-truth and the sampled keyphrases with the discriminator,
    and returns the combined real+fake loss plus the per-class reward tensors.

    Returns:
        (avg_batch_loss, avg_real, avg_fake)

    (Cleanup: removed ~10 locals that were assigned but never read, e.g.
    `start_time`, `topk`, `baseline`, `batch_mine`, `total_abstract_loss`.)
    """
    src, src_lens, src_mask, src_oov, oov_lists, src_str_list, trg_str_2dlist, trg, trg_oov, trg_lens, trg_mask, _, title, title_oov, title_lens, title_mask = one2many_batch
    one2many = opt.one2many
    one2many_mode = opt.one2many_mode
    if one2many and one2many_mode > 1:
        num_predictions = opt.num_predictions
    else:
        num_predictions = 1

    # Move tensors onto the configured device when CUDA is available.
    if torch.cuda.is_available():
        src = src.to(opt.device)
        src_mask = src_mask.to(opt.device)
        src_oov = src_oov.to(opt.device)
        if opt.title_guided:
            title = title.to(opt.device)
            title_mask = title_mask.to(opt.device)

    eos_idx = opt.word2idx[pykp.io.EOS_WORD]
    delimiter_word = opt.delimiter_word
    devices = opt.device
    # regularization_type == 2 selects entropy regularization during sampling.
    entropy_regularize = opt.regularization_type == 2

    # Sample keyphrase sequences from the generator (non-greedy).
    sample_list, log_selected_token_dist, output_mask, pred_eos_idx_mask, entropy, location_of_eos_for_each_batch, location_of_peos_for_each_batch = generator.sample(
        src, src_lens, src_oov, src_mask, oov_lists, opt.max_length, greedy=False, one2many=one2many,
        one2many_mode=one2many_mode, num_predictions=num_predictions, perturb_std=perturb_std, entropy_regularize=entropy_regularize, title=title, title_lens=title_lens, title_mask=title_mask)
    # Decode the sampled ids into keyphrase strings (handles OOV copy words).
    pred_str_2dlist = sample_list_to_str_2dlist(sample_list, oov_lists, opt.idx2word, opt.vocab_size, eos_idx, delimiter_word, opt.word2idx[pykp.io.UNK_WORD], opt.replace_unk,
                                              src_str_list, opt.separate_present_absent, pykp.io.PEOS_WORD)
    target_str_2dlist = convert_list_to_kphs(trg)

    abstract_t = torch.Tensor([]).to(devices)
    abstract_f = torch.Tensor([]).to(devices)
    kph_t = torch.Tensor([]).to(devices)
    kph_f = torch.Tensor([]).to(devices)
    h_kph_t_size = 0
    h_kph_f_size = 0
    len_list_t, len_list_f = [], []

    # First pass: find the maximum keyphrase-sequence length over the batch so
    # every sample can be zero-padded to a common size before concatenation.
    for src_list, pred_str_list, target_str_list in zip(src, pred_str_2dlist, target_str_2dlist):
        # Skip examples with no real or no generated keyphrases.
        if len(target_str_list) == 0 or len(pred_str_list) == 0:
            continue
        _, h_kph_t = D_model.get_hidden_states(src_list, target_str_list)
        _, h_kph_f = D_model.get_hidden_states(src_list, pred_str_list)
        len_list_t.append(h_kph_t.size(1))
        len_list_f.append(h_kph_f.size(1))
        h_kph_t_size = max(h_kph_t_size, h_kph_t.size(1))
        h_kph_f_size = max(h_kph_f_size, h_kph_f.size(1))

    # Second pass: recompute the hidden states, pad the keyphrase encodings to
    # the batch maximum, and stack everything for the discriminator.
    for src_list, pred_str_list, target_str_list in zip(src, pred_str_2dlist, target_str_2dlist):
        if len(target_str_list) == 0 or len(pred_str_list) == 0:
            continue
        h_abstract_t, h_kph_t = D_model.get_hidden_states(src_list, target_str_list)
        h_abstract_f, h_kph_f = D_model.get_hidden_states(src_list, pred_str_list)
        p1d = (0, 0, 0, h_kph_t_size - h_kph_t.size(1))
        p2d = (0, 0, 0, h_kph_f_size - h_kph_f.size(1))
        h_kph_t = F.pad(h_kph_t, p1d)
        h_kph_f = F.pad(h_kph_f, p2d)
        abstract_t = torch.cat((abstract_t, h_abstract_t), dim=0)
        abstract_f = torch.cat((abstract_f, h_abstract_f), dim=0)
        kph_t = torch.cat((kph_t, h_kph_t), dim=0)
        kph_f = torch.cat((kph_f, h_kph_f), dim=0)

    # Discriminator scores: label 1 for real keyphrases, 0 for generated ones.
    _, real_rewards, abstract_loss_real = D_model.calculate_context(abstract_t, kph_t, 1, len_list_t)
    _, fake_rewards, abstract_loss_fake = D_model.calculate_context(abstract_f, kph_f, 0, len_list_f)
    avg_batch_loss = abstract_loss_real + abstract_loss_fake
    avg_real = real_rewards
    avg_fake = fake_rewards
    return avg_batch_loss, avg_real, avg_fake
def main(opt):
    """Adversarial training loop for the keyphrase Discriminator.

    Loads the data and a pre-trained Seq2Seq generator, builds the
    hierarchical-attention Discriminator, then alternates training batches
    with periodic validation; Discriminator weights are checkpointed whenever
    the validation loss improves.

    Bug fixes vs. the previous revision:
      * `best_valid_loss` was re-initialized every batch, so every validation
        pass "improved" and checkpointed -- it is now tracked globally.
      * `total_batch` was never incremented, freezing the perturbation-decay
        schedule at step 0.
      * gradient clipping ran before `backward()` (no gradients exist yet).
      * a fresh (non-pretrained) Discriminator on CPU wrongly loaded the
        pretrained checkpoint.
    """
    clip = 5  # max gradient norm for the Discriminator
    start_time = time.time()
    train_data_loader, valid_data_loader, word2idx, idx2word, vocab = load_data_and_vocab(opt, load_train=True)
    load_data_time = time_since(start_time)
    logging.info('Time for loading the data: %.1f' % load_data_time)
    print("Data Successfully Loaded __.__.__.__.__.__.__.__.__.__.__.__.__.__.")

    # Load the (frozen) generator model.
    model = Seq2SeqModel(opt)
    if torch.cuda.is_available():
        model.load_state_dict(torch.load(opt.model_path))
        model = model.to(opt.gpuid)
    else:
        model.load_state_dict(torch.load(opt.model_path, map_location="cpu"))
    print("___________________ Generator Initialised and Loaded _________________________")
    generator = SequenceGenerator(model,
                                  bos_idx=opt.word2idx[pykp.io.BOS_WORD],
                                  eos_idx=opt.word2idx[pykp.io.EOS_WORD],
                                  pad_idx=opt.word2idx[pykp.io.PAD_WORD],
                                  peos_idx=opt.word2idx[pykp.io.PEOS_WORD],
                                  beam_size=1,
                                  max_sequence_length=opt.max_length,
                                  copy_attn=opt.copy_attention,
                                  coverage_attn=opt.coverage_attn,
                                  review_attn=opt.review_attn,
                                  cuda=opt.gpuid > -1
                                  )

    init_perturb_std = opt.init_perturb_std
    final_perturb_std = opt.final_perturb_std
    perturb_decay_factor = opt.perturb_decay_factor
    perturb_decay_mode = opt.perturb_decay_mode

    # Build the Discriminator.
    hidden_dim = opt.D_hidden_dim
    embedding_dim = opt.D_embedding_dim
    n_layers = opt.D_layers
    D_model = Discriminator(opt.vocab_size, embedding_dim, hidden_dim, n_layers, opt.word2idx[pykp.io.PAD_WORD])
    print("The Discriminator Description is ", D_model)

    if opt.pretrained_Discriminator:
        if torch.cuda.is_available():
            D_model.load_state_dict(torch.load(opt.Discriminator_model_path))
            D_model = D_model.to(opt.gpuid)
        else:
            D_model.load_state_dict(torch.load(opt.Discriminator_model_path, map_location="cpu"))
    else:
        # A fresh Discriminator must not load the pretrained checkpoint;
        # it only needs to be moved to the right device.
        if torch.cuda.is_available():
            D_model = D_model.to(opt.gpuid)
    D_optimizer = torch.optim.Adam(D_model.parameters(), opt.learning_rate)

    print("Beginning with training Discriminator")
    print("########################################################################################################")
    total_epochs = 5
    total_batch = 0          # global batch counter driving the perturbation decay
    best_valid_loss = 1000   # tracked across batches AND epochs
    for epoch in range(total_epochs):
        print("Starting with epoch:", epoch)
        for batch_i, batch in enumerate(train_data_loader):
            total_batch += 1
            D_model.train()
            D_optimizer.zero_grad()
            # Perturbation-std schedule for the generator's sampling.
            if perturb_decay_mode == 0:  # do not decay
                perturb_std = init_perturb_std
            elif perturb_decay_mode == 1:  # exponential decay
                perturb_std = final_perturb_std + (init_perturb_std - final_perturb_std) * math.exp(-1. * total_batch * perturb_decay_factor)
            elif perturb_decay_mode == 2:  # steps decay
                perturb_std = init_perturb_std * math.pow(perturb_decay_factor, math.floor((1 + total_batch) / 4000))
            avg_batch_loss, _, _ = train_one_batch(D_model, batch, generator, opt, perturb_std)
            # Gradients only exist after backward(), so clip then, not before.
            avg_batch_loss.backward()
            torch.nn.utils.clip_grad_norm_(D_model.parameters(), clip)
            D_optimizer.step()
            D_model.eval()
            # Periodic validation + checkpointing.
            if batch_i % 4000 == 0:
                total = 0
                valid_loss_total, valid_real_total, valid_fake_total = 0, 0, 0
                for batch_j, valid_batch in enumerate(valid_data_loader):
                    total += 1
                    valid_loss, valid_real, valid_fake = train_one_batch(D_model, valid_batch, generator, opt, perturb_std)
                    valid_loss_total += valid_loss.cpu().detach().numpy()
                    valid_real_total += valid_real.cpu().detach().numpy()
                    valid_fake_total += valid_fake.cpu().detach().numpy()
                    # Discard any gradients accumulated during validation.
                    D_optimizer.zero_grad()
                print("Currently loss is ", valid_loss_total.item() / total)
                print("Currently real loss is ", valid_real_total.item() / total)
                print("Currently fake loss is ", valid_fake_total.item() / total)
                if best_valid_loss > valid_loss_total.item() / total:
                    print("Loss Decreases so saving the file ...............----------->>>>>")
                    state_dfs = D_model.state_dict()
                    torch.save(state_dfs, "Discriminator_checkpts/Attention_Disriminator_" + str(epoch) + ".pth.tar")
                    best_valid_loss = valid_loss_total.item() / total
    ######################################
| [
"utils.string_helper.convert_list_to_kphs",
"sequence_generator.SequenceGenerator",
"hierarchal_attention_Discriminator.Discriminator",
"math.floor",
"torch.load",
"torch.Tensor",
"logging.info",
"torch.cuda.is_available",
"pykp.model.Seq2SeqModel",
"utils.time_log.time_since",
"torch.nn.functio... | [((2551, 2576), 'torch.cuda.is_available', 'torch.cuda.is_available', ([], {}), '()\n', (2574, 2576), False, 'import torch\n'), ((3352, 3363), 'time.time', 'time.time', ([], {}), '()\n', (3361, 3363), False, 'import time\n'), ((4134, 4159), 'utils.string_helper.convert_list_to_kphs', 'convert_list_to_kphs', (['trg'], {}), '(trg)\n', (4154, 4159), False, 'from utils.string_helper import convert_list_to_kphs\n'), ((6427, 6438), 'time.time', 'time.time', ([], {}), '()\n', (6436, 6438), False, 'import time\n'), ((6509, 6550), 'utils.data_loader.load_data_and_vocab', 'load_data_and_vocab', (['opt'], {'load_train': '(True)'}), '(opt, load_train=True)\n', (6528, 6550), False, 'from utils.data_loader import load_data_and_vocab\n'), ((6572, 6594), 'utils.time_log.time_since', 'time_since', (['start_time'], {}), '(start_time)\n', (6582, 6594), False, 'from utils.time_log import time_since\n'), ((6599, 6663), 'logging.info', 'logging.info', (["('Time for loading the data: %.1f' % load_data_time)"], {}), "('Time for loading the data: %.1f' % load_data_time)\n", (6611, 6663), False, 'import logging\n'), ((6762, 6779), 'pykp.model.Seq2SeqModel', 'Seq2SeqModel', (['opt'], {}), '(opt)\n', (6774, 6779), False, 'from pykp.model import Seq2SeqModel\n'), ((6828, 6853), 'torch.cuda.is_available', 'torch.cuda.is_available', ([], {}), '()\n', (6851, 6853), False, 'import torch\n'), ((7149, 7519), 'sequence_generator.SequenceGenerator', 'SequenceGenerator', (['model'], {'bos_idx': 'opt.word2idx[pykp.io.BOS_WORD]', 'eos_idx': 'opt.word2idx[pykp.io.EOS_WORD]', 'pad_idx': 'opt.word2idx[pykp.io.PAD_WORD]', 'peos_idx': 'opt.word2idx[pykp.io.PEOS_WORD]', 'beam_size': '(1)', 'max_sequence_length': 'opt.max_length', 'copy_attn': 'opt.copy_attention', 'coverage_attn': 'opt.coverage_attn', 'review_attn': 'opt.review_attn', 'cuda': '(opt.gpuid > -1)'}), '(model, bos_idx=opt.word2idx[pykp.io.BOS_WORD], eos_idx=\n opt.word2idx[pykp.io.EOS_WORD], 
pad_idx=opt.word2idx[pykp.io.PAD_WORD],\n peos_idx=opt.word2idx[pykp.io.PEOS_WORD], beam_size=1,\n max_sequence_length=opt.max_length, copy_attn=opt.copy_attention,\n coverage_attn=opt.coverage_attn, review_attn=opt.review_attn, cuda=opt.\n gpuid > -1)\n', (7166, 7519), False, 'from sequence_generator import SequenceGenerator\n'), ((8185, 8288), 'hierarchal_attention_Discriminator.Discriminator', 'Discriminator', (['opt.vocab_size', 'embedding_dim', 'hidden_dim', 'n_layers', 'opt.word2idx[pykp.io.PAD_WORD]'], {}), '(opt.vocab_size, embedding_dim, hidden_dim, n_layers, opt.\n word2idx[pykp.io.PAD_WORD])\n', (8198, 8288), False, 'from hierarchal_attention_Discriminator import Discriminator\n'), ((5736, 5755), 'torch.nn.functional.pad', 'F.pad', (['h_kph_t', 'p1d'], {}), '(h_kph_t, p1d)\n', (5741, 5755), True, 'from torch.nn import functional as F\n'), ((5773, 5792), 'torch.nn.functional.pad', 'F.pad', (['h_kph_f', 'p2d'], {}), '(h_kph_f, p2d)\n', (5778, 5792), True, 'from torch.nn import functional as F\n'), ((5813, 5857), 'torch.cat', 'torch.cat', (['(abstract_t, h_abstract_t)'], {'dim': '(0)'}), '((abstract_t, h_abstract_t), dim=0)\n', (5822, 5857), False, 'import torch\n'), ((5877, 5921), 'torch.cat', 'torch.cat', (['(abstract_f, h_abstract_f)'], {'dim': '(0)'}), '((abstract_f, h_abstract_f), dim=0)\n', (5886, 5921), False, 'import torch\n'), ((5936, 5970), 'torch.cat', 'torch.cat', (['(kph_t, h_kph_t)'], {'dim': '(0)'}), '((kph_t, h_kph_t), dim=0)\n', (5945, 5970), False, 'import torch\n'), ((5985, 6019), 'torch.cat', 'torch.cat', (['(kph_f, h_kph_f)'], {'dim': '(0)'}), '((kph_f, h_kph_f), dim=0)\n', (5994, 6019), False, 'import torch\n'), ((8389, 8414), 'torch.cuda.is_available', 'torch.cuda.is_available', ([], {}), '()\n', (8412, 8414), False, 'import torch\n'), ((8672, 8697), 'torch.cuda.is_available', 'torch.cuda.is_available', ([], {}), '()\n', (8695, 8697), False, 'import torch\n'), ((4399, 4415), 'torch.Tensor', 'torch.Tensor', (['[]'], {}), '([])\n', 
(4411, 4415), False, 'import torch\n'), ((4445, 4461), 'torch.Tensor', 'torch.Tensor', (['[]'], {}), '([])\n', (4457, 4461), False, 'import torch\n'), ((4486, 4502), 'torch.Tensor', 'torch.Tensor', (['[]'], {}), '([])\n', (4498, 4502), False, 'import torch\n'), ((4527, 4543), 'torch.Tensor', 'torch.Tensor', (['[]'], {}), '([])\n', (4539, 4543), False, 'import torch\n'), ((6885, 6911), 'torch.load', 'torch.load', (['opt.model_path'], {}), '(opt.model_path)\n', (6895, 6911), False, 'import torch\n'), ((6989, 7035), 'torch.load', 'torch.load', (['opt.model_path'], {'map_location': '"""cpu"""'}), "(opt.model_path, map_location='cpu')\n", (6999, 7035), False, 'import torch\n'), ((8453, 8493), 'torch.load', 'torch.load', (['opt.Discriminator_model_path'], {}), '(opt.Discriminator_model_path)\n', (8463, 8493), False, 'import torch\n'), ((8589, 8649), 'torch.load', 'torch.load', (['opt.Discriminator_model_path'], {'map_location': '"""cpu"""'}), "(opt.Discriminator_model_path, map_location='cpu')\n", (8599, 8649), False, 'import torch\n'), ((8794, 8854), 'torch.load', 'torch.load', (['opt.Discriminator_model_path'], {'map_location': '"""cpu"""'}), "(opt.Discriminator_model_path, map_location='cpu')\n", (8804, 8854), False, 'import torch\n'), ((9663, 9714), 'math.exp', 'math.exp', (['(-1.0 * total_batch * perturb_decay_factor)'], {}), '(-1.0 * total_batch * perturb_decay_factor)\n', (9671, 9714), False, 'import math\n'), ((9851, 9887), 'math.floor', 'math.floor', (['((1 + total_batch) / 4000)'], {}), '((1 + total_batch) / 4000)\n', (9861, 9887), False, 'import math\n')] |
import os
import sys
import json
import argparse
import numpy as np
sys.path.append('Camera_Intrinsics_API/')
from get_camera_intrinsics import CameraIntrinsicsHelper
if __name__=='__main__':
    # Aggregate per-scan camera intrinsics from COLMAP reconstructions of
    # Ego4D VQ3D videos and write the medians to a JSON lookup table.
    parser = argparse.ArgumentParser()
    parser.add_argument(
        "--input_dir",
        type=str,
        default='data/videos_sfm/',
        help="COLMAP output folder of videos",
    )
    parser.add_argument(
        "--input_dir_greedy",
        type=str,
        default='data/videos_sfm_greedy/',
        help="Folder for the COLMAP outputs - greedy.",
    )
    parser.add_argument(
        "--annotation_dir",
        type=str,
        default='data/v1/annotations/',
        help="annotation folder. Must contain the vq3d_<split>.json files.",
    )
    parser.add_argument(
        "--output_filename",
        type=str,
        default='data/v1/scan_to_intrinsics.json',
    )
    args = parser.parse_args()

    # Build the video_uid -> scan_uid mapping from the train/val annotations.
    dataset = {}
    for split in ['train', 'val']:
        annotation_path = os.path.join(args.annotation_dir, f'vq3d_{split}.json')
        with open(annotation_path, 'r') as f:
            a = json.load(f)
        for video in a['videos']:
            dataset[video['video_uid']] = video['scan_uid']

    helper = CameraIntrinsicsHelper()

    def accumulate(scan_uid, intrinsic_txt):
        """Parse one COLMAP cameras.txt and bucket its parameters by scan and resolution."""
        intrinsics = helper.parse_colmap_intrinsics(intrinsic_txt)
        token = (intrinsics['width'], intrinsics['height'])
        all_intrinsics.setdefault(scan_uid, {}).setdefault(token, []).append(
            (
                intrinsics['f'],
                intrinsics['cx'],
                intrinsics['cy'],
                intrinsics['k1'],
                intrinsics['k2'],
            )
        )

    cpt = 0  # number of videos with no cameras.txt in either reconstruction folder
    all_intrinsics = {}
    for video_uid in os.listdir(args.input_dir):
        scan_uid = dataset[video_uid]
        intrinsic_txt = os.path.join(args.input_dir,
                                     video_uid,
                                     'sparse',
                                     '0',
                                     'cameras.txt')
        if not os.path.isfile(intrinsic_txt):
            # Fall back to the greedy reconstruction folder.
            intrinsic_txt = os.path.join(args.input_dir_greedy,
                                         video_uid,
                                         'sparse',
                                         '0',
                                         'cameras.txt')
        if os.path.isfile(intrinsic_txt):
            accumulate(scan_uid, intrinsic_txt)
        else:
            cpt += 1

    # Median of each parameter across all videos of the same scan/resolution.
    outputs = {}
    for scan_uid, d in all_intrinsics.items():
        print(' ')
        print('Scan uid: ', scan_uid)
        outputs[scan_uid] = {}
        for resolution, v in d.items():
            print(' -- resolution: ', resolution)
            resolution_str = str(resolution)
            outputs[scan_uid][resolution_str] = {
                'f': np.median([float(i[0]) for i in v]),
                'cx': np.median([float(i[1]) for i in v]),
                'cy': np.median([float(i[2]) for i in v]),
                'k1': np.median([float(i[3]) for i in v]),
                'k2': np.median([float(i[4]) for i in v]),
            }
            for i in v:
                print(' -- -- -- : ', i)
            print(' ')
            print(' -- -- -- : ',
                  outputs[scan_uid][resolution_str]['f'],
                  outputs[scan_uid][resolution_str]['cx'],
                  outputs[scan_uid][resolution_str]['cy'],
                  outputs[scan_uid][resolution_str]['k1'],
                  outputs[scan_uid][resolution_str]['k2'],
                  )

    # BUG FIX: the original called json.dump(outputs, open(output_filename, 'w'))
    # with the undefined name `output_filename` (NameError at runtime); it also
    # leaked the file handle. Use the parsed argument and a context manager.
    with open(args.output_filename, 'w') as f:
        json.dump(outputs, f)
| [
"os.listdir",
"argparse.ArgumentParser",
"os.path.join",
"os.path.isfile",
"get_camera_intrinsics.CameraIntrinsicsHelper",
"sys.path.append"
] | [((69, 110), 'sys.path.append', 'sys.path.append', (['"""Camera_Intrinsics_API/"""'], {}), "('Camera_Intrinsics_API/')\n", (84, 110), False, 'import sys\n'), ((208, 233), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {}), '()\n', (231, 233), False, 'import argparse\n'), ((1273, 1297), 'get_camera_intrinsics.CameraIntrinsicsHelper', 'CameraIntrinsicsHelper', ([], {}), '()\n', (1295, 1297), False, 'from get_camera_intrinsics import CameraIntrinsicsHelper\n'), ((1421, 1440), 'os.listdir', 'os.listdir', (['datadir'], {}), '(datadir)\n', (1431, 1440), False, 'import os\n'), ((1502, 1564), 'os.path.join', 'os.path.join', (['datadir', 'video_uid', '"""sparse"""', '"""0"""', '"""cameras.txt"""'], {}), "(datadir, video_uid, 'sparse', '0', 'cameras.txt')\n", (1514, 1564), False, 'import os\n'), ((1730, 1759), 'os.path.isfile', 'os.path.isfile', (['intrinsic_txt'], {}), '(intrinsic_txt)\n', (1744, 1759), False, 'import os\n'), ((1789, 1853), 'os.path.join', 'os.path.join', (['datadir_2', 'video_uid', '"""sparse"""', '"""0"""', '"""cameras.txt"""'], {}), "(datadir_2, video_uid, 'sparse', '0', 'cameras.txt')\n", (1801, 1853), False, 'import os\n'), ((1002, 1057), 'os.path.join', 'os.path.join', (['args.annotation_dir', 'f"""vq3d_{split}.json"""'], {}), "(args.annotation_dir, f'vq3d_{split}.json')\n", (1014, 1057), False, 'import os\n'), ((2039, 2068), 'os.path.isfile', 'os.path.isfile', (['intrinsic_txt'], {}), '(intrinsic_txt)\n', (2053, 2068), False, 'import os\n')] |
import codecs
import os
from setuptools import find_packages, setup
def long_description():
    """Return the contents of README.md, or '' when the file is absent or unreadable."""
    readable = os.path.isfile('README.md') and os.access('README.md', os.R_OK)
    if not readable:
        return ''
    with codecs.open('README.md', encoding='utf8') as readme:
        return readme.read()
# Dev-only tools installed via the optional "linting" extra (see extras_require below).
linting_deps = [
    'mypy==0.761',
    'pycodestyle==2.5.0',
]
setup(
name='nooz',
version='0.1.0',
description='Trending headlines right in your terminal.',
long_description=long_description(),
long_description_content_type='text/markdown',
url='https://github.com/preetmishra/nooz',
author='<NAME>',
author_email='<EMAIL>',
license='MIT',
classifiers=[
'Development Status :: 3 - Alpha',
'Intended Audience :: End Users/Desktop',
'Topic :: Internet',
'License :: OSI Approved :: MIT License',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.5',
'Programming Language :: Python :: 3.6',
'Programming Language :: Python :: 3.7',
'Programming Language :: Python :: 3.8',
'Operating System :: OS Independent',
],
python_requires='>=3.5, <=3.8',
keywords='news',
packages=find_packages(),
zip_safe=True,
entry_points={
'console_scripts': [
'nooz = nooz.run:main',
],
},
extras_require={
'linting': linting_deps,
},
install_requires=[
'mypy_extensions>=0.4',
'requests>=2.23.0',
'urwid==2.1.0',
'urllib3>=1.25.8'
],
) | [
"os.path.isfile",
"codecs.open",
"setuptools.find_packages",
"os.access"
] | [((200, 241), 'codecs.open', 'codecs.open', (['"""README.md"""'], {'encoding': '"""utf8"""'}), "('README.md', encoding='utf8')\n", (211, 241), False, 'import codecs\n'), ((1213, 1228), 'setuptools.find_packages', 'find_packages', ([], {}), '()\n', (1226, 1228), False, 'from setuptools import find_packages, setup\n'), ((106, 133), 'os.path.isfile', 'os.path.isfile', (['"""README.md"""'], {}), "('README.md')\n", (120, 133), False, 'import os\n'), ((138, 169), 'os.access', 'os.access', (['"""README.md"""', 'os.R_OK'], {}), "('README.md', os.R_OK)\n", (147, 169), False, 'import os\n')] |
from decimal import Decimal
import torch
from torch.nn import Module
import os
from deeplodocus.utils.notification import Notification
from deeplodocus.utils.flags.save import *
from deeplodocus.utils.flags.event import *
from deeplodocus.utils.flags.notif import *
from deeplodocus.utils.flags.ext import DEEP_EXT_PYTORCH, DEEP_EXT_ONNX
from deeplodocus.utils.flags.msg import DEEP_MSG_MODEL_SAVED, DEEP_MSG_SAVER_IMPROVED, DEEP_MSG_SAVER_NOT_IMPROVED
from deeplodocus.core.metrics.over_watch_metric import OverWatchMetric
from deeplodocus.brain.signal import Signal
from deeplodocus.brain.thalamus import Thalamus
from deeplodocus.utils.generic_utils import get_corresponding_flag
from deeplodocus.utils.flags.flag_lists import DEEP_LIST_SAVE_SIGNAL, DEEP_LIST_SAVE_FORMATS
class Saver(object):
    """
    Persists the model to disk during/after training.

    The Saver is signal-driven: it subscribes to Thalamus events to receive
    the model, optimizer and losses from the trainer, and writes a checkpoint
    whenever the configured save signal fires (end of batch/epoch, end of
    training, or improvement of the overwatched metric).
    """

    def __init__(
            self,
            name: str = "no_model_name",
            save_directory: str = "weights",
            save_signal: Flag = DEEP_EVENT_ON_EPOCH_END,
            method: Flag = DEEP_SAVE_FORMAT_PYTORCH,
            overwrite: bool = False
    ):
        """
        :param name: Base name used for the checkpoint files.
        :param save_directory: Directory the checkpoints are written to.
        :param save_signal: Event on which the model should be saved.
        :param method: Serialization format (PyTorch or ONNX).
        :param overwrite: Whether previous checkpoints may be overwritten.
        """
        self.name = name
        self.directory = save_directory
        self.save_signal = get_corresponding_flag(DEEP_LIST_SAVE_SIGNAL, save_signal)
        self.method = get_corresponding_flag(DEEP_LIST_SAVE_FORMATS, method)  # Can be onnx or pt
        self.best_overwatch_metric = None
        # State pushed by the trainer through signals (see set_save_params)
        self.training_loss = None
        self.model = None
        self.optimizer = None
        self.epoch_index = -1
        self.batch_index = -1
        self.validation_loss = None
        self.overwrite = overwrite
        self.inp = None

        # Set the file extension matching the chosen serialization format
        if DEEP_SAVE_FORMAT_PYTORCH.corresponds(self.method):
            self.extension = DEEP_EXT_PYTORCH
        elif DEEP_SAVE_FORMAT_ONNX.corresponds(self.method):
            self.extension = DEEP_EXT_ONNX

        # BUG FIX: the original tested os.path.isfile(self.directory), which is
        # the wrong predicate for a directory path. Use isdir so an existing
        # save directory is recognized, and create it otherwise.
        if not os.path.isdir(self.directory):
            os.makedirs(self.directory, exist_ok=True)

        # Connect the save to the computation of the overwatched metric
        Thalamus().connect(
            receiver=self.on_overwatch_metric_computed,
            event=DEEP_EVENT_OVERWATCH_METRIC_COMPUTED,
            expected_arguments=["current_overwatch_metric"]
        )
        Thalamus().connect(
            receiver=self.on_training_end,
            event=DEEP_EVENT_ON_TRAINING_END,
            expected_arguments=[]
        )
        Thalamus().connect(
            receiver=self.save_model,
            event=DEEP_EVENT_SAVE_MODEL,
            expected_arguments=[]
        )
        Thalamus().connect(
            receiver=self.set_training_loss,
            event=DEEP_EVENT_SEND_TRAINING_LOSS,
            expected_arguments=["training_loss"]
        )
        Thalamus().connect(
            receiver=self.set_save_params,
            event=DEEP_EVENT_SEND_SAVE_PARAMS_FROM_TRAINER,
            expected_arguments=[
                "model",
                "optimizer",
                "epoch_index",
                "validation_loss",
                "inp"
            ]
        )

    # NOTE: on_batch_end is deliberately NOT implemented, for efficiency reasons.

    def on_training_end(self) -> None:
        """Save the model once training is finished, if configured to do so."""
        if DEEP_SAVE_SIGNAL_END_TRAINING.corresponds(self.save_signal):
            self.save_model()

    def on_overwatch_metric_computed(self, current_overwatch_metric: OverWatchMetric):
        """
        Compare the newly computed overwatched metric against the best one so
        far, notify the user, and save the model when it improved.

        :param current_overwatch_metric: The value of the metric to over watch.
        """
        # Save unconditionally when there is no previous metric to compare against
        if self.best_overwatch_metric is None:
            self.best_overwatch_metric = current_overwatch_metric
            save = True
        # Lower is better (e.g. a loss)
        elif DEEP_SAVE_CONDITION_LESS.corresponds(current_overwatch_metric.get_condition()):
            if self.best_overwatch_metric.get_value() > current_overwatch_metric.get_value():
                Notification(
                    DEEP_NOTIF_SUCCESS,
                    DEEP_MSG_SAVER_IMPROVED % (
                        current_overwatch_metric.name,
                        "%.4e" % Decimal(
                            self.best_overwatch_metric.get_value()
                            - current_overwatch_metric.get_value()
                        )
                    )
                )
                self.best_overwatch_metric = current_overwatch_metric
                save = True
            else:
                Notification(
                    DEEP_NOTIF_INFO,
                    DEEP_MSG_SAVER_NOT_IMPROVED % current_overwatch_metric.name
                )
                save = False
        # Higher is better (e.g. a classification accuracy)
        elif DEEP_SAVE_CONDITION_GREATER.corresponds(current_overwatch_metric.get_condition()):
            if self.best_overwatch_metric.get_value() < current_overwatch_metric.get_value():
                Notification(
                    DEEP_NOTIF_SUCCESS,
                    DEEP_MSG_SAVER_IMPROVED % (
                        current_overwatch_metric.name,
                        "%.4e" % Decimal(
                            current_overwatch_metric.get_value()
                            - self.best_overwatch_metric.get_value()
                        )
                    )
                )
                self.best_overwatch_metric = current_overwatch_metric
                save = True
            else:
                Notification(
                    DEEP_NOTIF_INFO,
                    DEEP_MSG_SAVER_NOT_IMPROVED % current_overwatch_metric.name
                )
                save = False
        else:
            Notification(DEEP_NOTIF_FATAL, "The following saving condition does not exist : %s"
                         % current_overwatch_metric.get_condition())
            save = False
        if save is True:
            self.save_model()

    def save_model(self) -> None:
        """
        Request the latest training state from the trainer (via signals) and
        write the checkpoint to disk in the configured format.
        """
        # Ask the trainer for the current training loss (handled by set_training_loss)
        Thalamus().add_signal(
            Signal(
                event=DEEP_EVENT_REQUEST_TRAINING_LOSS,
                args=[]
            )
        )
        # Ask the trainer for the model/optimizer state (handled by set_save_params)
        Thalamus().add_signal(
            Signal(
                event=DEEP_EVENT_REQUEST_SAVE_PARAMS_FROM_TRAINER,
                args=[]
            )
        )
        file_path = self.__get_file_path()
        if DEEP_SAVE_FORMAT_PYTORCH.corresponds(self.method):
            # TODO: Finish try except statements here after testing...
            torch.save(
                {
                    "model_state_dict": self.model.state_dict(),
                    "epoch": self.epoch_index,
                    "training_loss": self.training_loss,
                    "validation_loss": self.validation_loss,
                    "optimizer_state_dict": self.optimizer.state_dict()
                },
                file_path
            )
        elif DEEP_SAVE_FORMAT_ONNX.corresponds(self.method):
            # TODO: implement the ONNX export (torch.onnx._export with input/output names)
            Notification(DEEP_NOTIF_FATAL, "Save as onnx format not implemented yet")
        Notification(DEEP_NOTIF_SUCCESS, DEEP_MSG_MODEL_SAVED % file_path)

    def set_training_loss(self, training_loss):
        """Signal receiver: store the training loss sent by the trainer."""
        self.training_loss = training_loss

    def set_save_params(self, model, optimizer, epoch_index, validation_loss, inp):
        """Signal receiver: store the model/optimizer state sent by the trainer."""
        self.model = model
        self.optimizer = optimizer
        self.epoch_index = epoch_index
        self.validation_loss = validation_loss
        self.inp = inp

    def __get_file_path(self):
        """Build the checkpoint path from directory, name, epoch/batch index and extension."""
        if self.epoch_index is None:
            # NOTE(review): epoch_index is initialized to -1 (not None), so this
            # branch only triggers when a sender explicitly passes None — confirm.
            file_path = "%s/%s%s" % (
                self.directory,
                self.name,
                self.extension
            )
        elif self.save_signal.corresponds(DEEP_SAVE_SIGNAL_END_BATCH):
            # 'directory/name_epoch_batch.ext'
            file_path = "%s/%s_%s_%s%s" % (
                self.directory,
                self.name,
                str(self.epoch_index).zfill(3),
                str(self.batch_index).zfill(8),
                self.extension
            )
        else:
            # 'directory/name_epoch.ext'
            file_path = "%s/%s_%s%s" % (
                self.directory,
                self.name,
                str(self.epoch_index).zfill(3),
                self.extension
            )
        return file_path
| [
"os.makedirs",
"deeplodocus.brain.signal.Signal",
"os.path.isfile",
"deeplodocus.utils.notification.Notification",
"deeplodocus.utils.generic_utils.get_corresponding_flag",
"deeplodocus.brain.thalamus.Thalamus"
] | [((1373, 1431), 'deeplodocus.utils.generic_utils.get_corresponding_flag', 'get_corresponding_flag', (['DEEP_LIST_SAVE_SIGNAL', 'save_signal'], {}), '(DEEP_LIST_SAVE_SIGNAL, save_signal)\n', (1395, 1431), False, 'from deeplodocus.utils.generic_utils import get_corresponding_flag\n'), ((1455, 1509), 'deeplodocus.utils.generic_utils.get_corresponding_flag', 'get_corresponding_flag', (['DEEP_LIST_SAVE_FORMATS', 'method'], {}), '(DEEP_LIST_SAVE_FORMATS, method)\n', (1477, 1509), False, 'from deeplodocus.utils.generic_utils import get_corresponding_flag\n'), ((9688, 9754), 'deeplodocus.utils.notification.Notification', 'Notification', (['DEEP_NOTIF_SUCCESS', '(DEEP_MSG_MODEL_SAVED % file_path)'], {}), '(DEEP_NOTIF_SUCCESS, DEEP_MSG_MODEL_SAVED % file_path)\n', (9700, 9754), False, 'from deeplodocus.utils.notification import Notification\n'), ((2096, 2126), 'os.path.isfile', 'os.path.isfile', (['self.directory'], {}), '(self.directory)\n', (2110, 2126), False, 'import os\n'), ((2141, 2183), 'os.makedirs', 'os.makedirs', (['self.directory'], {'exist_ok': '(True)'}), '(self.directory, exist_ok=True)\n', (2152, 2183), False, 'import os\n'), ((7773, 7828), 'deeplodocus.brain.signal.Signal', 'Signal', ([], {'event': 'DEEP_EVENT_REQUEST_TRAINING_LOSS', 'args': '[]'}), '(event=DEEP_EVENT_REQUEST_TRAINING_LOSS, args=[])\n', (7779, 7828), False, 'from deeplodocus.brain.signal import Signal\n'), ((7967, 8033), 'deeplodocus.brain.signal.Signal', 'Signal', ([], {'event': 'DEEP_EVENT_REQUEST_SAVE_PARAMS_FROM_TRAINER', 'args': '[]'}), '(event=DEEP_EVENT_REQUEST_SAVE_PARAMS_FROM_TRAINER, args=[])\n', (7973, 8033), False, 'from deeplodocus.brain.signal import Signal\n'), ((2268, 2278), 'deeplodocus.brain.thalamus.Thalamus', 'Thalamus', ([], {}), '()\n', (2276, 2278), False, 'from deeplodocus.brain.thalamus import Thalamus\n'), ((2483, 2493), 'deeplodocus.brain.thalamus.Thalamus', 'Thalamus', ([], {}), '()\n', (2491, 2493), False, 'from deeplodocus.brain.thalamus import Thalamus\n'), 
((2649, 2659), 'deeplodocus.brain.thalamus.Thalamus', 'Thalamus', ([], {}), '()\n', (2657, 2659), False, 'from deeplodocus.brain.thalamus import Thalamus\n'), ((2805, 2815), 'deeplodocus.brain.thalamus.Thalamus', 'Thalamus', ([], {}), '()\n', (2813, 2815), False, 'from deeplodocus.brain.thalamus import Thalamus\n'), ((2991, 3001), 'deeplodocus.brain.thalamus.Thalamus', 'Thalamus', ([], {}), '()\n', (2999, 3001), False, 'from deeplodocus.brain.thalamus import Thalamus\n'), ((7737, 7747), 'deeplodocus.brain.thalamus.Thalamus', 'Thalamus', ([], {}), '()\n', (7745, 7747), False, 'from deeplodocus.brain.thalamus import Thalamus\n'), ((7931, 7941), 'deeplodocus.brain.thalamus.Thalamus', 'Thalamus', ([], {}), '()\n', (7939, 7941), False, 'from deeplodocus.brain.thalamus import Thalamus\n'), ((9128, 9201), 'deeplodocus.utils.notification.Notification', 'Notification', (['DEEP_NOTIF_FATAL', '"""Save as onnx format not implemented yet"""'], {}), "(DEEP_NOTIF_FATAL, 'Save as onnx format not implemented yet')\n", (9140, 9201), False, 'from deeplodocus.utils.notification import Notification\n'), ((5647, 5741), 'deeplodocus.utils.notification.Notification', 'Notification', (['DEEP_NOTIF_INFO', '(DEEP_MSG_SAVER_NOT_IMPROVED % current_overwatch_metric.name)'], {}), '(DEEP_NOTIF_INFO, DEEP_MSG_SAVER_NOT_IMPROVED %\n current_overwatch_metric.name)\n', (5659, 5741), False, 'from deeplodocus.utils.notification import Notification\n'), ((6888, 6982), 'deeplodocus.utils.notification.Notification', 'Notification', (['DEEP_NOTIF_INFO', '(DEEP_MSG_SAVER_NOT_IMPROVED % current_overwatch_metric.name)'], {}), '(DEEP_NOTIF_INFO, DEEP_MSG_SAVER_NOT_IMPROVED %\n current_overwatch_metric.name)\n', (6900, 6982), False, 'from deeplodocus.utils.notification import Notification\n')] |
from typing import NamedTuple, cast, List as PyList, Dict, Any, BinaryIO, Optional, TypeVar, Type, Protocol, \
runtime_checkable
from types import GeneratorType
from textwrap import indent
from collections.abc import Sequence as ColSequence
from itertools import chain
import io
from remerkleable.core import View, BasicView, OFFSET_BYTE_LENGTH, ViewHook, ObjType, ObjParseException
from remerkleable.basic import uint256, uint8, uint32
from remerkleable.tree import Node, subtree_fill_to_length, subtree_fill_to_contents,\
zero_node, Gindex, PairNode, to_gindex, NavigationError, get_depth
from remerkleable.subtree import SubtreeView
from remerkleable.readonly_iters import PackedIter, ComplexElemIter, ComplexFreshElemIter, ContainerElemIter
# Type variable for any View subtype; used to type decode_bytes and similar class methods.
V = TypeVar('V', bound=View)
def decode_offset(stream: BinaryIO) -> uint32:
    """Read one serialized SSZ offset (a little-endian uint32) from the stream."""
    offset = uint32.deserialize(stream, OFFSET_BYTE_LENGTH)
    return cast(uint32, offset)
def encode_offset(stream: BinaryIO, offset: int):
    """Write *offset* to the stream as a little-endian uint32; returns bytes written."""
    encoded = uint32(offset)
    return encoded.serialize(stream)
class ComplexView(SubtreeView):
    """Base for composite SSZ views, adding whole-value byte encoding/decoding."""

    def encode_bytes(self) -> bytes:
        """Serialize this view into a fresh byte string."""
        buf = io.BytesIO()
        self.serialize(buf)
        return buf.getvalue()

    @classmethod
    def decode_bytes(cls: Type[V], bytez: bytes) -> V:
        """Deserialize a view of this type from the given byte string."""
        return cls.deserialize(io.BytesIO(bytez), len(bytez))
# Type variable for any MonoSubtreeView subtype (List/Vector-style sequences).
M = TypeVar('M', bound="MonoSubtreeView")
class MonoSubtreeView(ColSequence, ComplexView):
def length(self) -> int:
raise NotImplementedError
@classmethod
def coerce_view(cls: Type[M], v: Any) -> M:
return cls(*v)
@classmethod
def element_cls(cls) -> Type[View]:
raise NotImplementedError
@classmethod
def item_elem_cls(cls, i: int) -> Type[View]:
return cls.element_cls()
@classmethod
def to_chunk_length(cls, elems_length: int) -> int:
if cls.is_packed():
elem_type: Type[View] = cls.element_cls()
if issubclass(elem_type, BasicView):
elems_per_chunk = 32 // elem_type.type_byte_length()
return (elems_length + elems_per_chunk - 1) // elems_per_chunk
else:
raise Exception("cannot append a packed element that is not a basic type")
else:
return elems_length
@classmethod
def views_into_chunks(cls, views: PyList[View]) -> PyList[Node]:
if cls.is_packed():
elem_type: Type[View] = cls.element_cls()
if issubclass(elem_type, BasicView):
# cast the list as a whole, checking each element takes too long.
return elem_type.pack_views(cast(PyList[BasicView], views))
else:
raise Exception("cannot append a packed element that is not a basic type")
else:
return [v.get_backing() for v in views]
@classmethod
def is_valid_count(cls, count: int) -> bool:
raise NotImplementedError
def __iter__(self):
return iter(self.get(i) for i in range(self.length()))
def readonly_iter(self):
tree_depth = self.tree_depth()
length = self.length()
backing = self.get_backing()
elem_type: Type[View] = self.element_cls()
if self.is_packed():
return PackedIter(backing, tree_depth, length, cast(Type[BasicView], elem_type))
else:
if issubclass(elem_type, bytes): # is the element type the raw-bytes? Then not re-use views.
return ComplexFreshElemIter(backing, tree_depth, length, cast(Type[View], elem_type))
else:
return ComplexElemIter(backing, tree_depth, length, elem_type)
@classmethod
def deserialize(cls: Type[M], stream: BinaryIO, scope: int) -> M:
elem_cls = cls.element_cls()
if elem_cls.is_fixed_byte_length():
elem_byte_length = elem_cls.type_byte_length()
if scope % elem_byte_length != 0:
raise Exception(f"scope {scope} does not match element byte length {elem_byte_length} multiple")
count = scope // elem_byte_length
if not cls.is_valid_count(count):
raise Exception(f"count {count} is invalid")
return cls(elem_cls.deserialize(stream, elem_byte_length) for _ in range(count)) # type: ignore
else:
if scope == 0:
if not cls.is_valid_count(0):
raise Exception("scope cannot be 0, count must not be 0")
return cls()
first_offset = decode_offset(stream)
if first_offset > scope:
raise Exception(f"first offset is too big: {first_offset}, scope: {scope}")
if first_offset % OFFSET_BYTE_LENGTH != 0:
raise Exception(f"first offset {first_offset} is not a multiple of offset length {OFFSET_BYTE_LENGTH}")
count = first_offset // OFFSET_BYTE_LENGTH
if not cls.is_valid_count(count):
raise Exception(f"count {count} is invalid")
# count - 1: we already have the first offset
offsets = [first_offset] + [decode_offset(stream) for _ in range(count - 1)] + [uint32(scope)]
elem_min, elem_max = elem_cls.min_byte_length(), elem_cls.max_byte_length()
elems = []
for i in range(count):
start, end = offsets[i], offsets[i+1]
if end < start:
raise Exception(f"offsets[{i}] value {start} is invalid, next offset is {end}")
elem_size = end - start
if not (elem_min <= elem_size <= elem_max):
raise Exception(f"offset[{i}] value {start} is invalid, next offset is {end},"
f" implied size is {elem_size}, size bounds: [{elem_min}, {elem_max}]")
elems.append(elem_cls.deserialize(stream, elem_size))
return cls(*elems) # type: ignore
def serialize(self, stream: BinaryIO) -> int:
elem_cls = self.__class__.element_cls()
if issubclass(elem_cls, uint8):
out = bytes(iter(self))
stream.write(out)
return len(out)
if elem_cls.is_fixed_byte_length():
for v in self.readonly_iter():
v.serialize(stream)
return elem_cls.type_byte_length() * self.length()
else:
temp_dyn_stream = io.BytesIO()
offset = OFFSET_BYTE_LENGTH * self.length() # the offsets are part of the fixed-size-bytes prologue
for v in self:
encode_offset(stream, offset)
offset += cast(View, v).serialize(temp_dyn_stream)
temp_dyn_stream.seek(0)
stream.write(temp_dyn_stream.read(offset))
return offset
@classmethod
def from_obj(cls: Type[M], obj: ObjType) -> M:
if not isinstance(obj, (list, tuple)):
raise ObjParseException(f"obj '{obj}' is not a list or tuple")
elem_cls = cls.element_cls()
return cls(elem_cls.from_obj(el) for el in obj) # type: ignore
@classmethod
def navigate_type(cls, key: Any) -> Type[View]:
if key < 0:
raise KeyError
return cls.element_cls()
@classmethod
def key_to_static_gindex(cls, key: Any) -> Gindex:
if key < 0:
raise KeyError
if cls.is_packed():
elems_per_chunk = 32 // cls.element_cls().type_byte_length()
chunk_i = key // elems_per_chunk
else:
chunk_i = key
return to_gindex(chunk_i, cls.tree_depth())
def navigate_view(self, key: Any) -> View:
return self.__getitem__(key)
def __len__(self):
return self.length()
def __add__(self, other):
if issubclass(self.element_cls(), uint8):
return bytes(self) + bytes(other)
else:
return list(chain(self, other))
def __getitem__(self, k):
if isinstance(k, slice):
start = 0 if k.start is None else k.start
end = self.length() if k.stop is None else k.stop
return [self.get(i) for i in range(start, end)]
else:
return self.get(k)
def __setitem__(self, k, v):
if type(k) == slice:
i = 0 if k.start is None else k.start
end = self.length() if k.stop is None else k.stop
for item in v:
self.set(i, item)
i += 1
if i != end:
raise Exception("failed to do full slice-set, not enough values")
else:
self.set(k, v)
    def _repr_sequence(self):
        """Render a human-readable repr of the sequence.

        Long sequences are shortened to a head/tail with an "omitted" marker;
        indices that cannot be navigated (partial/summary trees) are skipped
        and the result is marked ``~partial~``.
        """
        length: int
        try:
            length = self.length()
        except NavigationError:
            # length node itself is unavailable (summary root)
            return f"{self.type_repr()}( *summary root, no length known* )"
        vals: Dict[int, View] = {}
        partial = False
        for i in range(length):
            try:
                vals[i] = self.get(i)
            except NavigationError:
                partial = True
                continue
        # NOTE(review): this passes the element *class* to isinstance; presumably
        # the element types are instances of a metaclass for which this holds —
        # confirm against the BasicView definition.
        basic_elems = isinstance(self.element_cls(), BasicView)
        shortened = length > (64 if basic_elems else 8)
        summary_length = (10 if basic_elems else 3)
        seperator = ', ' if basic_elems else ',\n'
        contents = seperator.join(f"... {length - (summary_length * 2)} omitted ..."
                                  if (shortened and i == summary_length)
                                  else (f"{i}: {repr(v)}" if partial else repr(v))
                                  for i, v in vals.items()
                                  if (not shortened) or i <= summary_length or i >= length - summary_length)
        if '\n' in contents:
            contents = '\n' + indent(contents, ' ') + '\n'
        if partial:
            return f"{self.type_repr()}~partial~<<len={length}>>({contents})"
        else:
            return f"{self.type_repr()}<<len={length}>>({contents})"
class List(MonoSubtreeView):
    """SSZ List: a variable-length sequence with a fixed capacity (limit).

    Backed by a pair node: left is the contents subtree, right is the
    length mixed in as a uint256.
    """
    def __new__(cls, *args, backing: Optional[Node] = None, hook: Optional[ViewHook] = None, **kwargs):
        """Create a list view from a backing node, or from element inputs.

        Accepts a single iterable argument, and for byte-element lists also
        ``bytes`` or a (possibly ``0x``-prefixed) hex string.
        """
        if backing is not None:
            if len(args) != 0:
                raise Exception("cannot have both a backing and elements to init List")
            return super().__new__(cls, backing=backing, hook=hook, **kwargs)
        elem_cls = cls.element_cls()
        vals = list(args)
        if len(vals) == 1:
            val = vals[0]
            if isinstance(val, (GeneratorType, list, tuple)):
                vals = list(val)
            if issubclass(elem_cls, uint8):
                if isinstance(val, bytes):
                    vals = list(val)
                if isinstance(val, str):
                    if val[:2] == '0x':
                        val = val[2:]
                    vals = list(bytes.fromhex(val))
        if len(vals) > 0:
            limit = cls.limit()
            if len(vals) > limit:
                raise Exception(f"too many list inputs: {len(vals)}, limit is: {limit}")
            input_views = []
            for el in vals:
                if isinstance(el, View):
                    input_views.append(el)
                else:
                    input_views.append(elem_cls.coerce_view(el))
            input_nodes = cls.views_into_chunks(input_views)
            contents = subtree_fill_to_contents(input_nodes, cls.contents_depth())
            # contents subtree on the left, length mix-in on the right
            backing = PairNode(contents, uint256(len(input_views)).get_backing())
        return super().__new__(cls, backing=backing, hook=hook, **kwargs)
    def __class_getitem__(cls, params) -> Type["List"]:
        """Specialize ``List[element_type, limit]`` into a concrete subclass."""
        (element_type, limit) = params
        contents_depth = 0
        packed = False
        # NOTE(review): the element *class* is passed to isinstance; presumably
        # basic element types are instances of a metaclass — confirm.
        if isinstance(element_type, BasicView):
            elems_per_chunk = 32 // element_type.type_byte_length()
            contents_depth = get_depth((limit + elems_per_chunk - 1) // elems_per_chunk)
            packed = True
        else:
            contents_depth = get_depth(limit)
        class SpecialListView(List):
            @classmethod
            def is_packed(cls) -> bool:
                return packed
            @classmethod
            def contents_depth(cls) -> int:
                return contents_depth
            @classmethod
            def element_cls(cls) -> Type[View]:
                return element_type
            @classmethod
            def limit(cls) -> int:
                return limit
        SpecialListView.__name__ = SpecialListView.type_repr()
        return SpecialListView
    def length(self) -> int:
        """Current number of elements, read from the length mix-in node."""
        ll_node = super().get_backing().get_right()
        ll = cast(uint256, uint256.view_from_backing(node=ll_node, hook=None))
        return int(ll)
    def value_byte_length(self) -> int:
        """Serialized byte size of this particular list value."""
        elem_cls = self.__class__.element_cls()
        if elem_cls.is_fixed_byte_length():
            return elem_cls.type_byte_length() * self.length()
        else:
            return sum(OFFSET_BYTE_LENGTH + cast(View, el).value_byte_length() for el in iter(self))
    def append(self, v: View):
        """Append an element in place (rebuilds the backing tree and length)."""
        ll = self.length()
        if ll >= self.__class__.limit():
            raise Exception("list is maximum capacity, cannot append")
        i = ll
        elem_type: Type[View] = self.__class__.element_cls()
        if not isinstance(v, elem_type):
            v = elem_type.coerce_view(v)
        target: Gindex
        if self.__class__.is_packed():
            next_backing = self.get_backing()
            if isinstance(v, BasicView):
                elems_per_chunk = 32 // elem_type.type_byte_length()
                chunk_i = i // elems_per_chunk
                target = to_gindex(chunk_i, self.__class__.tree_depth())
                chunk: Node
                if i % elems_per_chunk == 0:
                    # first element of a fresh chunk: expand tree, start from zero
                    set_last = next_backing.setter(target, expand=True)
                    chunk = zero_node(0)
                else:
                    set_last = next_backing.setter(target)
                    chunk = next_backing.getter(target)
                chunk = v.backing_from_base(chunk, i % elems_per_chunk)
                next_backing = set_last(chunk)
            else:
                raise Exception("cannot append a packed element that is not a basic type")
        else:
            target = to_gindex(i, self.__class__.tree_depth())
            set_last = self.get_backing().setter(target, expand=True)
            next_backing = set_last(v.get_backing())
        set_length = next_backing.rebind_right
        new_length = uint256(ll + 1).get_backing()
        next_backing = set_length(new_length)
        self.set_backing(next_backing)
    def pop(self):
        """Remove the last element in place, summarizing freed subtrees."""
        ll = self.length()
        if ll == 0:
            raise Exception("list is empty, cannot pop")
        i = ll - 1
        target: Gindex
        can_summarize: bool
        if self.__class__.is_packed():
            next_backing = self.get_backing()
            elem_type: Type[View] = self.__class__.element_cls()
            if issubclass(elem_type, BasicView):
                elems_per_chunk = 32 // elem_type.type_byte_length()
                chunk_i = i // elems_per_chunk
                target = to_gindex(chunk_i, self.__class__.tree_depth())
                if i % elems_per_chunk == 0:
                    chunk = zero_node(0)
                else:
                    chunk = next_backing.getter(target)
                set_last = next_backing.setter(target)
                # zero out the popped element's slot within the chunk
                chunk = elem_type.default(None).backing_from_base(chunk, i % elems_per_chunk)
                next_backing = set_last(chunk)
                can_summarize = (target & 1) == 0 and i % elems_per_chunk == 0
            else:
                raise Exception("cannot pop a packed element that is not a basic type")
        else:
            target = to_gindex(i, self.__class__.tree_depth())
            set_last = self.get_backing().setter(target)
            next_backing = set_last(zero_node(0))
            can_summarize = (target & 1) == 0
        # if possible, summarize
        if can_summarize:
            # summarize to the highest node possible.
            # I.e. the resulting target must be a right-hand, unless it's the only content node.
            while (target & 1) == 0 and target != 0b10:
                target >>= 1
            summary_fn = next_backing.summarize_into(target)
            next_backing = summary_fn()
        set_length = next_backing.rebind_right
        new_length = uint256(ll - 1).get_backing()
        next_backing = set_length(new_length)
        self.set_backing(next_backing)
    def get(self, i: int) -> View:
        """Return element ``i``; raises IndexError when out of range."""
        if i < 0 or i >= self.length():
            raise IndexError
        return super().get(i)
    def set(self, i: int, v: View) -> None:
        """Set element ``i``; raises IndexError when out of range."""
        if i < 0 or i >= self.length():
            raise IndexError
        super().set(i, v)
    def __repr__(self):
        return self._repr_sequence()
    @classmethod
    def type_repr(cls) -> str:
        return f"List[{cls.element_cls().__name__}, {cls.limit()}]"
    @classmethod
    def is_packed(cls) -> bool:
        # implemented by the specialized subclass from __class_getitem__
        raise NotImplementedError
    @classmethod
    def contents_depth(cls) -> int:
        # implemented by the specialized subclass from __class_getitem__
        raise NotImplementedError
    @classmethod
    def tree_depth(cls) -> int:
        return cls.contents_depth() + 1  # 1 extra for length mix-in
    @classmethod
    def item_elem_cls(cls, i: int) -> Type[View]:
        return cls.element_cls()
    @classmethod
    def limit(cls) -> int:
        # implemented by the specialized subclass from __class_getitem__
        raise NotImplementedError
    @classmethod
    def is_valid_count(cls, count: int) -> bool:
        """A count is valid if it does not exceed the list limit."""
        return 0 <= count <= cls.limit()
    @classmethod
    def navigate_type(cls, key: Any) -> Type[View]:
        if key >= cls.limit():
            raise KeyError
        return super().navigate_type(key)
    @classmethod
    def key_to_static_gindex(cls, key: Any) -> Gindex:
        if key >= cls.limit():
            raise KeyError
        return super().key_to_static_gindex(key)
    @classmethod
    def default_node(cls) -> Node:
        """Backing for an empty list: zeroed contents with a zero length mix-in."""
        return PairNode(zero_node(cls.contents_depth()), zero_node(0))  # mix-in 0 as list length
    @classmethod
    def is_fixed_byte_length(cls) -> bool:
        # lists are always variable-size
        return False
    @classmethod
    def min_byte_length(cls) -> int:
        return 0
    @classmethod
    def max_byte_length(cls) -> int:
        elem_cls = cls.element_cls()
        bytes_per_elem = elem_cls.max_byte_length()
        if not elem_cls.is_fixed_byte_length():
            bytes_per_elem += OFFSET_BYTE_LENGTH
        return bytes_per_elem * cls.limit()
    def to_obj(self) -> ObjType:
        """Convert to an untyped object: a plain list of element objects."""
        return list(el.to_obj() for el in self.readonly_iter())
class Vector(MonoSubtreeView):
    """SSZ Vector: a fixed-length sequence merkleized as a subtree (no length mix-in)."""
    def __new__(cls, *args, backing: Optional[Node] = None, hook: Optional[ViewHook] = None, **kwargs):
        """Create a vector view from a backing node, or from exactly-length inputs.

        Accepts a single iterable argument, and for byte-element vectors also
        ``bytes`` or a (possibly ``0x``-prefixed) hex string.
        """
        if backing is not None:
            if len(args) != 0:
                raise Exception("cannot have both a backing and elements to init Vector")
            return super().__new__(cls, backing=backing, hook=hook, **kwargs)
        elem_cls = cls.element_cls()
        vals = list(args)
        if len(vals) == 1:
            val = vals[0]
            if isinstance(val, (GeneratorType, list, tuple)):
                vals = list(val)
            if issubclass(elem_cls, uint8):
                if isinstance(val, bytes):
                    vals = list(val)
                if isinstance(val, str):
                    if val[:2] == '0x':
                        val = val[2:]
                    vals = list(bytes.fromhex(val))
        if len(vals) > 0:
            vector_length = cls.vector_length()
            if len(vals) != vector_length:
                raise Exception(f"invalid inputs length: {len(vals)}, vector length is: {vector_length}")
            input_views = []
            for el in vals:
                if isinstance(el, View):
                    input_views.append(el)
                else:
                    input_views.append(elem_cls.coerce_view(el))
            input_nodes = cls.views_into_chunks(input_views)
            backing = subtree_fill_to_contents(input_nodes, cls.tree_depth())
        return super().__new__(cls, backing=backing, hook=hook, **kwargs)
    def __class_getitem__(cls, params) -> Type["Vector"]:
        """Specialize ``Vector[element_type, length]`` into a concrete subclass."""
        (element_view_cls, length) = params
        if length <= 0:
            raise Exception(f"Invalid vector length: {length}")
        tree_depth = 0
        packed = False
        # NOTE(review): the element *class* is passed to isinstance; presumably
        # basic element types are instances of a metaclass — confirm.
        if isinstance(element_view_cls, BasicView):
            elems_per_chunk = 32 // element_view_cls.type_byte_length()
            tree_depth = get_depth((length + elems_per_chunk - 1) // elems_per_chunk)
            packed = True
        else:
            tree_depth = get_depth(length)
        class SpecialVectorView(Vector):
            @classmethod
            def is_packed(cls) -> bool:
                return packed
            @classmethod
            def tree_depth(cls) -> int:
                return tree_depth
            @classmethod
            def element_cls(cls) -> Type[View]:
                return element_view_cls
            @classmethod
            def vector_length(cls) -> int:
                return length
        out_typ = SpecialVectorView
        # for fixed-size vectors, pre-compute the size.
        if element_view_cls.is_fixed_byte_length():
            byte_length = element_view_cls.type_byte_length() * length
            class FixedSpecialVectorView(SpecialVectorView):
                @classmethod
                def type_byte_length(cls) -> int:
                    return byte_length
                @classmethod
                def min_byte_length(cls) -> int:
                    return byte_length
                @classmethod
                def max_byte_length(cls) -> int:
                    return byte_length
            out_typ = FixedSpecialVectorView
        out_typ.__name__ = out_typ.type_repr()
        return out_typ
    def get(self, i: int) -> View:
        """Return element ``i``; raises IndexError when out of range."""
        if i < 0 or i >= self.__class__.vector_length():
            raise IndexError
        return super().get(i)
    def set(self, i: int, v: View) -> None:
        """Set element ``i``; raises IndexError when out of range."""
        if i < 0 or i >= self.__class__.vector_length():
            raise IndexError
        super().set(i, v)
    def length(self) -> int:
        """Fixed element count, taken from the type (not the backing)."""
        return self.__class__.vector_length()
    def value_byte_length(self) -> int:
        """Serialized byte size of this particular vector value."""
        if self.__class__.is_fixed_byte_length():
            return self.__class__.type_byte_length()
        else:
            return sum(OFFSET_BYTE_LENGTH + cast(View, el).value_byte_length() for el in iter(self))
    def __repr__(self):
        return self._repr_sequence()
    @classmethod
    def type_repr(cls) -> str:
        return f"Vector[{cls.element_cls().__name__}, {cls.vector_length()}]"
    @classmethod
    def vector_length(cls) -> int:
        # implemented by the specialized subclass from __class_getitem__
        raise NotImplementedError
    @classmethod
    def is_valid_count(cls, count: int) -> bool:
        """Only the exact vector length is a valid element count."""
        return count == cls.vector_length()
    @classmethod
    def navigate_type(cls, key: Any) -> Type[View]:
        if key >= cls.vector_length():
            raise KeyError
        return super().navigate_type(key)
    @classmethod
    def key_to_static_gindex(cls, key: Any) -> Gindex:
        if key >= cls.vector_length():
            raise KeyError
        return super().key_to_static_gindex(key)
    @classmethod
    def default_node(cls) -> Node:
        """Backing for a default vector: element defaults (or zero chunks) repeated."""
        elem_type: Type[View] = cls.element_cls()
        length = cls.to_chunk_length(cls.vector_length())
        elem: Node
        if cls.is_packed():
            elem = zero_node(0)
        else:
            elem = elem_type.default_node()
        return subtree_fill_to_length(elem, cls.tree_depth(), length)
    @classmethod
    def is_fixed_byte_length(cls) -> bool:
        return cls.element_cls().is_fixed_byte_length()  # only if the element type is fixed byte length.
    @classmethod
    def min_byte_length(cls) -> int:
        elem_cls = cls.element_cls()
        bytes_per_elem = elem_cls.min_byte_length()
        if not elem_cls.is_fixed_byte_length():
            bytes_per_elem += OFFSET_BYTE_LENGTH
        return bytes_per_elem * cls.vector_length()
    @classmethod
    def max_byte_length(cls) -> int:
        elem_cls = cls.element_cls()
        bytes_per_elem = elem_cls.max_byte_length()
        if not elem_cls.is_fixed_byte_length():
            bytes_per_elem += OFFSET_BYTE_LENGTH
        return bytes_per_elem * cls.vector_length()
    def to_obj(self) -> ObjType:
        """Convert to an untyped object: a tuple of element objects."""
        return tuple(el.to_obj() for el in self.readonly_iter())
# Mapping of field name -> field view-type, as declared by a Container.
Fields = Dict[str, Type[View]]
class FieldOffset(NamedTuple):
    # Bookkeeping record for a variable-size field during deserialization:
    # field name, its view type, and the byte offset read from the stream.
    key: str
    typ: Type[View]
    offset: int
@runtime_checkable
class _ContainerLike(Protocol):
    # Structural type: anything exposing a fields() classmethod.
    @classmethod
    def fields(cls) -> Fields:
        ...
CV = TypeVar('CV', bound="Container")  # type var for Container alternate constructors
class Container(ComplexView):
    """SSZ Container: a fixed set of named, typed fields, merkleized as a binary tree.

    Subclasses declare their fields as class annotations; field order is the
    annotation order.
    """
    # Container types should declare fields through class annotations.
    # If none are specified, it will fall back on this (to avoid annotations of super classes),
    # and error on construction, since empty container types are invalid.
    _empty_annotations: bool
    _field_indices: Dict[str, int]
    def __new__(cls, *args, backing: Optional[Node] = None, hook: Optional[ViewHook] = None, **kwargs):
        """Create a container from a backing node, or from per-field keyword values.

        Fields not given as keywords take their type's default; unknown
        keywords raise AttributeError.
        """
        if backing is not None:
            if len(args) != 0:
                raise Exception("cannot have both a backing and elements to init List")
            return super().__new__(cls, backing=backing, hook=hook, **kwargs)
        input_nodes = []
        for fkey, ftyp in cls.fields().items():
            fnode: Node
            if fkey in kwargs:
                finput = kwargs.pop(fkey)
                if isinstance(finput, View):
                    fnode = finput.get_backing()
                else:
                    fnode = ftyp.coerce_view(finput).get_backing()
            else:
                fnode = ftyp.default_node()
            input_nodes.append(fnode)
        # check if any keys are remaining to catch unrecognized keys
        if len(kwargs) > 0:
            raise AttributeError(f'The field names [{"".join(kwargs.keys())}] are not defined in {cls}')
        backing = subtree_fill_to_contents(input_nodes, cls.tree_depth())
        out = super().__new__(cls, backing=backing, hook=hook)
        return out
    def __init_subclass__(cls, *args, **kwargs):
        # cache the index of each field (annotation order); leading-underscore
        # annotations are internal, not fields
        super().__init_subclass__(*args, **kwargs)
        cls._field_indices = {fkey: i for i, fkey in enumerate(cls.__annotations__.keys()) if fkey[0] != '_'}
        if len(cls._field_indices) == 0:
            raise Exception(f"Container {cls.__name__} must have at least one field!")
    @classmethod
    def coerce_view(cls: Type[CV], v: Any) -> CV:
        """Build an instance of this container from any object exposing the same field attributes."""
        return cls(**{fkey: getattr(v, fkey) for fkey in cls.fields().keys()})
    @classmethod
    def fields(cls) -> Fields:
        # NOTE(review): reads cls.__annotations__ directly, which only covers
        # this class's own annotations — confirm inherited fields are intended
        # to be excluded.
        return cls.__annotations__
    @classmethod
    def is_fixed_byte_length(cls) -> bool:
        """Fixed-size only when every field is fixed-size."""
        return all(f.is_fixed_byte_length() for f in cls.fields().values())
    @classmethod
    def type_byte_length(cls) -> int:
        if cls.is_fixed_byte_length():
            return cls.min_byte_length()
        else:
            raise Exception("dynamic length container does not have a fixed byte length")
    @classmethod
    def min_byte_length(cls) -> int:
        """Smallest possible serialized size (offsets counted for dynamic fields)."""
        total = 0
        for ftyp in cls.fields().values():
            if not ftyp.is_fixed_byte_length():
                total += OFFSET_BYTE_LENGTH
            total += ftyp.min_byte_length()
        return total
    @classmethod
    def max_byte_length(cls) -> int:
        """Largest possible serialized size (offsets counted for dynamic fields)."""
        total = 0
        for ftyp in cls.fields().values():
            if not ftyp.is_fixed_byte_length():
                total += OFFSET_BYTE_LENGTH
            total += ftyp.max_byte_length()
        return total
    @classmethod
    def is_packed(cls) -> bool:
        return False
    @classmethod
    def tree_depth(cls) -> int:
        return get_depth(len(cls.fields()))
    @classmethod
    def item_elem_cls(cls, i: int) -> Type[View]:
        return list(cls.fields().values())[i]
    @classmethod
    def default_node(cls) -> Node:
        """Backing of the all-defaults container."""
        return subtree_fill_to_contents([field.default_node() for field in cls.fields().values()], cls.tree_depth())
    def value_byte_length(self) -> int:
        """Serialized byte size of this particular container value."""
        if self.__class__.is_fixed_byte_length():
            return self.__class__.type_byte_length()
        else:
            total = 0
            fields = self.fields()
            for fkey, ftyp in fields.items():
                if ftyp.is_fixed_byte_length():
                    total += ftyp.type_byte_length()
                else:
                    total += OFFSET_BYTE_LENGTH
                    total += cast(View, getattr(self, fkey)).value_byte_length()
            return total
    def __getattr__(self, item):
        # leading-underscore names are regular attributes; everything else is
        # resolved as a field lookup by index
        if item[0] == '_':
            return super().__getattribute__(item)
        else:
            try:
                i = self.__class__._field_indices[item]
            except KeyError:
                raise AttributeError(f"unknown attribute {item}")
            return super().get(i)
    def __setattr__(self, key, value):
        # mirror of __getattr__: underscore names set directly, fields by index
        if key[0] == '_':
            super().__setattr__(key, value)
        else:
            try:
                i = self.__class__._field_indices[key]
            except KeyError:
                raise AttributeError(f"unknown attribute {key}")
            super().set(i, value)
    def _get_field_val_repr(self, fkey: str, ftype: Type[View]) -> str:
        """Render one ``name: Type = value`` line for __repr__ (partial-safe)."""
        field_start = ' ' + fkey + ': ' + ftype.__name__ + ' = '
        try:
            field_repr = repr(getattr(self, fkey))
            if '\n' in field_repr:  # if multiline, indent it, but starting from the value.
                i = field_repr.index('\n')
                field_repr = field_repr[:i+1] + indent(field_repr[i+1:], ' ' * len(field_start))
            return field_start + field_repr
        except NavigationError:
            return f"{field_start} *omitted from partial*"
    def __repr__(self):
        return f"{self.__class__.__name__}(Container)\n" + '\n'.join(
            indent(self._get_field_val_repr(fkey, ftype), ' ')
            for fkey, ftype in self.__class__.fields().items())
    @classmethod
    def type_repr(cls) -> str:
        return f"{cls.__name__}(Container)\n" + '\n'.join(
            (' ' + fkey + ': ' + ftype.__name__) for fkey, ftype in cls.fields().items())
    def __iter__(self):
        tree_depth = self.tree_depth()
        backing = self.get_backing()
        return ContainerElemIter(backing, tree_depth, list(self.__class__.fields().values()))
    @classmethod
    def decode_bytes(cls: Type[V], bytez: bytes) -> V:
        """Deserialize a container from a bytes value."""
        stream = io.BytesIO()
        stream.write(bytez)
        stream.seek(0)
        return cls.deserialize(stream, len(bytez))
    @classmethod
    def deserialize(cls: Type[CV], stream: BinaryIO, scope: int) -> CV:
        """Deserialize from ``stream``, reading exactly ``scope`` bytes.

        Fixed-size fields are read in place; dynamic fields are located via
        their offsets, which are validated for ordering and size bounds.
        """
        fields = cls.fields()
        field_values: Dict[str, View]
        if cls.is_fixed_byte_length():
            field_values = {fkey: ftyp.deserialize(stream, ftyp.type_byte_length()) for fkey, ftyp in fields.items()}
        else:
            field_values = {}
            dyn_fields: PyList[FieldOffset] = []
            fixed_size = 0
            for fkey, ftyp in fields.items():
                if ftyp.is_fixed_byte_length():
                    fsize = ftyp.type_byte_length()
                    field_values[fkey] = ftyp.deserialize(stream, fsize)
                    fixed_size += fsize
                else:
                    dyn_fields.append(FieldOffset(key=fkey, typ=ftyp, offset=int(decode_offset(stream))))
                    fixed_size += OFFSET_BYTE_LENGTH
            if len(dyn_fields) > 0:
                if dyn_fields[0].offset < fixed_size:
                    raise Exception(f"first offset is smaller than expected fixed size")
                for i, (fkey, ftyp, foffset) in enumerate(dyn_fields):
                    next_offset = dyn_fields[i + 1].offset if i + 1 < len(dyn_fields) else scope
                    if foffset > next_offset:
                        raise Exception(f"offset {i} is invalid: {foffset} larger than next offset {next_offset}")
                    fsize = next_offset - foffset
                    f_min_size, f_max_size = ftyp.min_byte_length(), ftyp.max_byte_length()
                    if not (f_min_size <= fsize <= f_max_size):
                        raise Exception(f"offset {i} is invalid, size out of bounds: {foffset}, next {next_offset},"
                                        f" implied size: {fsize}, size bounds: [{f_min_size}, {f_max_size}]")
                    field_values[fkey] = ftyp.deserialize(stream, fsize)
        return cls(**field_values)  # type: ignore
    def serialize(self, stream: BinaryIO) -> int:
        """Write the SSZ encoding of this container to ``stream``; return bytes written."""
        fields = self.__class__.fields()
        is_fixed_size = self.is_fixed_byte_length()
        temp_dyn_stream: BinaryIO
        # prologue size: fixed fields in place, dynamic fields as offsets
        written = sum(map((lambda x: x.type_byte_length() if x.is_fixed_byte_length() else OFFSET_BYTE_LENGTH),
                          fields.values()))
        if not is_fixed_size:
            temp_dyn_stream = io.BytesIO()
        for fkey, ftyp in fields.items():
            v: View = getattr(self, fkey)
            if ftyp.is_fixed_byte_length():
                v.serialize(stream)
            else:
                encode_offset(stream, written)
                written += v.serialize(temp_dyn_stream)  # type: ignore
        if not is_fixed_size:
            temp_dyn_stream.seek(0)
            stream.write(temp_dyn_stream.read(written))
        return written
    @classmethod
    def from_obj(cls: Type[CV], obj: ObjType) -> CV:
        """Build a container from an untyped dict of field objects."""
        if not isinstance(obj, dict):
            raise ObjParseException(f"obj '{obj}' is not a dict")
        fields = cls.fields()
        for k in obj.keys():
            if k not in fields:
                raise ObjParseException(f"obj '{obj}' has unknown key {k}")
        return cls(**{k: fields[k].from_obj(v) for k, v in obj.items()})  # type: ignore
    def to_obj(self) -> ObjType:
        """Convert to an untyped object: a dict of field name -> field obj."""
        return {f_k: f_v.to_obj() for f_k, f_v in zip(self.__class__.fields().keys(), self.__iter__())}
    @classmethod
    def key_to_static_gindex(cls, key: Any) -> Gindex:
        """Map a field name to the generalized index of its subtree."""
        fields = cls.fields()
        try:
            field_index = list(fields.keys()).index(key)
        except ValueError:  # list.index raises ValueError if the element (a key here) is missing
            raise KeyError
        return to_gindex(field_index, cls.tree_depth())
    @classmethod
    def navigate_type(cls, key: Any) -> Type[View]:
        return cls.fields()[key]
    def navigate_view(self, key: Any) -> View:
        return self.__getattr__(key)
| [
"itertools.chain",
"remerkleable.core.ObjParseException",
"remerkleable.basic.uint256.view_from_backing",
"remerkleable.tree.zero_node",
"textwrap.indent",
"remerkleable.basic.uint32",
"io.BytesIO",
"remerkleable.tree.get_depth",
"remerkleable.basic.uint256",
"remerkleable.readonly_iters.ComplexEl... | [((758, 782), 'typing.TypeVar', 'TypeVar', (['"""V"""'], {'bound': 'View'}), "('V', bound=View)\n", (765, 782), False, 'from typing import NamedTuple, cast, List as PyList, Dict, Any, BinaryIO, Optional, TypeVar, Type, Protocol, runtime_checkable\n'), ((1395, 1432), 'typing.TypeVar', 'TypeVar', (['"""M"""'], {'bound': '"""MonoSubtreeView"""'}), "('M', bound='MonoSubtreeView')\n", (1402, 1432), False, 'from typing import NamedTuple, cast, List as PyList, Dict, Any, BinaryIO, Optional, TypeVar, Type, Protocol, runtime_checkable\n'), ((24788, 24820), 'typing.TypeVar', 'TypeVar', (['"""CV"""'], {'bound': '"""Container"""'}), "('CV', bound='Container')\n", (24795, 24820), False, 'from typing import NamedTuple, cast, List as PyList, Dict, Any, BinaryIO, Optional, TypeVar, Type, Protocol, runtime_checkable\n'), ((856, 902), 'remerkleable.basic.uint32.deserialize', 'uint32.deserialize', (['stream', 'OFFSET_BYTE_LENGTH'], {}), '(stream, OFFSET_BYTE_LENGTH)\n', (874, 902), False, 'from remerkleable.basic import uint256, uint8, uint32\n'), ((1088, 1100), 'io.BytesIO', 'io.BytesIO', ([], {}), '()\n', (1098, 1100), False, 'import io\n'), ((1274, 1286), 'io.BytesIO', 'io.BytesIO', ([], {}), '()\n', (1284, 1286), False, 'import io\n'), ((30712, 30724), 'io.BytesIO', 'io.BytesIO', ([], {}), '()\n', (30722, 30724), False, 'import io\n'), ((967, 981), 'remerkleable.basic.uint32', 'uint32', (['offset'], {}), '(offset)\n', (973, 981), False, 'from remerkleable.basic import uint256, uint8, uint32\n'), ((6451, 6463), 'io.BytesIO', 'io.BytesIO', ([], {}), '()\n', (6461, 6463), False, 'import io\n'), ((6968, 7024), 'remerkleable.core.ObjParseException', 'ObjParseException', (['f"""obj \'{obj}\' is not a list or tuple"""'], {}), '(f"obj \'{obj}\' is not a list or tuple")\n', (6985, 7024), False, 'from remerkleable.core import View, BasicView, OFFSET_BYTE_LENGTH, ViewHook, ObjType, ObjParseException\n'), ((11877, 11936), 
'remerkleable.tree.get_depth', 'get_depth', (['((limit + elems_per_chunk - 1) // elems_per_chunk)'], {}), '((limit + elems_per_chunk - 1) // elems_per_chunk)\n', (11886, 11936), False, 'from remerkleable.tree import Node, subtree_fill_to_length, subtree_fill_to_contents, zero_node, Gindex, PairNode, to_gindex, NavigationError, get_depth\n'), ((12006, 12022), 'remerkleable.tree.get_depth', 'get_depth', (['limit'], {}), '(limit)\n', (12015, 12022), False, 'from remerkleable.tree import Node, subtree_fill_to_length, subtree_fill_to_contents, zero_node, Gindex, PairNode, to_gindex, NavigationError, get_depth\n'), ((12668, 12718), 'remerkleable.basic.uint256.view_from_backing', 'uint256.view_from_backing', ([], {'node': 'll_node', 'hook': 'None'}), '(node=ll_node, hook=None)\n', (12693, 12718), False, 'from remerkleable.basic import uint256, uint8, uint32\n'), ((18088, 18100), 'remerkleable.tree.zero_node', 'zero_node', (['(0)'], {}), '(0)\n', (18097, 18100), False, 'from remerkleable.tree import Node, subtree_fill_to_length, subtree_fill_to_contents, zero_node, Gindex, PairNode, to_gindex, NavigationError, get_depth\n'), ((20592, 20652), 'remerkleable.tree.get_depth', 'get_depth', (['((length + elems_per_chunk - 1) // elems_per_chunk)'], {}), '((length + elems_per_chunk - 1) // elems_per_chunk)\n', (20601, 20652), False, 'from remerkleable.tree import Node, subtree_fill_to_length, subtree_fill_to_contents, zero_node, Gindex, PairNode, to_gindex, NavigationError, get_depth\n'), ((20718, 20735), 'remerkleable.tree.get_depth', 'get_depth', (['length'], {}), '(length)\n', (20727, 20735), False, 'from remerkleable.tree import Node, subtree_fill_to_length, subtree_fill_to_contents, zero_node, Gindex, PairNode, to_gindex, NavigationError, get_depth\n'), ((23560, 23572), 'remerkleable.tree.zero_node', 'zero_node', (['(0)'], {}), '(0)\n', (23569, 23572), False, 'from remerkleable.tree import Node, subtree_fill_to_length, subtree_fill_to_contents, zero_node, Gindex, PairNode, 
to_gindex, NavigationError, get_depth\n'), ((33161, 33173), 'io.BytesIO', 'io.BytesIO', ([], {}), '()\n', (33171, 33173), False, 'import io\n'), ((33747, 33794), 'remerkleable.core.ObjParseException', 'ObjParseException', (['f"""obj \'{obj}\' is not a dict"""'], {}), '(f"obj \'{obj}\' is not a dict")\n', (33764, 33794), False, 'from remerkleable.core import View, BasicView, OFFSET_BYTE_LENGTH, ViewHook, ObjType, ObjParseException\n'), ((3356, 3388), 'typing.cast', 'cast', (['Type[BasicView]', 'elem_type'], {}), '(Type[BasicView], elem_type)\n', (3360, 3388), False, 'from typing import NamedTuple, cast, List as PyList, Dict, Any, BinaryIO, Optional, TypeVar, Type, Protocol, runtime_checkable\n'), ((3653, 3708), 'remerkleable.readonly_iters.ComplexElemIter', 'ComplexElemIter', (['backing', 'tree_depth', 'length', 'elem_type'], {}), '(backing, tree_depth, length, elem_type)\n', (3668, 3708), False, 'from remerkleable.readonly_iters import PackedIter, ComplexElemIter, ComplexFreshElemIter, ContainerElemIter\n'), ((7947, 7965), 'itertools.chain', 'chain', (['self', 'other'], {}), '(self, other)\n', (7952, 7965), False, 'from itertools import chain\n'), ((14541, 14556), 'remerkleable.basic.uint256', 'uint256', (['(ll + 1)'], {}), '(ll + 1)\n', (14548, 14556), False, 'from remerkleable.basic import uint256, uint8, uint32\n'), ((15954, 15966), 'remerkleable.tree.zero_node', 'zero_node', (['(0)'], {}), '(0)\n', (15963, 15966), False, 'from remerkleable.tree import Node, subtree_fill_to_length, subtree_fill_to_contents, zero_node, Gindex, PairNode, to_gindex, NavigationError, get_depth\n'), ((16480, 16495), 'remerkleable.basic.uint256', 'uint256', (['(ll - 1)'], {}), '(ll - 1)\n', (16487, 16495), False, 'from remerkleable.basic import uint256, uint8, uint32\n'), ((33908, 33961), 'remerkleable.core.ObjParseException', 'ObjParseException', (['f"""obj \'{obj}\' has unknown key {k}"""'], {}), '(f"obj \'{obj}\' has unknown key {k}")\n', (33925, 33961), False, 'from 
remerkleable.core import View, BasicView, OFFSET_BYTE_LENGTH, ViewHook, ObjType, ObjParseException\n'), ((2682, 2712), 'typing.cast', 'cast', (['PyList[BasicView]', 'views'], {}), '(PyList[BasicView], views)\n', (2686, 2712), False, 'from typing import NamedTuple, cast, List as PyList, Dict, Any, BinaryIO, Optional, TypeVar, Type, Protocol, runtime_checkable\n'), ((3583, 3610), 'typing.cast', 'cast', (['Type[View]', 'elem_type'], {}), '(Type[View], elem_type)\n', (3587, 3610), False, 'from typing import NamedTuple, cast, List as PyList, Dict, Any, BinaryIO, Optional, TypeVar, Type, Protocol, runtime_checkable\n'), ((5217, 5230), 'remerkleable.basic.uint32', 'uint32', (['scope'], {}), '(scope)\n', (5223, 5230), False, 'from remerkleable.basic import uint256, uint8, uint32\n'), ((9795, 9817), 'textwrap.indent', 'indent', (['contents', '""" """'], {}), "(contents, ' ')\n", (9801, 9817), False, 'from textwrap import indent\n'), ((13894, 13906), 'remerkleable.tree.zero_node', 'zero_node', (['(0)'], {}), '(0)\n', (13903, 13906), False, 'from remerkleable.tree import Node, subtree_fill_to_length, subtree_fill_to_contents, zero_node, Gindex, PairNode, to_gindex, NavigationError, get_depth\n'), ((15311, 15323), 'remerkleable.tree.zero_node', 'zero_node', (['(0)'], {}), '(0)\n', (15320, 15323), False, 'from remerkleable.tree import Node, subtree_fill_to_length, subtree_fill_to_contents, zero_node, Gindex, PairNode, to_gindex, NavigationError, get_depth\n'), ((6676, 6689), 'typing.cast', 'cast', (['View', 'v'], {}), '(View, v)\n', (6680, 6689), False, 'from typing import NamedTuple, cast, List as PyList, Dict, Any, BinaryIO, Optional, TypeVar, Type, Protocol, runtime_checkable\n'), ((12997, 13011), 'typing.cast', 'cast', (['View', 'el'], {}), '(View, el)\n', (13001, 13011), False, 'from typing import NamedTuple, cast, List as PyList, Dict, Any, BinaryIO, Optional, TypeVar, Type, Protocol, runtime_checkable\n'), ((22523, 22537), 'typing.cast', 'cast', (['View', 'el'], {}), 
'(View, el)\n', (22527, 22537), False, 'from typing import NamedTuple, cast, List as PyList, Dict, Any, BinaryIO, Optional, TypeVar, Type, Protocol, runtime_checkable\n')] |
"""Generate random numbers in [0, 50] until 32 is drawn, then report:
a. the sum of every generated number
b. how many generated numbers are odd
c. the smallest generated number
"""
import random

# Running totals; 51 exceeds every possible draw, so the first draw
# always replaces it as the minimum.
somaNumeros = 0
qqntImpares = 0
menorNumero = 51
# The original primed `x` before the loop and then immediately redrew it
# inside, so the very first number was never counted. Drawing once per
# iteration and breaking after counting fixes that: every draw, including
# the terminating 32, contributes to the statistics.
while True:
    x = random.randint(0, 50)
    somaNumeros = somaNumeros + x
    if x % 2 != 0:
        qqntImpares = qqntImpares + 1
    if menorNumero > x:
        menorNumero = x
    if x == 32:
        break
print('A soma de todos os números é {}'.format(somaNumeros))
print('A quantidade de números ímpares é {}'.format(qqntImpares))
print('O menor número é {}'.format(menorNumero))
| [
"random.randint"
] | [((253, 274), 'random.randint', 'random.randint', (['(0)', '(50)'], {}), '(0, 50)\n', (267, 274), False, 'import random\n'), ((358, 379), 'random.randint', 'random.randint', (['(0)', '(50)'], {}), '(0, 50)\n', (372, 379), False, 'import random\n')] |
# encoding=utf8
import HttpUtils
class YibaiApiError(Exception):
    """Raised when the Yibai API responds with a non-200 status code.

    The numeric code is kept on ``self.code``; the message becomes the
    exception text.
    """
    def __init__(self, code, message):
        self.code = code
        super(YibaiApiError, self).__init__(message)
class YibaiClient(object):
    """Thin HTTP client for the Yibai SMS API.

    Every call posts JSON to ``serverUrl + path`` with the apikey injected,
    and returns the 'response' payload, raising YibaiApiError on failure.
    """
    def __init__(self, server_url, apikey):
        self.serverUrl = server_url
        self.apikey = apikey

    def sms_batch_submit(self, submits):
        """Submit a batch of SMS messages."""
        payload = {'submits': submits}
        return self.__execute(payload, '/sms/batchSubmit')

    def sms_pull_status_report(self):
        """Pull pending delivery status reports."""
        return self.__execute({}, '/sms/pullStatusReport')

    def sms_pull_reply_message(self):
        """Pull pending inbound reply messages."""
        return self.__execute({}, '/sms/pullReply')

    def user_info(self):
        """Fetch account information for the configured apikey."""
        return self.__execute({}, '/user/info')

    def __execute(self, request, url_path):
        # inject credentials, post, and unwrap the API envelope
        request['apikey'] = self.apikey
        res = HttpUtils.post_json(self.serverUrl + url_path, request)
        if res['code'] != 200:
            raise YibaiApiError(res['code'], res['message'])
        return res['response']
| [
"HttpUtils.post_json"
] | [((874, 911), 'HttpUtils.post_json', 'HttpUtils.post_json', (['req_url', 'request'], {}), '(req_url, request)\n', (893, 911), False, 'import HttpUtils\n')] |
# Generated by Django 3.1.3 on 2020-11-15 16:01
from django.db import migrations
class Migration(migrations.Migration):
    """Auto-generated migration: drop the ``likers`` field from the ``post`` model."""
    dependencies = [
        ('network', '0004_auto_20201111_2224'),
    ]
    operations = [
        migrations.RemoveField(
            model_name='post',
            name='likers',
        ),
    ]
| [
"django.db.migrations.RemoveField"
] | [((227, 283), 'django.db.migrations.RemoveField', 'migrations.RemoveField', ([], {'model_name': '"""post"""', 'name': '"""likers"""'}), "(model_name='post', name='likers')\n", (249, 283), False, 'from django.db import migrations\n')] |
#!/usr/bin/env python3
from gcal_helpers import helpers
# Generate the "newsletter" transformation output via the shared gcal helpers.
helpers.write_transformation("newsletter")
| [
"gcal_helpers.helpers.write_transformation"
] | [((58, 100), 'gcal_helpers.helpers.write_transformation', 'helpers.write_transformation', (['"""newsletter"""'], {}), "('newsletter')\n", (86, 100), False, 'from gcal_helpers import helpers\n')] |
"""
Sliding Window Matching
=======================
Find recurring patterns in neural signals using Sliding Window Matching.
This tutorial primarily covers the :func:`~.sliding_window_matching` function.
"""
###################################################################################################
# Overview
# --------
#
# Non-periodic or non-sinusoidal properties can be difficult to assess in frequency domain
# methods. To try and address this, the sliding window matching (SWM) algorithm has been
# proposed for detecting and measuring recurring, but unknown, patterns in time series data.
# Patterns of interest may be transient events, and/or the waveform shape of neural oscillations.
#
# In this example, we will explore applying the SWM algorithm to some LFP data.
#
# The SWM approach tries to find recurring patterns (or motifs) in the data, using sliding
# windows. An iterative process samples windows randomly, and compares each to the average
# window. The goal is to find a selection of windows that look maximally like the average
# window, at which point the occurrences of the window have been detected, and the average
# window pattern can be examined.
#
# The sliding window matching algorithm is described in
# `Gips et al, 2017 <https://doi.org/10.1016/j.jneumeth.2016.11.001>`_
#
###################################################################################################
# sphinx_gallery_thumbnail_number = 2
import numpy as np
# Import the sliding window matching function
from neurodsp.rhythm import sliding_window_matching
# Import utilities for loading and plotting data
from neurodsp.utils.download import load_ndsp_data
from neurodsp.plts.rhythm import plot_swm_pattern
from neurodsp.plts.time_series import plot_time_series
from neurodsp.utils import set_random_seed, create_times
from neurodsp.utils.norm import normalize_sig
###################################################################################################
# Set random seed, for reproducibility
set_random_seed(0)
###################################################################################################
# Load neural signal
# ------------------
#
# First, we will load a segment of ECoG data, as an example time series.
#
###################################################################################################
# Download, if needed, and load example data files
sig = load_ndsp_data('sample_data_1.npy', folder='data')
sig = normalize_sig(sig, mean=0, variance=1)
# Set sampling rate, and create a times vector for plotting
fs = 1000
times = create_times(len(sig)/fs, fs)
###################################################################################################
#
# Next, we can visualize this data segment. As we can see this segment of data has
# some prominent bursts of oscillations, in this case, in the beta frequency.
#
###################################################################################################
# Plot example signal
plot_time_series(times, sig)
###################################################################################################
# Apply sliding window matching
# -----------------------------
#
# The beta oscillation in our data segment looks like it might have some non-sinusoidal
# properties. We can investigate this with sliding window matching.
#
# Sliding window matching can be applied with the
# :func:`~.sliding_window_matching` function.
#
###################################################################################################
# Data Preprocessing
# ~~~~~~~~~~~~~~~~~~
#
# Typically, the input signal does not have to be filtered into a band of interest to use SWM.
#
# If the goal is to characterize non-sinusoidal rhythms, you typically won't want to
# apply a filter that will smooth out the features of interest.
#
# However, if the goal is to characterize higher frequency activity, it can be useful to
# apply a highpass filter, so that the method does not converge on a lower frequency motif.
#
# In our case, the beta rhythm of interest is the most prominent, low frequency, feature of the
# data, so we won't apply a filter.
#
###################################################################################################
# Algorithm Settings
# ~~~~~~~~~~~~~~~~~~
#
# The SWM algorithm has some algorithm specific settings that need to be applied, including:
#
# - `win_len` : the length of the window, defined in seconds
# - `win_spacing` : the minimum distance between windows, also defined in seconds
#
# The length of the window influences the patterns that are extracted from the data.
# Typically, you want to set the window length to match the expected timescale of the
# patterns under study.
#
# For our purposes, we will define the window length to be about 1 cycle of a beta oscillation,
# which should help the algorithm to find the waveform shape of the neural oscillation.
#
###################################################################################################
# Define window length & minimum window spacing, both in seconds
win_len = .055
win_spacing = .055
###################################################################################################
# Apply the sliding window matching algorithm to the time series
windows, window_starts = sliding_window_matching(sig, fs, win_len, win_spacing, var_thresh=.5)
###################################################################################################
# Examine the Results
# ~~~~~~~~~~~~~~~~~~~
#
# What we got back from the SWM function are the calculate average window, the list
# of indices in the data of the windows, and the calculated costs for each iteration of
# the algorithm run.
#
# In order to visualize the resulting pattern, we can use
# :func:`~.plot_swm_pattern`.
#
###################################################################################################
# Compute the average window
avg_window = np.mean(windows, 0)
# Plot the discovered pattern
plot_swm_pattern(avg_window)
###################################################################################################
#
# In the above average pattern, that looks to capture a beta rhythm, we can notice some
# waveform shape of the extracted rhythm.
#
###################################################################################################
# Concluding Notes
# ~~~~~~~~~~~~~~~~
#
# One thing to keep in mind is that the SWM algorithm includes a random element of sampling
# and comparing the windows - meaning it is not deterministic. Because of this, results
# can change with different random seeds.
#
# To explore this, go back and change the random seed, and see how the output changes.
#
# You can also set the number of iterations that the algorithm sweeps through. Increasing
# the number of iterations, and using longer data segments, can help improve the robustness
# of the algorithm results.
#
| [
"numpy.mean",
"neurodsp.utils.set_random_seed",
"neurodsp.rhythm.sliding_window_matching",
"neurodsp.plts.time_series.plot_time_series",
"neurodsp.plts.rhythm.plot_swm_pattern",
"neurodsp.utils.download.load_ndsp_data",
"neurodsp.utils.norm.normalize_sig"
] | [((2026, 2044), 'neurodsp.utils.set_random_seed', 'set_random_seed', (['(0)'], {}), '(0)\n', (2041, 2044), False, 'from neurodsp.utils import set_random_seed, create_times\n'), ((2424, 2474), 'neurodsp.utils.download.load_ndsp_data', 'load_ndsp_data', (['"""sample_data_1.npy"""'], {'folder': '"""data"""'}), "('sample_data_1.npy', folder='data')\n", (2438, 2474), False, 'from neurodsp.utils.download import load_ndsp_data\n'), ((2481, 2519), 'neurodsp.utils.norm.normalize_sig', 'normalize_sig', (['sig'], {'mean': '(0)', 'variance': '(1)'}), '(sig, mean=0, variance=1)\n', (2494, 2519), False, 'from neurodsp.utils.norm import normalize_sig\n'), ((3019, 3047), 'neurodsp.plts.time_series.plot_time_series', 'plot_time_series', (['times', 'sig'], {}), '(times, sig)\n', (3035, 3047), False, 'from neurodsp.plts.time_series import plot_time_series\n'), ((5342, 5412), 'neurodsp.rhythm.sliding_window_matching', 'sliding_window_matching', (['sig', 'fs', 'win_len', 'win_spacing'], {'var_thresh': '(0.5)'}), '(sig, fs, win_len, win_spacing, var_thresh=0.5)\n', (5365, 5412), False, 'from neurodsp.rhythm import sliding_window_matching\n'), ((5988, 6007), 'numpy.mean', 'np.mean', (['windows', '(0)'], {}), '(windows, 0)\n', (5995, 6007), True, 'import numpy as np\n'), ((6039, 6067), 'neurodsp.plts.rhythm.plot_swm_pattern', 'plot_swm_pattern', (['avg_window'], {}), '(avg_window)\n', (6055, 6067), False, 'from neurodsp.plts.rhythm import plot_swm_pattern\n')] |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
-------------------------------------------------
@ Author : pengj
@ date : 2018/11/1 19:03
@ IDE : PyCharm
@ GitHub : https://github.com/JackyPJB
@ Contact : <EMAIL>
-------------------------------------------------
Description :
将字符串 "PAYPALISHIRING" 以Z字形排列成给定的行数:
P A H N
A P L S I I G
Y I R
之后从左往右,逐行读取字符:"PAHNAPLSIIGYIR"
实现一个将字符串进行指定行数变换的函数:
string convert(string s, int numRows);
示例 1:
输入: s = "PAYPALISHIRING", numRows = 3
输出: "PAHNAPLSIIGYIR"
示例 2:
输入: s = "PAYPALISHIRING", numRows = 4
输出: "PINALSIGYAHRPI"
解释:
P I N
A L S I G
Y A H R
P I
-------------------------------------------------
"""
import time
__author__ = 'Max_Pengjb'
start = time.time()
# 下面写上代码块
def z_arrange(s, numrows):
if numrows < 2:
return s
res = [[] for _ in range(numrows)]
circle = numrows * 2 - 2
length = len(s)
for i in range(length):
t = i % circle if i % circle < numrows else circle - i % circle
# if t < numrows:
# res[t].append(s[i])
# else:
# t = circle - t
res[t].append(s[i])
print(res)
return ''.join(map(lambda x: ''.join(x), res))
ss = "PAYPALISHIRING"
print(z_arrange(ss, 4))
# 上面中间写上代码块
end = time.time()
print('Running time: %s Seconds' % (end - start))
| [
"time.time"
] | [((808, 819), 'time.time', 'time.time', ([], {}), '()\n', (817, 819), False, 'import time\n'), ((1349, 1360), 'time.time', 'time.time', ([], {}), '()\n', (1358, 1360), False, 'import time\n')] |
# Generated by Django 2.0.2 on 2018-03-03 13:53
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('ved', '0009_auto_20180302_1839'),
]
operations = [
migrations.AlterField(
model_name='order',
name='firewood_choice',
field=models.CharField(max_length=50, verbose_name='Val'),
),
migrations.AlterField(
model_name='order',
name='order_status',
field=models.CharField(choices=[('Ej påbörjad', 'Ej påbörjad'), ('Påbörjad', 'Påbörjad'), ('Levererad', 'Levererad')], default='Ej påbörjad', max_length=30, verbose_name='Status på order'),
),
]
| [
"django.db.models.CharField"
] | [((340, 391), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(50)', 'verbose_name': '"""Val"""'}), "(max_length=50, verbose_name='Val')\n", (356, 391), False, 'from django.db import migrations, models\n'), ((518, 708), 'django.db.models.CharField', 'models.CharField', ([], {'choices': "[('Ej påbörjad', 'Ej påbörjad'), ('Påbörjad', 'Påbörjad'), ('Levererad',\n 'Levererad')]", 'default': '"""Ej påbörjad"""', 'max_length': '(30)', 'verbose_name': '"""Status på order"""'}), "(choices=[('Ej påbörjad', 'Ej påbörjad'), ('Påbörjad',\n 'Påbörjad'), ('Levererad', 'Levererad')], default='Ej påbörjad',\n max_length=30, verbose_name='Status på order')\n", (534, 708), False, 'from django.db import migrations, models\n')] |
from apistar import App, Route, TestClient
from apistar.settings import Setting, Settings
def get_settings(settings: Settings):
return settings
def get_setting(ABC: Setting):
return {'ABC': ABC}
routes = [
Route('/settings/', 'GET', get_settings),
Route('/setting/', 'GET', get_setting),
]
settings = {
'ABC': 123,
'XYZ': 456
}
app = App(routes=routes, settings=settings)
client = TestClient(app)
def test_settings():
response = client.get('/settings/')
assert response.status_code == 200
assert response.json() == {
'ABC': 123,
'XYZ': 456
}
def test_setting():
response = client.get('/setting/')
assert response.status_code == 200
assert response.json() == {
'ABC': 123,
}
def test_use_setting_as_argument():
abc = Setting(789)
assert get_setting(abc) == {'ABC': 789}
def test_settings_lookup():
settings = Settings(
ABC=123,
DEF={'XYZ': 456}
)
assert settings.get('ABC') == 123
assert settings.get(['DEF']) == {'XYZ': 456}
assert settings.get(['DEF', 'XYZ']) == 456
assert settings.get('missing') is None
assert settings.get(['ABC', 'missing']) is None
assert settings.get(['DEF', 'missing']) is None
assert settings.get(['DEF', 'missing'], '') == ''
| [
"apistar.settings.Settings",
"apistar.App",
"apistar.settings.Setting",
"apistar.Route",
"apistar.TestClient"
] | [((366, 403), 'apistar.App', 'App', ([], {'routes': 'routes', 'settings': 'settings'}), '(routes=routes, settings=settings)\n', (369, 403), False, 'from apistar import App, Route, TestClient\n'), ((414, 429), 'apistar.TestClient', 'TestClient', (['app'], {}), '(app)\n', (424, 429), False, 'from apistar import App, Route, TestClient\n'), ((224, 264), 'apistar.Route', 'Route', (['"""/settings/"""', '"""GET"""', 'get_settings'], {}), "('/settings/', 'GET', get_settings)\n", (229, 264), False, 'from apistar import App, Route, TestClient\n'), ((270, 308), 'apistar.Route', 'Route', (['"""/setting/"""', '"""GET"""', 'get_setting'], {}), "('/setting/', 'GET', get_setting)\n", (275, 308), False, 'from apistar import App, Route, TestClient\n'), ((815, 827), 'apistar.settings.Setting', 'Setting', (['(789)'], {}), '(789)\n', (822, 827), False, 'from apistar.settings import Setting, Settings\n'), ((917, 952), 'apistar.settings.Settings', 'Settings', ([], {'ABC': '(123)', 'DEF': "{'XYZ': 456}"}), "(ABC=123, DEF={'XYZ': 456})\n", (925, 952), False, 'from apistar.settings import Setting, Settings\n')] |
# -*- coding: utf-8 -*-
from django.shortcuts import get_object_or_404
from rest_framework import status
from rest_framework.decorators import api_view
from rest_framework.response import Response
from ..serializers.request_serializers import GetReminderRequest, UpdateReminderRequest
from ..utils import invalid_data_response, validate_api_secret_key
from app.models import TheUser
# ----------------------------------------------------------------------------------------------------------------------
@api_view(['POST'])
def get_reminders(request):
"""
Returns the reminders status.
"""
validate_api_secret_key(request.data.get('app_key'))
request_serializer = GetReminderRequest(data=request.data)
if request_serializer.is_valid():
the_user = get_object_or_404(TheUser, auth_token=request.data.get('user_token'))
return Response({'detail': 'successful',
'data': the_user.get_api_reminders()},
status=status.HTTP_200_OK)
else:
return invalid_data_response(request_serializer)
# ----------------------------------------------------------------------------------------------------------------------
@api_view(['POST'])
def update_reminder(request):
"""
Changes the status of the reminder.
"""
validate_api_secret_key(request.data.get('app_key'))
request_serializer = UpdateReminderRequest(data=request.data)
if request_serializer.is_valid():
the_user = get_object_or_404(TheUser, auth_token=request.data.get('user_token'))
the_user.update_reminder(request.data.get('field'), request.data.get('value'))
return Response({'detail': 'successful'},
status=status.HTTP_200_OK)
else:
return invalid_data_response(request_serializer)
| [
"rest_framework.response.Response",
"rest_framework.decorators.api_view"
] | [((510, 528), 'rest_framework.decorators.api_view', 'api_view', (["['POST']"], {}), "(['POST'])\n", (518, 528), False, 'from rest_framework.decorators import api_view\n'), ((1212, 1230), 'rest_framework.decorators.api_view', 'api_view', (["['POST']"], {}), "(['POST'])\n", (1220, 1230), False, 'from rest_framework.decorators import api_view\n'), ((1672, 1733), 'rest_framework.response.Response', 'Response', (["{'detail': 'successful'}"], {'status': 'status.HTTP_200_OK'}), "({'detail': 'successful'}, status=status.HTTP_200_OK)\n", (1680, 1733), False, 'from rest_framework.response import Response\n')] |
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
import anaf.documents.models
import anaf.documents.files
class Migration(migrations.Migration):
dependencies = [
('core', '0001_initial'),
]
operations = [
migrations.CreateModel(
name='Document',
fields=[
('object_ptr', models.OneToOneField(parent_link=True, auto_created=True, primary_key=True, serialize=False, to='core.Object')),
('title', models.CharField(max_length=255)),
('body', models.TextField(null=True, blank=True)),
],
options={
'ordering': ['-last_updated'],
},
bases=('core.object',),
),
migrations.CreateModel(
name='File',
fields=[
('object_ptr', models.OneToOneField(parent_link=True, auto_created=True, primary_key=True, serialize=False, to='core.Object')),
('name', models.CharField(max_length=255)),
('content', models.FileField(storage=anaf.documents.files.FileStorage(), upload_to=anaf.documents.models.generate_filename)),
],
options={
'ordering': ['-last_updated'],
},
bases=('core.object',),
),
migrations.CreateModel(
name='Folder',
fields=[
('object_ptr', models.OneToOneField(parent_link=True, auto_created=True, primary_key=True, serialize=False, to='core.Object')),
('name', models.CharField(max_length=255)),
('parent', models.ForeignKey(related_name='child_set', blank=True, to='documents.Folder', null=True)),
],
options={
},
bases=('core.object',),
),
migrations.CreateModel(
name='WebLink',
fields=[
('object_ptr', models.OneToOneField(parent_link=True, auto_created=True, primary_key=True, serialize=False, to='core.Object')),
('title', models.CharField(max_length=255)),
('url', models.CharField(max_length=255)),
('folder', models.ForeignKey(to='documents.Folder')),
],
options={
'ordering': ['-last_updated'],
},
bases=('core.object',),
),
migrations.AddField(
model_name='file',
name='folder',
field=models.ForeignKey(to='documents.Folder'),
preserve_default=True,
),
migrations.AddField(
model_name='document',
name='folder',
field=models.ForeignKey(to='documents.Folder'),
preserve_default=True,
),
]
| [
"django.db.models.OneToOneField",
"django.db.models.TextField",
"django.db.models.CharField",
"django.db.models.ForeignKey"
] | [((2524, 2564), 'django.db.models.ForeignKey', 'models.ForeignKey', ([], {'to': '"""documents.Folder"""'}), "(to='documents.Folder')\n", (2541, 2564), False, 'from django.db import models, migrations\n'), ((2721, 2761), 'django.db.models.ForeignKey', 'models.ForeignKey', ([], {'to': '"""documents.Folder"""'}), "(to='documents.Folder')\n", (2738, 2761), False, 'from django.db import models, migrations\n'), ((399, 513), 'django.db.models.OneToOneField', 'models.OneToOneField', ([], {'parent_link': '(True)', 'auto_created': '(True)', 'primary_key': '(True)', 'serialize': '(False)', 'to': '"""core.Object"""'}), "(parent_link=True, auto_created=True, primary_key=True,\n serialize=False, to='core.Object')\n", (419, 513), False, 'from django.db import models, migrations\n'), ((538, 570), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(255)'}), '(max_length=255)\n', (554, 570), False, 'from django.db import models, migrations\n'), ((598, 637), 'django.db.models.TextField', 'models.TextField', ([], {'null': '(True)', 'blank': '(True)'}), '(null=True, blank=True)\n', (614, 637), False, 'from django.db import models, migrations\n'), ((895, 1009), 'django.db.models.OneToOneField', 'models.OneToOneField', ([], {'parent_link': '(True)', 'auto_created': '(True)', 'primary_key': '(True)', 'serialize': '(False)', 'to': '"""core.Object"""'}), "(parent_link=True, auto_created=True, primary_key=True,\n serialize=False, to='core.Object')\n", (915, 1009), False, 'from django.db import models, migrations\n'), ((1033, 1065), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(255)'}), '(max_length=255)\n', (1049, 1065), False, 'from django.db import models, migrations\n'), ((1467, 1581), 'django.db.models.OneToOneField', 'models.OneToOneField', ([], {'parent_link': '(True)', 'auto_created': '(True)', 'primary_key': '(True)', 'serialize': '(False)', 'to': '"""core.Object"""'}), "(parent_link=True, auto_created=True, primary_key=True,\n 
serialize=False, to='core.Object')\n", (1487, 1581), False, 'from django.db import models, migrations\n'), ((1605, 1637), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(255)'}), '(max_length=255)\n', (1621, 1637), False, 'from django.db import models, migrations\n'), ((1667, 1761), 'django.db.models.ForeignKey', 'models.ForeignKey', ([], {'related_name': '"""child_set"""', 'blank': '(True)', 'to': '"""documents.Folder"""', 'null': '(True)'}), "(related_name='child_set', blank=True, to=\n 'documents.Folder', null=True)\n", (1684, 1761), False, 'from django.db import models, migrations\n'), ((1970, 2084), 'django.db.models.OneToOneField', 'models.OneToOneField', ([], {'parent_link': '(True)', 'auto_created': '(True)', 'primary_key': '(True)', 'serialize': '(False)', 'to': '"""core.Object"""'}), "(parent_link=True, auto_created=True, primary_key=True,\n serialize=False, to='core.Object')\n", (1990, 2084), False, 'from django.db import models, migrations\n'), ((2109, 2141), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(255)'}), '(max_length=255)\n', (2125, 2141), False, 'from django.db import models, migrations\n'), ((2168, 2200), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(255)'}), '(max_length=255)\n', (2184, 2200), False, 'from django.db import models, migrations\n'), ((2230, 2270), 'django.db.models.ForeignKey', 'models.ForeignKey', ([], {'to': '"""documents.Folder"""'}), "(to='documents.Folder')\n", (2247, 2270), False, 'from django.db import models, migrations\n')] |
from __future__ import division
from __future__ import print_function
from __future__ import absolute_import
import tensorflow as tf
import numpy as np
EPS = 1e-10
def get_required_argument(dotmap, key, message, default=None):
val = dotmap.get(key, default)
if val is default:
raise ValueError(message)
return val
def gaussian_kl_np(mu0, log_std0, mu1, log_std1):
"""interprets each entry in mu_i and log_std_i as independent,
preserves shape
output clipped to {0, 1e10}
"""
var0, var1 = np.exp(2 * log_std0), np.exp(2 * log_std1)
pre_sum = 0.5*(((mu1- mu0)**2 + var0)/(var1+EPS) - 1) + log_std1 - log_std0
all_kls = pre_sum
#all_kls = np.mean(all_kls)
all_kls = np.clip(all_kls, 0, 1/EPS) ### for stability
return all_kls
def gaussian_jsd_np(mu0, log_std0, mu1, log_std1):
pass
def average_dkl(mu, std):
"""
Calculates the average kullback leiber divergences of multiple univariate gaussian distributions.
K(P1,…Pk) = 1/(k(k−1)) ∑_[k_(i,j)=1] DKL(Pi||Pj)
(<NAME>, Informational divergence and the dissimilarity of probability distributions.)
expects the distributions along axis 0, and samples along axis 1.
Output is reduced by axis 0
Args:
mu: array-like means
std: array-like stds
"""
## clip log
log_std = np.log(std)
log_std = np.clip(log_std, -100, 1e8)
assert len(mu.shape)>=2 and len(log_std.shape)>=2
num_models = len(mu)
d_kl = None
for i in range(num_models):
for j in range(num_models):
if d_kl is None:
d_kl = gaussian_kl_np(mu[i], log_std[i], mu[j], log_std[j])
else: d_kl+= gaussian_kl_np(mu[i], log_std[i], mu[j], log_std[j])
d_kl = d_kl/(num_models*(num_models-1)+EPS)
return d_kl
def median_dkl(mu, std):
"""
Calculates the median kullback leiber divergences of multiple univariate gaussian distributions.
K(P1,…Pk) = 1/(k(k−1)) ∑_[k_(i,j)=1] DKL(Pi||Pj)
(<NAME>, Informational divergence and the dissimilarity of probability distributions.)
expects the distributions along axis 0, and samples along axis 1.
Output is reduced by axis 0
Args:
mu: array-like means
std: array-like stds
"""
## clip log
log_std = np.log(std)
log_std = np.clip(log_std, -100, 1e8)
assert len(mu.shape)>=2 and len(log_std.shape)>=2
num_models = len(mu)
d_kl = np.zeros(shape=(num_models*(num_models-1),) + mu.shape[1:])
n = 0
for i in range(num_models):
for j in range(num_models):
if i != j:
d_kl[n] = gaussian_kl_np(mu[i], log_std[i], mu[j], log_std[j])
n += 1
d_kl_med = np.median(d_kl, axis=0)
return d_kl_med
class TensorStandardScaler:
"""Helper class for automatically normalizing inputs into the network.
"""
def __init__(self, x_dim, sc_factor=1, name='Scaler'):
"""Initializes a scaler.
Arguments:
x_dim (int): The dimensionality of the inputs into the scaler.
Returns: None.
"""
self.fitted = False
with tf.variable_scope(name):
self.count = tf.get_variable(
name=name+'_count', shape=(), initializer=tf.constant_initializer(0),
trainable=False
)
self.mu = tf.get_variable(
name=name+'_mu', shape=[1, x_dim], initializer=tf.constant_initializer(0.0),
trainable=False
)
self.var = tf.get_variable(
name=name+'_std', shape=[1, x_dim], initializer=tf.constant_initializer(1.0),
trainable=False
)
self.cached_count, self.cached_mu, self.cached_var = 0, np.zeros([1, x_dim]), np.ones([1, x_dim])
self.sc_factor = sc_factor
def fit(self, data):
"""Runs two ops, one for assigning the mean of the data to the internal mean, and
another for assigning the standard deviation of the data to the internal standard deviation.
This function must be called within a 'with <session>.as_default()' block.
Arguments:
data (np.ndarray): A numpy array containing the input
Returns: None.
"""
batch_count = data.shape[0]
batch_mu = np.mean(data, axis=0, keepdims=True)
batch_var = np.var(data, axis=0, keepdims=True)
new_mean, new_var, new_count = self.running_mean_var_from_batch(batch_mu, batch_var, batch_count)
#sigma[sigma < 1e-8] = 1.0
self.mu.load(new_mean)
self.var.load(new_var)
self.count.load(new_count)
self.fitted = True
self.cache()
def transform(self, data):
"""Transforms the input matrix data using the parameters of this scaler.
can be adjusted to scale with a factor, to control sensitivity to ood data:
d = (d-mu)/sigma = d + (d-mu)/sigma - d = d + (d(1-sigma)-mu)/sigma
and the version with scaling factor thus becomes
d = d + sc_factor*(d(1-sigma)-mu)/sigma
Arguments:
data (np.array): A numpy array containing the points to be transformed.
sc_factor: Factor to what degree the original dataset is transformed
Returns: (np.array) The transformed dataset.
"""
#scaled_transform = data + self.sc_factor * (data* (1-self.sigma) - self.mu) / self.sigma
# scaling = 1+self.sc_factor*(self.sigma-1)
# scaling = tf.clip_by_value(scaling, 1.0e-8, 1.0e8)
scaled_transform = (data-self.mu)/(tf.maximum(tf.sqrt(self.var)*self.sc_factor, 1e-2))
return scaled_transform
def inverse_transform(self, data):
"""Undoes the transformation performed by this scaler.
Arguments:
data (np.array): A numpy array containing the points to be transformed.
Returns: (np.array) The transformed dataset.
"""
return (tf.maximum(tf.sqrt(self.var)*self.sc_factor, 1e-2)) * data + self.mu
def inverse_transform_var(self, data):
"""Undoes the transformation performed by this scaler for variances.
Arguments:
data (np.array): A numpy array containing the points to be transformed.
Returns: (np.array) The transformed dataset.
"""
return tf.square(tf.maximum(tf.sqrt(self.var)*self.sc_factor, 1e-2)) * data
def inverse_transform_logvar(self, data):
"""Undoes the transformation performed by this scaler for variances.
Arguments:
data (np.array): A numpy array containing the points to be transformed.
Returns: (np.array) The transformed dataset.
"""
return 2*tf.log(tf.maximum(tf.sqrt(self.var)*self.sc_factor, 1e-2)) + data
def get_vars(self):
"""Returns a list of variables managed by this object.
Returns: (list<tf.Variable>) The list of variables.
"""
return [self.mu, self.var]
def get_mu(self):
return self.mu
def get_var(self):
return self.var
def cache(self):
"""Caches current values of this scaler.
Returns: None.
"""
self.cached_mu = self.mu.eval()
self.cached_var = self.var.eval()
self.cached_count = self.count.eval()
def load_cache(self):
"""Loads values from the cache
Returns: None.
"""
self.mu.load(self.cached_mu)
self.var.load(self.cached_var)
self.count.load(self.cached_count)
def decay_count(self, decay_rate=0.99):
self.count.load(self.cached_count*decay_rate)
def running_mean_var_from_batch(self, batch_mean, batch_var, batch_count):
delta = batch_mean - self.cached_mu
tot_count = self.cached_count + batch_count
new_mean = self.cached_mu + delta * batch_count / tot_count
m_a = self.cached_var * self.cached_count
m_b = batch_var * batch_count
M2 = m_a + m_b + np.square(delta) * self.cached_count * batch_count / tot_count
new_var = M2 / tot_count
new_count = tot_count
return new_mean, new_var, new_count
| [
"numpy.clip",
"numpy.mean",
"numpy.median",
"tensorflow.variable_scope",
"numpy.ones",
"numpy.log",
"numpy.square",
"numpy.exp",
"numpy.zeros",
"tensorflow.sqrt",
"tensorflow.constant_initializer",
"numpy.var"
] | [((724, 752), 'numpy.clip', 'np.clip', (['all_kls', '(0)', '(1 / EPS)'], {}), '(all_kls, 0, 1 / EPS)\n', (731, 752), True, 'import numpy as np\n'), ((1366, 1377), 'numpy.log', 'np.log', (['std'], {}), '(std)\n', (1372, 1377), True, 'import numpy as np\n'), ((1392, 1427), 'numpy.clip', 'np.clip', (['log_std', '(-100)', '(100000000.0)'], {}), '(log_std, -100, 100000000.0)\n', (1399, 1427), True, 'import numpy as np\n'), ((2338, 2349), 'numpy.log', 'np.log', (['std'], {}), '(std)\n', (2344, 2349), True, 'import numpy as np\n'), ((2364, 2399), 'numpy.clip', 'np.clip', (['log_std', '(-100)', '(100000000.0)'], {}), '(log_std, -100, 100000000.0)\n', (2371, 2399), True, 'import numpy as np\n'), ((2482, 2545), 'numpy.zeros', 'np.zeros', ([], {'shape': '((num_models * (num_models - 1),) + mu.shape[1:])'}), '(shape=(num_models * (num_models - 1),) + mu.shape[1:])\n', (2490, 2545), True, 'import numpy as np\n'), ((2761, 2784), 'numpy.median', 'np.median', (['d_kl'], {'axis': '(0)'}), '(d_kl, axis=0)\n', (2770, 2784), True, 'import numpy as np\n'), ((532, 552), 'numpy.exp', 'np.exp', (['(2 * log_std0)'], {}), '(2 * log_std0)\n', (538, 552), True, 'import numpy as np\n'), ((554, 574), 'numpy.exp', 'np.exp', (['(2 * log_std1)'], {}), '(2 * log_std1)\n', (560, 574), True, 'import numpy as np\n'), ((4351, 4387), 'numpy.mean', 'np.mean', (['data'], {'axis': '(0)', 'keepdims': '(True)'}), '(data, axis=0, keepdims=True)\n', (4358, 4387), True, 'import numpy as np\n'), ((4408, 4443), 'numpy.var', 'np.var', (['data'], {'axis': '(0)', 'keepdims': '(True)'}), '(data, axis=0, keepdims=True)\n', (4414, 4443), True, 'import numpy as np\n'), ((3178, 3201), 'tensorflow.variable_scope', 'tf.variable_scope', (['name'], {}), '(name)\n', (3195, 3201), True, 'import tensorflow as tf\n'), ((3801, 3821), 'numpy.zeros', 'np.zeros', (['[1, x_dim]'], {}), '([1, x_dim])\n', (3809, 3821), True, 'import numpy as np\n'), ((3823, 3842), 'numpy.ones', 'np.ones', (['[1, x_dim]'], {}), '([1, x_dim])\n', 
(3830, 3842), True, 'import numpy as np\n'), ((3303, 3329), 'tensorflow.constant_initializer', 'tf.constant_initializer', (['(0)'], {}), '(0)\n', (3326, 3329), True, 'import tensorflow as tf\n'), ((3480, 3508), 'tensorflow.constant_initializer', 'tf.constant_initializer', (['(0.0)'], {}), '(0.0)\n', (3503, 3508), True, 'import tensorflow as tf\n'), ((3660, 3688), 'tensorflow.constant_initializer', 'tf.constant_initializer', (['(1.0)'], {}), '(1.0)\n', (3683, 3688), True, 'import tensorflow as tf\n'), ((5629, 5646), 'tensorflow.sqrt', 'tf.sqrt', (['self.var'], {}), '(self.var)\n', (5636, 5646), True, 'import tensorflow as tf\n'), ((5998, 6015), 'tensorflow.sqrt', 'tf.sqrt', (['self.var'], {}), '(self.var)\n', (6005, 6015), True, 'import tensorflow as tf\n'), ((6379, 6396), 'tensorflow.sqrt', 'tf.sqrt', (['self.var'], {}), '(self.var)\n', (6386, 6396), True, 'import tensorflow as tf\n'), ((8003, 8019), 'numpy.square', 'np.square', (['delta'], {}), '(delta)\n', (8012, 8019), True, 'import numpy as np\n'), ((6752, 6769), 'tensorflow.sqrt', 'tf.sqrt', (['self.var'], {}), '(self.var)\n', (6759, 6769), True, 'import tensorflow as tf\n')] |
#!/usr/bin/env python
"""
<NAME>
Nov 2019
"""
import rospy
from sensor_msgs.msg import Image
from sensor_msgs.msg import Imu
from cv_bridge import CvBridge, CvBridgeError
import numpy as np
import cv2
class Image_Processor(object):
def __init__(self):
rospy.Subscriber("/r200/depth/image_raw", Image, self.record_callback)
self.delta_time = 0
self.bridge = CvBridge()
print("Image processor node initialized!")
def start_recording(self, save_path, delta_time=1):
self.save_path = save_path
self.frame = 0
self.delta_time = delta_time
def record_callback(self, data):
print("callback works")
if self.delta_time >0:
image = self.bridge.imgmsg_to_cv2(data, "bgr8")
print(image.shape)
cv2.imwrite(self.save_path + str(self.frame) + ".png", image)
rospy.sleep(self.delta_time)
self.frame += 1
if __name__ == '__main__':
rospy.init_node('image_processor', anonymous=True)
IMP = Image_Processor()
IMP.start_recording("/home/zhiang/Pictures/terrain_boulder/")
try:
rospy.spin()
except rospy.ROSInterruptException:
print("Node killed!") | [
"rospy.init_node",
"cv_bridge.CvBridge",
"rospy.spin",
"rospy.sleep",
"rospy.Subscriber"
] | [((968, 1018), 'rospy.init_node', 'rospy.init_node', (['"""image_processor"""'], {'anonymous': '(True)'}), "('image_processor', anonymous=True)\n", (983, 1018), False, 'import rospy\n'), ((266, 336), 'rospy.Subscriber', 'rospy.Subscriber', (['"""/r200/depth/image_raw"""', 'Image', 'self.record_callback'], {}), "('/r200/depth/image_raw', Image, self.record_callback)\n", (282, 336), False, 'import rospy\n'), ((387, 397), 'cv_bridge.CvBridge', 'CvBridge', ([], {}), '()\n', (395, 397), False, 'from cv_bridge import CvBridge, CvBridgeError\n'), ((1130, 1142), 'rospy.spin', 'rospy.spin', ([], {}), '()\n', (1140, 1142), False, 'import rospy\n'), ((879, 907), 'rospy.sleep', 'rospy.sleep', (['self.delta_time'], {}), '(self.delta_time)\n', (890, 907), False, 'import rospy\n')] |
from os.path import join
from math import ceil
import numpy as np
import seaborn as sns
import matplotlib.pyplot as plt
plt.rcParams["font.size"] = 14
plt.rcParams["figure.figsize"] = [15, 8]
class reporter:
def __init__(self, name, folder, counts, interactive=True):
plt.subplot(221)
plt.title("Histogram")
sns.distplot(counts)
plt.subplot(222)
plt.title("Boxplot")
plt.boxplot(counts)
plt.subplot(223)
clist = [ceil(c) for c in counts]
clist = np.array(sorted(clist))
integers = np.unique([int(c) for c in clist])
cdf = np.array([sum(clist <= i)/len(clist) for i in integers])
plt.title("CDF - P(x $\leq$ X)")
plt.grid(alpha=0.25)
plt.plot(cdf)
plt.subplot(224)
plt.plot(1 - cdf)
plt.title("CCDF - P(x > X)")
plt.grid(alpha=0.25)
plt.suptitle(name)
plt.savefig(join(folder, name))
plt.clf()
if interactive:
import pygal as pg
box_plot = pg.Box()
box_plot.title = name
box_plot.add("Values", counts)
boxplot_name = name + "_boxplot.svg"
box_plot.render_to_file(join(folder, boxplot_name))
hist = pg.Bar(show_x_labels=False)
clist = [ceil(c) for c in counts]
freqs = [clist.count(i) for i in range(0, int(max(clist)))]
hist.add("Values", freqs)
hist.title = name
hist.x_labels = map(str, integers)
histogram_name = name + "_histogram.svg"
hist.render_to_file(join(folder, histogram_name))
line = pg.Line()
line.title = name
line.add("CDF", cdf)
line.add("CCDF", 1 - cdf)
line.x_labels = map(str, integers)
# line.x_labels = map(str, counts)
line_name = name + "_cdf_ccdf.svg"
line.render_to_file(join(folder, line_name))
with open(join(folder, "report_{}.html".format(name)), "w+") as out:
obj0 = "<object type='image/svg+xml' data='"
obj1 = "'></object>\n"
out.write("<html><head align='center'>Report - {}</head><body>\n".format(name))
out.write("{}{}{}".format(obj0, boxplot_name, obj1))
out.write("{}{}{}".format(obj0, histogram_name, obj1))
out.write("{}{}{}".format(obj0, line_name, obj1))
out.write("</body></html>")
| [
"matplotlib.pyplot.boxplot",
"matplotlib.pyplot.grid",
"math.ceil",
"pygal.Line",
"seaborn.distplot",
"pygal.Bar",
"matplotlib.pyplot.plot",
"matplotlib.pyplot.clf",
"os.path.join",
"pygal.Box",
"matplotlib.pyplot.title",
"matplotlib.pyplot.subplot",
"matplotlib.pyplot.suptitle"
] | [((285, 301), 'matplotlib.pyplot.subplot', 'plt.subplot', (['(221)'], {}), '(221)\n', (296, 301), True, 'import matplotlib.pyplot as plt\n'), ((310, 332), 'matplotlib.pyplot.title', 'plt.title', (['"""Histogram"""'], {}), "('Histogram')\n", (319, 332), True, 'import matplotlib.pyplot as plt\n'), ((341, 361), 'seaborn.distplot', 'sns.distplot', (['counts'], {}), '(counts)\n', (353, 361), True, 'import seaborn as sns\n'), ((370, 386), 'matplotlib.pyplot.subplot', 'plt.subplot', (['(222)'], {}), '(222)\n', (381, 386), True, 'import matplotlib.pyplot as plt\n'), ((395, 415), 'matplotlib.pyplot.title', 'plt.title', (['"""Boxplot"""'], {}), "('Boxplot')\n", (404, 415), True, 'import matplotlib.pyplot as plt\n'), ((424, 443), 'matplotlib.pyplot.boxplot', 'plt.boxplot', (['counts'], {}), '(counts)\n', (435, 443), True, 'import matplotlib.pyplot as plt\n'), ((453, 469), 'matplotlib.pyplot.subplot', 'plt.subplot', (['(223)'], {}), '(223)\n', (464, 469), True, 'import matplotlib.pyplot as plt\n'), ((685, 718), 'matplotlib.pyplot.title', 'plt.title', (['"""CDF - P(x $\\\\leq$ X)"""'], {}), "('CDF - P(x $\\\\leq$ X)')\n", (694, 718), True, 'import matplotlib.pyplot as plt\n'), ((726, 746), 'matplotlib.pyplot.grid', 'plt.grid', ([], {'alpha': '(0.25)'}), '(alpha=0.25)\n', (734, 746), True, 'import matplotlib.pyplot as plt\n'), ((755, 768), 'matplotlib.pyplot.plot', 'plt.plot', (['cdf'], {}), '(cdf)\n', (763, 768), True, 'import matplotlib.pyplot as plt\n'), ((779, 795), 'matplotlib.pyplot.subplot', 'plt.subplot', (['(224)'], {}), '(224)\n', (790, 795), True, 'import matplotlib.pyplot as plt\n'), ((804, 821), 'matplotlib.pyplot.plot', 'plt.plot', (['(1 - cdf)'], {}), '(1 - cdf)\n', (812, 821), True, 'import matplotlib.pyplot as plt\n'), ((830, 858), 'matplotlib.pyplot.title', 'plt.title', (['"""CCDF - P(x > X)"""'], {}), "('CCDF - P(x > X)')\n", (839, 858), True, 'import matplotlib.pyplot as plt\n'), ((867, 887), 'matplotlib.pyplot.grid', 'plt.grid', ([], {'alpha': 
'(0.25)'}), '(alpha=0.25)\n', (875, 887), True, 'import matplotlib.pyplot as plt\n'), ((897, 915), 'matplotlib.pyplot.suptitle', 'plt.suptitle', (['name'], {}), '(name)\n', (909, 915), True, 'import matplotlib.pyplot as plt\n'), ((964, 973), 'matplotlib.pyplot.clf', 'plt.clf', ([], {}), '()\n', (971, 973), True, 'import matplotlib.pyplot as plt\n'), ((487, 494), 'math.ceil', 'ceil', (['c'], {}), '(c)\n', (491, 494), False, 'from math import ceil\n'), ((936, 954), 'os.path.join', 'join', (['folder', 'name'], {}), '(folder, name)\n', (940, 954), False, 'from os.path import join\n'), ((1054, 1062), 'pygal.Box', 'pg.Box', ([], {}), '()\n', (1060, 1062), True, 'import pygal as pg\n'), ((1273, 1300), 'pygal.Bar', 'pg.Bar', ([], {'show_x_labels': '(False)'}), '(show_x_labels=False)\n', (1279, 1300), True, 'import pygal as pg\n'), ((1669, 1678), 'pygal.Line', 'pg.Line', ([], {}), '()\n', (1676, 1678), True, 'import pygal as pg\n'), ((1225, 1251), 'os.path.join', 'join', (['folder', 'boxplot_name'], {}), '(folder, boxplot_name)\n', (1229, 1251), False, 'from os.path import join\n'), ((1322, 1329), 'math.ceil', 'ceil', (['c'], {}), '(c)\n', (1326, 1329), False, 'from math import ceil\n'), ((1619, 1647), 'os.path.join', 'join', (['folder', 'histogram_name'], {}), '(folder, histogram_name)\n', (1623, 1647), False, 'from os.path import join\n'), ((1953, 1976), 'os.path.join', 'join', (['folder', 'line_name'], {}), '(folder, line_name)\n', (1957, 1976), False, 'from os.path import join\n')] |
"""Tests the translate_* files."""
from __future__ import absolute_import
import cPickle
import os
import shutil
from shared.testutil import testsize
from third_party import polib
from kake import compile_all_pot
from kake import compile_small_mo
from kake import translate_handlebars
from kake import translate_javascript
from kake import translate_util
import kake.lib.compile_rule
import kake.lib.testutil
import kake.make
class TranslateString(translate_util.TranslateBase):
"""Treats the input file as a single nltext string."""
def translate(self, infile_name, outfile_lang_moentries_context):
file_contents = self._read_input(infile_name)
for (outfile, lang, mo_entries, _) in outfile_lang_moentries_context:
translated_contents = mo_entries.get_singular_translation(
file_contents.strip())
if translated_contents == file_contents:
translated_contents = None
self._write_output(infile_name, outfile, translated_contents)
class TestBase(kake.lib.testutil.KakeTestBase):
def setUp(self, make_small_mo_file=True):
super(TestBase, self).setUp()
os.makedirs(self._abspath('javascript'))
os.makedirs(self._abspath('caps'))
os.makedirs(self._abspath('intl', 'translations', 'pofiles'))
os.makedirs(self._abspath('intl', 'translations', 'approved_pofiles'))
os.makedirs(self._abspath('genfiles', 'translations', 'caps'))
os.makedirs(self._abspath('genfiles', 'extracted_strings', 'caps'))
os.makedirs(self._abspath('kake'))
shutil.copyfile(os.path.join(self.real_ka_root,
'kake', 'compile_js.js'),
os.path.join(self.tmpdir,
'kake', 'compile_js.js'))
with open(self._abspath('f1'), 'w') as f:
print >>f, 'Graphing linear equations'
with open(self._abspath('javascript', 'f1.js'), 'w') as f:
print >>f, 'a = i18n._("Graphing linear equations");'
print >>f, 'b = i18n._("Hello %(where)s", {where: "world"});'
with open(self._abspath('javascript', 'f1.jsx'), 'w') as f:
print >>f, 'a = i18n._("Graphing linear equations");'
print >>f, 'b = i18n._("Hello %(where)s", {where: "world"});'
# The actual jsx would be: <$_ where="world">Hello %(where)s</$_>
# But our fake jsx-compiler won't correctly 'compile' this, so
# I cheat and put in the post-compiled value.
print >>f, 'c = $_({where: "world"}, "Hello %(where)s", etc, etc);'
with open(self._abspath('javascript', 'f1.handlebars'), 'w') as f:
print >>f, '{{#_}}Graphing linear equations{{/_}}'
# Also test plural functionality
with open(self._abspath('javascript', 'f2.js'), 'w') as f:
print >>f, 'a = $.ngettext("1 second", "%(num)s seconds");'
with open(self._abspath('javascript', 'f2.handlebars'), 'w') as f:
print >>f, ('{{#ngettext num}}1 minute{{else}}'
'{{num}} minutes{{/ngettext}}')
# A plural used in a singular context.
with open(self._abspath('f.html'), 'w') as f:
print >>f, '<div title="1 second">1 minute</div>'
with open(self._abspath('f.js'), 'w') as f:
print >>f, 'a = i18n._("1 minute");'
with open(self._abspath('f.handlebars'), 'w') as f:
print >>f, '{{#_}}1 minute{{/_}}'
# An exercise with no translations.
with open(self._abspath('f_no.html'), 'w') as f:
print >>f, '<script>alert(i18n._("Apple"));</script>'
print >>f, ('<span data-if="alert(i18n._(\'Banana\'));">'
'Canteloupe'
'</span>')
print >>f, '<input type="text" value="Durian" />'
print >>f, '<var>alert(i18n._("Eggplant"));</var>'
print >>f, ('<span data-if="isSingular(A)"><var>A</var> Fig</span>'
'<span data-else=""><var>A</var> Figs</span>')
# Exercise files with partial translations in diferent kinds of nltext
# positions.
with open(self._abspath('f_p1.html'), 'w') as f:
print >>f, '<script>alert(i18n._("Apple"));</script>'
print >>f, '<script>alert(i18n._("Addition 1"));</script>'
with open(self._abspath('f_p2.html'), 'w') as f:
print >>f, '<script>alert(i18n._("Apple"));</script>'
print >>f, '<span data-if="alert(i18n._(\'Addition 1\'));"></span>'
with open(self._abspath('f_p3.html'), 'w') as f:
print >>f, '<script>alert(i18n._("Apple"));</script>'
print >>f, '<span>Addition 1</span>'
with open(self._abspath('f_p4.html'), 'w') as f:
print >>f, '<script>alert(i18n._("Apple"));</script>'
print >>f, '<input type="text" value="Addition 1" />'
with open(self._abspath('f_p5.html'), 'w') as f:
print >>f, '<script>alert(i18n._("Apple"));</script>'
print >>f, '<var>alert(i18n._("Addition 1"));</var>'
with open(self._abspath('f_p6.html'), 'w') as f:
print >>f, '<script>alert(i18n._("Apple"));</script>'
print >>f, ('<span data-if="isSingular(n)">1 hour</span>'
'<span data-else=""><var>n</var> hours</span>')
with open(self._abspath('f_p7.html'), 'w') as f:
print >>f, ('<script>'
'alert(i18n._("Apple")); alert(i18n._("Addition 1"));'
'</script>')
with open(self._abspath('f_p8.html'), 'w') as f:
print >>f, ('<script>'
'alert(i18n._("Apple")); '
'alert(i18n._("Subtraction 1"));'
'</script>')
# A file without a translation
with open(self._abspath('f_no'), 'w') as f:
print >>f, 'Hello, world'
# Make the .po file. We don't need 'occurences' fields for
# our tests, but _write_pofile() wants them, so we make up
# some fake ones.
e1 = polib.POEntry(msgid='Hello %(where)s',
msgstr='HELLO %(where)s',
occurrences=[('a', 1)])
e2 = polib.POEntry(msgid='Graphing linear equations',
msgstr='GRAPHING LINEAR EQUATIONS',
occurrences=[('a', 1)])
e3 = polib.POEntry(msgid='Addition 1',
msgstr='ADDITION 1',
occurrences=[('a', 1)])
e4 = polib.POEntry(msgid='1 second',
msgid_plural='%(num)s seconds',
msgstr_plural={'0': '1 SECOND',
'1': '%(num)s SECONDS',
'2': '%(num)s SECS'},
occurrences=[('a', 1)])
e5 = polib.POEntry(msgid='1 minute',
msgid_plural='{{num}} minutes',
msgstr_plural={'0': '1 MINUTE',
'1': '{{num}} MINUTES',
'2': '{{num}} MINS'},
occurrences=[('a', 1)])
e6 = polib.POEntry(msgid='1 hour',
msgid_plural='<var>n</var> hours',
msgstr_plural={'0': '1 HOUR',
'1': '<var>n</var> HOURS',
'2': '<var>n</var> H'},
occurrences=[('a', 1)])
# This entry differs between the approved pofiles and the unapproved
# pofiles
e3_unapproved = polib.POEntry(msgid='Addition 1',
msgstr='ADDITION ONE',
occurrences=[('a', 1)])
# These entries only exists in the unapproved pofile
e7_unapproved = polib.POEntry(msgid='Subtraction 1',
msgstr='SUBTRACTION ONE',
occurrences=[('a', 1)])
e8_unapproved = polib.POEntry(msgid='1 fortnight',
msgid_plural='{{num}} fortnights',
msgstr_plural={'0': '1 FORTNIGHT',
'1': '{{num}} FORTNIGHTS',
'2': '{{num}} FORTNS'},
occurrences=[('a', 1)])
def save_po_file(entries, outpath):
po_file = polib.POFile()
po_file.extend(entries)
po_file.save(outpath)
save_po_file((e1, e2, e3_unapproved, e4, e5, e6, e7_unapproved,
e8_unapproved),
self._abspath('intl', 'translations',
'pofiles', 'caps.rest.po'))
save_po_file((e1, e2, e3, e4, e5, e6),
self._abspath('intl', 'translations',
'approved_pofiles', 'caps.rest.po'))
# Also make the .pot.pickle files.
po_entry_map = {
'f1': [e2],
'javascript/f1.js': [e2, e1],
'javascript/f1.jsx': [e2, e1],
'javascript/f1.handlebars': [e2],
'javascript/f2.js': [e4],
'javascript/f2.handlebars': [e5],
'f.html': [e4, e5, e8_unapproved],
'f.js': [e5],
'f.handlebars': [e5],
'f_no': [],
'f_no.html': [],
'f_p1.html': [e3],
'f_p2.html': [e3],
'f_p3.html': [e3],
'f_p4.html': [e3],
'f_p5.html': [e3],
'f_p6.html': [e6],
'f_p7.html': [e3],
'f_p8.html': [e7_unapproved],
}
for (fname, po_entries) in po_entry_map.iteritems():
fname = 'genfiles/extracted_strings/en/%s.pot.pickle' % fname
if not os.path.isdir(os.path.dirname(self._abspath(fname))):
os.makedirs(os.path.dirname(self._abspath(fname)))
compile_all_pot._write_pofile(po_entries, self._abspath(fname))
if make_small_mo_file:
for f in po_entry_map:
fout = 'genfiles/extracted_strings/caps/%s.small_mo.pickle' % f
if not os.path.isdir(os.path.dirname(self._abspath(fout))):
os.makedirs(os.path.dirname(self._abspath(fout)))
compile_small_mo.SplitPOFile().build_many([
(fout,
['genfiles/extracted_strings/en/%s.pot.pickle' % f,
'intl/translations/pofiles/caps.rest.po',
'intl/translations/approved_pofiles/caps.rest.po'],
['intl/translations/pofiles/caps.rest.po'],
{})])
def build(self, translator, infile, outfile):
translator.build_many([(
outfile,
[infile,
'genfiles/extracted_strings/caps/%s.small_mo.pickle' % infile],
[outfile],
{'{lang}': 'caps'}
)])
@testsize.tiny
class TestSmallMo(TestBase):
def test_approval_flag(self):
with open(self._abspath('genfiles/extracted_strings/caps/'
'f.html.small_mo.pickle')) as f:
small_mo = cPickle.load(f)
# We have translations for both "1 second", and "1 fortnight"
self.assertIsNotNone(small_mo.get_plural_translation(
"1 second", approved_only=False))
self.assertIsNotNone(small_mo.get_singular_translation(
"1 second", approved_only=False))
self.assertIsNotNone(small_mo.get_plural_translation(
"1 fortnight", approved_only=False))
self.assertIsNotNone(small_mo.get_singular_translation(
"1 fortnight", approved_only=False))
# ...but the translation for "1 fortnight" is not approved.
self.assertIsNotNone(small_mo.get_plural_translation(
"1 second", approved_only=True))
self.assertIsNotNone(small_mo.get_singular_translation(
"1 second", approved_only=True))
self.assertIsNone(small_mo.get_plural_translation(
"1 fortnight", approved_only=True))
self.assertIsNone(small_mo.get_singular_translation(
"1 fortnight", approved_only=True))
@testsize.tiny
class TestTranslations(TestBase):
def test_simple(self):
translator = TranslateString()
self.build(translator, 'f1', 'f1_caps')
self.assertFile('f1_caps', 'GRAPHING LINEAR EQUATIONS')
def test_symlink_when_there_is_no_translation(self):
translator = TranslateString()
self.build(translator, 'f_no', 'caps/f1_symlink')
self.assertFile('caps/f1_symlink', 'Hello, world\n')
self.assertTrue(os.path.islink(self._abspath('caps', 'f1_symlink')))
self.assertEqual(os.path.join('..', 'f_no'),
os.readlink(self._abspath('caps', 'f1_symlink')))
@testsize.tiny
class TestJavascript(TestBase):
def test_singular(self):
translator = translate_javascript.TranslateJavascript()
self.build(translator, 'javascript/f1.js', 'caps/f1.js')
self.assertFile('caps/f1.js',
'a = i18n._("GRAPHING LINEAR EQUATIONS");\n'
'b = i18n._("HELLO %(where)s", {where: "world"});\n')
def test_plural(self):
translator = translate_javascript.TranslateJavascript()
self.build(translator, 'javascript/f2.js', 'caps/f2.js')
self.assertFile('caps/f2.js',
'a = $.ngettext({"lang": "caps", '
'"messages": ["1 SECOND", "%(num)s SECONDS", '
'"%(num)s SECS"]});\n')
def test_ngettext_entry_used_in_singular_context(self):
translator = translate_javascript.TranslateJavascript()
self.build(translator, 'f.js', 'caps/f.js')
self.assertFile('caps/f.js',
'a = i18n._("1 MINUTE");\n')
def test_should_not_translate_file(self):
self.mock_function('intl.english_only.should_not_translate_file',
lambda f: f == 'javascript/f1.js')
translator = translate_javascript.TranslateJavascript()
# caps/f1.js should be a symlink since it's in do-not-translate
self.build(translator, 'javascript/f1.js', 'caps/f1.js')
self.assertTrue(os.path.islink(self._abspath('caps', 'f1.js')))
self.assertEqual('../javascript/f1.js',
os.readlink(self._abspath('caps', 'f1.js')))
# But f2.js is a different story...
self.build(translator, 'javascript/f2.js', 'caps/f2.js')
self.assertFile('caps/f2.js',
'a = $.ngettext({"lang": "caps", '
'"messages": ["1 SECOND", "%(num)s SECONDS", '
'"%(num)s SECS"]});\n')
@testsize.tiny
class TestHandlebars(TestBase):
def test_singular(self):
translator = translate_handlebars.TranslateHandlebars()
self.build(translator, 'javascript/f1.handlebars', 'caps/f1.hbars')
self.assertFile('caps/f1.hbars',
'GRAPHING LINEAR EQUATIONS\n')
def test_plural(self):
translator = translate_handlebars.TranslateHandlebars()
self.build(translator, 'javascript/f2.handlebars', 'caps/f2.hbars')
self.assertFile('caps/f2.hbars',
'{{#ngettext num "caps" 0}}1 MINUTE{{else}}'
'{{#ngettext num "caps" 1}}{{num}} MINUTES{{else}}'
'{{num}} MINS{{/ngettext}}{{/ngettext}}\n')
def test_ngettext_entry_used_in_singular_context(self):
translator = translate_handlebars.TranslateHandlebars()
self.build(translator, 'f.handlebars', 'caps/f.hbars')
self.assertFile('caps/f.hbars',
'1 MINUTE\n')
def test_gettext_entry_used_in_plural_context(self):
with open(self._abspath('f.handlebars'), 'w') as f:
print >>f, ('{{#ngettext num}}Addition 1{{else}}Additions 1'
'{{/ngettext}}')
translator = translate_handlebars.TranslateHandlebars()
self.build(translator, 'f.handlebars', 'caps/f.hbars')
# Shouldn't translate our string since it's a singular string
# used in a plural context, and it doesn't know how to
# translate the plural.
self.assertFile('caps/f.hbars',
'{{#ngettext num}}Addition 1{{else}}Additions 1'
'{{/ngettext}}\n')
@testsize.tiny
class TestBuild(TestBase):
"""Test make.build() on translate targets."""
def setUp(self):
# make.build should make the small-mo file for us.
super(TestBuild, self).setUp(make_small_mo_file=False)
def test_javascript(self):
kake.make.build('genfiles/translations/caps/javascript/f1.js')
self.assertFile('genfiles/translations/caps/javascript/f1.js',
'a = i18n._("GRAPHING LINEAR EQUATIONS");\n'
'b = i18n._("HELLO %(where)s", {where: "world"});\n')
def test_handlebars(self):
kake.make.build('genfiles/translations/caps/javascript/f1.handlebars')
self.assertFile('genfiles/translations/caps/javascript/f1.handlebars',
'GRAPHING LINEAR EQUATIONS\n')
def test_incremental_rebuilds(self):
"""Test we don't re-translate when irrelevant translations change."""
kake.make.build('genfiles/translations/caps/javascript/f1.handlebars')
kake.make.build('genfiles/translations/caps/javascript/f2.handlebars')
po_path = self._abspath('intl', 'translations', 'approved_pofiles',
'caps.rest.po')
with open(po_path) as f:
old_po = f.read()
new_po = old_po.replace('MINUTE', 'MINUUUUTE') # used in f2, not f1
with open(po_path, 'w') as f:
print >>f, new_po
self.assertFileLacks(
'genfiles/translations/caps/javascript/f2.handlebars',
'MINUUUUTE')
# Now rebuilding f1 should be a noop.
cr = kake.lib.compile_rule.find_compile_rule(
'genfiles/translations/caps/javascript/f1.handlebars')
with self.assertCalled(cr.compile_instance.translate, 0):
kake.make.build(
'genfiles/translations/caps/javascript/f1.handlebars')
# While rebuilding f2 should not be.
with self.assertCalled(cr.compile_instance.translate, 1):
kake.make.build(
'genfiles/translations/caps/javascript/f2.handlebars')
self.assertFileContains(
'genfiles/translations/caps/javascript/f2.handlebars',
'MINUUUUTE')
class TestBuildForFakeLang(TestBase):
"""Test make.build() using the special codepath for fake languages."""
# Note we don't make any fake boxes.po file at all. kake
# automatically extracts the strings from the input file,
# fake-translates them, and inserts them into the translated file,
# all on the fly.
_BOX = u'\u25a1'.encode('utf-8')
_UTF8_GRAPHING_LINEAR_EQUATIONS = '%s %s %s' % (_BOX * len('GRAPHING'),
_BOX * len('LINEAR'),
_BOX * len('EQUATIONS'))
_S_GRAPHING_LINEAR_EQUATIONS = '%s %s %s' % (r'\u25a1' * len('GRAPHING'),
r'\u25a1' * len('LINEAR'),
r'\u25a1' * len('EQUATIONS'))
_S_HELLO_WORLD = '%s %%(where)s' % (r'\u25a1' * len('HELLO'))
_S_ADDITION_1 = '%s %s' % (r'\u25a1' * len('ADDITION'),
r'\u25a1' * len('1'))
def test_javascript(self):
kake.make.build('genfiles/translations/boxes/javascript/f1.js')
self.assertFile('genfiles/translations/boxes/javascript/f1.js',
'a = i18n._("%s");\n'
'b = i18n._("%s", {where: "world"});\n'
% (self._S_GRAPHING_LINEAR_EQUATIONS,
self._S_HELLO_WORLD))
def test_jsx(self):
kake.make.build('genfiles/compiled_jsx/boxes/javascript/f1.jsx.js')
self.assertFile('genfiles/compiled_jsx/boxes/javascript/f1.jsx.js',
'a = i18n._("%s");\n'
'b = i18n._("%s", {where: "world"});\n'
'c = $_({where: "world"}, "%s", etc, etc);\n'
% (self._S_GRAPHING_LINEAR_EQUATIONS,
self._S_HELLO_WORLD,
self._S_HELLO_WORLD))
def test_handlebars(self):
kake.make.build('genfiles/translations/boxes/javascript/f1.handlebars')
self.assertFile('genfiles/translations/boxes/javascript/f1.handlebars',
'%s\n' % self._UTF8_GRAPHING_LINEAR_EQUATIONS)
| [
"kake.translate_handlebars.TranslateHandlebars",
"os.path.join",
"third_party.polib.POFile",
"kake.translate_javascript.TranslateJavascript",
"kake.compile_small_mo.SplitPOFile",
"third_party.polib.POEntry",
"cPickle.load"
] | [((6174, 6266), 'third_party.polib.POEntry', 'polib.POEntry', ([], {'msgid': '"""Hello %(where)s"""', 'msgstr': '"""HELLO %(where)s"""', 'occurrences': "[('a', 1)]"}), "(msgid='Hello %(where)s', msgstr='HELLO %(where)s',\n occurrences=[('a', 1)])\n", (6187, 6266), False, 'from third_party import polib\n'), ((6330, 6443), 'third_party.polib.POEntry', 'polib.POEntry', ([], {'msgid': '"""Graphing linear equations"""', 'msgstr': '"""GRAPHING LINEAR EQUATIONS"""', 'occurrences': "[('a', 1)]"}), "(msgid='Graphing linear equations', msgstr=\n 'GRAPHING LINEAR EQUATIONS', occurrences=[('a', 1)])\n", (6343, 6443), False, 'from third_party import polib\n'), ((6506, 6584), 'third_party.polib.POEntry', 'polib.POEntry', ([], {'msgid': '"""Addition 1"""', 'msgstr': '"""ADDITION 1"""', 'occurrences': "[('a', 1)]"}), "(msgid='Addition 1', msgstr='ADDITION 1', occurrences=[('a', 1)])\n", (6519, 6584), False, 'from third_party import polib\n'), ((6652, 6825), 'third_party.polib.POEntry', 'polib.POEntry', ([], {'msgid': '"""1 second"""', 'msgid_plural': '"""%(num)s seconds"""', 'msgstr_plural': "{'0': '1 SECOND', '1': '%(num)s SECONDS', '2': '%(num)s SECS'}", 'occurrences': "[('a', 1)]"}), "(msgid='1 second', msgid_plural='%(num)s seconds',\n msgstr_plural={'0': '1 SECOND', '1': '%(num)s SECONDS', '2':\n '%(num)s SECS'}, occurrences=[('a', 1)])\n", (6665, 6825), False, 'from third_party import polib\n'), ((6996, 7169), 'third_party.polib.POEntry', 'polib.POEntry', ([], {'msgid': '"""1 minute"""', 'msgid_plural': '"""{{num}} minutes"""', 'msgstr_plural': "{'0': '1 MINUTE', '1': '{{num}} MINUTES', '2': '{{num}} MINS'}", 'occurrences': "[('a', 1)]"}), "(msgid='1 minute', msgid_plural='{{num}} minutes',\n msgstr_plural={'0': '1 MINUTE', '1': '{{num}} MINUTES', '2':\n '{{num}} MINS'}, occurrences=[('a', 1)])\n", (7009, 7169), False, 'from third_party import polib\n'), ((7340, 7517), 'third_party.polib.POEntry', 'polib.POEntry', ([], {'msgid': '"""1 hour"""', 'msgid_plural': 
'"""<var>n</var> hours"""', 'msgstr_plural': "{'0': '1 HOUR', '1': '<var>n</var> HOURS', '2': '<var>n</var> H'}", 'occurrences': "[('a', 1)]"}), "(msgid='1 hour', msgid_plural='<var>n</var> hours',\n msgstr_plural={'0': '1 HOUR', '1': '<var>n</var> HOURS', '2':\n '<var>n</var> H'}, occurrences=[('a', 1)])\n", (7353, 7517), False, 'from third_party import polib\n'), ((7795, 7880), 'third_party.polib.POEntry', 'polib.POEntry', ([], {'msgid': '"""Addition 1"""', 'msgstr': '"""ADDITION ONE"""', 'occurrences': "[('a', 1)]"}), "(msgid='Addition 1', msgstr='ADDITION ONE', occurrences=[('a', 1)]\n )\n", (7808, 7880), False, 'from third_party import polib\n'), ((8038, 8129), 'third_party.polib.POEntry', 'polib.POEntry', ([], {'msgid': '"""Subtraction 1"""', 'msgstr': '"""SUBTRACTION ONE"""', 'occurrences': "[('a', 1)]"}), "(msgid='Subtraction 1', msgstr='SUBTRACTION ONE', occurrences=\n [('a', 1)])\n", (8051, 8129), False, 'from third_party import polib\n'), ((8225, 8412), 'third_party.polib.POEntry', 'polib.POEntry', ([], {'msgid': '"""1 fortnight"""', 'msgid_plural': '"""{{num}} fortnights"""', 'msgstr_plural': "{'0': '1 FORTNIGHT', '1': '{{num}} FORTNIGHTS', '2': '{{num}} FORTNS'}", 'occurrences': "[('a', 1)]"}), "(msgid='1 fortnight', msgid_plural='{{num}} fortnights',\n msgstr_plural={'0': '1 FORTNIGHT', '1': '{{num}} FORTNIGHTS', '2':\n '{{num}} FORTNS'}, occurrences=[('a', 1)])\n", (8238, 8412), False, 'from third_party import polib\n'), ((13406, 13448), 'kake.translate_javascript.TranslateJavascript', 'translate_javascript.TranslateJavascript', ([], {}), '()\n', (13446, 13448), False, 'from kake import translate_javascript\n'), ((13748, 13790), 'kake.translate_javascript.TranslateJavascript', 'translate_javascript.TranslateJavascript', ([], {}), '()\n', (13788, 13790), False, 'from kake import translate_javascript\n'), ((14154, 14196), 'kake.translate_javascript.TranslateJavascript', 'translate_javascript.TranslateJavascript', ([], {}), '()\n', (14194, 14196), 
False, 'from kake import translate_javascript\n'), ((14543, 14585), 'kake.translate_javascript.TranslateJavascript', 'translate_javascript.TranslateJavascript', ([], {}), '()\n', (14583, 14585), False, 'from kake import translate_javascript\n'), ((15339, 15381), 'kake.translate_handlebars.TranslateHandlebars', 'translate_handlebars.TranslateHandlebars', ([], {}), '()\n', (15379, 15381), False, 'from kake import translate_handlebars\n'), ((15603, 15645), 'kake.translate_handlebars.TranslateHandlebars', 'translate_handlebars.TranslateHandlebars', ([], {}), '()\n', (15643, 15645), False, 'from kake import translate_handlebars\n'), ((16060, 16102), 'kake.translate_handlebars.TranslateHandlebars', 'translate_handlebars.TranslateHandlebars', ([], {}), '()\n', (16100, 16102), False, 'from kake import translate_handlebars\n'), ((16498, 16540), 'kake.translate_handlebars.TranslateHandlebars', 'translate_handlebars.TranslateHandlebars', ([], {}), '()\n', (16538, 16540), False, 'from kake import translate_handlebars\n'), ((1617, 1673), 'os.path.join', 'os.path.join', (['self.real_ka_root', '"""kake"""', '"""compile_js.js"""'], {}), "(self.real_ka_root, 'kake', 'compile_js.js')\n", (1629, 1673), False, 'import os\n'), ((1736, 1786), 'os.path.join', 'os.path.join', (['self.tmpdir', '"""kake"""', '"""compile_js.js"""'], {}), "(self.tmpdir, 'kake', 'compile_js.js')\n", (1748, 1786), False, 'import os\n'), ((8692, 8706), 'third_party.polib.POFile', 'polib.POFile', ([], {}), '()\n', (8704, 8706), False, 'from third_party import polib\n'), ((11467, 11482), 'cPickle.load', 'cPickle.load', (['f'], {}), '(f)\n', (11479, 11482), False, 'import cPickle\n'), ((13204, 13230), 'os.path.join', 'os.path.join', (['""".."""', '"""f_no"""'], {}), "('..', 'f_no')\n", (13216, 13230), False, 'import os\n'), ((10585, 10615), 'kake.compile_small_mo.SplitPOFile', 'compile_small_mo.SplitPOFile', ([], {}), '()\n', (10613, 10615), False, 'from kake import compile_small_mo\n')] |
from django.conf.urls import url
from django.urls import include
urlpatterns = [url("api/", include("api.urls"))]
| [
"django.urls.include"
] | [((93, 112), 'django.urls.include', 'include', (['"""api.urls"""'], {}), "('api.urls')\n", (100, 112), False, 'from django.urls import include\n')] |
from .ccfsReg import CanonicalCorrelationForestsRegressionPrimitive
__all__ = ['CanonicalCorrelationForestsRegressionPrimitive']
from pkgutil import extend_path
__path__ = extend_path(__path__, __name__) # type: ignore
| [
"pkgutil.extend_path"
] | [((175, 206), 'pkgutil.extend_path', 'extend_path', (['__path__', '__name__'], {}), '(__path__, __name__)\n', (186, 206), False, 'from pkgutil import extend_path\n')] |
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from numpy import array
from numpy import isnan
from numpy import isinf
from numpy import ones
from numpy import zeros
from scipy.linalg import norm
from scipy.sparse import diags
from compas.numerical import connectivity_matrix
from compas.numerical import normrow
__all__ = ['dr_numpy']
K = [
[0.0],
[0.5, 0.5],
[0.5, 0.0, 0.5],
[1.0, 0.0, 0.0, 1.0],
]
class Coeff():
def __init__(self, c):
self.c = c
self.a = (1 - c * 0.5) / (1 + c * 0.5)
self.b = 0.5 * (1 + self.a)
def dr_numpy(vertices, edges, fixed, loads, qpre, fpre, lpre, linit, E, radius,
callback=None, callback_args=None, **kwargs):
"""Implementation of the dynamic relaxation method for form findong and analysis
of articulated networks of axial-force members.
Parameters
----------
vertices : list
XYZ coordinates of the vertices.
edges : list
Connectivity of the vertices.
fixed : list
Indices of the fixed vertices.
loads : list
XYZ components of the loads on the vertices.
qpre : list
Prescribed force densities in the edges.
fpre : list
Prescribed forces in the edges.
lpre : list
Prescribed lengths of the edges.
linit : list
Initial length of the edges.
E : list
Stiffness of the edges.
radius : list
Radius of the edges.
callback : callable, optional
User-defined function that is called at every iteration.
callback_args : tuple, optional
Additional arguments passed to the callback.
Returns
-------
xyz : array
XYZ coordinates of the equilibrium geometry.
q : array
Force densities in the edges.
f : array
Forces in the edges.
l : array
Lengths of the edges
r : array
Residual forces.
Notes
-----
For more info, see [1]_.
References
----------
.. [1] <NAME>., <NAME>., <NAME>., <NAME>. and <NAME>.,
*Bending incorporated: designing tension structures by integrating bending-active elements*,
Proceedings of Tensinet Symposium 2013,Istanbul, Turkey, 2013.
Examples
--------
>>>
"""
# --------------------------------------------------------------------------
# callback
# --------------------------------------------------------------------------
if callback:
assert callable(callback), 'The provided callback is not callable.'
# --------------------------------------------------------------------------
# configuration
# --------------------------------------------------------------------------
kmax = kwargs.get('kmax', 10000)
dt = kwargs.get('dt', 1.0)
tol1 = kwargs.get('tol1', 1e-3)
tol2 = kwargs.get('tol2', 1e-6)
coeff = Coeff(kwargs.get('c', 0.1))
ca = coeff.a
cb = coeff.b
# --------------------------------------------------------------------------
# attribute lists
# --------------------------------------------------------------------------
num_v = len(vertices)
num_e = len(edges)
free = list(set(range(num_v)) - set(fixed))
# --------------------------------------------------------------------------
# attribute arrays
# --------------------------------------------------------------------------
x = array(vertices, dtype=float).reshape((-1, 3)) # m
p = array(loads, dtype=float).reshape((-1, 3)) # kN
qpre = array(qpre, dtype=float).reshape((-1, 1))
fpre = array(fpre, dtype=float).reshape((-1, 1)) # kN
lpre = array(lpre, dtype=float).reshape((-1, 1)) # m
linit = array(linit, dtype=float).reshape((-1, 1)) # m
E = array(E, dtype=float).reshape((-1, 1)) # kN/mm2 => GPa
radius = array(radius, dtype=float).reshape((-1, 1)) # mm
# --------------------------------------------------------------------------
# sectional properties
# --------------------------------------------------------------------------
A = 3.14159 * radius ** 2 # mm2
EA = E * A # kN
# --------------------------------------------------------------------------
# create the connectivity matrices
# after spline edges have been aligned
# --------------------------------------------------------------------------
C = connectivity_matrix(edges, 'csr')
Ct = C.transpose()
Ci = C[:, free]
Cit = Ci.transpose()
Ct2 = Ct.copy()
Ct2.data **= 2
# --------------------------------------------------------------------------
# if none of the initial lengths are set,
# set the initial lengths to the current lengths
# --------------------------------------------------------------------------
if all(linit == 0):
linit = normrow(C.dot(x))
# --------------------------------------------------------------------------
# initial values
# --------------------------------------------------------------------------
q = ones((num_e, 1), dtype=float)
l = normrow(C.dot(x)) # noqa: E741
f = q * l
v = zeros((num_v, 3), dtype=float)
r = zeros((num_v, 3), dtype=float)
# --------------------------------------------------------------------------
# helpers
# --------------------------------------------------------------------------
def rk(x0, v0, steps=2):
def a(t, v):
dx = v * t
x[free] = x0[free] + dx[free]
# update residual forces
r[free] = p[free] - D.dot(x)
return cb * r / mass
if steps == 1:
return a(dt, v0)
if steps == 2:
B = [0.0, 1.0]
K0 = dt * a(K[0][0] * dt, v0)
K1 = dt * a(K[1][0] * dt, v0 + K[1][1] * K0)
dv = B[0] * K0 + B[1] * K1
return dv
if steps == 4:
B = [1. / 6., 1. / 3., 1. / 3., 1. / 6.]
K0 = dt * a(K[0][0] * dt, v0)
K1 = dt * a(K[1][0] * dt, v0 + K[1][1] * K0)
K2 = dt * a(K[2][0] * dt, v0 + K[2][1] * K0 + K[2][2] * K1)
K3 = dt * a(K[3][0] * dt, v0 + K[3][1] * K0 + K[3][2] * K1 + K[3][3] * K2)
dv = B[0] * K0 + B[1] * K1 + B[2] * K2 + B[3] * K3
return dv
raise NotImplementedError
# --------------------------------------------------------------------------
# start iterating
# --------------------------------------------------------------------------
for k in range(kmax):
# print(k)
q_fpre = fpre / l
q_lpre = f / lpre
q_EA = EA * (l - linit) / (linit * l)
q_lpre[isinf(q_lpre)] = 0
q_lpre[isnan(q_lpre)] = 0
q_EA[isinf(q_EA)] = 0
q_EA[isnan(q_EA)] = 0
q = qpre + q_fpre + q_lpre + q_EA
Q = diags([q[:, 0]], [0])
D = Cit.dot(Q).dot(C)
mass = 0.5 * dt ** 2 * Ct2.dot(qpre + q_fpre + q_lpre + EA / linit)
# RK
x0 = x.copy()
v0 = ca * v.copy()
dv = rk(x0, v0, steps=4)
v[free] = v0[free] + dv[free]
dx = v * dt
x[free] = x0[free] + dx[free]
# update
u = C.dot(x)
l = normrow(u) # noqa: E741
f = q * l
r = p - Ct.dot(Q).dot(u)
# crits
crit1 = norm(r[free])
crit2 = norm(dx[free])
# callback
if callback:
callback(k, x, [crit1, crit2], callback_args)
# convergence
if crit1 < tol1:
break
if crit2 < tol2:
break
return x, q, f, l, r
# ==============================================================================
# Main
# ==============================================================================
if __name__ == "__main__":
    # Module is meant to be imported; no standalone behavior.
    pass
| [
"numpy.ones",
"numpy.array",
"numpy.zeros",
"compas.numerical.connectivity_matrix",
"scipy.linalg.norm",
"numpy.isnan",
"scipy.sparse.diags",
"numpy.isinf",
"compas.numerical.normrow"
] | [((4682, 4715), 'compas.numerical.connectivity_matrix', 'connectivity_matrix', (['edges', '"""csr"""'], {}), "(edges, 'csr')\n", (4701, 4715), False, 'from compas.numerical import connectivity_matrix\n'), ((5333, 5362), 'numpy.ones', 'ones', (['(num_e, 1)'], {'dtype': 'float'}), '((num_e, 1), dtype=float)\n', (5337, 5362), False, 'from numpy import ones\n'), ((5425, 5455), 'numpy.zeros', 'zeros', (['(num_v, 3)'], {'dtype': 'float'}), '((num_v, 3), dtype=float)\n', (5430, 5455), False, 'from numpy import zeros\n'), ((5464, 5494), 'numpy.zeros', 'zeros', (['(num_v, 3)'], {'dtype': 'float'}), '((num_v, 3), dtype=float)\n', (5469, 5494), False, 'from numpy import zeros\n'), ((7129, 7150), 'scipy.sparse.diags', 'diags', (['[q[:, 0]]', '[0]'], {}), '([q[:, 0]], [0])\n', (7134, 7150), False, 'from scipy.sparse import diags\n'), ((7498, 7508), 'compas.numerical.normrow', 'normrow', (['u'], {}), '(u)\n', (7505, 7508), False, 'from compas.numerical import normrow\n'), ((7606, 7619), 'scipy.linalg.norm', 'norm', (['r[free]'], {}), '(r[free])\n', (7610, 7619), False, 'from scipy.linalg import norm\n'), ((7636, 7650), 'scipy.linalg.norm', 'norm', (['dx[free]'], {}), '(dx[free])\n', (7640, 7650), False, 'from scipy.linalg import norm\n'), ((3467, 3495), 'numpy.array', 'array', (['vertices'], {'dtype': 'float'}), '(vertices, dtype=float)\n', (3472, 3495), False, 'from numpy import array\n'), ((3546, 3571), 'numpy.array', 'array', (['loads'], {'dtype': 'float'}), '(loads, dtype=float)\n', (3551, 3571), False, 'from numpy import array\n'), ((3629, 3653), 'numpy.array', 'array', (['qpre'], {'dtype': 'float'}), '(qpre, dtype=float)\n', (3634, 3653), False, 'from numpy import array\n'), ((3682, 3706), 'numpy.array', 'array', (['fpre'], {'dtype': 'float'}), '(fpre, dtype=float)\n', (3687, 3706), False, 'from numpy import array\n'), ((3762, 3786), 'numpy.array', 'array', (['lpre'], {'dtype': 'float'}), '(lpre, dtype=float)\n', (3767, 3786), False, 'from numpy import array\n'), 
((3842, 3867), 'numpy.array', 'array', (['linit'], {'dtype': 'float'}), '(linit, dtype=float)\n', (3847, 3867), False, 'from numpy import array\n'), ((3917, 3938), 'numpy.array', 'array', (['E'], {'dtype': 'float'}), '(E, dtype=float)\n', (3922, 3938), False, 'from numpy import array\n'), ((4013, 4039), 'numpy.array', 'array', (['radius'], {'dtype': 'float'}), '(radius, dtype=float)\n', (4018, 4039), False, 'from numpy import array\n'), ((6961, 6974), 'numpy.isinf', 'isinf', (['q_lpre'], {}), '(q_lpre)\n', (6966, 6974), False, 'from numpy import isinf\n'), ((6995, 7008), 'numpy.isnan', 'isnan', (['q_lpre'], {}), '(q_lpre)\n', (7000, 7008), False, 'from numpy import isnan\n'), ((7027, 7038), 'numpy.isinf', 'isinf', (['q_EA'], {}), '(q_EA)\n', (7032, 7038), False, 'from numpy import isinf\n'), ((7057, 7068), 'numpy.isnan', 'isnan', (['q_EA'], {}), '(q_EA)\n', (7062, 7068), False, 'from numpy import isnan\n')] |
# Lint as: python3
#
# Copyright 2020 The XLS Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Functions for dealing with concrete types and interpreter values."""
from typing import Tuple, Optional
from absl import logging
from xls.dslx import bit_helpers
from xls.dslx.interpreter.errors import FailureError
from xls.dslx.interpreter.value import Tag
from xls.dslx.interpreter.value import Value
from xls.dslx.python import cpp_ast as ast
from xls.dslx.python.cpp_concrete_type import ArrayType
from xls.dslx.python.cpp_concrete_type import BitsType
from xls.dslx.python.cpp_concrete_type import ConcreteType
from xls.dslx.python.cpp_concrete_type import EnumType
from xls.dslx.python.cpp_concrete_type import is_ubits
from xls.dslx.python.cpp_concrete_type import TupleType
from xls.dslx.python.cpp_pos import Span
from xls.dslx.python.cpp_scanner import Keyword
from xls.dslx.python.cpp_scanner import Token
from xls.dslx.python.cpp_scanner import TokenKind
from xls.dslx.python.cpp_scanner import TYPE_KEYWORDS_TO_SIGNEDNESS_AND_BITS
def _strength_reduce_enum(type_: ast.Enum, bit_count: int) -> ConcreteType:
  """Returns the (bits) concrete type backing an enum definition.
  Used e.g. in conversion checks: an enum is represented by the bits type
  that stores its members, with the enum's signedness.
  Args:
    type_: AST node (enum definition) to reduce.
    bit_count: Bit count of the enum's underlying storage type, as
      determined by type inference or interpretation.
  Returns:
    The concrete bits type underlying the enum.
  """
  assert isinstance(type_, ast.Enum), type_
  signedness = type_.signed
  assert isinstance(signedness, bool), type_
  return BitsType(signedness, bit_count)
def concrete_type_from_value(value: Value) -> ConcreteType:
  """Returns the concrete type of 'value'.
  Note that:
  * Non-zero-length arrays are assumed (for zero-length arrays the concrete
    element type is not reified in the array value, so it cannot be deduced).
  * Enums are strength-reduced to their underlying bits (storage) type.
  Args:
    value: Value to determine the concrete type for.
  """
  tag = value.tag
  if tag in (Tag.UBITS, Tag.SBITS):
    return BitsType(tag == Tag.SBITS, value.bits_payload.bit_count)
  if tag == Tag.ARRAY:
    # Deduce the element type from the first element (assumes non-empty).
    first_element = value.array_payload.index(0)
    return ArrayType(concrete_type_from_value(first_element), len(value))
  if tag == Tag.TUPLE:
    member_types = tuple(
        concrete_type_from_value(m) for m in value.tuple_members)
    return TupleType(member_types)
  assert tag == Tag.ENUM, value
  return _strength_reduce_enum(value.type_, value.bits_payload.bit_count)
def concrete_type_from_element_type_and_dims(
    element_type: ConcreteType, dims: Tuple[int, ...]) -> ConcreteType:
  """Wraps `element_type` in one ArrayType per entry of `dims`.
  The first entry of `dims` becomes the innermost (most minor) dimension.
  """
  wrapped = element_type
  for size in dims:
    wrapped = ArrayType(wrapped, size)
  return wrapped
def concrete_type_from_dims(primitive: Token,
                            dims: Tuple[int, ...]) -> 'ConcreteType':
  """Creates a concrete type from the primitive type token and dims.
  Args:
    primitive: The token holding the primitive type as a keyword.
    dims: Dimensions to apply to the primitive type; e.g. () is scalar, (5) is
      1-D array of 5 elements having the primitive type.
  Returns:
    A concrete type object.
  Raises:
    ValueError: If the primitive keyword is unrecognized or dims are empty.
  """
  # For `bits`/`uN`/`sN` the most-minor dim is the bit width itself, so it
  # becomes the BitsType size rather than an array dimension.
  if primitive.is_keyword(Keyword.BITS) or primitive.is_keyword(Keyword.UN):
    base_type = BitsType(signed=False, size=dims[-1])
  elif primitive.is_keyword(Keyword.SN):
    base_type = BitsType(signed=True, size=dims[-1])
  else:
    # Sized type keywords (e.g. u8/s32) carry their own signedness and
    # width, so every entry of `dims` is an array dimension.
    assert primitive.kind == TokenKind.KEYWORD
    signedness, bits = TYPE_KEYWORDS_TO_SIGNEDNESS_AND_BITS[primitive.value]
    element_type = BitsType(signedness, bits)
    # NOTE(review): this peels dims from the end, making dims[-1] the
    # innermost dimension — the opposite order from
    # concrete_type_from_element_type_and_dims (dims[0] most minor).
    # Confirm this asymmetry is intended.
    while dims:
      dims, minor = dims[:-1], dims[-1]
      element_type = ArrayType(element_type, minor)
    return element_type
  # Remaining dims (all but the consumed bit width) become array dimensions.
  result = concrete_type_from_element_type_and_dims(base_type, dims[:-1])
  logging.vlog(4, '%r %r => %r', primitive, dims, result)
  return result
def _value_compatible_with_type(module: ast.Module, type_: ConcreteType,
                                value: Value) -> bool:
  """Returns whether value is compatible with type_ (recursively)."""
  assert isinstance(value, Value), value
  # Tuple value vs tuple type: member-wise compatibility.
  if isinstance(type_, TupleType) and value.is_tuple():
    return all(
        _value_compatible_with_type(module, ct, m)
        for ct, m in zip(type_.get_unnamed_members(), value.tuple_members))
  # Array value vs array type: every element vs the element type.
  if isinstance(type_, ArrayType) and value.is_array():
    et = type_.get_element_type()
    return all(
        _value_compatible_with_type(module, et, m)
        for m in value.array_payload.elements)
  # Enum value vs enum type: nominal match on the defining AST node.
  if isinstance(type_, EnumType) and value.tag == Tag.ENUM:
    return type_.get_nominal_type(module) == value.type_
  # Bits value vs bits type: signedness and bit count must agree.
  if isinstance(type_,
                BitsType) and not type_.signed and value.tag == Tag.UBITS:
    return value.bits_payload.bit_count == type_.get_total_bit_count()
  if isinstance(type_, BitsType) and type_.signed and value.tag == Tag.SBITS:
    return value.bits_payload.bit_count == type_.get_total_bit_count()
  # Enum value vs bits type: underlying storage must match.
  if value.tag == Tag.ENUM and isinstance(type_, BitsType):
    return (value.type_.get_signedness() == type_.signed and
            value.bits_payload.bit_count == type_.get_total_bit_count())
  # Array value vs unsigned bits type: flattened width must match.
  if value.tag == Tag.ARRAY and is_ubits(type_):
    flat_bit_count = value.array_payload.flatten().bits_payload.bit_count
    return flat_bit_count == type_.get_total_bit_count()
  # Bits value vs enum type: signedness and total width must match.
  if isinstance(type_, EnumType) and value.is_bits():
    return (type_.signed == (value.tag == Tag.SBITS) and
            type_.get_total_bit_count() == value.get_bit_count())
  raise NotImplementedError(type_, value)
def concrete_type_accepts_value(module: ast.Module, type_: ConcreteType,
                                value: Value) -> bool:
  """Returns whether 'value' conforms to this concrete type."""
  def bits_conform(want_signed):
    # A bits value fits a BitsType of matching signedness and bit count.
    return (isinstance(type_, BitsType) and
            (type_.signed if want_signed else not type_.signed) and
            value.bits_payload.bit_count == type_.get_total_bit_count())
  if value.tag == Tag.UBITS:
    return bits_conform(False)
  if value.tag == Tag.SBITS:
    return bits_conform(True)
  if value.tag in (Tag.ARRAY, Tag.TUPLE, Tag.ENUM):
    return _value_compatible_with_type(module, type_, value)
  raise NotImplementedError(type_, value)
def concrete_type_convert_value(module: ast.Module, type_: ConcreteType,
                                value: Value, span: Span,
                                enum_values: Optional[Tuple[Value, ...]],
                                enum_signed: Optional[bool]) -> Value:
  """Converts 'value' into a value of this concrete type.
  Args:
    module: Module used for resolving nominal enum types.
    type_: Target concrete type of the conversion.
    value: Value being converted.
    span: Source span, used in error reporting.
    enum_values: When converting to an enum type, the valid enum member
      values; the converted bits must be one of them.
    enum_signed: When converting from an enum value, the signedness of its
      underlying type; selects sign- vs zero-extension.
  Raises:
    FailureError: If the conversion is invalid.
  """
  logging.vlog(3, 'Converting value %s to type %s', value, type_)
  # ubits -> array: slice the flat bits into per-element chunks.
  if value.tag == Tag.UBITS and isinstance(type_, ArrayType):
    bits_per_element = type_.get_element_type().get_total_bit_count().value
    bits = value.bits_payload
    def bit_slice_value_at_index(i):
      # Element i occupies bits [i*w, (i+1)*w) with MSb-first ordering.
      return Value(
          Tag.UBITS,
          bits.slice(
              i * bits_per_element, (i + 1) * bits_per_element, lsb_is_0=False))
    return Value.make_array(
        tuple(bit_slice_value_at_index(i) for i in range(type_.size.value)))
  # bits/enum -> enum (same width): only valid for known enum member bits.
  if (isinstance(type_, EnumType) and
      value.tag in (Tag.UBITS, Tag.SBITS, Tag.ENUM) and
      value.get_bit_count() == type_.get_total_bit_count()):
    # Check that the bits we're converting from are present in the enum type
    # we're converting to.
    nominal_type = type_.get_nominal_type(module)
    for enum_value in enum_values:
      if value.bits_payload == enum_value.bits_payload:
        break
    else:
      raise FailureError(
          span,
          'Value is not valid for enum {}: {}'.format(nominal_type.identifier,
                                                      value))
    return Value.make_enum(value.bits_payload, nominal_type)
  # enum -> bits (same width): reinterpret the payload with the target
  # type's signedness.
  if (value.tag == Tag.ENUM and isinstance(type_, BitsType) and
      type_.get_total_bit_count() == value.get_bit_count()):
    constructor = Value.make_sbits if type_.signed else Value.make_ubits
    bit_count = type_.get_total_bit_count().value
    return constructor(bit_count, value.bits_payload.value)
  # Extension helpers for bits-to-bits conversions of differing widths.
  def zero_ext() -> Value:
    assert isinstance(type_, BitsType)
    constructor = Value.make_sbits if type_.signed else Value.make_ubits
    bit_count = type_.get_total_bit_count().value
    # Masking also truncates when the target is narrower than the source.
    return constructor(bit_count,
                       value.get_bits_value() & bit_helpers.to_mask(bit_count))
  def sign_ext() -> Value:
    assert isinstance(type_, BitsType)
    constructor = Value.make_sbits if type_.signed else Value.make_ubits
    bit_count = type_.get_total_bit_count().value
    logging.vlog(3, 'Sign extending %s to %s', value, bit_count)
    return constructor(bit_count, value.bits_payload.sign_ext(bit_count).value)
  # Extension is chosen by the *source* signedness.
  if value.tag == Tag.UBITS:
    return zero_ext()
  if value.tag == Tag.SBITS:
    return sign_ext()
  if value.tag == Tag.ENUM:
    assert enum_signed is not None
    return sign_ext() if enum_signed else zero_ext()
  # If we're converting an array into bits, flatten the array payload.
  if value.tag == Tag.ARRAY and isinstance(type_, BitsType):
    return value.array_payload.flatten()
  if concrete_type_accepts_value(module, type_, value):  # Vacuous conversion.
    return value
  raise FailureError(
      span,
      'Interpreter failure: cannot convert value %s (of type %s) to type %s' %
      (value, concrete_type_from_value(value), type_))
| [
"xls.dslx.python.cpp_concrete_type.BitsType",
"xls.dslx.python.cpp_concrete_type.ArrayType",
"xls.dslx.interpreter.value.Value.make_enum",
"xls.dslx.bit_helpers.to_mask",
"absl.logging.vlog",
"xls.dslx.python.cpp_concrete_type.is_ubits"
] | [((2151, 2178), 'xls.dslx.python.cpp_concrete_type.BitsType', 'BitsType', (['signed', 'bit_count'], {}), '(signed, bit_count)\n', (2159, 2178), False, 'from xls.dslx.python.cpp_concrete_type import BitsType\n'), ((4585, 4640), 'absl.logging.vlog', 'logging.vlog', (['(4)', '"""%r %r => %r"""', 'primitive', 'dims', 'result'], {}), "(4, '%r %r => %r', primitive, dims, result)\n", (4597, 4640), False, 'from absl import logging\n'), ((7358, 7421), 'absl.logging.vlog', 'logging.vlog', (['(3)', '"""Converting value %s to type %s"""', 'value', 'type_'], {}), "(3, 'Converting value %s to type %s', value, type_)\n", (7370, 7421), False, 'from absl import logging\n'), ((2717, 2763), 'xls.dslx.python.cpp_concrete_type.BitsType', 'BitsType', (['signed', 'value.bits_payload.bit_count'], {}), '(signed, value.bits_payload.bit_count)\n', (2725, 2763), False, 'from xls.dslx.python.cpp_concrete_type import BitsType\n'), ((3413, 3430), 'xls.dslx.python.cpp_concrete_type.ArrayType', 'ArrayType', (['t', 'dim'], {}), '(t, dim)\n', (3422, 3430), False, 'from xls.dslx.python.cpp_concrete_type import ArrayType\n'), ((4066, 4103), 'xls.dslx.python.cpp_concrete_type.BitsType', 'BitsType', ([], {'signed': '(False)', 'size': 'dims[-1]'}), '(signed=False, size=dims[-1])\n', (4074, 4103), False, 'from xls.dslx.python.cpp_concrete_type import BitsType\n'), ((5969, 5984), 'xls.dslx.python.cpp_concrete_type.is_ubits', 'is_ubits', (['type_'], {}), '(type_)\n', (5977, 5984), False, 'from xls.dslx.python.cpp_concrete_type import is_ubits\n'), ((8498, 8547), 'xls.dslx.interpreter.value.Value.make_enum', 'Value.make_enum', (['value.bits_payload', 'nominal_type'], {}), '(value.bits_payload, nominal_type)\n', (8513, 8547), False, 'from xls.dslx.interpreter.value import Value\n'), ((9355, 9415), 'absl.logging.vlog', 'logging.vlog', (['(3)', '"""Sign extending %s to %s"""', 'value', 'bit_count'], {}), "(3, 'Sign extending %s to %s', value, bit_count)\n", (9367, 9415), False, 'from absl import 
logging\n'), ((4161, 4197), 'xls.dslx.python.cpp_concrete_type.BitsType', 'BitsType', ([], {'signed': '(True)', 'size': 'dims[-1]'}), '(signed=True, size=dims[-1])\n', (4169, 4197), False, 'from xls.dslx.python.cpp_concrete_type import BitsType\n'), ((4349, 4375), 'xls.dslx.python.cpp_concrete_type.BitsType', 'BitsType', (['signedness', 'bits'], {}), '(signedness, bits)\n', (4357, 4375), False, 'from xls.dslx.python.cpp_concrete_type import BitsType\n'), ((4453, 4483), 'xls.dslx.python.cpp_concrete_type.ArrayType', 'ArrayType', (['element_type', 'minor'], {}), '(element_type, minor)\n', (4462, 4483), False, 'from xls.dslx.python.cpp_concrete_type import ArrayType\n'), ((9129, 9159), 'xls.dslx.bit_helpers.to_mask', 'bit_helpers.to_mask', (['bit_count'], {}), '(bit_count)\n', (9148, 9159), False, 'from xls.dslx import bit_helpers\n')] |
from libpysal.weights.contiguity import Queen
import libpysal
from libpysal import examples
import matplotlib.pyplot as plt
import geopandas as gpd
from splot.libpysal import plot_spatial_weights
def test_plot_spatial_weights():
    """Smoke test: plot_spatial_weights handles Queen weights, nonplanar
    joins, custom edge kwargs, and an id-indexed weights object."""
    # get data
    gdf = gpd.read_file(examples.get_path('43MUE250GC_SIR.shp'))
    gdf.head()
    # calculate Queen contiguity weights
    weights = Queen.from_dataframe(gdf)
    # plot weights
    fig, _ = plot_spatial_weights(weights, gdf)
    plt.close(fig)
    # calculate nonplanar_joins
    wnp = libpysal.weights.util.nonplanar_neighbors(weights, gdf)
    # plot new joins
    fig2, _ = plot_spatial_weights(wnp, gdf)
    plt.close(fig2)
    # customize nonplanar edge color
    fig3, _ = plot_spatial_weights(wnp, gdf, nonplanar_edge_kws=dict(color='#4393c3'))
    plt.close(fig3)
    # uses a column as the index for spatial weights object
    weights_index = Queen.from_dataframe(gdf, idVariable="CD_GEOCMU")
    fig, _ = plot_spatial_weights(weights_index, gdf, indexed_on="CD_GEOCMU")
    plt.close(fig) | [
"libpysal.examples.get_path",
"splot.libpysal.plot_spatial_weights",
"libpysal.weights.contiguity.Queen.from_dataframe",
"matplotlib.pyplot.close",
"libpysal.weights.util.nonplanar_neighbors"
] | [((364, 389), 'libpysal.weights.contiguity.Queen.from_dataframe', 'Queen.from_dataframe', (['gdf'], {}), '(gdf)\n', (384, 389), False, 'from libpysal.weights.contiguity import Queen\n'), ((422, 456), 'splot.libpysal.plot_spatial_weights', 'plot_spatial_weights', (['weights', 'gdf'], {}), '(weights, gdf)\n', (442, 456), False, 'from splot.libpysal import plot_spatial_weights\n'), ((461, 475), 'matplotlib.pyplot.close', 'plt.close', (['fig'], {}), '(fig)\n', (470, 475), True, 'import matplotlib.pyplot as plt\n'), ((518, 573), 'libpysal.weights.util.nonplanar_neighbors', 'libpysal.weights.util.nonplanar_neighbors', (['weights', 'gdf'], {}), '(weights, gdf)\n', (559, 573), False, 'import libpysal\n'), ((609, 639), 'splot.libpysal.plot_spatial_weights', 'plot_spatial_weights', (['wnp', 'gdf'], {}), '(wnp, gdf)\n', (629, 639), False, 'from splot.libpysal import plot_spatial_weights\n'), ((644, 659), 'matplotlib.pyplot.close', 'plt.close', (['fig2'], {}), '(fig2)\n', (653, 659), True, 'import matplotlib.pyplot as plt\n'), ((766, 781), 'matplotlib.pyplot.close', 'plt.close', (['fig3'], {}), '(fig3)\n', (775, 781), True, 'import matplotlib.pyplot as plt\n'), ((863, 912), 'libpysal.weights.contiguity.Queen.from_dataframe', 'Queen.from_dataframe', (['gdf'], {'idVariable': '"""CD_GEOCMU"""'}), "(gdf, idVariable='CD_GEOCMU')\n", (883, 912), False, 'from libpysal.weights.contiguity import Queen\n'), ((926, 990), 'splot.libpysal.plot_spatial_weights', 'plot_spatial_weights', (['weights_index', 'gdf'], {'indexed_on': '"""CD_GEOCMU"""'}), "(weights_index, gdf, indexed_on='CD_GEOCMU')\n", (946, 990), False, 'from splot.libpysal import plot_spatial_weights\n'), ((995, 1009), 'matplotlib.pyplot.close', 'plt.close', (['fig'], {}), '(fig)\n', (1004, 1009), True, 'import matplotlib.pyplot as plt\n'), ((270, 309), 'libpysal.examples.get_path', 'examples.get_path', (['"""43MUE250GC_SIR.shp"""'], {}), "('43MUE250GC_SIR.shp')\n", (287, 309), False, 'from libpysal import examples\n')] |
#!/usr/bin/env python
# encoding: utf-8
# The MIT License (MIT)
# Copyright (c) 2017 CNRS
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
# AUTHORS
# <NAME> - http://herve.niderb.fr
import time
import yaml
from os.path import dirname, basename
import numpy as np
from tqdm import tqdm
from glob import glob
from pyannote.database import FileFinder
from pyannote.database import get_protocol
from pyannote.audio.util import mkdir_p
from sortedcontainers import SortedDict
import tensorboardX
from functools import partial
class Application(object):
    """Base class for pyannote-audio experiment applications.
    An application is rooted at an `experiment_dir` containing a `config.yml`
    that describes feature extraction, optimizer, and learning-rate scheduler.
    Artifacts are laid out according to the path templates below (resolved
    with str.format).
    """
    CONFIG_YML = '{experiment_dir}/config.yml'
    TRAIN_DIR = '{experiment_dir}/train/{protocol}.{subset}'
    WEIGHTS_PT = '{train_dir}/weights/{epoch:04d}.pt'
    # created by "validate" mode
    VALIDATE_DIR = '{train_dir}/validate/{protocol}.{subset}'
    @classmethod
    def from_train_dir(cls, train_dir, db_yml=None):
        """Instantiates from a training directory (two levels below
        `experiment_dir`, see TRAIN_DIR template)."""
        experiment_dir = dirname(dirname(train_dir))
        app = cls(experiment_dir, db_yml=db_yml)
        app.train_dir_ = train_dir
        return app
    @classmethod
    def from_validate_txt(cls, validate_txt, db_yml=None):
        """Instantiates from a validation .txt file (three directory levels
        below the training directory, see VALIDATE_DIR template)."""
        train_dir = dirname(dirname(dirname(validate_txt)))
        app = cls.from_train_dir(train_dir, db_yml=db_yml)
        app.validate_txt_ = validate_txt
        return app
    @classmethod
    def from_model_pt(cls, model_pt, db_yml=None):
        """Instantiates from a model checkpoint and loads its weights."""
        train_dir = dirname(dirname(model_pt))
        app = cls.from_train_dir(train_dir, db_yml=db_yml)
        app.model_pt_ = model_pt
        # epoch number is encoded in the checkpoint basename '{epoch:04d}.pt'
        epoch = int(basename(app.model_pt_)[:-3])
        app.model_ = app.load_model(epoch, train_dir=train_dir)
        return app
    def __init__(self, experiment_dir, db_yml=None):
        super(Application, self).__init__()
        self.db_yml = db_yml
        self.preprocessors_ = {'audio': FileFinder(self.db_yml)}
        self.experiment_dir = experiment_dir
        # load configuration
        config_yml = self.CONFIG_YML.format(experiment_dir=self.experiment_dir)
        with open(config_yml, 'r') as fp:
            # NOTE(review): yaml.load without an explicit Loader is deprecated
            # and unsafe on untrusted input — consider yaml.safe_load.
            self.config_ = yaml.load(fp)
        # scheduler
        SCHEDULER_DEFAULT = {'name': 'DavisKingScheduler',
                             'params': {'learning_rate': 'auto'}}
        scheduler_cfg = self.config_.get('scheduler', SCHEDULER_DEFAULT)
        scheduler_name = scheduler_cfg['name']
        # resolve the scheduler class by name from the schedulers module
        schedulers = __import__('pyannote.audio.train.schedulers',
                                 fromlist=[scheduler_name])
        Scheduler = getattr(schedulers, scheduler_name)
        scheduler_params = scheduler_cfg.get('params', {})
        self.get_scheduler_ = partial(Scheduler, **scheduler_params)
        self.learning_rate_ = scheduler_params.get('learning_rate', 'auto')
        # optimizer
        OPTIMIZER_DEFAULT = {
            'name': 'SGD',
            'params': {'momentum': 0.9, 'dampening': 0, 'weight_decay': 0,
                       'nesterov': True}}
        optimizer_cfg = self.config_.get('optimizer', OPTIMIZER_DEFAULT)
        optimizer_name = optimizer_cfg['name']
        # resolve the optimizer class by name from torch.optim
        optimizers = __import__('torch.optim',
                                 fromlist=[optimizer_name])
        Optimizer = getattr(optimizers, optimizer_name)
        optimizer_params = optimizer_cfg.get('params', {})
        self.get_optimizer_ = partial(Optimizer, **optimizer_params)
        # feature extraction (optional section of config.yml)
        if 'feature_extraction' in self.config_:
            extraction_name = self.config_['feature_extraction']['name']
            features = __import__('pyannote.audio.features',
                                   fromlist=[extraction_name])
            FeatureExtraction = getattr(features, extraction_name)
            self.feature_extraction_ = FeatureExtraction(
                **self.config_['feature_extraction'].get('params', {}))
    def train(self, protocol_name, subset='train', restart=None, epochs=1000):
        """Trains the model on the given protocol subset.
        Relies on `self.task_`, `self.model_` and `self.device`, which are
        not defined in this base class — presumably set by subclasses.
        """
        train_dir = self.TRAIN_DIR.format(
            experiment_dir=self.experiment_dir,
            protocol=protocol_name,
            subset=subset)
        protocol = get_protocol(protocol_name, progress=True,
                                preprocessors=self.preprocessors_)
        self.task_.fit(
            self.model_, self.feature_extraction_,
            protocol, subset=subset, restart=restart, epochs=epochs,
            get_optimizer=self.get_optimizer_,
            get_scheduler=self.get_scheduler_,
            learning_rate=self.learning_rate_,
            log_dir=train_dir, device=self.device)
    def load_model(self, epoch, train_dir=None):
        """Load pretrained model
        Parameters
        ----------
        epoch : int
            Which epoch to load.
        train_dir : str, optional
            Path to train directory. Defaults to self.train_dir_.
        """
        if train_dir is None:
            train_dir = self.train_dir_
        import torch
        weights_pt = self.WEIGHTS_PT.format(
            train_dir=train_dir, epoch=epoch)
        self.model_.load_state_dict(torch.load(weights_pt))
        return self.model_
    def get_number_of_epochs(self, train_dir=None, return_first=False):
        """Get information about completed epochs
        Parameters
        ----------
        train_dir : str, optional
            Training directory. Defaults to self.train_dir_
        return_first : bool, optional
            Defaults (False) to return number of epochs.
            Set to True to also return index of first epoch.
        """
        if train_dir is None:
            train_dir = self.train_dir_
        # strip the trailing '0000.pt' to recover the weights path prefix
        directory = self.WEIGHTS_PT.format(train_dir=train_dir, epoch=0)[:-7]
        weights = sorted(glob(directory + '*[0-9][0-9][0-9][0-9].pt'))
        if not weights:
            number_of_epochs = 0
            first_epoch = None
        else:
            # epoch indices are parsed from the 4-digit checkpoint basenames
            number_of_epochs = int(basename(weights[-1])[:-3]) + 1
            first_epoch = int(basename(weights[0])[:-3])
        return (number_of_epochs, first_epoch) if return_first \
            else number_of_epochs
    def validate_init(self, protocol_name, subset='development'):
        # hook for subclasses: prepare (and return) validation data
        pass
    def validate_epoch(self, epoch, protocol_name, subset='development',
                       validation_data=None):
        # hook for subclasses: must return a dict shaped like
        # {'metric': {'minimize': True, 'value': 0.2}, ...}
        raise NotImplementedError('')
    def validate(self, protocol_name, subset='development',
                 every=1, start=0, end=None, in_order=False, **kwargs):
        """Validates completed epochs as they appear, logging to tensorboard.
        Consumes epochs from `validate_iter`, evaluates each one with
        `validate_epoch`, and tracks the best value per metric in the
        progress-bar description.
        """
        # best_epoch/best_value start as dicts but are rebound to scalars
        # inside the per-metric loop below
        minimize, values, best_epoch, best_value = {}, {}, {}, {}
        validate_dir = self.VALIDATE_DIR.format(train_dir=self.train_dir_,
                                                protocol=protocol_name,
                                                subset=subset)
        mkdir_p(validate_dir)
        writer = tensorboardX.SummaryWriter(log_dir=validate_dir)
        validation_data = self.validate_init(protocol_name, subset=subset,
                                             **kwargs)
        progress_bar = tqdm(unit='epoch')
        for i, epoch in enumerate(
                self.validate_iter(start=start, end=end, step=every,
                                   in_order=in_order)):
            # validate_epoch returns e.g.
            # {'metric1': {'minimize': True, 'value': 0.2},
            #  'metric2': {'minimize': False, 'value': 0.9}}
            metrics = self.validate_epoch(epoch, protocol_name, subset=subset,
                                          validation_data=validation_data)
            if i == 0:
                # first evaluated epoch: record each metric's direction
                for metric, details in metrics.items():
                    minimize[metric] = details.get('minimize', True)
                    values[metric] = SortedDict()
            description = 'Epoch #{epoch}'.format(epoch=epoch)
            for metric, details in sorted(metrics.items()):
                value = details['value']
                values[metric][epoch] = value
                writer.add_scalar(
                    f'validate/{protocol_name}.{subset}/{metric}',
                    values[metric][epoch], global_step=epoch)
                # keep track of best epoch so far
                if minimize[metric] == 'NA':
                    best_value = 'NA'
                elif minimize[metric]:
                    best_epoch = \
                        values[metric].iloc[np.argmin(values[metric].values())]
                    best_value = values[metric][best_epoch]
                else:
                    best_epoch = \
                        values[metric].iloc[np.argmax(values[metric].values())]
                    best_value = values[metric][best_epoch]
                if best_value == 'NA':
                    continue
                # values below 1 in magnitude are displayed as percentages
                if abs(best_value) < 1:
                    addon = (' : {metric} = {value:.3f}% '
                             '[{best_value:.3f}%, #{best_epoch}]')
                    description += addon.format(metric=metric, value=100 * value,
                                                best_value=100 * best_value,
                                                best_epoch=best_epoch)
                else:
                    addon = (' : {metric} = {value:.3f} '
                             '[{best_value:.3f}, #{best_epoch}]')
                    description += addon.format(metric=metric, value=value,
                                                best_value=best_value,
                                                best_epoch=best_epoch)
            progress_bar.set_description(description)
            progress_bar.update(1)
    def validate_iter(self, start=None, end=None, step=1, sleep=10,
                      in_order=False):
        """Continuously watches `train_dir` for newly completed epochs
        and yields them for validation
        Note that epochs will not necessarily be yielded in order.
        The very last completed epoch will always be first on the list.
        Parameters
        ----------
        start : int, optional
            Start validating after `start` epochs. Defaults to 0.
        end : int, optional
            Stop validating after epoch `end`. Defaults to never stop.
        step : int, optional
            Validate every `step`th epoch. Defaults to 1.
        sleep : int, optional
            Number of seconds to wait between polls of the train directory.
        in_order : bool, optional
            Force chronological validation.
        Usage
        -----
        >>> for epoch in app.validate_iter():
        ... app.validate(epoch)
        """
        if end is None:
            end = np.inf
        if start is None:
            start = 0
        validated_epochs = set()
        next_epoch_to_validate_in_order = start
        while next_epoch_to_validate_in_order < end:
            # wait for first epoch to complete
            _, first_epoch = self.get_number_of_epochs(return_first=True)
            if first_epoch is None:
                print('waiting for first epoch to complete...')
                time.sleep(sleep)
                continue
            # corner case: make sure this does not wait forever
            # for epoch 'start' as it might never happen, in case
            # training is started after n pre-existing epochs
            if next_epoch_to_validate_in_order < first_epoch:
                next_epoch_to_validate_in_order = first_epoch
            # first epoch has completed
            break
        # NOTE(review): `end` only bounds the initial wait loop above; the
        # loop below never terminates on its own — confirm intended.
        while True:
            # check last completed epoch
            last_completed_epoch = self.get_number_of_epochs() - 1
            # if last completed epoch has not been processed yet,
            # always process it first (except if 'in order')
            if (not in_order) and (last_completed_epoch not in validated_epochs):
                next_epoch_to_validate = last_completed_epoch
                time.sleep(5) # HACK give checkpoint time to save weights
            # in case no new epoch has completed since last time
            # process the next epoch in chronological order (if available)
            elif next_epoch_to_validate_in_order <= last_completed_epoch:
                next_epoch_to_validate = next_epoch_to_validate_in_order
            # otherwise, just wait for a new epoch to complete
            else:
                time.sleep(sleep)
                continue
            if next_epoch_to_validate not in validated_epochs:
                # yield next epoch to process
                yield next_epoch_to_validate
                # remember which epoch was processed
                validated_epochs.add(next_epoch_to_validate)
            # increment 'in_order' processing
            if next_epoch_to_validate_in_order == next_epoch_to_validate:
                next_epoch_to_validate_in_order += step
| [
"tensorboardX.SummaryWriter",
"pyannote.database.get_protocol",
"torch.load",
"tqdm.tqdm",
"yaml.load",
"time.sleep",
"os.path.dirname",
"pyannote.audio.util.mkdir_p",
"functools.partial",
"pyannote.database.FileFinder",
"os.path.basename",
"sortedcontainers.SortedDict",
"glob.glob"
] | [((3614, 3652), 'functools.partial', 'partial', (['Scheduler'], {}), '(Scheduler, **scheduler_params)\n', (3621, 3652), False, 'from functools import partial\n'), ((4295, 4333), 'functools.partial', 'partial', (['Optimizer'], {}), '(Optimizer, **optimizer_params)\n', (4302, 4333), False, 'from functools import partial\n'), ((5061, 5138), 'pyannote.database.get_protocol', 'get_protocol', (['protocol_name'], {'progress': '(True)', 'preprocessors': 'self.preprocessors_'}), '(protocol_name, progress=True, preprocessors=self.preprocessors_)\n', (5073, 5138), False, 'from pyannote.database import get_protocol\n'), ((7732, 7753), 'pyannote.audio.util.mkdir_p', 'mkdir_p', (['validate_dir'], {}), '(validate_dir)\n', (7739, 7753), False, 'from pyannote.audio.util import mkdir_p\n'), ((7771, 7819), 'tensorboardX.SummaryWriter', 'tensorboardX.SummaryWriter', ([], {'log_dir': 'validate_dir'}), '(log_dir=validate_dir)\n', (7797, 7819), False, 'import tensorboardX\n'), ((7975, 7993), 'tqdm.tqdm', 'tqdm', ([], {'unit': '"""epoch"""'}), "(unit='epoch')\n", (7979, 7993), False, 'from tqdm import tqdm\n'), ((1924, 1942), 'os.path.dirname', 'dirname', (['train_dir'], {}), '(train_dir)\n', (1931, 1942), False, 'from os.path import dirname, basename\n'), ((2400, 2417), 'os.path.dirname', 'dirname', (['model_pt'], {}), '(model_pt)\n', (2407, 2417), False, 'from os.path import dirname, basename\n'), ((2813, 2836), 'pyannote.database.FileFinder', 'FileFinder', (['self.db_yml'], {}), '(self.db_yml)\n', (2823, 2836), False, 'from pyannote.database import FileFinder\n'), ((3063, 3076), 'yaml.load', 'yaml.load', (['fp'], {}), '(fp)\n', (3072, 3076), False, 'import yaml\n'), ((6015, 6037), 'torch.load', 'torch.load', (['weights_pt'], {}), '(weights_pt)\n', (6025, 6037), False, 'import torch\n'), ((6666, 6710), 'glob.glob', 'glob', (["(directory + '*[0-9][0-9][0-9][0-9].pt')"], {}), "(directory + '*[0-9][0-9][0-9][0-9].pt')\n", (6670, 6710), False, 'from glob import glob\n'), ((2160, 2181), 
'os.path.dirname', 'dirname', (['validate_txt'], {}), '(validate_txt)\n', (2167, 2181), False, 'from os.path import dirname, basename\n'), ((2531, 2554), 'os.path.basename', 'basename', (['app.model_pt_'], {}), '(app.model_pt_)\n', (2539, 2554), False, 'from os.path import dirname, basename\n'), ((11850, 11867), 'time.sleep', 'time.sleep', (['sleep'], {}), '(sleep)\n', (11860, 11867), False, 'import time\n'), ((12687, 12700), 'time.sleep', 'time.sleep', (['(5)'], {}), '(5)\n', (12697, 12700), False, 'import time\n'), ((6913, 6933), 'os.path.basename', 'basename', (['weights[0]'], {}), '(weights[0])\n', (6921, 6933), False, 'from os.path import dirname, basename\n'), ((8609, 8621), 'sortedcontainers.SortedDict', 'SortedDict', ([], {}), '()\n', (8619, 8621), False, 'from sortedcontainers import SortedDict\n'), ((13132, 13149), 'time.sleep', 'time.sleep', (['sleep'], {}), '(sleep)\n', (13142, 13149), False, 'import time\n'), ((6851, 6872), 'os.path.basename', 'basename', (['weights[-1]'], {}), '(weights[-1])\n', (6859, 6872), False, 'from os.path import dirname, basename\n')] |
from setuptools import setup, find_packages
# Development-only dependencies installed via `pip install yente[dev]`.
DEV_REQUIREMENTS = [
    "pip>=10.0.0",
    "bump2version",
    "wheel>=0.29.0",
    "twine",
    "mypy",
    "pytest",
    "pytest-cov",
    "flake8>=2.6.0",
    "black",
]

# The README doubles as the long description shown on PyPI.
with open("README.md") as readme:
    long_description = readme.read()

setup(
    name="yente",
    version="1.3.5",
    url="https://opensanctions.org/docs/api/",
    long_description=long_description,
    long_description_content_type="text/markdown",
    license="MIT",
    author="OpenSanctions",
    author_email="<EMAIL>",
    packages=find_packages(exclude=["examples", "test"]),
    namespace_packages=[],
    extras_require={"dev": DEV_REQUIREMENTS},
    zip_safe=False,
)
| [
"setuptools.find_packages"
] | [((379, 422), 'setuptools.find_packages', 'find_packages', ([], {'exclude': "['examples', 'test']"}), "(exclude=['examples', 'test'])\n", (392, 422), False, 'from setuptools import setup, find_packages\n')] |
# Built-in
import os
from glob import glob
# Libs
import numpy as np
from tqdm import tqdm
from natsort import natsorted
# Own modules
from data import data_utils
from mrs_utils import misc_utils, process_block
# Settings
# Dataset key: names the cached stats file (<DS_NAME>.npy) and the
# ValueComputeProcess entry used by get_stats_pb() below.
DS_NAME = 'spca'
def get_images(data_dir, valid_percent=0.5, split=False):
    """Collect (RGB, GT) tile pairs from ``data_dir`` and split them per city.

    :param data_dir: directory containing ``*RGB.jpg`` and matching ``*GT.png`` tiles
    :param valid_percent: fraction of each city's tiles put in the validation split
    :param split: if True return (train_pairs, valid_pairs); otherwise return
        the validation split only, as two parallel lists (rgb_files, lbl_files)
    """
    rgb_files = natsorted(glob(os.path.join(data_dir, '*RGB.jpg')))
    lbl_files = natsorted(glob(os.path.join(data_dir, '*GT.png')))
    assert len(rgb_files) == len(lbl_files)
    pairs = list(zip(rgb_files, lbl_files))
    train_files, valid_files = [], []
    # Split city-by-city so every city is represented in both splits.
    for city_name in ('Fresno', 'Modesto', 'Stockton', 'aus'):
        city_pairs = [pair for pair in pairs if city_name in pair[0]]
        valid_size = int(valid_percent * len(city_pairs))
        valid_files.extend(city_pairs[:valid_size])
        train_files.extend(city_pairs[valid_size:])
    if split:
        return train_files, valid_files
    return [rgb for rgb, _ in valid_files], [lbl for _, lbl in valid_files]
def create_dataset(data_dir, save_dir, patch_size, pad, overlap, valid_percent=0.1, visualize=False):
    """Create the patch index files for the train and validation splits.

    For each split a ``file_list_{split}_{percent}.txt`` file is written under
    ``save_dir``, with one "img_patch lbl_patch" line per extracted patch.

    Fixes: the record files were previously opened and never closed (a file
    handle leak that could truncate output on interpreter exit); the two
    identical train/valid loops are factored into one helper.

    :param data_dir: directory with the source tiles (see ``get_images``)
    :param save_dir: output directory for the patch folder and index files
    :param patch_size: (height, width) of the extracted patches
    :param pad: padding passed through to ``data_utils.patch_tile``
    :param overlap: patch overlap passed through to ``data_utils.patch_tile``
    :param valid_percent: per-city validation fraction (see ``get_images``)
    :param visualize: if True, display each (rgb, gt) patch pair
    """
    patch_dir = os.path.join(save_dir, 'patches')
    misc_utils.make_dir_if_not_exist(patch_dir)
    train_files, valid_files = get_images(data_dir, valid_percent, split=True)
    suffix = misc_utils.float2str(valid_percent)
    for split_name, file_pairs in (('train', train_files), ('valid', valid_files)):
        record_path = os.path.join(save_dir, 'file_list_{}_{}.txt'.format(split_name, suffix))
        with open(record_path, 'w+') as record_file:
            _write_patch_records(record_file, file_pairs, patch_size, pad, overlap, visualize)


def _write_patch_records(record_file, file_pairs, patch_size, pad, overlap, visualize):
    """Patch every (image, label) pair and append one index line per patch.

    Note: the patch image files themselves are not written here (the original
    save_file calls were disabled); only the index lines are produced.
    """
    for img_file, lbl_file in tqdm(file_pairs):
        # City name is the first '_'-separated token of the tile file name.
        city_name = os.path.splitext(os.path.basename(img_file))[0].split('_')[0]
        for rgb_patch, gt_patch, y, x in data_utils.patch_tile(img_file, lbl_file, patch_size, pad, overlap):
            if visualize:
                from mrs_utils import vis_utils
                vis_utils.compare_figures([rgb_patch, gt_patch], (1, 2), fig_size=(12, 5))
            img_patchname = '{}_y{}x{}.jpg'.format(city_name, int(y), int(x))
            lbl_patchname = '{}_y{}x{}.png'.format(city_name, int(y), int(x))
            record_file.write('{} {}\n'.format(img_patchname, lbl_patchname))
def get_stats(img_dir):
    """Compute per-channel statistics over every RGB tile in ``img_dir``.

    Returns a (2, C) array whose first row is the per-channel mean and
    second row the per-channel std, as produced by ``data_utils.get_ds_stats``.
    """
    from glob import glob
    from data import data_utils
    rgb_imgs = glob(os.path.join(img_dir, '*RGB.jpg'))
    mean, std = data_utils.get_ds_stats(rgb_imgs)
    return np.stack([mean, std], axis=0)
def get_stats_pb(img_dir):
    """Cached wrapper around :func:`get_stats`.

    Uses ``process_block.ValueComputeProcess`` so the (mean, std) statistics
    are computed once and stored as ``stats/builtin/<DS_NAME>.npy``.
    Returns the value twice (train stats, test stats) — both splits share it.
    """
    builtin_dir = os.path.dirname(__file__)
    store_dir = os.path.join(builtin_dir, '../stats/builtin')
    store_file = os.path.join(builtin_dir, '../stats/builtin/{}.npy'.format(DS_NAME))
    process = process_block.ValueComputeProcess(DS_NAME, store_dir, store_file, func=get_stats)
    val = process.run(img_dir=img_dir).val
    return val, val
if __name__ == '__main__':
    # Fix the RNG seed for a reproducible train/valid split.
    # (The previous unused glob over the tile directory was removed: its
    # result `img_files` was never referenced.)
    np.random.seed(931004)
    # Patch geometry: 512x512 patches, no overlap, no padding.
    ps = 512
    ol = 0
    pd = 0
    create_dataset(data_dir=r'/home/wh145/data/caemo',
                   save_dir=r'/home/wh145/data/caemo/ps_512_ol_0', patch_size=(ps, ps), pad=pd, overlap=ol, visualize=False, valid_percent=0.1)
    # val = get_stats_pb(r'/media/ei-edl01/data/uab_datasets/spca/data/Original_Tiles')[0]
    # data_utils.patches_to_hdf5(r'/hdd/mrs/spca', r'/hdd/mrs/spca/ps512_pd0_ol0_hdf5')
| [
"mrs_utils.misc_utils.float2str",
"mrs_utils.misc_utils.make_dir_if_not_exist",
"data.data_utils.patch_tile",
"tqdm.tqdm",
"os.path.join",
"mrs_utils.vis_utils.compare_figures",
"numpy.stack",
"os.path.dirname",
"numpy.random.seed",
"os.path.basename",
"data.data_utils.get_ds_stats"
] | [((1428, 1461), 'os.path.join', 'os.path.join', (['save_dir', '"""patches"""'], {}), "(save_dir, 'patches')\n", (1440, 1461), False, 'import os\n'), ((1466, 1509), 'mrs_utils.misc_utils.make_dir_if_not_exist', 'misc_utils.make_dir_if_not_exist', (['patch_dir'], {}), '(patch_dir)\n', (1498, 1509), False, 'from mrs_utils import misc_utils, process_block\n'), ((1896, 1913), 'tqdm.tqdm', 'tqdm', (['train_files'], {}), '(train_files)\n', (1900, 1913), False, 'from tqdm import tqdm\n'), ((2748, 2765), 'tqdm.tqdm', 'tqdm', (['valid_files'], {}), '(valid_files)\n', (2752, 2765), False, 'from tqdm import tqdm\n'), ((3730, 3763), 'data.data_utils.get_ds_stats', 'data_utils.get_ds_stats', (['rgb_imgs'], {}), '(rgb_imgs)\n', (3753, 3763), False, 'from data import data_utils\n'), ((3775, 3810), 'numpy.stack', 'np.stack', (['[ds_mean, ds_std]'], {'axis': '(0)'}), '([ds_mean, ds_std], axis=0)\n', (3783, 3810), True, 'import numpy as np\n'), ((4296, 4318), 'numpy.random.seed', 'np.random.seed', (['(931004)'], {}), '(931004)\n', (4310, 4318), True, 'import numpy as np\n'), ((2038, 2105), 'data.data_utils.patch_tile', 'data_utils.patch_tile', (['img_file', 'lbl_file', 'patch_size', 'pad', 'overlap'], {}), '(img_file, lbl_file, patch_size, pad, overlap)\n', (2059, 2105), False, 'from data import data_utils\n'), ((2890, 2957), 'data.data_utils.patch_tile', 'data_utils.patch_tile', (['img_file', 'lbl_file', 'patch_size', 'pad', 'overlap'], {}), '(img_file, lbl_file, patch_size, pad, overlap)\n', (2911, 2957), False, 'from data import data_utils\n'), ((3673, 3706), 'os.path.join', 'os.path.join', (['img_dir', '"""*RGB.jpg"""'], {}), "(img_dir, '*RGB.jpg')\n", (3685, 3706), False, 'import os\n'), ((333, 367), 'os.path.join', 'os.path.join', (['data_dir', '"""*RGB.jpg"""'], {}), "(data_dir, '*RGB.jpg')\n", (345, 367), False, 'import os\n'), ((401, 434), 'os.path.join', 'os.path.join', (['data_dir', '"""*GT.png"""'], {}), "(data_dir, '*GT.png')\n", (413, 434), False, 'import os\n'), 
((1604, 1639), 'mrs_utils.misc_utils.float2str', 'misc_utils.float2str', (['valid_percent'], {}), '(valid_percent)\n', (1624, 1639), False, 'from mrs_utils import misc_utils, process_block\n'), ((1742, 1777), 'mrs_utils.misc_utils.float2str', 'misc_utils.float2str', (['valid_percent'], {}), '(valid_percent)\n', (1762, 1777), False, 'from mrs_utils import misc_utils, process_block\n'), ((4237, 4287), 'os.path.join', 'os.path.join', (['"""/home/wh145/data/caemo"""', '"""*RGB.jpg"""'], {}), "('/home/wh145/data/caemo', '*RGB.jpg')\n", (4249, 4287), False, 'import os\n'), ((1539, 1587), 'os.path.join', 'os.path.join', (['save_dir', '"""file_list_train_{}.txt"""'], {}), "(save_dir, 'file_list_train_{}.txt')\n", (1551, 1587), False, 'import os\n'), ((1677, 1725), 'os.path.join', 'os.path.join', (['save_dir', '"""file_list_valid_{}.txt"""'], {}), "(save_dir, 'file_list_valid_{}.txt')\n", (1689, 1725), False, 'import os\n'), ((2197, 2271), 'mrs_utils.vis_utils.compare_figures', 'vis_utils.compare_figures', (['[rgb_patch, gt_patch]', '(1, 2)'], {'fig_size': '(12, 5)'}), '([rgb_patch, gt_patch], (1, 2), fig_size=(12, 5))\n', (2222, 2271), False, 'from mrs_utils import vis_utils\n'), ((3049, 3123), 'mrs_utils.vis_utils.compare_figures', 'vis_utils.compare_figures', (['[rgb_patch, gt_patch]', '(1, 2)'], {'fig_size': '(12, 5)'}), '([rgb_patch, gt_patch], (1, 2), fig_size=(12, 5))\n', (3074, 3123), False, 'from mrs_utils import vis_utils\n'), ((3906, 3931), 'os.path.dirname', 'os.path.dirname', (['__file__'], {}), '(__file__)\n', (3921, 3931), False, 'import os\n'), ((4011, 4036), 'os.path.dirname', 'os.path.dirname', (['__file__'], {}), '(__file__)\n', (4026, 4036), False, 'import os\n'), ((1952, 1978), 'os.path.basename', 'os.path.basename', (['img_file'], {}), '(img_file)\n', (1968, 1978), False, 'import os\n'), ((2804, 2830), 'os.path.basename', 'os.path.basename', (['img_file'], {}), '(img_file)\n', (2820, 2830), False, 'import os\n')] |
import dateutil
import pytest
from testsuite.plugins import mockserver
from testsuite.utils import json_util
# Fixed "current time" injected into the $dateDiff substitutions under test.
NOW = dateutil.parser.parse('2019-09-19-13:04:00.000000')

# Plain-HTTP mockserver fixture — presumably (host, port, base_url, ssl_info);
# verify against MockserverInfo's definition.
MOCKSERVER_INFO = mockserver.MockserverInfo(
    'localhost', 123, 'http://localhost:123/', None,
)
# HTTPS mockserver fixture, with certificate/key file paths for SSL.
MOCKSERVER_SSL_INFO = mockserver.MockserverInfo(
    'localhost',
    456,
    'https://localhost:456/',
    mockserver.SslInfo('/some_dir/cert.cert', '/some_dir/cert.key'),
)
@pytest.mark.parametrize(
    'json_input,expected_result',
    [
        ( # simple list
            [{'some_date': {'$dateDiff': 0}}, 'regular_element'], # json_input
            [{'some_date': NOW}, 'regular_element'], # expected_result
        ),
        ( # simple dict
            { # json_input
                'some_date': {'$dateDiff': 0},
                'regular_key': 'regular_value',
            },
            {'some_date': NOW, 'regular_key': 'regular_value'}, # expected_result
        ),
        ( # nested list and dict
            { # json_input
                'regular_root_key': 'regular_root_value',
                'root_date': {'$dateDiff': 0},
                'parent_key': {
                    'nested_date': {'$dateDiff': 0},
                    'nested_list': [
                        'regular_element1',
                        {'$dateDiff': 0},
                        {'$dateDiff': 0},
                        'regular_element2',
                    ],
                },
            },
            { # expected_result
                'regular_root_key': 'regular_root_value',
                'root_date': NOW,
                'parent_key': {
                    'nested_date': NOW,
                    'nested_list': [
                        'regular_element1',
                        NOW,
                        NOW,
                        'regular_element2',
                    ],
                },
            },
        ),
    ],
)
def test_substitute_now(json_input, expected_result):
    """`{'$dateDiff': 0}` markers are replaced by NOW at any nesting depth,
    while regular keys and elements are left untouched."""
    result = json_util.substitute(json_input, now=NOW)
    assert result == expected_result
@pytest.mark.parametrize(
    'json_input,expected_result',
    [
        (
            {'client_url': {'$mockserver': '/path'}},
            {'client_url': 'http://localhost:123/path'},
        ),
        (
            {'client_url': {'$mockserver': '/path', '$schema': False}},
            {'client_url': 'localhost:123/path'},
        ),
    ],
)
def test_substitute_mockserver(json_input, expected_result):
    """`$mockserver` markers expand to the HTTP mockserver base URL;
    `$schema: False` drops the scheme prefix."""
    substituted = json_util.substitute(json_input, mockserver=MOCKSERVER_INFO)
    assert substituted == expected_result
@pytest.mark.parametrize(
    'json_input,expected_result',
    [
        (
            {'client_url': {'$mockserver_https': '/path'}},
            {'client_url': 'https://localhost:456/path'},
        ),
        (
            {'client_url': {'$mockserver_https': '/path', '$schema': False}},
            {'client_url': 'localhost:456/path'},
        ),
    ],
)
def test_substitute_mockserver_https(json_input, expected_result):
    """`$mockserver_https` markers expand to the HTTPS mockserver base URL;
    `$schema: False` drops the scheme prefix."""
    substituted = json_util.substitute(
        json_input, mockserver_https=MOCKSERVER_SSL_INFO,
    )
    assert substituted == expected_result
| [
"dateutil.parser.parse",
"testsuite.utils.json_util.substitute",
"testsuite.plugins.mockserver.MockserverInfo",
"testsuite.plugins.mockserver.SslInfo",
"pytest.mark.parametrize"
] | [((117, 168), 'dateutil.parser.parse', 'dateutil.parser.parse', (['"""2019-09-19-13:04:00.000000"""'], {}), "('2019-09-19-13:04:00.000000')\n", (138, 168), False, 'import dateutil\n'), ((187, 261), 'testsuite.plugins.mockserver.MockserverInfo', 'mockserver.MockserverInfo', (['"""localhost"""', '(123)', '"""http://localhost:123/"""', 'None'], {}), "('localhost', 123, 'http://localhost:123/', None)\n", (212, 261), False, 'from testsuite.plugins import mockserver\n'), ((448, 1143), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""json_input,expected_result"""', "[([{'some_date': {'$dateDiff': 0}}, 'regular_element'], [{'some_date': NOW},\n 'regular_element']), ({'some_date': {'$dateDiff': 0}, 'regular_key':\n 'regular_value'}, {'some_date': NOW, 'regular_key': 'regular_value'}),\n ({'regular_root_key': 'regular_root_value', 'root_date': {'$dateDiff': \n 0}, 'parent_key': {'nested_date': {'$dateDiff': 0}, 'nested_list': [\n 'regular_element1', {'$dateDiff': 0}, {'$dateDiff': 0},\n 'regular_element2']}}, {'regular_root_key': 'regular_root_value',\n 'root_date': NOW, 'parent_key': {'nested_date': NOW, 'nested_list': [\n 'regular_element1', NOW, NOW, 'regular_element2']}})]"], {}), "('json_input,expected_result', [([{'some_date': {\n '$dateDiff': 0}}, 'regular_element'], [{'some_date': NOW},\n 'regular_element']), ({'some_date': {'$dateDiff': 0}, 'regular_key':\n 'regular_value'}, {'some_date': NOW, 'regular_key': 'regular_value'}),\n ({'regular_root_key': 'regular_root_value', 'root_date': {'$dateDiff': \n 0}, 'parent_key': {'nested_date': {'$dateDiff': 0}, 'nested_list': [\n 'regular_element1', {'$dateDiff': 0}, {'$dateDiff': 0},\n 'regular_element2']}}, {'regular_root_key': 'regular_root_value',\n 'root_date': NOW, 'parent_key': {'nested_date': NOW, 'nested_list': [\n 'regular_element1', NOW, NOW, 'regular_element2']}})])\n", (471, 1143), False, 'import pytest\n'), ((2077, 2335), 'pytest.mark.parametrize', 'pytest.mark.parametrize', 
(['"""json_input,expected_result"""', "[({'client_url': {'$mockserver': '/path'}}, {'client_url':\n 'http://localhost:123/path'}), ({'client_url': {'$mockserver': '/path',\n '$schema': False}}, {'client_url': 'localhost:123/path'})]"], {}), "('json_input,expected_result', [({'client_url': {\n '$mockserver': '/path'}}, {'client_url': 'http://localhost:123/path'}),\n ({'client_url': {'$mockserver': '/path', '$schema': False}}, {\n 'client_url': 'localhost:123/path'})])\n", (2100, 2335), False, 'import pytest\n'), ((2609, 2879), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""json_input,expected_result"""', "[({'client_url': {'$mockserver_https': '/path'}}, {'client_url':\n 'https://localhost:456/path'}), ({'client_url': {'$mockserver_https':\n '/path', '$schema': False}}, {'client_url': 'localhost:456/path'})]"], {}), "('json_input,expected_result', [({'client_url': {\n '$mockserver_https': '/path'}}, {'client_url':\n 'https://localhost:456/path'}), ({'client_url': {'$mockserver_https':\n '/path', '$schema': False}}, {'client_url': 'localhost:456/path'})])\n", (2632, 2879), False, 'import pytest\n'), ((378, 441), 'testsuite.plugins.mockserver.SslInfo', 'mockserver.SslInfo', (['"""/some_dir/cert.cert"""', '"""/some_dir/cert.key"""'], {}), "('/some_dir/cert.cert', '/some_dir/cert.key')\n", (396, 441), False, 'from testsuite.plugins import mockserver\n'), ((1995, 2036), 'testsuite.utils.json_util.substitute', 'json_util.substitute', (['json_input'], {'now': 'NOW'}), '(json_input, now=NOW)\n', (2015, 2036), False, 'from testsuite.utils import json_util\n'), ((2508, 2568), 'testsuite.utils.json_util.substitute', 'json_util.substitute', (['json_input'], {'mockserver': 'MOCKSERVER_INFO'}), '(json_input, mockserver=MOCKSERVER_INFO)\n', (2528, 2568), False, 'from testsuite.utils import json_util\n'), ((3059, 3129), 'testsuite.utils.json_util.substitute', 'json_util.substitute', (['json_input'], {'mockserver_https': 'MOCKSERVER_SSL_INFO'}), '(json_input, 
mockserver_https=MOCKSERVER_SSL_INFO)\n', (3079, 3129), False, 'from testsuite.utils import json_util\n')] |
from Level.Render import Render
from Level.Data import Data
from Constants import *
import os
class Level:
    """A level stored on disk: a folder of ``data*`` section files, a renderer
    and the currently open data section."""

    def __init__(self, folder, main):
        self.main = main
        self.name = folder
        self.folder = LEVEL_PATH + "/" + folder
        # Collect the data section files ("data*") present in the level folder.
        self.dataFiles = []
        for file in os.listdir(self.folder):
            if file.startswith("data"):
                self.dataFiles.append(file)
        self.render = Render(self, main)
        self.data = Data(self)

    def rename(self, name):
        """Rename the level, move its folder on disk and refresh the GUI."""
        self.name = name
        folder = LEVEL_PATH + "/" + name
        os.rename(self.folder, folder)
        self.folder = folder
        self.main.levelSelection.levelGuiHandler.updateText()

    def openSection(self, number):
        """Close the current data section and open section ``number``."""
        self.close()
        self.data = Data(self, number)

    def save(self):
        """Persist the level data and every loaded region.

        Fix: a stray ``region.save()`` after the loop re-saved the last region
        a second time (and raised NameError when there were no regions).
        """
        self.data.save()
        for region in self.data.regions:
            if region.loaded:
                region.save()

    def close(self):
        """Release the current data section."""
        self.data.close()
        del self.data
| [
"os.rename",
"os.listdir",
"Level.Data.Data",
"Level.Render.Render"
] | [((291, 314), 'os.listdir', 'os.listdir', (['self.folder'], {}), '(self.folder)\n', (301, 314), False, 'import os\n'), ((444, 462), 'Level.Render.Render', 'Render', (['self', 'main'], {}), '(self, main)\n', (450, 462), False, 'from Level.Render import Render\n'), ((483, 493), 'Level.Data.Data', 'Data', (['self'], {}), '(self)\n', (487, 493), False, 'from Level.Data import Data\n'), ((601, 631), 'os.rename', 'os.rename', (['self.folder', 'folder'], {}), '(self.folder, folder)\n', (610, 631), False, 'import os\n'), ((800, 818), 'Level.Data.Data', 'Data', (['self', 'number'], {}), '(self, number)\n', (804, 818), False, 'from Level.Data import Data\n')] |
"""Solution of the exercises of Optimization of compute bound Python code"""
import math
import cmath
import numpy as np
import numexpr as ne
import numba as nb
# Module-level so every implementation variant below reads the same values.
# Maximum strain at the crystal surface (dimensionless).
e0 = 0.01
# Width of the strain profile below the surface (in unit cells).
w = 5.0
# Python: Circular crystal ###
def circ_python_1(N, h, k):
    """Diffraction intensity |F(h, k)|^2 of an unstrained circular crystal.

    The crystal is an N x N grid of unit cells; only cells inside the circle
    of radius N/2 centred on (N/2, N/2) contribute to the structure factor.

    :param N: linear size of the unit-cell grid
    :param h, k: 1D arrays of reciprocal-space coordinates
    :return: (h.size, k.size) array of intensities
    """
    axis = np.arange(N) - N / 2
    inside = (axis.reshape(-1, 1) ** 2 + axis.reshape(1, -1) ** 2) <= (N / 2) ** 2
    # Cells inside the circle, in row-major order (matches the original scan order).
    cells = [(int(n), int(m)) for n, m in np.argwhere(inside)]
    intensity = np.zeros((h.size, k.size))
    for row, q_h in enumerate(h):
        for col, q_k in enumerate(k):
            amplitude = 0.0
            for n, m in cells:
                amplitude += cmath.exp(2j * np.pi * (q_h * n + q_k * m))
            intensity[row, col] = abs(amplitude) ** 2
    return intensity
# Alternative using Python `sum`
def circ_python_1_alt(N, h, k):
    """Unstrained circular crystal, precomputing the in-circle cell list.

    Same result as :func:`circ_python_1`; the membership test is hoisted out
    of the reciprocal-space loops.
    """
    radius_sq = (N / 2) ** 2
    cells = []
    for n in range(N):
        dx_sq = (n - N / 2) ** 2
        for m in range(N):
            if dx_sq + (m - N / 2) ** 2 <= radius_sq:
                cells.append((n, m))
    out = np.zeros((h.size, k.size))
    for i, q_h in enumerate(h):
        for j, q_k in enumerate(k):
            # Structure factor summed over the in-circle cells only.
            amp = sum(cmath.exp(2j * np.pi * (q_h * n + q_k * m)) for n, m in cells)
            out[i][j] = abs(amp) ** 2
    return out
# Python: Circular strained crystal ###
def circ_python(N, h, k):
    """Diffraction intensity of a circular crystal with radial surface strain.

    Each cell inside the circle of radius N/2 is displaced radially by a
    tanh strain profile (module globals ``e0`` and ``w``); the intensity is
    |sum exp(2i*pi*(h*x + k*y))|^2 over the strained positions.
    """
    half = N / 2
    strained = {}
    for i in range(N):
        dx = i - half
        for j in range(N):
            dy = j - half
            r = (dx * dx + dy * dy) ** 0.5
            if r <= half:
                eps = e0 * (1 + math.tanh((r - half) / w))
                strained[(i, j)] = (i + eps * dx, j + eps * dy)
    out = np.zeros((h.size, k.size))
    for i_h, q_h in enumerate(h):
        for i_k, q_k in enumerate(k):
            amp = 0.0
            # Dict preserves insertion order, so the summation order matches
            # the original row-major cell scan.
            for x_s, y_s in strained.values():
                amp += cmath.exp(2j * np.pi * (q_h * x_s + q_k * y_s))
            out[i_h, i_k] = abs(amp) ** 2
    return out
# Alternative computing list of strained position
def circ_python_alt(N, h, k):
    """Strained circular crystal, precomputing the strained position list.

    Same result as :func:`circ_python`; the strain displacement is computed
    once per cell before the reciprocal-space loops.
    """
    R = N / 2

    def displaced(n, m):
        # Radial displacement of cell (n, m); None when outside the circle.
        x = n - R
        y = m - R
        radius = (x ** 2 + y ** 2) ** 0.5
        if radius > R:
            return None
        eps = e0 * (1 + math.tanh((radius - R) / w))
        return (n + eps * x, m + eps * y)

    strained_pos = [
        p for p in (displaced(n, m) for n in range(N) for m in range(N))
        if p is not None
    ]
    result = np.zeros((h.size, k.size))
    for i_h, v_h in enumerate(h):
        for i_k, v_k in enumerate(k):
            amp = sum(
                cmath.exp(2j * np.pi * (v_h * n_s + v_k * m_s))
                for n_s, m_s in strained_pos
            )
            result[i_h][i_k] = abs(amp) ** 2
    return result
# numpy ###
def circ_numpy(N, h, k):
    """Fully vectorised strained circular crystal (see :func:`circ_python`).

    Broadcasting layout: axis 0 = h, axis 1 = k, axes 2-3 = the unit-cell
    grid; the cell axes are summed away at the end.
    """
    half = N / 2
    q_h = h.reshape(-1, 1, 1, 1)
    q_k = k.reshape(1, -1, 1, 1)
    base = np.arange(N)
    n = base.reshape(1, 1, -1, 1)
    m = base.reshape(1, 1, 1, -1)
    radius = np.sqrt((n - half) ** 2 + (m - half) ** 2)
    strain = e0 * (1.0 + np.tanh((radius - half) / w))
    # Phase at the strained cell positions; cells outside the circle are
    # zeroed by the boolean mask.
    phase = q_h * (n + strain * (n - half)) + q_k * (m + strain * (m - half))
    amplitude = (radius <= half) * np.exp(2j * np.pi * phase)
    return np.abs(amplitude.sum(axis=(2, 3))) ** 2
# numexpr ###
def circ_numexpr(N, h, k):
    """Strained circular crystal evaluated with numexpr (see circ_numpy).

    NOTE: ne.evaluate() resolves the names inside its expression strings
    (N_2, n, m, h, k, strain, j2pi and the globals e0, w) from the calling
    frame — renaming any of these locals would change behavior.
    """
    N_2 = N / 2
    # Broadcasting layout: axis 0 = h, axis 1 = k, axes 2-3 = unit-cell grid.
    h = h.reshape(-1, 1, 1, 1)
    k = k.reshape(1, -1, 1, 1)
    n = np.arange(N).reshape(1, 1, -1, 1)
    m = np.arange(N).reshape(1, 1, 1, -1)
    radius = ne.evaluate("sqrt((n - N_2)**2 + (m - N_2)**2)")
    strain = ne.evaluate("e0 * (1 + tanh((radius-N_2) / w))")
    j2pi = np.pi * 2j
    # Cells outside the circle contribute 0; inside, the phase is taken at
    # the strained positions n + strain*(n-N_2), m + strain*(m-N_2).
    tmp = ne.evaluate(
        "where(radius > N_2, 0, exp(j2pi*(h*(n+strain*(n-N_2)) + k*(m+strain*(m-N_2)))))"
    )
    # Sum over the cell axes, then intensity = |amplitude|^2.
    result = abs(tmp.sum(axis=(2, 3))) ** 2
    return result
# numba ###
@nb.jit(parallel=True)
def circ_numba(N, h, k):
    """Strained circular crystal, numba-jitted (see circ_python).

    The outer h loop runs in parallel via nb.prange; reads the module
    globals e0 and w.
    """
    result = np.zeros((h.size, k.size), dtype=np.float64)
    N_2 = N / 2
    for h_i in nb.prange(h.size):  # loop over the reciprocal space coordinates
        for k_i in range(k.size):
            tmp = 0j
            for n in range(N):  # loop and sum over unit-cells
                for m in range(N):
                    radius = math.sqrt((n - N_2) ** 2 + (m - N_2) ** 2)
                    if radius > (N_2):
                        # Outside the circle: no contribution. A plain
                        # `continue` here miscompiled under numba, hence the
                        # explicit zero value.
                        value = 0j
                        # continue  # Numba isn't working using the same continue pattern as below
                    else:
                        # Radial tanh strain profile; phase at strained positions.
                        strain = e0 * (1 + math.tanh((radius - N_2) / w))
                        p_n = n + strain * (n - N_2)
                        p_m = m + strain * (m - N_2)
                        value = np.exp(2j * cmath.pi * (h[h_i] * p_n + k[k_i] * p_m))
                    tmp += value
            result[h_i, k_i] = abs(tmp) ** 2
    return result
| [
"numpy.sqrt",
"math.sqrt",
"numpy.tanh",
"numpy.exp",
"numpy.zeros",
"numba.jit",
"cmath.exp",
"math.tanh",
"numexpr.evaluate",
"numba.prange",
"numpy.arange"
] | [((4845, 4866), 'numba.jit', 'nb.jit', ([], {'parallel': '(True)'}), '(parallel=True)\n', (4851, 4866), True, 'import numba as nb\n'), ((517, 543), 'numpy.zeros', 'np.zeros', (['(h.size, k.size)'], {}), '((h.size, k.size))\n', (525, 543), True, 'import numpy as np\n'), ((1293, 1319), 'numpy.zeros', 'np.zeros', (['(h.size, k.size)'], {}), '((h.size, k.size))\n', (1301, 1319), True, 'import numpy as np\n'), ((2172, 2198), 'numpy.zeros', 'np.zeros', (['(h.size, k.size)'], {}), '((h.size, k.size))\n', (2180, 2198), True, 'import numpy as np\n'), ((3345, 3371), 'numpy.zeros', 'np.zeros', (['(h.size, k.size)'], {}), '((h.size, k.size))\n', (3353, 3371), True, 'import numpy as np\n'), ((4003, 4043), 'numpy.sqrt', 'np.sqrt', (['((n - N_2) ** 2 + (m - N_2) ** 2)'], {}), '((n - N_2) ** 2 + (m - N_2) ** 2)\n', (4010, 4043), True, 'import numpy as np\n'), ((4514, 4562), 'numexpr.evaluate', 'ne.evaluate', (['"""sqrt((n - N_2)**2 + (m - N_2)**2)"""'], {}), "('sqrt((n - N_2)**2 + (m - N_2)**2)')\n", (4525, 4562), True, 'import numexpr as ne\n'), ((4576, 4624), 'numexpr.evaluate', 'ne.evaluate', (['"""e0 * (1 + tanh((radius-N_2) / w))"""'], {}), "('e0 * (1 + tanh((radius-N_2) / w))')\n", (4587, 4624), True, 'import numexpr as ne\n'), ((4657, 4761), 'numexpr.evaluate', 'ne.evaluate', (['"""where(radius > N_2, 0, exp(j2pi*(h*(n+strain*(n-N_2)) + k*(m+strain*(m-N_2)))))"""'], {}), "(\n 'where(radius > N_2, 0, exp(j2pi*(h*(n+strain*(n-N_2)) + k*(m+strain*(m-N_2)))))'\n )\n", (4668, 4761), True, 'import numexpr as ne\n'), ((4905, 4949), 'numpy.zeros', 'np.zeros', (['(h.size, k.size)'], {'dtype': 'np.float64'}), '((h.size, k.size), dtype=np.float64)\n', (4913, 4949), True, 'import numpy as np\n'), ((4981, 4998), 'numba.prange', 'nb.prange', (['h.size'], {}), '(h.size)\n', (4990, 4998), True, 'import numba as nb\n'), ((4208, 4250), 'numpy.exp', 'np.exp', (['(2.0j * np.pi * (h * p_n + k * p_m))'], {}), '(2.0j * np.pi * (h * p_n + k * p_m))\n', (4214, 4250), True, 'import numpy as 
np\n'), ((3914, 3926), 'numpy.arange', 'np.arange', (['N'], {}), '(N)\n', (3923, 3926), True, 'import numpy as np\n'), ((3956, 3968), 'numpy.arange', 'np.arange', (['N'], {}), '(N)\n', (3965, 3968), True, 'import numpy as np\n'), ((4069, 4096), 'numpy.tanh', 'np.tanh', (['((radius - N_2) / w)'], {}), '((radius - N_2) / w)\n', (4076, 4096), True, 'import numpy as np\n'), ((4425, 4437), 'numpy.arange', 'np.arange', (['N'], {}), '(N)\n', (4434, 4437), True, 'import numpy as np\n'), ((4467, 4479), 'numpy.arange', 'np.arange', (['N'], {}), '(N)\n', (4476, 4479), True, 'import numpy as np\n'), ((379, 391), 'numpy.arange', 'np.arange', (['N'], {}), '(N)\n', (388, 391), True, 'import numpy as np\n'), ((425, 437), 'numpy.arange', 'np.arange', (['N'], {}), '(N)\n', (434, 437), True, 'import numpy as np\n'), ((5228, 5270), 'math.sqrt', 'math.sqrt', (['((n - N_2) ** 2 + (m - N_2) ** 2)'], {}), '((n - N_2) ** 2 + (m - N_2) ** 2)\n', (5237, 5270), False, 'import math\n'), ((893, 938), 'cmath.exp', 'cmath.exp', (['(2.0j * np.pi * (v_h * n + v_k * m))'], {}), '(2.0j * np.pi * (v_h * n + v_k * m))\n', (902, 938), False, 'import cmath\n'), ((2064, 2088), 'math.tanh', 'math.tanh', (['((r - N_2) / w)'], {}), '((r - N_2) / w)\n', (2073, 2088), False, 'import math\n'), ((2636, 2685), 'cmath.exp', 'cmath.exp', (['(2.0j * np.pi * (v_h * n_s + v_k * m_s))'], {}), '(2.0j * np.pi * (v_h * n_s + v_k * m_s))\n', (2645, 2685), False, 'import cmath\n'), ((3221, 3261), 'math.tanh', 'math.tanh', (['((radius - crystal_radius) / w)'], {}), '((radius - crystal_radius) / w)\n', (3230, 3261), False, 'import math\n'), ((5682, 5737), 'numpy.exp', 'np.exp', (['(2.0j * cmath.pi * (h[h_i] * p_n + k[k_i] * p_m))'], {}), '(2.0j * cmath.pi * (h[h_i] * p_n + k[k_i] * p_m))\n', (5688, 5737), True, 'import numpy as np\n'), ((1582, 1627), 'cmath.exp', 'cmath.exp', (['(2.0j * np.pi * (v_h * n + v_k * m))'], {}), '(2.0j * np.pi * (v_h * n + v_k * m))\n', (1591, 1627), False, 'import cmath\n'), ((3593, 3642), 
'cmath.exp', 'cmath.exp', (['(2.0j * np.pi * (v_h * n_s + v_k * m_s))'], {}), '(2.0j * np.pi * (v_h * n_s + v_k * m_s))\n', (3602, 3642), False, 'import cmath\n'), ((5513, 5542), 'math.tanh', 'math.tanh', (['((radius - N_2) / w)'], {}), '((radius - N_2) / w)\n', (5522, 5542), False, 'import math\n')] |
#!/usr/bin/python3
from pmapi.config import Config, get_logger
import os
import logging
import requests
import connexion
from flask import Flask, request
# Module-level wiring: a Connexion app wrapping Flask, with routes generated
# from the bundled OpenAPI spec.
logger = get_logger()
# NOTE(review): one-time self-registration with the deployment API, currently
# disabled; `update_env` (which would persist the returned TOKEN) is not
# defined here — confirm before re-enabling.
# if not Config.TOKEN:
#     data = {
#         "hostname": Config.HOSTNAME,
#         "ip": Config.IP,
#         "state": Config.STATE,
#         "url": Config.URL,
#         "service_type": Config.SERVICE_TYPE,
#         "roles": "'service', 'primemirror'",
#     }
#     logging.info("Registering Service: ".format(data))
#     r = requests.post("{}/register/service".format(Config.DEPLOYMENT_API_URI), json=data, verify=False)
#     resp = r.json()
#     if "TOKEN" in resp:
#         update_env("TOKEN", resp["TOKEN"])
flask_app = connexion.FlaskApp(__name__)
# Both requests and responses are validated against the OpenAPI spec.
flask_app.add_api("openapi.yaml", validate_responses=True, strict_validation=True)
# Expose the underlying Flask app (e.g. for WSGI servers) and load its config.
app = flask_app.app
app.config.from_object(Config)
| [
"pmapi.config.get_logger",
"connexion.FlaskApp"
] | [((166, 178), 'pmapi.config.get_logger', 'get_logger', ([], {}), '()\n', (176, 178), False, 'from pmapi.config import Config, get_logger\n'), ((705, 733), 'connexion.FlaskApp', 'connexion.FlaskApp', (['__name__'], {}), '(__name__)\n', (723, 733), False, 'import connexion\n')] |
# Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
# SPDX-License-Identifier: Apache-2.0
__all__ = ['build_feature_head']
import torch
import torch.nn as nn
from rekognition_online_action_detection.utils.registry import Registry
# Registry mapping dataset names (cfg.DATA.DATA_NAME) to feature-head classes;
# populated via the @FEATURE_HEADS.register(...) decorators below.
FEATURE_HEADS = Registry()

# Flattened feature dimensionality of each supported backbone/modality pair,
# used to size the fusion layer in the feature heads.
FEATURE_SIZES = {
    'rgb_anet_resnet50': 2048,
    'flow_anet_resnet50': 2048,
    'rgb_kinetics_bninception': 1024,
    'flow_kinetics_bninception': 1024,
    'rgb_kinetics_resnet50': 2048,
    'flow_kinetics_resnet50': 2048,
}
@FEATURE_HEADS.register('THUMOS')
@FEATURE_HEADS.register('TVSeries')
class BaseFeatureHead(nn.Module):
    """Fuses visual and/or motion features and optionally projects them.

    The input modality (``visual``, ``motion`` or ``twostream``) selects which
    streams are used; when both are active their features are concatenated on
    the last dimension. An optional Linear+ReLU projection maps the fused
    features to ``d_model`` dimensions.
    """

    def __init__(self, cfg):
        super(BaseFeatureHead, self).__init__()
        modality = cfg.INPUT.MODALITY
        if modality not in ('visual', 'motion', 'twostream'):
            raise RuntimeError('Unknown modality of {}'.format(cfg.INPUT.MODALITY))
        # 'twostream' enables both streams; 'visual'/'motion' enable one.
        self.with_visual = 'motion' not in modality
        self.with_motion = 'visual' not in modality
        stream_sizes = []
        if self.with_visual:
            stream_sizes.append(FEATURE_SIZES[cfg.INPUT.VISUAL_FEATURE])
        if self.with_motion:
            stream_sizes.append(FEATURE_SIZES[cfg.INPUT.MOTION_FEATURE])
        fusion_size = sum(stream_sizes)
        self.d_model = fusion_size
        if cfg.MODEL.FEATURE_HEAD.LINEAR_ENABLED:
            # -1 keeps the fused size; any other value sets the output width.
            if cfg.MODEL.FEATURE_HEAD.LINEAR_OUT_FEATURES != -1:
                self.d_model = cfg.MODEL.FEATURE_HEAD.LINEAR_OUT_FEATURES
            self.input_linear = nn.Sequential(
                nn.Linear(fusion_size, self.d_model),
                nn.ReLU(inplace=True),
            )
        else:
            self.input_linear = nn.Identity()

    def forward(self, visual_input, motion_input):
        """Fuse the active streams and apply the (optional) projection."""
        if self.with_visual and self.with_motion:
            fused = torch.cat((visual_input, motion_input), dim=-1)
        elif self.with_visual:
            fused = visual_input
        else:
            fused = motion_input
        return self.input_linear(fused)
def build_feature_head(cfg):
    """Instantiate the feature head registered for ``cfg.DATA.DATA_NAME``."""
    return FEATURE_HEADS[cfg.DATA.DATA_NAME](cfg)
| [
"rekognition_online_action_detection.utils.registry.Registry",
"torch.nn.ReLU",
"torch.nn.Linear",
"torch.nn.Identity",
"torch.cat"
] | [((267, 277), 'rekognition_online_action_detection.utils.registry.Registry', 'Registry', ([], {}), '()\n', (275, 277), False, 'from rekognition_online_action_detection.utils.registry import Registry\n'), ((1847, 1860), 'torch.nn.Identity', 'nn.Identity', ([], {}), '()\n', (1858, 1860), True, 'import torch.nn as nn\n'), ((1990, 2037), 'torch.cat', 'torch.cat', (['(visual_input, motion_input)'], {'dim': '(-1)'}), '((visual_input, motion_input), dim=-1)\n', (1999, 2037), False, 'import torch\n'), ((1710, 1746), 'torch.nn.Linear', 'nn.Linear', (['fusion_size', 'self.d_model'], {}), '(fusion_size, self.d_model)\n', (1719, 1746), True, 'import torch.nn as nn\n'), ((1764, 1785), 'torch.nn.ReLU', 'nn.ReLU', ([], {'inplace': '(True)'}), '(inplace=True)\n', (1771, 1785), True, 'import torch.nn as nn\n')] |
import sys, os
# Locations of vendored external libraries, relative to the project root.
external_libs = {'Cleverhans v1.0.0': "externals/cleverhans",
                 'Tensorflow-Model-Resnet': "externals/tensorflow-models",
                 }

project_path = os.path.dirname(os.path.dirname(os.path.realpath(__file__)))

# Fix: dict.iteritems() is Python 2 only; .items() works on both 2 and 3.
for lib_name, lib_path in external_libs.items():
    lib_path = os.path.join(project_path, lib_path)
    if not os.listdir(lib_path):
        # Empty submodule directory: fetch all git submodules in one go.
        print("Fetching external libraries...")
        os.system("git submodule update --init --recursive")
    if lib_name == 'Tensorflow-Model-Resnet':
        # The resnet model dir ships without __init__.py; create an empty one
        # so it can be imported as a package.
        lib_token_fpath = os.path.join(lib_path, 'resnet', '__init__.py')
        if not os.path.isfile(lib_token_fpath):
            open(lib_token_fpath, 'a').close()
    sys.path.append(lib_path)
    print("Located %s" % lib_name)
# print (sys.path)
| [
"os.listdir",
"os.path.join",
"os.path.realpath",
"os.path.isfile",
"os.system",
"sys.path.append"
] | [((318, 354), 'os.path.join', 'os.path.join', (['project_path', 'lib_path'], {}), '(project_path, lib_path)\n', (330, 354), False, 'import sys, os\n'), ((742, 767), 'sys.path.append', 'sys.path.append', (['lib_path'], {}), '(lib_path)\n', (757, 767), False, 'import sys, os\n'), ((220, 246), 'os.path.realpath', 'os.path.realpath', (['__file__'], {}), '(__file__)\n', (236, 246), False, 'import sys, os\n'), ((362, 382), 'os.listdir', 'os.listdir', (['lib_path'], {}), '(lib_path)\n', (372, 382), False, 'import sys, os\n'), ((502, 516), 'os.system', 'os.system', (['cmd'], {}), '(cmd)\n', (511, 516), False, 'import sys, os\n'), ((590, 637), 'os.path.join', 'os.path.join', (['lib_path', '"""resnet"""', '"""__init__.py"""'], {}), "(lib_path, 'resnet', '__init__.py')\n", (602, 637), False, 'import sys, os\n'), ((653, 684), 'os.path.isfile', 'os.path.isfile', (['lib_token_fpath'], {}), '(lib_token_fpath)\n', (667, 684), False, 'import sys, os\n')] |
# Copyright 2020 The TensorFlow Authors All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
# Lint as: python3
"""A module for miscelaneous utils."""
import tensorflow as tf
def random_substr(str_tensor, max_words):
  """Select random substring if the input has more than max_words.

  Args:
    str_tensor: 1-D batch of strings.
    max_words: maximum number of whitespace-separated words to keep.

  Returns:
    1-D string tensor; each example with more than max_words words is
    replaced by a contiguous window of max_words words starting at a
    randomly drawn offset (the offset is folded with floormod, so the
    distribution over offsets is not exactly uniform).
  """
  # Ragged split: row_splits delimit each example's words in flat `words`.
  word_batch_r = tf.strings.split(str_tensor)
  row_splits = word_batch_r.row_splits
  words = word_batch_r.values
  start_idx = row_splits[:-1]
  end_idx = row_splits[1:]
  words_per_example = end_idx - start_idx
  ones = tf.ones_like(end_idx)
  # Number of valid window offsets per example, clamped to >= 1 so the
  # floormod below is always defined.
  max_val = tf.maximum(ones, words_per_example - max_words)
  max_words_batch = tf.reduce_max(words_per_example)
  # One random draw per example, folded into the per-example valid range.
  rnd = tf.random.uniform(
      tf.shape(start_idx), minval=0, maxval=max_words_batch, dtype=tf.int64)
  off_start_idx = tf.math.floormod(rnd, max_val)
  # Examples with <= max_words words (max_val == 1) keep their full length.
  new_words_per_example = tf.where(
      tf.equal(max_val, 1), words_per_example, ones * max_words)
  new_start_idx = start_idx + off_start_idx
  new_end_idx = new_start_idx + new_words_per_example
  # Boolean mask over the flat word tensor: keep word j iff it falls in
  # some example's [new_start, new_end) window.
  indices = tf.expand_dims(tf.range(tf.size(words), dtype=tf.int64), axis=0)
  within_limit = tf.logical_and(
      tf.greater_equal(indices, tf.expand_dims(new_start_idx, axis=1)),
      tf.less(indices, tf.expand_dims(new_end_idx, axis=1)))
  keep_indices = tf.reduce_any(within_limit, axis=0)
  keep_indices = tf.cast(keep_indices, dtype=tf.int32)
  # dynamic_partition: partition 1 holds the kept words, in order.
  _, selected_words = tf.dynamic_partition(words, keep_indices, 2)
  # Rebuild the ragged structure with the new per-example lengths.
  row_splits = tf.math.cumsum(new_words_per_example)
  row_splits = tf.concat([[0], row_splits], axis=0)
  new_tensor = tf.RaggedTensor.from_row_splits(
      values=selected_words, row_splits=row_splits)
  return tf.strings.reduce_join(new_tensor, axis=1, separator=" ")
| [
"tensorflow.math.cumsum",
"tensorflow.equal",
"tensorflow.shape",
"tensorflow.strings.split",
"tensorflow.reduce_max",
"tensorflow.RaggedTensor.from_row_splits",
"tensorflow.reduce_any",
"tensorflow.concat",
"tensorflow.strings.reduce_join",
"tensorflow.math.floormod",
"tensorflow.dynamic_partit... | [((922, 950), 'tensorflow.strings.split', 'tf.strings.split', (['str_tensor'], {}), '(str_tensor)\n', (938, 950), True, 'import tensorflow as tf\n'), ((1134, 1155), 'tensorflow.ones_like', 'tf.ones_like', (['end_idx'], {}), '(end_idx)\n', (1146, 1155), True, 'import tensorflow as tf\n'), ((1169, 1216), 'tensorflow.maximum', 'tf.maximum', (['ones', '(words_per_example - max_words)'], {}), '(ones, words_per_example - max_words)\n', (1179, 1216), True, 'import tensorflow as tf\n'), ((1238, 1270), 'tensorflow.reduce_max', 'tf.reduce_max', (['words_per_example'], {}), '(words_per_example)\n', (1251, 1270), True, 'import tensorflow as tf\n'), ((1396, 1426), 'tensorflow.math.floormod', 'tf.math.floormod', (['rnd', 'max_val'], {}), '(rnd, max_val)\n', (1412, 1426), True, 'import tensorflow as tf\n'), ((1895, 1930), 'tensorflow.reduce_any', 'tf.reduce_any', (['within_limit'], {'axis': '(0)'}), '(within_limit, axis=0)\n', (1908, 1930), True, 'import tensorflow as tf\n'), ((1949, 1986), 'tensorflow.cast', 'tf.cast', (['keep_indices'], {'dtype': 'tf.int32'}), '(keep_indices, dtype=tf.int32)\n', (1956, 1986), True, 'import tensorflow as tf\n'), ((2010, 2054), 'tensorflow.dynamic_partition', 'tf.dynamic_partition', (['words', 'keep_indices', '(2)'], {}), '(words, keep_indices, 2)\n', (2030, 2054), True, 'import tensorflow as tf\n'), ((2071, 2108), 'tensorflow.math.cumsum', 'tf.math.cumsum', (['new_words_per_example'], {}), '(new_words_per_example)\n', (2085, 2108), True, 'import tensorflow as tf\n'), ((2125, 2161), 'tensorflow.concat', 'tf.concat', (['[[0], row_splits]'], {'axis': '(0)'}), '([[0], row_splits], axis=0)\n', (2134, 2161), True, 'import tensorflow as tf\n'), ((2178, 2255), 'tensorflow.RaggedTensor.from_row_splits', 'tf.RaggedTensor.from_row_splits', ([], {'values': 'selected_words', 'row_splits': 'row_splits'}), '(values=selected_words, row_splits=row_splits)\n', (2209, 2255), True, 'import tensorflow as tf\n'), ((2274, 2331), 
'tensorflow.strings.reduce_join', 'tf.strings.reduce_join', (['new_tensor'], {'axis': '(1)', 'separator': '""" """'}), "(new_tensor, axis=1, separator=' ')\n", (2296, 2331), True, 'import tensorflow as tf\n'), ((1306, 1325), 'tensorflow.shape', 'tf.shape', (['start_idx'], {}), '(start_idx)\n', (1314, 1325), True, 'import tensorflow as tf\n'), ((1471, 1491), 'tensorflow.equal', 'tf.equal', (['max_val', '(1)'], {}), '(max_val, 1)\n', (1479, 1491), True, 'import tensorflow as tf\n'), ((1667, 1681), 'tensorflow.size', 'tf.size', (['words'], {}), '(words)\n', (1674, 1681), True, 'import tensorflow as tf\n'), ((1775, 1812), 'tensorflow.expand_dims', 'tf.expand_dims', (['new_start_idx'], {'axis': '(1)'}), '(new_start_idx, axis=1)\n', (1789, 1812), True, 'import tensorflow as tf\n'), ((1839, 1874), 'tensorflow.expand_dims', 'tf.expand_dims', (['new_end_idx'], {'axis': '(1)'}), '(new_end_idx, axis=1)\n', (1853, 1874), True, 'import tensorflow as tf\n')] |
## Baekjoon problem 13910
## "Grand Opening" (개업)
## dynamic programming
## (jjajangmyeon day)
'''
##! ex) N = 4, 5그릇 이상 요리 X, 4사이즈 윅에 3그릇 이하 요리 X => 4윅에 4개
##* ex) N = 5, 윅 사이즈 1,3 / first : 1+3 = 4 그릇, second : 1 => 5 그릇 --> 2번의 요리로 주문 처리
##* 주문 받은 짜장면의 수, 가지고 있는 윅의 크기 => 주문 처리
# In1 ) N M : (주문 받은 짜장면의 수) N | (가지고 있는 윅의 수) M
# In2 ) S : 윅의 크기 S가 M개 만큼 주어짐 (같은 크기의 윅을 여러개 가지고 있을 수 있음)
# out ) 혜빈이가 모든 주문을 처리하기 위해 해야 하는 최소 요리수 | 주문 처리 못할시 -1
'''
'''
ex1I) 5주문 2개윅
ex1I) 1과 3사이즈
out ) 2
ex2I) 6주문 2개윅
ex2I) 1과 3사이즈
out ) 2
5 2
2 4
=> 4|1 1<2 : -1
13 3
'''
import sys
## prototype (first attempt)
def cooking(N,M,wig):
    """First attempt at the minimum-cook count (author-flagged as broken).

    Greedily subtracts wok sizes from largest to smallest; once the
    smallest wok (index 0) has been used, tries to finish the remainder
    with multiples of that smallest wok.

    NOTE(review): known defects, kept as-is —
      * `breakpoint` shadows the Python built-in of the same name;
      * when `temp % k == 0` with temp > 0, count is bumped by temp//k but
        temp itself is never zeroed, so the outer while can keep running;
      * e.g. cooking(4, 1, [3]) loops forever: after temp becomes 1 the
        for-body only hits `continue` and temp never changes.
    """
    count = 0
    temp = N
    breakpoint = False
    while temp != 0:
        for i in range(M-1, -1, -1):
            if wig[i] < temp or wig[i] == temp:
                temp -= wig[i]
                count += 1
            else:
                continue
            # Reached the smallest wok: try to pay off the remainder with it.
            if i == 0:
                k = wig[0]
                if temp % k == 0:
                    count = count + (temp//k)
                else:
                    # Remainder not a multiple of the smallest wok: give up.
                    breakpoint = True
                    count = -1
            if breakpoint == True:
                break
    return count
## test 1 passes, test 2 fails
def cooking2(N, M, wig):
    """Greedy cook count: each round, subtract every wok (largest first) that fits.

    `wig` must be sorted ascending; returns the number of cooking rounds,
    or -1 as soon as the leftover amount is smaller than the smallest wok
    (the order can then never be filled exactly).
    """
    remaining = N
    dishes = 0
    while remaining != 0:
        # Base case: even the smallest wok no longer fits the remainder.
        if wig[0] > remaining:
            dishes = -1
            break
        for idx in range(M - 1, -1, -1):  # indices M-1 down to 0
            if wig[idx] <= remaining:
                remaining -= wig[idx]
                dishes += 1
    return dishes
## incomplete
def cooking4(N, M, wig):
    """Third attempt (author-marked incomplete).

    Intended idea: if the remainder is an exact multiple of some wok size,
    finish with temp // wig[j] cooks; otherwise fall back to greedy
    subtraction.

    NOTE(review): the inner `if temp % wig[j] == 0` is dead code — it sits
    under `temp % wig[j] > 1`, which already excludes a zero remainder, so
    the multiple-of-a-wok shortcut can never fire.
    """
    temp = N
    count = 0
    while temp != 0:
        ## base case
        if wig[0] > temp:
            count = -1
            break
        for j in range(M-1, -1, -1):
            ## repetition
            if temp % wig[j] > 1: ## 7 = 3*2 + 1 ## 8 = 3*2 + 1*2
                if temp % wig[j] == 0: ## 6 = 3*2  (unreachable, see docstring)
                    count += temp // wig[j]
                    temp = 0
                    break
            else:
                ## repetition not considered
                for i in range(M-1, -1, -1): # from index M-1 down to 0
                    if wig[i] < temp or wig[i] == temp:
                        temp -= wig[i]
                        count += 1
    return count
if __name__ == "__main__":
    print('hello')
    # Line 1: N (servings ordered), M (number of woks).
    N, M = map(int, sys.stdin.readline().split())
    # Line 2: the M wok sizes.
    wig = list(map(int, sys.stdin.readline().split()))
    wig.sort()# sort wok sizes ascending
    # print(wig)
    # print(cooking(N, M, wig)) ## output is wrong
    # print(cooking2(N,M,wig)) ## test case 2: repetition not handled
    print(cooking4(N,M,wig)) ## incomplete
"sys.stdin.readline"
] | [((2245, 2265), 'sys.stdin.readline', 'sys.stdin.readline', ([], {}), '()\n', (2263, 2265), False, 'import sys\n'), ((2299, 2319), 'sys.stdin.readline', 'sys.stdin.readline', ([], {}), '()\n', (2317, 2319), False, 'import sys\n')] |
from __future__ import absolute_import
from .blockcipher import *
import Crypto.Cipher.ARC2
import Crypto
from pkg_resources import parse_version
def new(key, mode=MODE_ECB, IV=None, counter=None, segment_size=None, effective_keylen=None):
    """Build an ARC2 cipher object (pycrypto algorithm + pycryptoplus chaining modes).

    key              = raw string containing the key
    mode             = python_AES.MODE_ECB/CBC/CFB/OFB/CTR/CMAC, default is ECB
    IV               = IV as a raw string, default is "all zero" IV
                       -> only needed for CBC mode
    counter          = counter object (CryptoPlus.Util.util.Counter)
                       -> only needed for CTR mode
    segment_size     = amount of bits to use from the keystream in each chain part
                       -> supported values: multiple of 8 between 8 and the
                          blocksize of the cipher (only per byte access
                          possible), default is 8
                       -> only needed for CFB mode
    effective_keylen = how much bits to effectively use from the supplied key
                       -> will only be used when the pycrypto version on your
                          system is >2.0.1

    EXAMPLES:
    **********
    IMPORTING:
    -----------
    >>> import codecs
    >>> from CryptoPlus.Cipher import ARC2

    http://www.ietf.org/rfc/rfc2268.txt
    Doctest will fail when using pycrypto 2.0.1 and older
    ------------------------------------
    >>> key = codecs.decode("0000000000000000", 'hex')
    >>> plaintext = codecs.decode("0000000000000000", 'hex')
    >>> ek = 63
    >>> cipher = ARC2.new(key,ARC2.MODE_ECB,effective_keylen=ek)
    >>> codecs.encode(cipher.encrypt(plaintext), 'hex')
    b'ebb773f993278eff'
    """
    # Keyword call makes the argument order explicit (the ARC2 constructor
    # takes effective_keylen before segment_size, unlike this signature).
    return ARC2(key=key, mode=mode, IV=IV, counter=counter,
                effective_keylen=effective_keylen, segment_size=segment_size)
class ARC2(BlockCipher):
    def __init__(self, key, mode, IV, counter, effective_keylen, segment_size):
        """Wrap pycrypto's ARC2 algorithm with the pycryptoplus chain modes."""
        # "effective_keylen" is only understood by pycrypto releases newer
        # than 2.0.1, so it is dropped from the kwargs on older installs.
        if parse_version(Crypto.__version__) <= parse_version("2.0.1"):
            args = {}
        else:
            args = {'effective_keylen': effective_keylen}
        self.blocksize = 8
        BlockCipher.__init__(self, key, mode, IV, counter,
                             Crypto.Cipher.ARC2.new, segment_size, args)
def _test():
    """Run the doctests embedded in this module (see new()'s docstring)."""
    import doctest
    doctest.testmod()
if __name__ == "__main__":
    _test()
| [
"pkg_resources.parse_version",
"doctest.testmod"
] | [((2317, 2334), 'doctest.testmod', 'doctest.testmod', ([], {}), '()\n', (2332, 2334), False, 'import doctest\n'), ((1910, 1943), 'pkg_resources.parse_version', 'parse_version', (['Crypto.__version__'], {}), '(Crypto.__version__)\n', (1923, 1943), False, 'from pkg_resources import parse_version\n'), ((1947, 1969), 'pkg_resources.parse_version', 'parse_version', (['"""2.0.1"""'], {}), "('2.0.1')\n", (1960, 1969), False, 'from pkg_resources import parse_version\n')] |
# Utilities
import json
from sqlalchemy import create_engine
from sqlalchemy.orm import sessionmaker
# Imports
from discord.ext import commands
from discord import Guild, Role
# Loading config file...
with open("./config.json", "r", encoding="utf-8") as config:
    # Parsed once at import time; ["config"]["db"] supplies the DB URL below.
    configFile = json.load(config)
class Roles(commands.Cog, name="Roles management"):
    """Cog exposing role-management commands backed by a MySQL database."""

    def __init__(self, client):
        self.client = client

        # MySQL connection string read from config.json ("config" -> "db").
        self.connectionStr = configFile["config"]["db"]

        # SQLAlchemy engine; echo=True logs every SQL statement emitted.
        self.MySQLEngine = create_engine(
            self.connectionStr,
            pool_size=10,
            pool_recycle=3600,
            max_overflow=5,
            echo=True
        )

        # Session factory plus one session kept for the cog's lifetime.
        self.sqlSession = sessionmaker(bind=self.MySQLEngine)
        self.session = self.sqlSession()

    # NOTE(review): discord.py command names normally cannot contain spaces,
    # and pass_context was removed in discord.py 1.x -- confirm this command
    # is actually invocable on the library version in use.
    @commands.command(name="Get channel roles", pass_context=True)
    async def role(self, ctx, command_='get', role: Role = None):
        """Fetch and print the roles of the guild the command was sent in."""
        # Call fetch_roles on the guild instance instead of through the
        # unbound Guild method (same behavior, idiomatic form).
        roles = await ctx.guild.fetch_roles()
        print(roles)
def setup(client):
    """discord.py extension entry point: register the Roles cog."""
    client.add_cog(Roles(client))
| [
"sqlalchemy.orm.sessionmaker",
"sqlalchemy.create_engine",
"discord.Guild.fetch_roles",
"json.load",
"discord.ext.commands.command"
] | [((281, 298), 'json.load', 'json.load', (['config'], {}), '(config)\n', (290, 298), False, 'import json\n'), ((867, 928), 'discord.ext.commands.command', 'commands.command', ([], {'name': '"""Get channel roles"""', 'pass_context': '(True)'}), "(name='Get channel roles', pass_context=True)\n", (883, 928), False, 'from discord.ext import commands\n'), ((558, 655), 'sqlalchemy.create_engine', 'create_engine', (['self.connectionStr'], {'pool_size': '(10)', 'pool_recycle': '(3600)', 'max_overflow': '(5)', 'echo': '(True)'}), '(self.connectionStr, pool_size=10, pool_recycle=3600,\n max_overflow=5, echo=True)\n', (571, 655), False, 'from sqlalchemy import create_engine\n'), ((779, 814), 'sqlalchemy.orm.sessionmaker', 'sessionmaker', ([], {'bind': 'self.MySQLEngine'}), '(bind=self.MySQLEngine)\n', (791, 814), False, 'from sqlalchemy.orm import sessionmaker\n'), ((1017, 1045), 'discord.Guild.fetch_roles', 'Guild.fetch_roles', (['ctx.guild'], {}), '(ctx.guild)\n', (1034, 1045), False, 'from discord import Guild, Role\n')] |
"""
In this module, we implement the accuracy measures to evaluate the effect of differential privacy injection.
In this module, we support the following measures:
* F1-score.
* Earth Mover's distance.
"""
from scipy.stats import wasserstein_distance
from pm4py.algo.discovery.inductive import factory as inductive_miner
from pm4py.evaluation.replay_fitness import factory as replay_factory
from math import fabs
import pandas as pd
def earth_mover_dist(dfg1, dfg2):
    """Earth Mover's (Wasserstein) distance between two DFG frequency maps.

    Note: a Counter-based DFG simply omits zero-frequency edges; callers
    are expected to account for that themselves.
    """
    frequencies_a = list(dfg1.values())
    frequencies_b = list(dfg2.values())
    return wasserstein_distance(frequencies_a, frequencies_b)
def percentage_dist(dfg1, dfg2):
    """Largest relative difference between two DFG frequency maps.

    dfg1 acts as the reference. For edges whose reference frequency is 0,
    the complements with respect to 100 are compared instead, avoiding a
    division by zero. Returns (max_difference, per_edge_differences).
    """
    per_edge = {}
    worst = 0
    for edge in dfg1:
        if dfg1[edge] != 0:
            rel = fabs(dfg1[edge] - dfg2[edge]) / dfg1[edge]
        else:  # zero reference frequency -> compare complements to 100
            rel = fabs(((100 - dfg1[edge]) - (100 - dfg2[edge])) / (100 - dfg1[edge]))
        per_edge[edge] = rel
        worst = max(worst, rel)
    return worst, per_edge
def error_calculation(dfg1, dfg2):
    """Return (MAPE, SMAPE, APE_dist, SMAPE_dist) between two DFG maps.

    dfg1 is the reference. Per-edge errors:
      APE   = |ref - other| / |ref|
      SMAPE = |ref - other| / |ref + other|
    Edges whose reference frequency is 0 are compared through their
    complements with respect to 100 to avoid division by zero.
    MAPE/SMAPE are the means of the per-edge values.
    """
    # Fix: the original also built an unused MAPE_dist dict; removed.
    total = 0
    smape_acc = 0
    APE_dist = {}
    SMAPE_dist = {}
    for key in dfg1.keys():
        if dfg1[key] != 0:
            diff = fabs(dfg1[key] - dfg2[key]) / fabs(dfg1[key])
            smape = abs(dfg1[key] - dfg2[key]) / abs(dfg1[key] + dfg2[key])
        else:
            # Zero reference: compare complements with respect to 100.
            diff = fabs(((100 - dfg1[key]) - (100 - dfg2[key])) / fabs(100 - dfg1[key]))
            smape = abs((100 - dfg1[key]) - (100 - dfg2[key])) / abs((100 - dfg1[key]) + (100 - dfg2[key]))
        APE_dist[key] = diff
        smape_acc += smape
        SMAPE_dist[key] = smape
        total += diff
    MAPE = total / len(dfg1.keys())
    SMAPE = smape_acc / len(dfg1.keys())
    return MAPE, SMAPE, APE_dist, SMAPE_dist
def f1_score(xes_file, dfg1, dfg2):
    """Replay fitness of both DFGs against the same event log.

    Each DFG is first converted to a Petri net with the inductive miner,
    then the log is replayed on it. Returns (fitness_dfg1, fitness_dfg2).
    """
    # Fix: dropped the unused `f1_score_1, f1_score_2 = 0, 0` locals.
    # Discover a Petri net (net, initial marking, final marking) per DFG.
    net1, initial_marking1, final_marking1 = inductive_miner.apply(dfg1)
    net2, initial_marking2, final_marking2 = inductive_miner.apply(dfg2)

    fitness_1 = replay_factory.apply(xes_file, net1, initial_marking1, final_marking1)
    fitness_2 = replay_factory.apply(xes_file, net2, initial_marking2, final_marking2)
    return fitness_1, fitness_2
def estimate_SMAPE_variant_and_time(data, variant_counts):
    """SMAPE of relative times and variant frequencies after anonymization.

    Args:
        data: DataFrame with columns 'relative_time_original',
            'relative_time_anonymized', 'trace_variant',
            'case:concept:name' and 'time:timestamp'.
        variant_counts: iterable of dicts with a 'count' entry per variant
            (original per-variant case frequencies).

    Returns:
        (data, smape_time, smape_variant, oversampling_ratio, oversampling_df)

    NOTE(review): variant_freq carries a 0..k-1 RangeIndex while the
    anonymized counts are indexed by 'trace_variant' values; the
    element-wise arithmetic below only aligns when the trace variants are
    labelled 0..k-1 -- confirm against the callers.
    """
    # Fix: dropped the unused avg_dilation_per_variant local (the per-variant
    # dilation is still returned inside oversampling_df).
    # Symmetric absolute percentage error of the per-event relative times.
    smape_time = ((data['relative_time_original'] - data["relative_time_anonymized"])
                  / (data['relative_time_original'].abs()
                     + data["relative_time_anonymized"].abs())).abs().mean() * 100

    variant_freq = pd.Series([x['count'] for x in variant_counts])
    # Distinct-case count per trace variant in the anonymized log.
    variant_freq_anonymized = data.groupby(['trace_variant', 'case:concept:name'])['time:timestamp'].count().reset_index().groupby('trace_variant')['case:concept:name'].count()
    smape_variant = ((variant_freq - variant_freq_anonymized).abs()
                     / (variant_freq + variant_freq_anonymized)).mean() * 100
    oversampling_per_variant = variant_freq_anonymized / variant_freq
    oversampling_ratio = data['case:concept:name'].unique().size / variant_freq.sum()
    oversampling_df = pd.DataFrame()
    oversampling_df['variant_freq_anonymized'] = variant_freq_anonymized
    oversampling_df['variant_freq'] = variant_freq
    oversampling_df['dilation_per_variant'] = oversampling_per_variant
    return data, smape_time, smape_variant, oversampling_ratio, oversampling_df
"pandas.Series",
"pm4py.algo.discovery.inductive.factory.apply",
"scipy.stats.wasserstein_distance",
"math.fabs",
"pandas.DataFrame",
"pm4py.evaluation.replay_fitness.factory.apply"
] | [((746, 774), 'scipy.stats.wasserstein_distance', 'wasserstein_distance', (['v1', 'v2'], {}), '(v1, v2)\n', (766, 774), False, 'from scipy.stats import wasserstein_distance\n'), ((2343, 2370), 'pm4py.algo.discovery.inductive.factory.apply', 'inductive_miner.apply', (['dfg1'], {}), '(dfg1)\n', (2364, 2370), True, 'from pm4py.algo.discovery.inductive import factory as inductive_miner\n'), ((2416, 2443), 'pm4py.algo.discovery.inductive.factory.apply', 'inductive_miner.apply', (['dfg2'], {}), '(dfg2)\n', (2437, 2443), True, 'from pm4py.algo.discovery.inductive import factory as inductive_miner\n'), ((2460, 2530), 'pm4py.evaluation.replay_fitness.factory.apply', 'replay_factory.apply', (['xes_file', 'net1', 'initial_marking1', 'final_marking1'], {}), '(xes_file, net1, initial_marking1, final_marking1)\n', (2480, 2530), True, 'from pm4py.evaluation.replay_fitness import factory as replay_factory\n'), ((2547, 2617), 'pm4py.evaluation.replay_fitness.factory.apply', 'replay_factory.apply', (['xes_file', 'net2', 'initial_marking2', 'final_marking2'], {}), '(xes_file, net2, initial_marking2, final_marking2)\n', (2567, 2617), True, 'from pm4py.evaluation.replay_fitness import factory as replay_factory\n'), ((3217, 3264), 'pandas.Series', 'pd.Series', (["[x['count'] for x in variant_counts]"], {}), "([x['count'] for x in variant_counts])\n", (3226, 3264), True, 'import pandas as pd\n'), ((3786, 3800), 'pandas.DataFrame', 'pd.DataFrame', ([], {}), '()\n', (3798, 3800), True, 'import pandas as pd\n'), ((1092, 1155), 'math.fabs', 'fabs', (['((100 - dfg1[key] - (100 - dfg2[key])) / (100 - dfg1[key]))'], {}), '((100 - dfg1[key] - (100 - dfg2[key])) / (100 - dfg1[key]))\n', (1096, 1155), False, 'from math import fabs\n'), ((1023, 1050), 'math.fabs', 'fabs', (['(dfg1[key] - dfg2[key])'], {}), '(dfg1[key] - dfg2[key])\n', (1027, 1050), False, 'from math import fabs\n'), ((1546, 1573), 'math.fabs', 'fabs', (['(dfg1[key] - dfg2[key])'], {}), '(dfg1[key] - dfg2[key])\n', (1550, 1573), 
False, 'from math import fabs\n'), ((1572, 1587), 'math.fabs', 'fabs', (['dfg1[key]'], {}), '(dfg1[key])\n', (1576, 1587), False, 'from math import fabs\n'), ((1740, 1761), 'math.fabs', 'fabs', (['(100 - dfg1[key])'], {}), '(100 - dfg1[key])\n', (1744, 1761), False, 'from math import fabs\n')] |
import collections
from itertools import groupby
from django.db import connections, models, router
from django.db.models.deletion import Collector
from django.utils import encoding
import bleach
import commonware.log
from mkt.site.models import ManagerBase, ModelBase
from mkt.site.utils import linkify_with_outgoing
from . import utils
log = commonware.log.getLogger('z.translations')
class TranslationManager(ManagerBase):

    def remove_for(self, obj, locale):
        """Blank out every translated string of `obj` for the given locale."""
        trans_ids = [getattr(obj, field.attname)
                     for field in obj._meta.translated_fields]
        matching = Translation.objects.filter(id__in=filter(None, trans_ids),
                                              locale=locale)
        matching.update(localized_string=None, localized_string_clean=None)
class Translation(ModelBase):
    """
    Translation model.

    Use :class:`translations.fields.TranslatedField` instead of a plain foreign
    key to this model.

    Note the unusual schema: `id` is a *shared*, non-unique translation key
    (many rows per id, one per locale) while `autoid` is the real primary
    key. Several methods below exist only to cope with that.
    """

    # Real primary key; `id` below is the shared translation key.
    autoid = models.AutoField(primary_key=True)
    id = models.IntegerField()
    locale = models.CharField(max_length=10)
    localized_string = models.TextField(null=True)
    localized_string_clean = models.TextField(null=True)

    objects = TranslationManager()

    class Meta:
        db_table = 'translations'
        unique_together = ('id', 'locale')

    def __unicode__(self):
        # Python 2 string conversion; empty string when no translation set.
        return self.localized_string and unicode(self.localized_string) or ''

    def __nonzero__(self):
        # __nonzero__ is called to evaluate an object in a boolean context. We
        # want Translations to be falsy if their string is empty.
        return (bool(self.localized_string) and
                bool(self.localized_string.strip()))

    def __eq__(self, other):
        # Django implements an __eq__ that only checks pks. We need to check
        # the strings if we're dealing with existing vs. unsaved Translations.
        return self.__cmp__(other) == 0

    def __cmp__(self, other):
        # Python 2 comparison: compare strings against another Translation or
        # against a plain string (uses the py2 `cmp` builtin).
        if hasattr(other, 'localized_string'):
            return cmp(self.localized_string, other.localized_string)
        else:
            return cmp(self.localized_string, other)

    def clean(self):
        # Normalize surrounding whitespace before saving.
        if self.localized_string:
            self.localized_string = self.localized_string.strip()

    def save(self, **kwargs):
        self.clean()
        return super(Translation, self).save(**kwargs)

    def delete(self, using=None):
        # FIXME: if the Translation is the one used as default/fallback,
        # then deleting it will mean the corresponding field on the related
        # model will stay empty even if there are translations in other
        # languages!
        cls = self.__class__
        using = using or router.db_for_write(cls, instance=self)
        # Look for all translations for the same string (id=self.id) except the
        # current one (autoid=self.autoid).
        qs = cls.objects.filter(id=self.id).exclude(autoid=self.autoid)
        if qs.using(using).exists():
            # If other Translations for the same id exist, we just need to
            # delete this one and *only* this one, without letting Django
            # collect dependencies (it'd remove the others, which we want to
            # keep).
            assert self._get_pk_val() is not None
            collector = Collector(using=using)
            collector.collect([self], collect_related=False)
            # In addition, because we have FK pointing to a non-unique column,
            # we need to force MySQL to ignore constraints because it's dumb
            # and would otherwise complain even if there are remaining rows
            # that matches the FK.
            with connections[using].constraint_checks_disabled():
                collector.delete()
        else:
            # If no other Translations with that id exist, then we should let
            # django behave normally. It should find the related model and set
            # the FKs to NULL.
            return super(Translation, self).delete(using=using)
    delete.alters_data = True

    @classmethod
    def _cache_key(cls, pk, db):
        # Hard-coding the class name here so that subclasses don't try to cache
        # themselves under something like "o:translations.purifiedtranslation".
        #
        # Like in ModelBase, we avoid putting the real db in the key because it
        # does us more harm than good.
        key_parts = ('o', 'translations.translation', pk, 'default')
        return ':'.join(map(encoding.smart_unicode, key_parts))

    @classmethod
    def new(cls, string, locale, id=None):
        """
        Jumps through all the right hoops to create a new translation.

        If ``id`` is not given a new id will be created using
        ``translations_seq``. Otherwise, the id will be used to add strings to
        an existing translation.

        To increment IDs we use a setting on MySQL. This is to support multiple
        database masters -- it's just crazy enough to work! See bug 756242.
        """
        if id is None:
            # Get a sequence key for the new translation.
            cursor = connections['default'].cursor()
            cursor.execute("""UPDATE translations_seq
                              SET id=LAST_INSERT_ID(id + @@global.auto_increment_increment)""")
            # The sequence table should never be empty. But alas, if it is,
            # let's fix it.
            if not cursor.rowcount > 0:
                cursor.execute("""INSERT INTO translations_seq (id)
                                  VALUES(LAST_INSERT_ID(id + @@global.auto_increment_increment))""")
            cursor.execute('SELECT LAST_INSERT_ID()')
            id = cursor.fetchone()[0]
        # Update if one exists, otherwise create a new one.
        q = {'id': id, 'locale': locale}
        try:
            trans = cls.objects.get(**q)
            trans.localized_string = string
        except cls.DoesNotExist:
            trans = cls(localized_string=string, **q)
        return trans
class PurifiedTranslation(Translation):
    """Run the string through bleach to get a safe version."""
    # Tags kept verbatim; anything else is escaped by bleach.clean below.
    allowed_tags = [
        'a',
        'abbr',
        'acronym',
        'b',
        'blockquote',
        'code',
        'em',
        'i',
        'li',
        'ol',
        'strong',
        'ul',
    ]
    # Attributes kept per tag; all other attributes are stripped.
    allowed_attributes = {
        'a': ['href', 'title', 'rel'],
        'abbr': ['title'],
        'acronym': ['title'],
    }

    class Meta:
        # Proxy model: same table as Translation, different Python behavior.
        proxy = True

    def __unicode__(self):
        # Lazily compute the cleaned string on first access.
        if not self.localized_string_clean:
            self.clean()
        return unicode(self.localized_string_clean)

    def __html__(self):
        # Template engines treat __html__ output as already-safe markup.
        return unicode(self)

    def __truncate__(self, length, killwords, end):
        return utils.truncate(unicode(self), length, killwords, end)

    def clean(self):
        super(PurifiedTranslation, self).clean()
        cleaned = self.clean_localized_string()
        self.localized_string_clean = utils.clean_nl(cleaned).strip()

    def clean_localized_string(self):
        # All links (text and markup) are normalized.
        linkified = linkify_with_outgoing(self.localized_string)
        # Keep only the allowed tags and attributes, escape the rest.
        return bleach.clean(linkified, tags=self.allowed_tags,
                            attributes=self.allowed_attributes)
class LinkifiedTranslation(PurifiedTranslation):
    """Run the string through bleach to get a linkified version."""
    # Only anchors survive; every other tag is escaped by the parent class.
    allowed_tags = ['a']

    class Meta:
        proxy = True
class NoLinksMixin(object):
    """Mixin used to remove links (URLs and text) from localized_string."""

    def clean_localized_string(self):
        # First pass: bleach everything, but leave links untouched.
        cleaned = super(NoLinksMixin, self).clean_localized_string()
        # Second pass: call linkify to empty the inner text of all links.
        emptied_links = bleach.linkify(
            cleaned, callbacks=[lambda attrs, new: {'_text': ''}])
        # Third pass: now strip links (only links will be stripped, other
        # forbidden tags are already bleached/escaped).
        allowed_tags = self.allowed_tags[:]  # Make a copy.
        allowed_tags.remove('a')
        return bleach.clean(emptied_links, tags=allowed_tags, strip=True)
class NoLinksTranslation(NoLinksMixin, PurifiedTranslation):
    """Run the string through bleach, escape markup and strip all the links."""

    class Meta:
        proxy = True
class NoLinksNoMarkupTranslation(NoLinksMixin, LinkifiedTranslation):
    """Run the string through bleach, escape markup and strip all the links."""

    class Meta:
        proxy = True
class TranslationSequence(models.Model):
    """
    The translations_seq table, so syncdb will create it during testing.
    """
    # Matches the raw SQL used by Translation.new() to hand out shared ids.
    id = models.IntegerField(primary_key=True)

    class Meta:
        db_table = 'translations_seq'
def delete_translation(obj, fieldname):
    """Null out `fieldname` on `obj` and delete all its Translation rows."""
    model_field = obj._meta.get_field(fieldname)
    translation_id = getattr(obj, model_field.attname)
    obj.update(**{model_field.name: None})
    if translation_id:
        Translation.objects.filter(id=translation_id).delete()
def _sorted_groupby(seq, key):
return groupby(sorted(seq, key=key), key=key)
def attach_trans_dict(model, objs):
    """Put all translations into a translations dict."""
    # Get the ids of all the translations we need to fetch.
    fields = model._meta.translated_fields
    ids = [getattr(obj, f.attname) for f in fields
           for obj in objs if getattr(obj, f.attname, None) is not None]
    # Get translations in a dict, ids will be the keys. It's important to
    # consume the result of groupby, which is an iterator.
    qs = Translation.objects.filter(id__in=ids, localized_string__isnull=False)
    all_translations = dict((k, list(v)) for k, v in
                            _sorted_groupby(qs, lambda trans: trans.id))

    def get_locale_and_string(translation, new_class):
        """Convert the translation to new_class (making PurifiedTranslations
        and LinkifiedTranslations work) and return locale / string tuple."""
        # Re-dressing the instance as new_class triggers that class's
        # cleaning behavior when it is unicode()d below.
        converted_translation = new_class()
        converted_translation.__dict__ = translation.__dict__
        return (converted_translation.locale.lower(),
                unicode(converted_translation))

    # Build and attach translations for each field on each object.
    for obj in objs:
        obj.translations = collections.defaultdict(list)
        for field in fields:
            t_id = getattr(obj, field.attname, None)
            field_translations = all_translations.get(t_id, None)
            if not t_id or field_translations is None:
                continue
            # field.rel.to is the concrete Translation subclass of the field.
            obj.translations[t_id] = [get_locale_and_string(t, field.rel.to)
                                      for t in field_translations]
| [
"bleach.clean",
"django.db.models.TextField",
"django.db.models.IntegerField",
"django.db.models.deletion.Collector",
"mkt.site.utils.linkify_with_outgoing",
"django.db.models.AutoField",
"collections.defaultdict",
"bleach.linkify",
"django.db.models.CharField",
"django.db.router.db_for_write"
] | [((982, 1016), 'django.db.models.AutoField', 'models.AutoField', ([], {'primary_key': '(True)'}), '(primary_key=True)\n', (998, 1016), False, 'from django.db import connections, models, router\n'), ((1026, 1047), 'django.db.models.IntegerField', 'models.IntegerField', ([], {}), '()\n', (1045, 1047), False, 'from django.db import connections, models, router\n'), ((1061, 1092), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(10)'}), '(max_length=10)\n', (1077, 1092), False, 'from django.db import connections, models, router\n'), ((1116, 1143), 'django.db.models.TextField', 'models.TextField', ([], {'null': '(True)'}), '(null=True)\n', (1132, 1143), False, 'from django.db import connections, models, router\n'), ((1173, 1200), 'django.db.models.TextField', 'models.TextField', ([], {'null': '(True)'}), '(null=True)\n', (1189, 1200), False, 'from django.db import connections, models, router\n'), ((8828, 8865), 'django.db.models.IntegerField', 'models.IntegerField', ([], {'primary_key': '(True)'}), '(primary_key=True)\n', (8847, 8865), False, 'from django.db import connections, models, router\n'), ((7131, 7175), 'mkt.site.utils.linkify_with_outgoing', 'linkify_with_outgoing', (['self.localized_string'], {}), '(self.localized_string)\n', (7152, 7175), False, 'from mkt.site.utils import linkify_with_outgoing\n'), ((7261, 7349), 'bleach.clean', 'bleach.clean', (['linkified'], {'tags': 'self.allowed_tags', 'attributes': 'self.allowed_attributes'}), '(linkified, tags=self.allowed_tags, attributes=self.\n allowed_attributes)\n', (7273, 7349), False, 'import bleach\n'), ((7936, 8005), 'bleach.linkify', 'bleach.linkify', (['cleaned'], {'callbacks': "[lambda attrs, new: {'_text': ''}]"}), "(cleaned, callbacks=[lambda attrs, new: {'_text': ''}])\n", (7950, 8005), False, 'import bleach\n'), ((8257, 8315), 'bleach.clean', 'bleach.clean', (['emptied_links'], {'tags': 'allowed_tags', 'strip': '(True)'}), '(emptied_links, tags=allowed_tags, strip=True)\n', 
(8269, 8315), False, 'import bleach\n'), ((10442, 10471), 'collections.defaultdict', 'collections.defaultdict', (['list'], {}), '(list)\n', (10465, 10471), False, 'import collections\n'), ((2714, 2753), 'django.db.router.db_for_write', 'router.db_for_write', (['cls'], {'instance': 'self'}), '(cls, instance=self)\n', (2733, 2753), False, 'from django.db import connections, models, router\n'), ((3308, 3330), 'django.db.models.deletion.Collector', 'Collector', ([], {'using': 'using'}), '(using=using)\n', (3317, 3330), False, 'from django.db.models.deletion import Collector\n')] |
import torch
from torch import nn
class FlowSequential(nn.Sequential):
    """Sequential container for flow blocks that also accumulates the
    log-determinant of the Jacobian across blocks."""

    def forward(self, input, context=None):
        log_det_sum = torch.zeros(input.size(0), device=input.device)
        for module in self:
            input, log_det = module(input, context)
            log_det_sum = log_det_sum + log_det
        return input, log_det_sum

    def inverse(self, input, context=None):
        log_det_sum = torch.zeros(input.size(0), device=input.device)
        # Undo the blocks in reverse registration order.
        for module in reversed(list(self)):
            input, log_det = module.inverse(input, context)
            log_det_sum = log_det_sum + log_det
        return input, log_det_sum
def get_memory():
    """Return (currently allocated, peak allocated) CUDA memory in GB."""
    torch.cuda.synchronize()
    peak_bytes = torch.cuda.max_memory_allocated()
    current_bytes = torch.cuda.memory_allocated()
    return current_bytes / 10**9, peak_bytes / 10**9
class RealNVPSequential(nn.Sequential):
    """Sequential whose first child splits the input and whose last child
    re-assembles it (e.g. CheckerSplit / CheckerUnsplit); everything in
    between is a coupling block returning (transf, const, log_prob)."""

    def forward(self, input, context=None):
        children = list(self)
        split, couplings, concat = children[0], children[1:-1], children[-1]
        transf, const = split(input)
        log_det_sum = torch.zeros(input.size(0), device=input.device)
        for block in couplings:
            transf, const, log_det = block(transf, const, context)
            log_det_sum = log_det_sum + log_det
        return concat(transf, const), log_det_sum

    def inverse(self, input, context=None):
        children = list(self)
        split, couplings, concat = children[0], children[1:-1], children[-1]
        transf, const = split(input)
        log_det_sum = torch.zeros(input.size(0), device=input.device)
        # Invert the coupling blocks in reverse order.
        for block in reversed(couplings):
            transf, const, log_det = block.inverse(transf, const, context)
            log_det_sum = log_det_sum + log_det
        return concat(transf, const), log_det_sum
class SplitSequential(nn.Sequential):
    """Sequential over already-split (transf, const) tensor pairs; each
    child returns (transf, const, log_prob) and the log-probs are summed."""

    def forward(self, transf, const, context=None):
        log_det_sum = torch.zeros(transf.size(0), device=transf.device)
        for block in self:
            transf, const, log_det = block(transf, const, context)
            log_det_sum = log_det_sum + log_det
        return transf, const, log_det_sum

    def inverse(self, transf, const, context=None):
        log_det_sum = torch.zeros(transf.size(0), device=transf.device)
        # Undo the blocks in reverse registration order.
        for block in reversed(list(self)):
            transf, const, log_det = block.inverse(transf, const, context)
            log_det_sum = log_det_sum + log_det
        return transf, const, log_det_sum
| [
"torch.cuda.synchronize",
"torch.cuda.memory_allocated",
"torch.cuda.max_memory_allocated"
] | [((701, 725), 'torch.cuda.synchronize', 'torch.cuda.synchronize', ([], {}), '()\n', (723, 725), False, 'import torch\n'), ((741, 774), 'torch.cuda.max_memory_allocated', 'torch.cuda.max_memory_allocated', ([], {}), '()\n', (772, 774), False, 'import torch\n'), ((786, 815), 'torch.cuda.memory_allocated', 'torch.cuda.memory_allocated', ([], {}), '()\n', (813, 815), False, 'import torch\n')] |
import time
from os.path import join, dirname
import sys
whereami = dirname(__file__)
scripts_dir= join(whereami, "../scripts/")
sys.path.append(scripts_dir)
from json_parsing import read_json
import Inmoov
filename_pose = join(whereami, '../json/pose.json')
filename_animation = join(whereami, '../json/animations.json')
# global objects that hold the json file contents
# so i can control when/how often to read the json file
# in the inmoov object, when it receives messages, it only needs to update at bootup. json will not change after bootup.
# in the gui, it should update each time it tries to run, because the gui is editing the files.
global_poses = None
global_animations = None
def update_animations():
    """Re-read the animation definitions from disk into the module-level cache."""
    global global_animations
    global_animations = read_json(filename_animation)
def update_poses():
    """Re-read the pose definitions from disk into the module-level cache."""
    global global_poses
    global_poses = read_json(filename_pose)
# TODO: if we are keeping the killlist idea, make it cleaner & easy to remove when transferring to a robot that doesn't need it
# TODO: be more intelligent about when we need to read the animation/pose json files
def do_animation(the_inmoov, animation_name):
    """Play back a named animation: execute its poses frame by frame."""
    update_animations()
    print("Executing animation ", str(animation_name))
    if animation_name not in global_animations:
        print("FAIL TO FIND: ANIMATION '%s'" % str(animation_name))
        return
    # Frames are keyed "1", "2", ... and walked numerically rather than via
    # sorted(), which would order "10" before "2" for 10+ frame animations.
    frames = global_animations[animation_name]
    frame_no = 1
    while True:
        key = str(frame_no)
        if key not in frames:
            break
        # Each frame is a list: item0 = pose name, item1 = hold time.
        pose_name, hold_time = frames[key][0], frames[key][1]
        print("\n********* Executing pose {} *********\n".format(str(pose_name)))
        do_pose(the_inmoov, pose_name, hold_time)
        frame_no += 1
    print("\nANIMATION COMPLETE!\n")
# Servos that can safely hold position without power during long holds
# (used by the currently-disabled power-saving logic noted below).
killlist = ["left_shoulder_lift_front","left_arm_rotate","right_arm_rotate","right_shoulder_lift_front"]
def do_pose(the_inmoov, pose_name, hold_time=0):
    """Drive every servo listed in the named pose, then optionally hold.

    Parameters
    ----------
    the_inmoov : robot handle used to look up servo objects by name.
    pose_name : str
        Key into the pose JSON; unknown names are reported and skipped.
    hold_time : float, optional
        Seconds to sleep after the pose is commanded (0 = no hold).
    """
    update_poses()
    if pose_name not in global_poses:
        print("FAIL TO FIND: POSE '%s'" % str(pose_name))
        return
    hold_time = float(hold_time)
    pose_data = global_poses[pose_name]
    for servo_name, servo_angle in pose_data.items():
        # Obtain a handle to the actual servo object
        fservo = the_inmoov.find_servo_by_name(str(servo_name))
        if fservo.curr_angle == servo_angle:
            # Already at the target angle -> skip the (slow) move entirely.
            print('Skipping', servo_name)
        else:
            fservo.rotate(float(servo_angle))
            print('Setting {} servo to an angle of {}'.format(servo_name, servo_angle))
    if hold_time != 0:
        print('\n--------------- Hold for {} second(s) ---------------'.format(hold_time))
        # TODO: re-enable powering off the `killlist` servos during the hold
        # (dead commented-out code removed); handle hold_time < killtime then.
        time.sleep(hold_time)
if __name__ == '__main__':
    # Demo entry point: run a single animation and quit.
    this_inmoov = Inmoov.Inmoov()
    do_animation(this_inmoov, 'rps_paper')
    time.sleep(5)
    # NOTE(review): exit() makes every line below unreachable — looks like a
    # temporary quick-test shortcut; confirm whether the head demo should run.
    exit()
    do_animation(this_inmoov, 'headright_anim')
    time.sleep(5)
    do_animation(this_inmoov, 'headleft_anim')
    time.sleep(5)
    do_animation(this_inmoov, 'headright_anim')
    time.sleep(5)
| [
"os.path.join",
"Inmoov.Inmoov",
"time.sleep",
"os.path.dirname",
"json_parsing.read_json",
"sys.path.append"
] | [((69, 86), 'os.path.dirname', 'dirname', (['__file__'], {}), '(__file__)\n', (76, 86), False, 'from os.path import join, dirname\n'), ((100, 129), 'os.path.join', 'join', (['whereami', '"""../scripts/"""'], {}), "(whereami, '../scripts/')\n", (104, 129), False, 'from os.path import join, dirname\n'), ((130, 158), 'sys.path.append', 'sys.path.append', (['scripts_dir'], {}), '(scripts_dir)\n', (145, 158), False, 'import sys\n'), ((225, 260), 'os.path.join', 'join', (['whereami', '"""../json/pose.json"""'], {}), "(whereami, '../json/pose.json')\n", (229, 260), False, 'from os.path import join, dirname\n'), ((282, 323), 'os.path.join', 'join', (['whereami', '"""../json/animations.json"""'], {}), "(whereami, '../json/animations.json')\n", (286, 323), False, 'from os.path import join, dirname\n'), ((772, 801), 'json_parsing.read_json', 'read_json', (['filename_animation'], {}), '(filename_animation)\n', (781, 801), False, 'from json_parsing import read_json\n'), ((865, 889), 'json_parsing.read_json', 'read_json', (['filename_pose'], {}), '(filename_pose)\n', (874, 889), False, 'from json_parsing import read_json\n'), ((3464, 3485), 'time.sleep', 'time.sleep', (['hold_time'], {}), '(hold_time)\n', (3474, 3485), False, 'import time\n'), ((3537, 3552), 'Inmoov.Inmoov', 'Inmoov.Inmoov', ([], {}), '()\n', (3550, 3552), False, 'import Inmoov\n'), ((3605, 3618), 'time.sleep', 'time.sleep', (['(5)'], {}), '(5)\n', (3615, 3618), False, 'import time\n'), ((3682, 3695), 'time.sleep', 'time.sleep', (['(5)'], {}), '(5)\n', (3692, 3695), False, 'import time\n'), ((3747, 3760), 'time.sleep', 'time.sleep', (['(5)'], {}), '(5)\n', (3757, 3760), False, 'import time\n'), ((3813, 3826), 'time.sleep', 'time.sleep', (['(5)'], {}), '(5)\n', (3823, 3826), False, 'import time\n')] |
from distutils.core import setup, Extension
# Build recipe for the `sglite` C extension (space-group library).
SOURCE_FILES = [
    'sgglobal.c', 'sgcb.c', 'sgcharmx.c', 'sgfile.c',
    'sggen.c', 'sghall.c', 'sghkl.c', 'sgltr.c', 'sgmath.c', 'sgmetric.c',
    'sgnorm.c', 'sgprop.c', 'sgss.c', 'sgstr.c', 'sgsymbols.c',
    'sgtidy.c', 'sgtype.c', 'sgutil.c', 'runtests.c', 'sglitemodule.c',
]
sglite_ext = Extension(
    'sglite',
    sources=SOURCE_FILES,
    define_macros=[('PythonTypes', 1)],
)
setup(
    name='sglite',
    description='space group info',
    ext_modules=[sglite_ext],
)
| [
"distutils.core.Extension",
"distutils.core.setup"
] | [((332, 404), 'distutils.core.Extension', 'Extension', (['"""sglite"""'], {'sources': 'srclist', 'define_macros': "[('PythonTypes', 1)]"}), "('sglite', sources=srclist, define_macros=[('PythonTypes', 1)])\n", (341, 404), False, 'from distutils.core import setup, Extension\n'), ((427, 501), 'distutils.core.setup', 'setup', ([], {'name': '"""sglite"""', 'description': '"""space group info"""', 'ext_modules': '[module]'}), "(name='sglite', description='space group info', ext_modules=[module])\n", (432, 501), False, 'from distutils.core import setup, Extension\n')] |
from app.blogging import bp
from datetime import datetime
from flask import flash, redirect, url_for
from flask_login import current_user
@bp.before_request
def protect():
    """Blueprint-level guard for the Flask-Blogging routes.

    Allows access only to logged-in users whose paid subscription has not
    expired; everyone else is flashed a message and redirected.
    """
    if not current_user.is_authenticated:
        flash('Please login to view updates.', 'warning')
        return redirect(url_for('auth.login'))
    if datetime.today() <= current_user.expiration:
        # Subscription still active: returning None lets the request proceed.
        return None
    # Fixed: the original backslash line-continuation embedded the source
    # indentation (a long run of spaces) inside the flashed message text.
    flash('You must have a paid-up subscription to view updates.', 'warning')
    return redirect(url_for('main.support'))
| [
"datetime.datetime.today",
"flask.flash",
"flask.url_for"
] | [((628, 677), 'flask.flash', 'flash', (['"""Please login to view updates."""', '"""warning"""'], {}), "('Please login to view updates.', 'warning')\n", (633, 677), False, 'from flask import flash, redirect, url_for\n'), ((368, 384), 'datetime.datetime.today', 'datetime.today', ([], {}), '()\n', (382, 384), False, 'from datetime import datetime\n'), ((463, 559), 'flask.flash', 'flash', (['"""You must have a paid-up subscription to view updates."""', '"""warning"""'], {}), "('You must have a paid-up subscription to view updates.'\n , 'warning')\n", (468, 559), False, 'from flask import flash, redirect, url_for\n'), ((702, 723), 'flask.url_for', 'url_for', (['"""auth.login"""'], {}), "('auth.login')\n", (709, 723), False, 'from flask import flash, redirect, url_for\n'), ((585, 608), 'flask.url_for', 'url_for', (['"""main.support"""'], {}), "('main.support')\n", (592, 608), False, 'from flask import flash, redirect, url_for\n')] |
# -*- coding: utf-8 -*-
from bs4 import BeautifulSoup
from src.lib.jianshu_parser.base import BaseParser
from src.lib.jianshu_parser.content.JianshuAuthor import JianshuAuthorInfo
from src.lib.jianshu_parser.content.JianshuArticle import JianshuArticle
class JianshuParser(BaseParser):
    u"""
    Parser that extracts the content required for the jianshu_info table.
    """
    def __init__(self, content):
        # Parse the raw HTML once; sub-parsers operate on this shared DOM.
        self.dom = BeautifulSoup(content, 'lxml')
        self.article_parser = JianshuArticle(self.dom)
        return
    def get_jianshu_info_list(self):
        """Return a single-element list holding the author's info record."""
        author_parser = JianshuAuthorInfo()  # info for the author-info table
        author_parser.set_dom(self.dom)
        return [author_parser.get_info()]
| [
"bs4.BeautifulSoup",
"src.lib.jianshu_parser.content.JianshuArticle.JianshuArticle",
"src.lib.jianshu_parser.content.JianshuAuthor.JianshuAuthorInfo"
] | [((385, 415), 'bs4.BeautifulSoup', 'BeautifulSoup', (['content', '"""lxml"""'], {}), "(content, 'lxml')\n", (398, 415), False, 'from bs4 import BeautifulSoup\n'), ((446, 470), 'src.lib.jianshu_parser.content.JianshuArticle.JianshuArticle', 'JianshuArticle', (['self.dom'], {}), '(self.dom)\n', (460, 470), False, 'from src.lib.jianshu_parser.content.JianshuArticle import JianshuArticle\n'), ((549, 568), 'src.lib.jianshu_parser.content.JianshuAuthor.JianshuAuthorInfo', 'JianshuAuthorInfo', ([], {}), '()\n', (566, 568), False, 'from src.lib.jianshu_parser.content.JianshuAuthor import JianshuAuthorInfo\n')] |
"""List of operations"""
from collections import namedtuple
# A discovered architecture: one field per network part.
Genotype = namedtuple('Genotype', 'encoder decoder')
# Candidate operations of the search space; indices into this list
# identify the operations referenced inside a Genotype.
OP_NAMES = [
    'conv1x1',
    'conv3x3',
    'sep_conv_3x3',
    'sep_conv_5x5',
    'global_average_pool',
    'conv3x3_dil3',
    'conv3x3_dil12',
    'sep_conv_3x3_dil3',
    'sep_conv_5x5_dil6',
    'skip_connect',
    'none'
]
| [
"collections.namedtuple"
] | [((73, 114), 'collections.namedtuple', 'namedtuple', (['"""Genotype"""', '"""encoder decoder"""'], {}), "('Genotype', 'encoder decoder')\n", (83, 114), False, 'from collections import namedtuple\n')] |
#!/usr/bin/env python3
# Hacky script to download linux and windows typed_ast wheels from appveyor and gcloud
import os
import os.path
import json
import sys
from urllib.request import urlopen
# Appveyor download for windows wheels
api_url = 'https://ci.appveyor.com/api/'
def get_json(path):
    """Fetch an Appveyor API endpoint and decode its JSON response."""
    response = urlopen(api_url + path)
    return json.loads(response.read())
def download(url):
    """Download *url* into the dist/ directory, keeping its basename."""
    print('Downloading', url)
    target = os.path.join('dist', os.path.split(url)[1])
    with urlopen(url) as response:
        payload = response.read()
    with open(target, 'wb') as out:
        out.write(payload)
def download_appveyor(version):
    """Download all Windows wheel artifacts for the given release tag.

    Locates the Appveyor build whose tag matches *version*, then downloads
    every artifact of every job in that build. Exits if the tag is unknown.
    """
    project_base = 'projects/ddfisher/typed-ast-a4xqu'
    history = get_json(project_base + '/history?recordsNumber=20')
    for build in history['builds']:
        if build.get('tag') == version:
            # Builds are addressed by their numeric id, not the version tag.
            # (Removed a dead assignment of build['version'] that was
            # immediately overwritten.)
            build_version = str(build['buildId'])
            break
    else:
        sys.exit("Couldn't find tag")
    print(build_version)
    build = get_json(project_base + '/builds/' + build_version)
    for job in build['build']['jobs']:
        artifact_url = 'buildjobs/{}/artifacts'.format(job['jobId'])
        artifacts = get_json(artifact_url)
        for artifact in artifacts:
            download(api_url + artifact_url + '/' + artifact['fileName'])
# gcloud downloads for linux wheels
# Wheels are published for CPython 3.5 through 3.9.
MIN_VER = 5
MAX_VER = 9
GCLOUD_URL = "https://storage.googleapis.com/typed-ast/typed_ast-{version}-cp3{pyver}-cp3{pyver}{abi_tag}-{platform}.whl"
def download_entries(base_url, version, platform):
    """Download one wheel per supported Python minor version for *platform*.

    (Removed an unused local ``entries`` accumulator left over from an
    earlier revision.)
    """
    for pyver in range(MIN_VER, MAX_VER + 1):
        # CPython dropped the "m" (pymalloc) ABI suffix in 3.8.
        abi_tag = "" if pyver >= 8 else "m"
        url = base_url.format(
            version=version,
            pyver=pyver,
            abi_tag=abi_tag,
            platform=platform)
        download(url)
def main(argv):
    """Entry point: download all wheels for the release named in argv[1]."""
    if len(argv) != 2:
        sys.exit("Usage: download_typed_ast.py version")
    version = argv[1]
    os.makedirs('dist', exist_ok=True)
    # Linux wheels come from the gcloud bucket, Windows wheels from Appveyor.
    download_entries(GCLOUD_URL, version, 'manylinux1_x86_64')
    download_entries(GCLOUD_URL, version, 'manylinux1_i686')
    download_entries(GCLOUD_URL, version, 'manylinux2014_aarch64')
    download_appveyor(version)
if __name__ == '__main__':
    main(sys.argv)
| [
"json.loads",
"os.makedirs",
"os.path.split",
"sys.exit",
"urllib.request.urlopen"
] | [((329, 341), 'urllib.request.urlopen', 'urlopen', (['url'], {}), '(url)\n', (336, 341), False, 'from urllib.request import urlopen\n'), ((373, 389), 'json.loads', 'json.loads', (['data'], {}), '(data)\n', (383, 389), False, 'import json\n'), ((1978, 2012), 'os.makedirs', 'os.makedirs', (['"""dist"""'], {'exist_ok': '(True)'}), "('dist', exist_ok=True)\n", (1989, 2012), False, 'import os\n'), ((504, 516), 'urllib.request.urlopen', 'urlopen', (['url'], {}), '(url)\n', (511, 516), False, 'from urllib.request import urlopen\n'), ((964, 993), 'sys.exit', 'sys.exit', (['"""Couldn\'t find tag"""'], {}), '("Couldn\'t find tag")\n', (972, 993), False, 'import sys\n'), ((1902, 1950), 'sys.exit', 'sys.exit', (['"""Usage: download_typed_ast.py version"""'], {}), "('Usage: download_typed_ast.py version')\n", (1910, 1950), False, 'import sys\n'), ((472, 490), 'os.path.split', 'os.path.split', (['url'], {}), '(url)\n', (485, 490), False, 'import os\n')] |
#!/usr/bin/env python
# @author <NAME> <<EMAIL>>, Interactive Robotics Lab, Arizona State University
from __future__ import absolute_import, division, print_function, unicode_literals
import sys
import rclpy
from policy_translation.srv import NetworkPT, TuneNetwork
from model_src.model import PolicyTranslationModel
from utils.network import Network
from utils.tf_util import trainOnCPU, limitGPUMemory
from utils.intprim.gaussian_model import GaussianModel
import tensorflow as tf
import numpy as np
import re
from cv_bridge import CvBridge, CvBridgeError
import cv2
import matplotlib.pyplot as plt
from utils.intprim.gaussian_model import GaussianModel
import glob
import json
import pickle
import copy
# Force TensorFlow to use the CPU
FORCE_CPU = True
# Use dropout at run-time for stochastic forward passes
USE_DROPOUT = True
# Where can we find the trained model?
MODEL_PATH = "../GDrive/model/policy_translation"
# Where is a pre-trained faster-rcnn?
FRCNN_PATH = "../GDrive/rcnn"
# Where are the GloVe word embeddings?
GLOVE_PATH = "../GDrive/glove.6B.50d.txt"
# Where is the normalization of the dataset?
NORM_PATH = "../GDrive/normalization_v2.pkl"
if FORCE_CPU:
    trainOnCPU()
else:
    limitGPUMemory()
print("Running Policy Translation Model")
model = PolicyTranslationModel(
    od_path=FRCNN_PATH,
    glove_path=GLOVE_PATH,
    special=None
)
bs = 2
# Run one dummy forward pass so all variables are built
# before the checkpoint is restored below.
model((
    np.ones((bs, 15), dtype=np.int64),
    np.ones((bs, 6, 5), dtype=np.float32),
    np.ones((bs, 500, 7), dtype=np.float32)
))
model.load_weights(MODEL_PATH)
model.summary()
class NetworkService():
    """ROS2 service wrapper around the global policy-translation `model`.

    Serves the /network service: each request carries an image, a language
    instruction and the current robot state; the reply contains a generated
    trajectory, its uncertainty, basis weights and the task phase.
    """
    def __init__(self):
        self.dictionary = self._loadDictionary(GLOVE_PATH)
        self.regex = re.compile('[^a-z ]')
        self.bridge = CvBridge()
        self.history = []
        rclpy.init(args=None)
        self.node = rclpy.create_node("neural_network")
        self.service_nn = self.node.create_service(NetworkPT, "/network", self.cbk_network_dmp_ros2)
        self.normalization = pickle.load(open(NORM_PATH, mode="rb"), encoding="latin1")
        print("Ready")
    def runNode(self):
        """Spin the ROS node until shutdown, then clean up the service."""
        while rclpy.ok():
            rclpy.spin_once(self.node)
        self.node.destroy_service(self.service_nn)
        # Fixed: the original also destroyed self.service_tn, which is never
        # created anywhere -> AttributeError on shutdown.
        rclpy.shutdown()
    def _loadDictionary(self, file):
        """Load the GloVe vocabulary as word -> index (capped at 300k words)."""
        __dictionary = {}
        __dictionary[""] = 0  # Empty string
        fh = open(file, "r", encoding="utf-8")
        for line in fh:
            if len(__dictionary) >= 300000:
                break
            tokens = line.strip().split(" ")
            __dictionary[tokens[0]] = len(__dictionary)
        fh.close()
        return __dictionary
    def tokenize(self, language):
        """Map an instruction string to a list of vocabulary ids (0 = unknown)."""
        voice = self.regex.sub("", language.strip().lower())
        tokens = []
        for w in voice.split(" "):
            idx = 0
            try:
                idx = self.dictionary[w]
            except KeyError:  # narrowed from a bare except
                print("Unknown word: " + w)
            tokens.append(idx)
        return tokens
    def normalize(self, value, v_min, v_max):
        """Min-max normalize an (n x d) array column-wise using v_min/v_max."""
        if (value.shape[1] != v_min.shape[0] or v_min.shape[0] != v_max.shape[0] or
            len(value.shape) != 2 or len(v_min.shape) != 1 or len(v_max.shape) != 1):
            # NOTE(review): ArrayDimensionMismatch is not defined in this file;
            # raising it would itself raise NameError — confirm where it lives.
            raise ArrayDimensionMismatch()
        value = np.copy(value)
        v_min = np.tile(np.expand_dims(v_min, 0), [value.shape[0], 1])
        v_max = np.tile(np.expand_dims(v_max, 0), [value.shape[0], 1])
        value = (value - v_min) / (v_max - v_min)
        return value
    def interpolateTrajectory(self, trj, target):
        """Linearly resample a trajectory to `target` timesteps, per dimension."""
        current_length = trj.shape[0]
        dimensions = trj.shape[1]
        result = np.zeros((target, trj.shape[1]), dtype=np.float32)
        for i in range(dimensions):
            result[:,i] = np.interp(np.linspace(0.0, 1.0, num=target), np.linspace(0.0, 1.0, num=current_length), trj[:,i])
        return result
    def cbk_network_dmp_ros2(self, req, res):
        """ROS2 service callback: fill the response fields from cbk_network_dmp."""
        res.trajectory, res.confidence, res.timesteps, res.weights, res.phase = self.cbk_network_dmp(req)
        return res
    def imgmsg_to_cv2(self, img_msg, desired_encoding="passthrough"):
        """Convert an 8UC3 sensor_msgs/Image to a numpy array without cv_bridge."""
        if img_msg.encoding != "8UC3":
            # Fixed: the original referenced an undefined name `encoding` here.
            self.node.get_logger().info("Unrecognized image type: " + img_msg.encoding)
            exit(0)
        dtype = "uint8"
        n_channels = 3
        dtype = np.dtype(dtype)
        dtype = dtype.newbyteorder('>' if img_msg.is_bigendian else '<')
        img_buf = np.asarray(img_msg.data, dtype=dtype) if isinstance(img_msg.data, list) else img_msg.data
        if n_channels == 1:
            im = np.ndarray(shape=(img_msg.height, img_msg.width),
                            dtype=dtype, buffer=img_buf)
        else:
            im = np.ndarray(shape=(img_msg.height, img_msg.width, n_channels),
                            dtype=dtype, buffer=img_buf)
        if img_msg.is_bigendian == (sys.byteorder == 'little'):
            im = im.byteswap().newbyteorder()
        if desired_encoding == 'passthrough':
            return im
        from cv_bridge.boost.cv_bridge_boost import cvtColor2
        try:
            res = cvtColor2(im, img_msg.encoding, desired_encoding)
        except RuntimeError as e:
            raise CvBridgeError(e)
        return res
    def cbk_network_dmp(self, req):
        """Run one network step for a service request.

        On `req.reset`, re-runs object detection on the image, re-tokenizes
        the instruction and clears the robot-state history. Every call
        appends the current robot state, performs 250 stochastic forward
        passes and returns (trajectory, std, timesteps, weights, phase).
        """
        if req.reset:
            self.req_step = 0
            self.sfp_history = []
            try:
                image = self.imgmsg_to_cv2(req.image)
            except CvBridgeError as e:
                print(e)
            language = self.tokenize(req.language)
            self.language = language + [0] * (15-len(language))
            image_features = model.frcnn(tf.convert_to_tensor([image], dtype=tf.uint8))
            scores = image_features["detection_scores"][0, :6].numpy().astype(dtype=np.float32)
            scores = [0.0 if v < 0.5 else 1.0 for v in scores.tolist()]
            classes = image_features["detection_classes"][0, :6].numpy().astype(dtype=np.int32)
            classes = [v * scores[k] for k, v in enumerate(classes.tolist())]
            boxes = image_features["detection_boxes"][0, :6, :].numpy().astype(dtype=np.float32)
            self.features = np.concatenate((np.expand_dims(classes,1), boxes), axis=1)
            self.history = []
        self.history.append(list(req.robot))
        robot = np.asarray(self.history, dtype=np.float32)
        # Tile everything 250x: one batch of stochastic (dropout) forward passes.
        self.input_data = (
            tf.convert_to_tensor(np.tile([self.language],[250, 1]), dtype=tf.int64),
            tf.convert_to_tensor(np.tile([self.features],[250, 1, 1]), dtype=tf.float32),
            tf.convert_to_tensor(np.tile([robot],[250, 1, 1]), dtype=tf.float32)
        )
        generated, (atn, dmp_dt, phase, weights) = model(self.input_data, training=tf.constant(False), use_dropout=tf.constant(True))
        self.trj_gen = tf.math.reduce_mean(generated, axis=0).numpy()
        self.trj_std = tf.math.reduce_std(generated, axis=0).numpy()
        self.timesteps = int(tf.math.reduce_mean(dmp_dt).numpy() * 500)
        self.b_weights = tf.math.reduce_mean(weights, axis=0).numpy()
        phase_value = tf.math.reduce_mean(phase, axis=0).numpy()
        phase_value = phase_value[-1,0]
        self.sfp_history.append(self.b_weights[-1,:,:])
        if phase_value > 0.95 and len(self.sfp_history) > 100:
            # Episode finished: decode the basis-weight history and dump it.
            trj_len = len(self.sfp_history)
            basismodel = GaussianModel(degree=11, scale=0.012, observed_dof_names=("Base","Shoulder","Ellbow","Wrist1","Wrist2","Wrist3","Gripper"))
            domain = np.linspace(0, 1, trj_len, dtype=np.float64)
            trajectories = []
            for i in range(trj_len):
                trajectories.append(np.asarray(basismodel.apply_coefficients(domain, self.sfp_history[i].flatten())))
            trajectories = np.asarray(trajectories)
            np.save("trajectories", trajectories)
            np.save("history", self.history)
            # (Removed the unused var_trj allocation.)
            gen_trajectory = []
            for w in range(trj_len):
                gen_trajectory.append(trajectories[w,w,:])
            gen_trajectory = np.asarray(gen_trajectory)
            np.save("gen_trajectory", gen_trajectory)
            self.sfp_history = []
        self.req_step += 1
        return (self.trj_gen.flatten().tolist(), self.trj_std.flatten().tolist(), self.timesteps, self.b_weights.flatten().tolist(), float(phase_value))
    def idToText(self, id):
        """Map a detection class id to its human-readable object name."""
        names = ["", "Yellow Small Round", "Red Small Round", "Green Small Round", "Blue Small Round", "Pink Small Round",
                     "Yellow Large Round", "Red Large Round", "Green Large Round", "Blue Large Round", "Pink Large Round",
                     "Yellow Small Square", "Red Small Square", "Green Small Square", "Blue Small Square", "Pink Small Square",
                     "Yellow Large Square", "Red Large Square", "Green Large Square", "Blue Large Square", "Pink Large Square",
                     "Cup Red", "Cup Green", "Cup Blue"]
        return names[id]
    def plotTrajectory(self, trj, error, image):
        """Plot each of the 7 DoF trajectories with error bars, plus the image."""
        fig, ax = plt.subplots(3,3)
        fig.set_size_inches(9, 9)
        for sp in range(7):
            idx = sp // 3
            idy = sp % 3
            ax[idx,idy].clear()
            ax[idx,idy].plot(range(trj.shape[0]), trj[:,sp], alpha=0.5, color='mediumslateblue')
            ax[idx,idy].errorbar(range(trj.shape[0]), trj[:,sp], xerr=None, yerr=error[:,sp], alpha=0.1, fmt='none', color='mediumslateblue')
            ax[idx,idy].set_ylim([-0.1, 1.1])
        ax[2,1].imshow(image)
    def plotImageRegions(self, image_np, image_dict, atn):
        """Draw detection boxes; highlight the attended object with its score."""
        # Visualization of the results of a detection.
        tgt_object = np.argmax(atn)
        num_detected = len([v for v in image_dict["detection_scores"][0] if v > 0.5])
        num_detected = min(num_detected, len(atn))
        for i in range(num_detected):
            ymin, xmin, ymax, xmax = image_dict['detection_boxes'][0][i,:]
            pt1 = (int(xmin*image_np.shape[1]), int(ymin*image_np.shape[0]))
            pt2 = (int(xmax*image_np.shape[1]), int(ymax*image_np.shape[0]))
            image_np = cv2.rectangle(image_np, pt1, pt2, (156, 2, 2), 1)
            if i == tgt_object:
                image_np = cv2.rectangle(image_np, pt1, pt2, (30, 156, 2), 2)
                image_np = cv2.putText(image_np, "{:.1f}%".format(atn[i] * 100), (pt1[0]-10, pt1[1]-5), cv2.FONT_HERSHEY_SIMPLEX, 0.75, (30, 156, 2), 2, cv2.LINE_AA)
        fig = plt.figure()
        plt.imshow(image_np)
if __name__ == "__main__":
ot = NetworkService()
ot.runNode() | [
"cv2.rectangle",
"re.compile",
"rclpy.spin_once",
"rclpy.init",
"rclpy.create_node",
"numpy.save",
"matplotlib.pyplot.imshow",
"rclpy.ok",
"cv_bridge.CvBridgeError",
"model_src.model.PolicyTranslationModel",
"numpy.asarray",
"cv_bridge.boost.cv_bridge_boost.cvtColor2",
"cv_bridge.CvBridge",
... | [((1287, 1366), 'model_src.model.PolicyTranslationModel', 'PolicyTranslationModel', ([], {'od_path': 'FRCNN_PATH', 'glove_path': 'GLOVE_PATH', 'special': 'None'}), '(od_path=FRCNN_PATH, glove_path=GLOVE_PATH, special=None)\n', (1309, 1366), False, 'from model_src.model import PolicyTranslationModel\n'), ((1196, 1208), 'utils.tf_util.trainOnCPU', 'trainOnCPU', ([], {}), '()\n', (1206, 1208), False, 'from utils.tf_util import trainOnCPU, limitGPUMemory\n'), ((1219, 1235), 'utils.tf_util.limitGPUMemory', 'limitGPUMemory', ([], {}), '()\n', (1233, 1235), False, 'from utils.tf_util import trainOnCPU, limitGPUMemory\n'), ((1402, 1435), 'numpy.ones', 'np.ones', (['(bs, 15)'], {'dtype': 'np.int64'}), '((bs, 15), dtype=np.int64)\n', (1409, 1435), True, 'import numpy as np\n'), ((1441, 1478), 'numpy.ones', 'np.ones', (['(bs, 6, 5)'], {'dtype': 'np.float32'}), '((bs, 6, 5), dtype=np.float32)\n', (1448, 1478), True, 'import numpy as np\n'), ((1484, 1523), 'numpy.ones', 'np.ones', (['(bs, 500, 7)'], {'dtype': 'np.float32'}), '((bs, 500, 7), dtype=np.float32)\n', (1491, 1523), True, 'import numpy as np\n'), ((1714, 1735), 're.compile', 're.compile', (['"""[^a-z ]"""'], {}), "('[^a-z ]')\n", (1724, 1735), False, 'import re\n'), ((1765, 1775), 'cv_bridge.CvBridge', 'CvBridge', ([], {}), '()\n', (1773, 1775), False, 'from cv_bridge import CvBridge, CvBridgeError\n'), ((1816, 1837), 'rclpy.init', 'rclpy.init', ([], {'args': 'None'}), '(args=None)\n', (1826, 1837), False, 'import rclpy\n'), ((1858, 1893), 'rclpy.create_node', 'rclpy.create_node', (['"""neural_network"""'], {}), "('neural_network')\n", (1875, 1893), False, 'import rclpy\n'), ((2151, 2161), 'rclpy.ok', 'rclpy.ok', ([], {}), '()\n', (2159, 2161), False, 'import rclpy\n'), ((2312, 2328), 'rclpy.shutdown', 'rclpy.shutdown', ([], {}), '()\n', (2326, 2328), False, 'import rclpy\n'), ((3350, 3364), 'numpy.copy', 'np.copy', (['value'], {}), '(value)\n', (3357, 3364), True, 'import numpy as np\n'), ((3730, 3780), 
'numpy.zeros', 'np.zeros', (['(target, trj.shape[1])'], {'dtype': 'np.float32'}), '((target, trj.shape[1]), dtype=np.float32)\n', (3738, 3780), True, 'import numpy as np\n'), ((4450, 4465), 'numpy.dtype', 'np.dtype', (['dtype'], {}), '(dtype)\n', (4458, 4465), True, 'import numpy as np\n'), ((6491, 6533), 'numpy.asarray', 'np.asarray', (['self.history'], {'dtype': 'np.float32'}), '(self.history, dtype=np.float32)\n', (6501, 6533), True, 'import numpy as np\n'), ((9329, 9347), 'matplotlib.pyplot.subplots', 'plt.subplots', (['(3)', '(3)'], {}), '(3, 3)\n', (9341, 9347), True, 'import matplotlib.pyplot as plt\n'), ((9948, 9962), 'numpy.argmax', 'np.argmax', (['atn'], {}), '(atn)\n', (9957, 9962), True, 'import numpy as np\n'), ((10744, 10756), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (10754, 10756), True, 'import matplotlib.pyplot as plt\n'), ((10765, 10785), 'matplotlib.pyplot.imshow', 'plt.imshow', (['image_np'], {}), '(image_np)\n', (10775, 10785), True, 'import matplotlib.pyplot as plt\n'), ((2175, 2201), 'rclpy.spin_once', 'rclpy.spin_once', (['self.node'], {}), '(self.node)\n', (2190, 2201), False, 'import rclpy\n'), ((3389, 3413), 'numpy.expand_dims', 'np.expand_dims', (['v_min', '(0)'], {}), '(v_min, 0)\n', (3403, 3413), True, 'import numpy as np\n'), ((3460, 3484), 'numpy.expand_dims', 'np.expand_dims', (['v_max', '(0)'], {}), '(v_max, 0)\n', (3474, 3484), True, 'import numpy as np\n'), ((4558, 4595), 'numpy.asarray', 'np.asarray', (['img_msg.data'], {'dtype': 'dtype'}), '(img_msg.data, dtype=dtype)\n', (4568, 4595), True, 'import numpy as np\n'), ((4694, 4772), 'numpy.ndarray', 'np.ndarray', ([], {'shape': '(img_msg.height, img_msg.width)', 'dtype': 'dtype', 'buffer': 'img_buf'}), '(shape=(img_msg.height, img_msg.width), dtype=dtype, buffer=img_buf)\n', (4704, 4772), True, 'import numpy as np\n'), ((4832, 4926), 'numpy.ndarray', 'np.ndarray', ([], {'shape': '(img_msg.height, img_msg.width, n_channels)', 'dtype': 'dtype', 'buffer': 
'img_buf'}), '(shape=(img_msg.height, img_msg.width, n_channels), dtype=dtype,\n buffer=img_buf)\n', (4842, 4926), True, 'import numpy as np\n'), ((5225, 5274), 'cv_bridge.boost.cv_bridge_boost.cvtColor2', 'cvtColor2', (['im', 'img_msg.encoding', 'desired_encoding'], {}), '(im, img_msg.encoding, desired_encoding)\n', (5234, 5274), False, 'from cv_bridge.boost.cv_bridge_boost import cvtColor2\n'), ((7559, 7692), 'utils.intprim.gaussian_model.GaussianModel', 'GaussianModel', ([], {'degree': '(11)', 'scale': '(0.012)', 'observed_dof_names': "('Base', 'Shoulder', 'Ellbow', 'Wrist1', 'Wrist2', 'Wrist3', 'Gripper')"}), "(degree=11, scale=0.012, observed_dof_names=('Base',\n 'Shoulder', 'Ellbow', 'Wrist1', 'Wrist2', 'Wrist3', 'Gripper'))\n", (7572, 7692), False, 'from utils.intprim.gaussian_model import GaussianModel\n'), ((7708, 7752), 'numpy.linspace', 'np.linspace', (['(0)', '(1)', 'trj_len'], {'dtype': 'np.float64'}), '(0, 1, trj_len, dtype=np.float64)\n', (7719, 7752), True, 'import numpy as np\n'), ((7965, 7989), 'numpy.asarray', 'np.asarray', (['trajectories'], {}), '(trajectories)\n', (7975, 7989), True, 'import numpy as np\n'), ((8002, 8039), 'numpy.save', 'np.save', (['"""trajectories"""', 'trajectories'], {}), "('trajectories', trajectories)\n", (8009, 8039), True, 'import numpy as np\n'), ((8052, 8084), 'numpy.save', 'np.save', (['"""history"""', 'self.history'], {}), "('history', self.history)\n", (8059, 8084), True, 'import numpy as np\n'), ((8147, 8196), 'numpy.zeros', 'np.zeros', (['(trj_len, trj_len, 7)'], {'dtype': 'np.float32'}), '((trj_len, trj_len, 7), dtype=np.float32)\n', (8155, 8196), True, 'import numpy as np\n'), ((8322, 8348), 'numpy.asarray', 'np.asarray', (['gen_trajectory'], {}), '(gen_trajectory)\n', (8332, 8348), True, 'import numpy as np\n'), ((8361, 8402), 'numpy.save', 'np.save', (['"""gen_trajectory"""', 'gen_trajectory'], {}), "('gen_trajectory', gen_trajectory)\n", (8368, 8402), True, 'import numpy as np\n'), ((10391, 10440), 
'cv2.rectangle', 'cv2.rectangle', (['image_np', 'pt1', 'pt2', '(156, 2, 2)', '(1)'], {}), '(image_np, pt1, pt2, (156, 2, 2), 1)\n', (10404, 10440), False, 'import cv2\n'), ((3868, 3901), 'numpy.linspace', 'np.linspace', (['(0.0)', '(1.0)'], {'num': 'target'}), '(0.0, 1.0, num=target)\n', (3879, 3901), True, 'import numpy as np\n'), ((3903, 3944), 'numpy.linspace', 'np.linspace', (['(0.0)', '(1.0)'], {'num': 'current_length'}), '(0.0, 1.0, num=current_length)\n', (3914, 3944), True, 'import numpy as np\n'), ((5327, 5343), 'cv_bridge.CvBridgeError', 'CvBridgeError', (['e'], {}), '(e)\n', (5340, 5343), False, 'from cv_bridge import CvBridge, CvBridgeError\n'), ((5779, 5824), 'tensorflow.convert_to_tensor', 'tf.convert_to_tensor', (['[image]'], {'dtype': 'tf.uint8'}), '([image], dtype=tf.uint8)\n', (5799, 5824), True, 'import tensorflow as tf\n'), ((6595, 6629), 'numpy.tile', 'np.tile', (['[self.language]', '[250, 1]'], {}), '([self.language], [250, 1])\n', (6602, 6629), True, 'import numpy as np\n'), ((6681, 6718), 'numpy.tile', 'np.tile', (['[self.features]', '[250, 1, 1]'], {}), '([self.features], [250, 1, 1])\n', (6688, 6718), True, 'import numpy as np\n'), ((6771, 6800), 'numpy.tile', 'np.tile', (['[robot]', '[250, 1, 1]'], {}), '([robot], [250, 1, 1])\n', (6778, 6800), True, 'import numpy as np\n'), ((6913, 6931), 'tensorflow.constant', 'tf.constant', (['(False)'], {}), '(False)\n', (6924, 6931), True, 'import tensorflow as tf\n'), ((6945, 6962), 'tensorflow.constant', 'tf.constant', (['(True)'], {}), '(True)\n', (6956, 6962), True, 'import tensorflow as tf\n'), ((6990, 7028), 'tensorflow.math.reduce_mean', 'tf.math.reduce_mean', (['generated'], {'axis': '(0)'}), '(generated, axis=0)\n', (7009, 7028), True, 'import tensorflow as tf\n'), ((7063, 7100), 'tensorflow.math.reduce_std', 'tf.math.reduce_std', (['generated'], {'axis': '(0)'}), '(generated, axis=0)\n', (7081, 7100), True, 'import tensorflow as tf\n'), ((7208, 7244), 'tensorflow.math.reduce_mean', 
'tf.math.reduce_mean', (['weights'], {'axis': '(0)'}), '(weights, axis=0)\n', (7227, 7244), True, 'import tensorflow as tf\n'), ((7280, 7314), 'tensorflow.math.reduce_mean', 'tf.math.reduce_mean', (['phase'], {'axis': '(0)'}), '(phase, axis=0)\n', (7299, 7314), True, 'import tensorflow as tf\n'), ((10500, 10550), 'cv2.rectangle', 'cv2.rectangle', (['image_np', 'pt1', 'pt2', '(30, 156, 2)', '(2)'], {}), '(image_np, pt1, pt2, (30, 156, 2), 2)\n', (10513, 10550), False, 'import cv2\n'), ((6334, 6360), 'numpy.expand_dims', 'np.expand_dims', (['classes', '(1)'], {}), '(classes, 1)\n', (6348, 6360), True, 'import numpy as np\n'), ((7139, 7166), 'tensorflow.math.reduce_mean', 'tf.math.reduce_mean', (['dmp_dt'], {}), '(dmp_dt)\n', (7158, 7166), True, 'import tensorflow as tf\n')] |
from collections import defaultdict
def defaultdicttree():
    """Factory for an arbitrarily nested ("autovivifying") defaultdict tree."""
    return defaultdict(defaultdicttree)
def defaultdict_to_dict(d):
    """Recursively convert a (possibly nested) defaultdict tree to plain dicts.

    Fix: the original only recursed while the *current* level was a
    defaultdict, so defaultdicts nested inside plain dicts were left
    unconverted. Non-dict values are returned unchanged.
    """
    if isinstance(d, dict):
        return {k: defaultdict_to_dict(v) for k, v in d.items()}
    return d
| [
"collections.defaultdict"
] | [((71, 99), 'collections.defaultdict', 'defaultdict', (['defaultdicttree'], {}), '(defaultdicttree)\n', (82, 99), False, 'from collections import defaultdict\n')] |
import time
import queue
import sys
import numpy as np
from scipy import optimize as sci_opt
from .node import Node
from .utilities import branch, is_integral
class BNBTree:
    def __init__(self, x, y, inttol=1e-4, reltol=1e-4):
        """
        Initiate a BnB Tree to solve the least squares regression problem with
        l0l2 regularization.

        Parameters
        ----------
        x: np.array
            n x p numpy array
        y: np.array
            1 dimensional numpy array of size n
        inttol: float
            The integral tolerance of a variable.
        reltol: float
            primal-dual relative tolerance
        """
        self.x = x
        self.y = y
        self.inttol = inttol
        self.reltol = reltol
        # Precomputed column norms ||x_i||^2, reused by every node relaxation.
        self.xi_xi = np.sum(x * x, axis=0)
        # The number of features
        self.p = x.shape[1]
        # The number of samples
        self.n = x.shape[0]
        # Frontier queues: FIFO for breadth-first, LIFO for depth-first dives.
        self.node_bfs_queue = queue.Queue()
        self.node_dfs_queue = queue.LifoQueue()
        # Count of open nodes per tree level.
        self.levels = {}
        self.number_of_nodes = 0
        self.root = None
def solve(self, l0, l2, m, gaptol=1e-2, warm_start=None, mu=0.95,
branching='maxfrac', l1solver='l1cd', number_of_dfs_levels=0,
verbose=False):
"""
Solve the least squares problem with l0l2 regularization
Parameters
----------
l0: float
The zeroth norm coefficient
l2: float
The second norm coefficient
m: float
features bound (big M)
gaptol: float
the relative gap between the upper and lower bound after which the
algorithm will be terminated
warm_start: np.array
(p x 1) array representing a warm start
branching: str
'maxfrac' or 'strong'
l1solver: str
'l1cd', 'gurobi' or 'mosek'
mu: float
Used with strong branching
number_of_dfs_levels: int
number of levels to solve as dfs
verbose: int
print progress
Returns
-------
tuple
uppersol, upperbound, lower_bound, best_gap, sol_time
"""
st = time.time()
if warm_start is None:
upperbound = sys.maxsize
uppersol = None
else:
if verbose:
print("using a warm start")
support = np.nonzero(warm_start)[0]
x_support = self.x[:, support]
x_ridge = np.sqrt(2 * l2) * np.identity(len(support))
x_upper = np.concatenate((x_support, x_ridge), axis=0)
y_upper = np.concatenate((self.y, np.zeros(len(support))), axis=0)
res = sci_opt.lsq_linear(x_upper, y_upper, (-m, m))
upperbound = res.cost + l0 * len(support)
uppersol = warm_start
uppersol[support] = res.x
if verbose:
print(f"initializing using a warm start took {time.time() - st}")
# upper and lower bounds
zlb = np.zeros(self.p)
zub = np.ones(self.p)
# root node
self.root = Node(None, zlb, zub, x=self.x, y=self.y, l0=l0, l2=l2, m=m,
xi_xi=self.xi_xi)
self.node_bfs_queue.put(self.root)
# lower and upper bounds initialization
lower_bound = {}
dual_bound = {}
self.levels = {0: 1}
min_open_level = 0
if verbose:
print(f'solving using {number_of_dfs_levels} dfs levels')
while self.node_bfs_queue.qsize() > 0 or self.node_dfs_queue.qsize() > 0:
# get node
if self.node_dfs_queue.qsize() > 0:
current_node = self.node_dfs_queue.get()
else:
current_node = self.node_bfs_queue.get()
# print(current_node.level, upperbound, self.levels)
# prune?
if current_node.parent_cost and upperbound <= \
current_node.parent_cost:
self.levels[current_node.level] -= 1
# self.leaves.append(current_node)
continue
# calculate lower bound and update
self.number_of_nodes += 1
current_lower_bound, current_dual_cost = current_node.\
lower_solve(l1solver, self.reltol, self.inttol)
lower_bound[current_node.level] = \
min(current_lower_bound,
lower_bound.get(current_node.level, sys.maxsize))
dual_bound[current_node.level] = \
min(current_dual_cost,
dual_bound.get(current_node.level, sys.maxsize))
self.levels[current_node.level] -= 1
# update gap?
if self.levels[min_open_level] == 0:
del self.levels[min_open_level]
min_value = max([j for i, j in dual_bound.items()
if i <= min_open_level])
best_gap = (upperbound - min_value)/abs(upperbound)
if verbose:
print(f'l: {min_open_level}, (d: {min_value}, '
f'p: {lower_bound[min_open_level]}), u: {upperbound},'
f' g: {best_gap}, t: {time.time() - st} s')
# arrived at a solution?
if best_gap <= gaptol:
# self.leaves += [current_node] + \
# list(self.node_bfs_queue.queue) + \
# list(self.node_dfs_queue.queue)
return uppersol, upperbound, lower_bound, best_gap, \
time.time() - st
min_open_level += 1
# integral solution?
if is_integral(current_node.lower_bound_z, self.inttol):
current_upper_bound = current_lower_bound
if current_upper_bound < upperbound:
upperbound = current_upper_bound
uppersol = current_node.lower_bound_solution
# self.leaves.append(current_node)
if verbose:
print('itegral:', current_node)
# branch?
elif current_dual_cost < upperbound:
current_upper_bound = current_node.upper_solve()
if current_upper_bound < upperbound:
upperbound = current_upper_bound
uppersol = current_node.upper_bound_solution
left_node, right_node = branch(current_node, self.x, l0, l2, m,
self.xi_xi, self.inttol,
branching, mu)
self.levels[current_node.level + 1] = \
self.levels.get(current_node.level + 1, 0) + 2
if current_node.level < min_open_level + number_of_dfs_levels:
self.node_dfs_queue.put(right_node)
self.node_dfs_queue.put(left_node)
else:
self.node_bfs_queue.put(right_node)
self.node_bfs_queue.put(left_node)
# prune?
else:
pass
# self.leaves.append(current_node)
min_value = max([j for i, j in dual_bound.items()
if i <= min_open_level])
best_gap = (upperbound - min_value)/abs(upperbound)
return uppersol, upperbound, lower_bound, best_gap, time.time() - st
# def get_lower_optimal_node(self):
# self.leaves = sorted(self.leaves)
# if self.leaves[-1].lower_bound_value:
# return self.leaves[-1]
# else:
# return self.leaves[-1].parent
#
# @staticmethod
# def support_list(current_node):
# list_ = []
# while current_node:
# list_.append(current_node.support)
# current_node = current_node.parent
# return list_
#
# def optimal_support_list(self):
# list_ = []
# current_node = self.get_lower_optimal_node()
# while current_node:
# list_.append(current_node.support)
# current_node = current_node.parent
# return list_
| [
"numpy.sqrt",
"numpy.ones",
"numpy.sum",
"queue.LifoQueue",
"numpy.zeros",
"scipy.optimize.lsq_linear",
"numpy.concatenate",
"numpy.nonzero",
"queue.Queue",
"time.time"
] | [((776, 797), 'numpy.sum', 'np.sum', (['(x * x)'], {'axis': '(0)'}), '(x * x, axis=0)\n', (782, 797), True, 'import numpy as np\n'), ((919, 932), 'queue.Queue', 'queue.Queue', ([], {}), '()\n', (930, 932), False, 'import queue\n'), ((963, 980), 'queue.LifoQueue', 'queue.LifoQueue', ([], {}), '()\n', (978, 980), False, 'import queue\n'), ((2217, 2228), 'time.time', 'time.time', ([], {}), '()\n', (2226, 2228), False, 'import time\n'), ((3045, 3061), 'numpy.zeros', 'np.zeros', (['self.p'], {}), '(self.p)\n', (3053, 3061), True, 'import numpy as np\n'), ((3076, 3091), 'numpy.ones', 'np.ones', (['self.p'], {}), '(self.p)\n', (3083, 3091), True, 'import numpy as np\n'), ((2586, 2630), 'numpy.concatenate', 'np.concatenate', (['(x_support, x_ridge)'], {'axis': '(0)'}), '((x_support, x_ridge), axis=0)\n', (2600, 2630), True, 'import numpy as np\n'), ((2728, 2773), 'scipy.optimize.lsq_linear', 'sci_opt.lsq_linear', (['x_upper', 'y_upper', '(-m, m)'], {}), '(x_upper, y_upper, (-m, m))\n', (2746, 2773), True, 'from scipy import optimize as sci_opt\n'), ((2429, 2451), 'numpy.nonzero', 'np.nonzero', (['warm_start'], {}), '(warm_start)\n', (2439, 2451), True, 'import numpy as np\n'), ((2520, 2535), 'numpy.sqrt', 'np.sqrt', (['(2 * l2)'], {}), '(2 * l2)\n', (2527, 2535), True, 'import numpy as np\n'), ((7490, 7501), 'time.time', 'time.time', ([], {}), '()\n', (7499, 7501), False, 'import time\n'), ((2978, 2989), 'time.time', 'time.time', ([], {}), '()\n', (2987, 2989), False, 'import time\n'), ((5654, 5665), 'time.time', 'time.time', ([], {}), '()\n', (5663, 5665), False, 'import time\n'), ((5253, 5264), 'time.time', 'time.time', ([], {}), '()\n', (5262, 5264), False, 'import time\n')] |
# Generated by Django 3.0.2 on 2020-01-19 09:55
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
migrations.swappable_dependency(settings.AUTH_USER_MODEL),
('miniGithub', '0002_project_owner'),
]
operations = [
migrations.CreateModel(
name='Comment',
fields=[
('custom_event_ptr', models.OneToOneField(auto_created=True, on_delete=django.db.models.deletion.CASCADE, parent_link=True, primary_key=True, serialize=False, to='miniGithub.Custom_Event')),
('description', models.CharField(max_length=500)),
],
bases=('miniGithub.custom_event',),
),
migrations.AlterField(
model_name='custom_event',
name='creator',
field=models.ForeignKey(null=True, on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL),
),
migrations.CreateModel(
name='Problem',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('title', models.CharField(max_length=100)),
('base_problem', models.ForeignKey(null=True, on_delete=django.db.models.deletion.CASCADE, related_name='problem', to='miniGithub.Problem')),
('project', models.ForeignKey(null=True, on_delete=django.db.models.deletion.CASCADE, to='miniGithub.Project')),
],
),
migrations.AddField(
model_name='custom_event',
name='problem',
field=models.ForeignKey(null=True, on_delete=django.db.models.deletion.CASCADE, to='miniGithub.Problem'),
),
]
| [
"django.db.models.OneToOneField",
"django.db.models.ForeignKey",
"django.db.models.AutoField",
"django.db.migrations.swappable_dependency",
"django.db.models.CharField"
] | [((227, 284), 'django.db.migrations.swappable_dependency', 'migrations.swappable_dependency', (['settings.AUTH_USER_MODEL'], {}), '(settings.AUTH_USER_MODEL)\n', (258, 284), False, 'from django.db import migrations, models\n'), ((903, 1009), 'django.db.models.ForeignKey', 'models.ForeignKey', ([], {'null': '(True)', 'on_delete': 'django.db.models.deletion.CASCADE', 'to': 'settings.AUTH_USER_MODEL'}), '(null=True, on_delete=django.db.models.deletion.CASCADE,\n to=settings.AUTH_USER_MODEL)\n', (920, 1009), False, 'from django.db import migrations, models\n'), ((1702, 1804), 'django.db.models.ForeignKey', 'models.ForeignKey', ([], {'null': '(True)', 'on_delete': 'django.db.models.deletion.CASCADE', 'to': '"""miniGithub.Problem"""'}), "(null=True, on_delete=django.db.models.deletion.CASCADE,\n to='miniGithub.Problem')\n", (1719, 1804), False, 'from django.db import migrations, models\n'), ((476, 653), 'django.db.models.OneToOneField', 'models.OneToOneField', ([], {'auto_created': '(True)', 'on_delete': 'django.db.models.deletion.CASCADE', 'parent_link': '(True)', 'primary_key': '(True)', 'serialize': '(False)', 'to': '"""miniGithub.Custom_Event"""'}), "(auto_created=True, on_delete=django.db.models.deletion\n .CASCADE, parent_link=True, primary_key=True, serialize=False, to=\n 'miniGithub.Custom_Event')\n", (496, 653), False, 'from django.db import migrations, models\n'), ((678, 710), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(500)'}), '(max_length=500)\n', (694, 710), False, 'from django.db import migrations, models\n'), ((1122, 1215), 'django.db.models.AutoField', 'models.AutoField', ([], {'auto_created': '(True)', 'primary_key': '(True)', 'serialize': '(False)', 'verbose_name': '"""ID"""'}), "(auto_created=True, primary_key=True, serialize=False,\n verbose_name='ID')\n", (1138, 1215), False, 'from django.db import migrations, models\n'), ((1240, 1272), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(100)'}), 
'(max_length=100)\n', (1256, 1272), False, 'from django.db import migrations, models\n'), ((1308, 1434), 'django.db.models.ForeignKey', 'models.ForeignKey', ([], {'null': '(True)', 'on_delete': 'django.db.models.deletion.CASCADE', 'related_name': '"""problem"""', 'to': '"""miniGithub.Problem"""'}), "(null=True, on_delete=django.db.models.deletion.CASCADE,\n related_name='problem', to='miniGithub.Problem')\n", (1325, 1434), False, 'from django.db import migrations, models\n'), ((1461, 1563), 'django.db.models.ForeignKey', 'models.ForeignKey', ([], {'null': '(True)', 'on_delete': 'django.db.models.deletion.CASCADE', 'to': '"""miniGithub.Project"""'}), "(null=True, on_delete=django.db.models.deletion.CASCADE,\n to='miniGithub.Project')\n", (1478, 1563), False, 'from django.db import migrations, models\n')] |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
test_elasticstack
------------
Tests for `elasticstack` forms module.
"""
from django import forms
from django.test import TestCase
from elasticstack.forms import SearchForm
class TestForms(TestCase):
def test_named_search_field(self):
"""Ensure that the `q` field can be optionally used"""
class MyForm(SearchForm):
s = forms.CharField(label='Search')
f = forms.CharField(label='More search')
search_field_name = 's'
form = MyForm()
self.assertTrue('s' in form.fields)
self.assertFalse('q' in form.fields)
| [
"django.forms.CharField"
] | [((411, 442), 'django.forms.CharField', 'forms.CharField', ([], {'label': '"""Search"""'}), "(label='Search')\n", (426, 442), False, 'from django import forms\n'), ((459, 495), 'django.forms.CharField', 'forms.CharField', ([], {'label': '"""More search"""'}), "(label='More search')\n", (474, 495), False, 'from django import forms\n')] |
from flask import render_template, Flask, flash, redirect, url_for, abort, request
from flask_login import login_user, logout_user, login_required
from werkzeug.urls import url_parse
from app import app, db
from app.forms import *
from app.models import *
@app.route('/')
@app.route('/landing')
def landing():
return render_template('Landing.html', title='Landing')
@app.route('/artistlist')
def artistlist():
artists=Artist.query.all()
return render_template('Artists.html', artists=artists, title='Artists')
@app.route('/login', methods=['GET', 'POST'])
def login():
form = loginForm()
if form.validate_on_submit():
user=User.query.filter_by(username=form.username.data).first()
if user is None or not user.check_password(form.password.data):
flash('Incorrect name or password')
return redirect(url_for('login'))
login_user(user)
return redirect(url_for('landing'))
return render_template('Login.html', form=form, title='Login')
@app.route('/search', methods=['GET','POST'])
def search():
searched = Product.query.all()
form = searchForm()
if form.validate_on_submit():
searched = Product.query.filter_by(name=form.searchable.data).all()
return render_template('search.html', searchable=searched, form=form, title='Search')
@app.route('/user/<name>')
def user(name):
if len(User.query.filter_by(username=name).all()) > 0:
chosenUser = User.query.filter_by(username=name).first()
chosenProducts = Product.query.filter_by(Id=chosenUser.id).all()
return render_template('user.html', title='User', userName=chosenUser.username, chosenUser=chosenUser,
productList=chosenProducts)
else:
abort(404)
@app.route('/product/<productName>')
def product(productName):
if len(Product.query.filter_by(name=productName).all()) > 0:
chosenProduct=Product.query.filter_by(name=productName).first()
chosenUser=User.query.filter_by(id=chosenProduct.userId).first()
userName=chosenUser.username
return render_template('product.html', title='Product', name=productName, userPosting=userName,
description=chosenProduct.description, date=chosenProduct.dateHarvested,
productPrice=chosenProduct.price, amount=chosenProduct.amount)
else:
abort(404)
@app.route('/newProduct', methods=['GET','POST'])
def newProduct():
form = productForm()
if form.validate_on_submit():
flash('New product created: {}'.format(form.name.data))
newP = Product(name=form.name.data, description=form.description.data, price=form.price.data, amount=form.amount.data, dateHarvested=form.date.data, userId=4)
db.session.add(newP)
db.session.commit()
return redirect(url_for('landing'))
return render_template('newProduct.html', title='New Product', form=form)
@app.route('/newartist', methods=['GET', 'POST'])
@login_required
def newartist():
form = artistForm()
if form.validate_on_submit():
if len(Artist.query.filter_by(firstname=form.artistName.data).all()) > 0:
flash('That name already exists')
else:
flash('New page created: {}'.format(form.artistName.data))
newA = Artist(firstname=form.artistName.data, lastname='', hometown=form.hometown.data, description=form.description.data)
db.session.add(newA)
db.session.commit()
return redirect(url_for('artistlist'))
return render_template('NewArtist.html', form=form, title='New Artist')
@app.route('/newvenue', methods=['GET','POST'])
def newvenue():
form = venueForm()
if form.validate_on_submit():
if len(Venue.query.filter_by(name=form.name.data).all()) > 0:
flash('That venue already exists')
else:
flash('New venue created: {}'.format(form.name.data))
newV = Venue(name=form.name.data, description=form.description.data)
db.session.add(newV)
db.session.commit()
return redirect(url_for('artistlist'))
return render_template('NewVenue.html', title='New Venue', form=form)
@app.route('/newevent', methods=['GET', 'POST'])
def newevent():
form = eventForm()
form.venue.choices = [(venue.id, venue.name) for venue in Venue.query.all()]
form.artists.choices = [(artist.id, artist.firstname) for artist in Artist.query.all()]
if form.validate_on_submit():
if len(Event.query.filter_by(name=form.name.data).all()) > 0:
flash('That event already exists')
else:
flash('New event created: {}'.format(form.name.data))
newE = Event(name=form.name.data, description=form.description.data, time=form.time.data, venueId=form.venue.data)
db.session.add(newE)
db.session.commit()
for a in form.artists.data:
newX = ArtistToEvent(artistId=Artist.query.filter_by(id=a).first().id, eventId=newE.id)
db.session.add(newX)
db.session.commit()
return redirect(url_for('artistlist'))
return render_template('NewEvent.html', title='New Event', form=form)
@app.route('/artist/<name>')
#instructor = Instructor.query.filter_by(firstname="Alex").first()
def artist(name):
if len(Artist.query.filter_by(firstname=name).all()) > 0:
chosenArtist=Artist.query.filter_by(firstname=name).first()
chosenJoins=ArtistToEvent.query.filter_by(artistId=chosenArtist.id).all()
chosenEvents = []
trackingInt=0
for oneEvent in chosenJoins:
chosenEvents.append(Event.query.filter_by(id=chosenJoins[trackingInt].eventId).first())
trackingInt=trackingInt+1
#chosenEvents=Event.query.filter_by(id=chosenJoin.eventId).all()
return render_template('Artist.html', title='Artist', artistName=chosenArtist.firstname, hometown=chosenArtist.hometown, description=chosenArtist.description, event_list=chosenEvents)
else:
abort(404)
@app.route('/register', methods=['GET','POST'])
def register():
form = registerForm()
if form.validate_on_submit():
if len(User.query.filter_by(username=form.username.data).all()) > 0:
flash('That name already exists')
else:
flash('New user created. You can now log in.')
newU= User(username=form.username.data, password=form.password.data)
newU.set_password(form.password.data)
db.session.add(newU)
db.session.commit()
return redirect(url_for('landing'))
return render_template('Register.html', form=form, title='Register')
@app.route('/logout')
def logout():
logout_user()
flash("User has been logged out.")
return redirect(url_for('landing'))
@app.route('/populate_db')
def populate_db():
a1=Artist(firstname='Anne', lastname='Apricot', hometown='Ithaca', description='A')
a2=Artist(firstname='Ben', lastname='Barrel', hometown='Ithaca', description='B')
a3=Artist(firstname='Cathy', lastname='Chowder', hometown='Ithaca', description='C')
a4=Artist(firstname='Dan', lastname='Derringer', hometown='Delanson', description='D')
e1=Event(name='Augustfest', description='A', venueId='0')
e2 = Event(name='Burgerfest', description='B', venueId='1')
e3 = Event(name='Ciderfest', description='C', venueId='2')
e4 = Event(name='Donutfest', description='D', venueId='1')
e5 = Event(name='Earwigfest', description='E', venueId='1')
e6 = Event(name='Falafelfest', description='F', venueId='2')
ate1 = ArtistToEvent(artistId=1, eventId=1)
ate2 = ArtistToEvent(artistId=2, eventId=2)
ate3 = ArtistToEvent(artistId=3, eventId=3)
ate4 = ArtistToEvent(artistId=4, eventId=4)
ate5 = ArtistToEvent(artistId=1, eventId=5)
ate6 = ArtistToEvent(artistId=2, eventId=5)
ate7 = ArtistToEvent(artistId=3, eventId=6)
ate8 = ArtistToEvent(artistId=1, eventId=6)
v1 = Venue(name='Adelide Acres', description='A')
v2 = Venue(name='Baltimore Barrelers', description='B')
v3 = Venue(name='Canary Church', description='C')
u1 = User(username='Peter',password='<PASSWORD>')
u1.set_password('<PASSWORD>')
u2 = User(username='Old Man McFarmer', password='<PASSWORD>')
u2.set_password('<PASSWORD>')
u3 = User(username='Young Man McFarmer', password='<PASSWORD>')
u3.set_password('<PASSWORD>')
p1 = Product(name='Eggs', amount = 12, dateHarvested = '12-12-2020', description = 'delicious eggs', price = '$0.99'
, userId=1)
p2 = Product(name='Tomatoes', amount=20, dateHarvested='12-14-2020', description='delicious tomatoes', price='$1.99',
userId=2)
p3 = Product(name='Beets', amount=30, dateHarvested='12-10-2020', description='delicious beets', price='$2.99'
, userId=3)
p4 = Product(name='Bacon', amount=10, dateHarvested='11-20-2020', description='delicious bacon', price='$3.99',
userId=2)
p5 = Product(name='Turnips', amount=40, dateHarvested='12-10-2020', description='delicious turnips', price='$4.99',
userId=3)
db.session.add_all([u1, u2, u3, p1, p2, p3, p4, p5])
db.session.commit()
return "database has been populated."
@app.route('/reset_db')
def reset_db():
flash("Resetting database: deleting old data and repopulating with dummy data")
meta = db.metadata
for table in reversed(meta.sorted_tables):
print('Clear table {}'.format(table))
db.session.execute(table.delete())
db.session.commit()
populate_db()
return "Reset and repopulated data."
| [
"flask.render_template",
"app.db.session.commit",
"flask.flash",
"flask_login.login_user",
"flask_login.logout_user",
"flask.url_for",
"app.db.session.add_all",
"app.app.route",
"app.db.session.add",
"flask.abort"
] | [((260, 274), 'app.app.route', 'app.route', (['"""/"""'], {}), "('/')\n", (269, 274), False, 'from app import app, db\n'), ((276, 297), 'app.app.route', 'app.route', (['"""/landing"""'], {}), "('/landing')\n", (285, 297), False, 'from app import app, db\n'), ((376, 400), 'app.app.route', 'app.route', (['"""/artistlist"""'], {}), "('/artistlist')\n", (385, 400), False, 'from app import app, db\n'), ((530, 574), 'app.app.route', 'app.route', (['"""/login"""'], {'methods': "['GET', 'POST']"}), "('/login', methods=['GET', 'POST'])\n", (539, 574), False, 'from app import app, db\n'), ((1021, 1066), 'app.app.route', 'app.route', (['"""/search"""'], {'methods': "['GET', 'POST']"}), "('/search', methods=['GET', 'POST'])\n", (1030, 1066), False, 'from app import app, db\n'), ((1342, 1367), 'app.app.route', 'app.route', (['"""/user/<name>"""'], {}), "('/user/<name>')\n", (1351, 1367), False, 'from app import app, db\n'), ((1776, 1811), 'app.app.route', 'app.route', (['"""/product/<productName>"""'], {}), "('/product/<productName>')\n", (1785, 1811), False, 'from app import app, db\n'), ((2410, 2459), 'app.app.route', 'app.route', (['"""/newProduct"""'], {'methods': "['GET', 'POST']"}), "('/newProduct', methods=['GET', 'POST'])\n", (2419, 2459), False, 'from app import app, db\n'), ((2946, 2994), 'app.app.route', 'app.route', (['"""/newartist"""'], {'methods': "['GET', 'POST']"}), "('/newartist', methods=['GET', 'POST'])\n", (2955, 2994), False, 'from app import app, db\n'), ((3628, 3675), 'app.app.route', 'app.route', (['"""/newvenue"""'], {'methods': "['GET', 'POST']"}), "('/newvenue', methods=['GET', 'POST'])\n", (3637, 3675), False, 'from app import app, db\n'), ((4218, 4265), 'app.app.route', 'app.route', (['"""/newevent"""'], {'methods': "['GET', 'POST']"}), "('/newevent', methods=['GET', 'POST'])\n", (4227, 4265), False, 'from app import app, db\n'), ((5246, 5273), 'app.app.route', 'app.route', (['"""/artist/<name>"""'], {}), "('/artist/<name>')\n", (5255, 5273), 
False, 'from app import app, db\n'), ((6090, 6137), 'app.app.route', 'app.route', (['"""/register"""'], {'methods': "['GET', 'POST']"}), "('/register', methods=['GET', 'POST'])\n", (6099, 6137), False, 'from app import app, db\n'), ((6728, 6748), 'app.app.route', 'app.route', (['"""/logout"""'], {}), "('/logout')\n", (6737, 6748), False, 'from app import app, db\n'), ((6862, 6887), 'app.app.route', 'app.route', (['"""/populate_db"""'], {}), "('/populate_db')\n", (6871, 6887), False, 'from app import app, db\n'), ((9352, 9374), 'app.app.route', 'app.route', (['"""/reset_db"""'], {}), "('/reset_db')\n", (9361, 9374), False, 'from app import app, db\n'), ((324, 372), 'flask.render_template', 'render_template', (['"""Landing.html"""'], {'title': '"""Landing"""'}), "('Landing.html', title='Landing')\n", (339, 372), False, 'from flask import render_template, Flask, flash, redirect, url_for, abort, request\n'), ((461, 526), 'flask.render_template', 'render_template', (['"""Artists.html"""'], {'artists': 'artists', 'title': '"""Artists"""'}), "('Artists.html', artists=artists, title='Artists')\n", (476, 526), False, 'from flask import render_template, Flask, flash, redirect, url_for, abort, request\n'), ((962, 1017), 'flask.render_template', 'render_template', (['"""Login.html"""'], {'form': 'form', 'title': '"""Login"""'}), "('Login.html', form=form, title='Login')\n", (977, 1017), False, 'from flask import render_template, Flask, flash, redirect, url_for, abort, request\n'), ((1260, 1338), 'flask.render_template', 'render_template', (['"""search.html"""'], {'searchable': 'searched', 'form': 'form', 'title': '"""Search"""'}), "('search.html', searchable=searched, form=form, title='Search')\n", (1275, 1338), False, 'from flask import render_template, Flask, flash, redirect, url_for, abort, request\n'), ((2876, 2942), 'flask.render_template', 'render_template', (['"""newProduct.html"""'], {'title': '"""New Product"""', 'form': 'form'}), "('newProduct.html', title='New 
Product', form=form)\n", (2891, 2942), False, 'from flask import render_template, Flask, flash, redirect, url_for, abort, request\n'), ((3561, 3625), 'flask.render_template', 'render_template', (['"""NewArtist.html"""'], {'form': 'form', 'title': '"""New Artist"""'}), "('NewArtist.html', form=form, title='New Artist')\n", (3576, 3625), False, 'from flask import render_template, Flask, flash, redirect, url_for, abort, request\n'), ((4153, 4215), 'flask.render_template', 'render_template', (['"""NewVenue.html"""'], {'title': '"""New Venue"""', 'form': 'form'}), "('NewVenue.html', title='New Venue', form=form)\n", (4168, 4215), False, 'from flask import render_template, Flask, flash, redirect, url_for, abort, request\n'), ((5180, 5242), 'flask.render_template', 'render_template', (['"""NewEvent.html"""'], {'title': '"""New Event"""', 'form': 'form'}), "('NewEvent.html', title='New Event', form=form)\n", (5195, 5242), False, 'from flask import render_template, Flask, flash, redirect, url_for, abort, request\n'), ((6664, 6725), 'flask.render_template', 'render_template', (['"""Register.html"""'], {'form': 'form', 'title': '"""Register"""'}), "('Register.html', form=form, title='Register')\n", (6679, 6725), False, 'from flask import render_template, Flask, flash, redirect, url_for, abort, request\n'), ((6767, 6780), 'flask_login.logout_user', 'logout_user', ([], {}), '()\n', (6778, 6780), False, 'from flask_login import login_user, logout_user, login_required\n'), ((6785, 6819), 'flask.flash', 'flash', (['"""User has been logged out."""'], {}), "('User has been logged out.')\n", (6790, 6819), False, 'from flask import render_template, Flask, flash, redirect, url_for, abort, request\n'), ((9230, 9282), 'app.db.session.add_all', 'db.session.add_all', (['[u1, u2, u3, p1, p2, p3, p4, p5]'], {}), '([u1, u2, u3, p1, p2, p3, p4, p5])\n', (9248, 9282), False, 'from app import app, db\n'), ((9287, 9306), 'app.db.session.commit', 'db.session.commit', ([], {}), '()\n', (9304, 
9306), False, 'from app import app, db\n'), ((9394, 9473), 'flask.flash', 'flash', (['"""Resetting database: deleting old data and repopulating with dummy data"""'], {}), "('Resetting database: deleting old data and repopulating with dummy data')\n", (9399, 9473), False, 'from flask import render_template, Flask, flash, redirect, url_for, abort, request\n'), ((9632, 9651), 'app.db.session.commit', 'db.session.commit', ([], {}), '()\n', (9649, 9651), False, 'from app import app, db\n'), ((890, 906), 'flask_login.login_user', 'login_user', (['user'], {}), '(user)\n', (900, 906), False, 'from flask_login import login_user, logout_user, login_required\n'), ((1592, 1719), 'flask.render_template', 'render_template', (['"""user.html"""'], {'title': '"""User"""', 'userName': 'chosenUser.username', 'chosenUser': 'chosenUser', 'productList': 'chosenProducts'}), "('user.html', title='User', userName=chosenUser.username,\n chosenUser=chosenUser, productList=chosenProducts)\n", (1607, 1719), False, 'from flask import render_template, Flask, flash, redirect, url_for, abort, request\n'), ((1762, 1772), 'flask.abort', 'abort', (['(404)'], {}), '(404)\n', (1767, 1772), False, 'from flask import render_template, Flask, flash, redirect, url_for, abort, request\n'), ((2095, 2333), 'flask.render_template', 'render_template', (['"""product.html"""'], {'title': '"""Product"""', 'name': 'productName', 'userPosting': 'userName', 'description': 'chosenProduct.description', 'date': 'chosenProduct.dateHarvested', 'productPrice': 'chosenProduct.price', 'amount': 'chosenProduct.amount'}), "('product.html', title='Product', name=productName,\n userPosting=userName, description=chosenProduct.description, date=\n chosenProduct.dateHarvested, productPrice=chosenProduct.price, amount=\n chosenProduct.amount)\n", (2110, 2333), False, 'from flask import render_template, Flask, flash, redirect, url_for, abort, request\n'), ((2396, 2406), 'flask.abort', 'abort', (['(404)'], {}), '(404)\n', (2401, 2406), 
False, 'from flask import render_template, Flask, flash, redirect, url_for, abort, request\n'), ((2773, 2793), 'app.db.session.add', 'db.session.add', (['newP'], {}), '(newP)\n', (2787, 2793), False, 'from app import app, db\n'), ((2802, 2821), 'app.db.session.commit', 'db.session.commit', ([], {}), '()\n', (2819, 2821), False, 'from app import app, db\n'), ((5882, 6068), 'flask.render_template', 'render_template', (['"""Artist.html"""'], {'title': '"""Artist"""', 'artistName': 'chosenArtist.firstname', 'hometown': 'chosenArtist.hometown', 'description': 'chosenArtist.description', 'event_list': 'chosenEvents'}), "('Artist.html', title='Artist', artistName=chosenArtist.\n firstname, hometown=chosenArtist.hometown, description=chosenArtist.\n description, event_list=chosenEvents)\n", (5897, 6068), False, 'from flask import render_template, Flask, flash, redirect, url_for, abort, request\n'), ((6077, 6087), 'flask.abort', 'abort', (['(404)'], {}), '(404)\n', (6082, 6087), False, 'from flask import render_template, Flask, flash, redirect, url_for, abort, request\n'), ((6840, 6858), 'flask.url_for', 'url_for', (['"""landing"""'], {}), "('landing')\n", (6847, 6858), False, 'from flask import render_template, Flask, flash, redirect, url_for, abort, request\n'), ((800, 835), 'flask.flash', 'flash', (['"""Incorrect name or password"""'], {}), "('Incorrect name or password')\n", (805, 835), False, 'from flask import render_template, Flask, flash, redirect, url_for, abort, request\n'), ((931, 949), 'flask.url_for', 'url_for', (['"""landing"""'], {}), "('landing')\n", (938, 949), False, 'from flask import render_template, Flask, flash, redirect, url_for, abort, request\n'), ((2846, 2864), 'flask.url_for', 'url_for', (['"""landing"""'], {}), "('landing')\n", (2853, 2864), False, 'from flask import render_template, Flask, flash, redirect, url_for, abort, request\n'), ((3180, 3213), 'flask.flash', 'flash', (['"""That name already exists"""'], {}), "('That name already 
exists')\n", (3185, 3213), False, 'from flask import render_template, Flask, flash, redirect, url_for, abort, request\n'), ((3446, 3466), 'app.db.session.add', 'db.session.add', (['newA'], {}), '(newA)\n', (3460, 3466), False, 'from app import app, db\n'), ((3479, 3498), 'app.db.session.commit', 'db.session.commit', ([], {}), '()\n', (3496, 3498), False, 'from app import app, db\n'), ((3830, 3864), 'flask.flash', 'flash', (['"""That venue already exists"""'], {}), "('That venue already exists')\n", (3835, 3864), False, 'from flask import render_template, Flask, flash, redirect, url_for, abort, request\n'), ((4038, 4058), 'app.db.session.add', 'db.session.add', (['newV'], {}), '(newV)\n', (4052, 4058), False, 'from app import app, db\n'), ((4071, 4090), 'app.db.session.commit', 'db.session.commit', ([], {}), '()\n', (4088, 4090), False, 'from app import app, db\n'), ((4594, 4628), 'flask.flash', 'flash', (['"""That event already exists"""'], {}), "('That event already exists')\n", (4599, 4628), False, 'from flask import render_template, Flask, flash, redirect, url_for, abort, request\n'), ((4848, 4868), 'app.db.session.add', 'db.session.add', (['newE'], {}), '(newE)\n', (4862, 4868), False, 'from app import app, db\n'), ((4881, 4900), 'app.db.session.commit', 'db.session.commit', ([], {}), '()\n', (4898, 4900), False, 'from app import app, db\n'), ((6302, 6335), 'flask.flash', 'flash', (['"""That name already exists"""'], {}), "('That name already exists')\n", (6307, 6335), False, 'from flask import render_template, Flask, flash, redirect, url_for, abort, request\n'), ((6362, 6408), 'flask.flash', 'flash', (['"""New user created. You can now log in."""'], {}), "('New user created. 
You can now log in.')\n", (6367, 6408), False, 'from flask import render_template, Flask, flash, redirect, url_for, abort, request\n'), ((6552, 6572), 'app.db.session.add', 'db.session.add', (['newU'], {}), '(newU)\n', (6566, 6572), False, 'from app import app, db\n'), ((6585, 6604), 'app.db.session.commit', 'db.session.commit', ([], {}), '()\n', (6602, 6604), False, 'from app import app, db\n'), ((864, 880), 'flask.url_for', 'url_for', (['"""login"""'], {}), "('login')\n", (871, 880), False, 'from flask import render_template, Flask, flash, redirect, url_for, abort, request\n'), ((3527, 3548), 'flask.url_for', 'url_for', (['"""artistlist"""'], {}), "('artistlist')\n", (3534, 3548), False, 'from flask import render_template, Flask, flash, redirect, url_for, abort, request\n'), ((4119, 4140), 'flask.url_for', 'url_for', (['"""artistlist"""'], {}), "('artistlist')\n", (4126, 4140), False, 'from flask import render_template, Flask, flash, redirect, url_for, abort, request\n'), ((5061, 5081), 'app.db.session.add', 'db.session.add', (['newX'], {}), '(newX)\n', (5075, 5081), False, 'from app import app, db\n'), ((5098, 5117), 'app.db.session.commit', 'db.session.commit', ([], {}), '()\n', (5115, 5117), False, 'from app import app, db\n'), ((5146, 5167), 'flask.url_for', 'url_for', (['"""artistlist"""'], {}), "('artistlist')\n", (5153, 5167), False, 'from flask import render_template, Flask, flash, redirect, url_for, abort, request\n'), ((6633, 6651), 'flask.url_for', 'url_for', (['"""landing"""'], {}), "('landing')\n", (6640, 6651), False, 'from flask import render_template, Flask, flash, redirect, url_for, abort, request\n')] |
# Generated by the gRPC Python protocol compiler plugin. DO NOT EDIT!
import grpc
from devices.v1beta1 import DevicesService_Beta1_pb2 as devices_dot_v1beta1_dot_DevicesService__Beta1__pb2
class DevicesStub(object):
  """Specifies the devices service, which enables managed devices to check-in, authorize themselves, and discover their
  identity/role.

  Generated gRPC client stub: each attribute is a callable for one
  unary-unary RPC on the Devices service.
  """
  def __init__(self, channel):
    """Constructor.
    Args:
      channel: A grpc.Channel.
    """
    # Serializers/deserializers come from the generated protobuf module.
    self.Ping = channel.unary_unary(
        '/bloombox.schema.services.devices.v1beta1.Devices/Ping',
        request_serializer=devices_dot_v1beta1_dot_DevicesService__Beta1__pb2.Ping.Request.SerializeToString,
        response_deserializer=devices_dot_v1beta1_dot_DevicesService__Beta1__pb2.Ping.Response.FromString,
        )
    self.Activate = channel.unary_unary(
        '/bloombox.schema.services.devices.v1beta1.Devices/Activate',
        request_serializer=devices_dot_v1beta1_dot_DevicesService__Beta1__pb2.Activation.Request.SerializeToString,
        response_deserializer=devices_dot_v1beta1_dot_DevicesService__Beta1__pb2.Activation.Response.FromString,
        )
class DevicesServicer(object):
  """Specifies the devices service, which enables managed devices to check-in, authorize themselves, and discover their
  identity/role.

  Generated server-side base class: subclass and override the RPC methods.
  """
  def Ping(self, request, context):
    """Ping the device server.
    """
    # Generated default behaviour: report UNIMPLEMENTED until overridden.
    context.set_code(grpc.StatusCode.UNIMPLEMENTED)
    context.set_details('Method not implemented!')
    raise NotImplementedError('Method not implemented!')
  def Activate(self, request, context):
    """Setup and enable a device for live use. If this is the first time the subject device has activated itself,
    initialize or otherwise provision any requisite objects or resources.
    """
    # Generated default behaviour: report UNIMPLEMENTED until overridden.
    context.set_code(grpc.StatusCode.UNIMPLEMENTED)
    context.set_details('Method not implemented!')
    raise NotImplementedError('Method not implemented!')
def add_DevicesServicer_to_server(servicer, server):
  # Register the servicer's RPC methods on the given grpc.Server under the
  # fully-qualified service name, with the generated (de)serializers.
  rpc_method_handlers = {
      'Ping': grpc.unary_unary_rpc_method_handler(
          servicer.Ping,
          request_deserializer=devices_dot_v1beta1_dot_DevicesService__Beta1__pb2.Ping.Request.FromString,
          response_serializer=devices_dot_v1beta1_dot_DevicesService__Beta1__pb2.Ping.Response.SerializeToString,
      ),
      'Activate': grpc.unary_unary_rpc_method_handler(
          servicer.Activate,
          request_deserializer=devices_dot_v1beta1_dot_DevicesService__Beta1__pb2.Activation.Request.FromString,
          response_serializer=devices_dot_v1beta1_dot_DevicesService__Beta1__pb2.Activation.Response.SerializeToString,
      ),
  }
  generic_handler = grpc.method_handlers_generic_handler(
      'bloombox.schema.services.devices.v1beta1.Devices', rpc_method_handlers)
  server.add_generic_rpc_handlers((generic_handler,))
| [
"grpc.method_handlers_generic_handler",
"grpc.unary_unary_rpc_method_handler"
] | [((2690, 2804), 'grpc.method_handlers_generic_handler', 'grpc.method_handlers_generic_handler', (['"""bloombox.schema.services.devices.v1beta1.Devices"""', 'rpc_method_handlers'], {}), "(\n 'bloombox.schema.services.devices.v1beta1.Devices', rpc_method_handlers)\n", (2726, 2804), False, 'import grpc\n'), ((2048, 2319), 'grpc.unary_unary_rpc_method_handler', 'grpc.unary_unary_rpc_method_handler', (['servicer.Ping'], {'request_deserializer': 'devices_dot_v1beta1_dot_DevicesService__Beta1__pb2.Ping.Request.FromString', 'response_serializer': 'devices_dot_v1beta1_dot_DevicesService__Beta1__pb2.Ping.Response.SerializeToString'}), '(servicer.Ping, request_deserializer=\n devices_dot_v1beta1_dot_DevicesService__Beta1__pb2.Ping.Request.\n FromString, response_serializer=\n devices_dot_v1beta1_dot_DevicesService__Beta1__pb2.Ping.Response.\n SerializeToString)\n', (2083, 2319), False, 'import grpc\n'), ((2358, 2645), 'grpc.unary_unary_rpc_method_handler', 'grpc.unary_unary_rpc_method_handler', (['servicer.Activate'], {'request_deserializer': 'devices_dot_v1beta1_dot_DevicesService__Beta1__pb2.Activation.Request.FromString', 'response_serializer': 'devices_dot_v1beta1_dot_DevicesService__Beta1__pb2.Activation.Response.SerializeToString'}), '(servicer.Activate, request_deserializer\n =devices_dot_v1beta1_dot_DevicesService__Beta1__pb2.Activation.Request.\n FromString, response_serializer=\n devices_dot_v1beta1_dot_DevicesService__Beta1__pb2.Activation.Response.\n SerializeToString)\n', (2393, 2645), False, 'import grpc\n')] |
#!/bin/python3
import datetime
import itertools
import sys
from heading import *
days = ['Mon', 'Tue', 'Wed', 'Thu', 'Fri', 'Sat', 'Sun']
def priority_key():
    """Build a sort-key function ranking headings by keyword priority.

    Keywords listed after the '|' separator in KEYWORDS get the heaviest
    weight (len(KEYWORDS)); keywords before it are weighted by their
    distance from the separator. Unknown keywords fall back to
    len(weights) - 1.
    """
    separator = KEYWORDS.index('|')
    ranks = {done_kw: len(KEYWORDS) for done_kw in KEYWORDS[separator + 1:]}
    for offset in range(1, separator + 1):
        ranks[KEYWORDS[separator - offset]] = offset
    def key(heading):
        return ranks.get(heading.keyword, len(ranks) - 1)
    return key
def date_key(heading):
    """Sort key: the heading's date, with undated headings sorted last."""
    # Undated entries map to the largest representable date so they sink
    # to the end of an ascending sort.
    fallback = datetime.date(datetime.MAXYEAR, 1, 1)
    return fallback if heading.date is None else heading.date
def has_date(heading):
    """Predicate: does this heading carry a date?"""
    return not (heading.date is None)
def is_pending(heading):
    """Return True when the heading's keyword marks it as not yet done.

    Pending keywords are those listed before the '|' separator in KEYWORDS;
    unknown keywords are never pending.
    """
    keyword = heading.keyword
    if keyword in KEYWORDS:
        return KEYWORDS.index(keyword) < KEYWORDS.index('|')
    return False
if __name__ == '__main__':
    # NOTE(review): argparse is imported but never used here.
    import argparse
    # Headings come from the wildcard import of `heading` (from_fields_file).
    inputs = from_fields_file(sys.stdin)
    # Keep only dated, still-pending headings, sorted and grouped by date.
    todos = filter(has_date, inputs)
    todos = filter(is_pending, todos)
    todos = sorted(todos, key=date_key)
    todos = itertools.groupby(todos, key=date_key)
    today = datetime.date.today()
    warned = False
    for date, todo_group in todos:
        if date < today and not warned:
            # Print the overdue banner once, before the first past-due group.
            warned = True
            print('\n! Overdue !')
        elif date == today:
            print ('\n= Today =')
        elif date > today:
            print('\n= %s %s =' % (days[date.weekday()], date))
        # Within each day, order entries by keyword priority.
        prioritized = sorted(todo_group, key=priority_key())
        for todo in prioritized:
            print(todo)
| [
"datetime.date.today",
"itertools.groupby",
"datetime.date"
] | [((1050, 1088), 'itertools.groupby', 'itertools.groupby', (['todos'], {'key': 'date_key'}), '(todos, key=date_key)\n', (1067, 1088), False, 'import itertools\n'), ((1101, 1122), 'datetime.date.today', 'datetime.date.today', ([], {}), '()\n', (1120, 1122), False, 'import datetime\n'), ((560, 597), 'datetime.date', 'datetime.date', (['datetime.MAXYEAR', '(1)', '(1)'], {}), '(datetime.MAXYEAR, 1, 1)\n', (573, 597), False, 'import datetime\n')] |
# encoding: latin2
"""Algorithm utilities
G{packagetree core}
"""
from __future__ import division
from __future__ import print_function
from __future__ import absolute_import
from builtins import range
from builtins import object
from past.utils import old_div
__author__ = "<NAME>"
__credits__ = "Copyright (c) 2009-11 <NAME>"
__license__ = "New BSD License"
__version__ = "1.0.0"
__maintainer__ = "RiSE Group"
__email__ = "<EMAIL>"
from .areacl import AreaCl
from .dist2Regions import distanceStatDispatcher
class AreaManager(object):
    """
    This class contains operations at areal level, including the generation of
    instances of areas, a wide range of area2area and area2region distance
    functions.
    """
    def __init__(self, w, y, distanceType="EuclideanSquared", variance="false"):
        """
        @type w: dictionary
        @param w: With B{key} = area Id, and B{value} = list with Ids of neighbours of
        each area.
        @type y: dictionary
        @param y: With B{key} = area Id, and B{value} = list with attribute
        values.
        @type distanceType: string
        @keyword distanceType: Function to calculate the distance between areas. Default value I{distanceType = 'EuclideanSquared'}.
        @type variance: boolean
        @keyword variance: Boolean indicating if the data have variance matrix. Default value I{variance = 'false'}.
        """
        self.y = y
        self.areas = {}
        self.noNeighs = set([])
        self.variance = variance
        self.distanceType = distanceType
        self.createAreas(w, y)
        # Module-level dispatch table mapping statistic names to
        # area-to-region distance functions.
        self.distanceStatDispatcher = distanceStatDispatcher
    def createAreas(self, w, y):
        """
        Creates instances of areas based on a sparse weights matrix (w) and a
        data array (y).
        """
        n = len(self.y)
        self.distances = {}
        for key in range(n):
            data = y[key]
            try:
                neighbours = w[key]
            except KeyError:
                # Area missing from w: register it as neighbourless.
                neighbours = {}
                w[key] = {}
            if len(w[key]) == 0:
                self.noNeighs = self.noNeighs | set([key])
            self.areas[key] = AreaCl(key, neighbours, data, self.variance)
        if len(self.noNeighs) > 0:
            print("Disconnected areas neighs: ", list(self.noNeighs))
    def returnDistance2Area(self, area, otherArea):
        """
        Returns the distance between two areas.

        Distances are cached in self.distances keyed on the ordered id pair
        (min(id), max(id)); the distance of an area to itself is 0.
        """
        i = area.id
        j = otherArea.id
        if i == j:
            return 0.0
        if i < j:
            return self.distances[(i, j)]
        return self.distances[(j, i)]
    def getDataAverage(self, areaList, dataIndex):
        """
        Returns the attribute centroid of a set of areas.
        """
        dataAvg = len(dataIndex) * [0.0]
        for aID in areaList:
            for i, index in enumerate(dataIndex):
                # Running mean: each area contributes value / len(areaList).
                dataAvg[i] += old_div(self.areas[aID].data[index], len(areaList))
        return dataAvg
    def getDistance2Region(self, area, areaList, distanceStat="Centroid", weights=None, indexData=None):
        """
        Returns the distance from an area to a region (defined as a list of
        area IDs).

        distanceStat may be a single statistic name or a list of names; when
        a list is given, weights (if provided) scale each statistic's
        contribution and indexData (if provided) selects attribute indices
        per statistic.
        """
        # Avoid mutable default arguments; None means "no weights" / "use
        # every attribute index".
        weights = [] if weights is None else weights
        indexData = [] if indexData is None else indexData
        if isinstance(distanceStat, str):
            if len(indexData) == 0:
                indexData = list(range(len(area.data)))
            return self.distanceStatDispatcher[distanceStat](self, area, areaList, indexData)
        distance = 0.0
        for i, dS in enumerate(distanceStat):
            if len(indexData) == 0:
                indexDataDS = list(range(len(area.data)))
            else:
                indexDataDS = indexData[i]
            if len(weights) > 0:
                # BUG FIX: the original split this across two statements,
                # adding the bare weight to `distance` and evaluating the
                # dispatcher call as a discarded expression. The weight must
                # scale the statistic's distance.
                distance += weights[i] * self.distanceStatDispatcher[dS](self, area, areaList, indexDataDS)
            else:
                distance += self.distanceStatDispatcher[dS](self, area, areaList, indexDataDS)
        return distance
    def getDistance2AreaMin(self, area, areaList):
        """
        Return the ID of the area within a region that is closest to an area
        outside the region.
        """
        # NOTE(review): this indexes self.distances as (area.id, aID)
        # directly, unlike returnDistance2Area which orders the pair —
        # confirm callers always satisfy area.id < aID.
        areaMin = -1
        distanceMin = 1e300
        for aID in areaList:
            if self.distances[area.id, aID] < distanceMin:
                areaMin = aID
                distanceMin = self.distances[area.id, aID]
        return areaMin
    def checkFeasibility(self, solution):
        """
        Checks feasibility of a candidate solution: every region (label) in
        the solution must be spatially contiguous. Returns 1 if feasible,
        0 otherwise. Region labels are assumed to be contiguous 0..r-1.
        """
        regions = {}
        for i in range(len(solution)):
            regions.setdefault(solution[i], []).append(i)
        feasible = 1
        for i in range(len(regions)):
            if len(regions[i]) > 0:
                # Flood-fill from the region's first area through neighbours
                # restricted to the region; any leftover area is disconnected.
                newRegion = set([regions[i][0]])
                areas2Eval = set([regions[i][0]])
                while len(areas2Eval) > 0:
                    area = areas2Eval.pop()
                    areaNeighs = set(self.areas[area].neighs) & set(regions[i])
                    areas2Eval = areas2Eval | (areaNeighs - newRegion)
                    newRegion = newRegion | areaNeighs
                if set(regions[i]) - newRegion != set([]):
                    feasible = 0
                    break
        return feasible
| [
"builtins.range"
] | [((1897, 1905), 'builtins.range', 'range', (['n'], {}), '(n)\n', (1902, 1905), False, 'from builtins import range\n'), ((4802, 4810), 'builtins.range', 'range', (['n'], {}), '(n)\n', (4807, 4810), False, 'from builtins import range\n'), ((5021, 5029), 'builtins.range', 'range', (['r'], {}), '(r)\n', (5026, 5029), False, 'from builtins import range\n')] |
import argparse
import cv2
import time
import numpy as np
from tf_pose.estimator import TfPoseEstimator
from tf_pose.networks import get_graph_path, model_wh
"""
封装并调用tf-openpose项目所提供的骨架信息识别接口
"""
class TFPOSE:
    """Wrapper around the tf-pose-estimation API: parses CLI arguments,
    reads frames from a video source and runs per-frame pose detection."""
    def __init__(self):
        # 0. runtime state: timestamp for FPS display and processed-frame count
        self.fps_time = 0
        self.frame_count = 0
        # 1. parse command-line arguments into self.args
        self.parseArgs()
        # 2. echo the parsed arguments
        self.printArgs()
        # 3. build the TfPoseEstimator for the chosen model and target size
        self.w, self.h = model_wh(self.args.resize)
        self.e = TfPoseEstimator(get_graph_path(self.args.model), target_size=(self.w, self.h))
    def parseArgs(self):
        """Parse command-line arguments into self.args."""
        parser = argparse.ArgumentParser(description='tf-pose-estimation realtime webcam')
        parser.add_argument('--video', type=str, default=0,
                            help='if provided, set the video path')
        parser.add_argument('--isoutput', type=bool, default=False,
                            help='whether write to file')
        parser.add_argument('--output', type=str, default='test.avi',
                            help='if provided, set the output video path')
        parser.add_argument('--isorigin', type=bool, default=False,
                            help='whether output origin img')
        parser.add_argument('--resize', type=str, default='432x368',
                            help='if provided, resize images before they are processed. default=256x256, Recommends : 432x368 or 656x368 or 1312x736 ')
        parser.add_argument('--resize-out-ratio', type=float, default=4.0,
                            help='if provided, resize heatmaps before they are post-processed. default=1.0')
        parser.add_argument('--model', type=str, default='mobilenet_v2_large',
                            help='cmu / mobilenet_thin / mobilenet_v2_large / mobilenet_v2_small')
        parser.add_argument('--show-process', type=bool, default=False,
                            help='for debug purpose, if enabled, speed for inference is dropped.')
        # command-line parsing step
        self.args = parser.parse_args()
    def printArgs(self):
        """Print the parsed arguments (labels are in Chinese)."""
        print('获取的参数如下:')
        print('video-视频: %s' % (self.args.video))
        print('resize-重写图片大小: %s' % (self.args.resize))
        print('resize-out-ratio-重写关键点热图大小: %s' % (self.args.resize_out_ratio))
        print('show-process-是否展示过程: %s' % (self.args.show_process))
        print('model-模型: %s, 模型路径: %s' % (self.args.model, get_graph_path(self.args.model)))
    def setArgsVideo(self, video):
        """Override the 'video' argument."""
        self.args.__setattr__('video', video)
    def setArgsIsOrigin(self, isorigin):
        """Override the 'isorigin' argument."""
        self.args.__setattr__('isorigin', isorigin)
    def setArgsIsOutput(self, isoutput):
        """Override the 'isoutput' argument."""
        self.args.__setattr__('isoutput', isoutput)
    def initVideo(self):
        """
        Initialise the capture source (and the output writer if requested).
        """
        print('读取视频')
        self.cam = cv2.VideoCapture(self.args.video)
        self.ret_val, self.image = self.cam.read() # grab the first frame; ret_val is a bool
        self.frame_count = 0 # reset the counter: a new video may have been set
        # optionally write rendered frames to a file
        if self.args.isoutput :
            fps = self.cam.get(cv2.CAP_PROP_FPS) # source frame rate
            fourcc = cv2.VideoWriter_fourcc(*'XVID') # save with XVID (MPEG-4) encoding
            frame_size = (int(self.cam.get(cv2.CAP_PROP_FRAME_WIDTH)), int(self.cam.get(cv2.CAP_PROP_FRAME_HEIGHT)))
            self.videoWriter = cv2.VideoWriter(self.args.output, fourcc, fps, frame_size)
            print('源视频信息: 帧图片大小 %s, 帧率 %s, 视频大小 %s' % (self.image.shape, fps, frame_size))
    def getHumans(self):
        """Run pose inference on the current frame and return the detections."""
        humans = self.e.inference(self.image, resize_to_default=(self.w > 0 and self.h > 0), upsample_size=self.args.resize_out_ratio)
        return humans
    def getNextFrame(self):
        """Advance to the next frame; returns False when the stream is exhausted."""
        self.ret_val, self.image = self.cam.read()
        self.frame_count += 1
        return self.ret_val
    def hasNextFrame(self):
        """Whether the last read produced a frame."""
        return self.ret_val
    def getFrameCount(self):
        """Number of frames advanced past the first."""
        return self.frame_count
    def runOnce(self):
        """
        Process one frame: run inference, draw the skeleton, and return the
        rendered cv2 image together with the detected humans.
        """
        # NOTE(review): fps_time is only used by the commented-out FPS overlay below.
        fps_time = time.time()
        # run inference on the current frame
        print('帧图片处理...')
        humans = self.getHumans()
        # draw the detected keypoints
        print('画图...')
        if self.args.isorigin :
            # draw over the original frame
            pose_img = TfPoseEstimator.draw_humans(np.array(self.image), humans, imgcopy=False)
        else:
            # draw on a black canvas instead of the original frame
            emptyImage = np.zeros(self.image.shape, np.uint8)
            emptyImage[...] = 0
            pose_img = TfPoseEstimator.draw_humans(emptyImage, humans, imgcopy=False)
        # cv2.putText(pose_img, "FPS: %f" % (1.0 / (time.time() - fps_time)), (10, 10), cv2.FONT_HERSHEY_SIMPLEX, 0.5, (0, 255, 0), 2)
        # write the rendered frame to file if requested
        if self.args.isoutput :
            self.videoWriter.write(pose_img)
        return pose_img, humans
if __name__ == '__main__':
    # Smoke entry point: constructing TFPOSE parses args and loads the model.
    TFPOSE()
"argparse.ArgumentParser",
"tf_pose.networks.get_graph_path",
"tf_pose.estimator.TfPoseEstimator.draw_humans",
"cv2.VideoWriter",
"numpy.array",
"numpy.zeros",
"cv2.VideoCapture",
"cv2.VideoWriter_fourcc",
"tf_pose.networks.model_wh",
"time.time"
] | [((443, 469), 'tf_pose.networks.model_wh', 'model_wh', (['self.args.resize'], {}), '(self.args.resize)\n', (451, 469), False, 'from tf_pose.networks import get_graph_path, model_wh\n'), ((628, 701), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'description': '"""tf-pose-estimation realtime webcam"""'}), "(description='tf-pose-estimation realtime webcam')\n", (651, 701), False, 'import argparse\n'), ((2915, 2948), 'cv2.VideoCapture', 'cv2.VideoCapture', (['self.args.video'], {}), '(self.args.video)\n', (2931, 2948), False, 'import cv2\n'), ((4149, 4160), 'time.time', 'time.time', ([], {}), '()\n', (4158, 4160), False, 'import time\n'), ((503, 534), 'tf_pose.networks.get_graph_path', 'get_graph_path', (['self.args.model'], {}), '(self.args.model)\n', (517, 534), False, 'from tf_pose.networks import get_graph_path, model_wh\n'), ((3200, 3231), 'cv2.VideoWriter_fourcc', 'cv2.VideoWriter_fourcc', (["*'XVID'"], {}), "(*'XVID')\n", (3222, 3231), False, 'import cv2\n'), ((3397, 3455), 'cv2.VideoWriter', 'cv2.VideoWriter', (['self.args.output', 'fourcc', 'fps', 'frame_size'], {}), '(self.args.output, fourcc, fps, frame_size)\n', (3412, 3455), False, 'import cv2\n'), ((4483, 4519), 'numpy.zeros', 'np.zeros', (['self.image.shape', 'np.uint8'], {}), '(self.image.shape, np.uint8)\n', (4491, 4519), True, 'import numpy as np\n'), ((4575, 4637), 'tf_pose.estimator.TfPoseEstimator.draw_humans', 'TfPoseEstimator.draw_humans', (['emptyImage', 'humans'], {'imgcopy': '(False)'}), '(emptyImage, humans, imgcopy=False)\n', (4602, 4637), False, 'from tf_pose.estimator import TfPoseEstimator\n'), ((4379, 4399), 'numpy.array', 'np.array', (['self.image'], {}), '(self.image)\n', (4387, 4399), True, 'import numpy as np\n'), ((2438, 2469), 'tf_pose.networks.get_graph_path', 'get_graph_path', (['self.args.model'], {}), '(self.args.model)\n', (2452, 2469), False, 'from tf_pose.networks import get_graph_path, model_wh\n')] |
from kivy.app import App
from kivy.uix.label import Label
from kivy.core.window import Window
class DrivingApp(App):
    """Minimal Kivy app showing a single launch label in a fixed-size window."""

    def build(self):
        """Configure the window and return the root widget.

        Returns:
            Label: the root widget displayed by the app.
        """
        Window.fullscreen = False
        # Need to set the size, otherwise very pixelated.
        # BUG FIX: Window.size is a property, not a callable — assign a
        # (width, height) tuple. The original `Window.size(1920, 1080)`
        # raised TypeError at runtime.
        Window.size = (1920, 1080)
        return Label(text='Launch Child App')
if __name__ == "__main__":
DrivingApp.run()
| [
"kivy.uix.label.Label",
"kivy.core.window.Window.size"
] | [((279, 302), 'kivy.core.window.Window.size', 'Window.size', (['(1920)', '(1080)'], {}), '(1920, 1080)\n', (290, 302), False, 'from kivy.core.window import Window\n'), ((315, 345), 'kivy.uix.label.Label', 'Label', ([], {'text': '"""Launch Child App"""'}), "(text='Launch Child App')\n", (320, 345), False, 'from kivy.uix.label import Label\n')] |
import openpyxl
from robot.api.deco import keyword, library
from robot.api import logger
@library
class OpenRobotPyxl:
    """Robot Framework keyword library for basic openpyxl spreadsheet editing."""

    def __init__(self):
        # Workbook state shared by all keywords.
        self.active_sheet = None  # worksheet currently in use
        self.active_book = None   # workbook currently in use
        self.path = None          # directory the workbook is saved into
        self.bookname = None      # file name, including the .xlsx extension

    @keyword("Create New Workbook")
    def create_new_workbook(self, _path, book_name, sheet_name, postion=0):
        """Create a new workbook with one named sheet and remember its save location."""
        wb = openpyxl.Workbook()
        self.path = _path
        self.bookname = book_name + ".xlsx"
        ws = wb.create_sheet(sheet_name, postion)
        self.active_book, self.active_sheet = wb, ws
        return self.active_book

    @keyword('Close Workbook')
    def close_workbook(self):
        """Save the active workbook to the remembered path/name."""
        self.active_book.save(self.path + "/" + self.bookname)

    @keyword('Get Active Sheet')
    def get_active_sheet(self):
        """Return the active sheet, falling back to the book's first sheet."""
        if self.active_book:
            if self.active_sheet:
                return self.active_sheet
            # Return the first sheet in the work book.
            return self.active_book.worksheets[0]
        return None

    @keyword('Active Sheet Name')
    def get_active_sheet_name(self):
        """Return the title of the active sheet."""
        return self.get_active_sheet().title

    @keyword('Load Workbook')
    def load_workbook(self, path, bookname):
        """Open an existing workbook and make its first sheet active."""
        self.active_book = openpyxl.load_workbook(path + "/" + bookname)
        self.path = path
        self.bookname = bookname
        self.active_sheet = None
        self.active_sheet = self.get_active_sheet()

    @keyword('Add Sheet')
    def add_new_sheet(self, sheetname, index=0):
        """Insert a new sheet at the given index."""
        self.active_book.create_sheet(title=sheetname, index=index)

    @keyword('Set Cell Value')
    def add_value_to_cell(self, row, col, value):
        """Write *value* into the cell at (row, col) (1-based)."""
        self.active_sheet.cell(row, col, value)

    @keyword('Get Cell Value')
    def get_cell_value(self, row, col):
        """Return the value of the cell at (row, col) (1-based)."""
        return self.active_sheet.cell(row, col).value

    @keyword('Insert Row')
    def insert_empty_row(self, row_number):
        """Insert an empty row before *row_number*."""
        return self.active_sheet.insert_rows(row_number)

    @keyword('Insert Column')
    def insert_empty_col(self, col_number):
        """Insert an empty column before *col_number*."""
        return self.active_sheet.insert_cols(col_number)

    @keyword('Delete Row')
    def delete_row(self, row_number):
        """Delete the row at *row_number*."""
        return self.active_sheet.delete_rows(row_number)

    @keyword('Delete Column')
    def delete_col(self, col_number):
        """Delete the column at *col_number*."""
        return self.active_sheet.delete_cols(col_number)

    @keyword('Convert List to Row')
    def insert_value_to_row(self, row, col, listofdata):
        """Write *listofdata* vertically in column *col*, starting at *row*.

        BUG FIX: a second method with this same Python name used to shadow
        this one, so the 'Convert List to Row' keyword was silently lost;
        the duplicate is now named insert_value_to_col.
        """
        if not isinstance(listofdata, list):
            # The original *returned* an Exception, which let the keyword
            # pass silently; raising makes the failure visible.
            raise TypeError("The data should be of list.")
        for offset, value in enumerate(listofdata):
            self.active_sheet.cell(row + offset, col).value = value

    @keyword('Convert List to Column')
    def insert_value_to_col(self, row, col, listofdata):
        """Write *listofdata* horizontally in row *row*, starting at *col*."""
        if not isinstance(listofdata, list):
            raise TypeError("The data should be of list.")
        for offset, value in enumerate(listofdata):
            self.active_sheet.cell(row, col + offset).value = value
        return True
| [
"robot.api.deco.keyword",
"openpyxl.load_workbook",
"openpyxl.Workbook"
] | [((271, 301), 'robot.api.deco.keyword', 'keyword', (['"""Create New Workbook"""'], {}), "('Create New Workbook')\n", (278, 301), False, 'from robot.api.deco import keyword, library\n'), ((622, 647), 'robot.api.deco.keyword', 'keyword', (['"""Close Workbook"""'], {}), "('Close Workbook')\n", (629, 647), False, 'from robot.api.deco import keyword, library\n'), ((747, 774), 'robot.api.deco.keyword', 'keyword', (['"""Get Active Sheet"""'], {}), "('Get Active Sheet')\n", (754, 774), False, 'from robot.api.deco import keyword, library\n'), ((1086, 1114), 'robot.api.deco.keyword', 'keyword', (['"""Active Sheet Name"""'], {}), "('Active Sheet Name')\n", (1093, 1114), False, 'from robot.api.deco import keyword, library\n'), ((1203, 1227), 'robot.api.deco.keyword', 'keyword', (['"""Load Workbook"""'], {}), "('Load Workbook')\n", (1210, 1227), False, 'from robot.api.deco import keyword, library\n'), ((1495, 1515), 'robot.api.deco.keyword', 'keyword', (['"""Add Sheet"""'], {}), "('Add Sheet')\n", (1502, 1515), False, 'from robot.api.deco import keyword, library\n'), ((1639, 1664), 'robot.api.deco.keyword', 'keyword', (['"""Set Cell Value"""'], {}), "('Set Cell Value')\n", (1646, 1664), False, 'from robot.api.deco import keyword, library\n'), ((1769, 1794), 'robot.api.deco.keyword', 'keyword', (['"""Get Cell Value"""'], {}), "('Get Cell Value')\n", (1776, 1794), False, 'from robot.api.deco import keyword, library\n'), ((1895, 1916), 'robot.api.deco.keyword', 'keyword', (['"""Insert Row"""'], {}), "('Insert Row')\n", (1902, 1916), False, 'from robot.api.deco import keyword, library\n'), ((2024, 2048), 'robot.api.deco.keyword', 'keyword', (['"""Insert Column"""'], {}), "('Insert Column')\n", (2031, 2048), False, 'from robot.api.deco import keyword, library\n'), ((2156, 2177), 'robot.api.deco.keyword', 'keyword', (['"""Delete Row"""'], {}), "('Delete Row')\n", (2163, 2177), False, 'from robot.api.deco import keyword, library\n'), ((2279, 2303), 'robot.api.deco.keyword', 
'keyword', (['"""Delete Column"""'], {}), "('Delete Column')\n", (2286, 2303), False, 'from robot.api.deco import keyword, library\n'), ((2405, 2435), 'robot.api.deco.keyword', 'keyword', (['"""Convert List to Row"""'], {}), "('Convert List to Row')\n", (2412, 2435), False, 'from robot.api.deco import keyword, library\n'), ((2825, 2858), 'robot.api.deco.keyword', 'keyword', (['"""Convert List to Column"""'], {}), "('Convert List to Column')\n", (2832, 2858), False, 'from robot.api.deco import keyword, library\n'), ((391, 410), 'openpyxl.Workbook', 'openpyxl.Workbook', ([], {}), '()\n', (408, 410), False, 'import openpyxl\n'), ((1300, 1345), 'openpyxl.load_workbook', 'openpyxl.load_workbook', (["(path + '/' + bookname)"], {}), "(path + '/' + bookname)\n", (1322, 1345), False, 'import openpyxl\n')] |
from django.urls import path
from . import views
app_name = 'club'
urlpatterns = [
path('create/', views.create_club, name='create_club'),
path('update/<int:club_pk>', views.update_club, name='update_club'),
path('read_admin_club/<str:club>/<int:ctg_pk>/', views.read_admin_club, name='read_admin_club_ctg'),
path('<int:pk>/', views.ClubView.as_view(), name='club_view'),
path('read_admin_club/<str:club>/', views.read_admin_club, name='read_admin_club'),
path('read_non_admin_club/<str:club>/<int:ctg_pk>/', views.read_non_admin_club, name='read_non_admin_club_ctg'),
path('read_non_admin_club/<str:club>/', views.read_non_admin_club, name='read_non_admin_club'),
path('apply/<str:club>/', views.apply_club, name='apply_club'),
path('admit/<int:club>/<int:pk>/', views.admit, name='admit'),
path('update_is_admin/<int:club_pk>/<int:user_pk>/', views.update_is_admin, name='update_is_admin'),
path('manage/<int:club_pk>/', views.manage_member, name='manage_member'),
path('member_list/<int:club_pk>/non_admin', views.member_list_for_non_admin,
name='member_list_for_non_admin'),
path('create/club/rule/<str:club>/', views.create_club_rule, name='create_club_rule'),
path('read/admin_club/apply_list/<str:club>/', views.read_apply_list, name='read_apply_list'),
path('read/admin_club/rule/<str:club>/', views.read_admin_club_rule, name='read_admin_club_rule'),
path('read/non_admin_club/rule/<str:club>/', views.read_non_admin_club_rule, name='read_non_admin_club_rule'),
path('update/club/rule/<str:club>/<int:rule_pk>/', views.update_club_rule, name='update_club_rule'),
path('delete/club/rule/<str:club>/<int:rule_pk>/', views.delete_club_rule, name='delete_club_rule'),
path('exit_club/<int:club_pk>/<int:user_pk>/', views.exit_club, name='exit_club'),
]
| [
"django.urls.path"
] | [((89, 143), 'django.urls.path', 'path', (['"""create/"""', 'views.create_club'], {'name': '"""create_club"""'}), "('create/', views.create_club, name='create_club')\n", (93, 143), False, 'from django.urls import path\n'), ((149, 216), 'django.urls.path', 'path', (['"""update/<int:club_pk>"""', 'views.update_club'], {'name': '"""update_club"""'}), "('update/<int:club_pk>', views.update_club, name='update_club')\n", (153, 216), False, 'from django.urls import path\n'), ((223, 326), 'django.urls.path', 'path', (['"""read_admin_club/<str:club>/<int:ctg_pk>/"""', 'views.read_admin_club'], {'name': '"""read_admin_club_ctg"""'}), "('read_admin_club/<str:club>/<int:ctg_pk>/', views.read_admin_club,\n name='read_admin_club_ctg')\n", (227, 326), False, 'from django.urls import path\n'), ((396, 483), 'django.urls.path', 'path', (['"""read_admin_club/<str:club>/"""', 'views.read_admin_club'], {'name': '"""read_admin_club"""'}), "('read_admin_club/<str:club>/', views.read_admin_club, name=\n 'read_admin_club')\n", (400, 483), False, 'from django.urls import path\n'), ((484, 600), 'django.urls.path', 'path', (['"""read_non_admin_club/<str:club>/<int:ctg_pk>/"""', 'views.read_non_admin_club'], {'name': '"""read_non_admin_club_ctg"""'}), "('read_non_admin_club/<str:club>/<int:ctg_pk>/', views.\n read_non_admin_club, name='read_non_admin_club_ctg')\n", (488, 600), False, 'from django.urls import path\n'), ((601, 700), 'django.urls.path', 'path', (['"""read_non_admin_club/<str:club>/"""', 'views.read_non_admin_club'], {'name': '"""read_non_admin_club"""'}), "('read_non_admin_club/<str:club>/', views.read_non_admin_club, name=\n 'read_non_admin_club')\n", (605, 700), False, 'from django.urls import path\n'), ((701, 763), 'django.urls.path', 'path', (['"""apply/<str:club>/"""', 'views.apply_club'], {'name': '"""apply_club"""'}), "('apply/<str:club>/', views.apply_club, name='apply_club')\n", (705, 763), False, 'from django.urls import path\n'), ((769, 830), 'django.urls.path', 
'path', (['"""admit/<int:club>/<int:pk>/"""', 'views.admit'], {'name': '"""admit"""'}), "('admit/<int:club>/<int:pk>/', views.admit, name='admit')\n", (773, 830), False, 'from django.urls import path\n'), ((836, 939), 'django.urls.path', 'path', (['"""update_is_admin/<int:club_pk>/<int:user_pk>/"""', 'views.update_is_admin'], {'name': '"""update_is_admin"""'}), "('update_is_admin/<int:club_pk>/<int:user_pk>/', views.update_is_admin,\n name='update_is_admin')\n", (840, 939), False, 'from django.urls import path\n'), ((942, 1014), 'django.urls.path', 'path', (['"""manage/<int:club_pk>/"""', 'views.manage_member'], {'name': '"""manage_member"""'}), "('manage/<int:club_pk>/', views.manage_member, name='manage_member')\n", (946, 1014), False, 'from django.urls import path\n'), ((1020, 1134), 'django.urls.path', 'path', (['"""member_list/<int:club_pk>/non_admin"""', 'views.member_list_for_non_admin'], {'name': '"""member_list_for_non_admin"""'}), "('member_list/<int:club_pk>/non_admin', views.member_list_for_non_admin,\n name='member_list_for_non_admin')\n", (1024, 1134), False, 'from django.urls import path\n'), ((1146, 1236), 'django.urls.path', 'path', (['"""create/club/rule/<str:club>/"""', 'views.create_club_rule'], {'name': '"""create_club_rule"""'}), "('create/club/rule/<str:club>/', views.create_club_rule, name=\n 'create_club_rule')\n", (1150, 1236), False, 'from django.urls import path\n'), ((1238, 1336), 'django.urls.path', 'path', (['"""read/admin_club/apply_list/<str:club>/"""', 'views.read_apply_list'], {'name': '"""read_apply_list"""'}), "('read/admin_club/apply_list/<str:club>/', views.read_apply_list, name=\n 'read_apply_list')\n", (1242, 1336), False, 'from django.urls import path\n'), ((1338, 1440), 'django.urls.path', 'path', (['"""read/admin_club/rule/<str:club>/"""', 'views.read_admin_club_rule'], {'name': '"""read_admin_club_rule"""'}), "('read/admin_club/rule/<str:club>/', views.read_admin_club_rule, name=\n 'read_admin_club_rule')\n", (1342, 
1440), False, 'from django.urls import path\n'), ((1441, 1554), 'django.urls.path', 'path', (['"""read/non_admin_club/rule/<str:club>/"""', 'views.read_non_admin_club_rule'], {'name': '"""read_non_admin_club_rule"""'}), "('read/non_admin_club/rule/<str:club>/', views.read_non_admin_club_rule,\n name='read_non_admin_club_rule')\n", (1445, 1554), False, 'from django.urls import path\n'), ((1556, 1659), 'django.urls.path', 'path', (['"""update/club/rule/<str:club>/<int:rule_pk>/"""', 'views.update_club_rule'], {'name': '"""update_club_rule"""'}), "('update/club/rule/<str:club>/<int:rule_pk>/', views.update_club_rule,\n name='update_club_rule')\n", (1560, 1659), False, 'from django.urls import path\n'), ((1661, 1764), 'django.urls.path', 'path', (['"""delete/club/rule/<str:club>/<int:rule_pk>/"""', 'views.delete_club_rule'], {'name': '"""delete_club_rule"""'}), "('delete/club/rule/<str:club>/<int:rule_pk>/', views.delete_club_rule,\n name='delete_club_rule')\n", (1665, 1764), False, 'from django.urls import path\n'), ((1766, 1852), 'django.urls.path', 'path', (['"""exit_club/<int:club_pk>/<int:user_pk>/"""', 'views.exit_club'], {'name': '"""exit_club"""'}), "('exit_club/<int:club_pk>/<int:user_pk>/', views.exit_club, name=\n 'exit_club')\n", (1770, 1852), False, 'from django.urls import path\n')] |
from random import randint
from typing import List
class Solution:
    """Quickselect-based solution for the kth largest element of a list."""

    def findKthLargest(self, nums: List[int], k: int) -> int:
        """Return the kth largest value of nums (partially reorders nums in place)."""
        # kth largest == (n - k)th smallest in zero-based order statistics.
        target = len(nums) - k
        return self.quickSelect(nums, 0, len(nums) - 1, target)

    def quickSelect(self, nums: List[int], start: int, end: int, jthSmallest: int) -> int:
        """Recursively narrow [start, end] until the pivot lands on jthSmallest."""
        pivot = self.partition(nums, start, end)
        if pivot == jthSmallest:
            return nums[pivot]
        if jthSmallest < pivot:
            return self.quickSelect(nums, start, pivot - 1, jthSmallest)
        return self.quickSelect(nums, pivot + 1, end, jthSmallest)

    def partition(self, nums: List[int], start: int, end: int) -> int:
        """Lomuto partition around a random pivot; return the pivot's final index."""
        # Random pivot guards against worst-case O(n^2) on sorted input.
        self.swap(nums, randint(start, end), start)
        pivot_value = nums[start]
        boundary = start
        for scan in range(start + 1, end + 1):
            if nums[scan] < pivot_value:
                boundary += 1
                self.swap(nums, boundary, scan)
        self.swap(nums, start, boundary)
        return boundary

    def swap(self, nums: List[int], i: int, j: int) -> None:
        """Exchange nums[i] and nums[j] in place."""
        nums[i], nums[j] = nums[j], nums[i]

print(Solution().findKthLargest([4, 1, 2, 11], 2))
| [
"random.randint"
] | [((734, 753), 'random.randint', 'randint', (['start', 'end'], {}), '(start, end)\n', (741, 753), False, 'from random import randint\n')] |
import json
'''
READ THE DATABASE README before operating
'''
File = r'''YOUR FILE'''
# Read the existing JSON content. BUG FIX: open for reading ('r'), not
# appending, and pass the file object to json.load — the original called
# json.load() with no argument on a file opened in append mode.
with open(File, 'r', encoding='utf-8') as fileObj:
    data = json.load(fileObj)
'''
YOUR DATA LOGIC GOES IN HERE
Once the data is changed, to write it to your JSON file use the following command.
'''
# Write the (possibly modified) data back. BUG FIX: json.dump takes the
# data object and a writable file handle — the original passed the builtin
# `object` and the path string, which fails at runtime.
with open(File, 'w', encoding='utf-8') as fileObj:
    json.dump(data, fileObj)
"json.load",
"json.dump"
] | [((129, 140), 'json.load', 'json.load', ([], {}), '()\n', (138, 140), False, 'import json\n'), ((281, 304), 'json.dump', 'json.dump', (['object', 'File'], {}), '(object, File)\n', (290, 304), False, 'import json\n')] |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Description: Choose a set of data points as weights and calculate RBF nodes for the
first layer. Those are then used as inputs for a one-layer perceptron, which gives the
output
"""
import numpy as np
import pcn
class rbf:
    """ radial basis function network: a fixed Gaussian hidden layer whose
    centres are random training points, feeding a one-layer perceptron (pcn)
    that produces the output """
    def __init__(self,inputs,targets,nRBF,sigma=0,normalise=0,eta=0.25,functype='sigmoid',traintype='batch'):
        """ constructor
        inputs:    training data, shape (nSamples, nFeatures)
        targets:   training targets for the perceptron layer
        nRBF:      number of RBF nodes in the hidden layer
        sigma:     Gaussian width; if 0 it is derived from the data spread
        normalise: nonzero -> normalise RBF activations to sum to one per sample
        eta:       perceptron learning rate
        functype / traintype: forwarded unchanged to pcn.pcn
        """
        self.inputs = inputs
        self.targets = targets
        self.nRBF = nRBF #number of RBF nodes
        self.normalise = normalise
        self.eta = eta #learning rate
        self.functype = functype
        self.traintype = traintype
        #set width of gaussian: heuristic sigma = d / sqrt(2*nRBF), where d is
        #the largest per-feature range of the training data
        if sigma==0:
            d = (self.inputs.max(axis=0)-self.inputs.min(axis=0)).max()
            self.sigma = d/np.sqrt(2*nRBF)
        else:
            self.sigma = sigma
        #input array of RBF nodes (activations of the hidden layer)
        self.hidden = np.zeros((np.shape(self.inputs)[0],self.nRBF))
        #set RBF weights (centres) to be random datapoints
        self.weights = np.zeros((np.shape(inputs)[1],self.nRBF))
        indices = np.arange(np.shape(self.inputs)[0])
        np.random.shuffle(indices)
        for i in range(self.nRBF):
            self.weights[:,i] = self.inputs[indices[i],:]
        #calculate the hidden rbf nodes (first layer)
        self.hidden = self.rbffwd(self.inputs,1)
        #use initialise perceptron for second layer (trained on the RBF outputs)
        self.perceptron = pcn.pcn(self.hidden,self.targets,self.eta,self.functype,self.traintype)

    def errfunc(self,outputs,targets):
        """ sum-of-squares error: E = 1/2 * trace((t-o)^T (t-o)) """
        E = 1/2*np.trace(np.dot(np.transpose(targets-outputs),targets-outputs))
        return E

    def rbftrain(self,nIt=100):
        """ training the network: only the perceptron layer is trained,
        the RBF centres and width stay fixed """
        #train perceptron
        self.perceptron.pcntrain(nIt)

    def rbftrain_automatic(self,valid,validt,itSteps):
        """ train the perceptron until the error on the validation data increases
        (early stopping); returns the (trainerror, validerror) histories """
        #calculate the hidden rbf nodes (first layer) for the validation set
        rbfvalid = self.rbffwd(valid,1)
        trainerror = np.array([])
        validerror = np.array([])
        (trainerror,validerror) = self.perceptron.pcntrain_automatic(rbfvalid,validt,itSteps)
        return trainerror,validerror

    def rbffwd(self,inputs,layer):
        """ run the network forward:
        layer == 1 returns the RBF activations only,
        layer == 2 additionally runs the perceptron and returns its outputs """
        #rbf nodes
        hidden = np.zeros((np.shape(inputs)[0],self.nRBF))
        #calculate gaussian overlap of input with weights:
        #hidden[n,i] = exp(-||x_n - w_i||^2 / (2*sigma^2))
        for i in range(self.nRBF):
            hidden[:,i] = np.exp(-np.sum((inputs - np.ones((1,np.shape(inputs)[1]))*self.weights[:,i])**2,axis=1)/(2*self.sigma**2))
        #normalise RBF layer so each sample's activations sum to one
        if self.normalise:
            hidden[:,:] /= np.transpose(np.ones((1,np.shape(hidden)[0]))*hidden[:,:].sum(axis=1))
        #output of hidden (rbf) layer
        outputs = hidden
        #output of perceptron layer
        if layer == 2:
            outputs = self.perceptron.pcnfwd(hidden,True)
        return outputs

    def confmat(self,inputs,targets):
        """ confusion matrix to evaluate the performance of the network:
        computes the RBF activations, then delegates to the perceptron """
        #calculate hidden nodes
        hidden = self.rbffwd(inputs,1)
        #confusion matrix of perceptron
        self.perceptron.confmat(hidden,targets)
        return 0
| [
"numpy.sqrt",
"pcn.pcn",
"numpy.array",
"numpy.shape",
"numpy.transpose",
"numpy.random.shuffle"
] | [((1237, 1263), 'numpy.random.shuffle', 'np.random.shuffle', (['indices'], {}), '(indices)\n', (1254, 1263), True, 'import numpy as np\n'), ((1552, 1627), 'pcn.pcn', 'pcn.pcn', (['self.hidden', 'self.targets', 'self.eta', 'self.functype', 'self.traintype'], {}), '(self.hidden, self.targets, self.eta, self.functype, self.traintype)\n', (1559, 1627), False, 'import pcn\n'), ((2213, 2225), 'numpy.array', 'np.array', (['[]'], {}), '([])\n', (2221, 2225), True, 'import numpy as np\n'), ((2247, 2259), 'numpy.array', 'np.array', (['[]'], {}), '([])\n', (2255, 2259), True, 'import numpy as np\n'), ((869, 886), 'numpy.sqrt', 'np.sqrt', (['(2 * nRBF)'], {}), '(2 * nRBF)\n', (876, 886), True, 'import numpy as np\n'), ((1203, 1224), 'numpy.shape', 'np.shape', (['self.inputs'], {}), '(self.inputs)\n', (1211, 1224), True, 'import numpy as np\n'), ((1015, 1036), 'numpy.shape', 'np.shape', (['self.inputs'], {}), '(self.inputs)\n', (1023, 1036), True, 'import numpy as np\n'), ((1143, 1159), 'numpy.shape', 'np.shape', (['inputs'], {}), '(inputs)\n', (1151, 1159), True, 'import numpy as np\n'), ((1727, 1758), 'numpy.transpose', 'np.transpose', (['(targets - outputs)'], {}), '(targets - outputs)\n', (1739, 1758), True, 'import numpy as np\n'), ((2540, 2556), 'numpy.shape', 'np.shape', (['inputs'], {}), '(inputs)\n', (2548, 2556), True, 'import numpy as np\n'), ((2907, 2923), 'numpy.shape', 'np.shape', (['hidden'], {}), '(hidden)\n', (2915, 2923), True, 'import numpy as np\n'), ((2728, 2744), 'numpy.shape', 'np.shape', (['inputs'], {}), '(inputs)\n', (2736, 2744), True, 'import numpy as np\n')] |
import datetime
import os

# General
# Read the bot token with a context manager so the file handle is closed
# (the original called open() and never closed the file object).
with open('../Token.txt', 'r') as _token_file:
    Token = _token_file.read()  # The token of the bot
prefix = '*'  # the command prefix
lang = 'en-us'  # 'en-us' or 'pt-br'
memes = os.listdir('../Assets/monsters_memes')  # memes db load
banchannel = None  # the channel that will be used to ban messages

# Minigame setup
gamechannel = None  # You can set here or with the command "*setup"
gameinterval = 3600  # interval between the sessions #TEMP VALUE
winnerPoints = 3  # points awarded to whoever wins the minigame
valid = False
end_day = 30  # The day of the end of the minigame - will verify at the start time

# log file path
logpath = '../logs'

# Language import: pull in the message strings for the configured language.
if lang == 'en-us':
    from en_us import *
elif lang == 'pt-br':
    from pt_br import *
else:
    raise Exception(f'There are no lang option called {lang}')
| [
"os.listdir"
] | [((193, 231), 'os.listdir', 'os.listdir', (['"""../Assets/monsters_memes"""'], {}), "('../Assets/monsters_memes')\n", (203, 231), False, 'import os\n')] |
"""
Convert data and then visualize
Data Manupulation
1. Save metrics for validation and test data
Save figures
1. Loss curve
2. plume dispersion and errors
3. metrics
"""
import pathlib
import numpy as np
import xarray as xr
from numpy import ma
import matplotlib as mpl
import matplotlib.pyplot as plt
import matplotlib.style
from matplotlib.colors import LogNorm
from ._base_postscript import _BasePostscripts
from .metrics import get_metric
class CityTransformerPostscripts(_BasePostscripts):
    """Postprocessing for the CityTransformer model.

    Computes per-shot dispersion metrics from the inference NetCDF files,
    saves them as one dataset per mode, and renders the loss curve, plume
    contour figures and metric histograms.
    """
    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        self.model_name = 'CityTransformer'
        self.modes = ['val', 'test']  # dataset splits that get postprocessed
        self.threshold = 0.5  # cutoff for the binary zeros-map (plume / no plume)
        self.clip = 1.e-8  # scale applied when converting log concentrations back (see __mask_img)
        self.alpha = 0.9  # overlay transparency for plume images
        self.vmin = self.clip
        self.vmax = 1.0
        self.nb_bins = 100  # histogram bins for the metric plots
        self.fig_names = ['loss', 'contour', 'metrics']
        self.extent = [-1024,1024,-1024,1024]  # plotting extent in physical coordinates
        self.metrics = {'FAC2',
                        'FAC5',
                        'MG',
                        'VG',
                        'NAD',
                        'FB',
                       }
        # Matplotlib settings
        mpl.style.use('classic')
        fontsize = 28
        self.fontsize = fontsize
        fontname = 'Times New Roman'
        plt.rc('xtick', labelsize=fontsize)
        plt.rc('ytick', labelsize=fontsize)
        plt.rc('font', family=fontname)
        self.title_font = {'fontname':fontname, 'size':fontsize, 'color':'black',
                           'verticalalignment':'bottom'}
        self.axis_font = {'fontname':fontname, 'size':fontsize}

    def __preprocess(self, epoch):
        """Evaluate every metric on every inference shot and store the
        results as one NetCDF dataset per mode (indexed by shot_idx)."""
        for mode in self.modes:
            all_metrics = {metric_name: [] for metric_name in self.metrics}
            nb_shots = self.nb_shots_dict[mode]
            for i in range(nb_shots):
                filename = pathlib.Path(self.inference_dir) / mode / f'{mode}{i:06}_epoch{epoch:04}.nc'
                ds = xr.open_dataset(filename)
                levelset = ds['levelset'].values
                # Target metrics
                metric_dict = {'FAC2': {'factor': 2, 'levelset': levelset},
                               'FAC5': {'factor': 5, 'levelset': levelset},
                               'MG': {'levelset': levelset},
                               'VG': {'levelset': levelset},
                               'NAD': {'levelset': levelset},
                               'FB': {'levelset': levelset},
                               }
                evaluated_metrics = self.__evaluate_metrics(ds, metric_dict=metric_dict)
                for metric_name in metric_dict.keys():
                    all_metrics[metric_name].append(evaluated_metrics[metric_name])
            # Saving dataset
            data_vars = {}
            for metric_name, evaluated_values in all_metrics.items():
                data_vars[metric_name] = (['shot_idx'], np.asarray(evaluated_values))
            coords = {'shot_idx': np.arange(nb_shots)}
            filename = self.data_dir / f'{mode}_epoch{epoch:04}.nc'
            ds = xr.Dataset(data_vars=data_vars, coords=coords)
            ds.to_netcdf(filename)

    def __evaluate_metrics(self, ds, metric_dict):
        """Mask the predicted and reference plumes, then evaluate each metric
        in metric_dict on the pair; returns {metric_name: value}."""
        evaluated_metrics = {}
        pred, pred_binary = ds['pred_plume'].values.squeeze(), ds['pred_zeros_map'].values
        ref, ref_binary = ds['ref_plume'].values.squeeze(), ds['ref_zeros_map'].values
        levelset = ds['levelset'].values
        pred = self.__mask_img(img=pred, binary=pred_binary, levelset=levelset, threshold=self.threshold, clip=self.clip)
        ref = self.__mask_img(img=ref, binary=ref_binary, levelset=levelset, threshold=self.threshold, clip=self.clip)
        for metric_name, kwargs in metric_dict.items():
            metric = get_metric(metric_name)(**kwargs)
            evaluated_metrics[metric_name] = metric.evaluate(pred, ref)
        return evaluated_metrics

    def __mask_img(self, img, binary, levelset, threshold, clip, apply_mask=False):
        """Convert a log-scaled plume back to linear scale (10**img, scaled by
        clip) and blank out pixels where the zeros-map is below threshold or
        levelset >= 0. Blanked pixels become -clip; with apply_mask=True they
        are returned as a masked array instead."""
        img, binary = np.squeeze(img), np.squeeze(binary)
        mask = np.logical_or(binary<threshold, levelset >= 0.)
        img = 10**img
        img = np.where(mask, -1., img) * clip
        if apply_mask:
            return ma.masked_where(img <= 0, img)
        else:
            return img

    def __classification_by_factor(self, pred, ref, levelset, threshold, clip):
        """Classify each pixel by the prediction/reference ratio.

        Values encoded in the returned masked array:
            0.0 -> within a factor of 2 of the reference (FAC2)
            0.5 -> within a factor of 5, but not 2 (FAC5)
            1.0 -> off by more than a factor of 5
        Pixels where both zeros-maps agree on "no plume", or where
        levelset >= 0, are masked out.

        NOTE(review): callers always pass (plume, zeros_map) tuples; if a
        bare array were passed, pred_binary/ref_binary would be unbound
        below -- confirm before reusing this method elsewhere.
        """
        if type(pred) is tuple:
            pred, pred_binary = pred
            ref, ref_binary = ref
        # Create mask based on zeros map and levelset
        def mask_on_img(img, binary):
            mask = np.logical_or(binary < threshold, levelset >= 0.)
            img = 10**img
            img = np.where(mask, -1, img) * clip
            return img
        pred = mask_on_img(pred, pred_binary)
        ref = mask_on_img(ref, ref_binary)
        factor = np.ones_like(ref) # Default 1.0
        target_area = np.logical_and(ref > 0., levelset < 0)
        fraction = np.where(target_area, pred/ref, 0)
        fac2_area = np.logical_and( fraction >= 1/2., fraction <= 2. )
        fac5_area = np.logical_and( fraction >= 1/5., fraction <= 5. )
        fac2_area = np.logical_and(target_area, fac2_area)
        fac5_area = np.logical_and(target_area, fac5_area)
        # fac2 is a subset of fac5; assigning fac5 first, then fac2, yields
        # the 0 / 0.5 / 1.0 encoding described in the docstring.
        factor[fac5_area] = np.ones_like(ref)[fac5_area] * 0.5
        factor[fac2_area] = np.zeros_like(ref)[fac2_area]
        correct_zeros = np.logical_and(pred_binary < 0.5, ref_binary < 0.5)
        masked_fraction = ma.masked_where(np.logical_or(correct_zeros, levelset >= 0.), factor)
        return masked_fraction

    def _visualize(self, epoch):
        """Entry point: save metric data, then draw all figures for `epoch`."""
        self.data_dir = self.img_dir / 'metrics/data'
        if not self.data_dir.exists():
            self.data_dir.mkdir(parents=True)
        super()._visualize_loss()
        self.__preprocess(epoch)
        self.__visualize_plume_dispersion(epoch)
        self.__visualize_metrics(epoch)

    def __visualize_plume_dispersion(self, epoch):
        """For every shot, plot ground truth vs prediction (log scale) plus a
        factor-classification map, and save one PNG per shot."""
        figsize = (8, 8)
        for mode in self.modes:
            nb_shots = self.nb_shots_dict[mode]
            for i in range(nb_shots):
                fig, axes = plt.subplots(nrows=2, ncols=2, figsize=figsize,
                                subplot_kw={'xticks':[], 'yticks':[]}, gridspec_kw=dict(hspace=0.1, wspace=0.05))
                axes[1, 0].set_visible(False)
                filename = pathlib.Path(self.inference_dir) / mode / f'{mode}{i:06}_epoch{epoch:04}.nc'
                ds = xr.open_dataset(filename)
                levelset = ds['levelset'].values
                x, y = ds.attrs['release_x'], ds.attrs['release_y']
                # apply masks
                pred, pred_binary = ds['pred_plume'].values.squeeze(), ds['pred_zeros_map'].values
                ref, ref_binary = ds['ref_plume'].values.squeeze(), ds['ref_zeros_map'].values
                levelset = ds['levelset'].values
                factor = self.__classification_by_factor((pred, pred_binary), (ref, ref_binary), levelset=levelset, threshold=self.threshold, clip=self.clip)
                masked_pred = self.__mask_img(img=pred, binary=pred_binary, levelset=levelset, threshold=self.threshold, clip=self.clip, apply_mask=True)
                masked_ref = self.__mask_img(img=ref, binary=ref_binary, levelset=levelset, threshold=self.threshold, clip=self.clip, apply_mask=True)
                # Plotting the ground truth and prediction
                # (levelset < 0 drawn first in gray as the city background,
                #  then the masked plume on top; the green star marks the release point)
                im = axes[0, 0].imshow(levelset < 0., cmap='gray', origin='lower', extent=self.extent, interpolation='none')
                im = axes[0, 0].imshow(masked_ref, cmap='coolwarm', origin='lower', extent=self.extent, norm=LogNorm(vmin=self.vmin, vmax=self.vmax), alpha=self.alpha, interpolation='none')
                axes[0, 0].plot(x, y, color='none', marker='*', markeredgecolor='g', markeredgewidth=2, markersize=12)
                im = axes[0, 1].imshow(levelset < 0., cmap='gray', origin='lower', extent=self.extent, interpolation='none')
                im = axes[0, 1].imshow(masked_pred, cmap='coolwarm', origin='lower', extent=self.extent, norm=LogNorm(vmin=self.vmin, vmax=self.vmax), alpha=self.alpha, interpolation='none')
                axes[0, 1].plot(x, y, color='none', marker='*', markeredgecolor='g', markeredgewidth=2, markersize=12)
                # Plotting the factor map
                im2 = axes[1, 1].imshow(levelset < 0., cmap='gray', origin='lower', extent=self.extent, interpolation='none')
                im2 = axes[1, 1].imshow(factor, cmap='jet', origin='lower', extent=self.extent, vmin=0, vmax=1, alpha=self.alpha, interpolation='none')
                axes[1, 1].plot(x, y, color='none', marker='*', markeredgecolor='g', markeredgewidth=2, markersize=12)
                axes[0, 0].set_title('Ground Truth', **self.title_font)
                axes[0, 1].set_title(f'{self.arch_name}', **self.title_font)
                cbar = fig.colorbar(im, ax=axes[0, :])
                # cbar2 only reserves layout space so both rows align; it is removed.
                cbar2 = fig.colorbar(im2, ax=axes[1, :])
                cbar2.remove()
                figname = self.img_dir / 'contour' / f'log_{mode}{i:06}_epoch{epoch:04}.png'
                plt.savefig(figname, bbox_inches='tight')
                plt.close('all')

    def __visualize_metrics(self, epoch):
        """Plot normalized histograms of each metric over all shots (one panel
        per metric) and print the mean/std of |metric| for each."""
        figsize = (20, 12)
        plot_dict = {}
        # key: metric_name, value: xmin, xmax, ymin, ymax, label
        # xmin, xmax are also used to make histogram
        plot_dict['FAC2'] = (0, 1, 0, 0.05, 'FAC_2')
        plot_dict['FAC5'] = (0, 1, 0, 0.1, 'FAC_5')
        plot_dict['FB'] = (-2, 2, 0, 0.05, 'FB')
        plot_dict['NAD'] = (0, 0.15, 0, 0.15, 'NAD')
        plot_dict['MG'] = (0, 2, 0, 0.1, 'MG')
        plot_dict['VG'] = (1, 1.15, 0, 0.5, 'VG')
        metric_names = plot_dict.keys()
        for mode in self.modes:
            filename = self.data_dir / f'{mode}_epoch{epoch:04}.nc'
            ds = xr.open_dataset(filename)
            fig, axes = plt.subplots(nrows=2, ncols=3, figsize=figsize)
            for metric_name, ax in zip(metric_names, axes.flatten()):
                xmin, xmax, ymin, ymax, label = plot_dict[metric_name]
                bins = np.linspace(xmin, xmax, self.nb_bins)
                metric = ds[metric_name].values
                # normalize so bar heights sum to one (fraction of shots per bin)
                weights = np.ones_like(metric) / len(metric)
                _hist, _bins, _patches = ax.hist(metric, bins=bins, alpha=0.5, weights=weights, label=self.arch_name)
                average = np.mean( np.abs(metric) )
                std = np.std( np.abs(metric) )
                print(f'model: {self.arch_name}, metric_name: {metric_name}, average: {average}, std: {std}')
                ax.set_xlim([xmin, xmax])
                ax.set_ylim([ymin, ymax])
                ax.set_title(metric_name, **self.title_font)
                ax.legend(loc='upper right', prop={'size': self.fontsize*0.6})
                ax.grid(ls='dashed', lw=1)
            figname = self.img_dir / 'metrics' / f'metric_{self.arch_name}.png'
            plt.savefig(figname, bbox_inches='tight')
            plt.close('all')
| [
"matplotlib.style.use",
"numpy.arange",
"matplotlib.colors.LogNorm",
"pathlib.Path",
"numpy.where",
"numpy.asarray",
"numpy.ma.masked_where",
"matplotlib.pyplot.close",
"numpy.linspace",
"numpy.abs",
"matplotlib.pyplot.savefig",
"xarray.Dataset",
"numpy.squeeze",
"xarray.open_dataset",
"... | [((1179, 1203), 'matplotlib.style.use', 'mpl.style.use', (['"""classic"""'], {}), "('classic')\n", (1192, 1203), True, 'import matplotlib as mpl\n'), ((1304, 1339), 'matplotlib.pyplot.rc', 'plt.rc', (['"""xtick"""'], {'labelsize': 'fontsize'}), "('xtick', labelsize=fontsize)\n", (1310, 1339), True, 'import matplotlib.pyplot as plt\n'), ((1348, 1383), 'matplotlib.pyplot.rc', 'plt.rc', (['"""ytick"""'], {'labelsize': 'fontsize'}), "('ytick', labelsize=fontsize)\n", (1354, 1383), True, 'import matplotlib.pyplot as plt\n'), ((1392, 1423), 'matplotlib.pyplot.rc', 'plt.rc', (['"""font"""'], {'family': 'fontname'}), "('font', family=fontname)\n", (1398, 1423), True, 'import matplotlib.pyplot as plt\n'), ((4132, 4182), 'numpy.logical_or', 'np.logical_or', (['(binary < threshold)', '(levelset >= 0.0)'], {}), '(binary < threshold, levelset >= 0.0)\n', (4145, 4182), True, 'import numpy as np\n'), ((5019, 5036), 'numpy.ones_like', 'np.ones_like', (['ref'], {}), '(ref)\n', (5031, 5036), True, 'import numpy as np\n'), ((5074, 5113), 'numpy.logical_and', 'np.logical_and', (['(ref > 0.0)', '(levelset < 0)'], {}), '(ref > 0.0, levelset < 0)\n', (5088, 5113), True, 'import numpy as np\n'), ((5132, 5168), 'numpy.where', 'np.where', (['target_area', '(pred / ref)', '(0)'], {}), '(target_area, pred / ref, 0)\n', (5140, 5168), True, 'import numpy as np\n'), ((5188, 5240), 'numpy.logical_and', 'np.logical_and', (['(fraction >= 1 / 2.0)', '(fraction <= 2.0)'], {}), '(fraction >= 1 / 2.0, fraction <= 2.0)\n', (5202, 5240), True, 'import numpy as np\n'), ((5259, 5311), 'numpy.logical_and', 'np.logical_and', (['(fraction >= 1 / 5.0)', '(fraction <= 5.0)'], {}), '(fraction >= 1 / 5.0, fraction <= 5.0)\n', (5273, 5311), True, 'import numpy as np\n'), ((5340, 5378), 'numpy.logical_and', 'np.logical_and', (['target_area', 'fac2_area'], {}), '(target_area, fac2_area)\n', (5354, 5378), True, 'import numpy as np\n'), ((5399, 5437), 'numpy.logical_and', 'np.logical_and', (['target_area', 
'fac5_area'], {}), '(target_area, fac5_area)\n', (5413, 5437), True, 'import numpy as np\n'), ((5609, 5660), 'numpy.logical_and', 'np.logical_and', (['(pred_binary < 0.5)', '(ref_binary < 0.5)'], {}), '(pred_binary < 0.5, ref_binary < 0.5)\n', (5623, 5660), True, 'import numpy as np\n'), ((3113, 3159), 'xarray.Dataset', 'xr.Dataset', ([], {'data_vars': 'data_vars', 'coords': 'coords'}), '(data_vars=data_vars, coords=coords)\n', (3123, 3159), True, 'import xarray as xr\n'), ((4080, 4095), 'numpy.squeeze', 'np.squeeze', (['img'], {}), '(img)\n', (4090, 4095), True, 'import numpy as np\n'), ((4097, 4115), 'numpy.squeeze', 'np.squeeze', (['binary'], {}), '(binary)\n', (4107, 4115), True, 'import numpy as np\n'), ((4217, 4242), 'numpy.where', 'np.where', (['mask', '(-1.0)', 'img'], {}), '(mask, -1.0, img)\n', (4225, 4242), True, 'import numpy as np\n'), ((4292, 4322), 'numpy.ma.masked_where', 'ma.masked_where', (['(img <= 0)', 'img'], {}), '(img <= 0, img)\n', (4307, 4322), False, 'from numpy import ma\n'), ((4762, 4812), 'numpy.logical_or', 'np.logical_or', (['(binary < threshold)', '(levelset >= 0.0)'], {}), '(binary < threshold, levelset >= 0.0)\n', (4775, 4812), True, 'import numpy as np\n'), ((5546, 5564), 'numpy.zeros_like', 'np.zeros_like', (['ref'], {}), '(ref)\n', (5559, 5564), True, 'import numpy as np\n'), ((5703, 5748), 'numpy.logical_or', 'np.logical_or', (['correct_zeros', '(levelset >= 0.0)'], {}), '(correct_zeros, levelset >= 0.0)\n', (5716, 5748), True, 'import numpy as np\n'), ((10125, 10150), 'xarray.open_dataset', 'xr.open_dataset', (['filename'], {}), '(filename)\n', (10140, 10150), True, 'import xarray as xr\n'), ((10176, 10223), 'matplotlib.pyplot.subplots', 'plt.subplots', ([], {'nrows': '(2)', 'ncols': '(3)', 'figsize': 'figsize'}), '(nrows=2, ncols=3, figsize=figsize)\n', (10188, 10223), True, 'import matplotlib.pyplot as plt\n'), ((11223, 11264), 'matplotlib.pyplot.savefig', 'plt.savefig', (['figname'], {'bbox_inches': '"""tight"""'}), 
"(figname, bbox_inches='tight')\n", (11234, 11264), True, 'import matplotlib.pyplot as plt\n'), ((11277, 11293), 'matplotlib.pyplot.close', 'plt.close', (['"""all"""'], {}), "('all')\n", (11286, 11293), True, 'import matplotlib.pyplot as plt\n'), ((1991, 2016), 'xarray.open_dataset', 'xr.open_dataset', (['filename'], {}), '(filename)\n', (2006, 2016), True, 'import xarray as xr\n'), ((3007, 3026), 'numpy.arange', 'np.arange', (['nb_shots'], {}), '(nb_shots)\n', (3016, 3026), True, 'import numpy as np\n'), ((4856, 4879), 'numpy.where', 'np.where', (['mask', '(-1)', 'img'], {}), '(mask, -1, img)\n', (4864, 4879), True, 'import numpy as np\n'), ((5483, 5500), 'numpy.ones_like', 'np.ones_like', (['ref'], {}), '(ref)\n', (5495, 5500), True, 'import numpy as np\n'), ((6693, 6718), 'xarray.open_dataset', 'xr.open_dataset', (['filename'], {}), '(filename)\n', (6708, 6718), True, 'import xarray as xr\n'), ((9369, 9410), 'matplotlib.pyplot.savefig', 'plt.savefig', (['figname'], {'bbox_inches': '"""tight"""'}), "(figname, bbox_inches='tight')\n", (9380, 9410), True, 'import matplotlib.pyplot as plt\n'), ((9427, 9443), 'matplotlib.pyplot.close', 'plt.close', (['"""all"""'], {}), "('all')\n", (9436, 9443), True, 'import matplotlib.pyplot as plt\n'), ((10388, 10425), 'numpy.linspace', 'np.linspace', (['xmin', 'xmax', 'self.nb_bins'], {}), '(xmin, xmax, self.nb_bins)\n', (10399, 10425), True, 'import numpy as np\n'), ((2942, 2970), 'numpy.asarray', 'np.asarray', (['evaluated_values'], {}), '(evaluated_values)\n', (2952, 2970), True, 'import numpy as np\n'), ((10500, 10520), 'numpy.ones_like', 'np.ones_like', (['metric'], {}), '(metric)\n', (10512, 10520), True, 'import numpy as np\n'), ((10688, 10702), 'numpy.abs', 'np.abs', (['metric'], {}), '(metric)\n', (10694, 10702), True, 'import numpy as np\n'), ((10735, 10749), 'numpy.abs', 'np.abs', (['metric'], {}), '(metric)\n', (10741, 10749), True, 'import numpy as np\n'), ((1893, 1925), 'pathlib.Path', 'pathlib.Path', 
(['self.inference_dir'], {}), '(self.inference_dir)\n', (1905, 1925), False, 'import pathlib\n'), ((6595, 6627), 'pathlib.Path', 'pathlib.Path', (['self.inference_dir'], {}), '(self.inference_dir)\n', (6607, 6627), False, 'import pathlib\n'), ((7870, 7909), 'matplotlib.colors.LogNorm', 'LogNorm', ([], {'vmin': 'self.vmin', 'vmax': 'self.vmax'}), '(vmin=self.vmin, vmax=self.vmax)\n', (7877, 7909), False, 'from matplotlib.colors import LogNorm\n'), ((8306, 8345), 'matplotlib.colors.LogNorm', 'LogNorm', ([], {'vmin': 'self.vmin', 'vmax': 'self.vmax'}), '(vmin=self.vmin, vmax=self.vmax)\n', (8313, 8345), False, 'from matplotlib.colors import LogNorm\n')] |
"""
The primary frame containing the content for the entire game
"""
import tkinter as tk
import random as random
from quince.utility import is_valid_pickup
from quince.ronda import Ronda
from quince.ui.components.opponents.opponent_frame \
import OpponentFrameHorizontal, OpponentFrameVertical
from quince.ui.components.table.table import Table
from quince.ui.components.player.player_frame import PlayerFrame
class GameFrame(tk.Frame):
    """Tk frame containing the main gameplay display including
    cards, decks, and avatars."""
    def __init__(self, parent, player, npc1, npc2, npc3, display_scores):
        """Instantiate a new GameFrame

        Args:
            parent (Tk widget)
            player - Player object representing the (human) user
            npc1 (NPC) - Shadow player (opponent)
            npc2 (NPC) - Shadow player (opponent)
            npc3 (NPC) - Shadow player (opponent)
            display_scores (function) - Callback to execute when
                a ronda is finished
        """
        tk.Frame.__init__(self, parent)
        self.parent = parent
        self.display_scores = display_scores

        # 3x3 grid: opponents top/left/right, table in the center,
        # player HUD spanning the bottom row.
        self.grid_rowconfigure(0, weight=1)
        self.grid_rowconfigure(1, weight=3)
        self.grid_rowconfigure(2, weight=1)
        self.grid_columnconfigure(0, weight=1)
        self.grid_columnconfigure(1, weight=3)
        self.grid_columnconfigure(2, weight=1)

        self.npc1 = npc1
        self.npc2 = npc2
        self.npc3 = npc3
        self.player = player
        self.selected_table_cards = []

        self.ronda = Ronda.start([self.player,
                                  self.npc1,
                                  self.npc2,
                                  self.npc3],
                                 self.npc3)

        # OPPONENT 1
        opp1_hand_size = len(self.ronda.player_cards[self.npc1]['hand'])
        opp1_active = self.ronda.current_player is self.npc1
        self.opp1 = OpponentFrameVertical(self,
                                          self.npc1.image(),
                                          self.npc1.name(),
                                          opp1_active,
                                          opp1_hand_size)
        self.opp1.grid(row=1, column=0)

        # OPPONENT 2
        opp2_active = self.ronda.current_player is self.npc2
        opp2_hand_size = len(self.ronda.player_cards[self.npc2]['hand'])
        self.opp2 = OpponentFrameHorizontal(self,
                                            self.npc2.image(),
                                            self.npc2.name(),
                                            opp2_active,
                                            opp2_hand_size)
        self.opp2.grid(row=0, column=1)

        # OPPONENT 3
        opp3_active = self.ronda.current_player is self.npc3
        opp3_hand_size = len(self.ronda.player_cards[self.npc3]['hand'])
        self.opp3 = OpponentFrameVertical(self,
                                          self.npc3.image(),
                                          self.npc3.name(),
                                          opp3_active,
                                          opp3_hand_size)
        self.opp3.grid(row=1, column=2)

        # PLAYER
        myhand = self.ronda.player_cards[self.player]['hand']
        player_is_active = self.ronda.current_player is self.player
        self.hud = PlayerFrame(self,
                               self.player,
                               myhand,
                               player_is_active,
                               self.play_hand)
        self.hud.grid(row=2, column=0, columnspan=3)

        # TABLE
        table_cards = self.ronda.current_mesa
        self.tbl = Table(self, table_cards, self.register_table_card_selection)
        self.tbl.grid(row=1, column=1)

    def draw(self):
        """Update all widgets on the frame to reflect the current ronda."""
        self.selected_table_cards = []
        table_cards = self.ronda.current_mesa
        current_player = self.ronda.current_player

        # OPPONENT 1
        opp1_hand_size = len(self.ronda.player_cards[self.npc1]['hand'])
        opp1_active = self.ronda.current_player is self.npc1
        self.opp1.refresh(opp1_hand_size, opp1_active)

        # OPPONENT 2
        opp2_active = current_player is self.npc2
        opp2_hand_size = len(self.ronda.player_cards[self.npc2]['hand'])
        self.opp2.refresh(opp2_hand_size, opp2_active)

        # OPPONENT 3
        opp3_active = current_player is self.npc3
        opp3_hand_size = len(self.ronda.player_cards[self.npc3]['hand'])
        self.opp3.refresh(opp3_hand_size, opp3_active)

        # PLAYER
        myhand = self.ronda.player_cards[self.player]['hand']
        player_is_active = current_player is self.player
        self.hud.refresh(myhand, player_is_active)

        # TABLE - rebuilt from scratch since its card set changes each turn
        self.tbl.destroy()
        self.tbl = Table(self, table_cards, self.register_table_card_selection)
        self.tbl.grid(row=1, column=1)

    def register_table_card_selection(self, cards):
        """Callback function executed by the Table
        when the user selects cards.

        The list of cards is stored in the GameFrame's
        state so that it can be queried when the user
        makes a move.

        Args:
            cards (List of Card)
        """
        self.selected_table_cards = cards

    def play_hand(self, hand_card):
        """Callback function executed when
        player clicks the "Play Hand" button.
        """
        if self.ronda.current_player is self.player:
            # BUG FIX: the original used a backslash line continuation inside
            # the string literal, which swallowed the space (or embedded the
            # source indentation) in the printed message.
            print(f'Attempting to play {hand_card} and '
                  f'pick up: {self.selected_table_cards}')
            if is_valid_pickup(hand_card, self.selected_table_cards):
                self.ronda = self.ronda.play_turn(hand_card,
                                                  self.selected_table_cards)
                self.draw()
                self.play_next_move()
        else:
            print("not your turn")

    def play_next_move(self):
        """This function gets called continually as CPU players make
        their moves. When it's the user's turn to play, the loop is
        broken until they play their hand, which will start up the
        cycle again.
        """
        if self.ronda.is_finished:
            self.display_scores(self.ronda)
            return

        if self.ronda.current_player is self.player:
            pass
        else:
            # BUG FIX: random.randrange(0, 1) always returns 0, so the CPU
            # "thinking" delay was never applied. Pick 0-1000 ms instead.
            delay_ms = random.randint(0, 1000)
            self.after(delay_ms, self._play_cpu_move)

    def _play_cpu_move(self):
        """Let the current CPU player choose and play a move, then continue
        the turn cycle."""
        table_cards = self.ronda.current_mesa
        current_player = self.ronda.current_player
        hand = self.ronda.player_cards[current_player]['hand']
        (own_card, mesa_cards) = current_player.get_move(hand, table_cards)
        self.ronda = self.ronda.play_turn(own_card, mesa_cards)
        print(f'{current_player.name()} played: {own_card} '
              f'and picked up: {mesa_cards}')
        self.draw()
        self.play_next_move()
| [
"tkinter.Frame.__init__",
"quince.utility.is_valid_pickup",
"random.randrange",
"quince.ronda.Ronda.start",
"quince.ui.components.table.table.Table",
"quince.ui.components.player.player_frame.PlayerFrame"
] | [((1032, 1063), 'tkinter.Frame.__init__', 'tk.Frame.__init__', (['self', 'parent'], {}), '(self, parent)\n', (1049, 1063), True, 'import tkinter as tk\n'), ((1579, 1649), 'quince.ronda.Ronda.start', 'Ronda.start', (['[self.player, self.npc1, self.npc2, self.npc3]', 'self.npc3'], {}), '([self.player, self.npc1, self.npc2, self.npc3], self.npc3)\n', (1590, 1649), False, 'from quince.ronda import Ronda\n'), ((3396, 3468), 'quince.ui.components.player.player_frame.PlayerFrame', 'PlayerFrame', (['self', 'self.player', 'myhand', 'player_is_active', 'self.play_hand'], {}), '(self, self.player, myhand, player_is_active, self.play_hand)\n', (3407, 3468), False, 'from quince.ui.components.player.player_frame import PlayerFrame\n'), ((3728, 3788), 'quince.ui.components.table.table.Table', 'Table', (['self', 'table_cards', 'self.register_table_card_selection'], {}), '(self, table_cards, self.register_table_card_selection)\n', (3733, 3788), False, 'from quince.ui.components.table.table import Table\n'), ((4894, 4954), 'quince.ui.components.table.table.Table', 'Table', (['self', 'table_cards', 'self.register_table_card_selection'], {}), '(self, table_cards, self.register_table_card_selection)\n', (4899, 4954), False, 'from quince.ui.components.table.table import Table\n'), ((5687, 5740), 'quince.utility.is_valid_pickup', 'is_valid_pickup', (['hand_card', 'self.selected_table_cards'], {}), '(hand_card, self.selected_table_cards)\n', (5702, 5740), False, 'from quince.utility import is_valid_pickup\n'), ((6471, 6493), 'random.randrange', 'random.randrange', (['(0)', '(1)'], {}), '(0, 1)\n', (6487, 6493), True, 'import random as random\n')] |
from src.main.common.model import endpoint
class TableEndpoint(endpoint.Endpoint):
    """REST endpoint exposing CRUD operations on table descriptors."""

    @classmethod
    def do_get(cls, *args, **kwargs):
        """Return all table descriptors of a database, or a single one when
        `tb_system_name` is supplied (None if it does not exist)."""
        from src.main.admin_api.utils.descriptor_utils import DescriptorUtils
        db_name = kwargs.get("db_system_name")
        tb_name = kwargs.get("tb_system_name", None)
        if tb_name is None:
            # No table selected: list every descriptor of the database.
            return [desc.to_dict() for desc in DescriptorUtils.get_tbs_descriptor(db_name)]
        found = DescriptorUtils.get_tb_descriptor_by_system_name(db_name, tb_name)
        return found.to_dict() if found is not None else None

    @classmethod
    def do_post(cls, *args, **kwargs):
        """Create a table descriptor from the request body; returns the stored
        descriptor, or None when no name was given or it already exists."""
        from src.main.admin_api.utils.descriptor_utils import DescriptorUtils
        from src.main.admin_api.model.table import Table
        db_name = kwargs.get("db_system_name")
        body = TableEndpoint.get_body()
        if body.get("name", None) is None:
            return None
        new_descriptor = Table.from_json(body)
        if DescriptorUtils.does_tb_descriptor_exist(db_name, new_descriptor):
            return None
        new_descriptor.save(db_name)
        return new_descriptor.to_dict()

    @classmethod
    def do_put(cls, *args, **kwargs):
        """Update name / description / fields of an existing descriptor;
        returns the updated descriptor, or None when it cannot be found."""
        from src.main.admin_api.utils.descriptor_utils import DescriptorUtils
        db_name = kwargs.get("db_system_name")
        tb_name = kwargs.get("tb_system_name")
        body = TableEndpoint.get_body()
        if tb_name is None:
            return None
        target = DescriptorUtils.get_tb_descriptor_by_system_name(db_name, tb_name)
        if target is None:
            return None
        # Only the keys actually present in the body are applied.
        for key, apply_value in (("name", target.set_name),
                                 ("description", target.set_description),
                                 ("fields", target.set_fields)):
            value = body.get(key, None)
            if value is not None:
                apply_value(value)
        target.save(db_name)
        return target.to_dict()

    @classmethod
    def do_delete(cls, *args, **kwargs):
        """Delete a table descriptor; returns the delete result, or None
        when the descriptor does not exist."""
        from src.main.admin_api.utils.descriptor_utils import DescriptorUtils
        db_name = kwargs.get("db_system_name")
        tb_name = kwargs.get("tb_system_name")
        target = DescriptorUtils.get_tb_descriptor_by_system_name(db_name, tb_name)
        if target is None:
            return None
        return target.delete(db_name)
| [
"src.main.admin_api.utils.descriptor_utils.DescriptorUtils.get_tbs_descriptor",
"src.main.admin_api.utils.descriptor_utils.DescriptorUtils.does_tb_descriptor_exist",
"src.main.admin_api.utils.descriptor_utils.DescriptorUtils.get_tb_descriptor_by_system_name",
"src.main.admin_api.model.table.Table.from_json"
] | [((2901, 2986), 'src.main.admin_api.utils.descriptor_utils.DescriptorUtils.get_tb_descriptor_by_system_name', 'DescriptorUtils.get_tb_descriptor_by_system_name', (['db_system_name', 'tb_system_name'], {}), '(db_system_name, tb_system_name\n )\n', (2949, 2986), False, 'from src.main.admin_api.utils.descriptor_utils import DescriptorUtils\n'), ((467, 517), 'src.main.admin_api.utils.descriptor_utils.DescriptorUtils.get_tbs_descriptor', 'DescriptorUtils.get_tbs_descriptor', (['db_system_name'], {}), '(db_system_name)\n', (501, 517), False, 'from src.main.admin_api.utils.descriptor_utils import DescriptorUtils\n'), ((689, 774), 'src.main.admin_api.utils.descriptor_utils.DescriptorUtils.get_tb_descriptor_by_system_name', 'DescriptorUtils.get_tb_descriptor_by_system_name', (['db_system_name', 'tb_system_name'], {}), '(db_system_name, tb_system_name\n )\n', (737, 774), False, 'from src.main.admin_api.utils.descriptor_utils import DescriptorUtils\n'), ((1299, 1320), 'src.main.admin_api.model.table.Table.from_json', 'Table.from_json', (['body'], {}), '(body)\n', (1314, 1320), False, 'from src.main.admin_api.model.table import Table\n'), ((1916, 2001), 'src.main.admin_api.utils.descriptor_utils.DescriptorUtils.get_tb_descriptor_by_system_name', 'DescriptorUtils.get_tb_descriptor_by_system_name', (['db_system_name', 'tb_system_name'], {}), '(db_system_name, tb_system_name\n )\n', (1964, 2001), False, 'from src.main.admin_api.utils.descriptor_utils import DescriptorUtils\n'), ((1341, 1409), 'src.main.admin_api.utils.descriptor_utils.DescriptorUtils.does_tb_descriptor_exist', 'DescriptorUtils.does_tb_descriptor_exist', (['db_system_name', 'descriptor'], {}), '(db_system_name, descriptor)\n', (1381, 1409), False, 'from src.main.admin_api.utils.descriptor_utils import DescriptorUtils\n')] |
import tensorflow as tf
from data.BaseReader import BaseReader
import numpy as np
class Base2DReader(BaseReader):
    # inherit from BaseReader, implement different 2D cropping (cropping from 2D)
    def __init__(self, objtype=0, shuffle=True, batch_size=1, crop_noise=False):
        """Forward construction to BaseReader.

        objtype 0 appears to select full-body keypoints, 1 a single hand
        (left or right) -- inferred from the tensor keys used in get();
        TODO confirm against BaseReader.
        """
        super(Base2DReader, self).__init__(objtype, shuffle, batch_size, crop_noise)
    def get(self, withPAF=True, read_image=True, imw=1920, imh=1080):
        """Build the TF1 input pipeline producing one batched sample dict.

        Args:
            withPAF: also produce Part Affinity Field targets ('PAF', 'PAF_type').
            read_image: decode, pad and crop the RGB image from disk.
            imw, imh: width/height every decoded image/mask is padded to.

        Returns:
            dict of batched tensors (e.g. 'image_crop', 'mask_crop',
            'scoremap2d', 'keypoint_uv_local', optionally 'PAF').
        """
        assert type(withPAF) == bool
        assert self.objtype in (0, 1)
        # produce data from slice_input_producer
        flow_list = tf.train.slice_input_producer(list(self.tensor_dict.values()), shuffle=self.shuffle)
        flow_dict = {key: flow_list[ik] for ik, key in enumerate(self.tensor_dict.keys())}
        # build data dictionary
        data_dict = {}
        data_dict['img_dir'] = flow_dict['img_dirs']
        # PAF_given becomes True only when 3D keypoints are available in the data.
        PAF_given = False
        if self.objtype == 0:
            # body keypoints
            body2d = flow_dict['body']
            data_dict['body_valid'] = flow_dict['body_valid']
            data_dict['keypoint_uv_origin'] = body2d
            if 'body_3d' in flow_dict:
                data_dict['keypoint_xyz_origin'] = flow_dict['body_3d']
                data_dict['keypoint_xyz_local'] = flow_dict['body_3d']
                PAF_given = True
        elif self.objtype == 1:
            # hand keypoints: pick left hand when any left-hand keypoint is valid
            cond_left = tf.reduce_any(tf.cast(flow_dict['left_hand_valid'], dtype=tf.bool))  # 0 for right hand, 1 for left hand
            hand2d = tf.cond(cond_left, lambda: flow_dict['left_hand'], lambda: flow_dict['right_hand'])  # in world coordinate
            hand2d = tf.cast(hand2d, tf.float32)
            data_dict['keypoint_uv_origin'] = hand2d
            data_dict['left_hand_valid'] = flow_dict['left_hand_valid']
            data_dict['right_hand_valid'] = flow_dict['right_hand_valid']
            if 'left_hand_3d' in flow_dict and 'right_hand_3d' in flow_dict:
                hand3d = tf.cond(cond_left, lambda: flow_dict['left_hand_3d'], lambda: flow_dict['right_hand_3d'])
                data_dict['keypoint_xyz_origin'] = hand3d
                data_dict['keypoint_xyz_local'] = hand3d
                PAF_given = True
        # read image
        if read_image:
            img_file = tf.read_file(flow_dict['img_dirs'])
            image = tf.image.decode_image(img_file, channels=3)
            image = tf.image.pad_to_bounding_box(image, 0, 0, imh, imw)
            image.set_shape((imh, imw, 3))
            # normalize to [-0.5, 0.5]
            image = tf.cast(image, tf.float32) / 255.0 - 0.5
            data_dict['image'] = image
        if 'mask_dirs' in flow_dict:
            mask_file = tf.read_file(flow_dict['mask_dirs'])
            mask = tf.image.decode_image(mask_file, channels=3)
            mask = tf.image.pad_to_bounding_box(mask, 0, 0, imh, imw)
            mask.set_shape((imh, imw, 3))
            mask = mask[:, :, 0]
            mask = tf.cast(mask, tf.float32)
        else:
            mask = tf.ones((imh, imw), dtype=tf.float32)
        if 'other_bbox' in flow_dict:
            # Zero the mask inside up to 20 "other" bounding boxes:
            # out == 1 only for pixels outside every box, then mask &= out.
            ob = flow_dict['other_bbox']
            Xindmap = tf.tile(tf.expand_dims(tf.range(imw, dtype=tf.int32), 0), [imh, 1])
            Xindmap = tf.tile(tf.expand_dims(Xindmap, 2), [1, 1, 20])
            Yindmap = tf.tile(tf.expand_dims(tf.range(imh, dtype=tf.int32), 1), [1, imw])
            Yindmap = tf.tile(tf.expand_dims(Yindmap, 2), [1, 1, 20])
            x_out = tf.logical_or(tf.less(Xindmap, ob[:, 0]), tf.greater_equal(Xindmap, ob[:, 2]))
            y_out = tf.logical_or(tf.less(Yindmap, ob[:, 1]), tf.greater_equal(Yindmap, ob[:, 3]))
            out = tf.cast(tf.logical_or(x_out, y_out), tf.float32)
            out = tf.reduce_min(out, axis=2)
            mask = tf.minimum(mask, out)
        data_dict['mask'] = mask
        if self.objtype in (0, 1):
            if self.objtype == 0:
                keypoints = body2d
                valid = flow_dict['body_valid']
            elif self.objtype == 1:
                keypoints = hand2d
                body2d = hand2d
                valid = tf.cond(cond_left, lambda: flow_dict['left_hand_valid'], lambda: flow_dict['right_hand_valid'])
                data_dict['hand_valid'] = valid
                if PAF_given:
                    body3d = hand3d
            crop_center2d, scale2d = self.calc_crop_scale2d(keypoints, valid)
            data_dict['crop_center2d'] = crop_center2d
            data_dict['scale2d'] = scale2d
            if self.rotate_augmentation:
                print('using rotation augmentation')
                # random rotation in [-40, 40] degrees around the crop center
                rotate_angle = tf.random_uniform([], minval=-np.pi * 40 / 180, maxval=np.pi * 40 / 180)
                R2 = tf.reshape(tf.stack([tf.cos(rotate_angle), -tf.sin(rotate_angle), tf.sin(rotate_angle), tf.cos(rotate_angle)]), [2, 2])
                body2d = tf.matmul((body2d - crop_center2d), R2) + crop_center2d
                data_dict['keypoint_uv_origin'] = body2d
                if PAF_given:
                    # rotate 3D keypoints about the z axis by the same angle
                    R3 = tf.reshape(tf.stack([tf.cos(rotate_angle), -tf.sin(rotate_angle), 0., tf.sin(rotate_angle), tf.cos(rotate_angle), 0., 0., 0., 1.]), [3, 3])
                    body3d = tf.matmul(body3d, R3)
                    data_dict['keypoint_xyz_origin'] = body3d
                    data_dict['keypoint_xyz_local'] = body3d
            body2d_local = self.update_keypoint2d(body2d, crop_center2d, scale2d)
            data_dict['keypoint_uv_local'] = body2d_local
            if read_image:
                image_crop = self.crop_image(image, crop_center2d, scale2d)
                data_dict['image_crop'] = image_crop
                mask_crop = self.crop_image(tf.stack([mask] * 3, axis=2), crop_center2d, scale2d)
                data_dict['mask_crop'] = mask_crop[:, :, 0]
                if self.rotate_augmentation:
                    data_dict['image_crop'] = tf.contrib.image.rotate(data_dict['image_crop'], rotate_angle)
                    data_dict['mask_crop'] = tf.contrib.image.rotate(data_dict['mask_crop'], rotate_angle)
                if self.blur_augmentation:
                    print('using blur augmentation')
                    # downscale then upscale back to crop_size to simulate blur
                    rescale_factor = tf.random_uniform([], minval=0.1, maxval=1.0)
                    rescale = tf.cast(rescale_factor * self.crop_size, tf.int32)
                    resized_image = tf.image.resize_images(data_dict['image_crop'], [rescale, rescale])
                    data_dict['image_crop'] = tf.image.resize_images(resized_image, [self.crop_size, self.crop_size])
            # create 2D gaussian map
            scoremap2d = self.create_multiple_gaussian_map(body2d_local[:, ::-1], (self.crop_size, self.crop_size), self.sigma, valid_vec=valid, extra=True)  # coord_hw, imsize_hw
            data_dict['scoremap2d'] = scoremap2d
            if withPAF:
                from utils.PAF import createPAF
                num_keypoint = body2d_local.get_shape().as_list()[0]
                zeros = tf.zeros([num_keypoint, 1], dtype=tf.float32)
                if PAF_given:
                    data_dict['PAF'] = createPAF(body2d_local, body3d, self.objtype, (self.crop_size, self.crop_size), normalize_3d=True, valid_vec=valid)
                    data_dict['PAF_type'] = tf.ones([], dtype=bool)  # 0 for 2D PAF, 1 for 3D PAF
                else:
                    # no 3D data: build a 2D PAF by appending a zero z column
                    data_dict['PAF'] = createPAF(body2d_local, tf.concat([body2d, zeros], axis=1), self.objtype, (self.crop_size, self.crop_size), normalize_3d=False, valid_vec=valid)
                    data_dict['PAF_type'] = tf.zeros([], dtype=bool)  # 0 for 2D PAF, 1 for 3D PAF
            if self.objtype == 1:  # this is hand, flip the image if it is right hand
                data_dict['image_crop'] = tf.cond(cond_left, lambda: data_dict['image_crop'], lambda: data_dict['image_crop'][:, ::-1, :])
                data_dict['mask_crop'] = tf.cond(cond_left, lambda: data_dict['mask_crop'], lambda: data_dict['mask_crop'][:, ::-1])
                data_dict['scoremap2d'] = tf.cond(cond_left, lambda: data_dict['scoremap2d'], lambda: data_dict['scoremap2d'][:, ::-1, :])
                data_dict['keypoint_uv_local'] = tf.cond(cond_left, lambda: data_dict['keypoint_uv_local'],
                                                         lambda: tf.constant([self.crop_size, 0], tf.float32) + tf.constant([-1, 1], tf.float32) * data_dict['keypoint_uv_local'])
                if withPAF:
                    # mirror the PAF and negate its x components
                    data_dict['PAF'] = tf.cond(cond_left, lambda: data_dict['PAF'],
                                               lambda: (data_dict['PAF'][:, ::-1, :]) * tf.constant([-1, 1, 1] * (data_dict['PAF'].get_shape().as_list()[2] // 3), dtype=tf.float32))
        names, tensors = zip(*data_dict.items())
        if self.shuffle:
            tensors = tf.train.shuffle_batch_join([tensors],
                                                    batch_size=self.batch_size,
                                                    capacity=100,
                                                    min_after_dequeue=50,
                                                    enqueue_many=False)
        else:
            tensors = tf.train.batch_join([tensors],
                                            batch_size=self.batch_size,
                                            capacity=100,
                                            enqueue_many=False)
        return dict(zip(names, tensors))
| [
"tensorflow.image.resize_images",
"tensorflow.sin",
"tensorflow.cast",
"tensorflow.reduce_min",
"utils.PAF.createPAF",
"tensorflow.concat",
"tensorflow.matmul",
"tensorflow.contrib.image.rotate",
"tensorflow.less",
"tensorflow.train.batch_join",
"tensorflow.cos",
"tensorflow.stack",
"tensorf... | [((2238, 2273), 'tensorflow.read_file', 'tf.read_file', (["flow_dict['img_dirs']"], {}), "(flow_dict['img_dirs'])\n", (2250, 2273), True, 'import tensorflow as tf\n'), ((2294, 2337), 'tensorflow.image.decode_image', 'tf.image.decode_image', (['img_file'], {'channels': '(3)'}), '(img_file, channels=3)\n', (2315, 2337), True, 'import tensorflow as tf\n'), ((2358, 2409), 'tensorflow.image.pad_to_bounding_box', 'tf.image.pad_to_bounding_box', (['image', '(0)', '(0)', 'imh', 'imw'], {}), '(image, 0, 0, imh, imw)\n', (2386, 2409), True, 'import tensorflow as tf\n'), ((2614, 2650), 'tensorflow.read_file', 'tf.read_file', (["flow_dict['mask_dirs']"], {}), "(flow_dict['mask_dirs'])\n", (2626, 2650), True, 'import tensorflow as tf\n'), ((2670, 2714), 'tensorflow.image.decode_image', 'tf.image.decode_image', (['mask_file'], {'channels': '(3)'}), '(mask_file, channels=3)\n', (2691, 2714), True, 'import tensorflow as tf\n'), ((2734, 2784), 'tensorflow.image.pad_to_bounding_box', 'tf.image.pad_to_bounding_box', (['mask', '(0)', '(0)', 'imh', 'imw'], {}), '(mask, 0, 0, imh, imw)\n', (2762, 2784), True, 'import tensorflow as tf\n'), ((2879, 2904), 'tensorflow.cast', 'tf.cast', (['mask', 'tf.float32'], {}), '(mask, tf.float32)\n', (2886, 2904), True, 'import tensorflow as tf\n'), ((2938, 2975), 'tensorflow.ones', 'tf.ones', (['(imh, imw)'], {'dtype': 'tf.float32'}), '((imh, imw), dtype=tf.float32)\n', (2945, 2975), True, 'import tensorflow as tf\n'), ((3659, 3685), 'tensorflow.reduce_min', 'tf.reduce_min', (['out'], {'axis': '(2)'}), '(out, axis=2)\n', (3672, 3685), True, 'import tensorflow as tf\n'), ((3705, 3726), 'tensorflow.minimum', 'tf.minimum', (['mask', 'out'], {}), '(mask, out)\n', (3715, 3726), True, 'import tensorflow as tf\n'), ((8672, 8799), 'tensorflow.train.shuffle_batch_join', 'tf.train.shuffle_batch_join', (['[tensors]'], {'batch_size': 'self.batch_size', 'capacity': '(100)', 'min_after_dequeue': '(50)', 'enqueue_many': '(False)'}), '([tensors], 
batch_size=self.batch_size, capacity\n =100, min_after_dequeue=50, enqueue_many=False)\n', (8699, 8799), True, 'import tensorflow as tf\n'), ((9031, 9127), 'tensorflow.train.batch_join', 'tf.train.batch_join', (['[tensors]'], {'batch_size': 'self.batch_size', 'capacity': '(100)', 'enqueue_many': '(False)'}), '([tensors], batch_size=self.batch_size, capacity=100,\n enqueue_many=False)\n', (9050, 9127), True, 'import tensorflow as tf\n'), ((1475, 1565), 'tensorflow.cond', 'tf.cond', (['cond_left', "(lambda : flow_dict['left_hand'])", "(lambda : flow_dict['right_hand'])"], {}), "(cond_left, lambda : flow_dict['left_hand'], lambda : flow_dict[\n 'right_hand'])\n", (1482, 1565), True, 'import tensorflow as tf\n'), ((1603, 1630), 'tensorflow.cast', 'tf.cast', (['hand2d', 'tf.float32'], {}), '(hand2d, tf.float32)\n', (1610, 1630), True, 'import tensorflow as tf\n'), ((3175, 3201), 'tensorflow.expand_dims', 'tf.expand_dims', (['Xindmap', '(2)'], {}), '(Xindmap, 2)\n', (3189, 3201), True, 'import tensorflow as tf\n'), ((3335, 3361), 'tensorflow.expand_dims', 'tf.expand_dims', (['Yindmap', '(2)'], {}), '(Yindmap, 2)\n', (3349, 3361), True, 'import tensorflow as tf\n'), ((3410, 3436), 'tensorflow.less', 'tf.less', (['Xindmap', 'ob[:, 0]'], {}), '(Xindmap, ob[:, 0])\n', (3417, 3436), True, 'import tensorflow as tf\n'), ((3438, 3473), 'tensorflow.greater_equal', 'tf.greater_equal', (['Xindmap', 'ob[:, 2]'], {}), '(Xindmap, ob[:, 2])\n', (3454, 3473), True, 'import tensorflow as tf\n'), ((3509, 3535), 'tensorflow.less', 'tf.less', (['Yindmap', 'ob[:, 1]'], {}), '(Yindmap, ob[:, 1])\n', (3516, 3535), True, 'import tensorflow as tf\n'), ((3537, 3572), 'tensorflow.greater_equal', 'tf.greater_equal', (['Yindmap', 'ob[:, 3]'], {}), '(Yindmap, ob[:, 3])\n', (3553, 3572), True, 'import tensorflow as tf\n'), ((3600, 3627), 'tensorflow.logical_or', 'tf.logical_or', (['x_out', 'y_out'], {}), '(x_out, y_out)\n', (3613, 3627), True, 'import tensorflow as tf\n'), ((4553, 4625), 
'tensorflow.random_uniform', 'tf.random_uniform', (['[]'], {'minval': '(-np.pi * 40 / 180)', 'maxval': '(np.pi * 40 / 180)'}), '([], minval=-np.pi * 40 / 180, maxval=np.pi * 40 / 180)\n', (4570, 4625), True, 'import tensorflow as tf\n'), ((5611, 5639), 'tensorflow.stack', 'tf.stack', (['([mask] * 3)'], {'axis': '(2)'}), '([mask] * 3, axis=2)\n', (5619, 5639), True, 'import tensorflow as tf\n'), ((5804, 5866), 'tensorflow.contrib.image.rotate', 'tf.contrib.image.rotate', (["data_dict['image_crop']", 'rotate_angle'], {}), "(data_dict['image_crop'], rotate_angle)\n", (5827, 5866), True, 'import tensorflow as tf\n'), ((5908, 5969), 'tensorflow.contrib.image.rotate', 'tf.contrib.image.rotate', (["data_dict['mask_crop']", 'rotate_angle'], {}), "(data_dict['mask_crop'], rotate_angle)\n", (5931, 5969), True, 'import tensorflow as tf\n'), ((6091, 6136), 'tensorflow.random_uniform', 'tf.random_uniform', (['[]'], {'minval': '(0.1)', 'maxval': '(1.0)'}), '([], minval=0.1, maxval=1.0)\n', (6108, 6136), True, 'import tensorflow as tf\n'), ((6163, 6213), 'tensorflow.cast', 'tf.cast', (['(rescale_factor * self.crop_size)', 'tf.int32'], {}), '(rescale_factor * self.crop_size, tf.int32)\n', (6170, 6213), True, 'import tensorflow as tf\n'), ((6246, 6313), 'tensorflow.image.resize_images', 'tf.image.resize_images', (["data_dict['image_crop']", '[rescale, rescale]'], {}), "(data_dict['image_crop'], [rescale, rescale])\n", (6268, 6313), True, 'import tensorflow as tf\n'), ((6356, 6427), 'tensorflow.image.resize_images', 'tf.image.resize_images', (['resized_image', '[self.crop_size, self.crop_size]'], {}), '(resized_image, [self.crop_size, self.crop_size])\n', (6378, 6427), True, 'import tensorflow as tf\n'), ((6861, 6906), 'tensorflow.zeros', 'tf.zeros', (['[num_keypoint, 1]'], {'dtype': 'tf.float32'}), '([num_keypoint, 1], dtype=tf.float32)\n', (6869, 6906), True, 'import tensorflow as tf\n'), ((7624, 7727), 'tensorflow.cond', 'tf.cond', (['cond_left', "(lambda : 
data_dict['image_crop'])", "(lambda : data_dict['image_crop'][:, ::-1, :])"], {}), "(cond_left, lambda : data_dict['image_crop'], lambda : data_dict[\n 'image_crop'][:, ::-1, :])\n", (7631, 7727), True, 'import tensorflow as tf\n'), ((7762, 7860), 'tensorflow.cond', 'tf.cond', (['cond_left', "(lambda : data_dict['mask_crop'])", "(lambda : data_dict['mask_crop'][:, ::-1])"], {}), "(cond_left, lambda : data_dict['mask_crop'], lambda : data_dict[\n 'mask_crop'][:, ::-1])\n", (7769, 7860), True, 'import tensorflow as tf\n'), ((7896, 7999), 'tensorflow.cond', 'tf.cond', (['cond_left', "(lambda : data_dict['scoremap2d'])", "(lambda : data_dict['scoremap2d'][:, ::-1, :])"], {}), "(cond_left, lambda : data_dict['scoremap2d'], lambda : data_dict[\n 'scoremap2d'][:, ::-1, :])\n", (7903, 7999), True, 'import tensorflow as tf\n'), ((1363, 1415), 'tensorflow.cast', 'tf.cast', (["flow_dict['left_hand_valid']"], {'dtype': 'tf.bool'}), "(flow_dict['left_hand_valid'], dtype=tf.bool)\n", (1370, 1415), True, 'import tensorflow as tf\n'), ((1932, 2028), 'tensorflow.cond', 'tf.cond', (['cond_left', "(lambda : flow_dict['left_hand_3d'])", "(lambda : flow_dict['right_hand_3d'])"], {}), "(cond_left, lambda : flow_dict['left_hand_3d'], lambda : flow_dict[\n 'right_hand_3d'])\n", (1939, 2028), True, 'import tensorflow as tf\n'), ((2473, 2499), 'tensorflow.cast', 'tf.cast', (['image', 'tf.float32'], {}), '(image, tf.float32)\n', (2480, 2499), True, 'import tensorflow as tf\n'), ((3100, 3129), 'tensorflow.range', 'tf.range', (['imw'], {'dtype': 'tf.int32'}), '(imw, dtype=tf.int32)\n', (3108, 3129), True, 'import tensorflow as tf\n'), ((3260, 3289), 'tensorflow.range', 'tf.range', (['imh'], {'dtype': 'tf.int32'}), '(imh, dtype=tf.int32)\n', (3268, 3289), True, 'import tensorflow as tf\n'), ((4040, 4141), 'tensorflow.cond', 'tf.cond', (['cond_left', "(lambda : flow_dict['left_hand_valid'])", "(lambda : flow_dict['right_hand_valid'])"], {}), "(cond_left, lambda : flow_dict['left_hand_valid'], 
lambda :\n flow_dict['right_hand_valid'])\n", (4047, 4141), True, 'import tensorflow as tf\n'), ((4792, 4829), 'tensorflow.matmul', 'tf.matmul', (['(body2d - crop_center2d)', 'R2'], {}), '(body2d - crop_center2d, R2)\n', (4801, 4829), True, 'import tensorflow as tf\n'), ((5129, 5150), 'tensorflow.matmul', 'tf.matmul', (['body3d', 'R3'], {}), '(body3d, R3)\n', (5138, 5150), True, 'import tensorflow as tf\n'), ((6976, 7096), 'utils.PAF.createPAF', 'createPAF', (['body2d_local', 'body3d', 'self.objtype', '(self.crop_size, self.crop_size)'], {'normalize_3d': '(True)', 'valid_vec': 'valid'}), '(body2d_local, body3d, self.objtype, (self.crop_size, self.\n crop_size), normalize_3d=True, valid_vec=valid)\n', (6985, 7096), False, 'from utils.PAF import createPAF\n'), ((7136, 7159), 'tensorflow.ones', 'tf.ones', (['[]'], {'dtype': 'bool'}), '([], dtype=bool)\n', (7143, 7159), True, 'import tensorflow as tf\n'), ((7440, 7464), 'tensorflow.zeros', 'tf.zeros', (['[]'], {'dtype': 'bool'}), '([], dtype=bool)\n', (7448, 7464), True, 'import tensorflow as tf\n'), ((7275, 7309), 'tensorflow.concat', 'tf.concat', (['[body2d, zeros]'], {'axis': '(1)'}), '([body2d, zeros], axis=1)\n', (7284, 7309), True, 'import tensorflow as tf\n'), ((4668, 4688), 'tensorflow.cos', 'tf.cos', (['rotate_angle'], {}), '(rotate_angle)\n', (4674, 4688), True, 'import tensorflow as tf\n'), ((4713, 4733), 'tensorflow.sin', 'tf.sin', (['rotate_angle'], {}), '(rotate_angle)\n', (4719, 4733), True, 'import tensorflow as tf\n'), ((4735, 4755), 'tensorflow.cos', 'tf.cos', (['rotate_angle'], {}), '(rotate_angle)\n', (4741, 4755), True, 'import tensorflow as tf\n'), ((8166, 8210), 'tensorflow.constant', 'tf.constant', (['[self.crop_size, 0]', 'tf.float32'], {}), '([self.crop_size, 0], tf.float32)\n', (8177, 8210), True, 'import tensorflow as tf\n'), ((4691, 4711), 'tensorflow.sin', 'tf.sin', (['rotate_angle'], {}), '(rotate_angle)\n', (4697, 4711), True, 'import tensorflow as tf\n'), ((4981, 5001), 
'tensorflow.cos', 'tf.cos', (['rotate_angle'], {}), '(rotate_angle)\n', (4987, 5001), True, 'import tensorflow as tf\n'), ((5030, 5050), 'tensorflow.sin', 'tf.sin', (['rotate_angle'], {}), '(rotate_angle)\n', (5036, 5050), True, 'import tensorflow as tf\n'), ((5052, 5072), 'tensorflow.cos', 'tf.cos', (['rotate_angle'], {}), '(rotate_angle)\n', (5058, 5072), True, 'import tensorflow as tf\n'), ((8213, 8245), 'tensorflow.constant', 'tf.constant', (['[-1, 1]', 'tf.float32'], {}), '([-1, 1], tf.float32)\n', (8224, 8245), True, 'import tensorflow as tf\n'), ((5004, 5024), 'tensorflow.sin', 'tf.sin', (['rotate_angle'], {}), '(rotate_angle)\n', (5010, 5024), True, 'import tensorflow as tf\n')] |
from tortoise import Model, fields
class TZProfile(Model):
    """Tortoise ORM model holding the claim-verification state of one
    profile (presumably a Tezos "tz" profile -- name-based inference)."""
    # Account identifier, 36 characters; primary key of the table.
    account = fields.CharField(36, pk=True)
    # Associated contract identifier, 36 characters.
    contract = fields.CharField(36)
    # Claims that passed verification, stored as JSON.
    valid_claims = fields.JSONField()
    # Claims that failed verification, stored as JSON.
    invalid_claims = fields.JSONField()
    # True if an error occurred while processing this profile.
    errored = fields.BooleanField()
    class Meta:
        # explicit database table name
        table = "tzprofiles"
| [
"tortoise.fields.CharField",
"tortoise.fields.JSONField",
"tortoise.fields.BooleanField"
] | [((75, 104), 'tortoise.fields.CharField', 'fields.CharField', (['(36)'], {'pk': '(True)'}), '(36, pk=True)\n', (91, 104), False, 'from tortoise import Model, fields\n'), ((120, 140), 'tortoise.fields.CharField', 'fields.CharField', (['(36)'], {}), '(36)\n', (136, 140), False, 'from tortoise import Model, fields\n'), ((160, 178), 'tortoise.fields.JSONField', 'fields.JSONField', ([], {}), '()\n', (176, 178), False, 'from tortoise import Model, fields\n'), ((200, 218), 'tortoise.fields.JSONField', 'fields.JSONField', ([], {}), '()\n', (216, 218), False, 'from tortoise import Model, fields\n'), ((233, 254), 'tortoise.fields.BooleanField', 'fields.BooleanField', ([], {}), '()\n', (252, 254), False, 'from tortoise import Model, fields\n')] |
"""
Copyright (c) 2018, <NAME>
All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are met:
1. Redistributions of source code must retain the above copyright notice, this
list of conditions and the following disclaimer.
2. Redistributions in binary form must reproduce the above copyright notice,
this list of conditions and the following disclaimer in the documentation
and/or other materials provided with the distribution.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR
ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
The views and conclusions contained in the software and documentation are those
of the authors and should not be interpreted as representing official policies,
either expressed or implied, of the <project name> project.
"""
import numpy as np
import pickle
import time
from cflib.crazyflie import Crazyflie
from cflib.positioning.motion_commander import MotionCommander
import rospy
import actionlib
from std_msgs.msg import UInt16
from geometry_msgs.msg import Vector3
from rospy_crazyflie.msg import *
from rospy_crazyflie.srv import *
from rospy_crazyflie.motion_commands import *
class CrazyflieControl:
    """ROS-side controller for a single Crazyflie.

    Wraps the cflib ``MotionCommander`` of one connected Crazyflie and
    exposes it through ROS services (velocity-style commands, hover
    setpoints, firmware parameters) and an actionlib server for blocking
    position-control commands.  Command objects are transported as pickled
    payloads inside the service/action messages.
    """

    def __init__(self, name, crazyflie):
        """Create the motion commander and advertise services/actions.

        Args:
            name: ROS namespace prefix for this crazyflie's topics/services.
            crazyflie: a connected ``cflib.crazyflie.Crazyflie`` instance.
        """
        self._cf = crazyflie
        self._name = name
        self._mc = MotionCommander(self._cf)

        # Topic publishers
        self._velocity_setpoint_pub = rospy.Publisher(
            self._name + '/velocity_setpoint',
            Vector3,
            queue_size=10
        )

        # Services hosted for this crazyflie controller
        self._reset_position_estimator_srv = rospy.Service(
            self._name + '/reset_position_estimator',
            ResetPositionEstimator,
            self._reset_position_estimator_cb
        )
        self._send_hover_setpoint_srv = rospy.Service(
            self._name + '/send_hover_setpoint',
            SendHoverSetpoint,
            self._send_hover_setpoint_cb
        )
        self._set_param_srv = rospy.Service(
            self._name + '/set_param',
            SetParam,
            self._set_param_cb
        )
        self._velocity_control_srv = rospy.Service(
            self._name + '/velocity_control',
            VelocityControl,
            self._velocity_control_cb
        )

        # Action servers for this crazyflie controller
        self._position_control_as = actionlib.SimpleActionServer(
            self._name + '/position_control',
            PositionControlAction,
            self._position_control_cb,
            False
        )
        self._position_control_as.start()

    # ------------------------------------------------------------------
    # Service callbacks
    # ------------------------------------------------------------------
    def _reset_position_estimator_cb(self, req):
        """Placeholder: resetting the position estimator is not implemented."""
        pass

    def _send_hover_setpoint_cb(self, req):
        """Forward a hover setpoint (vx, vy, yaw_rate, z) to the firmware."""
        vx = req.vx
        vy = req.vy
        z = req.z
        yaw_rate = req.yaw_rate
        self._cf.commander.send_hover_setpoint(vx, vy, yaw_rate, z)
        return []

    def _set_param_cb(self, req):
        """Set a firmware parameter on the crazyflie."""
        self._cf.param.set_value(req.param, req.value)
        print("set %s to %s" % (req.param, req.value))
        return SetParamResponse()

    def _velocity_control_cb(self, req):
        """Dispatch a pickled velocity-style command to the motion commander.

        Returns:
            'ok' on success, or an error string for unknown command objects.

        SECURITY NOTE: ``pickle.loads`` on message payloads executes
        arbitrary code if the payload is attacker-controlled; only use on a
        trusted ROS graph.
        """
        try:
            obj = pickle.loads(req.pickle)
            print(self._mc)
            if isinstance(obj, SetVelSetpoint):
                self._mc._set_vel_setpoint(obj.vx, obj.vy, obj.vz, obj.rate_yaw)
            elif isinstance(obj, StartBack):
                self._mc.start_back(velocity=obj.velocity)
            elif isinstance(obj, StartCircleLeft):
                self._mc.start_circle_left(obj.radius_m, velocity=obj.velocity)
            elif isinstance(obj, StartCircleRight):
                # BUG FIX: this branch previously called
                # start_turn_right(obj.radius_m, velocity=...), but
                # MotionCommander.start_turn_right() only accepts a rate and
                # has no 'velocity' argument (TypeError at runtime).  A
                # circle command maps to start_circle_right().
                self._mc.start_circle_right(obj.radius_m, velocity=obj.velocity)
            elif isinstance(obj, StartDown):
                self._mc.start_down(velocity=obj.velocity)
            elif isinstance(obj, StartForward):
                self._mc.start_forward(velocity=obj.velocity)
            elif isinstance(obj, StartLeft):
                self._mc.start_left(velocity=obj.velocity)
            elif isinstance(obj, StartLinearMotion):
                self._mc.start_linear_motion(obj.vx, obj.vy, obj.vz)
            elif isinstance(obj, StartRight):
                self._mc.start_right(velocity=obj.velocity)
            elif isinstance(obj, StartTurnLeft):
                self._mc.start_turn_left(rate=obj.rate)
            elif isinstance(obj, StartTurnRight):
                self._mc.start_turn_right(rate=obj.rate)
            elif isinstance(obj, StartUp):
                self._mc.start_up(velocity=obj.velocity)
            elif isinstance(obj, Stop):
                self._mc.stop()
            else:
                return 'Object is not a valid velocity command'
        except Exception as e:
            print(str(e))
            raise e
        return 'ok'

    # ------------------------------------------------------------------
    # Action implementations
    # ------------------------------------------------------------------
    def _position_control_cb(self, goal):
        """Execute a pickled position-control command (blocking).

        Aborts the action on any exception, otherwise reports success.
        """
        try:
            obj = pickle.loads(goal.pickle)
            if isinstance(obj, Back):
                self._mc.back(obj.distance_m, velocity=obj.velocity)
            elif isinstance(obj, CircleLeft):
                self._mc.circle_left(obj.radius_m,
                                     velocity=obj.velocity,
                                     angle_degrees=obj.angle_degrees
                                     )
            elif isinstance(obj, CircleRight):
                self._mc.circle_right(obj.radius_m,
                                      velocity=obj.velocity,
                                      angle_degrees=obj.angle_degrees
                                      )
            elif isinstance(obj, Down):
                self._mc.down(obj.distance_m, velocity=obj.velocity)
            elif isinstance(obj, Forward):
                self._mc.forward(obj.distance_m, velocity=obj.velocity)
            elif isinstance(obj, Land):
                self._mc.land(velocity=obj.velocity)
            elif isinstance(obj, Left):
                self._mc.left(obj.distance_m, velocity=obj.velocity)
            elif isinstance(obj, MoveDistance):
                self._mc.move_distance(obj.x, obj.y, obj.z, velocity=obj.velocity)
            elif isinstance(obj, Right):
                self._mc.right(obj.distance_m, velocity=obj.velocity)
            elif isinstance(obj, TakeOff):
                self._mc.take_off(height=obj.height, velocity=obj.velocity)
            elif isinstance(obj, TurnLeft):
                self._mc.turn_left(obj.angle_degrees, rate=obj.rate)
            elif isinstance(obj, TurnRight):
                self._mc.turn_right(obj.angle_degrees, rate=obj.rate)
            elif isinstance(obj, Up):
                self._mc.up(obj.distance_m, velocity=obj.velocity)
        except Exception as e:
            print('Exception in action server %s' % self._name + '/position_control')
            print(str(e))
            print('Action aborted')
            self._position_control_as.set_aborted()
            return
        self._position_control_as.set_succeeded()

    # NOTE(review): the three helpers below reference action servers
    # (_takeoff_as, _land_as, _move_distance_as) that are never created in
    # __init__ -- presumably legacy callbacks for servers that were removed
    # or are created elsewhere; confirm before wiring them up.
    def _takeoff(self, goal):
        """Take off to the requested height, then wait for stabilization."""
        try:
            self._mc.take_off(height=goal.height)
            time.sleep(5)
        except BaseException as e:
            self._takeoff_as.set_aborted()
            print(e)
            return
        self._takeoff_as.set_succeeded(TakeOffResult(True))

    def _land(self, goal):
        """Land at the requested descent velocity."""
        try:
            self._mc.land(velocity=goal.velocity)
        except BaseException as e:
            self._land_as.set_aborted()
            print(e)
            return
        self._land_as.set_succeeded(LandResult(True))

    def _move_distance(self, goal):
        """Move a relative (x, y, z) distance at the requested velocity."""
        try:
            # Dead per-axis velocity computation and commented-out setpoint
            # publishing removed; move_distance handles the motion itself.
            self._mc.move_distance(goal.x, goal.y, goal.z, velocity=goal.velocity)
        except BaseException as e:
            self._move_distance_as.set_aborted()
            print(e)
            return
        self._move_distance_as.set_succeeded()
| [
"numpy.sqrt",
"actionlib.SimpleActionServer",
"rospy.Service",
"time.sleep",
"cflib.positioning.motion_commander.MotionCommander",
"pickle.loads",
"rospy.Publisher"
] | [((2066, 2091), 'cflib.positioning.motion_commander.MotionCommander', 'MotionCommander', (['self._cf'], {}), '(self._cf)\n', (2081, 2091), False, 'from cflib.positioning.motion_commander import MotionCommander\n'), ((2158, 2232), 'rospy.Publisher', 'rospy.Publisher', (["(self._name + '/velocity_setpoint')", 'Vector3'], {'queue_size': '(10)'}), "(self._name + '/velocity_setpoint', Vector3, queue_size=10)\n", (2173, 2232), False, 'import rospy\n'), ((2405, 2523), 'rospy.Service', 'rospy.Service', (["(self._name + '/reset_position_estimator')", 'ResetPositionEstimator', 'self._reset_position_estimator_cb'], {}), "(self._name + '/reset_position_estimator',\n ResetPositionEstimator, self._reset_position_estimator_cb)\n", (2418, 2523), False, 'import rospy\n'), ((2607, 2711), 'rospy.Service', 'rospy.Service', (["(self._name + '/send_hover_setpoint')", 'SendHoverSetpoint', 'self._send_hover_setpoint_cb'], {}), "(self._name + '/send_hover_setpoint', SendHoverSetpoint, self.\n _send_hover_setpoint_cb)\n", (2620, 2711), False, 'import rospy\n'), ((2784, 2854), 'rospy.Service', 'rospy.Service', (["(self._name + '/set_param')", 'SetParam', 'self._set_param_cb'], {}), "(self._name + '/set_param', SetParam, self._set_param_cb)\n", (2797, 2854), False, 'import rospy\n'), ((2939, 3035), 'rospy.Service', 'rospy.Service', (["(self._name + '/velocity_control')", 'VelocityControl', 'self._velocity_control_cb'], {}), "(self._name + '/velocity_control', VelocityControl, self.\n _velocity_control_cb)\n", (2952, 3035), False, 'import rospy\n'), ((3191, 3314), 'actionlib.SimpleActionServer', 'actionlib.SimpleActionServer', (["(self._name + '/position_control')", 'PositionControlAction', 'self._position_control_cb', '(False)'], {}), "(self._name + '/position_control',\n PositionControlAction, self._position_control_cb, False)\n", (3219, 3314), False, 'import actionlib\n'), ((3984, 4008), 'pickle.loads', 'pickle.loads', (['req.pickle'], {}), '(req.pickle)\n', (3996, 4008), False, 'import 
pickle\n'), ((5780, 5805), 'pickle.loads', 'pickle.loads', (['goal.pickle'], {}), '(goal.pickle)\n', (5792, 5805), False, 'import pickle\n'), ((7873, 7886), 'time.sleep', 'time.sleep', (['(5)'], {}), '(5)\n', (7883, 7886), False, 'import time\n'), ((8501, 8534), 'numpy.sqrt', 'np.sqrt', (['(x ** 2 + y ** 2 + z ** 2)'], {}), '(x ** 2 + y ** 2 + z ** 2)\n', (8508, 8534), True, 'import numpy as np\n')] |
"""evaluation function for chromosome
"""
import subprocess
from subprocess import PIPE, STDOUT
from autoprover.evaluation.coqstate import CoqState
def preprocess(theorem, chromosome):
    """Assemble a complete Coq script (as bytes) for coqtop.

    The theorem preamble and then the chromosome tactics are UTF-8 encoded
    one per line, and a final ``Qed.`` closes the proof.

    Args:
        theorem (list): strings containing the theorem and any pre-provided
            tactics.
        chromosome (list): tactic strings.

    Returns:
        bytes: the script to pipe into coqtop.
    """
    pieces = []
    for section in (theorem, chromosome):
        pieces.append(b'\n'.join(line.encode("utf-8") for line in section))
        pieces.append(b'\n')
    pieces.append(b'Qed.')
    return b''.join(pieces)
return script
def run_coqtop(script):
    """Run a Coq script through a fresh ``coqtop`` process.

    stderr is merged into stdout so error feedback appears in the returned
    transcript.

    Args:
        script (bytes): a complete Coq script.

    Returns:
        str: everything coqtop printed, decoded as UTF-8.
    """
    completed = subprocess.run(
        'coqtop',
        shell=False,
        input=script,
        stdout=PIPE,
        stderr=STDOUT,
    )
    return completed.stdout.decode('utf-8')
def get_coq_states(result, proof, chromosome, threshold=-1):
    """Return the valid Coq states, ignoring useless and erroneous steps.

    Args:
        result (string): plain-text output from coqtop.
        proof (Proof): Proof instance.
        chromosome (list): the corresponding chromosome of result.
        threshold (int): number of tolerated error tactics before giving
            up; -1 means ignore all errors (the counter never matches).

    Returns:
        list of CoqState
    """
    # drop the text before the first prompt; the first and the last are useless
    splited_result = split_coqtop_result(result, proof.theorem_name)[1:]
    # number of pre-provided tactics preceding the chromosome
    offset = proof.offset
    coq_states = []
    # tracks unrepeatable tactics already used once
    tactics_set = set()
    error_count = 0
    def check_overlap(coq_states, append_state):
        """If a state is equal to previous state, remove all element from that.
        """
        for index, state in enumerate(coq_states):
            if state == append_state:
                del coq_states[index+1:]
                return
        coq_states.append(append_state)
    for (i, step) in enumerate(splited_result):
        if i < offset:
            # pre-provided tactics are accepted unconditionally
            coq_states.append(CoqState(step, proof.pre_feed_tactic[i]))
            continue
        # create a new state
        if i == (len(splited_result)-1):
            # last step closes the proof
            state = CoqState(step, "Qed.")
        else:
            state = CoqState(step, chromosome[i-offset])
        if state.is_proof:
            # proof completed: keep the final state and stop
            coq_states.append(state)
            break
        elif state.is_error_state or state == coq_states[-1]:
            # error or no-op step: count it but do not record the state
            error_count += 1
        elif proof.tactics.is_unrepeatable(chromosome[i-offset]):
            if chromosome[i-offset] in tactics_set:
                # unrepeatable tactic used twice counts as an error
                error_count += 1
                check_overlap(coq_states, state)
            else:
                tactics_set.add(chromosome[i-offset])
                check_overlap(coq_states, state)
        else:
            check_overlap(coq_states, state)
        if error_count == threshold:
            break
    return coq_states
return coq_states
def split_coqtop_result(result, theorem_name):
    """Split raw coqtop output into per-prompt chunks.

    coqtop prefixes each prompt with ``"<theorem_name> <"``; the transcript
    is split on that marker and the marker re-attached, so every chunk is a
    self-contained state dump.

    Args:
        result (string): the output of coqtop.

    Returns:
        list: state strings, one per prompt (index 0 holds the text before
        the first prompt and is discarded by callers).
    """
    marker = "%s <" % theorem_name
    chunks = result.split(marker)
    states = []
    for chunk in chunks:
        states.append(marker + chunk)
    return states
def calculate_fitness(coq_states, limit_hyp=100, limit_goal=300):
    """Calculate the fitness of a gene from its Coq proof states.

    score = sum(len(hypothesis) / len(goal)) over the states, with a
    penalty instead of a reward for states whose hypothesis or goal text
    grows beyond the given limits.

    Args:
        coq_states (list): a list of CoqState.
        limit_hyp (int): hypothesis length above which a state is penalized.
        limit_goal (int): goal length above which a state is penalized.

    Returns:
        double: fitness of a gene, higher is better.
    """
    score = 0.0
    for state in coq_states:
        l_hyp = len(state.hypothesis)
        l_goal = len(state.goal)
        if l_hyp > limit_hyp:
            # over-long hypothesis: penalty approaches -1 as l_hyp grows
            # (stray debug print of the hypothesis removed; its goal-side
            # counterpart was already commented out)
            score -= l_hyp / (l_hyp + limit_hyp)
            continue
        if l_goal > limit_goal:
            score -= l_goal / (l_goal + limit_goal)
            continue
        try:
            score += l_hyp / l_goal
        except ZeroDivisionError:
            # An empty goal should have been reported as a finished proof
            # upstream; reaching here indicates a parsing bug.
            print(state.data)
            exit(1)
    return score
| [
"autoprover.evaluation.coqstate.CoqState",
"subprocess.Popen"
] | [((865, 944), 'subprocess.Popen', 'subprocess.Popen', (['"""coqtop"""'], {'shell': '(False)', 'stdin': 'PIPE', 'stdout': 'PIPE', 'stderr': 'STDOUT'}), "('coqtop', shell=False, stdin=PIPE, stdout=PIPE, stderr=STDOUT)\n", (881, 944), False, 'import subprocess\n'), ((2354, 2376), 'autoprover.evaluation.coqstate.CoqState', 'CoqState', (['step', '"""Qed."""'], {}), "(step, 'Qed.')\n", (2362, 2376), False, 'from autoprover.evaluation.coqstate import CoqState\n'), ((2411, 2449), 'autoprover.evaluation.coqstate.CoqState', 'CoqState', (['step', 'chromosome[i - offset]'], {}), '(step, chromosome[i - offset])\n', (2419, 2449), False, 'from autoprover.evaluation.coqstate import CoqState\n'), ((2173, 2213), 'autoprover.evaluation.coqstate.CoqState', 'CoqState', (['step', 'proof.pre_feed_tactic[i]'], {}), '(step, proof.pre_feed_tactic[i])\n', (2181, 2213), False, 'from autoprover.evaluation.coqstate import CoqState\n')] |
import os
import numpy
from numpy import *
import math
from scipy import integrate, linalg
from matplotlib import pyplot
from pylab import *
from .integral import *
def get_velocity_field(panels, freestream, X, Y):
    """
    Computes the velocity field on a given 2D mesh.
    Parameters
    ---------
    panels: 1D array of Panel objects
        The source panels.
    freestream: Freestream object
        The freestream conditions.
    X: 2D Numpy array of floats
        x-coordinates of the mesh points.
    Y: 2D Numpy array of floats
        y-coordinate of the mesh points.
    Returns
    -------
    u: 2D Numpy array of floats
        x-component of the velocity vector field.
    v: 2D Numpy array of floats
        y-component of the velocity vector field.
    """
    # Vectorize the panel integral so it evaluates over the whole mesh.
    source_integral = numpy.vectorize(integral)
    # Start from the uniform freestream contribution.
    u = freestream.u_inf * math.cos(freestream.alpha) * numpy.ones_like(X, dtype=float)
    v = freestream.u_inf * math.sin(freestream.alpha) * numpy.ones_like(X, dtype=float)
    # Superpose the contribution of every source panel.
    for panel in panels:
        strength = panel.sigma / (2.0 * math.pi)
        u += strength * source_integral(X, Y, panel, 1, 0)
        v += strength * source_integral(X, Y, panel, 0, 1)
    return u, v
| [
"math.cos",
"numpy.ones_like",
"math.sin",
"numpy.vectorize"
] | [((1089, 1114), 'numpy.vectorize', 'numpy.vectorize', (['integral'], {}), '(integral)\n', (1104, 1114), False, 'import numpy\n'), ((879, 910), 'numpy.ones_like', 'numpy.ones_like', (['X'], {'dtype': 'float'}), '(X, dtype=float)\n', (894, 910), False, 'import numpy\n'), ((967, 998), 'numpy.ones_like', 'numpy.ones_like', (['X'], {'dtype': 'float'}), '(X, dtype=float)\n', (982, 998), False, 'import numpy\n'), ((850, 876), 'math.cos', 'math.cos', (['freestream.alpha'], {}), '(freestream.alpha)\n', (858, 876), False, 'import math\n'), ((938, 964), 'math.sin', 'math.sin', (['freestream.alpha'], {}), '(freestream.alpha)\n', (946, 964), False, 'import math\n')] |
"""Main module."""
import copy
import io
import logging
import re
from typing import Dict, List
import yaml
log = logging.getLogger(__name__)
TEKTON_TYPE = ("pipeline", "pipelinerun", "task", "taskrun", "condition")
class TektonBundleError(Exception):
    """Raised when the Tekton resources in a repository cannot be bundled."""
def tpl_apply(yaml_obj, parameters):
    """Apply template parameters to a YAML file.

    Every ``{{name}}`` placeholder found in the file is replaced by
    ``parameters[name]``; placeholders without a matching parameter are
    left untouched.

    Args:
        yaml_obj: path of the YAML file to read.
        parameters: mapping of placeholder names to replacement values.

    Returns:
        io.StringIO: an in-memory stream with the substituted content.
    """
    def _apply(param):
        if param in parameters:
            return parameters[param]
        return "{{%s}}" % (param)

    # Read through a context manager so the file handle is always closed
    # (the previous bare open() call leaked the handle).
    with open(yaml_obj) as file_handle:
        content = file_handle.read()
    return io.StringIO(
        re.sub(
            r"\{\{([_a-zA-Z0-9\.]*)\}\}",
            lambda m: _apply(m.group(1)),
            content,
        ))
def resolve_task(mpipe, name, yaml_documents, skip_task_inlining):
    """Inline referenced Tasks of a Pipeline as ``taskSpec`` blocks.

    Every ``taskRef`` entry of the pipeline (unless its target is listed
    in ``skip_task_inlining``) is replaced in place by the ``spec`` of
    the referenced Task found in ``yaml_documents``.

    Raises:
        TektonBundleError: when a referenced Task is not in the repository.
    """
    spec = mpipe['spec']
    if 'pipelineSpec' in spec:
        task_list = spec['pipelineSpec']['tasks']
    else:
        task_list = spec['tasks']
    for entry in task_list:
        if 'taskRef' not in entry:
            continue
        referenced = entry['taskRef']['name']
        if referenced in skip_task_inlining:
            continue
        known_tasks = yaml_documents.get('task', {})
        if referenced not in known_tasks:
            raise TektonBundleError(
                f"Pipeline: {name} reference a Task: {referenced} not in repository"
            )
        # Swap the reference for the full task specification.
        del entry['taskRef']
        entry['taskSpec'] = known_tasks[referenced]['spec']
    return mpipe
def parse(yamlfiles: List[str], parameters: Dict[str, str],
          skip_inlining: List[str]) -> Dict[str, str]:
    """Parse a bunch of yaml files and bundle them into one document.

    ``{{param}}`` placeholders are substituted from ``parameters`` before
    YAML parsing.  Pipelines have their Task references inlined, and each
    PipelineRun has its Pipeline reference inlined, unless the referenced
    name appears in ``skip_inlining``.

    Returns:
        dict with the bundled YAML under ``'bundle'`` plus the documents
        that were ignored (``'ignored_not_tekton'``, ``'ignored_not_k8'``).

    Raises:
        TektonBundleError: if no PipelineRun is present, or a referenced
            Pipeline/Task is missing from the repository.
    """
    yaml_documents = {}  # type: Dict[str, Dict]
    results = []
    notkube_ignored = []
    nottekton_ignored = []
    # First pass: index every Tekton document by (kind, name).
    for yaml_file in yamlfiles:
        for document in yaml.load_all(tpl_apply(yaml_file, parameters),
                                      Loader=yaml.Loader):
            if 'apiVersion' not in document or 'kind' not in document:
                # Not a Kubernetes manifest at all; keep it for reporting.
                notkube_ignored.append(
                    yaml.dump(
                        document,
                        Dumper=yaml.Dumper,
                    ))
                continue
            # Prefer generateName over name when both could be present.
            name = (document['metadata']['generateName']
                    if 'generateName' in document['metadata'].keys() else
                    document['metadata']['name'])
            kind = document['kind'].lower()
            if kind not in TEKTON_TYPE:
                # A Kubernetes object, but not a Tekton resource we bundle.
                nottekton_ignored.append(
                    yaml.dump(
                        document,
                        Dumper=yaml.Dumper,
                    ))
                continue
            yaml_documents.setdefault(kind, {})
            yaml_documents[kind][name] = document
    if 'pipelinerun' not in yaml_documents:
        raise TektonBundleError("We need at least a PipelineRun")
    # if we have pipeline (i.e: not embedded) then expand all tasksRef insides.
    if 'pipeline' in yaml_documents:
        for pipeline in yaml_documents['pipeline']:
            # Deep-copy so the indexed originals stay untouched.
            mpipe = copy.deepcopy(yaml_documents['pipeline'][pipeline])
            resolved = resolve_task(mpipe, pipeline, yaml_documents,
                                    skip_inlining)
            yaml_documents['pipeline'][pipeline] = copy.deepcopy(resolved)
    # For all pipelinerun expands the pipelineRef, keep it as is if it's a
    # pipelineSpec.
    for pipeline_run in yaml_documents['pipelinerun']:
        mpr = copy.deepcopy(yaml_documents['pipelinerun'][pipeline_run])
        if 'pipelineSpec' in mpr['spec']:
            # Embedded pipeline: only its taskRefs need inlining.
            mpr = resolve_task(mpr, pipeline_run, yaml_documents,
                               skip_inlining)
        elif 'pipelineRef' in mpr['spec']:
            refpipeline = mpr['spec']['pipelineRef']['name']
            if 'pipeline' not in yaml_documents or refpipeline not in yaml_documents[
                    'pipeline']:
                raise TektonBundleError(
                    f"PR: {pipeline_run} reference a Pipeline: {refpipeline} not in repository"
                )
            # Replace the reference with the (already task-inlined) spec.
            del mpr['spec']['pipelineRef']
            mpr['spec']['pipelineSpec'] = yaml_documents['pipeline'][
                refpipeline]['spec']
        # Adjust names with generateName if needed
        # TODO(chmou): make it optional, we maybe don't want to do this sometime
        if 'name' in mpr['metadata']:
            name = mpr['metadata']['name']
            mpr['metadata']['generateName'] = name + "-"
            del mpr['metadata']['name']
        results.append(mpr)
    ret = {
        'bundle':
        yaml.dump_all(results,
                      Dumper=yaml.Dumper,
                      default_flow_style=False,
                      allow_unicode=True),
        'ignored_not_tekton':
        nottekton_ignored,
        'ignored_not_k8':
        notkube_ignored
    }
    return ret
| [
"logging.getLogger",
"yaml.dump_all",
"yaml.dump",
"copy.deepcopy"
] | [((117, 144), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (134, 144), False, 'import logging\n'), ((3346, 3404), 'copy.deepcopy', 'copy.deepcopy', (["yaml_documents['pipelinerun'][pipeline_run]"], {}), "(yaml_documents['pipelinerun'][pipeline_run])\n", (3359, 3404), False, 'import copy\n'), ((4466, 4558), 'yaml.dump_all', 'yaml.dump_all', (['results'], {'Dumper': 'yaml.Dumper', 'default_flow_style': '(False)', 'allow_unicode': '(True)'}), '(results, Dumper=yaml.Dumper, default_flow_style=False,\n allow_unicode=True)\n', (4479, 4558), False, 'import yaml\n'), ((2934, 2985), 'copy.deepcopy', 'copy.deepcopy', (["yaml_documents['pipeline'][pipeline]"], {}), "(yaml_documents['pipeline'][pipeline])\n", (2947, 2985), False, 'import copy\n'), ((3157, 3180), 'copy.deepcopy', 'copy.deepcopy', (['resolved'], {}), '(resolved)\n', (3170, 3180), False, 'import copy\n'), ((1930, 1969), 'yaml.dump', 'yaml.dump', (['document'], {'Dumper': 'yaml.Dumper'}), '(document, Dumper=yaml.Dumper)\n', (1939, 1969), False, 'import yaml\n'), ((2397, 2436), 'yaml.dump', 'yaml.dump', (['document'], {'Dumper': 'yaml.Dumper'}), '(document, Dumper=yaml.Dumper)\n', (2406, 2436), False, 'import yaml\n')] |
"""
The Sims 4 Community Library is licensed under the Creative Commons Attribution 4.0 International public license (CC BY 4.0).
https://creativecommons.org/licenses/by/4.0/
https://creativecommons.org/licenses/by/4.0/legalcode
Copyright (c) COLONOLNUTTY
"""
import os
from sims4.commands import Command, CommandType, CheatOutput
from sims4communitylib.utils.common_time_utils import CommonTimeUtils
from typing import Any, Callable
# Read The Docs builds cannot import the game's modules, so when running
# under RTD the real imports are replaced by minimal stand-in classes.
ON_RTD = os.environ.get('READTHEDOCS', None) == 'True'
if not ON_RTD:
    from scheduling import Timeline
    from alarms import AlarmHandle
    from date_and_time import DateAndTime, TimeSpan
else:
    # Documentation-only stubs mirroring the real game types.
    # noinspection PyMissingOrEmptyDocstring
    class AlarmHandle:
        def cancel(self):
            pass
    # noinspection PyMissingOrEmptyDocstring
    class DateAndTime:
        pass
    # noinspection PyMissingOrEmptyDocstring
    class TimeSpan:
        pass
    # noinspection PyMissingOrEmptyDocstring
    class Timeline:
        pass
class CommonAlarmHandle(AlarmHandle):
    """A custom alarm handle that keeps track of when it is slated to trigger for the first time.

    :param owner: The owner of the alarm.
    :param on_alarm_triggered_callback: Invoked with this handle when the alarm fires.
    :param timeline: The timeline the alarm is scheduled on.
    :param when: Date and time of the first trigger; also stored as ``started_at_date_and_time``.
    :param should_repeat: If True, the alarm repeats (passed to the base as ``repeating``).
    :param time_until_repeat: Interval between repeats (passed as ``repeat_interval``).
    :param accurate_repeat: Passed through to the base handle's ``accurate_repeat``.
    :param persist_across_zone_loads: If True, passed to the base as ``cross_zone``.
    """
    def __init__(
        self,
        owner: Any,
        on_alarm_triggered_callback: Callable[['CommonAlarmHandle'], None],
        timeline: Timeline,
        when: DateAndTime,
        should_repeat: bool=False,
        time_until_repeat: TimeSpan=None,
        accurate_repeat: bool=True,
        persist_across_zone_loads: bool=False
    ):
        # Remember the first scheduled trigger time so callers can inspect it later.
        self.started_at_date_and_time = when
        super().__init__(
            owner,
            on_alarm_triggered_callback,
            timeline,
            when,
            repeating=should_repeat,
            repeat_interval=time_until_repeat,
            accurate_repeat=accurate_repeat,
            cross_zone=persist_across_zone_loads
        )
# Register the cheat command only in the real game (not on Read The Docs).
if not ON_RTD:
    @Command('s4clib.print_current_time', command_type=CommandType.Live)
    def _s4clib_print_current_time(_connection: int=None):
        # Cheat command: print the current in-game time (hour/minute and
        # absolute hours/minutes) to the cheat console.
        output = CheatOutput(_connection)
        output('Current time')
        output('Hour {} Minute {}'.format(CommonTimeUtils.get_current_date_and_time().hour(), CommonTimeUtils.get_current_date_and_time().minute()))
        output('Abs Hour {} Abs Minute {}'.format(CommonTimeUtils.get_current_date_and_time().absolute_hours(), CommonTimeUtils.get_current_date_and_time().absolute_minutes()))
| [
"sims4.commands.Command",
"sims4.commands.CheatOutput",
"os.environ.get",
"sims4communitylib.utils.common_time_utils.CommonTimeUtils.get_current_date_and_time"
] | [((446, 481), 'os.environ.get', 'os.environ.get', (['"""READTHEDOCS"""', 'None'], {}), "('READTHEDOCS', None)\n", (460, 481), False, 'import os\n'), ((1859, 1926), 'sims4.commands.Command', 'Command', (['"""s4clib.print_current_time"""'], {'command_type': 'CommandType.Live'}), "('s4clib.print_current_time', command_type=CommandType.Live)\n", (1866, 1926), False, 'from sims4.commands import Command, CommandType, CheatOutput\n'), ((2003, 2027), 'sims4.commands.CheatOutput', 'CheatOutput', (['_connection'], {}), '(_connection)\n', (2014, 2027), False, 'from sims4.commands import Command, CommandType, CheatOutput\n'), ((2101, 2144), 'sims4communitylib.utils.common_time_utils.CommonTimeUtils.get_current_date_and_time', 'CommonTimeUtils.get_current_date_and_time', ([], {}), '()\n', (2142, 2144), False, 'from sims4communitylib.utils.common_time_utils import CommonTimeUtils\n'), ((2153, 2196), 'sims4communitylib.utils.common_time_utils.CommonTimeUtils.get_current_date_and_time', 'CommonTimeUtils.get_current_date_and_time', ([], {}), '()\n', (2194, 2196), False, 'from sims4communitylib.utils.common_time_utils import CommonTimeUtils\n'), ((2258, 2301), 'sims4communitylib.utils.common_time_utils.CommonTimeUtils.get_current_date_and_time', 'CommonTimeUtils.get_current_date_and_time', ([], {}), '()\n', (2299, 2301), False, 'from sims4communitylib.utils.common_time_utils import CommonTimeUtils\n'), ((2320, 2363), 'sims4communitylib.utils.common_time_utils.CommonTimeUtils.get_current_date_and_time', 'CommonTimeUtils.get_current_date_and_time', ([], {}), '()\n', (2361, 2363), False, 'from sims4communitylib.utils.common_time_utils import CommonTimeUtils\n')] |
# Generated by Django 3.1.4 on 2021-01-22 22:56
from django.db import migrations
class Migration(migrations.Migration):
    """Tear down the legacy blackbook schema.

    Foreign-key fields are removed first so the models can then be
    deleted without dependency errors.
    """

    dependencies = [
        ('blackbook', '0021_update_account_categories'),
    ]
    operations = [
        # Drop inter-model relations before deleting the models themselves.
        migrations.RemoveField(
            model_name='budgetperiod',
            name='budget',
        ),
        migrations.RemoveField(
            model_name='transaction',
            name='account',
        ),
        migrations.RemoveField(
            model_name='transaction',
            name='journal_entry',
        ),
        migrations.RemoveField(
            model_name='transactionjournalentry',
            name='budget',
        ),
        migrations.RemoveField(
            model_name='transactionjournalentry',
            name='category',
        ),
        migrations.RemoveField(
            model_name='transactionjournalentry',
            name='from_account',
        ),
        migrations.RemoveField(
            model_name='transactionjournalentry',
            name='tags',
        ),
        migrations.RemoveField(
            model_name='transactionjournalentry',
            name='to_account',
        ),
        migrations.RemoveField(
            model_name='userprofile',
            name='user',
        ),
        # Now delete every model of the legacy schema.
        migrations.DeleteModel(
            name='Account',
        ),
        migrations.DeleteModel(
            name='AccountType',
        ),
        migrations.DeleteModel(
            name='Budget',
        ),
        migrations.DeleteModel(
            name='BudgetPeriod',
        ),
        migrations.DeleteModel(
            name='Category',
        ),
        migrations.DeleteModel(
            name='Transaction',
        ),
        migrations.DeleteModel(
            name='TransactionJournalEntry',
        ),
        migrations.DeleteModel(
            name='UserProfile',
        ),
    ]
| [
"django.db.migrations.DeleteModel",
"django.db.migrations.RemoveField"
] | [((236, 300), 'django.db.migrations.RemoveField', 'migrations.RemoveField', ([], {'model_name': '"""budgetperiod"""', 'name': '"""budget"""'}), "(model_name='budgetperiod', name='budget')\n", (258, 300), False, 'from django.db import migrations\n'), ((345, 409), 'django.db.migrations.RemoveField', 'migrations.RemoveField', ([], {'model_name': '"""transaction"""', 'name': '"""account"""'}), "(model_name='transaction', name='account')\n", (367, 409), False, 'from django.db import migrations\n'), ((454, 524), 'django.db.migrations.RemoveField', 'migrations.RemoveField', ([], {'model_name': '"""transaction"""', 'name': '"""journal_entry"""'}), "(model_name='transaction', name='journal_entry')\n", (476, 524), False, 'from django.db import migrations\n'), ((569, 644), 'django.db.migrations.RemoveField', 'migrations.RemoveField', ([], {'model_name': '"""transactionjournalentry"""', 'name': '"""budget"""'}), "(model_name='transactionjournalentry', name='budget')\n", (591, 644), False, 'from django.db import migrations\n'), ((689, 766), 'django.db.migrations.RemoveField', 'migrations.RemoveField', ([], {'model_name': '"""transactionjournalentry"""', 'name': '"""category"""'}), "(model_name='transactionjournalentry', name='category')\n", (711, 766), False, 'from django.db import migrations\n'), ((811, 897), 'django.db.migrations.RemoveField', 'migrations.RemoveField', ([], {'model_name': '"""transactionjournalentry"""', 'name': '"""from_account"""'}), "(model_name='transactionjournalentry', name=\n 'from_account')\n", (833, 897), False, 'from django.db import migrations\n'), ((937, 1010), 'django.db.migrations.RemoveField', 'migrations.RemoveField', ([], {'model_name': '"""transactionjournalentry"""', 'name': '"""tags"""'}), "(model_name='transactionjournalentry', name='tags')\n", (959, 1010), False, 'from django.db import migrations\n'), ((1055, 1134), 'django.db.migrations.RemoveField', 'migrations.RemoveField', ([], {'model_name': '"""transactionjournalentry"""', 
'name': '"""to_account"""'}), "(model_name='transactionjournalentry', name='to_account')\n", (1077, 1134), False, 'from django.db import migrations\n'), ((1179, 1240), 'django.db.migrations.RemoveField', 'migrations.RemoveField', ([], {'model_name': '"""userprofile"""', 'name': '"""user"""'}), "(model_name='userprofile', name='user')\n", (1201, 1240), False, 'from django.db import migrations\n'), ((1285, 1323), 'django.db.migrations.DeleteModel', 'migrations.DeleteModel', ([], {'name': '"""Account"""'}), "(name='Account')\n", (1307, 1323), False, 'from django.db import migrations\n'), ((1356, 1398), 'django.db.migrations.DeleteModel', 'migrations.DeleteModel', ([], {'name': '"""AccountType"""'}), "(name='AccountType')\n", (1378, 1398), False, 'from django.db import migrations\n'), ((1431, 1468), 'django.db.migrations.DeleteModel', 'migrations.DeleteModel', ([], {'name': '"""Budget"""'}), "(name='Budget')\n", (1453, 1468), False, 'from django.db import migrations\n'), ((1501, 1544), 'django.db.migrations.DeleteModel', 'migrations.DeleteModel', ([], {'name': '"""BudgetPeriod"""'}), "(name='BudgetPeriod')\n", (1523, 1544), False, 'from django.db import migrations\n'), ((1577, 1616), 'django.db.migrations.DeleteModel', 'migrations.DeleteModel', ([], {'name': '"""Category"""'}), "(name='Category')\n", (1599, 1616), False, 'from django.db import migrations\n'), ((1649, 1691), 'django.db.migrations.DeleteModel', 'migrations.DeleteModel', ([], {'name': '"""Transaction"""'}), "(name='Transaction')\n", (1671, 1691), False, 'from django.db import migrations\n'), ((1724, 1778), 'django.db.migrations.DeleteModel', 'migrations.DeleteModel', ([], {'name': '"""TransactionJournalEntry"""'}), "(name='TransactionJournalEntry')\n", (1746, 1778), False, 'from django.db import migrations\n'), ((1811, 1853), 'django.db.migrations.DeleteModel', 'migrations.DeleteModel', ([], {'name': '"""UserProfile"""'}), "(name='UserProfile')\n", (1833, 1853), False, 'from django.db import 
migrations\n')] |
import base64
import rsa
from django.conf import settings
from django.core.exceptions import ObjectDoesNotExist
from ticketing.models import BalanceTicket, RideTicket
class VerifyFailedError(Exception):
    """Raised when a QR code signature or ticket lookup fails verification."""
class QRCode:
    """
    QRCode creator is used to create a user ticket/balance ID,
    which is then signed and then returned
    """
    # Refactor to QR Factory
    # Make QR codes for RT tickets
    def __init__(self, testing=False):
        # Outside of tests both RSA keys must be configured; they are
        # rebuilt into PEM form from the flattened settings values.
        if not testing:
            if settings.PRIVATE_KEY is None or settings.PUBLIC_KEY is None:
                raise Exception("The settings file has an issue with the keys")
            else:
                self.private = rsa.PrivateKey.load_pkcs1(self.__getprivkey())
                self.public = rsa.PublicKey.load_pkcs1(self.__getpubkey())
    @staticmethod
    def __getprivkey():
        # Rebuild a PEM private key.  Assumes the settings value stores the
        # key on one line with the body's newlines replaced by spaces; the
        # fixed-length header (32 chars) and footer (29 chars) are kept
        # verbatim and the body's spaces become newlines again.
        # TODO(review): confirm the header/footer lengths match the stored key.
        priv = settings.PRIVATE_KEY
        header = priv[:32]
        body = priv[32:len(priv)-29].replace(" ", "\n")
        footer = priv[-29:]
        privkey = header + "\n" + body + footer
        return privkey
    @staticmethod
    def __getpubkey():
        # Same reconstruction as __getprivkey, with the public key's
        # header (31 chars) and footer (28 chars) lengths.
        pub = settings.PUBLIC_KEY
        header = pub[:31]
        body = pub[31:len(pub)-28].replace(" ", "\n")
        footer = pub[-28:]
        pubkey = header + "\n" + body + footer
        return pubkey
    def createbtqrcode(self, btticket: BalanceTicket):
        # Balance tickets use type 'b' and a placeholder value 'x'.
        uid = btticket.qr_code_id
        type = 'b'
        val = 'x'
        name = btticket.account.user.first_name
        return self.__sign(uid, type, val, name)
    def creatertqrcode(self, rtticket: RideTicket):
        # Ride tickets use type 'r'; initial_value is concatenated in
        # __sign, so it is presumably a string — TODO confirm.
        uid = rtticket.qr_code
        type = 'r'
        val = rtticket.initial_value
        name = rtticket.short_name
        return self.__sign(uid, type, val, name)
    def __sign(self, uid, type, val, name):
        # Build the payload "uid.type.val.name", sign it with SHA-256 and
        # return "payload:base64(signature)".  The result is also kept on
        # the instance as self.ID.
        tosign = str(uid) + '.' + type + '.' + val + '.' + name
        signed = base64.b64encode(rsa.sign(tosign.encode('UTF-8'),
                                          self.private, 'SHA-256'))
        toreturn = str(tosign) + ':' + str(signed.decode('UTF-8'))
        self.ID = toreturn
        return toreturn
    def verify(self, qrcode):
        # qrcode has the shape produced by __sign: "payload:signature".
        parts = qrcode.split(':')
        hash = base64.b64decode(parts[1])
        try:
            rsa.verify(parts[0].encode(), hash, self.public)
            print("Verified")
            # Payload fields: uid.type.val.name (see __sign).
            user = parts[0].split(".")
            uuid = user[0]
            ticketType = user[1]
            if ticketType == "b":
                try:
                    ticket = BalanceTicket.objects.get(qr_code_id=uuid)
                    return {"ticket": ticket, "type": ticketType}
                except ObjectDoesNotExist:
                    raise VerifyFailedError()
            elif ticketType == "r":
                try:
                    ticket = RideTicket.objects.get(qr_code=uuid)
                    return {"ticket": ticket, "type": ticketType}
                except ObjectDoesNotExist:
                    raise VerifyFailedError()
        except rsa.VerificationError:
            print("Verification Error")
            raise VerifyFailedError
        # Create an error for better usability
        # NOTE(review): reached when the signature verified but the ticket
        # type is neither "b" nor "r"; the method then returns None.
        print("Hash 0 : " + parts[0])
        print("Hash 1 : " + parts[1])
| [
"ticketing.models.RideTicket.objects.get",
"base64.b64decode",
"ticketing.models.BalanceTicket.objects.get"
] | [((2214, 2240), 'base64.b64decode', 'base64.b64decode', (['parts[1]'], {}), '(parts[1])\n', (2230, 2240), False, 'import base64\n'), ((2528, 2570), 'ticketing.models.BalanceTicket.objects.get', 'BalanceTicket.objects.get', ([], {'qr_code_id': 'uuid'}), '(qr_code_id=uuid)\n', (2553, 2570), False, 'from ticketing.models import BalanceTicket, RideTicket\n'), ((2812, 2848), 'ticketing.models.RideTicket.objects.get', 'RideTicket.objects.get', ([], {'qr_code': 'uuid'}), '(qr_code=uuid)\n', (2834, 2848), False, 'from ticketing.models import BalanceTicket, RideTicket\n')] |
from rest_framework.views import APIView
from rest_framework import status
from eva.serializers import WorkSerializer, PageSerializer, WordSerializer, RadicalSerializer
from eva.models import Work, Page, Word, Radical
from rest_framework.response import Response
from django.http import Http404
class WorkView(APIView):
    """List every Work in the database."""

    def get(self, request, format=None):
        """Return all works, their count and an HTTP status."""
        works = Work.objects.all()
        # Bug fix: removed a leftover debug print(works) that evaluated the
        # queryset and polluted stdout on every request.
        serializer = WorkSerializer(works, many=True)
        json = {
            'works': serializer.data,
            'count': works.count(),
            'status': status.HTTP_200_OK,
        }
        return Response(json)
class WorkDetail(APIView):
    """Retrieve a single Work instance by primary key."""

    def get_object(self, pk):
        """Return the Work with the given pk, or raise Http404."""
        try:
            return Work.objects.get(pk=pk)
        except Work.DoesNotExist:
            raise Http404

    def get(self, request, pk, format=None):
        """Serialize the requested work and attach the HTTP status."""
        serializer = WorkSerializer(self.get_object(pk))
        payload = serializer.data
        payload['status'] = status.HTTP_200_OK
        return Response(payload)
class PageView(APIView):
    """List pages, optionally filtered by the ``workId`` query parameter."""

    def get(self, request, format=None, *args, **kwargs):
        """Return pages plus the count, the work id and an HTTP status."""
        try:
            work_id = request.query_params.get("workId")
            if work_id is None:
                # No filter requested: return every page.
                pages = Page.objects.all()
                work_id = 0
                stats = status.HTTP_200_OK
            else:
                pages = Page.objects.filter(workId=work_id)
                stats = (status.HTTP_404_NOT_FOUND
                         if pages.count() == 0 else status.HTTP_200_OK)
        except ValueError:
            # NOTE(review): message says 'pageId' but the parameter is
            # workId — kept as-is since it is part of the API response.
            return Response({
                'status': status.HTTP_400_BAD_REQUEST,
                'message': 'invalid pageId',
            })
        serializer = PageSerializer(pages, many=True)
        return Response({
            'pages': serializer.data,
            'count': pages.count(),
            'workId': work_id,
            'status': stats,
        })
class PageDetail(APIView):
    """Retrieve a single Page instance by primary key."""

    def get_object(self, pk):
        """Return the Page with the given pk, or raise Http404."""
        try:
            return Page.objects.get(pk=pk)
        except Page.DoesNotExist:
            raise Http404

    def get(self, request, pk, format=None):
        """Serialize the requested page and attach the HTTP status."""
        page = self.get_object(pk)
        payload = PageSerializer(page).data
        payload['status'] = status.HTTP_200_OK
        return Response(payload)
class WordView(APIView):
    """List words, optionally filtered by the ``pageId`` query parameter."""

    def get(self, request, format=None, *args, **kwargs):
        """Return words plus the count, the page id and an HTTP status."""
        page_id = request.query_params.get("pageId")
        try:
            if page_id is None:
                # No filter requested: return every word.
                words = Word.objects.all()
                page_id = 0
                stats = status.HTTP_200_OK
            else:
                words = Word.objects.filter(pageId=page_id)
                stats = (status.HTTP_404_NOT_FOUND
                         if words.count() == 0 else status.HTTP_200_OK)
        except ValueError:
            return Response({
                'status': status.HTTP_400_BAD_REQUEST,
                'message': 'invalid pageId',
            })
        serializer = WordSerializer(words, many=True)
        return Response({
            'words': serializer.data,
            'count': words.count(),
            'pageId': page_id,
            'status': stats,
        })
class WordDetail(APIView):
    """Retrieve a single Word instance by primary key."""

    def get_object(self, pk):
        """Return the Word with the given pk, or raise Http404."""
        try:
            return Word.objects.get(pk=pk)
        except Word.DoesNotExist:
            raise Http404

    def get(self, request, pk, format=None):
        """Serialize the requested word and attach the HTTP status."""
        word = self.get_object(pk)
        payload = WordSerializer(word).data
        payload['status'] = status.HTTP_200_OK
        return Response(payload)
class RadicalView(APIView):
    """List radicals, optionally filtered by the ``wordId`` query parameter."""

    def get(self, request, format=None, *args, **kwargs):
        """Return radicals plus the count, the word id and an HTTP status."""
        word_id = request.query_params.get("wordId")
        try:
            if word_id is None:
                # No filter requested: return every radical.
                radicals = Radical.objects.all()
                word_id = 0
                stats = status.HTTP_200_OK
            else:
                radicals = Radical.objects.filter(wordId=word_id)
                stats = (status.HTTP_404_NOT_FOUND
                         if radicals.count() == 0 else status.HTTP_200_OK)
        except ValueError:
            return Response({
                'status': status.HTTP_400_BAD_REQUEST,
                'message': 'invalid pageId',
            })
        serializer = RadicalSerializer(radicals, many=True)
        # NOTE(review): the response key is 'words' although the payload
        # holds radicals — kept as-is since clients may rely on it.
        return Response({
            'words': serializer.data,
            'count': radicals.count(),
            'wordId': word_id,
            'status': stats,
        })
class RadicalDetail(APIView):
    """Retrieve a single Radical instance by primary key."""

    def get_object(self, pk):
        """Return the Radical with the given pk, or raise Http404."""
        try:
            return Radical.objects.get(pk=pk)
        except Radical.DoesNotExist:
            # Bug fix: previously caught Word.DoesNotExist, so a missing
            # Radical escaped as an unhandled exception (HTTP 500)
            # instead of the intended Http404.
            raise Http404

    def get(self, request, pk, format=None):
        """Serialize the requested radical and attach the HTTP status."""
        radical = self.get_object(pk)
        serializer = RadicalSerializer(radical)
        json = serializer.data
        json['status'] = status.HTTP_200_OK
        return Response(json)
| [
"eva.models.Page.objects.get",
"eva.models.Word.objects.get",
"eva.models.Word.objects.filter",
"eva.models.Page.objects.filter",
"eva.models.Radical.objects.filter",
"eva.models.Radical.objects.get",
"eva.models.Word.objects.all",
"eva.models.Radical.objects.all",
"eva.serializers.WordSerializer",
... | [((410, 428), 'eva.models.Work.objects.all', 'Work.objects.all', ([], {}), '()\n', (426, 428), False, 'from eva.models import Work, Page, Word, Radical\n'), ((471, 503), 'eva.serializers.WorkSerializer', 'WorkSerializer', (['works'], {'many': '(True)'}), '(works, many=True)\n', (485, 503), False, 'from eva.serializers import WorkSerializer, PageSerializer, WordSerializer, RadicalSerializer\n'), ((662, 676), 'rest_framework.response.Response', 'Response', (['json'], {}), '(json)\n', (670, 676), False, 'from rest_framework.response import Response\n'), ((1021, 1041), 'eva.serializers.WorkSerializer', 'WorkSerializer', (['work'], {}), '(work)\n', (1035, 1041), False, 'from eva.serializers import WorkSerializer, PageSerializer, WordSerializer, RadicalSerializer\n'), ((1132, 1146), 'rest_framework.response.Response', 'Response', (['json'], {}), '(json)\n', (1140, 1146), False, 'from rest_framework.response import Response\n'), ((1910, 1942), 'eva.serializers.PageSerializer', 'PageSerializer', (['pages'], {'many': '(True)'}), '(pages, many=True)\n', (1924, 1942), False, 'from eva.serializers import WorkSerializer, PageSerializer, WordSerializer, RadicalSerializer\n'), ((2119, 2133), 'rest_framework.response.Response', 'Response', (['json'], {}), '(json)\n', (2127, 2133), False, 'from rest_framework.response import Response\n'), ((2478, 2498), 'eva.serializers.PageSerializer', 'PageSerializer', (['work'], {}), '(work)\n', (2492, 2498), False, 'from eva.serializers import WorkSerializer, PageSerializer, WordSerializer, RadicalSerializer\n'), ((2589, 2603), 'rest_framework.response.Response', 'Response', (['json'], {}), '(json)\n', (2597, 2603), False, 'from rest_framework.response import Response\n'), ((3361, 3393), 'eva.serializers.WordSerializer', 'WordSerializer', (['words'], {'many': '(True)'}), '(words, many=True)\n', (3375, 3393), False, 'from eva.serializers import WorkSerializer, PageSerializer, WordSerializer, RadicalSerializer\n'), ((3570, 3584), 
'rest_framework.response.Response', 'Response', (['json'], {}), '(json)\n', (3578, 3584), False, 'from rest_framework.response import Response\n'), ((3929, 3949), 'eva.serializers.WordSerializer', 'WordSerializer', (['word'], {}), '(word)\n', (3943, 3949), False, 'from eva.serializers import WorkSerializer, PageSerializer, WordSerializer, RadicalSerializer\n'), ((4040, 4054), 'rest_framework.response.Response', 'Response', (['json'], {}), '(json)\n', (4048, 4054), False, 'from rest_framework.response import Response\n'), ((4830, 4868), 'eva.serializers.RadicalSerializer', 'RadicalSerializer', (['radicals'], {'many': '(True)'}), '(radicals, many=True)\n', (4847, 4868), False, 'from eva.serializers import WorkSerializer, PageSerializer, WordSerializer, RadicalSerializer\n'), ((5048, 5062), 'rest_framework.response.Response', 'Response', (['json'], {}), '(json)\n', (5056, 5062), False, 'from rest_framework.response import Response\n'), ((5416, 5442), 'eva.serializers.RadicalSerializer', 'RadicalSerializer', (['radical'], {}), '(radical)\n', (5433, 5442), False, 'from eva.serializers import WorkSerializer, PageSerializer, WordSerializer, RadicalSerializer\n'), ((5533, 5547), 'rest_framework.response.Response', 'Response', (['json'], {}), '(json)\n', (5541, 5547), False, 'from rest_framework.response import Response\n'), ((835, 858), 'eva.models.Work.objects.get', 'Work.objects.get', ([], {'pk': 'pk'}), '(pk=pk)\n', (851, 858), False, 'from eva.models import Work, Page, Word, Radical\n'), ((2292, 2315), 'eva.models.Page.objects.get', 'Page.objects.get', ([], {'pk': 'pk'}), '(pk=pk)\n', (2308, 2315), False, 'from eva.models import Work, Page, Word, Radical\n'), ((3743, 3766), 'eva.models.Word.objects.get', 'Word.objects.get', ([], {'pk': 'pk'}), '(pk=pk)\n', (3759, 3766), False, 'from eva.models import Work, Page, Word, Radical\n'), ((5224, 5250), 'eva.models.Radical.objects.get', 'Radical.objects.get', ([], {'pk': 'pk'}), '(pk=pk)\n', (5243, 5250), False, 'from 
eva.models import Work, Page, Word, Radical\n'), ((1388, 1406), 'eva.models.Page.objects.all', 'Page.objects.all', ([], {}), '()\n', (1404, 1406), False, 'from eva.models import Work, Page, Word, Radical\n'), ((1519, 1553), 'eva.models.Page.objects.filter', 'Page.objects.filter', ([], {'workId': 'workId'}), '(workId=workId)\n', (1538, 1553), False, 'from eva.models import Work, Page, Word, Radical\n'), ((1762, 1840), 'rest_framework.response.Response', 'Response', (["{'status': status.HTTP_400_BAD_REQUEST, 'message': 'invalid pageId'}"], {}), "({'status': status.HTTP_400_BAD_REQUEST, 'message': 'invalid pageId'})\n", (1770, 1840), False, 'from rest_framework.response import Response\n'), ((2840, 2858), 'eva.models.Word.objects.all', 'Word.objects.all', ([], {}), '()\n', (2856, 2858), False, 'from eva.models import Work, Page, Word, Radical\n'), ((2971, 3005), 'eva.models.Word.objects.filter', 'Word.objects.filter', ([], {'pageId': 'pageId'}), '(pageId=pageId)\n', (2990, 3005), False, 'from eva.models import Work, Page, Word, Radical\n'), ((3214, 3292), 'rest_framework.response.Response', 'Response', (["{'status': status.HTTP_400_BAD_REQUEST, 'message': 'invalid pageId'}"], {}), "({'status': status.HTTP_400_BAD_REQUEST, 'message': 'invalid pageId'})\n", (3222, 3292), False, 'from rest_framework.response import Response\n'), ((4297, 4318), 'eva.models.Radical.objects.all', 'Radical.objects.all', ([], {}), '()\n', (4316, 4318), False, 'from eva.models import Work, Page, Word, Radical\n'), ((4434, 4471), 'eva.models.Radical.objects.filter', 'Radical.objects.filter', ([], {'wordId': 'wordId'}), '(wordId=wordId)\n', (4456, 4471), False, 'from eva.models import Work, Page, Word, Radical\n'), ((4683, 4761), 'rest_framework.response.Response', 'Response', (["{'status': status.HTTP_400_BAD_REQUEST, 'message': 'invalid pageId'}"], {}), "({'status': status.HTTP_400_BAD_REQUEST, 'message': 'invalid pageId'})\n", (4691, 4761), False, 'from rest_framework.response import 
Response\n')] |
from django.shortcuts import render
from django.http import HttpResponse
from App.models import *
# Create your views here.
def search(seq):
    """Render test.html with every Myclass row in the context."""
    all_classes = Myclass.objects.all()
    return render(seq, 'test.html', context={'myclass': all_classes})
def students(req):
    """Render student.html with the students of the class given by ?classid=."""
    class_id = req.GET.get('classid')
    # Filter directly instead of fetching all() and then filtering;
    # querysets are lazy, so this issues the same single query more clearly.
    studentt = Student.objects.filter(cid_id=class_id)
    return render(req, 'student.html', context={'students': studentt})
"django.shortcuts.render"
] | [((186, 240), 'django.shortcuts.render', 'render', (['seq', '"""test.html"""'], {'context': "{'myclass': myclass}"}), "(seq, 'test.html', context={'myclass': myclass})\n", (192, 240), False, 'from django.shortcuts import render\n'), ((392, 451), 'django.shortcuts.render', 'render', (['req', '"""student.html"""'], {'context': "{'students': studentt}"}), "(req, 'student.html', context={'students': studentt})\n", (398, 451), False, 'from django.shortcuts import render\n')] |