| code (string, length 22–1.05M) | apis (list, length 1–3.31k) | extract_api (string, length 75–3.25M) |
|---|---|---|
#!/usr/bin/env python
""" runsinglehap.py - Module to control processing of single-visit mosaics
:License: :doc:`LICENSE`
USAGE:
>>> runsinglehap [-cdl] inputFilename
- The '-c' option allows the user to specify a customized configuration JSON file which has been tuned for
specialized processing. This file should contain ALL the input parameters necessary for processing. If
not specified, default configuration values will be used.
    - The '-d' option will run this task in a more verbose diagnostic mode in which additional log messages
      will be displayed and additional files will be created.
- The '-l' option allows the user to set the desired level of verboseness in the log statements displayed on
the screen and written to the .log file. Specifying "critical" will only record/display "critical" log
statements, and specifying "error" will record/display both "error" and "critical" log statements, and so
on. Valid inputs: 'critical', 'error', 'warning', 'info', or 'debug'.
Python USAGE:
>>> python
>>> from drizzlepac import runsinglehap
    >>> runsinglehap.perform(inputFilename, diagnostic_mode=False, input_custom_pars_file=None, log_level='info')
"""
# Import standard Python modules
import argparse
import sys
# THIRD-PARTY
from stsci.tools import logutil
from drizzlepac import hapsequencer
__taskname__ = "runsinglehap"
# Local variables
__version__ = "0.1.1"
__version_date__ = "(16-Oct-2019)"
#
# These lines (or something similar) will be needed in the HAP processing code
#
MSG_DATEFMT = '%Y%j%H%M%S'
SPLUNK_MSG_FORMAT = '%(asctime)s %(levelname)s src=%(name)s- %(message)s'
log = logutil.create_logger(__name__, level=logutil.logging.NOTSET, stream=sys.stdout,
format=SPLUNK_MSG_FORMAT, datefmt=MSG_DATEFMT)
# ----------------------------------------------------------------------------------------------------------------------
def perform(input_filename, **kwargs):
"""Main calling subroutine for the ``runsinglehap`` task.
Parameters
----------
input_filename : str
Name of the input csv file containing information about the files to
be processed
    diagnostic_mode : bool, optional
        Display all tracebacks and debug information? If not specified, the default value is Boolean 'False'.
input_custom_pars_file : str, optional
Represents a fully specified input filename of a configuration JSON file which has been
customized for specialized processing. This file should contain ALL the input parameters necessary
for processing. If not specified, default configuration parameter values will be used.
log_level : str, optional
The desired level of verboseness in the log statements displayed on the screen and written to the
.log file. Valid inputs: 'critical', 'error', 'warning', 'info', or 'debug'. If not specified, the
default value is 'info'.
    Returns
    -------
    return_value : int
        A simple status value: 0 for a successful run and 1 for a failed run.
"""
# set up log_level as an input to hapsequencer.run_hap_processing().
log_level_dict = {"critical": logutil.logging.CRITICAL,
"error": logutil.logging.ERROR,
"warning": logutil.logging.WARNING,
"info": logutil.logging.INFO,
"debug": logutil.logging.DEBUG}
if 'log_level' in kwargs:
kwargs['log_level'] = kwargs['log_level'].lower()
if kwargs['log_level'] in log_level_dict.keys():
kwargs['log_level'] = log_level_dict[kwargs['log_level']]
else:
print("Log level set to default level 'log.info'.")
kwargs['log_level'] = logutil.logging.INFO
else:
print("Log level set to default level 'log.info'.")
kwargs['log_level'] = logutil.logging.INFO
# execute hapsequencer.run_hap_processing()
return_value = hapsequencer.run_hap_processing(input_filename, **kwargs)
return return_value
# ----------------------------------------------------------------------------------------------------------------------
def main():
parser = argparse.ArgumentParser(description='Process images, produce drizzled images and sourcelists')
parser.add_argument('input_filename', help='Name of the input csv file containing information about the '
'files to be processed')
parser.add_argument('-c', '--input_custom_pars_file', required=False, default=None, help='filename of a '
'configuration JSON file which has been customized for specialized processing. This '
'file should contain ALL the input parameters necessary for processing. If not '
'specified, default configuration parameter values will be used.')
parser.add_argument('-d', '--diagnostic_mode', required=False, action='store_true', help='If this option '
'is turned on, additional log messages will be displayed and additional files will '
'be created during the course of the run.')
parser.add_argument('-l', '--log_level', required=False, default='info',
choices=['critical', 'error', 'warning', 'info', 'debug'], help='The desired level '
'of verboseness in the log statements displayed on the screen and written to the '
                        '.log file. Each level includes all log statements of equal or greater '
                        'severity. Specifying "critical" will '
'only record/display "critical" log statements, and specifying "error" will '
'record/display both "error" and "critical" log statements, and so on.')
user_args = parser.parse_args()
print("Single-visit processing started for: {}".format(user_args.input_filename))
rv = perform(user_args.input_filename, input_custom_pars_file=user_args.input_custom_pars_file,
diagnostic_mode=user_args.diagnostic_mode, log_level=user_args.log_level)
print("Return Value: ", rv)
return rv
if __name__ == '__main__':
main()
|
[
"drizzlepac.hapsequencer.run_hap_processing",
"stsci.tools.logutil.create_logger",
"argparse.ArgumentParser"
] |
[((1666, 1798), 'stsci.tools.logutil.create_logger', 'logutil.create_logger', (['__name__'], {'level': 'logutil.logging.NOTSET', 'stream': 'sys.stdout', 'format': 'SPLUNK_MSG_FORMAT', 'datefmt': 'MSG_DATEFMT'}), '(__name__, level=logutil.logging.NOTSET, stream=sys.\n stdout, format=SPLUNK_MSG_FORMAT, datefmt=MSG_DATEFMT)\n', (1687, 1798), False, 'from stsci.tools import logutil\n'), ((3986, 4043), 'drizzlepac.hapsequencer.run_hap_processing', 'hapsequencer.run_hap_processing', (['input_filename'], {}), '(input_filename, **kwargs)\n', (4017, 4043), False, 'from drizzlepac import hapsequencer\n'), ((4218, 4317), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'description': '"""Process images, produce drizzled images and sourcelists"""'}), "(description=\n 'Process images, produce drizzled images and sourcelists')\n", (4241, 4317), False, 'import argparse\n')]
|
#!/usr/bin/python
# Copyright 2020 The KNIX Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import json
import logging
import os
import signal
import socket
import subprocess
import sys
import time
import requests
from deployment import Deployment
import logging_helpers
import process_utils
sys.path.insert(1, os.path.join(sys.path[0], '../FunctionWorker/python'))
import py3utils
from DataLayerClient import DataLayerClient
from LocalQueueClient import LocalQueueClient
from LocalQueueClientMessage import LocalQueueClientMessage
LOG_FILENAME = '/opt/mfn/logs/sandboxagent.log'
FLUENTBIT_FOLDER = '/opt/mfn/LoggingService/fluent-bit' # this is a symbolic link to the actual fluent-bit folder location inside the sandbox container
ELASTICSEARCH_INDEX_WF = 'mfnwf'
ELASTICSEARCH_INDEX_FE = 'mfnfe'
POLL_TIMEOUT = py3utils.ensure_long(60000)
#ELASTICSEARCH_INDEX = 'wf' # index name will be: 'wf' + [the first character of the workflow name (in lower case)]
class SandboxAgent:
def __init__(self, hostname, queue, datalayer, sandboxid, userid, workflowid, elasticsearch, workflowname, endpoint_key):
self._start = time.time()
self._python_version = sys.version_info
self._hostname = hostname
self._queue = queue
self._datalayer = datalayer
self._elasticsearch = elasticsearch
self._userid = userid
self._sandboxid = sandboxid
self._workflowid = workflowid
self._workflowname = workflowname
# _XXX_: we'll use the endpoint_key to look up our endpoint
self._endpoint_key = endpoint_key
self._deployment_info_key = "deployment_info_workflow_" + self._workflowid
self._logger = logging_helpers.setup_logger(self._sandboxid, LOG_FILENAME)
self._fluentbit_process, self._command_args_map_fluentbit = logging_helpers.setup_fluentbit_and_elasticsearch_index(self._logger, FLUENTBIT_FOLDER, self._elasticsearch, ELASTICSEARCH_INDEX_WF, ELASTICSEARCH_INDEX_FE)
self._logger.info("hostname (and container name): %s", self._hostname)
self._logger.info("elasticsearch nodes: %s", self._elasticsearch)
self._logger.info("queueservice: %s", self._queue)
self._logger.info("datalayer: %s", self._datalayer)
self._logger.info("user id: %s", self._userid)
self._logger.info("sandbox id: %s", self._sandboxid)
self._logger.info("workflow id: %s", self._workflowid)
self._logger.info("workflow name: %s", self._workflowname)
self._logger.info("endpoint_key: %s", self._endpoint_key)
self._instructions_topic = "instructions_" + self._sandboxid
self._management_data_layer_client = DataLayerClient(locality=1, sid="Management", wid="Management", is_wf_private=True, connect=self._datalayer)
self._logger.info("Management data layer client connected after %s s", str(time.time()-self._start))
# to be declared later
self._local_queue_client = None
self._deployment = None
self._queue_service_process = None
self._frontend_process = None
# visible to the outside world: either kubernetes assigned URL or bare-metal host address + exposed port
self._external_endpoint = None
# visible internally: kubernetes node address or same as bare-metal external endpoint
self._internal_endpoint = None
self._is_running = False
self._shutting_down = False
def _handle_instruction(self, instruction):
error = None
action = instruction["action"]
if "parameters" in instruction:
parameters = instruction["parameters"]
if action == "stop-function-worker":
self._deployment.stop_function_worker(parameters["functionTopic"])
elif action == "shutdown":
self.shutdown()
else:
error = "Unsupported 'action' in instruction: " + action
return error
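    # An instruction handled above is a JSON object; e.g. (topic value is
    # illustrative):
    #
    #   {"action": "stop-function-worker",
    #    "parameters": {"functionTopic": "some-function-topic"}}
    #
    #   {"action": "shutdown"}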
def _get_and_handle_message(self):
error = None
lqm = self._local_queue_client.getMessage(self._instructions_topic, POLL_TIMEOUT)
if lqm is not None:
lqcm = LocalQueueClientMessage(lqm)
key = lqcm.get_key()
value = lqcm.get_value()
self._logger.info(key + " " + value)
try:
instruction = json.loads(value)
error = self._handle_instruction(instruction)
except Exception as exc:
error = "Couldn't decode instruction: " + str(exc)
self._logger.error(error)
if error is None:
self._logger.info("Handled instruction successfully at t+ %s s", str(time.time()-self._start))
def _process_deployment_info(self):
has_error = False
errmsg = ""
deployment_info = self._management_data_layer_client.get(self._deployment_info_key)
num_trials = 0
sleep_time = 1.0
while num_trials < 5 and (deployment_info is None or deployment_info == ""):
time.sleep(sleep_time)
deployment_info = self._management_data_layer_client.get(self._deployment_info_key)
num_trials = num_trials + 1
sleep_time = sleep_time * 2
if num_trials == 5:
has_error = True
errmsg = "Could not retrieve deployment info: " + self._deployment_info_key
if not has_error:
# if we're running on kubernetes, the endpoint will correspond to the assigned url
# if we're running on bare-metal, the endpoint will correspond to the hostip + docker-mapped port
            self._external_endpoint = self._management_data_layer_client.getMapEntry(self._workflowid + "_workflow_endpoint_map", self._endpoint_key)
num_trials = 0
sleep_time = 1.0
while num_trials < 5 and (self._external_endpoint is None or self._external_endpoint == ""):
time.sleep(sleep_time)
                self._external_endpoint = self._management_data_layer_client.getMapEntry(self._workflowid + "_workflow_endpoint_map", self._endpoint_key)
num_trials = num_trials + 1
sleep_time = sleep_time * 2
if num_trials == 5:
has_error = True
errmsg = "Could not retrieve endpoint: " + self._endpoint_key
# in Kubernetes, endpoint is the externally visible URL
# in bare-metal, endpoint is the current host's address
# for session support, in FunctionWorker, we need current host address (bare-metal)
# or current node address (kubernetes)
# for parallel state support, in FunctionWorker, either would be fine
# As such, let the FunctionWorker know both and let it decide what to do
if 'KUBERNETES_SERVICE_HOST' in os.environ:
# get current node's internal address
self._internal_endpoint = "http://" + socket.gethostbyname(socket.gethostname()) + ":" + str(os.getenv("PORT", "8080"))
else:
# bare-metal mode: the current host's address and external address are the same
self._internal_endpoint = self._external_endpoint
if not has_error:
self._logger.info("External endpoint: %s", self._external_endpoint)
self._logger.info("Internal endpoint: %s", self._internal_endpoint)
self._deployment = Deployment(deployment_info,\
self._hostname, self._userid, self._sandboxid, self._workflowid,\
self._workflowname, self._queue, self._datalayer, \
self._logger, self._external_endpoint, self._internal_endpoint)
self._deployment.set_child_process("fb", self._fluentbit_process, self._command_args_map_fluentbit)
has_error, errmsg = self._deployment.process_deployment_info()
return has_error, errmsg
# SIGTERM kills Thrift before we can handle stuff
def sigterm(self, signum, _):
self._logger.info("SIGTERM received...")
# we will call shutdown() when we catch the exception
# raise interrupt to kill main sequence when shutdown was not received through the queue
raise KeyboardInterrupt
def sigchld(self, signum, _):
if not self._shutting_down:
should_shutdown, pid, failed_process_name = self._deployment.check_child_process()
if should_shutdown:
self._update_deployment_status(True, "A sandbox process stopped unexpectedly: " + failed_process_name)
if pid == self._queue_service_process.pid:
self._queue_service_process = None
elif pid == self._frontend_process.pid:
self._frontend_process = None
self.shutdown(reason="Process " + failed_process_name + " with pid: " + str(pid) + " stopped unexpectedly.")
def shutdown(self, reason=None):
self._shutting_down = True
errmsg = ""
if reason is not None:
errmsg = "Shutting down sandboxagent due to reason: " + reason + "..."
self._logger.info(errmsg)
else:
self._logger.info("Gracefully shutting down sandboxagent...")
if self._frontend_process is not None:
self._logger.info("Shutting down the frontend...")
self._frontend_process.terminate()
else:
self._logger.info("No frontend; most probably it was the reason of the shutdown.")
# shutting down function workers depends on the queue service
if self._queue_service_process is not None:
self._logger.info("Shutting down the function worker(s)...")
self._deployment.shutdown()
# shut down the local queue client, so that we can also shut down the queue service
self._local_queue_client.removeTopic(self._instructions_topic)
self._local_queue_client.shutdown()
self._logger.info("Shutting down the queue service...")
process_utils.terminate_and_wait_child(self._queue_service_process, "queue service", 5, self._logger)
else:
self._logger.info("No queue service; most probably it was the reason of the shutdown.")
self._logger.info("Force shutting down the function worker(s)...")
self._deployment.force_shutdown()
# we can't do this here, because there may be other sandboxes running the same workflow
#self._management_data_layer_client.put("workflow_status_" + self._workflowid, "undeployed")
self._management_data_layer_client.shutdown()
self._logger.info("Shutting down fluent-bit...")
time.sleep(2) # flush interval of fluent-bit
process_utils.terminate_and_wait_child(self._fluentbit_process, "fluent-bit", 5, self._logger)
self._is_running = False
if self._frontend_process is not None:
try:
self._frontend_process.wait(30)
            except subprocess.TimeoutExpired:
self._frontend_process.kill()
_, _ = self._frontend_process.communicate()
self._logger.info("Shutdown complete.")
if reason is not None:
self._update_deployment_status(True, errmsg)
self._management_data_layer_client.shutdown()
os._exit(1)
else:
self._update_deployment_status(False, errmsg)
self._management_data_layer_client.shutdown()
os._exit(0)
def _stop_deployment(self, reason, errmsg):
self._logger.error("Stopping deployment due to error in launching %s...", reason)
self._logger.info(errmsg)
self._update_deployment_status(True, errmsg)
self._management_data_layer_client.shutdown()
os._exit(1)
def _update_deployment_status(self, has_error, errmsg):
sbstatus = {}
sbstatus["errmsg"] = errmsg
if has_error:
sbstatus["status"] = "failed"
else:
if self._shutting_down:
sbstatus["status"] = "undeployed"
else:
sbstatus["status"] = "deployed"
# set our own status in the map
self._management_data_layer_client.putMapEntry(self._workflowid + "_sandbox_status_map", self._endpoint_key, json.dumps(sbstatus))
def run(self):
has_error = False
errmsg = ""
ts_qs_launch = time.time()
# 1. launch the QueueService here
self._logger.info("Launching QueueService...")
cmdqs = "java -jar /opt/mfn/queueservice.jar"
command_args_map_qs = {}
command_args_map_qs["command"] = cmdqs
command_args_map_qs["wait_until"] = "Starting local queue..."
error, self._queue_service_process = process_utils.run_command(cmdqs, self._logger, wait_until="Starting local queue...")
if error is not None:
has_error = True
errmsg = "Could not start the sandbox queue service: " + str(error)
if has_error:
self._stop_deployment("queue service", errmsg)
ts_fw_launch = time.time()
# 2. process the deployment info and start function workers
self._logger.info("Going to parse the deployment info and get the endpoint...")
has_error, errmsg = self._process_deployment_info()
if has_error:
self._stop_deployment("workflow", errmsg)
ts_fe_launch = time.time()
# 3. launch the frontend
self._logger.info("Launching frontend...")
cmdweb = "/opt/mfn/frontend"
fenv = dict(os.environ)
workflow = self._deployment.get_workflow()
fenv["MFN_ENTRYTOPIC"] = workflow.getWorkflowEntryTopic()
fenv["MFN_RESULTTOPIC"] = workflow.getWorkflowExitTopic()
fenv["MFN_QUEUE"] = self._queue
# MFN_DATALAYER already set
command_args_map_fe = {}
command_args_map_fe["command"] = cmdweb
command_args_map_fe["custom_env"] = fenv
command_args_map_fe["wait_until"] = "Frontend is ready to handle requests"
error, self._frontend_process = process_utils.run_command(cmdweb, self._logger, custom_env=fenv, wait_until="Frontend is ready to handle requests")
if error is not None:
has_error = True
errmsg = "Could not start the frontend: " + str(error)
if has_error:
self._stop_deployment("frontend", errmsg)
self._logger.info("frontend started")
t_fe = (time.time() - ts_fe_launch) * 1000.0
t_fw = (ts_fe_launch - ts_fw_launch) * 1000.0
t_qs = (ts_fw_launch - ts_qs_launch) * 1000.0
self._logger.info("QS launch time: %s (ms), FWs download + launch time: %s (ms), FE launch time: %s (ms)", str(t_qs), str(t_fw), str(t_fe))
self._deployment.set_child_process("qs", self._queue_service_process, command_args_map_qs)
self._deployment.set_child_process("fe", self._frontend_process, command_args_map_fe)
signal.signal(signal.SIGTERM, self.sigterm)
children_pids = self._deployment.get_all_children_pids()
children_pids.sort()
self._logger.info("All children pids: " + str(children_pids))
signal.signal(signal.SIGCHLD, self.sigchld)
# update our own sandbox status
self._update_deployment_status(False, errmsg)
#self._management_data_layer_client.put("workflow_status_" + self._workflowid, "deployed")
#self._management_data_layer_client.delete("workflow_status_error_" + self._workflowid)
# 4. start listening for additional instructions if any
self._local_queue_client = LocalQueueClient(connect=self._queue)
self._local_queue_client.addTopic(self._instructions_topic)
self._is_running = True
self._logger.info("Successfully deployed.")
while self._is_running:
try:
self._get_and_handle_message()
            except KeyboardInterrupt:
self._logger.info("Interrupted...")
self.shutdown()
except Exception as exc:
self._logger.error("%s", str(exc))
# allow shutdown() some time to clean up
if self._shutting_down:
time.sleep(5)
else:
time.sleep(2)
def get_k8s_nodename():
with open('/var/run/secrets/kubernetes.io/serviceaccount/token', 'r') as ftoken:
token = ftoken.read()
with open('/var/run/secrets/kubernetes.io/serviceaccount/namespace', 'r') as fnamespace:
namespace = fnamespace.read()
k8sport = os.getenv('KUBERNETES_SERVICE_PORT_HTTPS')
podname = socket.gethostname()
try:
resp = requests.get(
'https://kubernetes.default:'+k8sport+'/api/v1/namespaces/'+namespace+'/pods/'+podname,
headers={"Authorization": "Bearer "+token, "Accept": "application/json"},
verify='/var/run/secrets/kubernetes.io/serviceaccount/ca.crt',
proxies={"https":""})
resp.raise_for_status()
pod = resp.json()
return pod["spec"]["nodeName"]
except (requests.exceptions.HTTPError, KeyError) as httperr:
logger.error("Unable to find my own pod spec %s", podname)
logger.error(resp.text, httperr)
sys.exit(1)
return podname
def find_k8s_ep(fqdn):
# On K8s, sandboxes are run with MFN_HOSTNAME = kubernetes node name
# Find host-local queue and datalayer endpoints
with open('/var/run/secrets/kubernetes.io/serviceaccount/token', 'r') as ftoken:
token = ftoken.read()
with open('/var/run/secrets/kubernetes.io/serviceaccount/namespace', 'r') as fnamespace:
namespace = fnamespace.read()
k8sport = os.getenv('KUBERNETES_SERVICE_PORT_HTTPS')
nodename = os.getenv("MFN_HOSTNAME")
svcname = fqdn.split('.', 1)[0]
retry = True
try:
while retry:
resp = requests.get(
'https://kubernetes.default:'+k8sport+'/api/v1/namespaces/'+namespace+'/endpoints/'+svcname,
headers={"Authorization": "Bearer "+token, "Accept": "application/json"},
verify='/var/run/secrets/kubernetes.io/serviceaccount/ca.crt',
proxies={"https":""})
resp.raise_for_status()
dleps = resp.json()
            # we got the service; stop retrying unless we find a matching endpoint below that isn't ready yet
retry = False
for subs in dleps["subsets"]:
for addr in subs.get("addresses", []):
if addr.get("nodeName", None) == nodename:
logger.info("Found collocated endpoint of svc "+svcname+" at "+addr["ip"])
return addr["hostname"] + "." + svcname + ":" + str(dleps["subsets"][0]["ports"][0]["port"])
for addr in subs.get("notReadyAddresses", []):
if addr.get("nodeName", None) == nodename:
infomsg = "Found collocated endpoint that isn't ready yet. Waiting another 5s for svc "+svcname+" endpoint at "+addr["ip"]+" to become ready"
logger.info(infomsg)
retry = True
time.sleep(5)
break
except (requests.exceptions.HTTPError, KeyError) as httperr:
logger.error("Unable to find a collocated service endpoint address for svc %s, please delete pod to have it rescheduled on another node", svcname)
logger.error(resp.text, httperr)
sys.exit(1)
return fqdn
if __name__ == "__main__":
logger = logging.getLogger()
# MFN_HOSTNAME aka hostname is used to:
# - distinguish whether functions are running on the same physical host or not, mainly for session functions
# - allow kubernetes sandboxes to find the queue and datalayer endpoint that is host-local
if len(sys.argv) == 8:
logger.info("Getting parameters from the command line...")
hostname = sys.argv[1]
userid = sys.argv[3]
sandboxid = sys.argv[4]
workflowid = sys.argv[5]
queue = "127.0.0.1:4999"
datalayer = hostname + ":4998"
elasticsearch = sys.argv[6]
endpoint_key = sys.argv[7]
workflowname = workflowid
else:
logger.info("Getting parameters from environment variables...")
hostname = os.getenv("MFN_HOSTNAME", os.getenv("HOSTNAME", socket.gethostname()))
queue = os.getenv("MFN_QUEUE", "127.0.0.1:4999")
datalayer = os.getenv("MFN_DATALAYER", hostname+":4998")
userid = os.getenv("USERID")
sandboxid = os.getenv("SANDBOXID")
workflowid = os.getenv("WORKFLOWID")
elasticsearch = os.getenv("MFN_ELASTICSEARCH", hostname+":9200")
endpoint_key = os.getenv("MFN_ENDPOINT_KEY")
workflowname = os.getenv("WORKFLOWNAME", workflowid)
if os.path.exists('/var/run/secrets/kubernetes.io'):
if "MFN_HOSTNAME" not in os.environ:
os.environ["MFN_HOSTNAME"] = get_k8s_nodename()
hostname = os.environ["MFN_HOSTNAME"]
# Find endpoints for datalayer
datalayer = find_k8s_ep(datalayer)
#queue = find_k8s_ep(queue)
sandbox_agent = SandboxAgent(hostname, queue, datalayer, sandboxid, userid, workflowid, elasticsearch, workflowname, endpoint_key)
sandbox_agent.run()
|
[
"json.dumps",
"logging_helpers.setup_logger",
"LocalQueueClientMessage.LocalQueueClientMessage",
"os.path.join",
"json.loads",
"os.path.exists",
"process_utils.terminate_and_wait_child",
"socket.gethostname",
"LocalQueueClient.LocalQueueClient",
"requests.get",
"logging_helpers.setup_fluentbit_and_elasticsearch_index",
"py3utils.ensure_long",
"time.sleep",
"DataLayerClient.DataLayerClient",
"signal.signal",
"os.getenv",
"sys.exit",
"process_utils.run_command",
"time.time",
"os._exit",
"deployment.Deployment",
"logging.getLogger"
] |
[((1344, 1371), 'py3utils.ensure_long', 'py3utils.ensure_long', (['(60000)'], {}), '(60000)\n', (1364, 1371), False, 'import py3utils\n'), ((842, 895), 'os.path.join', 'os.path.join', (['sys.path[0]', '"""../FunctionWorker/python"""'], {}), "(sys.path[0], '../FunctionWorker/python')\n", (854, 895), False, 'import os\n'), ((17131, 17173), 'os.getenv', 'os.getenv', (['"""KUBERNETES_SERVICE_PORT_HTTPS"""'], {}), "('KUBERNETES_SERVICE_PORT_HTTPS')\n", (17140, 17173), False, 'import os\n'), ((17188, 17208), 'socket.gethostname', 'socket.gethostname', ([], {}), '()\n', (17206, 17208), False, 'import socket\n'), ((18261, 18303), 'os.getenv', 'os.getenv', (['"""KUBERNETES_SERVICE_PORT_HTTPS"""'], {}), "('KUBERNETES_SERVICE_PORT_HTTPS')\n", (18270, 18303), False, 'import os\n'), ((18319, 18344), 'os.getenv', 'os.getenv', (['"""MFN_HOSTNAME"""'], {}), "('MFN_HOSTNAME')\n", (18328, 18344), False, 'import os\n'), ((20087, 20106), 'logging.getLogger', 'logging.getLogger', ([], {}), '()\n', (20104, 20106), False, 'import logging\n'), ((21370, 21418), 'os.path.exists', 'os.path.exists', (['"""/var/run/secrets/kubernetes.io"""'], {}), "('/var/run/secrets/kubernetes.io')\n", (21384, 21418), False, 'import os\n'), ((1658, 1669), 'time.time', 'time.time', ([], {}), '()\n', (1667, 1669), False, 'import time\n'), ((2225, 2284), 'logging_helpers.setup_logger', 'logging_helpers.setup_logger', (['self._sandboxid', 'LOG_FILENAME'], {}), '(self._sandboxid, LOG_FILENAME)\n', (2253, 2284), False, 'import logging_helpers\n'), ((2353, 2517), 'logging_helpers.setup_fluentbit_and_elasticsearch_index', 'logging_helpers.setup_fluentbit_and_elasticsearch_index', (['self._logger', 'FLUENTBIT_FOLDER', 'self._elasticsearch', 'ELASTICSEARCH_INDEX_WF', 'ELASTICSEARCH_INDEX_FE'], {}), '(self._logger,\n FLUENTBIT_FOLDER, self._elasticsearch, ELASTICSEARCH_INDEX_WF,\n ELASTICSEARCH_INDEX_FE)\n', (2408, 2517), False, 'import logging_helpers\n'), ((3211, 3323), 'DataLayerClient.DataLayerClient', 'DataLayerClient', ([], {'locality': '(1)', 'sid': '"""Management"""', 'wid': '"""Management"""', 'is_wf_private': '(True)', 'connect': 'self._datalayer'}), "(locality=1, sid='Management', wid='Management',\n is_wf_private=True, connect=self._datalayer)\n", (3226, 3323), False, 'from DataLayerClient import DataLayerClient\n'), ((11171, 11184), 'time.sleep', 'time.sleep', (['(2)'], {}), '(2)\n', (11181, 11184), False, 'import time\n'), ((11224, 11322), 'process_utils.terminate_and_wait_child', 'process_utils.terminate_and_wait_child', (['self._fluentbit_process', '"""fluent-bit"""', '(5)', 'self._logger'], {}), "(self._fluentbit_process,\n 'fluent-bit', 5, self._logger)\n", (11262, 11322), False, 'import process_utils\n'), ((12286, 12297), 'os._exit', 'os._exit', (['(1)'], {}), '(1)\n', (12294, 12297), False, 'import os\n'), ((12917, 12928), 'time.time', 'time.time', ([], {}), '()\n', (12926, 12928), False, 'import time\n'), ((13275, 13364), 'process_utils.run_command', 'process_utils.run_command', (['cmdqs', 'self._logger'], {'wait_until': '"""Starting local queue..."""'}), "(cmdqs, self._logger, wait_until=\n 'Starting local queue...')\n", (13300, 13364), False, 'import process_utils\n'), ((13605, 13616), 'time.time', 'time.time', ([], {}), '()\n', (13614, 13616), False, 'import time\n'), ((13934, 13945), 'time.time', 'time.time', ([], {}), '()\n', (13943, 13945), False, 'import time\n'), ((14613, 14733), 'process_utils.run_command', 'process_utils.run_command', (['cmdweb', 'self._logger'], {'custom_env': 'fenv', 'wait_until': '"""Frontend 
is ready to handle requests"""'}), "(cmdweb, self._logger, custom_env=fenv, wait_until\n ='Frontend is ready to handle requests')\n", (14638, 14733), False, 'import process_utils\n'), ((15493, 15536), 'signal.signal', 'signal.signal', (['signal.SIGTERM', 'self.sigterm'], {}), '(signal.SIGTERM, self.sigterm)\n', (15506, 15536), False, 'import signal\n'), ((15711, 15754), 'signal.signal', 'signal.signal', (['signal.SIGCHLD', 'self.sigchld'], {}), '(signal.SIGCHLD, self.sigchld)\n', (15724, 15754), False, 'import signal\n'), ((16146, 16183), 'LocalQueueClient.LocalQueueClient', 'LocalQueueClient', ([], {'connect': 'self._queue'}), '(connect=self._queue)\n', (16162, 16183), False, 'from LocalQueueClient import LocalQueueClient\n'), ((17233, 17523), 'requests.get', 'requests.get', (["('https://kubernetes.default:' + k8sport + '/api/v1/namespaces/' +\n namespace + '/pods/' + podname)"], {'headers': "{'Authorization': 'Bearer ' + token, 'Accept': 'application/json'}", 'verify': '"""/var/run/secrets/kubernetes.io/serviceaccount/ca.crt"""', 'proxies': "{'https': ''}"}), "('https://kubernetes.default:' + k8sport +\n '/api/v1/namespaces/' + namespace + '/pods/' + podname, headers={\n 'Authorization': 'Bearer ' + token, 'Accept': 'application/json'},\n verify='/var/run/secrets/kubernetes.io/serviceaccount/ca.crt', proxies=\n {'https': ''})\n", (17245, 17523), False, 'import requests\n'), ((20944, 20984), 'os.getenv', 'os.getenv', (['"""MFN_QUEUE"""', '"""127.0.0.1:4999"""'], {}), "('MFN_QUEUE', '127.0.0.1:4999')\n", (20953, 20984), False, 'import os\n'), ((21005, 21051), 'os.getenv', 'os.getenv', (['"""MFN_DATALAYER"""', "(hostname + ':4998')"], {}), "('MFN_DATALAYER', hostname + ':4998')\n", (21014, 21051), False, 'import os\n'), ((21067, 21086), 'os.getenv', 'os.getenv', (['"""USERID"""'], {}), "('USERID')\n", (21076, 21086), False, 'import os\n'), ((21107, 21129), 'os.getenv', 'os.getenv', (['"""SANDBOXID"""'], {}), "('SANDBOXID')\n", (21116, 21129), False, 'import os\n'), ((21151, 21174), 'os.getenv', 'os.getenv', (['"""WORKFLOWID"""'], {}), "('WORKFLOWID')\n", (21160, 21174), False, 'import os\n'), ((21199, 21249), 'os.getenv', 'os.getenv', (['"""MFN_ELASTICSEARCH"""', "(hostname + ':9200')"], {}), "('MFN_ELASTICSEARCH', hostname + ':9200')\n", (21208, 21249), False, 'import os\n'), ((21271, 21300), 'os.getenv', 'os.getenv', (['"""MFN_ENDPOINT_KEY"""'], {}), "('MFN_ENDPOINT_KEY')\n", (21280, 21300), False, 'import os\n'), ((21324, 21361), 'os.getenv', 'os.getenv', (['"""WORKFLOWNAME"""', 'workflowid'], {}), "('WORKFLOWNAME', workflowid)\n", (21333, 21361), False, 'import os\n'), ((4662, 4690), 'LocalQueueClientMessage.LocalQueueClientMessage', 'LocalQueueClientMessage', (['lqm'], {}), '(lqm)\n', (4685, 4690), False, 'from LocalQueueClientMessage import LocalQueueClientMessage\n'), ((5550, 5572), 'time.sleep', 'time.sleep', (['sleep_time'], {}), '(sleep_time)\n', (5560, 5572), False, 'import time\n'), ((7901, 8115), 'deployment.Deployment', 'Deployment', (['deployment_info', 'self._hostname', 'self._userid', 'self._sandboxid', 'self._workflowid', 'self._workflowname', 'self._queue', 'self._datalayer', 'self._logger', 'self._external_endpoint', 'self._internal_endpoint'], {}), '(deployment_info, self._hostname, self._userid, self._sandboxid,\n self._workflowid, self._workflowname, self._queue, self._datalayer,\n self._logger, self._external_endpoint, self._internal_endpoint)\n', (7911, 8115), False, 'from deployment import Deployment\n'), ((10512, 10617), 'process_utils.terminate_and_wait_child', 
'process_utils.terminate_and_wait_child', (['self._queue_service_process', '"""queue service"""', '(5)', 'self._logger'], {}), "(self._queue_service_process,\n 'queue service', 5, self._logger)\n", (10550, 10617), False, 'import process_utils\n'), ((11832, 11843), 'os._exit', 'os._exit', (['(1)'], {}), '(1)\n', (11840, 11843), False, 'import os\n'), ((11986, 11997), 'os._exit', 'os._exit', (['(0)'], {}), '(0)\n', (11994, 11997), False, 'import os\n'), ((12805, 12825), 'json.dumps', 'json.dumps', (['sbstatus'], {}), '(sbstatus)\n', (12815, 12825), False, 'import json\n'), ((17820, 17831), 'sys.exit', 'sys.exit', (['(1)'], {}), '(1)\n', (17828, 17831), False, 'import sys\n'), ((18447, 18742), 'requests.get', 'requests.get', (["('https://kubernetes.default:' + k8sport + '/api/v1/namespaces/' +\n namespace + '/endpoints/' + svcname)"], {'headers': "{'Authorization': 'Bearer ' + token, 'Accept': 'application/json'}", 'verify': '"""/var/run/secrets/kubernetes.io/serviceaccount/ca.crt"""', 'proxies': "{'https': ''}"}), "('https://kubernetes.default:' + k8sport +\n '/api/v1/namespaces/' + namespace + '/endpoints/' + svcname, headers={\n 'Authorization': 'Bearer ' + token, 'Accept': 'application/json'},\n verify='/var/run/secrets/kubernetes.io/serviceaccount/ca.crt', proxies=\n {'https': ''})\n", (18459, 18742), False, 'import requests\n'), ((20018, 20029), 'sys.exit', 'sys.exit', (['(1)'], {}), '(1)\n', (20026, 20029), False, 'import sys\n'), ((4857, 4874), 'json.loads', 'json.loads', (['value'], {}), '(value)\n', (4867, 4874), False, 'import json\n'), ((6448, 6470), 'time.sleep', 'time.sleep', (['sleep_time'], {}), '(sleep_time)\n', (6458, 6470), False, 'import time\n'), ((14996, 15007), 'time.time', 'time.time', ([], {}), '()\n', (15005, 15007), False, 'import time\n'), ((20905, 20925), 'socket.gethostname', 'socket.gethostname', ([], {}), '()\n', (20923, 20925), False, 'import socket\n'), ((3403, 3414), 'time.time', 'time.time', ([], {}), '()\n', (3412, 3414), False, 'import time\n'), ((7488, 7513), 'os.getenv', 'os.getenv', (['"""PORT"""', '"""8080"""'], {}), "('PORT', '8080')\n", (7497, 7513), False, 'import os\n'), ((16775, 16788), 'time.sleep', 'time.sleep', (['(5)'], {}), '(5)\n', (16785, 16788), False, 'import time\n'), ((16831, 16844), 'time.sleep', 'time.sleep', (['(2)'], {}), '(2)\n', (16841, 16844), False, 'import time\n'), ((19705, 19718), 'time.sleep', 'time.sleep', (['(5)'], {}), '(5)\n', (19715, 19718), False, 'import time\n'), ((5199, 5210), 'time.time', 'time.time', ([], {}), '()\n', (5208, 5210), False, 'import time\n'), ((7454, 7474), 'socket.gethostname', 'socket.gethostname', ([], {}), '()\n', (7472, 7474), False, 'import socket\n')]
|
# Copyright (c) 2019 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from argparse import ArgumentParser, RawDescriptionHelpFormatter
import yaml
import re
from ppdet.core.workspace import get_registered_modules
__all__ = ['ColorTTY', 'ArgsParser']
class ColorTTY(object):
def __init__(self):
super(ColorTTY, self).__init__()
self.colors = ['red', 'green', 'yellow', 'blue', 'magenta', 'cyan']
def __getattr__(self, attr):
if attr in self.colors:
color = self.colors.index(attr) + 31
def color_message(message):
return "[{}m{}[0m".format(color, message)
setattr(self, attr, color_message)
return color_message
def bold(self, message):
return self.with_code('01', message)
def with_code(self, code, message):
return "[{}m{}[0m".format(code, message)
class ArgsParser(ArgumentParser):
def __init__(self):
super(ArgsParser, self).__init__(
formatter_class=RawDescriptionHelpFormatter)
self.add_argument("-c", "--config", help="configuration file to use")
self.add_argument(
"-o", "--opt", nargs='*', help="set configuration options")
def parse_args(self, argv=None):
args = super(ArgsParser, self).parse_args(argv)
assert args.config is not None, \
"Please specify --config=configure_file_path."
args.opt = self._parse_opt(args.opt)
return args
def _parse_opt(self, opts):
config = {}
if not opts:
return config
for s in opts:
s = s.strip()
k, v = s.split('=', 1)
if '.' not in k:
config[k] = yaml.load(v, Loader=yaml.Loader)
else:
keys = k.split('.')
if keys[0] not in config:
config[keys[0]] = {}
cur = config[keys[0]]
for idx, key in enumerate(keys[1:]):
if idx == len(keys) - 2:
cur[key] = yaml.load(v, Loader=yaml.Loader)
else:
cur[key] = {}
cur = cur[key]
return config
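    # Illustrative use of the ``-o`` override syntax parsed above (option values
    # are hypothetical):
    #
    #   tool -c config.yml -o use_gpu=false TrainReader.batch_size=2
    #
    # yields ``{'use_gpu': False, 'TrainReader': {'batch_size': 2}}``.
# ``dump_value`` is used by ``print_total_cfg`` below but is not defined in this
# snippet; a minimal sketch (an assumption, not necessarily the original helper)
# that renders a config value as a single-line string:
def dump_value(value):
    if hasattr(value, '__dict__') or isinstance(value, (dict, tuple, list)):
        # containers: render as single-line YAML
        value = yaml.dump(value, default_flow_style=True)
        value = value.replace('\n', '').replace('...', '')
        return "'{}'".format(value)
    # primitive types: plain string conversion
    return str(value)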
def print_total_cfg(config):
modules = get_registered_modules()
color_tty = ColorTTY()
green = '___{}___'.format(color_tty.colors.index('green') + 31)
styled = {}
for key in config.keys():
if not config[key]: # empty schema
continue
if key not in modules and not hasattr(config[key], '__dict__'):
styled[key] = config[key]
continue
elif key in modules:
module = modules[key]
else:
type_name = type(config[key]).__name__
if type_name in modules:
module = modules[type_name].copy()
module.update({
k: v
for k, v in config[key].__dict__.items()
if k in module.schema
})
key += " ({})".format(type_name)
default = module.find_default_keys()
missing = module.find_missing_keys()
mismatch = module.find_mismatch_keys()
extra = module.find_extra_keys()
dep_missing = []
for dep in module.inject:
if isinstance(module[dep], str) and module[dep] != '<value>':
if module[dep] not in modules: # not a valid module
dep_missing.append(dep)
else:
dep_mod = modules[module[dep]]
# empty dict but mandatory
if not dep_mod and dep_mod.mandatory():
dep_missing.append(dep)
override = list(
set(module.keys()) - set(default) - set(extra) - set(dep_missing))
replacement = {}
for name in set(override + default + extra + mismatch + missing):
new_name = name
if name in missing:
value = "<missing>"
else:
value = module[name]
if name in extra:
value = dump_value(value) + " <extraneous>"
elif name in mismatch:
value = dump_value(value) + " <type mismatch>"
elif name in dep_missing:
value = dump_value(value) + " <module config missing>"
elif name in override and value != '<missing>':
mark = green
new_name = mark + name
replacement[new_name] = value
styled[key] = replacement
buffer = yaml.dump(styled, default_flow_style=False, default_style='')
    buffer = re.sub(r"<missing>", "\033[31m<missing>\033[0m", buffer)
    buffer = re.sub(r"<extraneous>", "\033[33m<extraneous>\033[0m", buffer)
    buffer = re.sub(r"<type mismatch>", "\033[31m<type mismatch>\033[0m", buffer)
    buffer = re.sub(r"<module config missing>",
                    "\033[31m<module config missing>\033[0m", buffer)
    buffer = re.sub(r"___(\d+)___(.*?):", "\033[\\1m\\2\033[0m:", buffer)
print(buffer)
|
[
"yaml.load",
"yaml.dump",
"ppdet.core.workspace.get_registered_modules",
"re.sub"
] |
[((2805, 2829), 'ppdet.core.workspace.get_registered_modules', 'get_registered_modules', ([], {}), '()\n', (2827, 2829), False, 'from ppdet.core.workspace import get_registered_modules\n'), ((5134, 5195), 'yaml.dump', 'yaml.dump', (['styled'], {'default_flow_style': '(False)', 'default_style': '""""""'}), "(styled, default_flow_style=False, default_style='')\n", (5143, 5195), False, 'import yaml\n'), ((5210, 5257), 're.sub', 're.sub', (['"""<missing>"""', '"""[31m<missing>[0m"""', 'buffer'], {}), "('<missing>', '[31m<missing>[0m', buffer)\n", (5216, 5257), False, 'import re\n'), ((5275, 5328), 're.sub', 're.sub', (['"""<extraneous>"""', '"""[33m<extraneous>[0m"""', 'buffer'], {}), "('<extraneous>', '[33m<extraneous>[0m', buffer)\n", (5281, 5328), False, 'import re\n'), ((5346, 5405), 're.sub', 're.sub', (['"""<type mismatch>"""', '"""[31m<type mismatch>[0m"""', 'buffer'], {}), "('<type mismatch>', '[31m<type mismatch>[0m', buffer)\n", (5352, 5405), False, 'import re\n'), ((5423, 5498), 're.sub', 're.sub', (['"""<module config missing>"""', '"""[31m<module config missing>[0m"""', 'buffer'], {}), "('<module config missing>', '[31m<module config missing>[0m', buffer)\n", (5429, 5498), False, 'import re\n'), ((5536, 5588), 're.sub', 're.sub', (['"""___(\\\\d+)___(.*?):"""', '"""[\\\\1m\\\\2[0m:"""', 'buffer'], {}), "('___(\\\\d+)___(.*?):', '[\\\\1m\\\\2[0m:', buffer)\n", (5542, 5588), False, 'import re\n'), ((2261, 2293), 'yaml.load', 'yaml.load', (['v'], {'Loader': 'yaml.Loader'}), '(v, Loader=yaml.Loader)\n', (2270, 2293), False, 'import yaml\n'), ((2602, 2634), 'yaml.load', 'yaml.load', (['v'], {'Loader': 'yaml.Loader'}), '(v, Loader=yaml.Loader)\n', (2611, 2634), False, 'import yaml\n')]
|
import sys
import argparse
from os.path import abspath, dirname
sys.path.append(dirname(dirname(abspath(__file__))))
from map import generate
NODES_RANGE = [8, 8]
COST_RANGE = [10, 500]
OBJECTS_RANGE = [4, 4]
OCCURRENCES_RANGE = [1, 4]
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument(
"--map", help="path to the map file", type=str, default="maps/default.dat"
)
fname = parser.parse_args().map
generate(fname, NODES_RANGE, COST_RANGE, OBJECTS_RANGE, OCCURRENCES_RANGE)
|
[
"os.path.abspath",
"argparse.ArgumentParser",
"map.generate"
] |
[((291, 316), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {}), '()\n', (314, 316), False, 'import argparse\n'), ((475, 549), 'map.generate', 'generate', (['fname', 'NODES_RANGE', 'COST_RANGE', 'OBJECTS_RANGE', 'OCCURRENCES_RANGE'], {}), '(fname, NODES_RANGE, COST_RANGE, OBJECTS_RANGE, OCCURRENCES_RANGE)\n', (483, 549), False, 'from map import generate\n'), ((107, 124), 'os.path.abspath', 'abspath', (['__file__'], {}), '(__file__)\n', (114, 124), False, 'from os.path import join, abspath, dirname\n')]
|
import sys
sys.path.append('/usr/lib/python3.6/site-packages/')
import argparse
import base64
from datetime import datetime
import os
import shutil
import numpy as np
import socketio
import eventlet.wsgi
from PIL import Image
from flask import Flask
from io import BytesIO
from keras.models import load_model
import lycon  # fast image I/O library; needed for lycon.save() below
from utils import parse_position, preprocess_image
sio = socketio.Server()
app = Flask(__name__)
model = None
prev_image_array = None
MAX_SPEED = 30
MIN_SPEED = 10
speed_limit = MAX_SPEED
recorded_points = []
lap_definition = None
no_manual_input = True
@sio.on('telemetry')
def telemetry(sid, data):
global recorded_points
global no_manual_input
if data:
x, y, z = parse_position(data["Position"])
if no_manual_input:
recorded_points.append([x, y, z])
speed = float(data["speed"])
        image = Image.open(BytesIO(base64.b64decode(data["image"])))
        # keep the raw frame around; ``image`` is replaced by the preprocessed
        # (normalized) array inside the try block below
        raw_frame = np.asarray(image)
try:
image = preprocess_image(image)
image = np.array([np.asarray(image)])
image = 2.0 * image / 255 - 1.0
steering_angle = float(model.predict(image, batch_size=1))
global speed_limit
if speed > speed_limit:
speed_limit = MIN_SPEED # slow down
else:
speed_limit = MAX_SPEED
throttle = 1.0 - steering_angle ** 2 - (speed / speed_limit) ** 2
# print('{} {} {}'.format(steering_angle, throttle, speed))
send_control(steering_angle, throttle)
except Exception as e:
print(e)
if args.image_folder != '':
timestamp = datetime.utcnow().strftime('%Y_%m_%d_%H_%M_%S_%f')[:-3]
image_filename = os.path.join(args.image_folder, timestamp)
            lycon.save(path='{}.jpg'.format(image_filename), image=raw_frame)
else:
no_manual_input = False
sio.emit('manual', data={}, skip_sid=True)
@sio.on('connect')
def connect(sid, environ):
print("Connection established (id: {}).".format(sid))
send_control(0, 0)
@sio.on('disconnect')
def disconnect(sid):
output_path = 'car_positions.npz'
print("\nConnection terminated - saving data to {}.".format(output_path))
np.savez_compressed(output_path, recorded_points=np.asarray(recorded_points))
print("\n**** Data saved; press ctrl-C to exit.\n")
def send_control(steering_angle, throttle):
sio.emit(
"steer",
data={
'steering_angle': steering_angle.__str__(),
'throttle': throttle.__str__()
},
skip_sid=True)
if __name__ == '__main__':
parser = argparse.ArgumentParser(description='Remote Driving')
parser.add_argument(
'model',
type=str,
help='Path to model h5 file. Model should be on the same path.'
)
parser.add_argument(
'--lap_data',
type=str,
default='',
help='Path to lap data (required for progress).'
)
parser.add_argument(
'--image_folder',
type=str,
default='',
help='Path to image folder. This is where the images from the run will be saved.'
)
args = parser.parse_args()
# load model
model = load_model(args.model)
if args.lap_data != '':
try:
lap_definition = np.load(args.lap_data)
except:
print("Failed to load " + args.lap_data + "; no progress reporting.")
if args.image_folder != '':
print("Creating image folder at {}".format(args.image_folder))
if not os.path.exists(args.image_folder):
os.makedirs(args.image_folder)
else:
shutil.rmtree(args.image_folder)
os.makedirs(args.image_folder)
print("**** Recording images from this run to {}.".format(args.image_folder))
else:
print("Image recording not enabled on this run.")
app = socketio.Middleware(sio, app)
eventlet.wsgi.server(eventlet.listen(('', 4567)), app)
|
[
"sys.path.append",
"keras.models.load_model",
"socketio.Middleware",
"utils.parse_position",
"numpy.load",
"argparse.ArgumentParser",
"os.makedirs",
"socketio.Server",
"flask.Flask",
"numpy.asarray",
"os.path.exists",
"base64.b64decode",
"datetime.datetime.utcnow",
"shutil.rmtree",
"utils.preprocess_image",
"os.path.join"
] |
[((11, 63), 'sys.path.append', 'sys.path.append', (['"""/usr/lib/python3.6/site-packages/"""'], {}), "('/usr/lib/python3.6/site-packages/')\n", (26, 63), False, 'import sys\n'), ((369, 386), 'socketio.Server', 'socketio.Server', ([], {}), '()\n', (384, 386), False, 'import socketio\n'), ((393, 408), 'flask.Flask', 'Flask', (['__name__'], {}), '(__name__)\n', (398, 408), False, 'from flask import Flask\n'), ((2631, 2684), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'description': '"""Remote Driving"""'}), "(description='Remote Driving')\n", (2654, 2684), False, 'import argparse\n'), ((3217, 3239), 'keras.models.load_model', 'load_model', (['args.model'], {}), '(args.model)\n', (3227, 3239), False, 'from keras.models import load_model\n'), ((3897, 3926), 'socketio.Middleware', 'socketio.Middleware', (['sio', 'app'], {}), '(sio, app)\n', (3916, 3926), False, 'import socketio\n'), ((704, 736), 'utils.parse_position', 'parse_position', (["data['Position']"], {}), "(data['Position'])\n", (718, 736), False, 'from utils import parse_position, preprocess_image\n'), ((957, 980), 'utils.preprocess_image', 'preprocess_image', (['image'], {}), '(image)\n', (973, 980), False, 'from utils import parse_position, preprocess_image\n'), ((1727, 1769), 'os.path.join', 'os.path.join', (['args.image_folder', 'timestamp'], {}), '(args.image_folder, timestamp)\n', (1739, 1769), False, 'import os\n'), ((2279, 2306), 'numpy.asarray', 'np.asarray', (['recorded_points'], {}), '(recorded_points)\n', (2289, 2306), True, 'import numpy as np\n'), ((3311, 3333), 'numpy.load', 'np.load', (['args.lap_data'], {}), '(args.lap_data)\n', (3318, 3333), True, 'import numpy as np\n'), ((3552, 3585), 'os.path.exists', 'os.path.exists', (['args.image_folder'], {}), '(args.image_folder)\n', (3566, 3585), False, 'import os\n'), ((3599, 3629), 'os.makedirs', 'os.makedirs', (['args.image_folder'], {}), '(args.image_folder)\n', (3610, 3629), False, 'import os\n'), ((3656, 3688), 'shutil.rmtree', 'shutil.rmtree', (['args.image_folder'], {}), '(args.image_folder)\n', (3669, 3688), False, 'import shutil\n'), ((3701, 3731), 'os.makedirs', 'os.makedirs', (['args.image_folder'], {}), '(args.image_folder)\n', (3712, 3731), False, 'import os\n'), ((885, 916), 'base64.b64decode', 'base64.b64decode', (["data['image']"], {}), "(data['image'])\n", (901, 916), False, 'import base64\n'), ((1011, 1028), 'numpy.asarray', 'np.asarray', (['image'], {}), '(image)\n', (1021, 1028), True, 'import numpy as np\n'), ((1642, 1659), 'datetime.datetime.utcnow', 'datetime.utcnow', ([], {}), '()\n', (1657, 1659), False, 'from datetime import datetime\n')]
|
from prol import db
#from sqlalchemy.ext.mutable import MutableDict
class Goal(db.Model):
id = db.Column(db.Integer,primary_key=True)
goal = db.Column(db.Text)
priority = db.Column(db.String(80))
todo = db.Column(db.Text)
#todo = db.Column(db.PickleType)
#todo = db.Column(db.JsonEncodedDict)
user_id = db.Column(db.Integer,db.ForeignKey('user.id'))
def __init__(self, goal,priority,todo,user_id):
self.goal = goal
self.priority = priority
self.todo = todo
self.user_id = user_id
def __repr__(self):
return f"{self.goal}"
class GoalMod:
def __init__(self,goal,priority,todo):
self.goal = goal
self.priority = priority
self.todo = todo
self.todoLen = len(self.todo)
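# Illustrative use of the Goal model (assumes an app context, a migrated
# database, and an existing user id; values are hypothetical):
#
#   g = Goal("learn flask", "high", "read docs;build demo", 1)
#   db.session.add(g)
#   db.session.commit()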
|
[
"prol.db.Column",
"prol.db.String",
"prol.db.ForeignKey"
] |
[((98, 137), 'prol.db.Column', 'db.Column', (['db.Integer'], {'primary_key': '(True)'}), '(db.Integer, primary_key=True)\n', (107, 137), False, 'from prol import db\n'), ((145, 163), 'prol.db.Column', 'db.Column', (['db.Text'], {}), '(db.Text)\n', (154, 163), False, 'from prol import db\n'), ((210, 228), 'prol.db.Column', 'db.Column', (['db.Text'], {}), '(db.Text)\n', (219, 228), False, 'from prol import db\n'), ((187, 200), 'prol.db.String', 'db.String', (['(80)'], {}), '(80)\n', (196, 200), False, 'from prol import db\n'), ((334, 358), 'prol.db.ForeignKey', 'db.ForeignKey', (['"""user.id"""'], {}), "('user.id')\n", (347, 358), False, 'from prol import db\n')]
|
# -*- coding: utf-8 -*-
from mock import patch
from tests import TestCase
from yarn_api_client.node_manager import NodeManager
from yarn_api_client.errors import IllegalArgumentError
@patch('yarn_api_client.node_manager.NodeManager.request')
class NodeManagerTestCase(TestCase):
def setUp(self):
self.nm = NodeManager('localhost')
def test_node_information(self, request_mock):
self.nm.node_information()
request_mock.assert_called_with('/ws/v1/node/info')
def test_node_applications(self, request_mock):
self.nm.node_applications('RUNNING', 'root')
request_mock.assert_called_with('/ws/v1/node/apps',
params={"state": 'RUNNING', "user": 'root'})
self.nm.node_applications()
request_mock.assert_called_with('/ws/v1/node/apps', params={})
with self.assertRaises(IllegalArgumentError):
self.nm.node_applications('ololo', 'root')
def test_node_application(self, request_mock):
self.nm.node_application('app_1')
request_mock.assert_called_with('/ws/v1/node/apps/app_1')
def test_node_containers(self, request_mock):
self.nm.node_containers()
request_mock.assert_called_with('/ws/v1/node/containers')
def test_node_container(self, request_mock):
self.nm.node_container('container_1')
request_mock.assert_called_with('/ws/v1/node/containers/container_1')
def test_auxiliary_services(self, request_mock):
self.nm.auxiliary_services()
request_mock.assert_called_with('/ws/v1/node/auxiliaryservices')
def test_auxiliary_services_update(self, request_mock):
self.nm.auxiliary_services_update({
"services": [
{
"name": "mapreduce_shuffle",
"version": "2",
"configuration": {
"properties": {
"class.name": "org.apache.hadoop.mapred.ShuffleHandler",
"mapreduce.shuffle.transfer.buffer.size": "102400",
"mapreduce.shuffle.port": "13563"
}
}
}
]
})
request_mock.assert_called_with('/ws/v1/node/auxiliaryservices', 'PUT', json={
"services": [
{
"name": "mapreduce_shuffle",
"version": "2",
"configuration": {
"properties": {
"class.name": "org.apache.hadoop.mapred.ShuffleHandler",
"mapreduce.shuffle.transfer.buffer.size": "102400",
"mapreduce.shuffle.port": "13563"
}
}
}
]
})
|
[
"yarn_api_client.node_manager.NodeManager",
"mock.patch"
] |
[((187, 244), 'mock.patch', 'patch', (['"""yarn_api_client.node_manager.NodeManager.request"""'], {}), "('yarn_api_client.node_manager.NodeManager.request')\n", (192, 244), False, 'from mock import patch\n'), ((321, 345), 'yarn_api_client.node_manager.NodeManager', 'NodeManager', (['"""localhost"""'], {}), "('localhost')\n", (332, 345), False, 'from yarn_api_client.node_manager import NodeManager\n')]
|
import pypika.terms
from pypika import *
from pypika import Field
from pypika.utils import ignore_copy
from frappe.query_builder.terms import ParameterizedFunction, ParameterizedValueWrapper
from frappe.query_builder.utils import (
Column,
DocType,
get_query_builder,
patch_query_aggregation,
patch_query_execute,
)
pypika.terms.ValueWrapper = ParameterizedValueWrapper
pypika.terms.Function = ParameterizedFunction
# * Overrides the field() method and replaces it with a `PseudoColumn` 'field' for consistency
pypika.queries.Selectable.__getattr__ = ignore_copy(lambda table, x: Field(x, table=table))
pypika.queries.Selectable.__getitem__ = ignore_copy(lambda table, x: Field(x, table=table))
pypika.queries.Selectable.field = pypika.terms.PseudoColumn("field")
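# With the patches above, attribute and item access on a selectable yield a
# ``Field`` bound to that table (names below are illustrative):
#
#   ToDo = DocType("ToDo")
#   query = frappe.qb.from_(ToDo).select(ToDo.name).where(ToDo.owner == "Administrator")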
|
[
"pypika.Field"
] |
[((592, 613), 'pypika.Field', 'Field', (['x'], {'table': 'table'}), '(x, table=table)\n', (597, 613), False, 'from pypika import Field\n'), ((684, 705), 'pypika.Field', 'Field', (['x'], {'table': 'table'}), '(x, table=table)\n', (689, 705), False, 'from pypika import Field\n')]
|
import json
# load the mission definition and echo it
with open('FlyMission.json', 'r') as f:
    l = json.load(f)
print(l)
|
[
"json.load"
] |
[((50, 62), 'json.load', 'json.load', (['f'], {}), '(f)\n', (59, 62), False, 'import json\n')]
|
# This file is used to configure the behavior of pytest when using the Astropy
# test infrastructure. It needs to live inside the package in order for it to
# get picked up when running the tests inside an interpreter using
# packagename.test
from copy import copy
import numpy as np
import pytest
from astropy import units as u
from astropy.modeling import models
from specutils.spectra import Spectrum1D
try:
from pytest_astropy_header.display import PYTEST_HEADER_MODULES, TESTED_VERSIONS
ASTROPY_HEADER = True
except ImportError:
ASTROPY_HEADER = False
def pytest_configure(config):
if ASTROPY_HEADER:
config.option.astropy_header = True
# Customize the following lines to add/remove entries from the list of
# packages for which version numbers are displayed when running the tests.
PYTEST_HEADER_MODULES.pop('Pandas', None)
PYTEST_HEADER_MODULES['gwcs'] = 'gwcs'
del PYTEST_HEADER_MODULES['h5py']
del PYTEST_HEADER_MODULES['Matplotlib']
# Use ASDF schema tester plugin if ASDF is installed
from importlib.util import find_spec
if find_spec('asdf') is not None:
PYTEST_HEADER_MODULES['Asdf'] = 'asdf'
from specutils import __version__
TESTED_VERSIONS['specutils'] = __version__
class SpectraExamples:
"""
    The ``SpectraExamples`` class is a *container class* that has
    several examples of simple spectra that are to be used in the tests
    (e.g., arithmetic tests, smoothing tests, etc.).
The purpose of this being a test class instead of using a `Spectrum1D`
directly is that it contains both the `Spectrum1D` object and the flux
that was used to *create* the Spectrum. That's for tests that ensure
the simpler operations just on the flux arrays are carried through to
the `Spectrum1D` operations.
    Each of the spectra is created from a base noise-less spectrum
    constructed from 4 Gaussians and a ramp. Several example spectra are
    then derived from it, most with their own instance of gaussian random noise.
    1. s1_um_mJy_e1 - 4 Gaussians + ramp with one instance of noise
dispersion: um, flux: mJy
2. s1_um_mJy_e2 - same as 1, but with a different instance of noise
dispersion: um, flux: mJy
    3. s1_AA_mJy_e3 - same flux as 1, but with the dispersion in Angstroms
dispersion: Angstroms, flux: mJy
    4. s1_AA_nJy_e4 - same as 1, but with a fourth instance of noise
dispersion: Angstroms, flux: nJy
5. s1_um_mJy_e1_masked - same as 1, but with a random set of pixels
masked.
6. s1_um_mJy_e1_desc - same as 1, but with the spectral axis in
descending rather than ascending order.
"""
def __init__(self):
#
# Create the base wavelengths and flux
#
self.wavelengths_um = np.linspace(0.4, 1.05, 100)
g1 = models.Gaussian1D(amplitude=2000, mean=0.56, stddev=0.01)
g2 = models.Gaussian1D(amplitude=500, mean=0.62, stddev=0.02)
g3 = models.Gaussian1D(amplitude=-400, mean=0.80, stddev=0.02)
g4 = models.Gaussian1D(amplitude=-350, mean=0.52, stddev=0.01)
ramp = models.Linear1D(slope=300, intercept=0.0)
self.base_flux = (g1(self.wavelengths_um) + g2(self.wavelengths_um) +
g3(self.wavelengths_um) + g4(self.wavelengths_um) +
ramp(self.wavelengths_um) + 1000)
#
# Initialize the seed so the random numbers are not quite as random
#
np.random.seed(42)
#
# Create two spectra with the only difference in the instance of noise
#
self._flux_e1 = self.base_flux + 400 * np.random.random(self.base_flux.shape)
self._s1_um_mJy_e1 = Spectrum1D(spectral_axis=self.wavelengths_um * u.um,
flux=self._flux_e1 * u.mJy)
self._flux_e2 = self.base_flux + 400 * np.random.random(self.base_flux.shape)
self._s1_um_mJy_e2 = Spectrum1D(spectral_axis=self.wavelengths_um * u.um,
flux=self._flux_e2 * u.mJy)
#
# Create one spectrum with the same flux but in angstrom units
#
self.wavelengths_AA = self.wavelengths_um * 10000
self._s1_AA_mJy_e3 = Spectrum1D(spectral_axis=self.wavelengths_AA * u.AA,
flux=self._flux_e1 * u.mJy)
#
# Create one spectrum with the same flux but in angstrom units and nJy
#
self._flux_e4 = (self.base_flux + 400 * np.random.random(self.base_flux.shape)) * 1000000
self._s1_AA_nJy_e4 = Spectrum1D(spectral_axis=self.wavelengths_AA * u.AA,
flux=self._flux_e4 * u.nJy)
#
# Create one spectrum like 1 but with a mask
#
self._s1_um_mJy_e1_masked = copy(self._s1_um_mJy_e1) # SHALLOW copy - the data are shared with the above non-masked case # noqa
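        # randn(...) + 1 > 0 holds with probability P(Z > -1), about 0.84 for a
        # standard normal, so roughly 84% of the pixels end up flagged in this mask.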
self._s1_um_mJy_e1_masked.mask = (np.random.randn(*self.base_flux.shape) + 1) > 0
# Create a spectrum like 1, but with descending spectral axis
self._s1_um_mJy_e1_desc = Spectrum1D(spectral_axis=self.wavelengths_um[::-1] * u.um,
flux=self._flux_e1[::-1] * u.mJy)
@property
def s1_um_mJy_e1(self):
return self._s1_um_mJy_e1
@property
def s1_um_mJy_e1_flux(self):
return self._flux_e1
@property
def s1_um_mJy_e2(self):
return self._s1_um_mJy_e2
@property
def s1_um_mJy_e2_flux(self):
return self._flux_e2
@property
def s1_AA_mJy_e3(self):
return self._s1_AA_mJy_e3
@property
def s1_AA_mJy_e3_flux(self):
return self._flux_e1
@property
def s1_AA_nJy_e4(self):
return self._s1_AA_nJy_e4
@property
def s1_AA_nJy_e4_flux(self):
return self._flux_e4
@property
def s1_um_mJy_e1_masked(self):
return self._s1_um_mJy_e1_masked
@property
def s1_um_mJy_e1_desc(self):
return self._s1_um_mJy_e1_desc
@pytest.fixture
def simulated_spectra():
"""
    This function is called as a pytest fixture by the tests.
Parameters
----------
N/A
Return
------
    ``SpectraExamples``
An instance of the SpectraExamples class.
Examples
--------
This fixture can be used in a test as:
```
    def test_add_spectra(simulated_spectra):
        # Get the numpy array of data
        flux1 = simulated_spectra.s1_um_mJy_e1_flux
        flux2 = simulated_spectra.s1_um_mJy_e2_flux
        flux3 = flux1 + flux2
        # Calculate using the spectrum1d/nddata code
        spec3 = simulated_spectra.s1_um_mJy_e1 + simulated_spectra.s1_um_mJy_e2
assert np.allclose(spec3.flux.value, flux3)
```
"""
return SpectraExamples()
|
[
"numpy.random.seed",
"astropy.modeling.models.Linear1D",
"numpy.random.randn",
"astropy.modeling.models.Gaussian1D",
"importlib.util.find_spec",
"specutils.spectra.Spectrum1D",
"copy.copy",
"numpy.random.random",
"numpy.linspace",
"pytest_astropy_header.display.PYTEST_HEADER_MODULES.pop"
] |
[((845, 886), 'pytest_astropy_header.display.PYTEST_HEADER_MODULES.pop', 'PYTEST_HEADER_MODULES.pop', (['"""Pandas"""', 'None'], {}), "('Pandas', None)\n", (870, 886), False, 'from pytest_astropy_header.display import PYTEST_HEADER_MODULES, TESTED_VERSIONS\n'), ((2986, 3013), 'numpy.linspace', 'np.linspace', (['(0.4)', '(1.05)', '(100)'], {}), '(0.4, 1.05, 100)\n', (2997, 3013), True, 'import numpy as np\n'), ((3028, 3085), 'astropy.modeling.models.Gaussian1D', 'models.Gaussian1D', ([], {'amplitude': '(2000)', 'mean': '(0.56)', 'stddev': '(0.01)'}), '(amplitude=2000, mean=0.56, stddev=0.01)\n', (3045, 3085), False, 'from astropy.modeling import models\n'), ((3099, 3155), 'astropy.modeling.models.Gaussian1D', 'models.Gaussian1D', ([], {'amplitude': '(500)', 'mean': '(0.62)', 'stddev': '(0.02)'}), '(amplitude=500, mean=0.62, stddev=0.02)\n', (3116, 3155), False, 'from astropy.modeling import models\n'), ((3169, 3225), 'astropy.modeling.models.Gaussian1D', 'models.Gaussian1D', ([], {'amplitude': '(-400)', 'mean': '(0.8)', 'stddev': '(0.02)'}), '(amplitude=-400, mean=0.8, stddev=0.02)\n', (3186, 3225), False, 'from astropy.modeling import models\n'), ((3240, 3297), 'astropy.modeling.models.Gaussian1D', 'models.Gaussian1D', ([], {'amplitude': '(-350)', 'mean': '(0.52)', 'stddev': '(0.01)'}), '(amplitude=-350, mean=0.52, stddev=0.01)\n', (3257, 3297), False, 'from astropy.modeling import models\n'), ((3313, 3354), 'astropy.modeling.models.Linear1D', 'models.Linear1D', ([], {'slope': '(300)', 'intercept': '(0.0)'}), '(slope=300, intercept=0.0)\n', (3328, 3354), False, 'from astropy.modeling import models\n'), ((3678, 3696), 'numpy.random.seed', 'np.random.seed', (['(42)'], {}), '(42)\n', (3692, 3696), True, 'import numpy as np\n'), ((3913, 3998), 'specutils.spectra.Spectrum1D', 'Spectrum1D', ([], {'spectral_axis': '(self.wavelengths_um * u.um)', 'flux': '(self._flux_e1 * u.mJy)'}), '(spectral_axis=self.wavelengths_um * u.um, flux=self._flux_e1 * u.mJy\n )\n', (3923, 3998), False, 'from specutils.spectra import Spectrum1D\n'), ((4150, 4235), 'specutils.spectra.Spectrum1D', 'Spectrum1D', ([], {'spectral_axis': '(self.wavelengths_um * u.um)', 'flux': '(self._flux_e2 * u.mJy)'}), '(spectral_axis=self.wavelengths_um * u.um, flux=self._flux_e2 * u.mJy\n )\n', (4160, 4235), False, 'from specutils.spectra import Spectrum1D\n'), ((4451, 4536), 'specutils.spectra.Spectrum1D', 'Spectrum1D', ([], {'spectral_axis': '(self.wavelengths_AA * u.AA)', 'flux': '(self._flux_e1 * u.mJy)'}), '(spectral_axis=self.wavelengths_AA * u.AA, flux=self._flux_e1 * u.mJy\n )\n', (4461, 4536), False, 'from specutils.spectra import Spectrum1D\n'), ((4800, 4885), 'specutils.spectra.Spectrum1D', 'Spectrum1D', ([], {'spectral_axis': '(self.wavelengths_AA * u.AA)', 'flux': '(self._flux_e4 * u.nJy)'}), '(spectral_axis=self.wavelengths_AA * u.AA, flux=self._flux_e4 * u.nJy\n )\n', (4810, 4885), False, 'from specutils.spectra import Spectrum1D\n'), ((5031, 5055), 'copy.copy', 'copy', (['self._s1_um_mJy_e1'], {}), '(self._s1_um_mJy_e1)\n', (5035, 5055), False, 'from copy import copy\n'), ((5328, 5425), 'specutils.spectra.Spectrum1D', 'Spectrum1D', ([], {'spectral_axis': '(self.wavelengths_um[::-1] * u.um)', 'flux': '(self._flux_e1[::-1] * u.mJy)'}), '(spectral_axis=self.wavelengths_um[::-1] * u.um, flux=self.\n _flux_e1[::-1] * u.mJy)\n', (5338, 5425), False, 'from specutils.spectra import Spectrum1D\n'), ((1141, 1158), 'importlib.util.find_spec', 'find_spec', (['"""asdf"""'], {}), "('asdf')\n", (1150, 1158), False, 'from importlib.util import find_spec\n'), ((3845, 3883), 'numpy.random.random', 'np.random.random', (['self.base_flux.shape'], {}), '(self.base_flux.shape)\n', (3861, 3883), True, 'import numpy as np\n'), ((4082, 4120), 'numpy.random.random', 'np.random.random', (['self.base_flux.shape'], {}), '(self.base_flux.shape)\n', (4098, 4120), True, 'import numpy as np\n'), ((5175, 5213), 'numpy.random.randn', 'np.random.randn', (['*self.base_flux.shape'], {}), '(*self.base_flux.shape)\n', (5190, 5213), True, 'import numpy as np\n'), ((4721, 4759), 'numpy.random.random', 'np.random.random', (['self.base_flux.shape'], {}), '(self.base_flux.shape)\n', (4737, 4759), True, 'import numpy as np\n')]
|
'''
Deep Q-learning approach to the cartpole problem
using OpenAI's gym environment.
As part of the basic series on reinforcement learning @
https://github.com/vmayoral/basic_reinforcement_learning
Inspired by https://github.com/VinF/deer
@author: <NAME> <<EMAIL>>
'''
import gym
import random
import pandas
import numpy as np
from keras.models import Model
from keras.layers import Input, Layer, Dense, Flatten, merge, Activation, Convolution2D, MaxPooling2D, Reshape
import theano.tensor as T
import sys
import logging
from theano import config
class QNetwork(object):
""" All the Q-networks classes should inherit this interface.
Parameters
-----------
environment : object from class Environment
The environment linked to the Q-network
batch_size : int
Number of tuples taken into account for each iteration of gradient descent
"""
def __init__(self, environment, batch_size):
self._environment = environment
self._df = 0.9
self._lr = 0.0002
self._input_dimensions = self._environment.inputDimensions()
self._n_actions = self._environment.nActions()
self._batch_size = batch_size
def train(self, states, actions, rewards, nextStates, terminals):
""" This method performs the Bellman iteration for one batch of tuples.
"""
raise NotImplementedError()
def chooseBestAction(self, state):
""" Get the best action for a belief state
"""
raise NotImplementedError()
def qValues(self, state):
""" Get the q value for one belief state
"""
raise NotImplementedError()
def setLearningRate(self, lr):
""" Setting the learning rate
Parameters
-----------
lr : float
            The learning rate that has to be set
"""
self._lr = lr
def setDiscountFactor(self, df):
""" Setting the discount factor
Parameters
-----------
df : float
            The discount factor that has to be set
"""
if df < 0. or df > 1.:
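            # NOTE: AgentError is not defined in this snippet; presumably it comes
            # from the deer codebase this example was adapted from.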
raise AgentError("The discount factor should be in [0,1]")
self._df = df
def learningRate(self):
""" Getting the learning rate
"""
return self._lr
def discountFactor(self):
""" Getting the discount factor
"""
return self._df
class NN():
"""
Deep Q-learning network using Keras
Parameters
-----------
batch_size : int
Number of tuples taken into account for each iteration of gradient descent
input_dimensions :
n_actions :
random_state : numpy random number generator
"""
def __init__(self, batch_size, input_dimensions, n_actions, random_state):
self._input_dimensions=input_dimensions
self._batch_size=batch_size
self._random_state=random_state
self._n_actions=n_actions
def _buildDQN(self):
"""
Build a network consistent with each type of inputs
"""
layers=[]
outs_conv=[]
inputs=[]
for i, dim in enumerate(self._input_dimensions):
nfilter=[]
# - observation[i] is a FRAME
if len(dim) == 3: #FIXME
input = Input(shape=(dim[0],dim[1],dim[2]))
inputs.append(input)
#reshaped=Reshape((dim[0],dim[1],dim[2]), input_shape=(dim[0],dim[1]))(input)
x = Convolution2D(32, 8, 8, border_mode='valid')(input)
x = MaxPooling2D(pool_size=(4, 4), strides=None, border_mode='valid')(x)
x = Convolution2D(64, 4, 4, border_mode='valid')(x)
x = MaxPooling2D(pool_size=(2, 2), strides=None, border_mode='valid')(x)
x = Convolution2D(64, 3, 3)(x)
out = Flatten()(x)
# - observation[i] is a VECTOR
elif len(dim) == 2 and dim[0] > 3: #FIXME
input = Input(shape=(dim[0],dim[1]))
inputs.append(input)
reshaped=Reshape((1,dim[0],dim[1]), input_shape=(dim[0],dim[1]))(input)
x = Convolution2D(16, 2, 1, border_mode='valid')(reshaped)
x = Convolution2D(16, 2, 2)(x)
out = Flatten()(x)
# - observation[i] is a SCALAR -
else:
if dim[0] > 3:
# this returns a tensor
input = Input(shape=(dim[0],))
inputs.append(input)
reshaped=Reshape((1,1,dim[0]), input_shape=(dim[0],))(input)
x = Convolution2D(8, 1, 2, border_mode='valid')(reshaped)
x = Convolution2D(8, 1, 2)(x)
out = Flatten()(x)
else:
if(len(dim) == 2):
# this returns a tensor
input = Input(shape=(dim[1],dim[0]))
inputs.append(input)
out = Flatten()(input)
if(len(dim) == 1):
input = Input(shape=(dim[0],))
inputs.append(input)
out=input
outs_conv.append(out)
if len(outs_conv)>1:
x = merge(outs_conv, mode='concat')
else:
            x = outs_conv[0]
# we stack a deep fully-connected network on top
x = Dense(50, activation='relu')(x)
x = Dense(20, activation='relu')(x)
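        # linear output head: one Q-value per action (no activation, since Q-values are unbounded)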
out = Dense(self._n_actions)(x)
model = Model(input=inputs, output=out)
layers=model.layers
# Grab all the parameters together.
params = [ param
for layer in layers
for param in layer.trainable_weights ]
return model, params
from warnings import warn
from keras.optimizers import SGD,RMSprop
class MyQNetwork(QNetwork):
"""
Deep Q-learning network using Keras
Parameters
-----------
environment : object from class Environment
rho : float
Parameter for rmsprop. Default : 0.9
rms_epsilon : float
Parameter for rmsprop. Default : 0.0001
momentum : float
Default : 0
clip_delta : float
Not implemented.
freeze_interval : int
        Period during which the target network is frozen and after which the target network is updated. Default : 1000
batch_size : int
Number of tuples taken into account for each iteration of gradient descent. Default : 32
network_type : str
Not used. Default : None
update_rule: str
{sgd,rmsprop}. Default : rmsprop
batch_accumulator : str
{sum,mean}. Default : sum
random_state : numpy random number generator
double_Q : bool, optional
Activate or not the double_Q learning.
        More information in: <NAME> et al. (2015) - Deep Reinforcement Learning with Double Q-learning.
neural_network : object, optional
default is deer.qnetworks.NN_keras
"""
def __init__(self, environment, rho=0.9, rms_epsilon=0.0001, momentum=0, clip_delta=0, freeze_interval=1000, batch_size=32, network_type=None, update_rule="rmsprop", batch_accumulator="sum", random_state=np.random.RandomState(), double_Q=False, neural_network=NN):
""" Initialize environment
"""
QNetwork.__init__(self,environment, batch_size)
self._rho = rho
self._rms_epsilon = rms_epsilon
self._momentum = momentum
#self.clip_delta = clip_delta
self._freeze_interval = freeze_interval
self._double_Q = double_Q
self._random_state = random_state
self.update_counter = 0
Q_net = neural_network(self._batch_size, self._input_dimensions, self._n_actions, self._random_state)
self.q_vals, self.params = Q_net._buildDQN()
if update_rule == 'deepmind_rmsprop':
warn("The update_rule used is rmsprop")
update_rule='rmsprop'
if (update_rule=="sgd"):
optimizer = SGD(lr=self._lr, momentum=momentum, nesterov=False)
elif (update_rule=="rmsprop"):
optimizer = RMSprop(lr=self._lr, rho=self._rho, epsilon=self._rms_epsilon)
else:
raise Exception('The update_rule '+update_rule+ 'is not'
'implemented.')
self.q_vals.compile(optimizer=optimizer, loss='mse')
self.next_q_vals, self.next_params = Q_net._buildDQN()
self.next_q_vals.compile(optimizer='rmsprop', loss='mse') #The parameters do not matter since training is done on self.q_vals
self.q_vals.summary()
# self.next_q_vals.summary()
self._resetQHat()
def toDump(self):
# FIXME
return None,None
def train(self, states_val, actions_val, rewards_val, next_states_val, terminals_val):
"""
Train one batch.
1. Set shared variable in states_shared, next_states_shared, actions_shared, rewards_shared, terminals_shared
2. perform batch training
Parameters
-----------
states_val : list of batch_size * [list of max_num_elements* [list of k * [element 2D,1D or scalar]])
actions_val : b x 1 numpy array of integers
rewards_val : b x 1 numpy array
next_states_val : list of batch_size * [list of max_num_elements* [list of k * [element 2D,1D or scalar]])
terminals_val : b x 1 numpy boolean array (currently ignored)
Returns
-------
Average loss of the batch training
Individual losses for each tuple
"""
if self.update_counter % self._freeze_interval == 0:
self._resetQHat()
next_q_vals = self.next_q_vals.predict(next_states_val.tolist())
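        # Double Q-learning: pick the argmax action with the online network but
        # evaluate it with the target network, which reduces overestimation bias.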
if(self._double_Q==True):
next_q_vals_current_qnet=self.q_vals.predict(next_states_val.tolist())
argmax_next_q_vals=np.argmax(next_q_vals_current_qnet, axis=1)
max_next_q_vals=next_q_vals[np.arange(self._batch_size),argmax_next_q_vals].reshape((-1, 1))
else:
max_next_q_vals=np.max(next_q_vals, axis=1, keepdims=True)
not_terminals=np.ones_like(terminals_val) - terminals_val
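        # One-step Bellman target: r + df * max_a' Q_target(s', a'), zeroed at terminal states.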
target = rewards_val + not_terminals * self._df * max_next_q_vals.reshape((-1))
q_vals=self.q_vals.predict(states_val.tolist())
# In order to obtain the individual losses, we predict the current Q_vals and calculate the diff
q_val=q_vals[np.arange(self._batch_size), actions_val.reshape((-1,))]#.reshape((-1, 1))
diff = - q_val + target
loss_ind=0.5*pow(diff,2)
q_vals[ np.arange(self._batch_size), actions_val.reshape((-1,)) ] = target
# Is it possible to use something more flexible than this?
# Only some elements of next_q_vals are actual value that I target.
# My loss should only take these into account.
# Workaround here is that many values are already "exact" in this update
loss=self.q_vals.train_on_batch(states_val.tolist() , q_vals )
self.update_counter += 1
return np.sqrt(loss),loss_ind
def qValues(self, state_val):
""" Get the q values for one belief state
Arguments
---------
state_val : one belief state
Returns
-------
The q value for the provided belief state
"""
return self.q_vals.predict([np.expand_dims(state,axis=0) for state in state_val])[0]
def chooseBestAction(self, state):
""" Get the best action for a belief state
Arguments
---------
state : one belief state
Returns
-------
The best action : int
"""
q_vals = self.qValues(state)
return np.argmax(q_vals)
def _resetQHat(self):
for i,(param,next_param) in enumerate(zip(self.params, self.next_params)):
next_param.set_value(param.get_value())
from deer.base_classes import Environment
import copy
class MyEnv(Environment):
def __init__(self, rng):
""" Initialize environment.
Arguments:
rng - the numpy random number generator
"""
# Defining the type of environment
self.env = gym.make('CartPole-v0')
self._last_observation = self.env.reset()
self.is_terminal=False
self._input_dim = [(1,), (1,), (1,), (1,)] # self.env.observation_space.shape is equal to 4
# and we use only the current value in the belief state
def act(self, action):
""" Simulate one time step in the environment.
"""
self._last_observation, reward, self.is_terminal, info = self.env.step(action)
if (self.mode==0): # Show the policy only at test time
self.env.render()
return reward
def reset(self, mode=0):
""" Reset environment for a new episode.
Arguments:
Mode : int
-1 corresponds to training and 0 to test
"""
# Reset initial observation to a random x and theta
self._last_observation = self.env.reset()
self.is_terminal=False
self.mode=mode
return self._last_observation
def inTerminalState(self):
"""Tell whether the environment reached a terminal state after the last transition (i.e. the last transition
        that occurred was terminal).
"""
return self.is_terminal
def inputDimensions(self):
return self._input_dim
def nActions(self):
return 2 #Would be useful to have this directly in gym : self.env.action_space.shape
def observe(self):
return copy.deepcopy(self._last_observation)
import deer.experiment.base_controllers as bc
from deer.default_parser import process_args
from deer.agent import NeuralAgent
class Defaults:
# ----------------------
# Experiment Parameters
# ----------------------
STEPS_PER_EPOCH = 200
EPOCHS = 300
STEPS_PER_TEST = 200
PERIOD_BTW_SUMMARY_PERFS = 10
# ----------------------
# Environment Parameters
# ----------------------
FRAME_SKIP = 1
# ----------------------
# DQN Agent parameters:
# ----------------------
UPDATE_RULE = 'sgd'
BATCH_ACCUMULATOR = 'sum'
LEARNING_RATE = 0.1
LEARNING_RATE_DECAY = 0.99
DISCOUNT = 0.9
DISCOUNT_INC = 1.
DISCOUNT_MAX = 0.95
RMS_DECAY = 0.9
RMS_EPSILON = 0.0001
MOMENTUM = 0
CLIP_DELTA = 1.0
EPSILON_START = 1.0
EPSILON_MIN = 0.2
EPSILON_DECAY = 10000
UPDATE_FREQUENCY = 1
REPLAY_MEMORY_SIZE = 1000000
BATCH_SIZE = 32
NETWORK_TYPE = "General_DQN_0"
FREEZE_INTERVAL = 100
DETERMINISTIC = True
if __name__ == "__main__":
logging.basicConfig(level=logging.INFO)
# --- Parse parameters ---
parameters = process_args(sys.argv[1:], Defaults)
if parameters.deterministic:
rng = np.random.RandomState(12345)
else:
rng = np.random.RandomState()
# --- Instantiate environment ---
env = MyEnv(rng)
# --- Instantiate qnetwork ---
qnetwork = MyQNetwork(
env,
parameters.rms_decay,
parameters.rms_epsilon,
parameters.momentum,
parameters.clip_delta,
parameters.freeze_interval,
parameters.batch_size,
parameters.network_type,
parameters.update_rule,
parameters.batch_accumulator,
rng,
double_Q=True)
# --- Instantiate agent ---
agent = NeuralAgent(
env,
qnetwork,
parameters.replay_memory_size,
max(env.inputDimensions()[i][0] for i in range(len(env.inputDimensions()))),
parameters.batch_size,
rng)
# --- Bind controllers to the agent ---
# For comments, please refer to run_toy_env.py
agent.attach(bc.VerboseController(
evaluate_on='epoch',
periodicity=1))
agent.attach(bc.TrainerController(
evaluate_on='action',
periodicity=parameters.update_frequency,
show_episode_avg_V_value=False,
show_avg_Bellman_residual=False))
agent.attach(bc.LearningRateController(
initial_learning_rate=parameters.learning_rate,
learning_rate_decay=parameters.learning_rate_decay,
periodicity=1))
agent.attach(bc.DiscountFactorController(
initial_discount_factor=parameters.discount,
discount_factor_growth=parameters.discount_inc,
discount_factor_max=parameters.discount_max,
periodicity=1))
agent.attach(bc.EpsilonController(
initial_e=parameters.epsilon_start,
e_decays=parameters.epsilon_decay,
e_min=parameters.epsilon_min,
evaluate_on='action',
periodicity=1,
reset_every='none'))
agent.attach(bc.InterleavedTestEpochController(
id=0,
epoch_length=parameters.steps_per_test,
controllers_to_disable=[0, 1, 2, 3, 4],
periodicity=2,
show_score=True,
summarize_every=parameters.period_btw_summary_perfs))
# --- Run the experiment ---
agent.run(parameters.epochs, parameters.steps_per_epoch)
|
[
"deer.experiment.base_controllers.LearningRateController",
"numpy.argmax",
"keras.models.Model",
"numpy.arange",
"keras.layers.Input",
"keras.layers.Reshape",
"deer.experiment.base_controllers.InterleavedTestEpochController",
"keras.optimizers.SGD",
"deer.experiment.base_controllers.TrainerController",
"keras.layers.Flatten",
"numpy.random.RandomState",
"numpy.max",
"keras.layers.MaxPooling2D",
"copy.deepcopy",
"numpy.ones_like",
"deer.experiment.base_controllers.VerboseController",
"keras.layers.Convolution2D",
"deer.default_parser.process_args",
"keras.optimizers.RMSprop",
"gym.make",
"logging.basicConfig",
"numpy.expand_dims",
"deer.experiment.base_controllers.EpsilonController",
"keras.layers.Dense",
"warnings.warn",
"deer.experiment.base_controllers.DiscountFactorController",
"keras.layers.merge",
"numpy.sqrt"
] |
[((15309, 15348), 'logging.basicConfig', 'logging.basicConfig', ([], {'level': 'logging.INFO'}), '(level=logging.INFO)\n', (15328, 15348), False, 'import logging\n'), ((15398, 15434), 'deer.default_parser.process_args', 'process_args', (['sys.argv[1:]', 'Defaults'], {}), '(sys.argv[1:], Defaults)\n', (15410, 15434), False, 'from deer.default_parser import process_args\n'), ((5761, 5792), 'keras.models.Model', 'Model', ([], {'input': 'inputs', 'output': 'out'}), '(input=inputs, output=out)\n', (5766, 5792), False, 'from keras.models import Model\n'), ((7454, 7477), 'numpy.random.RandomState', 'np.random.RandomState', ([], {}), '()\n', (7475, 7477), True, 'import numpy as np\n'), ((12211, 12228), 'numpy.argmax', 'np.argmax', (['q_vals'], {}), '(q_vals)\n', (12220, 12228), True, 'import numpy as np\n'), ((12705, 12728), 'gym.make', 'gym.make', (['"""CartPole-v0"""'], {}), "('CartPole-v0')\n", (12713, 12728), False, 'import gym\n'), ((14211, 14248), 'copy.deepcopy', 'copy.deepcopy', (['self._last_observation'], {}), '(self._last_observation)\n', (14224, 14248), False, 'import copy\n'), ((15482, 15510), 'numpy.random.RandomState', 'np.random.RandomState', (['(12345)'], {}), '(12345)\n', (15503, 15510), True, 'import numpy as np\n'), ((15535, 15558), 'numpy.random.RandomState', 'np.random.RandomState', ([], {}), '()\n', (15556, 15558), True, 'import numpy as np\n'), ((16401, 16457), 'deer.experiment.base_controllers.VerboseController', 'bc.VerboseController', ([], {'evaluate_on': '"""epoch"""', 'periodicity': '(1)'}), "(evaluate_on='epoch', periodicity=1)\n", (16421, 16457), True, 'import deer.experiment.base_controllers as bc\n'), ((16495, 16652), 'deer.experiment.base_controllers.TrainerController', 'bc.TrainerController', ([], {'evaluate_on': '"""action"""', 'periodicity': 'parameters.update_frequency', 'show_episode_avg_V_value': '(False)', 'show_avg_Bellman_residual': '(False)'}), "(evaluate_on='action', periodicity=parameters.\n update_frequency, show_episode_avg_V_value=False,\n show_avg_Bellman_residual=False)\n", (16515, 16652), True, 'import deer.experiment.base_controllers as bc\n'), ((16698, 16842), 'deer.experiment.base_controllers.LearningRateController', 'bc.LearningRateController', ([], {'initial_learning_rate': 'parameters.learning_rate', 'learning_rate_decay': 'parameters.learning_rate_decay', 'periodicity': '(1)'}), '(initial_learning_rate=parameters.learning_rate,\n learning_rate_decay=parameters.learning_rate_decay, periodicity=1)\n', (16723, 16842), True, 'import deer.experiment.base_controllers as bc\n'), ((16883, 17072), 'deer.experiment.base_controllers.DiscountFactorController', 'bc.DiscountFactorController', ([], {'initial_discount_factor': 'parameters.discount', 'discount_factor_growth': 'parameters.discount_inc', 'discount_factor_max': 'parameters.discount_max', 'periodicity': '(1)'}), '(initial_discount_factor=parameters.discount,\n discount_factor_growth=parameters.discount_inc, discount_factor_max=\n parameters.discount_max, periodicity=1)\n', (16910, 17072), True, 'import deer.experiment.base_controllers as bc\n'), ((17116, 17304), 'deer.experiment.base_controllers.EpsilonController', 'bc.EpsilonController', ([], {'initial_e': 'parameters.epsilon_start', 'e_decays': 'parameters.epsilon_decay', 'e_min': 'parameters.epsilon_min', 'evaluate_on': '"""action"""', 'periodicity': '(1)', 'reset_every': '"""none"""'}), "(initial_e=parameters.epsilon_start, e_decays=\n parameters.epsilon_decay, e_min=parameters.epsilon_min, evaluate_on=\n 'action', periodicity=1, reset_every='none')\n", (17136, 17304), True, 'import deer.experiment.base_controllers as bc\n'), ((17367, 17580), 'deer.experiment.base_controllers.InterleavedTestEpochController', 'bc.InterleavedTestEpochController', ([], {'id': '(0)', 'epoch_length': 'parameters.steps_per_test', 'controllers_to_disable': '[0, 1, 2, 3, 4]', 'periodicity': '(2)', 'show_score': '(True)', 'summarize_every': 'parameters.period_btw_summary_perfs'}), '(id=0, epoch_length=parameters.\n steps_per_test, controllers_to_disable=[0, 1, 2, 3, 4], periodicity=2,\n show_score=True, summarize_every=parameters.period_btw_summary_perfs)\n', (17400, 17580), True, 'import deer.experiment.base_controllers as bc\n'), ((5475, 5506), 'keras.layers.merge', 'merge', (['outs_conv'], {'mode': '"""concat"""'}), "(outs_conv, mode='concat')\n", (5480, 5506), False, 'from keras.layers import Input, Layer, Dense, Flatten, merge, Activation, Convolution2D, MaxPooling2D, Reshape\n'), ((5628, 5656), 'keras.layers.Dense', 'Dense', (['(50)'], {'activation': '"""relu"""'}), "(50, activation='relu')\n", (5633, 5656), False, 'from keras.layers import Input, Layer, Dense, Flatten, merge, Activation, Convolution2D, MaxPooling2D, Reshape\n'), ((5672, 5700), 'keras.layers.Dense', 'Dense', (['(20)'], {'activation': '"""relu"""'}), "(20, activation='relu')\n", (5677, 5700), False, 'from keras.layers import Input, Layer, Dense, Flatten, merge, Activation, Convolution2D, MaxPooling2D, Reshape\n'), ((5718, 5740), 'keras.layers.Dense', 'Dense', (['self._n_actions'], {}), '(self._n_actions)\n', (5723, 5740), False, 'from keras.layers import Input, Layer, Dense, Flatten, merge, Activation, Convolution2D, MaxPooling2D, Reshape\n'), ((8176, 8215), 'warnings.warn', 'warn', (['"""The update_rule used is rmsprop"""'], {}), "('The update_rule used is rmsprop')\n", (8180, 8215), False, 'from warnings import warn\n'), ((8328, 8379), 'keras.optimizers.SGD', 'SGD', ([], {'lr': 'self._lr', 'momentum': 'momentum', 'nesterov': '(False)'}), '(lr=self._lr, momentum=momentum, nesterov=False)\n', (8331, 8379), False, 'from keras.optimizers import SGD, RMSprop\n'), ((10257, 10300), 'numpy.argmax', 'np.argmax', (['next_q_vals_current_qnet'], {'axis': '(1)'}), '(next_q_vals_current_qnet, axis=1)\n', (10266, 10300), True, 'import numpy as np\n'), ((10448, 10490), 'numpy.max', 'np.max', (['next_q_vals'], {'axis': '(1)', 'keepdims': '(True)'}), '(next_q_vals, axis=1, keepdims=True)\n', (10454, 10490), True, 'import numpy as np\n'), ((10514, 10541), 'numpy.ones_like', 'np.ones_like', (['terminals_val'], {}), '(terminals_val)\n', (10526, 10541), True, 'import numpy as np\n'), ((11542, 11555), 'numpy.sqrt', 'np.sqrt', (['loss'], {}), '(loss)\n', (11549, 11555), True, 'import numpy as np\n'), ((3350, 3387), 'keras.layers.Input', 'Input', ([], {'shape': '(dim[0], dim[1], dim[2])'}), '(shape=(dim[0], dim[1], dim[2]))\n', (3355, 3387), False, 'from keras.layers import Input, Layer, Dense, Flatten, merge, Activation, Convolution2D, MaxPooling2D, Reshape\n'), ((8443, 8505), 'keras.optimizers.RMSprop', 'RMSprop', ([], {'lr': 'self._lr', 'rho': 'self._rho', 'epsilon': 'self._rms_epsilon'}), '(lr=self._lr, rho=self._rho, epsilon=self._rms_epsilon)\n', (8450, 8505), False, 'from keras.optimizers import SGD, RMSprop\n'), ((10847, 10874), 'numpy.arange', 'np.arange', (['self._batch_size'], {}), '(self._batch_size)\n', (10856, 10874), True, 'import numpy as np\n'), ((11030, 11057), 'numpy.arange', 'np.arange', (['self._batch_size'], {}), '(self._batch_size)\n', (11039, 11057), True, 'import numpy as np\n'), ((3537, 3581), 'keras.layers.Convolution2D', 'Convolution2D', (['(32)', '(8)', '(8)'], {'border_mode': '"""valid"""'}), "(32, 8, 8, border_mode='valid')\n", (3550, 3581), False, 'from keras.layers import Input, Layer, Dense, Flatten, merge, Activation, Convolution2D, MaxPooling2D, Reshape\n'), ((3609, 3674), 'keras.layers.MaxPooling2D', 'MaxPooling2D', ([], {'pool_size': '(4, 4)', 'strides': 'None', 'border_mode': '"""valid"""'}), "(pool_size=(4, 4), strides=None, border_mode='valid')\n", (3621, 3674), False, 'from keras.layers import Input, Layer, Dense, Flatten, merge, Activation, Convolution2D, MaxPooling2D, Reshape\n'), ((3698, 3742), 'keras.layers.Convolution2D', 'Convolution2D', (['(64)', '(4)', '(4)'], {'border_mode': '"""valid"""'}), "(64, 4, 4, border_mode='valid')\n", (3711, 3742), False, 'from keras.layers import Input, Layer, Dense, Flatten, merge, Activation, Convolution2D, MaxPooling2D, Reshape\n'), ((3766, 3831), 'keras.layers.MaxPooling2D', 'MaxPooling2D', ([], {'pool_size': '(2, 2)', 'strides': 'None', 'border_mode': '"""valid"""'}), "(pool_size=(2, 2), strides=None, border_mode='valid')\n", (3778, 3831), False, 'from keras.layers import Input, Layer, Dense, Flatten, merge, Activation, Convolution2D, MaxPooling2D, Reshape\n'), ((3855, 3878), 'keras.layers.Convolution2D', 'Convolution2D', (['(64)', '(3)', '(3)'], {}), '(64, 3, 3)\n', (3868, 3878), False, 'from keras.layers import Input, Layer, Dense, Flatten, merge, Activation, Convolution2D, MaxPooling2D, Reshape\n'), ((3921, 3930), 'keras.layers.Flatten', 'Flatten', ([], {}), '()\n', (3928, 3930), False, 'from keras.layers import Input, Layer, Dense, Flatten, merge, Activation, Convolution2D, MaxPooling2D, Reshape\n'), ((4072, 4101), 'keras.layers.Input', 'Input', ([], {'shape': '(dim[0], dim[1])'}), '(shape=(dim[0], dim[1]))\n', (4077, 4101), False, 'from keras.layers import Input, Layer, Dense, Flatten, merge, Activation, Convolution2D, MaxPooling2D, Reshape\n'), ((11857, 11886), 'numpy.expand_dims', 'np.expand_dims', (['state'], {'axis': '(0)'}), '(state, axis=0)\n', (11871, 11886), True, 'import numpy as np\n'), ((4163, 4221), 'keras.layers.Reshape', 'Reshape', (['(1, dim[0], dim[1])'], {'input_shape': '(dim[0], dim[1])'}), '((1, dim[0], dim[1]), input_shape=(dim[0], dim[1]))\n', (4170, 4221), False, 'from keras.layers import Input, Layer, Dense, Flatten, merge, Activation, Convolution2D, MaxPooling2D, Reshape\n'), ((4246, 4290), 'keras.layers.Convolution2D', 'Convolution2D', (['(16)', '(2)', '(1)'], {'border_mode': '"""valid"""'}), "(16, 2, 1, border_mode='valid')\n", (4259, 4290), False, 'from keras.layers import Input, Layer, Dense, Flatten, merge, Activation, Convolution2D, MaxPooling2D, Reshape\n'), ((4321, 4344), 'keras.layers.Convolution2D', 'Convolution2D', (['(16)', '(2)', '(2)'], {}), '(16, 2, 2)\n', (4334, 4344), False, 'from keras.layers import Input, Layer, Dense, Flatten, merge, Activation, Convolution2D, MaxPooling2D, Reshape\n'), ((4387, 4396), 'keras.layers.Flatten', 'Flatten', ([], {}), '()\n', (4394, 4396), False, 'from keras.layers import Input, Layer, Dense, Flatten, merge, Activation, Convolution2D, MaxPooling2D, Reshape\n'), ((4567, 4589), 'keras.layers.Input', 'Input', ([], {'shape': '(dim[0],)'}), '(shape=(dim[0],))\n', (4572, 4589), False, 'from keras.layers import Input, Layer, Dense, Flatten, merge, Activation, Convolution2D, MaxPooling2D, Reshape\n'), ((4660, 4706), 'keras.layers.Reshape', 'Reshape', (['(1, 1, dim[0])'], {'input_shape': '(dim[0],)'}), '((1, 1, dim[0]), input_shape=(dim[0],))\n', (4667, 4706), False, 'from keras.layers import Input, Layer, Dense, Flatten, merge, Activation, Convolution2D, MaxPooling2D, Reshape\n'), ((4736, 4779), 'keras.layers.Convolution2D', 'Convolution2D', (['(8)', '(1)', '(2)'], {'border_mode': '"""valid"""'}), "(8, 1, 2, border_mode='valid')\n", (4749, 4779), False, 'from keras.layers import Input, Layer, Dense, Flatten, merge, Activation, Convolution2D, MaxPooling2D, Reshape\n'), ((4814, 4836), 'keras.layers.Convolution2D', 'Convolution2D', (['(8)', '(1)', '(2)'], {}), '(8, 1, 2)\n', (4827, 4836), False, 'from keras.layers import Input, Layer, Dense, Flatten, merge, Activation, Convolution2D, MaxPooling2D, Reshape\n'), ((4887, 4896), 'keras.layers.Flatten', 'Flatten', ([], {}), '()\n', (4894, 4896), False, 'from keras.layers import Input, Layer, Dense, Flatten, merge, Activation, Convolution2D, MaxPooling2D, Reshape\n'), ((5079, 5108), 'keras.layers.Input', 'Input', ([], {'shape': '(dim[1], dim[0])'}), '(shape=(dim[1], dim[0]))\n', (5084, 5108), False, 'from keras.layers import Input, Layer, Dense, Flatten, merge, Activation, Convolution2D, MaxPooling2D, Reshape\n'), ((5272, 5294), 'keras.layers.Input', 'Input', ([], {'shape': '(dim[0],)'}), '(shape=(dim[0],))\n', (5277, 5294), False, 'from keras.layers import Input, Layer, Dense, Flatten, merge, Activation, Convolution2D, MaxPooling2D, Reshape\n'), ((10341, 10368), 'numpy.arange', 'np.arange', (['self._batch_size'], {}), '(self._batch_size)\n', (10350, 10368), True, 'import numpy as np\n'), ((5183, 5192), 'keras.layers.Flatten', 'Flatten', ([], {}), '()\n', (5190, 5192), False, 'from keras.layers import Input, Layer, Dense, Flatten, merge, Activation, Convolution2D, MaxPooling2D, Reshape\n')]
|
#!/usr/bin/env python
import sys
sys.exit(0)
|
[
"sys.exit"
] |
[((34, 45), 'sys.exit', 'sys.exit', (['(0)'], {}), '(0)\n', (42, 45), False, 'import sys\n')]
|
from Agent import Agent
from Gaussian_Reward import Gaussian_Reward
from erdos_renyl_model_without_duplicate import MAMAB
import matplotlib.pyplot as plt
import numpy as np
from math import sqrt, log
import networkx as nx
no_iterations = 20000
no_agents = 5
no_bandits = 20
mean = [5.2, 4.1, 9.5, 2.4, 5.9, 6.9, 7.4, 0.5, 4.7, 2.1, 10.5, 1.5, 2.8, 8.8, 3.7, 4.4, 7.8, 3.0, 11.9, 8.3]
#print(len(mean))
variance = [2 for i in range(no_bandits)]
'''
mean = [np.random.random()*12 for i in range(no_bandits) ]
variance = [2 for i in range(no_bandits)]
'''
#mean = np.load("Mean.npy")
#variance = np.load("Variance.npy")
np.save("Agent/Mean.npy", np.array(mean))
np.save("Agent/Variance.npy", np.array(variance))
o = np.argmax(np.array(mean))
print(o)
print(mean)
bandits = [Gaussian_Reward(mean[i], variance[i]) for i in range(no_bandits)]
#COMPUTE DELTA
maxi_mean = np.argsort(mean)
maxi_mean = np.flip(maxi_mean, axis=0)
max_index = maxi_mean[0]
print(maxi_mean)
for i in range(1, len(maxi_mean)):
if maxi_mean[i] != maxi_mean[0]:
sec_max_index = maxi_mean[i]
break
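# Suboptimality gap between the best and second-best arm means (passed to MAMAB below).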
delta = mean[max_index]-mean[sec_max_index]
print(delta)
fig, ax = plt.subplots(1, 1, figsize=(15, 10))
P = [0, 2, 4, 6, 8, 10]
#for prob in range(0, 2):
for prob in P:
GLOBAL_TOT_REGRET = np.array([0 for _ in range(no_iterations+1)])
AGENT_TOT_REGRET = np.array([[0 for _ in range(no_iterations+1)] for i in range(no_agents)])
SELF_TOT_REGRET = np.array([[0 for _ in range(no_iterations+1)] for __ in range(no_agents)])
COM_TOT_REGRET = np.array([[0 for _ in range(no_iterations+1)] for __ in range(no_agents)])
AGENT_TOT_REWARD = np.array([[0 for _ in range(no_iterations+1)] for i in range(no_agents)])
SELF_TOT_REWARD = np.array([[0 for _ in range(no_iterations+1)] for __ in range(no_agents)])
COM_TOT_REWARD = np.array([[0 for _ in range(no_iterations+1)] for __ in range(no_agents)])
SELF_F = [[[0 for _ in range(no_iterations+1)] for j in range(no_bandits)] for k in range(no_agents)]
COM_F = [[[0 for _ in range(no_iterations+1)] for j in range(no_bandits)] for k in range(no_agents)]
DUPLICATE = np.array([[[0 for _ in range(no_iterations+1)] for i in range(no_bandits)] for j in range(no_agents)])
Nij_T_s_MAIN = [[[0 for _ in range(no_iterations+1)] for j in range(no_bandits)] for k in range(no_agents)]
Nij_T_MAIN = [[[0 for _ in range(no_iterations+1)] for j in range(no_bandits)] for k in range(no_agents)]
for experiment in range(1,10):
p = prob/10
print("Experiment " + str(experiment) + "for probability " + str(p))
G = MAMAB(no_bandits=no_bandits, no_agents=no_agents, bandits=bandits,optimal_bandit_index=o , p=p, reward_vairance=variance, delta=delta,
no_iter=no_iterations)
for i in range(no_iterations):
G.Sample()
a = G.Pick()
G.Communicate(index=i, itr=i)
agent_tot_regret_T = G.get_agent_tot_regret_with_time()
self_tot_regret = G.get_agent_self_tot_regret_with_time()
com_tot_reget = G.get_agent_com_tot_regret_with_time()
agent_tot_reward_T = G.get_agent_tot_reward_with_time()
self_tot_reward = G.get_agent_self_tot_reward_with_time()
com_tot_reward = G.get_agent_com_tot_reward_with_time()
self_F = G.get_self_F()
com_F = G.get_com_F()
global_tot_regert_T = G.get_global_tot_regret()
duplicate = G.get_agent_duplicate_eliminator()
Nij_T_s = G.get_Nij_T_s()
Nij_T = G.get_Nij_T()
AGENT_TOT_REGRET = np.add(AGENT_TOT_REGRET, agent_tot_regret_T)
GLOBAL_TOT_REGRET = np.add(GLOBAL_TOT_REGRET, global_tot_regert_T)
SELF_TOT_REGRET = np.add(SELF_TOT_REGRET, self_tot_regret)
COM_TOT_REGRET = np.add(COM_TOT_REGRET, com_tot_reget)
AGENT_TOT_REWARD = np.add(AGENT_TOT_REWARD, agent_tot_reward_T)
SELF_TOT_REWARD = np.add(SELF_TOT_REWARD, self_tot_reward)
COM_TOT_REWARD = np.add(COM_TOT_REWARD, com_tot_reward)
Nij_T_MAIN = np.add(Nij_T_MAIN, Nij_T)
Nij_T_s_MAIN = np.add(Nij_T_s_MAIN, Nij_T_s)
SELF_F = np.add(SELF_F, self_F)
COM_F = np.add(COM_F, com_F)
DUPLICATE = np.add(DUPLICATE, duplicate)
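    # `experiment` retains its final loop value (9) here, so the divisions below
    # average the accumulated sums over the 9 experiments.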
AGENT_TOT_REGRET = (1/experiment)*AGENT_TOT_REGRET
SELF_TOT_REGRET = (1/experiment)*SELF_TOT_REGRET
COM_TOT_REGRET = (1/experiment)*COM_TOT_REGRET
AGENT_TOT_REWARD = (1/experiment)*AGENT_TOT_REWARD
SELF_TOT_REWARD = (1/experiment)*SELF_TOT_REWARD
COM_TOT_REWARD = (1/experiment)*COM_TOT_REWARD
SELF_F = (1/experiment)*SELF_F
COM_F = (1/experiment)*COM_F
GLOBAL_TOT_REGRET = (1/experiment)*GLOBAL_TOT_REGRET
DUPLICATE = (1/experiment)*DUPLICATE
ESTIMATE = G.get_estimate()
Nij_T_MAIN = (1/experiment)*Nij_T_MAIN
Nij_T_s_MAIN = (1/experiment)*Nij_T_s_MAIN
np.save("Agent/Total/Agent_tot_regret" + str(prob), AGENT_TOT_REGRET)
np.save("Agent/Total/Agent_tot_reward" + str(prob), AGENT_TOT_REWARD)
np.save("Agent/Total/Estimate" + str(prob), ESTIMATE)
np.save("Agent/Total/Duplicate" + str(prob), DUPLICATE)
np.save("Self/Total/Self_tot_regret" + str(prob), SELF_TOT_REGRET)
np.save("Self/Total/Self_tot_reward" + str(prob), SELF_TOT_REWARD)
np.save("Self/Total/Self_F" + str(prob), SELF_F)
np.save("Com/Total/Com_tot_regret" + str(prob), COM_TOT_REGRET)
np.save("Com/Total/Com_tot_reward" + str(prob), COM_TOT_REWARD)
np.save("Com/Total/Com_F" + str(prob), COM_F)
np.save("Global/Total/Global_tot_regret" + str(prob), GLOBAL_TOT_REGRET)
np.save("Com/Total/N_ij_T" + str(prob), Nij_T_MAIN)
np.save("Com/Total/N_ij_T_s" + str(prob), Nij_T_s_MAIN)
fig_network = plt.figure()
plt.close(fig_network)
for prob in range(11):
#Network Visualization
p = prob/10
fig, ax = plt.subplots(1, 1, figsize=(30, 30))
nx.draw(G=nx.erdos_renyi_graph(no_agents, p, directed=True), with_labels=True, ax=ax)
plt.savefig("Network/" + str(prob))
    plt.close(fig)  # close the figure created in this iteration, not the already-closed fig_network
|
[
"numpy.flip",
"networkx.erdos_renyi_graph",
"matplotlib.pyplot.close",
"numpy.argsort",
"Gaussian_Reward.Gaussian_Reward",
"matplotlib.pyplot.figure",
"numpy.array",
"erdos_renyl_model_without_duplicate.MAMAB",
"numpy.add",
"matplotlib.pyplot.subplots"
] |
[((1089, 1105), 'numpy.argsort', 'np.argsort', (['mean'], {}), '(mean)\n', (1099, 1105), True, 'import numpy as np\n'), ((1119, 1145), 'numpy.flip', 'np.flip', (['maxi_mean'], {'axis': '(0)'}), '(maxi_mean, axis=0)\n', (1126, 1145), True, 'import numpy as np\n'), ((1387, 1423), 'matplotlib.pyplot.subplots', 'plt.subplots', (['(1)', '(1)'], {'figsize': '(15, 10)'}), '(1, 1, figsize=(15, 10))\n', (1399, 1423), True, 'import matplotlib.pyplot as plt\n'), ((6486, 6498), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (6496, 6498), True, 'import matplotlib.pyplot as plt\n'), ((6500, 6522), 'matplotlib.pyplot.close', 'plt.close', (['fig_network'], {}), '(fig_network)\n', (6509, 6522), True, 'import matplotlib.pyplot as plt\n'), ((851, 865), 'numpy.array', 'np.array', (['mean'], {}), '(mean)\n', (859, 865), True, 'import numpy as np\n'), ((898, 916), 'numpy.array', 'np.array', (['variance'], {}), '(variance)\n', (906, 916), True, 'import numpy as np\n'), ((937, 951), 'numpy.array', 'np.array', (['mean'], {}), '(mean)\n', (945, 951), True, 'import numpy as np\n'), ((992, 1029), 'Gaussian_Reward.Gaussian_Reward', 'Gaussian_Reward', (['mean[i]', 'variance[i]'], {}), '(mean[i], variance[i])\n', (1007, 1029), False, 'from Gaussian_Reward import Gaussian_Reward\n'), ((6609, 6645), 'matplotlib.pyplot.subplots', 'plt.subplots', (['(1)', '(1)'], {'figsize': '(30, 30)'}), '(1, 1, figsize=(30, 30))\n', (6621, 6645), True, 'import matplotlib.pyplot as plt\n'), ((6783, 6805), 'matplotlib.pyplot.close', 'plt.close', (['fig_network'], {}), '(fig_network)\n', (6792, 6805), True, 'import matplotlib.pyplot as plt\n'), ((2938, 3103), 'erdos_renyl_model_without_duplicate.MAMAB', 'MAMAB', ([], {'no_bandits': 'no_bandits', 'no_agents': 'no_agents', 'bandits': 'bandits', 'optimal_bandit_index': 'o', 'p': 'p', 'reward_vairance': 'variance', 'delta': 'delta', 'no_iter': 'no_iterations'}), '(no_bandits=no_bandits, no_agents=no_agents, bandits=bandits,\n optimal_bandit_index=o, p=p, reward_vairance=variance, delta=delta,\n no_iter=no_iterations)\n', (2943, 3103), False, 'from erdos_renyl_model_without_duplicate import MAMAB\n'), ((4109, 4153), 'numpy.add', 'np.add', (['AGENT_TOT_REGRET', 'agent_tot_regret_T'], {}), '(AGENT_TOT_REGRET, agent_tot_regret_T)\n', (4115, 4153), True, 'import numpy as np\n'), ((4187, 4233), 'numpy.add', 'np.add', (['GLOBAL_TOT_REGRET', 'global_tot_regert_T'], {}), '(GLOBAL_TOT_REGRET, global_tot_regert_T)\n', (4193, 4233), True, 'import numpy as np\n'), ((4265, 4305), 'numpy.add', 'np.add', (['SELF_TOT_REGRET', 'self_tot_regret'], {}), '(SELF_TOT_REGRET, self_tot_regret)\n', (4271, 4305), True, 'import numpy as np\n'), ((4336, 4373), 'numpy.add', 'np.add', (['COM_TOT_REGRET', 'com_tot_reget'], {}), '(COM_TOT_REGRET, com_tot_reget)\n', (4342, 4373), True, 'import numpy as np\n'), ((4408, 4452), 'numpy.add', 'np.add', (['AGENT_TOT_REWARD', 'agent_tot_reward_T'], {}), '(AGENT_TOT_REWARD, agent_tot_reward_T)\n', (4414, 4452), True, 'import numpy as np\n'), ((4485, 4525), 'numpy.add', 'np.add', (['SELF_TOT_REWARD', 'self_tot_reward'], {}), '(SELF_TOT_REWARD, self_tot_reward)\n', (4491, 4525), True, 'import numpy as np\n'), ((4558, 4596), 'numpy.add', 'np.add', (['COM_TOT_REWARD', 'com_tot_reward'], {}), '(COM_TOT_REWARD, com_tot_reward)\n', (4564, 4596), True, 'import numpy as np\n'), ((4626, 4651), 'numpy.add', 'np.add', (['Nij_T_MAIN', 'Nij_T'], {}), '(Nij_T_MAIN, Nij_T)\n', (4632, 4651), True, 'import numpy as np\n'), ((4680, 4709), 'numpy.add', 'np.add', (['Nij_T_s_MAIN', 'Nij_T_s'], {}), '(Nij_T_s_MAIN, Nij_T_s)\n', (4686, 4709), True, 'import numpy as np\n'), ((4736, 4758), 'numpy.add', 'np.add', (['SELF_F', 'self_F'], {}), '(SELF_F, self_F)\n', (4742, 4758), True, 'import numpy as np\n'), ((4783, 4803), 'numpy.add', 'np.add', (['COM_F', 'com_F'], {}), '(COM_F, com_F)\n', (4789, 4803), True, 'import numpy as np\n'), ((4831, 4859), 'numpy.add', 'np.add', (['DUPLICATE', 'duplicate'], {}), '(DUPLICATE, duplicate)\n', (4837, 4859), True, 'import numpy as np\n'), ((6661, 6710), 'networkx.erdos_renyi_graph', 'nx.erdos_renyi_graph', (['no_agents', 'p'], {'directed': '(True)'}), '(no_agents, p, directed=True)\n', (6681, 6710), True, 'import networkx as nx\n')]
|
# -*- coding: utf-8 -*-
"""
ctypes type universe.
"""
from __future__ import print_function, division, absolute_import
import ctypes
from numba.typesystem.itypesystem import consing, tyname
from numba.typesystem import universe
from numba.typesystem import numbatypes as ts
domain_name = "ctypes"
# ______________________________________________________________________
nb2ctypes = {
ts.float32: ctypes.c_float,
ts.float64: ctypes.c_double,
ts.float128: ctypes.c_longdouble,
ts.object_: ctypes.py_object,
ts.void: None,
ts.string_: ctypes.c_char_p,
}
def cint(name):
ty = getattr(ts, name)
cname = "c_int" if ty.signed else "c_uint"
nb2ctypes[ty] = getattr(ctypes, cname + str(ty.itemsize * 8))
for name in map(tyname, universe.int_typenames):
cint(name)
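# Inject a module-level alias for every mapped type (e.g. float64 -> ctypes.c_double).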
globals().update((tyname(ty.typename), cty) for ty, cty in nb2ctypes.iteritems())
float_, double, longdouble = float32, float64, float128
ctypes_map = dict((cty, ty) for ty, cty in nb2ctypes.iteritems())
# ______________________________________________________________________
@consing
def struct_(fields, name=None, readonly=False, packed=False):
class Struct(ctypes.Structure):
_fields_ = fields
if packed:
_pack_ = 1
return Struct
@consing
def function(rettype, argtypes, name=None, is_vararg=False):
assert not is_vararg
return ctypes.CFUNCTYPE(rettype, *argtypes)
@consing
def pointer(base_type):
if base_type in (ctypes.c_char, ctypes.c_byte):
return string_
return ctypes.POINTER(base_type)
carray = consing(lambda base_type, size: base_type * size)
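# Usage sketch: pointer(float64) yields ctypes.POINTER(ctypes.c_double), and
# carray(float64, 10) yields the ctypes array type c_double * 10.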
|
[
"ctypes.CFUNCTYPE",
"numba.typesystem.itypesystem.consing",
"numba.typesystem.itypesystem.tyname",
"ctypes.POINTER"
] |
[((1601, 1650), 'numba.typesystem.itypesystem.consing', 'consing', (['(lambda base_type, size: base_type * size)'], {}), '(lambda base_type, size: base_type * size)\n', (1608, 1650), False, 'from numba.typesystem.itypesystem import consing, tyname\n'), ((1406, 1442), 'ctypes.CFUNCTYPE', 'ctypes.CFUNCTYPE', (['rettype', '*argtypes'], {}), '(rettype, *argtypes)\n', (1422, 1442), False, 'import ctypes\n'), ((1564, 1589), 'ctypes.POINTER', 'ctypes.POINTER', (['base_type'], {}), '(base_type)\n', (1578, 1589), False, 'import ctypes\n'), ((843, 862), 'numba.typesystem.itypesystem.tyname', 'tyname', (['ty.typename'], {}), '(ty.typename)\n', (849, 862), False, 'from numba.typesystem.itypesystem import consing, tyname\n')]
|
import time
from PyQt5.QtCore import QTimer, pyqtSignal
from PyQt5.QtGui import QPixmap, QFont
from PyQt5.QtWidgets import QMainWindow, QDialog, QApplication, QLabel
from lsl.mbl_lsl_receiver import MBL_LSLReceiver
from visuals.gui import Ui_MainWindow
from threading import Thread
class MBL_GuiWindow(QMainWindow):
def __init__(self, parent=None):
super().__init__(parent)
self.thread = None
self.ui = Ui_MainWindow()
self.ui.setupUi(self)
self.connect_signals_slots()
self.receiver = MBL_LSLReceiver()
self.receiver.start()
self.last_time = time.time()
        # Disable the GUI control boxes
self.ui.VisualsBox.setEnabled(False)
self.ui.SettingsBox.setEnabled(False)
# prepare score label
self.score = 0
self.ui.score_value.setText(str(self.score))
self.ui.score_value.setStyleSheet('color: #ffffff')
self.ui.score_value.setFont(QFont('MS Shell Dlg 2', 16))
# creating signals
self.signalUpdateScore = pyqtSignal()
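        # NOTE: pyqtSignal only binds when declared as a *class* attribute; this
        # instance-level assignment produces an unbound signal and is effectively inert.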
# creating label
self.head_label_a = QLabel(self)
self.head_label_b = QLabel(self)
# loading images
self.pixmap_a = QPixmap('res\\headA_resized.png')
self.pixmap_b = QPixmap('res\\headB_resized.png')
# adding images to labels
self.head_label_a.setPixmap(self.pixmap_a)
self.head_label_b.setPixmap(self.pixmap_b)
# Optional, resize label to image size and remove background
self.head_label_a.resize(self.pixmap_a.width(), self.pixmap_a.height())
self.head_label_b.resize(self.pixmap_b.width(), self.pixmap_b.height())
self.head_label_a.setStyleSheet("background-color: rgba(255, 255, 255, 0)")
self.head_label_b.setStyleSheet("background-color: rgba(255, 255, 255, 0)")
self.head_label_a.move(300, 200)
self.head_label_b.move(800, 200)
# Update the interface every 0.5 seconds
# make QTimer
self.qTimer = QTimer()
        # set interval to 0.5 s
        self.qTimer.setInterval(500)  # 500 ms = 0.5 s
# connect timeout signal to signal handler
self.qTimer.timeout.connect(self.update_head_position)
# start timer
self.qTimer.start()
def connect_signals_slots(self):
print("No signals implemented yet") # implement action connections
def show_gui_threaded(self):
        self.thread = Thread(target=self.show)  # pass the callable itself; calling it here would block and pass None
def stop_gui_thread(self):
self.thread.join()
def update_head_position(self):
delta_time = time.time() - self.last_time
step = 450 * self.receiver.correlation_score * delta_time
        self.head_label_a.move(int(300 + step), 200)  # QWidget.move expects int coordinates
        self.head_label_b.move(int(800 - step), 200)
self.last_time = time.time()
if self.receiver.correlation_score > 0.9:
self.score += 1
self.ui.score_value.setText(str(self.score))
def reset_head_position(self):
self.head_label_a.move(300, 200)
self.head_label_b.move(800, 200)
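# Minimal launch sketch (not part of the original module; assumes no other entry point):
#     import sys
#     app = QApplication(sys.argv)
#     window = MBL_GuiWindow()
#     window.show()
#     sys.exit(app.exec_())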
|
[
"PyQt5.QtCore.pyqtSignal",
"PyQt5.QtWidgets.QLabel",
"PyQt5.QtCore.QTimer",
"lsl.mbl_lsl_receiver.MBL_LSLReceiver",
"visuals.gui.Ui_MainWindow",
"PyQt5.QtGui.QFont",
"time.time",
"PyQt5.QtGui.QPixmap"
] |
[((435, 450), 'visuals.gui.Ui_MainWindow', 'Ui_MainWindow', ([], {}), '()\n', (448, 450), False, 'from visuals.gui import Ui_MainWindow\n'), ((542, 559), 'lsl.mbl_lsl_receiver.MBL_LSLReceiver', 'MBL_LSLReceiver', ([], {}), '()\n', (557, 559), False, 'from lsl.mbl_lsl_receiver import MBL_LSLReceiver\n'), ((615, 626), 'time.time', 'time.time', ([], {}), '()\n', (624, 626), False, 'import time\n'), ((1031, 1043), 'PyQt5.QtCore.pyqtSignal', 'pyqtSignal', ([], {}), '()\n', (1041, 1043), False, 'from PyQt5.QtCore import QTimer, pyqtSignal\n'), ((1098, 1110), 'PyQt5.QtWidgets.QLabel', 'QLabel', (['self'], {}), '(self)\n', (1104, 1110), False, 'from PyQt5.QtWidgets import QMainWindow, QDialog, QApplication, QLabel\n'), ((1139, 1151), 'PyQt5.QtWidgets.QLabel', 'QLabel', (['self'], {}), '(self)\n', (1145, 1151), False, 'from PyQt5.QtWidgets import QMainWindow, QDialog, QApplication, QLabel\n'), ((1202, 1235), 'PyQt5.QtGui.QPixmap', 'QPixmap', (['"""res\\\\headA_resized.png"""'], {}), "('res\\\\headA_resized.png')\n", (1209, 1235), False, 'from PyQt5.QtGui import QPixmap, QFont\n'), ((1260, 1293), 'PyQt5.QtGui.QPixmap', 'QPixmap', (['"""res\\\\headB_resized.png"""'], {}), "('res\\\\headB_resized.png')\n", (1267, 1293), False, 'from PyQt5.QtGui import QPixmap, QFont\n'), ((2006, 2014), 'PyQt5.QtCore.QTimer', 'QTimer', ([], {}), '()\n', (2012, 2014), False, 'from PyQt5.QtCore import QTimer, pyqtSignal\n'), ((2795, 2806), 'time.time', 'time.time', ([], {}), '()\n', (2804, 2806), False, 'import time\n'), ((941, 968), 'PyQt5.QtGui.QFont', 'QFont', (['"""MS Shell Dlg 2"""', '(16)'], {}), "('MS Shell Dlg 2', 16)\n", (946, 968), False, 'from PyQt5.QtGui import QPixmap, QFont\n'), ((2577, 2588), 'time.time', 'time.time', ([], {}), '()\n', (2586, 2588), False, 'import time\n')]
|
import os
import glob
import pickle
import zipfile
import warnings
import numpy as np
import pandas as pd
from urllib import request
def trim_eol_whitespace(data_file):
with open(data_file, 'r') as f:
lines = f.readlines()
lines = [line.replace(' \n', '\n') for line in lines]
with open(data_file, 'w') as f:
f.writelines(lines)
def decimal_comma_to_decimal_point(data_file):
with open(data_file, 'r') as f:
lines = f.readlines()
lines = [line.replace(',', '.') for line in lines]
with open(data_file, 'w') as f:
f.writelines(lines)
REGRESSION_DATA = {
'boston':
{'url': 'https://archive.ics.uci.edu/ml/machine-learning-databases/housing/housing.data',
'dir_after_unzip': None,
'data_file': 'housing.data',
'parse_args': {'sep': ' ', 'header': None, 'skipinitialspace': True},
'target_cols': [-1]},
'carbon':
{'url': 'https://archive.ics.uci.edu/ml/machine-learning-databases/00448/carbon_nanotubes.csv',
'dir_after_unzip': None,
'data_file': 'carbon_nanotubes.csv',
'formatter': decimal_comma_to_decimal_point,
'parse_args': {'sep': ';'},
'target_cols': [-1, -2, -3]},
'concrete':
{'url': 'https://archive.ics.uci.edu/ml/machine-learning-databases/concrete/compressive/Concrete_Data.xls',
'dir_after_unzip': None,
'data_file': 'Concrete_Data.xls',
'parse_args': dict(),
'target_cols': [-1]},
'energy':
{'url': 'https://archive.ics.uci.edu/ml/machine-learning-databases/00242/ENB2012_data.xlsx',
'dir_after_unzip': None,
'data_file': 'ENB2012_data.xlsx',
'parse_args': dict(),
'target_cols': [-1, -2]},
'naval':
{'url': 'https://archive.ics.uci.edu/ml/machine-learning-databases/00316/UCI%20CBM%20Dataset.zip',
'dir_after_unzip': 'UCI CBM Dataset',
'data_file': 'data.txt',
'parse_args': {'sep': ' ', 'header': None, 'skipinitialspace': True},
'target_cols': [-1, -2]},
'power plant':
{'url': 'https://archive.ics.uci.edu/ml/machine-learning-databases/00294/CCPP.zip',
'dir_after_unzip': 'CCPP',
'data_file': 'Folds5x2_pp.xlsx',
'parse_args': dict(),
'target_cols': [-1]},
'protein':
{'url': 'https://archive.ics.uci.edu/ml/machine-learning-databases/00265/CASP.csv',
'dir_after_unzip': None,
'data_file': 'CASP.csv',
'parse_args': dict(),
'target_cols': [1]},
'superconductivity':
{'url': 'https://archive.ics.uci.edu/ml/machine-learning-databases/00464/superconduct.zip',
'dir_after_unzip': None,
'data_file': 'train.csv',
'parse_args': dict(),
'target_cols': [-1]},
'wine-red':
{'url': 'https://archive.ics.uci.edu/ml/machine-learning-databases/wine-quality/winequality-red.csv',
'dir_after_unzip': None,
'data_file': 'winequality-red.csv',
'parse_args': {'sep': ';'},
'target_cols': [-1]},
'wine-white':
{'url': 'https://archive.ics.uci.edu/ml/machine-learning-databases/wine-quality/winequality-white.csv',
'dir_after_unzip': None,
'data_file': 'winequality-white.csv',
'parse_args': {'sep': ';'},
'target_cols': [-1]},
'yacht':
{'url': 'https://archive.ics.uci.edu/ml/machine-learning-databases/00243/yacht_hydrodynamics.data',
'dir_after_unzip': None,
'data_file': 'yacht_hydrodynamics.data',
'formatter': trim_eol_whitespace,
'parse_args': {'sep': ' ', 'header': None, 'skipinitialspace': True},
'target_cols': [-1]},
'year':
{'url': 'https://archive.ics.uci.edu/ml/machine-learning-databases/00203/YearPredictionMSD.txt.zip',
'dir_after_unzip': None,
'data_file': 'YearPredictionMSD.txt',
'parse_args': dict(),
'target_cols': [1]},
}
def download_all(force_download=False):
# make data directory if it doesn't yet exist
if not os.path.exists('data'):
os.mkdir('data')
# download all regression data experiments
for key in REGRESSION_DATA.keys():
data_dir = os.path.join('data', key)
if not os.path.exists(data_dir):
os.mkdir(data_dir)
file = os.path.join(data_dir, REGRESSION_DATA[key]['url'].split('/')[-1])
if os.path.exists(file) and force_download:
os.remove(file)
elif os.path.exists(file) and not force_download:
print(file.split(os.sep)[-1], 'already exists.')
continue
print('Downloading', file.split(os.sep)[-1])
request.urlretrieve(REGRESSION_DATA[key]['url'], file)
print('Downloads complete!')
def load_data(data_dir, dir_after_unzip, data_file, parse_args, **kwargs):
# save the base data directory as the save directory, since data_dir might be modified below
save_dir = data_dir
# find any zip files
zip_files = glob.glob(os.path.join(data_dir, '*.zip'))
assert len(zip_files) <= 1
# do we need to unzip?
if len(zip_files) or dir_after_unzip is not None:
# unzip it
with zipfile.ZipFile(zip_files[0], 'r') as f:
f.extractall(data_dir)
# update data directory if required
if dir_after_unzip is not None:
data_dir = os.path.join(data_dir, dir_after_unzip)
# correct formatting issues if necessary
if 'formatter' in kwargs.keys() and kwargs['formatter'] is not None:
kwargs['formatter'](os.path.join(data_dir, data_file))
# process files according to type
if os.path.splitext(data_file)[-1] in {'.csv', '.data', '.txt'}:
df = pd.read_csv(os.path.join(data_dir, data_file), **parse_args)
elif os.path.splitext(data_file)[-1] in {'.xls', '.xlsx'}:
df = pd.read_excel(os.path.join(data_dir, data_file))
else:
warnings.warn('Type Not Supported: ' + data_file)
return
# convert to numpy arrays
xy = df.to_numpy(dtype=np.float32)
y = xy[:, kwargs['target_cols']]
    # exclude the target columns from the features; normalize negative indices first
    # so that multi-target sets such as [-1, -2] are removed correctly
    n_cols = xy.shape[1]
    target_indices = {i % n_cols for i in kwargs['target_cols']}
    x = xy[:, [i for i in range(n_cols) if i not in target_indices]]
# save data
with open(os.path.join(save_dir, save_dir.split(os.sep)[-1] + '.pkl'), 'wb') as f:
pickle.dump({'data': x, 'target': y}, f)
def generate_toy_data(num_samples=500):
def data_mean(x):
return x * np.sin(x)
def data_std(x):
return np.abs(0.3 * (1 + x))
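    # heteroscedastic toy problem: the mean is x*sin(x) and the noise standard
    # deviation 0.3*(1 + x) grows linearly with x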
# sample training data
x_data = np.random.uniform(0, 10, size=num_samples)
y_data = data_mean(x_data) + np.random.normal(scale=data_std(x_data))
# generate evaluation points with the associated actual mean and standard deviation
x_eval = np.linspace(-4, 14, 250)
true_mean = data_mean(x_eval)
true_std = data_std(x_eval)
# process return tuple
return_tuple = (x_data, y_data, x_eval, true_mean, true_std)
    return_tuple = tuple(np.expand_dims(np.float32(x), axis=-1) for x in return_tuple)
return return_tuple
if __name__ == '__main__':
# download all the data
download_all()
# process all the data
for key in REGRESSION_DATA.keys():
load_data(data_dir=os.path.join('data', key), **REGRESSION_DATA[key])
print('Processing complete!')
|
[
"numpy.random.uniform",
"os.mkdir",
"pickle.dump",
"numpy.abs",
"os.remove",
"zipfile.ZipFile",
"numpy.float32",
"os.path.exists",
"urllib.request.urlretrieve",
"numpy.sin",
"os.path.splitext",
"numpy.linspace",
"warnings.warn",
"os.path.join"
] |
[((6613, 6655), 'numpy.random.uniform', 'np.random.uniform', (['(0)', '(10)'], {'size': 'num_samples'}), '(0, 10, size=num_samples)\n', (6630, 6655), True, 'import numpy as np\n'), ((6832, 6856), 'numpy.linspace', 'np.linspace', (['(-4)', '(14)', '(250)'], {}), '(-4, 14, 250)\n', (6843, 6856), True, 'import numpy as np\n'), ((4103, 4125), 'os.path.exists', 'os.path.exists', (['"""data"""'], {}), "('data')\n", (4117, 4125), False, 'import os\n'), ((4135, 4151), 'os.mkdir', 'os.mkdir', (['"""data"""'], {}), "('data')\n", (4143, 4151), False, 'import os\n'), ((4258, 4283), 'os.path.join', 'os.path.join', (['"""data"""', 'key'], {}), "('data', key)\n", (4270, 4283), False, 'import os\n'), ((4719, 4773), 'urllib.request.urlretrieve', 'request.urlretrieve', (["REGRESSION_DATA[key]['url']", 'file'], {}), "(REGRESSION_DATA[key]['url'], file)\n", (4738, 4773), False, 'from urllib import request\n'), ((5058, 5089), 'os.path.join', 'os.path.join', (['data_dir', '"""*.zip"""'], {}), "(data_dir, '*.zip')\n", (5070, 5089), False, 'import os\n'), ((6379, 6419), 'pickle.dump', 'pickle.dump', (["{'data': x, 'target': y}", 'f'], {}), "({'data': x, 'target': y}, f)\n", (6390, 6419), False, 'import pickle\n'), ((6550, 6571), 'numpy.abs', 'np.abs', (['(0.3 * (1 + x))'], {}), '(0.3 * (1 + x))\n', (6556, 6571), True, 'import numpy as np\n'), ((4299, 4323), 'os.path.exists', 'os.path.exists', (['data_dir'], {}), '(data_dir)\n', (4313, 4323), False, 'import os\n'), ((4337, 4355), 'os.mkdir', 'os.mkdir', (['data_dir'], {}), '(data_dir)\n', (4345, 4355), False, 'import os\n'), ((4449, 4469), 'os.path.exists', 'os.path.exists', (['file'], {}), '(file)\n', (4463, 4469), False, 'import os\n'), ((4502, 4517), 'os.remove', 'os.remove', (['file'], {}), '(file)\n', (4511, 4517), False, 'import os\n'), ((5237, 5271), 'zipfile.ZipFile', 'zipfile.ZipFile', (['zip_files[0]', '"""r"""'], {}), "(zip_files[0], 'r')\n", (5252, 5271), False, 'import zipfile\n'), ((5421, 5460), 'os.path.join', 'os.path.join', (['data_dir', 'dir_after_unzip'], {}), '(data_dir, dir_after_unzip)\n', (5433, 5460), False, 'import os\n'), ((5608, 5641), 'os.path.join', 'os.path.join', (['data_dir', 'data_file'], {}), '(data_dir, data_file)\n', (5620, 5641), False, 'import os\n'), ((5689, 5716), 'os.path.splitext', 'os.path.splitext', (['data_file'], {}), '(data_file)\n', (5705, 5716), False, 'import os\n'), ((5776, 5809), 'os.path.join', 'os.path.join', (['data_dir', 'data_file'], {}), '(data_dir, data_file)\n', (5788, 5809), False, 'import os\n'), ((5968, 6017), 'warnings.warn', 'warnings.warn', (["('Type Not Supported: ' + data_file)"], {}), "('Type Not Supported: ' + data_file)\n", (5981, 6017), False, 'import warnings\n'), ((6503, 6512), 'numpy.sin', 'np.sin', (['x'], {}), '(x)\n', (6509, 6512), True, 'import numpy as np\n'), ((7051, 7064), 'numpy.float32', 'np.float32', (['x'], {}), '(x)\n', (7061, 7064), True, 'import numpy as np\n'), ((4531, 4551), 'os.path.exists', 'os.path.exists', (['file'], {}), '(file)\n', (4545, 4551), False, 'import os\n'), ((5834, 5861), 'os.path.splitext', 'os.path.splitext', (['data_file'], {}), '(data_file)\n', (5850, 5861), False, 'import os\n'), ((5915, 5948), 'os.path.join', 'os.path.join', (['data_dir', 'data_file'], {}), '(data_dir, data_file)\n', (5927, 5948), False, 'import os\n'), ((7294, 7319), 'os.path.join', 'os.path.join', (['"""data"""', 'key'], {}), "('data', key)\n", (7306, 7319), False, 'import os\n')]
|
# -*- coding: utf-8 -*-
# Define your item pipelines here
#
# Don't forget to add your pipeline to the ITEM_PIPELINES setting
# See: http://doc.scrapy.org/en/latest/topics/item-pipeline.html
import subprocess
import scrapy.pipelines.files
from scrapy.exceptions import DropItem
import os
import scrapy
import errno
SYMCHK_PATH = r'C:\Program Files (x86)\Windows Kits\10\Debuggers\x64\symchk.exe'
SYM_PATH = r'SRV**https://msdl.microsoft.com/download/symbols'
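# symchk symbol path syntax is SRV*<local cache>*<server>; the cache segment
# is left empty here, so symbols are fetched directly from the Microsoft
# symbol server.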
def expand(source, dest, filter_=None):
if filter_ is None:
filter_ = '*'
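    # Windows expand.exe extracts cabinet members that match the -F: filter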
subprocess.call(['expand', '-F:{}'.format(filter_), source, dest])
def delete(path):
subprocess.call(['del', '/Q', path], shell=True)
def symchk(path, symchk_path=None, sym_path=None, output_dir=None):
if symchk_path is None:
symchk_path = SYMCHK_PATH
if sym_path is None:
sym_path = SYM_PATH
output_dir_args = []
if output_dir is not None:
output_dir = os.path.join(os.getcwd(), output_dir)
output_dir_args = ['/oc', output_dir]
subprocess.call([symchk_path, '/r', path, '/s', sym_path, ] + output_dir_args)
class MsuDownloadPipeline(scrapy.pipelines.files.FilesPipeline):
def get_media_requests(self, item, info):
url = item['url']
if not url.lower().endswith('.msu'):
raise DropItem('Item not an MSU')
request = scrapy.Request(url)
request.meta['bulletin'] = item['bulletin']
yield request
def item_completed(self, results, item, info):
file_paths = (result['path'] for ok, result in results if ok)
msu_paths = [path for path in file_paths if path.lower().endswith('.msu')]
item['msu_path'] = msu_paths[0]
return item
def file_path(self, request, response=None, info=None):
bulletin = request.meta['bulletin'].upper()
path = os.path.join(bulletin, request.url.rsplit('/', 1)[-1])
return path
class MsuExtractPipeline(object):
@classmethod
def from_crawler(cls, crawler):
instance = cls()
instance.settings = crawler.settings
return instance
def __init__(self):
super(MsuExtractPipeline, self).__init__()
self.settings = None
def process_item(self, item, spider):
msu_path = os.path.join(self.settings['FILES_STORE'], item['msu_path'])
msu_dir = os.path.dirname(msu_path)
msu_name = item['url'].rsplit('/', 1)[-1].rsplit('.', 1)[0]
extract_dir = os.path.join(msu_dir, msu_name)
try:
os.mkdir(extract_dir)
except WindowsError as e:
if e.errno != errno.EEXIST:
raise
extract_cab = '{}.cab'.format(msu_name)
expand(msu_path, extract_dir, extract_cab)
filter_ = self.settings.get('EXTRACT_FILTER', None)
expand(os.path.join(extract_dir, extract_cab), extract_dir, filter_=filter_)
if spider.settings.get('DELETE_RUBBISH', False):
# Delete all files that are not in directories. This includes a lot of rubbish files, as well as the
# original `.cab` file.
delete(os.path.join(extract_dir, '*'))
if not self.settings.get('DONT_DOWNLOAD_SYMBOLS', False):
self.download_symbols(extract_dir)
if spider.settings.get('DELETE_MSU_FILES', False):
try:
os.unlink(msu_path)
except WindowsError:
pass
return item
def download_symbols(self, extract_dir):
symchk_path = self.settings.get('SYMCHK_PATH', None)
sym_path = self.settings.get('SYM_PATH', None)
symchk(extract_dir, symchk_path=symchk_path, sym_path=sym_path)
|
[
"os.mkdir",
"os.unlink",
"scrapy.Request",
"os.getcwd",
"os.path.dirname",
"subprocess.call",
"scrapy.exceptions.DropItem",
"os.path.join"
] |
[((644, 692), 'subprocess.call', 'subprocess.call', (["['del', '/Q', path]"], {'shell': '(True)'}), "(['del', '/Q', path], shell=True)\n", (659, 692), False, 'import subprocess\n'), ((1044, 1120), 'subprocess.call', 'subprocess.call', (["([symchk_path, '/r', path, '/s', sym_path] + output_dir_args)"], {}), "([symchk_path, '/r', path, '/s', sym_path] + output_dir_args)\n", (1059, 1120), False, 'import subprocess\n'), ((1371, 1390), 'scrapy.Request', 'scrapy.Request', (['url'], {}), '(url)\n', (1385, 1390), False, 'import scrapy\n'), ((2283, 2343), 'os.path.join', 'os.path.join', (["self.settings['FILES_STORE']", "item['msu_path']"], {}), "(self.settings['FILES_STORE'], item['msu_path'])\n", (2295, 2343), False, 'import os\n'), ((2362, 2387), 'os.path.dirname', 'os.path.dirname', (['msu_path'], {}), '(msu_path)\n', (2377, 2387), False, 'import os\n'), ((2478, 2509), 'os.path.join', 'os.path.join', (['msu_dir', 'msu_name'], {}), '(msu_dir, msu_name)\n', (2490, 2509), False, 'import os\n'), ((968, 979), 'os.getcwd', 'os.getcwd', ([], {}), '()\n', (977, 979), False, 'import os\n'), ((1325, 1352), 'scrapy.exceptions.DropItem', 'DropItem', (['"""Item not an MSU"""'], {}), "('Item not an MSU')\n", (1333, 1352), False, 'from scrapy.exceptions import DropItem\n'), ((2535, 2556), 'os.mkdir', 'os.mkdir', (['extract_dir'], {}), '(extract_dir)\n', (2543, 2556), False, 'import os\n'), ((2828, 2866), 'os.path.join', 'os.path.join', (['extract_dir', 'extract_cab'], {}), '(extract_dir, extract_cab)\n', (2840, 2866), False, 'import os\n'), ((3124, 3154), 'os.path.join', 'os.path.join', (['extract_dir', '"""*"""'], {}), "(extract_dir, '*')\n", (3136, 3154), False, 'import os\n'), ((3363, 3382), 'os.unlink', 'os.unlink', (['msu_path'], {}), '(msu_path)\n', (3372, 3382), False, 'import os\n')]
|
import folium
VANCOUVER = (49.2511587, -123.1344104)
def vancouver():
    m = folium.Map(location=VANCOUVER, zoom_start=12)
    return m
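# Usage sketch (assumed entry point): a folium map can be written to HTML,
# e.g. vancouver().save('vancouver.html')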
|
[
"folium.Map"
] |
[((80, 125), 'folium.Map', 'folium.Map', ([], {'location': 'VANCOUVER', 'zoom_start': '(12)'}), '(location=VANCOUVER, zoom_start=12)\n', (90, 125), False, 'import folium\n')]
|
#!/usr/bin/env python
# coding: utf-8
# In[1]:
# import tensorflow as tf
import tensorflow.keras.backend as K
from tensorflow.keras.models import Sequential, Model
import cv2
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
# In[2]:
path_all_tfrecord = "fp56.tfrecord"
# In[3]:
dir_from = "/data/fp_img_processed/"
# In[4]:
dir_model = "vgg_cam/"
path_best = dir_model + "model-17-1.17-53.3%.hdf5"
path_best
# # model
# In[5]:
from fp_tensorflow import create_pair_56_dataset, create_single_dataset
from fp_tensorflow import create_vgg_5y_model
model = create_vgg_5y_model()
model.load_weights(path_best)
model.summary()
# # class activation map
# In[6]:
# Get the 512 input weights to the softmax.
class_weights = model.layers[-1].get_weights()[0]
# In[7]:
class_weights.shape
# In[8]:
class_weights.mean(), class_weights.std()
# In[9]:
def get_fp_output(fp, model=model):
final_conv_layer = model.get_layer("conv5_3")
get_output = K.function(
[model.layers[0].input], [final_conv_layer.output, model.layers[-1].output]
)
conv_output, prediction = get_output(np.expand_dims(fp, 0))
return np.squeeze(conv_output, axis=0), np.argmax(prediction)
# In[10]:
def get_fp_cam(fp, model=model):
class_weights = model.layers[-1].get_weights()[0]
conv_output, prediction = get_fp_output(fp, model)
true_class_weights = class_weights[:, prediction]
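    # CAM (Zhou et al., 2016): weight each final-conv feature map by the
    # dense-layer weight of the predicted class and sum over channels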
cam = np.zeros(dtype=np.float32, shape=conv_output.shape[0:2])
for i, w in enumerate(true_class_weights):
cam += w * conv_output[:, :, i]
return cam
# # biclust CAM
# In[11]:
biclusts = np.loadtxt("biclust_col.txt", int)
biclusts
# In[12]:
def get_biclust_cam(fp, biclust, model=model, labels=biclusts):
conv_output, _ = get_fp_output(fp, model)
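    # "bicluster CAM": unweighted sum of the feature maps whose channels are
    # assigned to the requested bicluster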
    return conv_output[..., labels == biclust].sum(axis=2)
# # plot
# In[13]:
def plot_bgr(img):
fig = plt.figure(figsize=(2, 2), dpi=300)
plt.axes().axis("off")
plt.imshow(cv2.cvtColor(img, cv2.COLOR_BGR2RGB))
plt.tight_layout()
# In[14]:
def plot_rgb(img):
fig = plt.figure(figsize=(2, 2), dpi=300)
plt.axes().axis("off")
plt.imshow(img)
plt.tight_layout()
# In[15]:
def plot_gray(img, cmap=plt.cm.gray):
fig = plt.figure(figsize=(2, 2), dpi=300)
plt.axes().axis("off")
plt.imshow(img, cmap=cmap)
plt.tight_layout()
# # run
# In[16]:
from floorplan_analysis import read_mono_from_image_unicode
from floorplan_analysis import fp_float_from_mono
from floorplan_analysis import pad_fp
# In[17]:
mono = read_mono_from_image_unicode(dir_from + "2888_118A" + ".png")
fp_full = fp_float_from_mono(mono)
fp = pad_fp(fp_full, 56, 56)
conv_output, prediction = get_fp_output(fp)
# In[18]:
fp_full.shape
# In[19]:
conv_output.shape, prediction.shape
# In[20]:
prediction
# In[21]:
cam = get_fp_cam(fp)
cam = cv2.resize(cam, (56, 56))
cam /= cam.max()
cam[cam <= 0] = 0
heatmap = cv2.applyColorMap(np.uint8(255 * cam), cv2.COLORMAP_VIRIDIS)
# heatmap[cam < 0.2] = 0
plot_bgr(heatmap)
# In[22]:
cam = get_biclust_cam(fp, 3)
cam = cv2.resize(cam, (56, 56))
print(cam.max())
cam /= cam.max()
cam[cam <= 0] = 0
heatmap = cv2.applyColorMap(np.uint8(255 * cam), cv2.COLORMAP_VIRIDIS)
# heatmap[cam < 0.4] = 0
plot_bgr(heatmap)
# In[23]:
def visualize_fp(fps):
# adjusted for different luminance
channel_to_rgba = np.array(
[
[0.0, 0.0, 0.0, 0.0], # wall to black L0
[0.0, 0.33, 0.0, 0.0], # entrance to green L30
[1.0, 0.25, 0.0, 0.0], # LDK to red L57
[0.83, 0.87, 0.0, 0.0], # bedroom to yellow L85
[0.0, 0.26, 1.0, 0.0], # balcony to blue L40
[0.0, 0.81, 0.76, 0.0], # bathroom to cyan L75
]
)
# make colors subtractive
channel_to_rgba[:, 0:3] -= 1
# put it on white
fps_rgba = np.clip(
np.array([1.0, 1.0, 1.0, 1.0]) + (np.array(fps) @ channel_to_rgba), 0, 1
)
return fps_rgba.astype(np.float32)
# In[24]:
rgba = visualize_fp(fp_full)
plot_rgb(rgba)
# In[25]:
def visualize_fp_cam(fp):
fp_rgba = visualize_fp(fp)
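    # the Lab lightness channel (scaled to [0, 1]) is used to mask the
    # background and to blend the floorplan into the heatmap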
fp_light = cv2.cvtColor(fp_rgba, cv2.COLOR_RGB2Lab)[:, :, 0] / 100
fp_pad = pad_fp(fp, 56, 56)
cam = get_fp_cam(fp_pad)
cam = cv2.resize(cam, (56, 56))
cam /= cam.max()
cam[cam <= 0] = 0
heatmap = cv2.applyColorMap(np.uint8(255 * cam), cv2.COLORMAP_VIRIDIS)
heatmap = pad_fp(heatmap, fp_light.shape[1], fp_light.shape[0])
heatmap[fp_light == 0] = 0
heatmap = heatmap.astype(np.float32) / 255
return 0.7 * heatmap + 0.3 * np.expand_dims(fp_light, 2)
# In[26]:
def visualize_biclust_cam(fp, biclust):
fp_rgba = visualize_fp(pad_fp(fp, max(56, fp.shape[1]), max(56, fp.shape[0])))
fp_light = cv2.cvtColor(fp_rgba, cv2.COLOR_RGB2Lab)[:, :, 0] / 100
fp_pad = pad_fp(fp, 56, 56)
cam = get_biclust_cam(fp_pad, biclust)
cam = cv2.resize(cam, (56, 56))
cam /= cam.max()
cam[cam <= 0] = 0
heatmap = cv2.applyColorMap(np.uint8(255 * cam), cv2.COLORMAP_VIRIDIS)
# heatmap = pad_fp(heatmap, fp_light.shape[1], fp_light.shape[0])
heatmap = pad_fp(heatmap, max(56, fp_light.shape[1]), max(56, fp_light.shape[0]))
heatmap = heatmap.astype(np.float32) / 255
return 0.7 * heatmap + 0.3 * np.expand_dims(fp_light, 2)
# In[27]:
plot_bgr(visualize_fp_cam(fp_full))
# In[28]:
plot_bgr(visualize_biclust_cam(fp_full, 3))
# # process representative floorplans
# In[29]:
df = pd.read_csv("biclust.csv")
df["area_group"] = pd.cut(df.Area, [0, 50, 60, 85, np.inf], labels=False)
df
# In[30]:
df_sample = df.groupby(["cluster", "area_group"]).sample(frac=0.005, random_state=1106)
df_sample = df_sample.sort_values(["cluster", "area_group", "year"])
df_sample
# In[31]:
pd.crosstab(df_sample.cluster, df_sample.area_group)
# In[32]:
pd.crosstab(df_sample.cluster, df_sample.area_group).max(axis=0)
# In[33]:
widths = np.asarray([3, 4, 8, 7])
coords_col = np.insert(np.cumsum(widths), 0, 0)[:-1]
coords_col
# In[34]:
heights = np.maximum(
np.ceil(
pd.crosstab(df_sample.cluster, df_sample.area_group).to_numpy() / widths
).astype(int),
1,
).max(axis=1)
heights
# In[35]:
coords_row = np.insert(np.cumsum(heights), 0, 0)[:-1]
coords_row
# In[36]:
sum(heights)
# In[37]:
sum(widths)
# 31 rows and 19 columns in total
# In[38]:
u = 84 # unit size
flip = False
# In[39]:
if not flip:
img_size = (sum(heights) * u, sum(widths) * u)
else:
img_size = (sum(widths) * u, sum(heights) * u)
# In[40]:
img_size
# In[41]:
img = np.ones(img_size + (3,), np.float32)
# img = np.zeros(img_size + (3,), np.float32)
# In[42]:
plot_bgr(pad_fp(visualize_biclust_cam(fp_full, 3), u, u, 1))
# In[43]:
df_sample[(df_sample.cluster == 0) & (df_sample.area_group == 2)]
# In[44]:
df_sample[(df_sample.cluster == 0) & (df_sample.area_group == 2)].ID.iloc[1]
# In[45]:
for ir, rr in enumerate(coords_row):
for ic, cc in enumerate(coords_col):
df_clust = df_sample[(df_sample.cluster == ir) & (df_sample.area_group == ic)]
for i in range(len(df_clust)):
r = i // widths[ic]
c = i - r * widths[ic]
id_ = df_clust.iloc[i].ID
clust = df_clust.iloc[i].cluster
img[
(rr + r) * u : (rr + r + 1) * u, (cc + c) * u : (cc + c + 1) * u
] = pad_fp(
visualize_biclust_cam(
fp_float_from_mono(
read_mono_from_image_unicode(dir_from + id_ + ".png")
),
clust,
),
u,
u,
1,
)
# In[46]:
fig = plt.figure(figsize=(11, 13), dpi=300)
ax = fig.gca()
im = plt.imshow(cv2.cvtColor(img, cv2.COLOR_BGR2RGB))
ax.set_xticks((coords_col + widths / 2) * u)
ax.set_xticklabels(
[
"One-room\n($\leq50\mathrm{m^2}$)",
"Small\n($\leq60\mathrm{m^2}$)",
"Medium\n($\leq85\mathrm{m^2}$)",
"Large\n($>85\mathrm{m^2}$)",
]
)
ax.set_yticks((coords_row + heights / 2 + 1 / 6) * u)
ax.set_yticklabels(range(1, biclusts.max() + 2))
ax.vlines(coords_col * u, 0, heights.sum() * u - 1, colors="k", lw=0.3)
ax.hlines(coords_row * u, 0, widths.sum() * u - 1, colors="k", lw=0.3)
fig.savefig("bam.png", bbox_inches="tight", pad_inches=0)
fig.savefig("bam.pdf", bbox_inches="tight", pad_inches=0)
# In[47]:
df_sample[(df_sample.cluster == 0)]
# 101160_113E
# 103915_112C
# 104127_107B
# 107903_113G
# 108838_117B
# In[48]:
def plot_bgr_scale(img):
size_x, size_y = img.shape[:2]
fig = plt.figure(figsize=(2 * size_x / 112, 2 * size_y / 112), dpi=300)
plt.axes().axis("off")
plt.imshow(cv2.cvtColor(img, cv2.COLOR_BGR2RGB))
plt.tight_layout()
# In[49]:
ir, ic, i = 15, 0, 0
u_single = 84 # 56 84 112
df_clust = df_sample[(df_sample.cluster == ir) & (df_sample.area_group == ic)]
id_ = df_clust.iloc[i].ID
print(id_)
clust = df_clust.iloc[i].cluster
plot_bgr_scale(
pad_fp(
visualize_biclust_cam(
fp_float_from_mono(read_mono_from_image_unicode(dir_from + id_ + ".png")),
clust,
),
u_single,
u_single,
1,
)
)
# In[53]:
def plot_bams(id_, types):
print(id_)
fp = fp_float_from_mono(read_mono_from_image_unicode(dir_from + id_ + ".png"))
size_y, size_x = np.fmax(fp.shape[:2], 56)
clust_name = [
"8090-1",
"8090-2",
"8090-3",
"9000-1",
"9000-2",
"9000-3",
"9000-4",
"00-1",
"00-2",
"00-3",
"00-4",
"0010-1",
"0010-2",
"0010-3",
"10-1",
"10-2",
]
    clusts = [t - 1 for t in types]  # 1-based type labels to 0-based cluster indices
fig, axs = plt.subplots(1, len(clusts), figsize=(11 / 4 * len(clusts), 5), dpi=300)
for i, clust in enumerate(clusts):
ax = axs[i]
img = pad_fp(visualize_biclust_cam(fp, clust), size_x, size_y, 1)
ax.imshow(cv2.cvtColor(img, cv2.COLOR_BGR2RGB))
ax.axis("off")
# title on bottom
# ax.set_title(clust_name[clust], y=-size_x / 56 / 10)
# title on top
ax.set_title(clust_name[clust], y=1)
plt.tight_layout()
return fig
# # 1980s: slab-type, corridor-type
# In[54]:
df_sample[
(df_sample.year >= 1980)
& (df_sample.year < 1990)
& (df_sample.cluster.isin([0, 1, 2]))
& (df_sample.area_group == 2)
]
# Corridor-type, medium
#
# Seoul Guro Jugong phase 1, 73.08㎡, 1986
# In[55]:
fig = plot_bams("137_96", [1, 2, 3])
# fig.savefig("bam.png", bbox_inches="tight", pad_inches=0)
# fig.savefig("bam.pdf", bbox_inches="tight", pad_inches=0)
# In[56]:
df_sample[
(df_sample.year >= 1980)
& (df_sample.year < 1990)
& (df_sample.cluster.isin([0, 1, 2]))
& (df_sample.area_group == 3)
]
# Corridor-type, large
#
# Seoul Apgujeong Hanyang phase 7, 106.22㎡, 1981
# In[57]:
fig = plot_bams("501_114A", [1, 2, 3])
# fig.savefig("bam.png", bbox_inches="tight", pad_inches=0)
# fig.savefig("bam.pdf", bbox_inches="tight", pad_inches=0)
# # 1990s: slab-type, staircase-type
# In[58]:
df_sample[(df_sample.cluster.isin([3])) & (df_sample.area_group == 2)]
# 3LDK, medium
#
# Cheonan Ilseong phase 3, 84.82㎡, 1994
# In[59]:
id_ = "7479_106"
fig1 = plot_bams(id_, [1, 2, 3])
fig2 = plot_bams(id_, range(4, 7 + 1))
# fig.savefig("bam.png", bbox_inches="tight", pad_inches=0)
# fig.savefig("bam.pdf", bbox_inches="tight", pad_inches=0)
# In[60]:
df_sample[
(df_sample.year >= 1990)
& (df_sample.year < 2000)
& (df_sample.cluster.isin(range(7 + 1)))
& (df_sample.Rooms == 4)
]
# 4LDK, large
#
# Incheon Yeonsu Hana phase 2, 99.42㎡, 1994
# In[61]:
id_ = "2292_116"
fig1 = plot_bams(id_, [1, 2, 3])
fig2 = plot_bams(id_, range(4, 7 + 1))
# fig.savefig("bam.png", bbox_inches="tight", pad_inches=0)
# fig.savefig("bam.pdf", bbox_inches="tight", pad_inches=0)
# # 2000s: balconies, rear-placed core
# In[62]:
df_sample[
(df_sample.year >= 2000)
& (df_sample.year < 2010)
& (df_sample.cluster.isin([9]))
& (df_sample.area_group == 2)
]
# Slab-type, medium
#
# Gyeonggi Donghwa Oxyzone phase 5,
# 84.58㎡,
# 2005
# In[63]:
id_ = "17566_118"
fig1 = plot_bams(id_, range(8, 11 + 1))
fig2 = plot_bams(id_, range(12, 14 + 1))
# # 2010s: tower-type, studio-type
# In[64]:
df_sample[(df_sample.cluster.isin([13])) & (df_sample.area_group == 2)]
# Tower-type, central unit, medium
#
# Seoul Seocho Foresta 5,
# 84.4㎡,
# 2014
# In[65]:
id_ = "107903_112B3"
fig1 = plot_bams(id_, range(12, 14 + 1))
fig2 = plot_bams(id_, range(15, 16 + 1))
# In[66]:
df_sample[(df_sample.cluster.isin([15])) & (df_sample.area_group == 2)]
# Tower-type, end unit, medium
#
# Cheonan Baekseok The Sharp,
# 84.25㎡,
# 2016
# In[67]:
id_ = "108523_111C"
fig1 = plot_bams(id_, range(12, 14 + 1))
fig2 = plot_bams(id_, range(15, 16 + 1))
# In[68]:
df_sample[
(df_sample.cluster.isin([15]))
& (df_sample.year >= 2010)
& (df_sample.area_group == 2)
]
# Mixed-type
# (L-shaped building with a staircase-type core)
#
# Sejong Garak complex 4 EG-the-1,
# 79.59㎡,
# 2014
# In[69]:
id_ = "107076_106C"
fig1 = plot_bams(id_, range(12, 14 + 1))
fig2 = plot_bams(id_, range(15, 16 + 1))
# In[70]:
df[(df.cluster.isin([14]))].Area.mean()
# In[71]:
df[(df.cluster.isin([14]))].Area.median()
# In[72]:
df[(df.cluster.isin([14])) & (df.year >= 2010) & (df.Area >= 23) & (df.Area <= 29)]
# Studio-type urban living housing
#
# Seoul Yeoksam Daemyung Bellion,
# 23.62㎡,
# 2012
# In[73]:
id_ = "104259_36G"
fig = plot_bams(id_, [3, 6, 15])
|
[
"numpy.fmax",
"fp_tensorflow.create_vgg_5y_model",
"numpy.argmax",
"pandas.read_csv",
"matplotlib.pyplot.axes",
"numpy.ones",
"matplotlib.pyplot.figure",
"matplotlib.pyplot.tight_layout",
"floorplan_analysis.read_mono_from_image_unicode",
"cv2.cvtColor",
"matplotlib.pyplot.imshow",
"numpy.cumsum",
"numpy.loadtxt",
"cv2.resize",
"floorplan_analysis.fp_float_from_mono",
"numpy.uint8",
"numpy.asarray",
"pandas.cut",
"numpy.squeeze",
"floorplan_analysis.pad_fp",
"pandas.crosstab",
"numpy.zeros",
"numpy.expand_dims",
"tensorflow.keras.backend.function",
"numpy.array"
] |
[((598, 619), 'fp_tensorflow.create_vgg_5y_model', 'create_vgg_5y_model', ([], {}), '()\n', (617, 619), False, 'from fp_tensorflow import create_vgg_5y_model\n'), ((1659, 1693), 'numpy.loadtxt', 'np.loadtxt', (['"""biclust_col.txt"""', 'int'], {}), "('biclust_col.txt', int)\n", (1669, 1693), True, 'import numpy as np\n'), ((2601, 2662), 'floorplan_analysis.read_mono_from_image_unicode', 'read_mono_from_image_unicode', (["(dir_from + '2888_118A' + '.png')"], {}), "(dir_from + '2888_118A' + '.png')\n", (2629, 2662), False, 'from floorplan_analysis import read_mono_from_image_unicode\n'), ((2673, 2697), 'floorplan_analysis.fp_float_from_mono', 'fp_float_from_mono', (['mono'], {}), '(mono)\n', (2691, 2697), False, 'from floorplan_analysis import fp_float_from_mono\n'), ((2703, 2726), 'floorplan_analysis.pad_fp', 'pad_fp', (['fp_full', '(56)', '(56)'], {}), '(fp_full, 56, 56)\n', (2709, 2726), False, 'from floorplan_analysis import pad_fp\n'), ((2916, 2941), 'cv2.resize', 'cv2.resize', (['cam', '(56, 56)'], {}), '(cam, (56, 56))\n', (2926, 2941), False, 'import cv2\n'), ((3142, 3167), 'cv2.resize', 'cv2.resize', (['cam', '(56, 56)'], {}), '(cam, (56, 56))\n', (3152, 3167), False, 'import cv2\n'), ((5553, 5579), 'pandas.read_csv', 'pd.read_csv', (['"""biclust.csv"""'], {}), "('biclust.csv')\n", (5564, 5579), True, 'import pandas as pd\n'), ((5599, 5653), 'pandas.cut', 'pd.cut', (['df.Area', '[0, 50, 60, 85, np.inf]'], {'labels': '(False)'}), '(df.Area, [0, 50, 60, 85, np.inf], labels=False)\n', (5605, 5653), True, 'import pandas as pd\n'), ((5852, 5904), 'pandas.crosstab', 'pd.crosstab', (['df_sample.cluster', 'df_sample.area_group'], {}), '(df_sample.cluster, df_sample.area_group)\n', (5863, 5904), True, 'import pandas as pd\n'), ((6007, 6031), 'numpy.asarray', 'np.asarray', (['[3, 4, 8, 7]'], {}), '([3, 4, 8, 7])\n', (6017, 6031), True, 'import numpy as np\n'), ((6646, 6682), 'numpy.ones', 'np.ones', (['(img_size + (3,))', 'np.float32'], {}), '(img_size + (3,), np.float32)\n', (6653, 6682), True, 'import numpy as np\n'), ((7783, 7820), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(11, 13)', 'dpi': '(300)'}), '(figsize=(11, 13), dpi=300)\n', (7793, 7820), True, 'import matplotlib.pyplot as plt\n'), ((1003, 1095), 'tensorflow.keras.backend.function', 'K.function', (['[model.layers[0].input]', '[final_conv_layer.output, model.layers[-1].output]'], {}), '([model.layers[0].input], [final_conv_layer.output, model.layers[\n -1].output])\n', (1013, 1095), True, 'import tensorflow.keras.backend as K\n'), ((1458, 1514), 'numpy.zeros', 'np.zeros', ([], {'dtype': 'np.float32', 'shape': 'conv_output.shape[0:2]'}), '(dtype=np.float32, shape=conv_output.shape[0:2])\n', (1466, 1514), True, 'import numpy as np\n'), ((1942, 1977), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(2, 2)', 'dpi': '(300)'}), '(figsize=(2, 2), dpi=300)\n', (1952, 1977), True, 'import matplotlib.pyplot as plt\n'), ((2062, 2080), 'matplotlib.pyplot.tight_layout', 'plt.tight_layout', ([], {}), '()\n', (2078, 2080), True, 'import matplotlib.pyplot as plt\n'), ((2124, 2159), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(2, 2)', 'dpi': '(300)'}), '(figsize=(2, 2), dpi=300)\n', (2134, 2159), True, 'import matplotlib.pyplot as plt\n'), ((2191, 2206), 'matplotlib.pyplot.imshow', 'plt.imshow', (['img'], {}), '(img)\n', (2201, 2206), True, 'import matplotlib.pyplot as plt\n'), ((2211, 2229), 'matplotlib.pyplot.tight_layout', 'plt.tight_layout', ([], {}), '()\n', (2227, 2229), True, 'import matplotlib.pyplot as plt\n'), ((2292, 2327), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(2, 2)', 'dpi': '(300)'}), '(figsize=(2, 2), dpi=300)\n', (2302, 2327), True, 'import matplotlib.pyplot as plt\n'), ((2359, 2385), 'matplotlib.pyplot.imshow', 'plt.imshow', (['img'], {'cmap': 'cmap'}), '(img, cmap=cmap)\n', (2369, 2385), True, 'import matplotlib.pyplot as plt\n'), ((2390, 2408), 'matplotlib.pyplot.tight_layout', 'plt.tight_layout', ([], {}), '()\n', (2406, 2408), True, 'import matplotlib.pyplot as plt\n'), ((3006, 3025), 'numpy.uint8', 'np.uint8', (['(255 * cam)'], {}), '(255 * cam)\n', (3014, 3025), True, 'import numpy as np\n'), ((3249, 3268), 'numpy.uint8', 'np.uint8', (['(255 * cam)'], {}), '(255 * cam)\n', (3257, 3268), True, 'import numpy as np\n'), ((3433, 3587), 'numpy.array', 'np.array', (['[[0.0, 0.0, 0.0, 0.0], [0.0, 0.33, 0.0, 0.0], [1.0, 0.25, 0.0, 0.0], [0.83,\n 0.87, 0.0, 0.0], [0.0, 0.26, 1.0, 0.0], [0.0, 0.81, 0.76, 0.0]]'], {}), '([[0.0, 0.0, 0.0, 0.0], [0.0, 0.33, 0.0, 0.0], [1.0, 0.25, 0.0, 0.0\n ], [0.83, 0.87, 0.0, 0.0], [0.0, 0.26, 1.0, 0.0], [0.0, 0.81, 0.76, 0.0]])\n', (3441, 3587), True, 'import numpy as np\n'), ((4266, 4284), 'floorplan_analysis.pad_fp', 'pad_fp', (['fp', '(56)', '(56)'], {}), '(fp, 56, 56)\n', (4272, 4284), False, 'from floorplan_analysis import pad_fp\n'), ((4325, 4350), 'cv2.resize', 'cv2.resize', (['cam', '(56, 56)'], {}), '(cam, (56, 56))\n', (4335, 4350), False, 'import cv2\n'), ((4484, 4537), 'floorplan_analysis.pad_fp', 'pad_fp', (['heatmap', 'fp_light.shape[1]', 'fp_light.shape[0]'], {}), '(heatmap, fp_light.shape[1], fp_light.shape[0])\n', (4490, 4537), False, 'from floorplan_analysis import pad_fp\n'), ((4900, 4918), 'floorplan_analysis.pad_fp', 'pad_fp', (['fp', '(56)', '(56)'], {}), '(fp, 56, 56)\n', (4906, 4918), False, 'from floorplan_analysis import pad_fp\n'), ((4973, 4998), 'cv2.resize', 'cv2.resize', (['cam', '(56, 56)'], {}), '(cam, (56, 56))\n', (4983, 4998), False, 'import cv2\n'), ((7852, 7888), 'cv2.cvtColor', 'cv2.cvtColor', (['img', 'cv2.COLOR_BGR2RGB'], {}), '(img, cv2.COLOR_BGR2RGB)\n', (7864, 7888), False, 'import cv2\n'), ((8703, 8768), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(2 * size_x / 112, 2 * size_y / 112)', 'dpi': '(300)'}), '(figsize=(2 * size_x / 112, 2 * size_y / 112), dpi=300)\n', (8713, 8768), True, 'import matplotlib.pyplot as plt\n'), ((8853, 8871), 'matplotlib.pyplot.tight_layout', 'plt.tight_layout', ([], {}), '()\n', (8869, 8871), True, 'import matplotlib.pyplot as plt\n'), ((9474, 9499), 'numpy.fmax', 'np.fmax', (['fp.shape[:2]', '(56)'], {}), '(fp.shape[:2], 56)\n', (9481, 9499), True, 'import numpy as np\n'), ((10310, 10328), 'matplotlib.pyplot.tight_layout', 'plt.tight_layout', ([], {}), '()\n', (10326, 10328), True, 'import matplotlib.pyplot as plt\n'), ((1147, 1168), 'numpy.expand_dims', 'np.expand_dims', (['fp', '(0)'], {}), '(fp, 0)\n', (1161, 1168), True, 'import numpy as np\n'), ((1181, 1212), 'numpy.squeeze', 'np.squeeze', (['conv_output'], {'axis': '(0)'}), '(conv_output, axis=0)\n', (1191, 1212), True, 'import numpy as np\n'), ((1214, 1235), 'numpy.argmax', 'np.argmax', (['prediction'], {}), '(prediction)\n', (1223, 1235), True, 'import numpy as np\n'), ((2020, 2056), 'cv2.cvtColor', 'cv2.cvtColor', (['img', 'cv2.COLOR_BGR2RGB'], {}), '(img, cv2.COLOR_BGR2RGB)\n', (2032, 2056), False, 'import cv2\n'), ((4427, 4446), 'numpy.uint8', 'np.uint8', (['(255 * cam)'], {}), '(255 * cam)\n', (4435, 4446), True, 'import numpy as np\n'), ((5075, 5094), 'numpy.uint8', 'np.uint8', (['(255 * cam)'], {}), '(255 * cam)\n', (5083, 5094), True, 'import numpy as np\n'), ((5919, 5971), 'pandas.crosstab', 'pd.crosstab', (['df_sample.cluster', 'df_sample.area_group'], {}), '(df_sample.cluster, df_sample.area_group)\n', (5930, 5971), True, 'import pandas as pd\n'), ((6056, 6073), 'numpy.cumsum', 'np.cumsum', (['widths'], {}), '(widths)\n', (6065, 6073), True, 'import numpy as np\n'), ((6312, 6330), 'numpy.cumsum', 'np.cumsum', (['heights'], {}), '(heights)\n', (6321, 6330), True, 'import numpy as np\n'), ((8811, 8847), 'cv2.cvtColor', 'cv2.cvtColor', (['img', 'cv2.COLOR_BGR2RGB'], {}), '(img, cv2.COLOR_BGR2RGB)\n', (8823, 8847), False, 'import cv2\n'), ((9398, 9451), 'floorplan_analysis.read_mono_from_image_unicode', 'read_mono_from_image_unicode', (["(dir_from + id_ + '.png')"], {}), "(dir_from + id_ + '.png')\n", (9426, 9451), False, 'from floorplan_analysis import read_mono_from_image_unicode\n'), ((1982, 1992), 'matplotlib.pyplot.axes', 'plt.axes', ([], {}), '()\n', (1990, 1992), True, 'import matplotlib.pyplot as plt\n'), ((2164, 2174), 'matplotlib.pyplot.axes', 'plt.axes', ([], {}), '()\n', (2172, 2174), True, 'import matplotlib.pyplot as plt\n'), ((2332, 2342), 'matplotlib.pyplot.axes', 'plt.axes', ([], {}), '()\n', (2340, 2342), True, 'import matplotlib.pyplot as plt\n'), ((3934, 3964), 'numpy.array', 'np.array', (['[1.0, 1.0, 1.0, 1.0]'], {}), '([1.0, 1.0, 1.0, 1.0])\n', (3942, 3964), True, 'import numpy as np\n'), ((4196, 4236), 'cv2.cvtColor', 'cv2.cvtColor', (['fp_rgba', 'cv2.COLOR_RGB2Lab'], {}), '(fp_rgba, cv2.COLOR_RGB2Lab)\n', (4208, 4236), False, 'import cv2\n'), ((4650, 4677), 'numpy.expand_dims', 'np.expand_dims', (['fp_light', '(2)'], {}), '(fp_light, 2)\n', (4664, 4677), True, 'import numpy as np\n'), ((4830, 4870), 'cv2.cvtColor', 'cv2.cvtColor', (['fp_rgba', 'cv2.COLOR_RGB2Lab'], {}), '(fp_rgba, cv2.COLOR_RGB2Lab)\n', (4842, 4870), False, 'import cv2\n'), ((5359, 5386), 'numpy.expand_dims', 'np.expand_dims', (['fp_light', '(2)'], {}), '(fp_light, 2)\n', (5373, 5386), True, 'import numpy as np\n'), ((8773, 8783), 'matplotlib.pyplot.axes', 'plt.axes', ([], {}), '()\n', (8781, 8783), True, 'import matplotlib.pyplot as plt\n'), ((10085, 10121), 'cv2.cvtColor', 'cv2.cvtColor', (['img', 'cv2.COLOR_BGR2RGB'], {}), '(img, cv2.COLOR_BGR2RGB)\n', (10097, 10121), False, 'import cv2\n'), ((3968, 3981), 'numpy.array', 'np.array', (['fps'], {}), '(fps)\n', (3976, 3981), True, 'import numpy as np\n'), ((9173, 9226), 'floorplan_analysis.read_mono_from_image_unicode', 'read_mono_from_image_unicode', (["(dir_from + id_ + '.png')"], {}), "(dir_from + id_ + '.png')\n", (9201, 9226), False, 'from floorplan_analysis import read_mono_from_image_unicode\n'), ((7569, 7622), 'floorplan_analysis.read_mono_from_image_unicode', 'read_mono_from_image_unicode', (["(dir_from + id_ + '.png')"], {}), "(dir_from + id_ + '.png')\n", (7597, 7622), False, 'from floorplan_analysis import read_mono_from_image_unicode\n'), ((6154, 6206), 'pandas.crosstab', 'pd.crosstab', (['df_sample.cluster', 'df_sample.area_group'], {}), '(df_sample.cluster, df_sample.area_group)\n', (6165, 6206), True, 'import pandas as pd\n')]
|
#!/usr/bin/env python
#
# <NAME> <<EMAIL>>
#
from __future__ import print_function
standalone = True
import unittestX as unittest
import journal
debug = journal.debug( "scattererxml_TestCase" )
warning = journal.warning( "scattererxml_TestCase" )
scattererxml = 'scatterers/fccNi/Ni-scatterer-SQEkernel.xml'
class scattererxml_TestCase(unittest.TestCase):
def test1(self):
'mccomponents.sample.kernelxml.parser'
from mccomponents.sample.kernelxml import parse_file
scatterer = parse_file( scattererxml )
kernel = scatterer.kernel()
self.assertTrue( isKernel( kernel ) )
return
def test2(self):
'mccomponents.sample.kernelxml.renderer'
sqehisth5 = 'sqehist.h5'
outputs = [sqehisth5]
_remove( outputs )
from mccomponents.sample.kernelxml import parse_file, render
scatterer = parse_file( scattererxml )
renderedxml = "%s.rendered" % scattererxml
        with open(renderedxml, 'w') as f:
            print('\n'.join(render(scatterer)), file=f)
scatterer1 = parse_file( renderedxml )
return
pass # end of scattererxml_TestCase
def _remove( files ):
import os
for path in files:
if os.path.exists( path ):
if not os.path.isfile(path):
raise IOError("%s is not a file" % path)
os.remove( path )
pass
continue
return
def isKernel(candidate):
from mccomponents.homogeneous_scatterer.Kernel import Kernel
return isinstance(candidate, Kernel)
def pysuite():
suite1 = unittest.makeSuite(scattererxml_TestCase)
return unittest.TestSuite( (suite1,) )
def main(): unittest.main()
if __name__ == "__main__":
main()
# version
__id__ = "$Id$"
# End of file
|
[
"os.remove",
"journal.debug",
"mccomponents.sample.kernelxml.parse_file",
"unittestX.main",
"os.path.exists",
"unittestX.makeSuite",
"mccomponents.sample.kernelxml.render",
"os.path.isfile",
"journal.warning",
"unittestX.TestSuite"
] |
[((158, 196), 'journal.debug', 'journal.debug', (['"""scattererxml_TestCase"""'], {}), "('scattererxml_TestCase')\n", (171, 196), False, 'import journal\n'), ((209, 249), 'journal.warning', 'journal.warning', (['"""scattererxml_TestCase"""'], {}), "('scattererxml_TestCase')\n", (224, 249), False, 'import journal\n'), ((1615, 1656), 'unittestX.makeSuite', 'unittest.makeSuite', (['scattererxml_TestCase'], {}), '(scattererxml_TestCase)\n', (1633, 1656), True, 'import unittestX as unittest\n'), ((1668, 1697), 'unittestX.TestSuite', 'unittest.TestSuite', (['(suite1,)'], {}), '((suite1,))\n', (1686, 1697), True, 'import unittestX as unittest\n'), ((1714, 1729), 'unittestX.main', 'unittest.main', ([], {}), '()\n', (1727, 1729), True, 'import unittestX as unittest\n'), ((516, 540), 'mccomponents.sample.kernelxml.parse_file', 'parse_file', (['scattererxml'], {}), '(scattererxml)\n', (526, 540), False, 'from mccomponents.sample.kernelxml import parse_file, render\n'), ((914, 938), 'mccomponents.sample.kernelxml.parse_file', 'parse_file', (['scattererxml'], {}), '(scattererxml)\n', (924, 938), False, 'from mccomponents.sample.kernelxml import parse_file, render\n'), ((1095, 1118), 'mccomponents.sample.kernelxml.parse_file', 'parse_file', (['renderedxml'], {}), '(renderedxml)\n', (1105, 1118), False, 'from mccomponents.sample.kernelxml import parse_file, render\n'), ((1255, 1275), 'os.path.exists', 'os.path.exists', (['path'], {}), '(path)\n', (1269, 1275), False, 'import os\n'), ((1389, 1404), 'os.remove', 'os.remove', (['path'], {}), '(path)\n', (1398, 1404), False, 'import os\n'), ((1025, 1042), 'mccomponents.sample.kernelxml.render', 'render', (['scatterer'], {}), '(scatterer)\n', (1031, 1042), False, 'from mccomponents.sample.kernelxml import parse_file, render\n'), ((1298, 1318), 'os.path.isfile', 'os.path.isfile', (['path'], {}), '(path)\n', (1312, 1318), False, 'import os\n')]
|
from sklearn import linear_model as lm
import scipy
import numpy as np
import pandas as pd
from skorecard.utils import convert_sparse_matrix
from sklearn.utils.validation import check_is_fitted
class LogisticRegression(lm.LogisticRegression):
"""Extended Logistic Regression.
Extends [sklearn.linear_model.LogisticRegression](https://scikit-learn.org/stable/modules/generated/sklearn.linear_model.LogisticRegression.html).
This class provides the following extra statistics, calculated on `.fit()` and accessible via `.get_stats()`:
- `cov_matrix_`: covariance matrix for the estimated parameters.
- `std_err_intercept_`: estimated uncertainty for the intercept
- `std_err_coef_`: estimated uncertainty for the coefficients
- `z_intercept_`: estimated z-statistic for the intercept
- `z_coef_`: estimated z-statistic for the coefficients
- `p_value_intercept_`: estimated p-value for the intercept
- `p_value_coef_`: estimated p-value for the coefficients
Example:
```python
from skorecard.datasets import load_uci_credit_card
from skorecard.bucketers import EqualFrequencyBucketer
from skorecard.linear_model import LogisticRegression
from sklearn.pipeline import Pipeline
from sklearn.preprocessing import OneHotEncoder
X, y = load_uci_credit_card(return_X_y=True)
pipeline = Pipeline([
('bucketer', EqualFrequencyBucketer(n_bins=10)),
('clf', LogisticRegression(calculate_stats=True))
])
pipeline.fit(X, y)
assert pipeline.named_steps['clf'].p_val_coef_[0][0] > 0
pipeline.named_steps['clf'].get_stats()
```
An example output of `.get_stats()`:
Index | Coef. | Std.Err | z | Pz
--------- | ----------| ---------| ----------| ------------
const | -0.537571 | 0.096108 | -5.593394 | 2.226735e-08
EDUCATION | 0.010091 | 0.044874 | 0.224876 | 8.220757e-01
""" # noqa
def __init__(
self,
penalty="l2",
calculate_stats=False,
dual=False,
tol=0.0001,
C=1.0,
fit_intercept=True,
intercept_scaling=1,
class_weight=None,
random_state=None,
solver="lbfgs",
max_iter=100,
multi_class="auto",
verbose=0,
warm_start=False,
n_jobs=None,
l1_ratio=None,
):
"""
Extends [sklearn.linear_model.LogisticRegression.fit()](https://scikit-learn.org/stable/modules/generated/sklearn.linear_model.LogisticRegression.html).
Args:
calculate_stats (bool): If true, calculate statistics like standard error during fit, accessible with .get_stats()
""" # noqa
super(LogisticRegression, self).__init__(
penalty=penalty,
dual=dual,
tol=tol,
C=C,
fit_intercept=fit_intercept,
intercept_scaling=intercept_scaling,
class_weight=class_weight,
random_state=random_state,
solver=solver,
max_iter=max_iter,
multi_class=multi_class,
verbose=verbose,
warm_start=warm_start,
n_jobs=n_jobs,
l1_ratio=l1_ratio,
)
self.calculate_stats = calculate_stats
def fit(self, X, y, sample_weight=None, calculate_stats=False, **kwargs):
"""
Fit the model.
Overwrites [sklearn.linear_model.LogisticRegression.fit()](https://scikit-learn.org/stable/modules/generated/sklearn.linear_model.LogisticRegression.html).
In addition to the standard fit by sklearn, this function will compute the covariance of the coefficients.
Args:
X (array-like, sparse matrix): Matrix of shape (n_samples, n_features)
Training vector, where n_samples is the number of samples and
n_features is the number of features.
y (array-like): of shape (n_samples,)
Target vector relative to X.
sample_weight (array-like): of shape (n_samples,) default=None
Array of weights that are assigned to individual samples.
If not provided, then each sample is given unit weight.
calculate_stats (bool): If true, calculate statistics like standard error during fit, accessible with .get_stats()
Returns:
self (LogisticRegression): Fitted estimator.
""" # noqa
if not self.calculate_stats and not calculate_stats:
return super().fit(X, y, sample_weight=sample_weight, **kwargs)
X = convert_sparse_matrix(X)
if isinstance(X, pd.DataFrame):
self.names_ = ["const"] + [f for f in X.columns]
else:
self.names_ = ["const"] + [f"x{i}" for i in range(X.shape[1])]
lr = super().fit(X, y, sample_weight=sample_weight, **kwargs)
predProbs = self.predict_proba(X)
# Design matrix -- add column of 1's at the beginning of your X matrix
if lr.fit_intercept:
X_design = np.hstack([np.ones((X.shape[0], 1)), X])
else:
X_design = X
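        # covariance of the coefficient estimates is the inverse of the
        # observed Fisher information X_design^T W X_design, where
        # W = diag(p_i * (1 - p_i)); p below is the row-wise product of the
        # two class probabilities, i.e. p_i * (1 - p_i)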
p = np.product(predProbs, axis=1)
self.cov_matrix_ = np.linalg.inv((X_design * p[..., np.newaxis]).T @ X_design)
std_err = np.sqrt(np.diag(self.cov_matrix_)).reshape(1, -1)
# In case fit_intercept is set to True, then in the std_error array
# Index 0 corresponds to the intercept, from index 1 onwards it relates to the coefficients
# If fit intercept is False, then all the values are related to the coefficients
if lr.fit_intercept:
self.std_err_intercept_ = std_err[:, 0]
self.std_err_coef_ = std_err[:, 1:][0]
self.z_intercept_ = self.intercept_ / self.std_err_intercept_
# Get p-values under the gaussian assumption
self.p_val_intercept_ = scipy.stats.norm.sf(abs(self.z_intercept_)) * 2
else:
self.std_err_intercept_ = np.array([np.nan])
self.std_err_coef_ = std_err[0]
self.z_intercept_ = np.array([np.nan])
# Get p-values under the gaussian assumption
self.p_val_intercept_ = np.array([np.nan])
self.z_coef_ = self.coef_ / self.std_err_coef_
self.p_val_coef_ = scipy.stats.norm.sf(abs(self.z_coef_)) * 2
return self
def get_stats(self) -> pd.DataFrame:
"""
Puts the summary statistics of the fit() function into a pandas DataFrame.
Returns:
data (pandas DataFrame): The statistics dataframe, indexed by
the column name
"""
check_is_fitted(self)
if not hasattr(self, "std_err_coef_"):
msg = "Summary statistics were not calculated on .fit(). Options to fix:\n"
msg += "\t- Re-fit using .fit(X, y, calculate_stats=True)\n"
msg += "\t- Re-inititialize using LogisticRegression(calculate_stats=True)"
raise AssertionError(msg)
data = {
"Coef.": (self.intercept_.tolist() + self.coef_.tolist()[0]),
"Std.Err": (self.std_err_intercept_.tolist() + self.std_err_coef_.tolist()),
"z": (self.z_intercept_.tolist() + self.z_coef_.tolist()[0]),
"P>|z|": (self.p_val_intercept_.tolist() + self.p_val_coef_.tolist()[0]),
}
return pd.DataFrame(data, index=self.names_)
|
[
"pandas.DataFrame",
"numpy.ones",
"sklearn.utils.validation.check_is_fitted",
"numpy.product",
"numpy.linalg.inv",
"numpy.array",
"numpy.diag",
"skorecard.utils.convert_sparse_matrix"
] |
[((4592, 4616), 'skorecard.utils.convert_sparse_matrix', 'convert_sparse_matrix', (['X'], {}), '(X)\n', (4613, 4616), False, 'from skorecard.utils import convert_sparse_matrix\n'), ((5146, 5175), 'numpy.product', 'np.product', (['predProbs'], {'axis': '(1)'}), '(predProbs, axis=1)\n', (5156, 5175), True, 'import numpy as np\n'), ((5203, 5262), 'numpy.linalg.inv', 'np.linalg.inv', (['((X_design * p[..., np.newaxis]).T @ X_design)'], {}), '((X_design * p[..., np.newaxis]).T @ X_design)\n', (5216, 5262), True, 'import numpy as np\n'), ((6656, 6677), 'sklearn.utils.validation.check_is_fitted', 'check_is_fitted', (['self'], {}), '(self)\n', (6671, 6677), False, 'from sklearn.utils.validation import check_is_fitted\n'), ((7380, 7417), 'pandas.DataFrame', 'pd.DataFrame', (['data'], {'index': 'self.names_'}), '(data, index=self.names_)\n', (7392, 7417), True, 'import pandas as pd\n'), ((6000, 6018), 'numpy.array', 'np.array', (['[np.nan]'], {}), '([np.nan])\n', (6008, 6018), True, 'import numpy as np\n'), ((6096, 6114), 'numpy.array', 'np.array', (['[np.nan]'], {}), '([np.nan])\n', (6104, 6114), True, 'import numpy as np\n'), ((6209, 6227), 'numpy.array', 'np.array', (['[np.nan]'], {}), '([np.nan])\n', (6217, 6227), True, 'import numpy as np\n'), ((5064, 5088), 'numpy.ones', 'np.ones', (['(X.shape[0], 1)'], {}), '((X.shape[0], 1))\n', (5071, 5088), True, 'import numpy as np\n'), ((5289, 5314), 'numpy.diag', 'np.diag', (['self.cov_matrix_'], {}), '(self.cov_matrix_)\n', (5296, 5314), True, 'import numpy as np\n')]
|
import os
import unittest
from imdetector.image import SuspiciousImage
from imdetector.clipping import Clipping
DIR = os.getcwd()
class TestClipping(unittest.TestCase):
def test_paintout1(self):
expected = 1
img1 = SuspiciousImage(os.path.join(DIR, 'test/image/yrc_5_po.png'))
detector = Clipping()
actual = detector.detect(img1)
detector.save_image(
os.path.join(
DIR, 'test/image/paintout_result.jpg'))
self.assertEqual(expected, actual)
def test_paintout0(self):
expected = 0
img1 = SuspiciousImage(os.path.join(DIR, 'test/image/yrc_16.png'))
detector = Clipping()
actual = detector.detect(img1)
self.assertEqual(expected, actual)
if __name__ == "__main__":
unittest.main()
|
[
"os.getcwd",
"unittest.main",
"os.path.join",
"imdetector.clipping.Clipping"
] |
[((120, 131), 'os.getcwd', 'os.getcwd', ([], {}), '()\n', (129, 131), False, 'import os\n'), ((796, 811), 'unittest.main', 'unittest.main', ([], {}), '()\n', (809, 811), False, 'import unittest\n'), ((320, 330), 'imdetector.clipping.Clipping', 'Clipping', ([], {}), '()\n', (328, 330), False, 'from imdetector.clipping import Clipping\n'), ((670, 680), 'imdetector.clipping.Clipping', 'Clipping', ([], {}), '()\n', (678, 680), False, 'from imdetector.clipping import Clipping\n'), ((255, 299), 'os.path.join', 'os.path.join', (['DIR', '"""test/image/yrc_5_po.png"""'], {}), "(DIR, 'test/image/yrc_5_po.png')\n", (267, 299), False, 'import os\n'), ((411, 462), 'os.path.join', 'os.path.join', (['DIR', '"""test/image/paintout_result.jpg"""'], {}), "(DIR, 'test/image/paintout_result.jpg')\n", (423, 462), False, 'import os\n'), ((607, 649), 'os.path.join', 'os.path.join', (['DIR', '"""test/image/yrc_16.png"""'], {}), "(DIR, 'test/image/yrc_16.png')\n", (619, 649), False, 'import os\n')]
|
import FWCore.ParameterSet.Config as cms
METSignificanceParams = cms.PSet(
# jet resolutions
jetThreshold = cms.double(15),
#jet-lepton matching dR
dRMatch = cms.double(0.4),
# eta bins for jet resolution tuning
jeta = cms.vdouble(0.8, 1.3, 1.9, 2.5),
# tuning parameters
#Run I, based on 53X / JME-13-003
#jpar = cms.vdouble(1.20,1.13,1.03,0.96,1.08),
#pjpar = cms.vdouble(-1.9,0.6383)
#Run II MC, based on 76X
#https://indico.cern.ch/event/527789/contributions/2160488/attachments/1271716/1884792/nmirman_20160511.pdf
jpar = cms.vdouble(1.29,1.19,1.07,1.13,1.12),
pjpar = cms.vdouble(-0.04,0.6504),
)
METSignificanceParams_Data=cms.PSet(
# jet resolutions
jetThreshold = cms.double(15),
#jet-lepton matching dR
dRMatch = cms.double(0.4),
# eta bins for jet resolution tuning
jeta = cms.vdouble(0.8, 1.3, 1.9, 2.5),
# tuning parameters
#Run I, based on 53X / JME-13-003
#jpar = cms.vdouble(1.20,1.13,1.03,0.96,1.08),
#pjpar = cms.vdouble(-1.9,0.6383)
#Run II data, based on 76X
#https://indico.cern.ch/event/527789/contributions/2160488/attachments/1271716/1884792/nmirman_20160511.pdf
jpar = cms.vdouble(1.26,1.14,1.13,1.13,1.06),
pjpar = cms.vdouble(-3.3,0.5961),
)
|
[
"FWCore.ParameterSet.Config.double",
"FWCore.ParameterSet.Config.vdouble"
] |
[((122, 136), 'FWCore.ParameterSet.Config.double', 'cms.double', (['(15)'], {}), '(15)\n', (132, 136), True, 'import FWCore.ParameterSet.Config as cms\n'), ((191, 206), 'FWCore.ParameterSet.Config.double', 'cms.double', (['(0.4)'], {}), '(0.4)\n', (201, 206), True, 'import FWCore.ParameterSet.Config as cms\n'), ((265, 296), 'FWCore.ParameterSet.Config.vdouble', 'cms.vdouble', (['(0.8)', '(1.3)', '(1.9)', '(2.5)'], {}), '(0.8, 1.3, 1.9, 2.5)\n', (276, 296), True, 'import FWCore.ParameterSet.Config as cms\n'), ((616, 657), 'FWCore.ParameterSet.Config.vdouble', 'cms.vdouble', (['(1.29)', '(1.19)', '(1.07)', '(1.13)', '(1.12)'], {}), '(1.29, 1.19, 1.07, 1.13, 1.12)\n', (627, 657), True, 'import FWCore.ParameterSet.Config as cms\n'), ((669, 695), 'FWCore.ParameterSet.Config.vdouble', 'cms.vdouble', (['(-0.04)', '(0.6504)'], {}), '(-0.04, 0.6504)\n', (680, 695), True, 'import FWCore.ParameterSet.Config as cms\n'), ((788, 802), 'FWCore.ParameterSet.Config.double', 'cms.double', (['(15)'], {}), '(15)\n', (798, 802), True, 'import FWCore.ParameterSet.Config as cms\n'), ((857, 872), 'FWCore.ParameterSet.Config.double', 'cms.double', (['(0.4)'], {}), '(0.4)\n', (867, 872), True, 'import FWCore.ParameterSet.Config as cms\n'), ((931, 962), 'FWCore.ParameterSet.Config.vdouble', 'cms.vdouble', (['(0.8)', '(1.3)', '(1.9)', '(2.5)'], {}), '(0.8, 1.3, 1.9, 2.5)\n', (942, 962), True, 'import FWCore.ParameterSet.Config as cms\n'), ((1284, 1325), 'FWCore.ParameterSet.Config.vdouble', 'cms.vdouble', (['(1.26)', '(1.14)', '(1.13)', '(1.13)', '(1.06)'], {}), '(1.26, 1.14, 1.13, 1.13, 1.06)\n', (1295, 1325), True, 'import FWCore.ParameterSet.Config as cms\n'), ((1337, 1362), 'FWCore.ParameterSet.Config.vdouble', 'cms.vdouble', (['(-3.3)', '(0.5961)'], {}), '(-3.3, 0.5961)\n', (1348, 1362), True, 'import FWCore.ParameterSet.Config as cms\n')]
|
from django.conf.urls import include, url
from rest_framework import routers
from .views import LinkViewSet, DownloadView, IndexView
router = routers.DefaultRouter()
router.register(r'', LinkViewSet, base_name='files')
urlpatterns = [
url(r'^files/', include(router.urls)),
url(r'^files/(?P<pk>\d+)/download/(?P<name>.+)$', DownloadView.as_view(),
name='download'),
url(r'^$', IndexView.as_view(), name='index'),
]
|
[
"django.conf.urls.include",
"rest_framework.routers.DefaultRouter"
] |
[((144, 167), 'rest_framework.routers.DefaultRouter', 'routers.DefaultRouter', ([], {}), '()\n', (165, 167), False, 'from rest_framework import routers\n'), ((258, 278), 'django.conf.urls.include', 'include', (['router.urls'], {}), '(router.urls)\n', (265, 278), False, 'from django.conf.urls import include, url\n')]
|
#!/usr/bin/env python
#http://code.activestate.com/recipes/528878-ordered-set/
import weakref
class oset(object):
"""
A linked-list with a uniqueness constraint and O(1) lookups/removal.
Modification during iteration is partially supported. If you
remove the just yielded element, it will go on to what was the
next element. If you remove the next element, it will use the
new next element. If you remove both, you get an error.
"""
def __init__(self, iterable=(), allow_move=True):
self._map = {}
self._start = _SentinalNode()
self._end = _SentinalNode()
self._start.next = self._end
self._end.prev = self._start
self._allow_move = allow_move
self.extend(iterable)
def __contains__(self, element):
return element in self._map
def __eq__(self, other):
raise TypeError("OrderedSet does not support comparisons")
def __hash__(self):
raise TypeError("OrderedSet is not hashable")
def __iter__(self):
curnode = self._start
nextnode = curnode.next
while True:
if hasattr(curnode, 'next'):
curnode = curnode.next
elif hasattr(nextnode, 'next'):
curnode = nextnode
else:
raise RuntimeError("OrderedSet modified inappropriately "
"during iteration")
if isinstance(curnode, _SentinalNode):
return
nextnode = curnode.next
yield curnode.content
def __reversed__(self):
curnode = self._end
prevnode = curnode.prev
while True:
if hasattr(curnode, 'prev'):
curnode = curnode.prev
elif hasattr(prevnode, 'prev'):
curnode = prevnode
else:
raise RuntimeError("OrderedSet modified inappropriately "
"during iteration")
if isinstance(curnode, _SentinalNode):
return
prevnode = curnode.prev
yield curnode.content
def __len__(self):
return len(self._map)
def __repr__(self):
return '%s(%r)' % (self.__class__.__name__, list(self))
def add(self, element):
"""An alias for :func:`spyne.util.oset.old.oset.append`."""
self.append(element)
def append(self, element):
"""Add an element to the right side of the OrderedSet."""
self._insertatnode(self._end.prev, element)
def appendleft(self, element):
"""Add an element to the left side of the OrderedSet."""
self._insertatnode(self._start, element)
def clear(self):
"""Remove all elements from the OrderedSet."""
while self:
self.pop()
def extend(self, iterable):
"""Extend the right side of the OrderedSet with elements from the iterable."""
for element in iterable:
self.append(element)
def extendleft(self, iterable):
"""Extend the left side of the OrderedSet with elements from the iterable."""
for element in iterable:
self.appendleft(element)
def insertleft(self, poselement, element):
"""Inserts element immediately left of poselement's position."""
self._insertatnode(self._map[poselement].prev, element)
def insertright(self, poselement, element):
"""Inserts element immediately right of poselement's position."""
self._insertatnode(self._map[poselement], element)
def _insertatnode(self, node, element):
left = node
right = node.next
#start by determining if element exists already. Need to be careful
#if node or node.next contains the element to be added
existingNode = self._map.get(element)
if existingNode:
if not self._allow_move:
raise ValueError("element already exists")
if existingNode == left or existingNode == right:
return #nothing to do. NB more than just optimisation
#not optimal. element removed from map only to be added again
self.remove(element)
newnode = _Node()
newnode.content = element
newnode.prev = right.prev
newnode.next = right
right.prev = newnode
left.next = newnode
self._map[element] = newnode
def pop(self):
"""Remove and return the rightmost element."""
element = self._end.prev.content
self.remove(element)
return element
def popleft(self):
"""Remove and return the leftmost element."""
element = self._start.next.content
self.remove(element)
return element
def remove(self, element):
"""Remove element from the OrderedSet."""
node = self._map.pop(element)
assert not isinstance(node, _SentinalNode)
left = node.prev
right = node.next
left.next = right
right.prev = node.prev
del node.prev
del node.next
class _Node(object):
__slots__ = '_prev', 'next', 'content', '__weakref__'
# A weakref is used for prev so as to avoid creating cycles.
def _prev_get(self):
return self._prev()
def _prev_set(self, value):
self._prev = weakref.ref(value)
def _prev_del(self):
del self._prev
prev = property(_prev_get, _prev_set, _prev_del)
class _SentinalNode(_Node):
__slots__ = []
__test__ = {
'__foo__': """
>>> oset(range(10))
oset([0, 1, 2, 3, 4, 5, 6, 7, 8, 9])
>>> list(reversed(oset(range(10))))
[9, 8, 7, 6, 5, 4, 3, 2, 1, 0]
>>> stuff = oset()
>>> stuff.extendleft(range(20, 25))
>>> stuff.pop()
20
>>> stuff
oset([24, 23, 22, 21])
>>> stuff.insertleft(23, 99)
>>> stuff
oset([24, 99, 23, 22, 21])
>>> stuff.remove(21)
>>> stuff
oset([24, 99, 23, 22])
>>> len(stuff)
4
>>> 23 in stuff
True
>>> 44 in stuff
False
>>> oset([1, 2, 3, 2])
oset([1, 3, 2])
>>> oset([1, 2, 3, 2], allow_move=False)
Traceback (most recent call last):
...
ValueError: element already exists
""",
}
def _test():
import doctest
doctest.testmod()
if __name__ == '__main__':
_test()
|
[
"weakref.ref",
"doctest.testmod"
] |
[((6355, 6372), 'doctest.testmod', 'doctest.testmod', ([], {}), '()\n', (6370, 6372), False, 'import doctest\n'), ((5309, 5327), 'weakref.ref', 'weakref.ref', (['value'], {}), '(value)\n', (5320, 5327), False, 'import weakref\n')]
|
"""
Implements deep maxent IRL (Wulfmeier et al.) in a general, feature-type
agnostic way.
"""
import sys
import random
from pathlib import Path
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.optim import Adam
from tensorboardX import SummaryWriter
sys.path.insert(0, "..")
from neural_nets.base_network import BaseNN
from irlmethods.irlUtils import play_features as play
from irlmethods.irlUtils import lcr_regularizer, monotonic_regularizer
from rlmethods.rlutils import play_complete
import utils
DEVICE = "cuda" if torch.cuda.is_available() else "cpu"
class RewardNet(BaseNN):
"""Reward network"""
def __init__(self, state_dims, hidden_dims=128):
super(RewardNet, self).__init__()
self.input = nn.Linear(state_dims, hidden_dims)
self.linear1 = nn.Linear(hidden_dims, hidden_dims)
self.linear2 = nn.Linear(hidden_dims, hidden_dims)
self.head = nn.Linear(hidden_dims, 1)
def forward(self, x):
x = F.relu(self.input(x))
x = F.relu(self.linear1(x))
x = F.relu(self.linear2(x))
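        # tanh keeps the predicted reward bounded in [-1, 1]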
x = torch.tanh(self.head(x))
return x
class GeneralDeepMaxent:
"""
    Implements deep maxent IRL (Wulfmeier et al.) in a state-type agnostic way.
"""
def __init__(
self,
rl,
env,
expert_trajectories,
learning_rate=1e-3,
l2_regularization=1e-5,
save_folder="./",
saving_interval=10,
):
# RL related
self.rl = rl
self.feature_extractor = self.rl.feature_extractor
# environment attributes
self.env = env
state_size = self.feature_extractor.extract_features(
env.reset()
).shape[0]
# reward net
self.reward_net = RewardNet(state_size, hidden_dims=256)
self.reward_net = self.reward_net.to(DEVICE)
self.reward_optim = Adam(
self.reward_net.parameters(),
lr=learning_rate,
weight_decay=l2_regularization,
)
# expert info
self.expert_trajectories = [
traj.to(torch.float).to(DEVICE) for traj in expert_trajectories
]
# logging and saving
self.save_path = Path(save_folder)
self.tbx_writer = SummaryWriter(
str(self.save_path / "tensorboard_logs")
)
# hijack the RL method's tbx_writer
self.rl.tbx_writer = self.tbx_writer
self.data_table = utils.DataTable()
# training meta
self.training_i = 0
self.saving_interval = saving_interval
def save_models(self, filename=None):
self.rl.policy.save(str(self.save_path / "policy"), filename=filename)
self.reward_net.save(
str(self.save_path / "reward_net"), filename=filename
)
def generate_trajectories(
self, num_trajectories, max_env_steps, stochastic,
):
"""
Generate trajectories in the environment using the learned RL policy.
:param num_trajectories: number of trajectories to generate.
:type num_trajectories: int
:param max_env_steps: max steps to take in environment (rollout length.)
:type max_env_steps: int
:return: list of features encountered in playthrough.
:rtype: list of tensors of shape (num_states x feature_length)
"""
states = []
for _ in range(num_trajectories):
generated_states = play(
self.env,
self.rl.policy,
self.feature_extractor,
max_env_steps,
stochastic,
)
states.append(generated_states)
return states
def discounted_rewards(self, rewards, gamma, account_for_terminal_state):
discounted_sum = 0
t = 0
gamma_t = 1
for t, reward in enumerate(rewards[:-1]):
discounted_sum += gamma_t * reward
gamma_t *= gamma
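# Terminal states are treated as absorbing: repeating the final reward from
# step t+2 onward gives the geometric series
# sum_{k >= t+2} gamma**k * R = (gamma / (1 - gamma)) * gamma**(t + 1) * R,
# which the branch below computes in closed form.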
if account_for_terminal_state:
discounted_sum += (
(gamma / (1 - gamma)) * gamma ** (t + 1) * rewards[-1]
)
else:
discounted_sum += gamma_t * rewards[-1]
return discounted_sum
def train_episode(
self,
num_rl_episodes,
max_rl_episode_length,
num_trajectory_samples,
max_env_steps,
reset_training,
account_for_terminal_state,
gamma,
stochastic_sampling,
):
"""
perform IRL training.
:param num_rl_episodes: Number of RL iterations for this IRL iteration.
:type num_rl_episodes: int.
:param max_rl_episode_length: maximum number of environment steps to
take when doing rollouts using learned RL agent.
:type max_rl_episode_length: int
:param num_trajectory_samples: Number of trajectories to sample using
learned RL agent.
:type num_trajectory_samples: int
:param max_env_steps: maximum number of environment steps to take,
both when training RL agent and when generating rollouts.
:type max_env_steps: int
:param reset_training: Whether to reset RL training every iteration
or not.
:type reset_training: Boolean.
:param account_for_terminal_state: Whether to account for a state
being terminal or not. If true, (gamma/(1-gamma))*R will be imitated
by padding the trajectory with its ending state until max_env_steps
length is reached. e.g. if max_env_steps is 5, the trajectory [s_0,
s_1, s_2] will be padded to [s_0, s_1, s_2, s_2, s_2].
:type account_for_terminal_state: Boolean.
:param gamma: The discounting factor.
:type gamma: float.
:param stochastic_sampling: Sample trajectories using stochastic
policy instead of deterministic 'best action policy'
:type stochastic_sampling: Boolean.
"""
# expert loss
expert_loss = 0
for traj in self.expert_trajectories:
expert_rewards = self.reward_net(traj)
expert_loss += self.discounted_rewards(
expert_rewards, gamma, account_for_terminal_state
)
# policy loss
trajectories = self.generate_trajectories(
num_trajectory_samples, max_env_steps, stochastic_sampling
)
policy_loss = 0
for traj in trajectories:
policy_rewards = self.reward_net(traj)
policy_loss += self.discounted_rewards(
policy_rewards, gamma, account_for_terminal_state
)
policy_loss = (
len(self.expert_trajectories) / num_trajectory_samples
) * policy_loss
# Backpropagate IRL loss
loss = policy_loss - expert_loss
self.reward_optim.zero_grad()
loss.backward()
self.reward_optim.step()
# train RL agent
if reset_training:
self.rl.reset_training()
self.rl.train(
num_rl_episodes,
max_rl_episode_length,
reward_network=self.reward_net,
)
# logging
self.tbx_writer.add_scalar(
"IRL/policy_loss", policy_loss, self.training_i
)
self.tbx_writer.add_scalar(
"IRL/expert_loss", expert_loss, self.training_i
)
self.tbx_writer.add_scalar("IRL/total_loss", loss, self.training_i)
self.data_table.add_row(
{
"IRL/policy_loss": policy_loss.item(),
"IRL/expert_loss": expert_loss.item(),
"IRL/total_loss": loss.item(),
},
self.training_i,
)
# save policy and reward network
# TODO: make a uniform dumping function for all agents.
self.save_models(filename="{}.pt".format(self.training_i))
# increment training counter
self.training_i += 1
def train(
self,
num_irl_episodes,
num_rl_episodes,
max_rl_episode_length,
num_trajectory_samples,
max_env_steps,
reset_training=False,
account_for_terminal_state=False,
gamma=0.99,
stochastic_sampling=False,
):
"""
Runs the train_episode() function for 'num_irl_episodes' times. Other
parameters are identical to the aforementioned function, with the same
description and requirements.
"""
for _ in range(num_irl_episodes):
print("IRL episode {}".format(self.training_i), end="\r")
self.train_episode(
num_rl_episodes,
max_rl_episode_length,
num_trajectory_samples,
max_env_steps,
reset_training,
account_for_terminal_state,
gamma,
stochastic_sampling,
)
class MixingDeepMaxent(GeneralDeepMaxent):
def __init__(
self,
rl,
env,
expert_trajectories,
learning_rate=0.001,
l2_regularization=1e-05,
save_folder="./",
saving_interval=25,
):
super().__init__(
rl,
env,
expert_trajectories,
learning_rate=learning_rate,
l2_regularization=l2_regularization,
save_folder=save_folder,
saving_interval=saving_interval,
)
# expert and training datasets
self.all_trajectories = random.sample(
expert_trajectories, len(expert_trajectories)
)
self.expert_label_trajectories = [
traj.to(torch.float).to(DEVICE)
for traj in self.all_trajectories[
: len(self.all_trajectories) // 2
]
]
self.expert_train_trajectories = [
traj.to(torch.float).to(DEVICE)
for traj in self.all_trajectories[
len(self.all_trajectories) // 2 :
]
]
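# The expert data is shuffled and split in half: the "label" half plays the
# expert role during pre-training while the "train" half stands in for the
# generator, giving a purely offline contrastive objective (see
# pre_train_episode below).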
self.pre_data_table = utils.DataTable()
# initial model save
self.save_models(filename="initial_save.pt")
def train_episode(
self,
num_rl_episodes,
max_rl_episode_length,
max_env_steps,
reset_training,
account_for_terminal_state,
gamma,
stochastic_sampling,
num_expert_samples,
num_policy_samples,
):
"""
perform IRL with mix-in of expert samples.
:param num_rl_episodes: Number of RL iterations for this IRL iteration.
:type num_rl_episodes: int.
:param max_rl_episode_length: maximum number of environment steps to
take when doing rollouts using learned RL agent.
:type max_rl_episode_length: int
:param num_expert_samples: Number of expert trajectories sampled per
iteration.
:type num_expert_samples: int
:param num_policy_samples: Number of policy trajectories sampled per
iteration.
:type num_policy_samples: int
:param max_env_steps: maximum number of environment steps to take,
both when training RL agent and when generating rollouts.
:type max_env_steps: int
:param reset_training: Whether to reset RL training every iteration
or not.
:type reset_training: Boolean.
:param account_for_terminal_state: Whether to account for a state
being terminal or not. If true, (gamma/(1-gamma))*R will be imitated
by padding the trajectory with its ending state until max_env_steps
length is reached. e.g. if max_env_steps is 5, the trajectory [s_0,
s_1, s_2] will be padded to [s_0, s_1, s_2, s_2, s_2].
:type account_for_terminal_state: Boolean.
:param gamma: The discounting factor.
:type gamma: float.
:param stochastic_sampling: Sample trajectories using stochastic
policy instead of deterministic 'best action policy'
:type stochastic_sampling: Boolean.
"""
# expert loss
expert_loss = 0
expert_samples = random.sample(
self.expert_trajectories, num_expert_samples
)
for traj in expert_samples:
expert_rewards = self.reward_net(traj)
expert_loss += self.discounted_rewards(
expert_rewards, gamma, account_for_terminal_state
)
# policy loss
trajectories = self.generate_trajectories(
num_expert_samples // 2, max_env_steps, stochastic_sampling
)
# mix in expert samples.
trajectories.extend(
random.sample(self.expert_trajectories, num_policy_samples // 2)
)
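# Presumably the mix-in keeps the reward net from trivially separating the
# expert and policy batches early in training (a GAN-style stabilizer); the
# expert/policy sample-count ratio below rescales the loss for the enlarged
# batch.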
policy_loss = 0
for traj in trajectories:
policy_rewards = self.reward_net(traj)
policy_loss += self.discounted_rewards(
policy_rewards, gamma, account_for_terminal_state
)
policy_loss = (num_expert_samples / num_policy_samples) * policy_loss
# Backpropagate IRL loss
loss = policy_loss - expert_loss
self.reward_optim.zero_grad()
loss.backward()
self.reward_optim.step()
# train RL agent
if reset_training:
self.rl.reset_training()
self.rl.train(
num_rl_episodes,
max_rl_episode_length,
reward_network=self.reward_net,
)
# logging
self.tbx_writer.add_scalar(
"IRL/policy_loss", policy_loss, self.training_i
)
self.tbx_writer.add_scalar(
"IRL/expert_loss", expert_loss, self.training_i
)
self.tbx_writer.add_scalar("IRL/total_loss", loss, self.training_i)
self.data_table.add_row(
{
"IRL/policy_loss": policy_loss.item(),
"IRL/expert_loss": expert_loss.item(),
"IRL/total_loss": loss.item(),
},
self.training_i,
)
# save policy and reward network
# TODO: make a uniform dumping function for all agents.
if (self.training_i + 1) % self.saving_interval == 0:
self.save_models(filename="{}.pt".format(self.training_i))
# increment training counter
self.training_i += 1
def pre_train_episode(
self, num_trajectory_samples, account_for_terminal_state, gamma,
):
"""
perform IRL pre-training by using only expert samples.
:param num_trajectory_samples: Number of trajectories to sample using
learned RL agent.
:type num_trajectory_samples: int
:param account_for_terminal_state: Whether to account for a state
being terminal or not. If true, (gamma/(1-gamma))*R will be imitated
by padding the trajectory with its ending state until max_env_steps
length is reached. e.g. if max_env_steps is 5, the trajectory [s_0,
s_1, s_2] will be padded to [s_0, s_1, s_2, s_2, s_2].
:type account_for_terminal_state: Boolean.
:param gamma: The discounting factor.
:type gamma: float.
"""
# expert loss
expert_loss = 0
expert_sample = random.sample(
self.expert_label_trajectories, num_trajectory_samples
)
for traj in expert_sample:
expert_rewards = self.reward_net(traj)
expert_loss += self.discounted_rewards(
expert_rewards, gamma, account_for_terminal_state
)
# policy loss
trajectories = random.sample(
self.expert_train_trajectories, num_trajectory_samples
)
generator_loss = 0
for traj in trajectories:
policy_rewards = self.reward_net(traj)
generator_loss += self.discounted_rewards(
policy_rewards, gamma, account_for_terminal_state
)
generator_loss = (
len(self.expert_trajectories) / num_trajectory_samples
) * generator_loss
# Backpropagate IRL loss
loss = generator_loss - expert_loss
self.reward_optim.zero_grad()
loss.backward()
self.reward_optim.step()
# logging
self.tbx_writer.add_scalar(
"pre_IRL/generator_loss", generator_loss, self.training_i
)
self.tbx_writer.add_scalar(
"pre_IRL/expert_loss", expert_loss, self.training_i
)
self.tbx_writer.add_scalar("pre_IRL/total_loss", loss, self.training_i)
self.pre_data_table.add_row(
{
"pre_IRL/policy_loss": generator_loss.item(),
"pre_IRL/expert_loss": expert_loss.item(),
"pre_IRL/total_loss": loss.item(),
},
self.training_i,
)
# save policy and reward network
self.reward_net.save(
str(self.save_path / "reward_net"),
filename="pre_{}.pt".format(self.training_i),
)
# increment training counter
self.training_i += 1
def pre_train(
self,
num_pretrain_episodes,
num_trajectory_samples,
account_for_terminal_state=False,
gamma=0.99,
):
"""
Runs the pre_train_episode() function for 'num_pretrain_episodes' times. Other
parameters are identical to the aforementioned function, with the same
description and requirements.
"""
for _ in range(num_pretrain_episodes):
print(
"IRL pre-training episode {}".format(self.training_i), end="\r"
)
self.pre_train_episode(
num_trajectory_samples, account_for_terminal_state, gamma
)
def train(
self,
num_irl_episodes,
num_rl_episodes,
max_rl_episode_length,
max_env_steps,
reset_training=False,
account_for_terminal_state=False,
gamma=0.99,
stochastic_sampling=False,
num_expert_samples=64,
num_policy_samples=64,
):
"""
Runs the train_episode() function for 'num_irl_episodes' times. Other
parameters are identical to the aforementioned function, with the same
description and requirements.
"""
for _ in range(num_irl_episodes):
print("IRL episode {}".format(self.training_i), end="\r")
self.train_episode(
num_rl_episodes,
max_rl_episode_length,
max_env_steps,
reset_training,
account_for_terminal_state,
gamma,
stochastic_sampling,
num_expert_samples,
num_policy_samples,
)
# final model save
self.save_models(filename="final.pt")
class GCL(MixingDeepMaxent):
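"""
Guided-cost-learning style variant (presumably in the spirit of Finn et
al.'s guided cost learning): the partition function Z is estimated from
importance-sampled rollouts, weighting each trajectory by its reward minus
the policy log-probability.
"""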
def generate_trajectories(self, num_trajectories, max_env_steps, ped_id=None):
"""
Generate trajectories in the environment using the learned RL policy.
:param num_trajectories: number of trajectories to generate.
:type num_trajectories: int
:param max_env_steps: max steps to take in environment (rollout length.)
:type max_env_steps: int
:return: list of complete playthrough buffers (one per trajectory).
:rtype: list
"""
buffers = []
for _ in range(num_trajectories):
generated_buffer = play_complete(
self.rl.policy,
self.env,
self.feature_extractor,
max_env_steps,
ped_id=ped_id
)
buffers.append(generated_buffer)
return buffers
def train_episode(
self,
num_rl_episodes,
max_rl_episode_length,
max_env_steps,
reset_training,
account_for_terminal_state,
gamma,
stochastic_sampling,
num_expert_samples,
num_policy_samples,
):
"""
perform guided-cost-learning style IRL with importance-sampled rollouts.
:param num_rl_episodes: Number of RL iterations for this IRL iteration.
:type num_rl_episodes: int.
:param max_rl_episode_length: maximum number of environment steps to
take when doing rollouts using learned RL agent.
:type max_rl_episode_length: int
:param num_expert_samples: Number of expert trajectories sampled per
iteration.
:type num_expert_samples: int
:param num_policy_samples: Number of policy trajectories sampled per
iteration.
:type num_policy_samples: int
:param max_env_steps: maximum number of environment steps to take,
both when training RL agent and when generating rollouts.
:type max_env_steps: int
:param reset_training: Whether to reset RL training every iteration
or not.
:type reset_training: Boolean.
:param account_for_terminal_state: Whether to account for a state
being terminal or not. If true, (gamma/(1-gamma))*R will be imitated
by padding the trajectory with its ending state until max_env_steps
length is reached. e.g. if max_env_steps is 5, the trajectory [s_0,
s_1, s_2] will be padded to [s_0, s_1, s_2, s_2, s_2].
:type account_for_terminal_state: Boolean.
:param gamma: The discounting factor.
:type gamma: float.
:param stochastic_sampling: Sample trajectories using stochastic
policy instead of deterministic 'best action policy'
:type stochastic_sampling: Boolean.
"""
# regularizers
g_lcr = 0
g_mono = 0
# expert loss
expert_loss = 0
expert_samples = random.sample(
self.expert_trajectories, num_expert_samples
)
for traj in expert_samples:
expert_rewards = self.reward_net(traj)
# update regularizers
g_lcr += lcr_regularizer(expert_rewards)
g_mono += monotonic_regularizer(expert_rewards)
expert_loss += self.discounted_rewards(
expert_rewards, gamma, account_for_terminal_state
)
# policy loss
trajectories = self.generate_trajectories(
num_expert_samples, max_env_steps
)
rewards = []
log_pis = []
for traj in trajectories:
states = [
torch.from_numpy(tran.state).to(torch.float).to(DEVICE)
for tran in traj
]
states.append(
torch.from_numpy(traj[-1].next_state)
.to(torch.float)
.to(DEVICE)
)
states = torch.stack(states)
reward = self.reward_net(states)
# update regularizers
g_lcr += lcr_regularizer(reward)
g_mono += monotonic_regularizer(reward)
reward_sum = self.discounted_rewards(reward, gamma, traj[-1].done)
rewards.append(reward_sum)
log_pi = [
torch.from_numpy(tran.action_log_prob)
.to(torch.float)
.to(DEVICE)
for tran in traj
]
log_pis.append(torch.tensor(log_pi).sum())
# log sum exp trick
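# log_Z = log sum_i exp(R(tau_i) - log pi(tau_i)); subtracting the largest
# exponent before exponentiating keeps exp() from overflowing, and the shift
# is added back outside the log.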
exponents = torch.cat(rewards) - torch.tensor(log_pis).to(DEVICE)
max_exponent = torch.max(exponents)
log_Z = max_exponent + torch.log(
torch.exp(exponents - max_exponent).sum()
)
policy_loss = log_Z
policy_loss = (num_expert_samples) * policy_loss
# Backpropagate IRL loss
loss = policy_loss - expert_loss + g_mono + g_lcr
self.reward_optim.zero_grad()
loss.backward()
self.reward_optim.step()
# train RL agent
if reset_training:
self.rl.reset_training()
self.rl.train(
num_rl_episodes,
max_rl_episode_length,
reward_network=self.reward_net,
)
# logging
self.tbx_writer.add_scalar(
"IRL/policy_loss", policy_loss, self.training_i
)
self.tbx_writer.add_scalar(
"IRL/expert_loss", expert_loss, self.training_i
)
self.tbx_writer.add_scalar("IRL/total_loss", loss, self.training_i)
self.tbx_writer.add_scalar("IRL/log_Z", log_Z.item(), self.training_i)
self.data_table.add_row(
{
"IRL/policy_loss": policy_loss.item(),
"IRL/expert_loss": expert_loss.item(),
"IRL/total_loss": loss.item(),
"IRL/log_Z": log_Z.item(),
},
self.training_i,
)
# save policy and reward network
# TODO: make a uniform dumping function for all agents.
if (self.training_i + 1) % self.saving_interval == 0:
self.save_models(filename="{}.pt".format(self.training_i))
# increment training counter
self.training_i += 1
class PerTrajGCL(GCL):
def train_episode(
self,
num_rl_episodes,
max_rl_episode_length,
max_env_steps,
reset_training,
account_for_terminal_state,
gamma,
stochastic_sampling,
num_expert_samples,
num_policy_samples,
):
"""
perform IRL with mix-in of expert samples.
:param num_rl_episodes: Number of RL iterations for this IRL iteration.
:type num_rl_episodes: int.
:param max_rl_episode_length: maximum number of environment steps to
take when doing rollouts using learned RL agent.
:type max_rl_episode_length: int
:param num_expert_samples: Number of expert trajectories sampled per
iteration.
:type num_expert_samples: int
:param num_policy_samples: Number of policy trajectories sampled per
iteration.
:type num_policy_samples: int
:param max_env_steps: maximum number of environment steps to take,
both when training RL agent and when generating rollouts.
:type max_env_steps: int
:param reset_training: Whether to reset RL training every iteration
or not.
:type reset_training: Boolean.
:param account_for_terminal_state: Whether to account for a state
being terminal or not. If true, (gamma/(1-gamma))*R will be imitated
by padding the trajectory with its ending state until max_env_steps
length is reached. e.g. if max_env_steps is 5, the trajectory [s_0,
s_1, s_2] will be padded to [s_0, s_1, s_2, s_2, s_2].
:type account_for_terminal_state: Boolean.
:param gamma: The discounting factor.
:type gamma: float.
:param stochastic_sampling: Sample trajectories using stochastic
policy instead of deterministic 'best action policy'
:type stochastic_sampling: Boolean.
"""
# regularizers
g_lcr = 0
g_mono = 0
# expert loss
expert_loss = 0
expert_samples = random.sample(
list(enumerate(self.expert_trajectories)), num_expert_samples
)
for _, traj in expert_samples:
expert_rewards = self.reward_net(traj)
expert_loss += self.discounted_rewards(
expert_rewards, gamma, account_for_terminal_state
)
# update regularizers
g_lcr += lcr_regularizer(expert_rewards)
g_mono += monotonic_regularizer(expert_rewards)
# policy loss
trajectories = []
for idx, _ in expert_samples:
trajectories.extend(
self.generate_trajectories(
num_policy_samples, max_env_steps, idx + 1
)
)
policy_loss = 0
# mix in expert samples.
expert_mixin_samples = random.sample(
self.expert_trajectories, num_policy_samples // 2
)
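# NOTE: expert_mixin_samples is not used below; the expert tensors are not
# transition buffers, so the tran.state loop that follows could not score
# them as written.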
rewards = []
log_pis = []
for traj in trajectories:
states = [
torch.from_numpy(tran.state).to(torch.float).to(DEVICE)
for tran in traj
]
states.append(
torch.from_numpy(traj[-1].next_state)
.to(torch.float)
.to(DEVICE)
)
states = torch.stack(states)
reward = self.reward_net(states)
# update regularizers
g_lcr += lcr_regularizer(reward)
g_mono += monotonic_regularizer(reward)
reward_sum = self.discounted_rewards(reward, gamma, traj[-1].done)
rewards.append(reward_sum)
log_pi = [
torch.from_numpy(tran.action_log_prob)
.to(torch.float)
.to(DEVICE)
for tran in traj
]
log_pis.append(torch.tensor(log_pi).sum())
# log sum exp trick
exponents = torch.cat(rewards) - torch.tensor(log_pis).to(DEVICE)
max_exponent = torch.max(exponents)
log_Z = max_exponent + torch.log(
torch.exp(exponents - max_exponent).sum()
)
policy_loss += log_Z
policy_loss = (num_expert_samples) * policy_loss
# Backpropagate IRL loss
loss = policy_loss - expert_loss + g_mono + g_lcr
self.reward_optim.zero_grad()
loss.backward()
self.reward_optim.step()
# train RL agent
if reset_training:
self.rl.reset_training()
self.rl.train(
num_rl_episodes,
max_rl_episode_length,
reward_network=self.reward_net,
)
# logging
self.tbx_writer.add_scalar(
"IRL/policy_loss", policy_loss, self.training_i
)
self.tbx_writer.add_scalar(
"IRL/expert_loss", expert_loss, self.training_i
)
self.tbx_writer.add_scalar("IRL/total_loss", loss, self.training_i)
self.tbx_writer.add_scalar("IRL/log_Z", log_Z.item(), self.training_i)
self.data_table.add_row(
{
"IRL/policy_loss": policy_loss.item(),
"IRL/expert_loss": expert_loss.item(),
"IRL/total_loss": loss.item(),
"IRL/log_Z": log_Z.item(),
},
self.training_i,
)
# save policy and reward network
# TODO: make a uniform dumping function for all agents.
if (self.training_i + 1) % self.saving_interval == 0:
self.save_models(filename="{}.pt".format(self.training_i))
# increment training counter
self.training_i += 1
class ExpertOnlyMaxent:
"""
Implements expert only deep maxent, using only expert demonstrations and
no environment interaction.
"""
def __init__(
self,
state_size,
expert_trajectories,
learning_rate=1e-3,
l2_regularization=1e-5,
save_folder="./",
):
# reward net
self.reward_net = RewardNet(state_size, hidden_dims=256)
self.reward_net = self.reward_net.to(DEVICE)
self.reward_optim = Adam(
self.reward_net.parameters(),
lr=learning_rate,
weight_decay=l2_regularization,
)
# expert and training datasets
self.all_trajectories = random.sample(
expert_trajectories, len(expert_trajectories)
)
self.expert_trajectories = [
traj.to(torch.float).to(DEVICE)
for traj in self.all_trajectories[
: len(self.all_trajectories) // 2
]
]
self.training_trajectories = [
traj.to(torch.float).to(DEVICE)
for traj in self.all_trajectories[
len(self.all_trajectories) // 2 :
]
]
# logging and saving
self.save_path = Path(save_folder)
self.tbx_writer = SummaryWriter(
str(self.save_path / "tensorboard_logs")
)
self.data_table = utils.DataTable()
# training meta
self.training_i = 0
def discounted_rewards(self, rewards, gamma, account_for_terminal_state):
discounted_sum = 0
t = 0
gamma_t = 1
for t, reward in enumerate(rewards[:-1]):
discounted_sum += gamma_t * reward
gamma_t *= gamma
if account_for_terminal_state:
discounted_sum += (
(gamma / (1 - gamma)) * gamma ** (t + 1) * rewards[-1]
)
else:
discounted_sum += gamma_t * rewards[-1]
return discounted_sum
def train_episode(
self, num_trajectory_samples, account_for_terminal_state, gamma,
):
"""
perform IRL pre-training by using only expert samples.
:param num_trajectory_samples: Number of trajectories to sample using
learned RL agent.
:type num_trajectory_samples: int
:param account_for_terminal_state: Whether to account for a state
being terminal or not. If true, (gamma/(1-gamma))*R will be imitated
by padding the trajectory with its ending state until max_env_steps
length is reached. e.g. if max_env_steps is 5, the trajectory [s_0,
s_1, s_2] will be padded to [s_0, s_1, s_2, s_2, s_2].
:type account_for_terminal_state: Boolean.
:param gamma: The discounting factor.
:type gamma: float.
"""
# expert loss
expert_loss = 0
expert_sample = random.sample(
self.expert_trajectories, num_trajectory_samples
)
for traj in expert_sample:
expert_rewards = self.reward_net(traj)
expert_loss += self.discounted_rewards(
expert_rewards, gamma, account_for_terminal_state
)
# policy loss
trajectories = random.sample(
self.training_trajectories, num_trajectory_samples
)
generator_loss = 0
for traj in trajectories:
policy_rewards = self.reward_net(traj)
generator_loss += self.discounted_rewards(
policy_rewards, gamma, account_for_terminal_state
)
generator_loss = (
len(self.expert_trajectories) / num_trajectory_samples
) * generator_loss
# Backpropagate IRL loss
loss = generator_loss - expert_loss
self.reward_optim.zero_grad()
loss.backward()
self.reward_optim.step()
# logging
self.tbx_writer.add_scalar(
"IRL/generator_loss", generator_loss, self.training_i
)
self.tbx_writer.add_scalar(
"IRL/expert_loss", expert_loss, self.training_i
)
self.tbx_writer.add_scalar("IRL/total_loss", loss, self.training_i)
self.data_table.add_row(
{
"IRL/policy_loss": generator_loss.item(),
"IRL/expert_loss": expert_loss.item(),
"IRL/total_loss": loss.item(),
},
self.training_i,
)
# save policy and reward network
self.reward_net.save(str(self.save_path / "reward_net"))
# increment training counter
self.training_i += 1
def train(
self,
num_episodes,
num_trajectory_samples,
account_for_terminal_state=False,
gamma=0.99,
):
"""
Runs the train_episode() function for 'num_episodes' times. Other
parameters are identical to the aforementioned function, with the same
description and requirements.
"""
for _ in range(num_episodes):
print(
"IRL pre-training episode {}".format(self.training_i), end="\r"
)
self.train_episode(
num_trajectory_samples, account_for_terminal_state, gamma
)
|
[
"irlmethods.irlUtils.play_features",
"torch.stack",
"rlmethods.rlutils.play_complete",
"random.sample",
"sys.path.insert",
"utils.DataTable",
"irlmethods.irlUtils.monotonic_regularizer",
"torch.cat",
"torch.exp",
"pathlib.Path",
"torch.cuda.is_available",
"torch.max",
"irlmethods.irlUtils.lcr_regularizer",
"torch.nn.Linear",
"torch.tensor",
"torch.from_numpy"
] |
[((282, 306), 'sys.path.insert', 'sys.path.insert', (['(0)', '""".."""'], {}), "(0, '..')\n", (297, 306), False, 'import sys\n'), ((553, 578), 'torch.cuda.is_available', 'torch.cuda.is_available', ([], {}), '()\n', (576, 578), False, 'import torch\n'), ((760, 794), 'torch.nn.Linear', 'nn.Linear', (['state_dims', 'hidden_dims'], {}), '(state_dims, hidden_dims)\n', (769, 794), True, 'import torch.nn as nn\n'), ((819, 854), 'torch.nn.Linear', 'nn.Linear', (['hidden_dims', 'hidden_dims'], {}), '(hidden_dims, hidden_dims)\n', (828, 854), True, 'import torch.nn as nn\n'), ((878, 913), 'torch.nn.Linear', 'nn.Linear', (['hidden_dims', 'hidden_dims'], {}), '(hidden_dims, hidden_dims)\n', (887, 913), True, 'import torch.nn as nn\n'), ((935, 960), 'torch.nn.Linear', 'nn.Linear', (['hidden_dims', '(1)'], {}), '(hidden_dims, 1)\n', (944, 960), True, 'import torch.nn as nn\n'), ((2246, 2263), 'pathlib.Path', 'Path', (['save_folder'], {}), '(save_folder)\n', (2250, 2263), False, 'from pathlib import Path\n'), ((2482, 2499), 'utils.DataTable', 'utils.DataTable', ([], {}), '()\n', (2497, 2499), False, 'import utils\n'), ((10052, 10069), 'utils.DataTable', 'utils.DataTable', ([], {}), '()\n', (10067, 10069), False, 'import utils\n'), ((12002, 12061), 'random.sample', 'random.sample', (['self.expert_trajectories', 'num_expert_samples'], {}), '(self.expert_trajectories, num_expert_samples)\n', (12015, 12061), False, 'import random\n'), ((15110, 15179), 'random.sample', 'random.sample', (['self.expert_label_trajectories', 'num_trajectory_samples'], {}), '(self.expert_label_trajectories, num_trajectory_samples)\n', (15123, 15179), False, 'import random\n'), ((15467, 15536), 'random.sample', 'random.sample', (['self.expert_train_trajectories', 'num_trajectory_samples'], {}), '(self.expert_train_trajectories, num_trajectory_samples)\n', (15480, 15536), False, 'import random\n'), ((21549, 21608), 'random.sample', 'random.sample', (['self.expert_trajectories', 'num_expert_samples'], {}), '(self.expert_trajectories, num_expert_samples)\n', (21562, 21608), False, 'import random\n'), ((23202, 23222), 'torch.max', 'torch.max', (['exponents'], {}), '(exponents)\n', (23211, 23222), False, 'import torch\n'), ((27583, 27647), 'random.sample', 'random.sample', (['self.expert_trajectories', '(num_policy_samples // 2)'], {}), '(self.expert_trajectories, num_policy_samples // 2)\n', (27596, 27647), False, 'import random\n'), ((28750, 28770), 'torch.max', 'torch.max', (['exponents'], {}), '(exponents)\n', (28759, 28770), False, 'import torch\n'), ((31620, 31637), 'pathlib.Path', 'Path', (['save_folder'], {}), '(save_folder)\n', (31624, 31637), False, 'from pathlib import Path\n'), ((31769, 31786), 'utils.DataTable', 'utils.DataTable', ([], {}), '()\n', (31784, 31786), False, 'import utils\n'), ((33261, 33324), 'random.sample', 'random.sample', (['self.expert_trajectories', 'num_trajectory_samples'], {}), '(self.expert_trajectories, num_trajectory_samples)\n', (33274, 33324), False, 'import random\n'), ((33612, 33677), 'random.sample', 'random.sample', (['self.training_trajectories', 'num_trajectory_samples'], {}), '(self.training_trajectories, num_trajectory_samples)\n', (33625, 33677), False, 'import random\n'), ((3469, 3554), 'irlmethods.irlUtils.play_features', 'play', (['self.env', 'self.rl.policy', 'self.feature_extractor', 'max_env_steps', 'stochastic'], {}), '(self.env, self.rl.policy, self.feature_extractor, max_env_steps,\n stochastic)\n', (3473, 3554), True, 'from irlmethods.irlUtils import play_features as play\n'), 
((12534, 12598), 'random.sample', 'random.sample', (['self.expert_trajectories', '(num_policy_samples // 2)'], {}), '(self.expert_trajectories, num_policy_samples // 2)\n', (12547, 12598), False, 'import random\n'), ((19381, 19478), 'rlmethods.rlutils.play_complete', 'play_complete', (['self.rl.policy', 'self.env', 'self.feature_extractor', 'max_env_steps'], {'ped_id': 'ped_id'}), '(self.rl.policy, self.env, self.feature_extractor,\n max_env_steps, ped_id=ped_id)\n', (19394, 19478), False, 'from rlmethods.rlutils import play_complete\n'), ((21774, 21805), 'irlmethods.irlUtils.lcr_regularizer', 'lcr_regularizer', (['expert_rewards'], {}), '(expert_rewards)\n', (21789, 21805), False, 'from irlmethods.irlUtils import lcr_regularizer, monotonic_regularizer\n'), ((21828, 21865), 'irlmethods.irlUtils.monotonic_regularizer', 'monotonic_regularizer', (['expert_rewards'], {}), '(expert_rewards)\n', (21849, 21865), False, 'from irlmethods.irlUtils import lcr_regularizer, monotonic_regularizer\n'), ((22525, 22544), 'torch.stack', 'torch.stack', (['states'], {}), '(states)\n', (22536, 22544), False, 'import torch\n'), ((22646, 22669), 'irlmethods.irlUtils.lcr_regularizer', 'lcr_regularizer', (['reward'], {}), '(reward)\n', (22661, 22669), False, 'from irlmethods.irlUtils import lcr_regularizer, monotonic_regularizer\n'), ((22692, 22715), 'irlmethods.irlUtils.lcr_regularizer', 'lcr_regularizer', (['reward'], {}), '(reward)\n', (22707, 22715), False, 'from irlmethods.irlUtils import lcr_regularizer, monotonic_regularizer\n'), ((23125, 23143), 'torch.cat', 'torch.cat', (['rewards'], {}), '(rewards)\n', (23134, 23143), False, 'import torch\n'), ((27142, 27173), 'irlmethods.irlUtils.lcr_regularizer', 'lcr_regularizer', (['expert_rewards'], {}), '(expert_rewards)\n', (27157, 27173), False, 'from irlmethods.irlUtils import lcr_regularizer, monotonic_regularizer\n'), ((27196, 27233), 'irlmethods.irlUtils.monotonic_regularizer', 'monotonic_regularizer', (['expert_rewards'], {}), '(expert_rewards)\n', (27217, 27233), False, 'from irlmethods.irlUtils import lcr_regularizer, monotonic_regularizer\n'), ((28066, 28085), 'torch.stack', 'torch.stack', (['states'], {}), '(states)\n', (28077, 28085), False, 'import torch\n'), ((28188, 28211), 'irlmethods.irlUtils.lcr_regularizer', 'lcr_regularizer', (['reward'], {}), '(reward)\n', (28203, 28211), False, 'from irlmethods.irlUtils import lcr_regularizer, monotonic_regularizer\n'), ((28234, 28263), 'irlmethods.irlUtils.monotonic_regularizer', 'monotonic_regularizer', (['reward'], {}), '(reward)\n', (28255, 28263), False, 'from irlmethods.irlUtils import lcr_regularizer, monotonic_regularizer\n'), ((28673, 28691), 'torch.cat', 'torch.cat', (['rewards'], {}), '(rewards)\n', (28682, 28691), False, 'import torch\n'), ((23146, 23167), 'torch.tensor', 'torch.tensor', (['log_pis'], {}), '(log_pis)\n', (23158, 23167), False, 'import torch\n'), ((28694, 28715), 'torch.tensor', 'torch.tensor', (['log_pis'], {}), '(log_pis)\n', (28706, 28715), False, 'import torch\n'), ((23048, 23068), 'torch.tensor', 'torch.tensor', (['log_pi'], {}), '(log_pi)\n', (23060, 23068), False, 'import torch\n'), ((23277, 23312), 'torch.exp', 'torch.exp', (['(exponents - max_exponent)'], {}), '(exponents - max_exponent)\n', (23286, 23312), False, 'import torch\n'), ((28596, 28616), 'torch.tensor', 'torch.tensor', (['log_pi'], {}), '(log_pi)\n', (28608, 28616), False, 'import torch\n'), ((28825, 28860), 'torch.exp', 'torch.exp', (['(exponents - max_exponent)'], {}), '(exponents - max_exponent)\n', (28834, 
28860), False, 'import torch\n'), ((22245, 22273), 'torch.from_numpy', 'torch.from_numpy', (['tran.state'], {}), '(tran.state)\n', (22261, 22273), False, 'import torch\n'), ((22391, 22428), 'torch.from_numpy', 'torch.from_numpy', (['traj[-1].next_state'], {}), '(traj[-1].next_state)\n', (22407, 22428), False, 'import torch\n'), ((22874, 22912), 'torch.from_numpy', 'torch.from_numpy', (['tran.action_log_prob'], {}), '(tran.action_log_prob)\n', (22890, 22912), False, 'import torch\n'), ((27786, 27814), 'torch.from_numpy', 'torch.from_numpy', (['tran.state'], {}), '(tran.state)\n', (27802, 27814), False, 'import torch\n'), ((27932, 27969), 'torch.from_numpy', 'torch.from_numpy', (['traj[-1].next_state'], {}), '(traj[-1].next_state)\n', (27948, 27969), False, 'import torch\n'), ((28422, 28460), 'torch.from_numpy', 'torch.from_numpy', (['tran.action_log_prob'], {}), '(tran.action_log_prob)\n', (28438, 28460), False, 'import torch\n')]
|
"""
Provides concrete tools for dealing with two of the most useful types of surfaces we have
"""
import numpy as np
from collections import namedtuple
from McUtils.GaussianInterface import GaussianLogReader, GaussianFChkReader
from McUtils.Zachary import Surface, MultiSurface, InterpolatedSurface, TaylorSeriesSurface
__all__=[
"DipoleSurface",
"PotentialSurface"
]
class DipoleSurface(MultiSurface):
"""
Provides a unified interface to working with dipole surfaces.
Currently basically no fancier than a regular surface (although with convenient loading functions), but dipole-specific
stuff could come
"""
def __init__(self, mu_x, mu_y, mu_z):
"""
:param mu_x: X-component of dipole moment
:type mu_x: Surface
:param mu_y: Y-component of dipole moment
:type mu_y: Surface
:param mu_z: Z-component of dipole moment
:type mu_z: Surface
"""
if isinstance(mu_x.base, TaylorSeriesSurface):
self.mode = "taylor"
else:
self.mode = "interp"
super().__init__(
mu_x,
mu_y,
mu_z
)
@staticmethod
def get_log_values(log_file, keys=("StandardCartesianCoordinates", "DipoleMoments")):
with GaussianLogReader(log_file) as parser:
parse_data = parser.parse(keys)
carts = parse_data[keys[0]][1]
dipoles = np.array(parse_data[keys[1]])
return namedtuple('dipole_log_values', ['cartesians', 'dipoles'])(carts, dipoles)
@classmethod
def from_log_file(cls, log_file, coord_transf, keys=("StandardCartesianCoordinates", "DipoleMoments"), tol = .001, **opts):
"""
Loads dipoles from a Gaussian log file and builds a dipole surface by interpolating.
Obviously this only really works if we have a subset of "scan" coordinates, so at this stage the user is obligated
to furnish a function that'll take a set of Cartesian coordinates and convert them to "scan" coordinates.
Coordinerds can be helpful with this, as it provides a convenient syntax for Cartesian <-> ZMatrix conversions
:param log_file: a Gaussian log file to pull from
:type log_file: str
:return:
:rtype:
"""
carts, dipoles = cls.get_log_values(log_file, keys=keys)
scan_coords = coord_transf(carts)
if len(dipoles) != len(scan_coords):
raise ValueError(
"mismatch between number of dipoles ({}) and number of coordinates ({})".format(
len(dipoles),
len(scan_coords)
)
)
if scan_coords.ndim == 1:
scan_sort = np.argsort(scan_coords)
else:
scan_sort = np.lexsort(tuple(reversed(tuple(scan_coords.T))))
scan_coords = scan_coords[scan_sort]
dipoles = dipoles[scan_sort]
# this is particularly relevant for optimization scans...but we pull all the "unique" indices
# then we pull the indices right _before_ each unique one since that's the final one in the block of "uniques"
# finally we do a "roll" to make sure the order from the sort is preserved
tol_coords = np.floor(scan_coords/tol)
if tol_coords.ndim == 1:
diffs = np.diff(tol_coords)
else:
diffs = np.sum(abs(np.diff(tol_coords, axis=0)), axis=1)
inds = np.where(diffs != 0)[0]
inds = np.concatenate((inds, [len(diffs)]))  # include the final block's last index
scan_coords = scan_coords[inds]
dipoles = dipoles[inds]
dipoles = list(np.transpose(dipoles))
return DipoleSurface(*(
Surface(
((scan_coords, d), opts),
base = InterpolatedSurface,
dipole_component = "x" if i == 0 else "y" if i == 1 else "z"
) for i,d in enumerate(dipoles)
))
@staticmethod
def get_fchk_values(fchk_file):
with GaussianFChkReader(fchk_file) as parser:
parse_data = parser.parse(["Coordinates", "Dipole Moment", "Dipole Derivatives"])
center = parse_data["Coordinates"]
const_dipole = parse_data["Dipole Moment"]
derivs = parse_data["Dipole Derivatives"]
derivs = np.reshape(derivs, (int(len(derivs) / 3), 3))
return namedtuple('dipole_fchk_values', ['center', 'const', 'derivs'])(center, const_dipole, derivs)
@classmethod
def from_fchk_file(cls, fchk_file, **opts):
"""
Loads dipoles from a Gaussian formatted checkpoint file and builds a dipole surface via a linear approximation
:param fchk_file: a Gaussian fchk file to pull from
:type fchk_file: str
:return:
:rtype:
"""
center, const_dipole, derivs = cls.get_fchk_values(fchk_file)
derivs = list(np.transpose(derivs))
opts['center'] = center.flatten()
surfs = [None]*3
for i, d in enumerate(zip(derivs, list(const_dipole))):
d, r = d
opts = opts.copy()
opts["ref"] = r
surfs[i] = Surface(
((d,), opts),
base = TaylorSeriesSurface,
dipole_component="x" if i == 0 else "y" if i == 1 else "z"
)
return cls(*surfs)
def __call__(self, gridpoints, **opts):
"""
Explicitly overrides the Surface-level evaluation because we know the Taylor surface needs us to flatten our gridpoints
:param gridpoints:
:type gridpoints:
:param opts:
:type opts:
:return:
:rtype:
"""
gps = np.asarray(gridpoints)
if self.mode == "taylor":
if gps.ndim == 2:
gps = gps.flatten()
elif gps.ndim > 2:
gps = np.reshape(gps, gps.shape[:-2] + (np.prod(gps.shape[-2:]),))
return super().__call__(gps, **opts)
class PotentialSurface(Surface):
"""
A potential surface structure to go along with the DipoleSurface.
Provides convenient access to dipole data + a unified interface to things like energy minimization
"""
@staticmethod
def get_log_values(log_file, keys=("StandardCartesianCoordinates", "ScanEnergies")):
with GaussianLogReader(log_file) as parser:
parse_data = parser.parse(keys)
# Need to be smarter about this. At some point we might be able to infer what type of log file we have...
coord_key = keys[0]
coords = parse_data[coord_key][1]
eng_key = keys[1]
if eng_key == "ScanEnergies":
energies = np.array(parse_data[eng_key].energies[:, -1])
else:
raise Exception("Haven't dealt with scan types beyond rigid ones...")
return namedtuple('potential_log_values', ['coords', 'energies'])(coords, energies)
@classmethod
def from_log_file(cls, log_file, coord_transf, keys=("StandardCartesianCoordinates", "ScanEnergies"), tol = .001, **opts):
"""
Loads energies from a Gaussian log file and builds a potential surface by interpolating.
Obviously this only really works if we have a subset of "scan" coordinates, so at this stage the user is obligated
to furnish a function that'll take a set of Cartesian coordinates and convert them to "scan" coordinates.
Coordinerds can be helpful with this, as it provides a convenient syntax for Cartesian <-> ZMatrix conversions.
:param log_file: a Gaussian log file to pull from
:type log_file: str
:return:
:rtype:
"""
dat = cls.get_log_values(log_file, keys=keys)
carts = dat.coords
pots = dat.energies
# raise Exception(carts, pots)
scan_coords = coord_transf(carts)
if len(pots) != len(scan_coords):
raise ValueError(
"mismatch between number of potential values ({}) and number of coordinates ({})".format(
len(pots),
len(scan_coords)
)
)
if scan_coords.ndim == 1:
scan_sort = np.argsort(scan_coords)
else:
scan_sort = np.lexsort(tuple(reversed(tuple(scan_coords.T))))
scan_coords = scan_coords[scan_sort]
pots = pots[scan_sort]
# this is particularly relevant for optimization scans...but we pull all the "unique" indices
# then we pull the indices right _before_ each unique one since that's the final one in the block of "uniques"
# finally we do a "roll" to make sure the order from the sort is preserved
tol_coords = np.floor(scan_coords/tol)
if tol_coords.ndim == 1:
diffs = np.diff(tol_coords)
else:
diffs = np.sum(abs(np.diff(tol_coords, axis=0)), axis=1)
inds = np.where(diffs != 0)[0]
inds = np.concatenate((inds, [len(diffs)]))  # include the final block's last index
scan_coords = scan_coords[inds].squeeze()
pots = pots[inds]
return cls(
((scan_coords, pots), opts),
base=InterpolatedSurface
)
@staticmethod
def get_fchk_values(fchk_file):
# TODO: I know I probably didn't do this right but I'm just getting a thing out for now
with GaussianFChkReader(fchk_file) as parser:
parse_data = parser.parse(["Coordinates", "Total Energy", "Gradient", "ForceConstants", "ForceDerivatives"])
center = parse_data["Coordinates"]
eng = parse_data["Total Energy"]
derivs = [parse_data['Gradient'], parse_data["ForceConstants"], parse_data["ForceDerivatives"]]
return namedtuple('potential_fchk_values', ['center', 'energy', 'derivs'])(
center, eng, derivs
)
@classmethod
def from_fchk_file(cls, fchk_file, **opts):
"""
Loads a potential from a Gaussian formatted checkpoint file and builds a potential surface via a Taylor series approximation
:param fchk_file: a Gaussian fchk file to pull from
:type fchk_file: str
:return:
:rtype:
"""
center, energy, derivs = cls.get_fchk_values(fchk_file)
return cls((derivs, dict(ref=energy, center=center.flatten())), base=TaylorSeriesSurface, **opts)
|
[
"McUtils.GaussianInterface.GaussianFChkReader",
"numpy.asarray",
"numpy.floor",
"numpy.transpose",
"McUtils.Zachary.Surface",
"numpy.argsort",
"numpy.product",
"McUtils.GaussianInterface.GaussianLogReader",
"numpy.array",
"collections.namedtuple",
"numpy.diff",
"numpy.where"
] |
[((1430, 1459), 'numpy.array', 'np.array', (['parse_data[keys[1]]'], {}), '(parse_data[keys[1]])\n', (1438, 1459), True, 'import numpy as np\n'), ((3252, 3279), 'numpy.floor', 'np.floor', (['(scan_coords / tol)'], {}), '(scan_coords / tol)\n', (3260, 3279), True, 'import numpy as np\n'), ((5657, 5679), 'numpy.asarray', 'np.asarray', (['gridpoints'], {}), '(gridpoints)\n', (5667, 5679), True, 'import numpy as np\n'), ((8662, 8689), 'numpy.floor', 'np.floor', (['(scan_coords / tol)'], {}), '(scan_coords / tol)\n', (8670, 8689), True, 'import numpy as np\n'), ((1289, 1316), 'McUtils.GaussianInterface.GaussianLogReader', 'GaussianLogReader', (['log_file'], {}), '(log_file)\n', (1306, 1316), False, 'from McUtils.GaussianInterface import GaussianLogReader, GaussianFChkReader\n'), ((1476, 1534), 'collections.namedtuple', 'namedtuple', (['"""dipole_log_values"""', "['cartesians', 'dipoles']"], {}), "('dipole_log_values', ['cartesians', 'dipoles'])\n", (1486, 1534), False, 'from collections import namedtuple\n'), ((2732, 2755), 'numpy.argsort', 'np.argsort', (['scan_coords'], {}), '(scan_coords)\n', (2742, 2755), True, 'import numpy as np\n'), ((3331, 3350), 'numpy.diff', 'np.diff', (['tol_coords'], {}), '(tol_coords)\n', (3338, 3350), True, 'import numpy as np\n'), ((3449, 3469), 'numpy.where', 'np.where', (['(diffs != 0)'], {}), '(diffs != 0)\n', (3457, 3469), True, 'import numpy as np\n'), ((3620, 3641), 'numpy.transpose', 'np.transpose', (['dipoles'], {}), '(dipoles)\n', (3632, 3641), True, 'import numpy as np\n'), ((3984, 4013), 'McUtils.GaussianInterface.GaussianFChkReader', 'GaussianFChkReader', (['fchk_file'], {}), '(fchk_file)\n', (4002, 4013), False, 'from McUtils.GaussianInterface import GaussianLogReader, GaussianFChkReader\n'), ((4343, 4406), 'collections.namedtuple', 'namedtuple', (['"""dipole_fchk_values"""', "['center', 'const', 'derivs']"], {}), "('dipole_fchk_values', ['center', 'const', 'derivs'])\n", (4353, 4406), False, 'from collections import namedtuple\n'), ((4860, 4880), 'numpy.transpose', 'np.transpose', (['derivs'], {}), '(derivs)\n', (4872, 4880), True, 'import numpy as np\n'), ((5117, 5228), 'McUtils.Zachary.Surface', 'Surface', (['((d,), opts)'], {'base': 'TaylorSeriesSurface', 'dipole_component': "('x' if i == 0 else 'y' if i == 1 else 'z')"}), "(((d,), opts), base=TaylorSeriesSurface, dipole_component='x' if i ==\n 0 else 'y' if i == 1 else 'z')\n", (5124, 5228), False, 'from McUtils.Zachary import Surface, MultiSurface, InterpolatedSurface, TaylorSeriesSurface\n'), ((6288, 6315), 'McUtils.GaussianInterface.GaussianLogReader', 'GaussianLogReader', (['log_file'], {}), '(log_file)\n', (6305, 6315), False, 'from McUtils.GaussianInterface import GaussianLogReader, GaussianFChkReader\n'), ((6643, 6688), 'numpy.array', 'np.array', (['parse_data[eng_key].energies[:, -1]'], {}), '(parse_data[eng_key].energies[:, -1])\n', (6651, 6688), True, 'import numpy as np\n'), ((6801, 6859), 'collections.namedtuple', 'namedtuple', (['"""potential_log_values"""', "['coords', 'energies']"], {}), "('potential_log_values', ['coords', 'energies'])\n", (6811, 6859), False, 'from collections import namedtuple\n'), ((8148, 8171), 'numpy.argsort', 'np.argsort', (['scan_coords'], {}), '(scan_coords)\n', (8158, 8171), True, 'import numpy as np\n'), ((8741, 8760), 'numpy.diff', 'np.diff', (['tol_coords'], {}), '(tol_coords)\n', (8748, 8760), True, 'import numpy as np\n'), ((8859, 8879), 'numpy.where', 'np.where', (['(diffs != 0)'], {}), '(diffs != 0)\n', (8867, 8879), True, 'import numpy as np\n'), 
((9291, 9320), 'McUtils.GaussianInterface.GaussianFChkReader', 'GaussianFChkReader', (['fchk_file'], {}), '(fchk_file)\n', (9309, 9320), False, 'from McUtils.GaussianInterface import GaussianLogReader, GaussianFChkReader\n'), ((9658, 9725), 'collections.namedtuple', 'namedtuple', (['"""potential_fchk_values"""', "['center', 'energy', 'derivs']"], {}), "('potential_fchk_values', ['center', 'energy', 'derivs'])\n", (9668, 9725), False, 'from collections import namedtuple\n'), ((3396, 3423), 'numpy.diff', 'np.diff', (['tol_coords'], {'axis': '(0)'}), '(tol_coords, axis=0)\n', (3403, 3423), True, 'import numpy as np\n'), ((3688, 3811), 'McUtils.Zachary.Surface', 'Surface', (['((scan_coords, d), opts)'], {'base': 'InterpolatedSurface', 'dipole_component': "('x' if i == 0 else 'y' if i == 1 else 'z')"}), "(((scan_coords, d), opts), base=InterpolatedSurface,\n dipole_component='x' if i == 0 else 'y' if i == 1 else 'z')\n", (3695, 3811), False, 'from McUtils.Zachary import Surface, MultiSurface, InterpolatedSurface, TaylorSeriesSurface\n'), ((8806, 8833), 'numpy.diff', 'np.diff', (['tol_coords'], {'axis': '(0)'}), '(tol_coords, axis=0)\n', (8813, 8833), True, 'import numpy as np\n'), ((5867, 5893), 'numpy.product', 'np.product', (['gps.shape[-2:]'], {}), '(gps.shape[-2:])\n', (5877, 5893), True, 'import numpy as np\n')]
|
import numpy as np
import torch
import torch.nn as nn
import torch.optim as optim
import torch.nn.functional as F
from torch import from_numpy
from collections import namedtuple
import pandas as pd
Action = namedtuple('Action', ['position', 'action'])
Perspective = namedtuple('Perspective', ['perspective', 'position'])
Transition = namedtuple('Transition',
['state', 'action', 'reward', 'next_state', 'terminal'])
def conv_to_fully_connected(input_size, filter_size, padding, stride):
return (input_size - filter_size + 2 * padding) / stride + 1
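# Standard convolution output-size formula: W_out = (W - F + 2P) / S + 1,
# e.g. a 9-wide input with a 3-wide filter, padding 1, stride 1 stays 9 wide.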
def pad_circular(x, pad):
x = torch.cat([x, x[:,:,:,0:pad]], dim=3)
x = torch.cat([x, x[:,:, 0:pad,:]], dim=2)
x = torch.cat([x[:,:,:,-2 * pad:-pad], x], dim=3)
x = torch.cat([x[:,:, -2 * pad:-pad,:], x], dim=2)
return x
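# Example (hypothetical shapes): padding a (1, 1, 4, 4) grid with pad=1
# returns a (1, 1, 6, 6) tensor whose extra rows/columns are copied from the
# opposite edges, i.e. periodic (toroidal) boundary conditions on the last
# two dimensions.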
def incremental_mean(x, mu, N):
return mu + (x - mu) / (N)
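# Running-mean update: mu_N = mu_{N-1} + (x_N - mu_{N-1}) / N, which equals
# the arithmetic mean of the first N samples without storing them all.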
def convert_from_np_to_tensor(tensor):
tensor = from_numpy(tensor)
tensor = tensor.type('torch.Tensor')
return tensor
|
[
"torch.cat",
"collections.namedtuple",
"torch.from_numpy"
] |
[((209, 253), 'collections.namedtuple', 'namedtuple', (['"""Action"""', "['position', 'action']"], {}), "('Action', ['position', 'action'])\n", (219, 253), False, 'from collections import namedtuple\n'), ((269, 323), 'collections.namedtuple', 'namedtuple', (['"""Perspective"""', "['perspective', 'position']"], {}), "('Perspective', ['perspective', 'position'])\n", (279, 323), False, 'from collections import namedtuple\n'), ((338, 423), 'collections.namedtuple', 'namedtuple', (['"""Transition"""', "['state', 'action', 'reward', 'next_state', 'terminal']"], {}), "('Transition', ['state', 'action', 'reward', 'next_state',\n 'terminal'])\n", (348, 423), False, 'from collections import namedtuple\n'), ((617, 657), 'torch.cat', 'torch.cat', (['[x, x[:, :, :, 0:pad]]'], {'dim': '(3)'}), '([x, x[:, :, :, 0:pad]], dim=3)\n', (626, 657), False, 'import torch\n'), ((663, 703), 'torch.cat', 'torch.cat', (['[x, x[:, :, 0:pad, :]]'], {'dim': '(2)'}), '([x, x[:, :, 0:pad, :]], dim=2)\n', (672, 703), False, 'import torch\n'), ((710, 758), 'torch.cat', 'torch.cat', (['[x[:, :, :, -2 * pad:-pad], x]'], {'dim': '(3)'}), '([x[:, :, :, -2 * pad:-pad], x], dim=3)\n', (719, 758), False, 'import torch\n'), ((764, 812), 'torch.cat', 'torch.cat', (['[x[:, :, -2 * pad:-pad, :], x]'], {'dim': '(2)'}), '([x[:, :, -2 * pad:-pad, :], x], dim=2)\n', (773, 812), False, 'import torch\n'), ((943, 961), 'torch.from_numpy', 'from_numpy', (['tensor'], {}), '(tensor)\n', (953, 961), False, 'from torch import from_numpy\n')]
|
#
# Copyright: 2017 Red Hat, Inc.
# Author: <NAME> <<EMAIL>>
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
#
from pytest import mark
from whitfield import ast
hex = [
["0x1234", 0x1234],
["0x12345678901234567890", 0x12345678901234567890],
["0x1 234", 0x1234],
["0x12 345 678 901 234 567 890", 0x12345678901234567890],
]
dec = [
["1234", 1234],
["12345678901234567890", 12345678901234567890],
["1 234", 1234],
["12 345 678 901 234 567 890", 12345678901234567890],
]
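# As the cases above imply, digit groups in both hex and decimal literals may
# be separated by spaces; the parser is expected to strip them before
# conversion.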
idn = [
"abc",
"ab0",
"ab_",
"a0_",
mark.xfail("0bc"),
mark.xfail("!ab"),
mark.xfail("_ab"),
]
exp = [
["1", 1],
["a", "a"],
["1 @ b", [1, "@", "b"]],
["1 * b", [1, "*", "b"]],
["1 / b", [1, "/", "b"]],
["1 + b", [1, "+", "b"]],
["1 - b", [1, "-", "b"]],
["1 @ b @ 3", [1, "@", ["b", "@", 3]]],
["1 @ b * 3", [[1, "@", "b"], "*", 3]],
["1 @ b / 3", [[1, "@", "b"], "/", 3]],
["1 @ b + 3", [[1, "@", "b"], "+", 3]],
["1 @ b - 3", [[1, "@", "b"], "-", 3]],
["1 * b @ 3", [1, "*", ["b", "@", 3]]],
["1 * b * 3", [[1, "*", "b"], "*", 3]],
["1 * b / 3", [[1, "*", "b"], "/", 3]],
["1 * b + 3", [[1, "*", "b"], "+", 3]],
["1 * b - 3", [[1, "*", "b"], "-", 3]],
["1 / b @ 3", [1, "/", ["b", "@", 3]]],
["1 / b * 3", [[1, "/", "b"], "*", 3]],
["1 / b / 3", [[1, "/", "b"], "/", 3]],
["1 / b + 3", [[1, "/", "b"], "+", 3]],
["1 / b - 3", [[1, "/", "b"], "-", 3]],
["1 + b @ 3", [1, "+", ["b", "@", 3]]],
["1 + b * 3", [1, "+", ["b", "*", 3]]],
["1 + b / 3", [1, "+", ["b", "/", 3]]],
["1 + b + 3", [[1, "+", "b"], "+", 3]],
["1 + b - 3", [[1, "+", "b"], "-", 3]],
["1 - b @ 3", [1, "-", ["b", "@", 3]]],
["1 - b * 3", [1, "-", ["b", "*", 3]]],
["1 - b / 3", [1, "-", ["b", "/", 3]]],
["1 - b + 3", [[1, "-", "b"], "+", 3]],
["1 - b - 3", [[1, "-", "b"], "-", 3]],
["1 @ (b @ 3)", [1, "@", ["b", "@", 3]]],
["1 @ (b * 3)", [1, "@", ["b", "*", 3]]],
["1 @ (b / 3)", [1, "@", ["b", "/", 3]]],
["1 @ (b + 3)", [1, "@", ["b", "+", 3]]],
["1 @ (b - 3)", [1, "@", ["b", "-", 3]]],
["1 * (b @ 3)", [1, "*", ["b", "@", 3]]],
["1 * (b * 3)", [1, "*", ["b", "*", 3]]],
["1 * (b / 3)", [1, "*", ["b", "/", 3]]],
["1 * (b + 3)", [1, "*", ["b", "+", 3]]],
["1 * (b - 3)", [1, "*", ["b", "-", 3]]],
["1 / (b @ 3)", [1, "/", ["b", "@", 3]]],
["1 / (b * 3)", [1, "/", ["b", "*", 3]]],
["1 / (b / 3)", [1, "/", ["b", "/", 3]]],
["1 / (b + 3)", [1, "/", ["b", "+", 3]]],
["1 / (b - 3)", [1, "/", ["b", "-", 3]]],
["1 + (b @ 3)", [1, "+", ["b", "@", 3]]],
["1 + (b * 3)", [1, "+", ["b", "*", 3]]],
["1 + (b / 3)", [1, "+", ["b", "/", 3]]],
["1 + (b + 3)", [1, "+", ["b", "+", 3]]],
["1 + (b - 3)", [1, "+", ["b", "-", 3]]],
["1 - (b @ 3)", [1, "-", ["b", "@", 3]]],
["1 - (b * 3)", [1, "-", ["b", "*", 3]]],
["1 - (b / 3)", [1, "-", ["b", "/", 3]]],
["1 - (b + 3)", [1, "-", ["b", "+", 3]]],
["1 - (b - 3)", [1, "-", ["b", "-", 3]]],
["1 * b - 3 @ c / 5", [[1, "*", "b"], "-", [[3, "@", "c"], "/", 5]]],
]
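# Precedence implied by the table above, tightest first: '@'
# (right-associative), then '*' and '/', then '+' and '-' (both
# left-associative); parentheses override as usual.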
asn = [[f"x = {e[0]};", ["x", e[1]]] for e in exp] + [
["x = 7;", ["x", 7]],
mark.xfail(["0x = 7;", ["x", 7]]),
mark.xfail(["_x = 7;", ["x", 7]]),
]
arg = [[x, [x]] for x in idn if isinstance(x, str)] \
+ [[f"{x}, {x}", [x, x]] for x in idn if isinstance(x, str)] \
+ [mark.xfail([f"{x} {x}", [x, x]]) for x in idn if isinstance(x, str)]
fnc = [
mark.xfail(["0foo(a)(b){ b = a; }", {}]),
mark.xfail(["_foo(a)(b){ b = a; }", {}]),
mark.xfail(["foo(a)(b){}", {}]),
["foo(a)(b) { b = a; }",
{"name": "foo", "args": ["a"], "rets": ["b"], "body": [["b", "a"]]}],
["foo(a, b)(c, d) { c = a; d = a + b - 7; }",
{"name": "foo", "args": ["a", "b"], "rets": ["c", "d"],
"body": [["c", "a"], ["d", [["a", "+", "b"], "-", 7]]]}],
]
imp = [
"foo",
"bar",
"foo.bar",
"foo_bar",
"foo-bar",
"foo/bar",
"../foo",
"./f00",
"../../foo/b4r",
mark.xfail("foo."),
mark.xfail("bar/"),
]
bdy = [
["import foo;", ["foo"]],
["c = 7; a = 12;", [["c", 7], ["a", 12]]],
["foo(a)(b) { b = a; }",
[{"name": "foo", "args": ["a"], "rets": ["b"], "body": [["b", "a"]]}]],
["import bar; c = 7; foo(a)(b) { b = a; }",
["bar", ["c", 7],
{"name": "foo", "args": ["a"], "rets": ["b"], "body": [["b", "a"]]}]],
]
fld = [
["127", 127],
["2 @ 255 - 19", [[2, "@", 255], "-", 19]]
]
wht = [[f"field {f[0]}; {b[0]}", {"limit": f[1], "items": b[1]}]
for b in bdy for f in fld]
@mark.parametrize("tst,exp", hex)
def test_hex(tst, exp):
assert ast.HEX.parseString(tst)[0] == exp
@mark.parametrize("tst,exp", dec)
def test_dec(tst, exp):
assert ast.DEC.parseString(tst)[0] == exp
@mark.parametrize("tst,exp", hex + dec)
def test_num(tst, exp):
assert ast.NUM.parseString(tst)[0] == exp
@mark.parametrize("tst", idn)
def test_idn(tst):
assert ast.IDN.parseString(tst)[0] == tst
@mark.parametrize("tst,exp", exp)
def test_exp(tst, exp):
assert ast.EXP.parseString(tst).asList()[0] == exp
@mark.parametrize("tst,exp", asn)
def test_asn(tst, exp):
assert ast.ASN.parseString(tst).asList()[0] == exp
@mark.parametrize("tst,exp", arg)
def test_arg(tst, exp):
assert ast.ARG.parseString(tst).asList()[0] == exp
@mark.parametrize("tst,exp", fnc)
def test_fnc(tst, exp):
assert ast.FNC.parseString(tst).asList()[0] == exp
@mark.parametrize("tst", imp)
def test_imp(tst):
assert ast.IMP.parseString(f"import {tst};").asList()[0] == tst
@mark.parametrize("tst,exp", bdy)
def test_bdy(tst, exp):
assert ast.BDY.parseString(tst).asList()[0] == exp
@mark.parametrize("tst,exp", fld)
def test_fld(tst, exp):
assert ast.FLD.parseString(tst).asList()[0] == exp
@mark.parametrize("tst,exp", wht)
def test_wht(tst, exp):
assert ast.WHT.parseString(tst).asList()[0] == exp
|
[
"whitfield.ast.ARG.parseString",
"whitfield.ast.FLD.parseString",
"whitfield.ast.FNC.parseString",
"whitfield.ast.ASN.parseString",
"whitfield.ast.BDY.parseString",
"whitfield.ast.EXP.parseString",
"whitfield.ast.WHT.parseString",
"whitfield.ast.HEX.parseString",
"whitfield.ast.NUM.parseString",
"whitfield.ast.IDN.parseString",
"pytest.mark.parametrize",
"whitfield.ast.DEC.parseString",
"whitfield.ast.IMP.parseString",
"pytest.mark.xfail"
] |
[((5402, 5434), 'pytest.mark.parametrize', 'mark.parametrize', (['"""tst,exp"""', 'hex'], {}), "('tst,exp', hex)\n", (5418, 5434), False, 'from pytest import mark\n'), ((5508, 5540), 'pytest.mark.parametrize', 'mark.parametrize', (['"""tst,exp"""', 'dec'], {}), "('tst,exp', dec)\n", (5524, 5540), False, 'from pytest import mark\n'), ((5614, 5652), 'pytest.mark.parametrize', 'mark.parametrize', (['"""tst,exp"""', '(hex + dec)'], {}), "('tst,exp', hex + dec)\n", (5630, 5652), False, 'from pytest import mark\n'), ((5726, 5754), 'pytest.mark.parametrize', 'mark.parametrize', (['"""tst"""', 'idn'], {}), "('tst', idn)\n", (5742, 5754), False, 'from pytest import mark\n'), ((5823, 5855), 'pytest.mark.parametrize', 'mark.parametrize', (['"""tst,exp"""', 'exp'], {}), "('tst,exp', exp)\n", (5839, 5855), False, 'from pytest import mark\n'), ((5938, 5970), 'pytest.mark.parametrize', 'mark.parametrize', (['"""tst,exp"""', 'asn'], {}), "('tst,exp', asn)\n", (5954, 5970), False, 'from pytest import mark\n'), ((6053, 6085), 'pytest.mark.parametrize', 'mark.parametrize', (['"""tst,exp"""', 'arg'], {}), "('tst,exp', arg)\n", (6069, 6085), False, 'from pytest import mark\n'), ((6168, 6200), 'pytest.mark.parametrize', 'mark.parametrize', (['"""tst,exp"""', 'fnc'], {}), "('tst,exp', fnc)\n", (6184, 6200), False, 'from pytest import mark\n'), ((6283, 6311), 'pytest.mark.parametrize', 'mark.parametrize', (['"""tst"""', 'imp'], {}), "('tst', imp)\n", (6299, 6311), False, 'from pytest import mark\n'), ((6402, 6434), 'pytest.mark.parametrize', 'mark.parametrize', (['"""tst,exp"""', 'bdy'], {}), "('tst,exp', bdy)\n", (6418, 6434), False, 'from pytest import mark\n'), ((6517, 6549), 'pytest.mark.parametrize', 'mark.parametrize', (['"""tst,exp"""', 'fld'], {}), "('tst,exp', fld)\n", (6533, 6549), False, 'from pytest import mark\n'), ((6632, 6664), 'pytest.mark.parametrize', 'mark.parametrize', (['"""tst,exp"""', 'wht'], {}), "('tst,exp', wht)\n", (6648, 6664), False, 'from pytest import mark\n'), ((1298, 1315), 'pytest.mark.xfail', 'mark.xfail', (['"""0bc"""'], {}), "('0bc')\n", (1308, 1315), False, 'from pytest import mark\n'), ((1321, 1338), 'pytest.mark.xfail', 'mark.xfail', (['"""!ab"""'], {}), "('!ab')\n", (1331, 1338), False, 'from pytest import mark\n'), ((1344, 1361), 'pytest.mark.xfail', 'mark.xfail', (['"""_ab"""'], {}), "('_ab')\n", (1354, 1361), False, 'from pytest import mark\n'), ((4285, 4325), 'pytest.mark.xfail', 'mark.xfail', (["['0foo(a)(b){ b = a; }', {}]"], {}), "(['0foo(a)(b){ b = a; }', {}])\n", (4295, 4325), False, 'from pytest import mark\n'), ((4331, 4371), 'pytest.mark.xfail', 'mark.xfail', (["['_foo(a)(b){ b = a; }', {}]"], {}), "(['_foo(a)(b){ b = a; }', {}])\n", (4341, 4371), False, 'from pytest import mark\n'), ((4377, 4408), 'pytest.mark.xfail', 'mark.xfail', (["['foo(a)(b){}', {}]"], {}), "(['foo(a)(b){}', {}])\n", (4387, 4408), False, 'from pytest import mark\n'), ((4834, 4852), 'pytest.mark.xfail', 'mark.xfail', (['"""foo."""'], {}), "('foo.')\n", (4844, 4852), False, 'from pytest import mark\n'), ((4858, 4876), 'pytest.mark.xfail', 'mark.xfail', (['"""bar/"""'], {}), "('bar/')\n", (4868, 4876), False, 'from pytest import mark\n'), ((3998, 4031), 'pytest.mark.xfail', 'mark.xfail', (["['0x = 7;', ['x', 7]]"], {}), "(['0x = 7;', ['x', 7]])\n", (4008, 4031), False, 'from pytest import mark\n'), ((4037, 4070), 'pytest.mark.xfail', 'mark.xfail', (["['_x = 7;', ['x', 7]]"], {}), "(['_x = 7;', ['x', 7]])\n", (4047, 4070), False, 'from pytest import mark\n'), ((4203, 4235), 
'pytest.mark.xfail', 'mark.xfail', (["[f'{x} {x}', [x, x]]"], {}), "([f'{x} {x}', [x, x]])\n", (4213, 4235), False, 'from pytest import mark\n'), ((5470, 5494), 'whitfield.ast.HEX.parseString', 'ast.HEX.parseString', (['tst'], {}), '(tst)\n', (5489, 5494), False, 'from whitfield import ast\n'), ((5576, 5600), 'whitfield.ast.DEC.parseString', 'ast.DEC.parseString', (['tst'], {}), '(tst)\n', (5595, 5600), False, 'from whitfield import ast\n'), ((5688, 5712), 'whitfield.ast.NUM.parseString', 'ast.NUM.parseString', (['tst'], {}), '(tst)\n', (5707, 5712), False, 'from whitfield import ast\n'), ((5785, 5809), 'whitfield.ast.IDN.parseString', 'ast.IDN.parseString', (['tst'], {}), '(tst)\n', (5804, 5809), False, 'from whitfield import ast\n'), ((5891, 5915), 'whitfield.ast.EXP.parseString', 'ast.EXP.parseString', (['tst'], {}), '(tst)\n', (5910, 5915), False, 'from whitfield import ast\n'), ((6006, 6030), 'whitfield.ast.ASN.parseString', 'ast.ASN.parseString', (['tst'], {}), '(tst)\n', (6025, 6030), False, 'from whitfield import ast\n'), ((6121, 6145), 'whitfield.ast.ARG.parseString', 'ast.ARG.parseString', (['tst'], {}), '(tst)\n', (6140, 6145), False, 'from whitfield import ast\n'), ((6236, 6260), 'whitfield.ast.FNC.parseString', 'ast.FNC.parseString', (['tst'], {}), '(tst)\n', (6255, 6260), False, 'from whitfield import ast\n'), ((6342, 6379), 'whitfield.ast.IMP.parseString', 'ast.IMP.parseString', (['f"""import {tst};"""'], {}), "(f'import {tst};')\n", (6361, 6379), False, 'from whitfield import ast\n'), ((6470, 6494), 'whitfield.ast.BDY.parseString', 'ast.BDY.parseString', (['tst'], {}), '(tst)\n', (6489, 6494), False, 'from whitfield import ast\n'), ((6585, 6609), 'whitfield.ast.FLD.parseString', 'ast.FLD.parseString', (['tst'], {}), '(tst)\n', (6604, 6609), False, 'from whitfield import ast\n'), ((6700, 6724), 'whitfield.ast.WHT.parseString', 'ast.WHT.parseString', (['tst'], {}), '(tst)\n', (6719, 6724), False, 'from whitfield import ast\n')]
|
import cases.templates.flying_wings as wings
import sharpy.sharpy_main
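# Build a Goland flying-wing test case and run it through SHARPy's dynamic
# coupled solution, swapping in the linearised StepLinearUVLM aero solver
# when aero_type == 'lin'.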
aero_type = 'lin'
ws = wings.Goland(M=4,
N=12,
Mstar_fact=10,
u_inf=50,
alpha=1.,
rho=1.225,
sweep=0,
physical_time=2,
n_surfaces=2,
route='cases',
case_name='goland_'+aero_type+'_newsolver_pred')
ws.gust_intensity = 0.01
# ws.n_tstep = 2
ws.sigma = 1
ws.clean_test_files()
ws.update_derived_params()
ws.update_aero_prop()
ws.update_fem_prop()
ws.set_default_config_dict()
ws.generate_aero_file()
ws.generate_fem_file()
ws.config['SHARPy']['flow'] = ['BeamLoader', 'AerogridLoader',
#'StaticUvlm',
'StaticCoupled',
'AerogridPlot', 'BeamPlot',
'DynamicCoupled','Modal',
# 'SaveData']
]
ws.config['SHARPy']['write_screen'] = 'on'
ws.config['DynamicCoupled']['aero_solver_settings']['velocity_field_input']['gust_length'] = 5
if aero_type == 'lin':
ws.config['DynamicCoupled']['aero_solver'] = 'StepLinearUVLM'
ws.config['DynamicCoupled']['aero_solver_settings'] = {'dt': ws.dt,
'remove_predictor': False,
'use_sparse': False,
'integr_order': 2,
'velocity_field_generator': 'GustVelocityField',
'velocity_field_input': {'u_inf': ws.u_inf,
'u_inf_direction': [1., 0., 0.],
'gust_shape': '1-cos',
'offset': 15.,
'gust_parameters': {'gust_length': 5.,
'gust_intensity': ws.gust_intensity
* ws.u_inf,
'span': ws.main_chord * ws.aspect_ratio}}}
# 'velocity_field_generator': 'SteadyVelocityField',
# 'velocity_field_input': {'u_inf': ws.u_inf*1,
# 'u_inf_direction': [1., 0., 0.]}}
ws.config.write()
data = sharpy.sharpy_main.main(['',ws.route+ws.case_name+'.solver.txt'])
|
[
"cases.templates.flying_wings.Goland"
] |
[((95, 282), 'cases.templates.flying_wings.Goland', 'wings.Goland', ([], {'M': '(4)', 'N': '(12)', 'Mstar_fact': '(10)', 'u_inf': '(50)', 'alpha': '(1.0)', 'rho': '(1.225)', 'sweep': '(0)', 'physical_time': '(2)', 'n_surfaces': '(2)', 'route': '"""cases"""', 'case_name': "('goland_' + aero_type + '_newsolver_pred')"}), "(M=4, N=12, Mstar_fact=10, u_inf=50, alpha=1.0, rho=1.225,\n sweep=0, physical_time=2, n_surfaces=2, route='cases', case_name=\n 'goland_' + aero_type + '_newsolver_pred')\n", (107, 282), True, 'import cases.templates.flying_wings as wings\n')]
|
import pyaudio
import wave
import os
import numpy as np
from sys import byteorder
from array import array
#CHUNK = 1024
CHUNK = 2048
FORMAT = pyaudio.paInt16
CHANNELS = 1
#RATE = 44100
RATE = 16000
RECORD_SECONDS = 10
WAVE_OUTPUT_FILENAME = "output.wav"
p = pyaudio.PyAudio()
stream = p.open(format=FORMAT,
channels=CHANNELS,
rate=RATE,
input=True,
frames_per_buffer=CHUNK)
# print("* recording")
def wavmaker(stream):
frames = []
recording = array('h')
for i in range(0, int(RATE / CHUNK * RECORD_SECONDS)):
        data = stream.read(CHUNK)
        frames.append(data)
        datab = array('h', data)
if byteorder == 'big':
datab.byteswap()
recording.extend(datab)
print("* done recording")
stream.stop_stream()
stream.close()
p.terminate()
wf = wave.open(WAVE_OUTPUT_FILENAME, 'wb')
wf.setnchannels(CHANNELS)
wf.setsampwidth(p.get_sample_size(FORMAT))
wf.setframerate(RATE)
wf.writeframes(b''.join(frames))
wf.close()
os.system('ffmpeg -i output.wav -ar 16000 -ac 1 trackb.wav -y')
#print(recording)
return recording
def main():
top = wavmaker(stream)
if __name__ == "__main__":
main()
|
[
"wave.open",
"os.system",
"array.array",
"pyaudio.PyAudio"
] |
[((262, 279), 'pyaudio.PyAudio', 'pyaudio.PyAudio', ([], {}), '()\n', (277, 279), False, 'import pyaudio\n'), ((526, 536), 'array.array', 'array', (['"""h"""'], {}), "('h')\n", (531, 536), False, 'from array import array\n'), ((905, 942), 'wave.open', 'wave.open', (['WAVE_OUTPUT_FILENAME', '"""wb"""'], {}), "(WAVE_OUTPUT_FILENAME, 'wb')\n", (914, 942), False, 'import wave\n'), ((1103, 1166), 'os.system', 'os.system', (['"""ffmpeg -i output.wav -ar 16000 -ac 1 trackb.wav -y"""'], {}), "('ffmpeg -i output.wav -ar 16000 -ac 1 trackb.wav -y')\n", (1112, 1166), False, 'import os\n'), ((693, 708), 'array.array', 'array', (['"""h"""', 'sch'], {}), "('h', sch)\n", (698, 708), False, 'from array import array\n')]
|
# -*- coding: utf-8 -*-
from __future__ import print_function
import logging
import re
import unicodedata
from website import settings
logger = logging.getLogger(__name__)
if settings.SEARCH_ANALYZER == settings.SEARCH_ANALYZER_JAPANESE:
DEFAULT_OPERATOR = 'AND'
else:
DEFAULT_OPERATOR = 'OR'
TITLE_WEIGHT = 4
DESCRIPTION_WEIGHT = 1.2
JOB_SCHOOL_BOOST = 1
ALL_JOB_SCHOOL_BOOST = 0.125
def build_query(qs='*', start=0, size=10, sort=None, user_guid=None):
query_body = build_query_string(qs)
if user_guid is not None:
query_body = {
'bool': {
'should': [
query_body,
{
'match': {
'id': {
'query': user_guid,
'boost': 10.0
}
}
}
]
}
}
query = {
'query': query_body,
'from': start,
'size': size,
}
if sort:
query['sort'] = [
{
sort: 'desc'
}
]
return query
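# Example of the query envelope this builds (hypothetical arguments):
#   build_query('test', start=0, size=5, sort='date_modified')
#   -> {'query': <query_string body>, 'from': 0, 'size': 5,
#       'sort': [{'date_modified': 'desc'}]}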
# Match queryObject in search.js
def build_query_string(qs):
field_boosts = {
'title': TITLE_WEIGHT,
'description': DESCRIPTION_WEIGHT,
'job': JOB_SCHOOL_BOOST,
'school': JOB_SCHOOL_BOOST,
'all_jobs': ALL_JOB_SCHOOL_BOOST,
'all_schools': ALL_JOB_SCHOOL_BOOST,
'_all': 1,
}
fields = ['{}^{}'.format(k, v) for k, v in field_boosts.items()]
# for highlight
add_fields = ['name', 'user', 'text', 'comments.*']
for f in add_fields:
fields.append('{}^1'.format(f))
fields.append('{}.*^1'.format(f))
return {
'query_string': {
'default_operator': DEFAULT_OPERATOR,
'default_field': '_all',
'fields': fields,
'query': qs,
'analyze_wildcard': True,
'lenient': True # TODO, may not want to do this
}
}
def clean_splitters(text):
new_text = text.replace('_', ' ').replace('-', ' ').replace('.', ' ')
if new_text == text:
return ''
return unicode_normalize(new_text)
def es_escape(text):
    # see https://www.elastic.co/guide/en/elasticsearch/reference/current/query-dsl-query-string-query.html#_reserved_characters
text = re.sub(r'(?P<ES>[+\-=&|!(){}\[\]^"~*?:\\/])', r'\\\g<ES>', text)
# NOTE: < and > cannot be escaped at all. The only way to prevent
# them from attempting to create a range query is to remove them
# from the query string entirely.
return re.sub(r'(?P<ES>[><])', ' ', text)
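# e.g. es_escape('a+b:c') -> 'a\\+b\\:c', while es_escape('a<b') -> 'a b'
# because < and > are replaced with spaces rather than escaped.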
def _is_delimiter(char):
# FIXME: re.UNICODE is unnecessary in Python3
return re.match(r'\s\Z', char, flags=re.UNICODE) or char in [u'(', u')']
def quote(string):
"""
return: (quoted_string, quoted)
"""
    # An alphanumeric string containing * or ? is not quoted.
    # e.g. abc* -> abc*
    #      If abc* were quoted, ...
    #      bad pattern 1: "abc"* -> equivalent to "abc" OR * -> matches all
    #      bad pattern 2: "abc*" -> equivalent to "abc " -> matches "abc" only
# FIXME: flags=re.ASCII is necessary in Python3
if re.match(r'[\w\*\?]+\Z', string):
return (string, False)
else:
return (u'"{}"'.format(string), True) # quoted
def _quote(string):
s, _ = quote(string)
return s
def _quote_token(token):
"""
quoting Elasticsearch query string:
https://www.elastic.co/guide/en/elasticsearch/reference/2.3/query-dsl-query-string-query.html#query-string-syntax
"""
if token in [u'AND', u'OR', u'NOT', u'&&', u'||', u'!']:
return token
m = re.match(
r'(?P<prefix_op>\+|-)?' +
r'(?P<body>(?:\\.|[^\\~\^])+)?' +
r'(?P<suffix_op>(?:~|\^)[0-9\.]*)?\Z',
token
)
if m is None:
return token
prefix_op = m.group('prefix_op')
suffix_op = m.group('suffix_op')
body = m.group('body')
res = u''
if prefix_op is not None:
res += prefix_op
if body is not None:
parts = [u'']
in_escape = False
for c in body:
# backslash escape
if in_escape:
parts[-1] += c
in_escape = False
elif c == u'\\':
parts[-1] += c
in_escape = True
elif c == u':':
parts.append(c)
parts.append(u'')
else:
parts[-1] += c
if u':' not in parts:
res += _quote(body)
else:
has_key = False
for part in parts:
if not part:
continue
is_colon = part == u':'
if is_colon or not has_key:
res += part
if is_colon:
has_key = True
else:
res += _quote(part)
if suffix_op is not None:
res += suffix_op
return res
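# e.g. _quote_token(u'title:神保町~2') -> u'title:"神保町"~2': the field key and
# the fuzziness suffix pass through while the multibyte value is quoted.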
def quote_query_string(chars):
"""
    A multibyte character string is wrapped in double quotes,
    because the English analyzer of Elasticsearch decomposes
    multibyte character strings into an OR expression.
e.g. 神保町 -> 神 OR 保 OR 町
"神保町"-> 神保町
"""
if not isinstance(chars, unicode):
chars = chars.decode('utf-8')
token = u''
qs = u''
in_escape = False
in_quote = False
in_token = False
for c in chars:
# backslash escape
if in_escape:
token += c
in_escape = False
continue
if c == u'\\':
token += c
in_escape = True
continue
# quote
if c != u'"' and in_quote:
token += c
continue
if c == u'"' and in_quote:
token += c
qs += token
token = u''
in_quote = False
continue
# otherwise: not in_quote
if _is_delimiter(c) or c == u'"':
if in_token:
qs += _quote_token(token)
token = u''
in_token = False
if c == u'"':
token += c
in_quote = True
else:
qs += c
continue
# otherwise: not _is_delimiter(c)
token += c
in_token = True
if token:
qs += _quote_token(token)
return qs
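# e.g. quote_query_string(u'title:神保町 AND abc*') -> u'title:"神保町" AND abc*':
# the multibyte field value is quoted, while the boolean operator and the
# wildcard token pass through untouched.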
NORMALIZED_FIELDS = ('user', 'names', 'title', 'description', 'name', 'tags')
def replace_normalized_field(qs):
for name in NORMALIZED_FIELDS:
qs = re.sub('(^|[\\(\\s]){}\\:'.format(name),
'\\1normalized_{}:'.format(name), qs)
return qs
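# e.g. replace_normalized_field('title:foo (name:bar)')
#      -> 'normalized_title:foo (normalized_name:bar)'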
def convert_query_string(qs, normalize=False):
if settings.SEARCH_ANALYZER == settings.SEARCH_ANALYZER_ENGLISH:
qs = quote_query_string(qs)
qs = replace_normalized_field(qs)
logger.debug(u'convert_query_string: {}'.format(qs))
if normalize:
return unicode_normalize(qs)
else:
return qs
def build_private_search_query(user, qs='*', start=0, size=10, sort=None, highlight=None):
match_node = {
'bool': {
'must': [
{
'terms': {
'category': [
'project',
'component',
'registration',
'preprint'
]
}
},
{
'bool': {
'should': [
{
'term': {
'node_contributors.id': user._id
}
},
{
'term': {
'public': True
}
}
]
}
}
]
}
    }
match_file = {
'bool': {
'must': [
{
'term': {
'category': 'file'
}
},
{
'bool': {
'should': [
{
'term': {
'node_contributors.id': user._id
}
},
{
'term': {
'node_public': True
}
}
]
}
}
]
}
}
match_wiki = {
'bool': {
'must': [
{
'term': {
'category': 'wiki'
}
},
{
'bool': {
'should': [
{
'term': {
'node_contributors.id': user._id
}
},
{
'term': {
'node_public': True
}
}
]
}
}
]
}
}
match_comment = {
'bool': {
'must': [
{
'term': {
'category': 'comment'
}
},
{
'bool': {
'should': [
{
'term': {
'node_contributors.id': user._id
}
},
{
'term': {
'node_public': True
}
}
]
}
}
]
}
}
inner_query = build_query_string(qs)
query_body = {
'bool': {
# This is a filter to search only accessible data.
# If bool query context is used in a "filter"
# context and it has "should" clauses then at least
# one "should" clause is required to match.
# So "must" is used instead of "filter" here.
# See: https://www.elastic.co/guide/en/elasticsearch/reference/2.3/query-dsl-bool-query.html
'must': [
inner_query,
{
'bool': {
'should': [
match_node,
match_file,
match_wiki,
match_comment,
{
'terms': {
'category': [
'user',
'institution',
'collectionsubmission'
]
}
}
]
}
}
]
}
}
highlight_fields = {}
# Example:
# highlight='title:30,comments.*:30'
# ->
# highlight_fields = {
# 'title': {
# 'fragment_size': 30,
# },
# 'comments.*': {
# 'fragment_size': 124,
# },
# }
if highlight:
fields = highlight.split(',')
for field in fields:
key_val = field.split(':')
if len(key_val) >= 1:
key = key_val[0]
if len(key_val) == 2:
try:
val = int(key_val[1])
except Exception:
val = settings.SEARCH_HIGHLIGHT_FRAGMENT_SIZE
else:
val = settings.SEARCH_HIGHLIGHT_FRAGMENT_SIZE
highlight_fields[key] = {'fragment_size': val}
highlight_fields[key + '.*'] = {'fragment_size': val}
query = {
'query': query_body,
'highlight': {
'number_of_fragments': 1,
'pre_tags': ['<b>'],
'post_tags': ['</b>'],
'fields': highlight_fields,
'require_field_match': False,
'highlight_query': inner_query,
},
'from': start,
'size': size,
'sort': sort_query(sort),
}
return query
def sort_query(sort):
if sort is None: # default
sort = 'modified_desc'
def _split_target_order(sort):
try:
to = sort.split('_')
return to[0], to[1]
except Exception:
return None, None
target, order = _split_target_order(sort)
ASC = 'asc'
DESC = 'desc'
MODIFIED = 'date_modified'
CREATED = 'date_created'
PROJECT = 'sort_node_name'
FILE = 'sort_file_name'
WIKI = 'sort_wiki_name'
USER = 'sort_user_name'
INSTITUTION = 'sort_institution_name'
SCORE = '_score'
ERROR = 'unknown sort parameter: {}'.format(sort)
if order != ASC and order != DESC:
# order = None # use default
raise Exception(ERROR)
if target == 'project':
query = [
{PROJECT: order},
{FILE: order},
{WIKI: order},
{USER: order},
{INSTITUTION: order},
{MODIFIED: DESC},
{SCORE: ASC}
]
elif target == 'file':
query = [
{FILE: order},
{PROJECT: order},
{WIKI: order},
{USER: order},
{INSTITUTION: order},
{MODIFIED: DESC},
{SCORE: ASC}
]
elif target == 'wiki':
query = [
{WIKI: order},
{PROJECT: order},
{FILE: order},
{USER: order},
{INSTITUTION: order},
{MODIFIED: DESC},
{SCORE: ASC}
]
elif target == 'user':
query = [
{USER: order},
{PROJECT: order},
{WIKI: order},
{FILE: order},
{INSTITUTION: order},
{MODIFIED: DESC},
{SCORE: ASC}
]
elif target == 'institution':
query = [
{INSTITUTION: order},
{PROJECT: order},
{WIKI: order},
{FILE: order},
{USER: order},
{MODIFIED: DESC},
{SCORE: ASC}
]
elif target == 'created':
query = [
{CREATED: order},
{PROJECT: ASC},
{FILE: ASC},
{WIKI: ASC},
{USER: ASC},
{INSTITUTION: ASC},
{SCORE: ASC}
]
elif target == 'modified':
query = [
{MODIFIED: order},
{PROJECT: ASC},
{FILE: ASC},
{WIKI: ASC},
{USER: ASC},
{INSTITUTION: ASC},
{SCORE: ASC}
]
else:
raise Exception(ERROR)
return query
def unicode_normalize(text):
if text is None:
return None
if settings.SEARCH_ANALYZER == settings.SEARCH_ANALYZER_JAPANESE:
return text
if not isinstance(text, unicode):
text = text.decode('utf-8')
normalized = unicodedata.normalize('NFKD', text)
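    # e.g. NFKD decomposes u'é' into 'e' plus a combining accent; when
    # multilingual search is disabled, the combining mark is then dropped by
    # the ascii encode below, leaving plain 'e'.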
if not settings.ENABLE_MULTILINGUAL_SEARCH:
normalized = normalized.encode('ascii', 'ignore')
return normalized
|
[
"unicodedata.normalize",
"re.sub",
"re.match",
"logging.getLogger"
] |
[((146, 173), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (163, 173), False, 'import logging\n'), ((2408, 2478), 're.sub', 're.sub', (['"""(?P<ES>[+\\\\-=&|!(){}\\\\[\\\\]^"~*?:\\\\\\\\/])"""', '"""\\\\\\\\\\\\g<ES>"""', 'text'], {}), '(\'(?P<ES>[+\\\\-=&|!(){}\\\\[\\\\]^"~*?:\\\\\\\\/])\', \'\\\\\\\\\\\\g<ES>\', text)\n', (2414, 2478), False, 'import re\n'), ((2662, 2695), 're.sub', 're.sub', (['"""(?P<ES>[><])"""', '""" """', 'text'], {}), "('(?P<ES>[><])', ' ', text)\n", (2668, 2695), False, 'import re\n'), ((3231, 3266), 're.match', 're.match', (['"""[\\\\w\\\\*\\\\?]+\\\\Z"""', 'string'], {}), "('[\\\\w\\\\*\\\\?]+\\\\Z', string)\n", (3239, 3266), False, 'import re\n'), ((3715, 3839), 're.match', 're.match', (["('(?P<prefix_op>\\\\+|-)?' + '(?P<body>(?:\\\\\\\\.|[^\\\\\\\\~\\\\^])+)?' +\n '(?P<suffix_op>(?:~|\\\\^)[0-9\\\\.]*)?\\\\Z')", 'token'], {}), "('(?P<prefix_op>\\\\+|-)?' + '(?P<body>(?:\\\\\\\\.|[^\\\\\\\\~\\\\^])+)?' +\n '(?P<suffix_op>(?:~|\\\\^)[0-9\\\\.]*)?\\\\Z', token)\n", (3723, 3839), False, 'import re\n'), ((15869, 15904), 'unicodedata.normalize', 'unicodedata.normalize', (['"""NFKD"""', 'text'], {}), "('NFKD', text)\n", (15890, 15904), False, 'import unicodedata\n'), ((2785, 2827), 're.match', 're.match', (['"""\\\\s\\\\Z"""', 'char'], {'flags': 're.UNICODE'}), "('\\\\s\\\\Z', char, flags=re.UNICODE)\n", (2793, 2827), False, 'import re\n')]
|
from flask import current_app
from werkzeug.local import LocalProxy
if False:
import flask_taxonomies.api
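# The import above sits under `if False:` so type checkers can resolve the
# annotation comment below without importing the module at runtime.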
current_flask_taxonomies = LocalProxy( # type: flask_taxonomies.api.Api
lambda: current_app.extensions['flask-taxonomies'])
|
[
"werkzeug.local.LocalProxy"
] |
[((139, 202), 'werkzeug.local.LocalProxy', 'LocalProxy', (["(lambda : current_app.extensions['flask-taxonomies'])"], {}), "(lambda : current_app.extensions['flask-taxonomies'])\n", (149, 202), False, 'from werkzeug.local import LocalProxy\n')]
|
# coding=utf-8
# Copyright 2021 The Edward2 Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Definitions for random feature Gaussian process layer.
## References:
[1]: Liu et al. Simple and principled uncertainty estimation with deterministic
deep learning via distance awareness. In _Neural Information Processing
Systems_, 2020.
https://arxiv.org/abs/2006.10108
[2]: Xu et al. Understanding and Improving Layer Normalization. In _Neural
Information Processing Systems_, 2019.
https://papers.nips.cc/paper/2019/file/2f4fe03d77724a7217006e5d16728874-Paper.pdf
[3]: <NAME> and <NAME>. Random Features for Large-Scale Kernel
Machines. In _Neural Information Processing Systems_, 2007.
https://people.eecs.berkeley.edu/~brecht/papers/07.rah.rec.nips.pdf
[4]: <NAME>, <NAME>, <NAME>. Uncertainty Estimation with Infinitesimal
Jackknife. _arXiv preprint arXiv:2006.07584_, 2020.
https://arxiv.org/abs/2006.07584
"""
import dataclasses
import functools
from typing import Any, Callable, Iterable, Mapping, Optional, Tuple, Union
import flax.linen as nn
import jax
from jax import lax
from jax import random
import jax.numpy as jnp
# Jax-related data types.
PRNGKey = Any
Shape = Iterable[int]
Dtype = type(jnp.float32)
Array = jnp.ndarray
Initializer = Callable[[PRNGKey, Shape, Dtype], Array]
# Default config for random features.
default_rbf_activation = jnp.cos
default_rbf_bias_init = nn.initializers.uniform(scale=2. * jnp.pi)
# Using "he_normal" style random feature distribution. Effectively, this is
# equivalent to approximating a RBF kernel but with the input standardized by
# its dimensionality (i.e., input_scaled = input * sqrt(2. / dim_input)) and
# empirically leads to better performance for neural network inputs.
default_rbf_kernel_init = nn.initializers.variance_scaling(
scale=2.0, mode='fan_in', distribution='normal')
# Default field value for kwargs, to be used for data class declaration.
default_kwarg_dict = lambda: dataclasses.field(default_factory=dict)
SUPPORTED_LIKELIHOOD = ('binary_logistic', 'poisson', 'gaussian')
MIN_SCALE_MONTE_CARLO = 1e-3
class RandomFeatureGaussianProcess(nn.Module):
"""A Gaussian process layer using random Fourier features [1].
Attributes:
features: the number of output units.
hidden_features: the number of hidden random fourier features.
normalize_input: whether to normalize the input using nn.LayerNorm.
norm_kwargs: Optional keyword arguments to the input nn.LayerNorm layer.
hidden_kwargs: Optional keyword arguments to the random feature layer.
output_kwargs: Optional keyword arguments to the predictive logit layer.
covmat_kwargs: Optional keyword arguments to the predictive covmat layer.
"""
features: int
hidden_features: int = 1024
normalize_input: bool = True
# Optional keyword arguments.
norm_kwargs: Mapping[str, Any] = default_kwarg_dict()
hidden_kwargs: Mapping[str, Any] = default_kwarg_dict()
output_kwargs: Mapping[str, Any] = default_kwarg_dict()
covmat_kwargs: Mapping[str, Any] = default_kwarg_dict()
def setup(self):
"""Defines model layers."""
# pylint:disable=invalid-name,not-a-mapping
if self.normalize_input:
# Prefer a parameter-free version of LayerNorm by default [2]. Can be
# overwritten by passing norm_kwargs=dict(use_bias=..., use_scales=...).
LayerNorm = functools.partial(
nn.LayerNorm, use_bias=False, use_scale=False)
self.norm_layer = LayerNorm(**self.norm_kwargs)
self.hidden_layer = RandomFourierFeatures(
features=self.hidden_features, **self.hidden_kwargs)
self.output_layer = nn.Dense(features=self.features, **self.output_kwargs)
self.covmat_layer = LaplaceRandomFeatureCovariance(
hidden_features=self.hidden_features, **self.covmat_kwargs)
# pylint:enable=invalid-name,not-a-mapping
def __call__(self,
inputs: Array,
return_full_covmat: bool = False,
return_random_features: bool = False) -> Array:
"""Computes Gaussian process outputs.
Args:
inputs: the nd-array of shape (batch_size, ..., input_dim).
return_full_covmat: whether to return the full covariance matrix, shape
(batch_size, batch_size), or only return the predictive variances with
shape (batch_size, ).
return_random_features: whether to return the random fourier features for
the inputs.
Returns:
A tuple of predictive logits, predictive covmat and (optionally)
random Fourier features.
"""
gp_inputs = self.norm_layer(inputs) if self.normalize_input else inputs
gp_features = self.hidden_layer(gp_inputs)
gp_logits = self.output_layer(gp_features)
gp_covmat = self.covmat_layer(
gp_features, gp_logits, diagonal_only=not return_full_covmat)
# Returns predictive logits, covmat and (optionally) random features.
if return_random_features:
return gp_logits, gp_covmat, gp_features
return gp_logits, gp_covmat
class RandomFourierFeatures(nn.Module):
"""A random fourier feature (RFF) layer that approximates a kernel model.
The random feature transformation is a one-hidden-layer network with
non-trainable weights (see, e.g., Algorithm 1 of [3]). Specifically:
f(x) = activation(x @ kernel + bias) * output_scale.
The forward pass logic closely follows that of the nn.Dense.
Attributes:
features: the number of output units.
    feature_scale: scale to apply to the output (default: 1.; if set to
      None, sqrt(2. / features) is used instead; see Algorithm 1 of [3]).
activation: activation function to apply to the output.
kernel_init: initializer function for the weight matrix.
bias_init: initializer function for the bias.
seed: random seed for generating random features (default: 0). This will
override the external RNGs.
dtype: the dtype of the computation (default: float32).
"""
features: int
feature_scale: Optional[jnp.float32] = 1.
activation: Callable[[Array], Array] = default_rbf_activation
kernel_init: Initializer = default_rbf_kernel_init
bias_init: Initializer = default_rbf_bias_init
seed: int = 0
dtype: Dtype = jnp.float32
collection_name: str = 'random_features'
def setup(self):
# Defines the random number generator.
self.rng = random.PRNGKey(self.seed)
# Processes random feature scale.
self._feature_scale = self.feature_scale
if self._feature_scale is None:
self._feature_scale = jnp.sqrt(2. / self.features)
self._feature_scale = jnp.asarray(self._feature_scale, dtype=self.dtype)
@nn.compact
def __call__(self, inputs: Array) -> Array:
"""Applies random feature transformation along the last dimension of inputs.
Args:
inputs: The nd-array to be transformed.
Returns:
The transformed input.
"""
# Initializes variables.
input_dim = inputs.shape[-1]
kernel_rng, bias_rng = random.split(self.rng, num=2)
kernel_shape = (input_dim, self.features)
kernel = self.variable(self.collection_name, 'kernel', self.kernel_init,
kernel_rng, kernel_shape, self.dtype)
bias = self.variable(self.collection_name, 'bias', self.bias_init,
bias_rng, (self.features,), self.dtype)
# Specifies multiplication dimension.
contracting_dims = ((inputs.ndim - 1,), (0,))
batch_dims = ((), ())
# Performs forward pass.
inputs = jnp.asarray(inputs, self.dtype)
outputs = lax.dot_general(inputs, kernel.value,
(contracting_dims, batch_dims))
outputs = outputs + jnp.broadcast_to(bias.value, outputs.shape)
return self._feature_scale * self.activation(outputs)
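# Minimal usage sketch (hypothetical shapes). The kernel and bias live in
# the non-trainable 'random_features' collection, so they are returned by
# init() alongside the (empty) params:
#   layer = RandomFourierFeatures(features=64)
#   variables = layer.init(random.PRNGKey(0), jnp.ones((2, 8)))
#   feats = layer.apply(variables, jnp.ones((2, 8)))  # shape (2, 64)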
class LaplaceRandomFeatureCovariance(nn.Module):
"""Computes the Gaussian Process covariance using Laplace method.
Attributes:
hidden_features: the number of random fourier features.
ridge_penalty: Initial Ridge penalty to weight covariance matrix. This value
      is used to stabilize the eigenvalues of the weight covariance estimate so that
the matrix inverse can be computed for Cov = inv(t(X) @ X + s * I). The
ridge factor s cannot be too large since otherwise it will dominate the
t(X) * X term and make covariance estimate not meaningful.
momentum: A discount factor used to compute the moving average for posterior
precision matrix. Analogous to the momentum factor in batch normalization.
If `None` then update covariance matrix using a naive sum without
momentum, which is desirable if the goal is to compute the exact
covariance matrix by passing through data once (say in the final epoch).
In this case, make sure to reset the precision matrix variable between
epochs by replacing it with self.initial_precision_matrix().
likelihood: The likelihood to use for computing Laplace approximation for
the covariance matrix. Can be one of ('binary_logistic', 'poisson',
'gaussian').
"""
hidden_features: int
ridge_penalty: float = 1.
momentum: Optional[float] = None
likelihood: str = 'gaussian'
collection_name: str = 'laplace_covariance'
dtype: Dtype = jnp.float32
def setup(self):
if self.momentum is not None:
if self.momentum < 0. or self.momentum > 1.:
raise ValueError(f'`momentum` must be between (0, 1). '
f'Got {self.momentum}.')
if self.likelihood not in SUPPORTED_LIKELIHOOD:
raise ValueError(f'"likelihood" must be one of {SUPPORTED_LIKELIHOOD}, '
f'got {self.likelihood}.')
@nn.compact
def __call__(self,
gp_features: Array,
gp_logits: Optional[Array] = None,
diagonal_only: bool = True) -> Optional[Array]:
"""Updates the precision matrix and computes the predictive covariance.
NOTE:
The precision matrix will be updated only during training (i.e., when
`self.collection_name` are in the list of mutable variables). The covariance
matrix will be computed only during inference to avoid repeated calls to the
(expensive) `linalg.inv` op.
Args:
gp_features: The nd-array of random fourier features, shape (batch_size,
..., hidden_features).
gp_logits: The nd-array of predictive logits, shape (batch_size, ...,
logit_dim). Cannot be None if self.likelihood is not `gaussian`.
diagonal_only: Whether to return only the diagonal elements of the
predictive covariance matrix (i.e., the predictive variance).
Returns:
The predictive variances of shape (batch_size, ) if diagonal_only=True,
otherwise the predictive covariance matrix of shape
(batch_size, batch_size).
"""
gp_features = jnp.asarray(gp_features, self.dtype)
# Flatten GP features and logits to 2-d, by doing so we treat all the
# non-final dimensions as the batch dimensions.
gp_features = jnp.reshape(gp_features, [-1, self.hidden_features])
if gp_logits is not None:
gp_logits = jnp.asarray(gp_logits, self.dtype)
gp_logits = jnp.reshape(gp_logits, [gp_features.shape[0], -1])
precision_matrix = self.variable(self.collection_name, 'precision_matrix',
lambda: self.initial_precision_matrix()) # pylint: disable=unnecessary-lambda
# Updates the precision matrix during training.
initializing = self.is_mutable_collection('params')
training = self.is_mutable_collection(self.collection_name)
if training and not initializing:
precision_matrix.value = self.update_precision_matrix(
gp_features, gp_logits, precision_matrix.value)
# Computes covariance matrix during inference.
if not training:
return self.compute_predictive_covariance(gp_features, precision_matrix,
diagonal_only)
def initial_precision_matrix(self):
"""Returns the initial diagonal precision matrix."""
return jnp.eye(self.hidden_features, dtype=self.dtype) * self.ridge_penalty
def update_precision_matrix(self, gp_features: Array,
gp_logits: Optional[Array],
precision_matrix: Array) -> Array:
"""Updates precision matrix given a new batch.
Args:
gp_features: random features from the new batch, shape (batch_size,
hidden_features)
gp_logits: predictive logits from the new batch, shape (batch_size,
logit_dim). Currently only logit_dim=1 is supported.
precision_matrix: the current precision matrix, shape (hidden_features,
hidden_features).
Returns:
Updated precision matrix, shape (hidden_features, hidden_features).
Raises:
(ValueError) If the logit is None or not univariate when likelihood is
not Gaussian.
"""
if self.likelihood != 'gaussian':
if gp_logits is None:
raise ValueError(
f'`gp_logits` cannot be None when likelihood=`{self.likelihood}`')
if gp_logits.ndim > 1 and gp_logits.shape[-1] != 1:
raise ValueError(
f'likelihood `{self.likelihood}` only support univariate logits. '
f'Got logits dimension: {gp_logits.shape[-1]}')
# Computes precision matrix within new batch.
if self.likelihood == 'binary_logistic':
prob = nn.sigmoid(gp_logits)
prob_multiplier = prob * (1. - prob)
elif self.likelihood == 'poisson':
prob_multiplier = jnp.exp(gp_logits)
else:
prob_multiplier = 1.
gp_features_adj = jnp.sqrt(prob_multiplier) * gp_features
batch_prec_mat = jnp.matmul(jnp.transpose(gp_features_adj), gp_features_adj)
# Updates precision matrix.
if self.momentum is None:
# Performs exact update without momentum.
precision_matrix_updated = precision_matrix + batch_prec_mat
else:
batch_size = gp_features.shape[0]
precision_matrix_updated = (
self.momentum * precision_matrix +
(1 - self.momentum) * batch_prec_mat / batch_size)
return precision_matrix_updated
def compute_predictive_covariance(self, gp_features: Array,
precision_matrix: nn.Variable,
diagonal_only: bool) -> Array:
"""Computes the predictive covariance.
Approximates the Gaussian process posterior using random features.
Given training random feature Phi_tr (num_train, num_hidden) and testing
random feature Phi_ts (batch_size, num_hidden). The predictive covariance
matrix is computed as (assuming Gaussian likelihood):
s * Phi_ts @ inv(t(Phi_tr) * Phi_tr + s * I) @ t(Phi_ts),
    where s is the ridge factor used for stabilizing the inverse, and I is
the identity matrix with shape (num_hidden, num_hidden).
Args:
gp_features: the random feature of testing data to be used for computing
the covariance matrix. Shape (batch_size, gp_hidden_size).
precision_matrix: the model's precision matrix.
diagonal_only: whether to return only the diagonal elements of the
predictive covariance matrix (i.e., the predictive variances).
Returns:
The predictive variances of shape (batch_size, ) if diagonal_only=True,
otherwise the predictive covariance matrix of shape
(batch_size, batch_size).
"""
precision_matrix_inv = jnp.linalg.inv(precision_matrix.value)
cov_feature_product = jnp.matmul(precision_matrix_inv,
jnp.transpose(gp_features))
if diagonal_only:
# Compute diagonal element only, shape (batch_size, ).
      # Using the identity diag(A @ B) = sum(A * transpose(B), axis=-1).
gp_covar = jnp.sum(
gp_features * jnp.transpose(cov_feature_product), axis=-1)
else:
# Compute full covariance matrix, shape (batch_size, batch_size).
gp_covar = jnp.matmul(gp_features, cov_feature_product)
return self.ridge_penalty * gp_covar
class MCSigmoidDenseFASNGP(nn.Module):
"""Heteroscedastic SNGP for data with sigmoid output activation.
Output layer which combines the benefits of the heteroscedastic
(https://arxiv.org/abs/2105.10305) and SNGP (https://arxiv.org/abs/2006.10108)
methods. Assumes spectral normalization is applied to network producing
`inputs` to the __call__ method.
Attributes:
num_outputs: Number of outputs for classification task.
num_factors: Number of factors to use in approximation to full rank
covariance matrix.
temperature: The softmax temperature.
parameter_efficient: Whether to use the parameter efficient
version of the method. If True then samples from the latent distribution
are generated as: mu(x) + v(x) * matmul(V, eps_R) + diag(d(x), eps_K)),
where eps_R ~ N(0, I_R), eps_K ~ N(0, I_K). If False then latent samples
are generated as: mu(x) + matmul(V(x), eps_R) + diag(d(x), eps_K)).
Computing V(x) as function of x increases the number of parameters
introduced by the method.
train_mc_samples: The number of Monte-Carlo samples used to estimate the
predictive distribution during training.
test_mc_samples: The number of Monte-Carlo samples used to estimate the
predictive distribution during testing/inference.
share_samples_across_batch: If True, the latent noise samples
      are shared across batch elements. If you encounter XLA compilation
      errors due to dynamic shape inference, setting this to True may
      resolve them.
logits_only: If True, only return the logits from the __call__ method.
return_locs: If True, return the location parameter of the Gaussian
latent variable in place of the `logits`.
eps: Clip probabilities into [eps, 1.0] before applying log.
het_var_weight: Weighting on the heteroscedastic variance when computing
samples from the Gaussian latent variable.
sngp_var_weight: Weighting on the GP variance when computing samples from
the Gaussian latent variable.
hidden_features: Number of features for Random Fourier Feature GP
approximation.
normalize_input: Whether to normalize the input for the GP layer.
norm_kwargs: Normalization keywords for the GP layer.
hidden_kwargs: Hidden layer keywords for the GP layer.
output_kwargs: Output keywords for the GP layer.
covmat_kwargs: Covariance matrix keywords for the GP layer.
"""
num_outputs: int
num_factors: int # set num_factors = 0 for diagonal method
temperature: float = 1.0
parameter_efficient: bool = False
train_mc_samples: int = 1000
test_mc_samples: int = 1000
share_samples_across_batch: bool = False
logits_only: bool = False
return_locs: bool = False
eps: float = 1e-7
het_var_weight: float = 1.0
sngp_var_weight: float = 0.0
hidden_features: int = 1024
normalize_input: bool = True
# Optional keyword arguments.
norm_kwargs: Mapping[str, Any] = default_kwarg_dict()
hidden_kwargs: Mapping[str, Any] = default_kwarg_dict()
output_kwargs: Mapping[str, Any] = default_kwarg_dict()
covmat_kwargs: Mapping[str, Any] = default_kwarg_dict()
def setup(self):
if self.parameter_efficient:
self._scale_layer_homoscedastic = nn.Dense(
self.num_outputs, name='scale_layer_homoscedastic')
self._scale_layer_heteroscedastic = nn.Dense(
self.num_outputs, name='scale_layer_heteroscedastic')
elif self.num_factors > 0:
self._scale_layer = nn.Dense(
self.num_outputs * self.num_factors, name='scale_layer')
self._loc_layer = RandomFeatureGaussianProcess(
features=self.num_outputs,
hidden_features=self.hidden_features,
normalize_input=self.normalize_input,
norm_kwargs=self.norm_kwargs,
hidden_kwargs=self.hidden_kwargs,
output_kwargs=self.output_kwargs,
covmat_kwargs=self.covmat_kwargs,
name='loc_layer')
self._diag_layer = nn.Dense(self.num_outputs, name='diag_layer')
def _compute_loc_param(self, inputs: Array) -> Array:
"""Computes location parameter of the "logits distribution".
Args:
inputs: The input to the heteroscedastic output layer.
Returns:
Array of shape [batch_size, num_classes].
"""
return self._loc_layer(inputs)
def _compute_scale_param(self, inputs: Array, covmat_sngp: Array,
training: int) -> Tuple[Array, Array]:
"""Computes scale parameter of the "logits distribution".
Args:
inputs: The input to the heteroscedastic output layer.
covmat_sngp: GP output layer covariance matrix.
training: in training mode or not.
Returns:
2-Tuple of Array of shape
([batch_size, num_classes * max(num_factors, 1)],
[batch_size, num_classes]).
"""
if self.parameter_efficient or self.num_factors <= 0:
low_rank = inputs
diag = jax.nn.softplus(self._diag_layer(inputs)) + MIN_SCALE_MONTE_CARLO
else:
low_rank = self._scale_layer(inputs)
diag = jax.nn.softplus(self._diag_layer(inputs)) + MIN_SCALE_MONTE_CARLO
initializing = self.is_mutable_collection('params')
if training or initializing:
diag_comp = diag
else:
# assume diagonal_only=True
sngp_marginal_vars = jnp.expand_dims(covmat_sngp, -1)
diag_comp = jnp.sqrt(self.het_var_weight * jnp.square(diag) +
self.sngp_var_weight * sngp_marginal_vars)
return low_rank, diag_comp
def _compute_diagonal_noise_samples(self, diag_scale: Array,
num_samples: int) -> Array:
"""Computes samples of the diagonal elements logit noise.
Args:
diag_scale: Array of shape [batch_size, num_classes]. Diagonal
elements of scale parameters of the distribution to be sampled.
num_samples: Number of Monte-Carlo samples to take.
Returns:
Array. Logit noise samples of shape:
[batch_size, num_samples, num_outputs].
"""
if self.share_samples_across_batch:
samples_per_batch = 1
else:
samples_per_batch = diag_scale.shape[0]
key = self.make_rng('diag_noise_samples')
return jnp.expand_dims(diag_scale, 1) * jax.random.normal(
key, shape=(samples_per_batch, num_samples, 1))
def _compute_standard_normal_samples(self, factor_loadings: Array,
num_samples: int) -> Array:
"""Utility that computes samples from a standard normal distribution.
Args:
factor_loadings: Array of shape
[batch_size, num_classes * num_factors]. Factor loadings for scale
parameters of the distribution to be sampled.
num_samples: Number of Monte-Carlo samples to take.
Returns:
Array. Samples of shape: [batch_size, num_samples, num_factors].
"""
if self.share_samples_across_batch:
samples_per_batch = 1
else:
samples_per_batch = factor_loadings.shape[0]
key = self.make_rng('standard_norm_noise_samples')
standard_normal_samples = jax.random.normal(
key, shape=(samples_per_batch, num_samples, self.num_factors))
if self.share_samples_across_batch:
standard_normal_samples = jnp.tile(standard_normal_samples,
[factor_loadings.shape[0], 1, 1])
return standard_normal_samples
def _compute_noise_samples(self, scale: Tuple[Array, Array],
num_samples: int) -> Array:
"""Utility function that computes additive noise samples.
Args:
scale: Tuple of Array of shape (
[batch_size, num_classes * num_factors],
[batch_size, num_classes]). Factor loadings and diagonal elements
for scale parameters of the distribution to be sampled.
num_samples: Number of Monte-Carlo samples to take.
Returns:
Array. Logit noise samples of shape:
[batch_size, num_samples, num_outputs].
"""
factor_loadings, diag_scale = scale
# Compute the diagonal noise
diag_noise_samples = self._compute_diagonal_noise_samples(diag_scale,
num_samples)
if self.num_factors > 0:
# Now compute the factors
standard_normal_samples = self._compute_standard_normal_samples(
factor_loadings, num_samples)
if self.parameter_efficient:
res = self._scale_layer_homoscedastic(standard_normal_samples)
res *= jnp.expand_dims(
self._scale_layer_heteroscedastic(factor_loadings), 1)
else:
# reshape scale vector into factor loadings matrix
factor_loadings = jnp.reshape(factor_loadings,
[-1, self.num_outputs, self.num_factors])
# transform standard normal into ~ full rank covariance Gaussian samples
res = jnp.einsum('ijk,iak->iaj',
factor_loadings, standard_normal_samples)
return res + diag_noise_samples
return diag_noise_samples
def _compute_mc_samples(self, locs: Array, scale: Array,
num_samples: int) -> Array:
"""Utility function that computes Monte-Carlo samples (using sigmoid).
Args:
locs: Array of shape [batch_size, total_mc_samples, num_outputs].
Location parameters of the distributions to be sampled.
scale: Array of shape [batch_size, total_mc_samples, num_outputs].
Scale parameters of the distributions to be sampled.
num_samples: Number of Monte-Carlo samples to take.
Returns:
Array of shape [batch_size, num_samples, num_outputs]. Average over the
MC samples.
"""
locs = jnp.expand_dims(locs, axis=1)
noise_samples = self._compute_noise_samples(scale, num_samples)
latents = locs + noise_samples
samples = jax.nn.sigmoid(latents / self.temperature)
return jnp.mean(samples, axis=1)
@nn.compact
def __call__(self, inputs: Array, training: int = True) -> Union[
Tuple[Array, Array], Tuple[Array, Array, Array, Array]]:
"""Computes predictive and log predictive distributions.
Uses Monte Carlo estimate of sigmoid approximation to HetSNGP model to
compute predictive distribution.
Args:
inputs: The input to the heteroscedastic output layer.
training: Whether we are training or not.
Returns:
Tuple of Array: (logits, covmat_sngp) if logits_only = True. Otherwise,
tuple of (logits, covmat_sngp, log_probs, probs). Logits
represents the argument to a sigmoid function that would yield probs
(logits = inverse_sigmoid(probs)), so logits can be used with the
sigmoid cross-entropy loss function.
"""
# return_random_features set to False, so guaranteed to return 2-tuple
locs, covmat_sngp = self._compute_loc_param(inputs) # pylint: disable=assignment-from-none,unbalanced-tuple-unpacking
# guaranteed to return 2-tuple due to scale_layer construction
scale = self._compute_scale_param(inputs, covmat_sngp, training) # pylint: disable=assignment-from-none
if training:
total_mc_samples = self.train_mc_samples
else:
total_mc_samples = self.test_mc_samples
probs_mean = self._compute_mc_samples(locs, scale, total_mc_samples)
probs_mean = jnp.clip(probs_mean, a_min=self.eps)
log_probs = jnp.log(probs_mean)
# inverse sigmoid
probs_mean = jnp.clip(probs_mean, a_min=self.eps, a_max=1.0 - self.eps)
logits = log_probs - jnp.log(1.0 - probs_mean)
if self.return_locs:
logits = locs
if self.logits_only:
return logits, covmat_sngp
return logits, covmat_sngp, log_probs, probs_mean
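# Rough usage sketch (hypothetical shapes). The Monte-Carlo sampling helpers
# call make_rng, so the two noise RNG streams must be supplied alongside
# 'params':
#   model = MCSigmoidDenseFASNGP(num_outputs=1, num_factors=2)
#   rngs = {'params': random.PRNGKey(0),
#           'diag_noise_samples': random.PRNGKey(1),
#           'standard_norm_noise_samples': random.PRNGKey(2)}
#   variables = model.init(rngs, jnp.ones((4, 16)))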
|
[
"jax.random.PRNGKey",
"jax.numpy.reshape",
"jax.numpy.transpose",
"jax.numpy.tile",
"jax.random.normal",
"jax.numpy.einsum",
"flax.linen.initializers.variance_scaling",
"jax.numpy.expand_dims",
"jax.numpy.matmul",
"flax.linen.initializers.uniform",
"jax.numpy.broadcast_to",
"functools.partial",
"jax.numpy.linalg.inv",
"jax.numpy.square",
"jax.numpy.eye",
"jax.numpy.asarray",
"dataclasses.field",
"jax.nn.sigmoid",
"flax.linen.sigmoid",
"jax.lax.dot_general",
"jax.numpy.log",
"jax.random.split",
"flax.linen.Dense",
"jax.numpy.exp",
"jax.numpy.clip",
"jax.numpy.sqrt",
"jax.numpy.mean"
] |
[((1946, 1989), 'flax.linen.initializers.uniform', 'nn.initializers.uniform', ([], {'scale': '(2.0 * jnp.pi)'}), '(scale=2.0 * jnp.pi)\n', (1969, 1989), True, 'import flax.linen as nn\n'), ((2315, 2401), 'flax.linen.initializers.variance_scaling', 'nn.initializers.variance_scaling', ([], {'scale': '(2.0)', 'mode': '"""fan_in"""', 'distribution': '"""normal"""'}), "(scale=2.0, mode='fan_in', distribution=\n 'normal')\n", (2347, 2401), True, 'import flax.linen as nn\n'), ((2505, 2544), 'dataclasses.field', 'dataclasses.field', ([], {'default_factory': 'dict'}), '(default_factory=dict)\n', (2522, 2544), False, 'import dataclasses\n'), ((4170, 4224), 'flax.linen.Dense', 'nn.Dense', ([], {'features': 'self.features'}), '(features=self.features, **self.output_kwargs)\n', (4178, 4224), True, 'import flax.linen as nn\n'), ((6852, 6877), 'jax.random.PRNGKey', 'random.PRNGKey', (['self.seed'], {}), '(self.seed)\n', (6866, 6877), False, 'from jax import random\n'), ((7081, 7131), 'jax.numpy.asarray', 'jnp.asarray', (['self._feature_scale'], {'dtype': 'self.dtype'}), '(self._feature_scale, dtype=self.dtype)\n', (7092, 7131), True, 'import jax.numpy as jnp\n'), ((7472, 7501), 'jax.random.split', 'random.split', (['self.rng'], {'num': '(2)'}), '(self.rng, num=2)\n', (7484, 7501), False, 'from jax import random\n'), ((7989, 8020), 'jax.numpy.asarray', 'jnp.asarray', (['inputs', 'self.dtype'], {}), '(inputs, self.dtype)\n', (8000, 8020), True, 'import jax.numpy as jnp\n'), ((8035, 8104), 'jax.lax.dot_general', 'lax.dot_general', (['inputs', 'kernel.value', '(contracting_dims, batch_dims)'], {}), '(inputs, kernel.value, (contracting_dims, batch_dims))\n', (8050, 8104), False, 'from jax import lax\n'), ((11297, 11333), 'jax.numpy.asarray', 'jnp.asarray', (['gp_features', 'self.dtype'], {}), '(gp_features, self.dtype)\n', (11308, 11333), True, 'import jax.numpy as jnp\n'), ((11479, 11531), 'jax.numpy.reshape', 'jnp.reshape', (['gp_features', '[-1, self.hidden_features]'], {}), '(gp_features, [-1, self.hidden_features])\n', (11490, 11531), True, 'import jax.numpy as jnp\n'), ((15930, 15968), 'jax.numpy.linalg.inv', 'jnp.linalg.inv', (['precision_matrix.value'], {}), '(precision_matrix.value)\n', (15944, 15968), True, 'import jax.numpy as jnp\n'), ((20526, 20571), 'flax.linen.Dense', 'nn.Dense', (['self.num_outputs'], {'name': '"""diag_layer"""'}), "(self.num_outputs, name='diag_layer')\n", (20534, 20571), True, 'import flax.linen as nn\n'), ((23619, 23704), 'jax.random.normal', 'jax.random.normal', (['key'], {'shape': '(samples_per_batch, num_samples, self.num_factors)'}), '(key, shape=(samples_per_batch, num_samples, self.num_factors)\n )\n', (23636, 23704), False, 'import jax\n'), ((26243, 26272), 'jax.numpy.expand_dims', 'jnp.expand_dims', (['locs'], {'axis': '(1)'}), '(locs, axis=1)\n', (26258, 26272), True, 'import jax.numpy as jnp\n'), ((26392, 26434), 'jax.nn.sigmoid', 'jax.nn.sigmoid', (['(latents / self.temperature)'], {}), '(latents / self.temperature)\n', (26406, 26434), False, 'import jax\n'), ((26447, 26472), 'jax.numpy.mean', 'jnp.mean', (['samples'], {'axis': '(1)'}), '(samples, axis=1)\n', (26455, 26472), True, 'import jax.numpy as jnp\n'), ((27853, 27889), 'jax.numpy.clip', 'jnp.clip', (['probs_mean'], {'a_min': 'self.eps'}), '(probs_mean, a_min=self.eps)\n', (27861, 27889), True, 'import jax.numpy as jnp\n'), ((27906, 27925), 'jax.numpy.log', 'jnp.log', (['probs_mean'], {}), '(probs_mean)\n', (27913, 27925), True, 'import jax.numpy as jnp\n'), ((27966, 28024), 'jax.numpy.clip', 'jnp.clip', 
(['probs_mean'], {'a_min': 'self.eps', 'a_max': '(1.0 - self.eps)'}), '(probs_mean, a_min=self.eps, a_max=1.0 - self.eps)\n', (27974, 28024), True, 'import jax.numpy as jnp\n'), ((3906, 3970), 'functools.partial', 'functools.partial', (['nn.LayerNorm'], {'use_bias': '(False)', 'use_scale': '(False)'}), '(nn.LayerNorm, use_bias=False, use_scale=False)\n', (3923, 3970), False, 'import functools\n'), ((7026, 7055), 'jax.numpy.sqrt', 'jnp.sqrt', (['(2.0 / self.features)'], {}), '(2.0 / self.features)\n', (7034, 7055), True, 'import jax.numpy as jnp\n'), ((8159, 8202), 'jax.numpy.broadcast_to', 'jnp.broadcast_to', (['bias.value', 'outputs.shape'], {}), '(bias.value, outputs.shape)\n', (8175, 8202), True, 'import jax.numpy as jnp\n'), ((11581, 11615), 'jax.numpy.asarray', 'jnp.asarray', (['gp_logits', 'self.dtype'], {}), '(gp_logits, self.dtype)\n', (11592, 11615), True, 'import jax.numpy as jnp\n'), ((11634, 11684), 'jax.numpy.reshape', 'jnp.reshape', (['gp_logits', '[gp_features.shape[0], -1]'], {}), '(gp_logits, [gp_features.shape[0], -1])\n', (11645, 11684), True, 'import jax.numpy as jnp\n'), ((12534, 12581), 'jax.numpy.eye', 'jnp.eye', (['self.hidden_features'], {'dtype': 'self.dtype'}), '(self.hidden_features, dtype=self.dtype)\n', (12541, 12581), True, 'import jax.numpy as jnp\n'), ((13895, 13916), 'flax.linen.sigmoid', 'nn.sigmoid', (['gp_logits'], {}), '(gp_logits)\n', (13905, 13916), True, 'import flax.linen as nn\n'), ((14102, 14127), 'jax.numpy.sqrt', 'jnp.sqrt', (['prob_multiplier'], {}), '(prob_multiplier)\n', (14110, 14127), True, 'import jax.numpy as jnp\n'), ((14174, 14204), 'jax.numpy.transpose', 'jnp.transpose', (['gp_features_adj'], {}), '(gp_features_adj)\n', (14187, 14204), True, 'import jax.numpy as jnp\n'), ((16065, 16091), 'jax.numpy.transpose', 'jnp.transpose', (['gp_features'], {}), '(gp_features)\n', (16078, 16091), True, 'import jax.numpy as jnp\n'), ((16432, 16476), 'jax.numpy.matmul', 'jnp.matmul', (['gp_features', 'cov_feature_product'], {}), '(gp_features, cov_feature_product)\n', (16442, 16476), True, 'import jax.numpy as jnp\n'), ((19811, 19871), 'flax.linen.Dense', 'nn.Dense', (['self.num_outputs'], {'name': '"""scale_layer_homoscedastic"""'}), "(self.num_outputs, name='scale_layer_homoscedastic')\n", (19819, 19871), True, 'import flax.linen as nn\n'), ((19925, 19987), 'flax.linen.Dense', 'nn.Dense', (['self.num_outputs'], {'name': '"""scale_layer_heteroscedastic"""'}), "(self.num_outputs, name='scale_layer_heteroscedastic')\n", (19933, 19987), True, 'import flax.linen as nn\n'), ((21856, 21888), 'jax.numpy.expand_dims', 'jnp.expand_dims', (['covmat_sngp', '(-1)'], {}), '(covmat_sngp, -1)\n', (21871, 21888), True, 'import jax.numpy as jnp\n'), ((22756, 22786), 'jax.numpy.expand_dims', 'jnp.expand_dims', (['diag_scale', '(1)'], {}), '(diag_scale, 1)\n', (22771, 22786), True, 'import jax.numpy as jnp\n'), ((22789, 22854), 'jax.random.normal', 'jax.random.normal', (['key'], {'shape': '(samples_per_batch, num_samples, 1)'}), '(key, shape=(samples_per_batch, num_samples, 1))\n', (22806, 22854), False, 'import jax\n'), ((23782, 23849), 'jax.numpy.tile', 'jnp.tile', (['standard_normal_samples', '[factor_loadings.shape[0], 1, 1]'], {}), '(standard_normal_samples, [factor_loadings.shape[0], 1, 1])\n', (23790, 23849), True, 'import jax.numpy as jnp\n'), ((28050, 28075), 'jax.numpy.log', 'jnp.log', (['(1.0 - probs_mean)'], {}), '(1.0 - probs_mean)\n', (28057, 28075), True, 'import jax.numpy as jnp\n'), ((14023, 14041), 'jax.numpy.exp', 'jnp.exp', (['gp_logits'], {}), 
'(gp_logits)\n', (14030, 14041), True, 'import jax.numpy as jnp\n'), ((20056, 20121), 'flax.linen.Dense', 'nn.Dense', (['(self.num_outputs * self.num_factors)'], {'name': '"""scale_layer"""'}), "(self.num_outputs * self.num_factors, name='scale_layer')\n", (20064, 20121), True, 'import flax.linen as nn\n'), ((25217, 25287), 'jax.numpy.reshape', 'jnp.reshape', (['factor_loadings', '[-1, self.num_outputs, self.num_factors]'], {}), '(factor_loadings, [-1, self.num_outputs, self.num_factors])\n', (25228, 25287), True, 'import jax.numpy as jnp\n'), ((25422, 25490), 'jax.numpy.einsum', 'jnp.einsum', (['"""ijk,iak->iaj"""', 'factor_loadings', 'standard_normal_samples'], {}), "('ijk,iak->iaj', factor_loadings, standard_normal_samples)\n", (25432, 25490), True, 'import jax.numpy as jnp\n'), ((16288, 16322), 'jax.numpy.transpose', 'jnp.transpose', (['cov_feature_product'], {}), '(cov_feature_product)\n', (16301, 16322), True, 'import jax.numpy as jnp\n'), ((21938, 21954), 'jax.numpy.square', 'jnp.square', (['diag'], {}), '(diag)\n', (21948, 21954), True, 'import jax.numpy as jnp\n')]
|
# Copyright 2020 Silicon Compiler Authors. All Rights Reserved.
import os
import sys
import multiprocessing
import siliconcompiler
import time
# Shared setup routine
def run_design(design, M, job):
chip = siliconcompiler.Chip(loglevel='INFO')
chip.set('design', design)
chip.add('source', design+'.v')
chip.set('jobname', job)
chip.set('relax', True)
chip.set('quiet', True)
chip.set('flowarg','syn_np', str(M))
chip.set('flowarg','syn_place', str(M))
chip.set('flowarg','syn_cts', str(M))
chip.set('flowarg','syn_route', str(M))
chip.load_target("freepdk45_demo")
chip.run()
def main():
####################################
design = 'heartbeat'
N = 2 # parallel flows, change based on your machine
M = 2 # parallel indices, change based on your machine
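    # With these settings the serial pass runs N*M = 4 compiles one at a
    # time, the parallel-steps pass runs M = 2 compiles that each fan out
    # M-wide within the flow, and the parallel-flows pass launches N = 2
    # complete flows as separate processes.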
####################################
# 1. All serial
serial_start = time.time()
for i in range(N):
for j in range(M):
job = f"serial_{i}_{j}"
run_design(design, 1, job)
serial_end = time.time()
###################################
# 2. Parallel steps
parastep_start = time.time()
for i in range(M):
job = f"parasteps_{i}"
run_design(design, M, job)
parastep_end = time.time()
###################################
# 3. Parallel flows
paraflow_start = time.time()
processes = []
for i in range(N):
job = f"paraflows_{i}"
processes.append(multiprocessing.Process(target=run_design,
args=(design,
M,
job)))
# Boiler plate start and join
for p in processes:
p.start()
for p in processes:
p.join()
paraflow_end = time.time()
###################################
# Benchmark calculation
paraflow_time = round(paraflow_end - paraflow_start,2)
parastep_time = round(parastep_end - parastep_start,2)
serial_time = round(serial_end - serial_start,2)
print(f" Serial = {serial_time}s\n",
f"Parallel steps = {parastep_time}s\n",
f"Parallel flows = {paraflow_time}s\n")
if __name__ == '__main__':
main()
|
[
"multiprocessing.Process",
"siliconcompiler.Chip",
"time.time"
] |
[((212, 249), 'siliconcompiler.Chip', 'siliconcompiler.Chip', ([], {'loglevel': '"""INFO"""'}), "(loglevel='INFO')\n", (232, 249), False, 'import siliconcompiler\n'), ((905, 916), 'time.time', 'time.time', ([], {}), '()\n', (914, 916), False, 'import time\n'), ((1059, 1070), 'time.time', 'time.time', ([], {}), '()\n', (1068, 1070), False, 'import time\n'), ((1158, 1169), 'time.time', 'time.time', ([], {}), '()\n', (1167, 1169), False, 'import time\n'), ((1278, 1289), 'time.time', 'time.time', ([], {}), '()\n', (1287, 1289), False, 'import time\n'), ((1377, 1388), 'time.time', 'time.time', ([], {}), '()\n', (1386, 1388), False, 'import time\n'), ((1853, 1864), 'time.time', 'time.time', ([], {}), '()\n', (1862, 1864), False, 'import time\n'), ((1489, 1554), 'multiprocessing.Process', 'multiprocessing.Process', ([], {'target': 'run_design', 'args': '(design, M, job)'}), '(target=run_design, args=(design, M, job))\n', (1512, 1554), False, 'import multiprocessing\n')]
|
import json
import os
import pint
from . import _ureg
from .data import nist
from .state import State, StateRegistry
from .transition import Transition, TransitionRegistry
_directory = os.path.dirname(os.path.realpath(__file__))
_periodic_table = os.path.join(_directory, "data", "PeriodicTableJSON.json")
with open(_periodic_table) as f:
periodic_table = json.load(f)
elements = [element["symbol"] for element in periodic_table["elements"]]
class Atom:
"""An atom object, containing states and transitions
Attributes:
name (str): The name of the atom
"""
_ureg: pint.UnitRegistry
name: str
__states: StateRegistry
__transitions: TransitionRegistry
def __init__(self, atom=None, ureg=None, refresh_cache=False):
self._ureg = ureg if ureg is not None else _ureg
self.name = ""
self.__states = StateRegistry(atom=self)
self.__transitions = TransitionRegistry(atom=self)
if atom is None:
return
try:
self.load(atom)
except FileNotFoundError:
self.load_nist(atom, refresh_cache)
self.__transitions.sort()
for state in self.__states:
state.transitions.sort()
def __call__(self, key):
try:
return self.__states(key)
except KeyError:
pass
try:
return self.__transitions(key)
except KeyError:
pass
raise KeyError(f"no property of atom {self.name} {key}")
def __repr__(self):
ground_state = (
f"Ground State: {self.__states[0].term}\n" if self.__states else ""
)
return (
f"{ground_state}{len(self.__states)} States\n"
f"{len(self.__transitions)} Transitions"
)
def _load_states(self, states):
self.__states = StateRegistry(
sorted([State(**state, atom=self) for state in states]),
atom=self,
)
def _load_transitions(self, transitions):
self.__transitions = TransitionRegistry(
[Transition(**transition, atom=self) for transition in transitions],
atom=self,
)
def to_dict(self):
return {
"name": self.name,
"states": self.__states.to_dict(),
"transitions": self.__transitions.to_dict(),
}
def save(self, filename):
with open(filename, "w") as file:
json.dump(self.to_dict(), file, indent=4, ensure_ascii=False)
def load(self, filename):
with open(filename) as file:
data = json.load(file)
self.name = data["name"]
self._load_states(data["states"])
self._load_transitions(data["transitions"])
def load_nist(self, name, refresh_cache=False):
if name in elements:
atom = name + " i"
elif name[-1] == "+" and name[:-1] in elements:
atom = name[:-1] + " ii"
elif name[-1] == "+" and name[-2].isdigit() and name[:-2] in elements:
atom = name[:-2] + " " + "i" * (1 + int(name[-2]))
else:
atom = name
raise ValueError(
f"{atom} does not match a known neutral atom or ionic ion name"
)
atom = atom.lower()
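        # e.g. "Rb" -> "rb i" (neutral), "Ca+" -> "ca ii" (singly ionized),
        # "Yb2+" -> "yb iii" (doubly ionized).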
self.name = name
self._load_states(nist.parse_states(nist.fetch_states(atom, refresh_cache)))
self._load_transitions(
nist.parse_transitions(nist.fetch_transitions(atom, refresh_cache))
)
@property
def states(self) -> StateRegistry:
"""StateRegistry: the atomic states."""
return self.__states
@property
def transitions(self) -> TransitionRegistry:
"""TransitionRegistry: the atomic transitions."""
return self.__transitions
@property
def units(self):
"""pint.UnitRegistry(): readonly access to the pint UnitRegistry used by the atom."""
return self._ureg
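# Usage sketch for the Atom class above. The package import path below is
# hypothetical (adjust to wherever this module is installed), and load_nist
# needs network access to NIST for the requested element.
from atom_module import Atom  # hypothetical import path
rb = Atom("Rb")                # no local file named "Rb" -> falls back to load_nist("Rb")
print(rb)                      # repr: ground state plus state/transition counts
rb.save("Rb.json")             # cache the parsed data to disk ...
rb_cached = Atom("Rb.json")   # ... and reload it later without refetching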
|
[
"os.path.realpath",
"json.load",
"os.path.join"
] |
[((250, 308), 'os.path.join', 'os.path.join', (['_directory', '"""data"""', '"""PeriodicTableJSON.json"""'], {}), "(_directory, 'data', 'PeriodicTableJSON.json')\n", (262, 308), False, 'import os\n'), ((204, 230), 'os.path.realpath', 'os.path.realpath', (['__file__'], {}), '(__file__)\n', (220, 230), False, 'import os\n'), ((363, 375), 'json.load', 'json.load', (['f'], {}), '(f)\n', (372, 375), False, 'import json\n'), ((2606, 2621), 'json.load', 'json.load', (['file'], {}), '(file)\n', (2615, 2621), False, 'import json\n')]
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
@author: <NAME>
@email: <EMAIL>
@time: 2021/12/12 22:30
"""
from typing import Any, Callable, Dict, List, Optional
from urllib import parse
from requests import Response, Session
from mussel.scheme.api import Interface
class MakeRequest(object):
def __init__(self, session: Optional[Session] = None, host: str = None) -> None:
if session is None:
session = Session()
self.session = session
self.responses: List[Response] = []
self.host = host
def to_send(self, interface: Interface, **kwargs: Any) -> None:
"""
Args:
interface: Interface object.
kwargs: additional keyword arguments to pass through to |request|.
"""
http_requests: Dict[str, Callable] = {
"DELETE": self.session.delete,
"GET": self.session.get,
"HEAD": self.session.head,
"OPTIONS": self.session.options,
"PATCH": self.session.patch,
"POST": self.session.post,
"PUT": self.session.put,
}
method = interface.method.upper()
if method not in http_requests:
# todo: RequestError
raise Exception(f'"{method}" is not a valid HTTP method.')
self.responses.append(
# todo: details log
http_requests[method](parse.urljoin(self.host, interface.url), **kwargs)
)
send = to_send
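# Sketch of driving MakeRequest. Interface's constructor arguments are an
# assumption here; to_send only relies on its `method` and `url` attributes.
client = MakeRequest(host="https://api.example.com")          # example host
client.send(Interface(method="get", url="/health"), timeout=5)  # kwargs forwarded to requests
print(client.responses[-1].status_code)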
|
[
"requests.Session",
"urllib.parse.urljoin"
] |
[((438, 447), 'requests.Session', 'Session', ([], {}), '()\n', (445, 447), False, 'from requests import Response, Session\n'), ((1398, 1437), 'urllib.parse.urljoin', 'parse.urljoin', (['self.host', 'interface.url'], {}), '(self.host, interface.url)\n', (1411, 1437), False, 'from urllib import parse\n')]
|
from urllib.parse import quote_plus
from openai import api_requestor, error, util
from openai.openai_object import OpenAIObject
class APIResource(OpenAIObject):
api_prefix = ""
@classmethod
def retrieve(cls, id, api_key=None, request_id=None, **params):
instance = cls(id, api_key, **params)
instance.refresh(request_id=request_id)
return instance
def refresh(self, request_id=None):
self.refresh_from(
self.request("get", self.instance_url(), request_id=request_id)
)
return self
@classmethod
def class_url(cls):
if cls == APIResource:
raise NotImplementedError(
"APIResource is an abstract class. You should perform actions on its subclasses."
)
# Namespaces are separated in object names with periods (.) and in URLs
# with forward slashes (/), so replace the former with the latter.
base = cls.OBJECT_NAME.replace(".", "/") # type: ignore
if cls.api_prefix:
return "/%s/%ss" % (cls.api_prefix, base)
return "/%ss" % (base)
def instance_url(self):
id = self.get("id")
if not isinstance(id, str):
raise error.InvalidRequestError(
"Could not determine which URL to request: %s instance "
"has invalid ID: %r, %s. ID should be of type `str` (or"
" `unicode`)" % (type(self).__name__, id, type(id)),
"id",
)
base = self.class_url()
extn = quote_plus(id)
return "%s/%s" % (base, extn)
# The `method_` and `url_` arguments are suffixed with an underscore to
# avoid conflicting with actual request parameters in `params`.
@classmethod
def _static_request(
cls,
method_,
url_,
api_key=None,
api_base=None,
request_id=None,
api_version=None,
organization=None,
**params,
):
requestor = api_requestor.APIRequestor(
api_key,
api_version=api_version,
organization=organization,
api_base=api_base,
)
response, _, api_key = requestor.request(
method_, url_, params, request_id=request_id
)
return util.convert_to_openai_object(
response, api_key, api_version, organization
)
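# Illustrative subclasses (the names and OBJECT_NAME values are made up, not
# part of the library) showing how class_url() builds endpoint paths from
# OBJECT_NAME and api_prefix.
class Engine(APIResource):
    OBJECT_NAME = "engine"
class FineTune(APIResource):
    api_prefix = "v1"
    OBJECT_NAME = "fine.tune"  # dots in the object name become slashes
assert Engine.class_url() == "/engines"
assert FineTune.class_url() == "/v1/fine/tunes"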
|
[
"urllib.parse.quote_plus",
"openai.api_requestor.APIRequestor",
"openai.util.convert_to_openai_object"
] |
[((1556, 1570), 'urllib.parse.quote_plus', 'quote_plus', (['id'], {}), '(id)\n', (1566, 1570), False, 'from urllib.parse import quote_plus\n'), ((2008, 2119), 'openai.api_requestor.APIRequestor', 'api_requestor.APIRequestor', (['api_key'], {'api_version': 'api_version', 'organization': 'organization', 'api_base': 'api_base'}), '(api_key, api_version=api_version, organization=\n organization, api_base=api_base)\n', (2034, 2119), False, 'from openai import api_requestor, error, util\n'), ((2306, 2381), 'openai.util.convert_to_openai_object', 'util.convert_to_openai_object', (['response', 'api_key', 'api_version', 'organization'], {}), '(response, api_key, api_version, organization)\n', (2335, 2381), False, 'from openai import api_requestor, error, util\n')]
|
from shutil import move
import glob, os
original_path = "training"
# For each training/ subfolder, move every 10th file to the matching
# testing/ subfolder; moved stems go to ImageSets/test.txt and val.txt,
# the rest to ImageSets/train.txt. write_txt_flag limits txt writing to
# the first subfolder so each stem is recorded once.
all_paths = sorted(glob.glob(original_path+'/*'))
if not os.path.exists("ImageSets"):
os.makedirs("ImageSets")
write_txt_flag = 1
for path_item in all_paths:
items = sorted(glob.glob(path_item + '/*'))
target_path_item = path_item.replace('training', 'testing')
if not os.path.exists(target_path_item):
os.makedirs(target_path_item)
for index, item in enumerate(items):
if index % 10 == 9:
move(item, target_path_item+'/'+item.split('/')[-1])
if write_txt_flag:
with open("ImageSets/test.txt","a") as f:
f.write(item.split('/')[-1].split('.')[-2])
f.write('\n')
with open("ImageSets/val.txt","a") as f:
f.write(item.split('/')[-1].split('.')[-2])
f.write('\n')
else:
if write_txt_flag:
with open("ImageSets/train.txt","a") as f:
f.write(item.split('/')[-1].split('.')[-2])
f.write('\n')
write_txt_flag = 0
|
[
"os.path.exists",
"os.makedirs",
"glob.glob"
] |
[((87, 118), 'glob.glob', 'glob.glob', (["(original_path + '/*')"], {}), "(original_path + '/*')\n", (96, 118), False, 'import glob, os\n'), ((125, 152), 'os.path.exists', 'os.path.exists', (['"""ImageSets"""'], {}), "('ImageSets')\n", (139, 152), False, 'import glob, os\n'), ((158, 182), 'os.makedirs', 'os.makedirs', (['"""ImageSets"""'], {}), "('ImageSets')\n", (169, 182), False, 'import glob, os\n'), ((249, 276), 'glob.glob', 'glob.glob', (["(path_item + '/*')"], {}), "(path_item + '/*')\n", (258, 276), False, 'import glob, os\n'), ((353, 385), 'os.path.exists', 'os.path.exists', (['target_path_item'], {}), '(target_path_item)\n', (367, 385), False, 'import glob, os\n'), ((395, 424), 'os.makedirs', 'os.makedirs', (['target_path_item'], {}), '(target_path_item)\n', (406, 424), False, 'import glob, os\n')]
|
# Copyright (C) 2018-2021 Intel Corporation
# SPDX-License-Identifier: Apache-2.0
import argparse
import shutil
from pathlib import Path
def copy_images(input_dir: Path, output_dir: Path):
"""
Copy images from doxygen xml folder to sphinx folder
"""
output_dir.mkdir(parents=True, exist_ok=True)
extensions = ('*.png', '*.jpg', '*.svg', '*.gif', '*.PNG', '*.JPG', '*.SVG', '*.GIF')
for extension in extensions:
for file in input_dir.glob(extension):
shutil.copy(file, output_dir)
def main():
parser = argparse.ArgumentParser()
parser.add_argument('input_dir', type=Path, help='Path to the folder containing images.')
parser.add_argument('output_dir', type=Path, help='Path to the output folder')
args = parser.parse_args()
input_dir = args.input_dir
output_dir = args.output_dir
copy_images(input_dir, output_dir)
if __name__ == '__main__':
main()
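# Example invocation (script and directory names are illustrative):
#   python copy_images.py build/doxygen/xml docs/_static/images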
|
[
"argparse.ArgumentParser",
"shutil.copy"
] |
[((554, 579), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {}), '()\n', (577, 579), False, 'import argparse\n'), ((497, 526), 'shutil.copy', 'shutil.copy', (['file', 'output_dir'], {}), '(file, output_dir)\n', (508, 526), False, 'import shutil\n')]
|
import FWCore.ParameterSet.Config as cms
from DQMOffline.JetMET.jetMETDQMCleanup_cff import *
from DQMOffline.JetMET.metDiagnosticParameterSet_cfi import *
from DQMOffline.JetMET.metDiagnosticParameterSetMiniAOD_cfi import *
#jet corrector defined in jetMETDQMOfflineSource python file
pfCandidateDQMAnalyzer = cms.EDAnalyzer("DQMPFCandidateAnalyzer",
CandType=cms.untracked.string('PFCand'),
PFCandidateLabel = cms.InputTag('particleFlow', ''),
ptMinCand = cms.double(1.),
hcalMin =cms.double(1.),
CleaningParameters = cleaningParameters.clone(
bypassAllPVChecks = cms.bool(False)
),
METDiagonisticsParameters = multPhiCorr_METDiagnostics,
FilterResultsLabelMiniAOD = cms.InputTag("TriggerResults::RECO"),
FilterResultsLabelMiniAOD2 = cms.InputTag("TriggerResults::reRECO"),
LSBegin = cms.int32(0),
LSEnd = cms.int32(-1),
HBHENoiseLabelMiniAOD = cms.string("Flag_HBHENoiseFilter"),
HBHENoiseFilterResultLabel = cms.InputTag("HBHENoiseFilterResultProducer", "HBHENoiseFilterResult"),
HBHENoiseIsoFilterResultLabel = cms.InputTag("HBHENoiseFilterResultProducer", "HBHEIsoNoiseFilterResult"),
verbose = cms.int32(0),
DCSFilter = cms.PSet(
DetectorTypes = cms.untracked.string("ecal:hbhe:hf:pixel:sistrip:es:muon"),
#DebugOn = cms.untracked.bool(True),
Filter = cms.untracked.bool(True)
),
)
packedCandidateDQMAnalyzerMiniAOD = pfCandidateDQMAnalyzer.clone(
CandType=cms.untracked.string('Packed'),
PFCandidateLabel = cms.InputTag('packedPFCandidates', ''),
METDiagonisticsParameters = multPhiCorr_METDiagnosticsMiniAOD,
CleaningParameters = cleaningParameters.clone(
vertexCollection = cms.InputTag( "goodOfflinePrimaryVerticesDQMforMiniAOD" ),
),
)
|
[
"FWCore.ParameterSet.Config.string",
"FWCore.ParameterSet.Config.double",
"FWCore.ParameterSet.Config.untracked.string",
"FWCore.ParameterSet.Config.untracked.bool",
"FWCore.ParameterSet.Config.int32",
"FWCore.ParameterSet.Config.bool",
"FWCore.ParameterSet.Config.InputTag"
] |
[((370, 400), 'FWCore.ParameterSet.Config.untracked.string', 'cms.untracked.string', (['"""PFCand"""'], {}), "('PFCand')\n", (390, 400), True, 'import FWCore.ParameterSet.Config as cms\n'), ((425, 457), 'FWCore.ParameterSet.Config.InputTag', 'cms.InputTag', (['"""particleFlow"""', '""""""'], {}), "('particleFlow', '')\n", (437, 457), True, 'import FWCore.ParameterSet.Config as cms\n'), ((481, 496), 'FWCore.ParameterSet.Config.double', 'cms.double', (['(1.0)'], {}), '(1.0)\n', (491, 496), True, 'import FWCore.ParameterSet.Config as cms\n'), ((515, 530), 'FWCore.ParameterSet.Config.double', 'cms.double', (['(1.0)'], {}), '(1.0)\n', (525, 530), True, 'import FWCore.ParameterSet.Config as cms\n'), ((741, 777), 'FWCore.ParameterSet.Config.InputTag', 'cms.InputTag', (['"""TriggerResults::RECO"""'], {}), "('TriggerResults::RECO')\n", (753, 777), True, 'import FWCore.ParameterSet.Config as cms\n'), ((813, 851), 'FWCore.ParameterSet.Config.InputTag', 'cms.InputTag', (['"""TriggerResults::reRECO"""'], {}), "('TriggerResults::reRECO')\n", (825, 851), True, 'import FWCore.ParameterSet.Config as cms\n'), ((869, 881), 'FWCore.ParameterSet.Config.int32', 'cms.int32', (['(0)'], {}), '(0)\n', (878, 881), True, 'import FWCore.ParameterSet.Config as cms\n'), ((897, 910), 'FWCore.ParameterSet.Config.int32', 'cms.int32', (['(-1)'], {}), '(-1)\n', (906, 910), True, 'import FWCore.ParameterSet.Config as cms\n'), ((949, 983), 'FWCore.ParameterSet.Config.string', 'cms.string', (['"""Flag_HBHENoiseFilter"""'], {}), "('Flag_HBHENoiseFilter')\n", (959, 983), True, 'import FWCore.ParameterSet.Config as cms\n'), ((1018, 1088), 'FWCore.ParameterSet.Config.InputTag', 'cms.InputTag', (['"""HBHENoiseFilterResultProducer"""', '"""HBHENoiseFilterResult"""'], {}), "('HBHENoiseFilterResultProducer', 'HBHENoiseFilterResult')\n", (1030, 1088), True, 'import FWCore.ParameterSet.Config as cms\n'), ((1126, 1199), 'FWCore.ParameterSet.Config.InputTag', 'cms.InputTag', (['"""HBHENoiseFilterResultProducer"""', '"""HBHEIsoNoiseFilterResult"""'], {}), "('HBHENoiseFilterResultProducer', 'HBHEIsoNoiseFilterResult')\n", (1138, 1199), True, 'import FWCore.ParameterSet.Config as cms\n'), ((1220, 1232), 'FWCore.ParameterSet.Config.int32', 'cms.int32', (['(0)'], {}), '(0)\n', (1229, 1232), True, 'import FWCore.ParameterSet.Config as cms\n'), ((1525, 1555), 'FWCore.ParameterSet.Config.untracked.string', 'cms.untracked.string', (['"""Packed"""'], {}), "('Packed')\n", (1545, 1555), True, 'import FWCore.ParameterSet.Config as cms\n'), ((1580, 1618), 'FWCore.ParameterSet.Config.InputTag', 'cms.InputTag', (['"""packedPFCandidates"""', '""""""'], {}), "('packedPFCandidates', '')\n", (1592, 1618), True, 'import FWCore.ParameterSet.Config as cms\n'), ((620, 635), 'FWCore.ParameterSet.Config.bool', 'cms.bool', (['(False)'], {}), '(False)\n', (628, 635), True, 'import FWCore.ParameterSet.Config as cms\n'), ((1285, 1343), 'FWCore.ParameterSet.Config.untracked.string', 'cms.untracked.string', (['"""ecal:hbhe:hf:pixel:sistrip:es:muon"""'], {}), "('ecal:hbhe:hf:pixel:sistrip:es:muon')\n", (1305, 1343), True, 'import FWCore.ParameterSet.Config as cms\n'), ((1407, 1431), 'FWCore.ParameterSet.Config.untracked.bool', 'cms.untracked.bool', (['(True)'], {}), '(True)\n', (1425, 1431), True, 'import FWCore.ParameterSet.Config as cms\n'), ((1768, 1823), 'FWCore.ParameterSet.Config.InputTag', 'cms.InputTag', (['"""goodOfflinePrimaryVerticesDQMforMiniAOD"""'], {}), "('goodOfflinePrimaryVerticesDQMforMiniAOD')\n", (1780, 1823), True, 'import FWCore.ParameterSet.Config as cms\n')]
|
from flask import Blueprint
auth = Blueprint("auth", __name__, url_prefix="/user")
from bolo.auth import endpoints
|
[
"flask.Blueprint"
] |
[((36, 83), 'flask.Blueprint', 'Blueprint', (['"""auth"""', '__name__'], {'url_prefix': '"""/user"""'}), "('auth', __name__, url_prefix='/user')\n", (45, 83), False, 'from flask import Blueprint\n')]
|
from cvdatasetutils.dnnutils import load_json
import os
import pandas as pd
ANNOTATION_FOLDER = 'annotations'
INSTANCES_FILE = 'instances_train2017.json'
def parse_labels(instances):
return { category['id']: category['name'] for category in instances['categories']}
def parse_images(instances):
def to_array(image):
return [image['file_name'], image['height'], image['width'], image['id']]
list_of_lists = map(to_array, instances['images'])
return pd.DataFrame(list(list_of_lists), columns=['file_name', 'height', 'width', 'image_id'])
class COCOSet:
def __init__(self, base_path):
self.base_path = base_path
instances = self.load_dataset()
self.labels = parse_labels(instances)
self.images = parse_images(instances)
self.annotations = self.parse_annotations(instances)
def load_dataset(self):
return load_json(os.path.join(self.base_path, ANNOTATION_FOLDER, INSTANCES_FILE))
def get_annotations(self):
return self.annotations
def get_images(self):
return self.images
def parse_annotations(self, instances):
def to_array(annotation):
return[annotation['area'], annotation['bbox'][0], annotation['bbox'][1], annotation['bbox'][2],
annotation['bbox'][3], annotation['category_id'], self.labels[annotation['category_id']],
annotation['image_id']]
list_of_lists = map(to_array, instances['annotations'])
return pd.DataFrame(list(list_of_lists), columns=['area', 'x', 'y', 'w', 'h', 'class', 'name', 'image_id'])
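# Usage sketch: the base path below is illustrative and must contain
# annotations/instances_train2017.json.
coco = COCOSet("/data/coco")
print(coco.get_images().head())       # file_name, height, width, image_id
print(coco.get_annotations().head())  # area, x, y, w, h, class, name, image_id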
|
[
"os.path.join"
] |
[((902, 965), 'os.path.join', 'os.path.join', (['self.base_path', 'ANNOTATION_FOLDER', 'INSTANCES_FILE'], {}), '(self.base_path, ANNOTATION_FOLDER, INSTANCES_FILE)\n', (914, 965), False, 'import os\n')]
|
"""Send command to iTerm2 active tab."""
import os
import sys
import json
import shlex
import AppKit
import iterm2
FILE, *ARGS = sys.argv
TAB_ID_FILENAME = os.path.join(os.path.dirname(FILE), 'tabs_id.json')
def _launch_iterm(_id): # type: (dict) -> None
"""Launch Iterm2 if is not running already.
Args:
_id (dict): a dict with the key `bundle` for the application bundle
identifier, and a key `app_name` for the application name.
"""
if not AppKit.NSRunningApplication.runningApplicationsWithBundleIdentifier_(_id['bundle']):
_launch_app(_id['app_name'])
def _launch_app(app_name): # type: (str) -> None
"""Launch or focus application.
Args:
app_name (str): the application name to launch or focus
"""
AppKit.NSWorkspace.sharedWorkspace().launchApplication_(app_name)
_launch_iterm({"bundle": "com.googlecode.iterm2", "app_name": "iTerm"})
async def get_tab_id(): # type: () -> dict
"""Get the tabs id json file.
Returns:
dict: the json data or an empty dict if file is not found.
"""
try:
with open(TAB_ID_FILENAME, 'r') as f:
return json.load(f)
except FileNotFoundError:
return {}
async def write_tab_id(obj):  # type: (dict) -> None
"""Write iTerm2 tab id into the json file."""
with open(TAB_ID_FILENAME, 'w') as f:
json.dump(obj, f, indent=4)
async def main(connection):
"""Start connection to iTerm2 application.
Connect to the iTerm2 application. If no window is present will create one
and create a tab to be reused when sending the commands from vscode.
Args:
connection (_type_): _description_
"""
app = await iterm2.async_get_app(connection)
await app.async_activate()
window = app.current_terminal_window
if not window:
window = await iterm2.Window.async_create(connection)
iterm_tabs_id = [tab.tab_id for tab in window.tabs]
terminal_cmd = ARGS.pop(0)
filename = os.path.basename(terminal_cmd)
files_tabs = await get_tab_id()
file_tab = files_tabs.get(filename)
# If tab id is not in current tabs, then a new tab needs to be created
# otherwise use the existing tab.
if file_tab not in iterm_tabs_id:
await window.async_create_tab()
await window.current_tab.async_set_title(filename)
iterm_new_tab_id = window.current_tab.tab_id
iterm_tabs_id.append(iterm_new_tab_id)
files_tabs.update({filename: iterm_new_tab_id})
await write_tab_id(files_tabs)
file_tab = iterm_new_tab_id
tab_index = iterm_tabs_id.index(file_tab)
vscode_tab = window.tabs[tab_index]
current_tab = vscode_tab.current_session
str_args = shlex.join(ARGS)
await current_tab.async_activate()
await current_tab.async_send_text(f"{terminal_cmd} {str_args}\n")
# XXX: This should focus back, but sometimes it doesn't?
_launch_app("Visual Studio Code")
iterm2.run_until_complete(main, True)
|
[
"AppKit.NSWorkspace.sharedWorkspace",
"json.dump",
"iterm2.async_get_app",
"json.load",
"iterm2.Window.async_create",
"os.path.basename",
"shlex.join",
"os.path.dirname",
"AppKit.NSRunningApplication.runningApplicationsWithBundleIdentifier_",
"iterm2.run_until_complete"
] |
[((2973, 3010), 'iterm2.run_until_complete', 'iterm2.run_until_complete', (['main', '(True)'], {}), '(main, True)\n', (2998, 3010), False, 'import iterm2\n'), ((172, 193), 'os.path.dirname', 'os.path.dirname', (['FILE'], {}), '(FILE)\n', (187, 193), False, 'import os\n'), ((2002, 2032), 'os.path.basename', 'os.path.basename', (['terminal_cmd'], {}), '(terminal_cmd)\n', (2018, 2032), False, 'import os\n'), ((2745, 2761), 'shlex.join', 'shlex.join', (['ARGS'], {}), '(ARGS)\n', (2755, 2761), False, 'import shlex\n'), ((482, 570), 'AppKit.NSRunningApplication.runningApplicationsWithBundleIdentifier_', 'AppKit.NSRunningApplication.runningApplicationsWithBundleIdentifier_', (["_id['bundle']"], {}), "(_id[\n 'bundle'])\n", (550, 570), False, 'import AppKit\n'), ((1375, 1402), 'json.dump', 'json.dump', (['obj', 'f'], {'indent': '(4)'}), '(obj, f, indent=4)\n', (1384, 1402), False, 'import json\n'), ((1711, 1743), 'iterm2.async_get_app', 'iterm2.async_get_app', (['connection'], {}), '(connection)\n', (1731, 1743), False, 'import iterm2\n'), ((779, 815), 'AppKit.NSWorkspace.sharedWorkspace', 'AppKit.NSWorkspace.sharedWorkspace', ([], {}), '()\n', (813, 815), False, 'import AppKit\n'), ((1162, 1174), 'json.load', 'json.load', (['f'], {}), '(f)\n', (1171, 1174), False, 'import json\n'), ((1859, 1897), 'iterm2.Window.async_create', 'iterm2.Window.async_create', (['connection'], {}), '(connection)\n', (1885, 1897), False, 'import iterm2\n')]
|
from django.shortcuts import render, redirect
from .forms import EmployeeForm
from .models import Employee
# Create your views here.
def employee_list(request):
context = {'employee_list': Employee.objects.all()}
return render(request, "employee_register/employee_list.html", context)
def employee_form(request, id=0):
if request.method == "GET":
if id == 0:
form = EmployeeForm()
else:
employee = Employee.objects.get(pk=id)
form = EmployeeForm(instance=employee)
return render(request, "employee_register/employee_form.html", {'form': form})
else:
if id == 0:
form = EmployeeForm(request.POST)
else:
employee = Employee.objects.get(pk=id)
            form = EmployeeForm(request.POST, instance=employee)
if form.is_valid():
form.save()
return redirect('/employee/list')
def employee_delete(request, id):
employee = Employee.objects.get(pk=id)
employee.delete()
return redirect('/employee/list')
|
[
"django.shortcuts.render",
"django.shortcuts.redirect"
] |
[((240, 304), 'django.shortcuts.render', 'render', (['request', '"""employee_register/employee_list.html"""', 'context'], {}), "(request, 'employee_register/employee_list.html', context)\n", (246, 304), False, 'from django.shortcuts import render, redirect\n'), ((985, 1011), 'django.shortcuts.redirect', 'redirect', (['"""/employee/list"""'], {}), "('/employee/list')\n", (993, 1011), False, 'from django.shortcuts import render, redirect\n'), ((534, 605), 'django.shortcuts.render', 'render', (['request', '"""employee_register/employee_form.html"""', "{'form': form}"], {}), "(request, 'employee_register/employee_form.html', {'form': form})\n", (540, 605), False, 'from django.shortcuts import render, redirect\n'), ((848, 874), 'django.shortcuts.redirect', 'redirect', (['"""/employee/list"""'], {}), "('/employee/list')\n", (856, 874), False, 'from django.shortcuts import render, redirect\n')]
|
import json
from django.core.urlresolvers import reverse
import pytest
pytestmark = pytest.mark.django_db
from seahub.options.models import UserOptions
from seahub.test_utils import BaseTestCase
class LibrariesTest(BaseTestCase):
def setUp(self):
self.url = reverse('libraries')
from constance import config
self.config = config
def test_user_guide(self):
self.login_as(self.user)
username = self.user.username
assert UserOptions.objects.get_default_repo(username) is None
assert UserOptions.objects.is_user_guide_enabled(username) is True
resp = self.client.get(self.url)
self.assertEqual(200, resp.status_code)
self.assertTemplateUsed(resp, 'libraries.html')
assert resp.context['guide_enabled'] is True
resp = self.client.get(self.url)
assert resp.context['guide_enabled'] is False
assert UserOptions.objects.get_default_repo(username) is not None
assert UserOptions.objects.is_user_guide_enabled(username) is False
def test_pub_repo_creation_config(self):
self.clear_cache()
# user
self.login_as(self.user)
self.config.ENABLE_USER_CREATE_ORG_REPO = 1
assert bool(self.config.ENABLE_USER_CREATE_ORG_REPO) is True
resp = self.client.get(self.url)
self.assertEqual(200, resp.status_code)
assert resp.context['can_add_public_repo'] is True
self.config.ENABLE_USER_CREATE_ORG_REPO = 0
assert bool(self.config.ENABLE_USER_CREATE_ORG_REPO) is False
resp = self.client.get(self.url)
self.assertEqual(200, resp.status_code)
assert resp.context['can_add_public_repo'] is False
# logout
self.logout()
# admin
self.login_as(self.admin)
self.config.ENABLE_USER_CREATE_ORG_REPO = 1
assert bool(self.config.ENABLE_USER_CREATE_ORG_REPO) is True
resp = self.client.get(self.url)
self.assertEqual(200, resp.status_code)
assert resp.context['can_add_public_repo'] is True
self.config.ENABLE_USER_CREATE_ORG_REPO = 0
assert bool(self.config.ENABLE_USER_CREATE_ORG_REPO) is False
resp = self.client.get(self.url)
self.assertEqual(200, resp.status_code)
assert resp.context['can_add_public_repo'] is True
def test_get_user_joined_groups(self):
self.login_as(self.user)
resp = self.client.get(self.url)
self.assertEqual(200, resp.status_code)
self.assertTemplateUsed(resp, 'libraries.html')
assert len(resp.context['joined_groups']) > 0
|
[
"seahub.options.models.UserOptions.objects.is_user_guide_enabled",
"seahub.options.models.UserOptions.objects.get_default_repo",
"django.core.urlresolvers.reverse"
] |
[((273, 293), 'django.core.urlresolvers.reverse', 'reverse', (['"""libraries"""'], {}), "('libraries')\n", (280, 293), False, 'from django.core.urlresolvers import reverse\n'), ((478, 524), 'seahub.options.models.UserOptions.objects.get_default_repo', 'UserOptions.objects.get_default_repo', (['username'], {}), '(username)\n', (514, 524), False, 'from seahub.options.models import UserOptions\n'), ((548, 599), 'seahub.options.models.UserOptions.objects.is_user_guide_enabled', 'UserOptions.objects.is_user_guide_enabled', (['username'], {}), '(username)\n', (589, 599), False, 'from seahub.options.models import UserOptions\n'), ((919, 965), 'seahub.options.models.UserOptions.objects.get_default_repo', 'UserOptions.objects.get_default_repo', (['username'], {}), '(username)\n', (955, 965), False, 'from seahub.options.models import UserOptions\n'), ((993, 1044), 'seahub.options.models.UserOptions.objects.is_user_guide_enabled', 'UserOptions.objects.is_user_guide_enabled', (['username'], {}), '(username)\n', (1034, 1044), False, 'from seahub.options.models import UserOptions\n')]
|
from _pytest.fixtures import fixture
import pytest
from ..nway_callback import NWayCallback
from pytest_mock import mocker
from mock import patch, call
from mock import MagicMock
from icecream import ic
class mock_model:
def __init__(self):
pass
def predict_on_batch(*args, **kwargs):
return True
class mock_ds:
def __init__(self, size=100):
self.size=size
def __iter__(self, *args, **kwargs):
        for _ in range(self.size):
yield True
@pytest.mark.parametrize('freq', range(1, 4))
def test_init(freq):
encoder = MagicMock()
head = MagicMock()
ds = MagicMock()
callback = NWayCallback(encoder=encoder, head=head, nway_ds=ds, freq=freq)
assert callback.encoder == encoder
assert callback.head == head
assert callback.nway_ds == ds
assert callback.freq == freq
#@pytest.mark.parametrize('count', range(100, 1001, 200))
#@pytest.mark.parametrize('freq', range(1, 101, 20))
@pytest.mark.parametrize('count', [128])
@pytest.mark.parametrize('freq', [7])
def test_on_epoch_end(count, freq):
encoder = MagicMock()
encoder.predict_on_batch.return_value = [i for i in range(4)]
head = MagicMock(return_value=8)
#head.__call__.return_value = 8
ds = MagicMock()
items = list(zip(range(count, 0, -1), range(count, 0, -1)))
labels = list(range(0, count, 1))
ds.__iter__.return_value = list(zip(items, labels))
callback = NWayCallback(encoder=encoder, head=head, nway_ds=ds, freq=freq)
logs = {}
for interval in range(1, count+1):
encoder.reset_mock()
head.reset_mock()
ds.reset_mock()
callback.on_epoch_end(interval, logs=logs)
if interval % freq == 0:
ds.__iter__.assert_called_once()
encoder.assert_has_calls([call.predict_on_batch(item) for item in items])
for item in items:
#ic()
with pytest.raises(AssertionError):
head.assert_called_with([item, item])
assert 'nway_acc' in logs
acc = logs['nway_acc']
assert 0.0 <= acc <= 1.0
assert 'nway_avg_dist' in logs
assert 'nway_avg_var' in logs
ds.__iter__.assert_called_once()
else:
encoder.predict_on_batch.assert_not_called()
head.assert_not_called()
ds.__iter__.assert_not_called()
#head.assert_has_calls(zip(items)
# TODO test prefix name stuff
|
[
"mock.call.predict_on_batch",
"pytest.mark.parametrize",
"mock.MagicMock",
"pytest.raises"
] |
[((947, 986), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""count"""', '[128]'], {}), "('count', [128])\n", (970, 986), False, 'import pytest\n'), ((988, 1024), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""freq"""', '[7]'], {}), "('freq', [7])\n", (1011, 1024), False, 'import pytest\n'), ((576, 587), 'mock.MagicMock', 'MagicMock', ([], {}), '()\n', (585, 587), False, 'from mock import MagicMock\n'), ((598, 609), 'mock.MagicMock', 'MagicMock', ([], {}), '()\n', (607, 609), False, 'from mock import MagicMock\n'), ((618, 629), 'mock.MagicMock', 'MagicMock', ([], {}), '()\n', (627, 629), False, 'from mock import MagicMock\n'), ((1074, 1085), 'mock.MagicMock', 'MagicMock', ([], {}), '()\n', (1083, 1085), False, 'from mock import MagicMock\n'), ((1161, 1186), 'mock.MagicMock', 'MagicMock', ([], {'return_value': '(8)'}), '(return_value=8)\n', (1170, 1186), False, 'from mock import MagicMock\n'), ((1230, 1241), 'mock.MagicMock', 'MagicMock', ([], {}), '()\n', (1239, 1241), False, 'from mock import MagicMock\n'), ((1757, 1784), 'mock.call.predict_on_batch', 'call.predict_on_batch', (['item'], {}), '(item)\n', (1778, 1784), False, 'from mock import patch, call\n'), ((1868, 1897), 'pytest.raises', 'pytest.raises', (['AssertionError'], {}), '(AssertionError)\n', (1881, 1897), False, 'import pytest\n')]
|
from whatthefood.graph.node import Node
from collections.abc import Sequence
import numpy as np
class Grad(Node):
def __init__(self, y, xs):
self.unwrap = False
if not isinstance(xs, Sequence):
xs = [xs]
self.unwrap = True
self.outputs_graph = self._build_outputs_graph(y, xs)
self.outputs_graph_flattened = self._flatten_outputs_graph(self.outputs_graph)
super(Grad, self).__init__(None, False, *self._get_inputs(self.outputs_graph))
self.y = y
self.xs = xs
def do(self, *inputs):
values = {n: v for n, v in zip(self.inputs, inputs)}
inputs_grads = {}
ret = [None for _ in self.xs]
for op in self.outputs_graph_flattened:
if op is self.y:
grad = np.ones_like(values[self.y])
else:
grad = np.sum([
inputs_grads[o][o.inputs.index(op)]
for o in self.outputs_graph[op]
], axis=0)
input_values = [values[i] for i in op.inputs]
inputs_grads[op] = op.backpropagate(grad, *input_values)
if op in self.xs:
ret[self.xs.index(op)] = grad
return ret[0] if self.unwrap else ret
def _build_outputs_graph(self, y, xs):
outputs = {}
keep = y in xs
for i in y.inputs:
if i in xs:
keep = True
from_i = self._build_outputs_graph(i, xs)
if from_i is not None:
keep = True
for k, v in from_i.items():
if k in outputs:
outputs[k].update(v)
else:
outputs[k] = v
if i in outputs:
outputs[i].add(y)
else:
outputs[i] = {y}
if y not in outputs:
outputs[y] = set()
return outputs if keep else None
def _get_inputs(self, outputs_graph):
inputs = set(outputs_graph)
inputs.update(i for e in outputs_graph for i in e.inputs)
return inputs
def _flatten_outputs_graph(self, outputs):
flattened = []
# copy outputs
outputs = {k: set(v) for k, v in outputs.items()}
while outputs:
lasts = [k for k, v in outputs.items() if not v]
for l in lasts:
outputs.pop(l)
for l in lasts:
for v in outputs.values():
if l in v:
v.remove(l)
flattened.extend(lasts)
return flattened
def _build_tf(self, tf, *inputs):
y_tensors = [t for i, t in zip(self.inputs, inputs) if i is self.y]
assert len(y_tensors) == 1
x_tensors = [t for x in self.xs for i, t in zip(self.inputs, inputs) if i is x]
assert len(x_tensors) == len(self.xs)
return tf.gradients(y_tensors[0], x_tensors)
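# The flattening helper above is a reverse topological sort of the consumers
# graph; it can be exercised in isolation (plain strings stand in for Node
# objects, and bypassing __init__ is for illustration only).
_g = Grad.__new__(Grad)
assert _g._flatten_outputs_graph({"y": set(), "a": {"y"}, "x": {"a"}}) == ["y", "a", "x"]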
|
[
"numpy.ones_like"
] |
[((805, 833), 'numpy.ones_like', 'np.ones_like', (['values[self.y]'], {}), '(values[self.y])\n', (817, 833), True, 'import numpy as np\n')]
|
from collections import OrderedDict
from .objects import (
PID_DEFAULT
)
from .widgets import NexPage
from .exceptions import (
NexComponentException,
NexComponentIdException,
NexComponentNameException
)
from .factory import WidgetFactory
class NexComponents:
def __init__(self, nexserial):
self.nexserial = nexserial
self.D_PAGES_BY_NAME = OrderedDict()
self.D_PAGES_BY_PID = OrderedDict()
def hook_page(self, name, pid=PID_DEFAULT):
if name in self.D_PAGES_BY_NAME.keys():
raise NexComponentNameException("name (%s) must be unique" % name)
if pid in self.D_PAGES_BY_PID.keys():
raise NexComponentIdException("pid (%s) must be unique" % pid)
nexpage = NexPage(self.nexserial, name, pid)
self.D_PAGES_BY_NAME[name] = nexpage
self.D_PAGES_BY_PID[pid] = nexpage
return nexpage
def page(self, name=None, pid=None):
if name is not None and pid is None:
return self.D_PAGES_BY_NAME[name]
elif name is None and pid is not None:
return self.D_PAGES_BY_PID[pid]
elif name is not None and pid is not None:
raise NexComponentException("name and pid shouldn't be defined both")
else:
raise NexComponentException("name or pid should be defined")
@property
def pages(self):
for name, page in self.D_PAGES_BY_NAME.items():
yield page
def read_list(self, data):
for d_page in data:
pagename = d_page['name']
if 'pid' in d_page:
pid = d_page['pid']
else:
pid = PID_DEFAULT
page = self.hook_page(pagename, pid=pid)
for d_component in d_page['components']:
typ = d_component['type']
name = d_component['name']
cid = d_component['cid']
widget_type = WidgetFactory.type(typ)
page.hook_widget(widget_type, name, cid=cid)
def to_list(self):
data = []
for pagename, page in self.D_PAGES_BY_NAME.items():
data.append(page.to_dict())
return data
def read_json(self, path_or_buf):
raise NotImplementedError()
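# Sketch of read_list input. nexserial stands for an already-configured serial
# wrapper, and the "button" widget type string is an assumption about
# WidgetFactory's registry.
nexserial = ...  # placeholder for a real NexSerial-like object
components = NexComponents(nexserial)
components.read_list([
    {"name": "main", "pid": 0, "components": [
        {"type": "button", "name": "b_ok", "cid": 1},
    ]},
])
main_page = components.page(name="main")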
|
[
"collections.OrderedDict"
] |
[((379, 392), 'collections.OrderedDict', 'OrderedDict', ([], {}), '()\n', (390, 392), False, 'from collections import OrderedDict\n'), ((423, 436), 'collections.OrderedDict', 'OrderedDict', ([], {}), '()\n', (434, 436), False, 'from collections import OrderedDict\n')]
|
from django.db import models
from django.utils.translation import gettext_lazy as _
from wagtail.contrib.settings.models import BaseSetting, register_setting
from wagtail.admin.edit_handlers import FieldPanel
from wagtail.core.fields import RichTextField
@register_setting
class SiteSetting(BaseSetting):
gtm_id = models.CharField(max_length=50, blank=True)
google_site_verification = models.CharField(max_length=255, blank=True)
cookie_content = RichTextField(
blank=True, null=True, verbose_name=_("Cookie bar content"), features=[]
)
panels = [FieldPanel("gtm_id"), FieldPanel("cookie_content")]
def __str__(self):
return str(self.site)
class Meta:
verbose_name = _("Site setting")
verbose_name_plural = _("Site settings")
|
[
"django.db.models.CharField",
"wagtail.admin.edit_handlers.FieldPanel",
"django.utils.translation.gettext_lazy"
] |
[((320, 363), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(50)', 'blank': '(True)'}), '(max_length=50, blank=True)\n', (336, 363), False, 'from django.db import models\n'), ((395, 439), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(255)', 'blank': '(True)'}), '(max_length=255, blank=True)\n', (411, 439), False, 'from django.db import models\n'), ((579, 599), 'wagtail.admin.edit_handlers.FieldPanel', 'FieldPanel', (['"""gtm_id"""'], {}), "('gtm_id')\n", (589, 599), False, 'from wagtail.admin.edit_handlers import FieldPanel\n'), ((601, 629), 'wagtail.admin.edit_handlers.FieldPanel', 'FieldPanel', (['"""cookie_content"""'], {}), "('cookie_content')\n", (611, 629), False, 'from wagtail.admin.edit_handlers import FieldPanel\n'), ((725, 742), 'django.utils.translation.gettext_lazy', '_', (['"""Site setting"""'], {}), "('Site setting')\n", (726, 742), True, 'from django.utils.translation import gettext_lazy as _\n'), ((773, 791), 'django.utils.translation.gettext_lazy', '_', (['"""Site settings"""'], {}), "('Site settings')\n", (774, 791), True, 'from django.utils.translation import gettext_lazy as _\n'), ((521, 544), 'django.utils.translation.gettext_lazy', '_', (['"""Cookie bar content"""'], {}), "('Cookie bar content')\n", (522, 544), True, 'from django.utils.translation import gettext_lazy as _\n')]
|
from iota import Iota, Seed
from pprint import pprint
import time
# Put your seed from Tutorial 4.a here
my_seed = Seed(b'YOURSEEDFROMTHEPREVIOUSTUTORIAL99999999999999999999999999999999999999999999999999')
# Declare an API object
api = Iota(
adapter='https://nodes.devnet.iota.org:443',
seed=my_seed,
testnet=True
)
# Poll the Tangle until the seed's addresses show a non-zero balance
success = False
while not success:
print('Checking account information on the Tangle...')
# Gather addresses, balance and bundles
response = api.get_account_data()
# response['balance'] is an integer!
if response['balance']:
print('Found the following information based on your seed:')
pprint(response)
success = True
else:
print('Zero balance found, retrying in 30 seconds...')
time.sleep(30)
|
[
"iota.Seed",
"pprint.pprint",
"iota.Iota",
"time.sleep"
] |
[((116, 216), 'iota.Seed', 'Seed', (["b'YOURSEEDFROMTHEPREVIOUSTUTORIAL99999999999999999999999999999999999999999999999999'"], {}), "(\n b'YOURSEEDFROMTHEPREVIOUSTUTORIAL99999999999999999999999999999999999999999999999999'\n )\n", (120, 216), False, 'from iota import Iota, Seed\n'), ((238, 315), 'iota.Iota', 'Iota', ([], {'adapter': '"""https://nodes.devnet.iota.org:443"""', 'seed': 'my_seed', 'testnet': '(True)'}), "(adapter='https://nodes.devnet.iota.org:443', seed=my_seed, testnet=True)\n", (242, 315), False, 'from iota import Iota, Seed\n'), ((701, 717), 'pprint.pprint', 'pprint', (['response'], {}), '(response)\n', (707, 717), False, 'from pprint import pprint\n'), ((822, 836), 'time.sleep', 'time.sleep', (['(30)'], {}), '(30)\n', (832, 836), False, 'import time\n')]
|
# Generated by Django 3.0.5 on 2020-05-06 13:38
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('Entradas', '0003_auto_20200506_1352'),
]
operations = [
migrations.AddField(
model_name='entradas_blog',
name='contra',
field=models.CharField(default='<PASSWORD>aseña', max_length=10),
),
]
|
[
"django.db.models.CharField"
] |
[((342, 400), 'django.db.models.CharField', 'models.CharField', ([], {'default': '"""<PASSWORD>aseña"""', 'max_length': '(10)'}), "(default='<PASSWORD>aseña', max_length=10)\n", (358, 400), False, 'from django.db import migrations, models\n')]
|
from datetime import datetime, timedelta
import pytest
from utils import (
get_permissions_for_group,
PermissionsMissing,
get_os_config,
get_params_for_region,
get_stackset_state_data,
update_stackset,
get_owned_stacksets,
CrossAccountStackSetException,
get_instance_details,
)
def test_get_permissions_for_group_no_data(permission_table, permission_table_name):
with pytest.raises(PermissionsMissing):
get_permissions_for_group(table_name=permission_table_name, group_name="nonexistent")
def test_get_permissions_for_group_success(permission_table, permission_table_name):
permissions = get_permissions_for_group(table_name=permission_table_name, group_name="private")
assert "instance_types" in permissions
assert "operating_systems" in permissions
assert "max_days_to_expiry" in permissions
assert "max_instance_count" in permissions
assert "max_extension_count" in permissions
def test_get_os_config_no_group_data(permission_table, permission_table_name):
with pytest.raises(PermissionsMissing):
get_os_config(table_name=permission_table_name, group_name="nonexistent", os_name="nonexistent")
def test_get_os_config_no_os_data(permission_table, permission_table_name):
with pytest.raises(PermissionsMissing):
get_os_config(table_name=permission_table_name, group_name="private", os_name="nonexistent")
def test_get_os_config_success(permission_table, permission_table_name):
config = get_os_config(table_name=permission_table_name, group_name="private", os_name="AWS Linux 2")
assert "name" in config
assert "instance-profile-name" in config
assert "connection-protocol" in config
assert "template-filename" in config
assert "user-data-file" in config
assert "region-map" in config
def test_get_params_for_region_no_data(regional_table, regional_table_name):
with pytest.raises(PermissionsMissing):
get_params_for_region(table_name=regional_table_name, region="nonexistent")
def test_get_params_for_region_success(regional_table, regional_table_name):
region = get_params_for_region(table_name=regional_table_name, region="eu-west-1")
assert "vpc_id" in region
assert "ssh_key_name" in region
assert "subnet_id" in region
def test_get_owned_stacksets_supseruser_success(state_table, state_table_name):
stacksets = get_owned_stacksets(table_name=state_table_name, email="<EMAIL>")
# Should fetch only stacksets belonging to user
assert len(stacksets) == 2
assert "stackset_id" in stacksets[0]
assert "expiry" in stacksets[0]
assert "extension_count" in stacksets[0]
assert "username" in stacksets[0]
assert "email" in stacksets[0]
def test_get_owned_stacksets_non_supseruser_success(state_table, state_table_name):
stacksets = get_owned_stacksets(table_name=state_table_name, email="<EMAIL>", is_superuser=True)
# Should fetch all stacksets
assert len(stacksets) == 3
def test_get_instance_details_non_supseruser_across_accounts_error(account_id, cloudformation):
response = cloudformation.create_stack_set(
StackSetName="fake_name",
TemplateBody="fake_body",
PermissionModel="SELF_MANAGED",
Parameters=[
{
"ParameterKey": "AMI",
"ParameterValue": "ami",
},
{
"ParameterKey": "SecurityGroupId",
"ParameterValue": "security-group",
},
],
)
stackset_id = response["StackSetId"]
cloudformation.create_stack_instances(
StackSetName=stackset_id,
Accounts=["fake_account"],
Regions=["us-east-1"],
)
params = {
"stackset_id": stackset_id,
"expiry": datetime.now() + timedelta(days=1),
"username": "alice",
"email": "<EMAIL>",
"extension_count": 0,
}
with pytest.raises(CrossAccountStackSetException):
get_instance_details(stacksets=[params])
@pytest.mark.xfail(reason="Cloudformation StackSet instance mocking in moto lacks depth to simulate this test")
def test_get_instance_details_non_supseruser_success():
assert False
@pytest.mark.xfail(reason="Cloudformation StackSet instance mocking in moto lacks depth to simulate this test")
def test_get_instance_details_supseruser_success():
assert False
def test_get_stackset_state_data_no_data(state_table, state_table_name):
results = get_stackset_state_data(stackset_id="nonexistent", table_name=state_table_name)
assert results == {}
def test_get_stackset_state_data_success(state_table, state_table_name):
results = get_stackset_state_data(stackset_id="0001", table_name=state_table_name)
assert "stackset_id" in results
assert "username" in results
assert "email" in results
assert "extension_count" in results
assert "expiry" in results
def test_update_stackset_success(cloudformation):
response = cloudformation.create_stack_set(
StackSetName="fake_name",
TemplateBody="fake_body",
PermissionModel="SELF_MANAGED",
Parameters=[
{
"ParameterKey": "AMI",
"ParameterValue": "ami",
},
{
"ParameterKey": "SecurityGroupId",
"ParameterValue": "security-group",
},
],
)
stackset_id = response["StackSetId"]
update_stackset(stackset_id=stackset_id, AMI="new_ami")
stackset = cloudformation.describe_stack_set(StackSetName=stackset_id)
parameters = stackset["StackSet"]["Parameters"]
ami = [item["ParameterValue"] for item in parameters if item["ParameterKey"] == "AMI"][0]
assert ami == "new_ami"
|
[
"utils.get_os_config",
"utils.get_instance_details",
"utils.get_stackset_state_data",
"utils.get_params_for_region",
"utils.get_permissions_for_group",
"utils.update_stackset",
"pytest.raises",
"datetime.timedelta",
"utils.get_owned_stacksets",
"datetime.datetime.now",
"pytest.mark.xfail"
] |
[((4022, 4142), 'pytest.mark.xfail', 'pytest.mark.xfail', ([], {'reason': '"""Cloudformation StackSet instance mocking in moto lacks depth to simulate this test"""'}), "(reason=\n 'Cloudformation StackSet instance mocking in moto lacks depth to simulate this test'\n )\n", (4039, 4142), False, 'import pytest\n'), ((4209, 4329), 'pytest.mark.xfail', 'pytest.mark.xfail', ([], {'reason': '"""Cloudformation StackSet instance mocking in moto lacks depth to simulate this test"""'}), "(reason=\n 'Cloudformation StackSet instance mocking in moto lacks depth to simulate this test'\n )\n", (4226, 4329), False, 'import pytest\n'), ((645, 731), 'utils.get_permissions_for_group', 'get_permissions_for_group', ([], {'table_name': 'permission_table_name', 'group_name': '"""private"""'}), "(table_name=permission_table_name, group_name=\n 'private')\n", (670, 731), False, 'from utils import get_permissions_for_group, PermissionsMissing, get_os_config, get_params_for_region, get_stackset_state_data, update_stackset, get_owned_stacksets, CrossAccountStackSetException, get_instance_details\n'), ((1500, 1596), 'utils.get_os_config', 'get_os_config', ([], {'table_name': 'permission_table_name', 'group_name': '"""private"""', 'os_name': '"""AWS Linux 2"""'}), "(table_name=permission_table_name, group_name='private',\n os_name='AWS Linux 2')\n", (1513, 1596), False, 'from utils import get_permissions_for_group, PermissionsMissing, get_os_config, get_params_for_region, get_stackset_state_data, update_stackset, get_owned_stacksets, CrossAccountStackSetException, get_instance_details\n'), ((2122, 2195), 'utils.get_params_for_region', 'get_params_for_region', ([], {'table_name': 'regional_table_name', 'region': '"""eu-west-1"""'}), "(table_name=regional_table_name, region='eu-west-1')\n", (2143, 2195), False, 'from utils import get_permissions_for_group, PermissionsMissing, get_os_config, get_params_for_region, get_stackset_state_data, update_stackset, get_owned_stacksets, CrossAccountStackSetException, get_instance_details\n'), ((2394, 2459), 'utils.get_owned_stacksets', 'get_owned_stacksets', ([], {'table_name': 'state_table_name', 'email': '"""<EMAIL>"""'}), "(table_name=state_table_name, email='<EMAIL>')\n", (2413, 2459), False, 'from utils import get_permissions_for_group, PermissionsMissing, get_os_config, get_params_for_region, get_stackset_state_data, update_stackset, get_owned_stacksets, CrossAccountStackSetException, get_instance_details\n'), ((2841, 2929), 'utils.get_owned_stacksets', 'get_owned_stacksets', ([], {'table_name': 'state_table_name', 'email': '"""<EMAIL>"""', 'is_superuser': '(True)'}), "(table_name=state_table_name, email='<EMAIL>',\n is_superuser=True)\n", (2860, 2929), False, 'from utils import get_permissions_for_group, PermissionsMissing, get_os_config, get_params_for_region, get_stackset_state_data, update_stackset, get_owned_stacksets, CrossAccountStackSetException, get_instance_details\n'), ((4478, 4557), 'utils.get_stackset_state_data', 'get_stackset_state_data', ([], {'stackset_id': '"""nonexistent"""', 'table_name': 'state_table_name'}), "(stackset_id='nonexistent', table_name=state_table_name)\n", (4501, 4557), False, 'from utils import get_permissions_for_group, PermissionsMissing, get_os_config, get_params_for_region, get_stackset_state_data, update_stackset, get_owned_stacksets, CrossAccountStackSetException, get_instance_details\n'), ((4673, 4745), 'utils.get_stackset_state_data', 'get_stackset_state_data', ([], {'stackset_id': '"""0001"""', 'table_name': 'state_table_name'}), "(stackset_id='0001', table_name=state_table_name)\n", (4696, 4745), False, 'from utils import get_permissions_for_group, PermissionsMissing, get_os_config, get_params_for_region, get_stackset_state_data, update_stackset, get_owned_stacksets, CrossAccountStackSetException, get_instance_details\n'), ((5450, 5505), 'utils.update_stackset', 'update_stackset', ([], {'stackset_id': 'stackset_id', 'AMI': '"""new_ami"""'}), "(stackset_id=stackset_id, AMI='new_ami')\n", (5465, 5505), False, 'from utils import get_permissions_for_group, PermissionsMissing, get_os_config, get_params_for_region, get_stackset_state_data, update_stackset, get_owned_stacksets, CrossAccountStackSetException, get_instance_details\n'), ((411, 444), 'pytest.raises', 'pytest.raises', (['PermissionsMissing'], {}), '(PermissionsMissing)\n', (424, 444), False, 'import pytest\n'), ((454, 544), 'utils.get_permissions_for_group', 'get_permissions_for_group', ([], {'table_name': 'permission_table_name', 'group_name': '"""nonexistent"""'}), "(table_name=permission_table_name, group_name=\n 'nonexistent')\n", (479, 544), False, 'from utils import get_permissions_for_group, PermissionsMissing, get_os_config, get_params_for_region, get_stackset_state_data, update_stackset, get_owned_stacksets, CrossAccountStackSetException, get_instance_details\n'), ((1049, 1082), 'pytest.raises', 'pytest.raises', (['PermissionsMissing'], {}), '(PermissionsMissing)\n', (1062, 1082), False, 'import pytest\n'), ((1092, 1192), 'utils.get_os_config', 'get_os_config', ([], {'table_name': 'permission_table_name', 'group_name': '"""nonexistent"""', 'os_name': '"""nonexistent"""'}), "(table_name=permission_table_name, group_name='nonexistent',\n os_name='nonexistent')\n", (1105, 1192), False, 'from utils import get_permissions_for_group, PermissionsMissing, get_os_config, get_params_for_region, get_stackset_state_data, update_stackset, get_owned_stacksets, CrossAccountStackSetException, get_instance_details\n'), ((1276, 1309), 'pytest.raises', 'pytest.raises', (['PermissionsMissing'], {}), '(PermissionsMissing)\n', (1289, 1309), False, 'import pytest\n'), ((1319, 1415), 'utils.get_os_config', 'get_os_config', ([], {'table_name': 'permission_table_name', 'group_name': '"""private"""', 'os_name': '"""nonexistent"""'}), "(table_name=permission_table_name, group_name='private',\n os_name='nonexistent')\n", (1332, 1415), False, 'from utils import get_permissions_for_group, PermissionsMissing, get_os_config, get_params_for_region, get_stackset_state_data, update_stackset, get_owned_stacksets, CrossAccountStackSetException, get_instance_details\n'), ((1911, 1944), 'pytest.raises', 'pytest.raises', (['PermissionsMissing'], {}), '(PermissionsMissing)\n', (1924, 1944), False, 'import pytest\n'), ((1954, 2029), 'utils.get_params_for_region', 'get_params_for_region', ([], {'table_name': 'regional_table_name', 'region': '"""nonexistent"""'}), "(table_name=regional_table_name, region='nonexistent')\n", (1975, 2029), False, 'from utils import get_permissions_for_group, PermissionsMissing, get_os_config, get_params_for_region, get_stackset_state_data, update_stackset, get_owned_stacksets, CrossAccountStackSetException, get_instance_details\n'), ((3924, 3968), 'pytest.raises', 'pytest.raises', (['CrossAccountStackSetException'], {}), '(CrossAccountStackSetException)\n', (3937, 3968), False, 'import pytest\n'), ((3978, 4018), 'utils.get_instance_details', 'get_instance_details', ([], {'stacksets': '[params]'}), '(stacksets=[params])\n', (3998, 4018), False, 'from utils import get_permissions_for_group, PermissionsMissing, get_os_config, get_params_for_region, get_stackset_state_data, update_stackset, get_owned_stacksets, CrossAccountStackSetException, get_instance_details\n'), ((3785, 3799), 'datetime.datetime.now', 'datetime.now', ([], {}), '()\n', (3797, 3799), False, 'from datetime import datetime, timedelta\n'), ((3802, 3819), 'datetime.timedelta', 'timedelta', ([], {'days': '(1)'}), '(days=1)\n', (3811, 3819), False, 'from datetime import datetime, timedelta\n')]
|
# -*- coding: utf-8 -*-
# Copyright 2022, SERTIT-ICube - France, https://sertit.unistra.fr/
# This file is part of eoreader project
# https://github.com/sertit/eoreader
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
PlanetScope products.
See `here <https://earth.esa.int/eogateway/documents/20142/37627/Planet-combined-imagery-product-specs-2020.pdf>`_
and `here <https://developers.planet.com/docs/data/planetscope/>`_
for more information.
"""
import logging
from datetime import datetime
from enum import unique
from pathlib import Path
from typing import Union
import geopandas as gpd
import numpy as np
import xarray
from cloudpathlib import CloudPath
from lxml import etree
from rasterio.enums import Resampling
from sertit import files, rasters
from sertit.misc import ListEnum
from sertit.rasters import XDS_TYPE
from eoreader import cache, cached_property, utils
from eoreader.bands import ALL_CLOUDS, CIRRUS, CLOUDS, RAW_CLOUDS, SHADOWS, BandNames
from eoreader.bands import OpticalBandNames as obn
from eoreader.bands import to_str
from eoreader.exceptions import InvalidProductError, InvalidTypeError
from eoreader.products import OpticalProduct
from eoreader.utils import DATETIME_FMT, EOREADER_NAME
LOGGER = logging.getLogger(EOREADER_NAME)
@unique
class PlaInstrument(ListEnum):
"""PlanetScope instrument
See `here <https://developers.planet.com/docs/apis/data/sensors/>`__
for more information.
"""
PS2 = "Dove Classic (PS2)"
"""
Dove Classic (PS2) Instrument: Four-band frame Image with a split-frame VIS+NIR filter
"""
PS2_SD = "Dove-R (PS2.SD)"
"""
Dove-R (PS2.SD) Instrument:
Four-band frame imager with butcher-block filter providing blue, green, red,and NIR stripes
"""
PSB_SD = "SuperDove (PSB.SD)"
"""
SuperDove (PSB.SD) Instrument:
Eight-band frame imager with butcher-block filter providing:
- coastal blue,
- blue,
- green I,
- green II,
- yellow,
- red,
- red-edge,
- NIR stripes
"""
@unique
class PlaProductType(ListEnum):
"""PlanetScope product types (processing levels)"""
L1B = "Basic Scene Product"
"""
**PlanetScope Basic Scene Product (Level 1B)**
Scaled Top of Atmosphere Radiance(at sensor) and sensor corrected product.
This product has scene based framing and is not projected to a cartographic projection.
Radiometric and sensor corrections are applied to the data.
"""
L3B = "Ortho Scene Product"
"""
**PlanetScope Ortho Scene Product (Level 3B)**
Orthorectified, scaled Top of Atmosphere Radiance (at sensor) or Surface Reflectance image product
suitable for analytic and visual applications.
This product has scene based framing and projected to a cartographic projection.
**PSScene3Band**
PlanetScope 3-band multispectral basic and orthorectified scenes.
This data set includes imagery from PlanetScope-0 and PlanetScope-1 sensors
as well as full-frame and split-frame PlanetScope-2 sensors.
Newer PSScene3Band items have a corresponding PSScene4Band item.
Resampled to 3.0m.
**PSScene4Band**
PlanetScope 4-band multispectral basic and orthorectified scenes.
This data set includes imagery from all PlanetScope sensors.
All PSScene4Band items have a corresponding PSScene3Band item.
Resampled to 3.0m.
"""
"""
**PSScene (Not found anywhere else)**
PlanetScope 8-band multispectral basic and orthorectified scenes.
This data set includes imagery from all PlanetScope sensors.
Naming: <acq date>_<acq time>_<acq time seconds ms>_<satellite_id>_<productLevel>_<bandProduct>.<ext>
Asset Types:
ortho_analytic_4b Radiometrically-calibrated analytic image stored as 16-bit scaled radiance.
ortho_analytic_8b Radiometrically-calibrated analytic image stored as 16-bit scaled radiance.
ortho_analytic_8b_sr PlanetScope atmospherically corrected surface reflectance product.
ortho_analytic_8b_xml Radiometrically-calibrated analytic image metadata.
ortho_analytic_4b_sr PlanetScope atmospherically corrected surface reflectance product.
ortho_analytic_4b_xml Radiometrically-calibrated analytic image metadata.
basic_analytic_4b Unorthorectified radiometrically-calibrated analytic image stored as 16-bit scaled radiance.
basic_analytic_8b Unorthorectified radiometrically-calibrated analytic image stored as 16-bit scaled radiance.
basic_analytic_8b_xml Unorthorectified radiometrically-calibrated analytic image metadata
basic_analytic_4b_rpc RPC for unorthorectified analytic image stored as 12-bit digital numbers.
basic_analytic_4b_xml Unorthorectified radiometrically-calibrated analytic image metadata.
basic_udm2 Unorthorectified usable data mask (Cloud 2.0) Read more about this new asset here.
ortho_udm2 Usable data mask (Cloud 2.0)
ortho_visual Visual image with color-correction
"""
L3A = "Ortho Tile Product"
"""
**PlanetScope Ortho Tile Product (Level 3A)**
Radiometric and sensor corrections applied to the data.
Imagery is orthorectified and projected to a UTM projection.
**PSOrthoTile**
PlanetScope Ortho Tiles as 25 km x 25 km UTM tiles. This data set includes imagery from all PlanetScope sensors.
Resampled to 3.125m.
Naming: <strip_id>_<tile_id>_<acquisition date>_<satellite_id>_<bandProduct>.<extension>
Product band order:
- Band 1 = Blue
- Band 2 = Green
- Band 3 = Red
- Band 4 = Near-infrared (analytic products only)
Analytic 5B Product band order:
- Band 1 = Blue
- Band 2 = Green
- Band 3 = Red
- Band 4 = Red-Edge
- Band 5 = Near-infrared
"""
class PlaProduct(OpticalProduct):
"""
Class of PlanetScope products.
See `here <https://earth.esa.int/eogateway/documents/20142/37627/Planet-combined-imagery-product-specs-2020.pdf>`__
for more information.
The scaling factor to retrieve the calibrated radiance is 0.01.
"""
def _pre_init(self) -> None:
"""
Function used to pre_init the products
(setting needs_extraction and so on)
"""
self.needs_extraction = False
# Post init done by the super class
super()._pre_init()
def _post_init(self) -> None:
"""
Function used to post_init the products
(setting sensor type, band names and so on)
"""
# Ortho Tiles
if self.product_type == PlaProductType.L3A:
self.tile_name = self.split_name[1]
# Post init done by the super class
super()._post_init()
def _set_resolution(self) -> float:
"""
Set product default resolution (in meters)
"""
# Ortho Tiles
if self.product_type == PlaProductType.L3A:
return 3.125
# Ortho Scene
else:
return 3.0
def _set_product_type(self) -> None:
"""Set products type"""
# Get MTD XML file
root, nsmap = self.read_mtd()
# Manage product type
prod_type = root.findtext(f".//{nsmap['eop']}productType")
if not prod_type:
raise InvalidProductError(
"Cannot find the product type in the metadata file"
)
# Set correct product type
self.product_type = getattr(PlaProductType, prod_type)
if self.product_type == PlaProductType.L1B:
raise NotImplementedError(
f"Basic Scene Product are not managed for Planet products {self.path}"
)
elif self.product_type == PlaProductType.L3A:
LOGGER.warning(
f"Ortho Tile Product are not well tested for Planet products {self.path}."
f"Use it at your own risk !"
)
# Manage platform
instr_node = root.find(f".//{nsmap['eop']}Instrument")
instrument = instr_node.findtext(f"{nsmap['eop']}shortName")
if not instrument:
raise InvalidProductError("Cannot find the platform in the metadata file")
# Set correct platform
self.instrument = getattr(PlaInstrument, instrument.replace(".", "_"))
# Manage bands of the product
nof_bands = len(
[band for band in root.iterfind(f".//{nsmap['ps']}bandSpecificMetadata")]
)
if nof_bands == 3:
self.band_names.map_bands({obn.BLUE: 1, obn.GREEN: 2, obn.RED: 3})
elif nof_bands == 4:
self.band_names.map_bands(
{obn.BLUE: 1, obn.GREEN: 2, obn.RED: 3, obn.NIR: 4, obn.NARROW_NIR: 4}
)
elif nof_bands == 5:
self.band_names.map_bands(
{
obn.BLUE: 1,
obn.GREEN: 2,
obn.RED: 3,
obn.VRE_1: 4,
obn.NIR: 5,
obn.NARROW_NIR: 5,
}
)
elif nof_bands == 8:
raise NotImplementedError(
f"8 Band Scenes are not yet implemented in EOReader: {self.path}"
)
else:
raise InvalidProductError(
f"Unusual number of bands ({nof_bands}) for {self.path}. "
f"Please check the validity of your product"
)
@cached_property
def footprint(self) -> gpd.GeoDataFrame:
"""
Get real footprint of the products (without nodata; in French: emprise utile)
.. code-block:: python
>>> from eoreader.reader import Reader
>>> path = r"LC08_L1GT_023030_20200518_20200527_01_T2"
>>> prod = Reader().open(path)
>>> prod.footprint
index geometry
0 0 POLYGON ((366165.000 4899735.000, 366165.000 4...
Overload of the generic function because landsat nodata seems to be different in QA than in regular bands.
Indeed, nodata pixels vary according to the band sensor footprint,
whereas QA nodata is where at least one band has nodata.
We chose to keep QA nodata values for the footprint in order to show where all bands are valid.
**TL;DR: We use the QA nodata value to determine the product's footprint**.
Returns:
gpd.GeoDataFrame: Footprint as a GeoDataFrame
"""
nodata = self._load_nodata()
# Vectorize the nodata band (rasters_rio is faster)
footprint = rasters.vectorize(
nodata, values=1, keep_values=False, dissolve=True
).convex_hull
return gpd.GeoDataFrame(geometry=footprint.geometry, crs=footprint.crs)
def get_datetime(self, as_datetime: bool = False) -> Union[str, datetime]:
"""
Get the product's acquisition datetime, with format :code:`YYYYMMDDTHHMMSS` <-> :code:`%Y%m%dT%H%M%S`
.. code-block:: python
>>> from eoreader.reader import Reader
>>> path = r"SENTINEL2A_20190625-105728-756_L2A_T31UEQ_C_V2-2"
>>> prod = Reader().open(path)
>>> prod.get_datetime(as_datetime=True)
datetime.datetime(2019, 6, 25, 10, 57, 28, 756000), fetched from metadata, so we have the ms
>>> prod.get_datetime(as_datetime=False)
'20190625T105728'
Args:
as_datetime (bool): Return the date as a datetime.datetime. If false, returns a string.
Returns:
Union[str, datetime.datetime]: Its acquisition datetime
"""
if self.datetime is None:
# Get MTD XML file
root, nsmap = self.read_mtd()
datetime_str = root.findtext(f".//{nsmap['eop']}acquisitionDate")
if not datetime_str:
raise InvalidProductError(
"Cannot find EARLIESTACQTIME in the metadata file."
)
# Convert to datetime
datetime_str = datetime.strptime(
datetime_str.split("+")[0], "%Y-%m-%dT%H:%M:%S"
)
if not as_datetime:
datetime_str = datetime_str.strftime(DATETIME_FMT)
else:
datetime_str = self.datetime
if not as_datetime:
datetime_str = datetime_str.strftime(DATETIME_FMT)
return datetime_str
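    # Worked example of the parsing above (timestamp value is hypothetical;
    # assumes DATETIME_FMT == "%Y%m%dT%H%M%S", as the docstring format suggests):
    #   >>> from datetime import datetime
    #   >>> dt = datetime.strptime("2021-04-06T01:59:04+00:00".split("+")[0],
    #   ...                         "%Y-%m-%dT%H:%M:%S")
    #   >>> dt.strftime("%Y%m%dT%H%M%S")
    #   '20210406T015904'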
def _get_name(self) -> str:
"""
Set product real name from metadata
Returns:
str: True name of the product (from metadata)
"""
if self.name is None:
# Get MTD XML file
root, nsmap = self.read_mtd()
# Open identifier
try:
name = root.findtext(f".//{nsmap['eop']}identifier")
except TypeError:
raise InvalidProductError(
f"{nsmap['eop']}identifier not found in metadata!"
)
else:
name = self.name
return name
def get_band_paths(
self, band_list: list, resolution: float = None, **kwargs
) -> dict:
"""
Return the paths of required bands.
.. code-block:: python
>>> from eoreader.reader import Reader
>>> from eoreader.bands import *
>>> path = r"SENTINEL2A_20190625-105728-756_L2A_T31UEQ_C_V2-2"
>>> prod = Reader().open(path)
>>> prod.get_band_paths([GREEN, RED])
{
<OpticalBandNames.GREEN: 'GREEN'>:
'SENTINEL2A_20190625-105728-756_L2A_T31UEQ_C_V2-2/SENTINEL2A_20190625-105728-756_L2A_T31UEQ_C_V2-2_FRE_B3.tif',
<OpticalBandNames.RED: 'RED'>:
'SENTINEL2A_20190625-105728-756_L2A_T31UEQ_C_V2-2/SENTINEL2A_20190625-105728-756_L2A_T31UEQ_C_V2-2_FRE_B4.tif'
}
Args:
band_list (list): List of the wanted bands
resolution (float): Band resolution
kwargs: Other arguments used to load bands
Returns:
dict: Dictionary containing the path of each queried band
"""
band_paths = {}
for band in band_list:
band_paths[band] = self._get_path(
"AnalyticMS", "tif", invalid_lookahead="_DN_"
)
return band_paths
def _read_band(
self,
path: Union[CloudPath, Path],
band: BandNames = None,
resolution: Union[tuple, list, float] = None,
size: Union[list, tuple] = None,
**kwargs,
) -> XDS_TYPE:
"""
Read band from disk.
.. WARNING::
Invalid pixels are not managed here
Args:
path (Union[CloudPath, Path]): Band path
band (BandNames): Band to read
resolution (Union[tuple, list, float]): Resolution of the wanted band, in dataset resolution unit (X, Y)
size (Union[tuple, list]): Size of the array (width, height). Not used if resolution is provided.
kwargs: Other arguments used to load bands
Returns:
XDS_TYPE: Band xarray
"""
# Read band
band_xda = utils.read(
path,
resolution=resolution,
size=size,
resampling=Resampling.bilinear,
indexes=[self.band_names[band]],
**kwargs,
)
# Compute the correct radiometry of the band
original_dtype = band_xda.encoding.get("dtype", band_xda.dtype)
if original_dtype == "uint16":
band_xda /= 10000.0
# Convert type if needed
if band_xda.dtype != np.float32:
band_xda = band_xda.astype(np.float32)
return band_xda
def _manage_invalid_pixels(
self, band_arr: XDS_TYPE, band: obn, **kwargs
) -> XDS_TYPE:
"""
Manage invalid pixels (Nodata, saturated, defective...)
See
`here <https://earth.esa.int/eogateway/documents/20142/37627/Planet-combined-imagery-product-specs-2020.pdf>`_
(unusable data mask) for more information.
Args:
band_arr (XDS_TYPE): Band array
band (obn): Band name as an OpticalBandNames
kwargs: Other arguments used to load bands
Returns:
XDS_TYPE: Cleaned band array
"""
# Nodata
no_data_mask = self._load_nodata(
size=(band_arr.rio.width, band_arr.rio.height)
).values
# Dubious pixels mapping
dubious_bands = {
key: val + 1 for key, val in self.band_names.items() if val is not None
}
udm = self.open_mask("UNUSABLE", size=(band_arr.rio.width, band_arr.rio.height))
# Workaround:
# FutureWarning: The :code:`numpy.expand_dims` function is not implemented by Dask array.
# You may want to use the da.map_blocks function or something similar to silence this warning.
# Your code may stop working in a future release.
dubious_mask = rasters.read_bit_array(udm.values, dubious_bands[band])
# Combine masks
mask = no_data_mask | dubious_mask
# -- Merge masks
return self._set_nodata_mask(band_arr, mask)
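    # For reference, the UDM packs one flag per bit of an 8-bit mask; a
    # plain-NumPy sketch of reading bit n (not the sertit implementation) is:
    #   bit_plane = (udm.values >> n) & 1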
def _manage_nodata(self, band_arr: XDS_TYPE, band: obn, **kwargs) -> XDS_TYPE:
"""
Manage only nodata pixels
Args:
band_arr (XDS_TYPE): Band array
band (obn): Band name as an OpticalBandNames
kwargs: Other arguments used to load bands
Returns:
XDS_TYPE: Cleaned band array
"""
# Nodata
no_data_mask = self._load_nodata(
size=(band_arr.rio.width, band_arr.rio.height)
).values
# -- Merge masks
return self._set_nodata_mask(band_arr, no_data_mask)
def _load_bands(
self,
bands: list,
resolution: float = None,
size: Union[list, tuple] = None,
**kwargs,
) -> dict:
"""
Load bands as numpy arrays with the same resolution (and same metadata).
Args:
bands list: List of the wanted bands
resolution (float): Band resolution in meters
size (Union[tuple, list]): Size of the array (width, height). Not used if resolution is provided.
kwargs: Other arguments used to load bands
Returns:
dict: Dictionary {band_name, band_xarray}
"""
# Return empty if no band are specified
if not bands:
return {}
# Get band paths
band_paths = self.get_band_paths(bands)
# Open bands and get array (resampled if needed)
band_arrays = self._open_bands(
band_paths, resolution=resolution, size=size, **kwargs
)
return band_arrays
def _get_condensed_name(self) -> str:
"""
Get PlanetScope products condensed name ({date}_PLA_{product_type}).
Returns:
str: Condensed name
"""
return f"{self.get_datetime()}_{self.platform.name}_{self.product_type.name}"
@cache
def get_mean_sun_angles(self) -> (float, float):
"""
Get Mean Sun angles (Azimuth and Zenith angles)
.. code-block:: python
>>> from eoreader.reader import Reader
>>> path = r"SENTINEL2A_20190625-105728-756_L2A_T31UEQ_C_V2-2"
>>> prod = Reader().open(path)
>>> prod.get_mean_sun_angles()
(154.554755774838, 27.5941391571236)
Returns:
(float, float): Mean Azimuth and Zenith angle
"""
# Get MTD XML file
root, nsmap = self.read_mtd()
# Open zenith and azimuth angle
try:
elev_angle = float(
root.findtext(f".//{nsmap['opt']}illuminationElevationAngle")
)
azimuth_angle = float(
root.findtext(f".//{nsmap['opt']}illuminationAzimuthAngle")
)
except TypeError:
raise InvalidProductError("Azimuth or Zenith angles not found in metadata!")
# From elevation to zenith
zenith_angle = 90.0 - elev_angle
return azimuth_angle, zenith_angle
@cache
def _read_mtd(self) -> (etree._Element, dict):
"""
Read metadata and outputs the metadata XML root and its namespaces as a dict
.. code-block:: python
>>> from eoreader.reader import Reader
>>> path = r"20210406_015904_37_2407.zip"
>>> prod = Reader().open(path)
>>> prod.read_mtd()
(<Element {http://schemas.planet.com/ps/v1/planet_product_metadata_geocorrected_level}
EarthObservation at 0x1a2621f03c8>,
{
'opt': '{http://earth.esa.int/opt}',
'gml': '{http://www.opengis.net/gml}',
'eop': '{http://earth.esa.int/eop}',
'ps': '{http://schemas.planet.com/ps/v1/planet_product_metadata_geocorrected_level}'
})
Returns:
(etree._Element, dict): Metadata XML root and its namespaces as a dict
"""
mtd_from_path = "metadata*.xml"
mtd_archived = "metadata.*\.xml"
return self._read_mtd_xml(mtd_from_path, mtd_archived)
def _has_cloud_band(self, band: BandNames) -> bool:
"""
Does this product have the specified cloud band?
"""
# NOTE: CIRRUS == HEAVY HAZE
# FROM DOCUMENTATION: https://developers.planet.com/docs/data/udm-2/
# Percent of heavy haze values in dataset.
# Heavy haze values represent scene content areas (non-blackfilled) that contain thin low altitude clouds,
# higher altitude cirrus clouds, soot and dust which allow fair recognition of land cover features,
# but not having reliable interpretation of the radiometry or surface reflectance.
return True
def _open_clouds(
self,
bands: list,
resolution: float = None,
size: Union[list, tuple] = None,
**kwargs,
) -> dict:
"""
Load cloud files as xarrays.
CIRRUS is HEAVY_HAZE
Args:
bands (list): List of the wanted bands
resolution (float): Band resolution in meters
size (Union[tuple, list]): Size of the array (width, height). Not used if resolution is provided.
kwargs: Additional arguments
Returns:
dict: Dictionary {band_name, band_xarray}
"""
band_dict = {}
# Load default xarray as a template
def_xarr = self._read_band(
self.get_default_band_path(),
band=self.get_default_band(),
resolution=resolution,
size=size,
)
# Load nodata
nodata = self._load_nodata(resolution, size).data
if bands:
for band in bands:
if band == ALL_CLOUDS:
cloud = self._create_mask(
def_xarr.rename(ALL_CLOUDS.name),
(
    # Union of the flags: a pixel is masked if any of cloud,
    # shadow or heavy haze is set
    self.open_mask("CLOUD", resolution, size).data
    | self.open_mask("SHADOW", resolution, size).data
    | self.open_mask("HEAVY_HAZE", resolution, size).data
),
nodata,
)
elif band == SHADOWS:
cloud = self._create_mask(
def_xarr.rename(SHADOWS.name),
self.open_mask("SHADOW", resolution, size).data,
nodata,
)
elif band == CLOUDS:
cloud = self._create_mask(
def_xarr.rename(CLOUDS.name),
self.open_mask("CLOUD", resolution, size).data,
nodata,
)
elif band == CIRRUS:
cloud = self._create_mask(
def_xarr.rename(CIRRUS.name),
self.open_mask("HEAVY_HAZE", resolution, size).data,
nodata,
)
elif band == RAW_CLOUDS:
cloud = utils.read(self._get_path("udm2", "tif"), resolution, size)
else:
raise InvalidTypeError(
f"Non existing cloud band for Planet: {band}"
)
# Rename
band_name = to_str(band)[0]
cloud.attrs["long_name"] = band_name
band_dict[band] = cloud.rename(band_name)
return band_dict
def open_mask(
self,
mask_id: str,
resolution: float = None,
size: Union[list, tuple] = None,
) -> Union[xarray.DataArray, None]:
"""
Open a Planet UDM2 (Usable Data Mask) mask, band by band, as a xarray.
Returns None if the mask is not available.
Do not open cloud mask with this function. Use :code:`load` instead.
See `here <https://developers.planet.com/docs/data/udm-2/>`_ for more
information.
Accepted mask IDs:
- :code:`CLEAR`: Band 1 Clear map [0, 1] 0: not clear, 1: clear
- :code:`SNOW`: Band 2 Snow map [0, 1] 0: no snow or ice, 1: snow or ice
- :code:`SHADOW`: Band 3 Shadow map [0, 1] 0: no shadow, 1: shadow
- :code:`LIGHT_HAZE`: Band 4 Light haze map [0, 1] 0: no light haze, 1: light haze
- :code:`HEAVY_HAZE`: Band 5 Heavy haze map [0, 1] 0: no heavy haze, 1: heavy haze
- :code:`CLOUD`: Band 6 Cloud map [0, 1] 0: no cloud, 1: cloud
- :code:`CONFIDENCE`: Band 7 Confidence map [0-100] percentage value: per-pixel algorithmic confidence in classification
- :code:`UNUSABLE`: Band 8 Unusable pixels -- Equivalent to the UDM asset
.. code-block:: python
>>> from eoreader.bands import *
>>> from eoreader.reader import Reader
>>> path = r"SENTINEL2B_20190401-105726-885_L2A_T31UEQ_D_V2-0.zip"
>>> prod = Reader().open(path)
>>> prod.open_mask("CLOUD")
array([[[0, ..., 0]]], dtype=uint8)
Args:
mask_id: Mask ID
resolution (float): Band resolution in meters
size (Union[tuple, list]): Size of the array (width, height). Not used if resolution is provided.
Returns:
Union[xarray.DataArray, None]: Mask array
"""
band_mapping = {
"CLEAR": 1,
"SNOW": 2,
"SHADOW": 3,
"LIGHT_HAZE": 4,
"HEAVY_HAZE": 5,
"CLOUD": 6,
"CONFIDENCE": 7,
"UNUSABLE": 8,
}
assert mask_id in band_mapping
mask_path = self._get_path("udm2", "tif")
# Open mask band
mask = utils.read(
mask_path,
resolution=resolution,
size=size,
resampling=Resampling.nearest, # Nearest to keep the flags
masked=False,
indexes=[band_mapping[mask_id]],
)
return mask.astype(np.uint8)
def _load_nodata(
self,
resolution: float = None,
size: Union[list, tuple] = None,
) -> Union[xarray.DataArray, None]:
"""
Load nodata (unimaged pixels) as a numpy array.
See
`here <https://earth.esa.int/eogateway/documents/20142/37627/Planet-combined-imagery-product-specs-2020.pdf>`_
(unusable data mask) for more information.
Args:
resolution (float): Band resolution in meters
size (Union[tuple, list]): Size of the array (width, height). Not used if resolution is provided.
Returns:
Union[xarray.DataArray, None]: Nodata array
"""
udm = self.open_mask("UNUSABLE", resolution, size)
nodata = udm.copy(data=rasters.read_bit_array(udm.compute(), 0))
return nodata.rename("NODATA")
def _get_path(self, filename: str, extension: str, invalid_lookahead=None) -> str:
"""
Get either the archived path or the normal path of an asset
Args:
    filename (str): Filename with wildcards
    extension (str): Extension
    invalid_lookahead (str): Substring that must not directly follow the filename match
Returns:
    str: Path
"""
path = ""
try:
if self.is_archived:
if invalid_lookahead:
regex = f".*{filename}(?!{invalid_lookahead})\w*[_]*\.{extension}"
else:
regex = f".*{filename}\w*[_]*\.{extension}"
path = files.get_archived_rio_path(self.path, regex)
else:
paths = list(self.path.glob(f"**/*{filename}*.{extension}"))
if invalid_lookahead:
paths = [
path for path in paths if invalid_lookahead not in str(path)
]
path = paths[0]
except (FileNotFoundError, IndexError):
LOGGER.warning(
f"No file corresponding to *{filename}*.{extension} found in {self.path}"
)
return path
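    # Illustration of the lookahead regex built above (all values hypothetical):
    #   >>> import re
    #   >>> pattern = r".*AnalyticMS(?!_DN_)\w*[_]*\.tif"
    #   >>> bool(re.fullmatch(pattern, "scene_AnalyticMS_clip.tif"))
    #   True
    #   >>> bool(re.fullmatch(pattern, "scene_AnalyticMS_DN_clip.tif"))
    #   False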
|
[
"eoreader.bands.to_str",
"eoreader.exceptions.InvalidTypeError",
"sertit.rasters.vectorize",
"eoreader.exceptions.InvalidProductError",
"geopandas.GeoDataFrame",
"sertit.rasters.read_bit_array",
"sertit.files.get_archived_rio_path",
"logging.getLogger",
"eoreader.utils.read"
] |
[((1732, 1764), 'logging.getLogger', 'logging.getLogger', (['EOREADER_NAME'], {}), '(EOREADER_NAME)\n', (1749, 1764), False, 'import logging\n'), ((11220, 11284), 'geopandas.GeoDataFrame', 'gpd.GeoDataFrame', ([], {'geometry': 'footprint.geometry', 'crs': 'footprint.crs'}), '(geometry=footprint.geometry, crs=footprint.crs)\n', (11236, 11284), True, 'import geopandas as gpd\n'), ((15722, 15852), 'eoreader.utils.read', 'utils.read', (['path'], {'resolution': 'resolution', 'size': 'size', 'resampling': 'Resampling.bilinear', 'indexes': '[self.band_names[band]]'}), '(path, resolution=resolution, size=size, resampling=Resampling.\n bilinear, indexes=[self.band_names[band]], **kwargs)\n', (15732, 15852), False, 'from eoreader import cache, cached_property, utils\n'), ((17567, 17622), 'sertit.rasters.read_bit_array', 'rasters.read_bit_array', (['udm.values', 'dubious_bands[band]'], {}), '(udm.values, dubious_bands[band])\n', (17589, 17622), False, 'from sertit import files, rasters\n'), ((27552, 27690), 'eoreader.utils.read', 'utils.read', (['mask_path'], {'resolution': 'resolution', 'size': 'size', 'resampling': 'Resampling.nearest', 'masked': '(False)', 'indexes': '[band_mapping[mask_id]]'}), '(mask_path, resolution=resolution, size=size, resampling=\n Resampling.nearest, masked=False, indexes=[band_mapping[mask_id]])\n', (27562, 27690), False, 'from eoreader import cache, cached_property, utils\n'), ((7777, 7849), 'eoreader.exceptions.InvalidProductError', 'InvalidProductError', (['"""Cannot find the product type in the metadata file"""'], {}), "('Cannot find the product type in the metadata file')\n", (7796, 7849), False, 'from eoreader.exceptions import InvalidProductError, InvalidTypeError\n'), ((8608, 8676), 'eoreader.exceptions.InvalidProductError', 'InvalidProductError', (['"""Cannot find the platform in the metadata file"""'], {}), "('Cannot find the platform in the metadata file')\n", (8627, 8676), False, 'from eoreader.exceptions import InvalidProductError, InvalidTypeError\n'), ((11100, 11169), 'sertit.rasters.vectorize', 'rasters.vectorize', (['nodata'], {'values': '(1)', 'keep_values': '(False)', 'dissolve': '(True)'}), '(nodata, values=1, keep_values=False, dissolve=True)\n', (11117, 11169), False, 'from sertit import files, rasters\n'), ((12383, 12455), 'eoreader.exceptions.InvalidProductError', 'InvalidProductError', (['"""Cannot find EARLIESTACQTIME in the metadata file."""'], {}), "('Cannot find EARLIESTACQTIME in the metadata file.')\n", (12402, 12455), False, 'from eoreader.exceptions import InvalidProductError, InvalidTypeError\n'), ((20568, 20638), 'eoreader.exceptions.InvalidProductError', 'InvalidProductError', (['"""Azimuth or Zenith angles not found in metadata!"""'], {}), "('Azimuth or Zenith angles not found in metadata!')\n", (20587, 20638), False, 'from eoreader.exceptions import InvalidProductError, InvalidTypeError\n'), ((29306, 29351), 'sertit.files.get_archived_rio_path', 'files.get_archived_rio_path', (['self.path', 'regex'], {}), '(self.path, regex)\n', (29333, 29351), False, 'from sertit import files, rasters\n'), ((13386, 13457), 'eoreader.exceptions.InvalidProductError', 'InvalidProductError', (['f"""{nsmap[\'eop\']}identifier not found in metadata!"""'], {}), '(f"{nsmap[\'eop\']}identifier not found in metadata!")\n', (13405, 13457), False, 'from eoreader.exceptions import InvalidProductError, InvalidTypeError\n'), ((25083, 25095), 'eoreader.bands.to_str', 'to_str', (['band'], {}), '(band)\n', (25089, 25095), False, 'from eoreader.bands import to_str\n'), 
((9741, 9871), 'eoreader.exceptions.InvalidProductError', 'InvalidProductError', (['f"""Unusual number of bands ({nof_bands}) for {self.path}. Please check the validity of your product"""'], {}), "(\n f'Unusual number of bands ({nof_bands}) for {self.path}. Please check the validity of your product'\n )\n", (9760, 9871), False, 'from eoreader.exceptions import InvalidProductError, InvalidTypeError\n'), ((24919, 24982), 'eoreader.exceptions.InvalidTypeError', 'InvalidTypeError', (['f"""Non existing cloud band for Planet: {band}"""'], {}), "(f'Non existing cloud band for Planet: {band}')\n", (24935, 24982), False, 'from eoreader.exceptions import InvalidProductError, InvalidTypeError\n')]
|
from mongoengine import DynamicDocument, Document, StringField, ReferenceField, ListField, IntField, DateTimeField, BooleanField, DictField
from datetime import datetime
from Models.User import User
from Models.Dataset import Dataset
class AgriWatchView(DynamicDocument):
author = ReferenceField(User, required=True)
dateCreated = DateTimeField(default=datetime.now)  # pass the callable so each save gets a fresh timestamp
dataset = ReferenceField(Dataset, required=True)
visualType = StringField(required=True)
xData = StringField(required=False)
yData = StringField(required=False)
meta = {'indexes': [{'fields': ["$dataset"], 'weights': {'dataset': 5}}]}
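# Minimal usage sketch (assumes an active mongoengine connection and existing
# User/Dataset documents; the field values below are illustrative only):
#   view = AgriWatchView(author=user, dataset=dataset, visualType="bar",
#                        xData="region", yData="ndvi")
#   view.save()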
|
[
"mongoengine.StringField",
"mongoengine.ReferenceField",
"datetime.datetime.now"
] |
[((287, 322), 'mongoengine.ReferenceField', 'ReferenceField', (['User'], {'required': '(True)'}), '(User, required=True)\n', (301, 322), False, 'from mongoengine import DynamicDocument, Document, StringField, ReferenceField, ListField, IntField, DateTimeField, BooleanField, DictField\n'), ((393, 431), 'mongoengine.ReferenceField', 'ReferenceField', (['Dataset'], {'required': '(True)'}), '(Dataset, required=True)\n', (407, 431), False, 'from mongoengine import DynamicDocument, Document, StringField, ReferenceField, ListField, IntField, DateTimeField, BooleanField, DictField\n'), ((449, 475), 'mongoengine.StringField', 'StringField', ([], {'required': '(True)'}), '(required=True)\n', (460, 475), False, 'from mongoengine import DynamicDocument, Document, StringField, ReferenceField, ListField, IntField, DateTimeField, BooleanField, DictField\n'), ((488, 515), 'mongoengine.StringField', 'StringField', ([], {'required': '(False)'}), '(required=False)\n', (499, 515), False, 'from mongoengine import DynamicDocument, Document, StringField, ReferenceField, ListField, IntField, DateTimeField, BooleanField, DictField\n'), ((528, 555), 'mongoengine.StringField', 'StringField', ([], {'required': '(False)'}), '(required=False)\n', (539, 555), False, 'from mongoengine import DynamicDocument, Document, StringField, ReferenceField, ListField, IntField, DateTimeField, BooleanField, DictField\n'), ((363, 377), 'datetime.datetime.now', 'datetime.now', ([], {}), '()\n', (375, 377), False, 'from datetime import datetime\n')]
|
# -*- coding: utf-8 -*-
# Copyright 2020 <NAME>
#
# Licensed under the Apache License, Version 2.0 (the "License").
# You may not use this file except in compliance with the License.
# A copy of the License is located at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# or in the "license" file accompanying this file. This file is
# distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS
# OF ANY KIND, either express or implied. See the License for the
# specific language governing permissions and limitations under the
# License.
import uuid
from typing import List, Optional
from ask_sdk_core.dispatch_components import AbstractRequestHandler
from ask_sdk_core.handler_input import HandlerInput
from ask_sdk_core.utils import get_dialog_state, get_slot_value, is_intent_name
from ask_sdk_model import Response
from ask_sdk_model.dialog.delegate_directive import DelegateDirective
from ask_sdk_model.dialog.dynamic_entities_directive import DynamicEntitiesDirective
from ask_sdk_model.dialog_state import DialogState
from ask_sdk_model.er.dynamic import (
Entity,
EntityListItem,
EntityValueAndSynonyms,
UpdateBehavior,
)
from aws_lambda_powertools.logging import Logger
from aws_lambda_powertools.tracing import Tracer
from ...data import data
from ...service.model.line_stops import LineDetails
# Logging/tracing configuration
logger = Logger(service="Favorite line intent handler")
tracer = Tracer(service="Favorite line intent handler")
class StartedInProgressFavoriteLineHandler(AbstractRequestHandler):
"""
Handler to delegate the favorite line intent dialog to Alexa
"""
def can_handle(self, handler_input):
# type: (HandlerInput) -> bool
return (
is_intent_name("SetFavoriteLineIntent")(handler_input)
and get_dialog_state(handler_input) != DialogState.COMPLETED
)
def handle(self, handler_input):
# type: (HandlerInput) -> Response
logger.debug("In StartedInProgressFavoriteLineHandler")
logger.debug(
"Dialog state %s", handler_input.request_envelope.request.dialog_state
)
logger.debug("Slots %s", handler_input.request_envelope.request.intent.slots)
return handler_input.response_builder.add_directive(
DelegateDirective()
).response
class CompletedFavoriteLineHandler(AbstractRequestHandler):
"""
Handler for the favorite line completed intent
"""
def __init__(self, service):
self.stib_service = service
def can_handle(self, handler_input):
# type: (HandlerInput) -> bool
return (
is_intent_name("SetFavoriteLineIntent")(handler_input)
and get_dialog_state(handler_input) == DialogState.COMPLETED
)
def handle(self, handler_input):
# type: (HandlerInput) -> Response
logger.debug("In CompletedFavoriteLineHandler")
# Boilerplate
_ = handler_input.attributes_manager.request_attributes["_"]
session_attributes = handler_input.attributes_manager.session_attributes
# Retrieve slot values
logger.debug("Slots %s", handler_input.request_envelope.request.intent.slots)
line_id = get_slot_value(handler_input, "line_id")
# Call STIB API to retrieve line details
line_details: Optional[
List[LineDetails]
] = self.stib_service.get_stops_by_line_id(line_id)
logger.debug(line_details)
# Save line details into session attributes for later use
session_attributes["session_line_details"] = [
line_detail.to_dict() for line_detail in line_details
]
# Retrieve destinations from line details
destinations = [line_detail.destination.fr for line_detail in line_details]
# Retrieve transportation_type from line details
stib_transportation_type = line_details[0].route_type.name.lower()
logger.debug("Transportation type: %s", stib_transportation_type)
# Save attributes as session attributes
session_attributes["favorite_line_id"] = line_id
session_attributes["favorite_transportation_type"] = stib_transportation_type
# Prepare skill response
speech = _(data.ELLICIT_DESTINATION_PREFERENCES).format(
stib_transportation_type, line_id, *destinations
)
reprompt_speech = _(data.ELLICIT_DESTINATION_PREFERENCES_REPROMPT).format(
*destinations
)
# Update repeat prompt
session_attributes["repeat_prompt"] = reprompt_speech
# Build entity list items to update model using dynamic entities
entity_list_items = self._build_entity_list_items_from_line_details(
line_details
)
return (
handler_input.response_builder.add_directive(
DynamicEntitiesDirective(
update_behavior=UpdateBehavior.REPLACE, types=entity_list_items
)
)
.speak(speech)
.ask(reprompt_speech)
.response
)
@staticmethod
def _build_entity_list_items_from_line_details(
line_details: List[LineDetails],
) -> List[EntityListItem]:
"""
Create list of dynamic entity items from line details
"""
points = line_details[0].points + line_details[1].points
destinations = [line.destination for line in line_details]
stop_entities = [
Entity(id=point.id, name=EntityValueAndSynonyms(value=point.stop_name_fr))
for point in points
]
# Todo: use destination stop id instead of generated ID
destination_entities = [
Entity(
id=str(uuid.uuid4()), name=EntityValueAndSynonyms(value=destination.fr)
)
for destination in destinations
]
entity_list_items = [
EntityListItem(name="STOP_NAME", values=stop_entities),
EntityListItem(name="DESTINATION_NAME", values=destination_entities),
]
return entity_list_items
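# Wiring sketch (not part of this module): these handlers would typically be
# registered on an ask_sdk SkillBuilder; "stib_service" stands for the injected
# STIB API client and is an assumption here.
#   from ask_sdk_core.skill_builder import SkillBuilder
#   sb = SkillBuilder()
#   sb.add_request_handler(StartedInProgressFavoriteLineHandler())
#   sb.add_request_handler(CompletedFavoriteLineHandler(stib_service))
#   handler = sb.lambda_handler()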
|
[
"aws_lambda_powertools.logging.Logger",
"ask_sdk_core.utils.get_slot_value",
"uuid.uuid4",
"ask_sdk_core.utils.is_intent_name",
"aws_lambda_powertools.tracing.Tracer",
"ask_sdk_model.dialog.delegate_directive.DelegateDirective",
"ask_sdk_core.utils.get_dialog_state",
"ask_sdk_model.er.dynamic.EntityValueAndSynonyms",
"ask_sdk_model.er.dynamic.EntityListItem"
] |
[((1364, 1410), 'aws_lambda_powertools.logging.Logger', 'Logger', ([], {'service': '"""Favorite line intent handler"""'}), "(service='Favorite line intent handler')\n", (1370, 1410), False, 'from aws_lambda_powertools.logging import Logger\n'), ((1420, 1466), 'aws_lambda_powertools.tracing.Tracer', 'Tracer', ([], {'service': '"""Favorite line intent handler"""'}), "(service='Favorite line intent handler')\n", (1426, 1466), False, 'from aws_lambda_powertools.tracing import Tracer\n'), ((3220, 3260), 'ask_sdk_core.utils.get_slot_value', 'get_slot_value', (['handler_input', '"""line_id"""'], {}), "(handler_input, 'line_id')\n", (3234, 3260), False, 'from ask_sdk_core.utils import get_dialog_state, get_slot_value, is_intent_name\n'), ((5925, 5979), 'ask_sdk_model.er.dynamic.EntityListItem', 'EntityListItem', ([], {'name': '"""STOP_NAME"""', 'values': 'stop_entities'}), "(name='STOP_NAME', values=stop_entities)\n", (5939, 5979), False, 'from ask_sdk_model.er.dynamic import Entity, EntityListItem, EntityValueAndSynonyms, UpdateBehavior\n'), ((5993, 6061), 'ask_sdk_model.er.dynamic.EntityListItem', 'EntityListItem', ([], {'name': '"""DESTINATION_NAME"""', 'values': 'destination_entities'}), "(name='DESTINATION_NAME', values=destination_entities)\n", (6007, 6061), False, 'from ask_sdk_model.er.dynamic import Entity, EntityListItem, EntityValueAndSynonyms, UpdateBehavior\n'), ((1729, 1768), 'ask_sdk_core.utils.is_intent_name', 'is_intent_name', (['"""SetFavoriteLineIntent"""'], {}), "('SetFavoriteLineIntent')\n", (1743, 1768), False, 'from ask_sdk_core.utils import get_dialog_state, get_slot_value, is_intent_name\n'), ((1800, 1831), 'ask_sdk_core.utils.get_dialog_state', 'get_dialog_state', (['handler_input'], {}), '(handler_input)\n', (1816, 1831), False, 'from ask_sdk_core.utils import get_dialog_state, get_slot_value, is_intent_name\n'), ((2287, 2306), 'ask_sdk_model.dialog.delegate_directive.DelegateDirective', 'DelegateDirective', ([], {}), '()\n', (2304, 2306), False, 'from ask_sdk_model.dialog.delegate_directive import DelegateDirective\n'), ((2636, 2675), 'ask_sdk_core.utils.is_intent_name', 'is_intent_name', (['"""SetFavoriteLineIntent"""'], {}), "('SetFavoriteLineIntent')\n", (2650, 2675), False, 'from ask_sdk_core.utils import get_dialog_state, get_slot_value, is_intent_name\n'), ((2707, 2738), 'ask_sdk_core.utils.get_dialog_state', 'get_dialog_state', (['handler_input'], {}), '(handler_input)\n', (2723, 2738), False, 'from ask_sdk_core.utils import get_dialog_state, get_slot_value, is_intent_name\n'), ((5518, 5566), 'ask_sdk_model.er.dynamic.EntityValueAndSynonyms', 'EntityValueAndSynonyms', ([], {'value': 'point.stop_name_fr'}), '(value=point.stop_name_fr)\n', (5540, 5566), False, 'from ask_sdk_model.er.dynamic import Entity, EntityListItem, EntityValueAndSynonyms, UpdateBehavior\n'), ((5770, 5814), 'ask_sdk_model.er.dynamic.EntityValueAndSynonyms', 'EntityValueAndSynonyms', ([], {'value': 'destination.fr'}), '(value=destination.fr)\n', (5792, 5814), False, 'from ask_sdk_model.er.dynamic import Entity, EntityListItem, EntityValueAndSynonyms, UpdateBehavior\n'), ((5750, 5762), 'uuid.uuid4', 'uuid.uuid4', ([], {}), '()\n', (5760, 5762), False, 'import uuid\n')]
|
__author__ = 'andrewapperley'
import database
from sqlalchemy import exc
from video import FlaggedVideoModel, VideoFavourite
import sys
from boto.s3.connection import S3Connection, Bucket, Key
from datetime import datetime
import calendar
def process(access_key=None, secret_key=None, bucket_name=None, video_path=None, video=None):
for_deleting = False
if len(sys.argv) > 1:
a = sys.argv[1]  # database argument, forwarded as-is to createDatabase below
ACCESS_KEY = sys.argv[2]
SECRET_KEY = sys.argv[3]
BUCKET_NAME = sys.argv[4]
database.createDatabase(a)
else:
for_deleting = True
ACCESS_KEY = access_key
SECRET_KEY = secret_key
BUCKET_NAME = bucket_name
database.createDatabase()
aws_s3_connection = S3Connection(ACCESS_KEY, SECRET_KEY)
aws_s3_bucket = Bucket(aws_s3_connection, BUCKET_NAME)
session = database.DBSession()
object_keys = []
# This is if the CRON job is running and is removing flagged videos
if for_deleting is False:
flagged_content = session.query(FlaggedVideoModel).all()
if len(flagged_content) > 0:
time_stamp_now = calendar.timegm(datetime.utcnow().timetuple())
for content in flagged_content:
if content.timeStamp <= time_stamp_now:
video = content.video
favourites_of_video = session.query(VideoFavourite).filter(VideoFavourite.video_id == video.video_id).all()
for key in aws_s3_bucket.list(prefix=content.video_path):
object_keys.append(key)
if len(favourites_of_video) > 0:
for fv in favourites_of_video:
session.delete(fv)
session.delete(content)
session.delete(video)
# This is for when you are deleting a video from the timeline
elif for_deleting is True and video is not None and video_path:
favourites_of_video = session.query(VideoFavourite).filter(VideoFavourite.video_id == video.video_id).all()
flags_for_video = session.query(FlaggedVideoModel).filter(FlaggedVideoModel.video_id == video.video_id).all()
# Collect the AWS S3 objects to delete
for key in aws_s3_bucket.list(prefix=video_path):
object_keys.append(key)
# Collect the Video Favourites
if len(favourites_of_video) > 0:
for fv in favourites_of_video:
session.delete(fv)
# Collect the Video Flags
if len(flags_for_video) > 0:
for fv in flags_for_video:
session.delete(fv)
try:
if len(object_keys) > 0:
aws_s3_bucket.delete_keys(object_keys)
session.commit()
session.close()
return True
except exc.SQLAlchemyError:
session.close()
return False
if len(sys.argv) > 1:
process()
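# Programmatic usage sketch for the timeline-deletion path (argument values
# are illustrative placeholders):
#   process(access_key="...", secret_key="...", bucket_name="my-bucket",
#           video_path="videos/123/", video=video_model)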
|
[
"database.DBSession",
"boto.s3.connection.Bucket",
"database.createDatabase",
"datetime.datetime.utcnow",
"boto.s3.connection.S3Connection"
] |
[((743, 779), 'boto.s3.connection.S3Connection', 'S3Connection', (['ACCESS_KEY', 'SECRET_KEY'], {}), '(ACCESS_KEY, SECRET_KEY)\n', (755, 779), False, 'from boto.s3.connection import S3Connection, Bucket, Key\n'), ((800, 838), 'boto.s3.connection.Bucket', 'Bucket', (['aws_s3_connection', 'BUCKET_NAME'], {}), '(aws_s3_connection, BUCKET_NAME)\n', (806, 838), False, 'from boto.s3.connection import S3Connection, Bucket, Key\n'), ((854, 874), 'database.DBSession', 'database.DBSession', ([], {}), '()\n', (872, 874), False, 'import database\n'), ((521, 547), 'database.createDatabase', 'database.createDatabase', (['a'], {}), '(a)\n', (544, 547), False, 'import database\n'), ((692, 717), 'database.createDatabase', 'database.createDatabase', ([], {}), '()\n', (715, 717), False, 'import database\n'), ((1147, 1164), 'datetime.datetime.utcnow', 'datetime.utcnow', ([], {}), '()\n', (1162, 1164), False, 'from datetime import datetime\n')]
|
# -*- coding: utf-8 -*-
# Generated by Django 1.10.5 on 2017-03-09 08:20
from __future__ import unicode_literals
from django.db import migrations
def update_price_calculator(apps, schema_editor):
    # Despite its historical name, this migration updates the PayPal processor dotted path
    PaymentMethod = apps.get_model("payment", "PaymentMethod")
for payment_method in PaymentMethod.objects.all():
if payment_method.module == "lfs_paypal.PayPalProcessor":
payment_method.module = "lfs_paypal.processor.PayPalProcessor"
payment_method.save()
class Migration(migrations.Migration):
dependencies = [
('lfs_paypal', '0001_initial'),
]
operations = [
migrations.RunPython(update_price_calculator),
]
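# To apply this data migration, run the standard Django command:
#   python manage.py migrate lfs_paypal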
|
[
"django.db.migrations.RunPython"
] |
[((625, 670), 'django.db.migrations.RunPython', 'migrations.RunPython', (['update_price_calculator'], {}), '(update_price_calculator)\n', (645, 670), False, 'from django.db import migrations\n')]
|
import os
import time
import shutil
import numpy as np
import tensorflow as tf
from config import cfg
import mynet
from VRDatasetC import NpDataset
size = cfg.TRAIN.INPUT_SIZE
inputs = tf.keras.Input(shape=(size, size,1))
imgDeconv, imgLab = mynet.Mynet(inputs)
model = tf.keras.Model(inputs, [imgDeconv, imgLab])
tf.keras.utils.plot_model(model, show_shapes=True, show_layer_names=True)
logdir = "./data/log"
optimizer = tf.keras.optimizers.Adam(0.0002)
if os.path.exists(logdir):
shutil.rmtree(logdir)
writer = tf.summary.create_file_writer(logdir)
# global_steps = tf.Variable(1, trainable=False, dtype=tf.int64)
# @tf.function
def learningModel(image_data, target):
with tf.GradientTape() as tape:
image_data = tf.cast(image_data, tf.float32)
target = tf.cast(target, tf.float32)
image_data = image_data / 255.0
target = target / 255.0
img, lab = model(image_data, training=True)
meanLoss = tf.reduce_mean(tf.keras.losses.mean_squared_error(img, image_data)) / 50# / 5000.0
target2 = target * 0.9 + 0.05
softmaxLoss = tf.reduce_mean(tf.square(target2 - lab) * (target * 50 + 1))
# target2 = target*0.9 + 0.05
# softmaxLoss = tf.reduce_mean(tf.keras.losses.mean_squared_error(tf.expand_dims(target2, -1), lab) * (target * 10 + 1))
# y_onehot = tf.one_hot(target, depth=2)
# softmaxLoss = tf.keras.losses.categorical_crossentropy(y_onehot, lab, from_logits=True)
#
# softmaxLoss = softmaxLoss * (target * 2 + 1)
softmaxLoss = tf.reduce_mean(softmaxLoss)
# print(softmaxLoss,meanLoss)
loss_regularization = []
for p in model.trainable_variables:
loss_regularization.append(tf.nn.l2_loss(p))
loss_regularization = tf.reduce_sum(tf.stack(loss_regularization)) * 0.00002
total_loss = softmaxLoss + meanLoss #+ loss_regularization
gradients = tape.gradient(total_loss, model.trainable_variables)
optimizer.apply_gradients(zip(gradients, model.trainable_variables))
# tf.print("lr: %.6f softmaxLoss: %4.5f meanLoss: %4.5f total_loss: %4.5f" %
# (optimizer.lr.numpy(), softmaxLoss, meanLoss, total_loss))
tf.print(softmaxLoss, meanLoss, loss_regularization)
# global_steps
# # writing summary data
# with writer.as_default():
# tf.summary.scalar("lr", optimizer.lr, step=global_steps)
# tf.summary.scalar("loss/total_loss", total_loss, step=global_steps)
# tf.summary.scalar("loss/meanLoss", meanLoss, step=global_steps)
# tf.summary.scalar("loss/softmaxLoss", softmaxLoss, step=global_steps)
# writer.flush()
global_steps = 0
# model.load_weights('oldModel.h5')
trainset = NpDataset('train')
for epoch in range(cfg.TRAIN.EPOCHS):
for image_data, target in trainset:
learningModel(image_data, target)
global_steps += 1
tf.print(global_steps)
model.save("oldModel.h5")
tf.print("saveModel")
|
[
"tensorflow.nn.l2_loss",
"tensorflow.keras.losses.mean_squared_error",
"shutil.rmtree",
"tensorflow.print",
"tensorflow.keras.Input",
"os.path.exists",
"tensorflow.reduce_mean",
"tensorflow.keras.Model",
"tensorflow.cast",
"tensorflow.stack",
"tensorflow.keras.optimizers.Adam",
"mynet.Mynet",
"tensorflow.square",
"tensorflow.summary.create_file_writer",
"VRDatasetC.NpDataset",
"tensorflow.GradientTape"
] |
[((186, 223), 'tensorflow.keras.Input', 'tf.keras.Input', ([], {'shape': '(size, size, 1)'}), '(shape=(size, size, 1))\n', (200, 223), True, 'import tensorflow as tf\n'), ((243, 262), 'mynet.Mynet', 'mynet.Mynet', (['inputs'], {}), '(inputs)\n', (254, 262), False, 'import mynet\n'), ((271, 314), 'tensorflow.keras.Model', 'tf.keras.Model', (['inputs', '[imgDeconv, imgLab]'], {}), '(inputs, [imgDeconv, imgLab])\n', (285, 314), True, 'import tensorflow as tf\n'), ((421, 453), 'tensorflow.keras.optimizers.Adam', 'tf.keras.optimizers.Adam', (['(0.0002)'], {}), '(0.0002)\n', (445, 453), True, 'import tensorflow as tf\n'), ((457, 479), 'os.path.exists', 'os.path.exists', (['logdir'], {}), '(logdir)\n', (471, 479), False, 'import os\n'), ((516, 553), 'tensorflow.summary.create_file_writer', 'tf.summary.create_file_writer', (['logdir'], {}), '(logdir)\n', (545, 553), True, 'import tensorflow as tf\n'), ((2793, 2811), 'VRDatasetC.NpDataset', 'NpDataset', (['"""train"""'], {}), "('train')\n", (2802, 2811), False, 'from VRDatasetC import NpDataset\n'), ((485, 506), 'shutil.rmtree', 'shutil.rmtree', (['logdir'], {}), '(logdir)\n', (498, 506), False, 'import shutil\n'), ((3023, 3044), 'tensorflow.print', 'tf.print', (['"""saveModel"""'], {}), "('saveModel')\n", (3031, 3044), True, 'import tensorflow as tf\n'), ((685, 702), 'tensorflow.GradientTape', 'tf.GradientTape', ([], {}), '()\n', (700, 702), True, 'import tensorflow as tf\n'), ((733, 764), 'tensorflow.cast', 'tf.cast', (['image_data', 'tf.float32'], {}), '(image_data, tf.float32)\n', (740, 764), True, 'import tensorflow as tf\n'), ((782, 809), 'tensorflow.cast', 'tf.cast', (['target', 'tf.float32'], {}), '(target, tf.float32)\n', (789, 809), True, 'import tensorflow as tf\n'), ((1565, 1592), 'tensorflow.reduce_mean', 'tf.reduce_mean', (['softmaxLoss'], {}), '(softmaxLoss)\n', (1579, 1592), True, 'import tensorflow as tf\n'), ((2242, 2294), 'tensorflow.print', 'tf.print', (['softmaxLoss', 'meanLoss', 'loss_regularization'], {}), '(softmaxLoss, meanLoss, loss_regularization)\n', (2250, 2294), True, 'import tensorflow as tf\n'), ((2966, 2988), 'tensorflow.print', 'tf.print', (['global_steps'], {}), '(global_steps)\n', (2974, 2988), True, 'import tensorflow as tf\n'), ((970, 1021), 'tensorflow.keras.losses.mean_squared_error', 'tf.keras.losses.mean_squared_error', (['img', 'image_data'], {}), '(img, image_data)\n', (1004, 1021), True, 'import tensorflow as tf\n'), ((1114, 1138), 'tensorflow.square', 'tf.square', (['(target2 - lab)'], {}), '(target2 - lab)\n', (1123, 1138), True, 'import tensorflow as tf\n'), ((1747, 1763), 'tensorflow.nn.l2_loss', 'tf.nn.l2_loss', (['p'], {}), '(p)\n', (1760, 1763), True, 'import tensorflow as tf\n'), ((1809, 1838), 'tensorflow.stack', 'tf.stack', (['loss_regularization'], {}), '(loss_regularization)\n', (1817, 1838), True, 'import tensorflow as tf\n')]
|
# Generated by Django 3.1.7 on 2021-07-26 11:22
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
("eventreg", "0001_initial"),
]
operations = [
migrations.AlterField(
model_name="eventuserdata",
name="studentName",
field=models.CharField(max_length=50),
),
migrations.AlterField(
model_name="eventuserdata",
name="studentReg",
field=models.CharField(max_length=12),
),
]
|
[
"django.db.models.CharField"
] |
[((338, 369), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(50)'}), '(max_length=50)\n', (354, 369), False, 'from django.db import migrations, models\n'), ((502, 533), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(12)'}), '(max_length=12)\n', (518, 533), False, 'from django.db import migrations, models\n')]
|
import datetime
import pandas as pd
import numpy as np
import subprocess
import pickle
from numpy import array
import seaborn as sn
import tensorflow as tf
import matplotlib.pyplot as plt
#combining predictions and generating plot for prediction representation
def predict(data):
with open('rolf.pickle', 'rb') as handle:
rolf = pickle.load(handle)
#preproccessed data
data = pd.DataFrame.from_dict(data)
del data['epoch']
data = data.to_numpy()
data = data.astype('float64')
results = []
for i in range(len(data)):
results.append(rolf.predict(data[i]))
result1 = results
model = tf.keras.models.load_model('model')
# data was already converted to a float64 NumPy array above; no need to rebuild it
results = []
for i in range(len(data)):
results.append(np.argmax(model.predict(array([data[i]])))-1)
result2 = results
result1 = np.array(result1)
result2 = np.array(result2)
assert len(result1) == len(result2)
#couting through the arrays and generate plotting infos
counter1 = np.zeros(29)
counter2 = np.zeros(29)
for label in np.unique(result1):
    for i in range(len(result1)):
        if result1[i] == label:
            counter1[label] = counter1[label] + 1
for label in np.unique(result2):
    for i in range(len(result2)):
        if result2[i] == label:
            counter2[label] = counter2[label] + 1
colors1 = []
for i in range(29):
color = list(np.random.choice(range(256), size=3) / 256)
colors1.append(color)
colors2 = []
for i in range(29):
color = list(np.random.choice(range(256), size=3) / 256)
colors2.append(color)
plt.bar(x=np.arange(29), height=counter1, color=colors1)
plt.bar(x=np.arange(29), height=counter2, color=colors2)
plt.title('occurrence of predicted clusters')
plt.xlabel('cluster/class')
plt.xticks(np.arange(29), rotation=90)
plt.ylabel('occurrence')
plt.savefig('prediction.png')
|
[
"matplotlib.pyplot.title",
"matplotlib.pyplot.savefig",
"tensorflow.keras.models.load_model",
"pandas.DataFrame.from_dict",
"numpy.zeros",
"pickle.load",
"numpy.array",
"numpy.arange",
"matplotlib.pyplot.ylabel",
"matplotlib.pyplot.xlabel",
"numpy.unique"
] |
[((394, 422), 'pandas.DataFrame.from_dict', 'pd.DataFrame.from_dict', (['data'], {}), '(data)\n', (416, 422), True, 'import pandas as pd\n'), ((634, 669), 'tensorflow.keras.models.load_model', 'tf.keras.models.load_model', (['"""model"""'], {}), "('model')\n", (660, 669), True, 'import tensorflow as tf\n'), ((681, 709), 'pandas.DataFrame.from_dict', 'pd.DataFrame.from_dict', (['data'], {}), '(data)\n', (703, 709), True, 'import pandas as pd\n'), ((946, 963), 'numpy.array', 'np.array', (['result1'], {}), '(result1)\n', (954, 963), True, 'import numpy as np\n'), ((978, 995), 'numpy.array', 'np.array', (['result2'], {}), '(result2)\n', (986, 995), True, 'import numpy as np\n'), ((1112, 1124), 'numpy.zeros', 'np.zeros', (['(29)'], {}), '(29)\n', (1120, 1124), True, 'import numpy as np\n'), ((1140, 1152), 'numpy.zeros', 'np.zeros', (['(29)'], {}), '(29)\n', (1148, 1152), True, 'import numpy as np\n'), ((1168, 1186), 'numpy.unique', 'np.unique', (['result1'], {}), '(result1)\n', (1177, 1186), True, 'import numpy as np\n'), ((1325, 1343), 'numpy.unique', 'np.unique', (['result2'], {}), '(result2)\n', (1334, 1343), True, 'import numpy as np\n'), ((1865, 1909), 'matplotlib.pyplot.title', 'plt.title', (['"""occurance of predicted clusters"""'], {}), "('occurance of predicted clusters')\n", (1874, 1909), True, 'import matplotlib.pyplot as plt\n'), ((1914, 1941), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""cluster/class"""'], {}), "('cluster/class')\n", (1924, 1941), True, 'import matplotlib.pyplot as plt\n'), ((1989, 2012), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""occurance"""'], {}), "('occurance')\n", (1999, 2012), True, 'import matplotlib.pyplot as plt\n'), ((2017, 2046), 'matplotlib.pyplot.savefig', 'plt.savefig', (['"""prediction.png"""'], {}), "('prediction.png')\n", (2028, 2046), True, 'import matplotlib.pyplot as plt\n'), ((339, 358), 'pickle.load', 'pickle.load', (['handle'], {}), '(handle)\n', (350, 358), False, 'import pickle\n'), ((1957, 1970), 'numpy.arange', 'np.arange', (['(29)'], {}), '(29)\n', (1966, 1970), True, 'import numpy as np\n'), ((1753, 1766), 'numpy.arange', 'np.arange', (['(29)'], {}), '(29)\n', (1762, 1766), True, 'import numpy as np\n'), ((1814, 1827), 'numpy.arange', 'np.arange', (['(29)'], {}), '(29)\n', (1823, 1827), True, 'import numpy as np\n'), ((888, 904), 'numpy.array', 'array', (['[data[i]]'], {}), '([data[i]])\n', (893, 904), False, 'from numpy import array\n')]
|
import ast
import argparse
import logging
import warnings
import os
import json
import glob
import subprocess
import sys
import boto3
import pickle
import pandas as pd
from collections import Counter
from timeit import default_timer as timer
import numpy as np
import seaborn as sns
import matplotlib.pyplot as plt
import shutil
import networkx as nx
with warnings.catch_warnings():
warnings.filterwarnings('ignore', category=DeprecationWarning)
from prettytable import PrettyTable
import autogluon as ag
from autogluon import TabularPrediction as task
from autogluon.task.tabular_prediction import TabularDataset
from autogluon.utils.tabular.ml.constants import BINARY, MULTICLASS, REGRESSION, SOFTCLASS
print(f'DEBUG AutoGluon version : {ag.__version__}')
# ------------------------------------------------------------ #
# Training methods #
# ------------------------------------------------------------ #
def du(path):
"""disk usage in human readable format (e.g. '2,1GB')"""
return subprocess.check_output(['du','-sh', path]).split()[0].decode('utf-8')
def __load_input_data(path: str) -> TabularDataset:
"""
Load training data as dataframe
:param path:
:return: DataFrame
"""
input_data_files = os.listdir(path)
try:
input_dfs = [pd.read_csv(f'{path}/{data_file}') for data_file in input_data_files]
return task.Dataset(df=pd.concat(input_dfs))
except Exception:
print(f'No csv data in {path}!')
return None
def format_for_print(df):
table = PrettyTable(list(df.columns))
for row in df.itertuples():
table.add_row(row[1:])
return str(table)
def get_roc_auc(y_test_true, y_test_pred, labels, class_labels_internal, model_output_dir):
from sklearn.preprocessing import label_binarize
from sklearn.metrics import roc_curve, auc
from itertools import cycle
y_test_true_binalized = label_binarize(y_test_true, classes=labels)
if len(labels) == 2:
# binary classification
true_label_index = class_labels_internal.index(1)
y_test_pred = y_test_pred[:,true_label_index]
y_test_pred = np.reshape(y_test_pred, (-1, 1))
labels = labels[true_label_index:true_label_index+1]
n_classes = 1
else:
# multiclass classification
n_classes = len(labels)
# Compute ROC curve and ROC area for each class
fpr = dict()
tpr = dict()
roc_auc = dict()
for i in range(n_classes):
fpr[i], tpr[i], _ = roc_curve(y_test_true_binalized[:, i], y_test_pred[:, i])
roc_auc[i] = auc(fpr[i], tpr[i])
# Compute micro-average ROC curve and ROC area
fpr["micro"], tpr["micro"], _ = roc_curve(y_test_true_binalized.ravel(), y_test_pred.ravel())
roc_auc["micro"] = auc(fpr["micro"], tpr["micro"])
sns.set(font_scale=1)
plt.figure()
lw = 2
colors = cycle(['aqua', 'darkorange', 'cornflowerblue'])
for i, color in zip(range(n_classes), colors):
plt.plot(fpr[i], tpr[i], color=color,
lw=lw, label=f'ROC curve for {labels[i]} (area = %0.2f)' % roc_auc[i])
plt.plot([0, 1], [0, 1], color='navy', lw=lw, linestyle='--')
plt.xlim([0.0, 1.0])
plt.ylim([0.0, 1.05])
plt.xlabel('False Positive Rate')
plt.ylabel('True Positive Rate')
plt.title('Receiver operating characteristic example')
plt.legend(loc="lower right")
plt.show()
plt.savefig(f'{model_output_dir}/roc_auc_curve.png')
def train(args):
model_output_dir = f'{args.output_dir}/data'
is_distributed = len(args.hosts) > 1
host_rank = args.hosts.index(args.current_host)
dist_ip_addrs = args.hosts
dist_ip_addrs.pop(host_rank)
# Load training and validation data
print(f'Train files: {os.listdir(args.train)}')
train_data = __load_input_data(args.train)
# Extract column info
target = args.fit_args['label']
columns = train_data.columns.tolist()
column_dict = {"columns":columns}
with open('columns.pkl', 'wb') as f:
pickle.dump(column_dict, f)
# Train models
predictor = task.fit(
train_data=train_data,
output_directory=args.model_dir,
**args.fit_args,
)
# Results summary
predictor.fit_summary(verbosity=3)
model_summary_fname_src = os.path.join(predictor.output_directory, 'SummaryOfModels.html')
model_summary_fname_tgt = os.path.join(model_output_dir, 'SummaryOfModels.html')
if os.path.exists(model_summary_fname_src):
shutil.copy(model_summary_fname_src, model_summary_fname_tgt)
# ensemble visualization
G = predictor._trainer.model_graph
remove = [node for node,degree in dict(G.degree()).items() if degree < 1]
G.remove_nodes_from(remove)
A = nx.nx_agraph.to_agraph(G)
A.graph_attr.update(rankdir='BT')
A.node_attr.update(fontsize=10)
for node in A.iternodes():
node.attr['shape'] = 'rectangle'
A.draw(os.path.join(model_output_dir, 'ensemble-model.png'), format='png', prog='dot')
# Optional test data
if args.test:
print(f'Test files: {os.listdir(args.test)}')
test_data = __load_input_data(args.test)
# Test data must be labeled for scoring
if args.fit_args['label'] in test_data:
# Leaderboard on test data
print('Running model on test data and getting Leaderboard...')
leaderboard = predictor.leaderboard(dataset=test_data, silent=True)
print(format_for_print(leaderboard), end='\n\n')
leaderboard.to_csv(f'{model_output_dir}/leaderboard.csv', index=False)
# Feature importance on test data
# Note: Feature importance must be calculated on held-out (test) data.
# If calculated on training data it will be biased due to overfitting.
if args.feature_importance:
print('Feature importance:')
# Increase rows to print feature importance
pd.set_option('display.max_rows', 500)
feature_importance = predictor.feature_importance(test_data)
feature_importance_df = pd.DataFrame(feature_importance, columns=['Importance score']).rename_axis(index='Feature')
print(feature_importance_df)
feature_importance_df.to_csv(f'{model_output_dir}/feature_importance.csv', index=True)
# Classification report and confusion matrix for classification model
if predictor.problem_type in [BINARY, MULTICLASS]:
from sklearn.metrics import classification_report, confusion_matrix
X_test = test_data.drop(args.fit_args['label'], axis=1)
y_test_true = test_data[args.fit_args['label']]
y_test_pred = predictor.predict(X_test)
y_test_pred_prob = predictor.predict_proba(X_test, as_multiclass=True)
report_dict = classification_report(y_test_true, y_test_pred, output_dict=True, labels=predictor.class_labels)
report_dict_df = pd.DataFrame(report_dict).T
report_dict_df.to_csv(f'{model_output_dir}/classification_report.csv', index=True)
cm = confusion_matrix(y_test_true, y_test_pred, labels=predictor.class_labels)
cm_df = pd.DataFrame(cm, predictor.class_labels, predictor.class_labels)
sns.set(font_scale=1)
cmap = 'coolwarm'
sns.heatmap(cm_df, annot=True, fmt='d', cmap=cmap)
plt.title('Confusion Matrix')
plt.ylabel('true label')
plt.xlabel('predicted label')
plt.show()
plt.savefig(f'{model_output_dir}/confusion_matrix.png')
get_roc_auc(y_test_true, y_test_pred_prob, predictor.class_labels, predictor.class_labels_internal, model_output_dir)
else:
warnings.warn('Skipping eval on test data since label column is not included.')
# Files summary
print(f'Model export summary:')
print(f"/opt/ml/model/: {os.listdir('/opt/ml/model/')}")
models_contents = os.listdir('/opt/ml/model/models')
print(f"/opt/ml/model/models: {models_contents}")
print(f"/opt/ml/model directory size: {du('/opt/ml/model/')}\n")
# ------------------------------------------------------------ #
# Training execution #
# ------------------------------------------------------------ #
def parse_args():
parser = argparse.ArgumentParser(
formatter_class=argparse.ArgumentDefaultsHelpFormatter)
parser.register('type','bool', lambda v: v.lower() in ('yes', 'true', 't', '1'))
# Environment parameters
parser.add_argument('--hosts', type=list, default=json.loads(os.environ['SM_HOSTS']))
parser.add_argument('--current-host', type=str, default=os.environ['SM_CURRENT_HOST'])
parser.add_argument('--num-gpus', type=int, default=os.environ['SM_NUM_GPUS'])
parser.add_argument('--model-dir', type=str, default=os.environ['SM_MODEL_DIR'])
parser.add_argument('--output-dir', type=str, default=os.environ['SM_OUTPUT_DIR'])
parser.add_argument('--train', type=str, default=os.environ['SM_CHANNEL_TRAINING'])
# Arguments to be passed to task.fit()
parser.add_argument('--fit_args', type=lambda s: ast.literal_eval(s),
default="{'presets': ['optimize_for_deployment']}",
help='https://autogluon.mxnet.io/api/autogluon.task.html#tabularprediction')
# Additional options
parser.add_argument('--feature_importance', type='bool', default=True)
return parser.parse_args()
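# Example --fit_args value (parsed with ast.literal_eval above; 'label' is
# required by the check in __main__, other keys follow autogluon's task.fit):
#   --fit_args "{'label': 'target', 'presets': ['optimize_for_deployment']}"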
if __name__ == "__main__":
start = timer()
args = parse_args()
# Verify label is included
if 'label' not in args.fit_args:
raise ValueError('"label" is a required parameter of "fit_args"!')
# Convert optional fit call hyperparameters from strings
if 'hyperparameters' in args.fit_args:
for model_type,options in args.fit_args['hyperparameters'].items():
assert isinstance(options, dict)
for k,v in options.items():
args.fit_args['hyperparameters'][model_type][k] = eval(v)
# Print SageMaker args
print('fit_args:')
for k,v in args.fit_args.items():
print(f'{k}, type: {type(v)}, value: {v}')
# Make test data optional
if os.environ.get('SM_CHANNEL_TESTING'):
args.test = os.environ['SM_CHANNEL_TESTING']
else:
args.test = None
train(args)
# Package inference code with model export
subprocess.call('mkdir /opt/ml/model/code'.split())
subprocess.call('cp /opt/ml/code/inference.py /opt/ml/model/code/'.split())
subprocess.call('cp columns.pkl /opt/ml/model/code/'.split())
elapsed_time = round(timer()-start,3)
print(f'Elapsed time: {elapsed_time} seconds. Training Completed!')
|
[
"matplotlib.pyplot.title",
"sklearn.metrics.confusion_matrix",
"pickle.dump",
"seaborn.heatmap",
"argparse.ArgumentParser",
"autogluon.TabularPrediction.fit",
"pandas.read_csv",
"sklearn.metrics.classification_report",
"matplotlib.pyplot.figure",
"itertools.cycle",
"os.path.join",
"pandas.set_option",
"shutil.copy",
"pandas.DataFrame",
"json.loads",
"os.path.exists",
"warnings.catch_warnings",
"numpy.reshape",
"seaborn.set",
"pandas.concat",
"matplotlib.pyplot.show",
"matplotlib.pyplot.ylim",
"matplotlib.pyplot.legend",
"subprocess.check_output",
"sklearn.preprocessing.label_binarize",
"matplotlib.pyplot.ylabel",
"os.listdir",
"matplotlib.pyplot.xlim",
"matplotlib.pyplot.plot",
"warnings.filterwarnings",
"sklearn.metrics.roc_curve",
"timeit.default_timer",
"networkx.nx_agraph.to_agraph",
"os.environ.get",
"sklearn.metrics.auc",
"ast.literal_eval",
"warnings.warn",
"matplotlib.pyplot.xlabel",
"matplotlib.pyplot.savefig"
] |
[((358, 383), 'warnings.catch_warnings', 'warnings.catch_warnings', ([], {}), '()\n', (381, 383), False, 'import warnings\n'), ((389, 451), 'warnings.filterwarnings', 'warnings.filterwarnings', (['"""ignore"""'], {'category': 'DeprecationWarning'}), "('ignore', category=DeprecationWarning)\n", (412, 451), False, 'import warnings\n'), ((1316, 1332), 'os.listdir', 'os.listdir', (['path'], {}), '(path)\n', (1326, 1332), False, 'import os\n'), ((1980, 2023), 'sklearn.preprocessing.label_binarize', 'label_binarize', (['y_test_true'], {'classes': 'labels'}), '(y_test_true, classes=labels)\n', (1994, 2023), False, 'from sklearn.preprocessing import label_binarize\n'), ((2858, 2889), 'sklearn.metrics.auc', 'auc', (["fpr['micro']", "tpr['micro']"], {}), "(fpr['micro'], tpr['micro'])\n", (2861, 2889), False, 'from sklearn.metrics import roc_curve, auc\n'), ((2899, 2920), 'seaborn.set', 'sns.set', ([], {'font_scale': '(1)'}), '(font_scale=1)\n', (2906, 2920), True, 'import seaborn as sns\n'), ((2925, 2937), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (2935, 2937), True, 'import matplotlib.pyplot as plt\n'), ((2962, 3009), 'itertools.cycle', 'cycle', (["['aqua', 'darkorange', 'cornflowerblue']"], {}), "(['aqua', 'darkorange', 'cornflowerblue'])\n", (2967, 3009), False, 'from itertools import cycle\n'), ((3200, 3261), 'matplotlib.pyplot.plot', 'plt.plot', (['[0, 1]', '[0, 1]'], {'color': '"""navy"""', 'lw': 'lw', 'linestyle': '"""--"""'}), "([0, 1], [0, 1], color='navy', lw=lw, linestyle='--')\n", (3208, 3261), True, 'import matplotlib.pyplot as plt\n'), ((3266, 3286), 'matplotlib.pyplot.xlim', 'plt.xlim', (['[0.0, 1.0]'], {}), '([0.0, 1.0])\n', (3274, 3286), True, 'import matplotlib.pyplot as plt\n'), ((3291, 3312), 'matplotlib.pyplot.ylim', 'plt.ylim', (['[0.0, 1.05]'], {}), '([0.0, 1.05])\n', (3299, 3312), True, 'import matplotlib.pyplot as plt\n'), ((3317, 3350), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""False Positive Rate"""'], {}), "('False Positive Rate')\n", (3327, 3350), True, 'import matplotlib.pyplot as plt\n'), ((3355, 3387), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""True Positive Rate"""'], {}), "('True Positive Rate')\n", (3365, 3387), True, 'import matplotlib.pyplot as plt\n'), ((3392, 3446), 'matplotlib.pyplot.title', 'plt.title', (['"""Receiver operating characteristic example"""'], {}), "('Receiver operating characteristic example')\n", (3401, 3446), True, 'import matplotlib.pyplot as plt\n'), ((3451, 3480), 'matplotlib.pyplot.legend', 'plt.legend', ([], {'loc': '"""lower right"""'}), "(loc='lower right')\n", (3461, 3480), True, 'import matplotlib.pyplot as plt\n'), ((3485, 3495), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (3493, 3495), True, 'import matplotlib.pyplot as plt\n'), ((3500, 3552), 'matplotlib.pyplot.savefig', 'plt.savefig', (['f"""{model_output_dir}/roc_auc_curve.png"""'], {}), "(f'{model_output_dir}/roc_auc_curve.png')\n", (3511, 3552), True, 'import matplotlib.pyplot as plt\n'), ((4190, 4276), 'autogluon.TabularPrediction.fit', 'task.fit', ([], {'train_data': 'train_data', 'output_directory': 'args.model_dir'}), '(train_data=train_data, output_directory=args.model_dir, **args.\n fit_args)\n', (4198, 4276), True, 'from autogluon import TabularPrediction as task\n'), ((4399, 4463), 'os.path.join', 'os.path.join', (['predictor.output_directory', '"""SummaryOfModels.html"""'], {}), "(predictor.output_directory, 'SummaryOfModels.html')\n", (4411, 4463), False, 'import os\n'), ((4494, 4548), 'os.path.join', 'os.path.join', 
(['model_output_dir', '"""SummaryOfModels.html"""'], {}), "(model_output_dir, 'SummaryOfModels.html')\n", (4506, 4548), False, 'import os\n'), ((4561, 4600), 'os.path.exists', 'os.path.exists', (['model_summary_fname_src'], {}), '(model_summary_fname_src)\n', (4575, 4600), False, 'import os\n'), ((4863, 4888), 'networkx.nx_agraph.to_agraph', 'nx.nx_agraph.to_agraph', (['G'], {}), '(G)\n', (4885, 4888), True, 'import networkx as nx\n'), ((8309, 8343), 'os.listdir', 'os.listdir', (['"""/opt/ml/model/models"""'], {}), "('/opt/ml/model/models')\n", (8319, 8343), False, 'import os\n'), ((8695, 8774), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'formatter_class': 'argparse.ArgumentDefaultsHelpFormatter'}), '(formatter_class=argparse.ArgumentDefaultsHelpFormatter)\n', (8718, 8774), False, 'import argparse\n'), ((9895, 9902), 'timeit.default_timer', 'timer', ([], {}), '()\n', (9900, 9902), True, 'from timeit import default_timer as timer\n'), ((10605, 10641), 'os.environ.get', 'os.environ.get', (['"""SM_CHANNEL_TESTING"""'], {}), "('SM_CHANNEL_TESTING')\n", (10619, 10641), False, 'import os\n'), ((2220, 2252), 'numpy.reshape', 'np.reshape', (['y_test_pred', '(-1, 1)'], {}), '(y_test_pred, (-1, 1))\n', (2230, 2252), True, 'import numpy as np\n'), ((2586, 2643), 'sklearn.metrics.roc_curve', 'roc_curve', (['y_test_true_binalized[:, i]', 'y_test_pred[:, i]'], {}), '(y_test_true_binalized[:, i], y_test_pred[:, i])\n', (2595, 2643), False, 'from sklearn.metrics import roc_curve, auc\n'), ((2665, 2684), 'sklearn.metrics.auc', 'auc', (['fpr[i]', 'tpr[i]'], {}), '(fpr[i], tpr[i])\n', (2668, 2684), False, 'from sklearn.metrics import roc_curve, auc\n'), ((3070, 3183), 'matplotlib.pyplot.plot', 'plt.plot', (['fpr[i]', 'tpr[i]'], {'color': 'color', 'lw': 'lw', 'label': "(f'ROC curve for {labels[i]} (area = %0.2f)' % roc_auc[i])"}), "(fpr[i], tpr[i], color=color, lw=lw, label=\n f'ROC curve for {labels[i]} (area = %0.2f)' % roc_auc[i])\n", (3078, 3183), True, 'import matplotlib.pyplot as plt\n'), ((4122, 4149), 'pickle.dump', 'pickle.dump', (['column_dict', 'f'], {}), '(column_dict, f)\n', (4133, 4149), False, 'import pickle\n'), ((4610, 4671), 'shutil.copy', 'shutil.copy', (['model_summary_fname_src', 'model_summary_fname_tgt'], {}), '(model_summary_fname_src, model_summary_fname_tgt)\n', (4621, 4671), False, 'import shutil\n'), ((5045, 5097), 'os.path.join', 'os.path.join', (['model_output_dir', '"""ensemble-model.png"""'], {}), "(model_output_dir, 'ensemble-model.png')\n", (5057, 5097), False, 'import os\n'), ((1363, 1397), 'pandas.read_csv', 'pd.read_csv', (['f"""{path}/{data_file}"""'], {}), "(f'{path}/{data_file}')\n", (1374, 1397), True, 'import pandas as pd\n'), ((8089, 8168), 'warnings.warn', 'warnings.warn', (['"""Skipping eval on test data since label column is not included."""'], {}), "('Skipping eval on test data since label column is not included.')\n", (8102, 8168), False, 'import warnings\n'), ((8958, 8992), 'json.loads', 'json.loads', (["os.environ['SM_HOSTS']"], {}), "(os.environ['SM_HOSTS'])\n", (8968, 8992), False, 'import json\n'), ((11024, 11031), 'timeit.default_timer', 'timer', ([], {}), '()\n', (11029, 11031), True, 'from timeit import default_timer as timer\n'), ((1464, 1484), 'pandas.concat', 'pd.concat', (['input_dfs'], {}), '(input_dfs)\n', (1473, 1484), True, 'import pandas as pd\n'), ((3853, 3875), 'os.listdir', 'os.listdir', (['args.train'], {}), '(args.train)\n', (3863, 3875), False, 'import os\n'), ((6102, 6140), 'pandas.set_option', 'pd.set_option', 
(['"""display.max_rows"""', '(500)'], {}), "('display.max_rows', 500)\n", (6115, 6140), True, 'import pandas as pd\n'), ((7083, 7184), 'sklearn.metrics.classification_report', 'classification_report', (['y_test_true', 'y_test_pred'], {'output_dict': '(True)', 'labels': 'predictor.class_labels'}), '(y_test_true, y_test_pred, output_dict=True, labels=\n predictor.class_labels)\n', (7104, 7184), False, 'from sklearn.metrics import classification_report, confusion_matrix\n'), ((7378, 7451), 'sklearn.metrics.confusion_matrix', 'confusion_matrix', (['y_test_true', 'y_test_pred'], {'labels': 'predictor.class_labels'}), '(y_test_true, y_test_pred, labels=predictor.class_labels)\n', (7394, 7451), False, 'from sklearn.metrics import classification_report, confusion_matrix\n'), ((7476, 7540), 'pandas.DataFrame', 'pd.DataFrame', (['cm', 'predictor.class_labels', 'predictor.class_labels'], {}), '(cm, predictor.class_labels, predictor.class_labels)\n', (7488, 7540), True, 'import pandas as pd\n'), ((7557, 7578), 'seaborn.set', 'sns.set', ([], {'font_scale': '(1)'}), '(font_scale=1)\n', (7564, 7578), True, 'import seaborn as sns\n'), ((7629, 7679), 'seaborn.heatmap', 'sns.heatmap', (['cm_df'], {'annot': '(True)', 'fmt': '"""d"""', 'cmap': 'cmap'}), "(cm_df, annot=True, fmt='d', cmap=cmap)\n", (7640, 7679), True, 'import seaborn as sns\n'), ((7696, 7725), 'matplotlib.pyplot.title', 'plt.title', (['"""Confusion Matrix"""'], {}), "('Confusion Matrix')\n", (7705, 7725), True, 'import matplotlib.pyplot as plt\n'), ((7742, 7766), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""true label"""'], {}), "('true label')\n", (7752, 7766), True, 'import matplotlib.pyplot as plt\n'), ((7783, 7812), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""predicted label"""'], {}), "('predicted label')\n", (7793, 7812), True, 'import matplotlib.pyplot as plt\n'), ((7829, 7839), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (7837, 7839), True, 'import matplotlib.pyplot as plt\n'), ((7856, 7911), 'matplotlib.pyplot.savefig', 'plt.savefig', (['f"""{model_output_dir}/confusion_matrix.png"""'], {}), "(f'{model_output_dir}/confusion_matrix.png')\n", (7867, 7911), True, 'import matplotlib.pyplot as plt\n'), ((8255, 8283), 'os.listdir', 'os.listdir', (['"""/opt/ml/model/"""'], {}), "('/opt/ml/model/')\n", (8265, 8283), False, 'import os\n'), ((9524, 9543), 'ast.literal_eval', 'ast.literal_eval', (['s'], {}), '(s)\n', (9540, 9543), False, 'import ast\n'), ((5198, 5219), 'os.listdir', 'os.listdir', (['args.test'], {}), '(args.test)\n', (5208, 5219), False, 'import os\n'), ((7213, 7238), 'pandas.DataFrame', 'pd.DataFrame', (['report_dict'], {}), '(report_dict)\n', (7225, 7238), True, 'import pandas as pd\n'), ((1077, 1121), 'subprocess.check_output', 'subprocess.check_output', (["['du', '-sh', path]"], {}), "(['du', '-sh', path])\n", (1100, 1121), False, 'import subprocess\n'), ((6258, 6320), 'pandas.DataFrame', 'pd.DataFrame', (['feature_importance'], {'columns': "['Importance score']"}), "(feature_importance, columns=['Importance score'])\n", (6270, 6320), True, 'import pandas as pd\n')]
|
'''
Adjusted Kapre Spectrogram and Melspectrogram classes which don't apply
batch-wise range compression when returning decibel (mel)spectrograms
'''
from kapre.time_frequency import Spectrogram, Melspectrogram
#from __future__ import absolute_import
import numpy as np
from keras import backend as K
def amplitude_to_decibel(x, amin=1e-10, dynamic_range=80.0):
"""[K] Convert (linear) amplitude to decibel (log10(x)).
x: Keras tensor or variable.
amin: minimum amplitude. amplitude smaller than `amin` is set to this.
    dynamic_range: dynamic_range in decibel (unused here since range compression is removed; kept for API compatibility)
"""
log_spec = 10 * K.log(K.maximum(x, amin)) / np.log(10).astype(K.floatx())
    #### These lines, which applied batch-wise range compression, are removed:
## log_spec = log_spec - K.max(log_spec) # [-?, 0]
## log_spec = K.maximum(log_spec, -1 * dynamic_range) # [-80, 0]
return log_spec
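# Worked example of the dB math above (a sketch; assumes a TensorFlow backend
# where K.eval can evaluate constant tensors):
#   K.eval(amplitude_to_decibel(K.constant([1.0, 0.1, 1e-12])))
# returns approximately [0., -10., -100.], since 10 * log10(1.0) = 0 dB,
# 10 * log10(0.1) = -10 dB, and 1e-12 is first clipped to amin=1e-10 (-100 dB).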
class SpectrogramModified(Spectrogram):
def call(self, x):
output = self._spectrogram_mono(x[:, 0:1, :])
if self.is_mono is False:
for ch_idx in range(1, self.n_ch):
output = K.concatenate((output,
self._spectrogram_mono(x[:, ch_idx:ch_idx + 1, :])),
axis=self.ch_axis_idx)
if self.power_spectrogram != 2.0:
output = K.pow(K.sqrt(output), self.power_spectrogram)
if self.return_decibel_spectrogram:
output = amplitude_to_decibel(output) ## only difference from non-modified class
return output
class MelspectrogramModified(Melspectrogram):
def call(self, x):
        # super(Melspectrogram, ...) resolves to Spectrogram.call, skipping
        # Melspectrogram.call (mirrors kapre's own implementation)
        power_spectrogram = super(Melspectrogram, self).call(x)
# now, channels_first: (batch_sample, n_ch, n_freq, n_time)
# channels_last: (batch_sample, n_freq, n_time, n_ch)
if self.image_data_format == 'channels_first':
power_spectrogram = K.permute_dimensions(power_spectrogram, [0, 1, 3, 2])
else:
power_spectrogram = K.permute_dimensions(power_spectrogram, [0, 3, 2, 1])
# now, whatever image_data_format, (batch_sample, n_ch, n_time, n_freq)
output = K.dot(power_spectrogram, self.freq2mel)
if self.image_data_format == 'channels_first':
output = K.permute_dimensions(output, [0, 1, 3, 2])
else:
output = K.permute_dimensions(output, [0, 3, 2, 1])
if self.power_melgram != 2.0:
output = K.pow(K.sqrt(output), self.power_melgram)
if self.return_decibel_melgram:
output = amplitude_to_decibel(output) ## only difference from non-modified class
return output
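# Minimal usage sketch (hedged: keyword names such as sr, n_mels, n_dft, n_hop
# and input_shape follow kapre 0.1.x's Melspectrogram signature and may differ
# in other versions):
if __name__ == '__main__':
    from keras.models import Sequential
    model = Sequential()
    model.add(MelspectrogramModified(sr=44100, n_mels=96, n_dft=1024, n_hop=512,
                                   input_shape=(1, 44100),  # (n_channels, n_samples)
                                   return_decibel_melgram=True))
    model.summary()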
|
[
"keras.backend.dot",
"keras.backend.sqrt",
"numpy.log",
"keras.backend.floatx",
"keras.backend.maximum",
"keras.backend.permute_dimensions"
] |
[((2148, 2187), 'keras.backend.dot', 'K.dot', (['power_spectrogram', 'self.freq2mel'], {}), '(power_spectrogram, self.freq2mel)\n', (2153, 2187), True, 'from keras import backend as K\n'), ((636, 646), 'keras.backend.floatx', 'K.floatx', ([], {}), '()\n', (644, 646), True, 'from keras import backend as K\n'), ((1897, 1950), 'keras.backend.permute_dimensions', 'K.permute_dimensions', (['power_spectrogram', '[0, 1, 3, 2]'], {}), '(power_spectrogram, [0, 1, 3, 2])\n', (1917, 1950), True, 'from keras import backend as K\n'), ((1997, 2050), 'keras.backend.permute_dimensions', 'K.permute_dimensions', (['power_spectrogram', '[0, 3, 2, 1]'], {}), '(power_spectrogram, [0, 3, 2, 1])\n', (2017, 2050), True, 'from keras import backend as K\n'), ((2264, 2306), 'keras.backend.permute_dimensions', 'K.permute_dimensions', (['output', '[0, 1, 3, 2]'], {}), '(output, [0, 1, 3, 2])\n', (2284, 2306), True, 'from keras import backend as K\n'), ((2342, 2384), 'keras.backend.permute_dimensions', 'K.permute_dimensions', (['output', '[0, 3, 2, 1]'], {}), '(output, [0, 3, 2, 1])\n', (2362, 2384), True, 'from keras import backend as K\n'), ((596, 614), 'keras.backend.maximum', 'K.maximum', (['x', 'amin'], {}), '(x, amin)\n', (605, 614), True, 'from keras import backend as K\n'), ((618, 628), 'numpy.log', 'np.log', (['(10)'], {}), '(10)\n', (624, 628), True, 'import numpy as np\n'), ((1338, 1352), 'keras.backend.sqrt', 'K.sqrt', (['output'], {}), '(output)\n', (1344, 1352), True, 'from keras import backend as K\n'), ((2450, 2464), 'keras.backend.sqrt', 'K.sqrt', (['output'], {}), '(output)\n', (2456, 2464), True, 'from keras import backend as K\n')]
|
# Generated by Django 3.1.2 on 2020-11-05 05:59
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
initial = True
dependencies = [
migrations.swappable_dependency(settings.AUTH_USER_MODEL),
]
operations = [
migrations.CreateModel(
name='Saved_Predictions',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('stock_ticker', models.CharField(max_length=5)),
('date_predicted', models.CharField(max_length=10)),
('img', models.TextField()),
],
),
migrations.CreateModel(
name='Stock_ID',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('stock_ticker', models.CharField(max_length=5)),
('company_name', models.CharField(max_length=64)),
('user', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL)),
],
),
]
|
[
"django.db.models.TextField",
"django.db.migrations.swappable_dependency",
"django.db.models.CharField",
"django.db.models.ForeignKey",
"django.db.models.AutoField"
] |
[((247, 304), 'django.db.migrations.swappable_dependency', 'migrations.swappable_dependency', (['settings.AUTH_USER_MODEL'], {}), '(settings.AUTH_USER_MODEL)\n', (278, 304), False, 'from django.db import migrations, models\n'), ((446, 539), 'django.db.models.AutoField', 'models.AutoField', ([], {'auto_created': '(True)', 'primary_key': '(True)', 'serialize': '(False)', 'verbose_name': '"""ID"""'}), "(auto_created=True, primary_key=True, serialize=False,\n verbose_name='ID')\n", (462, 539), False, 'from django.db import migrations, models\n'), ((571, 601), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(5)'}), '(max_length=5)\n', (587, 601), False, 'from django.db import migrations, models\n'), ((639, 670), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(10)'}), '(max_length=10)\n', (655, 670), False, 'from django.db import migrations, models\n'), ((697, 715), 'django.db.models.TextField', 'models.TextField', ([], {}), '()\n', (713, 715), False, 'from django.db import migrations, models\n'), ((849, 942), 'django.db.models.AutoField', 'models.AutoField', ([], {'auto_created': '(True)', 'primary_key': '(True)', 'serialize': '(False)', 'verbose_name': '"""ID"""'}), "(auto_created=True, primary_key=True, serialize=False,\n verbose_name='ID')\n", (865, 942), False, 'from django.db import migrations, models\n'), ((974, 1004), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(5)'}), '(max_length=5)\n', (990, 1004), False, 'from django.db import migrations, models\n'), ((1040, 1071), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(64)'}), '(max_length=64)\n', (1056, 1071), False, 'from django.db import migrations, models\n'), ((1099, 1195), 'django.db.models.ForeignKey', 'models.ForeignKey', ([], {'on_delete': 'django.db.models.deletion.CASCADE', 'to': 'settings.AUTH_USER_MODEL'}), '(on_delete=django.db.models.deletion.CASCADE, to=settings.\n AUTH_USER_MODEL)\n', (1116, 1195), False, 'from django.db import migrations, models\n')]
|
# -*- encoding: utf-8 -*-
#
# Copyright (c) 2016 Intel
#
# Authors: <NAME> <<EMAIL>>
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""Script for the sync tool."""
import sys
from oslo_log import log
from watcher.common import service
from watcher import conf
from watcher.decision_engine import sync
LOG = log.getLogger(__name__)
CONF = conf.CONF
def main():
LOG.info('Watcher sync started.')
service.prepare_service(sys.argv, CONF)
syncer = sync.Syncer()
syncer.sync()
LOG.info('Watcher sync finished.')
|
[
"watcher.common.service.prepare_service",
"oslo_log.log.getLogger",
"watcher.decision_engine.sync.Syncer"
] |
[((807, 830), 'oslo_log.log.getLogger', 'log.getLogger', (['__name__'], {}), '(__name__)\n', (820, 830), False, 'from oslo_log import log\n'), ((905, 944), 'watcher.common.service.prepare_service', 'service.prepare_service', (['sys.argv', 'CONF'], {}), '(sys.argv, CONF)\n', (928, 944), False, 'from watcher.common import service\n'), ((958, 971), 'watcher.decision_engine.sync.Syncer', 'sync.Syncer', ([], {}), '()\n', (969, 971), False, 'from watcher.decision_engine import sync\n')]
|
# Copyright (c) Microsoft Corporation.
# Licensed under the MIT license.
"""
LightGBM/Python inferencing script
"""
import os
import sys
import argparse
from lightgbm import Booster, Dataset
from subprocess import PIPE
from subprocess import run as subprocess_run
from subprocess import TimeoutExpired
# let's add the right PYTHONPATH for common module
COMMON_ROOT = os.path.abspath(os.path.join(os.path.dirname(__file__), '..', '..'))
if COMMON_ROOT not in sys.path:
print(f"Adding {COMMON_ROOT} to PYTHONPATH")
sys.path.append(str(COMMON_ROOT))
# before doing local import
from common.metrics import LogTimeBlock
from common.io import input_file_path
def get_arg_parser(parser=None):
"""Adds component/module arguments to a given argument parser.
Args:
parser (argparse.ArgumentParser): an argument parser instance
Returns:
ArgumentParser: the argument parser instance
Notes:
if parser is None, creates a new parser instance
"""
# add arguments that are specific to the module
if parser is None:
parser = argparse.ArgumentParser(__doc__)
group_i = parser.add_argument_group("Input Data")
group_i.add_argument("--lightgbm_exec",
required=True, type=str, help="Path to lightgbm.exe (file path)")
group_i.add_argument("--data",
required=True, type=input_file_path, help="Inferencing data location (file path)")
group_i.add_argument("--model",
required=False, type=input_file_path, help="Exported model location")
group_i.add_argument("--output",
required=False, default=None, type=str, help="Inferencing output location (file path)")
return parser
def run(args, other_args=None):
    """Run script with arguments (the core of the component)
    Args:
        args (argparse.namespace): command line arguments provided to script
        other_args (list[str], optional): list of arguments not known to the parser
    """
# create sub dir and output file
if args.output:
os.makedirs(args.output, exist_ok=True)
args.output = os.path.join(args.output, "predictions.txt")
if not os.path.isfile(args.lightgbm_exec):
raise Exception(f"Could not find lightgbm exec under path {args.lightgbm_exec}")
lightgbm_cli_command = [
args.lightgbm_exec,
"task=prediction",
f"data={args.data}",
f"input_model={args.model}",
"verbosity=2"
]
if args.output:
lightgbm_cli_command.append(f"output_result={args.output}")
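    # For reference, the assembled call is equivalent to a shell command like
    # (paths hypothetical):
    #   lightgbm task=prediction data=<data> input_model=<model> verbosity=2 \
    #       output_result=<output_dir>/predictions.txt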
metric_tags = {'framework':'lightgbm_cli','task':'score'}
print(f"Running .predict()")
with LogTimeBlock("inferencing", methods=['print'], tags=metric_tags):
lightgbm_cli_call = subprocess_run(
" ".join(lightgbm_cli_command),
stdout=PIPE,
stderr=PIPE,
universal_newlines=True,
check=False, # will not raise an exception if subprocess fails (so we capture with .returncode)
timeout=None
)
print(f"LightGBM stdout: {lightgbm_cli_call.stdout}")
print(f"LightGBM stderr: {lightgbm_cli_call.stderr}")
print(f"LightGBM return code: {lightgbm_cli_call.returncode}")
def main(cli_args=None):
""" Component main function, parses arguments and executes run() function.
Args:
cli_args (List[str], optional): list of args to feed script, useful for debugging. Defaults to None.
"""
# construct arg parser
parser = get_arg_parser()
args, unknown_args = parser.parse_known_args(cli_args)
# run the actual thing
run(args, unknown_args)
if __name__ == "__main__":
main()
|
[
"os.makedirs",
"argparse.ArgumentParser",
"os.path.dirname",
"common.metrics.LogTimeBlock",
"os.path.isfile",
"os.path.join"
] |
[((398, 423), 'os.path.dirname', 'os.path.dirname', (['__file__'], {}), '(__file__)\n', (413, 423), False, 'import os\n'), ((1083, 1115), 'argparse.ArgumentParser', 'argparse.ArgumentParser', (['__doc__'], {}), '(__doc__)\n', (1106, 1115), False, 'import argparse\n'), ((2001, 2040), 'os.makedirs', 'os.makedirs', (['args.output'], {'exist_ok': '(True)'}), '(args.output, exist_ok=True)\n', (2012, 2040), False, 'import os\n'), ((2063, 2107), 'os.path.join', 'os.path.join', (['args.output', '"""predictions.txt"""'], {}), "(args.output, 'predictions.txt')\n", (2075, 2107), False, 'import os\n'), ((2120, 2154), 'os.path.isfile', 'os.path.isfile', (['args.lightgbm_exec'], {}), '(args.lightgbm_exec)\n', (2134, 2154), False, 'import os\n'), ((2618, 2682), 'common.metrics.LogTimeBlock', 'LogTimeBlock', (['"""inferencing"""'], {'methods': "['print']", 'tags': 'metric_tags'}), "('inferencing', methods=['print'], tags=metric_tags)\n", (2630, 2682), False, 'from common.metrics import LogTimeBlock\n')]
|
#!/usr/bin/env python2
# -*- coding: utf-8 -*-
"""
Created on Sun Dec 24 18:01:21 2017
@author: johnkenny
"""
from django.conf.urls import url
from django.conf import settings
from django.conf.urls.static import static
from . import views
#from rest_framework import routers
urlpatterns = [
url(r'^hello', views.hello_world, name="hello_world"),
url(r'^getData', views.getData, name="getData"),
# Examples:
# url(r'^$', 'marine.views.home', name='home'),
# url(r'^blog/', include('blog.urls')),
url(r'^$', views.index, name='index'),
url(r'^team/', views.create_UpdateDB, name='create_UpdateDB'),
url(r'^squad/', views.squad, name='squad'),
] + static(settings.STATIC_URL, document_root=settings.STATIC_ROOT)
|
[
"django.conf.urls.static.static",
"django.conf.urls.url"
] |
[((701, 764), 'django.conf.urls.static.static', 'static', (['settings.STATIC_URL'], {'document_root': 'settings.STATIC_ROOT'}), '(settings.STATIC_URL, document_root=settings.STATIC_ROOT)\n', (707, 764), False, 'from django.conf.urls.static import static\n'), ((306, 358), 'django.conf.urls.url', 'url', (['"""^hello"""', 'views.hello_world'], {'name': '"""hello_world"""'}), "('^hello', views.hello_world, name='hello_world')\n", (309, 358), False, 'from django.conf.urls import url\n'), ((369, 415), 'django.conf.urls.url', 'url', (['"""^getData"""', 'views.getData'], {'name': '"""getData"""'}), "('^getData', views.getData, name='getData')\n", (372, 415), False, 'from django.conf.urls import url\n'), ((543, 579), 'django.conf.urls.url', 'url', (['"""^$"""', 'views.index'], {'name': '"""index"""'}), "('^$', views.index, name='index')\n", (546, 579), False, 'from django.conf.urls import url\n'), ((586, 646), 'django.conf.urls.url', 'url', (['"""^team/"""', 'views.create_UpdateDB'], {'name': '"""create_UpdateDB"""'}), "('^team/', views.create_UpdateDB, name='create_UpdateDB')\n", (589, 646), False, 'from django.conf.urls import url\n'), ((653, 694), 'django.conf.urls.url', 'url', (['"""^squad/"""', 'views.squad'], {'name': '"""squad"""'}), "('^squad/', views.squad, name='squad')\n", (656, 694), False, 'from django.conf.urls import url\n')]
|
import numpy
import joblib
import json
from azureml.core.model import Model
# from inference_schema.schema_decorators import input_schema, output_schema
def init():
# load the model from file into a global object
global model
# we assume that we have just one model
# AZUREML_MODEL_DIR is an environment variable created during deployment.
# It is the path to the model folder
# (./azureml-models/$MODEL_NAME/$VERSION)
model_path = Model.get_model_path(
model_name="driver_training_model.pkl")
model = joblib.load(model_path)
# input_sample = numpy.array([
# [1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0],
# [10.0, 9.0, 8.0, 7.0, 6.0, 5.0, 4.0, 3.0, 2.0, 1.0]])
# output_sample = numpy.array([
# 5021.509689995557,
# 3693.645386402646])
# Inference_schema generates a schema for your web service
# It then creates an OpenAPI (Swagger) specification for the web service
# at http://<scoring_base_url>/swagger.json
# @input_schema('data', NumpyParameterType(input_sample))
# @output_schema(NumpyParameterType(output_sample))
def run(raw_data, request_headers):
data = json.loads(raw_data)["data"]
    data = numpy.array(data)  # convert the deserialised JSON list into a numpy array for model.predict
result = model.predict(data)
# Demonstrate how we can log custom data into the Application Insights
# traces collection.
# The 'X-Ms-Request-id' value is generated internally and can be used to
# correlate a log entry with the Application Insights requests collection.
# The HTTP 'traceparent' header may be set by the caller to implement
# distributed tracing (per the W3C Trace Context proposed specification)
# and can be used to correlate the request to external systems.
print(
(
'{{"RequestId":"{0}", '
'"TraceParent":"{1}", '
'"NumberOfPredictions":{2}}}'
).format(
request_headers.get("X-Ms-Request-Id", ""),
request_headers.get("Traceparent", ""),
len(result),
)
)
return {"result": result.tolist()}
if __name__ == "__main__":
# Test scoring
init()
test_row = '{"data": [[0,1,8,1,0,0,1,0,0,0,0,0,0,0,12,1,0,0,0.5,0.3,0.610327781,7,1,-1,0,-1,1,1,1,2,1,65,1,0.316227766,0.669556409,0.352136337,3.464101615,0.1,0.8,0.6,1,1,6,3,6,2,9,1,1,1,12,0,1,1,0,0,1],[4,2,5,1,0,0,0,0,1,0,0,0,0,0,5,1,0,0,0.9,0.5,0.771362431,4,1,-1,0,0,11,1,1,0,1,103,1,0.316227766,0.60632002,0.358329457,2.828427125,0.4,0.5,0.4,3,3,8,4,10,2,7,2,0,3,10,0,0,1,1,0,1]]}'
prediction = run(test_row, {})
print("Test result: ", prediction)
|
[
"joblib.load",
"numpy.array",
"json.loads",
"azureml.core.model.Model.get_model_path"
] |
[((464, 524), 'azureml.core.model.Model.get_model_path', 'Model.get_model_path', ([], {'model_name': '"""driver_training_model.pkl"""'}), "(model_name='driver_training_model.pkl')\n", (484, 524), False, 'from azureml.core.model import Model\n'), ((546, 569), 'joblib.load', 'joblib.load', (['model_path'], {}), '(model_path)\n', (557, 569), False, 'import joblib\n'), ((1186, 1203), 'numpy.array', 'numpy.array', (['data'], {}), '(data)\n', (1197, 1203), False, 'import numpy\n'), ((1146, 1166), 'json.loads', 'json.loads', (['raw_data'], {}), '(raw_data)\n', (1156, 1166), False, 'import json\n')]
|
from core.db.decorator import query
@query.delete("delete from parse_data_details")
def clear_all():
pass
@query.insert("insert into parse_data_details(pid,binding_key,routing_keys,status) values({pid},{binding_key},"
"{routing_keys},{status})")
def insert_one(pid, binding_key, routing_keys, status):
pass
@query.delete("delete from parse_data_details where pid={pid}")
def delete_by_pid(pid):
pass
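# Usage sketch (assumption: the query decorators substitute the decorated
# function's arguments into the {placeholders} and execute the SQL):
#   insert_one(pid=42, binding_key='bk', routing_keys='rk1,rk2', status=1)
#   delete_by_pid(pid=42)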
|
[
"core.db.decorator.query.insert",
"core.db.decorator.query.delete"
] |
[((39, 85), 'core.db.decorator.query.delete', 'query.delete', (['"""delete from parse_data_details"""'], {}), "('delete from parse_data_details')\n", (51, 85), False, 'from core.db.decorator import query\n'), ((115, 260), 'core.db.decorator.query.insert', 'query.insert', (['"""insert into parse_data_details(pid,binding_key,routing_keys,status) values({pid},{binding_key},{routing_keys},{status})"""'], {}), "(\n 'insert into parse_data_details(pid,binding_key,routing_keys,status) values({pid},{binding_key},{routing_keys},{status})'\n )\n", (127, 260), False, 'from core.db.decorator import query\n'), ((335, 397), 'core.db.decorator.query.delete', 'query.delete', (['"""delete from parse_data_details where pid={pid}"""'], {}), "('delete from parse_data_details where pid={pid}')\n", (347, 397), False, 'from core.db.decorator import query\n')]
|
import subprocess
import yaml
import os
import sys
import shutil
import datetime
from attrdict import AttrDict
import itertools
import atexit
from .utils import save_yaml, load_yaml
import argparse
import kfp
from ray.util.multiprocessing import Pool
import ray.util.multiprocessing
ALL_CONFIGS = []
LOG_DIR = ""
FAILED_CONFIGS = []
SUCCESSFUL_CONFIGS = []
def run_experiment(
yaml_path,
timeout=240_000,
copy_script=False,
slurm=False,
n_gpus=1,
cuda_id=0,
kubeflow=False,
):
"""
Run either on a list of configurations or on a dict of possible parameters.
"""
global ALL_CONFIGS
global LOG_DIR
global FAILED_CONFIGS
global SUCCESSFUL_CONFIGS
# Parse Yaml to dict
configs = load_yaml(yaml_path)
print(configs)
if isinstance(configs, list):
ALL_CONFIGS = configs
if isinstance(configs, dict):
for (key, value) in configs.items():
if isinstance(value, list):
configs[key] = value
else:
configs[key] = [value]
# One script and log directory
assert len(configs["log_dir"]) == 1
assert len(configs["python_path"]) == 1
assert len(configs["dataset"]) == 1
# Compute the nested cartesian product
ALL_CONFIGS = [
dict(zip(configs, x)) for x in itertools.product(*configs.values())
]
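        # e.g. (a sketch) {'lr': [0.1, 0.01], 'bs': [32]} expands to
        # [{'lr': 0.1, 'bs': 32}, {'lr': 0.01, 'bs': 32}]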
experiment_uid = (
os.path.basename(yaml_path)[0 : -len(".yaml")]
+ "-"
+ datetime.datetime.now().strftime("%m%d-%H%M%S")
)
config = AttrDict(ALL_CONFIGS[0])
LOG_DIR = os.path.join(
config.log_dir,
config.dataset,
os.path.basename(config.python_path)[0 : -len(".py")],
experiment_uid,
)
if not os.path.exists(LOG_DIR):
os.makedirs(LOG_DIR)
# Save a copy of the configs and the script
shutil.copy(
yaml_path,
os.path.join(LOG_DIR, os.path.basename(yaml_path))[0 : -len(".yaml")]
+ ".config",
)
python_path_copy = os.path.join(LOG_DIR, os.path.basename(config.python_path))
shutil.copy(config.python_path, python_path_copy)
# When the script is calling local modules we can't really run on the copy
if copy_script:
python_path = python_path_copy
else:
python_path = config.python_path
def run_config(i, config, cuda_id=cuda_id):
print(
f"\n\n{datetime.datetime.now()}\nRunning configuration {i+1}/{len(ALL_CONFIGS)}.\n {config}\n Cuda id: {cuda_id}"
)
# config = AttrDict(config)
# Load the model and prepare the logs
# python_path = config.python_path
log_path = os.path.join(
LOG_DIR,
str(i) + ".yaml",
)
# Generate temporary Abseil flagfile
flagfile_path = os.path.join(LOG_DIR, "flags.txt")
with open(flagfile_path, "w") as f:
for (flag, value) in config.items():
if flag == "device" and cuda_id > 0:
f.write(f"--device=cuda:{cuda_id}\n")
else:
f.write(f"--{flag}={value}\n")
f.write(f"--log_path={log_path}\n")
# No need to define these flags inside the model
f.write(f"--undefok=dataset,python_path\n")
# Run the model on the given configuration
shell_path = log_path[0 : -len(".yaml")] + ".log"
with open(shell_path, "w") as f:
if kubeflow:
pipeline_path = config["pipeline_path"]
config.pop("log_dir", None)
config.pop("pipeline_path", None)
config["gcs_prefix"] = os.path.join(LOG_DIR, f"{i}.yaml")
# Submit a pipeline run
result = kfp.Client().create_run_from_pipeline_package(
pipeline_path,
run_name=str(experiment_uid) + f" {i}",
experiment_name="",
arguments=config,
)
print("Kubeflow run id:")
print(result)
else:
try:
command = ["python", python_path, "--flagfile", flagfile_path]
if slurm:
command = ["srun"] + command
result = subprocess.run(
command,
stdout=f,
stderr=f,
text=True,
timeout=timeout,
check=True,
)
print(result)
SUCCESSFUL_CONFIGS.append(config)
except Exception as e:
print(f"This configuration failed. \n {e}")
FAILED_CONFIGS.append(config)
if n_gpus <= 1:
for i, config in enumerate(ALL_CONFIGS):
run_config(i, config)
else:
with Pool(processes=n_gpus) as pool:
pids_list = pool.map(lambda i: (i, os.getpid()), range(n_gpus))
pids = {}
for (i, pid) in pids_list:
pids[pid] = i
print(pids)
def run_config_process(args):
i = args[0]
config = args[1]
cuda_id = pids[os.getpid()]
run_config(i, config, cuda_id)
pool.map(run_config_process, list(enumerate(ALL_CONFIGS)))
def is_yaml(filename):
    return filename.endswith((".yaml", ".yml"))
def main(configs, slurm=False, n_gpus=1, cuda_id=0):
def save_configs():
print(f"Saving configurations to {LOG_DIR} before exit.")
save_yaml(os.path.join(LOG_DIR, "failed_configs.config"), FAILED_CONFIGS)
save_yaml(
os.path.join(LOG_DIR, "successful_configs.config"), SUCCESSFUL_CONFIGS
)
save_yaml(os.path.join(LOG_DIR, "all_configs.config"), ALL_CONFIGS)
atexit.register(save_configs)
yaml_paths = []
if os.path.isdir(configs):
for config_file in os.listdir(configs):
if is_yaml(config_file):
yaml_paths.append(os.path.join(configs, config_file))
elif is_yaml(configs):
yaml_paths.append(configs)
print(f"Running rex on the following yaml files:\n {yaml_paths}")
for yaml_path in yaml_paths:
run_experiment(
os.path.realpath(yaml_path), slurm=slurm, n_gpus=n_gpus, cuda_id=cuda_id
)
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument(
"configs",
type=str,
help="Path to a yaml config file or to a directory of config files",
)
parser.add_argument("--slurm", dest="slurm", action="store_true")
parser.add_argument(
"--n_gpus",
type=int,
default=1,
)
parser.add_argument(
"--cuda_id",
type=int,
default=0,
)
args = parser.parse_args()
main(args.configs, args.slurm, args.n_gpus, args.cuda_id)
|
[
"atexit.register",
"subprocess.run",
"os.getpid",
"argparse.ArgumentParser",
"os.makedirs",
"os.path.basename",
"os.path.isdir",
"os.path.realpath",
"os.path.exists",
"datetime.datetime.now",
"ray.util.multiprocessing.Pool",
"kfp.Client",
"attrdict.AttrDict",
"os.path.join",
"os.listdir",
"shutil.copy"
] |
[((1571, 1595), 'attrdict.AttrDict', 'AttrDict', (['ALL_CONFIGS[0]'], {}), '(ALL_CONFIGS[0])\n', (1579, 1595), False, 'from attrdict import AttrDict\n'), ((2108, 2157), 'shutil.copy', 'shutil.copy', (['config.python_path', 'python_path_copy'], {}), '(config.python_path, python_path_copy)\n', (2119, 2157), False, 'import shutil\n'), ((6003, 6032), 'atexit.register', 'atexit.register', (['save_configs'], {}), '(save_configs)\n', (6018, 6032), False, 'import atexit\n'), ((6060, 6082), 'os.path.isdir', 'os.path.isdir', (['configs'], {}), '(configs)\n', (6073, 6082), False, 'import os\n'), ((6565, 6590), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {}), '()\n', (6588, 6590), False, 'import argparse\n'), ((1776, 1799), 'os.path.exists', 'os.path.exists', (['LOG_DIR'], {}), '(LOG_DIR)\n', (1790, 1799), False, 'import os\n'), ((1809, 1829), 'os.makedirs', 'os.makedirs', (['LOG_DIR'], {}), '(LOG_DIR)\n', (1820, 1829), False, 'import os\n'), ((2066, 2102), 'os.path.basename', 'os.path.basename', (['config.python_path'], {}), '(config.python_path)\n', (2082, 2102), False, 'import os\n'), ((2839, 2873), 'os.path.join', 'os.path.join', (['LOG_DIR', '"""flags.txt"""'], {}), "(LOG_DIR, 'flags.txt')\n", (2851, 2873), False, 'import os\n'), ((6111, 6130), 'os.listdir', 'os.listdir', (['configs'], {}), '(configs)\n', (6121, 6130), False, 'import os\n'), ((1680, 1716), 'os.path.basename', 'os.path.basename', (['config.python_path'], {}), '(config.python_path)\n', (1696, 1716), False, 'import os\n'), ((4948, 4970), 'ray.util.multiprocessing.Pool', 'Pool', ([], {'processes': 'n_gpus'}), '(processes=n_gpus)\n', (4952, 4970), False, 'from ray.util.multiprocessing import Pool\n'), ((5746, 5792), 'os.path.join', 'os.path.join', (['LOG_DIR', '"""failed_configs.config"""'], {}), "(LOG_DIR, 'failed_configs.config')\n", (5758, 5792), False, 'import os\n'), ((5841, 5891), 'os.path.join', 'os.path.join', (['LOG_DIR', '"""successful_configs.config"""'], {}), "(LOG_DIR, 'successful_configs.config')\n", (5853, 5891), False, 'import os\n'), ((5940, 5983), 'os.path.join', 'os.path.join', (['LOG_DIR', '"""all_configs.config"""'], {}), "(LOG_DIR, 'all_configs.config')\n", (5952, 5983), False, 'import os\n'), ((6440, 6467), 'os.path.realpath', 'os.path.realpath', (['yaml_path'], {}), '(yaml_path)\n', (6456, 6467), False, 'import os\n'), ((1432, 1459), 'os.path.basename', 'os.path.basename', (['yaml_path'], {}), '(yaml_path)\n', (1448, 1459), False, 'import os\n'), ((1503, 1526), 'datetime.datetime.now', 'datetime.datetime.now', ([], {}), '()\n', (1524, 1526), False, 'import datetime\n'), ((3681, 3715), 'os.path.join', 'os.path.join', (['LOG_DIR', 'f"""{i}.yaml"""'], {}), "(LOG_DIR, f'{i}.yaml')\n", (3693, 3715), False, 'import os\n'), ((1945, 1972), 'os.path.basename', 'os.path.basename', (['yaml_path'], {}), '(yaml_path)\n', (1961, 1972), False, 'import os\n'), ((2431, 2454), 'datetime.datetime.now', 'datetime.datetime.now', ([], {}), '()\n', (2452, 2454), False, 'import datetime\n'), ((4327, 4414), 'subprocess.run', 'subprocess.run', (['command'], {'stdout': 'f', 'stderr': 'f', 'text': '(True)', 'timeout': 'timeout', 'check': '(True)'}), '(command, stdout=f, stderr=f, text=True, timeout=timeout,\n check=True)\n', (4341, 4414), False, 'import subprocess\n'), ((5306, 5317), 'os.getpid', 'os.getpid', ([], {}), '()\n', (5315, 5317), False, 'import os\n'), ((6203, 6237), 'os.path.join', 'os.path.join', (['configs', 'config_file'], {}), '(configs, config_file)\n', (6215, 6237), False, 'import os\n'), ((3782, 3794), 
'kfp.Client', 'kfp.Client', ([], {}), '()\n', (3792, 3794), False, 'import kfp\n'), ((5027, 5038), 'os.getpid', 'os.getpid', ([], {}), '()\n', (5036, 5038), False, 'import os\n')]
|
import subprocess
from pydantic.main import BaseModel
from morpho.consumer import RestWorkConsumer
from morpho.rest.models import (
PipeService,
TransformDocumentPipeRequest,
TransformDocumentResponse,
)
from morpho.client import Client, ClientConfig
import sys
import pytest
from threading import Thread
from multiprocessing import Process
from pathlib import Path
import requests as r
from morpho.server import Service
import time
# TODO: create fixture for rest server
# TODO: rename to test_rest.py
# retry for 60 seconds
MAX_RETRIES = 120
def work(document: str) -> str:
return document
def morpho_test_service(**kwargs):
Service(
name="TEST", version="0.0.1", consumers=[RestWorkConsumer], worker=work
).run(**kwargs)
# services for pipe
def work1(document: str):
print("work1 called")
return document + ",TEST1"
def work2(document: str):
print("work2 called")
return document + ",TEST2"
def work3(document: str):
print("work3 called")
return document + ",TEST3"
def morpho_test_service_1(**kwargs):
Service(name="TEST1", version="0.0.1", worker=work1).run(**kwargs)
def morpho_test_service_2(**kwargs):
Service(name="TEST2", version="0.0.1", worker=work2).run(**kwargs)
def morpho_test_service_3(**kwargs):
Service(name="TEST3", version="0.0.1", worker=work3).run(**kwargs)
import requests
@pytest.fixture(scope="module")
def eureka_server():
integration_path = Path(__file__).parent
eureka_jar_path = integration_path.joinpath("eureka-0.0.1-SNAPSHOT.jar")
if not eureka_jar_path.exists():
raise FileNotFoundError(f"{eureka_jar_path} not found!")
eureka_process = subprocess.Popen(
["java", "-jar", "{}/eureka-0.0.1-SNAPSHOT.jar".format(integration_path)]
)
    # wait for eureka service to be ready
    result = None
    for _ in range(MAX_RETRIES):
        try:
            result = requests.get("http://localhost:8761/actuator/health")
            break
        except requests.exceptions.ConnectionError:
            pass
        time.sleep(0.5)
    if result is None or result.json()["status"] != "UP":
        eureka_process.terminate()
        return
# launch services
sys.argv = [sys.argv[0], "--register"]
services = [
morpho_test_service_1,
morpho_test_service_2,
morpho_test_service_3,
]
service_ports = [50001 + index for index in range(3)]
service_processes = [
Process(
target=run, kwargs={"port_to_listen": port, "register": True}, daemon=True,
)
for port, run in zip(service_ports, services)
]
for service in service_processes:
service.start()
services_ready = set()
for _ in range(MAX_RETRIES):
for service_port in service_ports:
if service_port in services_ready:
continue
try:
result_service = requests.get(f"http://localhost:{service_port}/health")
except requests.exceptions.ConnectionError:
continue
except requests.exceptions.RequestException as exception:
                raise SystemExit(exception)
if (
result_service.status_code == 200
and result_service.json()["status"] == "UP"
):
services_ready.add(service_port)
if len(services_ready) == 3:
break
time.sleep(0.5)
yield
print("Module Session Scope Closed.")
for process in service_processes:
process.terminate()
process.join()
print(
"process: "
+ str(process.ident)
+ " gracefully stopped with: "
+ str(process.exitcode)
)
eureka_process.terminate()
@pytest.fixture(scope="module")
def rest_server():
# TODO: use a client to interact with the server to use more than one component?
# remove all other program arguments and add the rest protocol
# sys.argv = [sys.argv[0], "--protocols=rest"]
sys.argv = [sys.argv[0]]
# app = QDS_TEST
service_process = Process(
target=morpho_test_service, kwargs={"port_to_listen": 50000}, daemon=True
)
# app_thread = Thread(target=morpho_test_service.run, daemon=True)
service_process.start()
result = None
for _ in range(MAX_RETRIES):
try:
result = requests.get("http://localhost:50000/health")
break
except requests.exceptions.ConnectionError:
pass
time.sleep(0.5)
if (
not result is None
and result.status_code == 200
and result.json()["status"] == "UP"
):
yield
else:
raise Exception("Something went wrong while starting the service process!")
service_process.terminate()
def test_rest_transform(rest_server):
result = r.post(
"http://1192.168.127.12:50000/v1/document/transform",
json={"document": "Hello World!", "service_name": "QDS.TEST"},
)
assert result.status_code == 200
assert result.text
transform_document_response = TransformDocumentResponse(**result.json())
assert transform_document_response.document == "Hello World!"
assert transform_document_response.error == []
def test_rest_list(rest_server):
result = r.get("http://127.0.0.1:50000/v1/service/list")
assert result.status_code == 200
assert result.text
document_response = result.json()
assert document_response["services"] == [{"name": "TEST", "options": {}}]
def test_rest_transform_pipe(eureka_server):
print("start pipe test")
config = ClientConfig("http://127.0.0.1:8761/eureka")
client = Client(config)
response = client.transform_document_pipe(
TransformDocumentPipeRequest(
document="Hello World",
services=[
PipeService(name="TEST1"),
PipeService(name="TEST2"),
PipeService(name="TEST3"),
],
)
)
assert response.document == "Hello World,TEST1,TEST2,TEST3"
assert response.last_transformer == "TEST3"
|
[
"morpho.rest.models.PipeService",
"morpho.client.ClientConfig",
"pytest.fixture",
"time.sleep",
"morpho.client.Client",
"pathlib.Path",
"requests.get",
"requests.post",
"multiprocessing.Process",
"morpho.server.Service"
] |
[((1392, 1422), 'pytest.fixture', 'pytest.fixture', ([], {'scope': '"""module"""'}), "(scope='module')\n", (1406, 1422), False, 'import pytest\n'), ((3744, 3774), 'pytest.fixture', 'pytest.fixture', ([], {'scope': '"""module"""'}), "(scope='module')\n", (3758, 3774), False, 'import pytest\n'), ((4070, 4156), 'multiprocessing.Process', 'Process', ([], {'target': 'morpho_test_service', 'kwargs': "{'port_to_listen': 50000}", 'daemon': '(True)'}), "(target=morpho_test_service, kwargs={'port_to_listen': 50000},\n daemon=True)\n", (4077, 4156), False, 'from multiprocessing import Process\n'), ((4827, 4955), 'requests.post', 'r.post', (['"""http://1192.168.127.12:50000/v1/document/transform"""'], {'json': "{'document': 'Hello World!', 'service_name': 'QDS.TEST'}"}), "('http://1192.168.127.12:50000/v1/document/transform', json={\n 'document': 'Hello World!', 'service_name': 'QDS.TEST'})\n", (4833, 4955), True, 'import requests as r\n'), ((5277, 5324), 'requests.get', 'r.get', (['"""http://127.0.0.1:50000/v1/service/list"""'], {}), "('http://127.0.0.1:50000/v1/service/list')\n", (5282, 5324), True, 'import requests as r\n'), ((5590, 5634), 'morpho.client.ClientConfig', 'ClientConfig', (['"""http://127.0.0.1:8761/eureka"""'], {}), "('http://127.0.0.1:8761/eureka')\n", (5602, 5634), False, 'from morpho.client import Client, ClientConfig\n'), ((5648, 5662), 'morpho.client.Client', 'Client', (['config'], {}), '(config)\n', (5654, 5662), False, 'from morpho.client import Client, ClientConfig\n'), ((1467, 1481), 'pathlib.Path', 'Path', (['__file__'], {}), '(__file__)\n', (1471, 1481), False, 'from pathlib import Path\n'), ((2053, 2068), 'time.sleep', 'time.sleep', (['(0.5)'], {}), '(0.5)\n', (2063, 2068), False, 'import time\n'), ((2438, 2525), 'multiprocessing.Process', 'Process', ([], {'target': 'run', 'kwargs': "{'port_to_listen': port, 'register': True}", 'daemon': '(True)'}), "(target=run, kwargs={'port_to_listen': port, 'register': True},\n daemon=True)\n", (2445, 2525), False, 'from multiprocessing import Process\n'), ((3392, 3407), 'time.sleep', 'time.sleep', (['(0.5)'], {}), '(0.5)\n', (3402, 3407), False, 'import time\n'), ((4493, 4508), 'time.sleep', 'time.sleep', (['(0.5)'], {}), '(0.5)\n', (4503, 4508), False, 'import time\n'), ((655, 740), 'morpho.server.Service', 'Service', ([], {'name': '"""TEST"""', 'version': '"""0.0.1"""', 'consumers': '[RestWorkConsumer]', 'worker': 'work'}), "(name='TEST', version='0.0.1', consumers=[RestWorkConsumer], worker=work\n )\n", (662, 740), False, 'from morpho.server import Service\n'), ((1084, 1136), 'morpho.server.Service', 'Service', ([], {'name': '"""TEST1"""', 'version': '"""0.0.1"""', 'worker': 'work1'}), "(name='TEST1', version='0.0.1', worker=work1)\n", (1091, 1136), False, 'from morpho.server import Service\n'), ((1194, 1246), 'morpho.server.Service', 'Service', ([], {'name': '"""TEST2"""', 'version': '"""0.0.1"""', 'worker': 'work2'}), "(name='TEST2', version='0.0.1', worker=work2)\n", (1201, 1246), False, 'from morpho.server import Service\n'), ((1304, 1356), 'morpho.server.Service', 'Service', ([], {'name': '"""TEST3"""', 'version': '"""0.0.1"""', 'worker': 'work3'}), "(name='TEST3', version='0.0.1', worker=work3)\n", (1311, 1356), False, 'from morpho.server import Service\n'), ((1904, 1957), 'requests.get', 'requests.get', (['"""http://localhost:8761/actuator/health"""'], {}), "('http://localhost:8761/actuator/health')\n", (1916, 1957), False, 'import requests\n'), ((4352, 4397), 'requests.get', 'requests.get', 
(['"""http://localhost:50000/health"""'], {}), "('http://localhost:50000/health')\n", (4364, 4397), False, 'import requests\n'), ((2893, 2948), 'requests.get', 'requests.get', (['f"""http://localhost:{service_port}/health"""'], {}), "(f'http://localhost:{service_port}/health')\n", (2905, 2948), False, 'import requests\n'), ((5823, 5848), 'morpho.rest.models.PipeService', 'PipeService', ([], {'name': '"""TEST1"""'}), "(name='TEST1')\n", (5834, 5848), False, 'from morpho.rest.models import PipeService, TransformDocumentPipeRequest, TransformDocumentResponse\n'), ((5866, 5891), 'morpho.rest.models.PipeService', 'PipeService', ([], {'name': '"""TEST2"""'}), "(name='TEST2')\n", (5877, 5891), False, 'from morpho.rest.models import PipeService, TransformDocumentPipeRequest, TransformDocumentResponse\n'), ((5909, 5934), 'morpho.rest.models.PipeService', 'PipeService', ([], {'name': '"""TEST3"""'}), "(name='TEST3')\n", (5920, 5934), False, 'from morpho.rest.models import PipeService, TransformDocumentPipeRequest, TransformDocumentResponse\n')]
|
from nose.tools import eq_
from mock import MagicMock, patch
from bufferapp.models.profile import Profile, PATHS
mocked_response = {
'name': 'me',
    'service': 'twitter',
'id': 1
}
def test_profile_schedules_getter():
'''
    Test schedules getter against the buffer api
'''
mocked_api = MagicMock()
mocked_api.get.return_value = '123'
profile = Profile(mocked_api, mocked_response)
eq_(profile.schedules, '123')
mocked_api.get.assert_called_once_with(url = PATHS['GET_SCHEDULES'] % 1)
def test_profile_schedules_setter():
'''
Test schedules setter from buffer api
'''
mocked_api = MagicMock()
mocked_api.get.return_value = '123'
profile = Profile(mocked_api, mocked_response)
profile.schedules = {
'times': ['mo']
}
mocked_api.post.assert_called_once_with(url=PATHS['UPDATE_SCHEDULES'] % 1,
data='schedules[0][times][]=mo&')
def test_profile_updates():
'''
Test updates relationship with a profile
'''
mocked_api = MagicMock()
with patch('bufferapp.models.profile.Updates') as mocked_updates:
profile = Profile(api=mocked_api, raw_response={'id': 1})
updates = profile.updates
mocked_updates.assert_called_once_with(api=mocked_api, profile_id=1)
|
[
"mock.MagicMock",
"nose.tools.eq_",
"bufferapp.models.profile.Profile",
"mock.patch"
] |
[((297, 308), 'mock.MagicMock', 'MagicMock', ([], {}), '()\n', (306, 308), False, 'from mock import MagicMock, patch\n'), ((360, 396), 'bufferapp.models.profile.Profile', 'Profile', (['mocked_api', 'mocked_response'], {}), '(mocked_api, mocked_response)\n', (367, 396), False, 'from bufferapp.models.profile import Profile, PATHS\n'), ((400, 429), 'nose.tools.eq_', 'eq_', (['profile.schedules', '"""123"""'], {}), "(profile.schedules, '123')\n", (403, 429), False, 'from nose.tools import eq_\n'), ((613, 624), 'mock.MagicMock', 'MagicMock', ([], {}), '()\n', (622, 624), False, 'from mock import MagicMock, patch\n'), ((676, 712), 'bufferapp.models.profile.Profile', 'Profile', (['mocked_api', 'mocked_response'], {}), '(mocked_api, mocked_response)\n', (683, 712), False, 'from bufferapp.models.profile import Profile, PATHS\n'), ((984, 995), 'mock.MagicMock', 'MagicMock', ([], {}), '()\n', (993, 995), False, 'from mock import MagicMock, patch\n'), ((1004, 1045), 'mock.patch', 'patch', (['"""bufferapp.models.profile.Updates"""'], {}), "('bufferapp.models.profile.Updates')\n", (1009, 1045), False, 'from mock import MagicMock, patch\n'), ((1079, 1126), 'bufferapp.models.profile.Profile', 'Profile', ([], {'api': 'mocked_api', 'raw_response': "{'id': 1}"}), "(api=mocked_api, raw_response={'id': 1})\n", (1086, 1126), False, 'from bufferapp.models.profile import Profile, PATHS\n')]
|
from typing import List
from trescope.config import Config, AnchorType, AnchorCM
from trescope.core.Utils import toListIfNumpyOrTensorArray
class VectorField3DConfig(Config):
"""Config for :py:meth:`trescope.Output.plotVectorField3D`"""
def __init__(self):
super().__init__()
self.__sizeFactor: float = .5
self.__autoScaleByLocation = False
self.__colorScale = [[0, 0x88000000], [1, 0x88000000]]
self.__x: List[float] = []
self.__y: List[float] = []
self.__z: List[float] = []
self.__anchor: str = str(AnchorCM)
def sizeFactor(self, sizeFactor: float):
"""
        Specify size factor.
        :param sizeFactor: size factor, default .5
        :return: self, for chain call
"""
self.__sizeFactor = sizeFactor
return self
def anchor(self, anchor: AnchorType):
"""
        Specify anchor type.
        :param anchor: anchor
        :return: self, for chain call
"""
self.__anchor = str(anchor)
return self
def autoScaleByLocation(self, autoScale: bool):
"""
        Specify whether to auto scale by location.
        :param autoScale: auto scale, default `False`
        :return: self, for chain call
"""
self.__autoScaleByLocation = autoScale
return self
def locations(self, x: List[float], y: List[float], z: List[float]):
"""
        Specify locations.
        :param x: x
        :param y: y
        :param z: z
        :return: self, for chain call
"""
self.__x, self.__y, self.__z = x, y, z
return self
def color(self, color: int):
"""
        Specify color.
        :param color: color
        :return: self, for chain call
"""
self.__colorScale = [[0, color], [1, color]]
return self
def toDict(self):
return {
**super().toDict(),
'sizeFactor': self.__sizeFactor,
'autoScaleByLocation': self.__autoScaleByLocation,
'colorScale': self.__colorScale,
'locationX': toListIfNumpyOrTensorArray(self.__x),
'locationY': toListIfNumpyOrTensorArray(self.__y),
'locationZ': toListIfNumpyOrTensorArray(self.__z),
'anchor': self.__anchor
}
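# Chained-call usage sketch (values hypothetical; colors are packed ARGB ints,
# matching the default colorScale above):
#   config = (VectorField3DConfig()
#             .locations([0.0, 1.0], [0.0, 1.0], [0.0, 1.0])
#             .sizeFactor(1.0)
#             .color(0xFF00FF00)
#             .anchor(AnchorCM))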
|
[
"trescope.core.Utils.toListIfNumpyOrTensorArray"
] |
[((2103, 2139), 'trescope.core.Utils.toListIfNumpyOrTensorArray', 'toListIfNumpyOrTensorArray', (['self.__x'], {}), '(self.__x)\n', (2129, 2139), False, 'from trescope.core.Utils import toListIfNumpyOrTensorArray\n'), ((2166, 2202), 'trescope.core.Utils.toListIfNumpyOrTensorArray', 'toListIfNumpyOrTensorArray', (['self.__y'], {}), '(self.__y)\n', (2192, 2202), False, 'from trescope.core.Utils import toListIfNumpyOrTensorArray\n'), ((2229, 2265), 'trescope.core.Utils.toListIfNumpyOrTensorArray', 'toListIfNumpyOrTensorArray', (['self.__z'], {}), '(self.__z)\n', (2255, 2265), False, 'from trescope.core.Utils import toListIfNumpyOrTensorArray\n')]
|
# -*- coding: utf-8 -*-
from south.utils import datetime_utils as datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
def forwards(self, orm):
# Adding field 'Faq.index'
db.add_column(u'home_faq', 'index',
self.gf('django.db.models.fields.IntegerField')(null=True, blank=True),
keep_default=False)
def backwards(self, orm):
# Deleting field 'Faq.index'
db.delete_column(u'home_faq', 'index')
models = {
u'home.faq': {
'Meta': {'ordering': "['index', 'created']", 'object_name': 'Faq'},
'answer': ('django.db.models.fields.TextField', [], {'max_length': '3000'}),
'created': ('model_utils.fields.AutoCreatedField', [], {'default': 'datetime.datetime.now'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'index': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'modified': ('model_utils.fields.AutoLastModifiedField', [], {'default': 'datetime.datetime.now'}),
'question': ('django.db.models.fields.TextField', [], {'max_length': '200'})
}
}
complete_apps = ['home']
|
[
"south.db.db.delete_column"
] |
[((523, 561), 'south.db.db.delete_column', 'db.delete_column', (['u"""home_faq"""', '"""index"""'], {}), "(u'home_faq', 'index')\n", (539, 561), False, 'from south.db import db\n')]
|
# Copyright 2020 <NAME>
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import asyncio
from collections import defaultdict
from decimal import Decimal
from typing import List, Optional, Union
import aiohttp
from obm import exceptions
from obm.connectors import base
__all__ = [
"BitcoinCoreConnector",
]
class BitcoinCoreConnector(base.Connector):
node = "bitcoin-core"
currency = "bitcoin"
# TODO: Migrate to __slots__
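    # Maps pythonic rpc_* names to Bitcoin Core JSON-RPC commands. Presumably
    # the base connector resolves these attributes through this table and
    # routes each call into `wrapper` below (assumption: the dispatch itself
    # lives in base.Connector, which is not shown here).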
METHODS = {
"rpc_list_transactions": "listtransactions",
"rpc_estimate_smart_fee": "estimatesmartfee",
"rpc_send_to_address": "sendtoaddress",
"rpc_get_new_address": "getnewaddress",
"rpc_get_block_count": "getblockcount",
"rpc_get_block": "getblock",
"rpc_get_transaction": "gettransaction",
"rpc_get_raw_transaction": "getrawtransaction",
}
DEFAULT_PORT = 18332
def __init__(
self,
rpc_host: str = "localhost",
rpc_port: int = DEFAULT_PORT,
rpc_username: Optional[str] = None,
rpc_password: Optional[str] = None,
loop: Optional[asyncio.AbstractEventLoop] = None,
session: Optional[aiohttp.ClientSession] = None,
timeout: Union[int, float] = base.DEFAULT_TIMEOUT,
):
rpc_port = rpc_port or self.DEFAULT_PORT
if rpc_username is not None and rpc_password is not None:
self.auth = aiohttp.BasicAuth(rpc_username, rpc_password)
self.headers = {
"content-type": "application/json",
}
super().__init__(rpc_host, rpc_port, loop, session, timeout)
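    # Generic JSON-RPC call: bundle the method name and positional params into
    # a payload, then validate the node's response.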
async def wrapper(self, *args, method: str = None) -> Union[dict, list]:
assert method is not None
response = await self.call(payload={"method": method, "params": args,})
return await self.validate(response)
# BitcoinCore specific interface
@staticmethod
def format_transaction(tx, latest_block_number):
def get_amount(details, category):
if category in ["send", "oneself"]:
return details.get("send", {}).get("amount")
elif category == "receive":
return details.get("receive", {}).get("amount")
def get_category(from_address, to_address):
if from_address and to_address:
return "send" if from_address != to_address else "oneself"
elif from_address:
return "send"
elif to_address:
return "receive"
category = tx.get("category")
if category is None:
# Tx is send_transaction output.
details = {detail["category"]: detail for detail in tx["details"]}
fee = details.get("send", {}).get("fee")
from_address = details.get("send", {}).get("address")
to_address = details.get("receive", {}).get("address")
category = get_category(from_address, to_address)
amount = get_amount(details, category)
else:
# Tx is list_transactions output.
if category == "oneself":
from_address = to_address = tx["address"]
                # Restore the original category so the raw tx stored under
                # the info key stays accurate.
tx["category"] = "send"
else:
to_address = tx["address"]
from_address = None
amount = tx["amount"]
fee = tx.get("fee", 0)
if tx["confirmations"] == -1:
            # This means the tx is out of the main chain.
# Reference: https://bitcoin.org/en/developer-reference#listtransactions
block_number = -1
elif tx["confirmations"] == 0:
# Transaction still in mempool.
block_number = None
else:
number_from_end = tx["confirmations"] - 1
block_number = latest_block_number - number_from_end
return {
"txid": tx["txid"],
"from_address": from_address,
"to_address": to_address,
"amount": abs(amount),
"fee": abs(fee) if fee is not None else None,
"block_number": block_number,
"category": category,
"timestamp": tx["time"],
"info": tx,
}
# Unified interface
@property
async def latest_block_number(self) -> int:
return await self.rpc_get_block_count()
async def create_address( # pylint: disable=unused-argument
self, password: str = ""
) -> str:
# TODO: Add args
return await self.rpc_get_new_address()
async def estimate_fee( # pylint: disable=unused-argument
self,
from_address: str = None,
to_address: str = None,
amount: str = None,
fee: Union[dict, Decimal] = None,
data: str = None,
conf_target: int = 1,
) -> Decimal:
result = await self.rpc_estimate_smart_fee(conf_target)
if errors := result.get("errors"):
raise exceptions.NodeError(errors)
return result["feerate"]
async def send_transaction( # pylint: disable=unused-argument
self,
amount: Union[Decimal, float],
to_address: str,
from_address: str = None,
fee: Union[dict, Decimal] = None,
password: str = "",
subtract_fee_from_amount: bool = False,
) -> dict:
# TODO: Validate
latest_block_number = await self.latest_block_number
txid = await self.rpc_send_to_address(
to_address, amount, "", "", subtract_fee_from_amount
)
tx = await self.rpc_get_transaction(txid)
return self.format_transaction(tx, latest_block_number)
async def fetch_recent_transactions(
self, limit: int = 10, **kwargs
) -> List[dict]:
"""Fetches most recent transactions from a blockchain.
Args:
limit: The number of transactions to return. Defaults to 10.
Returns:
Most recent transactions list.
"""
def combine_duplicates(txs):
"""Combines doubles that is transaction self-sending outcome."""
txs_by_txid = defaultdict(list)
for tx in txs:
txs_by_txid[tx["txid"]].append(tx)
duplicate_txids = [
txid
for txid, tx_or_txs in txs_by_txid.items()
if len(tx_or_txs) == 2
]
txs = [tx for tx in txs if tx["txid"] not in duplicate_txids]
for duplicate_txid in duplicate_txids:
duplicate_txs = {
tx["category"]: tx for tx in txs_by_txid[duplicate_txid]
}
assert "send" in duplicate_txs and "receive" in duplicate_txs
duplicate_txs["send"]["category"] = "oneself"
txs.append(duplicate_txs["send"])
return txs
        # Fetch twice the requested number of transactions to cover the case
        # where a transaction was sent to an in-wallet address and therefore
        # appears twice in Bitcoin Core's listtransactions output, with
        # different categories.
count = limit * 2
latest_block_number = await self.latest_block_number
txs = await self.rpc_list_transactions(
kwargs.get("label", "*"),
count,
kwargs.get("skip", 0),
kwargs.get("include_watchonly", False),
)
sorted_txs = sorted(
[
self.format_transaction(tx, latest_block_number)
for tx in combine_duplicates(txs)
],
key=lambda x: x["timestamp"],
reverse=True,
)
return sorted_txs[:limit]
async def fetch_in_wallet_transaction(self, txid: str) -> dict:
"""Fetches the transaction by txid from a blockchain.
Args:
txid: Transaction ID to return.
Returns:
Dict that represent the transaction.
"""
tx = await self.rpc_get_transaction(txid)
latest_block_number = await self.latest_block_number
return self.format_transaction(tx, latest_block_number)
|
[
"obm.exceptions.NodeError",
"collections.defaultdict",
"aiohttp.BasicAuth"
] |
[((1896, 1941), 'aiohttp.BasicAuth', 'aiohttp.BasicAuth', (['rpc_username', 'rpc_password'], {}), '(rpc_username, rpc_password)\n', (1913, 1941), False, 'import aiohttp\n'), ((5481, 5509), 'obm.exceptions.NodeError', 'exceptions.NodeError', (['errors'], {}), '(errors)\n', (5501, 5509), False, 'from obm import exceptions\n'), ((6646, 6663), 'collections.defaultdict', 'defaultdict', (['list'], {}), '(list)\n', (6657, 6663), False, 'from collections import defaultdict\n')]
|
#! /usr/bin/env python3
"""
Option parser and the main function
"""
from __future__ import print_function, absolute_import
import argparse
from trashtalk.generate_trashs import generate_trashs
import sys
from trashtalk.tools import print_files
__all__ = ["trashtalk"]
def parse_option(args=None):
parser = argparse.ArgumentParser(
description="Taking out your trash easily")
# CLASSIC
from trashtalk.__init__ import __version__
parser.add_argument('-v', '--version', action='version',
version='%(prog)s: ' + __version__)
parser.add_argument('--verbose', action='store_true')
# TRASH SELECTION
selection = parser.add_argument_group('trash selections')
option = parser.add_argument_group('trash options')
selection.add_argument('trash', nargs='*', default=[],
help=("name where you want use trash, "
"this is a list, you can write home or media,"
" by default your home trash was selected"))
selection.add_argument('-a', action='store_true', default=False,
help="select all trash (home + all your media)")
selection.add_argument('-u', action='store', nargs='*',
help="select user")
selection.add_argument('-am', action='store_true', default=False,
help="select all media, this can depend of user")
# TRASH OPTION
option.add_argument('-p', action='store_true', default=False,
help="print trash path")
option.add_argument('-f', '--files', action='store', nargs='*',
help="select file in trash")
option.add_argument('-l', action='store_true', default=False,
help="list file in trash")
option.add_argument('-s', action='store_true',
help=("print size"))
option.add_argument('-i', action='store_true',
help=("print info"))
option.add_argument('-cl', '--clean', action='store_true',
help="clean file, or without file all")
option.add_argument('-rm', action='store', nargs='*',
help="move file to selected trash")
option.add_argument('-re', action='store_true', default=False,
help="restore file from selected trash")
if args:
return parser.parse_known_args(args)
return parser.parse_args()
def trashtalk():
options = parse_option()
if (options.am or options.trash) and "home" not in options.trash:
home = False
else:
home = True
if options.trash:
options.trash.remove('home')
if options.a:
options.am = True
    trashs, errors = generate_trashs(options.u, options.trash, home, options.am)
    for error in errors:
print(error, file=sys.stderr)
for trash in trashs:
error = []
if not trash:
continue
if options.p or (
not options.l and not
options.s and not options.clean
and not options.rm):
if options.p:
print('\033[34;1m%s: ' % trash.name, end='')
print("%s\033[0m" % str(trash))
if options.l or options.s:
print_files(
list(trash.list_files(options.files, options.s, options.i)),
options.s + options.i * 2 + 1
)
if options.clean:
error = trash.clean(options.files)
if options.rm:
error = trash.remove(options.rm)
if options.re:
error = trash.restore(options.f)
if error:
for e in error:
print(e, file=sys.stderr)
|
[
"trashtalk.generate_trashs.generate_trashs",
"argparse.ArgumentParser"
] |
[((313, 380), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'description': '"""Taking out your trash easily"""'}), "(description='Taking out your trash easily')\n", (336, 380), False, 'import argparse\n'), ((2784, 2843), 'trashtalk.generate_trashs.generate_trashs', 'generate_trashs', (['options.u', 'options.trash', 'home', 'options.am'], {}), '(options.u, options.trash, home, options.am)\n', (2799, 2843), False, 'from trashtalk.generate_trashs import generate_trashs\n')]
|
import paho.mqtt.client as mqtt
from store_Sensor_data import sensor_Data_Handler
# MQTT Settings
MQTT_Broker = "192.168.127.12"
MQTT_Port = 1883
Keep_Alive_Interval = 45
MQTT_Topic = "Home/Control"
#Subscribe to all Sensors at Base Topic
def on_connect(mosq, obj, rc):
mqttc.subscribe(MQTT_Topic, 0)
#Save Data into DB Table
def on_message(mosq, obj, msg):
sensor_Data_Handler(msg.topic, msg.payload)
def on_subscribe(mosq, obj, mid, granted_qos):
pass
mqttc = mqtt.Client()
# Assign event callbacks
mqttc.on_message = on_message
mqttc.on_connect = on_connect
mqttc.on_subscribe = on_subscribe
# Connect
mqttc.connect(MQTT_Broker, int(MQTT_Port), int(Keep_Alive_Interval))
# Continue the network loop
mqttc.loop_forever()
|
[
"paho.mqtt.client.Client",
"store_Sensor_data.sensor_Data_Handler"
] |
[((485, 498), 'paho.mqtt.client.Client', 'mqtt.Client', ([], {}), '()\n', (496, 498), True, 'import paho.mqtt.client as mqtt\n'), ((374, 417), 'store_Sensor_data.sensor_Data_Handler', 'sensor_Data_Handler', (['msg.topic', 'msg.payload'], {}), '(msg.topic, msg.payload)\n', (393, 417), False, 'from store_Sensor_data import sensor_Data_Handler\n')]
|
import frontend as fe
import backend as be
import sys
#Performs the transactions and saves the summary in a separate file
#filename - unique name for each summary file
#actions - list of actions to perform, starting with login and ending with logout
#passes the transaction summary file back to main
def transactions(filename, actions):
if len(actions) == 0:
#runs program allowing for manual input
fe.main("valid_accounts_list.txt", filename)
else:
        #create a file that the system can use as scripted input
input_file = open("actions.txt", "w")
for act in actions:
input_file.write(act+'\n')
input_file.close()
fe.automatedInput("actions.txt", filename)
#creates the new Transaction summary file from all the merged files
#passes this file to main to invoke the backend
def mergedFile(files):
merged_file = open("merged_transaction_summary_file.txt", "w")
file_number = len(files)
count = 0
for f in files:
count += 1
last = True if count == file_number else False
merged_file = fileHelper(f, merged_file, last)
merged_file.close()
#writes every line of each transaction file into the merged transaction file
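#note: each transaction file ends with an "EOS ..." sentinel line; fileHelper
#drops it and a single sentinel is re-appended after the last file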
def fileHelper(f, merged, last):
if f != None:
for line in f:
line = line.strip()
if line != "EOS 0000000 000 0000000 ***":
merged.write(line+'\n')
if last == True:
merged.write("EOS 0000000 000 0000000 ***")
return merged
#this method is to be used when using the dailyScript by itself to simulate
#one day of operation.
def oneDay():
trans1 = ["login", "agent", "createAccount", "1234567", "Emma", "logout"]
#trans2 will output "Account doesn't exist"
trans2 = ["login", "machine", "withdraw", "1234567", "logout"]
trans3 = ["login", "agent", "createAccount", "7654321", "Kathryn", "logout", "0"]
fileName1 = "transaction_file_1.txt"
fileName2 = "transaction_file_2.txt"
fileName3 = "transaction_file_3.txt"
transactions(fileName1, trans1)
transactions(fileName2, trans2)
transactions(fileName3, trans3)
f1 = open(fileName1, "r")
f2 = open(fileName2, "r")
f3 = open(fileName3, "r")
files = [f1, f2, f3]
mergedFile(files)
for f in files:
f.close()
be.main("master_accounts_file.txt", "merged_transaction_summary_file.txt")
#calls the transactions and passes a different file name every time
#calls backend for merged file
def main(trans1, trans2, trans3):
fileName1 = "transaction_file_1.txt"
fileName2 = "transaction_file_2.txt"
fileName3 = "transaction_file_3.txt"
transactions(fileName1, trans1)
transactions(fileName2, trans2)
transactions(fileName3, trans3)
f1 = open(fileName1, "r")
f2 = open(fileName2, "r")
f3 = open(fileName3, "r")
files = [f1, f2, f3]
mergedFile(files)
for f in files:
f.close()
be.main("master_accounts_file.txt", "merged_transaction_summary_file.txt")
|
[
"backend.main",
"frontend.automatedInput",
"frontend.main"
] |
[((2331, 2405), 'backend.main', 'be.main', (['"""master_accounts_file.txt"""', '"""merged_transaction_summary_file.txt"""'], {}), "('master_accounts_file.txt', 'merged_transaction_summary_file.txt')\n", (2338, 2405), True, 'import backend as be\n'), ((2956, 3030), 'backend.main', 'be.main', (['"""master_accounts_file.txt"""', '"""merged_transaction_summary_file.txt"""'], {}), "('master_accounts_file.txt', 'merged_transaction_summary_file.txt')\n", (2963, 3030), True, 'import backend as be\n'), ((409, 453), 'frontend.main', 'fe.main', (['"""valid_accounts_list.txt"""', 'filename'], {}), "('valid_accounts_list.txt', filename)\n", (416, 453), True, 'import frontend as fe\n'), ((669, 711), 'frontend.automatedInput', 'fe.automatedInput', (['"""actions.txt"""', 'filename'], {}), "('actions.txt', filename)\n", (686, 711), True, 'import frontend as fe\n')]
|
# Copyright (C) 2012 Linaro Limited
#
# Author: <NAME> <<EMAIL>>
#
# This file is part of versiontools.
#
# versiontools is free software: you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License version 3
# as published by the Free Software Foundation
#
# versiontools is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public License
# along with versiontools. If not, see <http://www.gnu.org/licenses/>.
"""
versiontools.versiontools_support
=================================
A small standalone module that allows any package to use versiontools.
Typically you should copy this file verbatim into your source distribution.
Historically versiontools was depending on a exotic feature of setuptools to
work. Setuptools has so-called setup-time dependencies, that is modules that
need to be downloaded and imported/interrogated for setup.py to run
successfully. Versiontools supports this by installing a handler for the
'version' keyword of the setup() function.
This approach was always a little annoying as this setuptools feature is rather
odd and very few other packages made any use of it. In the future the standard
tools for python packaging (especially in the python3 world) may remove this
feature or offer no equivalent, thus rendering versiontools completely broken.
Currently the biggest practical issue is the apparent inability to prevent
setuptools from downloading packages designated as setup_requires. This is
discussed in this pip issue: https://github.com/pypa/pip/issues/410
To counter this issue I've redesigned versiontools to be a little smarter. The
old mode stays as-is for compatibility. The new mode works differently, without
the need for using setup_requires in your setup() call. Instead it requires
each package that uses versiontools to ship a verbatim copy of this module and
to import it in their setup.py script. This module helps setuptools find
package version in the standard PKG-INFO file that is created for all source
distributions. Remember that you only need this mode when you don't want to add
a dependency on versiontools. This will still allow you to use versiontools (in
a limited way) in your setup.py file.
Technically this module defines an improved version of the
distutils.dist.DistributionMetadata class and monkey-patches distutils to use
it. To retain backward compatibility the new feature is only active when a
special version string is passed to the setup() call.
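
A minimal sketch of the intended usage in setup.py (names are illustrative):

    import versiontools_support  # noqa -- importing activates the monkey-patch
    from distutils.core import setup

    setup(name="mypackage", version=":versiontools:mypackage:")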
"""
__version__ = (1, 0, 0, "final", 0)
import distutils.dist
import distutils.errors
class VersiontoolsEnchancedDistributionMetadata(distutils.dist.DistributionMetadata):
"""
A subclass of distutils.dist.DistributionMetadata that uses versiontools
Typically you would not instantiate this class directly. It is constructed
by distutils.dist.Distribution.__init__() method. Since there is no other
way to do it, this module monkey-patches distutils to override the original
version of DistributionMetadata
"""
# Reference to the original class. This is only required because distutils
# was created before the introduction of new-style classes to python.
__base = distutils.dist.DistributionMetadata
def get_version(self):
"""
Get distribution version.
This method is enhanced compared to original distutils implementation.
If the version string is set to a special value then instead of using
the actual value the real version is obtained by querying versiontools.
If versiontools package is not installed then the version is obtained
from the standard section of the ``PKG-INFO`` file. This file is
automatically created by any source distribution. This method is less
useful as it cannot take advantage of version control information that
is automatically loaded by versiontools. It has the advantage of not
requiring versiontools installation and that it does not depend on
``setup_requires`` feature of ``setuptools``.
"""
if (self.name is not None and self.version is not None
and self.version.startswith(":versiontools:")):
return (self.__get_live_version() or self.__get_frozen_version()
or self.__fail_to_get_any_version())
else:
return self.__base.get_version(self)
def __get_live_version(self):
"""
Get a live version string using versiontools
"""
try:
import versiontools
except ImportError:
return None
else:
return str(versiontools.Version.from_expression(self.name))
def __get_frozen_version(self):
"""
Get a fixed version string using an existing PKG-INFO file
"""
try:
return self.__base("PKG-INFO").version
except IOError:
return None
def __fail_to_get_any_version(self):
"""
Raise an informative exception
"""
raise SystemExit(
"""This package requires versiontools for development or testing.
See http://versiontools.readthedocs.org/ for more information about
what versiontools is and why it is useful.
To install versiontools now please run:
$ pip install versiontools
Note: versiontools works best when you have additional modules for
integrating with your preferred version control system. Refer to
the documentation for a full list of required modules.""")
# If DistributionMetadata is not a subclass of
# VersiontoolsEnchancedDistributionMetadata then monkey patch it. This should
# prevent an (odd) case of multiple imports of this module.
if not issubclass(
distutils.dist.DistributionMetadata,
VersiontoolsEnchancedDistributionMetadata):
distutils.dist.DistributionMetadata = VersiontoolsEnchancedDistributionMetadata
|
[
"versiontools.Version.from_expression"
] |
[((4973, 5020), 'versiontools.Version.from_expression', 'versiontools.Version.from_expression', (['self.name'], {}), '(self.name)\n', (5009, 5020), False, 'import versiontools\n')]
|
import unittest
from min_max import max, min # noqa
class Tests(unittest.TestCase):
TESTS = {
"1. Basics": [
{'input': "max(3, 2)", 'answer': 3},
{'input': "min(3, 2)", 'answer': 2},
{'input': "max([1, 2, 0, 3, 4])", 'answer': 4},
{'input': "min('hello')", 'answer': "e"},
{'input': "max(2.2, 5.6, 5.9, key=int)", 'answer': 5.6},
{
'input': "min([[1, 2], [3, 4], [9, 0]], key=lambda x: x[1])",
'answer': [9, 0],
},
],
"Extra": [
{'input': "max([0])", 'answer': 0},
{'input': "min((9,))", 'answer': 9},
{'input': "max(range(6))", 'answer': 5},
{'input': "min(abs(i) for i in range(-10, 10))", 'answer': 0},
{'input': "max(x + 5 for x in range(6))", 'answer': 10},
{'input': 'max(filter(str.isalpha,"@v$e56r5CY{]"))', 'answer': 'v'},
{'input': "min({1, 2, 3, 4, -10})", 'answer': -10},
{'input': "max(set('djsaljldsklfjzx'))", 'answer': "z"},
{'input': "min(set('djsaljldsklfjzx'))", 'answer': "a"},
{'input': "max([1, 2, 3], [5, 6], [7], [0, 0, 0, 1])", 'answer': [7]},
{
'input': "min([1, 2, 3], [5, 6], [7], [0, 0, 0, 10], key=sum)",
'answer': [1, 2, 3],
},
{'input': "max(True, False, -1, key=lambda x: not x)", 'answer': False},
{'input': "min(True, False, -1)", 'answer': -1},
],
}
def test_Basics(self):
for i in self.TESTS['1. Basics']:
assert eval(i['input']) == i['answer']
def test_Extra(self):
for i in self.TESTS['Extra']:
assert eval(i['input']) == i['answer']
if __name__ == "__main__": # pragma: no cover
unittest.main()
|
[
"unittest.main"
] |
[((1836, 1851), 'unittest.main', 'unittest.main', ([], {}), '()\n', (1849, 1851), False, 'import unittest\n')]
|
'''modules
SUMMARY OF APIs
l2():
output(theta) --> number
gradient(theta) --> vector
linear():
output(theta, x) --> scores
gradient(x) --> vector
linear_n(num_inputs, num_outputs):
output(theta, x) --> scores
gradient(x) --> vector
negative_likelihood():
output(probs, y) --> number
gradient(probs, y) --> vector
softmax():
output(input) --> vector
gradient(output)
squared():
output(predicted, expected) --> number
gradient(predicted, expected) --> vector
'''
from module_linear import linear
from module_linear_n import linear_n
from module_negative_likelihood import negative_likelihood
from module_softmax import softmax
if False:
# use all imports, so as to avoid an error from pyflakes
linear()
linear_n()
negative_likelihood()
softmax()
|
[
"module_softmax.softmax",
"module_linear_n.linear_n",
"module_negative_likelihood.negative_likelihood",
"module_linear.linear"
] |
[((767, 775), 'module_linear.linear', 'linear', ([], {}), '()\n', (773, 775), False, 'from module_linear import linear\n'), ((780, 790), 'module_linear_n.linear_n', 'linear_n', ([], {}), '()\n', (788, 790), False, 'from module_linear_n import linear_n\n'), ((795, 816), 'module_negative_likelihood.negative_likelihood', 'negative_likelihood', ([], {}), '()\n', (814, 816), False, 'from module_negative_likelihood import negative_likelihood\n'), ((821, 830), 'module_softmax.softmax', 'softmax', ([], {}), '()\n', (828, 830), False, 'from module_softmax import softmax\n')]
|
import itertools
from pint import pi_theorem
from pint.testsuite import QuantityTestCase
class TestPiTheorem(QuantityTestCase):
FORCE_NDARRAY = False
def test_simple(self):
# simple movement
with self.capture_log() as buffer:
self.assertEqual(
pi_theorem({"V": "m/s", "T": "s", "L": "m"}),
[{"V": 1, "T": 1, "L": -1}],
)
# pendulum
self.assertEqual(
pi_theorem({"T": "s", "M": "grams", "L": "m", "g": "m/s**2"}),
[{"g": 1, "T": 2, "L": -1}],
)
self.assertEqual(len(buffer), 7)
def test_inputs(self):
V = "km/hour"
T = "ms"
L = "cm"
f1 = lambda x: x
f2 = lambda x: self.Q_(1, x)
f3 = lambda x: self.Q_(1, x).units
f4 = lambda x: self.Q_(1, x).dimensionality
fs = f1, f2, f3, f4
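        # fs enumerates the accepted input forms for each quantity: a plain
        # unit string, a Quantity, a Units instance, and a dimensionality.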
for fv, ft, fl in itertools.product(fs, fs, fs):
qv = fv(V)
qt = ft(T)
            ql = fl(L)
self.assertEqual(
self.ureg.pi_theorem({"V": qv, "T": qt, "L": ql}),
[{"V": 1.0, "T": 1.0, "L": -1.0}],
)
|
[
"pint.pi_theorem",
"itertools.product"
] |
[((941, 970), 'itertools.product', 'itertools.product', (['fs', 'fs', 'fs'], {}), '(fs, fs, fs)\n', (958, 970), False, 'import itertools\n'), ((302, 346), 'pint.pi_theorem', 'pi_theorem', (["{'V': 'm/s', 'T': 's', 'L': 'm'}"], {}), "({'V': 'm/s', 'T': 's', 'L': 'm'})\n", (312, 346), False, 'from pint import pi_theorem\n'), ((477, 538), 'pint.pi_theorem', 'pi_theorem', (["{'T': 's', 'M': 'grams', 'L': 'm', 'g': 'm/s**2'}"], {}), "({'T': 's', 'M': 'grams', 'L': 'm', 'g': 'm/s**2'})\n", (487, 538), False, 'from pint import pi_theorem\n')]
|
#!/usr/bin/env python3
# This file provides an interface between Thunderbird and Cultured
# Code's Things, by implementing the Mozilla native Messaging
# interface and using Apple Script to tell Things what to do.
# Code is derived from
# https://github.com/mdn/webextensions-examples/blob/master/native-messaging/app/ping_pong.py
import sys
import json
import struct
import subprocess
import urllib.parse
# Python 3.x version
# Read a message from stdin and decode it.
def getMessage(debug=False):
if debug:
        # just take a string from stdin
message = sys.stdin.read()
else:
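        # Native-messaging framing: a 4-byte native-endian unsigned length
        # prefix, followed by a UTF-8 encoded JSON payload.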
rawLength = sys.stdin.buffer.read(4)
if len(rawLength) == 0:
sys.exit(0)
messageLength = struct.unpack('@I', rawLength)[0]
message = sys.stdin.buffer.read(messageLength).decode('utf-8')
return json.loads(message)
# Encode a message for transmission,
# given its content.
def encodeMessage(messageContent):
encodedContent = json.dumps(messageContent).encode('utf-8')
encodedLength = struct.pack('@I', len(encodedContent))
return {'length': encodedLength, 'content': encodedContent}
# Send an encoded message to stdout
def sendMessage(encodedMessage):
sys.stdout.buffer.write(encodedMessage['length'])
sys.stdout.buffer.write(encodedMessage['content'])
sys.stdout.buffer.flush()
def createTask(d):
    props = ""
    for k in d:
        if props:
            props += ","
        props = props + k + ":\"" + d[k] + "\""
s = '''\
tell application id "com.culturedcode.ThingsMac"
show quick entry panel with properties {{ {propmap} }}
end tell
'''
    s = s.format(propmap=props)
process = subprocess.Popen(["osascript"], stdin=subprocess.PIPE)
process.communicate(s.encode('utf-8'))
def run():
while True:
createTask(getMessage())
if __name__ == '__main__':
run()
|
[
"subprocess.Popen",
"sys.stdin.read",
"json.loads",
"sys.stdout.buffer.flush",
"struct.unpack",
"json.dumps",
"sys.stdin.buffer.read",
"sys.stdout.buffer.write",
"sys.exit"
] |
[((849, 868), 'json.loads', 'json.loads', (['message'], {}), '(message)\n', (859, 868), False, 'import json\n'), ((1224, 1273), 'sys.stdout.buffer.write', 'sys.stdout.buffer.write', (["encodedMessage['length']"], {}), "(encodedMessage['length'])\n", (1247, 1273), False, 'import sys\n'), ((1278, 1328), 'sys.stdout.buffer.write', 'sys.stdout.buffer.write', (["encodedMessage['content']"], {}), "(encodedMessage['content'])\n", (1301, 1328), False, 'import sys\n'), ((1333, 1358), 'sys.stdout.buffer.flush', 'sys.stdout.buffer.flush', ([], {}), '()\n', (1356, 1358), False, 'import sys\n'), ((1677, 1731), 'subprocess.Popen', 'subprocess.Popen', (["['osascript']"], {'stdin': 'subprocess.PIPE'}), "(['osascript'], stdin=subprocess.PIPE)\n", (1693, 1731), False, 'import subprocess\n'), ((580, 596), 'sys.stdin.read', 'sys.stdin.read', ([], {}), '()\n', (594, 596), False, 'import sys\n'), ((627, 651), 'sys.stdin.buffer.read', 'sys.stdin.buffer.read', (['(4)'], {}), '(4)\n', (648, 651), False, 'import sys\n'), ((696, 707), 'sys.exit', 'sys.exit', (['(0)'], {}), '(0)\n', (704, 707), False, 'import sys\n'), ((732, 762), 'struct.unpack', 'struct.unpack', (['"""@I"""', 'rawLength'], {}), "('@I', rawLength)\n", (745, 762), False, 'import struct\n'), ((984, 1010), 'json.dumps', 'json.dumps', (['messageContent'], {}), '(messageContent)\n', (994, 1010), False, 'import json\n'), ((784, 820), 'sys.stdin.buffer.read', 'sys.stdin.buffer.read', (['messageLength'], {}), '(messageLength)\n', (805, 820), False, 'import sys\n')]
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#import re
import os
import argparse
import string
import json
import json_schema_generator
from json_schema_generator import Recorder, Validator
def record(args):
if(os.path.isfile(args.json_source)):
rec = Recorder.from_file(args.json_source)
else:
rec = Recorder.from_url(args.json_source)
rec.save_json_schema(args.json_schema_file_path, indent=4)
def validate(args):
from six.moves.urllib.request import urlopen
    # the JSON source may be a local file, not only a URL
if(os.path.isfile(args.json_source)):
with open(args.json_source, 'rb') as f:
json_data_unload = f.read()
json_data = json.loads(json_data_unload)
is_load = True
else:
json_data = urlopen(args.json_source).read()
is_load = False
validator = Validator.from_path(args.json_schema_file_path)
is_valid = validator.assert_json(json_data, is_load)
if is_valid:
print (" * JSON is valid")
else:
print (" ! JSON is broken ")
print (validator.error_message)
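# Record the JSON schema for a source and render a regression-test module for
# it from the bundled test template.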
def homologate(args):
template_file_path = os.path.join(os.path.dirname(json_schema_generator.__file__), 'test_template.py.tmpl')
json_schemas_dir = os.path.join(args.path, 'json_schemas')
json_schema_file_name = '%s.json_schema' % args.homologation_name
json_schema_file_path = os.path.join(json_schemas_dir, json_schema_file_name)
test_file_path = os.path.join(args.path, 'test_%s_json_schema.py' % args.homologation_name)
with open(template_file_path) as template_file:
tmpl = string.Template(template_file.read())
if not os.path.exists(json_schemas_dir):
os.mkdir(json_schemas_dir)
if not os.path.exists(json_schema_file_path):
rec = Recorder.from_url(args.json_source)
rec.save_json_schema(json_schema_file_path, indent=4)
rendered = tmpl.substitute(
homologation_name=args.homologation_name,
service_url=args.json_source,
json_schema_file_name=json_schema_file_name,
json_schemas_dir=json_schemas_dir
)
with open(test_file_path, 'w') as test_file:
test_file.write(rendered)
def main():
parser = argparse.ArgumentParser()
default_parser = argparse.ArgumentParser(add_help=False)
default_parser.add_argument('json_source', type=str, help='url or file')
default_parser.add_argument('--path', dest='path', default='', help='set path')
subparsers = parser.add_subparsers(help='sub-command help')
parser_record = subparsers.add_parser('record', parents=[default_parser])
parser_record.add_argument('json_schema_file_path', type=str, help='json schema file path')
parser_record.set_defaults(func=record)
parser_validate = subparsers.add_parser('validate', parents=[default_parser])
parser_validate.add_argument('json_schema_file_path', type=str, help='json schema file path')
parser_validate.set_defaults(func=validate)
parser_homologate = subparsers.add_parser('homologate', parents=[default_parser])
parser_homologate.add_argument('homologation_name', type=str, help='json schema file path')
parser_homologate.set_defaults(func=homologate)
args = parser.parse_args()
try:
args.func
except AttributeError:
import sys
print("missing 1 or more required arguments (see '%s --help')" % sys.argv[0])
exit(1)
else:
args.func(args)
if __name__ == '__main__':
main()
|
[
"json_schema_generator.Validator.from_path",
"os.mkdir",
"argparse.ArgumentParser",
"json.loads",
"os.path.dirname",
"json_schema_generator.Recorder.from_url",
"os.path.exists",
"json_schema_generator.Recorder.from_file",
"six.moves.urllib.request.urlopen",
"os.path.isfile",
"os.path.join"
] |
[((235, 267), 'os.path.isfile', 'os.path.isfile', (['args.json_source'], {}), '(args.json_source)\n', (249, 267), False, 'import os\n'), ((594, 626), 'os.path.isfile', 'os.path.isfile', (['args.json_source'], {}), '(args.json_source)\n', (608, 626), False, 'import os\n'), ((910, 957), 'json_schema_generator.Validator.from_path', 'Validator.from_path', (['args.json_schema_file_path'], {}), '(args.json_schema_file_path)\n', (929, 957), False, 'from json_schema_generator import Recorder, Validator\n'), ((1330, 1369), 'os.path.join', 'os.path.join', (['args.path', '"""json_schemas"""'], {}), "(args.path, 'json_schemas')\n", (1342, 1369), False, 'import os\n'), ((1470, 1523), 'os.path.join', 'os.path.join', (['json_schemas_dir', 'json_schema_file_name'], {}), '(json_schemas_dir, json_schema_file_name)\n', (1482, 1523), False, 'import os\n'), ((1546, 1620), 'os.path.join', 'os.path.join', (['args.path', "('test_%s_json_schema.py' % args.homologation_name)"], {}), "(args.path, 'test_%s_json_schema.py' % args.homologation_name)\n", (1558, 1620), False, 'import os\n'), ((2328, 2353), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {}), '()\n', (2351, 2353), False, 'import argparse\n'), ((2378, 2417), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'add_help': '(False)'}), '(add_help=False)\n', (2401, 2417), False, 'import argparse\n'), ((285, 321), 'json_schema_generator.Recorder.from_file', 'Recorder.from_file', (['args.json_source'], {}), '(args.json_source)\n', (303, 321), False, 'from json_schema_generator import Recorder, Validator\n'), ((348, 383), 'json_schema_generator.Recorder.from_url', 'Recorder.from_url', (['args.json_source'], {}), '(args.json_source)\n', (365, 383), False, 'from json_schema_generator import Recorder, Validator\n'), ((740, 768), 'json.loads', 'json.loads', (['json_data_unload'], {}), '(json_data_unload)\n', (750, 768), False, 'import json\n'), ((1232, 1279), 'os.path.dirname', 'os.path.dirname', (['json_schema_generator.__file__'], {}), '(json_schema_generator.__file__)\n', (1247, 1279), False, 'import os\n'), ((1744, 1776), 'os.path.exists', 'os.path.exists', (['json_schemas_dir'], {}), '(json_schemas_dir)\n', (1758, 1776), False, 'import os\n'), ((1787, 1813), 'os.mkdir', 'os.mkdir', (['json_schemas_dir'], {}), '(json_schemas_dir)\n', (1795, 1813), False, 'import os\n'), ((1828, 1865), 'os.path.exists', 'os.path.exists', (['json_schema_file_path'], {}), '(json_schema_file_path)\n', (1842, 1865), False, 'import os\n'), ((1882, 1917), 'json_schema_generator.Recorder.from_url', 'Recorder.from_url', (['args.json_source'], {}), '(args.json_source)\n', (1899, 1917), False, 'from json_schema_generator import Recorder, Validator\n'), ((825, 850), 'six.moves.urllib.request.urlopen', 'urlopen', (['args.json_source'], {}), '(args.json_source)\n', (832, 850), False, 'from six.moves.urllib.request import urlopen\n')]
|
import os, os.path, discord
from discord.ext import commands
from colorama import Fore
os.system('cls' if os.name == 'nt' else 'clear')
y, b, w = Fore.YELLOW, Fore.BLUE, Fore.WHITE  # color shortcuts for the prompts below
print(f"""{y}[{w}+{y}]{w} Enter your token""")
token = input(f"""{y}[{b}#{y}]{w} Token: """)
print(f"""\n{y}[{b}#{y}]{w} Write "!clear" in one of your DMs to delete your messages""")
global bot
bot = commands.Bot(command_prefix="!", self_bot=True)
bot.remove_command("help")
@bot.command()
async def clear(ctx, limit: int=None):
passed = 0
failed = 0
async for msg in ctx.message.channel.history(limit=limit):
if msg.author.id == bot.user.id:
try:
await msg.delete()
passed += 1
except:
failed += 1
print(f"\n{y}[{w}+{y}]{w} Removed {passed} messages with {failed} fails")
input(f"""\n{y}[{b}#{y}]{w} Press ENTER to exit""")
bot.run(token, bot=False)
|
[
"os.system",
"discord.ext.commands.Bot"
] |
[((92, 140), 'os.system', 'os.system', (["('cls' if os.name == 'nt' else 'clear')"], {}), "('cls' if os.name == 'nt' else 'clear')\n", (101, 140), False, 'import os, os.path, discord\n'), ((364, 411), 'discord.ext.commands.Bot', 'commands.Bot', ([], {'command_prefix': '"""!"""', 'self_bot': '(True)'}), "(command_prefix='!', self_bot=True)\n", (376, 411), False, 'from discord.ext import commands\n')]
|
"""The Poincaré ball model."""
import tensorflow as tf
from tensorflow_riemopt.manifolds.manifold import Manifold
from tensorflow_riemopt.manifolds import utils
class Poincare(Manifold):
"""Manifold of `math:`n`-dimensional hyperbolic space as embedded in the
Poincaré ball model.
Nickel, Maximillian, and <NAME>. "Poincaré embeddings for learning
hierarchical representations." Advances in neural information processing
systems. 2017.
Ganea, Octavian, <NAME>, and <NAME>. "Poincaré neural
networks." Advances in neural information processing systems. 2018.
"""
name = "Poincaré"
ndims = 1
def __init__(self, k=1.0):
"""Instantiate the Poincaré manifold.
Args:
k: scale of the hyperbolic space, k > 0.
"""
self.k = k
super().__init__()
def __repr__(self):
return "{0} (k={1}, ndims={2}) manifold".format(
self.name, self.k, self.ndims
)
def _check_point_on_manifold(self, x, atol, rtol):
k = tf.cast(self.k, x.dtype)
sq_norm = tf.reduce_sum(x * x, axis=-1, keepdims=True)
return tf.reduce_all(sq_norm * k < tf.ones_like(sq_norm))
def _check_vector_on_tangent(self, x, u, atol, rtol):
return tf.constant(True)
def _mobius_add(self, x, y):
"""Compute the Möbius addition of :math:`x` and :math:`y` in
:math:`\mathcal{D}^{n}_{k}`
:math:`x \oplus y = \frac{(1 + 2k\langle x, y\rangle + k||y||^2)x + (1
- k||x||^2)y}{1 + 2k\langle x,y\rangle + k^2||x||^2||y||^2}`
"""
x_2 = tf.reduce_sum(tf.math.square(x), axis=-1, keepdims=True)
y_2 = tf.reduce_sum(tf.math.square(y), axis=-1, keepdims=True)
x_y = tf.reduce_sum(x * y, axis=-1, keepdims=True)
k = tf.cast(self.k, x.dtype)
return ((1 + 2 * k * x_y + k * y_2) * x + (1 - k * x_2) * y) / (
1 + 2 * k * x_y + k ** 2 * x_2 * y_2
)
def _mobius_scal_mul(self, x, r):
"""Compute the Möbius scalar multiplication of :math:`x \in
\mathcal{D}^{n}_{k} \ {0}` by :math:`r`
:math:`x \otimes r = (1/\sqrt{k})\tanh(r
\atanh(\sqrt{k}||x||))\frac{x}{||x||}`
"""
sqrt_k = tf.math.sqrt(tf.cast(self.k, x.dtype))
norm_x = tf.linalg.norm(x, axis=-1, keepdims=True)
eps = utils.get_eps(x)
tan = tf.clip_by_value(sqrt_k * norm_x, -1.0 + eps, 1.0 - eps)
return (1 / sqrt_k) * tf.math.tanh(r * tf.math.atanh(tan)) * x / norm_x
def _gyration(self, u, v, w):
"""Compute the gyration of :math:`u`, :math:`v`, :math:`w`:
:math:`\operatorname{gyr}[u, v]w =
\ominus (u \oplus_\kappa v) \oplus (u \oplus_\kappa (v \oplus_\kappa w))`
"""
min_u_v = -self._mobius_add(u, v)
v_w = self._mobius_add(v, w)
u_v_w = self._mobius_add(u, v_w)
return self._mobius_add(min_u_v, u_v_w)
def _lambda(self, x, keepdims=False):
"""Compute the conformal factor :math:`lambda_x^k`"""
k = tf.cast(self.k, x.dtype)
norm_x_2 = tf.reduce_sum(x * x, axis=-1, keepdims=keepdims)
return 2.0 / (1.0 - k * norm_x_2)
def inner(self, x, u, v, keepdims=False):
lambda_x = self._lambda(x, keepdims=keepdims)
return tf.reduce_sum(u * v, axis=-1, keepdims=keepdims) * lambda_x ** 2
def norm(self, x, u, keepdims=False):
lambda_x = self._lambda(x, keepdims=keepdims)
return tf.linalg.norm(u, axis=-1, keepdims=keepdims) * lambda_x
def proju(self, x, u):
lambda_x = self._lambda(x, keepdims=True)
return u / lambda_x ** 2
def projx(self, x):
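        # Project back onto the open ball: x is returned unchanged when
        # sqrt(k) * ||x|| < 1, otherwise it is rescaled to just inside the
        # boundary.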
sqrt_k = tf.math.sqrt(tf.cast(self.k, x.dtype))
norm = tf.linalg.norm(x, axis=-1, keepdims=True)
return tf.where(
sqrt_k * norm < tf.ones_like(norm),
x,
x / (sqrt_k * norm + 10 * utils.get_eps(x)),
)
def dist(self, x, y, keepdims=False):
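        # Geodesic distance: d_k(x, y) = (2 / sqrt(k)) * atanh(sqrt(k) * ||(-x) ⊕ y||)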
sqrt_k = tf.math.sqrt(tf.cast(self.k, x.dtype))
x_y = self._mobius_add(-x, y)
norm_x_y = tf.linalg.norm(x_y, axis=-1, keepdims=keepdims)
eps = utils.get_eps(x)
tanh = tf.clip_by_value(sqrt_k * norm_x_y, -1.0 + eps, 1.0 - eps)
return 2 * tf.math.atanh(tanh) / sqrt_k
def exp(self, x, u):
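        # Exponential map: exp_x(u) = x ⊕ (tanh(sqrt(k) * lambda_x * ||u|| / 2) * u / (sqrt(k) * ||u||))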
sqrt_k = tf.math.sqrt(tf.cast(self.k, x.dtype))
norm_u = tf.linalg.norm(u, axis=-1, keepdims=True)
lambda_x = self._lambda(x, keepdims=True)
y = (
tf.math.tanh(sqrt_k * norm_u * lambda_x / 2.0)
* u
/ (sqrt_k * norm_u)
)
return self._mobius_add(x, y)
def log(self, x, y):
sqrt_k = tf.math.sqrt(tf.cast(self.k, x.dtype))
x_y = self._mobius_add(-x, y)
norm_x_y = tf.linalg.norm(x_y, axis=-1, keepdims=True)
eps = utils.get_eps(x)
tanh = tf.clip_by_value(sqrt_k * norm_x_y, -1.0 + eps, 1.0 - eps)
lambda_x = self._lambda(x, keepdims=True)
return 2 * (x_y / norm_x_y) * tf.math.atanh(tanh) / (sqrt_k * lambda_x)
retr = exp
def ptransp(self, x, y, v):
lambda_x = self._lambda(x, keepdims=True)
lambda_y = self._lambda(y, keepdims=True)
return self._gyration(y, -x, v) * lambda_x / lambda_y
transp = ptransp
def geodesic(self, x, u, t):
sqrt_k = tf.math.sqrt(tf.cast(self.k, x.dtype))
norm_u = tf.linalg.norm(u, axis=-1, keepdims=True)
y = tf.math.tanh(sqrt_k * t / 2.0) * u / (norm_u * sqrt_k)
return self._mobius_add(x, y)
def exp0(self, u):
"""Perform an exponential map from the origin"""
sqrt_k = tf.math.sqrt(tf.cast(self.k, u.dtype))
norm_u = tf.linalg.norm(u, axis=-1, keepdims=True)
return tf.math.tanh(sqrt_k * norm_u) * u / (sqrt_k * norm_u)
def log0(self, y):
"""Perform a logarithmic map from the origin"""
sqrt_k = tf.math.sqrt(tf.cast(self.k, y.dtype))
norm_y = tf.linalg.norm(y, axis=-1, keepdims=True)
return tf.math.atanh(sqrt_k * norm_y) * y / (sqrt_k * norm_y)
def ptransp0(self, y, v):
"""Perform a parallel transport from the origin"""
lambda_y = self._lambda(y, keepdims=True)
return 2 * v / lambda_y
def random(self, shape, dtype=tf.float32):
return self.projx(
tf.random.uniform(shape, minval=-1e-3, maxval=1e-3, dtype=dtype)
)
|
[
"tensorflow.math.tanh",
"tensorflow.reduce_sum",
"tensorflow.clip_by_value",
"tensorflow.random.uniform",
"tensorflow.math.atanh",
"tensorflow.constant",
"tensorflow.ones_like",
"tensorflow.cast",
"tensorflow.math.square",
"tensorflow_riemopt.manifolds.utils.get_eps",
"tensorflow.linalg.norm"
] |
[((1041, 1065), 'tensorflow.cast', 'tf.cast', (['self.k', 'x.dtype'], {}), '(self.k, x.dtype)\n', (1048, 1065), True, 'import tensorflow as tf\n'), ((1084, 1128), 'tensorflow.reduce_sum', 'tf.reduce_sum', (['(x * x)'], {'axis': '(-1)', 'keepdims': '(True)'}), '(x * x, axis=-1, keepdims=True)\n', (1097, 1128), True, 'import tensorflow as tf\n'), ((1269, 1286), 'tensorflow.constant', 'tf.constant', (['(True)'], {}), '(True)\n', (1280, 1286), True, 'import tensorflow as tf\n'), ((1747, 1791), 'tensorflow.reduce_sum', 'tf.reduce_sum', (['(x * y)'], {'axis': '(-1)', 'keepdims': '(True)'}), '(x * y, axis=-1, keepdims=True)\n', (1760, 1791), True, 'import tensorflow as tf\n'), ((1804, 1828), 'tensorflow.cast', 'tf.cast', (['self.k', 'x.dtype'], {}), '(self.k, x.dtype)\n', (1811, 1828), True, 'import tensorflow as tf\n'), ((2299, 2340), 'tensorflow.linalg.norm', 'tf.linalg.norm', (['x'], {'axis': '(-1)', 'keepdims': '(True)'}), '(x, axis=-1, keepdims=True)\n', (2313, 2340), True, 'import tensorflow as tf\n'), ((2355, 2371), 'tensorflow_riemopt.manifolds.utils.get_eps', 'utils.get_eps', (['x'], {}), '(x)\n', (2368, 2371), False, 'from tensorflow_riemopt.manifolds import utils\n'), ((2386, 2442), 'tensorflow.clip_by_value', 'tf.clip_by_value', (['(sqrt_k * norm_x)', '(-1.0 + eps)', '(1.0 - eps)'], {}), '(sqrt_k * norm_x, -1.0 + eps, 1.0 - eps)\n', (2402, 2442), True, 'import tensorflow as tf\n'), ((3049, 3073), 'tensorflow.cast', 'tf.cast', (['self.k', 'x.dtype'], {}), '(self.k, x.dtype)\n', (3056, 3073), True, 'import tensorflow as tf\n'), ((3093, 3141), 'tensorflow.reduce_sum', 'tf.reduce_sum', (['(x * x)'], {'axis': '(-1)', 'keepdims': 'keepdims'}), '(x * x, axis=-1, keepdims=keepdims)\n', (3106, 3141), True, 'import tensorflow as tf\n'), ((3741, 3782), 'tensorflow.linalg.norm', 'tf.linalg.norm', (['x'], {'axis': '(-1)', 'keepdims': '(True)'}), '(x, axis=-1, keepdims=True)\n', (3755, 3782), True, 'import tensorflow as tf\n'), ((4094, 4141), 'tensorflow.linalg.norm', 'tf.linalg.norm', (['x_y'], {'axis': '(-1)', 'keepdims': 'keepdims'}), '(x_y, axis=-1, keepdims=keepdims)\n', (4108, 4141), True, 'import tensorflow as tf\n'), ((4156, 4172), 'tensorflow_riemopt.manifolds.utils.get_eps', 'utils.get_eps', (['x'], {}), '(x)\n', (4169, 4172), False, 'from tensorflow_riemopt.manifolds import utils\n'), ((4188, 4246), 'tensorflow.clip_by_value', 'tf.clip_by_value', (['(sqrt_k * norm_x_y)', '(-1.0 + eps)', '(1.0 - eps)'], {}), '(sqrt_k * norm_x_y, -1.0 + eps, 1.0 - eps)\n', (4204, 4246), True, 'import tensorflow as tf\n'), ((4394, 4435), 'tensorflow.linalg.norm', 'tf.linalg.norm', (['u'], {'axis': '(-1)', 'keepdims': '(True)'}), '(u, axis=-1, keepdims=True)\n', (4408, 4435), True, 'import tensorflow as tf\n'), ((4794, 4837), 'tensorflow.linalg.norm', 'tf.linalg.norm', (['x_y'], {'axis': '(-1)', 'keepdims': '(True)'}), '(x_y, axis=-1, keepdims=True)\n', (4808, 4837), True, 'import tensorflow as tf\n'), ((4852, 4868), 'tensorflow_riemopt.manifolds.utils.get_eps', 'utils.get_eps', (['x'], {}), '(x)\n', (4865, 4868), False, 'from tensorflow_riemopt.manifolds import utils\n'), ((4884, 4942), 'tensorflow.clip_by_value', 'tf.clip_by_value', (['(sqrt_k * norm_x_y)', '(-1.0 + eps)', '(1.0 - eps)'], {}), '(sqrt_k * norm_x_y, -1.0 + eps, 1.0 - eps)\n', (4900, 4942), True, 'import tensorflow as tf\n'), ((5413, 5454), 'tensorflow.linalg.norm', 'tf.linalg.norm', (['u'], {'axis': '(-1)', 'keepdims': '(True)'}), '(u, axis=-1, keepdims=True)\n', (5427, 5454), True, 'import tensorflow as tf\n'), ((5714, 5755), 'tensorflow.linalg.norm', 'tf.linalg.norm', (['u'], {'axis': '(-1)', 'keepdims': '(True)'}), '(u, axis=-1, keepdims=True)\n', (5728, 5755), True, 'import tensorflow as tf\n'), ((5978, 6019), 'tensorflow.linalg.norm', 'tf.linalg.norm', (['y'], {'axis': '(-1)', 'keepdims': '(True)'}), '(y, axis=-1, keepdims=True)\n', (5992, 6019), True, 'import tensorflow as tf\n'), ((1619, 1636), 'tensorflow.math.square', 'tf.math.square', (['x'], {}), '(x)\n', (1633, 1636), True, 'import tensorflow as tf\n'), ((1690, 1707), 'tensorflow.math.square', 'tf.math.square', (['y'], {}), '(y)\n', (1704, 1707), True, 'import tensorflow as tf\n'), ((2256, 2280), 'tensorflow.cast', 'tf.cast', (['self.k', 'x.dtype'], {}), '(self.k, x.dtype)\n', (2263, 2280), True, 'import tensorflow as tf\n'), ((3300, 3348), 'tensorflow.reduce_sum', 'tf.reduce_sum', (['(u * v)'], {'axis': '(-1)', 'keepdims': 'keepdims'}), '(u * v, axis=-1, keepdims=keepdims)\n', (3313, 3348), True, 'import tensorflow as tf\n'), ((3477, 3522), 'tensorflow.linalg.norm', 'tf.linalg.norm', (['u'], {'axis': '(-1)', 'keepdims': 'keepdims'}), '(u, axis=-1, keepdims=keepdims)\n', (3491, 3522), True, 'import tensorflow as tf\n'), ((3700, 3724), 'tensorflow.cast', 'tf.cast', (['self.k', 'x.dtype'], {}), '(self.k, x.dtype)\n', (3707, 3724), True, 'import tensorflow as tf\n'), ((4011, 4035), 'tensorflow.cast', 'tf.cast', (['self.k', 'x.dtype'], {}), '(self.k, x.dtype)\n', (4018, 4035), True, 'import tensorflow as tf\n'), ((4351, 4375), 'tensorflow.cast', 'tf.cast', (['self.k', 'x.dtype'], {}), '(self.k, x.dtype)\n', (4358, 4375), True, 'import tensorflow as tf\n'), ((4711, 4735), 'tensorflow.cast', 'tf.cast', (['self.k', 'x.dtype'], {}), '(self.k, x.dtype)\n', (4718, 4735), True, 'import tensorflow as tf\n'), ((5370, 5394), 'tensorflow.cast', 'tf.cast', (['self.k', 'x.dtype'], {}), '(self.k, x.dtype)\n', (5377, 5394), True, 'import tensorflow as tf\n'), ((5671, 5695), 'tensorflow.cast', 'tf.cast', (['self.k', 'u.dtype'], {}), '(self.k, u.dtype)\n', (5678, 5695), True, 'import tensorflow as tf\n'), ((5935, 5959), 'tensorflow.cast', 'tf.cast', (['self.k', 'y.dtype'], {}), '(self.k, y.dtype)\n', (5942, 5959), True, 'import tensorflow as tf\n'), ((6349, 6415), 'tensorflow.random.uniform', 'tf.random.uniform', (['shape'], {'minval': '(-0.001)', 'maxval': '(0.001)', 'dtype': 'dtype'}), '(shape, minval=-0.001, maxval=0.001, dtype=dtype)\n', (6366, 6415), True, 'import tensorflow as tf\n'), ((1172, 1193), 'tensorflow.ones_like', 'tf.ones_like', (['sq_norm'], {}), '(sq_norm)\n', (1184, 1193), True, 'import tensorflow as tf\n'), ((3836, 3854), 'tensorflow.ones_like', 'tf.ones_like', (['norm'], {}), '(norm)\n', (3848, 3854), True, 'import tensorflow as tf\n'), ((4266, 4285), 'tensorflow.math.atanh', 'tf.math.atanh', (['tanh'], {}), '(tanh)\n', (4279, 4285), True, 'import tensorflow as tf\n'), ((4512, 4558), 'tensorflow.math.tanh', 'tf.math.tanh', (['(sqrt_k * norm_u * lambda_x / 2.0)'], {}), '(sqrt_k * norm_u * lambda_x / 2.0)\n', (4524, 4558), True, 'import tensorflow as tf\n'), ((5031, 5050), 'tensorflow.math.atanh', 'tf.math.atanh', (['tanh'], {}), '(tanh)\n', (5044, 5050), True, 'import tensorflow as tf\n'), ((5467, 5497), 'tensorflow.math.tanh', 'tf.math.tanh', (['(sqrt_k * t / 2.0)'], {}), '(sqrt_k * t / 2.0)\n', (5479, 5497), True, 'import tensorflow as tf\n'), ((5771, 5800), 'tensorflow.math.tanh', 'tf.math.tanh', (['(sqrt_k * norm_u)'], {}), '(sqrt_k * norm_u)\n', (5783, 5800), True, 'import tensorflow as tf\n'), ((6035, 6065), 'tensorflow.math.atanh', 'tf.math.atanh', (['(sqrt_k * norm_y)'], {}), '(sqrt_k * norm_y)\n', (6048, 6065), True, 'import tensorflow as tf\n'), ((3909, 3925), 'tensorflow_riemopt.manifolds.utils.get_eps', 'utils.get_eps', (['x'], {}), '(x)\n', (3922, 3925), False, 'from tensorflow_riemopt.manifolds import utils\n'), ((2490, 2508), 'tensorflow.math.atanh', 'tf.math.atanh', (['tan'], {}), '(tan)\n', (2503, 2508), True, 'import tensorflow as tf\n')]
|
"""
:class:`.Photon` geocoder.
"""
from geopy.compat import urlencode, string_compare
from geopy.geocoders.base import (
Geocoder,
DEFAULT_FORMAT_STRING,
DEFAULT_TIMEOUT,
DEFAULT_SCHEME
)
from geopy.location import Location
from geopy.util import logger
__all__ = ("Photon", )
class Photon(Geocoder): # pylint: disable=W0223
"""
Geocoder using Photon geocoding service (data based on OpenStreetMap and
service provided by Komoot on https://photon.komoot.de).
Documentation at https://github.com/komoot/photon
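
    A minimal usage sketch (illustrative; requires network access):

        >>> geolocator = Photon()
        >>> location = geolocator.geocode("Berlin, Germany")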
"""
def __init__(
self,
format_string=DEFAULT_FORMAT_STRING,
scheme=DEFAULT_SCHEME,
timeout=DEFAULT_TIMEOUT,
proxies=None,
domain='photon.komoot.de',
user_agent=None,
): # pylint: disable=R0913
"""
Initialize a Photon/Komoot geocoder which aims to let you "search as
you type with OpenStreetMap". No API Key is needed by this platform.
:param str format_string: String containing '%s' where
the string to geocode should be interpolated before querying
the geocoder. For example: '%s, Mountain View, CA'. The default
is just '%s'.
:param str scheme: Use 'https' or 'http' as the API URL's scheme.
Default is https. Note that SSL connections' certificates are not
verified.
:param int timeout: Time, in seconds, to wait for the geocoding service
to respond before raising a :class:`geopy.exc.GeocoderTimedOut`
exception.
:param dict proxies: If specified, routes this geocoder's requests
through the specified proxy. E.g., {"https": "192.0.2.0"}. For
more information, see documentation on
:class:`urllib2.ProxyHandler`.
:param str domain: Should be the localized Photon domain to
connect to. The default is 'photon.komoot.de', but you
can change it to a domain of your own.
:param str user_agent: Use a custom User-Agent header.
.. versionadded:: 1.12.0
"""
super(Photon, self).__init__(
format_string, scheme, timeout, proxies, user_agent=user_agent
)
self.domain = domain.strip('/')
self.api = "%s://%s/api" % (self.scheme, self.domain)
self.reverse_api = "%s://%s/reverse" % (self.scheme, self.domain)
def geocode(
self,
query,
exactly_one=True,
timeout=None,
location_bias=None,
language=False,
limit=None,
osm_tag=None
): # pylint: disable=W0221
"""
Geocode a location query.
:param str query: The address or query you wish to geocode.
:param bool exactly_one: Return one result or a list of results, if
available.
:param int timeout: Time, in seconds, to wait for the geocoding service
to respond before raising a :class:`geopy.exc.GeocoderTimedOut`
exception. Set this only if you wish to override, on this call
only, the value set during the geocoder's initialization.
        :param location_bias: The coordinates to be used as location bias.
        :type location_bias: :class:`geopy.point.Point`, list or tuple of
            (latitude, longitude), or string as "%(latitude)s, %(longitude)s"
:param str language: Preferred language in which to return results.
:param int limit: Limit the number of returned results, defaults to no
limit.
.. versionadded:: 1.12.0
:param osm_tag: The expression to filter (include/exclude) by key and/
or value, str as 'key:value' or list/set of str if multiple filters
are required as ['key:!val', '!key', ':!value'].
:type osm_tag: str or list or set
"""
params = {
'q': self.format_string % query
}
if limit:
params['limit'] = int(limit)
if exactly_one:
params['limit'] = 1
if language:
params['lang'] = language
if location_bias:
try:
lat, lon = [x.strip() for x
in self._coerce_point_to_string(location_bias)
.split(',')]
params['lon'] = lon
params['lat'] = lat
except ValueError:
raise ValueError(("Location bias must be a"
" coordinate pair or Point"))
if osm_tag:
if isinstance(osm_tag, string_compare):
params['osm_tag'] = [osm_tag]
else:
if not isinstance(osm_tag, (list, set)):
raise ValueError(
"osm_tag must be a string expression or "
"a set/list of string expressions"
)
params['osm_tag'] = osm_tag
url = "?".join((self.api, urlencode(params, doseq=True)))
logger.debug("%s.geocode: %s", self.__class__.__name__, url)
return self._parse_json(
self._call_geocoder(url, timeout=timeout),
exactly_one
)
def reverse(
self,
query,
exactly_one=True,
timeout=None,
language=False,
limit=None,
): # pylint: disable=W0221
"""
Returns a reverse geocoded location.
:param query: The coordinates for which you wish to obtain the
closest human-readable addresses.
:type query: :class:`geopy.point.Point`, list or tuple of (latitude,
longitude), or string as "%(latitude)s, %(longitude)s"
:param bool exactly_one: Return one result or a list of results, if
available.
:param int timeout: Time, in seconds, to wait for the geocoding service
to respond before raising a :class:`geopy.exc.GeocoderTimedOut`
exception. Set this only if you wish to override, on this call
only, the value set during the geocoder's initialization.
:param str language: Preferred language in which to return results.
:param int limit: Limit the number of returned results, defaults to no
limit.
.. versionadded:: 1.12.0
"""
try:
lat, lon = [x.strip() for x in
self._coerce_point_to_string(query).split(',')]
except ValueError:
raise ValueError("Must be a coordinate pair or Point")
params = {
'lat': lat,
'lon': lon,
}
if limit:
params['limit'] = int(limit)
if exactly_one:
params['limit'] = 1
if language:
params['lang'] = language
url = "?".join((self.reverse_api, urlencode(params)))
logger.debug("%s.reverse: %s", self.__class__.__name__, url)
return self._parse_json(
self._call_geocoder(url, timeout=timeout), exactly_one
)
@classmethod
def _parse_json(cls, resources, exactly_one=True):
"""
Parse display name, latitude, and longitude from a JSON response.
"""
if not len(resources['features']): # pragma: no cover
return None
if exactly_one:
return cls.parse_resource(resources['features'][0])
else:
return [cls.parse_resource(resource) for resource
in resources['features']]
@classmethod
def parse_resource(cls, resource):
"""
Return location and coordinates tuple from dict.
"""
name_elements = ['name', 'housenumber', 'street',
'postcode', 'street', 'city',
'state', 'country']
name = [resource['properties'].get(k) for k
in name_elements if resource['properties'].get(k)]
location = ', '.join(name)
latitude = resource['geometry']['coordinates'][1] or None
longitude = resource['geometry']['coordinates'][0] or None
if latitude and longitude:
latitude = float(latitude)
longitude = float(longitude)
return Location(location, (latitude, longitude), resource)
|
[
"geopy.util.logger.debug",
"geopy.location.Location",
"geopy.compat.urlencode"
] |
[((5098, 5158), 'geopy.util.logger.debug', 'logger.debug', (['"""%s.geocode: %s"""', 'self.__class__.__name__', 'url'], {}), "('%s.geocode: %s', self.__class__.__name__, url)\n", (5110, 5158), False, 'from geopy.util import logger\n'), ((6971, 7031), 'geopy.util.logger.debug', 'logger.debug', (['"""%s.reverse: %s"""', 'self.__class__.__name__', 'url'], {}), "('%s.reverse: %s', self.__class__.__name__, url)\n", (6983, 7031), False, 'from geopy.util import logger\n'), ((8325, 8376), 'geopy.location.Location', 'Location', (['location', '(latitude, longitude)', 'resource'], {}), '(location, (latitude, longitude), resource)\n', (8333, 8376), False, 'from geopy.location import Location\n'), ((5058, 5087), 'geopy.compat.urlencode', 'urlencode', (['params'], {'doseq': '(True)'}), '(params, doseq=True)\n', (5067, 5087), False, 'from geopy.compat import urlencode, string_compare\n'), ((6943, 6960), 'geopy.compat.urlencode', 'urlencode', (['params'], {}), '(params)\n', (6952, 6960), False, 'from geopy.compat import urlencode, string_compare\n')]
|
# coding: UTF-8
'''
Created on Jun 17, 2014
@author: hernan
'''
import re
from usig_normalizador_amba.StringDireccion import StringDireccion
from usig_normalizador_amba.Callejero import Callejero
from usig_normalizador_amba.Calle import Calle
from usig_normalizador_amba.Errors import ErrorTextoSinDireccion, ErrorCalleInexistente, ErrorCruceInexistente, ErrorCalleInexistenteAEsaAltura
from usig_normalizador_amba.Direccion import Direccion
from usig_normalizador_amba.settings import default_settings
from usig_normalizador_amba.settings import CALLE, CALLE_ALTURA, CALLE_Y_CALLE
from usig_normalizador_amba.settings import MATCH_EXACTO, MATCH_PERMUTADO, MATCH_INCLUIDO, MATCH
from usig_normalizador_amba.commons import matcheaTexto
class NormalizadorDirecciones:
    '''
    NormalizadorDirecciones
    This class implements a complete address normalizer that uses a USIG street gazetteer (Callejero) to transform a string into a normalized address.
    @ivar c: The street gazetteer (callejero) of the district
    @type c: Callejero
    @ivar partido: The district whose gazetteer is to be instantiated
    @type partido: Partido
    '''
c = None
partido = None
def __init__(self, partido, config={}):
        '''
        @param partido: Indicates the district (partido) of the street gazetteer
        @type partido: Partido
        '''
# default config
self.config = default_settings.copy()
# custom config
self.config.update(config)
try:
if partido is None:
                raise Exception('A district (partido) must be specified.')
self.c = Callejero(partido, config)
self.partido = partido
except Exception as e:
raise e
def recargarCallejero(self):
try:
self.c.cargarCallejero()
except Exception as e:
raise e
def normalizar(self, direccion, maxOptions=10):
        '''
        @param direccion: The string to be transformed into an address
        @type direccion: Unicode
        @param maxOptions: Maximum number of options to return. Defaults to 10.
        @type maxOptions: Integer
        @return: The options found that match direccion
        @rtype: Array of Direccion
        '''
res = []
global errorGlobal
errorGlobal = None
if direccion == '':
raise ErrorCalleInexistente('')
if type(direccion) != str:
direccion = str(direccion, encoding='utf-8', errors='ignore')
strDir = StringDireccion(direccion)
for candidato in strDir.candidatos:
if candidato['tipo'] == CALLE:
res += self.buscarCalle(candidato['calle'], maxOptions)
elif candidato['tipo'] == CALLE_ALTURA:
try:
res += self.normalizarCalleAltura(candidato['calle'], candidato['altura'], maxOptions)
except Exception as error:
errorGlobal = error
elif candidato['tipo'] == CALLE_Y_CALLE:
try:
res += self.normalizarCalleYCalle(candidato['calle'], candidato['cruce'], maxOptions)
except Exception as error:
errorGlobal = error
if not res:
direccion_sin_palabras_claves = self._quitarPalabrasClaves(direccion)
if direccion_sin_palabras_claves != direccion:
try:
res = self.normalizar(direccion_sin_palabras_claves, maxOptions)
except:
pass
if isinstance(res, list):
if res:
return res
else:
if errorGlobal is not None:
raise errorGlobal
else:
raise ErrorCalleInexistente(strDir.strOriginal)
else:
return res
def _quitarPalabrasClaves(self, texto):
try:
patrones = [
'^avenida ', ' avenida ', ' avenida$',
                r'^avda\.? ', r' avda\.? ', r' avda\.?$',
                r'^av\.? ', r' av\.? ', r' av\.?$',
                '^pasaje ', ' pasaje ', ' pasaje$',
                r'^psje\.? ', r' psje\.? ', r' psje\.?$',
                r'^pje\.? ', r' pje\.? ', r' pje\.?$',
]
patron = '|'.join(patrones)
return re.sub(patron, ' ', texto, flags=re.IGNORECASE)
except:
return texto
def buscarCalle(self, inCalle, maxOptions):
res = self.c.buscarCalle(inCalle, maxOptions)
return res
def normalizarCalleAltura(self, inCalle, inAltura, maxOptions=10):
        '''
        Normalizes a street-and-number (calle-altura) address
        @param inCalle: The street to be normalized
        @type inCalle: String
        @param inAltura: The street number to be normalized
        @type inAltura: int
        @param maxOptions: Maximum number of options to return.
        @type maxOptions: Integer
        @return: The options found
        @rtype: Array of Direccion
        '''
opts = []
calles = self.c.buscarCalle(inCalle)
for calle in calles:
if calle.alturaValida(inAltura):
d = Direccion(calle, inAltura)
opts.append(d)
if(len(opts) == 0 and len(calles) > 0):
raise ErrorCalleInexistenteAEsaAltura(inCalle, calles, inAltura)
return opts
def normalizarCalleYCalle(self, inCalle, inCruce, maxOptions=10):
        '''
        Normalizes a street-corner (calle y calle) address
        @param inCalle: Street to be normalized
        @type inCalle: String
        @param inCruce: Cross street to be normalized
        @type inCruce: String
        @param maxOptions: Maximum number of options to return. Defaults to 10.
        @type maxOptions: Integer
        @return: The options found that match the address
        @rtype: Array of Direccion
        '''
calles = self.c.buscarCalle(inCalle)
opts = [[], [], [], [], []]
for calle in calles:
for idCruce in calle.cruces:
cruces = self.c.buscarCodigo(idCruce)
for cruce in cruces:
res = matcheaTexto(inCruce, cruce[2])
if res:
objCruce = Calle(cruce[0], cruce[1], [], cruce[4], calle.partido, cruce[5])
opts[res].append(Direccion(calle, 0, objCruce))
if(len(opts[MATCH_EXACTO]) >= maxOptions):
break
if(len(opts[MATCH_EXACTO]) >= maxOptions):
break
opts = (opts[MATCH_EXACTO] + opts[MATCH_PERMUTADO] + opts[MATCH_INCLUIDO] + opts[MATCH])[:maxOptions]
if(len(opts) == 0 and len(calles) > 0):
raise ErrorCruceInexistente(inCalle, [], inCruce, [])
return opts
def _buscarIndicesDeCalleEnLista(self, palabras, sentido):
        # if sentido is -1 the window slides to the left; if it is 1, to the right
retval = None
cant_palabras = len(palabras)
for i in range(cant_palabras):
indice = (cant_palabras - 1 - i, cant_palabras) if sentido == -1 else (0, i + 1)
calle = ' '.join(palabras[indice[0]:indice[1]])
try:
self.normalizar(calle)
retval = indice
except:
break
return retval
def _buscarDireccionCalleAltura(self, token):
retval = None
        palabras = re.split(r'\s', token.string[:token.start()])
altura = token.groupdict()['dir_altura']
indice = self._buscarIndicesDeCalleEnLista(palabras, -1)
if indice:
try:
calle = ' '.join(palabras[indice[0]:indice[1]])
direccion = '{0} {1}'.format(calle, altura)
res = [r for r in self.normalizar(direccion) if r.tipo == CALLE_ALTURA]
if not res:
raise Exception()
posicion = len(' '.join(palabras[:indice[0]]))
                posicion = posicion if indice[0] == 0 else posicion + 1  # add the space between the address and the preceding non-address text
texto = token.string[posicion:token.end()]
retval = {'posicion': posicion, 'texto': texto, 'direcciones': res}
except Exception:
pass
return retval
def _buscarDireccionCalleCalle(self, token):
retval = None
        palabras_izq = re.split(r'\s', token.string[:token.start()])
        palabras_der = re.split(r'\s', token.string[token.end():])
indice_izq = self._buscarIndicesDeCalleEnLista(palabras_izq, -1)
indice_der = self._buscarIndicesDeCalleEnLista(palabras_der, 1)
if indice_izq and indice_der:
try:
calle_izq = ' '.join(palabras_izq[indice_izq[0]:indice_izq[1]])
calle_der = ' '.join(palabras_der[indice_der[0]:indice_der[1]])
direccion = '{0}{1}{2}'.format(calle_izq, token.groupdict()['esq_conector'], calle_der)
                res = [r for r in self.normalizar(direccion) if r.tipo == CALLE_Y_CALLE]
if not res:
raise Exception()
posicion = len(' '.join(palabras_izq[:indice_izq[0]]))
                posicion = posicion if indice_izq[0] == 0 else posicion + 1  # add the space between the address and the preceding non-address text
retval = {'posicion': posicion, 'texto': direccion, 'direcciones': res}
except Exception:
pass
return retval
def buscarDireccion(self, texto=''):
texto = str(texto)
        ''' Pattern: (dir_calle [al] dir_altura) | (esq_calle y|e esq_cruce) '''
        patron_calle_altura = r'(?:(?P<dir_conector>(?:\s+al)?\s+)(?P<dir_altura>[0-9]+))'
        patron_calle_calle = r'(?P<esq_conector>\s+(?:y|e)\s+)'
patron = r'{0}|{1}'.format(patron_calle_altura, patron_calle_calle)
tokens = re.finditer(patron, texto, re.I)
retval = []
for token in tokens:
res = None
if token.groupdict()['dir_altura']:
res = self._buscarDireccionCalleAltura(token)
elif token.groupdict()['esq_conector']:
res = self._buscarDireccionCalleCalle(token)
if res:
retval.append(res)
if not retval:
raise ErrorTextoSinDireccion(texto)
return retval
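# Hedged usage sketch (the Partido instance and its data source are assumed,
# not shown in this excerpt):
# nd = NormalizadorDirecciones(partido)
# opciones = nd.normalizar('Callao y Corrientes')         # list of Direccion
# hallazgos = nd.buscarDireccion('reunion en Callao 1234')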
|
[
"usig_normalizador_amba.Calle.Calle",
"usig_normalizador_amba.Direccion.Direccion",
"usig_normalizador_amba.commons.matcheaTexto",
"re.finditer",
"usig_normalizador_amba.Errors.ErrorTextoSinDireccion",
"usig_normalizador_amba.Callejero.Callejero",
"usig_normalizador_amba.Errors.ErrorCalleInexistente",
"usig_normalizador_amba.StringDireccion.StringDireccion",
"usig_normalizador_amba.Errors.ErrorCalleInexistenteAEsaAltura",
"usig_normalizador_amba.settings.default_settings.copy",
"re.sub",
"usig_normalizador_amba.Errors.ErrorCruceInexistente"
] |
[((1367, 1390), 'usig_normalizador_amba.settings.default_settings.copy', 'default_settings.copy', ([], {}), '()\n', (1388, 1390), False, 'from usig_normalizador_amba.settings import default_settings\n'), ((2501, 2527), 'usig_normalizador_amba.StringDireccion.StringDireccion', 'StringDireccion', (['direccion'], {}), '(direccion)\n', (2516, 2527), False, 'from usig_normalizador_amba.StringDireccion import StringDireccion\n'), ((10003, 10035), 're.finditer', 're.finditer', (['patron', 'texto', 're.I'], {}), '(patron, texto, re.I)\n', (10014, 10035), False, 'import re\n'), ((1577, 1603), 'usig_normalizador_amba.Callejero.Callejero', 'Callejero', (['partido', 'config'], {}), '(partido, config)\n', (1586, 1603), False, 'from usig_normalizador_amba.Callejero import Callejero\n'), ((2348, 2373), 'usig_normalizador_amba.Errors.ErrorCalleInexistente', 'ErrorCalleInexistente', (['""""""'], {}), "('')\n", (2369, 2373), False, 'from usig_normalizador_amba.Errors import ErrorTextoSinDireccion, ErrorCalleInexistente, ErrorCruceInexistente, ErrorCalleInexistenteAEsaAltura\n'), ((4314, 4361), 're.sub', 're.sub', (['patron', '""" """', 'texto'], {'flags': 're.IGNORECASE'}), "(patron, ' ', texto, flags=re.IGNORECASE)\n", (4320, 4361), False, 'import re\n'), ((5304, 5362), 'usig_normalizador_amba.Errors.ErrorCalleInexistenteAEsaAltura', 'ErrorCalleInexistenteAEsaAltura', (['inCalle', 'calles', 'inAltura'], {}), '(inCalle, calles, inAltura)\n', (5335, 5362), False, 'from usig_normalizador_amba.Errors import ErrorTextoSinDireccion, ErrorCalleInexistente, ErrorCruceInexistente, ErrorCalleInexistenteAEsaAltura\n'), ((6764, 6811), 'usig_normalizador_amba.Errors.ErrorCruceInexistente', 'ErrorCruceInexistente', (['inCalle', '[]', 'inCruce', '[]'], {}), '(inCalle, [], inCruce, [])\n', (6785, 6811), False, 'from usig_normalizador_amba.Errors import ErrorTextoSinDireccion, ErrorCalleInexistente, ErrorCruceInexistente, ErrorCalleInexistenteAEsaAltura\n'), ((10429, 10458), 'usig_normalizador_amba.Errors.ErrorTextoSinDireccion', 'ErrorTextoSinDireccion', (['texto'], {}), '(texto)\n', (10451, 10458), False, 'from usig_normalizador_amba.Errors import ErrorTextoSinDireccion, ErrorCalleInexistente, ErrorCruceInexistente, ErrorCalleInexistenteAEsaAltura\n'), ((5179, 5205), 'usig_normalizador_amba.Direccion.Direccion', 'Direccion', (['calle', 'inAltura'], {}), '(calle, inAltura)\n', (5188, 5205), False, 'from usig_normalizador_amba.Direccion import Direccion\n'), ((3761, 3802), 'usig_normalizador_amba.Errors.ErrorCalleInexistente', 'ErrorCalleInexistente', (['strDir.strOriginal'], {}), '(strDir.strOriginal)\n', (3782, 3802), False, 'from usig_normalizador_amba.Errors import ErrorTextoSinDireccion, ErrorCalleInexistente, ErrorCruceInexistente, ErrorCalleInexistenteAEsaAltura\n'), ((6176, 6207), 'usig_normalizador_amba.commons.matcheaTexto', 'matcheaTexto', (['inCruce', 'cruce[2]'], {}), '(inCruce, cruce[2])\n', (6188, 6207), False, 'from usig_normalizador_amba.commons import matcheaTexto\n'), ((6271, 6335), 'usig_normalizador_amba.Calle.Calle', 'Calle', (['cruce[0]', 'cruce[1]', '[]', 'cruce[4]', 'calle.partido', 'cruce[5]'], {}), '(cruce[0], cruce[1], [], cruce[4], calle.partido, cruce[5])\n', (6276, 6335), False, 'from usig_normalizador_amba.Calle import Calle\n'), ((6377, 6406), 'usig_normalizador_amba.Direccion.Direccion', 'Direccion', (['calle', '(0)', 'objCruce'], {}), '(calle, 0, objCruce)\n', (6386, 6406), False, 'from usig_normalizador_amba.Direccion import Direccion\n')]
|
import re
import json
import webbrowser
import sublime
import sublime_plugin
import mdpopups
SCOPES = [
"support.function.builtin.scr",
"keyword.control.flow.scr",
"keyword.control.loop.scr",
"keyword.control.scr"
]
VERSION = {
"AA": "Medal of Honor: Allied Assault",
"SH": "Medal of Honor: Allied Assault - Spearhead",
"BT": "Medal of Honor: Allied Assault - Breakthrough",
"REBORN": "MoH:AA 1.12 Reborn Patch"
}
class MOHAATooltips(sublime_plugin.EventListener):
def on_selection_modified(self, view):
if not Pref.show_tooltips:
return
if Pref.isActive:
return
if not any(x in view.scope_name(view.sel()[0].a) for x in SCOPES):
return
command = Pref.data.get(view.substr(view.word(view.sel()[0])).lower())
if not command:
return
Pref.isActive = True
global copy, menus
menus = ["<body id='mohtooltip'>"]
menus.append("<style>{}</style>".format(Pref.css))
menus.append("<div class='header'>")
if command["class"]:
menus.append("<div class='class-container'>")
menus.append("<span>Class:</span>")
for i, cl in enumerate(command["class"]):
if i: menus.append(",")
menus.append("<span class='class'> {}</span>".format(cl))
menus.append("</div>")
if command["gamever"]:
menus.append("<div class='version-container'>")
menus.append(" [")
for i, ver in enumerate(command["gamever"]):
if i:
menus.append(", ")
menus.append("<span class='version {0}' title='{1}'>{0}</span>".format(ver, VERSION[ver]))
menus.append("]</div>")
menus.append("</div>")
menus.append("<div class='content'>")
if command["syntax"]:
name, *args = re.split("(\\W)", command["syntax"], maxsplit=1)
menus.append("<div class='syntax-container'>")
menus.append("<strong class='name'>{}</strong>".format(name))
if args:
menus.append("<var>{}</var>".format("".join(args).replace("\n", "<br>")))
menus.append("</div>")
if command["description"]:
menus.append("<div class='description-container'>{}</div>".format(command["description"].replace("\n", "<br>")))
menus.append("</div>")
if command["example"]:
lang = mdpopups.get_language_from_view(view) or ""
example = mdpopups.syntax_highlight(view, command["example"], language=lang)
menus.append("<div class='example-container'>")
menus.append("Example:")
menus.append("<code class='example-code'>{}</code>".format(example))
menus.append("<a class='example-copy' href='tooltip.copy'>copy</a>")
menus.append("</div>")
copy = command["example"]
menus.append("<div class='footer'>")
menus.append("<a href='https://x-null.net/wiki' title='MoH:AA Reborn Wiki'>Wiki</a>".format(name))
menus.append("<a href='https://www.x-null.net/forums/forum.php' title='xNULL | MoH:AA 1.12 Reborn Forums'>xNULL</a>")
menus.append("<a href='http://mohreborn.com' title='mohreborn.com'>MoHReborn</a>")
menus.append("</div>")
menus.append("</body>")
max_width, max_height = view.viewport_extent()
max_width *= 0.90
max_height *= 0.90
a = view.word(view.sel()[0]).begin()
b = view.word(view.sel()[0]).end()
self.view = view
view.show_popup(
"".join(menus),
sublime.HIDE_ON_MOUSE_MOVE_AWAY,
location=b,
max_width=max_width,
max_height=max_height,
on_navigate=self.on_navigate,
on_hide=self.on_hide
)
view.add_regions(
"moh_tooltip",
[sublime.Region(a, b)],
"invalid",
"",
sublime.HIDE_ON_MINIMAP | sublime.DRAW_NO_FILL
)
def on_navigate(self, link):
if link == "tooltip.copy":
sublime.set_clipboard(copy)
self.view.update_popup("".join(menus).replace(">copy</a>", ">copied✔</a>"))
else:
webbrowser.open_new_tab(link)
def on_hide(self):
self.view.erase_regions("moh_tooltip")
Pref.isActive = False
def lang_map_settings():
# load the settings to transfer
res = sublime.load_resource("Packages/MOHAA/tooltips/lang_map.sublime-settings")
lang_map = sublime.decode_value(res).get("mdpopups.sublime_user_lang_map", {})
# load user settings
user_settings = sublime.load_settings("Preferences.sublime-settings")
user_lang_map = user_settings.get("mdpopups.sublime_user_lang_map", {})
if user_lang_map.get("Morpheus") == lang_map.get("Morpheus"):
return
# transfer the settings to the user settings
user_lang_map.update(lang_map)
user_settings.set("mdpopups.sublime_user_lang_map", user_lang_map)
# save the user settings
sublime.save_settings("Preferences.sublime-settings")
def plugin_loaded():
global Pref
class Pref:
def load(self):
Pref.isActive = False
Pref.show_tooltips = settings.get("show_tooltips", True)
Pref = Pref()
settings = sublime.load_settings("MOHAA.sublime-settings")
morpheus = json.loads(sublime.load_resource("Packages/MOHAA/tooltips/db/Morpheus.json"))
reborn = json.loads(sublime.load_resource("Packages/MOHAA/tooltips/db/Reborn.json"))
morpheus.update(reborn)
Pref.data = morpheus
Pref.css = sublime.load_resource("Packages/MOHAA/tooltips/style.css").replace("\r", "")
Pref.load()
lang_map_settings()
settings.add_on_change("reload", lambda:Pref.load())
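# Hedged settings sketch for "MOHAA.sublime-settings": only the key read in
# Pref.load above is shown; true is already its default there.
# { "show_tooltips": true }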
|
[
"re.split",
"sublime.save_settings",
"sublime.decode_value",
"sublime.Region",
"mdpopups.syntax_highlight",
"webbrowser.open_new_tab",
"mdpopups.get_language_from_view",
"sublime.load_settings",
"sublime.set_clipboard",
"sublime.load_resource"
] |
[((4518, 4592), 'sublime.load_resource', 'sublime.load_resource', (['"""Packages/MOHAA/tooltips/lang_map.sublime-settings"""'], {}), "('Packages/MOHAA/tooltips/lang_map.sublime-settings')\n", (4539, 4592), False, 'import sublime\n'), ((4722, 4775), 'sublime.load_settings', 'sublime.load_settings', (['"""Preferences.sublime-settings"""'], {}), "('Preferences.sublime-settings')\n", (4743, 4775), False, 'import sublime\n'), ((5124, 5177), 'sublime.save_settings', 'sublime.save_settings', (['"""Preferences.sublime-settings"""'], {}), "('Preferences.sublime-settings')\n", (5145, 5177), False, 'import sublime\n'), ((5394, 5441), 'sublime.load_settings', 'sublime.load_settings', (['"""MOHAA.sublime-settings"""'], {}), "('MOHAA.sublime-settings')\n", (5415, 5441), False, 'import sublime\n'), ((5468, 5533), 'sublime.load_resource', 'sublime.load_resource', (['"""Packages/MOHAA/tooltips/db/Morpheus.json"""'], {}), "('Packages/MOHAA/tooltips/db/Morpheus.json')\n", (5489, 5533), False, 'import sublime\n'), ((5559, 5622), 'sublime.load_resource', 'sublime.load_resource', (['"""Packages/MOHAA/tooltips/db/Reborn.json"""'], {}), "('Packages/MOHAA/tooltips/db/Reborn.json')\n", (5580, 5622), False, 'import sublime\n'), ((1926, 1974), 're.split', 're.split', (['"""(\\\\W)"""', "command['syntax']"], {'maxsplit': '(1)'}), "('(\\\\W)', command['syntax'], maxsplit=1)\n", (1934, 1974), False, 'import re\n'), ((2563, 2629), 'mdpopups.syntax_highlight', 'mdpopups.syntax_highlight', (['view', "command['example']"], {'language': 'lang'}), "(view, command['example'], language=lang)\n", (2588, 2629), False, 'import mdpopups\n'), ((4173, 4200), 'sublime.set_clipboard', 'sublime.set_clipboard', (['copy'], {}), '(copy)\n', (4194, 4200), False, 'import sublime\n'), ((4315, 4344), 'webbrowser.open_new_tab', 'webbrowser.open_new_tab', (['link'], {}), '(link)\n', (4338, 4344), False, 'import webbrowser\n'), ((4608, 4633), 'sublime.decode_value', 'sublime.decode_value', (['res'], {}), '(res)\n', (4628, 4633), False, 'import sublime\n'), ((5692, 5750), 'sublime.load_resource', 'sublime.load_resource', (['"""Packages/MOHAA/tooltips/style.css"""'], {}), "('Packages/MOHAA/tooltips/style.css')\n", (5713, 5750), False, 'import sublime\n'), ((2497, 2534), 'mdpopups.get_language_from_view', 'mdpopups.get_language_from_view', (['view'], {}), '(view)\n', (2528, 2534), False, 'import mdpopups\n'), ((3961, 3981), 'sublime.Region', 'sublime.Region', (['a', 'b'], {}), '(a, b)\n', (3975, 3981), False, 'import sublime\n')]
|
from setuptools import find_packages, setup
setup(
name="Klimawatch",
description="Ein Open Data-Plattform zur Darstellung von kommunalen CO2-Emissionen und Schutzkonzepten",
version="0.1.0",
author="<NAME>",
author_email="<EMAIL>",
url="https://klimawatch.de/",
classifiers=[
"Programming Language :: Python :: 3",
"Programming Language :: Python :: 3.7",
"Operating System :: POSIX",
],
packages=find_packages(),
scripts=[],
python_requires="~=3.7",
install_requires=["pandas>=1.2.4", "plotly>=5.0.0", "numpy", "scipy"],
extras_require={
"dev": [
"black",
"docformatter",
"jupyter",
"pre-commit",
"pylama",
"pytest",
"rope",
],
},
)
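# Hedged usage note: given the "dev" extra declared above, an editable
# development install would be:
#   pip install -e ".[dev]"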
|
[
"setuptools.find_packages"
] |
[((459, 474), 'setuptools.find_packages', 'find_packages', ([], {}), '()\n', (472, 474), False, 'from setuptools import find_packages, setup\n')]
|
'''
The whole reading model.
CNN + LSTM + A classifier with multinomial distribution.
'''
import torch
from torch import optim
from torch import nn
import torch.nn.functional as F
from torch.distributions import Bernoulli, Categorical
from torchtext import datasets
from torchtext import data
import os
import time
import numpy as np
import random
import argparse
from sklearn.metrics import accuracy_score
from networks import CNN_LSTM, Policy_C, Policy_N, Policy_S, ValueNetwork
from utils.utils import sample_policy_c, sample_policy_n, sample_policy_s, evaluate_earlystop, compute_policy_value_losses
from utils.utils import cnn_cost, lstm_cost, c_cost, n_cost, s_cost, cnn_whole
desc = '''
The whole reading model.
CNN + LSTM + A classifier with multinomial distribution.
'''
parser = argparse.ArgumentParser(description=desc)
parser.add_argument('--seed', type=int, default=2019, metavar='S',
help='random seed (default: 2019)')
args = parser.parse_args()
def set_seed(seed):
random.seed(seed)
np.random.seed(seed)
torch.manual_seed(seed)
torch.cuda.manual_seed(seed)
torch.backends.cudnn.deterministic = True
torch.backends.cudnn.benchmark = False
set_seed(args.seed)
TEXT = data.Field(sequential=True, tokenize='spacy', lower=True, fix_length=400) #
LABEL = data.LabelField(dtype=torch.float)
print('Splitting data...')
# download the IMDB dataset
train, test_data = datasets.IMDB.splits(TEXT, LABEL) # 25,000 training and 25,000 testing data
train_data, valid_data = train.split(split_ratio=0.8) # split training data into 20,000 training and 5,000 validation samples
print(f'Number of training examples: {len(train_data)}')
print(f'Number of validation examples: {len(valid_data)}')
print(f'Number of testing examples: {len(test_data)}')
MAX_VOCAB_SIZE = 25000
# use pretrained embedding of glove
print('Building vocabulary...')
TEXT.build_vocab(train_data, max_size=MAX_VOCAB_SIZE, vectors="glove.6B.100d", unk_init = torch.Tensor.normal_)
LABEL.build_vocab(train_data)
# split the datasets into batches
BATCH_SIZE = 64 # the batch size for a dataset iterator
device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
print(f'device: {device}')
print('Building iterators...')
train_iterator, valid_iterator, test_iterator = data.BucketIterator.splits(
(train_data, valid_data, test_data),
batch_size=BATCH_SIZE,
device=device)
# set up parameters
INPUT_DIM = len(TEXT.vocab)
EMBEDDING_DIM = 100
KER_SIZE = 5
HIDDEN_DIM = 128
LABEL_DIM = 2
N_FILTERS = 128
learning_rate = 0.001
# the number of training epoches
num_of_epoch = 10
# set up the criterion
criterion = nn.CrossEntropyLoss().to(device)
# set up models
clstm = CNN_LSTM(INPUT_DIM, EMBEDDING_DIM, KER_SIZE, N_FILTERS, HIDDEN_DIM).to(device)
policy_c = Policy_C(HIDDEN_DIM, HIDDEN_DIM, LABEL_DIM).to(device)
# set up optimiser
params = list(clstm.parameters()) + list(policy_c.parameters())
optimizer = optim.Adam(params, lr=learning_rate)
# add pretrained embeddings
pretrained_embeddings = TEXT.vocab.vectors
clstm.embedding.weight.data.copy_(pretrained_embeddings)
clstm.embedding.weight.requires_grad = True # update the initial weights
def evaluate(iterator):
clstm.eval()
policy_c.eval()
true_labels = []
pred_labels = []
eval_loss = 0
for index, valid in enumerate(iterator):
label = valid.label
text = valid.text.transpose(0,1)
batch_size = label.size()[0]
h_0 = torch.zeros([1, batch_size, 128]).to(device)
ht = clstm(text, h_0)
label_raws = policy_c(ht)
label_probs = F.softmax(label_raws, dim=1)
m = Categorical(label_probs)
pred_label = m.sample()
true_labels.extend(label.cpu().numpy())
pred_labels.extend(pred_label.cpu().squeeze().numpy())
loss = criterion(label_raws.squeeze(), label.to(torch.long))
eval_loss += loss/len(iterator)
eval_accuracy = accuracy_score(true_labels, pred_labels)
return eval_loss, eval_accuracy
def main():
'''
Training and evaluation of the model.
'''
print('training starts...')
for epoch in range(num_of_epoch):
clstm.train()
policy_c.train()
true_labels = []
pred_labels = []
train_loss = 0
for index, train in enumerate(train_iterator):
label = train.label # output_dim:64
text = train.text.transpose(0,1) #: 64, 400
batch_size = label.size()[0]
h_0 = torch.zeros([1, batch_size, 128]).to(device)
ht = clstm(text, h_0) #: 64, 128
label_raws = policy_c(ht)
optimizer.zero_grad()
loss = criterion(label_raws.squeeze(), label.to(torch.long))
loss.backward()
optimizer.step()
# draw a prediction label
label_probs = F.softmax(label_raws.detach(), dim=1)
m = Categorical(label_probs)
pred_label = m.sample()
true_labels.extend(label.cpu().numpy())
pred_labels.extend(pred_label.cpu().squeeze().numpy())
train_loss += loss/len(train_iterator)
train_accuracy = accuracy_score(true_labels, pred_labels)
print('epoch:{0}, train accuracy:{1}, train_loss:{2}'.format(epoch, train_accuracy, train_loss))
eval_loss, eval_accuracy = evaluate(valid_iterator)
print('epoch:{0}, eval accuracy:{1}, eval_loss:{2}'.format(epoch, eval_accuracy, eval_loss))
# testing
test_loss, test_accuracy = evaluate(test_iterator)
    print('\nTest accuracy: {0}, test loss: {1}'.format(test_accuracy, test_loss))
if __name__ == '__main__':
main()
cost = cnn_whole + c_cost + lstm_cost * 24
print('whole reading FLOPs per data: ', cost)
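# Minimal inference sketch (comment-only; the dummy batch is an assumption,
# everything else is defined above):
# dummy = torch.randint(0, INPUT_DIM, (2, 400)).to(device)      # 2 fake reviews
# ht = clstm(dummy, torch.zeros([1, 2, 128]).to(device))        # (2, 128)
# pred = Categorical(F.softmax(policy_c(ht), dim=1)).sample()   # (2,)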
|
[
"numpy.random.seed",
"argparse.ArgumentParser",
"torch.distributions.Categorical",
"torchtext.datasets.IMDB.splits",
"torch.manual_seed",
"sklearn.metrics.accuracy_score",
"torch.cuda.manual_seed",
"torch.nn.CrossEntropyLoss",
"torch.nn.functional.softmax",
"torchtext.data.LabelField",
"torch.optim.Adam",
"random.seed",
"torch.cuda.is_available",
"networks.CNN_LSTM",
"networks.Policy_C",
"torch.zeros",
"torchtext.data.BucketIterator.splits",
"torchtext.data.Field"
] |
[((795, 836), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'description': 'desc'}), '(description=desc)\n', (818, 836), False, 'import argparse\n'), ((1238, 1311), 'torchtext.data.Field', 'data.Field', ([], {'sequential': '(True)', 'tokenize': '"""spacy"""', 'lower': '(True)', 'fix_length': '(400)'}), "(sequential=True, tokenize='spacy', lower=True, fix_length=400)\n", (1248, 1311), False, 'from torchtext import data\n'), ((1323, 1357), 'torchtext.data.LabelField', 'data.LabelField', ([], {'dtype': 'torch.float'}), '(dtype=torch.float)\n', (1338, 1357), False, 'from torchtext import data\n'), ((1433, 1466), 'torchtext.datasets.IMDB.splits', 'datasets.IMDB.splits', (['TEXT', 'LABEL'], {}), '(TEXT, LABEL)\n', (1453, 1466), False, 'from torchtext import datasets\n'), ((2308, 2414), 'torchtext.data.BucketIterator.splits', 'data.BucketIterator.splits', (['(train_data, valid_data, test_data)'], {'batch_size': 'BATCH_SIZE', 'device': 'device'}), '((train_data, valid_data, test_data), batch_size=\n BATCH_SIZE, device=device)\n', (2334, 2414), False, 'from torchtext import data\n'), ((2961, 2997), 'torch.optim.Adam', 'optim.Adam', (['params'], {'lr': 'learning_rate'}), '(params, lr=learning_rate)\n', (2971, 2997), False, 'from torch import optim\n'), ((1012, 1029), 'random.seed', 'random.seed', (['seed'], {}), '(seed)\n', (1023, 1029), False, 'import random\n'), ((1034, 1054), 'numpy.random.seed', 'np.random.seed', (['seed'], {}), '(seed)\n', (1048, 1054), True, 'import numpy as np\n'), ((1059, 1082), 'torch.manual_seed', 'torch.manual_seed', (['seed'], {}), '(seed)\n', (1076, 1082), False, 'import torch\n'), ((1087, 1115), 'torch.cuda.manual_seed', 'torch.cuda.manual_seed', (['seed'], {}), '(seed)\n', (1109, 1115), False, 'import torch\n'), ((3958, 3998), 'sklearn.metrics.accuracy_score', 'accuracy_score', (['true_labels', 'pred_labels'], {}), '(true_labels, pred_labels)\n', (3972, 3998), False, 'from sklearn.metrics import accuracy_score\n'), ((2164, 2189), 'torch.cuda.is_available', 'torch.cuda.is_available', ([], {}), '()\n', (2187, 2189), False, 'import torch\n'), ((2663, 2684), 'torch.nn.CrossEntropyLoss', 'nn.CrossEntropyLoss', ([], {}), '()\n', (2682, 2684), False, 'from torch import nn\n'), ((2720, 2787), 'networks.CNN_LSTM', 'CNN_LSTM', (['INPUT_DIM', 'EMBEDDING_DIM', 'KER_SIZE', 'N_FILTERS', 'HIDDEN_DIM'], {}), '(INPUT_DIM, EMBEDDING_DIM, KER_SIZE, N_FILTERS, HIDDEN_DIM)\n', (2728, 2787), False, 'from networks import CNN_LSTM, Policy_C, Policy_N, Policy_S, ValueNetwork\n'), ((2810, 2853), 'networks.Policy_C', 'Policy_C', (['HIDDEN_DIM', 'HIDDEN_DIM', 'LABEL_DIM'], {}), '(HIDDEN_DIM, HIDDEN_DIM, LABEL_DIM)\n', (2818, 2853), False, 'from networks import CNN_LSTM, Policy_C, Policy_N, Policy_S, ValueNetwork\n'), ((3620, 3648), 'torch.nn.functional.softmax', 'F.softmax', (['label_raws'], {'dim': '(1)'}), '(label_raws, dim=1)\n', (3629, 3648), True, 'import torch.nn.functional as F\n'), ((3661, 3685), 'torch.distributions.Categorical', 'Categorical', (['label_probs'], {}), '(label_probs)\n', (3672, 3685), False, 'from torch.distributions import Bernoulli, Categorical\n'), ((5209, 5249), 'sklearn.metrics.accuracy_score', 'accuracy_score', (['true_labels', 'pred_labels'], {}), '(true_labels, pred_labels)\n', (5223, 5249), False, 'from sklearn.metrics import accuracy_score\n'), ((4953, 4977), 'torch.distributions.Categorical', 'Categorical', (['label_probs'], {}), '(label_probs)\n', (4964, 4977), False, 'from torch.distributions import Bernoulli, Categorical\n'), ((3489, 3522), 'torch.zeros', 'torch.zeros', (['[1, batch_size, 128]'], {}), '([1, batch_size, 128])\n', (3500, 3522), False, 'import torch\n'), ((4527, 4560), 'torch.zeros', 'torch.zeros', (['[1, batch_size, 128]'], {}), '([1, batch_size, 128])\n', (4538, 4560), False, 'import torch\n')]
|
#!/usr/bin/env python3
"""Script to verify if a manual migration to upgrade from CrateDB 0.57 to 1.0
is required.
`blobs.path` filesystem layout changes:
From
<blobs.path>/indices/<indexName>/<shard>/blobs
To
<blobs.path>/nodes/<node_lock>/indices/<indexName>/<shard>/blobs
`path.data` handling changes - if it contains multiple paths all of them will
be utilized.
Before (up to 0.57):
<path.data.1>/ <-- contains blob data
<path.data.2>/ <-- might contain shard-state data
(to migrate blob data for the shards that contain
shard data in this folder need to be moved)
After
<path.data.1>/ <-- contains blob data
<path.data.2>/ <-- contains blob data
"""
import re
import sys
import pathlib
import argparse
from collections import OrderedDict
from crate.client import connect
def info(message, *args):
"""Print info messages to stdout"""
print(message.format(*args))
def ok(message, *args):
"""Print OK messages to stdout"""
print('\033[32;1m' + message.format(*args) + '\033[0m')
def warn(message, *args):
"""Print WARN messages to stdout"""
print('\033[33;1m' + message.format(*args) + '\033[0m')
def error(message, *args):
"""Print ERROR messages to stderr"""
print('\033[31;1m' + message.format(*args) + '\033[0m', file=sys.stderr)
def info_grouped(groups):
"""Print INFO messages grouped by node to stdout"""
for node, msg in groups.items():
ok('[{}]', node)
[info(x) for x in msg]
def exit_if_no_blob_tables(c):
c.execute("select count(*) from information_schema.tables where table_schema = 'blob'")
if c.fetchone()[0] == 0:
ok('No blob tables were found. No migration required.')
sys.exit(0)
def get_target_path(path, blob_path):
""" Return the target path for a blob path
>>> get_target_path('/t/d/c/nodes/0/indices/t1/1', '/t/b/indices/t1/1/blobs')
'/t/b/nodes/0/indices/t1/1/blobs'
"""
    node_lock = re.findall(r'/nodes/(\d+)/indices/', path)
if not node_lock:
raise ValueError("Invalid path: {} doesn't contain /nodes/<n>/indices/:".format(path))
indices_start = blob_path.rindex('/indices/')
return blob_path[0:indices_start] + '/nodes/' + node_lock[0] + blob_path[indices_start:]
def has_custom_blob_path(rows):
""" Checks if any blob_paths have a custom blob path
>>> has_custom_blob_path([
... ('crate1',
... '/t/d/c/nodes/0/indices/.blob_t1/1',
... '/t/b/indices/.blob_t1/1/blobs'),
... ('crate2',
... '/t/d/c/nodes/0/indices/.blob_t1/1',
... '/t/b/indices/.blob_t1/1/blobs'),
... ])
(True, OrderedDict([('crate1', ['mv "/t/b/indices/.blob_t1/1/blobs" "/t/b/nodes/0/indices/.blob_t1/1/blobs"']), ('crate2', ['mv "/t/b/indices/.blob_t1/1/blobs" "/t/b/nodes/0/indices/.blob_t1/1/blobs"'])]))
"""
ret, msg = False, OrderedDict()
for node, path, blob_path in rows:
if node not in msg:
msg[node] = []
path = pathlib.Path(path)
blob_path = pathlib.Path(blob_path)
try:
blob_path.relative_to(path)
except ValueError:
ret = True
msg[node].append('mv "{}" "{}"'.format(
blob_path,
get_target_path(str(path), str(blob_path))))
return ret, msg
def has_path_diff(rows):
""" Checks if there are any blob_paths that aren't childs of path
>>> has_path_diff([
... ('crate1', '/tmp/data1/x', '/tmp/data1/x/blobs'),
... ('crate1', '/tmp/data2/x', '/tmp/data1/x/blobs'),
... ('crate2', '/tmp/data1/x', '/tmp/data1/x/blobs'),
... ('crate2', '/tmp/data2/x', '/tmp/data1/x/blobs'),
... ])
(True, OrderedDict([('crate1', ['mv "/tmp/data1/x/blobs" "/tmp/data2/x"']), ('crate2', ['mv "/tmp/data1/x/blobs" "/tmp/data2/x"'])]))
"""
ret, msg = False, OrderedDict()
for node, path, blob_path in rows:
if node not in msg:
msg[node] = []
path = pathlib.Path(path)
blob_path = pathlib.Path(blob_path)
try:
blob_path.relative_to(path)
except ValueError:
ret = True
msg[node].append('mv "{}" "{}"'.format(blob_path, path))
return ret, msg
def exit_if_multiple_data_paths(c):
c.execute("select fs['data']['path'] from sys.nodes")
fs_data_path_rows = c.fetchall()
c.execute("select _node['hostname'] as node, path, blob_path from sys.shards \
where schema_name = 'blob' and path is not null order by 1, 2")
path_and_blob_paths = c.fetchall()
if any(len(row[0]) > 1 for row in fs_data_path_rows):
ret, msg = has_path_diff(path_and_blob_paths)
if ret:
warn('WARNING: Multiple path.data paths have been found. Migration is required!')
warn('Move the blob paths to their new location using the following commands on the given hosts:')
info_grouped(msg)
sys.exit(ret)
def exit_if_custom_blob_path_set(c):
c.execute("select _node['hostname'] as node, path, blob_path from sys.shards \
where schema_name = 'blob' and path is not null order by 1, 2")
path_and_blob_paths = c.fetchall()
ret, msg = has_custom_blob_path(path_and_blob_paths)
if ret:
warn('WARNING: A custom blob path set. Migration is required!')
warn('Move the blob paths to their new location using the following commands on the given hosts:')
info_grouped(msg)
sys.exit(ret)
def to_version_tuple(v):
major, minor, hotfix = v.split('.', maxsplit=3)
return (int(major), int(minor), int(hotfix))
def exit_if_invalid_crate_version(c):
c.execute("select version['number'] from sys.nodes")
res = c.fetchall()
if not all(to_version_tuple(v) >= (0, 57, 3) for (v,) in res):
error('Some nodes in the cluster run a version lower than 0.57.3.')
warn('Please upgrade your cluster to the latest 0.57 version first!')
sys.exit(1)
if not all(to_version_tuple(v) < (0, 58, 0) for (v,) in res):
        error('Some nodes in the cluster run a version greater than or equal to 1.0.0.')
sys.exit(1)
def main():
parser = argparse.ArgumentParser(prog='migrate.py',
description=__doc__,
formatter_class=argparse.RawDescriptionHelpFormatter)
parser.add_argument('--host', type=str, default='localhost:4200')
parser.add_argument('--self-test', action='count')
args = parser.parse_args()
if args.self_test:
import doctest
doctest.testmod()
return
info('Running migration test against {}', args.host)
conn = connect(servers=args.host)
c = conn.cursor()
exit_if_invalid_crate_version(c)
exit_if_no_blob_tables(c)
# TODO: warn that shard allocation should be turned off?
exit_if_multiple_data_paths(c)
exit_if_custom_blob_path_set(c)
ok('No migration required.')
sys.exit(0)
if __name__ == "__main__":
main()
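# Hedged invocation sketch (flags as defined in main() above):
#   python migrate.py --host localhost:4200
#   python migrate.py --self-test    # runs the embedded doctests instead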
|
[
"argparse.ArgumentParser",
"doctest.testmod",
"pathlib.Path",
"crate.client.connect",
"re.findall",
"collections.OrderedDict",
"sys.exit"
] |
[((2040, 2082), 're.findall', 're.findall', (['"""/nodes/(\\\\d+)/indices/"""', 'path'], {}), "('/nodes/(\\\\d+)/indices/', path)\n", (2050, 2082), False, 'import re\n'), ((6279, 6400), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'prog': '"""migrate.py"""', 'description': '__doc__', 'formatter_class': 'argparse.RawDescriptionHelpFormatter'}), "(prog='migrate.py', description=__doc__,\n formatter_class=argparse.RawDescriptionHelpFormatter)\n", (6302, 6400), False, 'import argparse\n'), ((6783, 6809), 'crate.client.connect', 'connect', ([], {'servers': 'args.host'}), '(servers=args.host)\n', (6790, 6809), False, 'from crate.client import connect\n'), ((7069, 7080), 'sys.exit', 'sys.exit', (['(0)'], {}), '(0)\n', (7077, 7080), False, 'import sys\n'), ((1796, 1807), 'sys.exit', 'sys.exit', (['(0)'], {}), '(0)\n', (1804, 1807), False, 'import sys\n'), ((2957, 2970), 'collections.OrderedDict', 'OrderedDict', ([], {}), '()\n', (2968, 2970), False, 'from collections import OrderedDict\n'), ((3080, 3098), 'pathlib.Path', 'pathlib.Path', (['path'], {}), '(path)\n', (3092, 3098), False, 'import pathlib\n'), ((3119, 3142), 'pathlib.Path', 'pathlib.Path', (['blob_path'], {}), '(blob_path)\n', (3131, 3142), False, 'import pathlib\n'), ((3955, 3968), 'collections.OrderedDict', 'OrderedDict', ([], {}), '()\n', (3966, 3968), False, 'from collections import OrderedDict\n'), ((4078, 4096), 'pathlib.Path', 'pathlib.Path', (['path'], {}), '(path)\n', (4090, 4096), False, 'import pathlib\n'), ((4117, 4140), 'pathlib.Path', 'pathlib.Path', (['blob_path'], {}), '(blob_path)\n', (4129, 4140), False, 'import pathlib\n'), ((5577, 5590), 'sys.exit', 'sys.exit', (['ret'], {}), '(ret)\n', (5585, 5590), False, 'import sys\n'), ((6068, 6079), 'sys.exit', 'sys.exit', (['(1)'], {}), '(1)\n', (6076, 6079), False, 'import sys\n'), ((6240, 6251), 'sys.exit', 'sys.exit', (['(1)'], {}), '(1)\n', (6248, 6251), False, 'import sys\n'), ((6681, 6698), 'doctest.testmod', 'doctest.testmod', ([], {}), '()\n', (6696, 6698), False, 'import doctest\n'), ((5042, 5055), 'sys.exit', 'sys.exit', (['ret'], {}), '(ret)\n', (5050, 5055), False, 'import sys\n')]
|
""" Read the manually-curated xlink xlsx file and export it to an xlink ontology
:Author: <NAME> <<EMAIL>>
:Date: 2019-08-05
:Copyright: 2019, Karr Lab
:License: MIT
"""
import pandas as pd
from ruamel import yaml
XLSX_PATH = './xlink.xlsx'
YML_PATH = './xlink.yml'
def main():
df = pd.read_excel(XLSX_PATH)
# print(df)
yml_data = {}
for index, row in df.iterrows():
row_dict = {}
row_dict['synonyms'] = []
row_dict['synonyms'].append(row['resid_name'])
if not pd.isnull(row['bond_common_name']):
row_dict['common_name'] = row['bond_common_name']
row_dict['l_monomer_alphabet'] = row['left_monomer_alphabet']
row_dict['l_monomer'] = row['left_monomer']
row_dict['l_bond_atoms'] = [atom.strip() for atom in row['left_bond_atoms'].split(',')]
row_dict['l_displaced_atoms'] = [atom.strip() for atom in row['left_displace_atoms'].split(',')]
row_dict['r_monomer_alphabet'] = row['right_monomer_alphabet']
row_dict['r_monomer'] = row['right_monomer']
row_dict['r_bond_atoms'] = [atom.strip() for atom in row['right_bond_atoms'].split(',')]
row_dict['r_displaced_atoms'] = [atom.strip() for atom in row['right_displace_atoms'].split(',')]
yml_data[row['xlink_id']] = row_dict
yaml_writer = yaml.YAML()
yaml_writer.default_flow_style = False
with open(YML_PATH, 'wb') as file:
yaml_writer.dump(yml_data, file)
if __name__ == '__main__':
main()
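# Hedged sketch of the resulting YAML shape (keys are exactly the row_dict
# keys above; the id and values are illustrative placeholders):
# <xlink_id>:
#   synonyms: [<resid_name>]
#   common_name: <bond_common_name>
#   l_monomer_alphabet: <left_monomer_alphabet>
#   l_bond_atoms: [<atom>, <atom>]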
|
[
"pandas.read_excel",
"pandas.isnull",
"ruamel.yaml.YAML"
] |
[((292, 316), 'pandas.read_excel', 'pd.read_excel', (['XLSX_PATH'], {}), '(XLSX_PATH)\n', (305, 316), True, 'import pandas as pd\n'), ((1332, 1343), 'ruamel.yaml.YAML', 'yaml.YAML', ([], {}), '()\n', (1341, 1343), False, 'from ruamel import yaml\n'), ((518, 552), 'pandas.isnull', 'pd.isnull', (["row['bond_common_name']"], {}), "(row['bond_common_name'])\n", (527, 552), True, 'import pandas as pd\n')]
|
# DISTRIBUTION STATEMENT A. Approved for public release: distribution unlimited.
#
# This material is based upon work supported by the Assistant Secretary of Defense for Research and
# Engineering under Air Force Contract No. FA8721-05-C-0002 and/or FA8702-15-D-0001. Any opinions,
# findings, conclusions or recommendations expressed in this material are those of the author(s) and
# do not necessarily reflect the views of the Assistant Secretary of Defense for Research and
# Engineering.
#
# © 2017 Massachusetts Institute of Technology.
#
# MIT Proprietary, Subject to FAR52.227-11 Patent Rights - Ownership by the contractor (May 2014)
#
# The software/firmware is provided to you on an As-Is basis
#
# Delivered to the U.S. Government with Unlimited Rights, as defined in DFARS Part 252.227-7013 or
# 7014 (Feb 2014). Notwithstanding any copyright notice, U.S. Government rights in this work are
# defined by DFARS 252.227-7013 or DFARS 252.227-7014 as detailed above. Use of this work other than
# as specifically authorized by the U.S. Government may violate any copyrights that exist in this
# work.
import numpy as np
import json
import pickle
import torch
import math
import h5py
from torch.utils.data import Dataset, DataLoader
from torch.utils.data.dataloader import default_collate
import random
def invert_dict(d):
return {v: k for k, v in d.items()}
def load_vocab(path):
with open(path, 'r') as f:
vocab = json.load(f)
vocab['question_idx_to_token'] = invert_dict(vocab['question_token_to_idx'])
vocab['answer_idx_to_token'] = invert_dict(vocab['answer_token_to_idx'])
return vocab
class GQADataset(Dataset):
def __init__(self, vocab, answers, questions, questions_len, q_image_indices, question_id, object_feature, spatial_feature,
img_info, num_answer):
# convert data to tensor
self.all_answers = answers
self.all_questions = torch.from_numpy(np.asarray(questions)).long()
self.all_questions_len = torch.from_numpy(
np.asarray(questions_len)).long()
self.all_q_image_idxs = np.asarray(q_image_indices)
self.all_question_idxs = torch.from_numpy(np.asarray(question_id)).long()
self.spatial_feature = spatial_feature
self.object_feature = object_feature
self.img_info = img_info
self.num_answer = num_answer
self.vocab = vocab
def __getitem__(self, index):
answer = self.all_answers[index] if self.all_answers is not None else None
question = self.all_questions[index]
question_len = self.all_questions_len[index]
image_idx = self.all_q_image_idxs[index].item()
question_idx = self.all_question_idxs[index].item()
index = self.img_info[str(image_idx)]['index']
w = self.img_info[str(image_idx)]['width']
h = self.img_info[str(image_idx)]['height']
image_idx = torch.from_numpy(np.array([1]))
with h5py.File(self.object_feature, 'r') as fObject:
node_feat = fObject['features'][index] # (100, 2048)
            boxes = fObject['bboxes'][index] # (100, 4)
with h5py.File(self.spatial_feature, 'r') as fSpatial:
scene_feat = fSpatial['features'][index] # (2048, 7, 7)
scene_feat = scene_feat.mean(2).mean(1)
scene_feat = np.expand_dims(scene_feat, axis=0)
scene_box = np.array([0, 0, w, h])
scene_box = np.expand_dims(scene_box, axis=0)
        node_feat = np.concatenate([scene_feat, node_feat], axis=0) # (101, 2048)
boxes = np.concatenate([scene_box, boxes], axis=0)
spatial_feat = [0] * boxes.shape[0]
for i in range(boxes.shape[0]):
bbox = np.copy(boxes[i])
bbox_x = bbox[2] - bbox[0]
bbox_y = bbox[3] - bbox[1]
area = (bbox[2] - bbox[0]) * (bbox[3] - bbox[1]) / (w * h)
bbox[0] /= w
bbox[1] /= h
bbox[2] /= w
bbox[3] /= h
spatial_feat[i] = np.array([bbox[0], bbox[1], bbox[2], bbox[3], bbox_x / w, bbox_y / h, area])
spatial_feat = torch.from_numpy(np.array(spatial_feat)).float()
node_feat = torch.from_numpy(node_feat)
return (question_idx, image_idx, answer, question, question_len, node_feat, spatial_feat)
def __len__(self):
return len(self.all_questions)
class GQADataLoader(DataLoader):
def __init__(self, **kwargs):
vocab_json_path = str(kwargs.pop('vocab_json'))
print('loading vocab from %s' % (vocab_json_path))
vocab = load_vocab(vocab_json_path)
question_pt_path = str(kwargs.pop('question_pt'))
print('loading questions from %s' % (question_pt_path))
with open(question_pt_path, 'rb') as f:
obj = pickle.load(f)
questions = obj['questions']
questions_len = obj['questions_len']
q_image_indices = obj['image_ids']
            question_id = obj['question_ids'].astype(np.int64)
answers = np.asarray(obj['answers'])
glove_matrix = obj['glove']
# print(q_image_indices)
# exit()
if 'train_num' in kwargs:
train_num = kwargs.pop('train_num')
if train_num > 0:
choices = random.choices(range(len(questions)), k=train_num)
questions = questions[choices]
questions_len = questions_len[choices]
q_image_indices = q_image_indices[choices]
question_id = question_id[choices]
answers = answers[choices]
if 'val_num' in kwargs:
val_num = kwargs.pop('val_num')
if val_num > 0:
choices = random.choices(range(len(questions)), k=val_num)
questions = questions[choices]
questions_len = questions_len[choices]
q_image_indices = q_image_indices[choices]
question_id = question_id[choices]
if 'test_num' in kwargs:
test_num = kwargs.pop('test_num')
if test_num > 0:
choices = random.choices(range(len(questions)), k=test_num)
questions = questions[choices]
questions_len = questions_len[choices]
q_image_indices = q_image_indices[choices]
question_id = question_id[choices]
self.object_feature = kwargs.pop('object_feature')
print('loading object feature from %s' % (self.object_feature))
self.spatial_feature = kwargs.pop('spatial_feature')
print('loading spatial feature from %s' % (self.spatial_feature))
self.img_info = kwargs.pop('img_info')
with open(self.img_info, "r") as file:
self.img_info = json.load(file)
self.dataset = GQADataset(vocab, answers, questions, questions_len, q_image_indices, question_id, self.object_feature,
self.spatial_feature, self.img_info, len(vocab['answer_token_to_idx']))
self.vocab = vocab
self.batch_size = kwargs['batch_size']
self.glove_matrix = glove_matrix
kwargs['collate_fn'] = default_collate
super().__init__(self.dataset, **kwargs)
def __len__(self):
return math.ceil(len(self.dataset) / self.batch_size)
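# Hedged construction sketch (keyword names are exactly the ones popped in
# GQADataLoader.__init__ above; the paths are placeholders):
# loader = GQADataLoader(vocab_json='vocab.json', question_pt='questions.pt',
#                        object_feature='objects.h5', spatial_feature='spatial.h5',
#                        img_info='img_info.json', batch_size=64)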
|
[
"h5py.File",
"json.load",
"numpy.copy",
"numpy.asarray",
"numpy.expand_dims",
"pickle.load",
"numpy.array",
"numpy.concatenate",
"torch.from_numpy"
] |
[((1444, 1456), 'json.load', 'json.load', (['f'], {}), '(f)\n', (1453, 1456), False, 'import json\n'), ((2111, 2138), 'numpy.asarray', 'np.asarray', (['q_image_indices'], {}), '(q_image_indices)\n', (2121, 2138), True, 'import numpy as np\n'), ((3505, 3552), 'numpy.concatenate', 'np.concatenate', (['[scene_feat, node_feat]'], {'axis': '(0)'}), '([scene_feat, node_feat], axis=0)\n', (3519, 3552), True, 'import numpy as np\n'), ((3584, 3626), 'numpy.concatenate', 'np.concatenate', (['[scene_box, boxes]'], {'axis': '(0)'}), '([scene_box, boxes], axis=0)\n', (3598, 3626), True, 'import numpy as np\n'), ((4198, 4225), 'torch.from_numpy', 'torch.from_numpy', (['node_feat'], {}), '(node_feat)\n', (4214, 4225), False, 'import torch\n'), ((2938, 2951), 'numpy.array', 'np.array', (['[1]'], {}), '([1])\n', (2946, 2951), True, 'import numpy as np\n'), ((2966, 3001), 'h5py.File', 'h5py.File', (['self.object_feature', '"""r"""'], {}), "(self.object_feature, 'r')\n", (2975, 3001), False, 'import h5py\n'), ((3150, 3186), 'h5py.File', 'h5py.File', (['self.spatial_feature', '"""r"""'], {}), "(self.spatial_feature, 'r')\n", (3159, 3186), False, 'import h5py\n'), ((3345, 3379), 'numpy.expand_dims', 'np.expand_dims', (['scene_feat'], {'axis': '(0)'}), '(scene_feat, axis=0)\n', (3359, 3379), True, 'import numpy as np\n'), ((3404, 3426), 'numpy.array', 'np.array', (['[0, 0, w, h]'], {}), '([0, 0, w, h])\n', (3412, 3426), True, 'import numpy as np\n'), ((3451, 3484), 'numpy.expand_dims', 'np.expand_dims', (['scene_box'], {'axis': '(0)'}), '(scene_box, axis=0)\n', (3465, 3484), True, 'import numpy as np\n'), ((3731, 3748), 'numpy.copy', 'np.copy', (['boxes[i]'], {}), '(boxes[i])\n', (3738, 3748), True, 'import numpy as np\n'), ((4028, 4104), 'numpy.array', 'np.array', (['[bbox[0], bbox[1], bbox[2], bbox[3], bbox_x / w, bbox_y / h, area]'], {}), '([bbox[0], bbox[1], bbox[2], bbox[3], bbox_x / w, bbox_y / h, area])\n', (4036, 4104), True, 'import numpy as np\n'), ((4805, 4819), 'pickle.load', 'pickle.load', (['f'], {}), '(f)\n', (4816, 4819), False, 'import pickle\n'), ((5040, 5066), 'numpy.asarray', 'np.asarray', (["obj['answers']"], {}), "(obj['answers'])\n", (5050, 5066), True, 'import numpy as np\n'), ((6778, 6793), 'json.load', 'json.load', (['file'], {}), '(file)\n', (6787, 6793), False, 'import json\n'), ((1952, 1973), 'numpy.asarray', 'np.asarray', (['questions'], {}), '(questions)\n', (1962, 1973), True, 'import numpy as np\n'), ((2045, 2070), 'numpy.asarray', 'np.asarray', (['questions_len'], {}), '(questions_len)\n', (2055, 2070), True, 'import numpy as np\n'), ((2189, 2212), 'numpy.asarray', 'np.asarray', (['question_id'], {}), '(question_id)\n', (2199, 2212), True, 'import numpy as np\n'), ((4146, 4168), 'numpy.array', 'np.array', (['spatial_feat'], {}), '(spatial_feat)\n', (4154, 4168), True, 'import numpy as np\n')]
|
# coding: utf-8
import re
import six
from huaweicloudsdkcore.utils.http_utils import sanitize_for_serialization
class ShowResourceHistoryRequest:
"""
Attributes:
openapi_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
sensitive_list = []
openapi_types = {
'resource_id': 'str',
'marker': 'str',
'limit': 'int',
'earlier_time': 'int',
'later_time': 'int',
'chronological_order': 'str'
}
attribute_map = {
'resource_id': 'resource_id',
'marker': 'marker',
'limit': 'limit',
'earlier_time': 'earlier_time',
'later_time': 'later_time',
'chronological_order': 'chronological_order'
}
def __init__(self, resource_id=None, marker=None, limit=None, earlier_time=None, later_time=None, chronological_order=None):
"""ShowResourceHistoryRequest - a model defined in huaweicloud sdk"""
self._resource_id = None
self._marker = None
self._limit = None
self._earlier_time = None
self._later_time = None
self._chronological_order = None
self.discriminator = None
self.resource_id = resource_id
if marker is not None:
self.marker = marker
if limit is not None:
self.limit = limit
if earlier_time is not None:
self.earlier_time = earlier_time
if later_time is not None:
self.later_time = later_time
if chronological_order is not None:
self.chronological_order = chronological_order
@property
def resource_id(self):
"""Gets the resource_id of this ShowResourceHistoryRequest.
        Resource ID
:return: The resource_id of this ShowResourceHistoryRequest.
:rtype: str
"""
return self._resource_id
@resource_id.setter
def resource_id(self, resource_id):
"""Sets the resource_id of this ShowResourceHistoryRequest.
        Resource ID
:param resource_id: The resource_id of this ShowResourceHistoryRequest.
:type: str
"""
self._resource_id = resource_id
@property
def marker(self):
"""Gets the marker of this ShowResourceHistoryRequest.
        Pagination parameter; pass the marker returned by the previous request to fetch the current page
:return: The marker of this ShowResourceHistoryRequest.
:rtype: str
"""
return self._marker
@marker.setter
def marker(self, marker):
"""Sets the marker of this ShowResourceHistoryRequest.
        Pagination parameter; pass the marker returned by the previous request to fetch the current page
:param marker: The marker of this ShowResourceHistoryRequest.
:type: str
"""
self._marker = marker
@property
def limit(self):
"""Gets the limit of this ShowResourceHistoryRequest.
        Maximum number of results to return
:return: The limit of this ShowResourceHistoryRequest.
:rtype: int
"""
return self._limit
@limit.setter
def limit(self, limit):
"""Sets the limit of this ShowResourceHistoryRequest.
        Maximum number of results to return
:param limit: The limit of this ShowResourceHistoryRequest.
:type: int
"""
self._limit = limit
@property
def earlier_time(self):
"""Gets the earlier_time of this ShowResourceHistoryRequest.
        Start of the query time range; if unset, defaults to the earliest time
:return: The earlier_time of this ShowResourceHistoryRequest.
:rtype: int
"""
return self._earlier_time
@earlier_time.setter
def earlier_time(self, earlier_time):
"""Sets the earlier_time of this ShowResourceHistoryRequest.
        Start of the query time range; if unset, defaults to the earliest time
:param earlier_time: The earlier_time of this ShowResourceHistoryRequest.
:type: int
"""
self._earlier_time = earlier_time
@property
def later_time(self):
"""Gets the later_time of this ShowResourceHistoryRequest.
        End of the query time range; if unset, defaults to the current time
:return: The later_time of this ShowResourceHistoryRequest.
:rtype: int
"""
return self._later_time
@later_time.setter
def later_time(self, later_time):
"""Sets the later_time of this ShowResourceHistoryRequest.
        End of the query time range; if unset, defaults to the current time
:param later_time: The later_time of this ShowResourceHistoryRequest.
:type: int
"""
self._later_time = later_time
@property
def chronological_order(self):
"""Gets the chronological_order of this ShowResourceHistoryRequest.
        Chronological order of the returned data; defaults to reverse (newest first)
:return: The chronological_order of this ShowResourceHistoryRequest.
:rtype: str
"""
return self._chronological_order
@chronological_order.setter
def chronological_order(self, chronological_order):
"""Sets the chronological_order of this ShowResourceHistoryRequest.
        Chronological order of the returned data; defaults to reverse (newest first)
:param chronological_order: The chronological_order of this ShowResourceHistoryRequest.
:type: str
"""
self._chronological_order = chronological_order
def to_dict(self):
"""Returns the model properties as a dict"""
result = {}
for attr, _ in six.iteritems(self.openapi_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
if attr in self.sensitive_list:
result[attr] = "****"
else:
result[attr] = value
return result
def to_str(self):
"""Returns the string representation of the model"""
import simplejson as json
if six.PY2:
import sys
reload(sys)
sys.setdefaultencoding("utf-8")
return json.dumps(sanitize_for_serialization(self), ensure_ascii=False)
def __repr__(self):
"""For `print`"""
return self.to_str()
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, ShowResourceHistoryRequest):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
"""Returns true if both objects are not equal"""
return not self == other
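# Hedged usage sketch (parameter names from __init__ above; the values are
# illustrative assumptions):
# request = ShowResourceHistoryRequest(resource_id='my-resource-id', limit=20)
# print(request.to_str())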
|
[
"huaweicloudsdkcore.utils.http_utils.sanitize_for_serialization",
"six.iteritems",
"sys.setdefaultencoding"
] |
[((5413, 5446), 'six.iteritems', 'six.iteritems', (['self.openapi_types'], {}), '(self.openapi_types)\n', (5426, 5446), False, 'import six\n'), ((6431, 6462), 'sys.setdefaultencoding', 'sys.setdefaultencoding', (['"""utf-8"""'], {}), "('utf-8')\n", (6453, 6462), False, 'import sys\n'), ((6489, 6521), 'huaweicloudsdkcore.utils.http_utils.sanitize_for_serialization', 'sanitize_for_serialization', (['self'], {}), '(self)\n', (6515, 6521), False, 'from huaweicloudsdkcore.utils.http_utils import sanitize_for_serialization\n')]
|
# Copyright (c) 2019 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
from ..layers import collective
from ..framework import Parameter
__parallel_ctx__clz__ = None
def _is_data_parallel_mode():
global __parallel_ctx__clz__
return __parallel_ctx__clz__ is not None and int(
os.getenv("PADDLE_TRAINERS_NUM", "1")) > 1
def _is_parallel_ctx_initialized():
global __parallel_ctx__clz__
return __parallel_ctx__clz__ is not None
def _set_parallel_ctx(nccl_parallel_context):
global __parallel_ctx__clz__
assert __parallel_ctx__clz__ is None, \
"ParallelContext can only be initialized once."
__parallel_ctx__clz__ = nccl_parallel_context
def _init_parallel_ctx():
global __parallel_ctx__clz__
assert __parallel_ctx__clz__ is not None, \
"ParallelContext should be initialized."
__parallel_ctx__clz__.init()
def _broadcast_parameters(parameters):
for param in parameters:
# In model parallel, some parameters are split into multiple devices,
# so we could not broadcast these parameters.
if param.is_distributed: continue
if isinstance(param, Parameter) and param.trainable:
collective._broadcast(param, 0, sync_mode=True)
|
[
"os.getenv"
] |
[((843, 880), 'os.getenv', 'os.getenv', (['"""PADDLE_TRAINERS_NUM"""', '"""1"""'], {}), "('PADDLE_TRAINERS_NUM', '1')\n", (852, 880), False, 'import os\n')]
|
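The guard in _is_data_parallel_mode combines two conditions: an initialized parallel context and more than one trainer reported by the PADDLE_TRAINERS_NUM environment variable. A standalone sketch of that predicate (it mirrors the logic above rather than importing the Paddle module; is_data_parallel is an illustrative name):

import os

def is_data_parallel(ctx):
    # same predicate as _is_data_parallel_mode: context set AND trainer count > 1
    return ctx is not None and int(os.getenv("PADDLE_TRAINERS_NUM", "1")) > 1

os.environ["PADDLE_TRAINERS_NUM"] = "4"
print(is_data_parallel(object()))   # True: context present, 4 trainers
print(is_data_parallel(None))       # False: context not initialized
os.environ["PADDLE_TRAINERS_NUM"] = "1"
print(is_data_parallel(object()))   # False: single trainer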
"""
Create a .csv file that contains the Sentinel-2 values
with the bands as columns and the individual pixels as rows.
Call script as
python 06_create_full_csv.py DATE LEVEL
with
DATE: date of Sentinel-2 image as YYYYMMDD
LEVEL: processing level of Sentinel-2 image (either L1C or L2A)
"""
import sys
import numpy as np
import pandas as pd
from skimage.io import imread
# arguments upon executing script
date = sys.argv[1]
level = sys.argv[2]
print(
f'Open {date}_{level}_merged.tif and convert pixel in area to .csv file..',
end=' ')
# read tif
data = imread(f'data/new_data/{date}_data/{date}_{level}_merged.tif')
height, width = data.shape[0], data.shape[1]
if level == 'L1C':
bands = ['b1', 'b2', 'b3',
'b4', 'b5', 'b6',
'b7', 'b8', 'b9',
'b10', 'b11', 'b12',
'b8a']
elif level == 'L2A':
bands = ['b1', 'b2', 'b3',
'b4', 'b5', 'b6',
'b7', 'b8', 'b9',
'b11', 'b12', 'b8a',
'AOT', 'WVP', 'CLD',
'SCL', 'SNW']
else:
    print(f'{level} is not a valid LEVEL argument, choose either L1C or L2A.')
    raise SystemExit(1)  # bail out: `bands` is undefined beyond this point
# flatten array from shape (height, width, channels)
# to (height*width, channels)
data_flat = np.zeros((height*width, len(bands)+1))
for i in range(len(bands)+1):
data_flat[:, i] = data[:, :, i].flatten()
# transform to pd.DataFrame
df = pd.DataFrame(data=data_flat,
columns=bands+['is_in_area'])
# drop pixel outside of area of interest
df = df[df['is_in_area'] == 1]
df = df.drop(columns=['is_in_area'])
# save as csv
df.to_csv(f'data/new_data/{date}_data/{date}_{level}_full.csv',
sep=',',
encoding='utf-8')
print('done.')
|
[
"pandas.DataFrame",
"skimage.io.imread"
] |
[((568, 630), 'skimage.io.imread', 'imread', (['f"""data/new_data/{date}_data/{date}_{level}_merged.tif"""'], {}), "(f'data/new_data/{date}_data/{date}_{level}_merged.tif')\n", (574, 630), False, 'from skimage.io import imread\n'), ((1383, 1443), 'pandas.DataFrame', 'pd.DataFrame', ([], {'data': 'data_flat', 'columns': "(bands + ['is_in_area'])"}), "(data=data_flat, columns=bands + ['is_in_area'])\n", (1395, 1443), True, 'import pandas as pd\n')]
|
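The per-band loop above is the core reshaping trick: each (height, width) channel plane is flattened row-major into one column, so row k of the resulting table holds every band value of pixel k. A toy numpy check of that layout with synthetic data (no Sentinel-2 file required):

import numpy as np

h, w, c = 2, 3, 4                          # stand-in raster dimensions
data = np.arange(h * w * c).reshape(h, w, c)
flat = np.zeros((h * w, c))
for i in range(c):
    flat[:, i] = data[:, :, i].flatten()
assert (flat[0] == data[0, 0]).all()       # first row = all channels of pixel (0, 0)
assert (flat[w] == data[1, 0]).all()       # row w = first pixel of the second image row
print(flat.shape)                          # (6, 4)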
import datetime
from decimal import Decimal
import factory
import pytest
from mquery import HistoryEntry, filter_history, group_history_by_date, read_history
class HistoryEntryFactory(factory.Factory):
class Meta:
model = HistoryEntry
date = factory.Faker("past_date", start_date="-7d")
description = factory.Faker("sentence", nb_words=8)
category = factory.Faker("sentence", nb_words=3)
amount = factory.Faker(
"pydecimal", left_digits=3, right_digits=2, positive=False, max_value=-1
)
currency = "PLN"
def test_read_history(tmpdir):
test_data = b"""
# First line of the header
#
# Last line of the header
2015-10-15;LOREM;;;-1234,50 PLN
2019-04-30;"LOREM IPSUM DOLOR SIT AMET";;FOO;-0,12 PLN
2020-12-03;;;"FOO BAR";6543,21 PLN
"""
expected_history = [
HistoryEntry(
datetime.date(2015, 10, 15), "LOREM", "", Decimal("-1234.50"), "PLN"
),
HistoryEntry(
datetime.date(2019, 4, 30),
"LOREM IPSUM DOLOR SIT AMET",
"FOO",
Decimal("-0.12"),
"PLN",
),
HistoryEntry(
datetime.date(2020, 12, 3), "", "FOO BAR", Decimal("6543.21"), "PLN"
),
]
test_data_path = tmpdir.join("test_data.csv")
with open(test_data_path, "wb") as test_data_file:
test_data_file.write(test_data)
history = read_history(test_data_path, "utf-8", "# Last line")
assert history == expected_history
def test_group_history_by_date_returns_entries_grouped_by_date():
history = HistoryEntryFactory.create_batch(10)
grouped = group_history_by_date(history)
for date, entries in grouped.items():
for entry in entries:
assert entry.date == date
def test_group_history_by_date_returns_all_entries():
history = HistoryEntryFactory.create_batch(10)
grouped = group_history_by_date(history)
assert len(history) == sum([len(entries) for entries in grouped.values()])
TEST_HISTORY = [
HistoryEntry( # 0
date=datetime.date(2020, 1, 10),
amount=Decimal("150.0"),
currency="PLN",
category="foo",
description="lorem ipsum",
),
HistoryEntry( # 1
date=datetime.date(2020, 1, 9),
amount=Decimal("-100.0"),
currency="PLN",
category="foo",
description="dolor ipsum",
),
HistoryEntry( # 2
date=datetime.date(2020, 1, 9),
amount=Decimal("-40.0"),
currency="PLN",
category="foo",
description="dolor sit amet",
),
HistoryEntry( # 3
date=datetime.date(2020, 1, 8),
amount=Decimal("-150.0"),
currency="PLN",
category="bar",
description="lorem ipsum",
),
HistoryEntry( # 4
date=datetime.date(2020, 1, 8),
amount=Decimal("-90.0"),
currency="EUR",
category="foo",
description="lorem ipsum",
),
HistoryEntry( # 5
date=datetime.date(2020, 1, 7),
amount=Decimal("-90.0"),
currency="PLN",
category="bar",
description="dolor sit amet",
),
]
@pytest.mark.parametrize(
"history, filters, expected_history",
[
(
TEST_HISTORY,
{"amount_from": 100},
[TEST_HISTORY[0], TEST_HISTORY[1], TEST_HISTORY[3]],
),
(
TEST_HISTORY,
{"amount_to": 95},
[TEST_HISTORY[2], TEST_HISTORY[4], TEST_HISTORY[5]],
),
(
TEST_HISTORY,
{"amount_from": 80, "amount_to": 95},
[TEST_HISTORY[4], TEST_HISTORY[5]],
),
(TEST_HISTORY, {"category": "ar"}, [TEST_HISTORY[3], TEST_HISTORY[5]],),
(TEST_HISTORY, {"currency": "EUR"}, [TEST_HISTORY[4]],),
(
TEST_HISTORY,
{"description": "ipsum"},
[TEST_HISTORY[0], TEST_HISTORY[1], TEST_HISTORY[3], TEST_HISTORY[4]],
),
(
TEST_HISTORY,
{"date_from": datetime.date(2020, 1, 9)},
[TEST_HISTORY[0], TEST_HISTORY[1], TEST_HISTORY[2]],
),
(
TEST_HISTORY,
{"date_to": datetime.date(2020, 1, 8)},
[TEST_HISTORY[3], TEST_HISTORY[4], TEST_HISTORY[5]],
),
(
TEST_HISTORY,
{
"date_from": datetime.date(2020, 1, 8),
"date_to": datetime.date(2020, 1, 9),
},
[TEST_HISTORY[1], TEST_HISTORY[2], TEST_HISTORY[3], TEST_HISTORY[4]],
),
],
)
def test_filter_history_returns_history_without_not_matching_entries(
history, filters, expected_history
):
filtered_history = filter_history(history, **filters)
assert filtered_history == expected_history
|
[
"factory.Faker",
"mquery.group_history_by_date",
"mquery.filter_history",
"decimal.Decimal",
"datetime.date",
"mquery.read_history"
] |
[((263, 307), 'factory.Faker', 'factory.Faker', (['"""past_date"""'], {'start_date': '"""-7d"""'}), "('past_date', start_date='-7d')\n", (276, 307), False, 'import factory\n'), ((326, 363), 'factory.Faker', 'factory.Faker', (['"""sentence"""'], {'nb_words': '(8)'}), "('sentence', nb_words=8)\n", (339, 363), False, 'import factory\n'), ((379, 416), 'factory.Faker', 'factory.Faker', (['"""sentence"""'], {'nb_words': '(3)'}), "('sentence', nb_words=3)\n", (392, 416), False, 'import factory\n'), ((430, 521), 'factory.Faker', 'factory.Faker', (['"""pydecimal"""'], {'left_digits': '(3)', 'right_digits': '(2)', 'positive': '(False)', 'max_value': '(-1)'}), "('pydecimal', left_digits=3, right_digits=2, positive=False,\n    max_value=-1)\n", (443, 521), False, 'import factory\n'), ((1390, 1442), 'mquery.read_history', 'read_history', (['test_data_path', '"""utf-8"""', '"""# Last line"""'], {}), "(test_data_path, 'utf-8', '# Last line')\n", (1402, 1442), False, 'from mquery import HistoryEntry, filter_history, group_history_by_date, read_history\n'), ((1615, 1645), 'mquery.group_history_by_date', 'group_history_by_date', (['history'], {}), '(history)\n', (1636, 1645), False, 'from mquery import HistoryEntry, filter_history, group_history_by_date, read_history\n'), ((1877, 1907), 'mquery.group_history_by_date', 'group_history_by_date', (['history'], {}), '(history)\n', (1898, 1907), False, 'from mquery import HistoryEntry, filter_history, group_history_by_date, read_history\n'), ((4698, 4732), 'mquery.filter_history', 'filter_history', (['history'], {}), '(history, **filters)\n', (4712, 4732), False, 'from mquery import HistoryEntry, filter_history, group_history_by_date, read_history\n'), ((848, 875), 'datetime.date', 'datetime.date', (['(2015)', '(10)', '(15)'], {}), '(2015, 10, 15)\n', (861, 875), False, 'import datetime\n'), ((890, 909), 'decimal.Decimal', 'Decimal', (['"""-1234.50"""'], {}), "('-1234.50')\n", (897, 909), False, 'from decimal import Decimal\n'), ((962, 988), 'datetime.date', 'datetime.date', (['(2019)', '(4)', '(30)'], {}), '(2019, 4, 30)\n', (975, 988), False, 'import datetime\n'), ((1063, 1079), 'decimal.Decimal', 'Decimal', (['"""-0.12"""'], {}), "('-0.12')\n", (1070, 1079), False, 'from decimal import Decimal\n'), ((1145, 1171), 'datetime.date', 'datetime.date', (['(2020)', '(12)', '(3)'], {}), '(2020, 12, 3)\n', (1158, 1171), False, 'import datetime\n'), ((1188, 1206), 'decimal.Decimal', 'Decimal', (['"""6543.21"""'], {}), "('6543.21')\n", (1195, 1206), False, 'from decimal import Decimal\n'), ((2042, 2068), 'datetime.date', 'datetime.date', (['(2020)', '(1)', '(10)'], {}), '(2020, 1, 10)\n', (2055, 2068), False, 'import datetime\n'), ((2085, 2101), 'decimal.Decimal', 'Decimal', (['"""150.0"""'], {}), "('150.0')\n", (2092, 2101), False, 'from decimal import Decimal\n'), ((2229, 2254), 'datetime.date', 'datetime.date', (['(2020)', '(1)', '(9)'], {}), '(2020, 1, 9)\n', (2242, 2254), False, 'import datetime\n'), ((2271, 2288), 'decimal.Decimal', 'Decimal', (['"""-100.0"""'], {}), "('-100.0')\n", (2278, 2288), False, 'from decimal import Decimal\n'), ((2416, 2441), 'datetime.date', 'datetime.date', (['(2020)', '(1)', '(9)'], {}), '(2020, 1, 9)\n', (2429, 2441), False, 'import datetime\n'), ((2458, 2474), 'decimal.Decimal', 'Decimal', (['"""-40.0"""'], {}), "('-40.0')\n", (2465, 2474), False, 'from decimal import Decimal\n'), ((2605, 2630), 'datetime.date', 'datetime.date', (['(2020)', '(1)', '(8)'], {}), '(2020, 1, 8)\n', (2618, 2630), False, 'import datetime\n'), ((2647, 2664), 'decimal.Decimal', 'Decimal', (['"""-150.0"""'], {}), "('-150.0')\n", (2654, 2664), False, 'from decimal import Decimal\n'), ((2792, 2817), 'datetime.date', 'datetime.date', (['(2020)', '(1)', '(8)'], {}), '(2020, 1, 8)\n', (2805, 2817), False, 'import datetime\n'), ((2834, 2850), 'decimal.Decimal', 'Decimal', (['"""-90.0"""'], {}), "('-90.0')\n", (2841, 2850), False, 'from decimal import Decimal\n'), ((2978, 3003), 'datetime.date', 'datetime.date', (['(2020)', '(1)', '(7)'], {}), '(2020, 1, 7)\n', (2991, 3003), False, 'import datetime\n'), ((3020, 3036), 'decimal.Decimal', 'Decimal', (['"""-90.0"""'], {}), "('-90.0')\n", (3027, 3036), False, 'from decimal import Decimal\n'), ((4018, 4043), 'datetime.date', 'datetime.date', (['(2020)', '(1)', '(9)'], {}), '(2020, 1, 9)\n', (4031, 4043), False, 'import datetime\n'), ((4182, 4207), 'datetime.date', 'datetime.date', (['(2020)', '(1)', '(8)'], {}), '(2020, 1, 8)\n', (4195, 4207), False, 'import datetime\n'), ((4365, 4390), 'datetime.date', 'datetime.date', (['(2020)', '(1)', '(8)'], {}), '(2020, 1, 8)\n', (4378, 4390), False, 'import datetime\n'), ((4419, 4444), 'datetime.date', 'datetime.date', (['(2020)', '(1)', '(9)'], {}), '(2020, 1, 9)\n', (4432, 4444), False, 'import datetime\n')]
|
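HistoryEntryFactory above shows the usual factory_boy pattern: Faker providers bound to class attributes, resolved anew for each created instance. The same idea in a self-contained form, with a plain stand-in model so it runs without mquery (Entry and EntryFactory are illustrative names, not the real types):

import factory

class Entry:
    def __init__(self, date, description, amount):
        self.date, self.description, self.amount = date, description, amount

class EntryFactory(factory.Factory):
    class Meta:
        model = Entry
    date = factory.Faker("past_date", start_date="-7d")
    description = factory.Faker("sentence", nb_words=8)
    amount = factory.Faker("pydecimal", left_digits=3, right_digits=2,
                           positive=False, max_value=-1)

batch = EntryFactory.create_batch(3)       # three entries with random dates in the last week
print([(e.date, e.amount) for e in batch])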
'''
DS1307 RTC driver
Author: shaoziyang
Date: 2018.3
http://www.micropython.org.cn
'''
from micropython import const
DS1307_I2C_ADDRESS = const(104)
DS1307_REG_SECOND = const(0)
DS1307_REG_MINUTE = const(1)
DS1307_REG_HOUR = const(2)
DS1307_REG_WEEKDAY = const(3)
DS1307_REG_DAY = const(4)
DS1307_REG_MONTH = const(5)
DS1307_REG_YEAR = const(6)
DS1307_REG_CTRL = const(7)
DS1307_REG_RAM = const(8)
class DS1307():
def __init__(self, i2c):
self.i2c = i2c
self.DT = [0] * 8
self.buf = bytearray(8)
self.tb = bytearray(1)
self.rb = bytearray(1)
self.start()
# set reg
def setReg(self, reg, dat):
self.tb[0] = dat
self.i2c.writeto_mem(DS1307_I2C_ADDRESS, reg, self.tb)
# get reg
def getReg(self, reg):
self.i2c.readfrom_mem_into(DS1307_I2C_ADDRESS, reg, self.rb)
return self.rb[0]
    def start(self):
        # clear bit 7 (CH, clock halt) of the seconds register to start the oscillator
        t = self.getReg(DS1307_REG_SECOND)
        self.setReg(DS1307_REG_SECOND, t&0x7F)
    def stop(self):
        # set the CH bit to halt the oscillator
        t = self.getReg(DS1307_REG_SECOND)
        self.setReg(DS1307_REG_SECOND, t|0x80)
def DecToHex(self, dat):
return (dat//10) * 16 + (dat%10)
def HexToDec(self, dat):
return (dat//16) * 10 + (dat%16)
def datetime(self, DT=None):
if DT == None:
self.i2c.readfrom_mem_into(DS1307_I2C_ADDRESS, DS1307_REG_SECOND, self.buf)
self.DT[0] = self.HexToDec(self.buf[6]) + 2000
self.DT[1] = self.HexToDec(self.buf[5])
self.DT[2] = self.HexToDec(self.buf[4])
self.DT[3] = self.HexToDec(self.buf[3])
self.DT[4] = self.HexToDec(self.buf[2])
self.DT[5] = self.HexToDec(self.buf[1])
self.DT[6] = self.HexToDec(self.buf[0])
self.DT[7] = 0
return self.DT
else:
            self.buf[0] = 0  # first byte is the register pointer: write starts at seconds
self.buf[1] = self.DecToHex(DT[6]%60) # second
self.buf[2] = self.DecToHex(DT[5]%60) # minute
self.buf[3] = self.DecToHex(DT[4]%24) # hour
self.buf[4] = self.DecToHex(DT[3]%8) # week day
self.buf[5] = self.DecToHex(DT[2]%32) # date
self.buf[6] = self.DecToHex(DT[1]%13) # month
self.buf[7] = self.DecToHex(DT[0]%100) # year
self.i2c.writeto(DS1307_I2C_ADDRESS, self.buf)
def year(self, year = None):
if year == None:
return self.HexToDec(self.getReg(DS1307_REG_YEAR)) + 2000
else:
self.setReg(DS1307_REG_YEAR, self.DecToHex(year%100))
def month(self, month = None):
if month == None:
return self.HexToDec(self.getReg(DS1307_REG_MONTH))
else:
self.setReg(DS1307_REG_MONTH, self.DecToHex(month%13))
def day(self, day = None):
if day == None:
return self.HexToDec(self.getReg(DS1307_REG_DAY))
else:
self.setReg(DS1307_REG_DAY, self.DecToHex(day%32))
def weekday(self, weekday = None):
if weekday == None:
return self.HexToDec(self.getReg(DS1307_REG_WEEKDAY))
else:
self.setReg(DS1307_REG_WEEKDAY, self.DecToHex(weekday%8))
def hour(self, hour = None):
if hour == None:
return self.HexToDec(self.getReg(DS1307_REG_HOUR))
else:
self.setReg(DS1307_REG_HOUR, self.DecToHex(hour%24))
def minute(self, minute = None):
if minute == None:
return self.HexToDec(self.getReg(DS1307_REG_MINUTE))
else:
self.setReg(DS1307_REG_MINUTE, self.DecToHex(minute%60))
def second(self, second = None):
if second == None:
return self.HexToDec(self.getReg(DS1307_REG_SECOND))
else:
self.setReg(DS1307_REG_SECOND, self.DecToHex(second%60))
def ram(self, reg, dat = None):
if dat == None:
return self.getReg(DS1307_REG_RAM + (reg%56))
else:
self.setReg(DS1307_REG_RAM + (reg%56), dat)
|
[
"micropython.const"
] |
[((160, 170), 'micropython.const', 'const', (['(104)'], {}), '(104)\n', (165, 170), False, 'from micropython import const\n'), ((193, 201), 'micropython.const', 'const', (['(0)'], {}), '(0)\n', (198, 201), False, 'from micropython import const\n'), ((224, 232), 'micropython.const', 'const', (['(1)'], {}), '(1)\n', (229, 232), False, 'from micropython import const\n'), ((255, 263), 'micropython.const', 'const', (['(2)'], {}), '(2)\n', (260, 263), False, 'from micropython import const\n'), ((286, 294), 'micropython.const', 'const', (['(3)'], {}), '(3)\n', (291, 294), False, 'from micropython import const\n'), ((317, 325), 'micropython.const', 'const', (['(4)'], {}), '(4)\n', (322, 325), False, 'from micropython import const\n'), ((348, 356), 'micropython.const', 'const', (['(5)'], {}), '(5)\n', (353, 356), False, 'from micropython import const\n'), ((379, 387), 'micropython.const', 'const', (['(6)'], {}), '(6)\n', (384, 387), False, 'from micropython import const\n'), ((410, 418), 'micropython.const', 'const', (['(7)'], {}), '(7)\n', (415, 418), False, 'from micropython import const\n'), ((441, 449), 'micropython.const', 'const', (['(8)'], {}), '(8)\n', (446, 449), False, 'from micropython import const\n')]
|
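DecToHex and HexToDec above implement BCD (binary-coded decimal), the encoding the DS1307's time registers use: each decimal digit occupies one hex nibble. A standalone check of that arithmetic (function names here are descriptive stand-ins for the driver's methods):

def dec_to_bcd(dat):
    return (dat // 10) * 16 + (dat % 10)   # tens digit -> high nibble

def bcd_to_dec(dat):
    return (dat // 16) * 10 + (dat % 16)   # high nibble -> tens digit

assert dec_to_bcd(59) == 0x59              # 59 decimal is nibbles 5 and 9
assert bcd_to_dec(0x59) == 59
assert all(bcd_to_dec(dec_to_bcd(n)) == n for n in range(100))
print("BCD round-trip ok")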
"""Config flow for NuHeat integration."""
import logging
import nuheat
import requests.exceptions
import voluptuous as vol
from homeassistant import config_entries, core, exceptions
from homeassistant.const import (
CONF_PASSWORD,
CONF_USERNAME,
HTTP_BAD_REQUEST,
HTTP_INTERNAL_SERVER_ERROR,
)
from .const import CONF_SERIAL_NUMBER
from .const import DOMAIN # pylint:disable=unused-import
_LOGGER = logging.getLogger(__name__)
DATA_SCHEMA = vol.Schema(
{
vol.Required(CONF_USERNAME): str,
vol.Required(CONF_PASSWORD): str,
vol.Required(CONF_SERIAL_NUMBER): str,
}
)
async def validate_input(hass: core.HomeAssistant, data):
"""Validate the user input allows us to connect.
Data has the keys from DATA_SCHEMA with values provided by the user.
"""
api = nuheat.NuHeat(data[CONF_USERNAME], data[CONF_PASSWORD])
try:
await hass.async_add_executor_job(api.authenticate)
except requests.exceptions.Timeout as ex:
raise CannotConnect from ex
except requests.exceptions.HTTPError as ex:
if (
ex.response.status_code > HTTP_BAD_REQUEST
and ex.response.status_code < HTTP_INTERNAL_SERVER_ERROR
):
raise InvalidAuth from ex
raise CannotConnect from ex
#
# The underlying module throws a generic exception on login failure
#
except Exception as ex:
raise InvalidAuth from ex
try:
thermostat = await hass.async_add_executor_job(
api.get_thermostat, data[CONF_SERIAL_NUMBER]
)
except requests.exceptions.HTTPError as ex:
raise InvalidThermostat from ex
return {"title": thermostat.room, "serial_number": thermostat.serial_number}
class ConfigFlow(config_entries.ConfigFlow, domain=DOMAIN):
"""Handle a config flow for NuHeat."""
VERSION = 1
CONNECTION_CLASS = config_entries.CONN_CLASS_CLOUD_POLL
async def async_step_user(self, user_input=None):
"""Handle the initial step."""
errors = {}
if user_input is not None:
try:
info = await validate_input(self.hass, user_input)
except CannotConnect:
errors["base"] = "cannot_connect"
except InvalidAuth:
errors["base"] = "invalid_auth"
except InvalidThermostat:
errors["base"] = "invalid_thermostat"
except Exception: # pylint: disable=broad-except
_LOGGER.exception("Unexpected exception")
errors["base"] = "unknown"
if "base" not in errors:
await self.async_set_unique_id(info["serial_number"])
self._abort_if_unique_id_configured()
return self.async_create_entry(title=info["title"], data=user_input)
return self.async_show_form(
step_id="user", data_schema=DATA_SCHEMA, errors=errors
)
class CannotConnect(exceptions.HomeAssistantError):
"""Error to indicate we cannot connect."""
class InvalidAuth(exceptions.HomeAssistantError):
"""Error to indicate there is invalid auth."""
class InvalidThermostat(exceptions.HomeAssistantError):
"""Error to indicate there is invalid thermostat."""
|
[
"nuheat.NuHeat",
"voluptuous.Required",
"logging.getLogger"
] |
[((420, 447), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (437, 447), False, 'import logging\n'), ((825, 880), 'nuheat.NuHeat', 'nuheat.NuHeat', (['data[CONF_USERNAME]', 'data[CONF_PASSWORD]'], {}), '(data[CONF_USERNAME], data[CONF_PASSWORD])\n', (838, 880), False, 'import nuheat\n'), ((489, 516), 'voluptuous.Required', 'vol.Required', (['CONF_USERNAME'], {}), '(CONF_USERNAME)\n', (501, 516), True, 'import voluptuous as vol\n'), ((531, 558), 'voluptuous.Required', 'vol.Required', (['CONF_PASSWORD'], {}), '(CONF_PASSWORD)\n', (543, 558), True, 'import voluptuous as vol\n'), ((573, 605), 'voluptuous.Required', 'vol.Required', (['CONF_SERIAL_NUMBER'], {}), '(CONF_SERIAL_NUMBER)\n', (585, 605), True, 'import voluptuous as vol\n')]
|
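DATA_SCHEMA above is a voluptuous schema, so validating the submitted form raises before validate_input ever contacts the NuHeat API. A small sketch of that behavior, with plain string keys standing in for the homeassistant CONF_* constants:

import voluptuous as vol

schema = vol.Schema({
    vol.Required("username"): str,
    vol.Required("password"): str,
    vol.Required("serial_number"): str,
})

print(schema({"username": "u", "password": "p", "serial_number": "42"}))
try:
    schema({"username": "u"})              # password and serial_number missing
except vol.MultipleInvalid as err:
    print(f"validation failed: {err}")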