hexsha stringlengths 40 40 | size int64 3 1.03M | ext stringclasses 10
values | lang stringclasses 1
value | max_stars_repo_path stringlengths 3 972 | max_stars_repo_name stringlengths 6 130 | max_stars_repo_head_hexsha stringlengths 40 78 | max_stars_repo_licenses listlengths 1 10 | max_stars_count int64 1 191k โ | max_stars_repo_stars_event_min_datetime stringlengths 24 24 โ | max_stars_repo_stars_event_max_datetime stringlengths 24 24 โ | max_issues_repo_path stringlengths 3 972 | max_issues_repo_name stringlengths 6 130 | max_issues_repo_head_hexsha stringlengths 40 78 | max_issues_repo_licenses listlengths 1 10 | max_issues_count int64 1 116k โ | max_issues_repo_issues_event_min_datetime stringlengths 24 24 โ | max_issues_repo_issues_event_max_datetime stringlengths 24 24 โ | max_forks_repo_path stringlengths 3 972 | max_forks_repo_name stringlengths 6 130 | max_forks_repo_head_hexsha stringlengths 40 78 | max_forks_repo_licenses listlengths 1 10 | max_forks_count int64 1 105k โ | max_forks_repo_forks_event_min_datetime stringlengths 24 24 โ | max_forks_repo_forks_event_max_datetime stringlengths 24 24 โ | content stringlengths 3 1.03M | avg_line_length float64 1.13 941k | max_line_length int64 2 941k | alphanum_fraction float64 0 1 |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
0e9544a0b4d7d1664ac8ac4ecb9dec668b7b299a | 2,313 | py | Python | wrapper.py | maxfrei750/CarbonBlackSegmentation | ff5aeaf03a9c60c1a0396f1d2b6d5a3347808a30 | [
"MIT"
] | null | null | null | wrapper.py | maxfrei750/CarbonBlackSegmentation | ff5aeaf03a9c60c1a0396f1d2b6d5a3347808a30 | [
"MIT"
] | null | null | null | wrapper.py | maxfrei750/CarbonBlackSegmentation | ff5aeaf03a9c60c1a0396f1d2b6d5a3347808a30 | [
"MIT"
] | null | null | null | """
This module contains a series of wrappers, that take an input
in the form of image paths and output segmented masks.
Various renditions are available. Demonstration of the use of
these wrappers is provided in the scripts in the demo folder.
"""
import os # used in generating file paths
from glob import glob
import numpy as np
from PIL import Image
from tqdm import tqdm # used for progress bar in loops
from deployment import Segmenter # local module for segmentation
def single_image(image_path="", device="cpu"):
    """Segment a single image and return its mask.

    Contributors: @tsipkens, @maxfrei750, Oct/2020

    :param image_path: path of the image to segment; "" selects the bundled
        test image 201805A_A6_004.png
    :param device: e.g., "cpu" or "cuda"
    :return: segmentation mask produced by the Segmenter
    """
    # Default to the bundled test image when no path is given.
    if image_path == "":
        image_path = os.path.join("test_images", "201805A_A6_004.png")

    # Load as RGB and hand the pixel array straight to the segmenter.
    rgb_array = np.asarray(Image.open(image_path).convert("RGB"))
    return Segmenter(device=device).segment_image(rgb_array)
def multi_image(image_paths="", device="cuda"):
    """Segment a batch of images and return one mask per image.

    When segmenting many images, it may be advisable to use a GPU,
    hence device="cuda" by default.

    Contributors: @tsipkens, @maxfrei750, Oct/2020

    :param image_paths: list of image paths; "" selects every file in the
        bundled test_images folder
    :param device: e.g., "cpu" or "cuda"
    :return: list of segmentation masks, in input order
    """
    # Fall back to all files in the test_images folder by default.
    if image_paths == "":
        image_paths = glob(os.path.join("test_images", "*.*"))

    segmenter = Segmenter(device=device)

    # tqdm adds a progress bar over the (potentially long) image list.
    masks = []
    for current_path in tqdm(image_paths):
        rgb_array = np.asarray(Image.open(current_path).convert("RGB"))
        masks.append(segmenter.segment_image(rgb_array))
    return masks
| 33.042857 | 74 | 0.69131 |
4c3219e79d597720cd1f9d8fc6c3833edde9d278 | 388 | py | Python | CataractProject/Scripts/pip3-script.py | Pangxiaox/- | 605293093f8a9c28b33e29ab4253f8e5c407788f | [
"MIT"
] | null | null | null | CataractProject/Scripts/pip3-script.py | Pangxiaox/- | 605293093f8a9c28b33e29ab4253f8e5c407788f | [
"MIT"
] | null | null | null | CataractProject/Scripts/pip3-script.py | Pangxiaox/- | 605293093f8a9c28b33e29ab4253f8e5c407788f | [
"MIT"
] | 2 | 2019-09-06T11:13:01.000Z | 2019-09-06T11:24:54.000Z | #!D:\CataractProject\Scripts\python.exe
# EASY-INSTALL-ENTRY-SCRIPT: 'pip==19.0.3','console_scripts','pip3'
__requires__ = 'pip==19.0.3'
import re
import sys
from pkg_resources import load_entry_point
if __name__ == '__main__':
    # Auto-generated setuptools console-script shim for pip 19.0.3.
    # Normalize argv[0]: strip the "-script.py(w)" / ".exe" suffix that
    # Windows launchers append, so pip reports its canonical program name.
    sys.argv[0] = re.sub(r'(-script\.pyw?|\.exe)?$', '', sys.argv[0])
    # Resolve the 'pip3' console entry point and invoke it; its return
    # value becomes the process exit code.
    sys.exit(
        load_entry_point('pip==19.0.3', 'console_scripts', 'pip3')()
    )
| 29.846154 | 69 | 0.659794 |
9a3912d86317f934ee56051515a3d72dfee54754 | 257 | py | Python | aula2.py | HellDonXXl/python | cd33dd879fb1cc640e1e0171a590cf2d3e89c96b | [
"MIT"
] | null | null | null | aula2.py | HellDonXXl/python | cd33dd879fb1cc640e1e0171a590cf2d3e89c96b | [
"MIT"
] | null | null | null | aula2.py | HellDonXXl/python | cd33dd879fb1cc640e1e0171a590cf2d3e89c96b | [
"MIT"
] | null | null | null |
# Restaurant-bill exercise: total = bill + 10% waiter tip + a fixed
# R$10 cover charge per person; prompts stay in Portuguese as shipped.
party_size = int(input("digite o numero de pessoa: "))
bill_value = float(input("digite o valor da conta: "))

tip_rate = 0.1
waiter_tip = bill_value * tip_rate
cover_total = party_size * 10

grand_total = bill_value + waiter_tip + cover_total
print("", grand_total)
| 21.416667 | 57 | 0.712062 |
77de37550bf747b649bbe4848354b63698783d00 | 1,797 | py | Python | pyspedas_examples/examples/ex_dsl2gse.py | spedas/pyspedas_examples | 846ed292237bb1f11e1f737d7241ec15dfd1b743 | [
"MIT"
] | 1 | 2022-01-25T10:56:09.000Z | 2022-01-25T10:56:09.000Z | pyspedas_examples/examples/ex_dsl2gse.py | spedas/pyspedas_examples | 846ed292237bb1f11e1f737d7241ec15dfd1b743 | [
"MIT"
] | 1 | 2022-02-09T16:54:44.000Z | 2022-02-10T15:00:46.000Z | pyspedas_examples/examples/ex_dsl2gse.py | spedas/pyspedas_examples | 846ed292237bb1f11e1f737d7241ec15dfd1b743 | [
"MIT"
] | 2 | 2020-05-08T18:37:31.000Z | 2020-05-10T17:50:07.000Z | """
Load tha_fgl_dsl data and perform a dsl to gse transformation.
Notes
-----
To duplicate this example on IDL SPEDAS,
and compare the results to pyspedas,
run the following IDL code:
pro thm_crib_fgm
timespan, '2017-03-23', 1
thm_load_state, probe='a', /get_support_data
thm_load_fgm, lev=2, probe=['a']
dsl2gse, 'tha_fgl_dsl', 'tha_state_spinras', 'tha_state_spindec',$
'tha_fgl_gse'
get_data, 'tha_fgl_dsl', data=d1
get_data, 'tha_fgl_gse', data=d2
z1 = d1.y[*,2]
z2 = d2.y[*,2]
store_data, 'z_dsl', d1.x, z1
store_data, 'z_gse', d2.x, z2
tplot, ['tha_fgl_dsl', 'tha_fgl_gse', 'z_dsl', 'z_gse']
end
"""
import pyspedas
import pytplot
from pyspedas.themis.cotrans.dsl2gse import dsl2gse
def ex_dsl2gse(plot=True):
    """Load THEMIS-A FGL data and transform it from DSL to GSE coordinates.

    The z component of both the DSL and GSE variables is additionally
    stored as its own tplot variable for easy comparison.

    :param plot: if True, display the tplot figure of all four variables
    :return: 1 as indication that the example finished without problems
    """
    trange = ['2017-03-23 00:00:00', '2017-03-23 23:59:59']

    # State data (spin axis RA/Dec) is required by the coordinate transform.
    pyspedas.themis.state(probe='a', trange=trange, get_support_data=True,
                          varnames=['tha_spinras', 'tha_spindec'])
    pyspedas.themis.fgm(probe='a', trange=trange, varnames=['tha_fgl_dsl'])

    dsl2gse('tha_fgl_dsl', 'tha_spinras', 'tha_spindec', 'tha_fgl_gse')

    # Extract and store the third (z) component of each variable,
    # DSL first, then GSE -- same order as the original example.
    for source_name, target_name in (('tha_fgl_dsl', 'z_dsl'),
                                     ('tha_fgl_gse', 'z_gse')):
        variable = pytplot.get_data(source_name)
        pytplot.store_data(target_name,
                           data={'x': variable[0], 'y': variable[1][:, 2]})

    pytplot.tplot_options('title', 'tha_fgl DSL and GSE, 2017-03-23')
    if plot:
        pytplot.tplot(['tha_fgl_dsl', 'tha_fgl_gse', 'z_dsl', 'z_gse'])

    return 1
# Run the example code
# ex_dsl2gse()
| 29.459016 | 79 | 0.632165 |
2e61fa070d6726137be3e28128fb2da2e980c0e5 | 345 | py | Python | 560-subarray-sum-equals-k/560-subarray-sum-equals-k.py | Nitin-Diwakar/75DaysCodingChallenge-TechMaestro | 69a355edd6f3a331b9c303ab622b1fbf49b4744a | [
"MIT"
] | null | null | null | 560-subarray-sum-equals-k/560-subarray-sum-equals-k.py | Nitin-Diwakar/75DaysCodingChallenge-TechMaestro | 69a355edd6f3a331b9c303ab622b1fbf49b4744a | [
"MIT"
] | null | null | null | 560-subarray-sum-equals-k/560-subarray-sum-equals-k.py | Nitin-Diwakar/75DaysCodingChallenge-TechMaestro | 69a355edd6f3a331b9c303ab622b1fbf49b4744a | [
"MIT"
] | null | null | null | class Solution:
def subarraySum(self, nums: List[int], k: int) -> int:
    """Count contiguous subarrays of ``nums`` whose elements sum to ``k``.

    Classic prefix-sum + hash-map technique: a subarray ending at the
    current index sums to ``k`` exactly when some earlier prefix equals
    ``current_prefix - k``. Runs in O(n) time and O(n) extra space.
    """
    count = 0
    prefix = 0
    seen = {0: 1}  # the empty prefix occurs once
    for value in nums:
        prefix += value
        count += seen.get(prefix - k, 0)
        seen[prefix] = seen.get(prefix, 0) + 1
    return count
| 18.157895 | 58 | 0.507246 |
c217816a8568997dd56e5b107273418cade492fe | 4,311 | py | Python | ha_healthchecker/ha_healthchecker/action/refresher.py | bzhaoopenstack/labkeeper | fecda8a306fd3f1dea3f66606ac8bd962981d2b0 | [
"Apache-2.0"
] | 4 | 2019-04-02T03:49:13.000Z | 2022-01-22T14:57:33.000Z | ha_healthchecker/ha_healthchecker/action/refresher.py | bzhaoopenstack/labkeeper | fecda8a306fd3f1dea3f66606ac8bd962981d2b0 | [
"Apache-2.0"
] | 451 | 2019-03-25T07:27:52.000Z | 2021-07-26T01:26:43.000Z | ha_healthchecker/ha_healthchecker/action/refresher.py | bzhaoopenstack/labkeeper | fecda8a306fd3f1dea3f66606ac8bd962981d2b0 | [
"Apache-2.0"
] | 14 | 2018-09-28T18:45:12.000Z | 2022-01-22T14:57:22.000Z | import datetime
from ha_healthchecker.action import base
class Refresher(base.Action):
    """Action that refreshes health state for the local node in ZooKeeper.

    On each run it reconciles the recorded status of every service hosted
    on the local node with the actually observed status, reports a
    heartbeat for the local node, and marks peer nodes 'down' when they
    stop responding.

    NOTE(review): attributes such as ``self.zk``, ``self.node``,
    ``self.oppo_node``, ``self.zk_node``, ``self.LOG`` and helpers like
    ``_get_service_status``, ``_ping``, ``_is_alarmed_timeout`` and
    ``_is_check_heart_beat_overtime`` are presumably inherited from
    ``base.Action`` -- confirm in that module.
    """

    def __init__(self, zk, cluster_config):
        # All state handling lives in the base class; nothing extra here.
        super(Refresher, self).__init__(zk, cluster_config)

    def _local_node_service_process(self, node_obj):
        """Refresh every service registered for *node_obj*, then heartbeat."""
        service_objs = self.zk.list_services(node_name_filter=node_obj.name)
        for service_obj in service_objs:
            self._refresh_service(service_obj, node_obj)
        self._report_heart_beat(node_obj)

    def _refresh_service(self, service_obj, node_obj):
        """Reconcile one service's recorded state with its observed state.

        Transition rules (written back to ZooKeeper via ``update_service``):
        * observed 'up' but recorded otherwise -> record 'up' and clear the
          restart/alarm bookkeeping;
        * observed not 'up', no restart recorded yet -> record 'restarting';
        * still failing after ``service_restart_max_times`` attempts ->
          record 'down';
        * otherwise -> increment the restart attempt counter.
        """
        cur_status = self._get_service_status(service_obj.name)
        update_dict = {}
        if cur_status == 'up':
            if service_obj.status != 'up':
                # Service recovered: reset all failure bookkeeping.
                # NOTE(review): the update key is 'restarted_account' while
                # the attribute read below is 'restarted_count' -- verify
                # the zk layer maps both names onto the same field.
                update_dict['status'] = 'up'
                update_dict['restarted'] = False
                update_dict['alarmed'] = False
                update_dict['restarted_account'] = 0
                self.LOG.debug("Fix Service %(name)s status from %(orig)s to "
                               "UP.", {'name': service_obj.name,
                                       'orig': service_obj.status})
        else:
            if not service_obj.restarted:
                # First failure observation: flag the restart attempt.
                update_dict['status'] = 'restarting'
                update_dict['restarted'] = True
                self.LOG.debug("Service %(name)s is Restarting.",
                               {'name': service_obj.name})
            else:
                if (service_obj.restarted_count >=
                        self.cluster_config.service_restart_max_times):
                    # Too many restart attempts: give up, mark it down.
                    update_dict['status'] = 'down'
                    self.LOG.debug("Service %(name)s is Down.",
                                   {'name': service_obj.name})
                else:
                    # Keep restarting; just bump the attempt counter.
                    update_dict[
                        'restarted_account'] = service_obj.restarted_count + 1
                    self.LOG.debug("Service %(name)s continue in restarting, "
                                   "tried %(count)s times",
                                   {'name': service_obj.name,
                                    'count': service_obj.restarted_count})
        if update_dict:
            # Only touch ZooKeeper when something actually changed.
            self.zk.update_service(service_obj.name, node_obj.name,
                                   **update_dict)

    def _need_fix_alarmed_status(self, node):
        """Return True when *node*'s 'alarmed' flag should be cleared.

        A node that is not alarmed needs no fix; a slave node is always
        cleared. For any other role the flag is kept (False returned) while
        some non-necessary service is alarmed and its alarm has timed out.
        """
        if not node.alarmed:
            return False
        if node.role == 'slave':
            return True
        for service in self.zk.list_services(node_name_filter=node.name):
            if service.is_necessary:
                # Necessary services do not block clearing the alarm here.
                continue
            if service.alarmed and self._is_alarmed_timeout(service):
                return False
        return True

    def _report_heart_beat(self, node_obj):
        """Write the current UTC timestamp as *node_obj*'s heartbeat.

        Also flips the node back to 'up' when it was 'initializing' or
        'down', and clears the alarmed flag when appropriate.
        """
        hb = datetime.datetime.utcnow().strftime('%Y-%m-%d %H:%M:%S')
        update_dict = {'heartbeat': hb}
        if node_obj.status == 'initializing' or node_obj.status == 'down':
            update_dict['status'] = 'up'
        if self._need_fix_alarmed_status(node_obj):
            update_dict['alarmed'] = False
        self.zk.update_node(node_obj.name, **update_dict)
        self.LOG.debug("Report node %(name)s heartbeat %(hb)s",
                       {'name': node_obj.name, 'hb': hb})

    def _other_node_check(self, other_node_obj):
        """Mark a peer node 'down' if it is unreachable and overdue.

        Nodes in 'maintaining' status are never touched. A node is only
        downgraded when it both fails a ping and has missed its heartbeat
        deadline, and only if it is currently recorded as 'up'.
        """
        if other_node_obj.status == 'maintaining':
            return
        if (not self._ping(other_node_obj.ip) and
                self._is_check_heart_beat_overtime(other_node_obj)):
            if other_node_obj.status == 'up':
                self.zk.update_node(other_node_obj.name, status='down')
                self.LOG.info("%(role)s node %(name)s can not reach, updated "
                              "with %(status)s status.",
                              {'role': other_node_obj.role,
                               'name': other_node_obj.name,
                               'status': 'down'.upper()})

    def run(self):
        """Entry point: refresh local services/heartbeat, then check peers."""
        if self.node.status == 'maintaining':
            # Maintenance mode: leave all recorded state untouched.
            self.LOG.debug(
                'Node %(name)s status is MAINTAINING, Skipping refresh.',
                {'name': self.node.name})
            return
        self._local_node_service_process(self.node)
        if self.oppo_node:
            self._other_node_check(self.oppo_node)
        if self.zk_node:
            self._other_node_check(self.zk_node)
| 41.854369 | 78 | 0.54187 |
b2cb1cc565566d158c149b8480db7b28fc5ed0fc | 2,536 | py | Python | pinball_ext/job/hadoop_jobs.py | DotModus/pinball | deeb4ec20bbd000ad44f7b44e6a7c0fa900dbbea | [
"Apache-2.0"
] | 1,143 | 2015-03-06T22:10:53.000Z | 2022-02-23T21:16:47.000Z | pinball_ext/job/hadoop_jobs.py | DotModus/pinball | deeb4ec20bbd000ad44f7b44e6a7c0fa900dbbea | [
"Apache-2.0"
] | 70 | 2015-03-06T00:44:39.000Z | 2019-05-01T13:15:10.000Z | pinball_ext/job/hadoop_jobs.py | Betterment/pinball | 11120b54fcc25b2857631a5de65a1195ffcffb5c | [
"Apache-2.0"
] | 169 | 2015-03-09T21:27:12.000Z | 2022-03-19T08:09:13.000Z | # Copyright 2015, Pinterest, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from pinball_ext.common import utils
from pinball_ext.job.basic_jobs import ClusterJob
__author__ = 'Changshu Liu, Mohammad Shahangian, Mao Ye'
__copyright__ = 'Copyright 2015, Pinterest, Inc.'
__credits__ = [__author__]
__license__ = 'Apache'
__version__ = '2.0'
LOG = utils.get_logger('pinball_ext.job.hadoop_jobs')
class HadoopJob(ClusterJob):
    """Base class for actual Hadoop jobs.

    App jar and lib jars are configured in executor, please see
    Executor.run_hadoop_job() for detailed info.

    Derived class should at least override _get_class_name() to specify
    what's the main Java class to execute. It can also optionally override
    _setup() to config the following parameters to further tune the job:
    - self.jobconf_args
    - self.extra_jars
    - self.extra_arguments

    self.params derived from JobBase will also be passed as job's extra
    arguments (together with self.extra_arguments).
    """

    def __init__(self, params, settings=None):
        """Initialize the job with empty Hadoop-specific tuning knobs.

        :param params: dict of job parameters; each key/value pair is
            passed to the Hadoop job as a ``-key value`` argument.
        :param settings: optional executor settings forwarded to ClusterJob.
        """
        super(HadoopJob, self).__init__(params, settings)
        self.jobconf_args = {}
        self.extra_arguments = []
        self.extra_jars = []

    def _get_class_name(self):
        """Return the fully qualified Java class to run; must be overridden."""
        raise NotImplementedError('No class name specified for this Hadoop Job')

    def _execute(self):
        """Run the Hadoop job via the executor and log its stdout lines."""
        # items() instead of the Python-2-only iteritems(): identical when
        # iterated, and keeps the module importable under Python 3.
        param_args = ['-%s %s' % (k, v) for k, v in self.params.items()]
        self._job_output, self._job_stderr, self._job_ids = \
            self.executor.run_hadoop_job(
                self._get_class_name(),
                jobconf_args=self.jobconf_args,
                extra_args=param_args + self.extra_arguments,
                extra_jars=self.extra_jars)

        LOG.info('Dump job output ...')
        for line in self._job_output:
            # NOTE(review): assumes each output line is an iterable of
            # string columns -- confirm against Executor.run_hadoop_job.
            LOG.info('\t'.join(line))

    def __str__(self):
        return '(%s): (%s) - (%s)' % (self.job_name,
                                      self.params,
                                      self.jobconf_args)
| 35.222222 | 80 | 0.671924 |
76afdbcf760149ce903a5387adbe3a817ddd97d9 | 6,734 | py | Python | software/python-package/shepherd/launcher.py | orgua/shepherd | 347af12740b008a78b7012ae748d9eb2b147274b | [
"MIT"
] | null | null | null | software/python-package/shepherd/launcher.py | orgua/shepherd | 347af12740b008a78b7012ae748d9eb2b147274b | [
"MIT"
] | 5 | 2020-10-14T14:30:55.000Z | 2020-11-04T15:52:07.000Z | software/python-package/shepherd/launcher.py | orgua/shepherd | 347af12740b008a78b7012ae748d9eb2b147274b | [
"MIT"
] | 1 | 2020-07-15T09:21:28.000Z | 2020-07-15T09:21:28.000Z | # -*- coding: utf-8 -*-
"""
shepherd.launcher
~~~~~
Launcher allows to start and stop shepherd service with the press of a button.
Relies on systemd service.
:copyright: (c) 2019 Networked Embedded Systems Lab, TU Dresden.
:license: MIT, see LICENSE for more details.
"""
from typing import NoReturn
from threading import Event, Thread
import dbus
import time
import logging
import os
from periphery import GPIO
logger = logging.getLogger(__name__)
def call_repeatedly(interval, func, *args):
    """Invoke ``func(*args)`` every *interval* seconds on a worker thread.

    The first call happens after *interval* seconds (not immediately).

    :param interval: seconds between invocations
    :param func: callable to invoke
    :param args: positional arguments forwarded to *func*
    :return: zero-argument callable that stops the repetition
    """
    finished = Event()

    def _worker():
        # wait() returns False on timeout and True once the event is set,
        # so the loop exits as soon as the returned canceller is called.
        while not finished.wait(interval):
            func(*args)

    Thread(target=_worker).start()
    return finished.set
class Launcher(object):
    """Starts/stops the shepherd systemd service on button presses.

    Waits for a falling edge on the button GPIO, toggles the shepherd
    service through systemd's D-Bus API, shows launcher status on an LED
    and periodically acknowledges a hardware watchdog. (The previous
    docstring described a different class and was replaced.)

    Args:
        pin_button (int): Pin number where button is connected. Must be
            configured as input with pull up and connected against ground
        pin_led (int): Pin number of LED for displaying launcher status
        pin_ack_watchdog (int): Pin used to acknowledge the hardware
            watchdog (hw-rev2 boards)
        service_name (str): Name of shepherd systemd service
    """

    def __init__(
        self,
        pin_button: int = 65,
        pin_led: int = 22,
        pin_ack_watchdog: int = 68,
        service_name: str = "shepherd",
    ):
        self.pin_button = pin_button
        self.pin_led = pin_led
        self.pin_ack_watchdog = pin_ack_watchdog
        self.service_name = service_name

    def __enter__(self):
        # Set up GPIO handles; the button triggers on falling edges.
        self.gpio_led = GPIO(self.pin_led, "out")
        self.gpio_button = GPIO(self.pin_button, "in")
        self.gpio_ack_watchdog = GPIO(self.pin_ack_watchdog, "out")
        self.gpio_button.edge = "falling"
        logger.debug("configured gpio")

        # Acknowledge the hardware watchdog every 10 minutes; keep the
        # canceller so __exit__ can stop the timer thread.
        self.cancel_wd_timer = call_repeatedly(interval=600, func=self.ack_watchdog)

        # Connect to systemd over D-Bus and resolve the shepherd unit.
        sys_bus = dbus.SystemBus()
        systemd1 = sys_bus.get_object(
            "org.freedesktop.systemd1", "/org/freedesktop/systemd1"
        )
        self.manager = dbus.Interface(
            systemd1, "org.freedesktop.systemd1.Manager"
        )
        shepherd_object = self.manager.LoadUnit(
            f"{ self.service_name }.service"
        )
        self.shepherd_proxy = sys_bus.get_object(
            "org.freedesktop.systemd1", str(shepherd_object)
        )
        logger.debug("configured dbus for systemd")

        return self

    def __exit__(self, *exc):
        # Stop the periodic watchdog thread and release every GPIO handle
        # (previously gpio_ack_watchdog leaked and the timer kept running).
        self.cancel_wd_timer()
        self.gpio_led.close()
        self.gpio_button.close()
        self.gpio_ack_watchdog.close()

    def run(self) -> NoReturn:
        """Infinite loop waiting for button presses.

        Waits for falling edge on configured button pin. On detection of the
        edge, shepherd service is either started or stopped. Double button
        press while idle causes system shutdown.
        """
        while True:
            logger.info("waiting for falling edge..")
            self.gpio_led.write(True)
            if not self.gpio_button.poll():
                # note: poll is suspected to exit after ~ 1-2 weeks running
                # -> fills mmc with random measurement
                # TODO: observe behavior, hopefully this change fixes the bug
                continue
            self.gpio_led.write(False)
            logger.debug("edge detected")
            if not self.get_state():
                # Service idle: a second press within 5 s requests shutdown.
                # (Now uses the module logger instead of the root logging
                # module, matching the rest of the class.)
                time.sleep(0.25)
                if self.gpio_button.poll(timeout=5):
                    logger.debug("falling edge detected")
                    logger.info("shutdown requested")
                    self.initiate_shutdown()
                    self.gpio_led.write(False)
                    time.sleep(3)
                    continue
            self.set_service(not self.get_state())
            time.sleep(10)

    def get_state(self, timeout: float = 10) -> bool:
        """Queries systemd for state of shepherd service.

        Args:
            timeout (float): Time to wait for service state to settle

        Returns:
            True if the service is active, False if inactive.

        Raises:
            TimeoutError: If state remains changing for longer than timeout
        """
        ts_end = time.time() + timeout
        while True:
            systemd_state = self.shepherd_proxy.Get(
                "org.freedesktop.systemd1.Unit",
                "ActiveState",
                dbus_interface="org.freedesktop.DBus.Properties",
            )
            # Poll until the unit leaves its transitional states.
            if systemd_state in ["deactivating", "activating"]:
                time.sleep(0.1)
            else:
                break
            if time.time() > ts_end:
                raise TimeoutError("Timed out waiting for service state")
        logger.debug(f"service ActiveState: { systemd_state }")
        if systemd_state == "active":
            return True
        elif systemd_state == "inactive":
            return False
        raise Exception(f"Unknown state { systemd_state }")

    def set_service(self, requested_state: bool) -> bool:
        """Changes state of shepherd service.

        Args:
            requested_state (bool): Target state of service

        Returns:
            The service state after the change (also returned when the
            service was already in the requested state).
        """
        active_state = self.get_state()

        if requested_state == active_state:
            logger.debug("service already in requested state")
            self.gpio_led.write(active_state)
            return active_state

        # Use the configured unit name, consistent with __enter__
        # (previously "shepherd.service" was hard-coded here).
        if active_state:
            logger.info("stopping service")
            self.manager.StopUnit(f"{ self.service_name }.service", "fail")
        else:
            logger.info("starting service")
            self.manager.StartUnit(f"{ self.service_name }.service", "fail")

        time.sleep(1)
        new_state = self.get_state()
        if new_state != requested_state:
            # plain string: the previous f-string had no placeholders
            raise Exception("state didn't change")
        return new_state

    def initiate_shutdown(self, timeout: int = 5) -> None:
        """Initiates system shutdown; returns early if cancelled.

        Blinks the LED for *timeout* seconds; any further button press in
        that window cancels the shutdown (hence -> None, not NoReturn).

        Args:
            timeout (int): Number of seconds to wait before powering off
                system
        """
        logger.debug("initiating shutdown routine..")
        time.sleep(0.25)
        for _ in range(timeout):
            if self.gpio_button.poll(timeout=0.5):
                logger.debug("edge detected")
                logger.info("shutdown cancelled")
                return
            self.gpio_led.write(True)
            if self.gpio_button.poll(timeout=0.5):
                logger.debug("edge detected")
                logger.info("shutdown cancelled")
                return
            self.gpio_led.write(False)
        os.sync()
        logger.info("shutting down now")
        self.manager.PowerOff()

    def ack_watchdog(self) -> None:
        """Prevent system-reset from watchdog.

        hw-rev2 has a watchdog that can turn on the BB every ~60 min;
        a short pulse on the ack pin resets it.
        """
        self.gpio_ack_watchdog.write(True)
        time.sleep(0.002)
        self.gpio_ack_watchdog.write(False)
        logger.debug("Signaled ACK to Watchdog")
| 32.220096 | 112 | 0.594001 |
ee2f9f265580bcc3eeb65a790b57535060142f65 | 34,753 | py | Python | venv/lib/python2.7/site-packages/ansible/modules/cloud/centurylink/clc_loadbalancer.py | haind27/test01 | 7f86c0a33eb0874a6c3f5ff9a923fd0cfc8ef852 | [
"MIT"
] | 1 | 2019-04-16T21:23:15.000Z | 2019-04-16T21:23:15.000Z | venv/lib/python2.7/site-packages/ansible/modules/cloud/centurylink/clc_loadbalancer.py | haind27/test01 | 7f86c0a33eb0874a6c3f5ff9a923fd0cfc8ef852 | [
"MIT"
] | 5 | 2020-02-26T20:10:50.000Z | 2021-09-23T23:23:18.000Z | venv/lib/python2.7/site-packages/ansible/modules/cloud/centurylink/clc_loadbalancer.py | haind27/test01 | 7f86c0a33eb0874a6c3f5ff9a923fd0cfc8ef852 | [
"MIT"
] | 1 | 2020-02-13T14:24:57.000Z | 2020-02-13T14:24:57.000Z | #!/usr/bin/python
#
# Copyright (c) 2015 CenturyLink
#
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = '''
module: clc_loadbalancer
short_description: Create, Delete shared loadbalancers in CenturyLink Cloud.
description:
- An Ansible module to Create, Delete shared loadbalancers in CenturyLink Cloud.
version_added: "2.0"
options:
name:
description:
- The name of the loadbalancer
required: True
description:
description:
- A description for the loadbalancer
alias:
description:
- The alias of your CLC Account
required: True
location:
description:
- The location of the datacenter where the load balancer resides in
required: True
method:
description:
-The balancing method for the load balancer pool
choices: ['leastConnection', 'roundRobin']
persistence:
description:
- The persistence method for the load balancer
choices: ['standard', 'sticky']
port:
description:
- Port to configure on the public-facing side of the load balancer pool
choices: [80, 443]
nodes:
description:
- A list of nodes that needs to be added to the load balancer pool
default: []
status:
description:
- The status of the loadbalancer
default: enabled
choices: ['enabled', 'disabled']
state:
description:
- Whether to create or delete the load balancer pool
default: present
choices: ['present', 'absent', 'port_absent', 'nodes_present', 'nodes_absent']
requirements:
- python = 2.7
- requests >= 2.5.0
- clc-sdk
author: "CLC Runner (@clc-runner)"
notes:
- To use this module, it is required to set the below environment variables which enables access to the
Centurylink Cloud
- CLC_V2_API_USERNAME, the account login id for the centurylink cloud
- CLC_V2_API_PASSWORD, the account password for the centurylink cloud
- Alternatively, the module accepts the API token and account alias. The API token can be generated using the
CLC account login and password via the HTTP api call @ https://api.ctl.io/v2/authentication/login
- CLC_V2_API_TOKEN, the API token generated from https://api.ctl.io/v2/authentication/login
- CLC_ACCT_ALIAS, the account alias associated with the centurylink cloud
- Users can set CLC_V2_API_URL to specify an endpoint for pointing to a different CLC environment.
'''
EXAMPLES = '''
# Note - You must set the CLC_V2_API_USERNAME And CLC_V2_API_PASSWD Environment variables before running these examples
- name: Create Loadbalancer
hosts: localhost
connection: local
tasks:
- name: Actually Create things
clc_loadbalancer:
name: test
description: test
alias: TEST
location: WA1
port: 443
nodes:
- ipAddress: 10.11.22.123
privatePort: 80
state: present
- name: Add node to an existing loadbalancer pool
hosts: localhost
connection: local
tasks:
- name: Actually Create things
clc_loadbalancer:
name: test
description: test
alias: TEST
location: WA1
port: 443
nodes:
- ipAddress: 10.11.22.234
privatePort: 80
state: nodes_present
- name: Remove node from an existing loadbalancer pool
hosts: localhost
connection: local
tasks:
- name: Actually Create things
clc_loadbalancer:
name: test
description: test
alias: TEST
location: WA1
port: 443
nodes:
- ipAddress: 10.11.22.234
privatePort: 80
state: nodes_absent
- name: Delete LoadbalancerPool
hosts: localhost
connection: local
tasks:
- name: Actually Delete things
clc_loadbalancer:
name: test
description: test
alias: TEST
location: WA1
port: 443
nodes:
- ipAddress: 10.11.22.123
privatePort: 80
state: port_absent
- name: Delete Loadbalancer
hosts: localhost
connection: local
tasks:
- name: Actually Delete things
clc_loadbalancer:
name: test
description: test
alias: TEST
location: WA1
port: 443
nodes:
- ipAddress: 10.11.22.123
privatePort: 80
state: absent
'''
RETURN = '''
loadbalancer:
description: The load balancer result object from CLC
returned: success
type: dict
sample:
{
"description":"test-lb",
"id":"ab5b18cb81e94ab9925b61d1ca043fb5",
"ipAddress":"66.150.174.197",
"links":[
{
"href":"/v2/sharedLoadBalancers/wfad/wa1/ab5b18cb81e94ab9925b61d1ca043fb5",
"rel":"self",
"verbs":[
"GET",
"PUT",
"DELETE"
]
},
{
"href":"/v2/sharedLoadBalancers/wfad/wa1/ab5b18cb81e94ab9925b61d1ca043fb5/pools",
"rel":"pools",
"verbs":[
"GET",
"POST"
]
}
],
"name":"test-lb",
"pools":[
],
"status":"enabled"
}
'''
__version__ = '${version}'
import json
import os
from time import sleep
from distutils.version import LooseVersion
try:
import requests
except ImportError:
REQUESTS_FOUND = False
else:
REQUESTS_FOUND = True
#
# Requires the clc-python-sdk.
# sudo pip install clc-sdk
#
try:
import clc as clc_sdk
from clc import APIFailedResponse
except ImportError:
CLC_FOUND = False
clc_sdk = None
else:
CLC_FOUND = True
from ansible.module_utils.basic import AnsibleModule
class ClcLoadBalancer:
clc = None
def __init__(self, module):
"""
Construct module
"""
self.clc = clc_sdk
self.module = module
self.lb_dict = {}
if not CLC_FOUND:
self.module.fail_json(
msg='clc-python-sdk required for this module')
if not REQUESTS_FOUND:
self.module.fail_json(
msg='requests library is required for this module')
if requests.__version__ and LooseVersion(
requests.__version__) < LooseVersion('2.5.0'):
self.module.fail_json(
msg='requests library version should be >= 2.5.0')
self._set_user_agent(self.clc)
def process_request(self):
"""
Execute the main code path, and handle the request
:return: none
"""
changed = False
result_lb = None
loadbalancer_name = self.module.params.get('name')
loadbalancer_alias = self.module.params.get('alias')
loadbalancer_location = self.module.params.get('location')
loadbalancer_description = self.module.params.get('description')
loadbalancer_port = self.module.params.get('port')
loadbalancer_method = self.module.params.get('method')
loadbalancer_persistence = self.module.params.get('persistence')
loadbalancer_nodes = self.module.params.get('nodes')
loadbalancer_status = self.module.params.get('status')
state = self.module.params.get('state')
if loadbalancer_description is None:
loadbalancer_description = loadbalancer_name
self._set_clc_credentials_from_env()
self.lb_dict = self._get_loadbalancer_list(
alias=loadbalancer_alias,
location=loadbalancer_location)
if state == 'present':
changed, result_lb, lb_id = self.ensure_loadbalancer_present(
name=loadbalancer_name,
alias=loadbalancer_alias,
location=loadbalancer_location,
description=loadbalancer_description,
status=loadbalancer_status)
if loadbalancer_port:
changed, result_pool, pool_id = self.ensure_loadbalancerpool_present(
lb_id=lb_id,
alias=loadbalancer_alias,
location=loadbalancer_location,
method=loadbalancer_method,
persistence=loadbalancer_persistence,
port=loadbalancer_port)
if loadbalancer_nodes:
changed, result_nodes = self.ensure_lbpool_nodes_set(
alias=loadbalancer_alias,
location=loadbalancer_location,
name=loadbalancer_name,
port=loadbalancer_port,
nodes=loadbalancer_nodes)
elif state == 'absent':
changed, result_lb = self.ensure_loadbalancer_absent(
name=loadbalancer_name,
alias=loadbalancer_alias,
location=loadbalancer_location)
elif state == 'port_absent':
changed, result_lb = self.ensure_loadbalancerpool_absent(
alias=loadbalancer_alias,
location=loadbalancer_location,
name=loadbalancer_name,
port=loadbalancer_port)
elif state == 'nodes_present':
changed, result_lb = self.ensure_lbpool_nodes_present(
alias=loadbalancer_alias,
location=loadbalancer_location,
name=loadbalancer_name,
port=loadbalancer_port,
nodes=loadbalancer_nodes)
elif state == 'nodes_absent':
changed, result_lb = self.ensure_lbpool_nodes_absent(
alias=loadbalancer_alias,
location=loadbalancer_location,
name=loadbalancer_name,
port=loadbalancer_port,
nodes=loadbalancer_nodes)
self.module.exit_json(changed=changed, loadbalancer=result_lb)
def ensure_loadbalancer_present(
self, name, alias, location, description, status):
"""
Checks to see if a load balancer exists and creates one if it does not.
:param name: Name of loadbalancer
:param alias: Alias of account
:param location: Datacenter
:param description: Description of loadbalancer
:param status: Enabled / Disabled
:return: (changed, result, lb_id)
changed: Boolean whether a change was made
result: The result object from the CLC load balancer request
lb_id: The load balancer id
"""
changed = False
result = name
lb_id = self._loadbalancer_exists(name=name)
if not lb_id:
if not self.module.check_mode:
result = self.create_loadbalancer(name=name,
alias=alias,
location=location,
description=description,
status=status)
lb_id = result.get('id')
changed = True
return changed, result, lb_id
def ensure_loadbalancerpool_present(
self, lb_id, alias, location, method, persistence, port):
"""
Checks to see if a load balancer pool exists and creates one if it does not.
:param lb_id: The loadbalancer id
:param alias: The account alias
:param location: the datacenter the load balancer resides in
:param method: the load balancing method
:param persistence: the load balancing persistence type
:param port: the port that the load balancer will listen on
:return: (changed, group, pool_id) -
changed: Boolean whether a change was made
result: The result from the CLC API call
pool_id: The string id of the load balancer pool
"""
changed = False
result = port
if not lb_id:
return changed, None, None
pool_id = self._loadbalancerpool_exists(
alias=alias,
location=location,
port=port,
lb_id=lb_id)
if not pool_id:
if not self.module.check_mode:
result = self.create_loadbalancerpool(
alias=alias,
location=location,
lb_id=lb_id,
method=method,
persistence=persistence,
port=port)
pool_id = result.get('id')
changed = True
return changed, result, pool_id
def ensure_loadbalancer_absent(self, name, alias, location):
"""
Checks to see if a load balancer exists and deletes it if it does
:param name: Name of the load balancer
:param alias: Alias of account
:param location: Datacenter
:return: (changed, result)
changed: Boolean whether a change was made
result: The result from the CLC API Call
"""
changed = False
result = name
lb_exists = self._loadbalancer_exists(name=name)
if lb_exists:
if not self.module.check_mode:
result = self.delete_loadbalancer(alias=alias,
location=location,
name=name)
changed = True
return changed, result
def ensure_loadbalancerpool_absent(self, alias, location, name, port):
"""
Checks to see if a load balancer pool exists and deletes it if it does
:param alias: The account alias
:param location: the datacenter the load balancer resides in
:param name: the name of the load balancer
:param port: the port that the load balancer listens on
:return: (changed, result) -
changed: Boolean whether a change was made
result: The result from the CLC API call
"""
changed = False
result = None
lb_exists = self._loadbalancer_exists(name=name)
if lb_exists:
lb_id = self._get_loadbalancer_id(name=name)
pool_id = self._loadbalancerpool_exists(
alias=alias,
location=location,
port=port,
lb_id=lb_id)
if pool_id:
changed = True
if not self.module.check_mode:
result = self.delete_loadbalancerpool(
alias=alias,
location=location,
lb_id=lb_id,
pool_id=pool_id)
else:
result = "Pool doesn't exist"
else:
result = "LB Doesn't Exist"
return changed, result
def ensure_lbpool_nodes_set(self, alias, location, name, port, nodes):
"""
Checks to see if the provided list of nodes exist for the pool
and set the nodes if any in the list those doesn't exist
:param alias: The account alias
:param location: the datacenter the load balancer resides in
:param name: the name of the load balancer
:param port: the port that the load balancer will listen on
:param nodes: The list of nodes to be updated to the pool
:return: (changed, result) -
changed: Boolean whether a change was made
result: The result from the CLC API call
"""
result = {}
changed = False
lb_exists = self._loadbalancer_exists(name=name)
if lb_exists:
lb_id = self._get_loadbalancer_id(name=name)
pool_id = self._loadbalancerpool_exists(
alias=alias,
location=location,
port=port,
lb_id=lb_id)
if pool_id:
nodes_exist = self._loadbalancerpool_nodes_exists(alias=alias,
location=location,
lb_id=lb_id,
pool_id=pool_id,
nodes_to_check=nodes)
if not nodes_exist:
changed = True
result = self.set_loadbalancernodes(alias=alias,
location=location,
lb_id=lb_id,
pool_id=pool_id,
nodes=nodes)
else:
result = "Pool doesn't exist"
else:
result = "Load balancer doesn't Exist"
return changed, result
def ensure_lbpool_nodes_present(self, alias, location, name, port, nodes):
"""
Checks to see if the provided list of nodes exist for the pool and add the missing nodes to the pool
:param alias: The account alias
:param location: the datacenter the load balancer resides in
:param name: the name of the load balancer
:param port: the port that the load balancer will listen on
:param nodes: the list of nodes to be added
:return: (changed, result) -
changed: Boolean whether a change was made
result: The result from the CLC API call
"""
changed = False
lb_exists = self._loadbalancer_exists(name=name)
if lb_exists:
lb_id = self._get_loadbalancer_id(name=name)
pool_id = self._loadbalancerpool_exists(
alias=alias,
location=location,
port=port,
lb_id=lb_id)
if pool_id:
changed, result = self.add_lbpool_nodes(alias=alias,
location=location,
lb_id=lb_id,
pool_id=pool_id,
nodes_to_add=nodes)
else:
result = "Pool doesn't exist"
else:
result = "Load balancer doesn't Exist"
return changed, result
def ensure_lbpool_nodes_absent(self, alias, location, name, port, nodes):
"""
Checks to see if the provided list of nodes exist for the pool and removes them if found any
:param alias: The account alias
:param location: the datacenter the load balancer resides in
:param name: the name of the load balancer
:param port: the port that the load balancer will listen on
:param nodes: the list of nodes to be removed
:return: (changed, result) -
changed: Boolean whether a change was made
result: The result from the CLC API call
"""
changed = False
lb_exists = self._loadbalancer_exists(name=name)
if lb_exists:
lb_id = self._get_loadbalancer_id(name=name)
pool_id = self._loadbalancerpool_exists(
alias=alias,
location=location,
port=port,
lb_id=lb_id)
if pool_id:
changed, result = self.remove_lbpool_nodes(alias=alias,
location=location,
lb_id=lb_id,
pool_id=pool_id,
nodes_to_remove=nodes)
else:
result = "Pool doesn't exist"
else:
result = "Load balancer doesn't Exist"
return changed, result
def create_loadbalancer(self, name, alias, location, description, status):
"""
Create a loadbalancer w/ params
:param name: Name of loadbalancer
:param alias: Alias of account
:param location: Datacenter
:param description: Description for loadbalancer to be created
:param status: Enabled / Disabled
:return: result: The result from the CLC API call
"""
result = None
try:
result = self.clc.v2.API.Call('POST',
'/v2/sharedLoadBalancers/%s/%s' % (alias,
location),
json.dumps({"name": name,
"description": description,
"status": status}))
sleep(1)
except APIFailedResponse as e:
self.module.fail_json(
msg='Unable to create load balancer "{0}". {1}'.format(
name, str(e.response_text)))
return result
def create_loadbalancerpool(
self, alias, location, lb_id, method, persistence, port):
"""
Creates a pool on the provided load balancer
:param alias: the account alias
:param location: the datacenter the load balancer resides in
:param lb_id: the id string of the load balancer
:param method: the load balancing method
:param persistence: the load balancing persistence type
:param port: the port that the load balancer will listen on
:return: result: The result from the create API call
"""
result = None
try:
result = self.clc.v2.API.Call(
'POST', '/v2/sharedLoadBalancers/%s/%s/%s/pools' %
(alias, location, lb_id), json.dumps(
{
"port": port, "method": method, "persistence": persistence
}))
except APIFailedResponse as e:
self.module.fail_json(
msg='Unable to create pool for load balancer id "{0}". {1}'.format(
lb_id, str(e.response_text)))
return result
def delete_loadbalancer(self, alias, location, name):
"""
Delete CLC loadbalancer
:param alias: Alias for account
:param location: Datacenter
:param name: Name of the loadbalancer to delete
:return: result: The result from the CLC API call
"""
result = None
lb_id = self._get_loadbalancer_id(name=name)
try:
result = self.clc.v2.API.Call(
'DELETE', '/v2/sharedLoadBalancers/%s/%s/%s' %
(alias, location, lb_id))
except APIFailedResponse as e:
self.module.fail_json(
msg='Unable to delete load balancer "{0}". {1}'.format(
name, str(e.response_text)))
return result
def delete_loadbalancerpool(self, alias, location, lb_id, pool_id):
"""
Delete the pool on the provided load balancer
:param alias: The account alias
:param location: the datacenter the load balancer resides in
:param lb_id: the id string of the load balancer
:param pool_id: the id string of the load balancer pool
:return: result: The result from the delete API call
"""
result = None
try:
result = self.clc.v2.API.Call(
'DELETE', '/v2/sharedLoadBalancers/%s/%s/%s/pools/%s' %
(alias, location, lb_id, pool_id))
except APIFailedResponse as e:
self.module.fail_json(
msg='Unable to delete pool for load balancer id "{0}". {1}'.format(
lb_id, str(e.response_text)))
return result
def _get_loadbalancer_id(self, name):
"""
Retrieves unique ID of loadbalancer
:param name: Name of loadbalancer
:return: Unique ID of the loadbalancer
"""
id = None
for lb in self.lb_dict:
if lb.get('name') == name:
id = lb.get('id')
return id
def _get_loadbalancer_list(self, alias, location):
"""
Retrieve a list of loadbalancers
:param alias: Alias for account
:param location: Datacenter
:return: JSON data for all loadbalancers at datacenter
"""
result = None
try:
result = self.clc.v2.API.Call(
'GET', '/v2/sharedLoadBalancers/%s/%s' % (alias, location))
except APIFailedResponse as e:
self.module.fail_json(
msg='Unable to fetch load balancers for account: {0}. {1}'.format(
alias, str(e.response_text)))
return result
def _loadbalancer_exists(self, name):
"""
Verify a loadbalancer exists
:param name: Name of loadbalancer
:return: False or the ID of the existing loadbalancer
"""
result = False
for lb in self.lb_dict:
if lb.get('name') == name:
result = lb.get('id')
return result
def _loadbalancerpool_exists(self, alias, location, port, lb_id):
"""
Checks to see if a pool exists on the specified port on the provided load balancer
:param alias: the account alias
:param location: the datacenter the load balancer resides in
:param port: the port to check and see if it exists
:param lb_id: the id string of the provided load balancer
:return: result: The id string of the pool or False
"""
result = False
try:
pool_list = self.clc.v2.API.Call(
'GET', '/v2/sharedLoadBalancers/%s/%s/%s/pools' %
(alias, location, lb_id))
except APIFailedResponse as e:
return self.module.fail_json(
msg='Unable to fetch the load balancer pools for for load balancer id: {0}. {1}'.format(
lb_id, str(e.response_text)))
for pool in pool_list:
if int(pool.get('port')) == int(port):
result = pool.get('id')
return result
def _loadbalancerpool_nodes_exists(
self, alias, location, lb_id, pool_id, nodes_to_check):
"""
Checks to see if a set of nodes exists on the specified port on the provided load balancer
:param alias: the account alias
:param location: the datacenter the load balancer resides in
:param lb_id: the id string of the provided load balancer
:param pool_id: the id string of the load balancer pool
:param nodes_to_check: the list of nodes to check for
:return: result: True / False indicating if the given nodes exist
"""
result = False
nodes = self._get_lbpool_nodes(alias, location, lb_id, pool_id)
for node in nodes_to_check:
if not node.get('status'):
node['status'] = 'enabled'
if node in nodes:
result = True
else:
result = False
return result
def set_loadbalancernodes(self, alias, location, lb_id, pool_id, nodes):
"""
Updates nodes to the provided pool
:param alias: the account alias
:param location: the datacenter the load balancer resides in
:param lb_id: the id string of the load balancer
:param pool_id: the id string of the pool
:param nodes: a list of dictionaries containing the nodes to set
:return: result: The result from the CLC API call
"""
result = None
if not lb_id:
return result
if not self.module.check_mode:
try:
result = self.clc.v2.API.Call('PUT',
'/v2/sharedLoadBalancers/%s/%s/%s/pools/%s/nodes'
% (alias, location, lb_id, pool_id), json.dumps(nodes))
except APIFailedResponse as e:
self.module.fail_json(
msg='Unable to set nodes for the load balancer pool id "{0}". {1}'.format(
pool_id, str(e.response_text)))
return result
def add_lbpool_nodes(self, alias, location, lb_id, pool_id, nodes_to_add):
"""
Add nodes to the provided pool
:param alias: the account alias
:param location: the datacenter the load balancer resides in
:param lb_id: the id string of the load balancer
:param pool_id: the id string of the pool
:param nodes_to_add: a list of dictionaries containing the nodes to add
:return: (changed, result) -
changed: Boolean whether a change was made
result: The result from the CLC API call
"""
changed = False
result = {}
nodes = self._get_lbpool_nodes(alias, location, lb_id, pool_id)
for node in nodes_to_add:
if not node.get('status'):
node['status'] = 'enabled'
if node not in nodes:
changed = True
nodes.append(node)
if changed is True and not self.module.check_mode:
result = self.set_loadbalancernodes(
alias,
location,
lb_id,
pool_id,
nodes)
return changed, result
def remove_lbpool_nodes(
self, alias, location, lb_id, pool_id, nodes_to_remove):
"""
Removes nodes from the provided pool
:param alias: the account alias
:param location: the datacenter the load balancer resides in
:param lb_id: the id string of the load balancer
:param pool_id: the id string of the pool
:param nodes_to_remove: a list of dictionaries containing the nodes to remove
:return: (changed, result) -
changed: Boolean whether a change was made
result: The result from the CLC API call
"""
changed = False
result = {}
nodes = self._get_lbpool_nodes(alias, location, lb_id, pool_id)
for node in nodes_to_remove:
if not node.get('status'):
node['status'] = 'enabled'
if node in nodes:
changed = True
nodes.remove(node)
if changed is True and not self.module.check_mode:
result = self.set_loadbalancernodes(
alias,
location,
lb_id,
pool_id,
nodes)
return changed, result
def _get_lbpool_nodes(self, alias, location, lb_id, pool_id):
"""
Return the list of nodes available to the provided load balancer pool
:param alias: the account alias
:param location: the datacenter the load balancer resides in
:param lb_id: the id string of the load balancer
:param pool_id: the id string of the pool
:return: result: The list of nodes
"""
result = None
try:
result = self.clc.v2.API.Call('GET',
'/v2/sharedLoadBalancers/%s/%s/%s/pools/%s/nodes'
% (alias, location, lb_id, pool_id))
except APIFailedResponse as e:
self.module.fail_json(
msg='Unable to fetch list of available nodes for load balancer pool id: {0}. {1}'.format(
pool_id, str(e.response_text)))
return result
@staticmethod
def define_argument_spec():
"""
Define the argument spec for the ansible module
:return: argument spec dictionary
"""
argument_spec = dict(
name=dict(required=True),
description=dict(default=None),
location=dict(required=True),
alias=dict(required=True),
port=dict(choices=[80, 443]),
method=dict(choices=['leastConnection', 'roundRobin']),
persistence=dict(choices=['standard', 'sticky']),
nodes=dict(type='list', default=[]),
status=dict(default='enabled', choices=['enabled', 'disabled']),
state=dict(
default='present',
choices=[
'present',
'absent',
'port_absent',
'nodes_present',
'nodes_absent'])
)
return argument_spec
def _set_clc_credentials_from_env(self):
"""
Set the CLC Credentials on the sdk by reading environment variables
:return: none
"""
env = os.environ
v2_api_token = env.get('CLC_V2_API_TOKEN', False)
v2_api_username = env.get('CLC_V2_API_USERNAME', False)
v2_api_passwd = env.get('CLC_V2_API_PASSWD', False)
clc_alias = env.get('CLC_ACCT_ALIAS', False)
api_url = env.get('CLC_V2_API_URL', False)
if api_url:
self.clc.defaults.ENDPOINT_URL_V2 = api_url
if v2_api_token and clc_alias:
self.clc._LOGIN_TOKEN_V2 = v2_api_token
self.clc._V2_ENABLED = True
self.clc.ALIAS = clc_alias
elif v2_api_username and v2_api_passwd:
self.clc.v2.SetCredentials(
api_username=v2_api_username,
api_passwd=v2_api_passwd)
else:
return self.module.fail_json(
msg="You must set the CLC_V2_API_USERNAME and CLC_V2_API_PASSWD "
"environment variables")
@staticmethod
def _set_user_agent(clc):
if hasattr(clc, 'SetRequestsSession'):
agent_string = "ClcAnsibleModule/" + __version__
ses = requests.Session()
ses.headers.update({"Api-Client": agent_string})
ses.headers['User-Agent'] += " " + agent_string
clc.SetRequestsSession(ses)
def main():
    """
    The main entry point: build the Ansible module from the argument spec,
    then hand control to ClcLoadBalancer.process_request.

    :return: none
    """
    argument_spec = ClcLoadBalancer.define_argument_spec()
    module = AnsibleModule(argument_spec=argument_spec,
                           supports_check_mode=True)
    ClcLoadBalancer(module).process_request()


if __name__ == '__main__':
    main()
| 37.409042 | 119 | 0.561448 |
ef64c3861730c25062a1640ce79c835540ce6d76 | 3,258 | py | Python | ratatosk/lib/utils/misc.py | SciLifeLab/ratatosk | 4e9c9d8dc868b19a7c70eb7b326422c87bc3d7c0 | [
"Apache-2.0"
] | null | null | null | ratatosk/lib/utils/misc.py | SciLifeLab/ratatosk | 4e9c9d8dc868b19a7c70eb7b326422c87bc3d7c0 | [
"Apache-2.0"
] | null | null | null | ratatosk/lib/utils/misc.py | SciLifeLab/ratatosk | 4e9c9d8dc868b19a7c70eb7b326422c87bc3d7c0 | [
"Apache-2.0"
] | null | null | null | # Copyright (c) 2013 Per Unneberg
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may not
# use this file except in compliance with the License. You may obtain a copy of
# the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations under
# the License.
import os
import luigi
import time
import shutil
import logging
from ratatosk.job import InputJobTask, JobTask, DefaultShellJobRunner, DefaultGzShellJobRunner
from ratatosk.utils import rreplace
logger = logging.getLogger('luigi-interface')
class MiscJobRunner(DefaultShellJobRunner):
    """Plain shell job runner for misc tasks; adds no behaviour of its own."""
    pass
class ResyncMatesJobRunner(DefaultGzShellJobRunner):
    """Gzip-aware shell runner returned by ResyncMatesJobTask.job_runner."""
    pass
class InputFastqFile(InputJobTask):
    """Input task wrapping an external fastq file (see ``parent_task``)."""
    # Config lookup keys: section [misc], subsection [InputFastqFile].
    _config_section = "misc"
    _config_subsection = "InputFastqFile"
    parent_task = luigi.Parameter(default="ratatosk.lib.files.external.FastqFile")
class ResyncMatesJobTask(JobTask):
    """Task that runs the external ``resyncMates.pl`` script on a pair of
    fastq files, producing a pair of resynchronised outputs.

    ``target`` holds the two output file names; the two source (parent)
    file names are derived from them in _make_paired_source_file_names.
    """
    # Config lookup keys: section [misc], subsection [ResyncMates].
    _config_section = "misc"
    _config_subsection = "ResyncMates"
    # External program invoked by the job runner.
    executable = luigi.Parameter(default="resyncMates.pl")
    # Label inserted into output file names (stripped to get source names).
    label = luigi.Parameter(default=".sync")
    # Pair of output targets; this task has a 2-to-2 target/source mapping.
    target = luigi.Parameter(default=[], is_list=True)
    parent_task = luigi.Parameter(default="ratatosk.lib.utils.misc.InputFastqFile")
    # Suffixes identifying read 1 / read 2 files.
    read1_suffix = luigi.Parameter(default="_R1_001")
    read2_suffix = luigi.Parameter(default="_R2_001")
    def job_runner(self):
        # Gzip-aware runner; inputs/outputs are compressed fastq files.
        return ResyncMatesJobRunner()
    def requires(self):
        # One parent task per derived source file.
        cls = self.set_parent_task()
        sources = self._make_paired_source_file_names()
        return [cls(target=x) for x in sources]
    def output(self):
        return [luigi.LocalTarget(self.target[0]), luigi.LocalTarget(self.target[1])]
    def args(self):
        # Command line: -i <in1> -j <in2> -o <out1> -p <out2>
        return ["-i", self.input()[0], "-j", self.input()[1], "-o", self.output()[0], "-p", self.output()[1]]
    # Put this here for now since this is the first case I've sofar
    # encountered where there is a 2-2 target-source mapping
    def _make_paired_source_file_names(self):
        """Construct source file name from a target.

        For each target: replace target_suffix with source_suffix (first
        element when target_suffix is a tuple), warn when ``label`` occurs
        an unexpected number of times, and finally strip ``label`` once
        (right-most occurrence) from every name.

        NOTE(review): when ``label`` is falsy this loop appends to
        ``source_list`` while iterating it and would never terminate;
        presumably ``label`` is always non-empty in practice -- confirm.
        """
        source_list = self.target
        for source in source_list:
            if isinstance(self.target_suffix, tuple):
                if self.target_suffix[0] and self.source_suffix:
                    source = rreplace(source, self.target_suffix[0], self.source_suffix, 1)
            else:
                if self.target_suffix and self.source_suffix:
                    source = rreplace(source, self.target_suffix, self.source_suffix, 1)
            if not self.label:
                source_list.append(source)
            if source.count(self.label) > 1:
                logger.warn("label '{}' found multiple times in target '{}'; this could be intentional".format(self.label, source))
            elif source.count(self.label) == 0:
                logger.warn("label '{}' not found in target '{}'; are you sure your target is correctly formatted?".format(self.label, source))
        return [rreplace(x, self.label, "", 1) for x in source_list]
| 40.222222 | 143 | 0.689994 |
9d165a525002aecf012cec39190adbeadeee2b7b | 1,022 | py | Python | {{cookiecutter.repo_name}}/runtests.py | HandyCodeJob/hcj-django-temp | b42c8c27cf4644b29a480356f48281e97fe97fcc | [
"BSD-3-Clause"
] | null | null | null | {{cookiecutter.repo_name}}/runtests.py | HandyCodeJob/hcj-django-temp | b42c8c27cf4644b29a480356f48281e97fe97fcc | [
"BSD-3-Clause"
] | 10 | 2015-11-06T06:28:34.000Z | 2015-12-26T22:48:31.000Z | {{cookiecutter.repo_name}}/runtests.py | HandyCodeJob/hcj-django-temp | b42c8c27cf4644b29a480356f48281e97fe97fcc | [
"BSD-3-Clause"
] | null | null | null | #!/usr/bin/env python
"""
http://www.crccheck.com/blog/django-nose-without-django-nose/
HACK to support the Django + nose without django-nose.
Built based on documentation from:
* https://docs.djangoproject.com/en/1.8/topics/testing/advanced/#using-the-django-test-runner-to-test-reusable-applications # noqa
* http://nose.readthedocs.org/en/latest/usage.html#basic-usage
"""
import sys
import django
import nose
from django.test.utils import setup_test_environment, teardown_test_environment
from django.db import connection
from config.settings.common import NOSE_ARGS
if __name__ == '__main__':
    django.setup()
    # Consume our custom --keepdb flag before nose parses the argv.
    keepdb = '--keepdb' in sys.argv
    if keepdb:
        sys.argv.remove('--keepdb')
    setup_test_environment()
    # Create (or reuse, with --keepdb) the test database around the run.
    test_db_name = connection.creation.create_test_db(keepdb=keepdb)
    passed = nose.run(argv=NOSE_ARGS)
    connection.creation.destroy_test_db(test_db_name, keepdb=keepdb)
    teardown_test_environment()
    # Propagate failure to the shell for CI.
    if not passed:
        sys.exit(1)
| 29.2 | 131 | 0.739726 |
9090f821615f3c8581c28fa1f8b3131a55b42b60 | 3,336 | py | Python | test/multiapi/Expected/AcceptanceTests/MultiapiCustomBaseUrl/multiapicustombaseurl/v1/_configuration.py | qwordy/autorest.python | 6b12df51c2a39a1285546b5a771b69f5896e794f | [
"MIT"
] | 35 | 2018-04-03T12:15:53.000Z | 2022-03-11T14:03:34.000Z | test/multiapi/Expected/AcceptanceTests/MultiapiCustomBaseUrl/multiapicustombaseurl/v1/_configuration.py | qwordy/autorest.python | 6b12df51c2a39a1285546b5a771b69f5896e794f | [
"MIT"
] | 652 | 2017-08-28T22:44:41.000Z | 2022-03-31T21:20:31.000Z | test/multiapi/Expected/AcceptanceTests/MultiapiCustomBaseUrl/multiapicustombaseurl/v1/_configuration.py | qwordy/autorest.python | 6b12df51c2a39a1285546b5a771b69f5896e794f | [
"MIT"
] | 29 | 2017-08-28T20:57:01.000Z | 2022-03-11T14:03:38.000Z | # coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
from typing import TYPE_CHECKING
from azure.core.configuration import Configuration
from azure.core.pipeline import policies
if TYPE_CHECKING:
# pylint: disable=unused-import,ungrouped-imports
from typing import Any
from azure.core.credentials import TokenCredential
VERSION = "unknown"
# NOTE: AutoRest-generated configuration class -- edits are lost on
# regeneration (see the file header); restructure in the generator, not here.
class MultiapiCustomBaseUrlServiceClientConfiguration(Configuration):
    """Configuration for MultiapiCustomBaseUrlServiceClient.
    Note that all parameters used to create this instance are saved as instance
    attributes.
    :param credential: Credential needed for the client to connect to Azure.
    :type credential: ~azure.core.credentials.TokenCredential
    :param endpoint: Pass in https://localhost:3000.
    :type endpoint: str
    """
    def __init__(
        self,
        credential,  # type: "TokenCredential"
        endpoint,  # type: str
        **kwargs  # type: Any
    ):
        # type: (...) -> None
        if credential is None:
            raise ValueError("Parameter 'credential' must not be None.")
        if endpoint is None:
            raise ValueError("Parameter 'endpoint' must not be None.")
        super(MultiapiCustomBaseUrlServiceClientConfiguration, self).__init__(**kwargs)
        self.credential = credential
        self.endpoint = endpoint
        self.api_version = "1.0.0"
        # credential_scopes is popped (not get) so it is not forwarded to
        # the pipeline policies below.
        self.credential_scopes = kwargs.pop('credential_scopes', [])
        kwargs.setdefault('sdk_moniker', 'multiapicustombaseurl/{}'.format(VERSION))
        self._configure(**kwargs)
    def _configure(
        self,
        **kwargs  # type: Any
    ):
        # type: (...) -> None
        # Caller-supplied policies win; otherwise build the azure-core
        # defaults from the remaining kwargs.
        self.user_agent_policy = kwargs.get('user_agent_policy') or policies.UserAgentPolicy(**kwargs)
        self.headers_policy = kwargs.get('headers_policy') or policies.HeadersPolicy(**kwargs)
        self.proxy_policy = kwargs.get('proxy_policy') or policies.ProxyPolicy(**kwargs)
        self.logging_policy = kwargs.get('logging_policy') or policies.NetworkTraceLoggingPolicy(**kwargs)
        self.http_logging_policy = kwargs.get('http_logging_policy') or policies.HttpLoggingPolicy(**kwargs)
        self.retry_policy = kwargs.get('retry_policy') or policies.RetryPolicy(**kwargs)
        self.custom_hook_policy = kwargs.get('custom_hook_policy') or policies.CustomHookPolicy(**kwargs)
        self.redirect_policy = kwargs.get('redirect_policy') or policies.RedirectPolicy(**kwargs)
        self.authentication_policy = kwargs.get('authentication_policy')
        if not self.credential_scopes and not self.authentication_policy:
            raise ValueError("You must provide either credential_scopes or authentication_policy as kwargs")
        if self.credential and not self.authentication_policy:
            self.authentication_policy = policies.BearerTokenCredentialPolicy(self.credential, *self.credential_scopes, **kwargs)
d27365f416ce973c9031264e92bba4ee934af2a7 | 1,788 | py | Python | plugins/wigle/komand_wigle/actions/get_files_status/action.py | lukaszlaszuk/insightconnect-plugins | 8c6ce323bfbb12c55f8b5a9c08975d25eb9f8892 | [
"MIT"
] | 46 | 2019-06-05T20:47:58.000Z | 2022-03-29T10:18:01.000Z | plugins/wigle/komand_wigle/actions/get_files_status/action.py | lukaszlaszuk/insightconnect-plugins | 8c6ce323bfbb12c55f8b5a9c08975d25eb9f8892 | [
"MIT"
] | 386 | 2019-06-07T20:20:39.000Z | 2022-03-30T17:35:01.000Z | plugins/wigle/komand_wigle/actions/get_files_status/action.py | lukaszlaszuk/insightconnect-plugins | 8c6ce323bfbb12c55f8b5a9c08975d25eb9f8892 | [
"MIT"
] | 43 | 2019-07-09T14:13:58.000Z | 2022-03-28T12:04:46.000Z | import komand
from .schema import GetFilesStatusInput, GetFilesStatusOutput
# Custom imports below
class GetFilesStatus(komand.Action):
    """Komand action: report the status of files the current user uploaded."""
    def __init__(self):
        super(self.__class__, self).__init__(
            name="get_files_status",
            description="Get the status of files uploaded by the current user",
            input=GetFilesStatusInput(),
            output=GetFilesStatusOutput(),
        )
    # NOTE(review): mutable default arg follows the komand plugin
    # convention; params is only read here, never mutated.
    def run(self, params={}):
        """GET file/transactions and return the raw API response.

        ``pagestart``/``pageend`` (defaults 0/100) window the result set.
        """
        self.logger.info("GetFileStatus: Fetching status of uploaded files from server ...")
        response = self.connection.call_api(
            "get",
            "file/transactions",
            params={"pagestart": params.get("pagestart", 0), "pageend": params.get("pageend", 100)},
        )
        return response
    def test(self):
        # Static sample payload matching the output schema, used by the
        # platform's connection test (no API call is made).
        return {
            "results": [
                {
                    "transid": "string",
                    "username": "string",
                    "firstTime": "2018-08-28T17:24:07.478Z",
                    "lastupdt": "2018-08-28T17:24:07.478Z",
                    "fileName": "string",
                    "fileSize": 0,
                    "fileLines": 0,
                    "status": "string",
                    "discoveredGps": 0,
                    "discovered": 0,
                    "total": 0,
                    "totalGps": 0,
                    "totalLocations": 0,
                    "percentDone": 0,
                    "timeParsing": 0,
                    "genDiscovered": 0,
                    "genDiscoveredGps": 0,
                    "genTotal": 0,
                    "genTotalGps": 0,
                    "genTotalLocations": 0,
                    "wait": 0,
                }
            ],
            "processingQueueDepth": 0,
        }
| 33.111111 | 100 | 0.456376 |
aadc46ec098c00ae103836a8bf3320d0b9765e2e | 1,876 | py | Python | scripts/train.py | davnn/deep_pommerman | 98375ffbd076ade584309b9feef248e30de77120 | [
"Apache-2.0"
] | null | null | null | scripts/train.py | davnn/deep_pommerman | 98375ffbd076ade584309b9feef248e30de77120 | [
"Apache-2.0"
] | null | null | null | scripts/train.py | davnn/deep_pommerman | 98375ffbd076ade584309b9feef248e30de77120 | [
"Apache-2.0"
] | null | null | null | import argparse
from typing import Optional, Dict
import torch
from graphic_pomme_env.wrappers import NUM_STACK
from stable_baselines3 import PPO
from stable_baselines3.common.env_util import make_vec_env
from stable_baselines3.common.callbacks import CallbackList, CheckpointCallback, EvalCallback
from scripts.agents import make_actor, ACTORS
from scripts.env import GraphicPommerEnv
from scripts.model import Extractor
# Custom CNN feature extractor (256-dim features) handed to PPO's policy.
policy_kwargs = dict(
    features_extractor_class=Extractor,
    features_extractor_kwargs=dict(features_dim=256),
)
parser = argparse.ArgumentParser(description="Train a PPO model")
parser.add_argument("--model", help="Path to your model .zip file")
# Factory for a fresh Pommerman env against the simple scripted opponent.
make_env = lambda: GraphicPommerEnv(num_stack=NUM_STACK,
                                    start_pos=0,  # 0 = random start position (per original note)
                                    opponent_actor=make_actor(ACTORS.simple),
                                    board="GraphicOVOCompact-v0")
checkpoint_callback = CheckpointCallback(save_freq=500000, save_path=f"./logs", name_prefix="PPO_")
# NOTE(review): eval_callback is constructed (this instantiates an env) but
# is excluded from the CallbackList below -- confirm whether that is intended.
eval_callback = EvalCallback(make_env(), best_model_save_path=f"./logs/best", log_path=f"./logs", eval_freq=100000)
callback = CallbackList([checkpoint_callback])  # , eval_callback])
def create_model(env, path: Optional[str] = None, policy_kwargs: Optional[Dict] = None):
    """Build a PPO model for *env*.

    Resume from the checkpoint at *path* when given; otherwise create a
    fresh CnnPolicy model, forwarding *policy_kwargs* when provided.
    """
    if path is not None:
        return PPO.load(path, env)
    kwargs = dict(n_steps=4096, ent_coef=0.0001, verbose=True)
    if policy_kwargs is not None:
        kwargs["policy_kwargs"] = policy_kwargs
    return PPO("CnnPolicy", env, **kwargs)
if __name__ == "__main__":
    args = parser.parse_args()
    # Two vectorised copies of the environment for rollout collection.
    env = make_vec_env(make_env, n_envs=2)
    # Resume from the --model checkpoint when given, else start fresh.
    model = create_model(env=env, path=args.model)
    model = model.learn(total_timesteps=50000000, callback=callback)
    model.save("PPO")
| 40.782609 | 115 | 0.724947 |
91921a0cec639471d9e700bb16b4a2dba4d3d1cc | 714 | py | Python | app/main/views/sub_navigation_dictionaries.py | GouvQC/notification-admin | 5707d8526668e0800ede256db925bdec6f58455d | [
"MIT"
] | null | null | null | app/main/views/sub_navigation_dictionaries.py | GouvQC/notification-admin | 5707d8526668e0800ede256db925bdec6f58455d | [
"MIT"
] | null | null | null | app/main/views/sub_navigation_dictionaries.py | GouvQC/notification-admin | 5707d8526668e0800ede256db925bdec6f58455d | [
"MIT"
] | null | null | null | def features_nav():
return [
{
"name": "Features",
"link": "main.features",
"sub_navigation_items": [
{
"name": "Emails",
"link": "main.features_email",
},
{
"name": "Text messages",
"link": "main.features_sms",
},
{
"name": "Template formatting guide",
"link": "main.features_templates",
},
]
}
]
def pricing_nav():
    """Return the sub-navigation entries for the Pricing section."""
    return [{"name": "Pricing", "link": "main.pricing"}]
| 23.032258 | 56 | 0.330532 |
e6e83d883a5a3ae8a05dc491dcbb6ab0edb63389 | 14,392 | py | Python | tutorials/DT_RNN_Tut.py | msiahbani/GroundHog-NMT | d2beadc3d0fb6b007a2bc6c26a3d5e914452da9d | [
"BSD-3-Clause"
] | 65 | 2015-05-21T21:33:38.000Z | 2021-11-15T14:12:40.000Z | tutorials/DT_RNN_Tut.py | janchorowski/GroundHog | 68d01c90becbf31a487be18985defb0c95f2b522 | [
"BSD-3-Clause"
] | 7 | 2015-11-24T17:25:17.000Z | 2016-04-05T13:05:45.000Z | tutorials/DT_RNN_Tut.py | janchorowski/GroundHog | 68d01c90becbf31a487be18985defb0c95f2b522 | [
"BSD-3-Clause"
] | 45 | 2015-06-29T15:34:55.000Z | 2021-03-24T10:24:31.000Z | """
Test of the classical LM model for language modelling
"""
from groundhog.datasets import LMIterator
from groundhog.trainer.SGD_momentum import SGD as SGD_m
from groundhog.trainer.SGD import SGD
from groundhog.mainLoop import MainLoop
from groundhog.layers import MultiLayer, \
RecurrentMultiLayer, \
RecurrentMultiLayerInp, \
RecurrentMultiLayerShortPath, \
RecurrentMultiLayerShortPathInp, \
RecurrentMultiLayerShortPathInpAll, \
SoftmaxLayer, \
LastState,\
UnaryOp, \
Operator, \
Shift, \
GaussianNoise, \
SigmoidLayer
from groundhog.layers import maxpool, \
maxpool_ntimes, \
last, \
last_ntimes,\
tanh, \
sigmoid, \
rectifier,\
hard_sigmoid, \
hard_tanh
from groundhog.models import LM_Model
from theano.sandbox.scan import scan
import numpy
import theano
import theano.tensor as TT
def linear(x):
    """Identity activation: return *x* unchanged.

    Replaces the PEP 8-discouraged ``linear = lambda x: x`` assignment;
    callers still see a callable named ``linear``.
    """
    return x
theano.config.allow_gc = False  # keep intermediate buffers alive between calls: faster execution at the cost of memory (Theano config flag)
def get_text_data(state):
    """Build the train/valid/test LMIterator streams described by ``state``.

    The validation and test iterators are finite and accept short
    sequences; for 'wiki'-style corpora no test split exists, so
    ``test_data`` is returned as None.
    """
    def pack(x, y, r):
        # Shared minibatch format for every split (the original defined
        # two identical formatters; they are merged here).
        return {'x': x, 'y': y, 'reset': r}
    train_data = LMIterator(
        batch_size=state['bs'],
        path=state['path'],
        stop=-1,
        seq_len=state['seqlen'],
        mode="train",
        chunks=state['chunks'],
        shift=state['shift'],
        output_format=pack,
        can_fit=True)
    valid_data = LMIterator(
        batch_size=state['bs'],
        path=state['path'],
        stop=-1,
        use_infinite_loop=False,
        allow_short_sequences=True,
        seq_len=state['seqlen'],
        mode="valid",
        reset=state['reset'],
        chunks=state['chunks'],
        shift=state['shift'],
        output_format=pack,
        can_fit=True)
    test_data = LMIterator(
        batch_size=state['bs'],
        path=state['path'],
        stop=-1,
        use_infinite_loop=False,
        allow_short_sequences=True,
        seq_len=state['seqlen'],
        mode="test",
        chunks=state['chunks'],
        shift=state['shift'],
        output_format=pack,
        can_fit=True)
    if 'wiki' in state['path']:
        test_data = None
    return train_data, valid_data, test_data
def jobman(state, channel):
    """Build, train and sample from a DT-RNN language model.

    state   -- dict of hyper-parameters (see the __main__ block of this file)
    channel -- jobman channel for reporting, or None when run stand-alone

    NOTE: this file targets Python 2 / Theano (print statements, eval()-based
    hyper-parameter strings).
    """
    # load dataset
    rng = numpy.random.RandomState(state['seed'])
    # declare the dimensionalies of the input and output
    if state['chunks'] == 'words':
        state['n_in'] = 10000
        state['n_out'] = 10000
    else:
        state['n_in'] = 50
        state['n_out'] = 50
    train_data, valid_data, test_data = get_text_data(state)
    ## BEGIN Tutorial
    ### Define Theano Input Variables
    x = TT.lvector('x')
    y = TT.lvector('y')
    # Shared initial hidden state of the top recurrent layer (carried across
    # updates when state['carry_h0'] is set).
    h0 = theano.shared(numpy.zeros((eval(state['nhids'])[-1],), dtype='float32'))
    ### Neural Implementation of the Operators: \oplus
    #### Word Embedding
    emb_words = MultiLayer(
        rng,
        n_in=state['n_in'],
        n_hids=eval(state['inp_nhids']),
        activation=eval(state['inp_activ']),
        init_fn='sample_weights_classic',
        weight_noise=state['weight_noise'],
        rank_n_approx = state['rank_n_approx'],
        scale=state['inp_scale'],
        sparsity=state['inp_sparse'],
        learn_bias = True,
        bias_scale=eval(state['inp_bias']),
        name='emb_words')
    #### Deep Transition Recurrent Layer
    rec = eval(state['rec_layer'])(
        rng,
        eval(state['nhids']),
        activation = eval(state['rec_activ']),
        #activation = 'TT.nnet.sigmoid',
        bias_scale = eval(state['rec_bias']),
        scale=eval(state['rec_scale']),
        sparsity=eval(state['rec_sparse']),
        init_fn=eval(state['rec_init']),
        weight_noise=state['weight_noise'],
        name='rec')
    #### Stiching them together
    ##### (1) Get the embedding of a word
    x_emb = emb_words(x, no_noise_bias=state['no_noise_bias'])
    ##### (2) Embedding + Hidden State via DT Recurrent Layer
    reset = TT.scalar('reset')
    rec_layer = rec(x_emb, n_steps=x.shape[0],
                    init_state=h0*reset,
                    no_noise_bias=state['no_noise_bias'],
                    truncate_gradient=state['truncate_gradient'],
                    batch_size=1)
    ### Neural Implementation of the Operators: \lhd
    #### Softmax Layer
    output_layer = SoftmaxLayer(
        rng,
        eval(state['nhids'])[-1],
        state['n_out'],
        scale=state['out_scale'],
        bias_scale=state['out_bias_scale'],
        init_fn="sample_weights_classic",
        weight_noise=state['weight_noise'],
        sparsity=state['out_sparse'],
        sum_over_time=True,
        name='out')
    ### Few Optional Things
    #### Direct shortcut from x to y
    if state['shortcut_inpout']:
        shortcut = MultiLayer(
            rng,
            n_in=state['n_in'],
            n_hids=eval(state['inpout_nhids']),
            activations=eval(state['inpout_activ']),
            init_fn='sample_weights_classic',
            weight_noise = state['weight_noise'],
            scale=eval(state['inpout_scale']),
            sparsity=eval(state['inpout_sparse']),
            learn_bias=eval(state['inpout_learn_bias']),
            bias_scale=eval(state['inpout_bias']),
            name='shortcut')
    #### Learning rate scheduling (1/(1+n/beta))
    state['clr'] = state['lr']
    def update_lr(obj, cost):
        # Hyperbolic decay of the learning rate once training has passed
        # state['lr_start'] steps (only applies when lr_start is an int).
        stp = obj.step
        if isinstance(obj.state['lr_start'], int) and stp > obj.state['lr_start']:
            time = float(stp - obj.state['lr_start'])
            new_lr = obj.state['clr']/(1+time/obj.state['lr_beta'])
            obj.lr = new_lr
    if state['lr_adapt']:
        rec.add_schedule(update_lr)
    ### Neural Implementations of the Language Model
    #### Training
    if state['shortcut_inpout']:
        train_model = output_layer(rec_layer,
                                   no_noise_bias=state['no_noise_bias'],
                                   additional_inputs=[shortcut(x)]).train(target=y,
                                   scale=numpy.float32(1./state['seqlen']))
    else:
        train_model = output_layer(rec_layer,
                                   no_noise_bias=state['no_noise_bias']).train(target=y,
                                   scale=numpy.float32(1./state['seqlen']))
    # Last hidden state of the sequence, to be carried into the next update.
    nw_h0 = rec_layer.out[rec_layer.out.shape[0]-1]
    if state['carry_h0']:
        train_model.updates += [(h0, nw_h0)]
    #### Validation
    # Separate carried hidden state for validation, and a noise-free graph.
    h0val = theano.shared(numpy.zeros((eval(state['nhids'])[-1],), dtype='float32'))
    rec_layer = rec(emb_words(x, use_noise=False),
                    n_steps = x.shape[0],
                    batch_size=1,
                    init_state=h0val*reset,
                    use_noise=False)
    nw_h0 = rec_layer.out[rec_layer.out.shape[0]-1]
    if not state['shortcut_inpout']:
        valid_model = output_layer(rec_layer,
                                   use_noise=False).validate(target=y, sum_over_time=True)
    else:
        valid_model = output_layer(rec_layer,
                                   additional_inputs=[shortcut(x, use_noise=False)],
                                   use_noise=False).validate(target=y, sum_over_time=True)
    valid_updates = []
    if state['carry_h0']:
        valid_updates = [(h0val, nw_h0)]
    valid_fn = theano.function([x,y, reset], valid_model.cost,
                               name='valid_fn', updates=valid_updates)
    #### Sampling
    ##### single-step sampling
    def sample_fn(word_tm1, h_tm1):
        # One decoding step: embed previous word, advance hidden state, sample.
        x_emb = emb_words(word_tm1, use_noise = False, one_step=True)
        h0 = rec(x_emb, state_before=h_tm1, one_step=True, use_noise=False)[-1]
        word = output_layer.get_sample(state_below=h0, temp=1.)
        return word, h0
    ##### scan for iterating the single-step sampling multiple times
    [samples, summaries], updates = scan(sample_fn,
                                         states = [
                                             TT.alloc(numpy.int64(0), state['sample_steps']),
                                             TT.alloc(numpy.float32(0), 1, eval(state['nhids'])[-1])],
                                         n_steps= state['sample_steps'],
                                         name='sampler_scan')
    ##### define a Theano function
    sample_fn = theano.function([], [samples],
                                updates=updates, profile=False, name='sample_fn')
    ##### Load a dictionary
    dictionary = numpy.load(state['dictionary'])
    if state['chunks'] == 'chars':
        dictionary = dictionary['unique_chars']
    else:
        dictionary = dictionary['unique_words']
    def hook_fn():
        # Periodically called by the main loop: draw a sample from the model
        # and print it (decoded through the dictionary).
        sample = sample_fn()[0]
        print 'Sample:',
        if state['chunks'] == 'chars':
            print "".join(dictionary[sample])
        else:
            for si in sample:
                print dictionary[si],
            print
    ### Build and Train a Model
    #### Define a model
    model = LM_Model(
        cost_layer = train_model,
        weight_noise_amount=state['weight_noise_amount'],
        valid_fn = valid_fn,
        clean_before_noise_fn = False,
        noise_fn = None,
        rng = rng)
    if state['reload']:
        model.load(state['prefix']+'model.npz')
    #### Define a trainer
    ##### Training algorithm (SGD)
    if state['moment'] < 0:
        algo = SGD(model, state, train_data)
    else:
        algo = SGD_m(model, state, train_data)
    ##### Main loop of the trainer
    main = MainLoop(train_data,
                    valid_data,
                    test_data,
                    model,
                    algo,
                    state,
                    channel,
                    train_cost = False,
                    hooks = hook_fn,
                    validate_postprocess = eval(state['validate_postprocess']))
    main.main()
    ## END Tutorial
if __name__ == '__main__':
    # Hyper-parameter configuration for a character-level Penn Treebank run,
    # expressed as a single literal instead of per-key assignments.
    state = {
        # complete path to data (cluster specific)
        'seqlen': 100,
        'path': "/data/lisa/data/PennTreebankCorpus/pentree_char_and_word.npz",
        'dictionary': "/data/lisa/data/PennTreebankCorpus/dictionaries.npz",
        'chunks': 'chars',
        'seed': 123,
        # what to do when the cost becomes nan ('warn' keeps going)
        'on_nan': 'warn',
        # DATA: reset length used only for the (possibly huge) validation set
        'reset': -1,
        # the job stops when the learning rate declines to this value
        'minlr': float(5e-7),
        # Input layer: weights sampled from a gaussian with std=scale
        'rank_n_approx': 0,
        'inp_nhids': '[400]',
        'inp_activ': '[linear]',
        'inp_bias': '[0.]',
        'inp_sparse': -1,  # dense
        'inp_scale': .1,
        # Output (softmax) weights
        'out_scale': .1,
        'out_bias_scale': -.5,
        'out_sparse': -1,
        # Hidden layers of the DT-RNN; a single entry gives a standard RNN
        'nhids': '[200, 200]',
        'rec_activ': '"TT.nnet.sigmoid"',
        'rec_bias': '.0',
        'rec_sparse': '20',
        'rec_scale': '1.',
        # sample_weights rescales so the largest singular value equals scale;
        # sample_weights_classic samples from a gaussian with std equal to scale
        'rec_init': "'sample_weights'",
        'rec_layer': 'RecurrentMultiLayerShortPathInpAll',
        # SGD params
        'bs': 1,          # minibatch size
        'lr': 1.,         # initial learning rate
        'cutoff': 1.,     # threshold for gradient rescaling
        'moment': 0.995,  # momentum
        # Do not optimize these
        'weight_noise': True,          # white Gaussian noise on weights
        'weight_noise_amount': 0.075,  # its standard deviation
        'loopIters': int(1e8),         # maximal number of updates
        'timeStop': 48*60,             # minutes before the job is killed (48 hours)
        # Factored linear shortcut from the input straight into the softmax
        'shortcut_inpout': False,
        'shortcut_rank': 200,
        # Main-loop frequencies; keep trainFreq reasonably large, otherwise a
        # lot of memory/stdout is spent tracking the training error per step
        'trainFreq': 100,
        'hookFreq': 5000,
        'validFreq': 1000,
        'saveFreq': 15,      # save every 15 minutes
        'prefix': 'model_',  # prefix of the save files
        'reload': False,
        'overwrite': 1,
        # cost_threshold is in entropy units (the PPL equivalent would be 1.004);
        # we should not hyperoptimize this
        'divide_lr': 2.,
        'cost_threshold': 1.0002,
        'patience': 1,
        'validate_postprocess': 'lambda x:10**(x/numpy.log(10))',
        'truncate_gradient': 80,  # truncated BPTT window
        'lr_adapt': 0,            # 1/(1 + n/n0) scheduling
        'lr_beta': 10*1900.,
        'lr_start': 'on_error',
        'no_noise_bias': True,  # do not apply weight noise to biases
        'carry_h0': True,       # carry h0 across updates
        'sample_steps': 80,
        # Do not change these
        'minerr': -1,
        'shift': 1,  # n-step forward prediction
        'cutoff_rescale_length': False,
    }
    jobman(state, None)
| 34.430622 | 84 | 0.594427 |
c9b3e75408bb1af50cd690e9ac41e175bf7c8044 | 716 | py | Python | FeatureCollection/metadata_aggregation.py | monocilindro/qgis-earthengine-examples | 82aea8926d34ed3f4ad4a4a345ddbd225819d28f | [
"MIT"
] | 646 | 2019-12-03T06:09:03.000Z | 2022-03-28T03:37:08.000Z | FeatureCollection/metadata_aggregation.py | csaybar/qgis-earthengine-examples | ba8942683834d2847ff3246bdd1859b36e50fe44 | [
"MIT"
] | 10 | 2019-12-30T03:42:44.000Z | 2021-05-22T07:34:07.000Z | FeatureCollection/metadata_aggregation.py | csaybar/qgis-earthengine-examples | ba8942683834d2847ff3246bdd1859b36e50fe44 | [
"MIT"
] | 219 | 2019-12-06T02:20:53.000Z | 2022-03-30T15:14:27.000Z | import ee
from ee_plugin import Map
def cal_area(feature):
num = ee.Number.parse(feature.get('areasqkm'))
return feature.set('areasqkm', num)
# Load watersheds from a data table.
sheds = ee.FeatureCollection('USGS/WBD/2017/HUC06') \
.filterBounds(ee.Geometry.Rectangle(-127.18, 19.39, -62.75, 51.29)) \
.map(cal_area)
# Display the table and print its first element.
# Map.addLayer(sheds, {}, 'watersheds')
Map.addLayer(ee.Image().paint(sheds, 1, 2), {}, 'watersheds')
print('First watershed', sheds.first().getInfo())
# Print the number of watersheds.
print('Count:', sheds.size().getInfo())
# Print stats for an area property.
# print('Area stats:', sheds.aggregate_stats('areasqkm').getInfo())
| 31.130435 | 71 | 0.703911 |
f2da0d620f11aaab5a7c16dfc1af780089e05924 | 2,963 | py | Python | wenmo-sourcecode/wenmo/utils.py | Ryan0v0/DatabaseDesign-WenMo | a1f86af6e34f1121caec0ee2526d0e0d24504425 | [
"MIT"
] | 3 | 2019-12-12T16:36:50.000Z | 2021-09-08T02:14:44.000Z | wenmo-sourcecode/wenmo/utils.py | Ryan0v0/DatabaseDesign-WenMo | a1f86af6e34f1121caec0ee2526d0e0d24504425 | [
"MIT"
] | 5 | 2021-03-19T09:14:47.000Z | 2022-01-13T01:57:15.000Z | wenmo-sourcecode/wenmo/utils.py | Ryan0v0/DatabaseDesign-WenMo | a1f86af6e34f1121caec0ee2526d0e0d24504425 | [
"MIT"
] | null | null | null |
import os
import uuid
try:
from urlparse import urlparse, urljoin
except ImportError:
from urllib.parse import urlparse, urljoin
import PIL
from PIL import Image
from flask import current_app, request, url_for, redirect, flash
from itsdangerous import BadSignature, SignatureExpired
from itsdangerous import TimedJSONWebSignatureSerializer as Serializer
from wenmo.extensions import db
from wenmo.models import User
from wenmo.settings import Operations
def generate_token(user, operation, expire_in=None, **kwargs):
    """Create a signed, timed token carrying the user id, an operation tag
    and any extra payload passed as keyword arguments."""
    serializer = Serializer(current_app.config['SECRET_KEY'], expire_in)
    payload = {'id': user.id, 'operation': operation}
    payload.update(kwargs)
    return serializer.dumps(payload)
def validate_token(user, token, operation, new_password=None):
    """Verify *token* for *user*/*operation*, apply its effect and commit.

    Returns True on success, False when the token is invalid/expired, does
    not match the user or operation, or the operation cannot be applied.
    """
    serializer = Serializer(current_app.config['SECRET_KEY'])
    try:
        payload = serializer.loads(token)
    except (SignatureExpired, BadSignature):
        return False

    # The token must have been issued for this exact user and operation.
    if payload.get('operation') != operation or payload.get('id') != user.id:
        return False

    if operation == Operations.CONFIRM:
        user.confirmed = True
    elif operation == Operations.RESET_PASSWORD:
        user.set_password(new_password)
    elif operation == Operations.CHANGE_EMAIL:
        new_email = payload.get('new_email')
        if new_email is None:
            return False
        # Refuse the change when the address is already taken.
        if User.query.filter_by(email=new_email).first() is not None:
            return False
        user.email = new_email
    else:
        return False

    db.session.commit()
    return True
def rename_image(old_filename):
    """Return a collision-safe random filename keeping the original extension."""
    _, extension = os.path.splitext(old_filename)
    return uuid.uuid4().hex + extension
def resize_image(image, filename, base_width):
    """Downscale *image* to *base_width* pixels wide (keeping aspect ratio)
    and save it into the configured upload path.

    Returns the saved filename, which has the configured width suffix
    inserted before the extension.  When the image is already narrow enough
    nothing is written and the original filename is returned unchanged.
    """
    name, ext = os.path.splitext(filename)
    # Fix: Image.open keeps the underlying file handle open lazily; the
    # original code never closed it (leaking on both the early-return and
    # the resize path).  A context manager closes it on every path.
    with Image.open(image) as img:
        if img.size[0] <= base_width:
            return name + ext
        w_percent = (base_width / float(img.size[0]))
        h_size = int((float(img.size[1]) * float(w_percent)))
        # resize() returns a new, fully-loaded image, safe to use after close.
        resized = img.resize((base_width, h_size), PIL.Image.ANTIALIAS)
    name += current_app.config['WENMO_PHOTO_SUFFIX'][base_width] + ext
    resized.save(os.path.join(current_app.config['WENMO_UPLOAD_PATH'], name), optimize=True, quality=85)
    return name
def is_safe_url(target):
    """True when *target* resolves to the current request's host over http(s),
    i.e. it is safe to redirect to without an open-redirect risk."""
    host_url = urlparse(request.host_url)
    redirect_url = urlparse(urljoin(request.host_url, target))
    same_host = host_url.netloc == redirect_url.netloc
    return redirect_url.scheme in ('http', 'https') and same_host
def redirect_back(default='main.index', **kwargs):
    """Redirect to the 'next' query argument or the referrer when it is a
    safe same-host URL; otherwise fall back to the *default* endpoint."""
    for candidate in (request.args.get('next'), request.referrer):
        if candidate and is_safe_url(candidate):
            return redirect(candidate)
    return redirect(url_for(default, **kwargs))
def flash_errors(form):
    """Flash every validation error collected on *form*, one message per error."""
    for field_name, messages in form.errors.items():
        # The label text is the same for every error of a field; look it up once.
        label = getattr(form, field_name).label.text
        for message in messages:
            flash(u"Error in the %s field - %s" % (label, message))
| 29.336634 | 104 | 0.676679 |
44e34a3639994b8da6dd7f6a74a4353539883c4d | 30,791 | py | Python | selfdrive/controls/controlsd.py | DS1SQM/OPKR084test_20210505 | 76fc12bff1472b8bbe62206cb8ae014f4c2fb969 | [
"MIT"
] | null | null | null | selfdrive/controls/controlsd.py | DS1SQM/OPKR084test_20210505 | 76fc12bff1472b8bbe62206cb8ae014f4c2fb969 | [
"MIT"
] | null | null | null | selfdrive/controls/controlsd.py | DS1SQM/OPKR084test_20210505 | 76fc12bff1472b8bbe62206cb8ae014f4c2fb969 | [
"MIT"
] | null | null | null | #!/usr/bin/env python3
import os
import math
from cereal import car, log
from common.numpy_fast import clip, interp
from common.realtime import sec_since_boot, config_realtime_process, Priority, Ratekeeper, DT_CTRL
from common.profiler import Profiler
from common.params import Params, put_nonblocking
import cereal.messaging as messaging
from selfdrive.config import Conversions as CV
from selfdrive.swaglog import cloudlog
from selfdrive.boardd.boardd import can_list_to_can_capnp
from selfdrive.car.car_helpers import get_car, get_startup_event, get_one_can
from selfdrive.controls.lib.lane_planner import CAMERA_OFFSET, CAMERA_OFFSET_A
from selfdrive.controls.lib.drive_helpers import update_v_cruise, initialize_v_cruise
from selfdrive.controls.lib.longcontrol import LongControl, STARTING_TARGET_SPEED
from selfdrive.controls.lib.latcontrol_pid import LatControlPID
from selfdrive.controls.lib.latcontrol_indi import LatControlINDI
from selfdrive.controls.lib.latcontrol_lqr import LatControlLQR
from selfdrive.controls.lib.latcontrol_angle import LatControlAngle
from selfdrive.controls.lib.events import Events, ET
from selfdrive.controls.lib.alertmanager import AlertManager
from selfdrive.controls.lib.vehicle_model import VehicleModel
from selfdrive.controls.lib.longitudinal_planner import LON_MPC_STEP
from selfdrive.locationd.calibrationd import Calibration
from selfdrive.hardware import HARDWARE, TICI
from selfdrive.car.hyundai.values import Buttons
import common.log as trace1
# Lane-departure warnings are only issued above this speed.
LDW_MIN_SPEED = 50 * CV.KPH_TO_MS
LANE_DEPARTURE_THRESHOLD = 0.1
# Number of 100 Hz control steps a saturated steering angle must persist
# before the steerSaturated alert is raised (angle-control cars).
STEER_ANGLE_SATURATION_TIMEOUT = 1.0 / DT_CTRL
STEER_ANGLE_SATURATION_THRESHOLD = 15  # Degrees

# Environment switches used in simulator / bench setups.
SIMULATION = "SIMULATION" in os.environ
NOSENSOR = "NOSENSOR" in os.environ
# Manager processes that may legitimately be stopped without triggering
# the processNotRunning event.
IGNORE_PROCESSES = set(["rtshield", "uploader", "deleter", "loggerd", "logmessaged", "tombstoned", "logcatd", "proclogd", "clocksd", "updated", "timezoned", "manage_athenad"])

# Shorthands for capnp enums used throughout this module.
ThermalStatus = log.DeviceState.ThermalStatus
State = log.ControlsState.OpenpilotState
PandaType = log.PandaState.PandaType
Desire = log.LateralPlan.Desire
LaneChangeState = log.LateralPlan.LaneChangeState
LaneChangeDirection = log.LateralPlan.LaneChangeDirection
EventName = car.CarEvent.EventName
class Controls:
def __init__(self, sm=None, pm=None, can_sock=None):
  """Set up messaging, the car interface, controllers and loop state.

  sm/pm/can_sock may be injected for tests; when None the real sockets
  are created.  Blocks until the first CAN packet arrives.
  """
  config_realtime_process(7 if TICI else 3, Priority.CTRL_HIGH)

  # Setup sockets
  self.pm = pm
  if self.pm is None:
    self.pm = messaging.PubMaster(['sendcan', 'controlsState', 'carState',
                                   'carControl', 'carEvents', 'carParams'])

  self.sm = sm
  if self.sm is None:
    ignore = ['driverCameraState', 'managerState'] if SIMULATION else None
    self.sm = messaging.SubMaster(['deviceState', 'pandaState', 'modelV2', 'liveCalibration',
                                   'driverMonitoringState', 'longitudinalPlan', 'lateralPlan', 'liveLocationKalman',
                                   'roadCameraState', 'driverCameraState', 'managerState', 'liveParameters', 'radarState'], ignore_alive=ignore)

  self.can_sock = can_sock
  if can_sock is None:
    can_timeout = None if os.environ.get('NO_CAN_TIMEOUT', False) else 100
    self.can_sock = messaging.sub_sock('can', timeout=can_timeout)

  # wait for one pandaState and one CAN packet
  print("Waiting for CAN messages...")
  get_one_can(self.can_sock)

  self.CI, self.CP = get_car(self.can_sock, self.pm.sock['sendcan'])

  # read params
  params = Params()
  self.is_metric = params.get_bool("IsMetric")
  self.is_ldw_enabled = params.get_bool("IsLdwEnabled")
  community_feature_toggle = params.get_bool("CommunityFeaturesToggle")
  openpilot_enabled_toggle = params.get_bool("OpenpilotEnabledToggle")
  passive = params.get_bool("Passive") or not openpilot_enabled_toggle
  self.commIssue_ignored = params.get_bool("ComIssueGone")
  self.auto_enabled = params.get_bool("AutoEnable")

  # detect sound card presence and ensure successful init
  sounds_available = HARDWARE.get_sound_card_online()

  car_recognized = self.CP.carName != 'mock'
  # If stock camera is disconnected, we loaded car controls and it's not dashcam mode
  controller_available = self.CP.enableCamera and self.CI.CC is not None and not passive and not self.CP.dashcamOnly
  community_feature_disallowed = self.CP.communityFeature and not community_feature_toggle
  self.read_only = not car_recognized or not controller_available or \
                   self.CP.dashcamOnly or community_feature_disallowed
  if self.read_only:
    # In read-only mode the panda must never actuate.
    self.CP.safetyModel = car.CarParams.SafetyModel.noOutput

  # Write CarParams for radard and boardd safety mode
  cp_bytes = self.CP.to_bytes()
  params.put("CarParams", cp_bytes)
  put_nonblocking("CarParamsCache", cp_bytes)

  self.CC = car.CarControl.new_message()
  self.AM = AlertManager()
  self.events = Events()

  self.LoC = LongControl(self.CP, self.CI.compute_gb)
  self.VM = VehicleModel(self.CP)

  # Pick the lateral controller from the car's tune; the numeric method id
  # (0=pid, 1=indi, 2=lqr, 3=angle) is kept for logging/UI.
  self.lateral_control_method = 0
  if self.CP.steerControlType == car.CarParams.SteerControlType.angle:
    self.LaC = LatControlAngle(self.CP)
    self.lateral_control_method = 3
  elif self.CP.lateralTuning.which() == 'pid':
    self.LaC = LatControlPID(self.CP)
    self.lateral_control_method = 0
  elif self.CP.lateralTuning.which() == 'indi':
    self.LaC = LatControlINDI(self.CP)
    self.lateral_control_method = 1
  elif self.CP.lateralTuning.which() == 'lqr':
    self.LaC = LatControlLQR(self.CP)
    self.lateral_control_method = 2

  # Loop state.
  self.controlsAllowed = False
  self.state = State.disabled
  self.enabled = False
  self.active = False
  self.can_rcv_error = False
  self.soft_disable_timer = 0
  self.v_cruise_kph = 255
  self.v_cruise_kph_last = 0
  self.mismatch_counter = 0
  self.can_error_counter = 0
  self.last_blinker_frame = 0
  self.saturated_count = 0
  self.distance_traveled = 0
  self.last_functional_fan_frame = 0
  self.events_prev = []
  self.current_alert_types = [ET.PERMANENT]
  self.logged_comm_issue = False

  # Seed submaster fields so the first iterations see sane values before
  # real messages arrive.
  self.sm['liveCalibration'].calStatus = Calibration.CALIBRATED
  self.sm['deviceState'].freeSpacePercent = 100
  self.sm['driverMonitoringState'].events = []
  self.sm['driverMonitoringState'].awarenessStatus = 1.
  self.sm['driverMonitoringState'].faceDetected = False
  self.sm['liveParameters'].valid = True

  self.startup_event = get_startup_event(car_recognized, controller_available)

  if not sounds_available:
    self.events.add(EventName.soundsUnavailable, static=True)
  if community_feature_disallowed:
    self.events.add(EventName.communityFeatureDisallowed, static=True)
  if not car_recognized:
    self.events.add(EventName.carUnrecognized, static=True)
  #elif self.read_only:
  #  self.events.add(EventName.dashcamMode, static=True)

  # controlsd is driven by can recv, expected at 100Hz
  self.rk = Ratekeeper(100, print_delay_threshold=None)
  self.prof = Profiler(False)  # off by default

  self.hyundai_lkas = self.read_only  # read_only

  # OPKR dynamic steer-ratio state: when live steer ratio is off, blend
  # between the stock steerRatio and the user-set maximum based on the
  # desired/actual steering-angle difference (see state_control()).
  self.mpc_frame = 0
  self.steerRatio_Max = float(int(Params().get("SteerRatioMaxAdj")) * 0.1)
  self.angle_differ_range = [0, 15]
  self.steerRatio_range = [self.CP.steerRatio, self.steerRatio_Max]
  self.new_steerRatio = self.CP.steerRatio
  self.new_steerRatio_prev = self.CP.steerRatio
  self.steerRatio_to_send = 0
def auto_enable(self, CS):
  """Queue a pcmEnable event when the car is rolling in drive, calibration
  is usable and all submaster inputs are healthy (OPKR auto-engage)."""
  moving_in_drive = CS.vEgo >= 3 * CV.KPH_TO_MS and CS.gearShifter == 2
  calibration_ok = self.sm['liveCalibration'].calStatus != Calibration.UNCALIBRATED
  if self.state == State.enabled or not moving_in_drive or not calibration_ok:
    return
  if self.sm.all_alive_and_valid() and self.enabled != self.controlsAllowed:
    self.events.add(EventName.pcmEnable)
def update_events(self, CS):
  """Compute carEvents from carState.

  Rebuilds self.events every cycle from the car, driver monitoring,
  device health, calibration, planner and process-health inputs.
  """
  self.events.clear()
  self.events.add_from_msg(CS.events)
  self.events.add_from_msg(self.sm['driverMonitoringState'].events)

  # Handle startup event (fires exactly once).
  if self.startup_event is not None:
    self.events.add(self.startup_event)
    self.startup_event = None

  # Create events for battery, temperature, disk space, and memory
  if self.sm['deviceState'].batteryPercent < 1 and self.sm['deviceState'].chargingError:
    # at zero percent battery, while discharging, OP should not allowed
    self.events.add(EventName.lowBattery)
  if self.sm['deviceState'].thermalStatus >= ThermalStatus.red:
    self.events.add(EventName.overheat)
  if self.sm['deviceState'].freeSpacePercent < 7:
    # under 7% of space free no enable allowed
    self.events.add(EventName.outOfSpace)
  if self.sm['deviceState'].memoryUsagePercent > 90:
    self.events.add(EventName.lowMemory)

  # Alert if fan isn't spinning for 5 seconds
  if self.sm['pandaState'].pandaType in [PandaType.uno, PandaType.dos]:
    if self.sm['pandaState'].fanSpeedRpm == 0 and self.sm['deviceState'].fanSpeedPercentDesired > 50:
      if (self.sm.frame - self.last_functional_fan_frame) * DT_CTRL > 5.0:
        self.events.add(EventName.fanMalfunction)
    else:
      self.last_functional_fan_frame = self.sm.frame

  # Handle calibration status
  cal_status = self.sm['liveCalibration'].calStatus
  if cal_status != Calibration.CALIBRATED:
    if cal_status == Calibration.UNCALIBRATED:
      self.events.add(EventName.calibrationIncomplete)
    else:
      self.events.add(EventName.calibrationInvalid)

  # Handle lane change: block on an occupied blind spot, otherwise announce.
  if self.sm['lateralPlan'].laneChangeState == LaneChangeState.preLaneChange:
    direction = self.sm['lateralPlan'].laneChangeDirection
    if (CS.leftBlindspot and direction == LaneChangeDirection.left) or \
       (CS.rightBlindspot and direction == LaneChangeDirection.right):
      self.events.add(EventName.laneChangeBlocked)
    else:
      if direction == LaneChangeDirection.left:
        self.events.add(EventName.preLaneChangeLeft)
      else:
        self.events.add(EventName.preLaneChangeRight)
  elif self.sm['lateralPlan'].laneChangeState in [LaneChangeState.laneChangeStarting,
                                                  LaneChangeState.laneChangeFinishing]:
    self.events.add(EventName.laneChange)

  if self.can_rcv_error or (not CS.canValid and self.sm.frame > 5 / DT_CTRL):
    self.events.add(EventName.canError)

  safety_mismatch = self.sm['pandaState'].safetyModel != self.CP.safetyModel
  #safety_mismatch = safety_mismatch or self.sm['pandaState'].safetyParam != self.CP.safetyParam
  if (safety_mismatch and self.sm.frame > 2 / DT_CTRL) or self.mismatch_counter >= 200:
    self.events.add(EventName.controlsMismatch)

  if not self.sm['liveParameters'].valid:
    self.events.add(EventName.vehicleModelInvalid)

  if len(self.sm['radarState'].radarErrors):
    self.events.add(EventName.radarFault)
  elif not self.sm.all_alive_and_valid() and self.sm['pandaState'].pandaType != PandaType.whitePanda and not self.commIssue_ignored:
    self.events.add(EventName.commIssue)
    if not self.logged_comm_issue:
      # Log the first occurrence only, to avoid spamming cloudlog.
      cloudlog.error(f"commIssue - valid: {self.sm.valid} - alive: {self.sm.alive}")
      self.logged_comm_issue = True
  else:
    self.logged_comm_issue = False

  if not self.sm['lateralPlan'].mpcSolutionValid and not (EventName.laneChangeManual in self.events.names) and CS.steeringAngleDeg < 15:
    self.events.add(EventName.plannerError)
  if not self.sm['liveLocationKalman'].sensorsOK and not NOSENSOR:
    if self.sm.frame > 5 / DT_CTRL:  # Give locationd some time to receive all the inputs
      self.events.add(EventName.sensorDataInvalid)
  if not self.sm['liveLocationKalman'].posenetOK:
    self.events.add(EventName.posenetInvalid)
  if not self.sm['liveLocationKalman'].deviceStable:
    self.events.add(EventName.deviceFalling)
  if log.PandaState.FaultType.relayMalfunction in self.sm['pandaState'].faults:
    self.events.add(EventName.relayMalfunction)
  if self.sm['longitudinalPlan'].fcw:
    self.events.add(EventName.fcw)

  # TODO: fix simulator
  if not SIMULATION:
    #if not NOSENSOR:
    #  if not self.sm.alive['ubloxRaw'] and (self.sm.frame > 10. / DT_CTRL):
    #    self.events.add(EventName.gpsMalfunction)
    #  elif not self.sm['liveLocationKalman'].gpsOK and (self.distance_traveled > 1000) and not TICI:
    #    # Not show in first 1 km to allow for driving out of garage. This event shows after 5 minutes
    #    self.events.add(EventName.noGps)
    if not self.sm.all_alive(['roadCameraState', 'driverCameraState']) and (self.sm.frame > 5 / DT_CTRL):
      self.events.add(EventName.cameraMalfunction)
    if self.sm['modelV2'].frameDropPerc > 20:
      self.events.add(EventName.modeldLagging)

    # Check if all manager processes are running
    not_running = set(p.name for p in self.sm['managerState'].processes if not p.running)
    if self.sm.rcv_frame['managerState'] and (not_running - IGNORE_PROCESSES):
      self.events.add(EventName.processNotRunning)

    # Only allow engagement with brake pressed when stopped behind another stopped car
    #if CS.brakePressed and self.sm['longitudinalPlan'].vTargetFuture >= STARTING_TARGET_SPEED \
    #  and self.CP.openpilotLongitudinalControl and CS.vEgo < 0.3:
    #  self.events.add(EventName.noTarget)

  # OPKR auto-engage, gated by the AutoEnable param.
  if self.auto_enabled:
    self.auto_enable( CS )
def data_sample(self):
  """Receive one batch of CAN data, refresh the submaster and return the
  updated carState."""
  raw_can = messaging.drain_sock_raw(self.can_sock, wait_for_one=True)
  CS = self.CI.update(self.CC, raw_can)

  self.sm.update(0)

  # An empty drain means the CAN socket timed out.
  if raw_can:
    self.can_rcv_error = False
  else:
    self.can_error_counter += 1
    self.can_rcv_error = True

  # The panda state arrives on a different socket than the CAN messages, so
  # panda and controlsd can briefly disagree on controls_allowed.  Count
  # consecutive mismatches while engaged; update_events() disengages once
  # the count exceeds its threshold.
  self.controlsAllowed = self.sm['pandaState'].controlsAllowed
  if not self.enabled:
    self.mismatch_counter = 0
  elif not self.controlsAllowed:
    self.mismatch_counter += 1

  self.distance_traveled += CS.vEgo * DT_CTRL

  return CS
def state_transition(self, CS):
  """Compute conditional state transitions and execute actions on state transitions"""
  # if stock cruise is completely disabled, then we can use our own set speed logic
  self.CP.enableCruise = self.CI.CP.enableCruise

  if not self.CP.enableCruise:
    self.v_cruise_kph = update_v_cruise(self.v_cruise_kph, CS.buttonEvents, self.enabled)
    # Never show a set speed lower than what the dash already displays.
    if int(CS.vSetDis)-1 > self.v_cruise_kph:
      self.v_cruise_kph = int(CS.vSetDis)
  elif self.CP.enableCruise and CS.cruiseState.enabled:
    # OPKR variable cruise: RES restores the previous set speed...
    if CS.cruiseButtons == Buttons.RES_ACCEL and Params().get_bool('OpkrVariableCruise') and CS.cruiseState.modeSel != 0 and CS.vSetDis < (self.v_cruise_kph_last - 1):
      self.v_cruise_kph = self.v_cruise_kph_last
      if int(CS.vSetDis)-1 > self.v_cruise_kph:
        self.v_cruise_kph = int(CS.vSetDis)
    # ...or snaps the set speed up to the current vehicle speed.
    elif CS.cruiseButtons == Buttons.RES_ACCEL and Params().get_bool('OpkrVariableCruise') and CS.cruiseState.modeSel != 0 and 30 <= self.v_cruise_kph_last <= round(CS.vEgo*CV.MS_TO_KPH):
      self.v_cruise_kph = round(CS.vEgo*CV.MS_TO_KPH)
      if int(CS.vSetDis)-1 > self.v_cruise_kph:
        self.v_cruise_kph = int(CS.vSetDis)
      self.v_cruise_kph_last = self.v_cruise_kph
    # Plain stock button handling: mirror the car's cruise set speed.
    elif CS.cruiseButtons == Buttons.RES_ACCEL or CS.cruiseButtons == Buttons.SET_DECEL:
      self.v_cruise_kph = round(CS.cruiseState.speed * CV.MS_TO_KPH)
      self.v_cruise_kph_last = self.v_cruise_kph
    # Driver acceleration above the set speed raises it when CruiseOverMaxSpeed is on.
    elif CS.driverAcc and Params().get_bool('OpkrVariableCruise') and Params().get_bool('CruiseOverMaxSpeed') and 30 <= self.v_cruise_kph < int(round(CS.vEgo*CV.MS_TO_KPH)):
      self.v_cruise_kph = int(round(CS.vEgo*CV.MS_TO_KPH))
      self.v_cruise_kph_last = self.v_cruise_kph

  # decrease the soft disable timer at every step, as it's reset on
  # entrance in SOFT_DISABLING state
  self.soft_disable_timer = max(0, self.soft_disable_timer - 1)

  self.current_alert_types = [ET.PERMANENT]

  # ENABLED, PRE ENABLING, SOFT DISABLING
  if self.state != State.disabled:
    # user and immediate disable always have priority in a non-disabled state
    if self.events.any(ET.USER_DISABLE):
      self.state = State.disabled
      self.current_alert_types.append(ET.USER_DISABLE)
    elif self.events.any(ET.IMMEDIATE_DISABLE):
      self.state = State.disabled
      self.current_alert_types.append(ET.IMMEDIATE_DISABLE)
    else:
      # ENABLED
      if self.state == State.enabled:
        if self.events.any(ET.SOFT_DISABLE):
          self.state = State.softDisabling
          self.soft_disable_timer = 300  # 3s
          self.current_alert_types.append(ET.SOFT_DISABLE)
      # SOFT DISABLING
      elif self.state == State.softDisabling:
        if not self.events.any(ET.SOFT_DISABLE):
          # no more soft disabling condition, so go back to ENABLED
          self.state = State.enabled
        elif self.events.any(ET.SOFT_DISABLE) and self.soft_disable_timer > 0:
          self.current_alert_types.append(ET.SOFT_DISABLE)
        elif self.soft_disable_timer <= 0:
          self.state = State.disabled
      # PRE ENABLING
      elif self.state == State.preEnabled:
        if not self.events.any(ET.PRE_ENABLE):
          self.state = State.enabled
        else:
          self.current_alert_types.append(ET.PRE_ENABLE)
  # DISABLED
  elif self.state == State.disabled:
    if self.events.any(ET.ENABLE):
      if self.events.any(ET.NO_ENTRY):
        self.current_alert_types.append(ET.NO_ENTRY)
      else:
        if self.events.any(ET.PRE_ENABLE):
          self.state = State.preEnabled
        else:
          self.state = State.enabled
        self.current_alert_types.append(ET.ENABLE)
        #self.v_cruise_kph = initialize_v_cruise(CS.vEgo, CS.buttonEvents, self.v_cruise_kph_last)
        self.v_cruise_kph = 0
        self.v_cruise_kph_last = 0

  # Check if actuators are enabled
  self.active = self.state == State.enabled or self.state == State.softDisabling
  if self.active:
    self.current_alert_types.append(ET.WARNING)

  # Check if openpilot is engaged
  self.enabled = self.active or self.state == State.preEnabled
def state_control(self, CS):
  """Given the state, this function returns an actuators packet"""
  lat_plan = self.sm['lateralPlan']
  long_plan = self.sm['longitudinalPlan']

  anglesteer_current = CS.steeringAngleDeg
  anglesteer_desire = lat_plan.steerAngleDesireDeg
  output_scale = lat_plan.outputScale

  # OPKR dynamic steer ratio: when the live (learned) steer ratio is
  # disabled, ratchet the ratio up toward steerRatio_Max while the lateral
  # output is saturated, and slowly decay it back down otherwise.
  live_sr = Params().get_bool('OpkrLiveSteerRatio')
  if not live_sr:
    angle_diff = abs(anglesteer_desire) - abs(anglesteer_current)
    if abs(output_scale) >= self.CP.steerMaxV[0] and CS.vEgo > 8:
      self.new_steerRatio_prev = interp(angle_diff, self.angle_differ_range, self.steerRatio_range)
      if self.new_steerRatio_prev > self.new_steerRatio:
        self.new_steerRatio = self.new_steerRatio_prev
    else:
      # Decay by 0.1 every 100 frames (1 s), floored at the stock ratio.
      self.mpc_frame += 1
      if self.mpc_frame % 100 == 0:
        self.new_steerRatio -= 0.1
        if self.new_steerRatio <= self.CP.steerRatio:
          self.new_steerRatio = self.CP.steerRatio
        self.mpc_frame = 0

  # Update VehicleModel
  params = self.sm['liveParameters']
  x = max(params.stiffnessFactor, 0.1)
  if live_sr:
    sr = max(params.steerRatio, 0.1)
  else:
    sr = max(self.new_steerRatio, 0.1)
  self.VM.update_params(x, sr)
  self.steerRatio_to_send = sr

  actuators = car.CarControl.Actuators.new_message()

  if CS.leftBlinker or CS.rightBlinker:
    self.last_blinker_frame = self.sm.frame

  # State specific actions
  if not self.active:
    self.LaC.reset()
    self.LoC.reset(v_pid=CS.vEgo)

  # Extrapolate the longitudinal plan forward by its age so the targets
  # correspond to "now".
  long_plan_age = DT_CTRL * (self.sm.frame - self.sm.rcv_frame['longitudinalPlan'])
  # no greater than dt mpc + dt, to prevent too high extraps
  dt = min(long_plan_age, LON_MPC_STEP + DT_CTRL) + DT_CTRL

  a_acc_sol = long_plan.aStart + (dt / LON_MPC_STEP) * (long_plan.aTarget - long_plan.aStart)
  v_acc_sol = long_plan.vStart + dt * (a_acc_sol + long_plan.aStart) / 2.0

  # Gas/Brake PID loop
  actuators.gas, actuators.brake = self.LoC.update(self.active, CS, v_acc_sol, long_plan.vTargetFuture, a_acc_sol, self.CP)

  # Steering PID loop and lateral MPC
  actuators.steer, actuators.steeringAngleDeg, lac_log = self.LaC.update(self.active, CS, self.CP, self.VM, params, lat_plan)

  # Check for difference between desired angle and angle for angle based control
  angle_control_saturated = self.CP.steerControlType == car.CarParams.SteerControlType.angle and \
    abs(actuators.steeringAngleDeg - CS.steeringAngleDeg) > STEER_ANGLE_SATURATION_THRESHOLD

  if angle_control_saturated and not CS.steeringPressed and self.active:
    self.saturated_count += 1
  else:
    self.saturated_count = 0

  # Send a "steering required alert" if saturation count has reached the limit
  if (lac_log.saturated and not CS.steeringPressed) or \
     (self.saturated_count > STEER_ANGLE_SATURATION_TIMEOUT):
    if len(lat_plan.dPathPoints):
      # Check if we deviated from the path
      left_deviation = actuators.steer > 0 and lat_plan.dPathPoints[0] < -0.1
      right_deviation = actuators.steer < 0 and lat_plan.dPathPoints[0] > 0.1
      if left_deviation or right_deviation:
        self.events.add(EventName.steerSaturated)

  return actuators, v_acc_sol, a_acc_sol, lac_log
def publish_logs(self, CS, start_time, actuators, v_acc, a_acc, lac_log):
    """Send actuators and hud commands to the car, send controlsstate and MPC logging"""
    # Mirror the globally-shared alert text (set elsewhere through trace1)
    # into this instance so it can be logged in controlsState below.
    self.log_alertTextMsg1 = trace1.global_alertTextMsg1
    self.log_alertTextMsg2 = trace1.global_alertTextMsg2

    CC = car.CarControl.new_message()
    CC.enabled = self.enabled
    CC.actuators = actuators

    CC.cruiseControl.override = True
    # Cancel the stock cruise when it is engaged but openpilot is not.
    CC.cruiseControl.cancel = self.CP.enableCruise and not self.enabled and CS.cruiseState.enabled

    # Some override values for Honda
    # brake discount removes a sharp nonlinearity
    brake_discount = (1.0 - clip(actuators.brake * 3., 0.0, 1.0))
    speed_override = max(0.0, (self.LoC.v_pid + CS.cruiseState.speedOffset) * brake_discount)
    CC.cruiseControl.speedOverride = float(speed_override if self.CP.enableCruise else 0.0)
    CC.cruiseControl.accelOverride = self.CI.calc_accel_override(CS.aEgo, self.sm['longitudinalPlan'].aTarget, CS.vEgo, self.sm['longitudinalPlan'].vTarget)

    CC.hudControl.setSpeed = float(self.v_cruise_kph * CV.KPH_TO_MS)
    CC.hudControl.speedVisible = self.enabled
    CC.hudControl.lanesVisible = self.enabled
    CC.hudControl.leadVisible = self.sm['longitudinalPlan'].hasLead

    right_lane_visible = self.sm['lateralPlan'].rProb > 0.5
    left_lane_visible = self.sm['lateralPlan'].lProb > 0.5
    CC.hudControl.rightLaneVisible = bool(right_lane_visible)
    CC.hudControl.leftLaneVisible = bool(left_lane_visible)

    recent_blinker = (self.sm.frame - self.last_blinker_frame) * DT_CTRL < 5.0  # 5s blinker cooldown
    # LDW only when fast enough, no recent blinker, openpilot not actively
    # steering, and the camera calibration is valid.
    ldw_allowed = self.is_ldw_enabled and CS.vEgo > LDW_MIN_SPEED and not recent_blinker \
        and not self.active and self.sm['liveCalibration'].calStatus == Calibration.CALIBRATED

    meta = self.sm['modelV2'].meta
    if len(meta.desirePrediction) and ldw_allowed:
        l_lane_change_prob = meta.desirePrediction[Desire.laneChangeLeft - 1]
        r_lane_change_prob = meta.desirePrediction[Desire.laneChangeRight - 1]
        # modeSel == 3 uses the alternate camera offset constant for the
        # lane-proximity check; presumably a cruise mode selection — TODO confirm.
        if CS.cruiseState.modeSel == 3:
            l_lane_close = left_lane_visible and (self.sm['modelV2'].laneLines[1].y[0] > -(1.08 + CAMERA_OFFSET_A))
            r_lane_close = right_lane_visible and (self.sm['modelV2'].laneLines[2].y[0] < (1.08 - CAMERA_OFFSET_A))
        else:
            l_lane_close = left_lane_visible and (self.sm['modelV2'].laneLines[1].y[0] > -(1.08 + CAMERA_OFFSET))
            r_lane_close = right_lane_visible and (self.sm['modelV2'].laneLines[2].y[0] < (1.08 - CAMERA_OFFSET))

        CC.hudControl.leftLaneDepart = bool(l_lane_change_prob > LANE_DEPARTURE_THRESHOLD and l_lane_close)
        CC.hudControl.rightLaneDepart = bool(r_lane_change_prob > LANE_DEPARTURE_THRESHOLD and r_lane_close)

    if CC.hudControl.rightLaneDepart or CC.hudControl.leftLaneDepart:
        self.events.add(EventName.ldw)

    # Only clear WARNING-type alerts when none are currently requested.
    clear_event = ET.WARNING if ET.WARNING not in self.current_alert_types else None
    alerts = self.events.create_alerts(self.current_alert_types, [self.CP, self.sm, self.is_metric])
    self.AM.add_many(self.sm.frame, alerts, self.enabled)
    self.AM.process_alerts(self.sm.frame, clear_event)
    CC.hudControl.visualAlert = self.AM.visual_alert

    if not self.hyundai_lkas and self.enabled:
        # send car controls over can
        can_sends = self.CI.apply(CC, self.sm)
        self.pm.send('sendcan', can_list_to_can_capnp(can_sends, msgtype='sendcan', valid=CS.canValid))

    force_decel = (self.sm['driverMonitoringState'].awarenessStatus < 0.) or \
        (self.state == State.softDisabling)

    # Curvature & Steering angle
    params = self.sm['liveParameters']
    lat_plan = self.sm['lateralPlan']

    steer_angle_without_offset = math.radians(CS.steeringAngleDeg - params.angleOffsetAverageDeg)
    curvature = -self.VM.calc_curvature(steer_angle_without_offset, CS.vEgo)
    angle_steers_des = math.degrees(self.VM.get_steer_from_curvature(-lat_plan.curvature, CS.vEgo))
    angle_steers_des += params.angleOffsetDeg

    # controlsState
    dat = messaging.new_message('controlsState')
    dat.valid = CS.canValid
    controlsState = dat.controlsState
    controlsState.alertText1 = self.AM.alert_text_1
    controlsState.alertText2 = self.AM.alert_text_2
    controlsState.alertSize = self.AM.alert_size
    controlsState.alertStatus = self.AM.alert_status
    controlsState.alertBlinkingRate = self.AM.alert_rate
    controlsState.alertType = self.AM.alert_type
    controlsState.alertSound = self.AM.audible_alert
    controlsState.canMonoTimes = list(CS.canMonoTimes)
    controlsState.longitudinalPlanMonoTime = self.sm.logMonoTime['longitudinalPlan']
    controlsState.lateralPlanMonoTime = self.sm.logMonoTime['lateralPlan']
    controlsState.enabled = self.enabled
    controlsState.active = self.active
    controlsState.curvature = curvature
    controlsState.steeringAngleDesiredDeg = angle_steers_des
    controlsState.state = self.state
    controlsState.engageable = not self.events.any(ET.NO_ENTRY)
    controlsState.longControlState = self.LoC.long_control_state
    controlsState.vPid = float(self.LoC.v_pid)
    controlsState.vCruise = float(self.v_cruise_kph)
    controlsState.upAccelCmd = float(self.LoC.pid.p)
    controlsState.uiAccelCmd = float(self.LoC.pid.id)
    controlsState.ufAccelCmd = float(self.LoC.pid.f)
    controlsState.vTargetLead = float(v_acc)
    controlsState.aTarget = float(a_acc)
    controlsState.cumLagMs = -self.rk.remaining * 1000.
    controlsState.startMonoTime = int(start_time * 1e9)
    controlsState.forceDecel = bool(force_decel)
    controlsState.canErrorCounter = self.can_error_counter
    controlsState.alertTextMsg1 = self.log_alertTextMsg1
    controlsState.alertTextMsg2 = self.log_alertTextMsg2
    controlsState.limitSpeedCamera = float(self.sm['longitudinalPlan'].targetSpeedCamera)
    controlsState.limitSpeedCameraDist = float(self.sm['longitudinalPlan'].targetSpeedCameraDist)
    controlsState.lateralControlMethod = int(self.lateral_control_method)
    controlsState.steerRatio = float(self.steerRatio_to_send)

    # Log the lateral controller state in the union field matching the
    # active tuning method.
    if self.CP.steerControlType == car.CarParams.SteerControlType.angle:
        controlsState.lateralControlState.angleState = lac_log
    elif self.CP.lateralTuning.which() == 'pid':
        controlsState.lateralControlState.pidState = lac_log
    elif self.CP.lateralTuning.which() == 'lqr':
        controlsState.lateralControlState.lqrState = lac_log
    elif self.CP.lateralTuning.which() == 'indi':
        controlsState.lateralControlState.indiState = lac_log
    self.pm.send('controlsState', dat)

    # carState
    car_events = self.events.to_msg()
    cs_send = messaging.new_message('carState')
    cs_send.valid = CS.canValid
    cs_send.carState = CS
    cs_send.carState.events = car_events
    self.pm.send('carState', cs_send)

    # carEvents - logged every second or on change
    if (self.sm.frame % int(1. / DT_CTRL) == 0) or (self.events.names != self.events_prev):
        ce_send = messaging.new_message('carEvents', len(self.events))
        ce_send.carEvents = car_events
        self.pm.send('carEvents', ce_send)
    self.events_prev = self.events.names.copy()

    # carParams - logged every 50 seconds (> 1 per segment)
    if (self.sm.frame % int(50. / DT_CTRL) == 0):
        cp_send = messaging.new_message('carParams')
        cp_send.carParams = self.CP
        self.pm.send('carParams', cp_send)

    # carControl
    cc_send = messaging.new_message('carControl')
    cc_send.valid = CS.canValid
    cc_send.carControl = CC
    self.pm.send('carControl', cc_send)

    # copy CarControl to pass to CarInterface on the next iteration
    self.CC = CC
def step(self):
    """Run one control cycle: sample inputs, update state, compute and publish."""
    start_time = sec_since_boot()
    self.prof.checkpoint("Ratekeeper", ignore=True)

    # Sample data from sockets and get a carState
    CS = self.data_sample()
    self.prof.checkpoint("Sample")

    # NOTE(review): hyundai_lkas appears to hand control back to the stock
    # LKAS — it is forced on while read-only, released when cruise engages,
    # and gates the CAN output in publish_logs(). Confirm against the fork docs.
    if self.read_only:
        self.hyundai_lkas = self.read_only
    elif CS.cruiseState.enabled and self.hyundai_lkas:
        self.hyundai_lkas = False

    self.update_events(CS)

    if not self.hyundai_lkas:
        # Update control state
        self.state_transition(CS)
        self.prof.checkpoint("State transition")

    # Compute actuators (runs PID loops and lateral MPC)
    actuators, v_acc, a_acc, lac_log = self.state_control(CS)
    self.prof.checkpoint("State Control")

    # Publish data
    self.publish_logs(CS, start_time, actuators, v_acc, a_acc, lac_log)
    self.prof.checkpoint("Sent")

    # Re-engage the stock LKAS once cruise is no longer enabled.
    if not CS.cruiseState.enabled and not self.hyundai_lkas:
        self.hyundai_lkas = True
def controlsd_thread(self):
    """Main loop: run control steps forever, paced by the rate keeper."""
    while True:
        self.step()
        self.rk.monitor_time()
        self.prof.display()
def main(sm=None, pm=None, logcan=None):
    """Entry point: build the Controls stack and run its loop until killed.

    All arguments default to None so the Controls constructor creates its
    own messaging objects when run standalone.
    """
    Controls(sm, pm, logcan).controlsd_thread()


if __name__ == "__main__":
    main()
| 43.799431 | 189 | 0.719171 |
812fac19f86095d63e68d0e4ee7d0e7c4eff8815 | 39,490 | py | Python | Lib/asyncio/selector_events.py | gnprice/cpython-old | fc60914f56ad64139577cd1925883bac0bd9edd1 | [
"PSF-2.0"
] | null | null | null | Lib/asyncio/selector_events.py | gnprice/cpython-old | fc60914f56ad64139577cd1925883bac0bd9edd1 | [
"PSF-2.0"
] | null | null | null | Lib/asyncio/selector_events.py | gnprice/cpython-old | fc60914f56ad64139577cd1925883bac0bd9edd1 | [
"PSF-2.0"
] | null | null | null | """Event loop using a selector and related classes.
A selector is a "notify-when-ready" multiplexer. For a subclass which
also includes support for signal handling, see the unix_events sub-module.
"""
__all__ = ['BaseSelectorEventLoop']
import collections
import errno
import functools
import socket
import warnings
try:
import ssl
except ImportError: # pragma: no cover
ssl = None
from . import base_events
from . import compat
from . import constants
from . import events
from . import futures
from . import selectors
from . import transports
from . import sslproto
from .coroutines import coroutine
from .log import logger
def _test_selector_event(selector, fd, event):
# Test if the selector is monitoring 'event' events
# for the file descriptor 'fd'.
try:
key = selector.get_key(fd)
except KeyError:
return False
else:
return bool(key.events & event)
class BaseSelectorEventLoop(base_events.BaseEventLoop):
    """Selector event loop.

    See events.EventLoop for API specification.
    """

    def __init__(self, selector=None):
        # selector defaults to the best implementation for the platform
        # (epoll/kqueue/poll/select).
        super().__init__()

        if selector is None:
            selector = selectors.DefaultSelector()
        logger.debug('Using selector: %s', selector.__class__.__name__)
        self._selector = selector
        self._make_self_pipe()

    def _make_socket_transport(self, sock, protocol, waiter=None, *,
                               extra=None, server=None):
        # Factory used by the base class to wrap a connected/accepted socket.
        return _SelectorSocketTransport(self, sock, protocol, waiter,
                                        extra, server)

    def _make_ssl_transport(self, rawsock, protocol, sslcontext, waiter=None,
                            *, server_side=False, server_hostname=None,
                            extra=None, server=None):
        # Prefer the MemoryBIO-based sslproto implementation when available;
        # fall back to the legacy wrapped-socket transport otherwise.
        if not sslproto._is_sslproto_available():
            return self._make_legacy_ssl_transport(
                rawsock, protocol, sslcontext, waiter,
                server_side=server_side, server_hostname=server_hostname,
                extra=extra, server=server)

        ssl_protocol = sslproto.SSLProtocol(self, protocol, sslcontext, waiter,
                                            server_side, server_hostname)
        _SelectorSocketTransport(self, rawsock, ssl_protocol,
                                 extra=extra, server=server)
        return ssl_protocol._app_transport

    def _make_legacy_ssl_transport(self, rawsock, protocol, sslcontext,
                                   waiter, *,
                                   server_side=False, server_hostname=None,
                                   extra=None, server=None):
        # Use the legacy API: SSL_write, SSL_read, etc. The legacy API is used
        # on Python 3.4 and older, when ssl.MemoryBIO is not available.
        return _SelectorSslTransport(
            self, rawsock, protocol, sslcontext, waiter,
            server_side, server_hostname, extra, server)

    def _make_datagram_transport(self, sock, protocol,
                                 address=None, waiter=None, extra=None):
        # Factory used by the base class for UDP endpoints.
        return _SelectorDatagramTransport(self, sock, protocol,
                                          address, waiter, extra)

    def close(self):
        """Close the event loop, its self-pipe and its selector."""
        if self.is_running():
            raise RuntimeError("Cannot close a running event loop")
        if self.is_closed():
            return
        self._close_self_pipe()
        super().close()
        if self._selector is not None:
            self._selector.close()
            self._selector = None

    def _socketpair(self):
        # Platform-specific; implemented by subclasses.
        raise NotImplementedError

    def _close_self_pipe(self):
        # Tear down both ends of the wakeup pipe created by _make_self_pipe().
        self.remove_reader(self._ssock.fileno())
        self._ssock.close()
        self._ssock = None
        self._csock.close()
        self._csock = None
        self._internal_fds -= 1

    def _make_self_pipe(self):
        # A self-socket, really. :-)
        # Used by _write_to_self() to wake up the selector from another thread.
        self._ssock, self._csock = self._socketpair()
        self._ssock.setblocking(False)
        self._csock.setblocking(False)
        self._internal_fds += 1
        self.add_reader(self._ssock.fileno(), self._read_from_self)

    def _process_self_data(self, data):
        # Hook for subclasses: called with the bytes read from the self-pipe.
        # The default implementation ignores them.
        pass

    def _read_from_self(self):
        # Drain the self-pipe until it would block.
        while True:
            try:
                data = self._ssock.recv(4096)
                if not data:
                    break
                self._process_self_data(data)
            except InterruptedError:
                continue
            except BlockingIOError:
                break

    def _write_to_self(self):
        # This may be called from a different thread, possibly after
        # _close_self_pipe() has been called or even while it is
        # running. Guard for self._csock being None or closed. When
        # a socket is closed, send() raises OSError (with errno set to
        # EBADF, but let's not rely on the exact error code).
        csock = self._csock
        if csock is not None:
            try:
                csock.send(b'\0')
            except OSError:
                if self._debug:
                    logger.debug("Fail to write a null byte into the "
                                 "self-pipe socket",
                                 exc_info=True)

    def _start_serving(self, protocol_factory, sock,
                       sslcontext=None, server=None):
        # Register the listening socket so each readiness event triggers
        # an accept attempt.
        self.add_reader(sock.fileno(), self._accept_connection,
                        protocol_factory, sock, sslcontext, server)

    def _accept_connection(self, protocol_factory, sock,
                           sslcontext=None, server=None):
        # Called when the listening socket is ready; accepts one connection.
        try:
            conn, addr = sock.accept()
            if self._debug:
                logger.debug("%r got a new connection from %r: %r",
                             server, addr, conn)
            conn.setblocking(False)
        except (BlockingIOError, InterruptedError, ConnectionAbortedError):
            pass  # False alarm.
        except OSError as exc:
            # There's nowhere to send the error, so just log it.
            if exc.errno in (errno.EMFILE, errno.ENFILE,
                             errno.ENOBUFS, errno.ENOMEM):
                # Some platforms (e.g. Linux keep reporting the FD as
                # ready, so we remove the read handler temporarily.
                # We'll try again in a while.
                self.call_exception_handler({
                    'message': 'socket.accept() out of system resource',
                    'exception': exc,
                    'socket': sock,
                })
                self.remove_reader(sock.fileno())
                self.call_later(constants.ACCEPT_RETRY_DELAY,
                                self._start_serving,
                                protocol_factory, sock, sslcontext, server)
            else:
                raise  # The event loop will catch, log and ignore it.
        else:
            extra = {'peername': addr}
            accept = self._accept_connection2(protocol_factory, conn, extra,
                                              sslcontext, server)
            self.create_task(accept)

    @coroutine
    def _accept_connection2(self, protocol_factory, conn, extra,
                            sslcontext=None, server=None):
        # Finish setting up an accepted connection: build the protocol and
        # transport (possibly with an SSL handshake) and wait until the
        # transport is ready.
        protocol = None
        transport = None
        try:
            protocol = protocol_factory()
            waiter = futures.Future(loop=self)
            if sslcontext:
                transport = self._make_ssl_transport(
                    conn, protocol, sslcontext, waiter=waiter,
                    server_side=True, extra=extra, server=server)
            else:
                transport = self._make_socket_transport(
                    conn, protocol, waiter=waiter, extra=extra,
                    server=server)

            try:
                yield from waiter
            except:
                transport.close()
                raise

            # It's now up to the protocol to handle the connection.
        except Exception as exc:
            if self._debug:
                context = {
                    'message': ('Error on transport creation '
                                'for incoming connection'),
                    'exception': exc,
                }
                if protocol is not None:
                    context['protocol'] = protocol
                if transport is not None:
                    context['transport'] = transport
                self.call_exception_handler(context)

    def add_reader(self, fd, callback, *args):
        """Add a reader callback."""
        self._check_closed()
        handle = events.Handle(callback, args, self)
        try:
            key = self._selector.get_key(fd)
        except KeyError:
            self._selector.register(fd, selectors.EVENT_READ,
                                    (handle, None))
        else:
            # fd already registered (possibly for write): add the read bit
            # and cancel any previous reader handle.
            mask, (reader, writer) = key.events, key.data
            self._selector.modify(fd, mask | selectors.EVENT_READ,
                                  (handle, writer))
            if reader is not None:
                reader.cancel()

    def remove_reader(self, fd):
        """Remove a reader callback."""
        if self.is_closed():
            return False
        try:
            key = self._selector.get_key(fd)
        except KeyError:
            return False
        else:
            mask, (reader, writer) = key.events, key.data
            mask &= ~selectors.EVENT_READ
            if not mask:
                self._selector.unregister(fd)
            else:
                self._selector.modify(fd, mask, (None, writer))

            if reader is not None:
                reader.cancel()
                return True
            else:
                return False

    def add_writer(self, fd, callback, *args):
        """Add a writer callback.."""
        self._check_closed()
        handle = events.Handle(callback, args, self)
        try:
            key = self._selector.get_key(fd)
        except KeyError:
            self._selector.register(fd, selectors.EVENT_WRITE,
                                    (None, handle))
        else:
            # fd already registered (possibly for read): add the write bit
            # and cancel any previous writer handle.
            mask, (reader, writer) = key.events, key.data
            self._selector.modify(fd, mask | selectors.EVENT_WRITE,
                                  (reader, handle))
            if writer is not None:
                writer.cancel()

    def remove_writer(self, fd):
        """Remove a writer callback."""
        if self.is_closed():
            return False
        try:
            key = self._selector.get_key(fd)
        except KeyError:
            return False
        else:
            mask, (reader, writer) = key.events, key.data
            # Remove both writer and connector.
            mask &= ~selectors.EVENT_WRITE
            if not mask:
                self._selector.unregister(fd)
            else:
                self._selector.modify(fd, mask, (reader, None))

            if writer is not None:
                writer.cancel()
                return True
            else:
                return False

    def sock_recv(self, sock, n):
        """Receive data from the socket.

        The return value is a bytes object representing the data received.
        The maximum amount of data to be received at once is specified by
        nbytes.

        This method is a coroutine.
        """
        if self._debug and sock.gettimeout() != 0:
            raise ValueError("the socket must be non-blocking")
        fut = futures.Future(loop=self)
        self._sock_recv(fut, False, sock, n)
        return fut

    def _sock_recv(self, fut, registered, sock, n):
        # _sock_recv() can add itself as an I/O callback if the operation can't
        # be done immediately. Don't use it directly, call sock_recv().
        fd = sock.fileno()
        if registered:
            # Remove the callback early. It should be rare that the
            # selector says the fd is ready but the call still returns
            # EAGAIN, and I am willing to take a hit in that case in
            # order to simplify the common case.
            self.remove_reader(fd)
        if fut.cancelled():
            return
        try:
            data = sock.recv(n)
        except (BlockingIOError, InterruptedError):
            self.add_reader(fd, self._sock_recv, fut, True, sock, n)
        except Exception as exc:
            fut.set_exception(exc)
        else:
            fut.set_result(data)

    def sock_sendall(self, sock, data):
        """Send data to the socket.

        The socket must be connected to a remote socket. This method continues
        to send data from data until either all data has been sent or an
        error occurs. None is returned on success. On error, an exception is
        raised, and there is no way to determine how much data, if any, was
        successfully processed by the receiving end of the connection.

        This method is a coroutine.
        """
        if self._debug and sock.gettimeout() != 0:
            raise ValueError("the socket must be non-blocking")
        fut = futures.Future(loop=self)
        if data:
            self._sock_sendall(fut, False, sock, data)
        else:
            # Nothing to send: resolve immediately.
            fut.set_result(None)
        return fut

    def _sock_sendall(self, fut, registered, sock, data):
        # Internal helper for sock_sendall(): sends what it can and
        # re-registers itself as a writer callback for the remainder.
        fd = sock.fileno()

        if registered:
            self.remove_writer(fd)
        if fut.cancelled():
            return

        try:
            n = sock.send(data)
        except (BlockingIOError, InterruptedError):
            n = 0
        except Exception as exc:
            fut.set_exception(exc)
            return

        if n == len(data):
            fut.set_result(None)
        else:
            if n:
                data = data[n:]
            self.add_writer(fd, self._sock_sendall, fut, True, sock, data)

    def sock_connect(self, sock, address):
        """Connect to a remote socket at address.

        The address must be already resolved to avoid the trap of hanging the
        entire event loop when the address requires doing a DNS lookup. For
        example, it must be an IP address, not an hostname, for AF_INET and
        AF_INET6 address families. Use getaddrinfo() to resolve the hostname
        asynchronously.

        This method is a coroutine.
        """
        if self._debug and sock.gettimeout() != 0:
            raise ValueError("the socket must be non-blocking")
        fut = futures.Future(loop=self)
        try:
            base_events._check_resolved_address(sock, address)
        except ValueError as err:
            fut.set_exception(err)
        else:
            self._sock_connect(fut, sock, address)
        return fut

    def _sock_connect(self, fut, sock, address):
        # Start a non-blocking connect; completion is reported through fut.
        fd = sock.fileno()
        try:
            sock.connect(address)
        except (BlockingIOError, InterruptedError):
            # Issue #23618: When the C function connect() fails with EINTR, the
            # connection runs in background. We have to wait until the socket
            # becomes writable to be notified when the connection succeed or
            # fails.
            fut.add_done_callback(functools.partial(self._sock_connect_done,
                                                    fd))
            self.add_writer(fd, self._sock_connect_cb, fut, sock, address)
        except Exception as exc:
            fut.set_exception(exc)
        else:
            fut.set_result(None)

    def _sock_connect_done(self, fd, fut):
        # Done-callback: stop watching the fd once the future resolves
        # (including on cancellation).
        self.remove_writer(fd)

    def _sock_connect_cb(self, fut, sock, address):
        # Writer callback: the socket became writable, so the pending
        # connect has finished; read its result from SO_ERROR.
        if fut.cancelled():
            return

        try:
            err = sock.getsockopt(socket.SOL_SOCKET, socket.SO_ERROR)
            if err != 0:
                # Jump to any except clause below.
                raise OSError(err, 'Connect call failed %s' % (address,))
        except (BlockingIOError, InterruptedError):
            # socket is still registered, the callback will be retried later
            pass
        except Exception as exc:
            fut.set_exception(exc)
        else:
            fut.set_result(None)

    def sock_accept(self, sock):
        """Accept a connection.

        The socket must be bound to an address and listening for connections.
        The return value is a pair (conn, address) where conn is a new socket
        object usable to send and receive data on the connection, and address
        is the address bound to the socket on the other end of the connection.

        This method is a coroutine.
        """
        if self._debug and sock.gettimeout() != 0:
            raise ValueError("the socket must be non-blocking")
        fut = futures.Future(loop=self)
        self._sock_accept(fut, False, sock)
        return fut

    def _sock_accept(self, fut, registered, sock):
        # Internal helper for sock_accept(): retries via a reader callback
        # until accept() succeeds or fails.
        fd = sock.fileno()
        if registered:
            self.remove_reader(fd)
        if fut.cancelled():
            return
        try:
            conn, address = sock.accept()
            conn.setblocking(False)
        except (BlockingIOError, InterruptedError):
            self.add_reader(fd, self._sock_accept, fut, True, sock)
        except Exception as exc:
            fut.set_exception(exc)
        else:
            fut.set_result((conn, address))

    def _process_events(self, event_list):
        # Dispatch selector events to the registered (reader, writer) handles;
        # lazily unregisters handles that were cancelled in the meantime.
        for key, mask in event_list:
            fileobj, (reader, writer) = key.fileobj, key.data
            if mask & selectors.EVENT_READ and reader is not None:
                if reader._cancelled:
                    self.remove_reader(fileobj)
                else:
                    self._add_callback(reader)
            if mask & selectors.EVENT_WRITE and writer is not None:
                if writer._cancelled:
                    self.remove_writer(fileobj)
                else:
                    self._add_callback(writer)

    def _stop_serving(self, sock):
        # Stop accepting on this listening socket and close it.
        self.remove_reader(sock.fileno())
        sock.close()
class _SelectorTransport(transports._FlowControlMixin,
                         transports.Transport):
    """Common base for selector-based transports.

    Handles registration bookkeeping, the write buffer, and the shared
    close/abort/error teardown sequence.
    """

    max_size = 256 * 1024  # Buffer size passed to recv().

    _buffer_factory = bytearray  # Constructs initial value for self._buffer.

    # Attribute used in the destructor: it must be set even if the constructor
    # is not called (see _SelectorSslTransport which may start by raising an
    # exception)
    _sock = None

    def __init__(self, loop, sock, protocol, extra=None, server=None):
        super().__init__(extra, loop)
        self._extra['socket'] = sock
        self._extra['sockname'] = sock.getsockname()
        if 'peername' not in self._extra:
            try:
                self._extra['peername'] = sock.getpeername()
            except socket.error:
                # Not connected (e.g. datagram or listening socket).
                self._extra['peername'] = None
        self._sock = sock
        self._sock_fd = sock.fileno()
        self._protocol = protocol
        self._protocol_connected = True
        self._server = server
        self._buffer = self._buffer_factory()
        self._conn_lost = 0  # Set when call to connection_lost scheduled.
        self._closing = False  # Set when close() called.
        if self._server is not None:
            self._server._attach()

    def __repr__(self):
        # Human-readable state for debugging: fd, read/write polling status
        # and current write buffer size.
        info = [self.__class__.__name__]
        if self._sock is None:
            info.append('closed')
        elif self._closing:
            info.append('closing')
        info.append('fd=%s' % self._sock_fd)
        # test if the transport was closed
        if self._loop is not None and not self._loop.is_closed():
            polling = _test_selector_event(self._loop._selector,
                                           self._sock_fd, selectors.EVENT_READ)
            if polling:
                info.append('read=polling')
            else:
                info.append('read=idle')

            polling = _test_selector_event(self._loop._selector,
                                           self._sock_fd,
                                           selectors.EVENT_WRITE)
            if polling:
                state = 'polling'
            else:
                state = 'idle'

            bufsize = self.get_write_buffer_size()
            info.append('write=<%s, bufsize=%s>' % (state, bufsize))
        return '<%s>' % ' '.join(info)

    def abort(self):
        # Hard close: drop buffered data immediately.
        self._force_close(None)

    def is_closing(self):
        return self._closing

    def close(self):
        # Graceful close: stop reading, let buffered writes drain, then
        # schedule connection_lost once the buffer is empty.
        if self._closing:
            return
        self._closing = True
        self._loop.remove_reader(self._sock_fd)
        if not self._buffer:
            self._conn_lost += 1
            self._loop.call_soon(self._call_connection_lost, None)

    # On Python 3.3 and older, objects with a destructor part of a reference
    # cycle are never destroyed. It's not more the case on Python 3.4 thanks
    # to the PEP 442.
    if compat.PY34:
        def __del__(self):
            # Safety net: warn and close the socket if the transport was
            # garbage-collected without being closed.
            if self._sock is not None:
                warnings.warn("unclosed transport %r" % self, ResourceWarning)
                self._sock.close()

    def _fatal_error(self, exc, message='Fatal error on transport'):
        # Should be called from exception handler only.
        if isinstance(exc, (BrokenPipeError,
                            ConnectionResetError, ConnectionAbortedError)):
            # Ordinary disconnects are only logged in debug mode.
            if self._loop.get_debug():
                logger.debug("%r: %s", self, message, exc_info=True)
        else:
            self._loop.call_exception_handler({
                'message': message,
                'exception': exc,
                'transport': self,
                'protocol': self._protocol,
            })
        self._force_close(exc)

    def _force_close(self, exc):
        # Immediate teardown: discard the buffer, unregister both directions
        # and schedule connection_lost(exc). Idempotent via _conn_lost.
        if self._conn_lost:
            return
        if self._buffer:
            self._buffer.clear()
            self._loop.remove_writer(self._sock_fd)
        if not self._closing:
            self._closing = True
            self._loop.remove_reader(self._sock_fd)
        self._conn_lost += 1
        self._loop.call_soon(self._call_connection_lost, exc)

    def _call_connection_lost(self, exc):
        # Final step of teardown: notify the protocol (if it ever got
        # connection_made), then release all references.
        try:
            if self._protocol_connected:
                self._protocol.connection_lost(exc)
        finally:
            self._sock.close()
            self._sock = None
            self._protocol = None
            self._loop = None
            server = self._server
            if server is not None:
                server._detach()
                self._server = None

    def get_write_buffer_size(self):
        return len(self._buffer)
class _SelectorSocketTransport(_SelectorTransport):
    """Selector-based transport for plain (non-SSL) stream sockets."""

    def __init__(self, loop, sock, protocol, waiter=None,
                 extra=None, server=None):
        super().__init__(loop, sock, protocol, extra, server)
        self._eof = False      # True after write_eof() was called.
        self._paused = False   # True while reading is paused by the protocol.

        self._loop.call_soon(self._protocol.connection_made, self)
        # only start reading when connection_made() has been called
        self._loop.call_soon(self._loop.add_reader,
                             self._sock_fd, self._read_ready)
        if waiter is not None:
            # only wake up the waiter when connection_made() has been called
            self._loop.call_soon(futures._set_result_unless_cancelled,
                                 waiter, None)

    def pause_reading(self):
        """Stop delivering data_received() until resume_reading()."""
        if self._closing:
            raise RuntimeError('Cannot pause_reading() when closing')
        if self._paused:
            raise RuntimeError('Already paused')
        self._paused = True
        self._loop.remove_reader(self._sock_fd)
        if self._loop.get_debug():
            logger.debug("%r pauses reading", self)

    def resume_reading(self):
        """Resume delivering data_received() after pause_reading()."""
        if not self._paused:
            raise RuntimeError('Not paused')
        self._paused = False
        if self._closing:
            return
        self._loop.add_reader(self._sock_fd, self._read_ready)
        if self._loop.get_debug():
            logger.debug("%r resumes reading", self)

    def _read_ready(self):
        # Reader callback: the socket has data (or EOF) available.
        try:
            data = self._sock.recv(self.max_size)
        except (BlockingIOError, InterruptedError):
            pass
        except Exception as exc:
            self._fatal_error(exc, 'Fatal read error on socket transport')
        else:
            if data:
                self._protocol.data_received(data)
            else:
                # Zero-byte read means the peer closed its end.
                if self._loop.get_debug():
                    logger.debug("%r received EOF", self)
                keep_open = self._protocol.eof_received()
                if keep_open:
                    # We're keeping the connection open so the
                    # protocol can write more, but we still can't
                    # receive more, so remove the reader callback.
                    self._loop.remove_reader(self._sock_fd)
                else:
                    self.close()

    def write(self, data):
        """Buffer *data* for sending; tries an immediate send first."""
        if not isinstance(data, (bytes, bytearray, memoryview)):
            raise TypeError('data argument must be a bytes-like object, '
                            'not %r' % type(data).__name__)
        if self._eof:
            raise RuntimeError('Cannot call write() after write_eof()')
        if not data:
            return

        if self._conn_lost:
            # Connection already lost: drop the data, warn once past the
            # threshold.
            if self._conn_lost >= constants.LOG_THRESHOLD_FOR_CONNLOST_WRITES:
                logger.warning('socket.send() raised exception.')
            self._conn_lost += 1
            return

        if not self._buffer:
            # Optimization: try to send now.
            try:
                n = self._sock.send(data)
            except (BlockingIOError, InterruptedError):
                pass
            except Exception as exc:
                self._fatal_error(exc, 'Fatal write error on socket transport')
                return
            else:
                data = data[n:]
                if not data:
                    return
            # Not all was written; register write handler.
            self._loop.add_writer(self._sock_fd, self._write_ready)

        # Add it to the buffer.
        self._buffer.extend(data)
        self._maybe_pause_protocol()

    def _write_ready(self):
        # Writer callback: flush as much of the buffer as the socket accepts.
        assert self._buffer, 'Data should not be empty'

        try:
            n = self._sock.send(self._buffer)
        except (BlockingIOError, InterruptedError):
            pass
        except Exception as exc:
            self._loop.remove_writer(self._sock_fd)
            self._buffer.clear()
            self._fatal_error(exc, 'Fatal write error on socket transport')
        else:
            if n:
                del self._buffer[:n]
            self._maybe_resume_protocol()  # May append to buffer.
            if not self._buffer:
                self._loop.remove_writer(self._sock_fd)
                if self._closing:
                    # close() was waiting for the buffer to drain.
                    self._call_connection_lost(None)
                elif self._eof:
                    # write_eof() was waiting for the buffer to drain.
                    self._sock.shutdown(socket.SHUT_WR)

    def write_eof(self):
        """Half-close the write side once any buffered data is flushed."""
        if self._eof:
            return
        self._eof = True
        if not self._buffer:
            self._sock.shutdown(socket.SHUT_WR)

    def can_write_eof(self):
        return True
class _SelectorSslTransport(_SelectorTransport):
_buffer_factory = bytearray
def __init__(self, loop, rawsock, protocol, sslcontext, waiter=None,
server_side=False, server_hostname=None,
extra=None, server=None):
if ssl is None:
raise RuntimeError('stdlib ssl module not available')
if not sslcontext:
sslcontext = sslproto._create_transport_context(server_side, server_hostname)
wrap_kwargs = {
'server_side': server_side,
'do_handshake_on_connect': False,
}
if server_hostname and not server_side:
wrap_kwargs['server_hostname'] = server_hostname
sslsock = sslcontext.wrap_socket(rawsock, **wrap_kwargs)
super().__init__(loop, sslsock, protocol, extra, server)
# the protocol connection is only made after the SSL handshake
self._protocol_connected = False
self._server_hostname = server_hostname
self._waiter = waiter
self._sslcontext = sslcontext
self._paused = False
# SSL-specific extra info. (peercert is set later)
self._extra.update(sslcontext=sslcontext)
if self._loop.get_debug():
logger.debug("%r starts SSL handshake", self)
start_time = self._loop.time()
else:
start_time = None
self._on_handshake(start_time)
def _wakeup_waiter(self, exc=None):
if self._waiter is None:
return
if not self._waiter.cancelled():
if exc is not None:
self._waiter.set_exception(exc)
else:
self._waiter.set_result(None)
self._waiter = None
def _on_handshake(self, start_time):
try:
self._sock.do_handshake()
except ssl.SSLWantReadError:
self._loop.add_reader(self._sock_fd,
self._on_handshake, start_time)
return
except ssl.SSLWantWriteError:
self._loop.add_writer(self._sock_fd,
self._on_handshake, start_time)
return
except BaseException as exc:
if self._loop.get_debug():
logger.warning("%r: SSL handshake failed",
self, exc_info=True)
self._loop.remove_reader(self._sock_fd)
self._loop.remove_writer(self._sock_fd)
self._sock.close()
self._wakeup_waiter(exc)
if isinstance(exc, Exception):
return
else:
raise
self._loop.remove_reader(self._sock_fd)
self._loop.remove_writer(self._sock_fd)
peercert = self._sock.getpeercert()
if not hasattr(self._sslcontext, 'check_hostname'):
# Verify hostname if requested, Python 3.4+ uses check_hostname
# and checks the hostname in do_handshake()
if (self._server_hostname and
self._sslcontext.verify_mode != ssl.CERT_NONE):
try:
ssl.match_hostname(peercert, self._server_hostname)
except Exception as exc:
if self._loop.get_debug():
logger.warning("%r: SSL handshake failed "
"on matching the hostname",
self, exc_info=True)
self._sock.close()
self._wakeup_waiter(exc)
return
# Add extra info that becomes available after handshake.
self._extra.update(peercert=peercert,
cipher=self._sock.cipher(),
compression=self._sock.compression(),
ssl_object=self._sock,
)
self._read_wants_write = False
self._write_wants_read = False
self._loop.add_reader(self._sock_fd, self._read_ready)
self._protocol_connected = True
self._loop.call_soon(self._protocol.connection_made, self)
# only wake up the waiter when connection_made() has been called
self._loop.call_soon(self._wakeup_waiter)
if self._loop.get_debug():
dt = self._loop.time() - start_time
logger.debug("%r: SSL handshake took %.1f ms", self, dt * 1e3)
def pause_reading(self):
# XXX This is a bit icky, given the comment at the top of
# _read_ready(). Is it possible to evoke a deadlock? I don't
# know, although it doesn't look like it; write() will still
# accept more data for the buffer and eventually the app will
# call resume_reading() again, and things will flow again.
if self._closing:
raise RuntimeError('Cannot pause_reading() when closing')
if self._paused:
raise RuntimeError('Already paused')
self._paused = True
self._loop.remove_reader(self._sock_fd)
if self._loop.get_debug():
logger.debug("%r pauses reading", self)
    def resume_reading(self):
        """Re-enable data delivery after a pause_reading() call."""
        if not self._paused:
            raise RuntimeError('Not paused')
        self._paused = False
        if self._closing:
            # A closing transport never resumes; the reader stays unregistered.
            return
        self._loop.add_reader(self._sock_fd, self._read_ready)
        if self._loop.get_debug():
            logger.debug("%r resumes reading", self)
    def _read_ready(self):
        """Event-loop callback: the SSL socket may be readable."""
        if self._write_wants_read:
            # A previous send() was blocked waiting for incoming TLS data
            # (renegotiation); retry the write now that data has arrived.
            self._write_wants_read = False
            self._write_ready()
            if self._buffer:
                # _write_ready() did not drain everything; keep the writer.
                self._loop.add_writer(self._sock_fd, self._write_ready)
        try:
            data = self._sock.recv(self.max_size)
        except (BlockingIOError, InterruptedError, ssl.SSLWantReadError):
            pass
        except ssl.SSLWantWriteError:
            # The read can only progress after TLS data is sent (renegotiation):
            # switch from the reader callback to the writer callback.
            self._read_wants_write = True
            self._loop.remove_reader(self._sock_fd)
            self._loop.add_writer(self._sock_fd, self._write_ready)
        except Exception as exc:
            self._fatal_error(exc, 'Fatal read error on SSL transport')
        else:
            if data:
                self._protocol.data_received(data)
            else:
                # Empty read means the peer closed the connection.
                try:
                    if self._loop.get_debug():
                        logger.debug("%r received EOF", self)
                    keep_open = self._protocol.eof_received()
                    if keep_open:
                        logger.warning('returning true from eof_received() '
                                       'has no effect when using ssl')
                finally:
                    self.close()
    def _write_ready(self):
        """Event-loop callback: the SSL socket may be writable."""
        if self._read_wants_write:
            # A previous recv() was blocked waiting to send TLS data
            # (renegotiation); retry the read now that we can write.
            self._read_wants_write = False
            self._read_ready()
            if not (self._paused or self._closing):
                self._loop.add_reader(self._sock_fd, self._read_ready)
        if self._buffer:
            try:
                n = self._sock.send(self._buffer)
            except (BlockingIOError, InterruptedError, ssl.SSLWantWriteError):
                n = 0
            except ssl.SSLWantReadError:
                # The write can only progress after TLS data is received:
                # stop the writer and wait for _read_ready() to retry us.
                n = 0
                self._loop.remove_writer(self._sock_fd)
                self._write_wants_read = True
            except Exception as exc:
                self._loop.remove_writer(self._sock_fd)
                self._buffer.clear()
                self._fatal_error(exc, 'Fatal write error on SSL transport')
                return
            if n:
                del self._buffer[:n]
            self._maybe_resume_protocol()  # May append to buffer.
            if not self._buffer:
                self._loop.remove_writer(self._sock_fd)
                if self._closing:
                    self._call_connection_lost(None)
    def write(self, data):
        """Buffer *data* (bytes-like) for transmission; never blocks."""
        if not isinstance(data, (bytes, bytearray, memoryview)):
            raise TypeError('data argument must be a bytes-like object, '
                            'not %r' % type(data).__name__)
        if not data:
            return
        if self._conn_lost:
            # Connection already failed: drop the data, log only up to the
            # threshold to avoid flooding the logs.
            if self._conn_lost >= constants.LOG_THRESHOLD_FOR_CONNLOST_WRITES:
                logger.warning('socket.send() raised exception.')
            self._conn_lost += 1
            return
        if not self._buffer:
            # First buffered chunk: start listening for writability.
            self._loop.add_writer(self._sock_fd, self._write_ready)
        # Add it to the buffer.
        self._buffer.extend(data)
        self._maybe_pause_protocol()
    def can_write_eof(self):
        # TLS has no half-close: we cannot shut down only the write side.
        return False
class _SelectorDatagramTransport(_SelectorTransport):
    """Selector-based transport for UDP (datagram) sockets."""
    _buffer_factory = collections.deque
    def __init__(self, loop, sock, protocol, address=None,
                 waiter=None, extra=None):
        super().__init__(loop, sock, protocol, extra)
        # Optional fixed peer address; when set, sendto() must use it.
        self._address = address
        self._loop.call_soon(self._protocol.connection_made, self)
        # only start reading when connection_made() has been called
        self._loop.call_soon(self._loop.add_reader,
                             self._sock_fd, self._read_ready)
        if waiter is not None:
            # only wake up the waiter when connection_made() has been called
            self._loop.call_soon(futures._set_result_unless_cancelled,
                                 waiter, None)
    def get_write_buffer_size(self):
        """Total number of payload bytes queued for sending."""
        return sum(len(data) for data, _ in self._buffer)
    def _read_ready(self):
        # Event-loop callback: a datagram may be ready to receive.
        try:
            data, addr = self._sock.recvfrom(self.max_size)
        except (BlockingIOError, InterruptedError):
            pass
        except OSError as exc:
            # e.g. ICMP port unreachable: report it, keep the transport open.
            self._protocol.error_received(exc)
        except Exception as exc:
            self._fatal_error(exc, 'Fatal read error on datagram transport')
        else:
            self._protocol.datagram_received(data, addr)
    def sendto(self, data, addr=None):
        """Send *data* to *addr* (or the connected address); never blocks."""
        if not isinstance(data, (bytes, bytearray, memoryview)):
            raise TypeError('data argument must be a bytes-like object, '
                            'not %r' % type(data).__name__)
        if not data:
            return
        if self._address and addr not in (None, self._address):
            raise ValueError('Invalid address: must be None or %s' %
                             (self._address,))
        if self._conn_lost and self._address:
            # Connected socket already failed: drop, log up to the threshold.
            if self._conn_lost >= constants.LOG_THRESHOLD_FOR_CONNLOST_WRITES:
                logger.warning('socket.send() raised exception.')
            self._conn_lost += 1
            return
        if not self._buffer:
            # Attempt to send it right away first.
            try:
                if self._address:
                    self._sock.send(data)
                else:
                    self._sock.sendto(data, addr)
                return
            except (BlockingIOError, InterruptedError):
                # Socket not ready: register the drain callback and buffer below.
                self._loop.add_writer(self._sock_fd, self._sendto_ready)
            except OSError as exc:
                self._protocol.error_received(exc)
                return
            except Exception as exc:
                self._fatal_error(exc,
                                  'Fatal write error on datagram transport')
                return
        # Ensure that what we buffer is immutable.
        self._buffer.append((bytes(data), addr))
        self._maybe_pause_protocol()
    def _sendto_ready(self):
        # Event-loop callback: drain buffered datagrams while writable.
        while self._buffer:
            data, addr = self._buffer.popleft()
            try:
                if self._address:
                    self._sock.send(data)
                else:
                    self._sock.sendto(data, addr)
            except (BlockingIOError, InterruptedError):
                self._buffer.appendleft((data, addr))  # Try again later.
                break
            except OSError as exc:
                self._protocol.error_received(exc)
                return
            except Exception as exc:
                self._fatal_error(exc,
                                  'Fatal write error on datagram transport')
                return
        self._maybe_resume_protocol()  # May append to buffer.
        if not self._buffer:
            self._loop.remove_writer(self._sock_fd)
            if self._closing:
                self._call_connection_lost(None)
| 36.769088 | 89 | 0.56356 |
dd396bd68c5cf4d0a1d0ce169737034b72f548fb | 920 | py | Python | src/catalog/urls.py | litedesk/litedesk-webserver-provision | 1576b9d3e5e2e64d1136d276767c2710cfb1938f | [
"Apache-2.0"
] | 1 | 2016-01-18T08:19:22.000Z | 2016-01-18T08:19:22.000Z | src/catalog/urls.py | litedesk/litedesk-webserver-provision | 1576b9d3e5e2e64d1136d276767c2710cfb1938f | [
"Apache-2.0"
] | null | null | null | src/catalog/urls.py | litedesk/litedesk-webserver-provision | 1576b9d3e5e2e64d1136d276767c2710cfb1938f | [
"Apache-2.0"
] | null | null | null | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# Copyright 2014, Deutsche Telekom AG - Laboratories (T-Labs)
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from django.conf.urls import url, patterns
import views
# URL routes for the catalog app: an offer list and a per-offer detail view.
# NOTE(review): patterns() with a string view-prefix is the legacy
# (Django < 1.10) style; modern Django expects a plain list of url()/path().
urlpatterns = patterns(
    'catalog.views',
    url(r'^offers$', views.OfferListView.as_view(), name='offer-list'),
    url(r'^offer/(?P<pk>\d+)$', views.OfferView.as_view(), name='offer-detail'),
)
| 31.724138 | 80 | 0.719565 |
a5ee16c49eddcb3c1a0b8aa2477f4468dba7ad4d | 5,516 | py | Python | etc/examples/tf-model/input_data.py | adrian555/FfDL | 937c32310d98c6d900e19e59e2da1228557cbcd4 | [
"Apache-2.0"
] | 680 | 2018-02-09T18:16:41.000Z | 2022-03-25T06:54:13.000Z | etc/examples/tf-model/input_data.py | adrian555/FfDL | 937c32310d98c6d900e19e59e2da1228557cbcd4 | [
"Apache-2.0"
] | 118 | 2018-02-09T23:12:57.000Z | 2022-03-02T02:24:59.000Z | etc/examples/tf-model/input_data.py | adrian555/FfDL | 937c32310d98c6d900e19e59e2da1228557cbcd4 | [
"Apache-2.0"
] | 202 | 2018-02-09T18:25:07.000Z | 2021-12-05T09:47:15.000Z |
#!/usr/bin/env python
"""Functions for downloading and reading MNIST data."""
import gzip
from six.moves import xrange
from six.moves.urllib.request import urlretrieve
import numpy
import os
def _read32(bytestream):
dt = numpy.dtype(numpy.uint32).newbyteorder('>')
return numpy.frombuffer(bytestream.read(4), dtype=dt)[0]
def extract_images(filename):
    """Extract the images into a 4D uint8 numpy array [index, y, x, depth].

    *filename* names (or is) a gzipped IDX image file.  Raises ValueError
    when the file's magic number is not the MNIST image marker 2051.
    """
    print('Extracting', filename)
    with gzip.open(filename) as bytestream:
        magic = _read32(bytestream)
        if magic != 2051:
            raise ValueError(
                'Invalid magic number %d in MNIST image file: %s' %
                (magic, filename))
        num_images = _read32(bytestream)
        rows = _read32(bytestream)
        cols = _read32(bytestream)
        pixel_bytes = bytestream.read(rows * cols * num_images)
        pixels = numpy.frombuffer(pixel_bytes, dtype=numpy.uint8)
        return pixels.reshape(num_images, rows, cols, 1)
def dense_to_one_hot(labels_dense, num_classes=10):
    """Convert class labels from scalars to one-hot row vectors."""
    num_labels = labels_dense.shape[0]
    one_hot = numpy.zeros((num_labels, num_classes))
    # Flat index of the "hot" cell in each row: row_offset + class label.
    flat_hot = numpy.arange(num_labels) * num_classes + labels_dense.ravel()
    one_hot.flat[flat_hot] = 1
    return one_hot
def extract_labels(filename, one_hot=False):
    """Extract the labels into a 1D uint8 numpy array [index].

    With one_hot=True the labels are expanded into one-hot vectors.  Raises
    ValueError when the magic number is not the MNIST label marker 2049.
    """
    print('Extracting', filename)
    with gzip.open(filename) as bytestream:
        magic = _read32(bytestream)
        if magic != 2049:
            raise ValueError(
                'Invalid magic number %d in MNIST label file: %s' %
                (magic, filename))
        num_items = _read32(bytestream)
        labels = numpy.frombuffer(bytestream.read(num_items), dtype=numpy.uint8)
        return dense_to_one_hot(labels) if one_hot else labels
class DataSet(object):
    """In-memory MNIST split with shuffled mini-batch iteration."""
    def __init__(self, images, labels, fake_data=False):
        if fake_data:
            # Fake mode stores no arrays; next_batch() must also be called
            # with fake_data=True, since _images/_labels are never set here.
            self._num_examples = 10000
        else:
            assert images.shape[0] == labels.shape[0], (
                "images.shape: %s labels.shape: %s" % (images.shape,
                                                       labels.shape))
            self._num_examples = images.shape[0]
            # Convert shape from [num examples, rows, columns, depth]
            # to [num examples, rows*columns] (assuming depth == 1)
            assert images.shape[3] == 1
            images = images.reshape(images.shape[0],
                                    images.shape[1] * images.shape[2])
            # Convert from [0, 255] -> [0.0, 1.0].
            images = images.astype(numpy.float32)
            images = numpy.multiply(images, 1.0 / 255.0)
            self._images = images
            self._labels = labels
        self._epochs_completed = 0
        self._index_in_epoch = 0
    @property
    def images(self):
        # Flattened float32 images, scaled to [0, 1].
        return self._images
    @property
    def labels(self):
        return self._labels
    @property
    def num_examples(self):
        return self._num_examples
    @property
    def epochs_completed(self):
        return self._epochs_completed
    def next_batch(self, batch_size, fake_data=False):
        """Return the next `batch_size` examples from this data set."""
        if fake_data:
            # NOTE(review): xrange comes from six at module level (Py2 legacy).
            fake_image = [1.0 for _ in xrange(784)]
            fake_label = 0
            return [fake_image for _ in xrange(batch_size)], [
                fake_label for _ in xrange(batch_size)]
        start = self._index_in_epoch
        self._index_in_epoch += batch_size
        if self._index_in_epoch > self._num_examples:
            # Finished epoch
            self._epochs_completed += 1
            # Shuffle the data
            perm = numpy.arange(self._num_examples)
            numpy.random.shuffle(perm)
            self._images = self._images[perm]
            self._labels = self._labels[perm]
            # Start next epoch
            start = 0
            self._index_in_epoch = batch_size
            assert batch_size <= self._num_examples
        end = self._index_in_epoch
        return self._images[start:end], self._labels[start:end]
def read_data_sets(train_images_file, train_labels_file, test_images_file, test_labels_file, fake_data=False, one_hot=False):
    """Load the four MNIST files into train/validation/test DataSet objects.

    The first 5000 training examples are split off as the validation set.
    With fake_data=True no files are read and fake DataSets are returned.
    """
    class DataSets(object):
        # Simple namespace holding the three splits.
        pass
    data_sets = DataSets()
    if fake_data:
        data_sets.train = DataSet([], [], fake_data=True)
        data_sets.validation = DataSet([], [], fake_data=True)
        data_sets.test = DataSet([], [], fake_data=True)
        return data_sets
    TRAIN_IMAGES = train_images_file
    TRAIN_LABELS = train_labels_file
    TEST_IMAGES = test_images_file
    TEST_LABELS = test_labels_file
    VALIDATION_SIZE = 5000
    train_images = extract_images(TRAIN_IMAGES)
    train_labels = extract_labels(TRAIN_LABELS, one_hot=one_hot)
    test_images = extract_images(TEST_IMAGES)
    test_labels = extract_labels(TEST_LABELS, one_hot=one_hot)
    validation_images = train_images[:VALIDATION_SIZE]
    validation_labels = train_labels[:VALIDATION_SIZE]
    train_images = train_images[VALIDATION_SIZE:]
    train_labels = train_labels[VALIDATION_SIZE:]
    data_sets.train = DataSet(train_images, train_labels)
    data_sets.validation = DataSet(validation_images, validation_labels)
    data_sets.test = DataSet(test_images, test_labels)
    return data_sets
| 37.020134 | 125 | 0.638144 |
040b96b06dd9d4dc0980c5837641a1b144af536a | 28,528 | py | Python | utils/fl.py | xnyhps/fl-utils | 423dedae38a35fcd97e981376eb2d19d42c2e006 | [
"MIT"
] | null | null | null | utils/fl.py | xnyhps/fl-utils | 423dedae38a35fcd97e981376eb2d19d42c2e006 | [
"MIT"
] | null | null | null | utils/fl.py | xnyhps/fl-utils | 423dedae38a35fcd97e981376eb2d19d42c2e006 | [
"MIT"
] | null | null | null | #!/usr/bin/python2
# -*- coding: utf-8 -*-
import json, re
from html.parser import HTMLParser
# Memoized wrapper objects, keyed by '<prefix>:<id>' (see the *.get() methods).
cache = {}
# Raw game-data records, keyed the same way; expected to be filled by the caller.
data = {}
# Numeric 'Category' codes from the game data -> human-readable names.
categories = {0: 'Unspecified',
              1: 'Currency',
              101: 'Weapon',
              103: 'Hat',
              104: 'Gloves',
              105: 'Boots',
              106: 'Companion',
              107: 'Clothing',
              150: 'Curiosity',
              160: 'Advantage',
              170: 'Document',
              200: 'Goods',
              1000: 'Basic Ability',
              2000: 'Specific Ability',
              3000: 'Profession',
              5000: 'Story',
              5001: 'Intrigue',
              5002: 'Dreams',
              5003: 'Reputation',
              5004: 'Quirk',
              5025: 'Acquaintance',
              5050: 'Accomplishment',
              5100: 'Venture',
              5200: 'Progress',
              5500: 'Menace',
              6000: 'Contacts',
              6661: 'Hidden',
              6662: 'Randomizer',
              7000: 'Ambition',
              8000: 'Route',
              9000: 'Seasonal',
              10000: 'Ship',
              11000: 'Constant Companion',
              12000: 'Club',
              13000: 'Affiliation',
              14000: 'Transportation',
              15000: 'Home Comfort',
              16000: 'Academic',
              17000: 'Cartography',
              18000: 'Contraband',
              19000: 'Elder',
              20000: 'Infernal',
              21000: 'Influence',
              22000: 'Literature',
              22500: 'Lodgings',
              23000: 'Luminosity',
              24000: 'Mysteries',
              25000: 'Nostalgia',
              26000: 'Rag Trade',
              27000: 'Ratness',
              28000: 'Rumour',
              29000: 'Legal',
              30000: 'Wild Words',
              31000: 'Wines',
              32000: 'Rubbery',
              33000: 'Sidebar Ability',
              34000: 'Major Lateral',
              35000: 'Quest',
              36000: 'Minor Lateral',
              37000: 'Circumstance',
              39000: 'Avatar',
              40000: 'Objective',
              45000: 'Key',
              50000: 'Knowledge',
              60000: 'Destiny',
              70000: 'Modfier',
              70001: 'Great Game',
              70002: 'Zee Treasures',
              70003: 'Sustenance'
              }
# Used below to unescape HTML entities in titles.
# NOTE(review): HTMLParser.unescape() was deprecated and removed in
# Python 3.9 -- confirm the targeted Python version.
parser = HTMLParser()
def render_html(string):
    """Strip the basic HTML used in game text.

    <br>-style tags become newlines, <p>-style tags are removed,
    em/i become _underscores_ and strong/b become *asterisks*.
    """
    substitutions = (
        (r'<.{,2}?br.{,2}?>', '\n'),
        (r'<.{,2}?[pP].{,2}?>', ''),
        ('</?em>', '_'),
        ('</?i>', '_'),
        ('</?strong>', '*'),
        ('</?b>', '*'),
    )
    # Order matters: <br> must be handled before the generic <p> pattern.
    for pattern, replacement in substitutions:
        string = re.sub(pattern, replacement, string)
    return string
class Quality:
    """A Fallen London quality or item, built from a raw 'qualities:<id>' record."""
    def __init__(self, jdata):
        #HimbleLevel is used to determine order within categories for items
        self.raw = jdata
        self.name = jdata.get('Name', '(no name)')
        self.id = jdata['Id']
        self.desc = jdata.get('Description', '(no description)')
        # Pyramid qualities track changes in change points (cp); see Effect.__repr__.
        self.pyramid = 'UsePyramidNumbers' in jdata
        self.nature = jdata.get('Nature', 1) #1: quality; 2: item
        try:
            qldstr = jdata['ChangeDescriptionText']
            self.changedesc = parse_qlds(qldstr)
        except KeyError:
            pass
        try:
            qldstr = jdata['LevelDescriptionText']
            self.leveldesc = parse_qlds(qldstr)
        except KeyError:
            pass
        try:
            # Variable descriptions are a JSON object of name -> qld string.
            variables = {}
            d = json.loads(jdata['VariableDescriptionText'])
            for x in list(d.items()):
                variables[x[0]] = parse_qlds(x[1])
            self.variables = variables
        except KeyError:
            pass
        self.cap = jdata.get('Cap')
        self.category = categories.get(jdata.get('Category'))
        self.tag = jdata.get('Tag')
        self.test_type = 'Narrow' if 'DifficultyTestType' in jdata else 'Broad'
        self.difficulty = jdata.get('DifficultyScaler')
        self.slot = jdata.get('AssignToSlot', {}).get('Id')
        self.event = jdata.get('UseEvent', {}).get('Id') #fix infinite loop
        try:
            self.enhancements = []
            for x in jdata['Enhancements']:
                self.enhancements.append('{:+} {}'.format(x['Level'], Quality.get(x['AssociatedQuality']['Id']).name))
        except KeyError:
            pass
    def __repr__(self):
        return 'Quality: {}'.format(self.name)
    def __str__(self):
        string = 'Quality: {}'.format(self.name)
        try:
            string += '\nCategory: {}'.format(self.category)
        except AttributeError:
            pass
        try:
            if self.enhancements:
                string += '\nEnhancements: [{}]'.format(', '.join(self.enhancements))
        except AttributeError:
            pass
        return string
    @classmethod
    def get(self, id):
        """Return the (cached) Quality for *id*, building it from `data`."""
        key = 'qualities:{}'.format(id)
        if key in cache:
            return cache[key]
        else:
            # Cache the object before resolving its UseEvent: presumably this
            # breaks the recursion when the event references this quality again.
            cache[key] = Quality(data[key])
            if cache[key].event:
                cache[key].event = Storylet.get(cache[key].event)
            return cache[key]
def sub_qualities(string):
    """Replace the numeric id in every '[q:<id>]' / '[qb:<id>]' token with
    the quality's name, e.g. '[q:205]' -> '[q:Persuasive]'.

    Bug fix: the previous implementation did a plain ``str.replace`` on the
    bare digit string, which could clobber unrelated digits elsewhere in
    *string* (e.g. replacing the id '12' also corrupted '[q:112]').
    Substituting on the full token avoids those collisions.
    """
    return re.sub(r'\[(qb?):(\d+)\]',
                  lambda m: '[{}:{}]'.format(m.group(1),
                                             Quality.get(int(m.group(2))).name),
                  string)
def parse_qlds(string):
    """Parse a '~'-separated list of 'level|text' entries into {int level: text}."""
    entries = (chunk.split('|', 1) for chunk in string.split('~'))
    return {int(level): text for level, text in entries}
class Requirement: #done
    """A requirement or challenge on a single quality, attached to a
    storylet or branch."""
    def __init__(self, jdata):
        self.raw = jdata
        self.quality = Quality.get(jdata['AssociatedQuality']['Id'])
        # Cost requirements are deducted from the player when the branch is played.
        self.is_cost = jdata.get('IsCostRequirement', False)
        try:
            self.upper_bound = jdata['MaxLevel']
        except:
            try:
                # 'Advanced' bounds are expressions referencing other qualities.
                self.upper_bound = sub_qualities(jdata['MaxAdvanced'])
            except KeyError:
                pass
        try:
            self.lower_bound = jdata['MinLevel']
        except:
            try:
                self.lower_bound = sub_qualities(jdata['MinAdvanced'])
            except KeyError:
                pass
        try:
            self.difficulty = jdata['DifficultyLevel']
        except:
            try:
                self.difficulty = sub_qualities(jdata['DifficultyAdvanced'])
            except KeyError:
                pass
        # A difficulty marks this as a challenge (dice roll) rather than a gate.
        if hasattr(self, 'difficulty'):
            self.type = 'Challenge'
            self.test_type = self.quality.test_type
        else:
            self.type = 'Requirement'
        # Sanity check: the two visibility flags in the record must agree.
        assert jdata.get('BranchVisibleWhenRequirementFailed') == jdata.get('VisibleWhenRequirementFailed')
        self.visibility = jdata.get('BranchVisibleWhenRequirementFailed', False)
    def __repr__(self):
        string = ''
        if not self.visibility:
            string += '[Branch hidden if failed] '
        if self.type == 'Challenge':
            if self.quality.id == 432:
                # Quality 432 is Luck: difficulty maps to a flat percentage.
                string += 'Luck: {}% chance'.format(50 - self.difficulty * 10)
            else:
                string += '{} {}: {} {}'.format(self.test_type, self.type, self.quality.name, self.difficulty)
        else:
            # Plain requirement: render whichever bounds exist.
            try:
                if self.lower_bound == self.upper_bound:
                    string += '{} exactly {}'.format(self.quality.name, self.lower_bound)
                else:
                    string += '{} [{}-{}]'.format(self.quality.name, self.lower_bound, self.upper_bound)
            except:
                try:
                    string += '{} at least {}'.format(self.quality.name, self.lower_bound)
                except:
                    string += '{} no more than {}'.format(self.quality.name, self.upper_bound)
        return string
def render_requirements(rl, fate):
    """Format a list of Requirement objects (plus an optional FATE cost).

    Plain requirements are comma-joined on the first line; challenges each
    get their own line.  Returns 'None' when there is nothing to show.
    """
    plain = ['{} FATE'.format(fate)] if fate is not None else []
    plain += [str(r) for r in rl if r.type == 'Requirement']
    challenges = [str(r) for r in rl if r.type != 'Requirement']
    if not (plain or challenges):
        return 'None'
    return ', '.join(plain) + '\n' + '\n'.join(challenges)
class Storylet: #done?
    """A storylet or opportunity card, including its branches."""
    def __init__(self, jdata, shallow=False):
        self.raw = jdata
        self.title = parser.unescape(jdata.get('Name', '(no name)'))
        self.desc = jdata.get('Description', '(no description)')
        self.id = jdata['Id']
        try:
            self.setting = Setting.get(jdata['Setting']['Id'])
        except KeyError:
            self.setting = None
        try:
            self.area = Area.get(jdata['LimitedToArea']['Id'])
        except KeyError:
            self.area = None
        # 'Always' deck entries are storylets; 'Sometimes' entries are cards.
        self.type = 'Storylet' if jdata['Deck']['Name'] == 'Always' else 'Card' if jdata['Deck']['Name'] == 'Sometimes' else 'Unknown type'
        if self.type == 'Card':
            self.frequency = jdata['Distribution']
        self.requirements = []
        for r in jdata['QualitiesRequired']:
            self.requirements.append(Requirement(r))
        self.branches = []
        # shallow=True skips branch construction; get() uses it to pre-seed
        # the cache and break event <-> storylet recursion.
        if not shallow:
            for b in jdata['ChildBranches']:
                branch=Branch.get(b, self)
                self.branches.append(branch)
                # Give each outcome Event a back-reference to its branch.
                for e in list(branch.events.items()):
                    if e[0].endswith('Event'):
                        e[1].parent = branch
    def __repr__(self):
        return '{}: "{}"'.format(self.type, self.title)
    def __str__(self):
        #_,c = os.popen('stty size', u'r').read().split()
        string = '{} Title: "{}"\n'.format(self.type, self.title)
        try:
            string += 'Appears in {} '.format(self.setting.title)
        except AttributeError:
            pass
        try:
            string += 'Limited to area: {}'.format(self.area.name)
        except AttributeError:
            pass
        string += '\nDescription: {}'.format(render_html(self.desc))
        string += '\nRequirements: {}'.format(render_requirements(self.requirements, None))
        string += '\nBranches:\n{}'.format('\n\n{}\n\n'.format('~' * 20).join(self.render_branches()))
        return string
    def render_branches(self):
        """Return one formatted string per branch."""
        return [str(b) for b in self.branches]
    @classmethod
    def get(self, id):
        """Return the (cached) Storylet for *id*; storylets live in 'events:<id>'."""
        key = 'storylets:{}'.format(id)
        if key in cache:
            return cache[key]
        else:
            # Cache a shallow version first so recursive references resolve,
            # then overwrite it with the fully built storylet.
            cache[key] = Storylet(data['events:{}'.format(id)],True)
            cache[key] = Storylet(data['events:{}'.format(id)],False)
            return cache[key]
class Branch: #done
    """One selectable option inside a storylet, with its outcome events."""
    def __init__(self, jdata, parent):
        self.raw = jdata
        self.title = parser.unescape(jdata.get('Name', '(no title)'))
        self.id = jdata['Id']
        self.parent = parent
        self.desc = jdata.get('Description', '(no description)')
        self.cost = jdata.get('ActionCost', 1)
        self.button = jdata.get('ButtonText', 'Go')
        # FATE price, when this is a premium branch.
        self.fate = jdata.get('CurrencyCost')
        try:
            self.act = Act.get(jdata['Act']['Id'])
        except KeyError:
            self.act = None
        self.requirements = []
        for r in jdata['QualitiesRequired']:
            self.requirements.append(Requirement(r))
        # Cost requirements are spent on play: model them as negative effects
        # attached to every outcome event.
        costs = [ {'AssociatedQuality': {'Id': r.quality.id}, 'Level': -r.lower_bound} for r in self.requirements if r.is_cost ]
        self.events = {}
        # Keep both the Event records and their companion '*Chance' integers.
        for key in list(jdata.keys()):
            if key in ['DefaultEvent', 'SuccessEvent', 'RareSuccessEvent', 'RareSuccessEventChance', 'RareDefaultEvent', 'RareDefaultEventChance']:
                if key.endswith('Chance'):
                    self.events[key] = jdata[key]
                else:
                    self.events[key] = Event.get(jdata[key], costs)
    def __repr__(self):
        return '"{}"'.format(self.title)
    def __str__(self):
        string = 'Branch Title: "{}"'.format(self.title)
        if self.desc:
            string += '\nDescription: {}'.format(render_html(self.desc))
        string += '\nRequirements: {}'.format(render_requirements(self.requirements, self.fate if hasattr(self, 'fate') else None))
        if self.cost != 1:
            string += '\nAction cost: {}'.format(self.cost)
        string += '\n{}'.format(render_events(self.events))
        return string
    @classmethod
    def get(self, jdata, parent=None):
        """Return the cached Branch for this record, creating it on a miss."""
        key = 'branches:{}'.format(jdata['Id'])
        if key in cache:
            return cache[key]
        else:
            cache[key] = Branch(jdata, parent)
            return cache[key]
class Event: #done
    """The outcome of playing a branch: text, effects and redirects."""
    def __init__(self, jdata, costs):
        self.raw = jdata
        self.id = jdata['Id']
        # Back-reference to the owning Branch; filled in by Storylet.__init__.
        self.parent = None
        self.title = parser.unescape(jdata.get('Name', '(no title)'))
        self.desc = jdata.get('Description', '(no description)')
        self.category = jdata.get('Category')
        self.effects = []
        # Prepend the branch's spent costs so they display with the effects.
        if costs:
            for c in costs:
                self.effects.append(Effect(c))
        for e in jdata['QualitiesAffected']:
            self.effects.append(Effect(e))
        try:
            if jdata['ExoticEffects'] != '':
                self.exotic_effect = jdata['ExoticEffects']
            else:
                self.exotic_effect = None
        except KeyError:
            self.exotic_effect = None
        self.lodging = jdata.get('MoveToDomicile', {}).get('Id')
        self.livingstory = jdata.get('LivingStory', {}).get('Id')
        self.img = jdata.get('Image')
        # Sanity check: the two setting references in the record must agree.
        assert jdata.get('SwitchToSettingId') == jdata.get('SwitchToSetting', {}).get('Id')
        try:
            self.newsetting = Setting.get(jdata.get('SwitchToSettingId'))
        except:
            self.newsetting = None
        try:
            self.newarea = Area.get(jdata.get('MoveToArea', {}).get('Id'))
        except:
            self.newarea = None
        try:
            self.linkedevent = Storylet.get(jdata['LinkToEvent']['Id'])
        except KeyError:
            self.linkedevent = None
    def __repr__(self):
        return 'Event: {}'.format(self.title) if self.title != '' else 'Event: (no title)'
    def __str__(self):
        return 'Title: "{}"\nDescription: {}\nEffects: {}\n'.format(self.title if self.title != '' else '(no title)', render_html(self.desc), self.list_effects())
    def list_effects(self):
        """Newline-separated summary of all effects and redirects."""
        effects = []
        if self.effects != []:
            effects.append('[{}]'.format(', '.join([str(e) for e in self.effects])))
        if self.exotic_effect:
            effects.append('Exotic effect: {}'.format(self.exotic_effect))
        if self.livingstory:
            effects.append('Triggers Living Story: {}'.format(self.livingstory)) #todo make Livingstory class
        if self.lodging:
            effects.append('Move to lodging: {}'.format(self.lodging)) #todo make lodgings class
        if self.newsetting:
            effects.append('Move to new setting: {}'.format(self.newsetting)) #todo flesh out setting class
        if self.newarea:
            effects.append('Move to new area: {}'.format(self.newarea))
        try:
            if self.parent.act:
                effects.append('Associated social action: {}'.format(self.parent.act))
        except:
            pass
        if self.linkedevent:
            effects.append('Linked event: "{}" (Id {})'.format(self.linkedevent.title, self.linkedevent.id))
        return '\n'.join(effects)
    @classmethod
    def get(self, jdata, costs):
        """Return the cached Event for this record, creating it on a miss."""
        key = 'events:{}'.format(jdata['Id'])
        if key in cache:
            return cache[key]
        else:
            cache[key] = Event(jdata, costs)
            return cache[key]
def render_events(ed):
    """Render a Branch.events dict ({'SuccessEvent': Event, ...}) for display.

    Section labels depend on which outcomes rendered before them: a lone
    DefaultEvent is labelled 'Event', but 'Failure' once a SuccessEvent
    precedes it; RareDefaultEvent is 'Success' unless two sections precede it.
    """
    strings = []
    try:
        se = ed['SuccessEvent']
        strings.append( 'Success: "{}"\n{}\nEffects: {}'.format(se.title, render_html(se.desc), se.list_effects()))
    except KeyError:
        pass
    try:
        rse = ed['RareSuccessEvent']
        strings.append('Rare Success: "{}" ({}% chance)\n{}\nEffects: {}'.format(rse.title, ed['RareSuccessEventChance'], render_html(rse.desc), rse.list_effects()))
    except KeyError:
        pass
    try:
        fe = ed['DefaultEvent']
        strings.append('{}: "{}"\n{}\nEffects: {}'.format('Failure' if len(strings) > 0 else 'Event', fe.title, render_html(fe.desc), fe.list_effects()))
    except KeyError:
        pass
    try:
        rfe = ed['RareDefaultEvent']
        strings.append('Rare {}: "{}" ({}% chance)\n{}\nEffects: {}'.format('Failure' if len(strings) > 1 else 'Success', rfe.title, ed['RareDefaultEventChance'], render_html(rfe.desc), rfe.list_effects()))
    except KeyError:
        pass
    return '\n\n{}\n\n'.format('-' * 20).join(strings)
class Effect: #done: Priority goes 3/2/1/0
    """A single quality change attached to an Event.

    Bug fix in __repr__: the final "cp" fallback used
    ``'' if amount.startswith('-') else '' + amount`` which, due to
    precedence, dropped negative amounts entirely and never added the '+'
    sign that the matching item branch above adds.  It now mirrors that
    branch: ``('' if amount.startswith('-') else '+') + amount``.
    """
    def __init__(self, jdata, costs=None):
        # NOTE(review): the costs parameter is accepted but never used here.
        self.raw = jdata
        self.quality = Quality.get(jdata['AssociatedQuality']['Id'])
        self.equip = 'ForceEquip' in jdata
        try:
            self.amount = jdata['Level']
        except:
            try:
                # 'Advanced' amounts are expressions referencing other qualities.
                self.amount = sub_qualities(jdata['ChangeByAdvanced'])
            except KeyError:
                pass
        try:
            self.setTo = jdata['SetToExactly']
        except:
            try:
                self.setTo = sub_qualities(jdata['SetToExactlyAdvanced'])
            except KeyError:
                pass
        try:
            # The effect only applies while the quality is <= this level.
            self.ceil = jdata['OnlyIfNoMoreThan']
        except KeyError:
            pass
        try:
            # ... and/or >= this level.
            self.floor = jdata['OnlyIfAtLeast']
        except KeyError:
            pass
        try:
            self.priority = jdata['Priority']
        except KeyError:
            self.priority = 0
    def __repr__(self):
        # Build the optional "only if ..." suffix from ceil/floor, if present.
        try:
            limits = ' if no more than {} and at least {}'.format(self.ceil, self.floor)
        except:
            try:
                limits = ' if no more than {}'.format(self.ceil)
            except:
                try:
                    limits = ' only if at least {}'.format(self.floor)
                except:
                    limits = ''
        if self.equip:
            limits += ' (force equipped)'
        try:
            # "Set to exactly" effects: show the matching level description
            # (highest description level <= the target value), when available.
            if hasattr(self.quality, 'leveldesc') and isinstance(self.setTo, int):
                descs = sorted(list(self.quality.leveldesc.items()), reverse=True)
                for x in descs:
                    if x[0] <= self.setTo:
                        desc = x
                        break
                try:
                    return '{} (set to {} ({}){})'.format(self.quality.name, self.setTo, desc[1], limits)
                except NameError:
                    pass
            return '{} (set to {}{})'.format(self.quality.name, self.setTo, limits)
        except:
            # Incremental effects: items show "+N x Name", pyramid qualities
            # show change points ("N cp").  Advanced (string) amounts get a
            # '+' prefix added by hand when they are not negative.
            if self.quality.nature == 2 or not self.quality.pyramid:
                try:
                    return '{:+} x {}{}'.format(self.amount, self.quality.name, limits)
                except:
                    return '{} {}{}'.format(('' if self.amount.startswith('-') else '+') + self.amount, self.quality.name, limits)
            else:
                try:
                    return '{} ({:+} cp{})'.format(self.quality.name, self.amount, limits)
                except:
                    return '{} ({} cp{})'.format(self.quality.name, ('' if self.amount.startswith('-') else '+') + self.amount, limits)
class Lodging:
    """A player domicile; its hand size caps held opportunity cards."""
    def __init__(self, jdata):
        self.raw = jdata
        self.id = jdata.get('Id')
        self.name = jdata.get('Name', '(no name)')
        self.desc = render_html(jdata.get('Description', '(no description)'))
        self.image = jdata.get('ImageName')
        self.hand = jdata.get('MaxHandSize')
    def __repr__(self):
        return self.name
    def __str__(self):
        if not self.hand:
            hand_text = 'None'
        elif self.hand == 1:
            hand_text = '1 card'
        else:
            hand_text = '{} cards'.format(self.hand)
        return '\n'.join(['Lodging: {} (Id {})'.format(self.name, self.id),
                          'Description: {}'.format(self.desc),
                          'Hand size: {}'.format(hand_text)])
    @classmethod
    def get(self, id):
        """Return the cached Lodging for *id*, building it from `data` on a miss."""
        key = 'domiciles:{}'.format(id)
        if key not in cache:
            cache[key] = Lodging(data[key])
        return cache[key]
class Setting:
    """A game setting: action limits plus starting area and lodging."""
    def __init__(self, jdata):
        self.raw = jdata
        self.id = jdata.get('Id')
        self.title = jdata.get('Name')
        self.persona = jdata.get('Personae')
        self.maxactions = jdata.get('MaxActionsAllowed')
        self.exhaustion = jdata.get('ActionsInPeriodBeforeExhaustion')
        self.turnlength = jdata.get('TurnLengthSeconds')
        self.area = jdata.get('StartingArea', {}).get('Id')
        if self.area:
            # Sanity check: the embedded record must match the master data.
            assert jdata.get('StartingArea') == data['areas:{}'.format(self.area)]
            self.area = Area.get(self.area)
        self.domicile = jdata.get('StartingDomicile')
        if self.domicile:
            self.domicile = Lodging(self.domicile)
        # self.exchange = jdata.get('Exchange')
        # if self.exchange:
        #     self.exchange = Exchange(self.exchange)
        self.items = 'ItemsUsableHere' in jdata
    def __repr__(self):
        return self.title
    def __str__(self):
        string = 'Setting name: {} (Id {})'.format(self.title, self.id)
        if self.area:
            string += '\nStarting area: {}'.format(self.area)
        if self.domicile:
            string += '\nStarting lodging: {}'.format(self.domicile)
        string += '\nItems are {}usable here'.format('' if self.items else 'NOT ')
        return string
    @classmethod
    def get(self, id):
        """Return the cached Setting for *id*, building it from `data` on a miss."""
        key = 'settings:{}'.format(id)
        if key in cache:
            return cache[key]
        else:
            cache[key] = Setting(data[key])
            return cache[key]
class Area:
    """A map area a character can move to."""
    def __init__(self, jdata):
        self.raw = jdata
        self.id = jdata.get('Id')
        self.name = jdata.get('Name', '(no name)')
        self.desc = jdata.get('Description', '(no description)')
        self.image = jdata.get('ImageName', '(no image)')
        # Whether opportunity cards are available in this area.
        self.showOps = 'ShowOps' in jdata
        try:
            self.unlock = Quality.get(jdata['UnlocksWithQuality']['Id'])
        except:
            pass
        # Requires the premium (Exceptional Friendship) subscription.
        self.premium = 'PremiumSubRequired' in jdata
        self.message = jdata.get('MoveMessage', '(no move message)')
    def __repr__(self):
        return '{} (Id {})'.format(self.name, self.id)
    def __str__(self):
        string = '{} (Id {})'.format(self.name, self.id)
        string += '\nDescription: {}'.format(self.desc)
        string += '\nOpportunity cards are ' + ('' if self.showOps else 'NOT ') + 'visible'
        try:
            string += '\nUnlocks with {}'.format(self.unlock.name)
        except AttributeError:
            pass
        if self.premium:
            string += '\nRequires Exceptional Friendship'
        string += '\n{}'.format(self.message)
        return string
    @classmethod
    def get(self, id):
        """Return the cached Area for *id*, building it from `data` on a miss."""
        key = 'areas:{}'.format(id)
        if key in cache:
            return cache[key]
        else:
            cache[key] = Area(data[key])
            return cache[key]
class Act:
    """A social action: an invitation sent between players."""
    def __init__(self, jdata):
        self.raw = jdata
        self.name = jdata['Name']
        self.msg = jdata['InviteMessage']
    def __repr__(self):
        return '"{}"'.format(self.name)
    @classmethod
    def get(self, id):
        """Return the cached Act for *id*, building it from `data` on a miss."""
        key = 'acts:{}'.format(id)
        if key not in cache:
            cache[key] = Act(data[key])
        return cache[key]
class AccessCode:
    """A redeemable access code: messages shown to the player plus effects."""
    def __init__(self, jdata):
        self.raw = jdata
        self.name = jdata.get('Name', '(no name)')
        self.message1 = jdata.get('InitialMessage', '(no message)')
        self.message2 = jdata.get('CompletedMessage', '(no message)')
        self.effects = [Effect(e) for e in jdata['QualitiesAffected']]
    def __repr__(self):
        return '\n'.join(['Access code name: {}'.format(self.name),
                          'Initial message: {}'.format(self.message1),
                          'Finish message: {}'.format(self.message2),
                          'Effects: {}'.format(self.list_effects())])
    def list_effects(self):
        """Bracketed, comma-joined summary of effects, or None when empty."""
        if not self.effects:
            return None
        return '[{}]'.format(', '.join(str(e) for e in self.effects))
    @classmethod
    def get(self, id):
        """Return the cached AccessCode for *id*, building it from `data` on a miss."""
        key = 'accesscodes:{}'.format(id)
        if key not in cache:
            cache[key] = AccessCode(data[key])
        return cache[key]
class Exchange:
    """A marketplace grouping one or more Shops."""
    def __init__(self, jdata):
        self.raw = jdata
        self.id = jdata.get('Id')
        self.name = jdata.get('Name', '(no name)')
        self.title = jdata.get('Title', '(no title)')
        self.desc = jdata.get('Description', '(no description)')
        self.shops = [Shop(record) for record in jdata.get('Shops', [])]
    def __repr__(self):
        return 'Exchange Title: {} (ID {})'.format(self.title, self.id)
    def __str__(self):
        shop_names = '\n'.join(s.name for s in self.shops)
        template = ('Exchange Name: {} (ID {})\nExchange Title: {}\n'
                    'Exchange Description: {}\nShops:\n{}')
        return template.format(self.name, self.id, self.title, self.desc, shop_names)
    @classmethod
    def get(self, id):
        """Return the cached Exchange for *id*, building it from `data` on a miss."""
        key = 'exchanges:{}'.format(id)
        if key not in cache:
            cache[key] = Exchange(data[key])
        return cache[key]
class Shop:
    """A single shop inside an Exchange, mapping item names to Offerings."""
    def __init__(self, jdata):
        self.raw = jdata
        self.id = jdata.get('Id')
        self.name = jdata.get('Name', '(no name)')
        self.desc = jdata.get('Description', '(no description)')
        self.image = jdata.get('Image')
        self.requirements = []
        for r in jdata.get('QualitiesRequired', []):
            self.requirements.append(Requirement(r))
        self.offerings = {}
        # Bug fix: default to an empty list. A record without an
        # 'Availabilities' key previously made .get() return None, raising
        # TypeError here; every sibling class already uses .get(..., []).
        for item in jdata.get('Availabilities', []):
            i = Offering(item)
            self.offerings[i.item.name] = i
    def __repr__(self):
        return self.name
    def __str__(self):
        return 'Shop Name: {}\nDescription: {}\nItems: [{}]'.format(self.name, self.desc, ', '.join(list(self.offerings.keys())))
    def __getitem__(self, key):
        """Look up an Offering by its item name."""
        return self.offerings[key]
class Offering:
    """One item a shop sells and/or buys, with its price and flavour messages."""

    def __init__(self, jdata):
        self.raw = jdata
        self.id = jdata.get('Id')
        self.item = Quality.get(jdata.get('Quality', {}).get('Id'))
        self.price = Quality.get(jdata.get('PurchaseQuality', {}).get('Id'))
        self.buymessage = self._clean_message(jdata.get('BuyMessage', '(no message)'))
        self.sellmessage = self._clean_message(jdata.get('SellMessage', '(no message)'))
        # buy/sell attributes exist only when the item can be bought/sold.
        if 'Cost' in jdata:
            self.buy = (jdata.get('Cost'), self.price)
        if 'SellPrice' in jdata:
            self.sell = (jdata.get('SellPrice'), self.price)

    @staticmethod
    def _clean_message(message):
        # A message consisting solely of double quotes counts as empty.
        if not message.replace('"', ''):
            return '(no message)'
        return message

    def __repr__(self):
        return 'Item: {}'.format(self.item.name)

    def __str__(self):
        lines = ['Item: {}'.format(self.item.name)]
        try:
            buy_line = 'Buy for {0[0]} x {0[1].name}'.format(self.buy)
            if self.buymessage != '(no message)':
                buy_line += ' - Buy Message: {}'.format(self.buymessage)
            lines.append(buy_line)
        except AttributeError:
            if self.buymessage != '(no message)':
                lines.append('Buy Message: {} (cannot be bought)'.format(self.buymessage))
        try:
            sell_line = 'Sell for {0[0]} x {0[1].name}'.format(self.sell)
            if self.sellmessage != '(no message)':
                sell_line += ' - Sell Message: {}'.format(self.sellmessage)
            lines.append(sell_line)
        except AttributeError:
            if self.sellmessage != '(no message)':
                lines.append('Sell Message: {} (cannot be sold)'.format(self.sellmessage))
        return '\n'.join(lines)
| 36.857881 | 206 | 0.524502 |
c06ac8a3471733f9917ee6b7165b90efebcdf657 | 17,602 | py | Python | code_doc/utils/send_new_artifact.py | coordt/code_doc | c2fac64ac3ad61952a2d9f036727166741f9aff9 | [
"BSD-3-Clause"
] | null | null | null | code_doc/utils/send_new_artifact.py | coordt/code_doc | c2fac64ac3ad61952a2d9f036727166741f9aff9 | [
"BSD-3-Clause"
] | null | null | null | code_doc/utils/send_new_artifact.py | coordt/code_doc | c2fac64ac3ad61952a2d9f036727166741f9aff9 | [
"BSD-3-Clause"
] | null | null | null | # -*- coding: utf-8 -*-
"""The intent of this module is to send "artifacts" to a code_doc server, without using any of the
Django related stuff and possibly no extra packages. This is done by
* logging into the code_doc server using credentials and storing the cookies associated to
this session
* filling a form that contains the details of the artifact to send
* sending the content
The main class is :class:`PostMultipartWithSession` and the entry point when used as a script
is the :func:`main`.
"""
import mimetypes
import mimetools
import urllib2
import cookielib
import os
import re
import urllib
import StringIO
import types
import logging
# Configure the root logger once at import time; the `logging` MODULE itself is
# then used in place of a Logger object (logging.error/info exist module-level).
logging.basicConfig(format="%(asctime)s %(message)s", datefmt="%m/%d/%Y %I:%M:%S %p")
logger = logging
class PostMultipartWithSession(object):
    """Creates and maintains a session with a Django server, and allows to fill forms
    (any type of fields including files).

    This class is used in the project in order to send information to Django.
    NOTE(review): the implementation targets Python 2 (urllib2, cookielib,
    mimetools, StringIO, types.StringType) — confirm the deployment interpreter.
    """

    class MyHTTPRedirectHandler(urllib2.HTTPRedirectHandler):
        """Small utility class allowing to follow redirections or not.

        Every redirect seen is recorded in `redirection_map`; when
        `avoid_redirections` is True the redirect is turned into an HTTPError
        instead of being followed.
        """

        def __init__(self, instance_name=None, *args, **kwargs):
            self.instance_name = instance_name
            # maps the originally requested URL -> the Location header target
            self.redirection_map = {}
            self.avoid_redirections = False

        def http_error_302(self, req, fp, code, msg, headers):
            # Record the redirect target, then either refuse (HTTPError 401
            # with reason "redirections disabled") or delegate to the parent.
            redir = "Location" in headers
            if redir:
                self.redirection_map[req.get_full_url()] = headers["Location"]
            if self.avoid_redirections and redir:
                raise urllib2.HTTPError(
                    req.get_full_url(), 401, "redirections disabled", headers, None
                )
            return urllib2.HTTPRedirectHandler.http_error_302(
                self, req, fp, code, msg, headers
            )

        # All other redirect status codes share the same handling.
        http_error_301 = http_error_303 = http_error_307 = http_error_302

    def __init__(self, host):
        """Initializes the current instance.

        :param string host: the host to which we connect. It should include the fully qualified URL
          (eg. http://mycoffeepi:8081).
        """
        self.cookies = cookielib.CookieJar()
        self.redirection_intercepter = PostMultipartWithSession.MyHTTPRedirectHandler(
            "redir"
        )
        # Opener carrying both the redirect interceptor and the cookie jar;
        # installed globally so plain urllib2 calls reuse the session too.
        self.opener = urllib2.build_opener(
            self.redirection_intercepter, urllib2.HTTPCookieProcessor(self.cookies)
        )
        urllib2.install_opener(self.opener)
        self.host = host

    def get_redirection(self, initial_page):
        """Returns the redirection from one page to another if it exists, None otherwise."""
        value = self.redirection_intercepter.redirection_map.get(
            self.host + initial_page, None
        )
        if value is None:
            return value
        # Strip the host prefix and any query string from the recorded target.
        if value.find(self.host) == 0:
            value = value[len(self.host) :]
        pos = value.find("?")
        if pos > 0:
            value = value[:pos]
        return value

    def login(self, login_page=None, username=None, password=None):
        """Allow to log in to Django, and store the credentials into the current session."""
        self.redirection_intercepter.avoid_redirections = False
        request = urllib2.Request(self.host + login_page)
        response = self.opener.open(request)
        content = response.read()
        # csrf token needed for the login forms
        token = self._get_csrf_token(content)
        login_data = dict(
            username=username, password=password, csrfmiddlewaretoken=token
        )
        request = urllib2.Request(response.geturl(), data=urllib.urlencode(login_data))
        # the referer is needed by NGinx in order to not be considered as a
        # robot/spambot/malicious software
        request.add_header("Referer", self.host + login_page)
        try:
            response = self.opener.open(request)
        except urllib2.HTTPError as e:
            logger.error(
                """[login] an error occurred during login:\n
                \tError code: %d\n
                \tError reason: %s\n
                \tError details %s""",
                e.code,
                e.reason,
                e.fp.read(),
            )
            raise
        return response

    def _get_csrf_token(self, content):
        """Returns the csrf token put (hidden) into a form.

        Prefers the `csrftoken` session cookie; falls back to scraping the
        hidden form field from the page body. Returns None when not found.
        """
        token = None
        pos = content.find("csrfmiddlewaretoken")
        if pos > -1:
            for c in self.cookies:
                if c.name == "csrftoken":
                    token = c.value
                    break
            else:
                logger.info("[csrf] cookie not found from the content")
                pos = content.find("value", pos)
                m = re.match(r"value=\'([\w\d]+?)\'", content[pos:])
                if m is not None:
                    token = m.group(1)
        return token

    def _encode_multipart_formdata(self, fields, files):
        """Internal helper function for helping in the construction of form requests.

        :param iterable fields: sequence of tuples (name, value) elements for regular form fields.
        :param iterable files: sequence of tuples (name, filename, value) elements for data to be
          uploaded as files
        :returns: (content_type, body) ready for httplib.HTTP instance
        """
        mime_boundary = mimetools.choose_boundary()
        buf = StringIO.StringIO()
        for (key, value) in fields.items():
            buf.write("--%s\r\n" % mime_boundary)
            buf.write('Content-Disposition: form-data; name="%s"' % key)
            buf.write("\r\n\r\n" + value + "\r\n")
        for (key, filename_to_add_or_file_descriptor) in files:
            # A string entry is treated as a path to open; anything else is
            # assumed to already be an open file-like object.
            if isinstance(
                filename_to_add_or_file_descriptor, types.StringType
            ) or isinstance(filename_to_add_or_file_descriptor, types.UnicodeType):
                fd = open(filename_to_add_or_file_descriptor, "rb")
                filename = os.path.basename(filename_to_add_or_file_descriptor)
                contenttype = (
                    mimetypes.guess_type(filename_to_add_or_file_descriptor)[0]
                    or "application/octet-stream"
                )
            else:
                fd = filename_to_add_or_file_descriptor
                filename = filename_to_add_or_file_descriptor.name
                contenttype = (
                    "application/octet-stream"
                )  # we cannot be more precise here
            buf.write("--%s\r\n" % mime_boundary)
            buf.write(
                'Content-Disposition: form-data; name="%s"; filename="%s"\r\n'
                % (key, filename)
            )
            buf.write("Content-Type: %s\r\n" % contenttype)
            fd.seek(0)
            string_file = fd.read()
            buf.write("\r\n" + string_file + "\r\n")
        buf.write("--" + mime_boundary + "--\r\n\r\n")
        body = buf.getvalue()
        content_type = "multipart/form-data; boundary=%s" % mime_boundary
        return content_type, body

    def get(self, page, avoid_redirections=False):
        """Performs a GET on `page`; returns the response, or the HTTPError on failure."""
        self.redirection_intercepter.avoid_redirections = avoid_redirections
        server_url = "%s%s" % (self.host, page)
        request = urllib2.Request(server_url)
        try:
            response = self.opener.open(request)
        except urllib2.HTTPError as e:
            return e
        return response

    def post_multipart(
        self, page_url, form_fields, form_files, avoid_redirections=True
    ):
        """ Post form_fields and form_files to an http://host/page_url as multipart/form-data.

        :param form_fields: is a sequence of (name, value) elements for regular form form_fields.
        :param form_files: is a sequence of (name, filename, value) elements for data to be
          uploaded as form_files
        :param avoid_redirections: True if the request does not follow any redirections
          (login redirection for instance)
        :returns: the server's response page_url.
        """
        server_url = "%s%s" % (self.host, page_url)
        # get for the cookie and opening a session
        request = urllib2.Request(server_url)
        self.redirection_intercepter.avoid_redirections = avoid_redirections
        try:
            response = self.opener.open(request)
        except urllib2.HTTPError as e:
            return e
        content = response.read()
        token = self._get_csrf_token(content)
        fields_with_token = dict(form_fields.items())
        if token is not None:
            fields_with_token["csrfmiddlewaretoken"] = token
        content_type, body = self._encode_multipart_formdata(
            fields_with_token, form_files
        )
        headers = {"Content-Type": content_type, "Content-Length": str(len(body))}
        # the url should be non-unicode object otherwise the library makes the assumption that data
        # is also unicode, which is not.
        if isinstance(server_url, types.UnicodeType):
            request_url = server_url.encode("ascii")
        else:
            request_url = server_url
        request = urllib2.Request(request_url, data=body, headers=headers)
        # again (as for login) the referer is needed by NGinx
        request.add_header("Referer", self.host + page_url)
        try:
            response = self.opener.open(request)
            return response
        except urllib2.HTTPError as e:
            logger.error(
                """[post] an error occurred during posting of the form to %s:\n
                \tError code: %d\n
                \tError reason: %s\n
                \tError details %s""",
                self.host + page_url,
                e.code,
                e.reason,
                e.fp.read(),
            )
            raise
def main():
    """Command-line entry point.

    Parses the command line, logs onto the code_doc server, resolves the
    project/series ids, uploads the artifact file and finally verifies via its
    MD5 that the artifact is stored on the server.
    """
    import argparse
    import hashlib
    import json

    description = """code_doc upload script:
    This utility sends a file to a code_doc instance. It logs onto a code_doc instance with the
    provided credentials, gets the id of the project and version that are given from the command
    line and sends the file to this specific version.
    """
    epilog = ""
    parser = argparse.ArgumentParser(
        prog="code_doc-archival", description=description, epilog=epilog
    )

    # --- artifact-related options -------------------------------------------
    group = parser.add_argument_group("artifact")
    group.add_argument(
        "-f",
        "--file",
        dest="inputfile",
        action="store",
        required=True,
        nargs=1,
        type=argparse.FileType("rb"),
        help="""The file that should be sent to the server.""",
    )
    group.add_argument(
        "--project",
        dest="project",
        required=True,
        help="""The name of the project concerned by this upload""",
    )
    group.add_argument(
        "--artifact_series",
        dest="series",
        required=True,
        help="""The name of the series concerned by this upload""",
    )
    group.add_argument(
        "--artifact_branch",
        dest="branch",
        required=True,
        help="""The name of the branch concerned by this upload. If the branch is specified,
                the artifact_revision should also be specified""",
    )
    group.add_argument(
        "--artifact_revision",
        dest="revision",
        required=True,
        help="""The name or hash of the revision concerned by this upload. This value
                will be transformed to lower case.""",
    )
    group.add_argument(
        "--is_doc",
        dest="is_doc",
        action="store_true",
        help="""Indicates that the file is an archive containing a documentation that should be deflated on the
                server side. Additionally, the `doc_entry` parameter should be set.""",
    )
    group.add_argument(
        "--doc_entry",
        dest="doc_entry",
        default=None,
        help="""Indicates the file that is used as the documentation main entry point.""",
    )
    group.add_argument(
        "--artifact_description",
        dest="description",
        action="store",
        help="""Describes the artifact.""",
    )

    # --- server-related options ---------------------------------------------
    group = parser.add_argument_group("server")
    group.add_argument(
        "-s",
        "--code_doc_url",
        dest="url",
        metavar="URL",
        action="store",
        default="http://localhost:8000",
        help="""CodeDoc (Django) server to which the results will be send. The URL should contain
                the http scheme, the dns name and the port (default: "http://localhost:8000")""",
    )
    group.add_argument(
        "--username",
        dest="username",
        required=True,
        help="""The username used for updating the results (the user should exist on the Django instance is should be
                allowed to add results)""",
    )
    group.add_argument(
        "--password",
        dest="password",
        required=True,
        help="""The password of the provided user on the Django instance""",
    )

    args = parser.parse_args()
    # nargs=1 wraps the file object in a list: unwrap it.
    args.inputfile = args.inputfile[0]
    if args.inputfile.closed:
        logger.error("[configuration] the provided file cannot be accessed")
        raise Exception("[configuration] cannot open the artifact file")
    if args.is_doc and args.doc_entry is None:
        logger.error(
            "[configuration] the entry point should be provided for documentation artifacts"
        )
        raise Exception("[configuration] cannot open the artifact file")

    instance = PostMultipartWithSession(args.url)

    # logging with the provided credentials
    logger.debug("[log in] Logging to the server")
    instance.login(
        login_page="/accounts/login/", username=args.username, password=args.password
    )

    # getting the location (id of the project and version) to which the upload should be done
    logger.debug("[meta] Retrieving the project and series IDs")
    post_url = "/api/%s/%s/" % (args.project, args.series)
    try:
        response = instance.get(post_url)
    except Exception as e:
        logger.error(
            "[login] an exception was raised while trying to login to the server %r", e
        )
        raise
    try:
        res = response.read()
        dic_ids = json.loads(res)
        project_id = int(dic_ids["project_id"])
        series_id = int(dic_ids["series_id"])
    except Exception as e:
        logger.error(
            """[meta] an error occurred during the retrieval of the projects informations from %s:\n
            \tError details %s""",
            args.url + post_url,
            e,
        )
        raise

    # sending the artifact: preparing the form
    fields = {}
    fields["description"] = (
        args.description if args.description is not None else "uploaded by a robot"
    )
    fields["is_documentation"] = "True" if args.is_doc else "False"
    fields["documentation_entry_file"] = (
        args.doc_entry if args.doc_entry is not None else ""
    )
    # fields['upload_date'] = datetime.datetime.now().strftime('%Y-%m-%d %H:%M:%S')
    fields["branch"] = args.branch
    if fields["branch"]:
        fields["branch"] = fields["branch"].strip()
    fields["revision"] = args.revision
    if fields["revision"]:
        fields["revision"] = fields["revision"].strip().lower()
    if fields["branch"] and not fields["revision"]:
        logger.error("[configuration] branch is specified while revision is not")
        raise Exception("[configuration] branch is specified while revision is not")
    files = []
    files.append(("artifactfile", args.inputfile))
    post_url = "/artifacts/%d/%d/add" % (project_id, series_id)

    # sending
    logger.debug("[transfer] Sending artifact")
    response = instance.post_multipart(
        post_url, fields, files, avoid_redirections=False
    )
    if response.code != 200:
        # BUG fix: the second fragment was missing its 'f' prefix, so
        # "{response.code}" was emitted literally instead of the status code.
        msg = (
            "[transfer] an error was returned by the server during the "
            f"transfer of the file, return code is {response.code}"
        )
        logger.error(msg)
        raise Exception(msg)

    # checking artifact properly stored
    logger.debug("[integrity] Checking artifact")
    post_url = "/artifacts/api/%d/%d" % (project_id, series_id)
    response = instance.get(post_url)
    if response.code != 200:
        # BUG fix: same missing 'f' prefix as above.
        msg = (
            "[transfer] an error was returned by the server while querying "
            f"for artifacts, return code is {response.code}"
        )
        logger.error(msg)
        raise Exception(msg)
    args.inputfile.seek(0)
    hash_file = hashlib.md5(args.inputfile.read()).hexdigest().upper()
    dic_ids = json.loads(response.read())
    artifacts = dic_ids["artifacts"]
    # for/else: the else branch fires only when no artifact matched the hash.
    for art_id, art in artifacts.items():
        if art["md5"].upper() == hash_file:
            logger.info("[integrity] artifact successfully stored on the server")
            break
    else:
        logger.error("[integrity] the artifact cannot be found on the server")
        raise Exception("[integrity] the artifact cannot be found on the server")
if __name__ == "__main__":
    import sys

    # Exit with 0 on success, 2 on any failure (after printing/logging it).
    exit_code = 0
    try:
        main()
    except Exception as error:
        print(error)
        logger.error(f"[ERROR] The artifact was not pushed to the server: {error}")
        exit_code = 2
    sys.exit(exit_code)
ffd36bd878d3456db26fb59ebf30683fcda7a5e3 | 2,627 | py | Python | dataset_partitioning/merge_parquet.py | theferrit32/dataset-partitioning | 5c29437f33da79aaf54ee1644e3a57f49a83f002 | [
"MIT"
] | null | null | null | dataset_partitioning/merge_parquet.py | theferrit32/dataset-partitioning | 5c29437f33da79aaf54ee1644e3a57f49a83f002 | [
"MIT"
] | null | null | null | dataset_partitioning/merge_parquet.py | theferrit32/dataset-partitioning | 5c29437f33da79aaf54ee1644e3a57f49a83f002 | [
"MIT"
] | null | null | null | import os, sys, gc
import pandas as pd
import logging
import binascii
from multiprocessing import Pool
# logging.basicConfig(level=logging.DEBUG)
from dataset_partitioning import logging_util
# Module-level logger shared by every helper in this file.
logger = logging_util.get_logger(__name__)
def rand_string(length):
    """Return a random lowercase-hex string of exactly `length` characters."""
    n_bytes = int(length / 2 + 1)  # one spare byte so the hex covers odd lengths
    hex_chars = binascii.hexlify(os.urandom(n_bytes)).decode('utf-8')
    return hex_chars[:length]
def merge_parquet_directory(directory_path):
    """Merge all ``*.parquet`` files in `directory_path` into one file.

    Reads every parquet file in the directory, concatenates their rows into a
    single DataFrame, writes the result to a new randomly-named parquet file in
    the same directory, then deletes the source files.

    :param directory_path: directory containing the parquet files to merge
    :returns: path of the newly written combined parquet file
    :raises RuntimeError: if the directory does not exist or holds no parquet files
    """
    if os.path.isdir(directory_path) is False:
        msg = 'Directory does not exist: ' + directory_path
        logger.error(msg)
        raise RuntimeError(msg)
    logger.info('Merging parquet files in directory: %s' % directory_path)
    parquet_filenames = [f for f in os.listdir(directory_path) if f.endswith('.parquet')]
    parquet_paths = ['%s/%s' % (directory_path, f) for f in parquet_filenames]
    # Read everything first, then concatenate in a single pass:
    # DataFrame.append was deprecated and removed in pandas 2.0, and repeated
    # appends were quadratic in the number of rows anyway.
    frames = []
    total_records_initial = 0
    for fpath in parquet_paths:
        frame = pd.read_parquet(fpath)  # use_pandas_metadata=False
        total_records_initial += len(frame)
        frames.append(frame)
    if not frames:
        # Previously this fell through to `None.to_parquet` (AttributeError);
        # fail with an explicit message instead.
        msg = 'No parquet files found in: ' + directory_path
        logger.error(msg)
        raise RuntimeError(msg)
    df = pd.concat(frames)
    del frames
    gc.collect()
    logger.info('Finished reading parquet files %s' % (str(parquet_paths)))
    logger.debug('Read %d initial records' % (total_records_initial))
    combined_file_path = '%s/%s' % (
        directory_path, rand_string(16) + '.parquet'
    )
    logger.info('Sorting by POS')
    # TODO sorting may help, pandas to_parquet may already group values though
    # df.sort_values('POS', axis=0)
    logger.info('Writing files %s to combined file %s' % (str(parquet_paths), combined_file_path))
    df.to_parquet(path=combined_file_path, index=False)
    for fpath in parquet_paths:
        os.remove(fpath)
    logger.info('Deleted old files')
    return combined_file_path
def merge_partitioned_directory(partitioned_path, processes=1):
    """Merge every per-partition parquet directory under `partitioned_path`.

    Each immediate subdirectory is merged independently (see
    `merge_parquet_directory`), distributed over a pool of `processes` worker
    processes.

    :param partitioned_path: directory whose subdirectories each hold parquet files
    :param processes: number of worker processes for the multiprocessing pool
    :raises RuntimeError: if `partitioned_path` does not exist
    """
    if os.path.isdir(partitioned_path) is False:
        raise RuntimeError('Directory does not exist: %s' % partitioned_path)
    # One (path,) tuple per partition directory, as starmap argument tuples.
    paths = [(os.path.join(partitioned_path, parquet_directory),)
             for parquet_directory in os.listdir(partitioned_path)]
    with Pool(processes=processes) as pool:
        pool.starmap(merge_parquet_directory, paths)
if __name__ == '__main__':
    # CLI entry point: merge the partitioned directory given as argv[1].
    if len(sys.argv) >= 2:
        merge_partitioned_directory(sys.argv[1])
    else:
        raise RuntimeError('Must provide dir name')
| 35.026667 | 98 | 0.688618 |
d2f0716a6f378f193446b1bff90bb160d0a6abdb | 1,819 | py | Python | my_site/blog/migrations/0001_initial.py | sidharth-lucy/Blog | 33afd31faf5a1da44e050b13e3364b419f108c7f | [
"MIT"
] | null | null | null | my_site/blog/migrations/0001_initial.py | sidharth-lucy/Blog | 33afd31faf5a1da44e050b13e3364b419f108c7f | [
"MIT"
] | null | null | null | my_site/blog/migrations/0001_initial.py | sidharth-lucy/Blog | 33afd31faf5a1da44e050b13e3364b419f108c7f | [
"MIT"
] | null | null | null | # Generated by Django 3.2.4 on 2021-06-26 17:38
import django.core.validators
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    # Initial migration of the "blog" app: creates the Author, Tag and Post
    # tables. Author and Tag are created before Post because Post references
    # both (FK to Author, M2M to Tag).

    initial = True

    dependencies = [
    ]

    operations = [
        migrations.CreateModel(
            name='Author',
            fields=[
                ('id', models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('first_name', models.CharField(max_length=50)),
                ('last_name', models.CharField(max_length=50)),
                ('email_address', models.EmailField(max_length=254)),
            ],
        ),
        migrations.CreateModel(
            name='Tag',
            fields=[
                ('id', models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('caption', models.CharField(max_length=100)),
            ],
        ),
        migrations.CreateModel(
            name='Post',
            fields=[
                ('id', models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('title', models.CharField(max_length=300)),
                ('excerpt', models.CharField(max_length=400)),
                ('image_name', models.CharField(max_length=100)),
                ('date', models.DateField(auto_now=True)),
                ('slug', models.SlugField(unique=True)),
                ('content', models.TextField(validators=[django.core.validators.MinLengthValidator(10)])),
                # Deleting an Author keeps their posts (author set to NULL).
                ('author', models.ForeignKey(null=True, on_delete=django.db.models.deletion.SET_NULL, related_name='posts', to='blog.author')),
                ('tags', models.ManyToManyField(to='blog.Tag')),
            ],
        ),
    ]
| 38.702128 | 143 | 0.57779 |
13afd185acb14fccea98095dbb692294ae494df0 | 16,552 | py | Python | Packs/CommonScripts/Scripts/FindSimilarIncidentsV2/FindSimilarIncidentsV2.py | jrauen/content | 81a92be1cbb053a5f26a6f325eff3afc0ca840e0 | [
"MIT"
] | 2 | 2021-12-06T21:38:24.000Z | 2022-01-13T08:23:36.000Z | Packs/CommonScripts/Scripts/FindSimilarIncidentsV2/FindSimilarIncidentsV2.py | jrauen/content | 81a92be1cbb053a5f26a6f325eff3afc0ca840e0 | [
"MIT"
] | 61 | 2021-10-07T08:54:38.000Z | 2022-03-31T10:25:35.000Z | Packs/CommonScripts/Scripts/FindSimilarIncidentsV2/FindSimilarIncidentsV2.py | jrauen/content | 81a92be1cbb053a5f26a6f325eff3afc0ca840e0 | [
"MIT"
] | 2 | 2022-01-05T15:27:01.000Z | 2022-02-01T19:27:43.000Z | # type: ignore
from CommonServerPython import *
import collections
from dateutil import parser # type: ignore[import]
# Match-rule sentinels: a field maps to EXACT_MATCH (values identical),
# an int (that many differing words tolerated) or CONTAINS (substring match).
EXACT_MATCH = 0
CONTAINS = '*'

# Numeric severity code (as string) -> display name.
SEVERITY_MAP = {
    '0': 'Unknown',
    '0.5': 'Informational',
    '1': 'Low',
    '2': 'Medium',
    '3': 'High',
    '4': 'Critical'
}

# Numeric status code (as string) -> display name.
STATUS_MAP = {
    '0': 'Pending',
    '1': 'Active',
    '2': 'Closed',
    '3': 'Closed'
}


def parse_input(csv):
    """Parse a comma-separated field spec into a {field: match-rule} map.

    Each item is "field" (exact match), "field:N" (up to N differing words)
    or "field:*" (substring containment), e.g. "name,details:2,url:*".

    :param csv: the raw comma-separated specification (may be empty/None)
    :returns: dict mapping field name to EXACT_MATCH, an int, or CONTAINS
    """
    if not csv:
        return {}
    equality_map = {}
    for item in csv.split(","):
        if ':' in item:
            field, count = item.split(':')
            # BUG fix: count was compared to CONTAINS *before* stripping, so
            # "field: *" raised on int(" *").
            count = count.strip()
            equality_map[field.strip()] = CONTAINS if count == CONTAINS else int(count)
        else:
            # BUG fix: the field was previously stored without strip(), so
            # "a, b" produced a key of " b". (Also dropped the dead `keys`
            # accumulator that was never read.)
            equality_map[item.strip()] = EXACT_MATCH
    return equality_map
def remove_prefix(prefix, s):
    """Return `s` with a leading `prefix` stripped; no-op otherwise (incl. None)."""
    has_prefix = bool(s) and s.startswith(prefix)
    return s[len(prefix):] if has_prefix else s
def parse_datetime(datetime_str):
    """Parse a date/time string into a datetime via dateutil's flexible parser."""
    return parser.parse(datetime_str)
def nested_dict_flatted(d, parent_key='', sep='.'):
    """Flatten a nested dict into {'a.b.c': value} form.

    A non-empty list value is represented by its FIRST element only (matching
    how incident context stores repeated groups); empty/None input yields {}.
    """
    if not d:
        return {}
    # BUG fix: collections.MutableMapping was an alias removed in Python 3.10;
    # import from collections.abc with a Python 2 fallback.
    try:
        from collections.abc import MutableMapping
    except ImportError:  # Python 2
        from collections import MutableMapping
    items = []
    for k, v in d.items():
        new_key = parent_key + sep + k if parent_key else k
        if isinstance(v, list) and len(v) > 0:
            v = v[0]
        if isinstance(v, MutableMapping) and len(v) > 0:
            items.extend(nested_dict_flatted(v, new_key, sep=sep).items())
        else:
            items.append((new_key, v))
    return dict(items)


def get_map_from_nested_dict(nested_dict, keys, raise_error=False):
    """Extract `keys` (flattened 'a.b' paths or plain top-level keys) from `nested_dict`.

    Missing keys are silently skipped, or reported via return_error when
    `raise_error` is True.
    """
    result = {}
    flat_dict = nested_dict_flatted(nested_dict)
    for key in keys:
        if not key:
            continue
        if key in flat_dict:
            result[key] = flat_dict[key]
        elif key in nested_dict:
            result[key] = nested_dict[key]
        elif raise_error:
            # BUG fix: the message used "\l", "\c", "\c" — invalid escape
            # sequences; use '/' separators instead.
            message = "Missing key/label/custom/context field for incident: %s" % key
            return_error(message)
    return result
def get_incident_labels_map(labels):
    """Fold a list of {'type','value'} labels into {type: value-or-sorted-list}.

    A type seen once maps to its single value; a type seen multiple times maps
    to a sorted list of all its values. None input yields {}.
    """
    labels_map = {}
    if labels is None:
        return labels_map
    for label in labels:
        key = label['type']
        value = label['value']
        if key not in labels_map:
            labels_map[key] = value
        elif isinstance(labels_map[key], list):
            labels_map[key].append(value)
        else:
            labels_map[key] = [labels_map[key], value]
    for key, value in labels_map.items():
        if isinstance(value, list):
            labels_map[key] = sorted(value)
    return labels_map
def handle_str_field(key, value):
    """Render a string field as a quoted query term, escaping quotes and newlines."""
    value = value.replace('"', r'\"').replace("\n", "\\n").replace("\r", "\\r").replace(r'\\"', r'\\\"')
    # BUG fix: the old encode('utf-8')/decode round-trip only worked on
    # Python 2 (str has no .decode on Python 3); a unicode-literal format is
    # equivalent on both versions.
    return u'{}="{}"'.format(key, value)


def handle_int_field(key, value):
    """Render a numeric field as an exact-value query term."""
    return '{}:={}'.format(key, str(value))


def handle_list_field(key, value):
    """Render a list field as one term per element ('key=[]' when empty)."""
    if not value:  # empty list
        return '{}={}'.format(key, str(value))
    return [handle_field[type(item)](key, item) for item in value]


# Dispatch table: a field value's runtime type -> its query renderer.
handle_field = {
    int: handle_int_field,
    str: handle_str_field,
    list: handle_list_field,
}
# BUG fix: a bare `unicode` key raised NameError on Python 3; register it only
# when the name exists (Python 2).
try:
    handle_field[unicode] = handle_str_field  # noqa: F821
except NameError:
    pass


def build_incident_fields_query(incident_data):
    """Turn {field: value} into a flat list of query terms (lists are expanded)."""
    query_terms = []
    for key, value in incident_data.items():
        rendered = handle_field[type(value)](key, value)
        if isinstance(rendered, list):
            query_terms.extend(rendered)
        else:
            query_terms.append(rendered)
    return query_terms
def get_incidents_by_keys(similar_incident_keys, time_field, incident_time, incident_id, hours_back, ignore_closed,
                          max_number_of_results, extra_query, applied_condition):
    """Search the server for incidents matching the given field values inside the time window."""
    joiner = ' %s ' % applied_condition.lower()
    similar_keys_query = joiner.join(build_incident_fields_query(similar_incident_keys))
    max_date = parse_datetime(incident_time)
    min_date = max_date - timedelta(hours=hours_back)
    query = build_incident_query(similar_keys_query, ignore_closed, incident_id, extra_query)
    demisto.log("Find similar incidents based on initial query: %s" % query)
    get_incidents_argument = {
        'query': query,
        'size': max_number_of_results,
        'sort': '%s.desc' % time_field,
        'fromdate': min_date.isoformat(),
        'todate': max_date.isoformat(),
    }
    res = demisto.executeCommand("getIncidents", get_incidents_argument)
    if res[0]['Type'] == entryTypes['error']:
        return_error(str(res[0]['Contents']))
    return res[0]['Contents']['data'] or []
def get_context(incident_id):
    """Return the context of `incident_id`, or {} when it cannot be read."""
    response = demisto.executeCommand("getContext", {'id': incident_id})
    try:
        return response[0]['Contents']['context']
    except Exception:
        return {}
def camel_case_to_space(s):
    """Convert 'camelCaseName' to 'Camel case name' (space before each non-lowercase char)."""
    spaced = ''.join(c if c.islower() else ' ' + c for c in s)
    return spaced.strip().capitalize()
def incident_to_record(incident, time_field):
    """Shape an incident into a markdown-table row dict (id rendered as a details link)."""

    def format_time(raw):
        # Trim sub-seconds and timezone offset, turn the 'T' separator into a
        # space; on any unexpected value fall back to the raw string.
        try:
            trimmed = raw
            if trimmed.find('.') > 0:
                trimmed = trimmed[:trimmed.find('.')]
            if trimmed.find('+') > 0:
                trimmed = trimmed[:trimmed.find('+')]
            return trimmed.replace('T', ' ')
        except Exception:
            return raw

    closed_time = ''
    if incident['closed'] != "0001-01-01T00:00:00Z":
        closed_time = format_time(incident['closed'])
    return {
        'id': "[%s](#/Details/%s)" % (incident['id'], incident['id']),
        'rawId': incident['id'],
        'name': incident['name'],
        'closedTime': closed_time,
        'time': format_time(incident[time_field]),
    }
def is_text_equal_by_x_different_words(text1, text2, number_of_different_words, separator=' '):
    """Case-insensitive text comparison with a configurable tolerance.

    `number_of_different_words` is EXACT_MATCH (0) for strict equality,
    CONTAINS ('*') for substring containment in either direction, or an int
    allowing up to that many differing words on each side.
    """
    # BUG fix: `basestring` does not exist on Python 3; accept both runtimes.
    try:
        string_types = basestring  # noqa: F821 (Python 2)
    except NameError:
        string_types = str
    if not isinstance(text1, string_types):
        text1 = str(text1)
    if not isinstance(text2, string_types):
        text2 = str(text2)
    text1 = text1.lower()
    text2 = text2.lower()
    if number_of_different_words == EXACT_MATCH:
        return text1 == text2
    if number_of_different_words == CONTAINS:
        return text1.find(text2) >= 0 or text2.find(text1) >= 0
    # Tokenize on the separator (escaped newlines count as separators too),
    # dropping empty tokens, and compare the symmetric word differences.
    words_set1 = set(w.strip() for w in text1.replace("\\n", separator).split(separator) if w.strip())
    words_set2 = set(w.strip() for w in text2.replace("\\n", separator).split(separator) if w.strip())
    return (len(words_set1.difference(words_set2)) <= number_of_different_words
            and len(words_set2.difference(words_set1)) <= number_of_different_words)
def verify_map_equals(values_map1, values_map2, equality_map):
    """Check that both maps agree on EVERY key listed in `equality_map`.

    String values are compared through is_text_equal_by_x_different_words with
    the per-key tolerance; list values are compared as sets when hashable
    (plain equality otherwise); any other value must compare equal.
    """
    if not equality_map:
        return True
    if not values_map1 or not values_map2:
        return False
    # BUG fix: `basestring` does not exist on Python 3.
    try:
        string_types = basestring  # noqa: F821 (Python 2)
    except NameError:
        string_types = str
    for key in equality_map:
        if key not in values_map1 or key not in values_map2:
            return False
        value1 = values_map1[key]
        value2 = values_map2[key]
        if isinstance(value1, string_types) and isinstance(value2, string_types):
            if not is_text_equal_by_x_different_words(value1, value2, equality_map[key]):
                return False
        elif isinstance(value1, list) and isinstance(value2, list):
            # BUG fix: previously this `return`ed the comparison of the FIRST
            # list-valued key, silently skipping all remaining keys.
            try:
                equal = set(value1) == set(value2)
            except Exception:
                equal = value1 == value2
            if not equal:
                return False
        else:
            # BUG fix: same premature-return problem for non-string scalars.
            if value1 != value2:
                return False
    return True
def did_not_found_duplicates():
    """Report that no duplicate incidents exist, set the context flag, and stop the script."""
    entry = {
        'ContentsFormat': formats['markdown'],
        'Type': entryTypes['note'],
        'Contents': 'No duplicate incidents has been found.',
        'EntryContext': {'isSimilarIncidentFound': False},
    }
    demisto.results(entry)
    sys.exit(0)
def merge_incident_fields(incident):
    """Lift CustomFields to the top level and translate severity/status codes to names.

    Mutates and returns the same incident dict.
    """
    custom_fields = incident.get('CustomFields', {}) or {}
    for field_name, field_value in custom_fields.items():
        incident[field_name] = field_value
    incident['severity'] = SEVERITY_MAP.get(str(incident['severity']))
    incident['status'] = STATUS_MAP.get(str(incident['status']))
    return incident
def build_incident_query(similar_keys_query, ignore_closed, incident_id, extra_query):
    """Assemble the final incident-search query from its optional pieces.

    Starts from the similar-keys clause, optionally excludes closed incidents,
    excludes the current incident by id, and ANDs in any extra filter query.
    """
    query = similar_keys_query or ''
    if ignore_closed:
        query = '%s and -status:Closed' % query if query else '-status:Closed'
    if incident_id:
        query = '(-id:%s) and (%s)' % (incident_id, query) if query else '(-id:%s)' % incident_id
    if extra_query:
        query = '%s and (%s)' % (query, extra_query) if query else extra_query
    return query
def main():
    """Find duplicates of the current incident and write them to context.

    All configuration is read from demisto.args(). Similarity is defined by
    four kinds of criteria: exact-match incident fields, fuzzy incident
    fields, incident labels, and context keys. Matching incidents are written
    to the `similarIncident*` context keys; if nothing matches, the script
    exits via did_not_found_duplicates().
    """
    SIMILAR_INCIDENT_KEYS = [x for x in demisto.args().get('similarIncidentKeys', '').split(",") if x]
    SIMILAR_CUSTOMS_FIELDS_MAP = parse_input(demisto.args().get('similarCustomFields', ''))
    SIMILAR_INCIDENTS_FIELDS_MAP = parse_input(demisto.args().get('similarIncidentFields', ''))
    # similarIncidentFields supersedes the two deprecated args; they are mutually exclusive.
    if len(SIMILAR_INCIDENTS_FIELDS_MAP) > 0 and (
            len(SIMILAR_INCIDENT_KEYS) > 0 or len(SIMILAR_CUSTOMS_FIELDS_MAP) > 0):
        return_error('If using similarIncidentFields do not use deprecated similarCustomFields\\similarIncidentKeys')
    else:
        # Merge the deprecated args into the unified map; bare keys mean exact match.
        SIMILAR_INCIDENTS_FIELDS_MAP.update(SIMILAR_CUSTOMS_FIELDS_MAP)
        for k in map(lambda x: remove_prefix('incident.', x), SIMILAR_INCIDENT_KEYS):
            if k and len(k) > 0:
                SIMILAR_INCIDENTS_FIELDS_MAP[k] = EXACT_MATCH
    SIMILAR_LABELS_MAP = parse_input(demisto.args().get('similarLabelsKeys', ''))
    SIMILAR_CONTEXT_MAP = parse_input(demisto.args().get('similarContextKeys', ''))
    HOURS_BACK = float(demisto.args()['hoursBack'])
    TIME_FIELD = demisto.args()['timeField']
    IGNORE_CLOSED = demisto.args()['ignoreClosedIncidents'] == 'yes'
    MAX_NUMBER_OF_INCIDENTS = int(demisto.args()['maxNumberOfIncidents'])
    MAX_CANDIDATES_IN_LIST = int(demisto.args()['maxResults'])
    EXTRA_QUERY = demisto.args().get('filterQuery')
    INCIDENT_FIELDS_APPLIED_CONDITION = demisto.args()['incidentFieldsAppliedCondition']
    RAISE_ERROR_MISSING_VALUES = not (demisto.args()['skipMissingValues'] == 'yes')
    # set the incident
    incident = merge_incident_fields(demisto.incidents()[0])  # type: ignore # pylint: disable=no-value-for-parameter
    # validate fields: split the field map into exact-match vs fuzzy-match parts
    # and resolve the corresponding values from the current incident.
    exact_match_incident_fields = get_map_from_nested_dict(incident,
                                                           {k: v for k, v in SIMILAR_INCIDENTS_FIELDS_MAP.items() if
                                                            v == EXACT_MATCH}, raise_error=RAISE_ERROR_MISSING_VALUES)
    SIMILAR_INCIDENTS_FIELDS_MAP = {k: v for k, v in SIMILAR_INCIDENTS_FIELDS_MAP.items() if v != EXACT_MATCH}
    similar_incident_fields = get_map_from_nested_dict(incident, SIMILAR_INCIDENTS_FIELDS_MAP.keys(),
                                                       raise_error=RAISE_ERROR_MISSING_VALUES)
    labels_map = get_incident_labels_map(incident.get('labels', []))
    incident_similar_labels = get_map_from_nested_dict(labels_map, SIMILAR_LABELS_MAP.keys(),
                                                       raise_error=RAISE_ERROR_MISSING_VALUES)
    # Resolve the requested context keys (DT expressions) on the current incident.
    incident_similar_context = demisto.context()
    original_context_map = {}
    if incident_similar_context:
        for key in SIMILAR_CONTEXT_MAP.keys():
            response = demisto.dt(incident_similar_context, key)
            original_context_map[key] = response
            if not response and RAISE_ERROR_MISSING_VALUES:
                raise ValueError("Error: Missing context key for incident: %s" % key)
    # Log the effective comparison criteria for troubleshooting.
    log_message = 'Incident fields with exact match: %s' % exact_match_incident_fields
    if len(exact_match_incident_fields) > 1:
        log_message += ', applied with %s condition' % INCIDENT_FIELDS_APPLIED_CONDITION
    demisto.log(log_message)
    if len(similar_incident_fields) > 0:
        demisto.log('Similar incident fields (not exact match): %s' % similar_incident_fields)
    if len(incident_similar_labels) > 0:
        demisto.log('Similar labels: %s' % incident_similar_labels)
    if len(incident_similar_context) > 0:
        demisto.log('Similar context keys: %s' % original_context_map)
    # At least one criterion must resolve to a value, otherwise nothing can be compared.
    if len(exact_match_incident_fields) == 0 and len(similar_incident_fields) == 0 and len(
            incident_similar_labels) == 0 and len(original_context_map) == 0:
        return_error("Does not have any field to compare in the current incident")
    # Server-side search: exact-match fields + time window narrow the candidate set.
    duplicate_incidents = get_incidents_by_keys(exact_match_incident_fields, TIME_FIELD, incident[TIME_FIELD],
                                                incident['id'],
                                                HOURS_BACK, IGNORE_CLOSED, MAX_NUMBER_OF_INCIDENTS, EXTRA_QUERY,
                                                INCIDENT_FIELDS_APPLIED_CONDITION)
    if len(duplicate_incidents) == 0:
        did_not_found_duplicates()
    # NOTE(review): under Python 3 map() is lazy, so len(duplicate_incidents or [])
    # below would raise if no list-comprehension filter runs in between —
    # presumably this script targets Python 2; confirm.
    duplicate_incidents = map(merge_incident_fields, duplicate_incidents)
    # filter by labels
    if len(incident_similar_labels or {}) > 0:
        duplicate_incidents = [c for c in duplicate_incidents if
                               verify_map_equals(incident_similar_labels,
                                                 get_incident_labels_map(c.get('labels', [])),
                                                 SIMILAR_LABELS_MAP)
                               ]
    # filter by incident similar fields
    if len(similar_incident_fields or {}) > 0:
        duplicate_incidents = [c for c in duplicate_incidents
                               if verify_map_equals(similar_incident_fields,
                                                    get_map_from_nested_dict(c,
                                                                             SIMILAR_INCIDENTS_FIELDS_MAP.keys(),
                                                                             raise_error=False),
                                                    SIMILAR_INCIDENTS_FIELDS_MAP)
                               ]
    # filter by context: keep only candidates whose resolved context keys match ours
    filter_by_context = []
    if original_context_map:
        for c in duplicate_incidents:
            other_incident_context = get_context(c['id'])
            if other_incident_context:
                other_incident_context_map = {}
                for key in SIMILAR_CONTEXT_MAP.keys():
                    response = demisto.dt(other_incident_context, key)
                    if response:
                        other_incident_context_map[key] = response
                if verify_map_equals(original_context_map, other_incident_context_map, SIMILAR_CONTEXT_MAP):
                    filter_by_context.append(c)
        duplicate_incidents = filter_by_context
    # update context
    if len(duplicate_incidents or []) > 0:
        duplicate_incidents_rows = map(lambda x: incident_to_record(x, TIME_FIELD), duplicate_incidents)
        # Oldest first; ties broken by id so the "similarIncident" pick is stable.
        duplicate_incidents_rows = list(sorted(duplicate_incidents_rows, key=lambda x: (x['time'], x['id'])))
        context = {
            'similarIncidentList': duplicate_incidents_rows[:MAX_CANDIDATES_IN_LIST],
            'similarIncident': duplicate_incidents_rows[0],
            'isSimilarIncidentFound': True
        }
        duplicate_incidents_rows = duplicate_incidents_rows[:MAX_CANDIDATES_IN_LIST]
        # Human-readable table: camelCase keys become space-separated headers.
        hr_result = map(lambda row: dict((camel_case_to_space(k), v) for k, v in row.items()), duplicate_incidents_rows)
        markdown_result = tableToMarkdown("Duplicate incidents",
                                          hr_result,
                                          headers=['Id', 'Name', 'Closed time', 'Time'])
        return {'ContentsFormat': formats['markdown'],
                'Type': entryTypes['note'],
                'Contents': markdown_result,
                'EntryContext': context}
    else:
        did_not_found_duplicates()
# Demisto executes scripts under '__builtin__' (py2) / 'builtins' (py3),
# so the usual '__main__' check alone would never fire on the platform.
if __name__ in ['__main__', '__builtin__', 'builtins']:
    entry = main()
    demisto.results(entry)
| 38.945882 | 120 | 0.622342 |
6ed451bb05e127fb0e12eac77f4fbed3cd5382b8 | 360 | py | Python | pingo/examples/blink_firmata_auto.py | willingc/pingo | 0890bf5ed763e9061320093fc3fb5f7543c5cc2c | [
"MIT"
] | null | null | null | pingo/examples/blink_firmata_auto.py | willingc/pingo | 0890bf5ed763e9061320093fc3fb5f7543c5cc2c | [
"MIT"
] | null | null | null | pingo/examples/blink_firmata_auto.py | willingc/pingo | 0890bf5ed763e9061320093fc3fb5f7543c5cc2c | [
"MIT"
] | null | null | null | """Blink an LED on a remote Arduino
This script assumes:
- this computer is connected to an Arduino
- the Arduino is running the Examples->Firmata->StandardFirmata sketch
"""
import time
import pingo
# Locate and connect to the attached Arduino (it must be running the
# Examples->Firmata->StandardFirmata sketch, per the module docstring).
ard = pingo.arduino.get_arduino()
print('Connected to: %s' % ard)
# Pin 13 drives the on-board LED on most Arduino boards.
led = ard.pins[13]
led.mode = pingo.OUT
# Blink forever: toggle the LED every 100 ms (runs until interrupted).
while True:
    led.toggle()
    time.sleep(.1)
| 17.142857 | 70 | 0.713889 |
7fd998abe6d8410d3bba492685465a569a8ac888 | 1,053 | py | Python | src/garage/np/algos/base.py | parachutel/garage | e9d4301278f5dd31e3cbd20df1422befa2d0b6c4 | [
"MIT"
] | 3 | 2020-09-26T16:17:52.000Z | 2021-04-23T08:56:04.000Z | src/garage/np/algos/base.py | parachutel/garage | e9d4301278f5dd31e3cbd20df1422befa2d0b6c4 | [
"MIT"
] | 1 | 2019-09-03T19:57:40.000Z | 2019-09-03T19:57:40.000Z | src/garage/np/algos/base.py | parachutel/garage | e9d4301278f5dd31e3cbd20df1422befa2d0b6c4 | [
"MIT"
] | 1 | 2020-12-09T03:06:48.000Z | 2020-12-09T03:06:48.000Z | import abc
class RLAlgorithm(abc.ABC):
"""Base class for all the algorithms.
Note:
If sampler_cls isn't specified to the LocalRunner,
self.sampler_cls is required to provide default sampler
for algorithm.
"""
@abc.abstractmethod
def train_once(self, itr, paths):
"""Performs one step of policy optimization given one batch of samples.
Args:
itr (int): Iteration number.
paths (list[dict]): A list of collected paths.
"""
pass
@abc.abstractmethod
def train(self, runner, batch_size):
"""Obtain samplers and start actual training for each epoch.
Args:
runner (LocalRunner): LocalRunner is passed to give algorithm
the access to runner.step_epochs(), which provides services
such as snapshotting and sampler control.
batch_size (int): Batch size used to obtain samplers.
Returns:
The average return in last epoch cycle.
"""
pass
| 26.325 | 79 | 0.611586 |
578acba5ce831384b7eebed8267d6cdffc76bc28 | 2,873 | py | Python | custom_components/sensor/rpi_power.py | aegaas/home-assistant-config | 7442c4c9d3e165bd039c4436107769053ae06091 | [
"Apache-2.0"
] | 1 | 2018-12-22T17:45:11.000Z | 2018-12-22T17:45:11.000Z | custom_components/sensor/rpi_power.py | aegaas/home-assistant-config | 7442c4c9d3e165bd039c4436107769053ae06091 | [
"Apache-2.0"
] | null | null | null | custom_components/sensor/rpi_power.py | aegaas/home-assistant-config | 7442c4c9d3e165bd039c4436107769053ae06091 | [
"Apache-2.0"
] | null | null | null | """
A sensor platform which detects underruns and capped status from the official Raspberry Pi Kernel.
Minimal Kernel needed is 4.14+
"""
import logging
import voluptuous as vol
from homeassistant.helpers.entity import Entity
import homeassistant.helpers.config_validation as cv
from homeassistant.components.sensor import (PLATFORM_SCHEMA)
__version__ = '0.0.8'
_LOGGER = logging.getLogger(__name__)
# Sysfs node exposed by the Raspberry Pi firmware (kernel 4.14+) that
# reports the throttled/under-voltage bit flags.
SYSFILE = '/sys/devices/platform/soc/soc:firmware/get_throttled'
CONF_TEXT_STATE = 'text_state'
# Optional boolean: when true, the human-readable description is used as the
# entity state instead of the raw flag value.
PLATFORM_SCHEMA = PLATFORM_SCHEMA.extend({
    vol.Optional(CONF_TEXT_STATE, default=False): cv.boolean,
})
def setup_platform(hass, config, add_devices, discovery_info=None):
    """Register the RPi power sensor if the kernel exposes the throttle file."""
    import os
    if not os.path.isfile(SYSFILE):
        _LOGGER.critical('Can not read system information, your hardware is not supported.')
        return
    add_devices([RaspberryChargerSensor(config.get(CONF_TEXT_STATE))], True)
class RaspberryChargerSensor(Entity):
    """Sensor exposing the Raspberry Pi firmware throttle/under-voltage status.

    Reads the raw flag value from SYSFILE and maps it to a human-readable
    description. Depending on the `text_state` option, either the raw value
    or the description is reported as the state; the other is exposed as a
    state attribute.
    """

    # Raw `get_throttled` values mapped to human-readable descriptions.
    # Unknown values fall through to a generic power-problem message.
    _DESCRIPTIONS = {
        '0': 'No throttling detected',
        '1000': 'An under-voltage has occurred.',
        '2000': 'ARM frequency capped due to under-voltage.',
        '3000': 'ARM frequency capped due to under-voltage.',
        '4000': 'CPU is throttled due to under-voltage.',
        '5000': 'CPU is throttled due to under-voltage.',
        '8000': 'Soft Temp limit has occurred.',
    }

    def __init__(self, text_state):
        """Initialize the sensor and perform an initial read.

        Args:
            text_state: If truthy, report the description text as the state.
        """
        self._state = None
        self._description = None
        self._text_state = text_state
        self.update()

    def update(self):
        """Read the throttle status from sysfs and refresh state/attributes."""
        # BUG FIX: the previous implementation used open(...).read() without
        # closing, leaking a file handle on every poll; use a context manager.
        with open(SYSFILE, 'r') as sysfile:
            _throttled = sysfile.read()[:-1]  # strip the trailing newline
        self._description = self._DESCRIPTIONS.get(
            _throttled, 'There is a problem with your power supply or system.')
        if self._text_state:
            self._state = self._description
            self._attribute = {'value': _throttled}
        else:
            self._state = _throttled
            self._attribute = {'description': self._description}

    @property
    def name(self):
        """Return the name of the sensor"""
        return 'RPi Power status'

    @property
    def state(self):
        """Return the state of the sensor"""
        return self._state

    @property
    def icon(self):
        """Return the icon of the sensor"""
        return 'mdi:raspberrypi'

    @property
    def device_state_attributes(self):
        """Return the attribute(s) of the sensor"""
        return self._attribute
| 33.022989 | 98 | 0.650888 |
120b082b103f6b8e405158a53e1299a7ec029489 | 2,780 | py | Python | src/game/game.py | Jaavv/poetic_pumas | 7274bcf5790eb4aed590042f2ffdbadcd4af71ca | [
"MIT"
] | null | null | null | src/game/game.py | Jaavv/poetic_pumas | 7274bcf5790eb4aed590042f2ffdbadcd4af71ca | [
"MIT"
] | null | null | null | src/game/game.py | Jaavv/poetic_pumas | 7274bcf5790eb4aed590042f2ffdbadcd4af71ca | [
"MIT"
] | null | null | null | from blessed.keyboard import Keystroke
from blessed import Terminal
from pathlib import Path
from .element_data import ElementData
from .input_handler import InputHandler
from .level import Level
from .level import create_level_from_file
from .renderer import Renderer
from .soundboard import Soundboard
from ..elements.exitdoor import ExitDoor
class GameState:
    """Overall game state.
    Contains all necessary information about the game information and processes
    user input actions.
    """
    def __init__(self, level: Level, renderer: Renderer):
        """Set up input/sound subsystems and record the level-1 exit location."""
        self.level = level
        self.renderer = renderer
        self.input_handler = InputHandler()
        self.soundboard = Soundboard()
        self.element_data = ElementData(level=self.level, soundboard=self.soundboard)
        self.current_level = 1
        # Reaching the exit door's position is the win condition (see game_state).
        self.winning_location = self.level.find_element(ExitDoor).position
        # Level number -> level definition file under resources/levels.
        self.level_dict = {
            1: "level-1.txt",
            2: "level-2.txt",
            3: "level-3.txt",
            4: "level-4.txt",
            5: "level-5.txt",
            6: "level-6.txt",
            7: "level-7.txt",
            8: "level-8.txt",
            9: "level-9.txt",
            10: "level-10.txt",
            11: "level-11.txt",
        }
    def game_state(self) -> None:
        """Checks near MAIN_CHARACTER position for exit door element"""
        # TODO: if active element is dude
        if (self.level.active_element.position == self.winning_location):
            self.next_level()
            self.set_level()
    def next_level(self) -> None:
        # Advance the level counter; set_level() performs the actual load.
        self.current_level += 1
    def set_level(self) -> None:
        """Load the level file for current_level and rebuild renderer state."""
        if self.current_level not in self.level_dict.keys():
            # NOTE(review): wrapping past the last level resets the counter but
            # does NOT reload level 1's data here — confirm that is intended.
            self.current_level = 1
        else:
            self.level = create_level_from_file(
                level_file_name=self.level_dict[self.current_level],
                levels_directory=Path(__file__).parent.parent / Path("resources/levels"))
            self.winning_location = self.level.find_element(ExitDoor).position
            # Reset the cursor before swapping in a fresh renderer for the new level.
            self.renderer.terminal.move_xy(0, 0)
            self.renderer = Renderer(terminal=Terminal(), level=self.level)
            self.element_data = ElementData(level=self.level, soundboard=self.soundboard)
    def process_input(self, keystroke: Keystroke) -> None:
        """Takes the active element of the level and applies the input onto it."""
        self.input_handler.handle_input(keystroke, data=self.element_data)
    def update(self, keystroke: Keystroke) -> None:
        """Process the game on every looped update call.
        Args:
            keystroke (Keystroke): user input on keyboard
        """
        self.process_input(keystroke)
        self.game_state()
        self.renderer.render_level()
cc2044a18344255866a4b2034d683f43b386e22f | 359 | py | Python | simplemooc/courses/migrations/0006_auto_20200827_2021.py | matheuspercario/simplemooc-django | 05e3a3ef2249482adb3186b5394e07460030ef5d | [
"MIT"
] | 1 | 2020-09-14T11:55:01.000Z | 2020-09-14T11:55:01.000Z | simplemooc/courses/migrations/0006_auto_20200827_2021.py | matheuspercario/simplemooc-django | 05e3a3ef2249482adb3186b5394e07460030ef5d | [
"MIT"
] | null | null | null | simplemooc/courses/migrations/0006_auto_20200827_2021.py | matheuspercario/simplemooc-django | 05e3a3ef2249482adb3186b5394e07460030ef5d | [
"MIT"
] | null | null | null | # Generated by Django 3.1 on 2020-08-27 23:21
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
('courses', '0005_auto_20200827_2011'),
]
operations = [
migrations.RenameField(
model_name='material',
old_name='nome',
new_name='name',
),
]
| 18.894737 | 47 | 0.582173 |
d0f8323824bef2a99d4c0ab88803df902a569117 | 1,156 | py | Python | support/cross/aio/clock.py | pmp-p/python-wasm-plus | ccd34cf46fc00924e256a0ad1d8cf5061b9520ac | [
"MIT"
] | 3 | 2022-03-11T22:02:39.000Z | 2022-03-16T08:04:33.000Z | support/cross/aio/clock.py | pmp-p/python-wasm-plus | ccd34cf46fc00924e256a0ad1d8cf5061b9520ac | [
"MIT"
] | null | null | null | support/cross/aio/clock.py | pmp-p/python-wasm-plus | ccd34cf46fc00924e256a0ad1d8cf5061b9520ac | [
"MIT"
] | null | null | null | import sys
import time
import asyncio
class tui:
    """Context manager for absolute-addressed terminal drawing.

    While the context is active, calling the instance writes text at an
    absolute (row, column) position using ANSI escape sequences.
    """
    # use direct access, it is absolute addressing on raw terminal.
    out = sys.__stdout__.write
    # save cursor
    def __enter__(self):
        # ESC 7 saves the cursor position; CSI ?25l hides the cursor.
        self.out("\x1b7\x1b[?25l")
        return self
    # restore cursor
    def __exit__(self, *tb):
        # ESC 8 restores the saved cursor; CSI ?25h shows the cursor again.
        self.out("\x1b8\x1b[?25h")
    def __call__(self, *a, **kw):
        # CSI <row>;<col>H moves the cursor (row = kw 'z', col = kw 'x'),
        # then the positional args are joined with spaces and written there.
        self.out("\x1b[{};{}H{}".format(kw.get("z", 12), kw.get("x", 40), " ".join(a)))
def step(x=70, y=0, z=2):
    """Draw one frame of the boxed clock at column x, row z.

    NOTE(review): the `y` parameter is accepted and forwarded to box() but
    never used for positioning — confirm whether it is vestigial.
    """
    import time
    def box(t, x, y, z):
        # Draw `t` surrounded by a border; each line of `t` advances row z.
        lines = t.split("\n")
        fill = "โ" * len(t)
        if z > 1:
            print("โ%sโ" % fill, x=x, z=z - 1)
        for t in lines:
            print("โ%sโ" % t, x=x, z=z)
            z += 1
        print("โ%sโ" % fill, x=x, z=z)
    # Inside the context, `print` is shadowed by the tui instance, so every
    # print() below is an absolute-position terminal write, not stdout print.
    with tui() as print:
        # draw a clock
        t = "%02d:%02d:%02d โข 99%% " % time.localtime()[3:6]
        box(t, x=x, y=y, z=z)
async def clock(x=70, y=0, z=2):
    """Redraw the on-screen clock once per second, forever (daemon task)."""
    # run as a daemon
    while True:  # not asyncio.exit:
        step(x, y, z)
        await asyncio.sleep(1)
        # Flush so the escape-sequence writes appear immediately.
        sys.stdout.flush()
def start(x=70, y=0, z=2):
    # Schedule the clock as a background task; requires a running event loop.
    asyncio.create_task(clock(x, y, z))
| 22.230769 | 87 | 0.49654 |
5eb8d01aaa82ed5f107852932e2d352f233bc2cb | 8,328 | py | Python | integration/combination/test_function_with_alias.py | hawflau/serverless-application-model | d2cf4b7e23d26cdf677c564d53bb58e6a5b6cac2 | [
"Apache-2.0"
] | null | null | null | integration/combination/test_function_with_alias.py | hawflau/serverless-application-model | d2cf4b7e23d26cdf677c564d53bb58e6a5b6cac2 | [
"Apache-2.0"
] | 1 | 2020-03-03T01:46:46.000Z | 2020-03-03T01:46:46.000Z | integration/combination/test_function_with_alias.py | hawflau/serverless-application-model | d2cf4b7e23d26cdf677c564d53bb58e6a5b6cac2 | [
"Apache-2.0"
] | null | null | null | import json
from botocore.exceptions import ClientError
from integration.helpers.base_test import BaseTest, LOG
from integration.helpers.common_api import get_function_versions
class TestFunctionWithAlias(BaseTest):
    """Integration tests for SAM functions using AutoPublishAlias.

    Each test deploys a real CloudFormation stack and verifies the Lambda
    versions/aliases/permissions it produces via the AWS APIs.
    """
    def test_updating_version_by_changing_property_value(self):
        """A CodeUri change publishes a new version and repoints the alias."""
        self.create_and_verify_stack("combination/function_with_alias")
        alias_name = "Live"
        function_name = self.get_physical_id_by_type("AWS::Lambda::Function")
        version_ids = self.get_function_version_by_name(function_name)
        self.assertEqual(["1"], version_ids)
        alias = self.get_alias(function_name, alias_name)
        self.assertEqual("1", alias["FunctionVersion"])
        # Changing CodeUri should create a new version, and leave the existing version in tact
        self.set_template_resource_property("MyLambdaFunction", "CodeUri", self.file_to_s3_uri_map["code2.zip"]["uri"])
        self.transform_template()
        self.deploy_stack()
        version_ids = self.get_function_version_by_name(function_name)
        self.assertEqual(["1", "2"], version_ids)
        alias = self.get_alias(function_name, alias_name)
        self.assertEqual("2", alias["FunctionVersion"])
        # Make sure the stack has only One Version & One Alias resource
        alias = self.get_stack_resources("AWS::Lambda::Alias")
        versions = self.get_stack_resources("AWS::Lambda::Version")
        self.assertEqual(len(alias), 1)
        self.assertEqual(len(versions), 1)
    def test_alias_deletion_must_retain_version(self):
        """Removing AutoPublishAlias deletes stack resources but keeps the version."""
        self.create_and_verify_stack("combination/function_with_alias")
        alias_name = "Live"
        function_name = self.get_physical_id_by_type("AWS::Lambda::Function")
        version_ids = self.get_function_version_by_name(function_name)
        self.assertEqual(["1"], version_ids)
        # Check that the DeletionPolicy on Lambda Version holds good
        # Remove alias, update stack, and verify the version still exists by calling Lambda APIs
        self.remove_template_resource_property("MyLambdaFunction", "AutoPublishAlias")
        self.transform_template()
        self.deploy_stack()
        # Make sure both Lambda version & alias resource does not exist in stack
        alias = self.get_stack_resources("AWS::Lambda::Alias")
        versions = self.get_stack_resources("AWS::Lambda::Version")
        self.assertEqual(len(alias), 0)
        self.assertEqual(len(versions), 0)
        # Make sure the version still exists in Lambda
        version_ids = self.get_function_version_by_name(function_name)
        self.assertEqual(["1"], version_ids)
    def test_function_with_alias_with_intrinsics(self):
        """A parameter-only change (CodeKey) must NOT publish a new version."""
        parameters = self.get_default_test_template_parameters()
        self.create_and_verify_stack("combination/function_with_alias_intrinsics", parameters)
        alias_name = "Live"
        function_name = self.get_physical_id_by_type("AWS::Lambda::Function")
        version_ids = get_function_versions(function_name, self.client_provider.lambda_client)
        self.assertEqual(["1"], version_ids)
        alias = self.get_alias(function_name, alias_name)
        self.assertEqual("1", alias["FunctionVersion"])
        # Let's change Key by updating the template parameter, but keep template same
        # This should create a new version and leave existing version intact
        parameters[1]["ParameterValue"] = "code2.zip"
        # self.deploy_stack(parameters)
        self.update_stack("combination/function_with_alias_intrinsics", parameters)
        version_ids = get_function_versions(function_name, self.client_provider.lambda_client)
        self.assertEqual(["1"], version_ids)
        alias = self.get_alias(function_name, alias_name)
        self.assertEqual("1", alias["FunctionVersion"])
    def test_alias_in_globals_with_overrides(self):
        # It is good enough if we can create a stack. Globals are pre-processed on the SAM template and don't
        # add any extra runtime behavior that needs to be verified
        self.create_and_verify_stack("combination/function_with_alias_globals")
    def test_alias_with_event_sources_get_correct_permissions(self):
        # There are two parts to testing Event Source integrations:
        # 1. Check if all event sources get wired to the alias
        # 2. Check if Lambda::Permissions for the event sources are applied on the Alias
        #
        # This test checks #2 only because the former is easy to validate directly by looking at the CFN template in unit tests
        # Also #1 requires calls to many different services which is hard.
        self.create_and_verify_stack("combination/function_with_alias_and_event_sources")
        alias_name = "Live"
        # Verify the permissions on the Alias are setup correctly. There should be as many resource policies as the Lambda::Permission resources
        function_name = self.get_physical_id_by_type("AWS::Lambda::Function")
        alias_arn = self.get_alias(function_name, alias_name)["AliasArn"]
        permission_resources = self.get_stack_resources("AWS::Lambda::Permission")
        # Get the policies on both function & alias
        # Alias should have as many policies as the Lambda::Permissions resource
        alias_policy_str = self.get_function_policy(alias_arn)
        alias_policy = json.loads(alias_policy_str)
        self.assertIsNotNone(alias_policy.get("Statement"))
        self.assertEqual(len(alias_policy["Statement"]), len(permission_resources))
        # Function should have *no* policies
        function_policy_str = self.get_function_policy(function_name)
        self.assertIsNone(function_policy_str)
        # Remove the alias, deploy the stack, and verify that *all* permission entities transfer to the function
        self.remove_template_resource_property("MyAwesomeFunction", "AutoPublishAlias")
        self.transform_template()
        self.deploy_stack()
        # Get the policies on both function & alias
        # Alias should have *no* policies
        alias_policy_str = self.get_function_policy(alias_arn)
        self.assertIsNone(alias_policy_str)
        # Function should have as many policies as the Lambda::Permissions resource
        function_policy_str = self.get_function_policy(function_name)
        function_policy = json.loads(function_policy_str)
        self.assertEqual(len(function_policy["Statement"]), len(permission_resources))
    def get_function_version_by_name(self, function_name):
        """Return the published version numbers of a function (excluding $LATEST)."""
        lambda_client = self.client_provider.lambda_client
        versions = lambda_client.list_versions_by_function(FunctionName=function_name)["Versions"]
        # Exclude $LATEST from the list and simply return all the version numbers.
        filtered_versions = [version["Version"] for version in versions if version["Version"] != "$LATEST"]
        return filtered_versions
    def get_alias(self, function_name, alias_name):
        """Return the Lambda alias configuration for the given function/alias."""
        lambda_client = self.client_provider.lambda_client
        return lambda_client.get_alias(FunctionName=function_name, Name=alias_name)
    def get_function_policy(self, function_arn):
        """Return the resource policy of a function/alias, or None if absent."""
        lambda_client = self.client_provider.lambda_client
        try:
            policy_result = lambda_client.get_policy(FunctionName=function_arn)
            return policy_result["Policy"]
        except ClientError as error:
            # A missing policy is an expected outcome, not a test failure.
            if error.response["Error"]["Code"] == "ResourceNotFoundException":
                LOG.debug("The resource you requested does not exist.")
                return None
            else:
                raise error
    def get_default_test_template_parameters(self):
        """Return the CFN parameter list (Bucket/CodeKey/SwaggerKey) for the test templates."""
        parameters = [
            {
                "ParameterKey": "Bucket",
                "ParameterValue": self.s3_bucket_name,
                "UsePreviousValue": False,
                "ResolvedValue": "string",
            },
            {
                "ParameterKey": "CodeKey",
                "ParameterValue": "code.zip",
                "UsePreviousValue": False,
                "ResolvedValue": "string",
            },
            {
                "ParameterKey": "SwaggerKey",
                "ParameterValue": "swagger1.json",
                "UsePreviousValue": False,
                "ResolvedValue": "string",
            },
        ]
        return parameters
| 48.418605 | 144 | 0.692843 |
e65443b585324725f92d3c9ffae12123b3b9177c | 1,954 | py | Python | lustre/num_osts_demo.py | PawseySC/Intermediate-Supercomputing | 2adfdbe994d1a07491ffee783bfd2a2c03f03e7e | [
"CC0-1.0"
] | 3 | 2018-04-06T02:57:44.000Z | 2018-11-27T22:01:11.000Z | lustre/num_osts_demo.py | PawseySupercomputing/Intermediate-Supercomputing | 28252556226354ef9a01178d6ebb54757b0bea2f | [
"CC0-1.0"
] | 1 | 2020-10-09T07:20:40.000Z | 2020-10-22T04:23:27.000Z | lustre/num_osts_demo.py | PawseySupercomputing/Intermediate-Supercomputing | 28252556226354ef9a01178d6ebb54757b0bea2f | [
"CC0-1.0"
] | 3 | 2018-03-01T05:56:44.000Z | 2018-10-02T15:54:39.000Z | ### LUSTRE - NUMBER OF OSTS DEMO
###------------------------------------------------------------------------------------------------
## In this demo we will show how the number of OSTs used to store a file in Lustre
## can effect the write and write performance from/to that file.
import os, shutil, sys, subprocess
## Let's start by defining an array of OST numbers. Make sure you only use whole
## integers.
# NOTE(review): this script uses Python 2 print statements; `subprocess` is
# imported above but never used (all commands go through os.system).
num_osts = range(1,8)
## For each number of OSTs, we will do the following:
##
## - create a new directory named after the number of OSTs
## - set the newly created directory to be striped over the specified number of OSTs
##   with a stripe isize equal to 1MB
## - generate a random test file in that directory and time how long it takes to write that
##   file in the new directory
##
## Here we go...
print "\n"
for N in num_osts:
    # create the new directory
    dirname = str(N) + "_OSTS"
    command_str = "mkdir " + dirname
    os.system( command_str )
    # set the Lustre stripe size of the new directory to 1 MB. The number of OSTs it is striped
    # over will be equal to N
    stripe_in_bytes = int(1024 * 1024)
    command_str = "lfs setstripe -S " + str(stripe_in_bytes) + " -c " + str(N) + " " + dirname
    os.system( command_str )
    # Let's double-check that newly created directory has the expected Lustre stripe size (using the -S flag)
    # and number of OSTs (using the -c flag)
    command_str = "lfs getstripe -S -c " + dirname
    os.system( command_str )
    # We will use a tool called dd to generate a file filed with null characters (about 8.6GB worth).
    # Let's time how long it takes to generate this file in each of our striped directories.
    # (`time` here is the shell builtin, which is why os.system is used.)
    test_file = os.getcwd() + "/" + dirname + "/test.dat"
    command_str = "time dd if=/dev/zero of=" + test_file + " count=8k bs=1M"
    os.system( command_str )
    # We finish by deleting the test directory
    shutil.rmtree( dirname )
| 34.892857 | 107 | 0.645343 |
3a71938710910cb0b853642bacd902fd82a2de66 | 12,457 | py | Python | tensorflow_federated/python/simulation/baselines/task_data.py | zhihansh/federated-oss | 38cfcb05702ff7297db76d3ccb5f5afef53ca09b | [
"Apache-2.0"
] | 1,918 | 2019-02-22T21:17:28.000Z | 2022-03-30T14:49:53.000Z | tensorflow_federated/python/simulation/baselines/task_data.py | zhihansh/federated-oss | 38cfcb05702ff7297db76d3ccb5f5afef53ca09b | [
"Apache-2.0"
] | 999 | 2019-02-22T21:47:44.000Z | 2022-03-31T11:06:42.000Z | tensorflow_federated/python/simulation/baselines/task_data.py | zhihansh/federated-oss | 38cfcb05702ff7297db76d3ccb5f5afef53ca09b | [
"Apache-2.0"
] | 498 | 2019-02-22T21:17:56.000Z | 2022-03-29T02:54:15.000Z | # Copyright 2021, The TensorFlow Federated Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Classes for loading and preprocessing data for federated baseline tasks."""
import collections
from typing import Any, Callable, List, Optional, Union
import numpy as np
import tensorflow as tf
from tensorflow_federated.python.core.api import computation_base
from tensorflow_federated.python.simulation.datasets import client_data
CentralOrClientData = Union[tf.data.Dataset, client_data.ClientData]
PreprocessFnType = Union[Callable[[tf.data.Dataset], tf.data.Dataset],
computation_base.Computation]
def _get_element_spec(data: CentralOrClientData,
preprocess_fn: Optional[PreprocessFnType] = None):
"""Determines the element type of a dataset after preprocessing."""
if isinstance(data, client_data.ClientData):
if preprocess_fn is not None:
preprocessed_data = data.preprocess(preprocess_fn)
else:
preprocessed_data = data
element_spec = preprocessed_data.element_type_structure
else:
if preprocess_fn is not None:
preprocessed_data = preprocess_fn(data)
else:
preprocessed_data = data
element_spec = preprocessed_data.element_spec
return element_spec
class BaselineTaskDatasets(object):
"""A convenience class for a task's data and preprocessing logic.
Attributes:
train_data: A `tff.simulation.datasets.ClientData` for training.
test_data: The test data for the baseline task. Can be a
`tff.simulation.datasets.ClientData` or a `tf.data.Dataset`.
validation_data: The validation data for the baseline task. Can be one of
`tff.simulation.datasets.ClientData`, `tf.data.Dataset`, or `None` if the
task does not have a validation dataset.
train_preprocess_fn: A callable mapping accepting and return
`tf.data.Dataset` instances, used for preprocessing train datasets. Set to
`None` if no train preprocessing occurs for the task.
eval_preprocess_fn: A callable mapping accepting and return
`tf.data.Dataset` instances, used for preprocessing evaluation datasets.
Set to `None` if no eval preprocessing occurs for the task.
element_type_structure: A nested structure of `tf.TensorSpec` objects
defining the type of the elements contained in datasets associated to this
task.
"""
  def __init__(self,
               train_data: client_data.ClientData,
               test_data: CentralOrClientData,
               validation_data: Optional[CentralOrClientData] = None,
               train_preprocess_fn: Optional[PreprocessFnType] = None,
               eval_preprocess_fn: Optional[PreprocessFnType] = None):
    """Creates a `BaselineTaskDatasets`.

    Args:
      train_data: A `tff.simulation.datasets.ClientData` for training.
      test_data: An optional `tff.simulation.datasets.ClientData` for computing
        test metrics.
      validation_data: A `tff.simulation.datasets.ClientData` for computing
        validation metrics.
      train_preprocess_fn: An optional callable accepting and returning a
        `tf.data.Dataset`, used to perform dataset preprocessing for training.
        If set to `None`, we use the identity map for all train preprocessing.
      eval_preprocess_fn: An optional callable accepting and returning a
        `tf.data.Dataset`, used to perform evaluation (eg. validation, testing)
        preprocessing. If `None`, evaluation preprocessing will be done via the
        identity map.

    Raises:
      ValueError: If `train_data` and `test_data` have different element types
        after preprocessing with `train_preprocess_fn` and `eval_preprocess_fn`,
        or if `validation_data` is not `None` and has a different element type
        than the test data.
    """
    self._train_data = train_data
    self._test_data = test_data
    self._validation_data = validation_data
    self._train_preprocess_fn = train_preprocess_fn
    self._eval_preprocess_fn = eval_preprocess_fn
    # Validate that the preprocess functions, when given, are callable.
    # (NOTE(review): the two attributes above are re-assigned after each
    # check below; the duplication is harmless but redundant.)
    if (train_preprocess_fn is not None and not callable(train_preprocess_fn)):
      raise ValueError('The train_preprocess_fn must be None or callable.')
    self._train_preprocess_fn = train_preprocess_fn
    if (eval_preprocess_fn is not None) and (not callable(eval_preprocess_fn)):
      raise ValueError('The eval_preprocess_fn must be None or callable.')
    self._eval_preprocess_fn = eval_preprocess_fn
    # Train and test must agree on element structure *after* preprocessing.
    post_preprocess_train_type = _get_element_spec(train_data,
                                                   train_preprocess_fn)
    post_preprocess_test_type = _get_element_spec(test_data, eval_preprocess_fn)
    if post_preprocess_train_type != post_preprocess_test_type:
      raise ValueError(
          'The train and test element structures after preprocessing must be '
          'equal. Found train type {} and test type {}'.format(
              post_preprocess_train_type, post_preprocess_test_type))
    if train_preprocess_fn is None:
      self._preprocess_train_data = train_data
    else:
      self._preprocess_train_data = train_data.preprocess(train_preprocess_fn)
    self._element_type_structure = post_preprocess_train_type
    # Validation (when present) must match test on the *raw* element type.
    if validation_data is not None:
      test_type = _get_element_spec(test_data)
      validation_type = _get_element_spec(validation_data)
      if test_type != validation_type:
        raise ValueError(
            'The validation set must be None, or have the same element type '
            'structure as the test data. Found test type {} and validation type'
            ' {}'.format(test_type, validation_type))
    # Lazily populated summary table; see _record_dataset_information.
    self._data_info = None
  @property
  def train_data(self) -> client_data.ClientData:
    """The federated training data (`ClientData`) backing this task."""
    return self._train_data
  @property
  def test_data(self) -> CentralOrClientData:
    """The test data; may be federated (`ClientData`) or centralized."""
    return self._test_data
  @property
  def validation_data(self) -> Optional[CentralOrClientData]:
    """The validation data, or `None` if the task has none."""
    return self._validation_data
  @property
  def train_preprocess_fn(self) -> Optional[PreprocessFnType]:
    """Preprocessing applied to train client datasets; `None` means identity."""
    return self._train_preprocess_fn
  @property
  def eval_preprocess_fn(self) -> Optional[PreprocessFnType]:
    """Preprocessing applied to evaluation datasets; `None` means identity."""
    return self._eval_preprocess_fn
  @property
  def element_type_structure(self):
    """Element spec shared by the preprocessed train and test data."""
    return self._element_type_structure
def _record_dataset_information(self):
"""Records a summary of the train, test, and validation data."""
data_info = collections.OrderedDict()
data_info['header'] = ['Split', 'Dataset Type', 'Number of Clients']
num_train_clients = len(self._train_data.client_ids)
data_info['train'] = ['Train', 'Federated', num_train_clients]
if isinstance(self._test_data, client_data.ClientData):
test_type = 'Federated'
num_test_clients = len(self._test_data.client_ids)
else:
test_type = 'Centralized'
num_test_clients = 'N/A'
data_info['test'] = ['Test', test_type, num_test_clients]
if self._validation_data is not None:
if isinstance(self._validation_data, client_data.ClientData):
validation_type = 'Federated'
num_validation_clients = len(self._validation_data.client_ids)
else:
validation_type = 'Centralized'
num_validation_clients = 'N/A'
data_info['validation'] = [
'Validation', validation_type, num_validation_clients
]
return data_info
def sample_train_clients(
self,
num_clients: int,
replace: bool = False,
random_seed: Optional[int] = None) -> List[tf.data.Dataset]:
"""Samples training clients uniformly at random.
Args:
num_clients: A positive integer representing number of clients to be
sampled.
replace: Whether to sample with replacement. If set to `False`, then
`num_clients` cannot exceed the number of training clients in the
associated train data.
random_seed: An optional integer used to set a random seed for sampling.
If no random seed is passed or the random seed is set to `None`, this
will attempt to set the random seed according to the current system time
(see `numpy.random.RandomState` for details).
Returns:
A list of `tf.data.Dataset` instances representing the client datasets.
"""
random_state = np.random.RandomState(seed=random_seed)
client_ids = random_state.choice(
self._preprocess_train_data.client_ids,
size=num_clients,
replace=replace)
return [
self._preprocess_train_data.create_tf_dataset_for_client(x)
for x in client_ids
]
def get_centralized_test_data(self) -> tf.data.Dataset:
"""Returns a `tf.data.Dataset` of test data for the task.
If the baseline task has centralized data, then this method will return
the centralized data after applying preprocessing. If the test data is
federated, then this method will first amalgamate the client datasets into
a single dataset, then apply preprocessing.
"""
test_data = self._test_data
if isinstance(test_data, client_data.ClientData):
test_data = test_data.create_tf_dataset_from_all_clients()
preprocess_fn = self._eval_preprocess_fn
if preprocess_fn is not None:
test_data = preprocess_fn(test_data)
return test_data
  def summary(self, print_fn: Callable[[str], Any] = print):
    """Prints a summary of the train, test, and validation data.
    The summary will be printed as a table containing information on the type
    of train, test, and validation data (ie. federated or centralized) and the
    number of clients each data structure has (if it is federated). For example,
    if the train data has 10 clients, and both the test and validation data are
    centralized, then this will print the following table:
    ```
    Split      |Dataset Type |Number of Clients |
    =============================================
    Train      |Federated    |10                |
    Test       |Centralized  |N/A               |
    Validation |Centralized  |N/A               |
    _____________________________________________
    ```
    In addition, this will print two lines after the table indicating whether
    train and eval preprocessing functions were passed in. In the example above,
    if we passed in a train preprocessing function but no eval preprocessing
    function, it would also print the lines:
    ```
    Train Preprocess Function: True
    Eval Preprocess Function: False
    ```
    To capture the summary, you can use a custom print function. For example,
    setting `print_fn = summary_list.append` will cause each of the lines above
    to be appended to `summary_list`.
    Args:
      print_fn: An optional callable accepting string inputs. Used to print each
        row of the summary. Defaults to `print` if not specified.
    """
    # The table rows are computed once and cached on the instance.
    if self._data_info is None:
      self._data_info = self._record_dataset_information()
    data_info = self._data_info
    num_cols = len(data_info['header'])
    # Column width = widest cell in that column, plus one space of padding.
    max_lengths = [0 for _ in range(num_cols)]
    for col_values in data_info.values():
      for j, col_value in enumerate(col_values):
        max_lengths[j] = max([len(str(col_value)), max_lengths[j]])
    col_lengths = [a + 1 for a in max_lengths]
    # Render each row left-aligned with a '|' separator after every column.
    row_strings = []
    for col_values in data_info.values():
      row_string = ''
      for (col_val, col_len) in zip(col_values, col_lengths):
        row_string += '{col_val:<{col_len}}|'.format(
            col_val=col_val, col_len=col_len)
      row_strings.append(row_string)
    # '=' ruler under the header, '_' ruler closing the table.
    total_width = sum(col_lengths) + num_cols
    row_strings.insert(1, '=' * total_width)
    row_strings.append('_' * total_width)
    for x in row_strings:
      print_fn(x)
    train_preprocess_fn_exists = (self._train_preprocess_fn is not None)
    print_fn('Train Preprocess Function: {}'.format(train_preprocess_fn_exists))
    eval_preprocess_fn_exists = (self._eval_preprocess_fn is not None)
    print_fn('Eval Preprocess Function: {}'.format(eval_preprocess_fn_exists))
| 41.248344 | 80 | 0.712371 |
e132143330b7cbb0327b0f57be106bbef79b276d | 965 | py | Python | lib/model/roi_crop/buildWin.py | georkap/faster-rcnnwv.pytorch | 56ae1a5f5b620e2c019821a31543dbd3cb72c2f3 | [
"MIT"
] | null | null | null | lib/model/roi_crop/buildWin.py | georkap/faster-rcnnwv.pytorch | 56ae1a5f5b620e2c019821a31543dbd3cb72c2f3 | [
"MIT"
] | null | null | null | lib/model/roi_crop/buildWin.py | georkap/faster-rcnnwv.pytorch | 56ae1a5f5b620e2c019821a31543dbd3cb72c2f3 | [
"MIT"
] | null | null | null | from __future__ import print_function
import os
import torch
from torch.utils.ffi import create_extension

# Build script for the roi_crop FFI extension.  CPU sources are always
# compiled; the CUDA sources, the pre-built kernel object file and the
# cudart library are only included when a CUDA device is available.
sources = ['src/roi_crop.c']
headers = ['src/roi_crop.h']
defines = []
with_cuda = False
extra_objects = []
libraries = ['ATen', '_C']
if torch.cuda.is_available():
    print('Including CUDA code.')
    sources += ['src/roi_crop_cuda.cpp']
    headers += ['src/roi_crop_cuda.h']
    defines += [('WITH_CUDA', None)]
    with_cuda = True
    # BUG FIX: the .cu.o kernel object and cudart were previously linked
    # unconditionally, which breaks the link step on CPU-only machines.
    extra_objects = ['src/roi_crop_cuda_kernel.cu.o']
    libraries = libraries + ['cudart']
this_file = os.path.dirname(os.path.realpath(__file__))
print(this_file)
# Resolve object paths relative to this script so the build works from any cwd.
extra_objects = [os.path.join(this_file, fname) for fname in extra_objects]
ffi = create_extension(
    '_ext.roi_crop',
    headers=headers,
    sources=sources,
    define_macros=defines,
    relative_to=__file__,
    with_cuda=with_cuda,
    extra_objects=extra_objects,
    extra_compile_args=["/MT", "-std=c99"],
    libraries=libraries
)
if __name__ == '__main__':
    ffi.build()
| 24.74359 | 75 | 0.705699 |
38ef4b1577c0a7434ede0dacec38ae6d3cb3f487 | 34,520 | py | Python | scripts/misc/TrainFilm.py | zgjslc/Film-Recovery-master1 | 4497a9930398c9e826ac364056a79e5bcbf6c953 | [
"Apache-2.0"
] | null | null | null | scripts/misc/TrainFilm.py | zgjslc/Film-Recovery-master1 | 4497a9930398c9e826ac364056a79e5bcbf6c953 | [
"Apache-2.0"
] | null | null | null | scripts/misc/TrainFilm.py | zgjslc/Film-Recovery-master1 | 4497a9930398c9e826ac364056a79e5bcbf6c953 | [
"Apache-2.0"
] | null | null | null | from __future__ import print_function
import argparse
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.nn.utils.rnn as rnn_utils
import torch.optim as optim
import os
from torchvision import datasets, transforms
from torch.optim.lr_scheduler import StepLR
from torch.utils.data import Dataset, DataLoader
#import load_data
from load_data_npy import filmDataset
from tensorboardX import SummaryWriter
import numpy as np
from network import Net
# import torchsnooper
import cv2
from genDataNPY import repropocess
from scipy.interpolate import griddata
from write_image import write_image_tensor, write_image_np, write_image, write_image_01, write_image_np, write_cmap_gauss
import time
from cal_times import CallingCounter
# ---- Global run configuration (module-level; read by train/test/main) ----
# training or test
isTrain = False #"""""""""""""""""""""""""""
# setup dataloader
dr_dataset_train_1 = 'npy/' #'Data_final/Part001/Mesh_Film/npy/' # 2000
dr_dataset_train_2 = None #'Data_final/Part003/Mesh_Film/npy/' # 5429
dr_dataset_test = 'npy_test/' #'Data_final/Part002/Mesh_Film/npy/' #1389
# setup model
model_name = 'Try_0915'
preTrain = True #""""""""""""""""""""""""""""
# optimizer
LearningRate = 0.001
os.environ["CUDA_DEVICE_ORDER"] = "PCI_BUS_ID"
os.environ["CUDA_VISIBLE_DEVICES"] = "0,1,2,5,6"
# output
# Flags controlling what gets written during train/test runs.
write_summary = False
write_txt = True
write_image_train = True
write_image_val = False
write_image_test = True #""""""""""""""""""""""""""""
calculate_CC = False
summary_name = model_name
save_dir = 'model/'
output_dir ='/home1/share/film_output/' + model_name + '/' #'output_image/'+ model_name + '/'
# Create the output tree (run dir plus train/ and test/ subdirs) up front.
if not os.path.exists(output_dir):
    os.mkdir(output_dir)
if not os.path.exists(output_dir+'train/'):
    os.mkdir(output_dir+'train/')
if not os.path.exists(output_dir+'test/'):
    os.mkdir(output_dir+'test/')
pretrained_model_dir = '/home1/liuli/film_code/model/Model1_0908_0.0002_model.pkl'
# /home1/liuli/film_code/model/Model3_0912_9.pkl
# /home1/share/liuli/film_code/model/Model1_0908_0.0002_model.pkl
# /home1/share/liuli/film_code/model/Model5_0913_50.pkl
# @torchsnooper.snoop()
def train(args, model, device, train_loader, optimizer, criterion, epoch, writer, output_dir, isWriteImage, isVal=False, test_loader=None):
    """Run one training epoch of the film-unwarping network.

    For each batch the seven ground-truth maps are unpacked, the model is run
    on the distorted input, and a weighted sum of per-map criterion losses
    (uv*4 + albedo*4 + normal + depth + background*2 + 3D-coordinate) drives
    one optimizer step.  Scalars are optionally logged to TensorBoard, sample
    images are optionally dumped for the batch whose index equals
    len(dataset)//batch_size, and (when `isVal`) validation runs every 100
    batches.

    Returns:
        The optimizer learning rate observed on the last batch.
    """
    model.train()
    correct=0
    # print('begin')
    for batch_idx, data in enumerate(train_loader):
        #------------Setup data-----------#
        # Dataset tuple layout (see filmDataset): ori image, albedo, depth,
        # normal, 3D coordinate map, uv map, background mask.
        ori = data[0]
        ab = data[1]
        depth = data[2]
        normal = data[3]
        cmap = data[4]
        uv = data[5]
        background = data[6]
        # ori_1080 = data[7]
        # bmap = data[6]
        ori, ab, depth, normal, uv, cmap, back = ori.to(device), ab.to(device), depth.to(device), \
                                                 normal.to(device), uv.to(device), cmap.to(device), background.to(device) #bmap.to(device)
        optimizer.zero_grad()
        uv_map, coor_map, normal_map, albedo_map, depth_map, back_map = model(ori)
        # define loss
        loss_back = criterion(back_map, back).float()
        loss_cmap = criterion(coor_map, cmap).float() # 3d map = coor_map = cmap
        loss_uv = criterion(uv_map, uv).float()
        loss_depth = criterion(depth_map, depth).float()
        loss_normal = criterion(normal_map, normal).float()
        # loss_bmap = criterion(bw_map, bmap).float()
        # Albedo target uses only the first channel of the GT albedo image.
        loss_ab = criterion(albedo_map, torch.unsqueeze(ab[:,0,:,:], 1).float())
        loss = 4 * loss_uv + 4 * loss_ab + loss_normal + loss_depth + 2 * loss_back + loss_cmap
        loss.backward()
        optimizer.step()
        lrate = get_lr(optimizer)
        # print('0.2', loss)
        if batch_idx % args.log_interval == 0:
            # acc = 100 * correct/(data.size(1)* args.log_interval)
            print('Epoch: {} \nBatch index: {}/{}, \t Lr: {:.8f}, \t '
                  'Training Loss: {:.6f}, \t ab: {:.4f}, \t cmap: {:.4f}, \t uv: {:.6f}, \t normal: {:.4f}, \t depth: {:.4f}, \t back: {:.6f}'.format(
                epoch, batch_idx+1, len(train_loader.dataset)//args.batch_size, lrate, loss.item(),
                loss_ab.item(), loss_cmap.item(), loss_uv.item(), loss_normal.item(), loss_depth.item(), loss_back.item()
            ))
            if write_summary:
                # Global step counts batches across epochs.
                # writer.add_scalar('summary/train_acc', acc, global_step=epoch*len(train_loader)+batch_idx+1)
                writer.add_scalar('summary/train_loss', loss.item(), global_step=epoch*len(train_loader)+batch_idx+1)
                writer.add_scalar('summary/train_cmap_loss', loss_cmap.item(), global_step=epoch * len(train_loader) + batch_idx + 1)
                writer.add_scalar('summary/train_uv_loss', loss_uv.item(),
                                  global_step=epoch * len(train_loader) + batch_idx + 1)
                writer.add_scalar('summary/train_normal_loss', loss_normal.item(),
                                  global_step=epoch * len(train_loader) + batch_idx + 1)
                writer.add_scalar('summary/train_depth_loss', loss_depth.item(),
                                  global_step=epoch * len(train_loader) + batch_idx + 1)
                writer.add_scalar('summary/train_ab_loss', loss_ab.item(),
                                  global_step=epoch * len(train_loader) + batch_idx + 1)
                writer.add_scalar('summary/train_back_loss', loss_back.item(),
                                  global_step=epoch * len(train_loader) + batch_idx + 1)
                writer.add_scalar('summary/lrate', lrate, global_step=epoch * len(train_loader) + batch_idx + 1)
            # acc = 0
            # correct = 0
        if isWriteImage:
            if batch_idx == len(train_loader.dataset)//args.batch_size:
                print('writting image')
                if not os.path.exists(output_dir + 'train/epoch_{}'.format(epoch)):
                    os.mkdir(output_dir + 'train/epoch_{}'.format(epoch))
                # Dump the first 5 samples of this batch: predictions, ground
                # truth, and dewarped results via the uv->backward-map path.
                for k in range(5):
                    albedo_pred = albedo_map[k, :, :, :]
                    uv_pred = uv_map[k, :, :, :]
                    back_pred = back_map[k, :, :, :]
                    ori_gt = ori[k, :, :, :]
                    ab_gt = ab[k, :, :, :]
                    uv_gt = uv[k, :, :, :]
                    back_gt = back[k, :, :, :]
                    bw_gt = uv2bmap(uv_gt, back_gt)
                    bw_pred = uv2bmap(uv_pred, back_pred)
                    # bw_gt = bmap[k, :, :, :]
                    dewarp_ori = bw_mapping(bw_pred, ori_gt, device)
                    dewarp_ab = bw_mapping(bw_pred, ab_gt, device)
                    dewarp_ori_gt = bw_mapping(bw_gt, ori_gt, device)
                    cmap_gt = cmap[k, :, :, :]
                    cmap_pred = coor_map[k, :, :, :]
                    # bw_gt = bw_gt.transpose(0, 1).transpose(1, 2)
                    # bw_pred = bw_pred.transpose(0, 1).transpose(1, 2)
                    # Constant -1 channel appended so 2-channel maps can be
                    # written as 3-channel images.
                    bb = (-1) * torch.ones((256, 256, 1)).to(device)
                    bb_numpy = (-1) * np.ones((256, 256, 1))
                    """pred"""
                    write_image_np(np.concatenate((bw_pred, bb_numpy), 2),
                                   output_dir + 'train/epoch_{}/pred_bw_ind_{}'.format(epoch, k) + '.jpg')
                    write_image(torch.cat([uv_pred.transpose(0, 1).transpose(1, 2), bb], 2),
                                output_dir + 'train/epoch_{}/pred_uv_ind_{}'.format(epoch, k) + '.jpg')
                    write_image_01(back_pred.transpose(0, 1).transpose(1, 2)[:, :, 0],
                                   output_dir + 'train/epoch_{}/pred_back_ind_{}'.format(epoch, k) + '.jpg')
                    write_image(albedo_pred.transpose(0, 1).transpose(1, 2)[:,:,0],
                                output_dir + 'train/epoch_{}/pred_ab_ind_{}'.format(epoch, k) + '.jpg')
                    write_cmap_gauss(cmap_pred.transpose(0, 1).transpose(1, 2),
                                     output_dir + 'train/epoch_{}/pred_3D_ind_{}'.format(epoch, k) + '.jpg')
                    """gt"""
                    write_image(ori_gt.transpose(0, 1).transpose(1, 2),
                                output_dir + 'train/epoch_{}/gt_ori_ind_{}'.format(epoch, k) + '.jpg')
                    write_image(ab_gt.transpose(0, 1).transpose(1, 2)[:,:,0],
                                output_dir + 'train/epoch_{}/gt_ab_ind_{}'.format(epoch, k) + '.jpg')
                    write_cmap_gauss(cmap_gt.transpose(0, 1).transpose(1, 2),
                                     output_dir + 'train/epoch_{}/gt_3D_ind_{}'.format(epoch, k) + '.jpg')
                    write_image_np(np.concatenate((bw_gt, bb_numpy), 2),
                                   output_dir + 'train/epoch_{}/gt_bw_ind_{}'.format(epoch, k) + '.jpg')
                    write_image(torch.cat([uv_gt.transpose(0, 1).transpose(1, 2), bb], 2),
                                output_dir + 'train/epoch_{}/gt_uv_ind_{}'.format(epoch, k) + '.jpg')
                    write_image_01(back_gt.transpose(0, 1).transpose(1, 2)[:,:,0],
                                   output_dir + 'train/epoch_{}/gt_back_ind_{}'.format(epoch, k) + '.jpg')
                    write_image(dewarp_ori_gt,
                                output_dir + 'train/epoch_{}/gt_dewarpOri_ind_{}'.format(epoch, k) + '.jpg')
                    """dewarp"""
                    write_image(dewarp_ori, output_dir + 'train/epoch_{}/dewarp_ori_ind_{}'.format(epoch, k) + '.jpg')
                    write_image(dewarp_ab, output_dir + 'train/epoch_{}/dewarp_ab_ind_{}'.format(epoch, k) + '.jpg')
        if isVal and (batch_idx+1) % 100 == 0:
            # test.count is maintained by the @CallingCounter decorator on test().
            sstep = test.count +1
            test(args, model, device, test_loader, criterion, epoch, writer, output_dir, write_image_val, sstep)
    return lrate
@CallingCounter
def test(args, model, device, test_loader, criterion, epoch, writer, output_dir, isWriteImage, sstep):
    """Evaluate the model over the whole test set.

    Accumulates the same weighted loss as train() over all batches, then
    averages and reports it (stdout, optional txt file, optional TensorBoard
    at step `sstep`).  When `calculate_CC` is set, also accumulates Pearson
    correlation coefficients between predictions and ground truth (including
    the dewarped results).  When `isWriteImage`, dumps per-batch prediction /
    ground-truth / dewarped images.  Call count is tracked by @CallingCounter.
    """
    print('Testing')
    # print('len(test_loader.dataset)', len(test_loader.dataset))
    model.eval() # without batchNorm and dropout
    test_loss = 0
    correct = 0
    cc_uv=0
    cc_cmap=0
    cc_ab=0
    cc_bw = 0
    cc_dewarp_ori =0
    cc_dewarp_ab = 0
    with torch.no_grad():
        # for data in test_loader:
        loss_sum =0
        loss_sum_ab = 0
        loss_sum_cmap = 0
        loss_sum_uv = 0
        loss_sum_normal = 0
        loss_sum_depth = 0
        loss_sum_back = 0
        print(len(test_loader))
        start_time=time.time()
        for batch_idx, data in enumerate(test_loader):
            time0 = time.time()
            # print(test_loader)
            ori = data[0]
            ab = data[1]
            depth = data[2]
            normal = data[3]
            cmap = data[4]
            uv = data[5]
            background = data[6]
            # ori_1080 = data[7]
            ori, ab, depth, normal, uv, cmap, back = ori.to(device), ab.to(device), depth.to(device), \
                                                     normal.to(device), uv.to(device), cmap.to(device), background.to(
                device)  # bmap.to(device)
            uv_map, coor_map, normal_map, albedo_map, depth_map, back_map = model(ori)
            loss_back = criterion(back_map, back).float()
            loss_cmap = criterion(coor_map, cmap).float()
            loss_uv = criterion(uv_map, uv).float()
            loss_depth = criterion(depth_map, depth).float()
            loss_normal = criterion(normal_map, normal).float()
            # loss_bmap = criterion(bw_map, bmap).float()
            loss_ab = criterion(albedo_map, torch.unsqueeze(ab[:, 0, :, :], 1).float())
            # Same loss weighting as in train().
            test_loss = 4 * loss_uv + 4 * loss_ab + loss_normal + loss_depth + 2* loss_back + loss_cmap  # + loss_bmap
            loss_sum = loss_sum + test_loss
            loss_sum_ab += loss_ab
            loss_sum_cmap += loss_cmap
            loss_sum_uv += loss_uv
            loss_sum_normal += loss_normal
            loss_sum_depth += loss_depth
            loss_sum_back += loss_back
            if calculate_CC:
                c_ab = cal_CC(albedo_map, torch.unsqueeze(ab[:, 0, :, :], 1))
                c_uv = cal_CC(uv_map, uv)
                c_cmap = cal_CC(coor_map, cmap)
                bw_pred = uv2bmap4d(uv_map, back_map)
                bw_gt = uv2bmap4d(uv, back)  # [b, h, w, 2]
                c_bw = cal_CC_np(bw_pred, bw_gt)
                """่ฎก็ฎdewarp"""
                # Compare images dewarped with the predicted vs GT backward map.
                dewarp_ori = bw_mapping4d(bw_pred, ori, device)
                dewarp_ori_gt = bw_mapping4d(bw_gt, ori, device)
                c_dewarp_ori = cal_CC(dewarp_ori, dewarp_ori_gt)
                # print('c_dewarp_ori', c_dewarp_ori)
                dewarp_ab = bw_mapping4d(bw_pred, albedo_map, device)
                dewarp_ab_gt = bw_mapping4d(bw_gt, torch.unsqueeze(ab[:, 0, :, :], 1), device)
                c_dewarp_ab = cal_CC_ab(dewarp_ab, dewarp_ab_gt)
                cc_ab += c_ab
                cc_uv += c_uv
                cc_cmap += c_cmap
                cc_bw += c_bw
                cc_dewarp_ori += c_dewarp_ori
                cc_dewarp_ab += c_dewarp_ab
            # print('Epoch: {} \n'
            #       'Test Loss: {:.6f}, \t ab: {:.4f}, \t cmap: {:.4f}, \t uv: {:.4f}, \t normal: {:.4f}, \t depth: {:.4f}, \t back: {:.4f}'.format(
            #     epoch, test_loss.item(),
            #     loss_ab.item(), loss_cmap.item(), loss_uv.item(), loss_normal.item(),
            #     loss_depth.item(), loss_back.item()
            # ))
            # #print('CC_uv: {}\t CC_cmap: {}\t CC_ab: {}\t CC_bw: {}'.format(c_uv, c_cmap, c_ab, c_bw))
            # print('CC_uv: {}\t CC_cmap: {}\t CC_ab: {}'.format(c_uv, c_cmap, c_ab))
            # print(time.time() - time0)
            if isWriteImage:
                if True:  # batch_idx == 0: write all the test images
                    if not os.path.exists(output_dir + 'test/epoch_{}_batch_{}'.format(epoch, batch_idx)):
                        os.mkdir(output_dir + 'test/epoch_{}_batch_{}'.format(epoch, batch_idx))
                    print('writting image')
                    for k in range(args.test_batch_size):
                        # print('k', k)
                        albedo_pred = albedo_map[k, :, :, :]
                        uv_pred = uv_map[k, :, :, :]
                        back_pred = back_map[k, :, :, :]
                        cmap_pred = coor_map[k, :, :, :]
                        depth_pred = depth_map[k, :, :, :]
                        normal_pred = normal_map[k, :, :, :]
                        ori_gt = ori[k, :, :, :]
                        ab_gt = ab[k, :, :, :]
                        uv_gt = uv[k, :, :, :]
                        back_gt = back[k, :, :, :]
                        cmap_gt = cmap[k, :, :, :]
                        depth_gt = depth[k, :, :, :]
                        normal_gt = normal[k, :, :, :]
                        bw_gt = uv2bmap(uv_gt, back_gt)
                        bw_pred = uv2bmap(uv_pred, back_pred)  # [-1,1], [256, 256, 3]
                        # bw_gt = bmap[k, :, :, :]
                        dewarp_ori = bw_mapping(bw_pred, ori_gt, device)
                        dewarp_ab = bw_mapping(bw_pred, ab_gt, device)
                        dewarp_ori_gt = bw_mapping(bw_gt, ori_gt, device)
                        output_dir1 = output_dir + 'test/epoch_{}_batch_{}/'.format(epoch, batch_idx)
                        output_uv_pred = output_dir1 + 'pred_uv_ind_{}'.format(k) + '.jpg'
                        output_back_pred = output_dir1 + 'pred_back_ind_{}'.format(k) + '.jpg'
                        output_ab_pred = output_dir1 + 'pred_ab_ind_{}'.format(k) + '.jpg'
                        output_3d_pred = output_dir1 + 'pred_3D_ind_{}'.format(k) + '.jpg'
                        output_bw_pred = output_dir1 + 'pred_bw_ind_{}'.format(k) + '.jpg'
                        output_depth_pred = output_dir1 + 'pred_depth_ind_{}'.format(k) + '.jpg'
                        output_normal_pred = output_dir1 + 'pred_normal_ind_{}'.format(k) + '.jpg'
                        output_ori = output_dir1 + 'gt_ori_ind_{}'.format(k) + '.jpg'
                        output_uv_gt = output_dir1 + 'gt_uv_ind_{}'.format(k) + '.jpg'
                        output_ab_gt = output_dir1 + 'gt_ab_ind_{}'.format(k) + '.jpg'
                        output_cmap_gt = output_dir1 + 'gt_cmap_ind_{}'.format(k) + '.jpg'
                        output_back_gt = output_dir1 + 'gt_back_ind_{}'.format(k) + '.jpg'
                        output_bw_gt = output_dir1 + 'gt_bw_ind_{}'.format(k) + '.jpg'
                        output_dewarp_ori_gt = output_dir1 + 'gt_dewarpOri_ind_{}'.format(k) + '.jpg'
                        output_depth_gt = output_dir1 + 'gt_depth_ind_{}'.format(k) + '.jpg'
                        output_normal_gt = output_dir1 + 'gt_normal_ind_{}'.format(k) + '.jpg'
                        output_dewarp_ori = output_dir1 + 'dewarp_ori_ind_{}'.format(k) + '.jpg'
                        output_dewarp_ab = output_dir1 + 'dewarp_ab_ind_{}'.format(k) + '.jpg'
                        """pred"""
                        write_image_tensor(uv_pred, output_uv_pred, 'std', device=device)
                        write_image_tensor(back_pred, output_back_pred, '01')
                        write_image_tensor(albedo_pred, output_ab_pred, 'std')
                        write_image_tensor(cmap_pred, output_3d_pred, 'gauss', mean=[0.100, 0.326, 0.289], std=[0.096, 0.332, 0.298])
                        write_image_tensor(depth_pred, output_depth_pred, 'gauss', mean=[0.316], std=[0.309])
                        write_image_tensor(normal_pred, output_normal_pred, 'gauss', mean=[0.584, 0.294, 0.300], std=[0.483, 0.251, 0.256])
                        write_image_np(bw_pred, output_bw_pred)
                        """gt"""
                        write_image_tensor(ori_gt, output_ori, 'std')
                        write_image_tensor(uv_gt, output_uv_gt, 'std', device=device)
                        write_image_tensor(back_gt, output_back_gt, '01')
                        write_image_tensor(ab_gt, output_ab_gt, 'std')
                        write_image_tensor(cmap_gt, output_cmap_gt, 'gauss', mean=[0.100, 0.326, 0.289], std=[0.096, 0.332, 0.298])
                        write_image_tensor(depth_gt, output_depth_gt, 'gauss', mean=[0.316], std=[0.309])
                        write_image_tensor(normal_gt, output_normal_gt, 'gauss', mean=[0.584, 0.294, 0.300], std=[0.483, 0.251, 0.256])
                        write_image_np(bw_gt, output_bw_gt)
                        write_image(dewarp_ori_gt, output_dewarp_ori_gt)
                        """dewarp"""
                        write_image(dewarp_ori, output_dewarp_ori)
                        write_image(dewarp_ab, output_dewarp_ab)
            if (batch_idx+1) % 20 ==0:
                print('It cost {} seconds to test {} images'.format(time.time()-start_time, (batch_idx+1)*args.test_batch_size))
                start_time = time.time()
        # Average the accumulated sums over the number of batches.
        test_loss = loss_sum /(len(test_loader.dataset)/args.test_batch_size)
        test_loss_ab = loss_sum_ab / (len(test_loader.dataset) / args.test_batch_size)
        test_loss_cmap = loss_sum_cmap / (len(test_loader.dataset) / args.test_batch_size)
        test_loss_uv = loss_sum_uv / (len(test_loader.dataset) / args.test_batch_size)
        test_loss_normal = loss_sum_normal / (len(test_loader.dataset) / args.test_batch_size)
        test_loss_depth = loss_sum_depth / (len(test_loader.dataset) / args.test_batch_size)
        test_loss_back = loss_sum_back / (len(test_loader.dataset) / args.test_batch_size)
        if calculate_CC:
            cc_uv = cc_uv / (len(test_loader.dataset)/args.test_batch_size)
            cc_cmap = cc_cmap / (len(test_loader.dataset) / args.test_batch_size)
            cc_ab = cc_ab / (len(test_loader.dataset) / args.test_batch_size)
            cc_bw = cc_bw / (len(test_loader.dataset) / args.test_batch_size)
            cc_dewarp_ori = cc_dewarp_ori / (len(test_loader.dataset) / args.test_batch_size)
            cc_dewarp_ab = cc_dewarp_ab / (len(test_loader.dataset) / args.test_batch_size)
        print('Epoch: {} \n'
              'Test Loss: {:.6f}, \t ab: {:.4f}, \t cmap: {:.4f}, \t uv: {:.6f}, \t normal: {:.4f}, \t depth: {:.4f}, \t back: {:.6f}'.format(
            epoch, test_loss,
            test_loss_ab.item(), test_loss_cmap.item(), test_loss_uv.item(), test_loss_normal.item(), test_loss_depth.item(), test_loss_back.item()
        ))
        if calculate_CC:
            print('CC_uv: {}\t CC_cmap: {}\t CC_ab: {}\t CC_bw: {}\t CC_dewarp_ori: {}\t CC_dewarp_ab: {}'.format(cc_uv, cc_cmap, cc_ab, cc_bw, cc_dewarp_ori, cc_dewarp_ab))
        if write_txt:
            txt_dir = 'output_txt/' + model_name + '.txt'
            f = open(txt_dir,'a')
            f.write('Epoch: {} \t Test Loss: {:.6f}, \t ab: {:.4f}, \t cmap: {:.4f}, \t uv: {:.6f}, \t normal: {:.4f}, \t depth: {:.4f}, \t back: {:.6f} CC_uv: {}\t CC_cmap: {}\t CC_ab: {}\t CC_bw: {}\t CC_dewarp_ori: {}\t CC_dewarp_ab: {}\n'.format(
                epoch, test_loss,
                test_loss_ab.item(), test_loss_cmap.item(), test_loss_uv.item(), test_loss_normal.item(), test_loss_depth.item(), test_loss_back.item(), cc_uv, cc_cmap, cc_ab, cc_bw, cc_dewarp_ori, cc_dewarp_ab))
            f.close()
        if write_summary:
            print('sstep', sstep)
            # writer.add_scalar('test_acc', 100. * correct / len(test_loader.dataset), global_step=epoch+1)
            writer.add_scalar('summary/test_loss', test_loss.item(), global_step=sstep)
            writer.add_scalar('summary/test_loss_ab', test_loss_ab.item(), global_step=sstep)
            writer.add_scalar('summary/test_loss_cmap', test_loss_cmap.item(), global_step=sstep)
            writer.add_scalar('summary/test_loss_uv', test_loss_uv.item(), global_step=sstep)
            writer.add_scalar('summary/test_loss_normal', test_loss_normal.item(), global_step=sstep)
            writer.add_scalar('summary/test_loss_depth', test_loss_depth.item(), global_step=sstep)
            writer.add_scalar('summary/test_loss_back', test_loss_back.item(), global_step=sstep)
def cal_CC(pred, GT):
    """Mean Pearson correlation coefficient between two tensors.

    Args:
        pred: Tensor of shape [B, C, H, W].
        GT: Tensor of the same shape as `pred`.

    Returns:
        The per-sample correlation coefficient (each sample flattened),
        averaged over the batch dimension.
    """
    pred_np = pred.detach().cpu().numpy()
    gt_np = GT.detach().cpu().numpy()
    batch_size = pred_np.shape[0]
    per_sample = [
        np.corrcoef(pred_np[i].reshape(-1), gt_np[i].reshape(-1))[0, 1]
        for i in range(batch_size)
    ]
    return sum(per_sample) / batch_size
def cal_CC_ab(pred, GT):
    """Mean Pearson correlation coefficient for albedo-style tensors.

    Same computation as cal_CC: each batch element is flattened and the
    per-sample coefficients are averaged over the batch.

    Args:
        pred: Tensor whose first dimension is the batch.
        GT: Tensor of the same shape as `pred`.

    Returns:
        The batch-averaged correlation coefficient.
    """
    pred_np = pred.detach().cpu().numpy()
    gt_np = GT.detach().cpu().numpy()
    batch_size = pred_np.shape[0]
    total = 0.0
    for i in range(batch_size):
        flat_pred = pred_np[i, :, :].reshape(-1)
        flat_gt = gt_np[i, :, :].reshape(-1)
        total += np.corrcoef(flat_pred, flat_gt)[0, 1]
    return total / batch_size
def cal_CC_np(pred, GT):
    """Mean Pearson correlation coefficient for numpy arrays [B, H, W, C].

    Args:
        pred: numpy array of shape [B, H, W, C].
        GT: numpy array of the same shape.

    Returns:
        The per-sample (flattened) correlation, averaged over the batch.
    """
    b, h, w, c = pred.shape
    total = 0.0
    for i in range(b):
        total += np.corrcoef(pred[i].reshape(-1), GT[i].reshape(-1))[0, 1]
    return total / b
def uv2bmap(uv, background):
    """Convert a forward UV map into a backward warping grid.

    Foreground pixels of the UV map give, for each source pixel, the target
    (u, v) location; this is inverted by scattering the source coordinates to
    those targets and linearly interpolating over the full pixel grid.

    Generalized from the original hard-coded 256x256 to any spatial size.

    Args:
        uv: Tensor [3, H, W], UV map with values in [-1, 1] (BGR channel
            order; the channel order is flipped internally).
        background: Tensor [1, H, W]; values > 0 mark foreground pixels.

    Returns:
        numpy array [H, W, 2] backward map in [-1, 1]; pixels with no
        mapping (NaN after interpolation) are set to 0.
    """
    uv = uv.detach().cpu().numpy()
    background = background.detach().cpu().numpy()
    h, w = uv.shape[1], uv.shape[2]  # was fixed at 256x256
    img_bgr = (uv + 1) / 2  # [c h w], rescaled to [0, 1]
    img_rgb = img_bgr[::-1, :, :]
    img_rgb[1, :, :] = 1 - img_rgb[1, :, :]  # flip v axis
    s_x = img_rgb[0, :, :] * w
    s_y = img_rgb[1, :, :] * h
    mask = background[0, :, :] > 0
    s_x = s_x[mask]
    s_y = s_y[mask]
    index = np.argwhere(mask)
    t_y = index[:, 0]
    t_x = index[:, 1]
    xi, yi = np.meshgrid(np.arange(w), np.arange(h))
    # Invert the forward map: interpolate source coords over the target grid.
    zx = griddata((s_x, s_y), t_x, (xi, yi), method='linear')
    zy = griddata((s_x, s_y), t_y, (xi, yi), method='linear')
    backward_img = np.stack([zy, zx], axis=2)
    backward_img[np.isnan(backward_img)] = 0
    # Normalize each axis by its own extent back into [-1, 1].
    backward_img = (backward_img / np.array([h, w])) * 2 - 1
    return backward_img
def uv2bmap4d(uv, background):
    """Batched version of uv2bmap.

    Generalized from the original hard-coded 256x256 to any spatial size.

    Args:
        uv: Tensor [B, 3, H, W], UV maps with values in [-1, 1].
        background: Tensor [B, 1, H, W]; values > 0 mark foreground pixels.

    Returns:
        numpy array [B, H, W, 2] of backward maps in [-1, 1]; unmapped
        pixels are 0.
    """
    batch = uv.size()[0]
    h, w = uv.size()[2], uv.size()[3]  # was fixed at 256x256
    uv = uv.detach().cpu().numpy()
    background = background.detach().cpu().numpy()
    output = np.zeros(shape=(0, h, w, 2))
    for c in range(batch):
        img_bgr = (uv[c, :, :, :] + 1) / 2  # [c h w]
        img_rgb = img_bgr[::-1, :, :]
        img_rgb[1, :, :] = 1 - img_rgb[1, :, :]
        s_x = (img_rgb[0, :, :] * w)
        s_y = (img_rgb[1, :, :] * h)
        mask = background[c, 0, :, :] > 0
        s_x = s_x[mask]
        s_y = s_y[mask]
        index = np.argwhere(mask)
        t_y = index[:, 0]
        t_x = index[:, 1]
        xi, yi = np.meshgrid(np.arange(w), np.arange(h))
        # Invert the forward map per batch element (see uv2bmap).
        zx = griddata((s_x, s_y), t_x, (xi, yi), method='linear')
        zy = griddata((s_x, s_y), t_y, (xi, yi), method='linear')
        backward_img = np.stack([zy, zx], axis=2)
        backward_img[np.isnan(backward_img)] = 0
        backward_img = (backward_img / np.array([h, w])) * 2 - 1  # [h, w, 2]
        backward_img = np.expand_dims(backward_img, axis=0)
        output = np.concatenate((output, backward_img), 0)
    return output
def bw_mapping(bw_map, image, device):
    """Dewarp a single image with a backward map via bilinear grid sampling.

    Args:
        bw_map: numpy array [H, W, 2], sampling coordinates in [-1, 1].
        image: Tensor [C, H, W] to be dewarped.
        device: torch device the grid is moved to (must match `image`).

    Returns:
        The dewarped image as a squeezed channels-last tensor.
    """
    image = torch.unsqueeze(image, 0)  # [1, C, H, W]
    # The stored backward map's x/y convention matches the transposed image,
    # so sampling is done on image.transpose(2, 3).
    image_t = image.transpose(2, 3)
    grid = torch.from_numpy(bw_map).type(torch.float32).to(device)
    grid = torch.unsqueeze(grid, 0)  # [1, H, W, 2]
    # BUG FIX: the original also ran grid_sample on the untransposed image
    # into a variable that was never used; that dead call has been removed.
    output_t = F.grid_sample(input=image_t, grid=grid)
    output_t = output_t.transpose(1, 2).transpose(2, 3)
    return output_t.squeeze()
def bw_mapping4d(bw_map, image, device):
    """Batched dewarping with backward maps via bilinear grid sampling.

    Args:
        bw_map: numpy array [B, H, W, 2], sampling coordinates in [-1, 1].
        image: Tensor [B, C, H, W] to be dewarped.
        device: torch device the grid is moved to (must match `image`).

    Returns:
        The dewarped batch as a squeezed channels-last tensor.
    """
    # Same x/y convention as bw_mapping: sample the transposed image.
    image_t = image.transpose(2, 3)
    grid = torch.from_numpy(bw_map).type(torch.float32).to(device)
    # BUG FIX: the original also ran grid_sample on the untransposed image
    # into a variable that was never used; that dead call has been removed.
    output_t = F.grid_sample(input=image_t, grid=grid)
    output_t = output_t.transpose(1, 2).transpose(2, 3)
    return output_t.squeeze()
# def write_image(image_float, dir):
# image_uint8 = ((image_float+1)/2 *255).type(torch.uint8).cpu().numpy()
# cv2.imwrite(dir, image_uint8)
#
# def write_image_np(image_float, dir):
# image_uint8 = ((image_float+1)/2 *255).astype(np.uint8)
# cv2.imwrite(dir, image_uint8)
#
# def write_cmap_gauss(image_float, dir, mean=[0.100, 0.326, 0.289], std=[0.096, 0.332, 0.298]):
# image_float = repropocess(image_float.detach().cpu().numpy(), mean, std)
# image_uint8 = (image_float *255).astype(np.uint8)
# cv2.imwrite(dir, image_uint8)
#
# def write_image_01(image_float, dir):
# image_uint8 = (image_float *255).type(torch.uint8).cpu().numpy()
# cv2.imwrite(dir, image_uint8)
def get_lr(optimizer):
    """Return the learning rate of the optimizer's first parameter group.

    Args:
        optimizer: A ``torch.optim.Optimizer`` instance.

    Returns:
        The current learning rate as a float.
    """
    # Direct indexing replaces the original loop that returned on its first
    # iteration — only the first group's lr was ever used.
    return float(optimizer.param_groups[0]['lr'])
def main():
    """Entry point: parse CLI flags, build the film-dataset loaders and a
    DataParallel model, then run the train/test loop with per-epoch LR decay
    and checkpointing.

    Relies on module-level globals not visible in this block: LearningRate,
    preTrain, dr_dataset_test, dr_dataset_train_1/2, pretrained_model_dir,
    write_summary, summary_name, output_dir, save_dir, model_name,
    filmDataset, Net, train, test.
    """
    # Training settings
    # global sstep
    sstep = 0
    parser = argparse.ArgumentParser(description='PyTorch MNIST Example')
    parser.add_argument('--batch_size', type=int, default=20, metavar='N', # 50 for 4 gpu
                        help='input batch size for training (default: 64)')
    parser.add_argument('--test_batch_size', type=int, default=1, metavar='N', # 100 for 4 gpu
                        help='input batch size for testing (default: 1000)')
    parser.add_argument('--epochs', type=int, default=100, metavar='N',
                        help='number of epochs to train (default: 14)')
    parser.add_argument('--lr', type=float, default=LearningRate, metavar='LR',
                        help='learning rate (default: 1.0)')
    parser.add_argument('--gamma', type=float, default=0.85, metavar='M',
                        help='Learning rate step gamma (default: 0.7)')
    parser.add_argument('--no_cuda', action='store_true', default=False,
                        help='disables CUDA training')
    parser.add_argument('--seed', type=int, default=1, metavar='S',
                        help='random seed (default: 1)')
    parser.add_argument('--log_interval', type=int, default=1, metavar='N',
                        help='how many batches to wait before logging training status')
    parser.add_argument('--save_model', action='store_true', default=True,
                        help='For Saving the current Model')
    parser.add_argument('--visualize_para', action='store_true', default=True,
                        help='For visualizing the Model parameters')
    parser.add_argument('--pretrained', action='store_true', default=preTrain,
                        help='Load model parameters from pretrained model')
    args = parser.parse_args()
    use_cuda = not args.no_cuda and torch.cuda.is_available()
    torch.manual_seed(args.seed)
    device = torch.device("cuda" if use_cuda else "cpu")
    kwargs = {'num_workers': 1, 'pin_memory': True} if use_cuda else {}
    dataset_test = filmDataset(npy_dir=dr_dataset_test)
    dataset_test_loader = DataLoader(dataset_test,
                                     batch_size=args.test_batch_size,
                                     # num_workers=1,
                                     shuffle=False,
                                     **kwargs)
    dataset_train = filmDataset(npy_dir=dr_dataset_train_1, npy_dir_2=dr_dataset_train_2)
    dataset_train_loader = DataLoader(dataset_train,
                                      batch_size=args.batch_size,
                                      # num_workers=1,
                                      shuffle=True,
                                      **kwargs)
    # model = Net().to(device)
    model = Net()
    model = torch.nn.DataParallel(model, device_ids=[0])
    model = model.to(device)
    start_epoch = 1
    start_lr = args.lr
    # NOTE(review): this unconditionally disables the checkpoint-restore
    # branch below regardless of the --pretrained flag; presumably a
    # debugging override left in — confirm before shipping.
    args.pretrained = False
    if args.pretrained:
        # pretrained_dict = torch.load(pretrained_model_dir, map_location=None)
        # model_dict = model.state_dict()
        # # 1. filter out unnecessary keys
        # pretrained_dict = {k: v for k, v in pretrained_dict.items() if k in model_dict}
        # # 2. overwrite entries in the existing state dict
        # model_dict.update(pretrained_dict)
        # model.load_state_dict(model_dict)
        pretrained_dict = torch.load(pretrained_model_dir, map_location=None)
        model.load_state_dict(pretrained_dict['model_state'])
        # Resume LR and epoch from the checkpoint metadata.
        start_lr = pretrained_dict['lr']
        start_epoch = pretrained_dict['epoch']
        # start_lr = 0.00005
    optimizer = optim.Adam(model.parameters(), lr=start_lr)
    # Adadelta(model.parameters(), lr=args.lr)
    criterion = torch.nn.MSELoss()
    if args.visualize_para:
        # Dump every parameter tensor's name and shape for inspection.
        for name, parameters in model.named_parameters():
            print(name, ':', parameters.size())
    if write_summary:
        if not os.path.exists('summary/' + summary_name +'_start_epoch{}'.format(start_epoch)):
            os.mkdir('summary/' + summary_name+'_start_epoch{}'.format(start_epoch))
        writer = SummaryWriter(logdir='summary/' + summary_name+'_start_epoch{}'.format(start_epoch))
        print(summary_name)
    else:
        # Placeholder so `writer` is always bound; downstream code is
        # expected to ignore it when summaries are disabled.
        writer = 0
    scheduler = StepLR(optimizer, step_size=1, gamma=args.gamma)
    # scheduler = optim.lr_scheduler.CosineAnnealingLR(optimizer, T_max=5,eta_min=4e-08)
    print('start_lr', start_lr)
    print('start_epoch', start_epoch)
    isTrain = True
    write_image_train = False
    write_image_test = False
    """start training/ test"""
    for epoch in range(start_epoch, args.epochs + 1):
        if isTrain:
            lr = train(args, model, device, dataset_train_loader, optimizer, criterion, epoch, writer, output_dir,
                       write_image_train, isVal=True, test_loader=dataset_test_loader)
            # `test.count` is assumed to be a counter attribute maintained by
            # the module-level test() function — TODO confirm.
            sstep = test.count + 1
            test(args, model, device, dataset_test_loader, criterion, epoch, writer, output_dir, write_image_test,
                 sstep)
        else:
            # Evaluation-only mode: run test once, then leave the loop.
            sstep = test.count +1
            test(args, model, device, dataset_test_loader, criterion, epoch, writer, output_dir, write_image_test, sstep)
            break
        # if epoch % 2 ==0:
        scheduler.step() # change lr with gamma decay
        if isTrain and args.save_model:
            state ={'epoch': epoch+1, # saving the next epoch
                    'lr': lr, # saving the lr of next epoch
                    'model_state': model.state_dict(),
                    'optimizer_state': optimizer.state_dict()
                    }
            torch.save(state, save_dir+"{}_{}.pkl".format(model_name, epoch))
def exist_or_make(path):
    """Create directory `path` if it does not already exist.

    Single-level creation, like os.mkdir (a missing parent still raises).
    Uses EAFP instead of the original exists()/mkdir() pair, which raced:
    another process could create the directory between the check and the
    call and crash us with FileExistsError.
    """
    try:
        os.mkdir(path)
    except FileExistsError:
        pass
# Script entry point: run the full training/evaluation loop.
if __name__ == '__main__':
    main()
# ckpt = torch.load('model/' + pretrain_name + '/model_' + str(pretrain_epoch) + '.pth')
# model_dict = model.state_dict()
# restore_dict = {}
# for (k, v) in ckpt.items():
# if k in model_dict:
# restore_dict[k] = v
# model_dict.update(restore_dict)
# model.load_state_dict(model_dict) | 45.782493 | 246 | 0.56095 |
6a458a99f5d27d7b8d3c28b3fe766161c1bb6c30 | 8,274 | py | Python | codenode/frontend/async/backend.py | ccordoba12/codenode | a1e9095f35a179a8dcfe3e4054760864118b4e2b | [
"BSD-3-Clause"
] | 1 | 2015-08-14T04:38:41.000Z | 2015-08-14T04:38:41.000Z | codenode/frontend/async/backend.py | ccordoba12/codenode | a1e9095f35a179a8dcfe3e4054760864118b4e2b | [
"BSD-3-Clause"
] | null | null | null | codenode/frontend/async/backend.py | ccordoba12/codenode | a1e9095f35a179a8dcfe3e4054760864118b4e2b | [
"BSD-3-Clause"
] | null | null | null | #########################################################################
# Copyright (C) 2007, 2008, 2009
# Alex Clemesha <alex@clemesha.org> & Dorian Raymer <deldotdr@gmail.com>
#
# This module is part of codenode, and is distributed under the terms
# of the BSD License: http://www.opensource.org/licenses/bsd-license.php
#########################################################################
import os
import uuid
import pickle
#from StringIO import StringIO
from zope.interface import implements
from twisted.internet import defer
from twisted.web import xmlrpc
from twisted.web import resource
from twisted.web import server
from twisted.web.client import getPage
from twisted.python import log
from django.utils import simplejson as json
from django.conf import settings
from codenode.frontend.notebook import models as notebook_models
def write_image(image):
    """Persist a PNG payload under settings.PLOT_IMAGES with a random name.

    `image` is the raw PNG byte string produced by an engine plot. Returns
    the generated file name (not the full path).
    """
    fn = str(uuid.uuid4()) + '.png'
    fullpath = os.path.join(settings.PLOT_IMAGES, fn)
    # Binary mode: the payload is image bytes, and text mode would corrupt
    # them on platforms with newline translation. The context manager
    # guarantees the handle is closed even if write() raises.
    with open(fullpath, 'wb') as f:
        f.write(image)
    return fn
class BackendAdmin:
    """
    This is a base/mix in class for conveniently admin related requests
    (not specific to an engine). These functions still use the rpc client
    instead of the web client. The usage of both clients is kind of an
    experiment still, but the main reason is that the admin client is for
    making specific method calls on the backend. Engine requests are
    (ideally) just passed along to the engine with out inspecting the
    details (which method, etc.); in practice the requests are still
    inspected though, so the natural seam between these interactions hasn't
    fully revealed itself yet...

    NOTE: this mixin relies on the host class providing self.base_url
    (see BackendClient below).
    """
    @defer.inlineCallbacks
    def newAccessId(self, engine_type):
        """
        Backend administrative call: ask the backend's /admin XML-RPC
        endpoint to allocate an engine of `engine_type` and yield the
        resulting access id (via a Deferred).
        """
        url = os.path.join(self.base_url, 'admin', '')
        client = xmlrpc.Proxy(url)
        access_id = yield client.callRemote('allocateEngine', str(engine_type))
        defer.returnValue(access_id)
class BackendClient(object, BackendAdmin):
    """
    Has address to use for all requests.

    NOTE(review): mixing `object` with the classic-class mixin BackendAdmin
    is fragile under Python 2 — confirm the MRO behaves as intended.
    """
    def __init__(self, address):
        """Remember the backend's base URL; all requests are built from it."""
        self.base_url = str(address)
    def __repr__(self):
        return 'BackendClient("%s")' % self.base_url
    def __str__(self):
        return 'BackendClient @ %s)' % self.base_url
    @defer.inlineCallbacks
    def _send(self, access_id, msg):
        """
        Send to backend engine: POST the already-serialized message to
        <base_url>/engine/<access_id> and yield the raw response body.
        """
        url = os.path.join(self.base_url, 'engine', str(access_id))
        result = yield getPage(url,
                               contextFactory=None,
                               method='POST',
                               postdata=str(msg))
        defer.returnValue(result)
    @defer.inlineCallbacks
    def send(self, access_id, msg):
        """
        Use JSON for serialization: dump `msg`, POST via _send, and yield
        the decoded JSON response.
        """
        ser_msg = json.dumps(msg)
        log.msg('to _send: %s' % ser_msg)
        result_ser = yield self._send(access_id, ser_msg)
        result = json.loads(result_ser)
        defer.returnValue(result)
class BackendBus(object):
    """
    This holds on to backend clients (which cache backend address).
    The context coming in is the notebook id. Look up the backend to pass
    message to; create backend if one does not exist yet.
    In comes browser to frontend bus message.
    Message has method attribute in header. Depending on this method, route
    message to appropriate component.
    Handle response from backend client.
     - OK, relay response
     - ERR, check reason, take correcting action, or propagate error

    backends dict of backend_name to backend_client instance
    notebook_map dict of notebook_id to (backend, access_id,)
    """
    backendFactory = BackendClient
    def __init__(self):
        """Start with empty backend and notebook caches."""
        self.backends = {}
        self.notebook_map = {}
    def addBackend(self, backend_name, backend_address):
        """Create, cache and return a client for the named backend."""
        backend = self.backendFactory(backend_address)
        self.backends[backend_name] = backend
        return backend
    def addNotebook(self, notebook_id):
        """Resolve notebook_id (via the Django ORM) to its backend client
        and engine access id, caching the pair in notebook_map."""
        nb = notebook_models.Notebook.objects.get(guid=notebook_id)
        access_id = nb.backend.all()[0].access_id
        backend_name = nb.backend.all()[0].engine_type.backend.name
        try:
            backend = self.backends[backend_name]
        except KeyError:
            # First request for this backend: build and cache a client.
            backend_address = nb.backend.all()[0].engine_type.backend.address
            backend = self.addBackend(backend_name, backend_address)
        # check key d n e
        self.notebook_map[notebook_id] = (backend, access_id,)
        return (backend, access_id,)
    @defer.inlineCallbacks
    def handleRequest(self, notebook_id, msg):
        """Forward `msg` to the notebook's backend; on InvalidAccessId,
        allocate a fresh engine, persist the new id, and retry once."""
        try:
            backend, access_id = self.notebook_map[notebook_id]
        except KeyError:
            backend, access_id = self.addNotebook(notebook_id)
        log.msg('notebooks backend: %s' % backend)
        result = yield backend.send(access_id, msg)
        status = result['status']
        if status == 'OK':
            defer.returnValue(result['response'])
        if status == 'ERR':
            """check error"""
            log.err('Backend error %s' % str(result['response']))
            err = result['response']
            if err == 'InvalidAccessId':
                #self.reset_access_id(self, notebook_id)
                nb = notebook_models.Notebook.objects.get(guid=notebook_id)
                engine_type = str(nb.backend.all()[0].engine_type.name)
                new_access_id = yield backend.newAccessId(engine_type)
                # NOTE(review): this assigns onto an object from a fresh
                # queryset and then saves the Notebook, not the related
                # backend record — confirm the new access_id is persisted.
                nb.backend.all()[0].access_id = new_access_id
                nb.save()
                self.notebook_map[notebook_id] = (backend, new_access_id,)
                result_retry = yield backend.send(new_access_id, msg)
                status = result_retry['status']
                # TODO: Better handling. return no matter what for now
                defer.returnValue(result_retry['response'])
class EngineSessionAdapter(resource.Resource):
    """
    Per-notebook twisted resource: deserializes a JSON engine message from
    the request body, routes it through the engine bus, and writes the JSON
    response back asynchronously.

    There should be a better way to do this, have to figure that out.
    """
    isLeaf = True
    def __init__(self, engine_bus, notebook_id):
        resource.Resource.__init__(self)
        self.engine_bus = engine_bus
        self.notebook_id = notebook_id
        self.putChild("", self)
    def render(self, request):
        """
        This is where we un-serialize the content sent between the frontend
        and backend engine bus.
        """
        content = request.content.read()
        if content:
            msg = json.loads(content)
            log.msg('Engine message deserialized %s' % str(msg))
        else:
            # NOTE(review): bare return (None) for an empty body — twisted
            # expects a string or NOT_DONE_YET from render(); confirm this
            # path is ever hit and behaves acceptably.
            return
        cellid = msg.get('cellid', '')
        # handleRequest returns a Deferred; the response is written when it
        # fires, hence NOT_DONE_YET below.
        d = self.engine_bus.handleRequest(self.notebook_id, msg)
        d.addCallback(self._success, request, cellid)
        d.addErrback(self._fail, request)
        return server.NOT_DONE_YET
    def _success(self, data, request, cellid):
        """
        horrible. not always eval...
        """
        log.msg('handling data: %s' % str(data))
        if 'cellstyle' in data and data['cellstyle'] == 'outputimage':
            # NOTE(review): pickle.loads on data received from the backend
            # is only safe if the backend is fully trusted.
            image_data = pickle.loads(data['out']).getvalue()
            image_file_name = write_image(image_data)
            data['out'] = image_file_name
        data['cellid'] = cellid
        jsobj = json.dumps(data)
        request.write(jsobj)
        request.finish()
    def _fail(self, reason, request):
        """
        Errback: log the failure; expose the traceback to the client only
        in DEBUG deployments.

        Add conditional to return real traceback...only do it if
        settings.DEBUG is true, or something.
        """
        log.err(reason)
        if settings.DEBUG:
            request.write(str(reason))
        else:
            request.write('err') #XXX improve error handling
        request.finish()
class EngineBusAdapter(resource.Resource):
    """Root twisted resource: maps each /<notebook_id> path segment onto a
    per-notebook EngineSessionAdapter sharing one engine bus."""
    def __init__(self, engine_bus):
        resource.Resource.__init__(self)
        self.engine_bus = engine_bus
        # Serve the empty child path with this resource itself.
        self.putChild("", self)
    def getChild(self, path, request):
        """XXX Can this refer back to itself?
        """
        # `path` is the notebook id segment of the URL.
        return EngineSessionAdapter(self.engine_bus, path)
| 32.447059 | 79 | 0.614213 |
944f881008eb5d84b47a36ecee4177f046e4da69 | 3,051 | py | Python | videodownloader/main.py | JakeWharton/py-videodownloader | 787e88ad46280288f559e728598fcb2a8487129c | [
"Apache-2.0"
] | 41 | 2015-02-02T02:05:40.000Z | 2021-08-11T04:36:21.000Z | videodownloader/main.py | clsung/py-videodownloader | 0e560d768b8445c39610e18667f5f4954ecbb253 | [
"Apache-2.0"
] | 2 | 2016-08-20T16:50:55.000Z | 2017-07-06T15:36:16.000Z | videodownloader/main.py | clsung/py-videodownloader | 0e560d768b8445c39610e18667f5f4954ecbb253 | [
"Apache-2.0"
] | 15 | 2015-03-12T08:55:33.000Z | 2020-06-04T22:51:42.000Z | #!/usr/bin/env python
__license__ = '''
Copyright 2010 Jake Wharton
py-video-downloader is free software: you can redistribute it and/or modify
it under the terms of the GNU Lesser General Public License as
published by the Free Software Foundation, either version 3 of
the License, or (at your option) any later version.
py-video-downloader is distributed in the hope that it will be useful, but
WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU Lesser General Public License for more details.
You should have received a copy of the GNU Lesser General
Public License along with py-video-downloader. If not, see
<http://www.gnu.org/licenses/>.
'''
from optparse import OptionParser, OptionGroup
import sys
import os
import urllib2
from videodownloader import providers
def main():
    """Command-line entry point (Python 2).

    With no video IDs given, prints the format table of the chosen provider;
    otherwise downloads each video ID via the provider class named by -p
    (looked up as an attribute of the `providers` package).
    """
    DEFAULT_DEBUG = False
    print 'videodownloader-2.0.0 - by Jake Wharton <jakewharton@gmail.com>'
    print
    parser = OptionParser(usage="Usage: %prog -p PROVIDER [-f FMT] [-d DIR] videoID [... videoID]")
    provider_list = ', '.join(["'%s'" % provider for provider in providers.__all__])
    parser.add_option('-e', '--ext', dest='ext', default=providers.Provider.DEFAULT_EXT, help='Manually override video extension.')
    parser.add_option('-f', '--format', dest='format', help='Format of video to download. Run with no video IDs for a provider specific list.')
    parser.add_option('-t', '--title', dest='title', help='Manually override video title.')
    parser.add_option('-p', '--provider', dest='provider', help='Online provider from where to download the video. (Available: %s)'%provider_list)
    parser.add_option('--debug', dest='is_debug', action='store_true', default=DEFAULT_DEBUG, help='Enable debugging output.')
    options, videos = parser.parse_args()
    try:
        # Resolve the provider class by name; also covers a missing -p
        # (getattr with None raises and lands here).
        provider = getattr(providers, options.provider)
    except Exception:
        print 'ERROR: Could not load provider "%s".' % options.provider
        sys.exit(1)
    if len(videos) == 0:
        #Print out a format list for that provider
        print '%-10s %-40s' % ('Format', 'Description')
        print '-'*10, '-'*40
        for format in provider.FORMATS.iteritems():
            print '%-10s %-40s' % format
    else:
        for video in videos:
            v = provider(video, title=options.title, format=options.format, ext=options.ext, debug=options.is_debug)
            print 'Downloading "%s"...' % v.title
            try:
                v.run()
            except KeyboardInterrupt:
                print "WARNING: Aborting download."
                #Try to delete partially completed file
                try:
                    os.remove(v.full_filename)
                except IOError:
                    print 'WARNING: Could not remove partial file.'
            except (urllib2.HTTPError, IOError) as e:
                if options.is_debug:
                    print e
                print "ERROR: Fatal HTTP error."
    print
    print 'Done.'
| 39.115385 | 146 | 0.654539 |
9046d3c42ed9ed6a66c37564d99a3423ad7e6cda | 22,247 | py | Python | tensorflow_models/GLMP.py | shiquanyang/GLMP | 7f085bdd66aa414c8a4efd301810ad81160ac563 | [
"MIT"
] | null | null | null | tensorflow_models/GLMP.py | shiquanyang/GLMP | 7f085bdd66aa414c8a4efd301810ad81160ac563 | [
"MIT"
] | 3 | 2019-09-17T10:56:04.000Z | 2019-10-16T00:13:28.000Z | tensorflow_models/GLMP.py | shiquanyang/GLMP | 7f085bdd66aa414c8a4efd301810ad81160ac563 | [
"MIT"
] | null | null | null | import tensorflow as tf
from utils.config import *
from tensorflow_models.encoder import ContextRNN
from tensorflow_models.ExternalKnowledge import ExternalKnowledge
from tensorflow_models.decoder import LocalMemoryDecoder
import random
import numpy as np
from tensorflow.python.framework import ops
import json
from utils.measures import wer, moses_multi_bleu
from utils.tensorflow_masked_cross_entropy import *
from utils.utils_general import *
import pdb
class GLMP(tf.keras.Model):
    """TensorFlow port of GLMP (Global-to-Local Memory Pointer): a
    ContextRNN encoder, an ExternalKnowledge memory, and a
    LocalMemoryDecoder trained with three losses (global pointer, sketch
    vocabulary, local pointer). Batches arrive as positional tuples; the
    inline comments name the index -> field mapping where used."""
    def __init__(self, hidden_size, lang, max_resp_len, path, task, lr, n_layers, dropout):
        super(GLMP, self).__init__()
        # self.name = 'GLMP'
        self.task = task
        self.input_size = lang.n_words
        self.output_size = lang.n_words
        self.hidden_size = hidden_size
        self.lang = lang
        self.lr = lr
        self.n_layers = n_layers
        self.dropout = dropout
        self.max_resp_len = max_resp_len
        self.decoder_hop = n_layers
        self.softmax = tf.keras.layers.Softmax(0)
        self.encoder = ContextRNN(lang.n_words, hidden_size, dropout)
        self.extKnow = ExternalKnowledge(lang.n_words, hidden_size, n_layers, dropout)
        self.decoder = LocalMemoryDecoder(self.encoder.embedding, lang,
                                          hidden_size, self.decoder_hop, dropout)
        self.checkpoint = tf.train.Checkpoint(encoder=self.encoder,
                                              extKnow=self.extKnow,
                                              decoder=self.decoder)
        if path:
            self.checkpoint.restore(path) # path include: directory + prefix + id.
        # One Adam optimizer per sub-network.
        self.encoder_optimizer = tf.keras.optimizers.Adam(lr)
        self.extKnow_optimizer = tf.keras.optimizers.Adam(lr)
        self.decoder_optimizer = tf.keras.optimizers.Adam(lr)
        # TODO: lr scheduler.
        self.train_loss = tf.keras.metrics.Mean('train_loss', dtype=tf.float32)
        self.train_loss_g = tf.keras.metrics.Mean('train_loss_global', dtype=tf.float32)
        self.train_loss_v = tf.keras.metrics.Mean('train_loss_vocab', dtype=tf.float32)
        self.train_loss_l = tf.keras.metrics.Mean('train_loss_local', dtype=tf.float32)
        self.train_accuracy = tf.keras.metrics.SparseCategoricalAccuracy('train_accuracy')
        # self.criterion_bce = tf.nn.sigmoid_cross_entropy_with_logits() # need to check if this loss function actually equals pytorch criterion_bce.
        self.reset()
    def print_loss(self):
        """Return a formatted string of the running average losses and bump
        the averaging counter."""
        print_loss_avg = self.loss / self.print_every
        print_loss_g = self.loss_g / self.print_every
        print_loss_v = self.loss_v / self.print_every
        print_loss_l = self.loss_l / self.print_every
        self.print_every += 1
        return 'L:{:.2f}, LE:{:.2f}, LG:{:.2f}, LP:{:.2f}'.format(
            print_loss_avg, print_loss_g, print_loss_v, print_loss_l)
    def reset(self):
        """Zero the running loss accumulators."""
        self.loss, self.print_every, self.loss_g, self.loss_v, self.loss_l = 0.0, 1.0, 0.0, 0.0, 0.0
    def save_model(self, dec_type):
        """Checkpoint the model under a directory name derived from the
        hyper-parameters plus `dec_type` (a metric tag, e.g. 'BLEU-...')."""
        name_data = "KVR/" if self.task=='' else "BABI/"
        layer_info = str(self.n_layers)
        directory = 'save/GLMP-'+args["addName"]+name_data+str(self.task)+'HDD'+\
                    str(self.hidden_size)+'BSZ'+str(args['batch'])+'DR'+str(self.dropout)+\
                    'L'+layer_info+'lr'+str(self.lr)+str(dec_type)
        if not os.path.exists(directory):
            os.makedirs(directory)
        checkpoint_prefix = directory + '/ckpt'
        self.checkpoint.save(file_prefix=checkpoint_prefix)
    def encode_and_decode(self, data, max_target_length, use_teacher_forcing,
                          get_decoded_words, training):
        """Run one encoder / memory / decoder pass over a batch tuple.

        Indices used from `data`: 0 context_arr, 2 sketch_response,
        3 conv_arr, 7 context_arr_plain, 10 context_arr_lengths,
        12 conv_arr_lengths, 13 kb_arr_lengths.
        """
        # build unknown mask for memory if training mode
        if args['unk_mask'] and training: # different: training flag need to be fed from outside explicitly.
            story_size = data[0].shape # data[0]: context_arr.
            rand_mask = np.ones(story_size, dtype=np.float32)
            # Bernoulli word-dropout mask on the first token slot.
            bi_mask = np.random.binomial([np.ones((story_size[0], story_size[1]), dtype=np.float32)],
                                         1 - self.dropout)[0]
            rand_mask[:, :, 0] = rand_mask[:, :, 0] * bi_mask
            conv_rand_mask = np.ones(data[3].shape, dtype=np.float32) # data[3]: conv_arr.
            for bi in range(story_size[0]):
                start, end = data[13][bi] - 1, data[13][bi] - 1 + data[12][bi] # data[13]: kb_arr_lengths, data[12]: conv_arr_lengths.
                # start, end = data['kb_arr_lengths'][bi], data['kb_arr_lengths'][bi] + data['conv_arr_lengths'][bi] # data[13]: kb_arr_lengths, data[12]: conv_arr_lengths.
                # conv_rand_mask[:end.numpy()[0]-start.numpy()[0], bi, :] = rand_mask[bi, start.numpy()[0]:end.numpy()[0], :] # necessary to explictly move data to cuda ?
                conv_rand_mask[bi, :end-start, :] = rand_mask[bi, start:end, :] # story_size dimension order is different from pytorch, so the slice index is different from pytorch one. necessary to explictly move data to cuda ?
            conv_story = data[3] * conv_rand_mask # data[3]: conv_arr.
            story = data[0] * rand_mask # data[0]: context_arr.
        else:
            story, conv_story = data[0], data[3] # data[0]: context_arr, data[3]: conv_arr.
        # encode dialogue history and KB to vectors
        # TODO: need to check the shape and meaning of each tensor.
        dh_outputs, dh_hidden = self.encoder(conv_story, data[12], training=training) # data[12]: conv_arr_lengths.
        global_pointer, kb_readout, global_pointer_logits = self.extKnow.load_memory(story,
                                                                                     data[13], # data[13]: kb_arr_lengths.
                                                                                     data[12], # data[12]: conv_arr_lengths.
                                                                                     dh_hidden,
                                                                                     dh_outputs,
                                                                                     training=training)
        encoded_hidden = tf.concat([dh_hidden, kb_readout], 1)
        # get the words that can be copy from the memory
        batch_size = len(data[10]) # data[10]: context_arr_lengths.
        self.copy_list = []
        # pdb.set_trace()
        for elm in data[7]: # data[7]: context_arr_plain.
            # elm_temp = [word_arr[0] for word_arr in elm]
            elm_temp = []
            for word_arr in elm:
                # elm_temp.append(word_arr[0])
                if word_arr[0].numpy().decode() != 'PAD':
                    elm_temp.append(word_arr[0].numpy().decode())
                else:
                    # self.copy_list.append(elm_temp)
                    break
            self.copy_list.append(elm_temp)
        # pdb.set_trace()
        outputs_vocab, outputs_ptr, decoded_fine, decoded_coarse = self.decoder(self.extKnow,
                                                                                story.shape,
                                                                                data[10], # data[10]: context_arr_lengths.
                                                                                self.copy_list,
                                                                                encoded_hidden,
                                                                                data[2], # data[2]: sketch_response.
                                                                                max_target_length,
                                                                                batch_size,
                                                                                use_teacher_forcing,
                                                                                get_decoded_words,
                                                                                global_pointer,
                                                                                training=training)
        return outputs_vocab, outputs_ptr, decoded_fine, decoded_coarse, global_pointer, global_pointer_logits
    @tf.function
    def train_batch(self, data, clip, reset=0):
        """One optimization step: forward pass, three-part loss (global
        pointer BCE + masked vocab CE + masked local-pointer CE), gradient
        clipping, and separate Adam updates for encoder/extKnow/decoder.

        NOTE(review): the np.savetxt loops (hard-coded absolute path) and the
        pdb.set_trace() below are debugging leftovers; they will break or
        halt training outside the author's machine — confirm and remove.
        """
        # FOR DEBUG
        # pdb.set_trace()
        # t1 = self.encoder.trainable_variables
        # model training process
        # no need to zero gradients of optimizers in tensorflow
        # encode and decode
        with tf.GradientTape(persistent=True) as tape:
            # pdb.set_trace()
            use_teacher_forcing = random.random() < args['teacher_forcing_ratio']
            max_target_length = max(data[11]) # data[11]: response_lengths.
            # max_target_length = train_max_len_global
            all_decoder_outputs_vocab, all_decoder_outputs_ptr, _, _, global_pointer, global_pointer_logits = self.encode_and_decode(data,
                                                                                                                                     max_target_length,
                                                                                                                                     use_teacher_forcing,
                                                                                                                                     False,
                                                                                                                                     True)
            # loss calculation and backpropagation
            # pdb.set_trace()
            loss_g = tf.cast(tf.compat.v1.losses.sigmoid_cross_entropy(data[5], tf.cast(global_pointer_logits, dtype=tf.double)), dtype=tf.float32)
            #loss_gs = tf.keras.backend.binary_crossentropy(tf.cast(data[5], dtype=tf.double), tf.cast(global_pointer, dtype=tf.double))
            #loss_g = tf.cast(tf.reduce_sum(loss_gs) / (loss_gs.shape[0]*loss_gs.shape[1]), dtype=tf.float32)
            # loss_g_mat = tf.nn.sigmoid_cross_entropy_with_logits(tf.cast(global_pointer, dtype=tf.double), data['selector_index']) # data[5]: selector_index.
            # loss_g = tf.cast(tf.reduce_sum(loss_g_mat) / (loss_g_mat.shape[0] * loss_g_mat.shape[1]), dtype=tf.float32)
            # print("loss_g:", loss_g)
            loss_v = masked_cross_entropy(tf.transpose(all_decoder_outputs_vocab, [1, 0, 2]), # need to transpose ?
                                          data[2],
                                          tf.cast(data[11], dtype=tf.int32)) # data[2]: skectch_response, data[11]: response_lengths.
            # print("loss_v:", loss_v)
            loss_l = masked_cross_entropy(tf.transpose(all_decoder_outputs_ptr, [1, 0, 2]), # need to transpose ?
                                          data[4],
                                          tf.cast(data[11], dtype=tf.int32)) # data[4]: ptr_index, data[11]: response_lengths.
            # print("loss_l:", loss_l)
            loss = loss_g + loss_v + loss_l
        # compute gradients for encoder, decoder and external knowledge
        encoder_variables = self.encoder.trainable_variables
        extKnow_variables = self.extKnow.trainable_variables
        decoder_variables = self.decoder.trainable_variables
        for var in encoder_variables:
            name = var.name
            params = var.read_value()
            np.savetxt('/home/yimeng/shiquan/tmp3/GLMP/params/{}'.format(name), params.numpy())
        for var in extKnow_variables:
            name = var.name
            params = var.read_value()
            np.savetxt('/home/yimeng/shiquan/tmp3/GLMP/params/{}'.format(name), params.numpy())
        for var in decoder_variables:
            name = var.name
            params = var.read_value()
            np.savetxt('/home/yimeng/shiquan/tmp3/GLMP/params/{}'.format(name), params.numpy())
        pdb.set_trace()
        encoder_gradients = tape.gradient(loss, encoder_variables)
        extKnow_gradients = tape.gradient(loss, extKnow_variables)
        decoder_gradients = tape.gradient(loss, decoder_variables)
        # clip gradients
        encoder_gradients, ec = tf.clip_by_global_norm(encoder_gradients, clip)
        extKnow_gradients, kc = tf.clip_by_global_norm(extKnow_gradients, clip)
        decoder_gradients, dc = tf.clip_by_global_norm(decoder_gradients, clip)
        # clipped_encoder_gradients = [elem if isinstance(elem, ops.IndexedSlices) else tf.clip_by_norm(elem, clip) for elem in encoder_gradients]
        # clipped_extKnow_gradients = [elem if isinstance(elem, ops.IndexedSlices) else tf.clip_by_norm(elem, clip) for elem in extKnow_gradients]
        # clipped_decoder_gradients = [elem if isinstance(elem, ops.IndexedSlices) else tf.clip_by_norm(elem, clip) for elem in decoder_gradients]
        # apply update
        self.encoder_optimizer.apply_gradients(
            zip(encoder_gradients, self.encoder.trainable_variables))
        self.extKnow_optimizer.apply_gradients(
            zip(extKnow_gradients, self.extKnow.trainable_variables))
        self.decoder_optimizer.apply_gradients(
            zip(decoder_gradients, self.decoder.trainable_variables))
        # Accumulate scalar losses for print_loss() and the keras metrics.
        self.loss += loss.numpy()
        self.loss_g += loss_g.numpy()
        self.loss_v += loss_v.numpy()
        self.loss_l += loss_l.numpy()
        self.train_loss(loss.numpy())
        self.train_loss_g(loss_g.numpy())
        self.train_loss_v(loss_v.numpy())
        self.train_loss_l(loss_l.numpy())
    def evaluate(self, dev, dev_length, matric_best, early_stop=None):
        """Greedy-decode the dev set, compute BLEU plus per-response accuracy
        and (for the 'kvr' dataset) entity F1 scores, and checkpoint when the
        metric named by `early_stop` (BLEU/ENTF1/accuracy) improves on
        `matric_best`. Returns the metric value."""
        print('STARTING EVALUATION:')
        ref, hyp = [], []
        acc, total = 0, 0
        dialog_acc_dict = {}
        F1_pred, F1_cal_pred, F1_nav_pred, F1_wet_pred = 0, 0, 0, 0
        F1_count, F1_cal_count, F1_nav_count, F1_wet_count = 0, 0, 0, 0
        pbar = tqdm(enumerate(dev.take(-1)), total=(dev_length))
        new_precision, new_recall, new_f1_score = 0, 0, 0
        if args['dataset'] == 'kvr':
            # Build the flattened global entity list used by compute_prf.
            with open('data/KVR/kvret_entities.json') as f:
                global_entity = json.load(f)
                global_entity_list = []
                for key in global_entity.keys():
                    if key != 'poi':
                        global_entity_list += [item.lower().replace(' ', '_') for item in global_entity[key]]
                    else:
                        for item in global_entity['poi']:
                            global_entity_list += [item[k].lower().replace(' ', '_') for k in item.keys()]
                global_entity_list = list(set(global_entity_list))
        for j, data_dev in pbar:
            # Encode and Decode
            # pdb.set_trace()
            # max_target_length = max(data_dev['response_lengths']) # data[11]: response_lengths.
            # max_target_length = dev_max_len_global
            _, _, decoded_fine, decoded_coarse, global_pointer, global_pointer_logits = self.encode_and_decode(data_dev,
                                                                                                               self.max_resp_len,
                                                                                                               False,
                                                                                                               True,
                                                                                                               False)
            decoded_coarse = np.transpose(decoded_coarse)
            decoded_fine = np.transpose(decoded_fine)
            for bi, row in enumerate(decoded_fine):
                # Join decoded tokens up to the first EOS.
                st = ''
                for e in row:
                    if e == 'EOS':
                        break
                    else:
                        st += e + ' '
                st_c = ''
                for e in decoded_coarse[bi]:
                    if e == 'EOS':
                        break
                    else:
                        st_c += e + ' '
                pred_sent = st.lstrip().rstrip()
                pred_sent_coarse = st_c.lstrip().rstrip()
                # pdb.set_trace()
                gold_sent = data_dev[8][bi][0].numpy().decode().lstrip().rstrip() # data[8]: response_plain.
                ref.append(gold_sent)
                hyp.append(pred_sent)
                if args['dataset'] == 'kvr':
                    # compute F1 SCORE
                    single_f1, count = self.compute_prf(data_dev[14][bi], pred_sent.split(),
                                                        global_entity_list, data_dev[9][bi]) # data[14]: ent_index, data[9]: kb_arr_plain.
                    F1_pred += single_f1
                    F1_count += count
                    single_f1, count = self.compute_prf(data_dev[16][bi], pred_sent.split(),
                                                        global_entity_list, data_dev[9][bi]) # data[16]: ent_idx_cal, data[9]: kb_arr_plain.
                    F1_cal_pred += single_f1
                    F1_cal_count += count
                    single_f1, count = self.compute_prf(data_dev[17][bi], pred_sent.split(),
                                                        global_entity_list, data_dev[9][bi]) # data[17]: ent_idx_nav, data[9]: kb_arr_plain.
                    F1_nav_pred += single_f1
                    F1_nav_count += count
                    single_f1, count = self.compute_prf(data_dev[18][bi], pred_sent.split(),
                                                        global_entity_list, data_dev[9][bi]) # data[18]: ent_idx_wet, data[9]: kb_arr_plain.
                    F1_wet_pred += single_f1
                    F1_wet_count += count
                else:
                    # compute Dialogue Accuracy Score
                    current_id = data_dev[22][bi]
                    if current_id not in dialog_acc_dict.keys():
                        dialog_acc_dict[current_id] = []
                    if gold_sent == pred_sent:
                        dialog_acc_dict[current_id].append(1)
                    else:
                        dialog_acc_dict[current_id].append(0)
                # compute Per-response Accuracy Score
                total += 1
                if (gold_sent == pred_sent):
                    acc += 1
                if args['genSample']:
                    self.print_examples(bi, data_dev, pred_sent, pred_sent_coarse, gold_sent)
        # pdb.set_trace()
        bleu_score = moses_multi_bleu(np.array(hyp), np.array(ref), lowercase=True)
        acc_score = acc / float(total)
        print("ACC SCORE:\t" + str(acc_score))
        if args['dataset'] == 'kvr':
            F1_score = F1_pred / float(F1_count)
            print("F1 SCORE:\t{}".format(F1_pred / float(F1_count)))
            print("\tCAL F1:\t{}".format(F1_cal_pred / float(F1_cal_count)))
            print("\tWET F1:\t{}".format(F1_wet_pred / float(F1_wet_count)))
            print("\tNAV F1:\t{}".format(F1_nav_pred / float(F1_nav_count)))
            print("BLEU SCORE:\t" + str(bleu_score))
        else:
            # A dialogue counts as correct only if every response matched.
            dia_acc = 0
            for k in dialog_acc_dict.keys():
                if len(dialog_acc_dict[k]) == sum(dialog_acc_dict[k]):
                    dia_acc += 1
            print("Dialog Accuracy:\t" + str(dia_acc * 1.0 / len(dialog_acc_dict.keys())))
        if (early_stop == 'BLEU'):
            if (bleu_score >= matric_best):
                self.save_model('BLEU-' + str(bleu_score))
                print("MODEL SAVED")
            return bleu_score
        elif (early_stop == 'ENTF1'):
            if (F1_score >= matric_best):
                self.save_model('ENTF1-{:.4f}'.format(F1_score))
                print("MODEL SAVED")
            return F1_score
        else:
            if (acc_score >= matric_best):
                self.save_model('ACC-{:.4f}'.format(acc_score))
                print("MODEL SAVED")
            return acc_score
    def compute_prf(self, gold, pred, global_entity_list, kb_plain):
        """Micro precision/recall/F1 of gold entities against the predicted
        token list. Candidate false positives are tokens appearing in the
        global entity list or the example's local KB words. Returns
        (F1, count) where count is 1 if the example had gold entities."""
        # local_kb_word = [k[0] for k in kb_plain]
        # local_kb_word = [k[0] if k[0].decode() != '$' and k[0].decode() != 'PAD' for k in kb_plain]
        local_kb_word = []
        for k in kb_plain:
            if k[0].numpy().decode() != '$$$$' and k[0].numpy().decode() != 'PAD':
                local_kb_word.append(k[0].numpy().decode())
            else:
                # Sentinel / padding reached: the KB portion is over.
                break
        # gold_decode = [ent.decode() if ent.decode() != 'PAD' for ent in gold]
        gold_decode = []
        for ent in gold:
            if ent.numpy().decode() != 'PAD':
                gold_decode.append(ent.numpy().decode())
            else:
                break
        TP, FP, FN = 0, 0, 0
        if len(gold_decode) != 0:
            count = 1
            for g in gold_decode:
                if g in pred:
                    TP += 1
                else:
                    FN += 1
            for p in set(pred):
                if p in global_entity_list or p in local_kb_word:
                    if p not in gold_decode:
                        FP += 1
            precision = TP / float(TP + FP) if (TP + FP) != 0 else 0
            recall = TP / float(TP + FN) if (TP + FN) != 0 else 0
            F1 = 2 * precision * recall / float(precision + recall) if (precision + recall) != 0 else 0
        else:
            precision, recall, F1, count = 0, 0, 0, 0
        return F1, count
    def print_examples(self, batch_idx, data, pred_sent, pred_sent_coarse, gold_sent):
        """Pretty-print one example: its KB triples, the dialogue turns, and
        the sketch/final/gold responses.

        NOTE(review): this indexes `data` with string keys, unlike the
        positional-tuple access used everywhere else in this class — confirm
        which batch format callers actually pass here.
        """
        kb_len = len(data['context_arr_plain'][batch_idx]) - data['conv_arr_lengths'][batch_idx] - 1
        print("{}: ID{} id{} ".format(data['domain'][batch_idx], data['ID'][batch_idx], data['id'][batch_idx]))
        for i in range(kb_len):
            kb_temp = [w for w in data['context_arr_plain'][batch_idx][i] if w != 'PAD']
            kb_temp = kb_temp[::-1]
            if 'poi' not in kb_temp:
                print(kb_temp)
        flag_uttr, uttr = '$u', []
        for word_idx, word_arr in enumerate(data['context_arr_plain'][batch_idx][kb_len:]):
            if word_arr[1] == flag_uttr:
                uttr.append(word_arr[0])
            else:
                # Speaker changed: flush the buffered utterance.
                print(flag_uttr, ': ', " ".join(uttr))
                flag_uttr = word_arr[1]
                uttr = [word_arr[0]]
        print('Sketch System Response : ', pred_sent_coarse)
        print('Final System Response : ', pred_sent)
        print('Gold System Response : ', gold_sent)
        print('\n')
| 55.066832 | 229 | 0.531802 |
a68a72dfa9bd949d784452d4cd84db616897a21f | 362 | py | Python | server.py | teovoinea/opv | 878026c2099972b352926b3d357d4e041ef458cd | [
"MIT"
] | 1 | 2019-11-05T09:32:23.000Z | 2019-11-05T09:32:23.000Z | server.py | teovoinea/opv | 878026c2099972b352926b3d357d4e041ef458cd | [
"MIT"
] | 49 | 2018-01-15T00:23:11.000Z | 2020-06-01T11:08:25.000Z | server.py | teovoinea/opv | 878026c2099972b352926b3d357d4e041ef458cd | [
"MIT"
] | 1 | 2021-03-23T23:31:04.000Z | 2021-03-23T23:31:04.000Z | import socket
import time
# Loopback address / port the UDP listener binds to.
UDP_IP = "127.0.0.1"
UDP_PORT = 4242

sock = socket.socket(socket.AF_INET, # Internet
                     socket.SOCK_DGRAM) # UDP
sock.bind((UDP_IP, UDP_PORT))

# Receive datagrams forever, printing the elapsed time since the previous
# one (the timer is reset after each receive, below this loop body's print).
start_time = time.time()
while True:
    data, addr = sock.recvfrom(1500) # buffer size is 1500 bytes
    print (time.time() - start_time)
start_time = time.time() | 22.625 | 64 | 0.654696 |
c4f7c5315dea5f8fbf73b079c26c47aa1a40e143 | 2,661 | py | Python | cloud-build.py | Obirvalger/cloud-build | 4cf56dcd9b1567ca10fb3d693207215eee798fcc | [
"MIT"
] | 1 | 2019-08-22T13:19:42.000Z | 2019-08-22T13:19:42.000Z | cloud-build.py | Obirvalger/cloud-build | 4cf56dcd9b1567ca10fb3d693207215eee798fcc | [
"MIT"
] | null | null | null | cloud-build.py | Obirvalger/cloud-build | 4cf56dcd9b1567ca10fb3d693207215eee798fcc | [
"MIT"
] | null | null | null | #!/usr/bin/python3
from collections.abc import Iterable
import argparse
import yaml
import sys
import cloud_build
PROG = 'cloud-build'


def parse_args():
    """Parse the cloud-build command line.

    Returns:
        argparse.Namespace with: config (str), built_images_dir (str|None),
        stages / skip_stages (lists restricted to the known stage names),
        create_remote_dirs / force_rebuild / no_tests / no_sign (bools) and
        tasks (dict mapping lower-cased repo name -> list of tasks).

    Fixes: the user-facing help strings for --skip-stages ("sipping") and
    --built-images-dir ("other then") contained typos.
    """

    def is_dict(string):
        """Convert a YAML mapping string into {lowercase key: list of values}.

        Scalar (or string) values are wrapped in a single-element list so
        every value is uniformly a list.  Only invoked by argparse for
        values actually given on the command line, never for the default.
        """
        parsed = dict(yaml.safe_load(string))
        normalized = {}
        for raw_key, value in parsed.items():
            if isinstance(value, str) or not isinstance(value, Iterable):
                value = [value]
            normalized[raw_key.lower()] = value
        return normalized

    # Canonical ordered list of pipeline stages.
    stages = ['build', 'test', 'copy_external_files', 'sign', 'sync']

    parser = argparse.ArgumentParser(
        formatter_class=argparse.ArgumentDefaultsHelpFormatter,
    )
    parser.add_argument(
        '-c',
        '--config',
        default=f'/etc/{PROG}/config.yaml',
        help='path to config',
    )
    parser.add_argument(
        '--built-images-dir',
        help='path to already built image for stages other than build',
    )
    parser.add_argument(
        '--stages',
        nargs='+',
        default=stages,
        choices=stages,
        help='list of stages',
    )
    parser.add_argument(
        '--skip-stages',
        nargs='+',
        default=[],
        choices=stages,
        help='list of stages to skip',
    )
    parser.add_argument(
        '--create-remote-dirs',
        action='store_true',
        help='create remote directories',
    )
    parser.add_argument(
        '--force-rebuild',
        action='store_true',
        help='forces rebuild',
    )
    parser.add_argument(
        '--no-tests',
        action='store_true',
        help='disable running tests',
    )
    parser.add_argument(
        '--no-sign',
        action='store_true',
        help='disable creating check sum and signing it',
    )
    parser.add_argument(
        '--tasks',
        default={},
        type=is_dict,
        help='add tasks to repositories',
    )

    return parser.parse_args()
def main():
    """Entry point: parse the CLI and run the requested pipeline stages.

    Stage order is fixed (build -> copy_external_files -> sign -> sync);
    the 'test' stage only influences whether builds run their tests.
    """
    args = parse_args()
    # Effective stage set = requested stages minus explicitly skipped ones.
    requested = set(args.stages) - set(args.skip_stages)

    builder = cloud_build.CB(
        config=args.config,
        tasks=args.tasks,
        built_images_dir=args.built_images_dir,
        force_rebuild=args.force_rebuild,
    )

    if 'build' in requested:
        # Tests are disabled when the 'test' stage was not requested.
        builder.create_images(no_tests='test' not in requested)
    if 'copy_external_files' in requested:
        builder.copy_external_files()
    if 'sign' in requested:
        builder.sign()
    if 'sync' in requested:
        builder.sync(create_remote_dirs=args.create_remote_dirs)
if __name__ == '__main__':
    try:
        main()
    except cloud_build.Error as e:
        # Report build errors on *stderr* (the previous revision wrote to
        # stdout, mixing diagnostics with normal output) and exit non-zero
        # so callers/CI detect the failure.  sys.exit is preferred over the
        # interactive-only `exit` builtin.
        print(e, file=sys.stderr)
        sys.exit(1)
| 23.342105 | 71 | 0.570838 |
9b5d3f8a370da457532a10aaa48515cd3583b826 | 15,321 | py | Python | cassiopeia/datastores/riotapi/leagues.py | mertkutay/cassiopeia | 1c4005f78f216322d179f3465303d105261beab2 | [
"MIT"
] | null | null | null | cassiopeia/datastores/riotapi/leagues.py | mertkutay/cassiopeia | 1c4005f78f216322d179f3465303d105261beab2 | [
"MIT"
] | null | null | null | cassiopeia/datastores/riotapi/leagues.py | mertkutay/cassiopeia | 1c4005f78f216322d179f3465303d105261beab2 | [
"MIT"
] | null | null | null | from typing import Type, TypeVar, MutableMapping, Any, Iterable, Generator
from datapipelines import DataSource, PipelineContext, Query, NotFoundError, validate_query
from .common import RiotAPIService, APINotFoundError
from ...data import Platform, Queue
from ...dto.league import LeaguesListDto, ChallengerLeagueListDto, MasterLeagueListDto,GrandmasterLeagueListDto, LeaguePositionsDto, LeagueListDto
from ..uniquekeys import convert_region_to_platform
T = TypeVar("T")
class LeaguesAPI(RiotAPIService):
    """Riot API data source for the league-v4 (ranked) endpoints.

    Each handler builds the platform-specific URL, performs a rate-limited
    request via :meth:`_request`, and normalizes the raw payload (injecting
    ``region`` and, where relevant, ``queue``/``tier``/``summonerId``) before
    wrapping it in the matching DTO type.

    Changes versus the previous revision:
      * duplicate method names (``get_many_leagues_list`` and
        ``get_master_league_list`` were each defined twice, the second
        definition silently shadowing the first) were renamed;
      * the ``get_many`` generators for league ids and for the challenger /
        grandmaster / master leagues no longer wrap the payload in
        ``{"leagues": ...}`` before reading ``data["entries"]`` (which always
        raised); they now mirror their single-item counterparts;
      * the many-positions handler now targets league **v4** like every other
        handler (it previously pointed at the retired v3 route).
    """

    @DataSource.dispatch
    def get(self, type: Type[T], query: MutableMapping[str, Any], context: PipelineContext = None) -> T:
        pass

    @DataSource.dispatch
    def get_many(self, type: Type[T], query: MutableMapping[str, Any], context: PipelineContext = None) -> Iterable[T]:
        pass

    def _request(self, platform: Platform, url: str, endpoint: str) -> Any:
        """Perform a rate-limited GET, translating API 404s into NotFoundError."""
        try:
            app_limiter, method_limiter = self._get_rate_limiter(platform, endpoint)
            return self._get(url, {}, app_limiter=app_limiter, method_limiter=method_limiter)
        except APINotFoundError as error:
            raise NotFoundError(str(error)) from error

    def _get_apex_league(self, league_path: str, platform: Platform, queue: Queue) -> Any:
        """Fetch and normalize one apex (challenger/grandmaster/master) league.

        ``league_path`` is the URL path segment, e.g. ``"challengerleagues"``.
        """
        url = "https://{platform}.api.riotgames.com/lol/league/v4/{leaguePath}/by-queue/{queueName}".format(platform=platform.value.lower(), leaguePath=league_path, queueName=queue.value)
        data = self._request(platform, url, "{}/by-queue {}".format(league_path, platform.value))
        data["region"] = platform.region.value
        data["queue"] = queue.value
        for entry in data["entries"]:
            entry["region"] = data["region"]
        return data

    # League positions

    _validate_get_league_positions_query = Query. \
        has("summoner.id").as_(str).also. \
        has("platform").as_(Platform)

    @get.register(LeaguePositionsDto)
    @validate_query(_validate_get_league_positions_query, convert_region_to_platform)
    def get_league_position(self, query: MutableMapping[str, Any], context: PipelineContext = None) -> LeaguePositionsDto:
        """Return the ranked positions of a single summoner."""
        platform = query["platform"]
        url = "https://{platform}.api.riotgames.com/lol/league/v4/positions/by-summoner/{summonerId}".format(platform=platform.value.lower(), summonerId=query["summoner.id"])
        data = self._request(platform, url, "positions/by-summoner/summonerId {}".format(platform.value))
        data = {"positions": data, "region": platform.region.value, "summonerId": query["summoner.id"]}
        for position in data["positions"]:
            position["region"] = data["region"]
        return LeaguePositionsDto(data)

    _validate_get_many_league_positions_query = Query. \
        has("summoner.ids").as_(Iterable).also. \
        has("platform").as_(Platform)

    @get_many.register(LeaguePositionsDto)
    @validate_query(_validate_get_many_league_positions_query, convert_region_to_platform)
    def get_many_league_positions(self, query: MutableMapping[str, Any], context: PipelineContext = None) -> Generator[LeaguePositionsDto, None, None]:
        """Yield the ranked positions of each summoner in ``summoner.ids``.

        Now uses the v4 route, consistently with :meth:`get_league_position`.
        """
        def generator():
            platform = query["platform"]
            for summoner_id in query["summoner.ids"]:
                url = "https://{platform}.api.riotgames.com/lol/league/v4/positions/by-summoner/{summonerId}".format(platform=platform.value.lower(), summonerId=summoner_id)
                data = self._request(platform, url, "positions/by-summoner/summonerId {}".format(platform.value))
                data = {"positions": data, "region": platform.region.value, "summonerId": summoner_id}
                for position in data["positions"]:
                    position["region"] = data["region"]
                yield LeaguePositionsDto(data)
        return generator()

    # Leagues

    _validate_get_leagues_query = Query. \
        has("id").as_(str).also. \
        has("platform").as_(Platform)

    @get.register(LeagueListDto)
    @validate_query(_validate_get_leagues_query, convert_region_to_platform)
    def get_leagues_list(self, query: MutableMapping[str, Any], context: PipelineContext = None) -> LeagueListDto:
        """Return a single league identified by its league id."""
        platform = query["platform"]
        url = "https://{platform}.api.riotgames.com/lol/league/v4/leagues/{leagueId}".format(platform=platform.value.lower(), leagueId=query["id"])
        data = self._request(platform, url, "leagues/leagueId {}".format(platform.value))
        data["region"] = platform.region.value
        for entry in data["entries"]:
            entry["region"] = data["region"]
            entry["tier"] = data["tier"]
        return LeagueListDto(data)

    _validate_get_many_leagues_by_summoner_query = Query. \
        has("summoner.ids").as_(Iterable).also. \
        has("platform").as_(Platform)

    @get_many.register(LeaguesListDto)
    @validate_query(_validate_get_many_leagues_by_summoner_query, convert_region_to_platform)
    def get_many_leagues_list(self, query: MutableMapping[str, Any], context: PipelineContext = None) -> Generator[LeaguesListDto, None, None]:
        """Yield, per summoner id, all leagues that summoner belongs to."""
        def generator():
            platform = query["platform"]
            for summoner_id in query["summoner.ids"]:
                url = "https://{platform}.api.riotgames.com/lol/league/v4/leagues/by-summoner/{summonerId}".format(platform=platform.value.lower(), summonerId=summoner_id)
                data = self._request(platform, url, "leagues/by-summoner/summonerId {}".format(platform.value))
                # NOTE(review): /leagues/by-summoner returns a JSON *list*;
                # the item assignments below assume a mapping.  Kept as in
                # the previous revision — verify against LeaguesListDto.
                data["region"] = platform.region.value
                data["summonerId"] = summoner_id
                for entry in data["entries"]:
                    entry["region"] = data["region"]
                yield LeaguesListDto(data)
        return generator()

    _validate_get_many_leagues_query = Query. \
        has("ids").as_(Iterable).also. \
        has("platform").as_(Platform)

    @get_many.register(LeagueListDto)
    @validate_query(_validate_get_many_leagues_query, convert_region_to_platform)
    def get_many_league_list(self, query: MutableMapping[str, Any], context: PipelineContext = None) -> Generator[LeagueListDto, None, None]:
        """Yield one league per league id in ``ids``.

        Renamed (the old name collided with the by-summoner handler above)
        and fixed to normalize each payload exactly like
        :meth:`get_leagues_list` instead of wrapping it in
        ``{"leagues": ...}`` and then failing on ``data["entries"]``.
        """
        def generator():
            platform = query["platform"]
            for league_id in query["ids"]:
                url = "https://{platform}.api.riotgames.com/lol/league/v4/leagues/{leagueId}".format(platform=platform.value.lower(), leagueId=league_id)
                data = self._request(platform, url, "leagues/leagueId {}".format(platform.value))
                data["region"] = platform.region.value
                for entry in data["entries"]:
                    entry["region"] = data["region"]
                    entry["tier"] = data["tier"]
                yield LeagueListDto(data)
        return generator()

    # Apex leagues (challenger / grandmaster / master)

    _validate_get_challenger_league_query = Query. \
        has("queue").as_(Queue).also. \
        has("platform").as_(Platform)

    @get.register(ChallengerLeagueListDto)
    @validate_query(_validate_get_challenger_league_query, convert_region_to_platform)
    def get_challenger_league_list(self, query: MutableMapping[str, Any], context: PipelineContext = None) -> ChallengerLeagueListDto:
        """Return the challenger league of the requested queue."""
        return ChallengerLeagueListDto(self._get_apex_league("challengerleagues", query["platform"], query["queue"]))

    _validate_get_many_challenger_league_query = Query. \
        has("queues").as_(Iterable).also. \
        has("platform").as_(Platform)

    @get_many.register(ChallengerLeagueListDto)
    @validate_query(_validate_get_many_challenger_league_query, convert_region_to_platform)
    def get_challenger_leagues_list(self, query: MutableMapping[str, Any], context: PipelineContext = None) -> Generator[ChallengerLeagueListDto, None, None]:
        """Yield the challenger league of every queue in ``queues``.

        Fixed: the old body wrapped the payload in ``{"leagues": ...}`` and
        then read ``data["entries"]``, which always raised ``KeyError``.
        """
        def generator():
            for queue in query["queues"]:
                yield ChallengerLeagueListDto(self._get_apex_league("challengerleagues", query["platform"], queue))
        return generator()

    _validate_get_grandmaster_league_query = Query. \
        has("queue").as_(Queue).also. \
        has("platform").as_(Platform)

    @get.register(GrandmasterLeagueListDto)
    @validate_query(_validate_get_grandmaster_league_query, convert_region_to_platform)
    def get_grandmaster_league_list(self, query: MutableMapping[str, Any], context: PipelineContext = None) -> GrandmasterLeagueListDto:
        """Return the grandmaster league of the requested queue.

        Renamed from ``get_master_league_list``, which collided with the
        master-tier handler below.
        """
        return GrandmasterLeagueListDto(self._get_apex_league("grandmasterleagues", query["platform"], query["queue"]))

    _validate_get_many_grandmaster_league_query = Query. \
        has("queues").as_(Iterable).also. \
        has("platform").as_(Platform)

    @get_many.register(GrandmasterLeagueListDto)
    @validate_query(_validate_get_many_grandmaster_league_query, convert_region_to_platform)
    def get_grandmaster_leagues_list(self, query: MutableMapping[str, Any], context: PipelineContext = None) -> Generator[GrandmasterLeagueListDto, None, None]:
        """Yield the grandmaster league of every queue in ``queues``.

        Fixed like :meth:`get_challenger_leagues_list` (the old body raised
        ``KeyError`` on every item).
        """
        def generator():
            for queue in query["queues"]:
                yield GrandmasterLeagueListDto(self._get_apex_league("grandmasterleagues", query["platform"], queue))
        return generator()

    _validate_get_master_league_query = Query. \
        has("queue").as_(Queue).also. \
        has("platform").as_(Platform)

    @get.register(MasterLeagueListDto)
    @validate_query(_validate_get_master_league_query, convert_region_to_platform)
    def get_master_league_list(self, query: MutableMapping[str, Any], context: PipelineContext = None) -> MasterLeagueListDto:
        """Return the master league of the requested queue."""
        return MasterLeagueListDto(self._get_apex_league("masterleagues", query["platform"], query["queue"]))

    _validate_get_many_master_league_query = Query. \
        has("queues").as_(Iterable).also. \
        has("platform").as_(Platform)

    @get_many.register(MasterLeagueListDto)
    @validate_query(_validate_get_many_master_league_query, convert_region_to_platform)
    def get_master_leagues_list(self, query: MutableMapping[str, Any], context: PipelineContext = None) -> Generator[MasterLeagueListDto, None, None]:
        """Yield the master league of every queue in ``queues``.

        Fixed like :meth:`get_challenger_leagues_list` (the old body raised
        ``KeyError`` on every item).
        """
        def generator():
            for queue in query["queues"]:
                yield MasterLeagueListDto(self._get_apex_league("masterleagues", query["platform"], queue))
        return generator()
| 53.197917 | 187 | 0.650741 |
f984404c0243f38c664f58f6c079aa0cf3c8ee58 | 9,207 | py | Python | config/settings/production.py | yerkebulan19971212/dipploma | d274088aa477dadd7971950b80ef9ea3ea366a6b | [
"MIT"
] | null | null | null | config/settings/production.py | yerkebulan19971212/dipploma | d274088aa477dadd7971950b80ef9ea3ea366a6b | [
"MIT"
] | null | null | null | config/settings/production.py | yerkebulan19971212/dipploma | d274088aa477dadd7971950b80ef9ea3ea366a6b | [
"MIT"
] | null | null | null | import logging
import sentry_sdk
from sentry_sdk.integrations.django import DjangoIntegration
from sentry_sdk.integrations.logging import LoggingIntegration
from sentry_sdk.integrations.celery import CeleryIntegration
from .base import * # noqa
from .base import env
# GENERAL
# ------------------------------------------------------------------------------
# https://docs.djangoproject.com/en/dev/ref/settings/#secret-key
SECRET_KEY = env("DJANGO_SECRET_KEY")
# https://docs.djangoproject.com/en/dev/ref/settings/#allowed-hosts
ALLOWED_HOSTS = env.list("DJANGO_ALLOWED_HOSTS", default=["smart_note.com"])
# DATABASES
# ------------------------------------------------------------------------------
# Database config comes entirely from the DATABASE_URL env var (django-environ).
DATABASES["default"] = env.db("DATABASE_URL") # noqa F405
DATABASES["default"]["ATOMIC_REQUESTS"] = True # noqa F405
# Persistent connections, default 60s lifetime.
DATABASES["default"]["CONN_MAX_AGE"] = env.int("CONN_MAX_AGE", default=60) # noqa F405
# CACHES
# ------------------------------------------------------------------------------
CACHES = {
    "default": {
        "BACKEND": "django_redis.cache.RedisCache",
        "LOCATION": env("REDIS_URL"),
        "OPTIONS": {
            "CLIENT_CLASS": "django_redis.client.DefaultClient",
            # Mimicing memcache behavior.
            # http://niwinz.github.io/django-redis/latest/#_memcached_exceptions_behavior
            # NOTE: with IGNORE_EXCEPTIONS a Redis outage degrades to a cache
            # miss instead of a 500.
            "IGNORE_EXCEPTIONS": True,
        },
    }
}
# SECURITY
# ------------------------------------------------------------------------------
# https://docs.djangoproject.com/en/dev/ref/settings/#secure-proxy-ssl-header
SECURE_PROXY_SSL_HEADER = ("HTTP_X_FORWARDED_PROTO", "https")
# https://docs.djangoproject.com/en/dev/ref/settings/#secure-ssl-redirect
SECURE_SSL_REDIRECT = env.bool("DJANGO_SECURE_SSL_REDIRECT", default=True)
# https://docs.djangoproject.com/en/dev/ref/settings/#session-cookie-secure
SESSION_COOKIE_SECURE = True
# https://docs.djangoproject.com/en/dev/ref/settings/#csrf-cookie-secure
CSRF_COOKIE_SECURE = True
# https://docs.djangoproject.com/en/dev/topics/security/#ssl-https
# https://docs.djangoproject.com/en/dev/ref/settings/#secure-hsts-seconds
# TODO: set this to 60 seconds first and then to 518400 once you prove the former works
SECURE_HSTS_SECONDS = 60
# https://docs.djangoproject.com/en/dev/ref/settings/#secure-hsts-include-subdomains
SECURE_HSTS_INCLUDE_SUBDOMAINS = env.bool(
    "DJANGO_SECURE_HSTS_INCLUDE_SUBDOMAINS", default=True
)
# https://docs.djangoproject.com/en/dev/ref/settings/#secure-hsts-preload
SECURE_HSTS_PRELOAD = env.bool("DJANGO_SECURE_HSTS_PRELOAD", default=True)
# https://docs.djangoproject.com/en/dev/ref/middleware/#x-content-type-options-nosniff
SECURE_CONTENT_TYPE_NOSNIFF = env.bool(
    "DJANGO_SECURE_CONTENT_TYPE_NOSNIFF", default=True
)
# STORAGES
# ------------------------------------------------------------------------------
# https://django-storages.readthedocs.io/en/latest/#installation
INSTALLED_APPS += ["storages"] # noqa F405
# https://django-storages.readthedocs.io/en/latest/backends/amazon-S3.html#settings
AWS_ACCESS_KEY_ID = env("DJANGO_AWS_ACCESS_KEY_ID")
# https://django-storages.readthedocs.io/en/latest/backends/amazon-S3.html#settings
AWS_SECRET_ACCESS_KEY = env("DJANGO_AWS_SECRET_ACCESS_KEY")
# https://django-storages.readthedocs.io/en/latest/backends/amazon-S3.html#settings
AWS_STORAGE_BUCKET_NAME = env("DJANGO_AWS_STORAGE_BUCKET_NAME")
# https://django-storages.readthedocs.io/en/latest/backends/amazon-S3.html#settings
AWS_QUERYSTRING_AUTH = False
# DO NOT change these unless you know what you're doing.
# Cache S3-served assets for one week.
_AWS_EXPIRY = 60 * 60 * 24 * 7
# https://django-storages.readthedocs.io/en/latest/backends/amazon-S3.html#settings
AWS_S3_OBJECT_PARAMETERS = {
    "CacheControl": f"max-age={_AWS_EXPIRY}, s-maxage={_AWS_EXPIRY}, must-revalidate"
}
# https://django-storages.readthedocs.io/en/latest/backends/amazon-S3.html#settings
AWS_DEFAULT_ACL = None
# https://django-storages.readthedocs.io/en/latest/backends/amazon-S3.html#settings
AWS_S3_REGION_NAME = env("DJANGO_AWS_S3_REGION_NAME", default=None)
# STATIC
# ------------------------
STATICFILES_STORAGE = "config.settings.production.StaticRootS3Boto3Storage"
COLLECTFAST_STRATEGY = "collectfast.strategies.boto3.Boto3Strategy"
STATIC_URL = f"https://{AWS_STORAGE_BUCKET_NAME}.s3.amazonaws.com/static/"
# MEDIA
# ------------------------------------------------------------------------------
# region http://stackoverflow.com/questions/10390244/
# Full-fledge class: https://stackoverflow.com/a/18046120/104731
from storages.backends.s3boto3 import S3Boto3Storage # noqa E402
class StaticRootS3Boto3Storage(S3Boto3Storage):
    """S3 storage for collected static files (public, under static/)."""
    location = "static"
    default_acl = "public-read"
class MediaRootS3Boto3Storage(S3Boto3Storage):
    """S3 storage for user uploads (under media/, never overwrites files)."""
    location = "media"
    file_overwrite = False
# endregion
DEFAULT_FILE_STORAGE = "config.settings.production.MediaRootS3Boto3Storage"
MEDIA_URL = f"https://{AWS_STORAGE_BUCKET_NAME}.s3.amazonaws.com/media/"
# TEMPLATES
# ------------------------------------------------------------------------------
# https://docs.djangoproject.com/en/dev/ref/settings/#templates
# Cached template loader: templates are compiled once per process.
TEMPLATES[-1]["OPTIONS"]["loaders"] = [ # type: ignore[index] # noqa F405
    (
        "django.template.loaders.cached.Loader",
        [
            "django.template.loaders.filesystem.Loader",
            "django.template.loaders.app_directories.Loader",
        ],
    )
]
# EMAIL
# ------------------------------------------------------------------------------
# https://docs.djangoproject.com/en/dev/ref/settings/#default-from-email
DEFAULT_FROM_EMAIL = env(
    "DJANGO_DEFAULT_FROM_EMAIL", default="SmartNoteDiploma <noreply@smart_note.com>"
)
# https://docs.djangoproject.com/en/dev/ref/settings/#server-email
SERVER_EMAIL = env("DJANGO_SERVER_EMAIL", default=DEFAULT_FROM_EMAIL)
# https://docs.djangoproject.com/en/dev/ref/settings/#email-subject-prefix
EMAIL_SUBJECT_PREFIX = env(
    "DJANGO_EMAIL_SUBJECT_PREFIX", default="[SmartNoteDiploma]"
)
# ADMIN
# ------------------------------------------------------------------------------
# Django Admin URL regex.
ADMIN_URL = env("DJANGO_ADMIN_URL")
# Anymail (Mailgun)
# ------------------------------------------------------------------------------
# https://anymail.readthedocs.io/en/stable/installation/#installing-anymail
INSTALLED_APPS += ["anymail"] # noqa F405
EMAIL_BACKEND = "anymail.backends.mailgun.EmailBackend"
# https://anymail.readthedocs.io/en/stable/installation/#anymail-settings-reference
ANYMAIL = {
    "MAILGUN_API_KEY": env("MAILGUN_API_KEY"),
    "MAILGUN_SENDER_DOMAIN": env("MAILGUN_DOMAIN"),
    "MAILGUN_API_URL": env("MAILGUN_API_URL", default="https://api.mailgun.net/v3"),
}
# django-compressor
# ------------------------------------------------------------------------------
# https://django-compressor.readthedocs.io/en/latest/settings/#django.conf.settings.COMPRESS_ENABLED
COMPRESS_ENABLED = env.bool("COMPRESS_ENABLED", default=True)
# https://django-compressor.readthedocs.io/en/latest/settings/#django.conf.settings.COMPRESS_STORAGE
COMPRESS_STORAGE = "storages.backends.s3boto3.S3Boto3Storage"
# https://django-compressor.readthedocs.io/en/latest/settings/#django.conf.settings.COMPRESS_URL
COMPRESS_URL = STATIC_URL
# Collectfast
# ------------------------------------------------------------------------------
# https://github.com/antonagestam/collectfast#installation
# Prepended so collectfast can override the collectstatic command.
INSTALLED_APPS = ["collectfast"] + INSTALLED_APPS # noqa F405
# LOGGING
# ------------------------------------------------------------------------------
# https://docs.djangoproject.com/en/dev/ref/settings/#logging
# See https://docs.djangoproject.com/en/dev/topics/logging for
# more details on how to customize your logging configuration.
LOGGING = {
    "version": 1,
    "disable_existing_loggers": True,
    "formatters": {
        "verbose": {
            "format": "%(levelname)s %(asctime)s %(module)s "
            "%(process)d %(thread)d %(message)s"
        }
    },
    "handlers": {
        "console": {
            "level": "DEBUG",
            "class": "logging.StreamHandler",
            "formatter": "verbose",
        }
    },
    "root": {"level": "INFO", "handlers": ["console"]},
    "loggers": {
        "django.db.backends": {
            "level": "ERROR",
            "handlers": ["console"],
            "propagate": False,
        },
        # Errors logged by the SDK itself
        "sentry_sdk": {"level": "ERROR", "handlers": ["console"], "propagate": False},
        "django.security.DisallowedHost": {
            "level": "ERROR",
            "handlers": ["console"],
            "propagate": False,
        },
    },
}

# Sentry
# ------------------------------------------------------------------------------
SENTRY_DSN = env("SENTRY_DSN")
SENTRY_LOG_LEVEL = env.int("DJANGO_SENTRY_LOG_LEVEL", logging.INFO)

sentry_logging = LoggingIntegration(
    level=SENTRY_LOG_LEVEL, # Capture info and above as breadcrumbs
    event_level=logging.ERROR, # Send errors as events
)
sentry_sdk.init(
    dsn=SENTRY_DSN,
    integrations=[sentry_logging, DjangoIntegration(), CeleryIntegration()],
)
# Your stuff...
# ------------------------------------------------------------------------------
| 40.92 | 100 | 0.633648 |
f779c2c1898bf9169019877149f86baa55631f5b | 17,616 | py | Python | dataset/convert_tfrecords.py | HiKapok/DAN | fb726fad86b3f53d12c7bc5b833a705d7d885563 | [
"Apache-2.0"
] | 15 | 2019-01-28T01:56:15.000Z | 2021-04-27T19:33:46.000Z | dataset/convert_tfrecords.py | HiKapok/DAN | fb726fad86b3f53d12c7bc5b833a705d7d885563 | [
"Apache-2.0"
] | 3 | 2019-05-14T07:25:17.000Z | 2021-08-11T01:50:59.000Z | dataset/convert_tfrecords.py | HiKapok/DAN | fb726fad86b3f53d12c7bc5b833a705d7d885563 | [
"Apache-2.0"
] | 7 | 2019-04-11T09:25:11.000Z | 2020-12-18T06:04:10.000Z | # Copyright 2018 Changan Wang
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# =============================================================================
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from datetime import datetime
import os
import random
import sys
import threading
import xml.etree.ElementTree as xml_tree
import numpy as np
import six
import tensorflow as tf
import dataset_common
'''How to organize your dataset folder:
WIDERFACE Dataset/
|->WIDER_train/
| |->images/
| |->...
|->WIDER_val/
| |->images/
| |->...
|->WIDER_test/
| |->images/
| |->...
|->wider_face_split/
| |->wider_face_train.mat
| |->...
'''
# Command-line flags: dataset locations, TFRecord output location, sharding
# and threading for the WIDER FACE conversion.
tf.app.flags.DEFINE_string('dataset_directory', '/data1/home/changanwang/widerface',
                            'All datas directory')
tf.app.flags.DEFINE_string('output_directory', '/data1/home/changanwang/widerface/tfrecords',
                            'Output data directory')
tf.app.flags.DEFINE_string('train_split', 'WIDER_train',
                            'Name of the training data sub-directory')
tf.app.flags.DEFINE_string('validation_split', 'WIDER_val',
                            'Name of the validation data sub-directory')
tf.app.flags.DEFINE_integer('train_shards', 16,
                            'Number of shards in training TFRecord files.')
tf.app.flags.DEFINE_integer('validation_shards', 8,
                            'Number of shards in validation TFRecord files.')
# NOTE(review): Inception-style converters typically require num_threads to
# evenly divide the shard counts — presumably checked elsewhere; confirm.
tf.app.flags.DEFINE_integer('num_threads', 8,
                            'Number of threads to preprocess the images.')
# Seed so file-list shuffling is reproducible across runs.
RANDOM_SEED = 180530
FLAGS = tf.app.flags.FLAGS
def _int64_feature(value):
    """Wrap an int (or list of ints) as an int64 Feature proto."""
    values = value if isinstance(value, list) else [value]
    return tf.train.Feature(int64_list=tf.train.Int64List(value=values))
def _float_feature(value):
    """Wrap a float (or list of floats) as a float Feature proto."""
    values = value if isinstance(value, list) else [value]
    return tf.train.Feature(float_list=tf.train.FloatList(value=values))
def _bytes_list_feature(value):
    """Wrap a bytes value (or list of bytes values) as a bytes Feature proto."""
    values = value if isinstance(value, list) else [value]
    return tf.train.Feature(bytes_list=tf.train.BytesList(value=values))
def _bytes_feature(value):
    """Wrap a single string/bytes value as a bytes Feature proto.

    Text strings are UTF-8 encoded first (py2/py3 compatible via six).
    """
    encoded = six.binary_type(value, encoding='utf-8') if isinstance(value, six.string_types) else value
    return tf.train.Feature(bytes_list=tf.train.BytesList(value=[encoded]))
def _convert_to_example(filename, image_name, image_buffer, bboxes, blur, expression, illumination, invalid, occlusion, pose, height, width):
    """Build an Example proto for an example.

    Args:
        filename: string, path to an image file, e.g., '/path/to/example.JPG'
            (kept for interface compatibility; not stored in the Example).
        image_name: string, image file name stored under 'image/filename'.
        image_buffer: string, JPEG encoding of RGB image
        bboxes: List of bounding boxes for each image; each box is
            (ymin, xmin, ymax, xmax).
        blur: List, clear->0, normal blur->1, heavy blur->2.
        expression: List, typical expression->0, exaggerate expression->1.
        illumination: List, normal illumination->0, extreme illumination->1.
        invalid: List, false->0(valid image), true->1(invalid image).
        occlusion: List, no occlusion->0, partial occlusion->1, heavy occlusion->2.
        pose: List, typical pose->0, atypical pose->1.
        height: integer, image height in pixels
        width: integer, image width in pixels
    Returns:
        Example proto
    """
    ymin = []
    xmin = []
    ymax = []
    xmax = []
    for b in bboxes:
        assert len(b) == 4
        # Explicit unpacking replaces the previous side-effect list
        # comprehension (which needed a pylint suppression).
        y0, x0, y1, x1 = b
        ymin.append(y0)
        xmin.append(x0)
        ymax.append(y1)
        xmax.append(x1)

    channels = 3
    image_format = 'JPEG'

    example = tf.train.Example(features=tf.train.Features(feature={
            'image/height': _int64_feature(height),
            'image/width': _int64_feature(width),
            'image/channels': _int64_feature(channels),
            'image/shape': _int64_feature([height, width, channels]),
            'image/object/bbox/xmin': _float_feature(xmin),
            'image/object/bbox/xmax': _float_feature(xmax),
            'image/object/bbox/ymin': _float_feature(ymin),
            'image/object/bbox/ymax': _float_feature(ymax),
            'image/object/bbox/blur': _int64_feature(blur),
            'image/object/bbox/expression': _int64_feature(expression),
            'image/object/bbox/illumination': _int64_feature(illumination),
            'image/object/bbox/invalid': _int64_feature(invalid),
            'image/object/bbox/occlusion': _int64_feature(occlusion),
            'image/object/bbox/pose': _int64_feature(pose),
            'image/format': _bytes_feature(image_format),
            'image/filename': _bytes_feature(image_name.encode('utf8')),
            'image/encoded': _bytes_feature(image_buffer)}))
    return example
class ImageCoder(object):
    """Owns a TF session plus small reusable graphs for image coding.

    Three ops are built once at construction time: PNG -> JPEG re-encoding,
    CMYK JPEG -> RGB JPEG re-encoding, and JPEG decoding to an RGB array.
    """

    def __init__(self):
        # One session shared by all coding calls.
        self._sess = tf.Session()

        # PNG bytes -> JPEG bytes.
        self._png_data = tf.placeholder(dtype=tf.string)
        png_image = tf.image.decode_png(self._png_data, channels=3)
        self._png_to_jpeg = tf.image.encode_jpeg(png_image, format='rgb', quality=100)

        # CMYK JPEG bytes -> RGB JPEG bytes.
        self._cmyk_data = tf.placeholder(dtype=tf.string)
        cmyk_image = tf.image.decode_jpeg(self._cmyk_data, channels=0)
        self._cmyk_to_rgb = tf.image.encode_jpeg(cmyk_image, format='rgb', quality=100)

        # JPEG bytes -> HxWx3 uint8 array.
        self._decode_jpeg_data = tf.placeholder(dtype=tf.string)
        self._decode_jpeg = tf.image.decode_jpeg(self._decode_jpeg_data, channels=3)

    def png_to_jpeg(self, image_data):
        """Re-encode PNG bytes as JPEG bytes."""
        return self._sess.run(self._png_to_jpeg,
                              feed_dict={self._png_data: image_data})

    def cmyk_to_rgb(self, image_data):
        """Re-encode a CMYK JPEG as an RGB JPEG."""
        return self._sess.run(self._cmyk_to_rgb,
                              feed_dict={self._cmyk_data: image_data})

    def decode_jpeg(self, image_data):
        """Decode JPEG bytes into an RGB array of shape (H, W, 3)."""
        decoded = self._sess.run(self._decode_jpeg,
                                 feed_dict={self._decode_jpeg_data: image_data})
        assert len(decoded.shape) == 3
        assert decoded.shape[2] == 3
        return decoded
# final_scaless = 0.
# count = 0.
def _process_image(filename, coder):
    """Process a single image file.
    Args:
      filename: string, path to an image file e.g., '/path/to/example.JPG'.
      coder: instance of ImageCoder to provide TensorFlow image coding utils.
    Returns:
      image_buffer: string, JPEG encoding of RGB image.
      height: integer, image height in pixels.
      width: integer, image width in pixels.
    Raises:
      AssertionError: if the decoded image is not 3-channel RGB.
    """
    # Read the image file.
    with tf.gfile.FastGFile(filename, 'rb') as f:
        image_data = f.read()
    # Decode the RGB JPEG.
    # NOTE: only decoded to validate shape; the raw bytes are what get stored.
    image = coder.decode_jpeg(image_data)
    # Check that image converted to RGB
    assert len(image.shape) == 3
    height = image.shape[0]
    width = image.shape[1]
    assert image.shape[2] == 3
    # The block below is disabled instrumentation that computed an average
    # resize scale over the dataset (see the commented globals at module level).
    # shorter_side = min(height, width)
    # longer_side = max(height, width)
    # target_shorter_side = np.random.choice([1024., 1200.], 2, p=[0.5, 0.5])[0]
    # target_longer = target_shorter_side * longer_side / shorter_side
    # if target_longer > 1600:
    #     final_scale = 1600./ longer_side
    # else:
    #     final_scale = target_shorter_side / shorter_side
    # global final_scaless
    # global count
    # final_scaless += final_scale
    # #print(final_scale)
    # count+=1.
    return image_data, height, width
def _find_image_bounding_boxes(cur_record, all_ground_truth, height, width):
"""Find the bounding boxes for a given image file.
Args:
cur_record: list of strings; the first of which is the sub-directory of cur_record, the second is the image filename.
all_ground_truth: all the annotations of the faces in this data set.
height: the height of the current image.
width: the width of the current image.
Returns:
bboxes: List of bounding boxes for each image.
blur: List, clear->0, normal blur->1, heavy blur->2.
expression: List, typical expression->0, exaggerate expression->1.
illumination: List, normal illumination->0, extreme illumination->1.
invalid: List, false->0(valid image), true->1(invalid image).
occlusion: List, no occlusion->0, partial occlusion->1, heavy occlusion->2.
pose: List, typical pose->0, atypical pose->1.
"""
all_bboxes = all_ground_truth[cur_record]
bboxes = []
blur = []
expression = []
illumination = []
invalid = []
occlusion = []
pose = []
for bbox in all_bboxes:
bbox = bbox.split()
_x1, _y1, _w, _h, _blur, _expression, _illumination, _invalid, _occlusion, _pose = [int(_.strip()) for _ in bbox]
# _w = max(_w, 1)
# _h = max(_h, 1)
# ymin = _y1 * 1.
# xmin = _x1 * 1.
# ymax = (_y1 + _h - 1) * 1.
# xmax = (_x1 + _w - 1) * 1.
_w = max(_w, 0)
_h = max(_h, 0)
ymin = _y1 * 1.
xmin = _x1 * 1.
ymax = (_y1 + _h) * 1.
xmax = (_x1 + _w) * 1.
bboxes.append((ymin, xmin, ymax, xmax))
blur.append(_blur)
expression.append(_expression)
illumination.append(_illumination)
invalid.append(_invalid)
occlusion.append(_occlusion)
pose.append(_pose)
return bboxes, blur, expression, illumination, invalid, occlusion, pose
def _process_image_files_batch(coder, thread_index, ranges, name, directory, all_records, num_shards, all_ground_truth):
    """Processes and saves list of images as TFRecord in 1 thread.
    Args:
      coder: instance of ImageCoder to provide TensorFlow image coding utils.
      thread_index: integer, unique batch to run index is within [0, len(ranges)).
      ranges: list of pairs of integers specifying ranges of each batches to
        analyze in parallel.
      name: string, unique identifier specifying the data set
      directory: string; the path of all datas
      all_records: list of string tuples; the first of each tuple is the sub-directory of the record, the second is the image filename.
      num_shards: integer number of shards for this data set.
      all_ground_truth: all the annotations of the faces in this data set.
    """
    # Each thread produces N shards where N = int(num_shards / num_threads).
    # For instance, if num_shards = 128, and the num_threads = 2, then the first
    # thread would produce shards [0, 64).
    num_threads = len(ranges)
    assert not num_shards % num_threads
    num_shards_per_batch = int(num_shards / num_threads)
    # Evenly split this thread's record range into per-shard sub-ranges.
    shard_ranges = np.linspace(ranges[thread_index][0],
                               ranges[thread_index][1],
                               num_shards_per_batch + 1).astype(int)
    num_files_in_thread = ranges[thread_index][1] - ranges[thread_index][0]
    counter = 0
    for s in range(num_shards_per_batch):
        # Generate a sharded version of the file name, e.g. 'train-00002-of-00010'
        shard = thread_index * num_shards_per_batch + s
        output_filename = '%s-%.5d-of-%.5d' % (name, shard, num_shards)
        output_file = os.path.join(FLAGS.output_directory, output_filename)
        writer = tf.python_io.TFRecordWriter(output_file)
        shard_counter = 0
        files_in_shard = np.arange(shard_ranges[s], shard_ranges[s + 1], dtype=int)
        for i in files_in_shard:
            cur_record = all_records[i]
            filename = os.path.join(directory, cur_record)
            # Read/validate the image, look up its annotations, serialize.
            image_buffer, height, width = _process_image(filename, coder)
            bboxes, blur, expression, illumination, invalid, occlusion, pose = _find_image_bounding_boxes(cur_record, all_ground_truth, height, width)
            example = _convert_to_example(filename, cur_record, image_buffer, bboxes, blur, expression, illumination, invalid, occlusion, pose, height, width)
            writer.write(example.SerializeToString())
            shard_counter += 1
            counter += 1
            if not counter % 1000:
                print('%s [thread %d]: Processed %d of %d images in thread batch.' %
                      (datetime.now(), thread_index, counter, num_files_in_thread))
                sys.stdout.flush()
        writer.close()
        print('%s [thread %d]: Wrote %d images to %s' %
              (datetime.now(), thread_index, shard_counter, output_file))
        sys.stdout.flush()
        shard_counter = 0
    print('%s [thread %d]: Wrote %d images to %d shards.' %
          (datetime.now(), thread_index, counter, num_files_in_thread))
    sys.stdout.flush()
def _process_image_files(name, directory, all_records, num_shards, all_ground_truth):
    """Process and save list of images as TFRecord of Example protos.
    Args:
      name: string, unique identifier specifying the data set
      directory: string; the path of all datas
      all_records: list of string tuples; the first of each tuple is the sub-directory of the record, the second is the image filename.
      num_shards: integer number of shards for this data set.
      all_ground_truth: all the annotations of the faces in this data set.
    """
    # Break all images into batches with a [ranges[i][0], ranges[i][1]].
    # NOTE(review): np.int is deprecated in modern NumPy — confirm the pinned
    # NumPy version before upgrading.
    spacing = np.linspace(0, len(all_records), FLAGS.num_threads + 1).astype(np.int)
    ranges = []
    threads = []
    for i in range(len(spacing) - 1):
        ranges.append([spacing[i], spacing[i + 1]])
    # Launch a thread for each batch.
    print('Launching %d threads for spacings: %s' % (FLAGS.num_threads, ranges))
    sys.stdout.flush()
    # Create a mechanism for monitoring when all threads are finished.
    coord = tf.train.Coordinator()
    # Create a generic TensorFlow-based utility for converting all image codings.
    coder = ImageCoder()
    # (Re-binds the unused `threads` list created above.)
    threads = []
    for thread_index in range(len(ranges)):
        args = (coder, thread_index, ranges, name, directory, all_records, num_shards, all_ground_truth)
        t = threading.Thread(target=_process_image_files_batch, args=args)
        t.start()
        threads.append(t)
    # Wait for all the threads to terminate.
    coord.join(threads)
    print('%s: Finished writing all %d images in data set.' %
          (datetime.now(), len(all_records)))
    sys.stdout.flush()
def _process_dataset(name, directory, split_name, num_shards, all_ground_truth):
    """Process a complete data set and save it as a TFRecord.
    Args:
      name: string, unique identifier specifying the data set.
      directory: string, root path to the data set.
      split_name: sub-path to the data set.
      num_shards: integer number of shards for this data set.
      all_ground_truth: all the annotations of the faces in this data set.
    """
    jpeg_file_path = os.path.join(directory, split_name, 'images')
    # Shuffle the records deterministically: with a fixed seed, shuffling the
    # list directly yields the same permutation as shuffling index positions.
    shuffled_records = list(all_ground_truth.keys())
    random.seed(RANDOM_SEED)
    random.shuffle(shuffled_records)
    _process_image_files(name, jpeg_file_path, shuffled_records, num_shards, all_ground_truth)
def get_train_or_val_gt(anna_file):
    """Parse a WIDER FACE ground-truth file into {image_name: [bbox_line, ...]}.

    The file format repeats: an image-name line, a line holding the number of
    annotated objects, then that many annotation lines. Images with zero
    objects are assumed not to occur (original assumption).
    """
    all_images = {}
    cur_image = None
    # State: -1 -> expect an image name, 0 -> expect the object count,
    #        >0 -> that many annotation lines still to read.
    remaining = -1
    with open(anna_file) as handle:
        for raw in handle:
            text = raw.strip()
            if not text:
                continue
            if remaining < 0:
                # encounter a new image
                assert (('jpg' in text) or ('--' in text)), 'mismatch records in {}'.format(anna_file)
                cur_image = text
                all_images[text] = []
                remaining = 0  # next non-empty line carries the object count
            elif remaining > 0:
                all_images[cur_image].append(text)
                remaining -= 1
                if remaining == 0:
                    remaining = -1  # wait for next image
            else:
                remaining = int(text)
    return all_images
# Ground-truth annotation files for each split, resolved under the
# WIDER FACE 'wider_face_split' directory.
all_images_list = {
    'train': os.path.join(FLAGS.dataset_directory, 'wider_face_split', 'wider_face_train_bbx_gt.txt'),
    'valid': os.path.join(FLAGS.dataset_directory, 'wider_face_split', 'wider_face_val_bbx_gt.txt')
}
def main(unused_argv):
    """Entry point: validate FLAGS, then convert both splits to TFRecords."""
    # Shard counts must be divisible by the thread count so each thread
    # produces a whole number of shards.
    assert not FLAGS.train_shards % FLAGS.num_threads, (
        'Please make the FLAGS.num_threads commensurate with FLAGS.train_shards')
    assert not FLAGS.validation_shards % FLAGS.num_threads, (
        'Please make the FLAGS.num_threads commensurate with '
        'FLAGS.validation_shards')
    print('Saving results to %s' % FLAGS.output_directory)
    # Run it!
    os.makedirs(FLAGS.output_directory, exist_ok=True)
    _process_dataset('valid', FLAGS.dataset_directory, FLAGS.validation_split, FLAGS.validation_shards, get_train_or_val_gt(all_images_list['valid']))
    _process_dataset('train', FLAGS.dataset_directory, FLAGS.train_split, FLAGS.train_shards, get_train_or_val_gt(all_images_list['train']))
    # Disabled instrumentation for the average-resize-scale statistic.
    #global final_scaless
    #global count
    #print(final_scaless/count)
if __name__ == '__main__':
    # tf.app.run parses FLAGS and dispatches to main().
    tf.app.run()
# Example invocation:
# python dataset/convert_tfrecords.py --dataset_directory=/data1/home/changanwang/widerface/ --output_directory=/data1/home/changanwang/widerface/tfrecords
| 39.233853 | 155 | 0.686478 |
17d296d40cc1759c7265ce628143918aae5b2cba | 726 | py | Python | kings_and_pigs/entities/stats_heart.py | yumauri/kings_and_pigs | f22be6bbe988f430d72b5ce55283b78d3cc3fe9e | [
"MIT"
] | null | null | null | kings_and_pigs/entities/stats_heart.py | yumauri/kings_and_pigs | f22be6bbe988f430d72b5ce55283b78d3cc3fe9e | [
"MIT"
] | 2 | 2021-12-02T11:05:53.000Z | 2021-12-23T18:19:23.000Z | kings_and_pigs/entities/stats_heart.py | yumauri/kings_and_pigs | f22be6bbe988f430d72b5ce55283b78d3cc3fe9e | [
"MIT"
] | null | null | null | from ..functions import loader
from .animation import Animation
from .animated_entity import AnimatedEntity
# Loader bound to the directory holding the heart/coin sprite sheets;
# call with a file name to get the sprite image(s).
load_image = loader("kings_and_pigs/data/sprites/12-Live and Coins")
class StatsHeart(AnimatedEntity):
    """HUD heart sprite: loops its idle animation until told to disappear."""

    def __init__(self, x, y):
        idle_frames = load_image("Small Heart Idle (18x14).png")
        hit_frames = load_image("Small Heart Hit (18x14).png")
        self.animation_idle = Animation(idle_frames, 8)
        self.animation_hit = Animation(hit_frames, 2)
        super().__init__(x, y, self.animation_idle)

    def disappear(self):
        """Play the hit animation once, then remove the sprite."""
        self.change_animation(self.animation_hit)
        # kill() takes no arguments, so the bound method itself is the callback.
        self.animation_hit.on_done(self.kill)
aaadbccc68aa4cce7a5688620bc0f855daa975c6 | 1,918 | py | Python | openpyxl2/chartsheet/tests/test_relation.py | j5int/openpyxl2 | 3c82567c33d6cad5b0b26eea97da7bb39ba7f4c8 | [
"MIT"
] | null | null | null | openpyxl2/chartsheet/tests/test_relation.py | j5int/openpyxl2 | 3c82567c33d6cad5b0b26eea97da7bb39ba7f4c8 | [
"MIT"
] | null | null | null | openpyxl2/chartsheet/tests/test_relation.py | j5int/openpyxl2 | 3c82567c33d6cad5b0b26eea97da7bb39ba7f4c8 | [
"MIT"
] | null | null | null | from __future__ import absolute_import
# Copyright (c) 2010-2018 openpyxl
import pytest
from openpyxl2.xml.functions import fromstring, tostring
from openpyxl2.tests.helper import compare_xml
@pytest.fixture
def SheetBackgroundPicture():
from ..chartsheet import SheetBackgroundPicture
return SheetBackgroundPicture
class TestSheetBackgroundPicture:
def test_read(self, SheetBackgroundPicture):
src = """
<picture r:id="rId5" xmlns:r="http://schemas.openxmlformats.org/officeDocument/2006/relationships" />
"""
xml = fromstring(src)
sheetBackgroundPicture = SheetBackgroundPicture.from_tree(xml)
assert sheetBackgroundPicture.id == "rId5"
def test_write(self, SheetBackgroundPicture):
sheetBackgroundPicture = SheetBackgroundPicture(id="rId5")
expected = """
<picture r:id="rId5" xmlns:r="http://schemas.openxmlformats.org/officeDocument/2006/relationships" />
"""
xml = tostring(sheetBackgroundPicture.to_tree())
diff = compare_xml(xml, expected)
assert diff is None, diff
@pytest.fixture
def DrawingHF():
from ..chartsheet import DrawingHF
return DrawingHF
class TestDrawingHF:
def test_read(self, DrawingHF):
src = """
<drawingHF lho="7" lhf="6" xmlns:r="http://schemas.openxmlformats.org/officeDocument/2006/relationships" r:id="rId3"/>
"""
xml = fromstring(src)
drawingHF = DrawingHF.from_tree(xml)
assert drawingHF.lho == 7
def test_write(self, DrawingHF):
drawingHF = DrawingHF(lho=7, lhf=6, id='rId3')
expected = """
<drawingHF lho="7" lhf="6" xmlns:r="http://schemas.openxmlformats.org/officeDocument/2006/relationships" r:id="rId3" />
"""
xml = tostring(drawingHF.to_tree("drawingHF"))
diff = compare_xml(xml, expected)
assert diff is None, diff
| 32.508475 | 131 | 0.675704 |
a283d944ba564e2e2b8b59ed5d80a6ba92827a34 | 16,368 | py | Python | mathics/builtin/specialfns/bessel.py | adamantinum/mathics-core | c8f5d7a7645a17a6a8833f750fb93a352d1ac9b4 | [
"Apache-2.0"
] | 90 | 2021-09-11T14:14:00.000Z | 2022-03-29T02:08:29.000Z | mathics/builtin/specialfns/bessel.py | adamantinum/mathics-core | c8f5d7a7645a17a6a8833f750fb93a352d1ac9b4 | [
"Apache-2.0"
] | 187 | 2021-09-13T01:00:41.000Z | 2022-03-31T11:52:52.000Z | mathics/builtin/specialfns/bessel.py | adamantinum/mathics-core | c8f5d7a7645a17a6a8833f750fb93a352d1ac9b4 | [
"Apache-2.0"
] | 10 | 2021-10-05T15:44:26.000Z | 2022-03-21T12:34:33.000Z | """
Bessel and Related Functions
"""
import mpmath
from mathics.builtin.arithmetic import _MPMathFunction
from mathics.builtin.base import Builtin
from mathics.core.atoms import from_mpmath
from mathics.core.number import machine_precision, get_precision, PrecisionValueError
from mathics.core.number import prec as _prec
from mathics.core.attributes import (
listable,
n_hold_first,
numeric_function,
protected,
read_protected,
)
class _Bessel(_MPMathFunction):
    # Common base for the Bessel-family builtins: numeric, listable, and
    # always called with two arguments (order n and argument z).
    attributes = listable | numeric_function | protected | read_protected
    nargs = 2
class AiryAi(_MPMathFunction):
    """
    <dl>
    <dt>'AiryAi[$x$]'
      <dd>returns the Airy function Ai($x$).
    </dl>
    Exact values:
    >> AiryAi[0]
     = 3 ^ (1 / 3) / (3 Gamma[2 / 3])
    'AiryAi' can be evaluated numerically:
    >> AiryAi[0.5]
     = 0.231694
    >> AiryAi[0.5 + I]
     = 0.157118 - 0.24104 I
    >> Plot[AiryAi[x], {x, -10, 10}]
     = -Graphics-
    """
    # Symbolic derivative: D[AiryAi[x], x] -> AiryAiPrime[x].
    rules = {
        "Derivative[1][AiryAi]": "AiryAiPrime",
    }
    mpmath_name = "airyai"  # mpmath backend for numeric evaluation
    summary_text = "Airy's function Ai"
    sympy_name = "airyai"  # SymPy counterpart for symbolic evaluation
class AiryAiPrime(_MPMathFunction):
    """
    <dl>
    <dt>'AiryAiPrime[$x$]'
      <dd>returns the derivative of the Airy function 'AiryAi[$x$]'.
    </dl>
    Exact values:
    >> AiryAiPrime[0]
     = -3 ^ (2 / 3) / (3 Gamma[1 / 3])
    Numeric evaluation:
    >> AiryAiPrime[0.5]
     = -0.224911
    """
    # mpmath has no dedicated name; numeric evaluation goes through
    # get_mpmath_function below (airyai with derivative=1).
    mpmath_name = ""
    rules = {
        "Derivative[1][AiryAiPrime]": "(#1 AiryAi[#1])&",
    }
    summary_text = "derivative of the Airy's function Ai"
    sympy_name = "airyaiprime"
    def get_mpmath_function(self, args):
        # Ai'(x) via mpmath's derivative flag.
        return lambda x: mpmath.airyai(x, derivative=1)
class AiryAiZero(Builtin):
    """
    <dl>
    <dt>'AiryAiZero[$k$]'
      <dd>returns the $k$th zero of the Airy function Ai($z$).
    </dl>
    >> N[AiryAiZero[1]]
     = -2.33811
    #> AiryAiZero[1]
     = AiryAiZero[1]
    #> AiryAiZero[1.]
     = AiryAiZero[1.]
    #> AiryAi[AiryAiZero[1]]
     = 0
    #> N[AiryAiZero[2], 100]
     = -4.087949444130970616636988701457391060224764699108529754984160876025121946836047394331169160758270562
    """
    # TODO: 'AiryAiZero[$k$, $x0$]' - $k$th zero less than x0
    attributes = listable | n_hold_first | numeric_function | protected | read_protected
    # Exact rewrite: Ai evaluated at one of its own zeros is exactly 0.
    rules = {
        "AiryAi[AiryAiZero[k_]]": "0",
    }
    summary_text = "kth zero of the Airy's function Ai"
    def apply_N(self, k, precision, evaluation):
        "N[AiryAiZero[k_Integer], precision_]"
        # Resolve the requested precision; bail out on an invalid spec.
        try:
            d = get_precision(precision, evaluation)
        except PrecisionValueError:
            return
        if d is None:
            p = machine_precision
        else:
            p = _prec(d)
        k_int = k.get_int_value()
        # Compute the zero at the requested working precision.
        with mpmath.workprec(p):
            result = mpmath.airyaizero(k_int)
        return from_mpmath(result, d)
class AiryBi(_MPMathFunction):
    """
    <dl>
    <dt>'AiryBi[$x$]'
      <dd>returns the Airy function of the second kind Bi($x$).
    </dl>
    Exact values:
    >> AiryBi[0]
     = 3 ^ (5 / 6) / (3 Gamma[2 / 3])
    Numeric evaluation:
    >> AiryBi[0.5]
     = 0.854277
    >> AiryBi[0.5 + I]
     = 0.688145 + 0.370815 I
    >> Plot[AiryBi[x], {x, -10, 2}]
     = -Graphics-
    """
    mpmath_name = "airybi"  # mpmath backend for numeric evaluation
    # Symbolic derivative: D[AiryBi[x], x] -> AiryBiPrime[x].
    rules = {
        "Derivative[1][AiryBi]": "AiryBiPrime",
    }
    summary_text = "Airy's function Bi"
    sympy_name = "airybi"  # SymPy counterpart for symbolic evaluation
class AiryBiPrime(_MPMathFunction):
    """
    <dl>
    <dt>'AiryBiPrime[$x$]'
      <dd>returns the derivative of the Airy function of the second
    kind 'AiryBi[$x$]'.
    </dl>
    Exact values:
    >> AiryBiPrime[0]
     = 3 ^ (1 / 6) / Gamma[1 / 3]
    Numeric evaluation:
    >> AiryBiPrime[0.5]
     = 0.544573
    """
    # mpmath has no dedicated name; numeric evaluation goes through
    # get_mpmath_function below (airybi with derivative=1).
    mpmath_name = ""
    sympy_name = "airybiprime"
    rules = {
        "Derivative[1][AiryBiPrime]": "(#1 AiryBi[#1])&",
    }
    summary_text = "derivative of the Airy's function Bi"
    def get_mpmath_function(self, args):
        # Bi'(x) via mpmath's derivative flag.
        return lambda x: mpmath.airybi(x, derivative=1)
class AiryBiZero(Builtin):
    """
    <dl>
    <dt>'AiryBiZero[$k$]'
      <dd>returns the $k$th zero of the Airy function Bi($z$).
    </dl>
    >> N[AiryBiZero[1]]
     = -1.17371
    #> AiryBiZero[1]
     = AiryBiZero[1]
    #> AiryBiZero[1.]
     = AiryBiZero[1.]
    #> AiryBi[AiryBiZero[1]]
     = 0
    #> N[AiryBiZero[2], 100]
     = -3.271093302836352715680228240166413806300935969100284801485032396261130864238742879252000673830055014
    """
    # TODO: 'AiryBiZero[$k$, $x0$]' - $k$th zero less than x0
    attributes = listable | n_hold_first | numeric_function | protected | read_protected
    # Exact rewrite: Bi evaluated at one of its own zeros is exactly 0.
    rules = {
        "AiryBi[AiryBiZero[z_]]": "0",
    }
    summary_text = "kth zero of the Airy's function Bi"
    def apply_N(self, k, precision, evaluation):
        "N[AiryBiZero[k_Integer], precision_]"
        # Resolve the requested precision; bail out on an invalid spec.
        try:
            d = get_precision(precision, evaluation)
        except PrecisionValueError:
            return
        if d is None:
            p = machine_precision
        else:
            p = _prec(d)
        k_int = k.get_int_value()
        # Compute the zero at the requested working precision.
        with mpmath.workprec(p):
            result = mpmath.airybizero(k_int)
        return from_mpmath(result, d)
class AngerJ(_Bessel):
    """
    <dl>
    <dt>'AngerJ[$n$, $z$]'
      <dd>returns the Anger function J_$n$($z$).
    </dl>
    >> AngerJ[1.5, 3.5]
     = 0.294479
    >> Plot[AngerJ[1, x], {x, -10, 10}]
     = -Graphics-
    """
    # TODO: Associated Anger function AngerJ[v, u, z]
    mpmath_name = "angerj"  # mpmath backend for numeric evaluation
    summary_text = "Anger's function J"
    sympy_name = ""  # no SymPy counterpart
# Bessel Functions
class BesselI(_Bessel):
    """
    Modified Bessel function of the first kind. See <url>https://en.wikipedia.org/wiki/Bessel_function#Modified_Bessel_functions:_I%CE%B1,_K%CE%B1</url>.
    <dl>
    <dt>'BesselI[$n$, $z$]'
      <dd>returns the modified Bessel function of the first kind I_$n$($z$).
    </dl>
    >> BesselI[1.5, 4]
     = 8.17263
    >> Plot[BesselI[0, x], {x, 0, 5}]
     = -Graphics-
    """
    # Symbolic derivative: d/dz I_n(z) = (I_{n-1}(z) + I_{n+1}(z)) / 2.
    rules = {
        "Derivative[0, 1][BesselI]": "((BesselI[-1 + #1, #2] + BesselI[1 + #1, #2])/2)&",
    }
    mpmath_name = "besseli"  # mpmath backend for numeric evaluation
    sympy_name = "besseli"  # SymPy counterpart for symbolic evaluation
    # Fixed: I_n is the modified Bessel function of the FIRST kind; the
    # summary previously said "second kind".
    summary_text = "modified Bessel's function of the first kind"
class BesselJ(_Bessel):
    """
    Bessel function of the first kind. See <url>https://en.wikipedia.org/wiki/Bessel_function#Bessel_functions_of_the_first_kind:_J%CE%B1</url>.
    <dl>
    <dt>'BesselJ[$n$, $z$]'
      <dd>returns the Bessel function of the first kind J_$n$($z$).
    </dl>
    >> BesselJ[0, 5.2]
     = -0.11029
    #> BesselJ[2.5, 1]
     = 0.0494968
    >> D[BesselJ[n, z], z]
     = -BesselJ[1 + n, z] / 2 + BesselJ[-1 + n, z] / 2
    #> BesselJ[0., 0.]
     = 1.
    >> Plot[BesselJ[0, x], {x, 0, 10}]
     = -Graphics-
    """
    # TODO: Sympy Backend is not as powerful as Mathematica
    """
    >> BesselJ[1/2, x]
     = Sqrt[2 / Pi] Sin[x] / Sqrt[x]
    """
    mpmath_name = "besselj"  # mpmath backend for numeric evaluation
    # Symbolic derivative: d/dz J_n(z) = (J_{n-1}(z) - J_{n+1}(z)) / 2.
    rules = {
        "Derivative[0,1][BesselJ]": "(BesselJ[#1- 1, #2] / 2 - BesselJ[#1 + 1, #2] / 2)&",
    }
    summary_text = "Bessel's function of the first kind"
    sympy_name = "besselj"  # SymPy counterpart for symbolic evaluation
class BesselK(_Bessel):
    """
    Modified Bessel function of the second kind. See <url>https://en.wikipedia.org/wiki/Bessel_function#Modified_Bessel_functions:_I%CE%B1,_K%CE%B1</url>.
    <dl>
    <dt>'BesselK[$n$, $z$]'
      <dd>returns the modified Bessel function of the second kind K_$n$($z$).
    </dl>
    >> BesselK[1.5, 4]
     = 0.014347
    >> Plot[BesselK[0, x], {x, 0, 5}]
     = -Graphics-
    """
    mpmath_name = "besselk"  # mpmath backend for numeric evaluation
    # Symbolic derivative: d/dz K_n(z) = -(K_{n-1}(z) + K_{n+1}(z)) / 2.
    rules = {
        "Derivative[0, 1][BesselK]": "((-BesselK[-1 + #1, #2] - BesselK[1 + #1, #2])/2)&",
    }
    summary_text = "modified Bessel's function of the second kind"
    sympy_name = "besselk"  # SymPy counterpart for symbolic evaluation
class BesselY(_Bessel):
    """
    <dl>
    <dt>'BesselY[$n$, $z$]'
      <dd>returns the Bessel function of the second kind Y_$n$($z$).
    </dl>
    >> BesselY[1.5, 4]
     = 0.367112
    ## Returns ComplexInfinity instead
    ## #> BesselY[0., 0.]
    ## = -Infinity
    >> Plot[BesselY[0, x], {x, 0, 10}]
     = -Graphics-
    """
    # TODO: Special Values
    """
    >> BesselY[0, 0]
     = -Infinity
    """
    # Symbolic derivative: d/dz Y_n(z) = (Y_{n-1}(z) - Y_{n+1}(z)) / 2.
    rules = {
        "Derivative[0,1][BesselY]": "(BesselY[-1 + #1, #2] / 2 - BesselY[1 + #1, #2] / 2)&",
    }
    mpmath_name = "bessely"  # mpmath backend for numeric evaluation
    summary_text = "Bessel's function of the second kind"
    sympy_name = "bessely"  # SymPy counterpart for symbolic evaluation
class BesselJZero(_Bessel):
    """
    <dl>
    <dt>'BesselJZero[$n$, $k$]'
      <dd>returns the $k$th zero of the Bessel function of the first kind J_$n$($z$).
    </dl>
    >> N[BesselJZero[0, 1]]
     = 2.40483
    >> N[BesselJZero[0, 1], 10]
     = 2.404825558
    """
    mpmath_name = "besseljzero"  # mpmath backend for numeric evaluation
    summary_text = "kth zero of the Bessel's function of the first kind"
    sympy_name = ""  # no SymPy counterpart
class BesselYZero(_Bessel):
    """
    <dl>
    <dt>'BesselYZero[$n$, $k$]'
      <dd>returns the $k$th zero of the Bessel function of the second kind Y_$n$($z$).
    </dl>
    >> N[BesselYZero[0, 1]]
     = 0.893577
    >> N[BesselYZero[0, 1], 10]
     = 0.8935769663
    """
    mpmath_name = "besselyzero"  # mpmath backend for numeric evaluation
    summary_text = "kth zero of the Bessel's function of the second kind"
    sympy_name = ""  # no SymPy counterpart
# Hankel Functions
class HankelH1(_Bessel):
    """
    <dl>
    <dt>'HankelH1[$n$, $z$]'
      <dd>returns the Hankel function of the first kind H_$n$^1 ($z$).
    </dl>
    >> HankelH1[1.5, 4]
     = 0.185286 + 0.367112 I
    """
    mpmath_name = "hankel1"  # mpmath backend for numeric evaluation
    # Symbolic derivative: d/dz H1_n(z) = (H1_{n-1}(z) - H1_{n+1}(z)) / 2.
    rules = {
        "Derivative[0, 1][HankelH1]": "((HankelH1[-1 + #1, #2] - HankelH1[1 + #1, #2])/2)&",
    }
    summary_text = "Hankel's function of the first kind"
    sympy_name = "hankel1"  # SymPy counterpart for symbolic evaluation
class HankelH2(_Bessel):
    """
    <dl>
    <dt>'HankelH2[$n$, $z$]'
      <dd>returns the Hankel function of the second kind H_$n$^2 ($z$).
    </dl>
    >> HankelH2[1.5, 4]
     = 0.185286 - 0.367112 I
    """
    mpmath_name = "hankel2"  # mpmath backend for numeric evaluation
    # Symbolic derivative: d/dz H2_n(z) = (H2_{n-1}(z) - H2_{n+1}(z)) / 2.
    rules = {
        "Derivative[0, 1][HankelH2]": "((HankelH2[-1 + #1, #2] - HankelH2[1 + #1, #2])/2)&",
    }
    summary_text = "Hankel's function of the second kind"
    sympy_name = "hankel2"  # SymPy counterpart for symbolic evaluation
# Kelvin Functions
class KelvinBei(_Bessel):
    """
    <dl>
    <dt>'KelvinBei[$z$]'
      <dd>returns the Kelvin function bei($z$).
    <dt>'KelvinBei[$n$, $z$]'
      <dd>returns the Kelvin function bei_$n$($z$).
    </dl>
    >> KelvinBei[0.5]
     = 0.0624932
    >> KelvinBei[1.5 + I]
     = 0.326323 + 0.755606 I
    >> KelvinBei[0.5, 0.25]
     = 0.370153
    >> Plot[KelvinBei[x], {x, 0, 10}]
     = -Graphics-
    """
    mpmath_name = "bei"  # mpmath backend for numeric evaluation
    rules = {
        # One-argument form defaults the order n to 0.
        "KelvinBei[z_]": "KelvinBei[0, z]",
        "Derivative[1][KelvinBei]": "((2*KelvinBei[1, #1] - 2*KelvinBer[1, #1])/(2*Sqrt[2]))&",
    }
    summary_text = "Kelvin's function bei"
    sympy_name = ""  # no SymPy counterpart
class KelvinBer(_Bessel):
    """
    <dl>
    <dt>'KelvinBer[$z$]'
      <dd>returns the Kelvin function ber($z$).
    <dt>'KelvinBer[$n$, $z$]'
      <dd>returns the Kelvin function ber_$n$($z$).
    </dl>
    >> KelvinBer[0.5]
     = 0.999023
    >> KelvinBer[1.5 + I]
     = 1.1162 - 0.117944 I
    >> KelvinBer[0.5, 0.25]
     = 0.148824
    >> Plot[KelvinBer[x], {x, 0, 10}]
     = -Graphics-
    """
    mpmath_name = "ber"  # mpmath backend for numeric evaluation
    rules = {
        # One-argument form defaults the order n to 0.
        "KelvinBer[z_]": "KelvinBer[0, z]",
        "Derivative[1][KelvinBer]": "((2*KelvinBei[1, #1] + 2*KelvinBer[1, #1])/(2*Sqrt[2]))&",
    }
    summary_text = "Kelvin's function ber"
    sympy_name = ""  # no SymPy counterpart
class KelvinKei(_Bessel):
    """
    <dl>
    <dt>'KelvinKei[$z$]'
      <dd>returns the Kelvin function kei($z$).
    <dt>'KelvinKei[$n$, $z$]'
      <dd>returns the Kelvin function kei_$n$($z$).
    </dl>
    >> KelvinKei[0.5]
     = -0.671582
    >> KelvinKei[1.5 + I]
     = -0.248994 + 0.303326 I
    >> KelvinKei[0.5, 0.25]
     = -2.0517
    >> Plot[KelvinKei[x], {x, 0, 10}]
     = -Graphics-
    """
    mpmath_name = "kei"  # mpmath backend for numeric evaluation
    rules = {
        # One-argument form defaults the order n to 0.
        "KelvinKei[z_]": "KelvinKei[0, z]",
    }
    summary_text = "Kelvin's function kei"
    sympy_name = ""  # no SymPy counterpart
class KelvinKer(_Bessel):
    """
    <dl>
    <dt>'KelvinKer[$z$]'
      <dd>returns the Kelvin function ker($z$).
    <dt>'KelvinKer[$n$, $z$]'
      <dd>returns the Kelvin function ker_$n$($z$).
    </dl>
    >> KelvinKer[0.5]
     = 0.855906
    >> KelvinKer[1.5 + I]
     = -0.167162 - 0.184404 I
    >> KelvinKer[0.5, 0.25]
     = 0.450023
    >> Plot[KelvinKer[x], {x, 0, 10}]
     = -Graphics-
    """
    mpmath_name = "ker"  # mpmath backend for numeric evaluation
    rules = {
        # One-argument form defaults the order n to 0.
        "KelvinKer[z_]": "KelvinKer[0, z]",
    }
    summary_text = "Kelvin's function ker"
    sympy_name = ""  # no SymPy counterpart
class SphericalBesselJ(_Bessel):
    """
    Spherical Bessel function of the first kind. See <url>https://en.wikipedia.org/wiki/Bessel_function#Spherical_Bessel_functions</url>
    <dl>
    <dt>'SphericalBesselJ[$n$, $z$]'
      <dd>returns the spherical Bessel function of the first kind Y_$n$($z$).
    </dl>
    >> SphericalBesselJ[1, 5.2]
     = -0.122771
    ## FIXME: should be able to tolerate Plotting at 0.
    >> Plot[SphericalBesselJ[1, x], {x, 0.1, 10}]
     = -Graphics-
    """
    # Defined via the cylindrical function: j_n(z) = sqrt(pi/(2 z)) J_{n+1/2}(z).
    rules = {"SphericalBesselJ[n_, z_]": "Sqrt[Pi / 2] / Sqrt[z] BesselJ[n + 0.5, z]"}
    summary_text = "spherical Bessel's function of the first kind"
    sympy_name = "jn"  # SymPy counterpart for symbolic evaluation
class SphericalBesselY(_Bessel):
    """
    Spherical Bessel function of the second kind. See <url>https://en.wikipedia.org/wiki/Bessel_function#Spherical_Bessel_functions</url>
    <dl>
    <dt>'SphericalBesselY[$n$, $z$]'
      <dd>returns the spherical Bessel function of the second kind Y_$n$($z$).
    </dl>
    >> SphericalBesselY[1, 5.5]
     = 0.104853
    >> Plot[SphericalBesselY[1, x], {x, 0, 10}]
     = -Graphics-
    """
    # Defined via the cylindrical function: y_n(z) = sqrt(pi/(2 z)) Y_{n+1/2}(z).
    rules = {"SphericalBesselY[n_, z_]": "Sqrt[Pi / 2] / Sqrt[z] BesselY[n + 0.5, z]"}
    summary_text = "spherical Bessel's function of the second kind"
    sympy_name = "yn"  # SymPy counterpart for symbolic evaluation
class SphericalHankelH1(_Bessel):
    """
    Spherical Hankel function of the first kind. See <url>https://en.wikipedia.org/wiki/Bessel_function#Spherical_Bessel_functions</url>
    <dl>
    <dt>'SphericalHankelH1[$n$, $z$]'
      <dd>returns the spherical Hankel function of the first kind h_$n$^(1)($z$).
    </dl>
    >> SphericalHankelH1[3, 1.5]
     = 0.0283246 - 3.78927 I
    """
    # Defined via the cylindrical function: h1_n(z) = sqrt(pi/(2 z)) H1_{n+1/2}(z).
    rules = {"SphericalHankelH1[n_, z_]": "Sqrt[Pi / 2] / Sqrt[z] HankelH1[n + 0.5, z]"}
    summary_text = "spherical Hankel's function of the first kind"
    sympy_name = "hankel1"  # SymPy counterpart for symbolic evaluation
class SphericalHankelH2(_Bessel):
    """
    Spherical Hankel function of the second kind. See <url>https://en.wikipedia.org/wiki/Bessel_function#Spherical_Bessel_functions</url>
    <dl>
    <dt>'SphericalHankelH2[$n$, $z$]'
      <dd>returns the spherical Hankel function of the second kind h_$n$^(2)($z$).
    </dl>
    >> SphericalHankelH2[3, 1.5]
     = 0.0283246 + 3.78927 I
    """
    # Defined via the cylindrical function: h2_n(z) = sqrt(pi/(2 z)) H2_{n+1/2}(z).
    rules = {"SphericalHankelH2[n_, z_]": "Sqrt[Pi / 2] / Sqrt[z] HankelH2[n + 0.5, z]"}
    summary_text = "spherical Hankel's function of the second kind"
    sympy_name = "hankel2"  # SymPy counterpart for symbolic evaluation
class StruveH(_Bessel):
    """
    <dl>
    <dt>'StruveH[$n$, $z$]'
      <dd>returns the Struve function H_$n$($z$).
    </dl>
    >> StruveH[1.5, 3.5]
     = 1.13192
    >> Plot[StruveH[0, x], {x, 0, 10}]
     = -Graphics-
    """
    mpmath_name = "struveh"  # mpmath backend for numeric evaluation
    # Fixed typo: "Struvel's" -> "Struve's" (function named after H. Struve).
    summary_text = "Struve's function H"
    sympy_name = ""  # no SymPy counterpart
class StruveL(_Bessel):
    """
    <dl>
    <dt>'StruveL[$n$, $z$]'
      <dd>returns the modified Struve function L_$n$($z$).
    </dl>
    >> StruveL[1.5, 3.5]
     = 4.41126
    >> Plot[StruveL[0, x], {x, 0, 5}]
     = -Graphics-
    """
    mpmath_name = "struvel"  # mpmath backend for numeric evaluation
    # Fixed typo: "Struvel's" -> "Struve's" (function named after H. Struve).
    summary_text = "modified Struve's function L"
    sympy_name = ""  # no SymPy counterpart
class WeberE(_Bessel):
    """
    <dl>
    <dt>'WeberE[$n$, $z$]'
      <dd>returns the Weber function E_$n$($z$).
    </dl>
    >> WeberE[1.5, 3.5]
     = -0.397256
    >> Plot[WeberE[1, x], {x, -10, 10}]
     = -Graphics-
    """
    # TODO: Associated Weber function WeberE[v, u, z]
    mpmath_name = "webere"  # mpmath backend for numeric evaluation
    summary_text = "Weber's function E"
    sympy_name = ""  # no SymPy counterpart
| 21.536842 | 154 | 0.559445 |
3efe213518780ecd8f164ca0f16977d328ade375 | 5,334 | py | Python | tests/extensions/plugins/test_webhook.py | ballon3/PlanB | 890735d0c09f68e7dd603f577e3d5bcbf818a2ab | [
"CC-BY-4.0"
] | 1 | 2019-10-09T16:42:32.000Z | 2019-10-09T16:42:32.000Z | tests/extensions/plugins/test_webhook.py | ballon3/PlanB | 890735d0c09f68e7dd603f577e3d5bcbf818a2ab | [
"CC-BY-4.0"
] | 14 | 2020-03-24T17:54:18.000Z | 2022-02-10T19:43:59.000Z | tests/extensions/plugins/test_webhook.py | ballon3/PlanB | 890735d0c09f68e7dd603f577e3d5bcbf818a2ab | [
"CC-BY-4.0"
] | 2 | 2019-10-12T09:35:02.000Z | 2019-10-15T07:18:25.000Z | from unittest import mock
import pytest
import requests
from django.core.serializers import serialize
from saleor.extensions.manager import get_extensions_manager
from saleor.extensions.plugins.webhook import create_hmac_signature
from saleor.extensions.plugins.webhook.payloads import (
generate_customer_payload,
generate_order_payload,
generate_product_payload,
)
from saleor.extensions.plugins.webhook.tasks import trigger_webhooks_for_event
from saleor.webhook import WebhookEventType
@pytest.mark.vcr
@mock.patch(
    "saleor.extensions.plugins.webhook.tasks.requests.post", wraps=requests.post
)
def test_trigger_webhooks_for_event(
    mock_request, webhook, order_with_lines, permission_manage_orders
):
    # The task should POST the serialized payload to the webhook's target URL
    # with the event-type and domain headers (no HMAC header: no secret key).
    webhook.service_account.permissions.add(permission_manage_orders)
    webhook.target_url = "https://webhook.site/f0fc9979-cbd4-47b7-8705-1acb03fff1d0"
    webhook.save()
    expected_data = serialize("json", [order_with_lines])
    trigger_webhooks_for_event(WebhookEventType.ORDER_CREATED, expected_data)
    expected_headers = {
        "X-Saleor-Event": "order_created",
        "X-Saleor-Domain": "mirumee.com",
    }
    mock_request.assert_called_once_with(
        webhook.target_url, data=expected_data, headers=expected_headers, timeout=10
    )
@pytest.mark.vcr
@mock.patch(
    "saleor.extensions.plugins.webhook.tasks.requests.post", wraps=requests.post
)
def test_trigger_webhooks_for_event_with_secret_key(
    mock_request, webhook, order_with_lines, permission_manage_orders
):
    # With a secret key configured, the POST must also carry the HMAC-SHA256
    # signature header computed over the payload.
    webhook.service_account.permissions.add(permission_manage_orders)
    webhook.target_url = "https://webhook.site/f0fc9979-cbd4-47b7-8705-1acb03fff1d0"
    webhook.secret_key = "secret_key"
    webhook.save()
    expected_data = serialize("json", [order_with_lines])
    trigger_webhooks_for_event(WebhookEventType.ORDER_CREATED, expected_data)
    expected_signature = create_hmac_signature(
        expected_data, webhook.secret_key, "utf-8"
    )
    expected_headers = {
        "X-Saleor-Event": "order_created",
        "X-Saleor-Domain": "mirumee.com",
        "X-Saleor-HMAC-SHA256": f"sha1={expected_signature}",
    }
    mock_request.assert_called_once_with(
        webhook.target_url, data=expected_data, headers=expected_headers, timeout=10
    )
@mock.patch("saleor.extensions.plugins.webhook.plugin.trigger_webhooks_for_event.delay")
def test_order_created(mocked_webhook_trigger, settings, order_with_lines):
    # With the webhook plugin enabled, order_created must enqueue the
    # ORDER_CREATED event with the serialized order payload.
    settings.PLUGINS = ["saleor.extensions.plugins.webhook.plugin.WebhookPlugin"]
    manager = get_extensions_manager()
    manager.order_created(order_with_lines)
    expected_data = generate_order_payload(order_with_lines)
    mocked_webhook_trigger.assert_called_once_with(
        WebhookEventType.ORDER_CREATED, expected_data
    )
@mock.patch("saleor.extensions.plugins.webhook.plugin.trigger_webhooks_for_event.delay")
def test_customer_created(mocked_webhook_trigger, settings, customer_user):
    # customer_created must enqueue CUSTOMER_CREATED with the customer payload.
    settings.PLUGINS = ["saleor.extensions.plugins.webhook.plugin.WebhookPlugin"]
    manager = get_extensions_manager()
    manager.customer_created(customer_user)
    expected_data = generate_customer_payload(customer_user)
    mocked_webhook_trigger.assert_called_once_with(
        WebhookEventType.CUSTOMER_CREATED, expected_data
    )
@mock.patch("saleor.extensions.plugins.webhook.plugin.trigger_webhooks_for_event.delay")
def test_order_fully_paid(mocked_webhook_trigger, settings, order_with_lines):
    # order_fully_paid must enqueue ORDER_FULLYPAID with the order payload.
    settings.PLUGINS = ["saleor.extensions.plugins.webhook.plugin.WebhookPlugin"]
    manager = get_extensions_manager()
    manager.order_fully_paid(order_with_lines)
    expected_data = generate_order_payload(order_with_lines)
    mocked_webhook_trigger.assert_called_once_with(
        WebhookEventType.ORDER_FULLYPAID, expected_data
    )
@mock.patch("saleor.extensions.plugins.webhook.plugin.trigger_webhooks_for_event.delay")
def test_product_created(mocked_webhook_trigger, settings, product):
    # product_created must enqueue PRODUCT_CREATED with the product payload.
    settings.PLUGINS = ["saleor.extensions.plugins.webhook.plugin.WebhookPlugin"]
    manager = get_extensions_manager()
    manager.product_created(product)
    expected_data = generate_product_payload(product)
    mocked_webhook_trigger.assert_called_once_with(
        WebhookEventType.PRODUCT_CREATED, expected_data
    )
@mock.patch("saleor.extensions.plugins.webhook.plugin.trigger_webhooks_for_event.delay")
def test_order_updated(mocked_webhook_trigger, settings, order_with_lines):
    """Updating an order must enqueue an ORDER_UPDATED webhook with the order payload."""
    settings.PLUGINS = ["saleor.extensions.plugins.webhook.plugin.WebhookPlugin"]
    extension_manager = get_extensions_manager()
    extension_manager.order_updated(order_with_lines)
    payload = generate_order_payload(order_with_lines)
    mocked_webhook_trigger.assert_called_once_with(
        WebhookEventType.ORDER_UPDATED, payload
    )
@mock.patch("saleor.extensions.plugins.webhook.plugin.trigger_webhooks_for_event.delay")
def test_order_cancelled(mocked_webhook_trigger, settings, order_with_lines):
    """Cancelling an order must enqueue an ORDER_CANCELLED webhook with the order payload."""
    settings.PLUGINS = ["saleor.extensions.plugins.webhook.plugin.WebhookPlugin"]
    extension_manager = get_extensions_manager()
    extension_manager.order_cancelled(order_with_lines)
    payload = generate_order_payload(order_with_lines)
    mocked_webhook_trigger.assert_called_once_with(
        WebhookEventType.ORDER_CANCELLED, payload
    )
| 37.56338 | 88 | 0.792463 |
7ae7a57d7a8fefa7210152e36f921292c49edd52 | 159,368 | py | Python | python/ccxt/async_support/gateio.py | trasherdk/ccxt | c82d8e27e0286ede819235158761ce390eed26bb | [
"MIT"
] | null | null | null | python/ccxt/async_support/gateio.py | trasherdk/ccxt | c82d8e27e0286ede819235158761ce390eed26bb | [
"MIT"
] | 4 | 2021-12-14T06:19:10.000Z | 2022-03-19T02:39:29.000Z | python/ccxt/async_support/gateio.py | trasherdk/ccxt | c82d8e27e0286ede819235158761ce390eed26bb | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
# PLEASE DO NOT EDIT THIS FILE, IT IS GENERATED AND WILL BE OVERWRITTEN:
# https://github.com/ccxt/ccxt/blob/master/CONTRIBUTING.md#how-to-contribute-code
from ccxt.async_support.base.exchange import Exchange
import hashlib
from ccxt.base.errors import ExchangeError
from ccxt.base.errors import AuthenticationError
from ccxt.base.errors import PermissionDenied
from ccxt.base.errors import AccountNotEnabled
from ccxt.base.errors import AccountSuspended
from ccxt.base.errors import ArgumentsRequired
from ccxt.base.errors import BadRequest
from ccxt.base.errors import BadSymbol
from ccxt.base.errors import InsufficientFunds
from ccxt.base.errors import InvalidOrder
from ccxt.base.errors import OrderNotFound
from ccxt.base.errors import NotSupported
from ccxt.base.errors import RateLimitExceeded
from ccxt.base.errors import ExchangeNotAvailable
from ccxt.base.decimal_to_precision import TICK_SIZE
from ccxt.base.precise import Precise
class gateio(Exchange):
def describe(self):
return self.deep_extend(super(gateio, self).describe(), {
'id': 'gateio',
'name': 'Gate.io',
'countries': ['KR'],
'rateLimit': 10 / 3, # 300 requests per second or 3.33ms
'version': 'v4',
'certified': True,
'pro': True,
'urls': {
'logo': 'https://user-images.githubusercontent.com/1294454/31784029-0313c702-b509-11e7-9ccc-bc0da6a0e435.jpg',
'doc': 'https://www.gate.io/docs/apiv4/en/index.html',
'www': 'https://gate.io/',
'api': {
'public': {
'wallet': 'https://api.gateio.ws/api/v4',
'futures': 'https://api.gateio.ws/api/v4',
'margin': 'https://api.gateio.ws/api/v4',
'delivery': 'https://api.gateio.ws/api/v4',
'spot': 'https://api.gateio.ws/api/v4',
'options': 'https://api.gateio.ws/api/v4',
},
'private': {
'withdrawals': 'https://api.gateio.ws/api/v4',
'wallet': 'https://api.gateio.ws/api/v4',
'futures': 'https://api.gateio.ws/api/v4',
'margin': 'https://api.gateio.ws/api/v4',
'delivery': 'https://api.gateio.ws/api/v4',
'spot': 'https://api.gateio.ws/api/v4',
'options': 'https://api.gateio.ws/api/v4',
},
},
'test': {
'public': {
'futures': 'https://fx-api-testnet.gateio.ws/api/v4',
'delivery': 'https://fx-api-testnet.gateio.ws/api/v4',
},
'private': {
'futures': 'https://fx-api-testnet.gateio.ws/api/v4',
'delivery': 'https://fx-api-testnet.gateio.ws/api/v4',
},
},
'referral': {
'url': 'https://www.gate.io/ref/2436035',
'discount': 0.2,
},
},
'has': {
'CORS': None,
'spot': True,
'margin': True,
'swap': True,
'future': True,
'option': True,
'cancelAllOrders': True,
'cancelOrder': True,
'createMarketOrder': False,
'createOrder': True,
'fetchBalance': True,
'fetchBorrowRate': False,
'fetchBorrowRateHistories': False,
'fetchBorrowRateHistory': False,
'fetchBorrowRates': False,
'fetchClosedOrders': True,
'fetchCurrencies': True,
'fetchDepositAddress': True,
'fetchDeposits': True,
'fetchFundingFees': True,
'fetchFundingHistory': True,
'fetchFundingRate': True,
'fetchFundingRateHistory': True,
'fetchFundingRates': True,
'fetchIndexOHLCV': True,
'fetchLeverageTiers': True,
'fetchMarketLeverageTiers': 'emulated',
'fetchMarkets': True,
'fetchMarkOHLCV': True,
'fetchMyTrades': True,
'fetchNetworkDepositAddress': True,
'fetchOHLCV': True,
'fetchOpenOrders': True,
'fetchOrder': True,
'fetchOrderBook': True,
'fetchPositions': True,
'fetchPremiumIndexOHLCV': False,
'fetchTicker': True,
'fetchTickers': True,
'fetchTime': False,
'fetchTrades': True,
'fetchTradingFee': True,
'fetchTradingFees': True,
'fetchWithdrawals': True,
'setLeverage': True,
'transfer': True,
'withdraw': True,
},
'api': {
'public': {
'wallet': {
'get': {
'wallet/currency_chains': 1.5,
},
},
'spot': {
'get': {
'currencies': 1,
'currencies/{currency}': 1,
'currency_pairs': 1,
'currency_pairs/{currency_pair}': 1,
'tickers': 1,
'order_book': 1,
'trades': 1,
'candlesticks': 1,
},
},
'margin': {
'get': {
'currency_pairs': 1,
'currency_pairs/{currency_pair}': 1,
'cross/currencies': 1,
'cross/currencies/{currency}': 1,
'funding_book': 1,
},
},
'futures': {
'get': {
'{settle}/contracts': 1.5,
'{settle}/contracts/{contract}': 1.5,
'{settle}/order_book': 1.5,
'{settle}/trades': 1.5,
'{settle}/candlesticks': 1.5,
'{settle}/tickers': 1.5,
'{settle}/funding_rate': 1.5,
'{settle}/insurance': 1.5,
'{settle}/contract_stats': 1.5,
'{settle}/liq_orders': 1.5,
},
},
'delivery': {
'get': {
'{settle}/contracts': 1.5,
'{settle}/contracts/{contract}': 1.5,
'{settle}/order_book': 1.5,
'{settle}/trades': 1.5,
'{settle}/candlesticks': 1.5,
'{settle}/tickers': 1.5,
'{settle}/insurance': 1.5,
},
},
'options': {
'get': {
'underlyings': 1.5,
'expirations': 1.5,
'contracts': 1.5,
'contracts/{contract}': 1.5,
'settlements': 1.5,
'settlements/{contract}': 1.5,
'order_book': 1.5,
'tickers': 1.5,
'underlying/tickers/{underlying}': 1.5,
'candlesticks': 1.5,
'underlying/candlesticks': 1.5,
'trades': 1.5,
},
},
},
'private': {
'withdrawals': {
'post': {
'': 3000, # 3000 = 10 seconds
},
'delete': {
'{withdrawal_id}': 300,
},
},
'wallet': {
'get': {
'deposit_address': 300,
'withdrawals': 300,
'deposits': 300,
'sub_account_transfers': 300,
'withdraw_status': 300,
'sub_account_balances': 300,
'fee': 300,
'total_balance': 300,
},
'post': {
'transfers': 300,
'sub_account_transfers': 300,
},
},
'spot': {
'get': {
'accounts': 1,
'open_orders': 1,
'orders': 1,
'orders/{order_id}': 1,
'my_trades': 1,
'price_orders': 1,
'price_orders/{order_id}': 1,
},
'post': {
'batch_orders': 1,
'orders': 1,
'cancel_batch_orders': 1,
'price_orders': 1,
},
'delete': {
'orders': 1,
'orders/{order_id}': 1,
'price_orders': 1,
'price_orders/{order_id}': 1,
},
},
'margin': {
'get': {
'accounts': 1.5,
'account_book': 1.5,
'funding_accounts': 1.5,
'loans': 1.5,
'loans/{loan_id}': 1.5,
'loans/{loan_id}/repayment': 1.5,
'loan_records': 1.5,
'loan_records/{load_record_id}': 1.5,
'auto_repay': 1.5,
'transferable': 1.5,
'cross/accounts': 1.5,
'cross/account_book': 1.5,
'cross/loans': 1.5,
'cross/loans/{loan_id}': 1.5,
'cross/loans/repayments': 1.5,
'cross/transferable': 1.5,
'loan_records/{loan_record_id}': 1.5,
'borrowable': 1.5,
'cross/repayments': 1.5,
'cross/borrowable': 1.5,
},
'post': {
'loans': 1.5,
'merged_loans': 1.5,
'loans/{loan_id}/repayment': 1.5,
'auto_repay': 1.5,
'cross/loans': 1.5,
'cross/loans/repayments': 1.5,
'cross/repayments': 1.5,
},
'patch': {
'loans/{loan_id}': 1.5,
'loan_records/{loan_record_id}': 1.5,
},
'delete': {
'loans/{loan_id}': 1.5,
},
},
'futures': {
'get': {
'{settle}/accounts': 1.5,
'{settle}/account_book': 1.5,
'{settle}/positions': 1.5,
'{settle}/positions/{contract}': 1.5,
'{settle}/orders': 1.5,
'{settle}/orders/{order_id}': 1.5,
'{settle}/my_trades': 1.5,
'{settle}/position_close': 1.5,
'{settle}/liquidates': 1.5,
'{settle}/price_orders': 1.5,
'{settle}/price_orders/{order_id}': 1.5,
'{settle}/dual_comp/positions/{contract}': 1.5,
},
'post': {
'{settle}/positions/{contract}/margin': 1.5,
'{settle}/positions/{contract}/leverage': 1.5,
'{settle}/positions/{contract}/risk_limit': 1.5,
'{settle}/dual_mode': 1.5,
'{settle}/dual_comp/positions/{contract}': 1.5,
'{settle}/dual_comp/positions/{contract}/margin': 1.5,
'{settle}/dual_comp/positions/{contract}/leverage': 1.5,
'{settle}/dual_comp/positions/{contract}/risk_limit': 1.5,
'{settle}/orders': 1.5,
'{settle}/price_orders': 1.5,
},
'delete': {
'{settle}/orders': 1.5,
'{settle}/orders/{order_id}': 1.5,
'{settle}/price_orders': 1.5,
'{settle}/price_orders/{order_id}': 1.5,
},
},
'delivery': {
'get': {
'{settle}/accounts': 1.5,
'{settle}/account_book': 1.5,
'{settle}/positions': 1.5,
'{settle}/positions/{contract}': 1.5,
'{settle}/orders': 1.5,
'{settle}/orders/{order_id}': 1.5,
'{settle}/my_trades': 1.5,
'{settle}/position_close': 1.5,
'{settle}/liquidates': 1.5,
'{settle}/price_orders': 1.5,
'{settle}/price_orders/{order_id}': 1.5,
'{settle}/settlements': 1.5,
},
'post': {
'{settle}/positions/{contract}/margin': 1.5,
'{settle}/positions/{contract}/leverage': 1.5,
'{settle}/positions/{contract}/risk_limit': 1.5,
'{settle}/orders': 1.5,
'{settle}/price_orders': 1.5,
},
'delete': {
'{settle}/orders': 1.5,
'{settle}/orders/{order_id}': 1.5,
'{settle}/price_orders': 1.5,
'{settle}/price_orders/{order_id}': 1.5,
},
},
'options': {
'get': {
'accounts': 1.5,
'account_book': 1.5,
'positions': 1.5,
'positions/{contract}': 1.5,
'position_close': 1.5,
'orders': 1.5,
'orders/{order_id}': 1.5,
'my_trades': 1.5,
},
'post': {
'orders': 1.5,
},
'delete': {
'orders': 1.5,
'orders/{order_id}': 1.5,
},
},
},
},
'timeframes': {
'10s': '10s',
'1m': '1m',
'5m': '5m',
'15m': '15m',
'30m': '30m',
'1h': '1h',
'4h': '4h',
'8h': '8h',
'1d': '1d',
'7d': '7d',
'1w': '7d',
},
# copied from gateiov2
'commonCurrencies': {
'88MPH': 'MPH',
'AXIS': 'Axis DeFi',
'BIFI': 'Bitcoin File',
'BOX': 'DefiBox',
'BTCBEAR': 'BEAR',
'BTCBULL': 'BULL',
'BYN': 'BeyondFi',
'EGG': 'Goose Finance',
'GTC': 'Game.com', # conflict with Gitcoin and Gastrocoin
'GTC_HT': 'Game.com HT',
'GTC_BSC': 'Game.com BSC',
'HIT': 'HitChain',
'MM': 'Million', # conflict with MilliMeter
'MPH': 'Morpher', # conflict with 88MPH
'RAI': 'Rai Reflex Index', # conflict with RAI Finance
'SBTC': 'Super Bitcoin',
'TNC': 'Trinity Network Credit',
'TON': 'TONToken',
'VAI': 'VAIOT',
},
'requiredCredentials': {
'apiKey': True,
'secret': True,
},
'headers': {
'X-Gate-Channel-Id': 'ccxt',
},
'options': {
'createOrder': {
'expiration': 86400, # for conditional orders
},
'networks': {
'TRC20': 'TRX',
'ERC20': 'ETH',
'BEP20': 'BSC',
},
'accountsByType': {
'funding': 'spot',
'spot': 'spot',
'margin': 'margin',
'future': 'futures',
'futures': 'futures',
'delivery': 'delivery',
},
'defaultType': 'spot',
'defaultMarginType': 'isolated',
'swap': {
'fetchMarkets': {
'settlementCurrencies': ['usdt', 'btc'],
},
},
'future': {
'fetchMarkets': {
'settlementCurrencies': ['usdt', 'btc'],
},
},
},
'precisionMode': TICK_SIZE,
'fees': {
'trading': {
'tierBased': True,
'feeSide': 'get',
'percentage': True,
'maker': self.parse_number('0.002'),
'taker': self.parse_number('0.002'),
'tiers': {
# volume is in BTC
'maker': [
[self.parse_number('0'), self.parse_number('0.002')],
[self.parse_number('1.5'), self.parse_number('0.00185')],
[self.parse_number('3'), self.parse_number('0.00175')],
[self.parse_number('6'), self.parse_number('0.00165')],
[self.parse_number('12.5'), self.parse_number('0.00155')],
[self.parse_number('25'), self.parse_number('0.00145')],
[self.parse_number('75'), self.parse_number('0.00135')],
[self.parse_number('200'), self.parse_number('0.00125')],
[self.parse_number('500'), self.parse_number('0.00115')],
[self.parse_number('1250'), self.parse_number('0.00105')],
[self.parse_number('2500'), self.parse_number('0.00095')],
[self.parse_number('3000'), self.parse_number('0.00085')],
[self.parse_number('6000'), self.parse_number('0.00075')],
[self.parse_number('11000'), self.parse_number('0.00065')],
[self.parse_number('20000'), self.parse_number('0.00055')],
[self.parse_number('40000'), self.parse_number('0.00055')],
[self.parse_number('75000'), self.parse_number('0.00055')],
],
'taker': [
[self.parse_number('0'), self.parse_number('0.002')],
[self.parse_number('1.5'), self.parse_number('0.00195')],
[self.parse_number('3'), self.parse_number('0.00185')],
[self.parse_number('6'), self.parse_number('0.00175')],
[self.parse_number('12.5'), self.parse_number('0.00165')],
[self.parse_number('25'), self.parse_number('0.00155')],
[self.parse_number('75'), self.parse_number('0.00145')],
[self.parse_number('200'), self.parse_number('0.00135')],
[self.parse_number('500'), self.parse_number('0.00125')],
[self.parse_number('1250'), self.parse_number('0.00115')],
[self.parse_number('2500'), self.parse_number('0.00105')],
[self.parse_number('3000'), self.parse_number('0.00095')],
[self.parse_number('6000'), self.parse_number('0.00085')],
[self.parse_number('11000'), self.parse_number('0.00075')],
[self.parse_number('20000'), self.parse_number('0.00065')],
[self.parse_number('40000'), self.parse_number('0.00065')],
[self.parse_number('75000'), self.parse_number('0.00065')],
],
},
},
'swap': {
'tierBased': True,
'feeSide': 'base',
'percentage': True,
'maker': self.parse_number('0.0'),
'taker': self.parse_number('0.0005'),
'tiers': {
'maker': [
[self.parse_number('0'), self.parse_number('0.0000')],
[self.parse_number('1.5'), self.parse_number('-0.00005')],
[self.parse_number('3'), self.parse_number('-0.00005')],
[self.parse_number('6'), self.parse_number('-0.00005')],
[self.parse_number('12.5'), self.parse_number('-0.00005')],
[self.parse_number('25'), self.parse_number('-0.00005')],
[self.parse_number('75'), self.parse_number('-0.00005')],
[self.parse_number('200'), self.parse_number('-0.00005')],
[self.parse_number('500'), self.parse_number('-0.00005')],
[self.parse_number('1250'), self.parse_number('-0.00005')],
[self.parse_number('2500'), self.parse_number('-0.00005')],
[self.parse_number('3000'), self.parse_number('-0.00008')],
[self.parse_number('6000'), self.parse_number('-0.01000')],
[self.parse_number('11000'), self.parse_number('-0.01002')],
[self.parse_number('20000'), self.parse_number('-0.01005')],
[self.parse_number('40000'), self.parse_number('-0.02000')],
[self.parse_number('75000'), self.parse_number('-0.02005')],
],
'taker': [
[self.parse_number('0'), self.parse_number('0.00050')],
[self.parse_number('1.5'), self.parse_number('0.00048')],
[self.parse_number('3'), self.parse_number('0.00046')],
[self.parse_number('6'), self.parse_number('0.00044')],
[self.parse_number('12.5'), self.parse_number('0.00042')],
[self.parse_number('25'), self.parse_number('0.00040')],
[self.parse_number('75'), self.parse_number('0.00038')],
[self.parse_number('200'), self.parse_number('0.00036')],
[self.parse_number('500'), self.parse_number('0.00034')],
[self.parse_number('1250'), self.parse_number('0.00032')],
[self.parse_number('2500'), self.parse_number('0.00030')],
[self.parse_number('3000'), self.parse_number('0.00030')],
[self.parse_number('6000'), self.parse_number('0.00030')],
[self.parse_number('11000'), self.parse_number('0.00030')],
[self.parse_number('20000'), self.parse_number('0.00030')],
[self.parse_number('40000'), self.parse_number('0.00030')],
[self.parse_number('75000'), self.parse_number('0.00030')],
],
},
},
},
# https://www.gate.io/docs/apiv4/en/index.html#label-list
'exceptions': {
'exact': {
'INVALID_PARAM_VALUE': BadRequest,
'INVALID_PROTOCOL': BadRequest,
'INVALID_ARGUMENT': BadRequest,
'INVALID_REQUEST_BODY': BadRequest,
'MISSING_REQUIRED_PARAM': ArgumentsRequired,
'BAD_REQUEST': BadRequest,
'INVALID_CONTENT_TYPE': BadRequest,
'NOT_ACCEPTABLE': BadRequest,
'METHOD_NOT_ALLOWED': BadRequest,
'NOT_FOUND': ExchangeError,
'INVALID_CREDENTIALS': AuthenticationError,
'INVALID_KEY': AuthenticationError,
'IP_FORBIDDEN': AuthenticationError,
'READ_ONLY': PermissionDenied,
'INVALID_SIGNATURE': AuthenticationError,
'MISSING_REQUIRED_HEADER': AuthenticationError,
'REQUEST_EXPIRED': AuthenticationError,
'ACCOUNT_LOCKED': AccountSuspended,
'FORBIDDEN': PermissionDenied,
'SUB_ACCOUNT_NOT_FOUND': ExchangeError,
'SUB_ACCOUNT_LOCKED': AccountSuspended,
'MARGIN_BALANCE_EXCEPTION': ExchangeError,
'MARGIN_TRANSFER_FAILED': ExchangeError,
'TOO_MUCH_FUTURES_AVAILABLE': ExchangeError,
'FUTURES_BALANCE_NOT_ENOUGH': InsufficientFunds,
'ACCOUNT_EXCEPTION': ExchangeError,
'SUB_ACCOUNT_TRANSFER_FAILED': ExchangeError,
'ADDRESS_NOT_USED': ExchangeError,
'TOO_FAST': RateLimitExceeded,
'WITHDRAWAL_OVER_LIMIT': ExchangeError,
'API_WITHDRAW_DISABLED': ExchangeNotAvailable,
'INVALID_WITHDRAW_ID': ExchangeError,
'INVALID_WITHDRAW_CANCEL_STATUS': ExchangeError,
'INVALID_PRECISION': InvalidOrder,
'INVALID_CURRENCY': BadSymbol,
'INVALID_CURRENCY_PAIR': BadSymbol,
'POC_FILL_IMMEDIATELY': ExchangeError,
'ORDER_NOT_FOUND': OrderNotFound,
'CLIENT_ID_NOT_FOUND': OrderNotFound,
'ORDER_CLOSED': InvalidOrder,
'ORDER_CANCELLED': InvalidOrder,
'QUANTITY_NOT_ENOUGH': InvalidOrder,
'BALANCE_NOT_ENOUGH': InsufficientFunds,
'MARGIN_NOT_SUPPORTED': InvalidOrder,
'MARGIN_BALANCE_NOT_ENOUGH': InsufficientFunds,
'AMOUNT_TOO_LITTLE': InvalidOrder,
'AMOUNT_TOO_MUCH': InvalidOrder,
'REPEATED_CREATION': InvalidOrder,
'LOAN_NOT_FOUND': OrderNotFound,
'LOAN_RECORD_NOT_FOUND': OrderNotFound,
'NO_MATCHED_LOAN': ExchangeError,
'NOT_MERGEABLE': ExchangeError,
'NO_CHANGE': ExchangeError,
'REPAY_TOO_MUCH': ExchangeError,
'TOO_MANY_CURRENCY_PAIRS': InvalidOrder,
'TOO_MANY_ORDERS': InvalidOrder,
'MIXED_ACCOUNT_TYPE': InvalidOrder,
'AUTO_BORROW_TOO_MUCH': ExchangeError,
'TRADE_RESTRICTED': InsufficientFunds,
'USER_NOT_FOUND': AccountNotEnabled,
'CONTRACT_NO_COUNTER': ExchangeError,
'CONTRACT_NOT_FOUND': BadSymbol,
'RISK_LIMIT_EXCEEDED': ExchangeError,
'INSUFFICIENT_AVAILABLE': InsufficientFunds,
'LIQUIDATE_IMMEDIATELY': InvalidOrder,
'LEVERAGE_TOO_HIGH': InvalidOrder,
'LEVERAGE_TOO_LOW': InvalidOrder,
'ORDER_NOT_OWNED': ExchangeError,
'ORDER_FINISHED': ExchangeError,
'POSITION_CROSS_MARGIN': ExchangeError,
'POSITION_IN_LIQUIDATION': ExchangeError,
'POSITION_IN_CLOSE': ExchangeError,
'POSITION_EMPTY': InvalidOrder,
'REMOVE_TOO_MUCH': ExchangeError,
'RISK_LIMIT_NOT_MULTIPLE': ExchangeError,
'RISK_LIMIT_TOO_HIGH': ExchangeError,
'RISK_LIMIT_TOO_lOW': ExchangeError,
'PRICE_TOO_DEVIATED': InvalidOrder,
'SIZE_TOO_LARGE': InvalidOrder,
'SIZE_TOO_SMALL': InvalidOrder,
'PRICE_OVER_LIQUIDATION': InvalidOrder,
'PRICE_OVER_BANKRUPT': InvalidOrder,
'ORDER_POC_IMMEDIATE': InvalidOrder,
'INCREASE_POSITION': InvalidOrder,
'CONTRACT_IN_DELISTING': ExchangeError,
'INTERNAL': ExchangeNotAvailable,
'SERVER_ERROR': ExchangeNotAvailable,
'TOO_BUSY': ExchangeNotAvailable,
},
},
'broad': {},
})
async def fetch_markets(self, params={}):
result = []
type, query = self.handle_market_type_and_params('fetchMarkets', None, params)
if type == 'spot' or type == 'margin':
result = await self.fetch_spot_markets(query)
if type == 'swap' or type == 'future':
result = await self.fetch_contract_markets(query) # futures and swaps
if type == 'option':
result = await self.fetch_option_markets(query)
resultLength = len(result)
if resultLength == 0:
raise ExchangeError(self.id + " does not support '" + type + "' type, set exchange.options['defaultType'] to " + "'spot', 'margin', 'swap', 'future' or 'option'") # eslint-disable-line quotes
return result
    async def fetch_spot_markets(self, params):
        """Fetch spot currency pairs, merge in margin-pair metadata(leverage,
        quote limits) by pair id, and return a list of unified market
        structures.  A pair is flagged 'margin': True when the margin endpoint
        reports a leverage value for it."""
        marginResponse = await self.publicMarginGetCurrencyPairs(params)
        spotMarketsResponse = await self.publicSpotGetCurrencyPairs(params)
        # index margin pairs by id for O(1) lookup while iterating spot pairs
        marginMarkets = self.index_by(marginResponse, 'id')
        #
        # Spot
        #
        #     [
        #         {
        #             "id":"QTUM_ETH",
        #             "base":"QTUM",
        #             "quote":"ETH",
        #             "fee":"0.2",
        #             "min_base_amount":"0.01",
        #             "min_quote_amount":"0.001",
        #             "amount_precision":3,
        #             "precision":6,
        #             "trade_status":"tradable",
        #             "sell_start":0,
        #             "buy_start":0
        #         }
        #     ]
        #
        # Margin
        #
        #     [
        #         {
        #             "id": "ETH_USDT",
        #             "base": "ETH",
        #             "quote": "USDT",
        #             "leverage": 3,
        #             "min_base_amount": "0.01",
        #             "min_quote_amount": "100",
        #             "max_quote_amount": "1000000"
        #         }
        #     ]
        #
        result = []
        for i in range(0, len(spotMarketsResponse)):
            market = spotMarketsResponse[i]
            id = self.safe_string(market, 'id')
            marginMarket = self.safe_value(marginMarkets, id)
            # spot fields take precedence over margin fields on key collisions
            market = self.deep_extend(marginMarket, market)
            baseId, quoteId = id.split('_')
            base = self.safe_currency_code(baseId)
            quote = self.safe_currency_code(quoteId)
            takerPercent = self.safe_string(market, 'fee')
            # 'maker_fee_rate' is not in the samples above; taker is the fallback
            makerPercent = self.safe_string(market, 'maker_fee_rate', takerPercent)
            amountPrecisionString = self.safe_string(market, 'amount_precision')
            pricePrecisionString = self.safe_string(market, 'precision')
            tradeStatus = self.safe_string(market, 'trade_status')
            leverage = self.safe_number(market, 'leverage')
            # only margin-enabled pairs carry a leverage field
            margin = leverage is not None
            result.append({
                'id': id,
                'symbol': base + '/' + quote,
                'base': base,
                'quote': quote,
                'settle': None,
                'baseId': baseId,
                'quoteId': quoteId,
                'settleId': None,
                'type': 'spot',
                'spot': True,
                'margin': margin,
                'swap': False,
                'future': False,
                'option': False,
                'active': (tradeStatus == 'tradable'),
                'contract': False,
                'linear': None,
                'inverse': None,
                # Fee is in %, so divide by 100
                'taker': self.parse_number(Precise.string_div(takerPercent, '100')),
                'maker': self.parse_number(Precise.string_div(makerPercent, '100')),
                'contractSize': None,
                'expiry': None,
                'expiryDatetime': None,
                'strike': None,
                'optionType': None,
                'precision': {
                    # API reports decimal places; convert to tick size(TICK_SIZE mode)
                    'amount': self.parse_number(self.parse_precision(amountPrecisionString)),
                    'price': self.parse_number(self.parse_precision(pricePrecisionString)),
                },
                'limits': {
                    'leverage': {
                        'min': self.parse_number('1'),
                        'max': self.safe_number(market, 'leverage', 1),
                    },
                    'amount': {
                        'min': self.safe_number(market, 'min_base_amount'),
                        'max': None,
                    },
                    'price': {
                        'min': None,
                        'max': None,
                    },
                    'cost': {
                        'min': self.safe_number(market, 'min_quote_amount'),
                        'max': None,
                    },
                },
                'info': market,
            })
        return result
async def fetch_contract_markets(self, params):
result = []
swapSettlementCurrencies = self.get_settlement_currencies('swap', 'fetchMarkets')
futureSettlementCurrencies = self.get_settlement_currencies('future', 'fetchMarkets')
for c in range(0, len(swapSettlementCurrencies)):
settleId = swapSettlementCurrencies[c]
query = params
query['settle'] = settleId
response = await self.publicFuturesGetSettleContracts(query)
for i in range(0, len(response)):
parsedMarket = self.parse_contract_market(response[i], settleId)
result.append(parsedMarket)
for c in range(0, len(futureSettlementCurrencies)):
settleId = futureSettlementCurrencies[c]
query = params
query['settle'] = settleId
response = await self.publicDeliveryGetSettleContracts(query)
for i in range(0, len(response)):
parsedMarket = self.parse_contract_market(response[i], settleId)
result.append(parsedMarket)
return result
    def parse_contract_market(self, market, settleId):
        """Parse one swap or delivery contract from the public contracts
        endpoints into a unified market structure.

        The contract 'name' is BASE_QUOTE for perpetual swaps and
        BASE_QUOTE_YYYYMMDD for delivery futures; the presence of the date
        segment selects marketType 'future' vs 'swap'.  *settleId* is the
        settlement-currency id used in the request path.
        """
        # Perpetual swap
        #     {
        #         "name": "BTC_USDT",
        #         "type": "direct",
        #         "quanto_multiplier": "0.0001",
        #         "ref_discount_rate": "0",
        #         "order_price_deviate": "0.5",
        #         "maintenance_rate": "0.005",
        #         "mark_type": "index",
        #         "last_price": "38026",
        #         "mark_price": "37985.6",
        #         "index_price": "37954.92",
        #         "funding_rate_indicative": "0.000219",
        #         "mark_price_round": "0.01",
        #         "funding_offset": 0,
        #         "in_delisting": False,
        #         "risk_limit_base": "1000000",
        #         "interest_rate": "0.0003",
        #         "order_price_round": "0.1",
        #         "order_size_min": 1,
        #         "ref_rebate_rate": "0.2",
        #         "funding_interval": 28800,
        #         "risk_limit_step": "1000000",
        #         "leverage_min": "1",
        #         "leverage_max": "100",
        #         "risk_limit_max": "8000000",
        #         "maker_fee_rate": "-0.00025",
        #         "taker_fee_rate": "0.00075",
        #         "funding_rate": "0.002053",
        #         "order_size_max": 1000000,
        #         "funding_next_apply": 1610035200,
        #         "short_users": 977,
        #         "config_change_time": 1609899548,
        #         "trade_size": 28530850594,
        #         "position_size": 5223816,
        #         "long_users": 455,
        #         "funding_impact_value": "60000",
        #         "orders_limit": 50,
        #         "trade_id": 10851092,
        #         "orderbook_id": 2129638396
        #     }
        #
        # Delivery Futures
        #     {
        #         "name": "BTC_USDT_20200814",
        #         "underlying": "BTC_USDT",
        #         "cycle": "WEEKLY",
        #         "type": "direct",
        #         "quanto_multiplier": "0.0001",
        #         "mark_type": "index",
        #         "last_price": "9017",
        #         "mark_price": "9019",
        #         "index_price": "9005.3",
        #         "basis_rate": "0.185095",
        #         "basis_value": "13.7",
        #         "basis_impact_value": "100000",
        #         "settle_price": "0",
        #         "settle_price_interval": 60,
        #         "settle_price_duration": 1800,
        #         "settle_fee_rate": "0.0015",
        #         "expire_time": 1593763200,
        #         "order_price_round": "0.1",
        #         "mark_price_round": "0.1",
        #         "leverage_min": "1",
        #         "leverage_max": "100",
        #         "maintenance_rate": "1000000",
        #         "risk_limit_base": "140.726652109199",
        #         "risk_limit_step": "1000000",
        #         "risk_limit_max": "8000000",
        #         "maker_fee_rate": "-0.00025",
        #         "taker_fee_rate": "0.00075",
        #         "ref_discount_rate": "0",
        #         "ref_rebate_rate": "0.2",
        #         "order_price_deviate": "0.5",
        #         "order_size_min": 1,
        #         "order_size_max": 1000000,
        #         "orders_limit": 50,
        #         "orderbook_id": 63,
        #         "trade_id": 26,
        #         "trade_size": 435,
        #         "position_size": 130,
        #         "config_change_time": 1593158867,
        #         "in_delisting": False
        #     }
        #
        id = self.safe_string(market, 'name')
        parts = id.split('_')
        baseId = self.safe_string(parts, 0)
        quoteId = self.safe_string(parts, 1)
        # date segment only exists for delivery contracts(BASE_QUOTE_YYYYMMDD)
        date = self.safe_string(parts, 2)
        base = self.safe_currency_code(baseId)
        quote = self.safe_currency_code(quoteId)
        settle = self.safe_currency_code(settleId)
        expiry = self.safe_timestamp(market, 'expire_time')
        symbol = ''
        marketType = 'swap'
        if date is not None:
            symbol = base + '/' + quote + ':' + settle + '-' + self.yymmdd(expiry, '')
            marketType = 'future'
        else:
            symbol = base + '/' + quote + ':' + settle
        # order price must stay within mark_price * (1 ± order_price_deviate)
        priceDeviate = self.safe_string(market, 'order_price_deviate')
        markPrice = self.safe_string(market, 'mark_price')
        minMultiplier = Precise.string_sub('1', priceDeviate)
        maxMultiplier = Precise.string_add('1', priceDeviate)
        minPrice = Precise.string_mul(minMultiplier, markPrice)
        maxPrice = Precise.string_mul(maxMultiplier, markPrice)
        takerPercent = self.safe_string(market, 'taker_fee_rate')
        makerPercent = self.safe_string(market, 'maker_fee_rate', takerPercent)
        # linear when quote and settlement currency coincide(e.g. BTC/USDT:USDT)
        isLinear = quote == settle
        return {
            'id': id,
            'symbol': symbol,
            'base': base,
            'quote': quote,
            'settle': settle,
            'baseId': baseId,
            'quoteId': quoteId,
            'settleId': settleId,
            'type': marketType,
            'spot': False,
            'margin': False,
            'swap': marketType == 'swap',
            'future': marketType == 'future',
            'option': marketType == 'option',
            'active': True,
            'contract': True,
            'linear': isLinear,
            'inverse': not isLinear,
            'taker': self.parse_number(Precise.string_div(takerPercent, '100')),  # Fee is in %, so divide by 100
            'maker': self.parse_number(Precise.string_div(makerPercent, '100')),
            'contractSize': self.safe_number(market, 'quanto_multiplier'),
            'expiry': expiry,
            'expiryDatetime': self.iso8601(expiry),
            'strike': None,
            'optionType': None,
            'precision': {
                # contract amounts are integer numbers of contracts
                'amount': self.parse_number('1'),
                'price': self.safe_number(market, 'order_price_round'),
            },
            'limits': {
                'leverage': {
                    'min': self.safe_number(market, 'leverage_min'),
                    'max': self.safe_number(market, 'leverage_max'),
                },
                'amount': {
                    'min': self.safe_number(market, 'order_size_min'),
                    'max': self.safe_number(market, 'order_size_max'),
                },
                'price': {
                    'min': self.parse_number(minPrice),
                    'max': self.parse_number(maxPrice),
                },
                'cost': {
                    'min': None,
                    'max': None,
                },
            },
            'info': market,
        }
async def fetch_option_markets(self, params={}):
result = []
underlyings = await self.fetch_option_underlyings()
for i in range(0, len(underlyings)):
underlying = underlyings[i]
query = params
query['underlying'] = underlying
response = await self.publicOptionsGetContracts(query)
#
# [
# {
# "orders_limit":"50",
# "order_size_max":"100000",
# "mark_price_round":"0.1",
# "order_size_min":"1",
# "position_limit":"1000000",
# "orderbook_id":"575967",
# "order_price_deviate":"0.9",
# "is_call":true, # True means Call False means Put
# "last_price":"93.9",
# "bid1_size":"0",
# "bid1_price":"0",
# "taker_fee_rate":"0.0004",
# "underlying":"BTC_USDT",
# "create_time":"1646381188",
# "price_limit_fee_rate":"0.1",
# "maker_fee_rate":"0.0004",
# "trade_id":"727",
# "order_price_round":"0.1",
# "settle_fee_rate":"0.0001",
# "trade_size":"1982",
# "ref_rebate_rate":"0",
# "name":"BTC_USDT-20220311-44000-C",
# "underlying_price":"39194.26",
# "strike_price":"44000",
# "multiplier":"0.0001",
# "ask1_price":"0",
# "ref_discount_rate":"0",
# "expiration_time":"1646985600",
# "mark_price":"12.15",
# "position_size":"4",
# "ask1_size":"0",
# "tag":"WEEK"
# }
# ]
#
for i in range(0, len(response)):
market = response[i]
id = self.safe_string(market, 'name')
parts = underlying.split('_')
baseId = self.safe_string(parts, 0)
quoteId = self.safe_string(parts, 1)
base = self.safe_currency_code(baseId)
quote = self.safe_currency_code(quoteId)
symbol = base + '/' + quote
expiry = self.safe_timestamp(market, 'expiration_time')
strike = self.safe_string(market, 'strike_price')
isCall = self.safe_value(market, 'is_call')
optionLetter = 'C' if isCall else 'P'
optionType = 'call' if isCall else 'put'
symbol = symbol + ':' + quote + '-' + self.yymmdd(expiry) + ':' + strike + ':' + optionLetter
priceDeviate = self.safe_string(market, 'order_price_deviate')
markPrice = self.safe_string(market, 'mark_price')
minMultiplier = Precise.string_sub('1', priceDeviate)
maxMultiplier = Precise.string_add('1', priceDeviate)
minPrice = Precise.string_mul(minMultiplier, markPrice)
maxPrice = Precise.string_mul(maxMultiplier, markPrice)
takerPercent = self.safe_string(market, 'taker_fee_rate')
makerPercent = self.safe_string(market, 'maker_fee_rate', takerPercent)
result.append({
'id': id,
'symbol': symbol,
'base': base,
'quote': quote,
'settle': quote,
'baseId': baseId,
'quoteId': quoteId,
'settleId': quoteId,
'type': 'option',
'spot': False,
'margin': False,
'swap': False,
'future': False,
'option': True,
'active': True,
'contract': True,
'linear': True,
'inverse': False,
'taker': self.parse_number(Precise.string_div(takerPercent, '100')), # Fee is in %, so divide by 100
'maker': self.parse_number(Precise.string_div(makerPercent, '100')),
'contractSize': self.parse_number('1'),
'expiry': expiry,
'expiryDatetime': self.iso8601(expiry),
'strike': strike,
'optionType': optionType,
'precision': {
'amount': self.parse_number('1'),
'price': self.safe_number(market, 'order_price_round'),
},
'limits': {
'leverage': {
'min': None,
'max': None,
},
'amount': {
'min': self.safe_number(market, 'order_size_min'),
'max': self.safe_number(market, 'order_size_max'),
},
'price': {
'min': self.parse_number(minPrice),
'max': self.parse_number(maxPrice),
},
'cost': {
'min': None,
'max': None,
},
},
'info': market,
})
return result
async def fetch_option_underlyings(self):
underlyingsResponse = await self.publicOptionsGetUnderlyings()
# [
# {
# "index_time":"1646915796",
# "name":"BTC_USDT",
# "index_price":"39142.73"
# }
# ]
underlyings = []
for i in range(0, len(underlyingsResponse)):
underlying = underlyingsResponse[i]
name = self.safe_string(underlying, 'name')
if name is not None:
underlyings.append(name)
return underlyings
def prepare_request(self, market):
if market is not None:
if market['contract']:
return {
'contract': market['id'],
'settle': market['settleId'],
}
else:
return {
'currency_pair': market['id'],
}
def get_settlement_currencies(self, type, method):
options = self.safe_value(self.options, type, {}) # ['BTC', 'USDT'] unified codes
fetchMarketsContractOptions = self.safe_value(options, method, {})
defaultSettle = ['usdt'] if (type == 'swap') else ['btc']
return self.safe_value(fetchMarketsContractOptions, 'settlementCurrencies', defaultSettle)
    async def fetch_currencies(self, params={}):
        """Fetch all currencies supported by the exchange (spot endpoint).

        :param params: extra parameters passed through to the endpoint
        :returns: a dict of unified currency structures keyed by currency code,
            or None when running against the sandbox/testnet backup API
            (which only supports future markets)
        """
        # sandbox/testnet only supports future markets
        apiBackup = self.safe_string(self.urls, 'apiBackup')
        if apiBackup is not None:
            return None
        response = await self.publicSpotGetCurrencies(params)
        #
        #     {
        #         "currency": "BCN",
        #         "delisted": False,
        #         "withdraw_disabled": True,
        #         "withdraw_delayed": False,
        #         "deposit_disabled": True,
        #         "trade_disabled": False
        #     }
        #
        result = {}
        # TODO: remove magic constants
        # NOTE(review): one hard-coded precision is applied to every currency -- confirm against exchange docs
        amountPrecision = self.parse_number('1e-6')
        for i in range(0, len(response)):
            entry = response[i]
            currencyId = self.safe_string(entry, 'currency')
            currencyIdLower = self.safe_string_lower(entry, 'currency')
            code = self.safe_currency_code(currencyId)
            delisted = self.safe_value(entry, 'delisted')
            withdrawDisabled = self.safe_value(entry, 'withdraw_disabled', False)
            depositDisabled = self.safe_value(entry, 'deposit_disabled', False)
            tradeDisabled = self.safe_value(entry, 'trade_disabled', False)
            withdrawEnabled = not withdrawDisabled
            depositEnabled = not depositDisabled
            tradeEnabled = not tradeDisabled
            listed = not delisted
            # a currency is only active when it is listed and fully enabled
            active = listed and tradeEnabled and withdrawEnabled and depositEnabled
            result[code] = {
                'id': currencyId,
                'lowerCaseId': currencyIdLower,
                'name': None,
                'code': code,
                'precision': amountPrecision,
                'info': entry,
                'active': active,
                'deposit': depositEnabled,
                'withdraw': withdrawEnabled,
                'fee': None,
                'fees': [],
                'limits': self.limits,
            }
        return result
    async def fetch_funding_rate(self, symbol, params={}):
        """Fetch the current funding rate for a perpetual swap market.

        :param symbol: unified symbol of a swap market
        :param params: extra parameters passed through to the endpoint
        :raises BadRequest: when the market is not a swap contract
        :returns: a unified funding rate structure
        """
        await self.load_markets()
        market = self.market(symbol)
        if not market['swap']:
            raise BadRequest('Funding rates only exist for swap contracts')
        request = self.prepare_request(market)
        response = await self.publicFuturesGetSettleContractsContract(self.extend(request, params))
        #
        # NOTE(review): the fixture below shows an array, but the response is
        # forwarded to parse_funding_rate as-is -- presumably the single-contract
        # endpoint returns one object; verify against the live API
        #
        #     [
        #         {
        #             "name": "BTC_USDT",
        #             "type": "direct",
        #             "quanto_multiplier": "0.0001",
        #             "ref_discount_rate": "0",
        #             "order_price_deviate": "0.5",
        #             "maintenance_rate": "0.005",
        #             "mark_type": "index",
        #             "last_price": "38026",
        #             "mark_price": "37985.6",
        #             "index_price": "37954.92",
        #             "funding_rate_indicative": "0.000219",
        #             "mark_price_round": "0.01",
        #             "funding_offset": 0,
        #             "in_delisting": False,
        #             "risk_limit_base": "1000000",
        #             "interest_rate": "0.0003",
        #             "order_price_round": "0.1",
        #             "order_size_min": 1,
        #             "ref_rebate_rate": "0.2",
        #             "funding_interval": 28800,
        #             "risk_limit_step": "1000000",
        #             "leverage_min": "1",
        #             "leverage_max": "100",
        #             "risk_limit_max": "8000000",
        #             "maker_fee_rate": "-0.00025",
        #             "taker_fee_rate": "0.00075",
        #             "funding_rate": "0.002053",
        #             "order_size_max": 1000000,
        #             "funding_next_apply": 1610035200,
        #             "short_users": 977,
        #             "config_change_time": 1609899548,
        #             "trade_size": 28530850594,
        #             "position_size": 5223816,
        #             "long_users": 455,
        #             "funding_impact_value": "60000",
        #             "orders_limit": 50,
        #             "trade_id": 10851092,
        #             "orderbook_id": 2129638396
        #         }
        #     ]
        #
        return self.parse_funding_rate(response)
    async def fetch_funding_rates(self, symbols=None, params={}):
        """Fetch funding rates for all swap contracts of one settle currency.

        :param symbols: optional list of unified symbols to filter the result by
        :param params: may contain 'settle' to select the settlement currency
        :returns: a dict of unified funding rate structures keyed by symbol
        """
        await self.load_markets()
        # NOTE(review): 'settle' is read but not removed from params, so the
        # original-case value is sent to the endpoint again by self.extend below
        settle = self.safe_string_lower(params, 'settle')
        request = {
            'settle': settle,
        }
        response = await self.publicFuturesGetSettleContracts(self.extend(request, params))
        #
        #     [
        #         {
        #             "name": "BTC_USDT",
        #             "type": "direct",
        #             "quanto_multiplier": "0.0001",
        #             "ref_discount_rate": "0",
        #             "order_price_deviate": "0.5",
        #             "maintenance_rate": "0.005",
        #             "mark_type": "index",
        #             "last_price": "38026",
        #             "mark_price": "37985.6",
        #             "index_price": "37954.92",
        #             "funding_rate_indicative": "0.000219",
        #             "mark_price_round": "0.01",
        #             "funding_offset": 0,
        #             "in_delisting": False,
        #             "risk_limit_base": "1000000",
        #             "interest_rate": "0.0003",
        #             "order_price_round": "0.1",
        #             "order_size_min": 1,
        #             "ref_rebate_rate": "0.2",
        #             "funding_interval": 28800,
        #             "risk_limit_step": "1000000",
        #             "leverage_min": "1",
        #             "leverage_max": "100",
        #             "risk_limit_max": "8000000",
        #             "maker_fee_rate": "-0.00025",
        #             "taker_fee_rate": "0.00075",
        #             "funding_rate": "0.002053",
        #             "order_size_max": 1000000,
        #             "funding_next_apply": 1610035200,
        #             "short_users": 977,
        #             "config_change_time": 1609899548,
        #             "trade_size": 28530850594,
        #             "position_size": 5223816,
        #             "long_users": 455,
        #             "funding_impact_value": "60000",
        #             "orders_limit": 50,
        #             "trade_id": 10851092,
        #             "orderbook_id": 2129638396
        #         }
        #     ]
        #
        result = self.parse_funding_rates(response)
        return self.filter_by_array(result, 'symbol', symbols)
def parse_funding_rate(self, contract, market=None):
#
# {
# "name": "BTC_USDT",
# "type": "direct",
# "quanto_multiplier": "0.0001",
# "ref_discount_rate": "0",
# "order_price_deviate": "0.5",
# "maintenance_rate": "0.005",
# "mark_type": "index",
# "last_price": "38026",
# "mark_price": "37985.6",
# "index_price": "37954.92",
# "funding_rate_indicative": "0.000219",
# "mark_price_round": "0.01",
# "funding_offset": 0,
# "in_delisting": False,
# "risk_limit_base": "1000000",
# "interest_rate": "0.0003",
# "order_price_round": "0.1",
# "order_size_min": 1,
# "ref_rebate_rate": "0.2",
# "funding_interval": 28800,
# "risk_limit_step": "1000000",
# "leverage_min": "1",
# "leverage_max": "100",
# "risk_limit_max": "8000000",
# "maker_fee_rate": "-0.00025",
# "taker_fee_rate": "0.00075",
# "funding_rate": "0.002053",
# "order_size_max": 1000000,
# "funding_next_apply": 1610035200,
# "short_users": 977,
# "config_change_time": 1609899548,
# "trade_size": 28530850594,
# "position_size": 5223816,
# "long_users": 455,
# "funding_impact_value": "60000",
# "orders_limit": 50,
# "trade_id": 10851092,
# "orderbook_id": 2129638396
# }
#
marketId = self.safe_string(contract, 'name')
symbol = self.safe_symbol(marketId, market)
markPrice = self.safe_number(contract, 'mark_price')
indexPrice = self.safe_number(contract, 'index_price')
interestRate = self.safe_number(contract, 'interest_rate')
fundingRate = self.safe_number(contract, 'funding_rate')
fundingTime = self.safe_integer(contract, 'funding_next_apply') * 1000
fundingRateIndicative = self.safe_number(contract, 'funding_rate_indicative')
return {
'info': contract,
'symbol': symbol,
'markPrice': markPrice,
'indexPrice': indexPrice,
'interestRate': interestRate,
'estimatedSettlePrice': None,
'timestamp': None,
'datetime': None,
'fundingRate': fundingRate,
'fundingTimestamp': fundingTime,
'fundingDatetime': self.iso8601(fundingTime),
'nextFundingRate': fundingRateIndicative,
'nextFundingTimestamp': None,
'nextFundingDatetime': None,
'previousFundingRate': None,
'previousFundingTimestamp': None,
'previousFundingDatetime': None,
}
async def fetch_network_deposit_address(self, code, params={}):
await self.load_markets()
currency = self.currency(code)
request = {
'currency': currency['id'],
}
response = await self.privateWalletGetDepositAddress(self.extend(request, params))
addresses = self.safe_value(response, 'multichain_addresses')
currencyId = self.safe_string(response, 'currency')
code = self.safe_currency_code(currencyId)
result = {}
for i in range(0, len(addresses)):
entry = addresses[i]
#
# {
# "chain": "ETH",
# "address": "0x359a697945E79C7e17b634675BD73B33324E9408",
# "payment_id": "",
# "payment_name": "",
# "obtain_failed": "0"
# }
#
obtainFailed = self.safe_integer(entry, 'obtain_failed')
if obtainFailed:
continue
network = self.safe_string(entry, 'chain')
address = self.safe_string(entry, 'address')
tag = self.safe_string(entry, 'payment_id')
tagLength = len(tag)
tag = tag if tagLength else None
result[network] = {
'info': entry,
'code': code,
'address': address,
'tag': tag,
}
return result
async def fetch_deposit_address(self, code, params={}):
await self.load_markets()
currency = self.currency(code)
request = {
'currency': currency['id'],
}
response = await self.privateWalletGetDepositAddress(self.extend(request, params))
#
# {
# "currency": "XRP",
# "address": "rHcFoo6a9qT5NHiVn1THQRhsEGcxtYCV4d 391331007",
# "multichain_addresses": [
# {
# "chain": "XRP",
# "address": "rHcFoo6a9qT5NHiVn1THQRhsEGcxtYCV4d",
# "payment_id": "391331007",
# "payment_name": "Tag",
# "obtain_failed": 0
# }
# ]
# }
#
currencyId = self.safe_string(response, 'currency')
code = self.safe_currency_code(currencyId)
addressField = self.safe_string(response, 'address')
tag = None
address = None
if addressField.find(' ') >= 0:
splitted = addressField.split(' ')
address = splitted[0]
tag = splitted[1]
else:
address = addressField
return {
'info': response,
'code': code,
'address': address,
'tag': tag,
'network': None,
}
async def fetch_trading_fee(self, symbol, params={}):
await self.load_markets()
market = self.market(symbol)
request = {
'currency_pair': market['id'],
}
response = await self.privateWalletGetFee(self.extend(request, params))
#
# {
# "user_id": 1486602,
# "taker_fee": "0.002",
# "maker_fee": "0.002",
# "gt_discount": True,
# "gt_taker_fee": "0.0015",
# "gt_maker_fee": "0.0015",
# "loan_fee": "0.18",
# "point_type": "0",
# "futures_taker_fee": "0.0005",
# "futures_maker_fee": "0"
# }
#
return self.parse_trading_fee(response, market)
async def fetch_trading_fees(self, params={}):
await self.load_markets()
response = await self.privateWalletGetFee(params)
#
# {
# "user_id": 1486602,
# "taker_fee": "0.002",
# "maker_fee": "0.002",
# "gt_discount": True,
# "gt_taker_fee": "0.0015",
# "gt_maker_fee": "0.0015",
# "loan_fee": "0.18",
# "point_type": "0",
# "futures_taker_fee": "0.0005",
# "futures_maker_fee": "0"
# }
#
return self.parse_trading_fees(response)
def parse_trading_fees(self, response):
result = {}
for i in range(0, len(self.symbols)):
symbol = self.symbols[i]
market = self.market(symbol)
result[symbol] = self.parse_trading_fee(response, market)
return result
def parse_trading_fee(self, info, market=None):
#
# {
# "user_id": 1486602,
# "taker_fee": "0.002",
# "maker_fee": "0.002",
# "gt_discount": True,
# "gt_taker_fee": "0.0015",
# "gt_maker_fee": "0.0015",
# "loan_fee": "0.18",
# "point_type": "0",
# "futures_taker_fee": "0.0005",
# "futures_maker_fee": "0"
# }
#
contract = self.safe_value(market, 'contract')
takerKey = 'futures_taker_fee' if contract else 'taker_fee'
makerKey = 'futures_maker_fee' if contract else 'maker_fee'
return {
'info': info,
'symbol': self.safe_string(market, 'symbol'),
'maker': self.safe_number(info, makerKey),
'taker': self.safe_number(info, takerKey),
}
async def fetch_funding_fees(self, params={}):
await self.load_markets()
response = await self.privateWalletGetWithdrawStatus(params)
#
# {
# "currency": "MTN",
# "name": "Medicalchain",
# "name_cn": "Medicalchain",
# "deposit": "0",
# "withdraw_percent": "0%",
# "withdraw_fix": "900",
# "withdraw_day_limit": "500000",
# "withdraw_day_limit_remain": "500000",
# "withdraw_amount_mini": "900.1",
# "withdraw_eachtime_limit": "90000000000",
# "withdraw_fix_on_chains": {
# "ETH": "900"
# }
# }
#
withdrawFees = {}
for i in range(0, len(response)):
entry = response[i]
currencyId = self.safe_string(entry, 'currency')
code = self.safe_currency_code(currencyId)
withdrawFees[code] = {}
withdrawFix = self.safe_value(entry, 'withdraw_fix_on_chains')
if withdrawFix is None:
withdrawFix = {}
withdrawFix[code] = self.safe_number(entry, 'withdraw_fix')
keys = list(withdrawFix.keys())
for i in range(0, len(keys)):
key = keys[i]
withdrawFees[code][key] = self.parse_number(withdrawFix[key])
return {
'info': response,
'withdraw': withdrawFees,
'deposit': {},
}
async def fetch_funding_history(self, symbol=None, since=None, limit=None, params={}):
await self.load_markets()
# defaultType = 'future'
market = None
request = {}
if symbol is not None:
market = self.market(symbol)
symbol = market['symbol']
request = self.prepare_request(market)
type = None
type, params = self.handle_market_type_and_params('fetchFundingHistory', market, params)
if market is None:
defaultSettle = 'usdt' if (type == 'swap') else 'btc'
settle = self.safe_string(params, 'settle', defaultSettle)
request['settle'] = settle
params = self.omit(params, 'settle')
request['type'] = 'fund' # 'dnw' 'pnl' 'fee' 'refr' 'fund' 'point_dnw' 'point_fee' 'point_refr'
if since is not None:
request['from'] = since / 1000
if limit is not None:
request['limit'] = limit
method = self.get_supported_mapping(type, {
'swap': 'privateFuturesGetSettleAccountBook',
'future': 'privateDeliveryGetSettleAccountBook',
})
response = await getattr(self, method)(self.extend(request, params))
#
# [
# {
# "time": 1646899200,
# "change": "-0.027722",
# "balance": "11.653120591841",
# "text": "XRP_USDT",
# "type": "fund"
# },
# ...
# ]
#
return self.parse_funding_histories(response, symbol, since, limit)
def parse_funding_histories(self, response, symbol, since, limit):
result = []
for i in range(0, len(response)):
entry = response[i]
funding = self.parse_funding_history(entry)
result.append(funding)
sorted = self.sort_by(result, 'timestamp')
return self.filter_by_symbol_since_limit(sorted, symbol, since, limit)
def parse_funding_history(self, info, market=None):
#
# {
# "time": 1646899200,
# "change": "-0.027722",
# "balance": "11.653120591841",
# "text": "XRP_USDT",
# "type": "fund"
# }
#
timestamp = self.safe_timestamp(info, 'time')
marketId = self.safe_string(info, 'text')
market = self.safe_market(marketId, market)
return {
'info': info,
'symbol': self.safe_string(market, 'symbol'),
'code': self.safe_string(market, 'settle'),
'timestamp': timestamp,
'datetime': self.iso8601(timestamp),
'id': None,
'amount': self.safe_number(info, 'change'),
}
    async def fetch_order_book(self, symbol, limit=None, params={}):
        """Fetch the order book for *symbol* from the spot, swap or delivery
        endpoint depending on the market type.

        :param symbol: unified market symbol
        :param limit: optional maximum number of levels per side
        :param params: extra parameters passed through to the endpoint
        :returns: a unified order book structure
        """
        await self.load_markets()
        market = self.market(symbol)
        #
        #     request = {
        #         'currency_pair': market['id'],
        #         'interval': '0',  # depth, 0 means no aggregation is applied, default to 0
        #         'limit': limit,  # maximum number of order depth data in asks or bids
        #         'with_id': True,  # return order book ID
        #     }
        #
        request = self.prepare_request(market)
        spotOrMargin = market['spot'] or market['margin']
        method = self.get_supported_mapping(market['type'], {
            'spot': 'publicSpotGetOrderBook',
            'margin': 'publicSpotGetOrderBook',
            'swap': 'publicFuturesGetSettleOrderBook',
            'future': 'publicDeliveryGetSettleOrderBook',
        })
        if limit is not None:
            request['limit'] = limit  # default 10, max 100
        response = await getattr(self, method)(self.extend(request, params))
        #
        # SPOT -- levels are [price, amount] string arrays, 'current' is in ms
        #
        #     {
        #         "current": 1634345973275,
        #         "update": 1634345973271,
        #         "asks": [
        #             ["2.2241", "12449.827"],
        #             ...
        #         ],
        #         "bids": [
        #             ["2.2236", "32.465"],
        #             ...
        #         ]
        #     }
        #
        # Perpetual Swap -- levels are {"s": size, "p": "price"} dicts,
        # 'current' is in seconds with a fractional part
        #
        #     {
        #         "current": 1634350208.745,
        #         "asks": [
        #             {"s": 24909, "p": "61264.8"},
        #             ...
        #         ],
        #         "bids": [
        #             {"s": 41844, "p": "61264.7"},
        #             ...
        #         ],
        #         "update": 1634350208.724
        #     }
        #
        timestamp = self.safe_integer(response, 'current')
        if not spotOrMargin:
            # contract endpoints report seconds; convert to milliseconds
            # NOTE(review): safe_integer truncates the fractional seconds, so
            # sub-second precision is lost here -- confirm if intended
            timestamp = timestamp * 1000
        # spot levels are arrays, contract levels are dicts -- pick keys accordingly
        priceKey = 0 if spotOrMargin else 'p'
        amountKey = 1 if spotOrMargin else 's'
        return self.parse_order_book(response, symbol, timestamp, 'bids', 'asks', priceKey, amountKey)
async def fetch_ticker(self, symbol, params={}):
await self.load_markets()
market = self.market(symbol)
request = self.prepare_request(market)
method = self.get_supported_mapping(market['type'], {
'spot': 'publicSpotGetTickers',
'margin': 'publicSpotGetTickers',
'swap': 'publicFuturesGetSettleTickers',
'future': 'publicDeliveryGetSettleTickers',
})
response = await getattr(self, method)(self.extend(request, params))
ticker = self.safe_value(response, 0)
return self.parse_ticker(ticker, market)
    def parse_ticker(self, ticker, market=None):
        """Convert a raw spot or contract ticker into a unified ticker.

        Spot tickers carry 'currency_pair' and bid/ask; contract tickers carry
        'contract' and the volume_24h_* fields instead.

        :param ticker: raw ticker dict
        :param market: optional market structure used to resolve the symbol
        :returns: a unified ticker structure
        """
        #
        # SPOT
        #
        #     {
        #         "currency_pair": "KFC_USDT",
        #         "last": "7.255",
        #         "lowest_ask": "7.298",
        #         "highest_bid": "7.218",
        #         "change_percentage": "-1.18",
        #         "base_volume": "1219.053687865",
        #         "quote_volume": "8807.40299875455",
        #         "high_24h": "7.262",
        #         "low_24h": "7.095"
        #     }
        #
        # LINEAR/DELIVERY
        #
        #     {
        #         "contract": "BTC_USDT",
        #         "last": "6432",
        #         "low_24h": "6278",
        #         "high_24h": "6790",
        #         "change_percentage": "4.43",
        #         "total_size": "32323904",
        #         "volume_24h": "184040233284",
        #         "volume_24h_btc": "28613220",
        #         "volume_24h_usd": "184040233284",
        #         "volume_24h_base": "28613220",
        #         "volume_24h_quote": "184040233284",
        #         "volume_24h_settle": "28613220",
        #         "mark_price": "6534",
        #         "funding_rate": "0.0001",
        #         "funding_rate_indicative": "0.0001",
        #         "index_price": "6531"
        #     }
        #
        marketId = self.safe_string_2(ticker, 'currency_pair', 'contract')
        symbol = self.safe_symbol(marketId, market)
        last = self.safe_string(ticker, 'last')
        ask = self.safe_string(ticker, 'lowest_ask')
        bid = self.safe_string(ticker, 'highest_bid')
        high = self.safe_string(ticker, 'high_24h')
        low = self.safe_string(ticker, 'low_24h')
        baseVolume = self.safe_string_2(ticker, 'base_volume', 'volume_24h_base')
        quoteVolume = self.safe_string_2(ticker, 'quote_volume', 'volume_24h_quote')
        percentage = self.safe_string(ticker, 'change_percentage')
        return self.safe_ticker({
            'symbol': symbol,
            'timestamp': None,
            'datetime': None,
            'high': high,
            'low': low,
            'bid': bid,
            'bidVolume': None,
            'ask': ask,
            'askVolume': None,
            'vwap': None,
            'open': None,
            'close': last,
            'last': last,
            'previousClose': None,
            'change': None,
            'percentage': percentage,
            'average': None,
            'baseVolume': baseVolume,
            'quoteVolume': quoteVolume,
            'info': ticker,
        }, market, False)
async def fetch_tickers(self, symbols=None, params={}):
await self.load_markets()
type = None
type, params = self.handle_market_type_and_params('fetchTickers', None, params)
method = self.get_supported_mapping(type, {
'spot': 'publicSpotGetTickers',
'margin': 'publicSpotGetTickers',
'swap': 'publicFuturesGetSettleTickers',
'future': 'publicDeliveryGetSettleTickers',
})
request = {}
future = type == 'future'
swap = type == 'swap'
defaultSettle = 'usdt' if swap else 'btc'
settle = self.safe_string_lower(params, 'settle', defaultSettle)
if swap or future:
request['settle'] = settle
response = await getattr(self, method)(self.extend(request, params))
return self.parse_tickers(response, symbols)
def fetch_balance_helper(self, entry):
account = self.account()
account['used'] = self.safe_string_2(entry, 'locked', 'position_margin')
account['free'] = self.safe_string(entry, 'available')
return account
    async def fetch_balance(self, params={}):
        """Fetch account balances for the requested account type.

        :param params.type: spot, margin, crossMargin, swap or future
        :param params.settle: settle currency(usdt or btc) for perpetual swap and future
        :returns: for margin, a dict of per-symbol balance structures; otherwise
            a unified balance structure keyed by currency code
        """
        await self.load_markets()
        type = None
        type, params = self.handle_market_type_and_params('fetchBalance', None, params)
        swap = type == 'swap'
        future = type == 'future'
        method = self.get_supported_mapping(type, {
            'spot': 'privateSpotGetAccounts',
            'margin': 'privateMarginGetAccounts',
            'swap': 'privateFuturesGetSettleAccounts',
            'future': 'privateDeliveryGetSettleAccounts',
        })
        request = {}
        response = []
        if swap or future:
            # contract endpoints return a single account object -- wrap it in a
            # list so the parsing loop below handles all types uniformly
            defaultSettle = 'usdt' if swap else 'btc'
            request['settle'] = self.safe_string_lower(params, 'settle', defaultSettle)
            response_item = await getattr(self, method)(self.extend(request, params))
            response = [response_item]
        else:
            response = await getattr(self, method)(self.extend(request, params))
        # Spot
        #
        #     [
        #         {
        #             "currency": "DBC",
        #             "available": "0",
        #             "locked": "0"
        #         },
        #         ...
        #     ]
        #
        # Margin -- one entry per currency pair, with nested base/quote accounts
        #
        #     [
        #         {
        #             "currency_pair": "DOGE_USDT",
        #             "locked": false,
        #             "risk": "9999.99",
        #             "base": {
        #                 "currency": "DOGE",
        #                 "available": "0",
        #                 "locked": "0",
        #                 "borrowed": "0",
        #                 "interest": "0"
        #             },
        #             "quote": {
        #                 "currency": "USDT",
        #                 "available": "0.73402",
        #                 "locked": "0",
        #                 "borrowed": "0",
        #                 "interest": "0"
        #             }
        #         },
        #         ...
        #     ]
        #
        # Perpetual Swap
        #
        #     {
        #         order_margin: "0",
        #         point: "0",
        #         bonus: "0",
        #         history: {...},
        #         unrealised_pnl: "13.315100000006",
        #         total: "12.51345151332",
        #         available: "0",
        #         in_dual_mode: False,
        #         currency: "USDT",
        #         position_margin: "12.51345151332",
        #         user: "6333333",
        #     }
        #
        # Delivery Future
        #
        #     {
        #         order_margin: "0",
        #         point: "0",
        #         history: {...},
        #         unrealised_pnl: "0",
        #         total: "1",
        #         available: "1",
        #         currency: "USDT",
        #         position_margin: "0",
        #         user: "6333333",
        #     }
        #
        margin = type == 'margin'
        result = {
            'info': response,
        }
        for i in range(0, len(response)):
            entry = response[i]
            if margin:
                marketId = self.safe_string(entry, 'currency_pair')
                symbol = self.safe_symbol(marketId, None, '_')
                base = self.safe_value(entry, 'base', {})
                quote = self.safe_value(entry, 'quote', {})
                # NOTE(review): the third argument {} to safe_string is an odd
                # default for a string value -- presumably harmless; confirm
                baseCode = self.safe_currency_code(self.safe_string(base, 'currency', {}))
                quoteCode = self.safe_currency_code(self.safe_string(quote, 'currency', {}))
                subResult = {}
                subResult[baseCode] = self.fetch_balance_helper(base)
                subResult[quoteCode] = self.fetch_balance_helper(quote)
                result[symbol] = self.safe_balance(subResult)
            else:
                code = self.safe_currency_code(self.safe_string(entry, 'currency', {}))
                result[code] = self.fetch_balance_helper(entry)
        return result if margin else self.safe_balance(result)
    async def fetch_ohlcv(self, symbol, timeframe='1m', since=None, limit=None, params={}):
        """Fetch OHLCV candles for *symbol*.

        :param symbol: unified market symbol
        :param timeframe: unified timeframe string, default '1m'
        :param since: optional earliest time in milliseconds
        :param limit: optional maximum number of candles(capped per market type)
        :param params.price: 'mark' or 'index' to fetch mark/index price candles
            on contract markets
        :returns: a list of [timestamp, open, high, low, close, volume] candles
        """
        await self.load_markets()
        market = self.market(symbol)
        price = self.safe_string(params, 'price')
        request = self.prepare_request(market)
        request['interval'] = self.timeframes[timeframe]
        method = 'publicSpotGetCandlesticks'
        if market['contract']:
            maxLimit = 1999
            limit = maxLimit if (limit is None) else min(limit, maxLimit)
            if market['future']:
                method = 'publicDeliveryGetSettleCandlesticks'
            elif market['swap']:
                method = 'publicFuturesGetSettleCandlesticks'
            isMark = (price == 'mark')
            isIndex = (price == 'index')
            if isMark or isIndex:
                # mark/index candles are requested via a prefixed contract id
                request['contract'] = price + '_' + market['id']
                params = self.omit(params, 'price')
        else:
            maxLimit = 1000
            limit = maxLimit if (limit is None) else min(limit, maxLimit)
        request['limit'] = limit
        if since is not None:
            duration = self.parse_timeframe(timeframe)
            request['from'] = int(since / 1000)
            # derive the end of the window from limit, capped at the current time
            toTimestamp = self.sum(request['from'], limit * duration - 1)
            currentTimestamp = self.seconds()
            request['to'] = min(toTimestamp, currentTimestamp)
        response = await getattr(self, method)(self.extend(request, params))
        return self.parse_ohlcvs(response, market, timeframe, since, limit)
async def fetch_mark_ohlcv(self, symbol, timeframe='1m', since=None, limit=None, params={}):
request = {
'price': 'mark',
}
return await self.fetch_ohlcv(symbol, timeframe, since, limit, self.extend(request, params))
async def fetch_funding_rate_history(self, symbol=None, since=None, limit=None, params={}):
if symbol is None:
raise ArgumentsRequired(self.id + ' fetchFundingRateHistory() requires a symbol argument')
await self.load_markets()
market = self.market(symbol)
if not market['swap']:
raise BadRequest('Funding rates only exist for swap contracts')
request = {
'contract': market['id'],
'settle': market['settleId'],
}
if limit is not None:
request['limit'] = limit
method = 'publicFuturesGetSettleFundingRate'
response = await getattr(self, method)(self.extend(request, params))
#
# {
# "r": "0.00063521",
# "t": "1621267200000",
# }
#
rates = []
for i in range(0, len(response)):
entry = response[i]
timestamp = self.safe_timestamp(entry, 't')
rates.append({
'info': entry,
'symbol': symbol,
'fundingRate': self.safe_number(entry, 'r'),
'timestamp': timestamp,
'datetime': self.iso8601(timestamp),
})
sorted = self.sort_by(rates, 'timestamp')
return self.filter_by_symbol_since_limit(sorted, market['symbol'], since, limit)
async def fetch_index_ohlcv(self, symbol, timeframe='1m', since=None, limit=None, params={}):
request = {
'price': 'index',
}
return await self.fetch_ohlcv(symbol, timeframe, since, limit, self.extend(request, params))
def parse_ohlcv(self, ohlcv, market=None):
#
# Spot market candles
#
# [
# "1626163200", # Unix timestamp in seconds
# "346711.933138181617", # Trading volume
# "33165.23", # Close price
# "33260", # Highest price
# "33117.6", # Lowest price
# "33184.47" # Open price
# ]
#
# Mark and Index price candles
#
# {
# "t":1632873600, # Unix timestamp in seconds
# "o": "41025", # Open price
# "h": "41882.17", # Highest price
# "c": "41776.92", # Close price
# "l": "40783.94" # Lowest price
# }
#
if isinstance(ohlcv, list):
return [
self.safe_timestamp(ohlcv, 0), # unix timestamp in seconds
self.safe_number(ohlcv, 5), # open price
self.safe_number(ohlcv, 3), # highest price
self.safe_number(ohlcv, 4), # lowest price
self.safe_number(ohlcv, 2), # close price
self.safe_number(ohlcv, 1), # trading volume
]
else:
# Mark and Index price candles
return [
self.safe_timestamp(ohlcv, 't'), # unix timestamp in seconds
self.safe_number(ohlcv, 'o'), # open price
self.safe_number(ohlcv, 'h'), # highest price
self.safe_number(ohlcv, 'l'), # lowest price
self.safe_number(ohlcv, 'c'), # close price
self.safe_number(ohlcv, 'v'), # trading volume, None for mark or index price
]
    async def fetch_trades(self, symbol, since=None, limit=None, params={}):
        """Fetch recent public trades for *symbol*.

        :param symbol: unified market symbol
        :param since: optional earliest time in ms(only applied to contract markets)
        :param limit: optional maximum number of trades
        :param params: extra parameters passed through to the endpoint
        :returns: a list of unified trade structures
        """
        await self.load_markets()
        market = self.market(symbol)
        #
        # spot
        #
        #     request = {
        #         'currency_pair': market['id'],
        #         'limit': limit,  # maximum number of records to be returned in a single list
        #         'last_id': 'id',  # specify list staring point using the id of last record in previous list-query results
        #         'reverse': False,  # True to retrieve records where id is smaller than the specified last_id, False to retrieve records where id is larger than the specified last_id
        #     }
        #
        # swap, future
        #
        #     request = {
        #         'settle': market['settleId'],
        #         'contract': market['id'],
        #         'limit': limit,  # maximum number of records to be returned in a single list
        #         'last_id': 'id',  # specify list staring point using the id of last record in previous list-query results
        #         'from': since / 1000),  # starting time in seconds, if not specified, to and limit will be used to limit response items
        #         'to': self.seconds(),  # end time in seconds, default to current time
        #     }
        #
        request = self.prepare_request(market)
        method = self.get_supported_mapping(market['type'], {
            'spot': 'publicSpotGetTrades',
            'margin': 'publicSpotGetTrades',
            'swap': 'publicFuturesGetSettleTrades',
            'future': 'publicDeliveryGetSettleTrades',
        })
        if limit is not None:
            request['limit'] = limit  # default 100, max 1000
        # only the contract endpoints accept a 'from' timestamp
        if since is not None and (market['contract']):
            request['from'] = int(since / 1000)
        response = await getattr(self, method)(self.extend(request, params))
        #
        # spot
        #
        #     [
        #         {
        #             id: "1852958144",
        #             create_time: "1634673259",
        #             create_time_ms: "1634673259378.105000",
        #             currency_pair: "ADA_USDT",
        #             side: "sell",
        #             amount: "307.078",
        #             price: "2.104",
        #         }
        #     ]
        #
        # perpetual swap
        #
        #     [
        #         {
        #             size: "2",
        #             id: "2522911",
        #             create_time_ms: "1634673380.182",
        #             create_time: "1634673380.182",
        #             contract: "ADA_USDT",
        #             price: "2.10486",
        #         }
        #     ]
        #
        return self.parse_trades(response, market, since, limit)
    async def fetch_my_trades(self, symbol=None, since=None, limit=None, params={}):
        """Fetch the account's own trades.

        :param symbol: optional unified market symbol
        :param since: optional earliest time in milliseconds
        :param limit: optional maximum number of trades
        :param params: may contain 'settle' when no symbol is given for
            swap/future markets
        :raises ArgumentsRequired: for swap/future without symbol or settle
        :returns: a list of unified trade structures
        """
        await self.load_markets()
        market = None
        request = {}
        type = None
        type, params = self.handle_market_type_and_params('fetchMyTrades', None, params)
        if symbol:
            market = self.market(symbol)
            request = self.prepare_request(market)
            type = market['type']
        else:
            if type == 'swap' or type == 'future':
                settle = self.safe_string_lower(params, 'settle')
                if not settle:
                    raise ArgumentsRequired(self.id + ' fetchMyTrades() requires a symbol argument or a settle parameter for ' + type + ' markets')
                request['settle'] = settle
        #
        #     request = {
        #         'currency_pair': market['id'],
        #         # 'limit': limit,
        #         # 'page': 0,
        #         # 'order_id': 'Order ID',
        #         # 'account': 'spot',  # default to spot and margin account if not specified, set to cross_margin to operate against margin account
        #         # 'from': since,  # default to 7 days before current time
        #         # 'to': self.milliseconds(),  # default to current time
        #     }
        #
        if limit is not None:
            request['limit'] = limit  # default 100, max 1000
        if since is not None:
            request['from'] = int(since / 1000)
            # request['to'] = since + 7 * 24 * 60 * 60
        method = self.get_supported_mapping(type, {
            'spot': 'privateSpotGetMyTrades',
            'margin': 'privateSpotGetMyTrades',
            'swap': 'privateFuturesGetSettleMyTrades',
            'future': 'privateDeliveryGetSettleMyTrades',
        })
        response = await getattr(self, method)(self.extend(request, params))
        #
        # spot
        #
        #     [
        #         {
        #             "id": "2876130500",
        #             "create_time": "1645464610",
        #             "create_time_ms": "1645464610777.399200",
        #             "currency_pair": "DOGE_USDT",
        #             "side": "sell",
        #             "role": "taker",
        #             "amount": "10.97",
        #             "price": "0.137384",
        #             "order_id": "125924049993",
        #             "fee": "0.00301420496",
        #             "fee_currency": "USDT",
        #             "point_fee": "0",
        #             "gt_fee": "0"
        #         }
        #     ]
        #
        # perpetual swap
        #
        #     [
        #         {
        #             "size": -5,
        #             "order_id": "130264979823",
        #             "id": 26884791,
        #             "role": "taker",
        #             "create_time": 1645465199.5472,
        #             "contract": "DOGE_USDT",
        #             "price": "0.136888"
        #         }
        #     ]
        #
        # future
        #
        #     [
        #         {
        #             "id": 121234231,
        #             "create_time": 1514764800.123,
        #             "contract": "BTC_USDT",
        #             "order_id": "21893289839",
        #             "size": 100,
        #             "price": "100.123",
        #             "role": "taker"
        #         }
        #     ]
        #
        return self.parse_trades(response, market, since, limit)
    def parse_trade(self, trade, market=None):
        """Convert a raw public/private, spot/contract trade into a unified trade.

        For contract trades a negative 'size' means a sell; the fee is either
        paid in GT('gt_fee') or in 'fee_currency'('fee').

        :param trade: raw trade dict
        :param market: optional market structure used to resolve the symbol
        :returns: a unified trade structure
        """
        #
        # public
        #
        #     {
        #         "id": "1334253759",
        #         "create_time": "1626342738",
        #         "create_time_ms": "1626342738331.497000",
        #         "currency_pair": "BTC_USDT",
        #         "side": "sell",
        #         "amount": "0.0022",
        #         "price": "32452.16"
        #     }
        #
        # public ws
        #
        #     {
        #         id: 221994511,
        #         time: 1580311438.618647,
        #         price: '9309',
        #         amount: '0.0019',
        #         type: 'sell'
        #     }
        #
        # spot rest
        #
        #     {
        #         "id": "2876130500",
        #         "create_time": "1645464610",
        #         "create_time_ms": "1645464610777.399200",
        #         "currency_pair": "DOGE_USDT",
        #         "side": "sell",
        #         "role": "taker",
        #         "amount": "10.97",
        #         "price": "0.137384",
        #         "order_id": "125924049993",
        #         "fee": "0.00301420496",
        #         "fee_currency": "USDT",
        #         "point_fee": "0", "gt_fee": "0"
        #     }
        #
        # perpetual swap rest
        #
        #     {
        #         "size": -5,
        #         "order_id": "130264979823",
        #         "id": 26884791,
        #         "role": "taker",
        #         "create_time": 1645465199.5472,
        #         "contract": "DOGE_USDT",
        #         "price": "0.136888"
        #     }
        #
        # future rest
        #
        #     {
        #         "id": 121234231,
        #         "create_time": 1514764800.123,
        #         "contract": "BTC_USDT",
        #         "order_id": "21893289839",
        #         "size": 100,
        #         "price": "100.123",
        #         "role": "taker"
        #     }
        #
        id = self.safe_string(trade, 'id')
        timestamp = self.safe_timestamp_2(trade, 'time', 'create_time')
        # prefer the millisecond-precision key when present
        # NOTE(review): 'create_time_ms' can be a fractional string
        # ("...777.399200") -- presumably safe_integer truncates it; confirm
        timestamp = self.safe_integer(trade, 'create_time_ms', timestamp)
        marketId = self.safe_string_2(trade, 'currency_pair', 'contract')
        symbol = self.safe_symbol(marketId, market)
        amountString = self.safe_string_2(trade, 'amount', 'size')
        priceString = self.safe_string(trade, 'price')
        # contract trades encode the side in the sign of 'size'
        contractSide = 'sell' if Precise.string_lt(amountString, '0') else 'buy'
        amountString = Precise.string_abs(amountString)
        side = self.safe_string_2(trade, 'side', 'type', contractSide)
        orderId = self.safe_string(trade, 'order_id')
        gtFee = self.safe_string(trade, 'gt_fee')
        feeCurrency = None
        feeCostString = None
        if gtFee == '0':
            feeCurrency = self.safe_string(trade, 'fee_currency')
            feeCostString = self.safe_string(trade, 'fee')
        else:
            # non-zero gt_fee means the fee was paid in the exchange token GT
            feeCurrency = 'GT'
            feeCostString = gtFee
        fee = {
            'cost': feeCostString,
            'currency': feeCurrency,
        }
        takerOrMaker = self.safe_string(trade, 'role')
        return self.safe_trade({
            'info': trade,
            'id': id,
            'timestamp': timestamp,
            'datetime': self.iso8601(timestamp),
            'symbol': symbol,
            'order': orderId,
            'type': None,
            'side': side,
            'takerOrMaker': takerOrMaker,
            'price': priceString,
            'amount': amountString,
            'cost': None,
            'fee': fee,
        }, market)
async def fetch_deposits(self, code=None, since=None, limit=None, params={}):
await self.load_markets()
request = {}
currency = None
if code is not None:
currency = self.currency(code)
request['currency'] = currency['id']
if limit is not None:
request['limit'] = limit
if since is not None:
start = int(since / 1000)
request['from'] = start
request['to'] = self.sum(start, 30 * 24 * 60 * 60)
response = await self.privateWalletGetDeposits(self.extend(request, params))
return self.parse_transactions(response, currency)
async def fetch_withdrawals(self, code=None, since=None, limit=None, params={}):
await self.load_markets()
request = {}
currency = None
if code is not None:
currency = self.currency(code)
request['currency'] = currency['id']
if limit is not None:
request['limit'] = limit
if since is not None:
start = int(since / 1000)
request['from'] = start
request['to'] = self.sum(start, 30 * 24 * 60 * 60)
response = await self.privateWalletGetWithdrawals(self.extend(request, params))
return self.parse_transactions(response, currency)
async def withdraw(self, code, amount, address, tag=None, params={}):
tag, params = self.handle_withdraw_tag_and_params(tag, params)
self.check_address(address)
await self.load_markets()
currency = self.currency(code)
request = {
'currency': currency['id'],
'address': address,
'amount': self.currency_to_precision(code, amount),
}
if tag is not None:
request['memo'] = tag
networks = self.safe_value(self.options, 'networks', {})
network = self.safe_string_upper(params, 'network') # self line allows the user to specify either ERC20 or ETH
network = self.safe_string_lower(networks, network, network) # handle ETH>ERC20 alias
if network is not None:
request['chain'] = network
params = self.omit(params, 'network')
response = await self.privateWithdrawalsPost(self.extend(request, params))
#
# {
# "id": "w13389675",
# "currency": "USDT",
# "amount": "50",
# "address": "TUu2rLFrmzUodiWfYki7QCNtv1akL682p1",
# "memo": null
# }
#
currencyId = self.safe_string(response, 'currency')
id = self.safe_string(response, 'id')
return {
'info': response,
'id': id,
'code': self.safe_currency_code(currencyId),
'amount': self.safe_number(response, 'amount'),
'address': self.safe_string(response, 'address'),
'tag': self.safe_string(response, 'memo'),
}
def parse_transaction_status(self, status):
statuses = {
'PEND': 'pending',
'REQUEST': 'pending',
'DMOVE': 'pending',
'CANCEL': 'failed',
'DONE': 'ok',
'BCODE': 'ok', # GateCode withdrawal
}
return self.safe_string(statuses, status, status)
def parse_transaction_type(self, type):
types = {
'd': 'deposit',
'w': 'withdrawal',
}
return self.safe_string(types, type, type)
def parse_transaction(self, transaction, currency=None):
#
# deposits
#
# {
# "id": "d33361395",
# "currency": "USDT_TRX",
# "address": "TErdnxenuLtXfnMafLbfappYdHtnXQ5U4z",
# "amount": "100",
# "txid": "ae9374de34e558562fe18cbb1bf9ab4d9eb8aa7669d65541c9fa2a532c1474a0",
# "timestamp": "1626345819",
# "status": "DONE",
# "memo": ""
# }
#
# withdrawals
id = self.safe_string(transaction, 'id')
type = None
amount = self.safe_string(transaction, 'amount')
if id[0] == 'b':
# GateCode handling
type = 'deposit' if Precise.string_gt(amount, '0') else 'withdrawal'
amount = Precise.string_abs(amount)
elif id is not None:
type = self.parse_transaction_type(id[0])
currencyId = self.safe_string(transaction, 'currency')
code = self.safe_currency_code(currencyId)
txid = self.safe_string(transaction, 'txid')
rawStatus = self.safe_string(transaction, 'status')
status = self.parse_transaction_status(rawStatus)
address = self.safe_string(transaction, 'address')
fee = self.safe_number(transaction, 'fee')
tag = self.safe_string(transaction, 'memo')
if tag == '':
tag = None
timestamp = self.safe_timestamp(transaction, 'timestamp')
return {
'info': transaction,
'id': id,
'txid': txid,
'currency': code,
'amount': self.parse_number(amount),
'network': None,
'address': address,
'addressTo': None,
'addressFrom': None,
'tag': tag,
'tagTo': None,
'tagFrom': None,
'status': status,
'type': type,
'timestamp': timestamp,
'datetime': self.iso8601(timestamp),
'updated': None,
'fee': fee,
}
    async def create_order(self, symbol, type, side, amount, price=None, params={}):
        '''
        * Creates an order(regular or trigger/conditional) on spot, margin, swap or future markets
        * @param {string} symbol: Unified market symbol
        * @param {string} type: 'limit' or 'market'(market orders are rejected for spot markets)
        * @param {string} side: 'buy' or 'sell'
        * @param {number} amount: Amount of base currency, or number of contracts for contract markets
        * @param {number} price: Price per unit, required for limit orders
        * @param {number} params.stopPrice: When set, a conditional(trigger) order is created instead
        * @param {string} params.timeInForce: 'gtc', 'ioc' or 'poc'(aliases: tif, time_in_force)
        * @param {boolean} params.reduceOnly: Contract markets only(alias: reduce_only)
        * @param {dictionary} params: Parameters specified by the exchange api
        * @returns Order structure
        '''
        await self.load_markets()
        market = self.market(symbol)
        contract = market['contract']
        stopPrice = self.safe_number(params, 'stopPrice')
        methodTail = 'Orders'
        reduceOnly = self.safe_value_2(params, 'reduce_only', 'reduceOnly')
        defaultTimeInForce = self.safe_value_2(params, 'tif', 'time_in_force', 'gtc')
        timeInForce = self.safe_value(params, 'timeInForce', defaultTimeInForce)
        params = self.omit(params, ['stopPrice', 'reduce_only', 'reduceOnly', 'tif', 'time_in_force', 'timeInForce'])
        isLimitOrder = (type == 'limit')
        isMarketOrder = (type == 'market')
        if isLimitOrder and price is None:
            raise ArgumentsRequired(self.id + ' createOrder() requires a price argument for ' + type + ' orders')
        if contract:
            # contract size is signed: positive = long/buy, negative = short/sell
            amountToPrecision = self.amount_to_precision(symbol, amount)
            signedAmount = Precise.string_neg(amountToPrecision) if (side == 'sell') else amountToPrecision
            amount = int(signedAmount)
            if isMarketOrder:
                # contract market orders are expressed as price 0 with ioc
                timeInForce = 'ioc'
                price = 0
        elif not isLimitOrder:
            # Gateio doesn't have market orders for spot
            raise InvalidOrder(self.id + ' createOrder() does not support ' + type + ' orders for ' + market['type'] + ' markets')
        request = None
        trigger = self.safe_value(params, 'trigger')
        if stopPrice is None and trigger is None:
            # regular(non-conditional) orders
            if contract:
                # contract order
                request = {
                    'contract': market['id'],  # filled in prepareRequest above
                    'size': amount,  # int64, positive = bid, negative = ask
                    # 'iceberg': 0,  # int64, display size for iceberg order, 0 for non-iceberg, note that you will have to pay the taker fee for the hidden size
                    'price': self.price_to_precision(symbol, price),  # 0 for market order with tif set as ioc
                    # 'close': False,  # True to close the position, with size set to 0
                    # 'reduce_only': False,  # St as True to be reduce-only order
                    # 'tif': 'gtc',  # gtc, ioc, poc PendingOrCancelled == postOnly order
                    # 'text': clientOrderId,  # 't-abcdef1234567890',
                    # 'auto_size': '',  # close_long, close_short, note size also needs to be set to 0
                    'settle': market['settleId'],  # filled in prepareRequest above
                }
                if reduceOnly is not None:
                    request['reduce_only'] = reduceOnly
                if timeInForce is not None:
                    request['tif'] = timeInForce
            else:
                options = self.safe_value(self.options, 'createOrder', {})
                defaultAccount = self.safe_string(options, 'account', 'spot')
                account = self.safe_string(params, 'account', defaultAccount)
                params = self.omit(params, 'account')
                # spot order
                request = {
                    # 'text': clientOrderId,  # 't-abcdef1234567890',
                    'currency_pair': market['id'],  # filled in prepareRequest above
                    'type': type,
                    'account': account,  # 'spot', 'margin', 'cross_margin'
                    'side': side,
                    'amount': self.amount_to_precision(symbol, amount),
                    'price': self.price_to_precision(symbol, price),
                    # 'time_in_force': 'gtc',  # gtc, ioc, poc PendingOrCancelled == postOnly order
                    # 'iceberg': 0,  # amount to display for the iceberg order, null or 0 for normal orders, set to -1 to hide the order completely
                    # 'auto_borrow': False,  # used in margin or cross margin trading to allow automatic loan of insufficient amount if balance is not enough
                    # 'auto_repay': False,  # automatic repayment for automatic borrow loan generated by cross margin order, diabled by default
                }
                if timeInForce is not None:
                    request['time_in_force'] = timeInForce
            clientOrderId = self.safe_string_2(params, 'text', 'clientOrderId')
            if clientOrderId is not None:
                # user-defined, must follow the rules if not empty
                #     prefixed with t-
                #     no longer than 28 bytes without t- prefix
                #     can only include 0-9, A-Z, a-z, underscores(_), hyphens(-) or dots(.)
                if len(clientOrderId) > 28:
                    raise BadRequest(self.id + ' createOrder() clientOrderId or text param must be up to 28 characters')
                params = self.omit(params, ['text', 'clientOrderId'])
                if clientOrderId[0] != 't':
                    clientOrderId = 't-' + clientOrderId
                request['text'] = clientOrderId
        else:
            # conditional(trigger) orders - routed to the price_orders endpoints below
            if contract:
                # contract conditional order
                rule = 1 if (side == 'buy') else 2
                request = {
                    'initial': {
                        'contract': market['id'],
                        'size': amount,  # positive = buy, negative = sell, set to 0 to close the position
                        'price': self.price_to_precision(symbol, price),  # set to 0 to use market price
                        # 'close': False,  # set to True if trying to close the position
                        # 'tif': 'gtc',  # gtc, ioc, if using market price, only ioc is supported
                        # 'text': clientOrderId,  # web, api, app
                        # 'reduce_only': False,
                    },
                    'trigger': {
                        # 'strategy_type': 0,  # 0 = by price, 1 = by price gap, only 0 is supported currently
                        # 'price_type': 0,  # 0 latest deal price, 1 mark price, 2 index price
                        'price': self.price_to_precision(symbol, stopPrice),  # price or gap
                        'rule': rule,  # 1 means price_type >= price, 2 means price_type <= price
                        # 'expiration': expiration, how many seconds to wait for the condition to be triggered before cancelling the order
                    },
                    'settle': market['settleId'],
                }
                expiration = self.safe_integer(params, 'expiration')
                if expiration is not None:
                    request['trigger']['expiration'] = expiration
                    params = self.omit(params, 'expiration')
                if reduceOnly is not None:
                    request['initial']['reduce_only'] = reduceOnly
                if timeInForce is not None:
                    request['initial']['tif'] = timeInForce
            else:
                # spot conditional order
                options = self.safe_value(self.options, 'createOrder', {})
                defaultAccount = self.safe_string(options, 'account', 'normal')
                account = self.safe_string(params, 'account', defaultAccount)
                params = self.omit(params, 'account')
                defaultExpiration = self.safe_integer(options, 'expiration')
                expiration = self.safe_integer(params, 'expiration', defaultExpiration)
                rule = '>=' if (side == 'buy') else '<='
                triggerPrice = self.safe_value(trigger, 'price', stopPrice)
                request = {
                    'trigger': {
                        'price': self.price_to_precision(symbol, triggerPrice),
                        'rule': rule,  # >= triggered when market price larger than or equal to price field, <= triggered when market price less than or equal to price field
                        'expiration': expiration,  # required, how long(in seconds) to wait for the condition to be triggered before cancelling the order
                    },
                    'put': {
                        'type': type,
                        'side': side,
                        'price': self.price_to_precision(symbol, price),
                        'amount': self.amount_to_precision(symbol, amount),
                        'account': account,  # normal, margin
                        'time_in_force': timeInForce,  # gtc, ioc for taker only
                    },
                    'market': market['id'],
                }
            methodTail = 'PriceOrders'
        method = self.get_supported_mapping(market['type'], {
            'spot': 'privateSpotPost' + methodTail,
            'margin': 'privateSpotPost' + methodTail,
            'swap': 'privateFuturesPostSettle' + methodTail,
            'future': 'privateDeliveryPostSettle' + methodTail,
        })
        response = await getattr(self, method)(self.deep_extend(request, params))
        #
        # spot
        #
        #     {
        #         "id":"95282841887",
        #         "text":"apiv4",
        #         "create_time":"1637383156",
        #         "update_time":"1637383156",
        #         "create_time_ms":1637383156017,
        #         "update_time_ms":1637383156017,
        #         "status":"open",
        #         "currency_pair":"ETH_USDT",
        #         "type":"limit",
        #         "account":"spot",
        #         "side":"buy",
        #         "amount":"0.01",
        #         "price":"3500",
        #         "time_in_force":"gtc",
        #         "iceberg":"0",
        #         "left":"0.01",
        #         "fill_price":"0",
        #         "filled_total":"0",
        #         "fee":"0",
        #         "fee_currency":"ETH",
        #         "point_fee":"0","gt_fee":"0",
        #         "gt_discount":false,
        #         "rebated_fee":"0",
        #         "rebated_fee_currency":"USDT"
        #     }
        #
        # spot conditional
        #
        #     {"id":5891843}
        #
        # future and perpetual swaps
        #
        #     {
        #         "id":95938572327,
        #         "contract":"ETH_USDT",
        #         "mkfr":"0",
        #         "tkfr":"0.0005",
        #         "tif":"gtc",
        #         "is_reduce_only":false,
        #         "create_time":1637384600.08,
        #         "price":"3000",
        #         "size":1,
        #         "refr":"0",
        #         "left":1,
        #         "text":"api",
        #         "fill_price":"0",
        #         "user":2436035,
        #         "status":"open",
        #         "is_liq":false,
        #         "refu":0,
        #         "is_close":false,
        #         "iceberg":0
        #     }
        #
        # futures and perpetual swaps conditionals
        #
        #     {"id":7615567}
        #
        return self.parse_order(response, market)
def parse_order_status(self, status):
statuses = {
'_new': 'open',
'filled': 'closed',
'cancelled': 'canceled',
'liquidated': 'closed',
}
return self.safe_string(statuses, status, status)
    def parse_order(self, order, market=None):
        """
        Convert a raw order(regular or conditional, spot or contract - see the
        samples below) into a unified order structure.
        Conditional orders nest the order details under 'put'(spot) or
        'initial'(contract) together with a 'trigger' object holding the stop
        price; top-level fields, when present, take precedence over the nested
        ones.
        """
        #
        # SPOT
        # createOrder/cancelOrder/fetchOrder
        #
        #    {
        #        "id": "62364648575",
        #        "text": "apiv4",
        #        "create_time": "1626354834",
        #        "update_time": "1626354834",
        #        "create_time_ms": "1626354833544",
        #        "update_time_ms": "1626354833544",
        #        "status": "open",
        #        "currency_pair": "BTC_USDT",
        #        "type": "limit",
        #        "account": "spot",
        #        "side": "buy",
        #        "amount": "0.0001",
        #        "price": "30000",
        #        "time_in_force": "gtc",
        #        "iceberg": "0",
        #        "left": "0.0001",
        #        "fill_price": "0",
        #        "filled_total": "0",
        #        "fee": "0",
        #        "fee_currency": "BTC",
        #        "point_fee": "0",
        #        "gt_fee": "0",
        #        "gt_discount": True,
        #        "rebated_fee": "0",
        #        "rebated_fee_currency": "USDT"
        #    }
        #
        # SPOT TRIGGER ORDERS
        # createOrder
        #    {
        #        "id":12604556
        #    }
        #
        # fetchOrder/cancelOrder
        #    {
        #        "market": "ADA_USDT",
        #        "user":6392049,
        #        "trigger": {
        #            "price": "1.08",  # stopPrice
        #            "rule": "\u003e=",
        #            "expiration": 86400
        #        },
        #        "put": {
        #            "type": "limit",
        #            "side": "buy",
        #            "price": "1.08",  # order price
        #            "amount": "1.00000000000000000000",
        #            "account": "normal",
        #            "time_in_force": "gtc"
        #        },
        #        "id": 71639298,
        #        "ctime": 1643945985,
        #        "status": "open"
        #    }
        #
        # FUTURE AND SWAP
        # createOrder/cancelOrder/fetchOrder
        #
        #    {
        #        "id": 123028481731,
        #        "contract": "ADA_USDT",
        #        "mkfr": "-0.00005",
        #        "tkfr": "0.00048",
        #        "tif": "ioc",
        #        "is_reduce_only": False,
        #        "create_time": 1643950262.68,
        #        "finish_time": 1643950262.68,
        #        "price": "0",
        #        "size": 1,
        #        "refr": "0",
        #        "left":0,
        #        "text": "api",
        #        "fill_price": "1.05273",
        #        "user":6329238,
        #        "finish_as": "filled",
        #        "status": "finished",
        #        "is_liq": False,
        #        "refu":0,
        #        "is_close": False,
        #        "iceberg": 0
        #    }
        #
        # TRIGGER ORDERS(FUTURE AND SWAP)
        #
        # createOrder
        #    {
        #        "id":12604556
        #    }
        #
        # fetchOrder/cancelOrder
        #    {
        #        "user": 6320300,
        #        "trigger": {
        #            "strategy_type": 0,
        #            "price_type": 0,
        #            "price": "1.03",  # stopPrice
        #            "rule": 2,
        #            "expiration": 0
        #        },
        #        "initial": {
        #            "contract": "ADA_USDT",
        #            "size":-1,
        #            "price": "1.02",
        #            "tif": "gtc",
        #            "text":"",
        #            "iceberg": 0,
        #            "is_close": False,
        #            "is_reduce_only": False,
        #            "auto_size":""
        #        },
        #        "id": 126393906,
        #        "trade_id": 0,
        #        "status": "open",
        #        "reason": "",
        #        "create_time": 1643953482,
        #        "finish_time": 1643953482,
        #        "is_stop_order": False,
        #        "stop_trigger": {
        #            "rule": 0,
        #            "trigger_price": "",
        #            "order_price": ""
        #        },
        #        "me_order_id": 0,
        #        "order_type":""
        #    }
        #
        # 'put'/'initial' are only present on conditional orders
        put = self.safe_value_2(order, 'put', 'initial')
        trigger = self.safe_value(order, 'trigger')
        contract = self.safe_string(put, 'contract')
        type = self.safe_string(put, 'type')
        timeInForce = self.safe_string_upper_2(put, 'time_in_force', 'tif')
        amount = self.safe_string_2(put, 'amount', 'size')
        side = self.safe_string(put, 'side')
        price = self.safe_string(put, 'price')
        # top-level fields override the nested conditional-order fields
        contract = self.safe_string(order, 'contract', contract)
        type = self.safe_string(order, 'type', type)
        timeInForce = self.safe_string_upper_2(order, 'time_in_force', 'tif', timeInForce)
        if timeInForce == 'POC':
            timeInForce = 'PO'
        postOnly = (timeInForce == 'PO')
        amount = self.safe_string_2(order, 'amount', 'size', amount)
        side = self.safe_string(order, 'side', side)
        price = self.safe_string(order, 'price', price)
        remaining = self.safe_string(order, 'left')
        filled = Precise.string_sub(amount, remaining)
        cost = self.safe_number(order, 'filled_total')
        rawStatus = None
        average = None
        if put:
            # a conditional order has not traded yet, so nothing is filled
            remaining = amount
            filled = '0'
            cost = self.parse_number('0')
        if contract:
            # contract market orders are represented as price 0 with tif ioc
            isMarketOrder = Precise.string_equals(price, '0') and (timeInForce == 'IOC')
            type = 'market' if isMarketOrder else 'limit'
            # contract size is signed: positive = buy, negative = sell
            side = 'buy' if Precise.string_gt(amount, '0') else 'sell'
            rawStatus = self.safe_string(order, 'finish_as', 'open')
            average = self.safe_number(order, 'fill_price')
        else:
            rawStatus = self.safe_string(order, 'status')
        timestamp = self.safe_integer(order, 'create_time_ms')
        if timestamp is None:
            timestamp = self.safe_timestamp_2(order, 'create_time', 'ctime')
        lastTradeTimestamp = self.safe_integer(order, 'update_time_ms')
        if lastTradeTimestamp is None:
            lastTradeTimestamp = self.safe_timestamp_2(order, 'update_time', 'finish_time')
        exchangeSymbol = self.safe_string_2(order, 'currency_pair', 'market', contract)
        # Everything below self(above return) is related to fees
        fees = []
        gtFee = self.safe_string(order, 'gt_fee')
        if gtFee:
            fees.append({
                'currency': 'GT',
                'cost': gtFee,
            })
        fee = self.safe_string(order, 'fee')
        if fee:
            fees.append({
                'currency': self.safe_currency_code(self.safe_string(order, 'fee_currency')),
                'cost': fee,
            })
        rebate = self.safe_string(order, 'rebated_fee')
        if rebate:
            fees.append({
                'currency': self.safe_currency_code(self.safe_string(order, 'rebated_fee_currency')),
                # a rebate is money back, hence the negated cost
                'cost': Precise.string_neg(rebate),
            })
        numFeeCurrencies = len(fees)
        multipleFeeCurrencies = numFeeCurrencies > 1
        status = self.parse_order_status(rawStatus)
        return self.safe_order({
            'id': self.safe_string(order, 'id'),
            'clientOrderId': self.safe_string(order, 'text'),
            'timestamp': timestamp,
            'datetime': self.iso8601(timestamp),
            'lastTradeTimestamp': lastTradeTimestamp,
            'status': status,
            'symbol': self.safe_symbol(exchangeSymbol),
            'type': type,
            'timeInForce': timeInForce,
            'postOnly': postOnly,
            'side': side,
            'price': self.parse_number(price),
            'stopPrice': self.safe_number(trigger, 'price'),
            'average': average,
            'amount': self.parse_number(Precise.string_abs(amount)),
            'cost': cost,
            'filled': self.parse_number(filled),
            'remaining': self.parse_number(Precise.string_abs(remaining)),
            'fee': None if multipleFeeCurrencies else self.safe_value(fees, 0),
            'fees': fees if multipleFeeCurrencies else [],
            'trades': None,
            'info': order,
        }, market)
async def fetch_order(self, id, symbol=None, params={}):
'''
* Retrieves information on an order
* @param {string} id: Order id
* @param {string} symbol: Unified market symbol
* @param {boolean} params.stop: True if the order being fetched is a trigger order
* @param {dictionary} params: Parameters specified by the exchange api
* @returns Order structure
'''
if symbol is None:
raise ArgumentsRequired(self.id + ' fetchOrder() requires a symbol argument')
await self.load_markets()
stop = self.safe_value_2(params, 'is_stop_order', 'stop', False)
params = self.omit(params, ['is_stop_order', 'stop'])
market = self.market(symbol)
request = {
'order_id': id,
}
if market['spot'] or market['margin']:
request['currency_pair'] = market['id']
else:
request['settle'] = market['settleId']
method = None
if stop:
method = self.get_supported_mapping(market['type'], {
'spot': 'privateSpotGetPriceOrdersOrderId',
'margin': 'privateSpotGetPriceOrdersOrderId',
'swap': 'privateFuturesGetSettlePriceOrdersOrderId',
'future': 'privateDeliveryGetSettlePriceOrdersOrderId',
})
else:
method = self.get_supported_mapping(market['type'], {
'spot': 'privateSpotGetOrdersOrderId',
'margin': 'privateSpotGetOrdersOrderId',
'swap': 'privateFuturesGetSettleOrdersOrderId',
'future': 'privateDeliveryGetSettleOrdersOrderId',
})
response = await getattr(self, method)(self.extend(request, params))
return self.parse_order(response, market)
    async def fetch_open_orders(self, symbol=None, since=None, limit=None, params={}):
        """
        Fetch all currently open orders.
        With no symbol and a spot account type(or a margin/cross_margin type),
        uses the dedicated open-orders endpoint that returns orders grouped per
        currency pair; otherwise delegates to fetch_orders_by_status('open', ...),
        which requires a symbol.
        """
        await self.load_markets()
        type = None
        type, params = self.handle_market_type_and_params('fetchOpenOrders', None, params)
        # NOTE(review): 'and' binds tighter than 'or', so for type 'margin' or
        # 'cross_margin' this branch is entered even when a symbol was supplied -
        # confirm that ignoring the symbol here is intended
        if symbol is None and (type == 'spot') or type == 'margin' or type == 'cross_margin':
            request = {
                # 'page': 1,
                # 'limit': limit,
                'account': type,  # spot/margin(default), cross_margin
            }
            if limit is not None:
                request['limit'] = limit
            response = await self.privateSpotGetOpenOrders(self.extend(request, params))
            #
            #    [
            #        {
            #            "currency_pair": "ETH_BTC",
            #            "total": 1,
            #            "orders": [
            #                {
            #                    "id": "12332324",
            #                    "text": "t-123456",
            #                    "create_time": "1548000000",
            #                    "update_time": "1548000100",
            #                    "currency_pair": "ETH_BTC",
            #                    "status": "open",
            #                    "type": "limit",
            #                    "account": "spot",
            #                    "side": "buy",
            #                    "amount": "1",
            #                    "price": "5.00032",
            #                    "time_in_force": "gtc",
            #                    "left": "0.5",
            #                    "filled_total": "2.50016",
            #                    "fee": "0.005",
            #                    "fee_currency": "ETH",
            #                    "point_fee": "0",
            #                    "gt_fee": "0",
            #                    "gt_discount": False,
            #                    "rebated_fee": "0",
            #                    "rebated_fee_currency": "BTC"
            #                }
            #            ]
            #        },
            #        ...
            #    ]
            #
            # flatten the per-currency-pair groups into a single list of orders
            allOrders = []
            for i in range(0, len(response)):
                entry = response[i]
                orders = self.safe_value(entry, 'orders', [])
                parsed = self.parse_orders(orders, None, since, limit)
                allOrders = self.array_concat(allOrders, parsed)
            return self.filter_by_since_limit(allOrders, since, limit)
        return await self.fetch_orders_by_status('open', symbol, since, limit, params)
    async def fetch_closed_orders(self, symbol=None, since=None, limit=None, params={}):
        """
        Fetch orders that are no longer open('finished' exchange status).
        Thin wrapper around fetch_orders_by_status, which requires the symbol argument.
        """
        return await self.fetch_orders_by_status('finished', symbol, since, limit, params)
    async def fetch_orders_by_status(self, status, symbol=None, since=None, limit=None, params={}):
        """
        Fetch orders with a given exchange status('open' or 'finished') for a
        single market; a symbol is required.
        :param str status: raw exchange status to filter by
        :param str symbol: unified market symbol(required)
        :param int since: earliest timestamp in milliseconds(spot/margin only)
        :param int limit: maximum number of orders to return
        :param dict params: extra exchange-specific parameters
        :returns: a list of unified order structures
        :raises ArgumentsRequired: if no symbol is given
        """
        if symbol is None:
            raise ArgumentsRequired(self.id + ' fetchOrdersByStatus requires a symbol argument')
        await self.load_markets()
        market = self.market(symbol)
        request = self.prepare_request(market)
        request['status'] = status
        if limit is not None:
            request['limit'] = limit
        # the contract endpoints do not accept a 'from' parameter
        if since is not None and (market['spot'] or market['margin']):
            request['from'] = int(since / 1000)
        method = self.get_supported_mapping(market['type'], {
            'spot': 'privateSpotGetOrders',
            'margin': 'privateSpotGetOrders',
            'swap': 'privateFuturesGetSettleOrders',
            'future': 'privateDeliveryGetSettleOrders',
        })
        if market['type'] == 'margin' or market['type'] == 'cross_margin':
            request['account'] = market['type']
        response = await getattr(self, method)(self.extend(request, params))
        # SPOT
        #    {
        #        "id":"8834234273",
        #        "text": "3",
        #        "create_time": "1635406193",
        #        "update_time": "1635406193",
        #        "create_time_ms": 1635406193361,
        #        "update_time_ms": 1635406193361,
        #        "status": "closed",
        #        "currency_pair": "BTC_USDT",
        #        "type": "limit",
        #        "account": "spot",
        #        "side": "sell",
        #        "amount": "0.0002",
        #        "price": "58904.01",
        #        "time_in_force":"gtc",
        #        "iceberg": "0",
        #        "left": "0.0000",
        #        "fill_price": "11.790516",
        #        "filled_total": "11.790516",
        #        "fee": "0.023581032",
        #        "fee_currency": "USDT",
        #        "point_fee": "0",
        #        "gt_fee": "0",
        #        "gt_discount": False,
        #        "rebated_fee_currency": "BTC"
        #    }
        # Perpetual Swap
        #    {
        #        "status": "finished",
        #        "size":-1,
        #        "left":0,
        #        "id":82750739203,
        #        "is_liq":false,
        #        "is_close":false,
        #        "contract": "BTC_USDT",
        #        "text": "web",
        #        "fill_price": "60721.3",
        #        "finish_as": "filled",
        #        "iceberg":0,
        #        "tif": "ioc",
        #        "is_reduce_only":true,
        #        "create_time": 1635403475.412,
        #        "finish_time": 1635403475.4127,
        #        "price": "0"
        #    }
        return self.parse_orders(response, market, since, limit)
async def cancel_order(self, id, symbol=None, params={}):
'''
* Cancels an open order
* @param {string} id: Order id
* @param {string} symbol: Unified market symbol
* @param {boolean} params.stop: True if the order to be cancelled is a trigger order
* @param {dictionary} params: Parameters specified by the exchange api
* @returns Order structure
'''
if symbol is None:
raise ArgumentsRequired(self.id + ' cancelOrder() requires a symbol argument')
await self.load_markets()
market = self.market(symbol)
request = {
'order_id': id,
}
if market['contract']:
request['settle'] = market['settleId']
else:
request['currency_pair'] = market['id']
stop = self.safe_value_2(params, 'is_stop_order', 'stop', False)
params = self.omit(params, ['is_stop_order', 'stop'])
pathMiddle = 'Price' if stop else ''
method = self.get_supported_mapping(market['type'], {
'spot': 'privateSpotDelete' + pathMiddle + 'OrdersOrderId',
'margin': 'privateSpotDelete' + pathMiddle + 'OrdersOrderId',
'swap': 'privateFuturesDeleteSettle' + pathMiddle + 'OrdersOrderId',
'future': 'privateDeliveryDeleteSettle' + pathMiddle + 'OrdersOrderId',
})
response = await getattr(self, method)(self.extend(request, params))
#
# spot
#
# {
# "id":"95282841887",
# "text":"apiv4",
# "create_time":"1637383156",
# "update_time":"1637383235",
# "create_time_ms":1637383156017,
# "update_time_ms":1637383235085,
# "status":"cancelled",
# "currency_pair":"ETH_USDT",
# "type":"limit",
# "account":"spot",
# "side":"buy",
# "amount":"0.01",
# "price":"3500",
# "time_in_force":"gtc",
# "iceberg":"0",
# "left":"0.01",
# "fill_price":"0",
# "filled_total":"0",
# "fee":"0",
# "fee_currency":"ETH",
# "point_fee":"0",
# "gt_fee":"0",
# "gt_discount":false,
# "rebated_fee":"0",
# "rebated_fee_currency":"USDT"
# }
#
# spot conditional
#
# {
# "market":"ETH_USDT",
# "user":2436035,
# "trigger":{
# "price":"3500",
# "rule":"\u003c=",
# "expiration":86400
# },
# "put":{
# "type":"limit",
# "side":"buy",
# "price":"3500",
# "amount":"0.01000000000000000000",
# "account":"normal",
# "time_in_force":"gtc"
# },
# "id":5891843,
# "ctime":1637382379,
# "ftime":1637382673,
# "status":"canceled"
# }
#
# perpetual swaps
#
# {
# id: "82241928192",
# contract: "BTC_USDT",
# mkfr: "0",
# tkfr: "0.0005",
# tif: "gtc",
# is_reduce_only: False,
# create_time: "1635196145.06",
# finish_time: "1635196233.396",
# price: "61000",
# size: "4",
# refr: "0",
# left: "4",
# text: "web",
# fill_price: "0",
# user: "6693577",
# finish_as: "cancelled",
# status: "finished",
# is_liq: False,
# refu: "0",
# is_close: False,
# iceberg: "0",
# }
#
return self.parse_order(response, market)
async def cancel_all_orders(self, symbol=None, params={}):
await self.load_markets()
request = {}
market = None
if symbol is not None:
market = self.market(symbol)
request = self.prepare_request(market)
type, query = self.handle_market_type_and_params('cancelAllOrders', market, params)
swap = type == 'swap'
future = type == 'future'
if symbol is None and (swap or future):
defaultSettle = 'usdt' if swap else 'btc'
settle = self.safe_string_lower(params, 'settle', defaultSettle)
request['settle'] = settle
method = self.get_supported_mapping(type, {
'spot': 'privateSpotDeleteOrders',
'margin': 'privateSpotDeleteOrders',
'swap': 'privateFuturesDeleteSettleOrders',
'future': 'privateDeliveryDeleteSettleOrders',
})
response = await getattr(self, method)(self.extend(request, query))
#
# [
# {
# "id":139797004085,
# "contract":"ADA_USDT",
# "mkfr":"0",
# "tkfr":"0.0005",
# "tif":"gtc",
# "is_reduce_only":false,
# "create_time":1647911169.343,
# "finish_time":1647911226.849,
# "price":"0.8",
# "size":1,
# "refr":"0.3",
# "left":1,
# "text":"api",
# "fill_price":"0",
# "user":6693577,
# "finish_as":"cancelled",
# "status":"finished",
# "is_liq":false,
# "refu":2436035,
# "is_close":false,
# "iceberg":0
# }
# ...
# ]
#
return self.parse_orders(response, market)
async def transfer(self, code, amount, fromAccount, toAccount, params={}):
await self.load_markets()
currency = self.currency(code)
accountsByType = self.safe_value(self.options, 'accountsByType', {})
fromId = self.safe_string(accountsByType, fromAccount, fromAccount)
toId = self.safe_string(accountsByType, toAccount, toAccount)
if fromId is None:
keys = list(accountsByType.keys())
raise ExchangeError(self.id + ' fromAccount must be one of ' + ', '.join(keys))
if toId is None:
keys = list(accountsByType.keys())
raise ExchangeError(self.id + ' toAccount must be one of ' + ', '.join(keys))
truncated = self.currency_to_precision(code, amount)
request = {
'currency': currency['id'],
'from': fromId,
'to': toId,
'amount': truncated,
}
if (toId == 'futures') or (toId == 'delivery') or (fromId == 'futures') or (fromId == 'delivery'):
request['settle'] = currency['lowerCaseId']
response = await self.privateWalletPostTransfers(self.extend(request, params))
#
# according to the docs
#
# {
# "currency": "BTC",
# "from": "spot",
# "to": "margin",
# "amount": "1",
# "currency_pair": "BTC_USDT"
# }
#
# actual response
#
# POST https://api.gateio.ws/api/v4/wallet/transfers 204 No Content
#
return {
'info': response,
'id': None,
'timestamp': None,
'datetime': None,
'currency': code,
'amount': truncated,
'fromAccount': fromId,
'toAccount': toId,
'status': None,
}
async def set_leverage(self, leverage, symbol=None, params={}):
if symbol is None:
raise ArgumentsRequired(self.id + ' setLeverage() requires a symbol argument')
# WARNING: THIS WILL INCREASE LIQUIDATION PRICE FOR OPEN ISOLATED LONG POSITIONS
# AND DECREASE LIQUIDATION PRICE FOR OPEN ISOLATED SHORT POSITIONS
if (leverage < 0) or (leverage > 100):
raise BadRequest(self.id + ' leverage should be between 1 and 100')
await self.load_markets()
market = self.market(symbol)
method = self.get_supported_mapping(market['type'], {
'swap': 'privateFuturesPostSettlePositionsContractLeverage',
'future': 'privateDeliveryPostSettlePositionsContractLeverage',
})
request = self.prepare_request(market)
defaultMarginType = self.safe_string_2(self.options, 'marginType', 'defaultMarginType')
crossLeverageLimit = self.safe_string(params, 'cross_leverage_limit')
marginType = self.safe_string(params, 'marginType', defaultMarginType)
if crossLeverageLimit is not None:
marginType = 'cross'
leverage = crossLeverageLimit
if marginType == 'cross':
request['query'] = {
'cross_leverage_limit': str(leverage),
'leverage': '0',
}
else:
request['query'] = {
'leverage': str(leverage),
}
response = await getattr(self, method)(self.extend(request, params))
#
# {
# "value":"0",
# "leverage":"5",
# "mode":"single",
# "realised_point":"0",
# "contract":"BTC_USDT",
# "entry_price":"0",
# "mark_price":"62035.86",
# "history_point":"0",
# "realised_pnl":"0",
# "close_order":null,
# "size":0,
# "cross_leverage_limit":"0",
# "pending_orders":0,
# "adl_ranking":6,
# "maintenance_rate":"0.005",
# "unrealised_pnl":"0",
# "user":2436035,
# "leverage_max":"100",
# "history_pnl":"0",
# "risk_limit":"1000000",
# "margin":"0",
# "last_close_pnl":"0",
# "liq_price":"0"
# }
#
return response
    def parse_position(self, position, market=None):
        """
        Convert a raw contract position(see sample below) into a unified
        position structure.
        A 'leverage' of '0' marks a cross-margin position; any other value is
        isolated. The contract 'size' is signed: positive = long, negative = short.
        """
        #
        #     {
        #         value: "12.475572",
        #         leverage: "0",
        #         mode: "single",
        #         realised_point: "0",
        #         contract: "BTC_USDT",
        #         entry_price: "62422.6",
        #         mark_price: "62377.86",
        #         history_point: "0",
        #         realised_pnl: "-0.00624226",
        #         close_order: null,
        #         size: "2",
        #         cross_leverage_limit: "25",
        #         pending_orders: "0",
        #         adl_ranking: "5",
        #         maintenance_rate: "0.005",
        #         unrealised_pnl: "-0.008948",
        #         user: "663337",
        #         leverage_max: "100",
        #         history_pnl: "14.98868396636",
        #         risk_limit: "1000000",
        #         margin: "0.740721495056",
        #         last_close_pnl: "-0.041996015",
        #         liq_price: "59058.58"
        #     }
        #
        contract = self.safe_string(position, 'contract')
        market = self.safe_market(contract, market)
        size = self.safe_string(position, 'size')
        side = None
        # signed size: > 0 is a long position, < 0 a short one, 0 leaves side as None
        if Precise.string_gt(size, '0'):
            side = 'long'
        elif Precise.string_lt(size, '0'):
            side = 'short'
        maintenanceRate = self.safe_string(position, 'maintenance_rate')
        notional = self.safe_string(position, 'value')
        leverage = self.safe_string(position, 'leverage')
        marginType = None
        if leverage == '0':
            marginType = 'cross'
        else:
            marginType = 'isolated'
        unrealisedPnl = self.safe_string(position, 'unrealised_pnl')
        # Initial Position Margin = ( Position Value / Leverage ) + Close Position Fee
        # *The default leverage under the full position is the highest leverage in the market.
        # *Trading fee is charged as Taker Fee Rate(0.075%).
        takerFee = '0.00075'
        feePaid = Precise.string_mul(takerFee, notional)
        initialMarginString = Precise.string_add(Precise.string_div(notional, leverage), feePaid)
        percentage = Precise.string_mul(Precise.string_div(unrealisedPnl, initialMarginString), '100')
        return {
            'info': position,
            'symbol': self.safe_string(market, 'symbol'),
            'timestamp': None,
            'datetime': None,
            'initialMargin': self.parse_number(initialMarginString),
            'initialMarginPercentage': self.parse_number(Precise.string_div(initialMarginString, notional)),
            'maintenanceMargin': self.parse_number(Precise.string_mul(maintenanceRate, notional)),
            'maintenanceMarginPercentage': self.parse_number(maintenanceRate),
            'entryPrice': self.safe_number(position, 'entry_price'),
            'notional': self.parse_number(notional),
            'leverage': self.safe_number(position, 'leverage'),
            'unrealizedPnl': self.parse_number(unrealisedPnl),
            'contracts': self.parse_number(Precise.string_abs(size)),
            'contractSize': self.safe_value(market, 'contractSize'),
            # realisedPnl: position['realised_pnl'],
            'marginRatio': None,
            'liquidationPrice': self.safe_number(position, 'liq_price'),
            'markPrice': self.safe_number(position, 'mark_price'),
            'collateral': self.safe_number(position, 'margin'),
            'marginType': marginType,
            'side': side,
            'percentage': self.parse_number(percentage),
        }
def parse_positions(self, positions):
result = []
for i in range(0, len(positions)):
result.append(self.parse_position(positions[i]))
return result
async def fetch_positions(self, symbols=None, params={}):
# :param symbols: Not used by Gateio
# :param params:
# settle: The currency that derivative contracts are settled in
# Other exchange specific params
#
await self.load_markets()
defaultType = self.safe_string_2(self.options, 'fetchPositions', 'defaultType', 'swap')
type = self.safe_string(params, 'type', defaultType)
method = self.get_supported_mapping(type, {
'swap': 'privateFuturesGetSettlePositions',
'future': 'privateDeliveryGetSettlePositions',
})
defaultSettle = 'usdt' if (type == 'swap') else 'btc'
settle = self.safe_string_lower(params, 'settle', defaultSettle)
request = {
'settle': settle,
}
response = await getattr(self, method)(request)
#
# [
# {
# value: "12.475572",
# leverage: "0",
# mode: "single",
# realised_point: "0",
# contract: "BTC_USDT",
# entry_price: "62422.6",
# mark_price: "62377.86",
# history_point: "0",
# realised_pnl: "-0.00624226",
# close_order: null,
# size: "2",
# cross_leverage_limit: "25",
# pending_orders: "0",
# adl_ranking: "5",
# maintenance_rate: "0.005",
# unrealised_pnl: "-0.008948",
# user: "6693577",
# leverage_max: "100",
# history_pnl: "14.98868396636",
# risk_limit: "1000000",
# margin: "0.740721495056",
# last_close_pnl: "-0.041996015",
# liq_price: "59058.58"
# }
# ]
#
result = self.parse_positions(response)
return self.filter_by_array(result, 'symbol', symbols, False)
async def fetch_leverage_tiers(self, symbols=None, params={}):
await self.load_markets()
methodName = 'fetchLeverageTiers'
type, query = self.handle_market_type_and_params(methodName, None, params)
swap = type == 'swap'
defaultSettle = 'usdt' if swap else 'btc'
settle = self.safe_string_lower(query, 'settle', defaultSettle)
query['settle'] = settle
if type != 'future' and type != 'swap':
raise BadRequest(self.id + '.' + methodName + ' only supports swap and future')
method = self.get_supported_mapping(type, {
'swap': 'publicFuturesGetSettleContracts',
'future': 'publicDeliveryGetSettleContracts',
})
response = await getattr(self, method)(query)
# Perpetual swap
# [
# {
# "name": "BTC_USDT",
# "type": "direct",
# "quanto_multiplier": "0.0001",
# "ref_discount_rate": "0",
# "order_price_deviate": "0.5",
# "maintenance_rate": "0.005",
# "mark_type": "index",
# "last_price": "38026",
# "mark_price": "37985.6",
# "index_price": "37954.92",
# "funding_rate_indicative": "0.000219",
# "mark_price_round": "0.01",
# "funding_offset": 0,
# "in_delisting": False,
# "risk_limit_base": "1000000",
# "interest_rate": "0.0003",
# "order_price_round": "0.1",
# "order_size_min": 1,
# "ref_rebate_rate": "0.2",
# "funding_interval": 28800,
# "risk_limit_step": "1000000",
# "leverage_min": "1",
# "leverage_max": "100",
# "risk_limit_max": "8000000",
# "maker_fee_rate": "-0.00025",
# "taker_fee_rate": "0.00075",
# "funding_rate": "0.002053",
# "order_size_max": 1000000,
# "funding_next_apply": 1610035200,
# "short_users": 977,
# "config_change_time": 1609899548,
# "trade_size": 28530850594,
# "position_size": 5223816,
# "long_users": 455,
# "funding_impact_value": "60000",
# "orders_limit": 50,
# "trade_id": 10851092,
# "orderbook_id": 2129638396
# }
# ]
#
# Delivery Futures
# [
# {
# "name": "BTC_USDT_20200814",
# "underlying": "BTC_USDT",
# "cycle": "WEEKLY",
# "type": "direct",
# "quanto_multiplier": "0.0001",
# "mark_type": "index",
# "last_price": "9017",
# "mark_price": "9019",
# "index_price": "9005.3",
# "basis_rate": "0.185095",
# "basis_value": "13.7",
# "basis_impact_value": "100000",
# "settle_price": "0",
# "settle_price_interval": 60,
# "settle_price_duration": 1800,
# "settle_fee_rate": "0.0015",
# "expire_time": 1593763200,
# "order_price_round": "0.1",
# "mark_price_round": "0.1",
# "leverage_min": "1",
# "leverage_max": "100",
# "maintenance_rate": "1000000",
# "risk_limit_base": "140.726652109199",
# "risk_limit_step": "1000000",
# "risk_limit_max": "8000000",
# "maker_fee_rate": "-0.00025",
# "taker_fee_rate": "0.00075",
# "ref_discount_rate": "0",
# "ref_rebate_rate": "0.2",
# "order_price_deviate": "0.5",
# "order_size_min": 1,
# "order_size_max": 1000000,
# "orders_limit": 50,
# "orderbook_id": 63,
# "trade_id": 26,
# "trade_size": 435,
# "position_size": 130,
# "config_change_time": 1593158867,
# "in_delisting": False
# }
# ]
#
return self.parse_leverage_tiers(response, symbols, 'name')
def parse_market_leverage_tiers(self, info, market=None):
'''
https://www.gate.io/help/futures/perpetual/22162/instrctions-of-risk-limit
@param info: Exchange market response for 1 market
Perpetual swap
{
"name": "BTC_USDT",
"type": "direct",
"quanto_multiplier": "0.0001",
"ref_discount_rate": "0",
"order_price_deviate": "0.5",
"maintenance_rate": "0.005",
"mark_type": "index",
"last_price": "38026",
"mark_price": "37985.6",
"index_price": "37954.92",
"funding_rate_indicative": "0.000219",
"mark_price_round": "0.01",
"funding_offset": 0,
"in_delisting": False,
"risk_limit_base": "1000000",
"interest_rate": "0.0003",
"order_price_round": "0.1",
"order_size_min": 1,
"ref_rebate_rate": "0.2",
"funding_interval": 28800,
"risk_limit_step": "1000000",
"leverage_min": "1",
"leverage_max": "100",
"risk_limit_max": "8000000",
"maker_fee_rate": "-0.00025",
"taker_fee_rate": "0.00075",
"funding_rate": "0.002053",
"order_size_max": 1000000,
"funding_next_apply": 1610035200,
"short_users": 977,
"config_change_time": 1609899548,
"trade_size": 28530850594,
"position_size": 5223816,
"long_users": 455,
"funding_impact_value": "60000",
"orders_limit": 50,
"trade_id": 10851092,
"orderbook_id": 2129638396
Delivery Futures
{
"name": "BTC_USDT_20200814",
"underlying": "BTC_USDT",
"cycle": "WEEKLY",
"type": "direct",
"quanto_multiplier": "0.0001",
"mark_type": "index",
"last_price": "9017",
"mark_price": "9019",
"index_price": "9005.3",
"basis_rate": "0.185095",
"basis_value": "13.7",
"basis_impact_value": "100000",
"settle_price": "0",
"settle_price_interval": 60,
"settle_price_duration": 1800,
"settle_fee_rate": "0.0015",
"expire_time": 1593763200,
"order_price_round": "0.1",
"mark_price_round": "0.1",
"leverage_min": "1",
"leverage_max": "100",
"maintenance_rate": "1000000",
"risk_limit_base": "140.726652109199",
"risk_limit_step": "1000000",
"risk_limit_max": "8000000",
"maker_fee_rate": "-0.00025",
"taker_fee_rate": "0.00075",
"ref_discount_rate": "0",
"ref_rebate_rate": "0.2",
"order_price_deviate": "0.5",
"order_size_min": 1,
"order_size_max": 1000000,
"orders_limit": 50,
"orderbook_id": 63,
"trade_id": 26,
"trade_size": 435,
"position_size": 130,
"config_change_time": 1593158867,
"in_delisting": False
@param market: CCXT market
'''
maintenanceMarginUnit = self.safe_string(info, 'maintenance_rate') # '0.005',
leverageMax = self.safe_string(info, 'leverage_max') # '100',
riskLimitStep = self.safe_string(info, 'risk_limit_step') # '1000000',
riskLimitMax = self.safe_string(info, 'risk_limit_max') # '16000000',
initialMarginUnit = Precise.string_div('1', leverageMax)
maintenanceMarginRate = maintenanceMarginUnit
initialMarginRatio = initialMarginUnit
floor = '0'
tiers = []
while(Precise.string_lt(floor, riskLimitMax)):
cap = Precise.string_add(floor, riskLimitStep)
tiers.append({
'tier': self.parse_number(Precise.string_div(cap, riskLimitStep)),
'currency': self.safe_string(market, 'settle'),
'notionalFloor': self.parse_number(floor),
'notionalCap': self.parse_number(cap),
'maintenanceMarginRate': self.parse_number(maintenanceMarginRate),
'maxLeverage': self.parse_number(Precise.string_div('1', initialMarginRatio)),
'info': info,
})
maintenanceMarginRate = Precise.string_add(maintenanceMarginRate, maintenanceMarginUnit)
initialMarginRatio = Precise.string_add(initialMarginRatio, initialMarginUnit)
floor = cap
return tiers
    def sign(self, path, api=[], method='GET', params={}, headers=None, body=None):
        """Assemble the URL, headers and body for an API request.

        Private requests are authenticated with an HMAC-SHA512 signature
        over the upper-cased method, the versioned path, the query string,
        the SHA-512 hash of the body, and a UNIX-seconds timestamp, joined
        by newlines (Gate.io APIv4 signing scheme).
        """
        authentication = api[0]  # public, private
        type = api[1]  # spot, margin, future, delivery
        query = self.omit(params, self.extract_params(path))
        path = self.implode_params(path, params)
        endPart = '' if (path == '') else ('/' + path)
        entirePath = '/' + type + endPart
        url = self.urls['api'][authentication][type]
        if url is None:
            raise NotSupported(self.id + ' does not have a testnet for the ' + type + ' market type.')
        url += entirePath
        if authentication == 'public':
            if query:
                url += '?' + self.urlencode(query)
        else:
            # Private request: sign it.
            queryString = ''
            if (method == 'GET') or (method == 'DELETE'):
                if query:
                    queryString = self.urlencode(query)
                    url += '?' + queryString
            else:
                # POST/PUT/...: the optional 'query' sub-dict goes into the
                # URL; everything else is JSON-encoded into the request body.
                urlQueryParams = self.safe_value(query, 'query', {})
                if urlQueryParams:
                    queryString = self.urlencode(urlQueryParams)
                    url += '?' + queryString
                query = self.omit(query, 'query')
                body = self.json(query)
            # The body hash participates in the signature even when empty.
            bodyPayload = '' if (body is None) else body
            bodySignature = self.hash(self.encode(bodyPayload), 'sha512')
            timestamp = self.seconds()
            timestampString = str(timestamp)
            signaturePath = '/api/' + self.version + entirePath
            payloadArray = [method.upper(), signaturePath, queryString, bodySignature, timestampString]
            # eslint-disable-next-line quotes
            payload = "\n".join(payloadArray)
            signature = self.hmac(self.encode(payload), self.encode(self.secret), hashlib.sha512)
            headers = {
                'KEY': self.apiKey,
                'Timestamp': timestampString,
                'SIGN': signature,
                'Content-Type': 'application/json',
            }
        return {'url': url, 'method': method, 'body': body, 'headers': headers}
def handle_errors(self, code, reason, url, method, headers, body, response, requestHeaders, requestBody):
if response is None:
return
#
# {"label":"ORDER_NOT_FOUND","message":"Order not found"}
# {"label":"INVALID_PARAM_VALUE","message":"invalid argument: status"}
# {"label":"INVALID_PARAM_VALUE","message":"invalid argument: Trigger.rule"}
# {"label":"INVALID_PARAM_VALUE","message":"invalid argument: trigger.expiration invalid range"}
# {"label":"INVALID_ARGUMENT","detail":"invalid size"}
#
label = self.safe_string(response, 'label')
if label is not None:
feedback = self.id + ' ' + body
self.throw_exactly_matched_exception(self.exceptions['exact'], label, feedback)
raise ExchangeError(feedback)
| 42.657388 | 204 | 0.459854 |
eb5b3a6754eea7735149caac2775f8f66bb2d712 | 7,232 | py | Python | train.py | shizhediao/old-OpenNMT-py | 328187e0655a685103c78ae9f77ff8c582e0f270 | [
"MIT"
] | null | null | null | train.py | shizhediao/old-OpenNMT-py | 328187e0655a685103c78ae9f77ff8c582e0f270 | [
"MIT"
] | null | null | null | train.py | shizhediao/old-OpenNMT-py | 328187e0655a685103c78ae9f77ff8c582e0f270 | [
"MIT"
] | null | null | null | #!/usr/bin/env python
"""Train models."""
import os
import signal
import torch
import onmt.opts as opts
import onmt.utils.distributed
from onmt.utils.misc import set_random_seed
from onmt.utils.logging import init_logger, logger
from onmt.train_single import main as single_main
from onmt.utils.parse import ArgumentParser
from onmt.inputters.inputter import build_dataset_iter, \
load_old_vocab, old_style_vocab, build_dataset_iter_multiple, make_tgt
from itertools import cycle
from torchtext.data import Field, RawField
def main(opt):
    """Validate options, build vocab/fields and the training iterator, then
    launch training on CPU, a single GPU, or multiple GPUs via spawn.

    A stray debug statement that printed os.environ['PATH'] on every run has
    been removed.

    :param opt: parsed command-line options (see _get_parser()).
    """
    ArgumentParser.validate_train_opts(opt)
    ArgumentParser.update_model_opts(opt)
    ArgumentParser.validate_model_opts(opt)

    # Load checkpoint if we resume from a previous training.
    if opt.train_from:
        logger.info('Loading checkpoint from %s' % opt.train_from)
        checkpoint = torch.load(opt.train_from,
                                map_location=lambda storage, loc: storage)
        logger.info('Loading vocab from checkpoint at %s.' % opt.train_from)
        vocab = checkpoint['vocab']
    else:
        vocab = torch.load(opt.data + '.vocab.pt')

    # check for code where vocab is saved instead of fields
    # (in the future this will be done in a smarter way)
    if old_style_vocab(vocab):
        fields = load_old_vocab(
            vocab, opt.model_type, dynamic_dict=opt.copy_attn)
    else:
        fields = vocab

    # @memray: a temporary workaround, as well as train_single.py line 78
    if opt.model_type == "keyphrase":
        if opt.tgt_type in ["one2one", "multiple"]:
            del fields['sep_indices']
        else:
            if 'sep_indices' not in fields:
                sep_indices = Field(
                    use_vocab=False, dtype=torch.long,
                    postprocessing=make_tgt, sequential=False)
                fields["sep_indices"] = sep_indices

    # Multiple data shards get a round-robin multi-shard iterator; a single
    # shard uses the plain iterator.
    if len(opt.data_ids) > 1:
        train_shards = []
        for train_id in opt.data_ids:
            shard_base = "train_" + train_id
            train_shards.append(shard_base)
        train_iter = build_dataset_iter_multiple(train_shards, fields, opt)
    else:
        if opt.data_ids[0] is not None:
            shard_base = "train_" + opt.data_ids[0]
        else:
            shard_base = "train"
        train_iter = build_dataset_iter(shard_base, fields, opt)

    nb_gpu = len(opt.gpu_ranks)

    if opt.world_size > 1:
        # Multi-GPU: one consumer process per GPU plus one batch producer,
        # connected through bounded queues and a shared semaphore.
        queues = []
        mp = torch.multiprocessing.get_context('spawn')
        semaphore = mp.Semaphore(opt.world_size * opt.queue_size)
        # Create a thread to listen for errors in the child processes.
        error_queue = mp.SimpleQueue()
        error_handler = ErrorHandler(error_queue)
        # Train with multiprocessing.
        procs = []
        for device_id in range(nb_gpu):
            q = mp.Queue(opt.queue_size)
            queues += [q]
            procs.append(mp.Process(target=run, args=(
                opt, device_id, error_queue, q, semaphore), daemon=True))
            procs[device_id].start()
            logger.info(" Starting process pid: %d " % procs[device_id].pid)
            error_handler.add_child(procs[device_id].pid)
        producer = mp.Process(target=batch_producer,
                              args=(train_iter, queues, semaphore, opt,),
                              daemon=True)
        producer.start()
        error_handler.add_child(producer.pid)

        for p in procs:
            p.join()
        producer.terminate()

    elif nb_gpu == 1:  # case 1 GPU only
        single_main(opt, 0)
    else:  # case only CPU
        single_main(opt, -1)
def batch_producer(generator_to_serve, queues, semaphore, opt):
    """Producer process body: pull batches from the training iterator, move
    each batch's tensors onto the next consumer's GPU device, and hand the
    batch over via that consumer's queue (round-robin over `queues`).

    `semaphore` bounds the number of in-flight batches; it is acquired here
    per batch and released by the consumers.
    """
    init_logger(opt.log_file)
    set_random_seed(opt.seed, False)
    # generator_to_serve = iter(generator_to_serve)

    def pred(x):
        """
        Filters batches that belong only
        to gpu_ranks of current node
        """
        # Returns True for a match, implicitly None (falsy) otherwise.
        for rank in opt.gpu_ranks:
            if x[0] % opt.world_size == rank:
                return True

    generator_to_serve = filter(
        pred, enumerate(generator_to_serve))

    def next_batch(device_id):
        # Blocks on the semaphore so the producer cannot run unboundedly
        # ahead of the consumers.
        new_batch = next(generator_to_serve)
        semaphore.acquire()
        return new_batch[1]

    b = next_batch(0)

    # Infinite round-robin over the per-consumer queues; terminated
    # externally by the parent (producer.terminate()).
    for device_id, q in cycle(enumerate(queues)):
        b.dataset = None
        # src may be a (tensor, lengths) tuple or a bare tensor.
        if isinstance(b.src, tuple):
            b.src = tuple([_.to(torch.device(device_id))
                           for _ in b.src])
        else:
            b.src = b.src.to(torch.device(device_id))
        b.tgt = b.tgt.to(torch.device(device_id))
        b.indices = b.indices.to(torch.device(device_id))
        b.alignment = b.alignment.to(torch.device(device_id)) \
            if hasattr(b, 'alignment') else None
        b.src_map = b.src_map.to(torch.device(device_id)) \
            if hasattr(b, 'src_map') else None
        # hack to dodge unpicklable `dict_keys`
        b.fields = list(b.fields)
        q.put(b)
        b = next_batch(device_id)
def run(opt, device_id, error_queue, batch_queue, semaphore):
    """ run process """
    # Consumer process body: initialize the distributed context for this
    # device, then run single-GPU training fed by batch_queue. Any failure
    # is reported to the parent through error_queue instead of raising.
    try:
        gpu_rank = onmt.utils.distributed.multi_init(opt, device_id)
        if gpu_rank != opt.gpu_ranks[device_id]:
            raise AssertionError("An error occurred in \
                  Distributed initialization")
        single_main(opt, device_id, batch_queue, semaphore)
    except KeyboardInterrupt:
        pass  # killed by parent, do nothing
    except Exception:
        # propagate exception to parent process, keeping original traceback
        import traceback
        error_queue.put((opt.gpu_ranks[device_id], traceback.format_exc()))
class ErrorHandler(object):
    """A class that listens for exceptions in children processes and propagates
    the tracebacks to the parent process."""

    def __init__(self, error_queue):
        """ init error handler """
        # Local imports keep the class self-contained when pickled into
        # spawned subprocesses.
        import signal
        import threading
        self.error_queue = error_queue
        self.children_pids = []
        # Daemon thread that blocks on the error queue; the first entry
        # triggers SIGUSR1 on this (parent) process.
        self.error_thread = threading.Thread(
            target=self.error_listener, daemon=True)
        self.error_thread.start()
        signal.signal(signal.SIGUSR1, self.signal_handler)

    def add_child(self, pid):
        """ error handler """
        # Register a child pid so it gets SIGINT on failure.
        self.children_pids.append(pid)

    def error_listener(self):
        """ error listener """
        (rank, original_trace) = self.error_queue.get()
        # Re-queue the traceback so signal_handler can read it again.
        self.error_queue.put((rank, original_trace))
        os.kill(os.getpid(), signal.SIGUSR1)

    def signal_handler(self, signalnum, stackframe):
        """ signal handler """
        # Kill all children, then re-raise the child's traceback here so
        # the parent exits with a meaningful error.
        for pid in self.children_pids:
            os.kill(pid, signal.SIGINT)  # kill children processes
        (rank, original_trace) = self.error_queue.get()
        msg = """\n\n-- Tracebacks above this line can probably
                 be ignored --\n\n"""
        msg += original_trace
        raise Exception(msg)
def _get_parser():
    """Assemble the ArgumentParser with config, model and training options."""
    parser = ArgumentParser(description='train.py')
    for register in (opts.config_opts, opts.model_opts, opts.train_opts):
        register(parser)
    return parser
if __name__ == "__main__":
    # Script entry point: parse CLI options and start training.
    parser = _get_parser()

    opt = parser.parse_args()
    main(opt)
6878700031e937643592947d270ff476f8a42281 | 6 | py | Python | recipes/Python/577240_null_null/recipe-577240.py | tdiprima/code | 61a74f5f93da087d27c70b2efe779ac6bd2a3b4f | [
"MIT"
] | 2,023 | 2017-07-29T09:34:46.000Z | 2022-03-24T08:00:45.000Z | recipes/Python/577240_null_null/recipe-577240.py | unhacker/code | 73b09edc1b9850c557a79296655f140ce5e853db | [
"MIT"
] | 32 | 2017-09-02T17:20:08.000Z | 2022-02-11T17:49:37.000Z | recipes/Python/577240_null_null/recipe-577240.py | unhacker/code | 73b09edc1b9850c557a79296655f140ce5e853db | [
"MIT"
] | 780 | 2017-07-28T19:23:28.000Z | 2022-03-25T20:39:41.000Z | null
| 3 | 5 | 0.666667 |
07c3c77909bedf3fac18501b4d8b4b5402f0239d | 5,645 | py | Python | src/mesh/python/x3d_plotter.py | attom/Draco | 181b6b0e08bab928492fb5f2e5b23155e0bfad09 | [
"BSD-3-Clause-Open-MPI"
] | 1 | 2020-06-23T03:15:23.000Z | 2020-06-23T03:15:23.000Z | src/mesh/python/x3d_plotter.py | attom/Draco | 181b6b0e08bab928492fb5f2e5b23155e0bfad09 | [
"BSD-3-Clause-Open-MPI"
] | null | null | null | src/mesh/python/x3d_plotter.py | attom/Draco | 181b6b0e08bab928492fb5f2e5b23155e0bfad09 | [
"BSD-3-Clause-Open-MPI"
] | 1 | 2020-06-23T02:02:50.000Z | 2020-06-23T02:02:50.000Z | #!/usr/bin/env python
# -------------------------------------------*-python-*------------------------------------------- #
# file src/mesh/python/x3d_plotter.py
# date Monday, Aug 9, 2021
# brief This script plots X3D mesh files.
# note Copyright (C) 2021 Triad National Security, LLC., All rights reserved.
# ------------------------------------------------------------------------------------------------ #
import matplotlib.pyplot as plt
import argparse
import os

# ------------------------------------------------------------------------------------------------ #
# -- create argument parser
parser = argparse.ArgumentParser(description='Plot X3D mesh file.')
# NOTE: --file_name is required; argparse exits with an error if missing.
parser.add_argument('-fn', '--file_name', type=str, default=None, required=True,
                    help='Provide mesh file to plot.')

# -- parse arguments from command line
args = parser.parse_args()
# ------------------------------------------------------------------------------------------------ #
# -- Read and parse x3d file
assert (os.path.exists(args.file_name)), f"Mesh file \"{args.file_name}\" does not exist!"
with open(args.file_name) as f:
    lines = [line.strip() for line in f]

# Data to read in
numdim = None
numnodes = None
numfaces = None
numcells = None
nodes = []
face_indices = []
faces = []
cells = []
boundaries = []  # NOTE(review): appears unused below -- candidate for removal

# The X3D file is organized as named blocks delimited by "<name>" and
# "end_<name>" lines; only these four blocks are consumed here.
blocks = ['header', 'nodes', 'faces', 'cells']
current_block = None
for line in lines:
    words = line.split()
    # If no current block, check if starting new block
    if current_block is None:
        for block in blocks:
            if block == line:
                current_block = block
                break
    # If current block, check if ending current block
    else:
        if line == "end_" + current_block:
            current_block = None
    # Process data if currently on a block
    if current_block == 'header':
        if words[0] == 'numdim':
            numdim = int(words[1])
        elif words[0] == 'nodes':
            numnodes = int(words[1])
        elif words[0] == 'faces':
            numfaces = int(words[1])
        elif words[0] == 'elements':
            numcells = int(words[1])
    elif current_block == 'nodes':
        # Node lines: <id> <x> <y> <z>
        if len(words) == 4:
            nodes.append([float(words[1]), float(words[2]), float(words[3])])
    elif current_block == 'faces':
        # Face lines: <face_id> <n_nodes> <node_id> ...
        if len(words) >= 3:
            face = []
            for nnodes in range(int(words[1])):
                # Convert from file node ID to code node index
                face.append(int(words[nnodes + 2]) - 1)
            face_index = int(words[0])
            # if face_index not in face_indices:
            faces.append(face)
            face_indices.append(int(words[0]))
    elif current_block == 'cells':
        # Cell lines: <cell_id> <n_faces> <face_id> ...
        if len(words) >= 3:
            cell = []
            for nface in range(int(words[1])):
                # Convert from file face ID to code face index
                cell.append(int(words[nface + 2]) - 1)
            cells.append(cell)

# Sort faces in case they are out of order
faces = [x for _, x in sorted(zip(face_indices, faces))]

# Read boundaries. For 2D meshes, exactly four companion files named
# <mesh>.bdy1.in .. <mesh>.bdy4.in are expected next to the mesh file.
boundary_files = []
boundary_nodes = []
boundary_faces = []
if numdim == 2:
    for n in range(4):
        assert (args.file_name[-3:] == '.in'), "Filename does not end in \".in\""
        boundary_files.append(args.file_name[:-3] + f".bdy{n+1}.in")
    for boundary_file in boundary_files:
        with open(boundary_file) as f:
            # NOTE: reuses (shadows) the `lines` variable from the mesh read.
            lines = [line.strip() for line in f]
        # -- read in boundary nodes
        boundary = []
        for line in lines:
            boundary.append(int(line) - 1)
        boundary_nodes.append(boundary)
        # -- calculate boundary faces
        # A face lies on this boundary when both of its (first two) nodes
        # belong to the boundary node set.
        boundary_face_tmp = []
        for face_idx, face in enumerate(faces):
            node0 = face[0]
            node1 = face[1]
            if node0 in boundary and node1 in boundary:
                boundary_face_tmp.append(face_idx)
        boundary_faces.append(boundary_face_tmp)

# -- sanity checks
assert (numdim is not None), "numdim not found!"
assert (numnodes is not None), "numnodes not found!"
assert (numfaces is not None), "numfaces not found!"
assert (numcells is not None), "numcells not found!"
assert (len(nodes) == numnodes), "numnodes does not match number of nodes!"
assert (len(faces) == numfaces), "numfaces does not match number of faces!"
assert (len(cells) == numcells), "numcells does not match number of faces!"
# ------------------------------------------------------------------------------------------------ #
# -- Plot mesh
# Unsupported dimensionalities raise instead of using `assert False`, which
# would be silently stripped when Python runs with -O.
if numdim == 1:
    raise NotImplementedError("1D plotting not supported!")
elif numdim == 2:
    plt.figure()
    ax = plt.gca()
    # -- plot faces
    plotted_faces = []
    for cell in cells:
        for face in cell:
            # Don't re-plot the same face (either node orientation).
            if (([faces[face][0], faces[face][1]] not in plotted_faces) and
                    ([faces[face][1], faces[face][0]] not in plotted_faces)):
                pt1 = nodes[faces[face][0]]
                pt2 = nodes[faces[face][1]]
                plotted_faces.append([faces[face][0], faces[face][1]])
                ax.plot([pt1[0], pt2[0]], [pt1[1], pt2[1]], color='k')
    # -- plot boundary faces, one color per boundary file
    colors = ['tab:blue', 'tab:orange', 'tab:green', 'tab:red']
    for n, bound in enumerate(boundary_faces):
        for face in bound:
            pt1 = nodes[faces[face][0]]
            pt2 = nodes[faces[face][1]]
            ax.plot([pt1[0], pt2[0]], [pt1[1], pt2[1]], color=colors[n], linewidth=4)
    # -- plot nodes
    for node in nodes:
        ax.plot([node[0]], [node[1]], marker='.', color='b')
    plt.show()
elif numdim == 3:
    raise NotImplementedError("3D plotting not supported!")
da344ca5106cd15b7c35a162a7dedddc04d7cc2e | 3,080 | py | Python | frameworks/kafka/tests/test_kerberos_auth.py | amolde/dcos-kafka-service | 4ec069750421d70633cf66d47aac9e4463d091c0 | [
"Apache-2.0"
] | 33 | 2016-08-01T16:50:53.000Z | 2020-12-15T20:50:16.000Z | frameworks/kafka/tests/test_kerberos_auth.py | amolde/dcos-kafka-service | 4ec069750421d70633cf66d47aac9e4463d091c0 | [
"Apache-2.0"
] | 148 | 2016-08-01T20:10:13.000Z | 2020-06-02T02:13:43.000Z | frameworks/kafka/tests/test_kerberos_auth.py | amolde/dcos-kafka-service | 4ec069750421d70633cf66d47aac9e4463d091c0 | [
"Apache-2.0"
] | 38 | 2016-08-08T17:14:20.000Z | 2020-07-08T03:33:56.000Z | import logging
import pytest
import sdk_auth
import sdk_cmd
import sdk_install
import sdk_networks
import sdk_utils
from tests import auth
from tests import client
from tests import config
log = logging.getLogger(__name__)
@pytest.fixture(scope="module", autouse=True)
def kerberos(configure_security):
    """Module-scoped Kerberos environment with service principals registered.

    The environment is constructed *before* the try-block: in the original
    code a failure inside KerberosEnvironment() left `kerberos_env` unbound,
    so the finally clause raised a NameError that masked the real error.
    """
    kerberos_env = sdk_auth.KerberosEnvironment()
    try:
        principals = auth.get_service_principals(config.SERVICE_NAME, kerberos_env.get_realm())
        kerberos_env.add_principals(principals)
        kerberos_env.finalize()

        yield kerberos_env
    finally:
        kerberos_env.cleanup()
@pytest.fixture(scope="module", autouse=True)
def kafka_server(kerberos):
    """
    A pytest fixture that installs a Kerberized kafka service.

    On teardown, the service is uninstalled.
    """
    kerberos_config = {
        "enabled": True,
        "kdc": {"hostname": kerberos.get_host(), "port": int(kerberos.get_port())},
        "realm": kerberos.get_realm(),
        "keytab_secret": kerberos.get_keytab_path(),
    }
    service_kerberos_options = {
        "service": {
            "name": config.SERVICE_NAME,
            "security": {"kerberos": kerberos_config},
        }
    }

    sdk_install.uninstall(config.PACKAGE_NAME, config.SERVICE_NAME)
    try:
        sdk_install.install(
            config.PACKAGE_NAME,
            config.SERVICE_NAME,
            config.DEFAULT_BROKER_COUNT,
            additional_options=service_kerberos_options,
            timeout_seconds=30 * 60,
        )

        yield {**service_kerberos_options, "package_name": config.PACKAGE_NAME}
    finally:
        sdk_install.uninstall(config.PACKAGE_NAME, config.SERVICE_NAME)
@pytest.fixture(scope="module", autouse=True)
def kafka_client(kerberos, kafka_server):
    """Provide a connected Kerberized Kafka client; uninstall on teardown."""
    test_client = client.KafkaClient(
        "kafka-client",
        kafka_server["package_name"],
        kafka_server["service"]["name"],
        kerberos,
    )
    try:
        test_client.install()
        test_client.connect()

        yield test_client
    finally:
        test_client.uninstall()
@pytest.mark.dcos_min_version("1.10")
@sdk_utils.dcos_ee_only
@pytest.mark.sanity
def test_no_vip(kafka_server):
    """The Kerberized broker endpoint must not expose a VIP."""
    broker_endpoint = sdk_networks.get_endpoint(
        kafka_server["package_name"], kafka_server["service"]["name"], "broker"
    )
    assert "vip" not in broker_endpoint
@pytest.mark.dcos_min_version("1.10")
@sdk_utils.dcos_ee_only
@pytest.mark.sanity
def test_client_can_read_and_write(kafka_client):
    """An authenticated client can produce to and consume from a topic."""
    topic_name = "authn.test"
    sdk_cmd.svc_cli(
        config.PACKAGE_NAME,
        config.SERVICE_NAME,
        f"topic create {topic_name}",
        parse_json=True,
    )

    user = "client"
    write_success, read_successes, _ = kafka_client.can_write_and_read(user, topic_name)
    assert write_success, f"Write failed (user={user})"
    assert read_successes, (
        f"Read failed (user={user}): "
        f"MESSAGES={kafka_client.MESSAGES} "
        f"read_successes={read_successes}"
    )
87781113a6f849cccc3b4ce914a05ae9aab2bcef | 2,768 | py | Python | sine/utils/application.py | SineObama/sine.utils | b5ec6e926c8eddfb40e917384833e89e82dd40c8 | [
"MIT"
] | null | null | null | sine/utils/application.py | SineObama/sine.utils | b5ec6e926c8eddfb40e917384833e89e82dd40c8 | [
"MIT"
] | null | null | null | sine/utils/application.py | SineObama/sine.utils | b5ec6e926c8eddfb40e917384833e89e82dd40c8 | [
"MIT"
] | null | null | null | # encoding: UTF-8
from tqdm import tqdm
from threading import Lock
from config_file import ConfigFile
from .func import *
class PredictableProcessBar(tqdm):
    """tqdm subclass that smooths the bar when the total size is only
    partially known.

    Known upcoming work is registered with :meth:`load_in`; as work
    completes (:meth:`update_out`), the bar position is extrapolated from
    the running average item size so the displayed progress advances
    smoothly. A simple averaging algorithm is used for the prediction.

    The original file's multi-line Chinese comments had been corrupted by
    mis-encoded line splits (syntax errors); they are reconstructed here in
    English with identical code behavior, except that the manual
    lock.acquire()/release() pairs are replaced by ``with`` blocks so the
    lock is released even if an update raises.
    """

    # NOTE: these are class attributes shared by all instances (original
    # design) -- including the Lock.
    loaded_size = 0       # total size of the work known so far
    loaded_total = 0      # known progress units so far (maps to tqdm.total)
    loaded_avg = 0        # running average size per progress unit
    updated_size = 0      # size of the work already completed
    updated_time = 0      # seconds spent on completed work (caller-reported)
    origin_n = 0          # true completed progress units; tqdm.n itself is
                          # adjusted dynamically to keep the bar smooth
    lock = Lock()         # guards concurrent updates
    total_error = 0.00001  # relative tolerance for "bar is complete" (ratio)

    def __init__(self, *args, **kw):
        if 'bar_format' not in kw:
            # Default format avoids printing n with excessive decimals.
            kw['bar_format'] = '{l_bar}{bar}| {n:.2f}/{total_fmt} [{elapsed}<{remaining}, {rate_fmt}{postfix}]'
        tqdm.__init__(self, *args, **kw)

    def load_in(self, size, total=1):
        """Register `size` units of known upcoming work, worth `total`
        progress steps (default 1)."""
        with self.lock:
            self.loaded_size = self.loaded_size + size
            self.loaded_total = self.loaded_total + total

    def update_out(self, size, total=1, updated_time=0):
        """Mark `size` units of work as done (`total` progress steps,
        taking `updated_time` seconds as reported by the caller)."""
        with self.lock:
            self.updated_size = self.updated_size + size
            self.origin_n = self.origin_n + total
            self.updated_time = self.updated_time + updated_time
            if self.loaded_total == 0:
                # Nothing loaded yet: fall back to plain stepping.
                self.update(total)
            elif abs(self.origin_n - self.total) <= self.total * self.total_error:
                # Within tolerance of completion: snap the bar to the end.
                self.update((self.total - self.n))
            else:
                # Extrapolate from the running average item size.
                self.loaded_avg = avg = self.loaded_size / self.loaded_total
                self.update(size / avg / (self.total - self.origin_n + size / avg) * (self.total - self.n))
class ConfigFileAlone(ConfigFile):
    """Convenience wrapper around config-file's ConfigFile.

    Adds three behaviors:
    1. Creates the config file (with '{}' content) and its parent
       directories when it does not exist.
    2. getAlone(key, default) returns the default for a missing key and
       also stores it back into the configuration.
    3. saveAlone() writes to disk only when the configuration was modified.

    The original file's Chinese comments were corrupted by mis-encoded
    line splits; they are reconstructed here in English. The `!= None`
    comparison is replaced with the idiomatic `is not None`.
    """

    changed = False  # whether the configuration has been modified

    def __init__(self, file_path):
        if isinstance(file_path, str) and not os.path.exists(file_path):
            mkdir(os.path.dirname(os.path.abspath(file_path)))
            with open(file_path, 'w') as file:
                file.write('{}')
        ConfigFile.__init__(self, file_path)

    def getAlone(self, key, default=None):
        """Return the value for `key`; on a miss with a non-None default,
        store the default and return it."""
        if default is not None and not self.has(key):
            self.changed = True
            self.set(key, default)
            return default
        return self.get(key)

    def setAlone(self, key, val):
        """Set `key` to `val`, tracking whether anything actually changed."""
        old_val = self.get(key)
        if old_val == val:
            return
        self.changed = True
        self.set(key, val)

    def saveAlone(self, *args, **kw):
        """Persist to file only if the configuration was modified."""
        if self.changed:
            self.changed = False
            self.save(*args, **kw)
__all__ = ['PredictableProcessBar', 'ConfigFileAlone']
| 36.906667 | 111 | 0.621387 |
de891c363e4d2a8b94061e7f38c3b38cc372ec06 | 1,771 | py | Python | examples/ad_manager/v201902/activity_service/get_all_activities.py | nlynch504/googleads-python-lib | 8f7bd7f987498c4651c969a7dc73e1d5fc965be2 | [
"Apache-2.0"
] | null | null | null | examples/ad_manager/v201902/activity_service/get_all_activities.py | nlynch504/googleads-python-lib | 8f7bd7f987498c4651c969a7dc73e1d5fc965be2 | [
"Apache-2.0"
] | null | null | null | examples/ad_manager/v201902/activity_service/get_all_activities.py | nlynch504/googleads-python-lib | 8f7bd7f987498c4651c969a7dc73e1d5fc965be2 | [
"Apache-2.0"
] | null | null | null | #!/usr/bin/env python
#
# Copyright 2016 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""This example gets all activities.
"""
# Import appropriate modules from the client library.
from googleads import ad_manager
def main(client):
  """Page through all activities and print their IDs and names.

  The final summary line originally used a Python 2 print *statement*,
  which is a SyntaxError under Python 3; it now uses the print() function
  like the rest of the file.

  Args:
    client: an initialized AdManagerClient.
  """
  # Initialize appropriate service.
  activity_service = client.GetService('ActivityService', version='v201902')
  # Create a statement to select activities.
  statement = ad_manager.StatementBuilder(version='v201902')
  # Retrieve a small amount of activities at a time, paging
  # through until all activities have been retrieved.
  while True:
    response = activity_service.getActivitiesByStatement(statement.ToStatement(
    ))
    if 'results' in response and len(response['results']):
      for activity in response['results']:
        # Print out some information for each activity.
        print('Activity with ID "%d" and name "%s" was found.\n' %
              (activity['id'], activity['name']))
      statement.offset += statement.limit
    else:
      break
  print('\nNumber of results found: %s' % response['totalResultSetSize'])
# Initialize client object.
ad_manager_client = ad_manager.AdManagerClient.LoadFromStorage()
main(ad_manager_client)
| 34.72549 | 79 | 0.730661 |
82ffc6d9cea7b1351721389990d5772ab3fbec1b | 1,232 | py | Python | armada/exceptions/k8s_exceptions.py | sktelecom-oslab/armada | ebc71ff8eca7ecf0560493d5cdafc14e34c783c9 | [
"Apache-2.0"
] | null | null | null | armada/exceptions/k8s_exceptions.py | sktelecom-oslab/armada | ebc71ff8eca7ecf0560493d5cdafc14e34c783c9 | [
"Apache-2.0"
] | null | null | null | armada/exceptions/k8s_exceptions.py | sktelecom-oslab/armada | ebc71ff8eca7ecf0560493d5cdafc14e34c783c9 | [
"Apache-2.0"
] | 2 | 2018-05-28T13:00:42.000Z | 2021-09-02T07:28:59.000Z | # Copyright 2018 The Armada Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from armada.exceptions.base_exception import ArmadaBaseException as ex
class KubernetesException(ex):
'''Base class for Kubernetes exceptions and error handling.'''
message = 'An unknown Kubernetes error occured.'
class KubernetesUnknownStreamingEventTypeException(KubernetesException):
'''Exception for getting an unknown event type from the Kubernetes API'''
message = 'An unknown event type was returned from the streaming API.'
class KubernetesErrorEventException(KubernetesException):
'''Exception for getting an error from the Kubernetes API'''
message = 'An error event was returned from the streaming API.'
| 36.235294 | 77 | 0.769481 |
e5e94908a0bd5107371136282dbd1ffa065162c0 | 13,097 | py | Python | entropylab/results_backend/sqlalchemy/storage.py | galwiner/entropy | 3c799981d20f268840ee68804b5ac740d1d2a8b4 | [
"BSD-3-Clause"
] | null | null | null | entropylab/results_backend/sqlalchemy/storage.py | galwiner/entropy | 3c799981d20f268840ee68804b5ac740d1d2a8b4 | [
"BSD-3-Clause"
] | null | null | null | entropylab/results_backend/sqlalchemy/storage.py | galwiner/entropy | 3c799981d20f268840ee68804b5ac740d1d2a8b4 | [
"BSD-3-Clause"
] | 1 | 2022-03-29T11:47:31.000Z | 2022-03-29T11:47:31.000Z | import os.path
import pickle
from datetime import datetime
from enum import Enum
from typing import Optional, Any, Iterable, TypeVar, Callable, List
import h5py
import numpy as np
from entropylab import RawResultData
from entropylab.api.data_reader import ResultRecord, MetadataRecord
from entropylab.api.data_writer import Metadata
from entropylab.logger import logger
from entropylab.results_backend.sqlalchemy.model import (
ResultDataType,
ResultTable,
Base,
)
T = TypeVar("T", bound=Base)
R = TypeVar("R", ResultRecord, MetadataRecord)
def _experiment_from(dset: h5py.Dataset) -> int:
return dset.attrs["experiment_id"]
def _id_from(dset: h5py.Dataset) -> str:
return dset.name
def _stage_from(dset: h5py.Dataset) -> int:
return dset.attrs["stage"]
def _label_from(dset: h5py.Dataset) -> str:
return dset.attrs["label"]
def _story_from(dset: h5py.Dataset) -> str:
if "story" in dset.attrs:
return dset.attrs["story"]
return ""
def _data_from(dset: h5py.Dataset) -> Any:
data = dset[()]
if dset.dtype.metadata and dset.dtype.metadata.get("vlen") == str:
return dset.asstr()[()]
elif dset.attrs.get("data_type") == ResultDataType.Pickled.value:
return pickle.loads(data)
elif dset.attrs.get("data_type") == ResultDataType.String.value:
# un-picklable data is stored as HDF5 Opaque so turn to bytes then to string:
return data.tobytes().decode(encoding="utf-8")
else:
return data
def _time_from(dset: h5py.Dataset) -> datetime:
return datetime.fromisoformat(dset.attrs["time"])
def _build_result_record(dset: h5py.Dataset) -> ResultRecord:
return ResultRecord(
experiment_id=_experiment_from(dset),
id=_id_from(dset),
label=_label_from(dset),
story=_story_from(dset),
stage=_stage_from(dset),
data=_data_from(dset),
time=_time_from(dset),
)
def _build_metadata_record(dset: h5py.Dataset) -> MetadataRecord:
return MetadataRecord(
experiment_id=_experiment_from(dset),
id=_id_from(dset),
label=_label_from(dset),
stage=_stage_from(dset),
data=_data_from(dset),
time=_time_from(dset),
)
def _get_all_or_single(group: h5py.Group, name: Optional[str] = None):
"""
Returns all or one child from an h5py.Group
Parameters
----------
group group to get child or children from. Can be h5py.File itself.
name name of child to get. If None, indicates all children should be retrieved.
Returns
-------
A list of group children (either h5py.Group or h5py.Datasets)
"""
if name is None:
return list(group.values())
else:
if str(name) in group:
return [group[str(name)]]
else:
return []
class EntityType(Enum):
RESULT = 1
METADATA = 2
class _HDF5Reader:
def get_result_records(
self,
experiment_id: Optional[int] = None,
stage: Optional[int] = None,
label: Optional[str] = None,
) -> Iterable[ResultRecord]:
return self._get_records(
EntityType.RESULT, _build_result_record, experiment_id, stage, label
)
def get_metadata_records(
self,
experiment_id: Optional[int] = None,
stage: Optional[int] = None,
label: Optional[str] = None,
) -> Iterable[MetadataRecord]:
return self._get_records(
EntityType.METADATA, _build_metadata_record, experiment_id, stage, label
)
def _get_records(
self,
entity_type: EntityType,
record_build_func: Callable,
experiment_id: Optional[int] = None,
stage: Optional[int] = None,
label: Optional[str] = None,
) -> Iterable[T]:
entities = []
if experiment_id:
experiment_ids = [experiment_id]
else:
experiment_ids = self._list_experiment_ids_in_fs()
for experiment_id in experiment_ids:
entities += self._get_experiment_entities(
entity_type, record_build_func, experiment_id, stage, label
)
return sorted(entities, key=lambda entity: entity.experiment_id)
def _list_experiment_ids_in_fs(self) -> List[int]:
# noinspection PyUnresolvedReferences
dir_list = os.listdir(self._path)
# TODO: Better validation of experiment ids
exp_files = filter(lambda f: f.endswith(".hdf5"), dir_list)
experiment_ids = list(map(lambda f: f[:-5], exp_files))
return experiment_ids
def _get_experiment_entities(
self,
entity_type: EntityType,
convert_from_dset: Callable,
experiment_id: int,
stage: Optional[int] = None,
label: Optional[str] = None,
) -> Iterable[T]:
dsets = []
try:
# noinspection PyUnresolvedReferences
file = self._open_hdf5(experiment_id, "r")
except FileNotFoundError:
logger.error(f"HDF5 file for experiment_id [{experiment_id}] was not found")
else:
with file:
stage_groups = _get_all_or_single(file, stage)
for stage_group in stage_groups:
label_groups = _get_all_or_single(stage_group, label)
for label_group in label_groups:
dset_name = entity_type.name.lower()
dset = label_group[dset_name]
dsets.append(convert_from_dset(dset))
return dsets
def get_last_result_of_experiment(
self, experiment_id: int
) -> Optional[ResultRecord]:
results = list(self.get_result_records(experiment_id, None, None))
if results and len(results) > 0:
results.sort(key=lambda x: x.time, reverse=True)
return results[0]
else:
return None
class _HDF5Writer:
def save_result(self, experiment_id: int, result: RawResultData) -> str:
# noinspection PyUnresolvedReferences
with self._open_hdf5(experiment_id, "a") as file:
return self._save_entity_to_file(
file,
EntityType.RESULT,
experiment_id,
result.stage,
result.label,
result.data,
datetime.now(),
result.story,
)
def save_metadata(self, experiment_id: int, metadata: Metadata):
# noinspection PyUnresolvedReferences
with self._open_hdf5(experiment_id, "a") as file:
return self._save_entity_to_file(
file,
EntityType.METADATA,
experiment_id,
metadata.stage,
metadata.label,
metadata.data,
datetime.now(),
)
def _save_entity_to_file(
self,
file: h5py.File,
entity_type: EntityType,
experiment_id: int,
stage: int,
label: str,
data: Any,
time: datetime,
story: Optional[str] = None,
migrated_id: Optional[str] = None,
) -> str:
path = f"/{stage}/{label}"
label_group = file.require_group(path)
dset = self._create_dataset(label_group, entity_type, data)
dset.attrs.create("experiment_id", experiment_id)
dset.attrs.create("stage", stage)
dset.attrs.create("label", label)
dset.attrs.create("time", time.astimezone().isoformat())
if story:
dset.attrs.create("story", story or "")
if migrated_id:
dset.attrs.create("migrated_id", migrated_id or "")
return dset.name
def _create_dataset(
self, group: h5py.Group, entity_type: EntityType, data: Any
) -> h5py.Dataset:
name = entity_type.name.lower()
try:
dset = group.create_dataset(name=name, data=data)
except TypeError:
data_type, pickled = self._pickle_data(data)
# np.void turns our string to bytes (HDF5 Opaque):
dset = group.create_dataset(name=name, data=np.void(pickled))
dset.attrs.create("data_type", data_type.value, dtype="i2")
return dset
@staticmethod
def _pickle_data(data: Any) -> (bytes, ResultDataType):
try:
pickled = pickle.dumps(data)
data_type = ResultDataType.Pickled
except Exception as ex:
logger.debug("Could not pickle data, defaulting to __repr__()", ex)
pickled = data.__repr__().encode(encoding="utf-8")
data_type = ResultDataType.String
return data_type, pickled
class _HDF5Migrator(_HDF5Writer):
def migrate_result_rows(self, rows: Iterable[ResultTable]) -> None:
self.migrate_rows(EntityType.RESULT, rows)
def migrate_metadata_rows(self, rows: Iterable[Metadata]) -> None:
self.migrate_rows(EntityType.METADATA, rows)
def migrate_rows(self, entity_type: EntityType, rows: Iterable[T]) -> None:
if rows is not None and len(list(rows)) > 0:
for row in rows:
if not row.saved_in_hdf5:
record = row.to_record()
# noinspection PyUnresolvedReferences
with self._open_hdf5(record.experiment_id, "a") as file:
hdf5_id = self._migrate_record(file, entity_type, record)
logger.debug(
f"Migrated ${entity_type.name} with id [{row.id}] "
f"to HDF5 with id [{hdf5_id}]"
)
def _migrate_record(
self, file: h5py.File, entity_type: EntityType, record: R
) -> str:
return self._save_entity_to_file(
file,
entity_type,
record.experiment_id,
record.stage,
record.label,
record.data,
record.time,
getattr(record, "story", None),
record.id,
)
def _migrate_result_record(
self, file: h5py.File, result_record: ResultRecord
) -> str:
return self._save_entity_to_file(
file,
EntityType.RESULT,
result_record.experiment_id,
result_record.stage,
result_record.label,
result_record.data,
result_record.time,
result_record.story,
result_record.id,
)
def migrate_from_per_project_hdf5_to_per_experiment_hdf5_files(
self, old_global_hdf5_file_path
):
logger.debug(
f"Migrating global .hdf5 file {old_global_hdf5_file_path} "
"to per-experiment .hdf5 files"
)
with h5py.File(old_global_hdf5_file_path, "r") as file:
if "experiments" in file:
top_group = file["experiments"]
for exp_group in top_group.values():
experiment_id = int(exp_group.name[13:])
f"Migrating results and metadata for experiment id {experiment_id}"
# noinspection PyUnresolvedReferences
with self._open_hdf5(experiment_id, "a") as exp_file:
for stage_group in exp_group.values():
exp_group.copy(stage_group, exp_file)
new_filename = f"{old_global_hdf5_file_path}.bak"
logger.debug(f"Renaming global .hdf5 file to [{new_filename}]")
os.rename(old_global_hdf5_file_path, new_filename)
logger.debug("Global .hdf5 file migration done")
class HDF5Storage(_HDF5Reader, _HDF5Migrator, _HDF5Writer):
def __init__(self, path=None):
"""Initializes a new storage class instance for storing experiment results
and metadata in HDF5 files.
:param path: filesystem path to a directory where HDF5 files reside. If no path
is given or the path is empty, HDF5 files are stored in memory only.
"""
if path is None or path == "": # memory files
self._path = "./entropy_temp_hdf5"
self._in_memory_mode = True
else: # filesystem
self._path = path
os.makedirs(self._path, exist_ok=True)
self._in_memory_mode = False
def _open_hdf5(self, experiment_id: int, mode: str) -> h5py.File:
path = self._build_hdf5_filepath(experiment_id)
try:
if self._in_memory_mode:
"""Note that because backing_store=False, self._path is ignored & no
file is saved on disk.
See https://docs.h5py.org/en/stable/high/file.html#file-drivers
"""
return h5py.File(path, mode, driver="core", backing_store=False)
else:
return h5py.File(path, mode)
except FileNotFoundError:
logger.exception(f"HDF5 file not found at '{path}'")
raise
def _build_hdf5_filepath(self, experiment_id: int) -> str:
return os.path.join(self._path, f"{experiment_id}.hdf5")
| 34.375328 | 88 | 0.604795 |
0bc5af947cb972059b3d4c83a05f581070851b6c | 24,779 | py | Python | twinpy/interfaces/aiida/vasp.py | kei0822kei/twinpy | 14b47df1fa5b57a54f57d5c2120ed3fe9502a9bc | [
"MIT"
] | null | null | null | twinpy/interfaces/aiida/vasp.py | kei0822kei/twinpy | 14b47df1fa5b57a54f57d5c2120ed3fe9502a9bc | [
"MIT"
] | 5 | 2021-01-19T13:08:28.000Z | 2021-02-20T12:03:59.000Z | twinpy/interfaces/aiida/vasp.py | kei0822kei/twinpy | 14b47df1fa5b57a54f57d5c2120ed3fe9502a9bc | [
"MIT"
] | null | null | null | #!/usr/bin/env python
"""
Interface for Aiida-Vasp.
"""
from pprint import pprint
import warnings
import numpy as np
from matplotlib import pyplot as plt
from aiida.cmdline.utils.decorators import with_dbenv
from aiida.orm import (load_node,
Node,
WorkChainNode,
QueryBuilder)
from aiida.common.exceptions import NotExistentAttributeError
from twinpy.common.kpoints import Kpoints
from twinpy.common.utils import print_header
from twinpy.structure.lattice import CrystalLattice
from twinpy.interfaces.aiida.base import (check_process_class,
get_cell_from_aiida,
_WorkChain)
from twinpy.analysis.relax_analyzer import RelaxAnalyzer
from twinpy.plot.base import line_chart
from twinpy.plot.relax import RelaxPlot
class _AiidaVaspWorkChain(_WorkChain):
"""
Aiida-Vasp base work chain class.
"""
def __init__(
self,
node:Node,
):
"""
Args:
node: Aiida Node.
"""
super().__init__(node=node)
self._initial_structure_pk = None
self._initial_cell = None
self._set_initial_structure()
self._stress = None
self._forces = None
self._energy = None
if self._exit_status == 0:
self._set_properties()
def _set_initial_structure(self):
"""
Set initial structure.
"""
self._initial_structure_pk = self._node.inputs.structure.pk
self._initial_cell = get_cell_from_aiida(
load_node(self._initial_structure_pk))
@property
def initial_cell(self):
"""
Initial cell.
"""
return self._initial_cell
def _set_properties(self):
"""
Set properties.
"""
try:
misc = self._node.outputs.misc.get_dict()
self._forces = self._node.outputs.forces.get_array('final')
self._stress = self._node.outputs.stress.get_array('final')
self._energy = misc['total_energies']['energy_extrapolated']
except NotExistentAttributeError:
warnings.warn("Could not extract outputs. Please check report.")
@property
def forces(self):
"""
Forces acting on atoms after relax.
"""
return self._forces
@property
def stress(self):
"""
Stress acting on lattice after relax.
"""
return self._stress
@property
def energy(self):
"""
Total energy.
"""
return self._energy
def get_max_force(self) -> float:
"""
Get maximum force acting on atoms.
"""
max_force = float(np.linalg.norm(self._forces, axis=1).max())
return max_force
def get_kpoints_info(self, include_two_pi:bool=True) -> dict:
"""
Get sampling kpoints information.
Args:
include_two_pi: If True, 2*pi is included for reciprocal lattice.
Returns:
dict: Contains kpoints information.
"""
mesh, offset = self._node.inputs.kpoints.get_kpoints_mesh()
total_mesh = mesh[0] * mesh[1] * mesh[2]
kpt = Kpoints(lattice=self._initial_cell[0])
dic = kpt.get_dict(mesh=mesh, include_two_pi=include_two_pi)
dic['offset'] = offset
del dic['input_interval']
del dic['decimal_handling']
del dic['use_symmetry']
if self._exit_status == 0:
sampling_kpoints = self._node.outputs.kpoints.get_array('kpoints')
weights = self._node.outputs.kpoints.get_array('weights')
weights_num = (weights * total_mesh).astype(int)
dic['sampling_kpoints'] = sampling_kpoints
dic['weights'] = weights_num
dic['total_irreducible_kpoints'] =len(weights_num)
return dic
def get_vasp_settings(self) -> dict:
"""
Get input parameters.
Returns:
dict: Contains input parameters.
"""
potcar = {
'potential_family': self._node.inputs.potential_family.value,
'potential_mapping': self._node.inputs.potential_mapping.get_dict()
}
settings = {
'incar': self._node.inputs.parameters.get_dict()['incar'],
'potcar': potcar,
'kpoints': self._node.inputs.kpoints.get_kpoints_mesh(),
'parser_settings':
self._node.inputs.settings.get_dict()['parser_settings'],
}
return settings
def get_misc(self) -> dict:
"""
Get misc.
"""
return self._node.outputs.misc.get_dict()
def _print_vasp_results(self):
"""
Print VASP run results.
"""
kpoints_info_for_print = self.get_kpoints_info()
if self._exit_status == 0:
del kpoints_info_for_print['sampling_kpoints']
del kpoints_info_for_print['weights']
print_header('VASP settings')
pprint(self.get_vasp_settings())
print("\n")
print_header("kpoints information")
pprint(kpoints_info_for_print)
if self._exit_status == 0:
print("\n")
print_header('VASP outputs')
print("# stress")
pprint(self._stress)
print("")
print("# max force acting on atoms")
print(str(self.get_max_force())+"\n")
print("# total energy")
print(str(self._energy)+"\n")
@with_dbenv()
class AiidaVaspWorkChain(_AiidaVaspWorkChain):
"""
Vasp work chain class.
"""
def __init__(
self,
node:Node,
ignore_warning:bool=False,
):
"""
Args:
node: Aiida Node.
"""
process_class = 'VaspWorkChain'
check_process_class(node, process_class)
super().__init__(node=node)
self._final_structure_pk = None
self._final_cell = None
self._set_final_structure(ignore_warning=ignore_warning)
self._step_energies = None
self._set_step_energies(ignore_warning=ignore_warning)
def _set_step_energies(self, ignore_warning):
"""
Set step energies.
"""
try:
eg = self._node.outputs.energies
self._step_energies = {
'energy_extrapolated': eg.get_array('energy_extrapolated'),
'energy_extrapolated_final':
eg.get_array('energy_extrapolated_final'),
}
except NotExistentAttributeError:
if not ignore_warning:
warnings.warn("Output energy could not find.\n"
"process state:{} (pk={})".format(
self.process_state, self._node.pk))
def _set_final_structure(self, ignore_warning):
"""
Set final structure.
"""
try:
self._final_structure_pk = self._node.outputs.structure.pk
self._final_cell = get_cell_from_aiida(
load_node(self._final_structure_pk))
except NotExistentAttributeError:
if not ignore_warning:
warnings.warn("Final structure could not find.\n"
"process state:{} (pk={})".format(
self.process_state, self._node.pk))
@property
def final_cell(self):
"""
Final cell.
"""
return self._final_cell
@property
def step_energies(self):
"""
Energy for each steps.
"""
return self._step_energies
def get_pks(self) -> dict:
"""
Get pks.
Returns:
dict: Contains vasp pk and structure pk.
"""
return {
'vasp_pk': self._pk,
'initial_structure_pk': self._initial_structure_pk,
'final_structure_pk': self._final_structure_pk,
}
def get_relax_analyzer(self,
original_cell:tuple=None,
no_standardize:bool=False) -> RelaxAnalyzer:
"""
Get RelaxAnalyzer class object.
Args:
original_cell: Original cell whose standardized cell
is initail_cell.
no_standardize: Please see docstring of RelaxAnalyzer.
Returns:
RelaxAnalyzer: RelaxAnalyzer class object.
"""
analyzer = RelaxAnalyzer(initial_cell=self._initial_cell,
final_cell=self._final_cell,
original_cell=original_cell,
forces=self._forces,
stress=self._stress,
energy=self._energy,
no_standardize=no_standardize)
return analyzer
def plot_energy_convergence(self):
"""
Plot energy convergence.
"""
fig = plt.figure()
ax =fig.add_subplot(111)
energies = self._step_energies['energy_extrapolated']
steps = [ i+1 for i in range(len(energies)) ]
line_chart(ax,
xdata=steps,
ydata=energies,
xlabel='Relax steps',
ylabel='Energy [eV]',
)
ax.ticklabel_format(axis='y', style='sci', scilimits=(0,0))
def get_description(self):
"""
Get description.
"""
self._print_common_information()
print_header('PKs')
pprint(self.get_pks())
print("\n")
self._print_vasp_results()
@with_dbenv()
class AiidaRelaxWorkChain(_AiidaVaspWorkChain):
"""
Relax work chain class.
"""
def __init__(
self,
node:Node,
ignore_warning:bool=False,
):
"""
Args:
node: Aiida Node.
"""
process_class = 'RelaxWorkChain'
check_process_class(node, process_class)
super().__init__(node=node)
self._final_structure_pk = None
self._final_cell = None
self._current_final_structure_pk = None
self._current_final_cell = None
self._set_final_structure(ignore_warning)
def _set_final_structure(self, ignore_warning):
"""
Set final structure.
"""
try:
relax_structure = self._node.outputs.relax__structure
self._final_structure_pk = relax_structure.pk
self._final_cell = get_cell_from_aiida(relax_structure)
self._current_final_structure_pk = self._final_structure_pk
self._current_final_cell = self._final_cell
except NotExistentAttributeError:
if not ignore_warning:
warnings.warn("Final structure could not find.\n"
"process state:{} (pk={})".format(
self.process_state, self._node.pk))
relax_pks, static_pk = self.get_vasp_calculation_pks()
if relax_pks is None:
self._current_final_structure_pk = self._initial_structure_pk
self._current_final_cell = self._initial_cell
else:
if static_pk is not None:
self._current_final_structure_pk = \
load_node(static_pk).inputs.structure.pk
self._current_final_cell = get_cell_from_aiida(
load_node(static_pk).inputs.structure)
else:
aiida_vasp = AiidaVaspWorkChain(load_node(relax_pks[-1]))
self._current_final_structure_pk = \
aiida_vasp.get_pks()['initial_structure_pk']
self._current_final_cell = aiida_vasp.initial_cell
@property
def final_cell(self):
"""
Final cell.
"""
return self._final_cell
@property
def current_final_cell(self):
"""
Current final cell.
"""
return self._current_final_cell
def get_relax_settings(self) -> dict:
"""
Get relax settings.
Returns:
dict: Contains relax settings.
"""
keys = [ key for key in dir(self._node.inputs) if 'relax' in key ]
settings = {}
for key in keys:
name = key.replace('relax__', '')
settings[name] = self._node.inputs.__getattr__(key).value
return settings
def get_vasp_calculation_pks(self) -> tuple:
"""
Get VaspWorkChain pks.
Returns:
tuple: (relax_calcs, static_calc).
"""
qb = QueryBuilder()
qb.append(Node, filters={'id':{'==': self._pk}})
qb.append(WorkChainNode) # extract vasp.verify WorkChainNodes
qb.append(WorkChainNode,
project=['id']) # extract vasp.vasp WorkChainNodes
qb.order_by({WorkChainNode: {'id': 'asc'}})
vasp_pks = qb.all()
relax_pks = None
static_pk = None
if 'nsw' not in \
load_node(vasp_pks[-1][0]).inputs.parameters.get_dict().keys():
static_pk = vasp_pks[-1][0]
relax_pks = [ pk[0] for pk in vasp_pks[:-1] ]
else:
warnings.warn("Could not find final static_pk calculation in {}.".
format(self._pk))
relax_pks = [ pk[0] for pk in vasp_pks ]
return (relax_pks, static_pk)
def get_vasp_calculations(self) -> tuple:
"""
Get AiidaVaspWorkChain class objects.
Returns:
tuple: (relax_calcs, static_calc).
"""
relax_pks, static_pk = self.get_vasp_calculation_pks()
if self._exit_status == 0:
relax = [ AiidaVaspWorkChain(load_node(pk))
for pk in relax_pks ]
static = AiidaVaspWorkChain(load_node(static_pk),
ignore_warning=True)
else:
relax = [ AiidaVaspWorkChain(load_node(pk))
for pk in relax_pks[:-1] ]
static = None
return (relax, static)
def get_pks(self) -> dict:
"""
Get pks.
Returns:
dict: Contains relax pk and structure pk.
"""
relax_pks, static_pk = self.get_vasp_calculation_pks()
pks = {
'relax_pk': self._pk,
'initial_structure_pk': self._initial_structure_pk,
'final_structure_pk': self._final_structure_pk,
'current_final_structure_pk': self._current_final_structure_pk,
'vasp_relax_pks': relax_pks,
'static_pk': static_pk,
}
return pks
def get_relax_analyzer(self,
original_cell:tuple=None,
no_standardize:bool=False) -> RelaxAnalyzer:
"""
Get RelaxAnalyzer class object.
Args:
original_cell: Original cell whose standardized cell
is initail_cell.
no_standardize: Please see docstring of RelaxAnalyzer.
Returns:
RelaxAnalyzer: RelaxAnalyzer class object.
"""
analyzer = RelaxAnalyzer(initial_cell=self._initial_cell,
final_cell=self._final_cell,
original_cell=original_cell,
forces=self._forces,
stress=self._stress,
energy=self._energy,
no_standardize=no_standardize,
)
return analyzer
def get_description(self):
"""
Get description.
"""
self._print_common_information()
print_header('PKs')
pprint(self.get_pks())
print("\n\n")
print_header("relax settings")
pprint(self.get_relax_settings())
print("\n\n")
self._print_vasp_results()
def get_relaxplot(self, start_step:int=1) -> RelaxPlot:
"""
Get RelaxPlot class object.
Args:
start_step: The step number of the first relax in this WorkChain.
If you relax 20 steps in the privious RelaxWorkChain,
for example, start_step becomes 21.
Returns:
RelaxPlot: RelaxPlot class object.
"""
relax_vasps, static_vasp = self.get_vasp_calculations()
relax_data = {}
relax_data['max_force'] = \
np.array([ relax_vasp.get_max_force()
for relax_vasp in relax_vasps ])
# stress xx yy zz yz zx xy
relax_data['stress'] = \
np.array([ relax_vasp.stress.flatten()[[0,4,8,5,6,1]]
for relax_vasp in relax_vasps ])
relax_data['energy'] = \
np.array([ relax_vasp.energy
for relax_vasp in relax_vasps ])
relax_data['abc'] = \
np.array([ CrystalLattice(relax_vasp.final_cell[0]).abc
for relax_vasp in relax_vasps ])
relax_data['step_energies_collection'] = \
[ relax_vasp.step_energies for relax_vasp in relax_vasps ]
if static_vasp is None:
static_data = None
else:
static_data = {
'max_force': static_vasp.get_max_force(),
'stress': static_vasp.stress.flatten()[[0,4,8,5,6,1]] ,
'energy': static_vasp.energy,
'abc': CrystalLattice(static_vasp.initial_cell[0]).abc,
}
relax_plot = RelaxPlot(relax_data=relax_data,
static_data=static_data,
start_step=start_step)
return relax_plot
def plot_convergence(self):
"""
Plot convergence.
"""
plt.rcParams["font.size"] = 14
fig = plt.figure(figsize=(16,13))
ax1 = fig.add_axes((0.15, 0.1, 0.35, 0.35))
ax2 = fig.add_axes((0.63, 0.1, 0.35, 0.35))
ax3 = fig.add_axes((0.15, 0.55, 0.35, 0.35))
ax4 = fig.add_axes((0.63, 0.55, 0.35, 0.35))
relax_plot = self.get_relaxplot()
relax_plot.plot_max_force(ax1)
relax_plot.plot_energy(ax2)
relax_plot.plot_stress(ax3)
relax_plot.plot_abc(ax4)
return fig
class AiidaRelaxCollection():
"""
Collection of AiidaRelaxWorkChain.
"""
def __init__(
self,
aiida_relaxes:list,
):
"""
Args:
aiida_relaxes (list): List of AiidaRelaxWorkChain.
"""
self._aiida_relaxes = aiida_relaxes
self._aiida_relax_pks = [ relax.pk for relax in aiida_relaxes ]
self._initial_structure_pk = None
self._initial_cell = None
self._current_final_structure_pk = None
self._current_final_cell = None
self._final_structure_pk = None
self._final_cell = None
self._set_structures()
def _set_structures(self):
"""
Check previous output structure and next input structure
are the same.
"""
relax_pk = None
structure_pk = None
for i, aiida_relax in enumerate(self._aiida_relaxes):
if i > 0:
if aiida_relax.get_pks()['initial_structure_pk'] \
!= structure_pk:
raise RuntimeError(
"Relax pk {} output structure pk {} "
"and relax pk {} input structure pk {} "
"does not match.".format(
relax_pk,
structure_pk,
aiida_relax.pk,
aiida_relax.get_pks()['initial_structure_pk'],
))
relax_pk = aiida_relax.pk
structure_pk = aiida_relax.get_pks()['current_final_structure_pk']
self._current_final_structure_pk = structure_pk
self._current_final_cell = \
get_cell_from_aiida(load_node(structure_pk))
if self._aiida_relaxes[-1].process_state == 'finished':
self._final_structure_pk = \
self._current_final_structure_pk
self._final_cell = self._current_final_cell
self._initial_structure_pk = \
self._aiida_relaxes[0].get_pks()['initial_structure_pk']
self._initial_cell = \
get_cell_from_aiida(load_node(self._initial_structure_pk))
@property
def aiida_relaxes(self):
"""
List of AiidaRelaxWorkChain class object.
"""
return self._aiida_relaxes
@property
def initial_cell(self):
"""
Initial cell.
"""
return self._initial_cell
@property
def current_final_cell(self):
"""
Current final cell.
"""
return self._current_final_cell
@property
def final_cell(self):
"""
Final cell.
"""
return self._final_cell
def get_pks(self):
"""
Get pks.
"""
pks = {
'aiida_relax_pks': self._aiida_relax_pks,
'initial_structure_pk': self._initial_structure_pk,
'current_final_structure_pk': self._current_final_structure_pk,
'final_structure_pk': self._final_structure_pk,
}
return pks
def get_description(self):
"""
Print description for first RelaxWorkChain.
"""
print("# Print description for first RelaxWorkChain.")
self._aiida_relaxes[0].get_description()
def get_relax_analyzer(self,
original_cell:tuple=None,
no_standardize:bool=False) -> RelaxAnalyzer:
"""
Get RelaxAnalyzer class object.
Args:
original_cell: Original cell whose standardized cell
is initail_cell.
no_standardize: Please see docstring of RelaxAnalyzer.
"""
pks = self.get_pks()
if pks['final_structure_pk'] is None:
warnings.warn("Final structure in latest RelaxWorkChain (pk={}) "
"does not find. So build RelaxAnalyzer with "
"previous RelaxWorkChain (pk={}) "
" as a final structure.".format(
self._aiida_relaxes[-1].pk,
self._aiida_relaxes[-2].pk,
))
final_relax = self._aiida_relaxes[-2]
else:
final_relax = self._aiida_relaxes[-1]
initail_cell = self._initial_cell
final_cell = final_relax._final_cell
forces = final_relax.forces
stress = final_relax.stress
energy = final_relax.energy
relax_analyzer = RelaxAnalyzer(
initial_cell=initail_cell,
final_cell=final_cell,
original_cell=original_cell,
forces=forces,
stress=stress,
energy=energy,
no_standardize=no_standardize,
)
return relax_analyzer
def get_relaxplots(self) -> list:
"""
Get RelaxPlot class objects.
"""
relax_plots = []
start_step = 1
for relax in self._aiida_relaxes:
rlxplot = relax.get_relaxplot(start_step=start_step)
relax_plots.append(rlxplot)
start_step = rlxplot.vasp_final_steps[-1] + 1
return relax_plots
def plot_convergence(self) -> list:
"""
Get RelaxPlot class objects.
"""
fig = plt.figure(figsize=(16,13))
ax1 = fig.add_axes((0.15, 0.1, 0.35, 0.35))
ax2 = fig.add_axes((0.63, 0.1, 0.35, 0.35))
ax3 = fig.add_axes((0.15, 0.55, 0.35, 0.35))
ax4 = fig.add_axes((0.63, 0.55, 0.35, 0.35))
relax_plots = self.get_relaxplots()
for i, relax_plot in enumerate(relax_plots):
decorate = bool(i == 0)
relax_plot.plot_max_force(ax1, decorate=decorate)
relax_plot.plot_energy(ax2, decorate=decorate)
relax_plot.plot_stress(ax3, decorate=decorate)
relax_plot.plot_abc(ax4, decorate=decorate)
return fig
| 32.994674 | 79 | 0.546552 |
b7f571d010d8027c6a894e2a8ef0942994c00dff | 19,330 | py | Python | src/m6_fancy_iterating.py | Seanxia99/11-Sequences | fd0753537eda4d335246492ecabf0b96306ae2cd | [
"MIT"
] | 1 | 2022-01-07T23:47:49.000Z | 2022-01-07T23:47:49.000Z | src/m6_fancy_iterating.py | Seanxia99/11-Sequences | fd0753537eda4d335246492ecabf0b96306ae2cd | [
"MIT"
] | null | null | null | src/m6_fancy_iterating.py | Seanxia99/11-Sequences | fd0753537eda4d335246492ecabf0b96306ae2cd | [
"MIT"
] | 65 | 2018-09-24T11:54:24.000Z | 2018-10-17T19:33:17.000Z | """
This module practices ITERATING (i.e. looping) through a SEQUENCE
in ways OTHER than just going thru the sequence from BEGINNING to END.
It also shows how to SELECT items in a sequence, e.g.:
-- the items that are strings
-- the items that are even integers (e.g. 2, 4, 6, ...)
-- the items in the second half of the sequence
Note that:
-- SELECTING items that ARE even integers
is different from:
-- LOOKING only at items AT even-numbered indices.
Authors: David Mutchler, Valerie Galluzzi, Mark Hays, Amanda Stouder,
their colleagues and PUT_YOUR_NAME_HERE.
""" # TODO: 1. PUT YOUR NAME IN THE ABOVE LINE.
import rosegraphics as rg
def main():
""" Calls the TEST functions in this module. """
# ------------------------------------------------------------------
# TODO: 2. EXAMINE the 4 sequences immediately below
# this comment, and READ the instructions that follows them.
#
# When you have examined the 4 sequences below and understand how
# the testing will work for this module, asking questions as needed,
# THEN:
# change the above TO DO to DONE.
# ------------------------------------------------------------------
sequence1 = [55, 'hello', 33, rg.Point(90, 25)] # List
sequence2 = [90, 'dog', 87, 'cat', 'bone', 33, 100] # List
sequence3 = 'Yo! Whazup?' # String
sequence4 = ('This', 'is a', 'tuple', 55) # Tuple
# ------------------------------------------------------------------
# STUDENTS: Do the work in this module as follows.
# Otherwise, you will be overwhelmed by the output.
#
# For each function that you implement (TODOs 3 - 9):
# 1. Locate the statements just below this comment
# that call TEST functions.
# 2. UN-comment only one test at a time.
# 3. Implement that function per its TO DO.
# 4. When satisfied with your work, move onto the next test.
# ------------------------------------------------------------------
run_test_print_all_items_forwards(sequence1, sequence2, sequence3, sequence4)
# run_test_print_all_items_backwards(sequence1, sequence2, sequence3, sequence4)
# run_test_print_items_at_odd_indices(sequence1, sequence2, sequence3, sequence4)
# run_test_print_items_in_second_half(sequence1, sequence2, sequence3, sequence4)
# run_test_print_items_that_are_bigger_than_5() # Uses different sequences
# run_test_print_items_that_are_strings(sequence1, sequence2, sequence3, sequence4)
# run_test_print_items_that_are_odd_integers(sequence1, sequence2, sequence3, sequence4)
def run_test_print_all_items_forwards(sequence1, sequence2, sequence3, sequence4):
    """
    Tests the  print_all_items_forwards  function by printing the
    hand-computed expected output, then calling the function under test
    on each of the four sequences so the two can be compared by eye.
    """
    print()
    print('***********************************************************')
    print('Testing the print_all_items_forwards function.')
    print('Iterate through an ENTIRE sequence, forwards:')
    print('***********************************************************')
    print('Expected:')
    print('55\nhello\n33\nPoint(90.0, 25.0)\n')
    print('Actual: ')
    print_all_items_forwards(sequence1)
    print()
    print('-------------------------------------------------------')
    print('Expected:')
    print('90\ndog\n87\ncat\nbone\n33\n100\n')
    print('Actual: ')
    print_all_items_forwards(sequence2)
    print()
    print('-------------------------------------------------------')
    print('Expected:')
    print('Y\no\n!\n \nW\nh\na\nz\nu\np\n?\n')
    print('Actual: ')
    print_all_items_forwards(sequence3)
    print()
    print('-------------------------------------------------------')
    print('Expected:')
    print('This\nis a\ntuple\n55\n')
    print('Actual: ')
    print_all_items_forwards(sequence4)
def run_test_print_all_items_backwards(sequence1, sequence2, sequence3, sequence4):
    """
    Tests the  print_all_items_backwards  function by printing the
    hand-computed expected output, then calling the function under test
    on each of the four sequences so the two can be compared by eye.
    """
    print()
    print('***********************************************************')
    print('Testing the print_all_items_backwards function.')
    print('Iterate through an ENTIRE sequence, backwards:')
    print('***********************************************************')
    print('Expected:')
    print('Point(90.0, 25.0)\n33\nhello\n55\n')
    print('Actual: ')
    print_all_items_backwards(sequence1)
    print()
    print('-------------------------------------------------------')
    print('Expected:')
    print('100\n33\nbone\ncat\n87\ndog\n90\n')
    print('Actual: ')
    print_all_items_backwards(sequence2)
    print()
    print('-------------------------------------------------------')
    print('Expected:')
    print('?\np\nu\nz\na\nh\nW\n \n!\no\nY\n')
    print('Actual: ')
    print_all_items_backwards(sequence3)
    print()
    print('-------------------------------------------------------')
    print('Expected:')
    print('55\ntuple\nis a\nThis\n')
    print('Actual: ')
    print_all_items_backwards(sequence4)
def run_test_print_items_at_odd_indices(sequence1, sequence2, sequence3, sequence4):
    """
    Tests the  print_items_at_odd_indices  function by printing the
    hand-computed expected output, then calling the function under test
    on each of the four sequences so the two can be compared by eye.
    """
    print()
    print('***********************************************************')
    print('Testing the print_items_at_odd_indices function.')
    print('Iterate through PART of a sequence, namely,')
    print('the items at odd-numbered indices:')
    print('***********************************************************')
    print('Expected:')
    print('hello is at index 1')
    print('Point(90.0, 25.0) is at index 3\n')
    print('Actual: ')
    print_items_at_odd_indices(sequence1)
    print()
    print('-------------------------------------------------------')
    print('Expected:')
    print('dog is at index 1')
    print('cat is at index 3')
    print('33 is at index 5\n')
    print('Actual: ')
    print_items_at_odd_indices(sequence2)
    print()
    print('-------------------------------------------------------')
    print('Expected:')
    print('o is at index 1')
    print('  is at index 3')
    print('h is at index 5')
    print('z is at index 7')
    print('p is at index 9\n')
    print('Actual: ')
    print_items_at_odd_indices(sequence3)
    print()
    print('-------------------------------------------------------')
    print('Expected:')
    print('is a is at index 1')
    print('55 is at index 3\n')
    print('Actual: ')
    print_items_at_odd_indices(sequence4)
def run_test_print_items_in_second_half(sequence1, sequence2, sequence3, sequence4):
    """
    Tests the  print_items_in_second_half  function by printing the
    hand-computed expected output, then calling the function under test
    on each of the four sequences so the two can be compared by eye.
    """
    print()
    print('***********************************************************')
    print('Testing the print_items_in_second_half function.')
    print('Iterate through PART of a sequence, namely,')
    print('the items in the second half of the sequence:')
    print('***********************************************************')
    print('Expected:')
    print('33\nPoint(90.0, 25.0)\n')
    print('Actual: ')
    print_items_in_second_half(sequence1)
    print()
    print('-------------------------------------------------------')
    print('Expected:')
    print('cat\nbone\n33\n100\n')
    print('Actual: ')
    print_items_in_second_half(sequence2)
    print()
    print('-------------------------------------------------------')
    print('Expected:')
    print('h\na\nz\nu\np\n?\n')
    print('Actual: ')
    print_items_in_second_half(sequence3)
    print()
    print('-------------------------------------------------------')
    print('Expected:')
    print('tuple\n55\n')
    print('Actual: ')
    print_items_in_second_half(sequence4)
def run_test_print_items_that_are_bigger_than_5():
    """
    Tests the  print_items_that_are_bigger_than_5  function by printing
    the hand-computed expected output, then calling the function under
    test so the two can be compared by eye.
    """
    # Note: The tests of this function use sequences that are DIFFERENT
    #   than the tests of the other functions.
    print()
    print('***********************************************************')
    print('Testing the print_items_that_are_bigger_than_5 function.')
    print('Iterate through a sequence, selecting items, namely,')
    print('the items that are bigger than 5:')
    print('Note that the test sequences for this are NOT the same as')
    print('the test sequences for the other exercises herein.')
    print('***********************************************************')
    print('Expected:')
    print('45 is at index 0')
    print('6 is at index 3')
    print('100 is at index 6')
    print('100 is at index 7\n')
    print('Actual: ')
    print_items_that_are_bigger_than_5([45, 3, -50, 6, 5, 3, 100, 100])
    print()
    print('-------------------------------------------------------')
    print('Expected:')
    print('7 is at index 0')
    print('30 is at index 1')
    print('6 is at index 2\n')
    print('Actual: ')
    print_items_that_are_bigger_than_5([7, 30, 6])
    print()
    print('-------------------------------------------------------')
    print('Expected:')
    print('')
    print('Actual: ')
    print_items_that_are_bigger_than_5([5, 5, 5])
def run_test_print_items_that_are_strings(sequence1, sequence2, sequence3, sequence4):
    """
    Tests the  print_items_that_are_strings  function by printing the
    hand-computed expected output, then calling the function under test
    on each of the four sequences so the two can be compared by eye.
    """
    print()
    print('***********************************************************')
    print('Testing the print_items_that_are_strings function.')
    print('Iterate through a sequence, selecting items, namely,')
    print('the items that are strings:')
    print('***********************************************************')
    print('Expected:')
    print('hello is at index 1\n')
    print('Actual: ')
    print_items_that_are_strings(sequence1)
    print()
    print('-------------------------------------------------------')
    print('Expected:')
    print('dog is at index 1')
    print('cat is at index 3')
    print('bone is at index 4\n')
    print('Actual: ')
    print_items_that_are_strings(sequence2)
    print()
    print('-------------------------------------------------------')
    # Note: every character of a string is itself a (1-character) string,
    # so ALL the items of sequence3 are expected to be printed.
    print('Expected:')
    print('Y is at index 0')
    print('o is at index 1')
    print('! is at index 2')
    print('  is at index 3')
    print('W is at index 4')
    print('h is at index 5')
    print('a is at index 6')
    print('z is at index 7')
    print('u is at index 8')
    print('p is at index 9')
    print('? is at index 10\n')
    print('Actual: ')
    print_items_that_are_strings(sequence3)
    print()
    print('-------------------------------------------------------')
    print('Expected:')
    print('This is at index 0')
    print('is a is at index 1')
    print('tuple is at index 2\n')
    print('Actual: ')
    print_items_that_are_strings(sequence4)
def run_test_print_items_that_are_odd_integers(sequence1, sequence2, sequence3, sequence4):
    """
    Tests the  print_items_that_are_odd_integers  function by printing
    the hand-computed expected output, then calling the function under
    test on each of the four sequences (plus one extra sequence) so the
    two can be compared by eye.
    """
    print()
    print('***********************************************************')
    print('Testing the print_items_that_are_odd_integers function.')
    print('Iterate through a sequence, selecting items, namely,')
    print('the items that are odd integers:')
    print('Note that there is 1 extra test sequence for this problem.')
    print('***********************************************************')
    print('Expected:')
    print('55 is at index 0')
    print('33 is at index 2\n')
    print('Actual: ')
    print_items_that_are_odd_integers(sequence1)
    print()
    print('-------------------------------------------------------')
    print('Expected:')
    print('87 is at index 2')
    print('33 is at index 5\n')
    print('Actual: ')
    print_items_that_are_odd_integers(sequence2)
    print()
    print('-------------------------------------------------------')
    # A string contains no integers at all, so nothing should print here.
    print('Expected:')
    print('')
    print('Actual: ')
    print_items_that_are_odd_integers(sequence3)
    print()
    print('-------------------------------------------------------')
    print('Expected:')
    print('55 is at index 3\n')
    print('Actual: ')
    print_items_that_are_odd_integers(sequence4)
    print()
    print('-------------------------------------------------------')
    print('Expected:')
    print('87 is at index 2')
    print('95 is at index 3')
    print('5 is at index 6')
    print('77 is at index 7\n')
    print('Actual: ')
    print_items_that_are_odd_integers([90, 'dog', 87, 95, 8, 10, 5, 77])
# ----------------------------------------------------------------------
# Iterating through the ENTIRE sequence, FORWARDs.
# ----------------------------------------------------------------------
def print_all_items_forwards(sequence):
    """
    Prints the items in the given sequence in the order that
    they appear, that is, forwards.  Prints them one item per line.

    For example, if the sequence is [55, 'hello', 33, rg.Point(90, 25)],
    then this function prints:
        55
        hello
        33
        Point at (90, 25)

    Works for any sequence type (list, tuple, string, ...).
    """
    # Direct iteration works uniformly for every sequence type.
    for item in sequence:
        print(item)
# ----------------------------------------------------------------------
# Iterating through the ENTIRE sequence, BACKWARDs.
# ----------------------------------------------------------------------
def print_all_items_backwards(sequence):
    """
    Prints the items in the given sequence in the REVERSE of the order
    in which they appear, that is, prints them in backwards order.
    Prints them one item per line.

    For example, if the sequence is [55, 'hello', 33, rg.Point(90, 25)],
    then this function prints:
        Point at (90,25)
        33
        hello
        55

    Works for any sequence type (list, tuple, string, ...).
    """
    # reversed() yields the items from last to first without copying.
    for item in reversed(sequence):
        print(item)
# ----------------------------------------------------------------------
# Iterating through PART of a sequence:
# -- in this sample problem, every other item in the sequence.
# ----------------------------------------------------------------------
def print_items_at_odd_indices(sequence):
    """
    Prints the items at the odd-numbered indices in the given sequence,
    along with their positions (indices) in the sequence,
    with ' is at index ' in between (see example).

    For example, if the sequence is [90, 'dog', 87, 95, 92, 33, 100],
    then this function prints:
        dog is at index 1
        95 is at index 3
        33 is at index 5
    """
    # Step through indices 1, 3, 5, ... up to (but excluding) the length.
    for index in range(1, len(sequence), 2):
        print(sequence[index], 'is at index', index)
# ----------------------------------------------------------------------
# Iterating through PART of a sequence:
# -- in this sample problem, the second half.
# ----------------------------------------------------------------------
def print_items_in_second_half(sequence):
    """
    Prints the items in the second half of the given sequence.
    For odd-length sequences, includes the middle item in the sequence.

    For example, if the sequence is [90, 'dog', 87, 95, 92, 33, 100],
    then this function prints:
        95
        92
        33
        100
    """
    # len // 2 is the first index of the second half; for odd lengths
    # (e.g. 7 // 2 == 3) this naturally includes the middle item.
    for index in range(len(sequence) // 2, len(sequence)):
        print(sequence[index])
# ----------------------------------------------------------------------
# Iterating through a sequence, selecting items:
# -- in this sample problem, the items that are bigger than 5.
# ----------------------------------------------------------------------
def print_items_that_are_bigger_than_5(sequence):
    """
    Prints the items in the given sequence that are bigger than 5,
    along with their positions (indices) in the sequence,
    with ' is at index ' in between (see example).

    For example, if the sequence is [45, 3, -50, 6, 5, 3, 100, 100],
    then this function prints:
        45 is at index 0
        6 is at index 3
        100 is at index 6
        100 is at index 7

    Precondition: All the items in the sequence are integers.
    """
    for index, item in enumerate(sequence):
        if item > 5:  # strictly bigger: 5 itself is excluded
            print(item, 'is at index', index)
# ----------------------------------------------------------------------
# Iterating through a sequence, selecting items:
# -- in this sample problem, the items that are strings.
# ----------------------------------------------------------------------
def print_items_that_are_strings(sequence):
    """
    Prints the items in the given sequence that are strings,
    along with their positions (indices) in the sequence,
    with ' is at index ' in between (see example).

    For example, if the sequence is [90, 'dog', 87, 'cat', 'bone', 33, 100],
    then this function prints:
        dog is at index 1
        cat is at index 3
        bone is at index 4
    """
    for index, item in enumerate(sequence):
        # A string is, by definition, an object whose type is str.
        if type(item) is str:
            print(item, 'is at index', index)
# ----------------------------------------------------------------------
# Iterating through a sequence, selecting items:
# -- in this sample problem, the items that are odd integers.
# ----------------------------------------------------------------------
def print_items_that_are_odd_integers(sequence):
    """
    Prints the items in the given sequence that are odd integers,
    along with their positions in the sequence,
    with ' is at index ' in between (see example).

    For example, if the sequence is
        [90, 'dog', 87, 'cat', 'bone', 33, 100],
    then this function prints:
        87 is at index 2
        33 is at index 5
    """
    for index, item in enumerate(sequence):
        # type(...) is int (not isinstance) so that booleans, whose type
        # is bool, are not counted as integers.
        if type(item) is int and item % 2 == 1:
            print(item, 'is at index', index)
# ----------------------------------------------------------------------
# Calls  main  to start the ball rolling.
# (Runs unconditionally at import time, as is conventional for these
# course exercise modules.)
# ----------------------------------------------------------------------
main()
| 37.316602 | 92 | 0.496534 |
91575671d1ad1d3d6b1607da508763790936a2f4 | 6,066 | py | Python | roles/lib_openshift/src/class/oc_adm_ca_server_cert.py | Roscoe198/Ansible-Openshift | b874bef456852ef082a27dfec4f2d7d466702370 | [
"Apache-2.0"
] | 164 | 2015-07-29T17:35:04.000Z | 2021-12-16T16:38:04.000Z | roles/lib_openshift/src/class/oc_adm_ca_server_cert.py | Roscoe198/Ansible-Openshift | b874bef456852ef082a27dfec4f2d7d466702370 | [
"Apache-2.0"
] | 3,634 | 2015-06-09T13:49:15.000Z | 2022-03-23T20:55:44.000Z | roles/lib_openshift/src/class/oc_adm_ca_server_cert.py | Roscoe198/Ansible-Openshift | b874bef456852ef082a27dfec4f2d7d466702370 | [
"Apache-2.0"
] | 250 | 2015-06-08T19:53:11.000Z | 2022-03-01T04:51:23.000Z | # pylint: skip-file
# flake8: noqa
class CAServerCertConfig(OpenShiftCLIConfig):
    ''' Option container (DTO) for the  oc adm ca  command. '''

    def __init__(self, kubeconfig, verbose, ca_options):
        # Register the option dict with the generic CLI-config base class
        # under the 'ca' resource name.
        super(CAServerCertConfig, self).__init__('ca', None, kubeconfig, ca_options)
        # Keep direct references for callers that inspect the config.
        self.kubeconfig = kubeconfig
        self._ca = ca_options
        self.verbose = verbose
class CAServerCert(OpenShiftCLI):
    ''' Class to wrap the oc adm ca create-server-cert command line.

    Provides idempotent creation of a server certificate: `exists`
    compares the hostnames baked into an existing cert against the
    requested ones, and `create` (re)generates the cert, optionally
    backing up any files it would overwrite.
    '''
    def __init__(self,
                 config,
                 verbose=False):
        ''' Constructor for oadm ca.

        config: a CAServerCertConfig holding the command options.
        '''
        super(CAServerCert, self).__init__(None, config.kubeconfig, verbose)
        self.config = config
        self.verbose = verbose

    def get(self):
        '''get the current cert file

        If a file exists by the same name in the specified location then the cert exists.
        Returns the file contents as a string, or None if the file is absent.
        '''
        cert = self.config.config_options['cert']['value']
        if cert and os.path.exists(cert):
            return open(cert).read()

        return None

    def create(self):
        '''run openshift oc adm ca create-server-cert cmd.

        Returns the dict produced by openshift_cmd (returncode, results, ...).
        '''
        # Added this here as a safegaurd for stomping on the
        # cert and key files if they exist
        if self.config.config_options['backup']['value']:
            # date_str %-formats to "<original path>_<timestamp>".
            ext = time.strftime("%Y-%m-%d@%H:%M:%S", time.localtime(time.time()))
            date_str = "%s_" + "%s" % ext

            if os.path.exists(self.config.config_options['key']['value']):
                shutil.copy(self.config.config_options['key']['value'],
                            date_str % self.config.config_options['key']['value'])
            if os.path.exists(self.config.config_options['cert']['value']):
                shutil.copy(self.config.config_options['cert']['value'],
                            date_str % self.config.config_options['cert']['value'])

        options = self.config.to_option_list()

        cmd = ['ca', 'create-server-cert']
        cmd.extend(options)

        return self.openshift_cmd(cmd, oadm=True)

    def exists(self):
        ''' check whether the certificate exists and has the clusterIP.

        Returns True only when the Subject Alternative Names in the cert
        file are exactly the requested hostnames.
        '''
        cert_path = self.config.config_options['cert']['value']
        if not os.path.exists(cert_path):
            return False

        # Would prefer pyopenssl but is not installed.
        # When we verify it is, switch this code
        # Here is the code to get the subject and the SAN
        # openssl x509 -text -noout -certopt \
        #  no_header,no_version,no_serial,no_signame,no_validity,no_issuer,no_pubkey,no_sigdump,no_aux \
        #  -in /etc/origin/master/registry.crt
        # Instead of this solution we will use a regex.
        cert_names = []
        hostnames = self.config.config_options['hostnames']['value'].split(',')
        # Shell out to openssl and scrape the SAN line from the text dump.
        proc = subprocess.Popen(['openssl', 'x509', '-noout', '-text', '-in', cert_path],
                                stdout=subprocess.PIPE, stderr=subprocess.PIPE)
        x509output, _ = proc.communicate()
        if proc.returncode == 0:
            regex = re.compile(r"^\s*X509v3 Subject Alternative Name:\s*?\n\s*(.*)\s*\n", re.MULTILINE)
            match = regex.search(x509output.decode())  # E501
            if not match:
                return False

            for entry in re.split(r", *", match.group(1)):
                # SAN entries look like "DNS:foo.example.com" or "IP Address:10.0.0.1".
                # NOTE(review): split(':')[1] would truncate an IPv6 address — confirm
                # whether IPv6 SANs can occur here.
                if entry.startswith('DNS') or entry.startswith('IP Address'):
                    cert_names.append(entry.split(':')[1])
            # now that we have cert names let's compare
            # (mutual subset == set equality, i.e. no extra or missing names)
            cert_set = set(cert_names)
            hname_set = set(hostnames)
            if cert_set.issubset(hname_set) and hname_set.issubset(cert_set):
                return True

        return False

    @staticmethod
    def run_ansible(params, check_mode):
        '''run the oc_adm_ca_server_cert module.

        Returns an Ansible-style result dict (changed/failed/results/state).
        '''
        # Filter non-strings from hostnames list (Such as boolean: False)
        params['hostnames'] = [host for host in params['hostnames'] if isinstance(host, string_types)]
        config = CAServerCertConfig(params['kubeconfig'],
                                    params['debug'],
                                    {'cert':          {'value': params['cert'], 'include': True},
                                     'hostnames':     {'value': ','.join(params['hostnames']), 'include': True},
                                     'overwrite':     {'value': True, 'include': True},
                                     'key':           {'value': params['key'], 'include': True},
                                     'signer_cert':   {'value': params['signer_cert'], 'include': True},
                                     'signer_key':    {'value': params['signer_key'], 'include': True},
                                     'signer_serial': {'value': params['signer_serial'], 'include': True},
                                     'expire_days':   {'value': params['expire_days'], 'include': True},
                                     'backup':        {'value': params['backup'], 'include': False},
                                    })

        server_cert = CAServerCert(config)

        state = params['state']

        if state == 'present':
            ########
            # Create
            ########
            # (Re)create when the cert is missing/outdated or force was requested.
            if not server_cert.exists() or params['force']:

                if check_mode:
                    return {'changed': True,
                            'msg': "CHECK_MODE: Would have created the certificate.",
                            'state': state}

                api_rval = server_cert.create()

                if api_rval['returncode'] != 0:
                    return {'failed': True, 'msg': api_rval}

                return {'changed': True, 'results': api_rval, 'state': state}

            ########
            # Exists
            ########
            api_rval = server_cert.get()
            return {'changed': False, 'results': api_rval, 'state': state}

        return {'failed': True,
                'msg': 'Unknown state passed. %s' % state}
| 42.125 | 112 | 0.53363 |
dda6512288b1a06f80b75a165309695c1f607b5c | 17,346 | py | Python | ursina/mesh_importer.py | andraskohlmann/ursina | 6661589e9f9b9b689138b4c21c7b7b29b9bda810 | [
"MIT"
] | null | null | null | ursina/mesh_importer.py | andraskohlmann/ursina | 6661589e9f9b9b689138b4c21c7b7b29b9bda810 | [
"MIT"
] | 1 | 2020-12-22T16:59:04.000Z | 2020-12-22T16:59:04.000Z | ursina/mesh_importer.py | gmvh/ursina | d539812e9b03988230bab47f886bc0f30a71f41e | [
"MIT"
] | null | null | null | import os
import glob
import platform
import subprocess
from copy import copy, deepcopy
from pathlib import Path
from ursina.mesh import Mesh
from ursina import application
from panda3d.core import CullFaceAttrib
from time import perf_counter
imported_meshes = dict()  # cache of already-parsed meshes, keyed by model name


def load_model(name, path=application.asset_folder):
    """Load a model by name, searching *path* recursively.

    Checks the in-memory cache first (returning a deepcopy so callers can
    mutate the result freely), then looks for .bam, .ursinamesh, .obj and
    .blend files, in that order of preference.  .blend files are converted
    to .obj via Blender and then loaded.  Returns a Mesh / model node, or
    None if nothing matching was found.
    """
    if not isinstance(name, str):
        # BUGFIX: message previously named the wrong argument ('save') and
        # printed type(str) (always <class 'type'>) instead of type(name).
        raise TypeError(f"Argument name must be of type str, not {type(name)}")

    if name in imported_meshes:
        # print('load cached model', name)
        try:
            return deepcopy(imported_meshes[name])
        except:
            # deepcopy can fail on some engine objects; fall through and reload
            pass

    for filetype in ('.bam', '.ursinamesh', '.obj', '.blend'):
        # warning: glob is case-insensitive on windows, so m.path will be all lowercase
        for filename in path.glob(f'**/{name}{filetype}'):
            if filetype == '.bam':  # BUGFIX: was a fragile substring test (filetype in '.bam')
                print('loading bam')
                return loader.loadModel(filename)

            if filetype == '.ursinamesh':
                try:
                    with open(filename) as f:
                        # NOTE(review): eval of file contents -- only load trusted assets.
                        m = eval(f.read())
                    m.path = filename
                    m.name = name
                    imported_meshes[name] = m
                    return m
                except:
                    print('invalid ursinamesh file:', filename)

            if filetype == '.obj':
                m = obj_to_ursinamesh(path=path, name=name, return_mesh=True)
                m.path = filename
                m.name = name
                imported_meshes[name] = m
                return m

            elif filetype == '.blend':
                print('found blend file:', filename)
                if compress_models(path=path, name=name):
                    # conversion succeeded: recurse to pick up the generated .obj
                    return load_model(name, path)

    return None
# find blender installations
# Populates application.blender_paths, mapping a short version string
# (e.g. '2.8') and the key 'default' to a Blender executable path.
# Runs once at import time, only in development mode.
if not hasattr(application, 'blender_paths') and application.development_mode:
    application.blender_paths = dict()

    if platform.system() == 'Windows':
        # get blender path by getting default program for '.blend' file extention
        import shlex
        import winreg

        try:
            class_root = winreg.QueryValue(winreg.HKEY_CLASSES_ROOT, '.blend')
            with winreg.OpenKey(winreg.HKEY_CLASSES_ROOT, r'{}\shell\open\command'.format(class_root)) as key:
                command = winreg.QueryValueEx(key, '')[0]
                default_blender = shlex.split(command)[0]
                default_blender = Path(default_blender)
                application.blender_paths['default'] = default_blender
                blender_foundation_directory = default_blender.parent.parent

                # Scan sibling installations, e.g. ".../Blender 2.90/2.90/..."
                for blender_installation in blender_foundation_directory.glob('*'):
                    first_folder = tuple(blender_installation.glob('*'))[0] # version
                    version_name = first_folder.name[:3]
                    application.blender_paths[version_name] = list(blender_installation.glob('blender.exe'))[0]
        except:
            # best effort: no registry association means no Blender available
            pass

    elif platform.system() == 'Linux':
        # Use "which" command to find blender
        which_process = subprocess.run(('which', 'blender'), stdout=subprocess.PIPE)
        if which_process.returncode == 0:
            blender_exec = which_process.stdout.decode().strip()
            application.blender_paths['default'] = blender_exec

    from pprint import pprint
    print('blender_paths:')
    pprint(application.blender_paths)
def load_blender_scene(name, path=application.asset_folder, load=True, reload=False, skip_hidden=True):
    """Export a whole .blend scene to a generated python file and exec it.

    Looks for <name>.blend under *path*, runs Blender headless with the
    bundled _blender_scene_to_ursina.py script to produce
    scenes/<name>.py, then executes that file and returns the
    `scene_parent` entity it defines.  The export step is skipped when a
    generated file already exists, unless reload=True.
    # NOTE(review): the `load` and `skip_hidden` parameters are accepted
    # but not used in this body — presumably consumed by the export
    # script or reserved; confirm.
    """
    scenes_folder = Path(application.asset_folder / 'scenes')
    if not scenes_folder.exists():
        scenes_folder.mkdir()

    out_file_path = scenes_folder / f'{name}.py'
    print('loading:', out_file_path)

    if reload or not out_file_path.exists():
        print('reload:')
        t = perf_counter()
        blend_file = tuple(path.glob(f'**/{name}.blend'))
        if not blend_file:
            raise ValueError('no blender file found at:', path / name)

        blend_file = blend_file[0]
        blender = get_blender(blend_file)

        print('loading blender scene:', blend_file, '-->', out_file_path, 'using:', blender)
        script_path = application.internal_scripts_folder / '_blender_scene_to_ursina.py'
        if platform.system() == 'Windows':
            subprocess.call(f'''{blender} {blend_file} --background --python {script_path} {out_file_path}''')
        else:
            subprocess.run((blender, blend_file, '--background', '--python', script_path, out_file_path))
        print('exported from blender:', perf_counter() - t)

    t = perf_counter()
    with open(out_file_path) as f:
        t2 = perf_counter()
        file_content = f.read()
        print('file read time:', perf_counter() - t2)
        loc = {}
        # Run the generated file in two stages (entity setup, then the
        # unique mesh data) so `loc` accumulates the scene's names.
        first_part, rest = file_content.split('# unique meshes')
        exec(first_part, globals(), loc)
        exec(rest, globals(), loc)
        print('exec total:', perf_counter() - t)

        return loc['scene_parent']
def get_blender(blend_file):
    """Return the Blender executable matching *blend_file*'s version.

    Reads the 12-byte header of the .blend file (e.g. 'BLENDER-v280'),
    turns its trailing digits into a short version string ('2.8') and
    looks it up in application.blender_paths; falls back to the
    'default' installation when no exact match is installed.
    """
    with open(blend_file, 'rb') as header_file:
        magic = header_file.read(12).decode('utf-8')   # e.g. 'BLENDER-v280'

    digits = magic[-3:]                                # '280'
    version = digits[0] + '.' + digits[1:2]            # -> '2.8'
    print('blender_version:', version)

    if version in application.blender_paths:
        return application.blender_paths[version]

    print('using default blender version')
    return application.blender_paths['default']
def compress_models(path=None, outpath=application.compressed_models_folder, name='*'):
    """Convert every matching .blend file under *path* to a .obj in *outpath*.

    Uses a headless Blender run per file with the bundled _blend_export.py
    script.  Returns the list of .blend files that were converted.
    """
    if not application.compressed_models_folder.exists():
        application.compressed_models_folder.mkdir()

    export_script_path = application.internal_scripts_folder / '_blend_export.py'
    exported = list()
    for blend_file in path.glob(f'**/{name}.blend'):
        blender = get_blender(blend_file)

        out_file_path = outpath / (blend_file.stem + '.obj')
        print('converting .blend file to .obj:', blend_file, '-->', out_file_path, 'using:', blender)

        # On Windows the whole command is passed as one quoted shell string.
        if platform.system() == 'Windows':
            subprocess.call(f'''"{blender}" "{blend_file}" --background --python "{export_script_path}" "{out_file_path}"''')
        else:
            subprocess.run((blender, blend_file, '--background', '--python', export_script_path, out_file_path))

        exported.append(blend_file)

    return exported
def obj_to_ursinamesh(
    path=application.compressed_models_folder,
    outpath=application.compressed_models_folder,
    name='*',
    return_mesh=True,
    save_to_file=False,
    delete_obj=False
    ):
    """Parse matching wavefront .obj files under *path* into ursina meshes.

    For the first matching file:
      * return_mesh=True  -> return a Mesh instance.
      * otherwise         -> return the source text of a Mesh(...) call;
        with save_to_file=True it is also written to a .ursinamesh file
        in *outpath*, and delete_obj=True removes the source .obj.

    Faces are triangulated (quads split, ngons fanned around their first
    vertex); x coordinates are negated to convert handedness.
    """
    if name.endswith('.obj'):
        name = name[:-4]

    for f in path.glob(f'**/{name}.obj'):
        filepath = path / (os.path.splitext(f)[0] + '.obj')
        print('read obj at:', filepath)

        with open(filepath, 'r') as file:
            lines = file.readlines()

        verts = list()
        tris = list()

        uv_indices = list()
        uvs = list()

        norm_indices = list()
        norms = list()

        # parse the obj file to a Mesh
        for i, l in enumerate(lines):
            if l.startswith('v '):
                vert = [float(v) for v in l[2:].strip().split(' ')]
                vert[0] = -vert[0]  # flip x for handedness conversion
                verts.append(tuple(vert))

            elif l.startswith('vn '):
                n = l[3:].strip().split(' ')
                norms.append(tuple([float(e) for e in n]))

            elif l.startswith('vt '):
                uv = l[3:].strip()
                uv = uv.split(' ')
                uvs.append(tuple([float(e) for e in uv]))

            elif l.startswith('f '):
                l = l[2:]
                l = l.split(' ')

                # face entries look like v, v/vt or v/vt/vn (1-based indices)
                try:
                    tri = tuple([int(t.split('/')[0])-1 for t in l if t != '\n'])
                except:
                    print('error in obj file line:', i, ':', l)
                    return
                if len(tri) == 3:
                    tris.extend(tri)
                elif len(tri) == 4:
                    tris.extend((tri[0], tri[1], tri[2], tri[2], tri[3], tri[0]))
                else:   # ngon: fan-triangulate around the first vertex.
                    # Use a separate loop variable so the outer line counter
                    # `i` (used in the error message above) is not clobbered.
                    for j in range(1, len(tri)-1):
                        tris.extend((tri[j], tri[j+1], tri[0]))

                try:
                    uv = tuple([int(t.split('/')[1])-1 for t in l])
                    if len(uv) == 3:
                        uv_indices.extend(uv)
                    elif len(uv) == 4:
                        uv_indices.extend((uv[0], uv[1], uv[2], uv[2], uv[3], uv[0]))
                    else:   # ngon
                        for j in range(1, len(uv)-1):
                            uv_indices.extend((uv[j], uv[j+1], uv[0]))
                except:     # if no uvs
                    pass

                try:
                    n = tuple([int(t.split('/')[2])-1 for t in l])
                    if len(n) == 3:
                        norm_indices.extend(n)
                    elif len(n) == 4:   # BUGFIX: was `len(uv) == 4`, which mis-routed
                                        # quad normals whenever uv/normal counts differed
                        norm_indices.extend((n[0], n[1], n[2], n[2], n[3], n[0]))
                    else:   # ngon
                        for j in range(1, len(n)-1):
                            norm_indices.extend((n[j], n[j+1], n[0]))
                except:     # if no normals
                    pass

        # x was negated for the vertices, so negate the normals' x as well
        normals = [(-norms[nid][0], norms[nid][1], norms[nid][2]) for nid in norm_indices]

        if return_mesh:
            return Mesh(
                vertices=[verts[t] for t in tris],
                normals=normals,
                uvs=[uvs[uid] for uid in uv_indices]
            )

        # Build the textual Mesh(...) constructor call.
        meshstring = ''
        meshstring += 'Mesh('

        meshstring += '\nvertices='
        meshstring += str(tuple([verts[t] for t in tris]))

        if uv_indices:
            meshstring += ', \nuvs='
            meshstring += str(tuple([uvs[uid] for uid in uv_indices]))

        if norm_indices:
            meshstring += ', \nnormals='
            meshstring += str(normals)

        meshstring += ''', \nmode='triangle')'''

        if not save_to_file:
            return meshstring

        outfilepath = outpath / (os.path.splitext(f)[0] + '.ursinamesh')
        with open(outfilepath, 'w') as file:
            file.write(meshstring)

        if delete_obj:
            os.remove(filepath)

        print('saved ursinamesh to:', outfilepath)
# faster, but does not apply modifiers
def compress_models_fast(model_name=None, write_to_disk=False):
    """Convert .blend files to .ursinamesh by reading them directly with
    tinyblend (no Blender subprocess), skipping modifier evaluation.

    # NOTE(review): `model_name` is accepted but never used, and the
    # `return file_content` fires after the FIRST object of the FIRST
    # .blend file — confirm whether processing all files was intended.
    """
    print('find models')
    from tinyblend import BlenderFile   # third-party, imported lazily
    application.compressed_models_folder.mkdir(parents=True, exist_ok=True)

    files = os.listdir(application.models_folder)
    compressed_files = os.listdir(application.compressed_models_folder)

    for f in files:
        if f.endswith('.blend'):
            # print('f:', application.compressed_models_folder + '/' + f)
            print('compress______', f)
            blend = BlenderFile(application.models_folder + '/' + f)
            number_of_objects = len(blend.list('Object'))

            for o in blend.list('Object'):
                if not o.data.mvert:    # skip non-mesh objects
                    continue
                # print(o.id.name.decode("utf-8", "strict"))
                # Blender object names start with a 2-char prefix; strip it
                # and everything after the first NUL terminator.
                object_name = o.id.name.decode( "utf-8").replace(".", "_")[2:]
                object_name = object_name.split('\0', 1)[0]
                print('name:', object_name)

                verts = [v.co for v in o.data.mvert]
                verts = tuple(verts)

                file_content = 'Mesh(' + str(verts)

                file_name = ''.join([f.split('.')[0], '.ursinamesh'])
                if number_of_objects > 1:
                    # disambiguate when a .blend contains several meshes
                    file_name = ''.join([f.split('.')[0], '_', object_name, '.ursinamesh'])
                file_path = os.path.join(application.compressed_models_folder, file_name)
                print(file_path)

                # reverse each triangle's winding order
                tris = tuple([triindex.v for triindex in o.data.mloop])
                flippedtris = list()
                for i in range(0, len(tris)-3, 3):
                    flippedtris.append(tris[i+2])
                    flippedtris.append(tris[i+1])
                    flippedtris.append(tris[i+0])

                file_content += ', triangles=' + str(flippedtris)

                if o.data.mloopuv:
                    uvs = tuple([v.uv for v in o.data.mloopuv])
                    file_content += ', uvs=' + str(uvs)

                file_content += ''', mode='triangle')'''

                if write_to_disk:
                    with open(file_path, 'w') as file:
                        file.write(file_content)

                return file_content
def ursina_mesh_to_obj(mesh, name='', out_path=application.compressed_models_folder, max_decimals=3):
    """Write an ursina Mesh out as a wavefront .obj file.

    Coordinates are rounded to *max_decimals* places.  When *name* is
    empty, the snake_cased class name of the mesh is used.
    # NOTE(review): if the mesh has no triangles and its mode is not
    # 'ngon', `tris` is never bound and the final loop raises NameError —
    # confirm whether such meshes can reach this function.
    """
    from ursina.string_utilities import camel_to_snake

    if not name:
        name = camel_to_snake(mesh.__class__.__name__)

    obj = 'o ' + name + '\n'

    for v in mesh.vertices:
        v = [round(e, max_decimals) for e in v]
        obj += f'v {v[0]} {v[1]} {v[2]}\n'

    if mesh.uvs:
        for uv in mesh.uvs:
            uv = [round(e, max_decimals) for e in uv]
            obj += f'vt {uv[0]} {uv[1]}\n'

    obj += 's off\n'

    if mesh.triangles:
        tris = mesh.triangles

        if isinstance(tris[0], tuple): # convert from tuples to flat
            new_tris = list()
            for t in tris:
                if len(t) == 3:
                    new_tris.extend([t[0], t[1], t[2]])
                elif len(t) == 4: # turn quad into tris
                    new_tris.extend([t[0], t[1], t[2], t[2], t[3], t[0]])

            tris = new_tris

    if mesh.mode == 'ngon':
        # fan-triangulate: (1,2,0), (2,3,0), ...
        tris = list()
        for i in range(1, len(mesh.vertices)-1):
            tris.extend((i, i+1, 0))

    # tris must be a list of indices
    for i, t in enumerate(tris):
        if i % 3 == 0:
            obj += '\nf '
        obj += str(t+1)     # .obj face indices are 1-based
        if mesh.uvs:
            obj += '/'+str(t+1)
        obj += ' '

    # print(obj)
    with open(out_path / (name + '.obj'), 'w') as f:
        f.write(obj)
        print('saved obj:', out_path / (name + '.obj'))
def compress_internal():
    """Rebuild the engine's bundled models: convert the internal .blend
    files to .obj, then bake those into .ursinamesh files and delete the
    intermediate .obj files."""
    compress_models(application.internal_models_folder)
    target_folder = application.internal_models_compressed_folder
    obj_to_ursinamesh(path=target_folder, outpath=target_folder,
                      return_mesh=False, save_to_file=True, delete_obj=True)
if __name__ == '__main__':
    # Ad-hoc manual test scene for the importer; only runs when this
    # module is executed directly.  The commented-out lines are
    # alternative experiments kept around for quick toggling.
    # compress_internal()
    from ursina import *
    app = Ursina()

    print('imported_meshes:\n', imported_meshes)
    # Entity(model='quad').model.save('quad.bam')

    # m = obj_to_ursinamesh(path=application.asset_folder.parent / 'samples', name='procedural_rock_0')
    # Entity(model=m)
    # EditorCamera()

    application.asset_folder = application.asset_folder.parent / 'samples'

    # from ursina.shaders import basic_lighting_shader
    from ursina.shaders import normals_shader as rim_shader
    from ursina.shaders import matcap_shader as rim_shader
    # from ursina.shaders import height_shader as rim_shader

    t = time.time()
    # blender_scene = load_blender_scene(path=application.asset_folder, name='desert', reload=True)
    blender_scene = load_blender_scene(path=application.asset_folder, name='blender_level_editor_test_scene_2', reload=True)
    print('-------', time.time() - t)
    # print('--------', blender_scene.children)
    # for e in blender_scene.children:
    #     # e.color = color.random_color()
    #     e.shader = rim_shader
    #     e.texture='matcap_4'
    #
    #
    # blender_scene.Plane_002.collider = 'mesh'
    # from ursina.prefabs.first_person_controller import FirstPersonController
    # player = FirstPersonController()

    # def input(key):
    #     if key == '+':
    #         for e in blender_scene.children:
    #             e.texture_scale = Vec2(e.texture_scale[0], e.texture_scale[1]+.1)
    #     if key == '-':
    #         for e in blender_scene.children:
    #             e.texture_scale = Vec2(e.texture_scale[0], e.texture_scale[1]-.1)
    #         print(blender_scene.children[0].texture_scale)
    #
    EditorCamera()
    Sky(texture='sky_sunset')

    # def update():
    #     blender_scene.Cube.x += (held_keys['d'] - held_keys['a']) * time.dt * 10

    app.run()
    # e = Entity(model=Cylinder(16))
    # ursina_mesh_to_obj(e.model, name='quad_export_test')
| 36.36478 | 129 | 0.542488 |
5c6aac3de58d41f4b80bf1903101e4ecfe2fa150 | 113 | py | Python | Models/querysets/Field lookups/__second/models.py | looking-for-a-job/django-examples | dfafa450668cac5c0351f6c7238b8886511229bf | [
"Unlicense"
] | null | null | null | Models/querysets/Field lookups/__second/models.py | looking-for-a-job/django-examples | dfafa450668cac5c0351f6c7238b8886511229bf | [
"Unlicense"
] | null | null | null | Models/querysets/Field lookups/__second/models.py | looking-for-a-job/django-examples | dfafa450668cac5c0351f6c7238b8886511229bf | [
"Unlicense"
] | null | null | null | #!/usr/bin/env python
from django.db import models
class MyModel(models.Model):
time = models.TimeField()
| 14.125 | 29 | 0.716814 |
dc04dacc77fa27dd65cd5203d9238c8689a01496 | 5,340 | py | Python | nova/tests/integrated/integrated_helpers.py | vmthunder/nova | baf05caab705c5778348d9f275dc541747b7c2de | [
"Apache-2.0"
] | 2 | 2016-04-19T08:20:39.000Z | 2021-10-03T16:00:37.000Z | nova/tests/integrated/integrated_helpers.py | vmthunder/nova | baf05caab705c5778348d9f275dc541747b7c2de | [
"Apache-2.0"
] | 9 | 2015-05-20T11:20:17.000Z | 2017-07-27T08:21:33.000Z | nova/tests/integrated/integrated_helpers.py | vmthunder/nova | baf05caab705c5778348d9f275dc541747b7c2de | [
"Apache-2.0"
] | 13 | 2015-05-05T09:34:04.000Z | 2017-11-08T02:03:46.000Z | # Copyright 2011 Justin Santa Barbara
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
Provides common functionality for integrated unit tests
"""
import random
import string
import uuid
from oslo.config import cfg
import nova.image.glance
from nova.openstack.common import log as logging
from nova import service
from nova import test
from nova.tests import cast_as_call
from nova.tests import fake_crypto
import nova.tests.image.fake
from nova.tests.integrated.api import client
CONF = cfg.CONF
LOG = logging.getLogger(__name__)
CONF.import_opt('manager', 'nova.cells.opts', group='cells')
def generate_random_alphanumeric(length):
    """Create a random string of uppercase letters and digits of the
    requested length."""
    alphabet = string.ascii_uppercase + string.digits
    return ''.join(random.choice(alphabet) for _ in range(length))
def generate_random_numeric(length):
    """Create a random string of decimal digits of the requested length."""
    digits = string.digits
    return ''.join(random.choice(digits) for _ in range(length))
def generate_new_element(items, prefix, numeric=False):
    """Create a random string with *prefix* that is not in the *items* list.

    :param items: existing names that must not be returned
    :param prefix: prefix prepended to the random 8-character suffix
    :param numeric: when True the suffix is digits only, otherwise
        uppercase alphanumerics
    :returns: a candidate name guaranteed absent from *items*
    """
    # Build the membership set once: O(1) lookups per retry instead of
    # scanning the list on every collision.
    taken = set(items)
    while True:
        if numeric:
            candidate = prefix + generate_random_numeric(8)
        else:
            candidate = prefix + generate_random_alphanumeric(8)
        if candidate not in taken:
            return candidate
        # Lazy %-args: the message is only formatted if debug logging is on.
        LOG.debug("Random collision on %s", candidate)
class _IntegratedTestBase(test.TestCase):
    """Base class for integrated tests: boots the nova services plus the
    compute API and exposes a REST test client as self.api.

    Subclasses are expected to provide _api_version, _image_ref_parameter
    and _flavor_ref_parameter (read below but not defined here).
    """
    def setUp(self):
        """Configure flags, stub crypto/glance, start services and the API."""
        super(_IntegratedTestBase, self).setUp()
        f = self._get_flags()
        self.flags(**f)
        self.flags(verbose=True)
        # Replace real certificate generation with the fake crypto module.
        self.useFixture(test.ReplaceModule('crypto', fake_crypto))
        nova.tests.image.fake.stub_out_image_service(self.stubs)
        self.flags(scheduler_driver='nova.scheduler.'
                   'chance.ChanceScheduler')
        self._setup_services()
        self._start_api_service()
        self.api = self._get_test_client()
        # Make RPC casts synchronous so test assertions see their effects.
        self.useFixture(cast_as_call.CastAsCall(self.stubs))
    def _setup_services(self):
        """Start every nova service the integrated tests rely on."""
        self.conductor = self.start_service('conductor',
                                            manager=CONF.conductor.manager)
        self.compute = self.start_service('compute')
        self.cert = self.start_service('cert')
        self.consoleauth = self.start_service('consoleauth')
        self.network = self.start_service('network')
        self.scheduler = self.start_service('scheduler')
        self.cells = self.start_service('cells', manager=CONF.cells.manager)
    def tearDown(self):
        """Stop the API service and reset the fake image service."""
        self.osapi.stop()
        nova.tests.image.fake.FakeImageService_reset()
        super(_IntegratedTestBase, self).tearDown()
    def _get_test_client(self):
        # Credentials are fake; the test API does not authenticate for real.
        return client.TestOpenStackClient('fake', 'fake', self.auth_url)
    def _start_api_service(self):
        """Start the osapi_compute WSGI service and record its auth URL."""
        self.osapi = service.WSGIService("osapi_compute")
        self.osapi.start()
        self.auth_url = 'http://%(host)s:%(port)s/%(api_version)s' % ({
            'host': self.osapi.host, 'port': self.osapi.port,
            'api_version': self._api_version})
    def _get_flags(self):
        """An opportunity to setup flags, before the services are started."""
        f = {}
        # Ensure tests only listen on localhost
        f['ec2_listen'] = '127.0.0.1'
        f['osapi_compute_listen'] = '127.0.0.1'
        f['metadata_listen'] = '127.0.0.1'
        # Auto-assign ports to allow concurrent tests
        f['ec2_listen_port'] = 0
        f['osapi_compute_listen_port'] = 0
        f['metadata_listen_port'] = 0
        f['fake_network'] = True
        return f
    def get_unused_server_name(self):
        """Return a random server name not already used on the API."""
        servers = self.api.get_servers()
        server_names = [server['name'] for server in servers]
        return generate_new_element(server_names, 'server')
    def get_invalid_image(self):
        # A fresh UUID is guaranteed not to exist in the fake image service.
        return str(uuid.uuid4())
    def _build_minimal_create_server_request(self):
        """Build the smallest valid create-server body: image, flavor, name."""
        server = {}
        image = self.api.get_images()[0]
        LOG.debug("Image: %s" % image)
        if self._image_ref_parameter in image:
            image_href = image[self._image_ref_parameter]
        else:
            image_href = image['id']
            image_href = 'http://fake.server/%s' % image_href
        # We now have a valid imageId
        server[self._image_ref_parameter] = image_href
        # Set a valid flavorId
        flavor = self.api.get_flavors()[0]
        LOG.debug("Using flavor: %s" % flavor)
        server[self._flavor_ref_parameter] = ('http://fake.server/%s'
                                              % flavor['id'])
        # Set a valid server name
        server_name = self.get_unused_server_name()
        server['name'] = server_name
        return server
| 33.584906 | 78 | 0.650936 |
e8f7e47279a3a212308fcd3003b6e9e0aaa3c979 | 2,396 | py | Python | Rainfall Monitor/GetForecast.py | chennes/Rainfall-Monitor | a1430a10add5ed5bf967e8f2eaf471ff75cef8ea | [
"BSD-3-Clause"
] | null | null | null | Rainfall Monitor/GetForecast.py | chennes/Rainfall-Monitor | a1430a10add5ed5bf967e8f2eaf471ff75cef8ea | [
"BSD-3-Clause"
] | null | null | null | Rainfall Monitor/GetForecast.py | chennes/Rainfall-Monitor | a1430a10add5ed5bf967e8f2eaf471ff75cef8ea | [
"BSD-3-Clause"
] | null | null | null |
# Weather API: A simple app for learning Python
#
# This program downloads the latest forecast for Norman, OK and stores it in a couple
# of lists suitable for processing with matplotlib
#
# Copyright 2022 Pioneer Library System
#
# LICENSE: BSD 3-Clause
#
# Redistribution and use in source and binary forms, with or without modification, are
# permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice, this list
# of conditions and the following disclaimer.
# 2. Redistributions in binary form must reproduce the above copyright notice, this
# list of conditions and the following disclaimer in the documentation and/or other
# materials provided with the distribution.
# 3. Neither the name of the copyright holder nor the names of its contributors may be
# used to endorse or promote products derived from this software without specific
# prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY
# EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
# MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL
# THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
# STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF
# THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
import requests
import json
import matplotlib.pyplot as plt
import numpy as np
# Coordinates for Norman, OK (see the module header).
LATITUDE = 35.22
LONGITUDE = -97.44
# api.weather.gov entry point: resolving a lat/lon point returns metadata
# that includes the URL of the forecast for that location.
WEATHER_API_START_URL = f"https://api.weather.gov/points/{LATITUDE},{LONGITUDE}"
if __name__ == "__main__":
    # Step 1: look up the point metadata to discover the forecast URL.
    r = requests.get(WEATHER_API_START_URL)
    results = json.loads(r.text)
    forecast_url = results["properties"]["forecast"]
    # Step 2: fetch the forecast itself; "periods" holds the per-interval data.
    r = requests.get(forecast_url)
    results = json.loads(r.text)
    forecast = results["properties"]["periods"]
    # Collect per-period names and temperatures into parallel lists,
    # suitable for plotting with matplotlib (imported above).
    labels = []
    temperatures = []
    for entry in forecast:
        labels.append(entry["name"])
        temperatures.append(entry["temperature"])
| 42.035088 | 90 | 0.750417 |
74566852099dd9eff927fc905f29a9f5e7da522b | 8,772 | py | Python | ml/rl/test/gridworld/test_gridworld_sac.py | sfujim/Horizon | ee24203bf72740563f1e12d16990bfafe2ae9c6f | [
"BSD-3-Clause"
] | 2 | 2021-05-23T22:11:21.000Z | 2021-06-17T13:08:53.000Z | ml/rl/test/gridworld/test_gridworld_sac.py | sfujim/Horizon | ee24203bf72740563f1e12d16990bfafe2ae9c6f | [
"BSD-3-Clause"
] | null | null | null | ml/rl/test/gridworld/test_gridworld_sac.py | sfujim/Horizon | ee24203bf72740563f1e12d16990bfafe2ae9c6f | [
"BSD-3-Clause"
] | 2 | 2021-01-06T01:06:50.000Z | 2021-06-24T01:12:52.000Z | #!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates. All rights reserved.
import os
import random
import tempfile
import unittest
import numpy as np
import numpy.testing as npt
import torch
from ml.rl.models.actor import DirichletFullyConnectedActor, GaussianFullyConnectedActor
from ml.rl.models.fully_connected_network import FullyConnectedNetwork
from ml.rl.models.output_transformer import (
ActorOutputTransformer,
ParametricActionOutputTransformer,
)
from ml.rl.models.parametric_dqn import FullyConnectedParametricDQN
from ml.rl.preprocessing.feature_extractor import PredictorFeatureExtractor
from ml.rl.preprocessing.normalization import (
get_num_output_features,
sort_features_by_normalization,
)
from ml.rl.test.gridworld.gridworld_base import DISCOUNT
from ml.rl.test.gridworld.gridworld_continuous import GridworldContinuous
from ml.rl.test.gridworld.gridworld_evaluator import GridworldContinuousEvaluator
from ml.rl.test.gridworld.gridworld_test_base import GridworldTestBase
from ml.rl.thrift.core.ttypes import (
FeedForwardParameters,
OptimizerParameters,
RLParameters,
SACModelParameters,
SACTrainingParameters,
)
from ml.rl.training.rl_exporter import ActorExporter, ParametricDQNExporter
from ml.rl.training.sac_trainer import SACTrainer
class TestGridworldSAC(GridworldTestBase):
    """End-to-end tests of the SAC trainer on the continuous gridworld."""
    def setUp(self):
        # Fix every RNG seed so training/evaluation runs are reproducible.
        self.minibatch_size = 4096
        np.random.seed(0)
        random.seed(0)
        torch.manual_seed(0)
        super().setUp()
    def get_sac_parameters(
        self,
        use_2_q_functions=False,
        logged_action_uniform_prior=True,
        constrain_action_sum=False,
    ):
        """Build the SACModelParameters config shared by the tests below."""
        return SACModelParameters(
            rl=RLParameters(gamma=DISCOUNT, target_update_rate=0.5),
            training=SACTrainingParameters(
                minibatch_size=self.minibatch_size,
                use_2_q_functions=use_2_q_functions,
                q_network_optimizer=OptimizerParameters(),
                value_network_optimizer=OptimizerParameters(),
                actor_network_optimizer=OptimizerParameters(),
                alpha_optimizer=OptimizerParameters(),
                logged_action_uniform_prior=logged_action_uniform_prior,
            ),
            q_network=FeedForwardParameters(
                layers=[128, 64], activations=["relu", "relu"]
            ),
            value_network=FeedForwardParameters(
                layers=[128, 64], activations=["relu", "relu"]
            ),
            actor_network=FeedForwardParameters(
                layers=[128, 64], activations=["relu", "relu"]
            ),
            constrain_action_sum=constrain_action_sum,
        )
    def get_sac_trainer(self, env, parameters, use_gpu):
        """Construct a SACTrainer (Q networks, actor, optional value net).

        Network input sizes are derived from the environment's state/action
        normalization metadata.
        """
        state_dim = get_num_output_features(env.normalization)
        action_dim = get_num_output_features(env.normalization_action)
        q1_network = FullyConnectedParametricDQN(
            state_dim,
            action_dim,
            parameters.q_network.layers,
            parameters.q_network.activations,
        )
        q2_network = None
        if parameters.training.use_2_q_functions:
            q2_network = FullyConnectedParametricDQN(
                state_dim,
                action_dim,
                parameters.q_network.layers,
                parameters.q_network.activations,
            )
        if parameters.constrain_action_sum:
            # Dirichlet-based actor is used when the action components' sum
            # is constrained; otherwise a Gaussian actor.
            actor_network = DirichletFullyConnectedActor(
                state_dim,
                action_dim,
                parameters.actor_network.layers,
                parameters.actor_network.activations,
            )
        else:
            actor_network = GaussianFullyConnectedActor(
                state_dim,
                action_dim,
                parameters.actor_network.layers,
                parameters.actor_network.activations,
            )
        value_network = None
        if parameters.training.use_value_network:
            # Scalar state-value head: hidden layers plus a linear output.
            value_network = FullyConnectedNetwork(
                [state_dim] + parameters.value_network.layers + [1],
                parameters.value_network.activations + ["linear"],
            )
        if use_gpu:
            q1_network.cuda()
            if q2_network:
                q2_network.cuda()
            if value_network:
                value_network.cuda()
            actor_network.cuda()
        return SACTrainer(
            q1_network,
            actor_network,
            parameters,
            use_gpu=use_gpu,
            value_network=value_network,
            q2_network=q2_network,
        )
    def get_critic_exporter(self, trainer, environment):
        """Exporter that serves the Q1 critic as a parametric-DQN predictor."""
        feature_extractor = PredictorFeatureExtractor(
            state_normalization_parameters=environment.normalization,
            action_normalization_parameters=environment.normalization_action,
        )
        output_transformer = ParametricActionOutputTransformer()
        return ParametricDQNExporter(
            trainer.q1_network, feature_extractor, output_transformer
        )
    def get_actor_predictor(self, trainer, environment):
        """Export the actor network as a serving predictor that scales
        outputs into the environment's action range."""
        feature_extractor = PredictorFeatureExtractor(
            state_normalization_parameters=environment.normalization
        )
        output_transformer = ActorOutputTransformer(
            sort_features_by_normalization(environment.normalization_action)[0],
            environment.max_action_range.reshape(-1),
            environment.min_action_range.reshape(-1),
        )
        predictor = ActorExporter(
            trainer.actor_network, feature_extractor, output_transformer
        ).export()
        return predictor
    def _test_sac_trainer(self, use_gpu=False, **kwargs):
        """Train SAC on gridworld, then sanity-check critic + actor export.

        kwargs are forwarded to get_sac_parameters.
        """
        environment = GridworldContinuous()
        trainer = self.get_sac_trainer(
            environment, self.get_sac_parameters(**kwargs), use_gpu
        )
        evaluator = GridworldContinuousEvaluator(
            environment, assume_optimal_policy=False, gamma=DISCOUNT
        )
        exporter = self.get_critic_exporter(trainer, environment)
        self.evaluate_gridworld(environment, evaluator, trainer, exporter, use_gpu)
        # Make sure actor predictor works
        actor_predictor = self.get_actor_predictor(trainer, environment)
        # Just test that it doesn't blow up
        preds = actor_predictor.predict(evaluator.logged_states)
        self._test_save_load_actor(preds, actor_predictor, evaluator.logged_states)
        # TODO: run predictor and check results
    def _test_save_load_actor(
        self, before_preds, predictor, states, dbtype="minidb", check_equality=False
    ):
        """Round-trip the predictor through save/load and re-predict.

        check_equality is off by default because the actor's outputs are
        stochastic; then only the number of predictions is compared.
        """
        with tempfile.TemporaryDirectory() as tmpdirname:
            tmp_path = os.path.join(tmpdirname, "model")
            predictor.save(tmp_path, dbtype)
            new_predictor = type(predictor).load(tmp_path, dbtype)
            after_preds = new_predictor.predict(states)
            if check_equality:
                self._check_output_match(before_preds, after_preds)
            else:
                # Check if dims match for stochastic outputs in SAC
                self.assertEqual(len(before_preds), len(after_preds))
    def _check_output_match(self, a_preds, b_preds):
        """Assert two prediction lists share keys and have close values."""
        self.assertEqual(len(a_preds), len(b_preds))
        self.assertEqual(a_preds[0].keys(), b_preds[0].keys())
        keys = list(a_preds[0].keys())
        a_array = [[r[k] for k in keys] for r in a_preds]
        b_array = [[r[k] for k in keys] for r in b_preds]
        npt.assert_allclose(a_array, b_array)
    def test_sac_trainer(self):
        self._test_sac_trainer()
    @unittest.skipIf(not torch.cuda.is_available(), "CUDA not available")
    def test_sac_trainer_gpu(self):
        self._test_sac_trainer(use_gpu=True)
    def test_sac_trainer_use_2_q_functions(self):
        self._test_sac_trainer(use_2_q_functions=True)
    @unittest.skipIf(not torch.cuda.is_available(), "CUDA not available")
    def test_sac_trainer_gpu_use_2_q_functions(self):
        self._test_sac_trainer(use_2_q_functions=True, use_gpu=True)
    def test_sac_trainer_model_propensity(self):
        self._test_sac_trainer(logged_action_uniform_prior=True)
    @unittest.skipIf(not torch.cuda.is_available(), "CUDA not available")
    def test_sac_trainer_model_propensity_gpu(self):
        self._test_sac_trainer(use_gpu=True, logged_action_uniform_prior=True)
    def test_sac_trainer_w_dirichlet_actor(self):
        self._test_sac_trainer(constrain_action_sum=True)
    # TODO: Renable when PyTorch supports backwards pass in CUDA.
    @unittest.skipIf(True or not torch.cuda.is_available(), "CUDA not available")
    def test_sac_trainer_w_dirichlet_actor_gpu(self):
        self._test_sac_trainer(use_gpu=True, constrain_action_sum=True)
| 38.13913 | 88 | 0.677953 |
6cafe15dc1607ac3bdd4ab02cc69e183ae95c054 | 14,942 | py | Python | lib/membase/helper/subdoc_helper.py | pavithra-mahamani/testrunner | d204491caa23f1fbe90505646534ed7810d96289 | [
"Apache-2.0"
] | 1 | 2020-08-31T18:51:45.000Z | 2020-08-31T18:51:45.000Z | lib/membase/helper/subdoc_helper.py | pavithra-mahamani/testrunner | d204491caa23f1fbe90505646534ed7810d96289 | [
"Apache-2.0"
] | null | null | null | lib/membase/helper/subdoc_helper.py | pavithra-mahamani/testrunner | d204491caa23f1fbe90505646534ed7810d96289 | [
"Apache-2.0"
] | 2 | 2020-07-24T07:12:01.000Z | 2022-03-17T23:43:28.000Z | import json
import random
import logger
import time
import unittest
import copy
from TestInput import TestInputSingleton
from couchbase_helper.document import DesignDocument, View
from membase.api.rest_client import RestConnection, RestHelper
from membase.helper.bucket_helper import BucketOperationHelper
from membase.helper.cluster_helper import ClusterOperationHelper
from membase.helper.rebalance_helper import RebalanceHelper
from memcached.helper.data_helper import MemcachedClientHelper
from memcached.helper.data_helper import MemcachedError
from memcached.helper.data_helper import VBucketAwareMemcached
from lib.mc_bin_client import MemcachedClient, MemcachedError
from lib.memcacheConstants import *
# The SubdocHelper operates on a single bucket over a single RestConnection
# The original testcase needs to be passed in so we can make assertions
class SubdocHelper:
    """Test helper operating on a single bucket over one RestConnection.

    The owning testcase is passed in so assertions/failures are raised on it.
    """
    def __init__(self, testcase, bucket):
        self.testcase = testcase
        self.bucket = bucket
        self.input = TestInputSingleton.input
        self.servers = self.input.servers
        # First configured server is treated as the cluster master.
        self.master = self.servers[0]
        self.rest = RestConnection(self.master)
        self.log = logger.Logger.get_logger()
        self.client = MemcachedClient(host=self.master.ip)
        # Template for one nesting level of a dictionary-shaped document.
        self.jsonSchema = {
            "id" : "0",
            "number" : 0,
            "array" : [],
            "child" : {},
            "isDict" : True,
            "padding": None
        }
        # Same template with long key names, to exercise long sub-doc paths.
        self.jsonSchema_longPath = {
            "id" : "0",
            "number" : 0,
            "array12345678901234567890123456789" : [],
            "child12345678901234567890123456789" : {},
            "isDict" : True,
            "padding": None
        }
    def set_bucket(self, bucket):
        # Re-point the helper at a different bucket.
        self.bucket = bucket
    def setup_cluster(self):
        """Initialize the cluster, rebalance all nodes in, create the bucket."""
        node_ram_ratio = BucketOperationHelper.base_bucket_ratio(self.servers)
        mem_quota = int(self.rest.get_nodes_self().mcdMemoryReserved *
                        node_ram_ratio)
        self.rest.init_cluster(self.master.rest_username,
                               self.master.rest_password)
        self.rest.init_cluster_memoryQuota(self.master.rest_username,
                                           self.master.rest_password,
                                           memoryQuota=mem_quota)
        for server in self.servers:
            ClusterOperationHelper.cleanup_cluster([server])
        ClusterOperationHelper.wait_for_ns_servers_or_assert(
            [self.master], self.testcase)
        rebalanced = ClusterOperationHelper.add_and_rebalance(
            self.servers)
        self.testcase.assertTrue(rebalanced, "cluster is not rebalanced")
        self._create_default_bucket()
    def cleanup_cluster(self):
        """Delete all buckets and tear down the cluster unless the test
        params contain "skip_cleanup"."""
        if not "skip_cleanup" in TestInputSingleton.input.test_params:
            BucketOperationHelper.delete_all_buckets_or_assert(
                self.servers, self.testcase)
            ClusterOperationHelper.cleanup_cluster(self.servers)
            ClusterOperationHelper.wait_for_ns_servers_or_assert(
                self.servers, self.testcase)
    '''Recursive call to create a deep nested Dictionary JSON document '''
    def _createNestedJson(self, key, dict):
        # NOTE(review): the parameter name "dict" shadows the builtin; it is
        # a {'doc': ..., 'levels': n} accumulator mutated in place.
        if dict['levels'] == 0:
            return
        if dict['doc'] == {}:
            dict['doc'] = copy.copy(self.jsonSchema)
        else:
            # Descend one level: attach a fresh schema as the child node.
            dict['doc']['child'] = copy.copy(self.jsonSchema)
            dict['doc']['child']['array'] = []
            dict['doc'] = dict['doc']['child']
        dict['doc']['id'] = key
        dict['doc']['number'] = dict['levels']
        for level in range(0, dict['levels']):
            dict['doc']['array'].append(level)
        return self._createNestedJson(key, {'doc': dict['doc'], 'levels': dict['levels']-1})
    '''Recursive call to create a deep nested Array JSON document
    This should be changed to make it as recursive for array'''
    def _createNestedJsonArray(self, key, dict):
        # Fixed nested-array payload stored at every array slot below.
        self.array = [[[1, 2, 3, True, True], 200, 300], 20, 30, [1000, 2000, 3000]]
        if dict['levels'] == 0:
            return
        if dict['doc'] == {}:
            dict['doc'] = copy.copy(self.jsonSchema)
        else:
            dict['doc']['child'] = copy.copy(self.jsonSchema)
            dict['doc']['child']['array'] = []
            dict['doc'] = dict['doc']['child']
        dict['doc']['id'] = key
        dict['doc']['number'] = dict['levels']
        for level in range(0, dict['levels']):
            dict['doc']['array'].append(level)
            dict['doc']['array'][level] = self.array
        # NOTE(review): recurses into _createNestedJson, so deeper levels are
        # plain dictionary levels rather than array levels.
        return self._createNestedJson(key, {'doc': dict['doc'], 'levels': dict['levels']-1})
    '''Recursive call to create a deep nested long path Dictionary JSON document '''
    def _createNestedJson_longPath(self, key, dict):
        if dict['levels'] == 0:
            return
        if dict['doc'] == {}:
            dict['doc'] = copy.copy(self.jsonSchema_longPath)
        else:
            dict['doc']['child12345678901234567890123456789'] = copy.copy(self.jsonSchema)
            dict['doc']['child12345678901234567890123456789']['array12345678901234567890123456789'] = []
            dict['doc'] = dict['doc']['child12345678901234567890123456789']
        dict['doc']['id'] = key
        dict['doc']['number'] = dict['levels']
        for level in range(0, dict['levels']):
            dict['doc']['array12345678901234567890123456789'].append(level)
        # NOTE(review): also recurses into _createNestedJson, so only the
        # first level uses the long-path schema.
        return self._createNestedJson(key, {'doc': dict['doc'], 'levels': dict['levels']-1})
    def insert_nested_docs(self, num_of_docs, prefix='doc', levels=16, size=512, return_docs=False, long_path=False,collection=None):
        """Insert deeply nested JSON docs into the bucket.

        Returns the doc keys, or the docs themselves when return_docs is set.
        NOTE(review): the "size" parameter is currently unused.
        """
        rest = RestConnection(self.master)
        smart = VBucketAwareMemcached(rest, self.bucket)
        doc_names = []
        dict = {'doc' : {}, 'levels' : levels }
        for i in range(0, num_of_docs):
            key = doc_name = "{0}-{1}".format(prefix, i)
            if long_path:
                self._createNestedJson_longPath(key, dict)
            else:
                self._createNestedJson(key, dict)
            value = dict['doc']
            if not return_docs:
                doc_names.append(doc_name)
            else:
                doc_names.append(value)
            # loop till value is set
            fail_count = 0
            while True:
                try:
                    smart.set(key, 0, 0, json.dumps(value), collection=collection)
                    break
                except MemcachedError as e:
                    fail_count += 1
                    # 132/133 are retried (up to 60 times); anything else,
                    # or exhausting the retries, re-raises.
                    if (e.status == 133 or e.status == 132) and fail_count < 60:
                        if i == 0:
                            self.log.error("waiting 5 seconds. error {0}"
                                           .format(e))
                            time.sleep(5)
                        else:
                            self.log.error(e)
                            time.sleep(1)
                    else:
                        raise e
        self.log.info("Inserted {0} json documents".format(num_of_docs))
        return doc_names
    # If you insert docs that are already there, they are simply
    # overwritten.
    # extra_values are key value pairs that will be added to the
    # JSON document
    # If `return_docs` is true, it'll return the full docs and not
    # only the keys
    def insert_nested_specific_docs(self, num_of_docs, prefix='doc', extra_values={},
                                    return_docs=False,collection=None):
        """Insert docs with a fixed, known nested structure (d1/d2/d3/...).

        The fixed seed keeps the random field values reproducible.
        """
        random.seed(12345)
        rest = RestConnection(self.master)
        smart = VBucketAwareMemcached(rest, self.bucket)
        doc_names = []
        for i in range(0, num_of_docs):
            key = doc_name = "{0}-{1}".format(prefix, i)
            geom = {"type": "Point", "coordinates":
                        [random.randrange(-180, 180),
                         random.randrange(-90, 90)]}
            value = {
                "padding": None,
                "d1" :{
                    "padding": None,
                    "d2" :{
                        "int_array" : [1, 2, 3, 4, 5, 6, 7, 8, 9, 10],
                        "str_array" :["john", "doe", "john", "block", "jim", "john"],
                        "mix_array" : [1, 2, True, False, 'bird', 5.0, 6.0, repr(123)],
                        "d3" : {
                            "d4_01" : 1,
                            "d4_02" : [21, 22, 23, 24, 25 ],
                            "d4_03" : False,
                            "d4_04" : "San Francisco",
                            "d4_05" : {
                                "d5_01" : random.randrange(6, 13),
                                "d5_02" : [ random.randrange(5, 10), random.randrange(6, 13)],
                                "d5_03" : "abcdefghi",
                                "d5_04" : {
                                    "d6_01" : random.randrange(6, 13),
                                    "d6_02" : [1, 2, True, False, 'bird', 5.0, 6.0, repr(123)]
                                }
                            }
                        },
                        "d2_02" : {"d2_02_01":"name"},
                        "d2_03" :geom
                    },
                    "d1_02" :[1, 2, True, False, 'bird', 5.0, 6.0, repr(123)],
                    "d1_03" : False
                },
                "age": random.randrange(1, 1000),
                "geometry": geom,
                "array" :[0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 20],
                "isDict" : True,
                "dict_value" : {"name":"abc", "age":1},
                "height": random.randrange(1, 13000),
                "bloom": random.randrange(1, 6),
                "shed_leaves": random.randrange(6, 13)}
            value.update(extra_values)
            if not return_docs:
                doc_names.append(doc_name)
            else:
                doc_names.append(value)
            # loop till value is set
            fail_count = 0
            while True:
                try:
                    smart.set(key, 0, 0, json.dumps(value), collection=collection)
                    break
                except MemcachedError as e:
                    fail_count += 1
                    if (e.status == 133 or e.status == 132) and fail_count < 60:
                        if i == 0:
                            self.log.error("waiting 5 seconds. error {0}"
                                           .format(e))
                            time.sleep(5)
                        else:
                            self.log.error(e)
                            time.sleep(1)
                    else:
                        raise e
        self.log.info("Inserted {0} json documents".format(num_of_docs))
        return doc_names
    def insert_docs(self, num_of_docs, prefix='doc', extra_values={},
                    return_docs=False,collection=None):
        """Insert flat JSON docs; extra_values is merged into each doc.

        NOTE(review): extra_values has a mutable default; it is only read
        (via value.update), never mutated, so this is currently harmless.
        """
        random.seed(12345)
        rest = RestConnection(self.master)
        smart = VBucketAwareMemcached(rest, self.bucket)
        doc_names = []
        for i in range(0, num_of_docs):
            key = doc_name = "{0}-{1}".format(prefix, i)
            geom = {"type": "Point", "coordinates":
                        [random.randrange(-180, 180),
                         random.randrange(-90, 90)]}
            value = {
                "name": doc_name,
                "age": random.randrange(1, 1000),
                "geometry": geom,
                "array" :[0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 20],
                "isDict" : True,
                "dict_value" : {"name":"abc", "age":1},
                "height": random.randrange(1, 13000),
                "bloom": random.randrange(1, 6),
                "shed_leaves": random.randrange(6, 13)}
            value.update(extra_values)
            if not return_docs:
                doc_names.append(doc_name)
            else:
                doc_names.append(value)
            # loop till value is set
            fail_count = 0
            while True:
                try:
                    smart.set(key, 0, 0, json.dumps(value), collection=collection)
                    break
                except MemcachedError as e:
                    fail_count += 1
                    if (e.status == 133 or e.status == 132) and fail_count < 60:
                        if i == 0:
                            self.log.error("waiting 5 seconds. error {0}"
                                           .format(e))
                            time.sleep(5)
                        else:
                            self.log.error(e)
                            time.sleep(1)
                    else:
                        raise e
        self.log.info("Inserted {0} json documents".format(num_of_docs))
        return doc_names
    def generate_matching_docs(self, docs_inserted, params, value=None):
        # Not implemented yet.
        pass
    def verify_matching_keys(self, expected, current):
        # Not implemented yet.
        pass
    # Returns the keys of the deleted documents
    # If you try to delete a document that doesn't exists, just skip it
    def delete_docs(self, num_of_docs, prefix='doc'):
        # Not implemented yet.
        pass
    #create a bucket if it doesn't exist
    def _create_default_bucket(self):
        """Create self.bucket with a RAM quota derived from the node,
        then wait for memcached to become ready."""
        helper = RestHelper(self.rest)
        if not helper.bucket_exists(self.bucket):
            node_ram_ratio = BucketOperationHelper.base_bucket_ratio(
                self.servers)
            info = self.rest.get_nodes_self()
            available_ram = int(info.memoryQuota * node_ram_ratio)
            # 256 MB is the minimum usable bucket quota here.
            if available_ram < 256:
                available_ram = 256
            self.rest.create_bucket(bucket=self.bucket,
                                    ramQuotaMB=available_ram)
            ready = BucketOperationHelper.wait_for_memcached(self.master,
                                                             self.bucket)
            self.testcase.assertTrue(ready, "wait_for_memcached failed")
        self.testcase.assertTrue(
            helper.bucket_exists(self.bucket),
            "unable to create {0} bucket".format(self.bucket))
    # Compare the inserted documents with the returned result
    # Both arguments contain a list of document names
    def verify_result(self, inserted, result):
        #not_found = []
        #for key in inserted:
        #    if not key in result:
        #        not_found.append(key)
        not_found = list(set(inserted) - set(result))
        if not_found:
            self._print_keys_not_found(not_found)
            self.testcase.fail("Returned only {0} "
                               "docs and not {1} as expected"
                               .format(len(result), len(inserted)))
    def _print_keys_not_found(self, keys_not_found, how_many=10):
        # Log at most how_many missing keys to keep failure output short.
        how_many = min(how_many, len(keys_not_found))
        for i in range(0, how_many):
            self.log.error("did not find key {0} in the results"
                           .format(keys_not_found[i]))
| 42.090141 | 133 | 0.518605 |
0eb9e5332fcbe93b2aad99cb37245ef61334af08 | 1,116 | py | Python | model-optimizer/mo/ops/activation.py | zhoub/dldt | e42c01cf6e1d3aefa55e2c5df91f1054daddc575 | [
"Apache-2.0"
] | 3 | 2020-02-09T23:25:37.000Z | 2021-01-19T09:44:12.000Z | model-optimizer/mo/ops/activation.py | zhoub/dldt | e42c01cf6e1d3aefa55e2c5df91f1054daddc575 | [
"Apache-2.0"
] | null | null | null | model-optimizer/mo/ops/activation.py | zhoub/dldt | e42c01cf6e1d3aefa55e2c5df91f1054daddc575 | [
"Apache-2.0"
] | 2 | 2020-04-18T16:24:39.000Z | 2021-01-19T09:42:19.000Z | """
Copyright (c) 2018-2019 Intel Corporation
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
from mo.graph.graph import Graph
from mo.ops.op import Op
class Activation(Op):
op = 'Activation'
def __init__(self, graph: Graph, attrs: dict):
super().__init__(graph, {
'type': __class__.op,
'op': __class__.op,
'infer': None,
'in_ports_count': 1,
'out_ports_count': 1,
}, attrs)
def supported_attrs(self):
return ['operation']
def backend_attrs(self):
return [('type', 'operation'), 'alpha'] # operation --> type
| 29.368421 | 73 | 0.666667 |
9807770677c3c3fa7668d86b3c4d5fd5a4b5093b | 3,255 | py | Python | main.py | hbeemster/fastapi-todo | e604f1e7b7952d47c002bcd37b98cd0c32f6d39b | [
"MIT"
] | null | null | null | main.py | hbeemster/fastapi-todo | e604f1e7b7952d47c002bcd37b98cd0c32f6d39b | [
"MIT"
] | null | null | null | main.py | hbeemster/fastapi-todo | e604f1e7b7952d47c002bcd37b98cd0c32f6d39b | [
"MIT"
] | null | null | null | import random
import uuid
from datetime import timedelta
from typing import Optional
import uvicorn
from fastapi import Depends
from fastapi import FastAPI, Form
from fastapi import Request, Response
from fastapi.responses import HTMLResponse
from fastapi.templating import Jinja2Templates
from sqlalchemy.orm import Session
from database import Base
from database import SessionLocal
from database import engine
from models import create_todo, search_todos
from models import delete_todo
from models import get_todo
from models import get_todos
from models import update_todo
# Create all tables declared on Base at import time (no-op if they exist).
Base.metadata.create_all(bind=engine)
app = FastAPI()
# Server-rendered Jinja2 templates loaded from ./templates.
templates = Jinja2Templates(directory="templates")
def get_db():
    """FastAPI dependency: yield a DB session and always close it after the
    request completes."""
    session = SessionLocal()
    try:
        yield session
    finally:
        session.close()
@app.get("/todos")
async def todos(request: Request, db: Session = Depends(get_db)):
session_key = request.cookies.get("session_key", uuid.uuid4().hex)
todos = get_todos(db, session_key)
return todos
@app.get("/", response_class=HTMLResponse)
async def home(request: Request, db: Session = Depends(get_db)):
session_key = request.cookies.get("session_key", uuid.uuid4().hex)
todos = get_todos(db, session_key)
context = {
"request": request,
"todos": todos,
"title": "Home"
}
response = templates.TemplateResponse("home.html", context)
response.set_cookie(key="session_key", value=session_key, expires=259200) # 3 days
return response
@app.post("/add", response_class=HTMLResponse)
async def post_add(request: Request, content: str = Form(...), db: Session = Depends(get_db)):
session_key = request.cookies.get("session_key")
todo = create_todo(db, content=content, session_key=session_key)
context = {"request": request, "todo": todo}
return templates.TemplateResponse("todo/item.html", context)
@app.get("/edit/{item_id}", response_class=HTMLResponse)
async def get_edit(request: Request, item_id: int, db: Session = Depends(get_db)):
todo = get_todo(db, item_id)
context = {"request": request, "todo": todo}
return templates.TemplateResponse("todo/form.html", context)
@app.put("/edit/{item_id}", response_class=HTMLResponse)
async def put_edit(request: Request, item_id: int, content: str = Form(...), db: Session = Depends(get_db)):
todo = update_todo(db, item_id, content)
context = {"request": request, "todo": todo}
return templates.TemplateResponse("todo/item.html", context)
@app.delete("/delete/{item_id}", response_class=Response)
async def delete(item_id: int, db: Session = Depends(get_db)):
delete_todo(db, item_id)
@app.get("/search", response_class=HTMLResponse)
async def get_search(request: Request, search: Optional[str] = None, db: Session = Depends(get_db)):
session_key = request.cookies.get("session_key")
todos = search_todos(db, session_key, search)
if todos:
context = {"request": request, "todos": todos}
return templates.TemplateResponse("todo/items.html", context)
else:
context = {"request": request}
return templates.TemplateResponse("todo/no_items.html", context)
if __name__ == "__main__":
"""Run as module."""
uvicorn.run(app, host="0.0.0.0", port=8000)
| 32.227723 | 108 | 0.717358 |
f55f488008aa84628a583f7f26d9d5832aa94ab1 | 979 | py | Python | transformers/rephub/python-flask-server/setup.py | mapleknight/molecular-data-provider | 111418ed7efab3c393c22116854fd322ff367fb9 | [
"MIT"
] | 5 | 2020-08-28T09:30:16.000Z | 2021-12-29T16:00:26.000Z | transformers/rephub/python-flask-server/setup.py | mapleknight/molecular-data-provider | 111418ed7efab3c393c22116854fd322ff367fb9 | [
"MIT"
] | 19 | 2021-04-26T22:19:16.000Z | 2022-03-09T21:09:55.000Z | transformers/rephub/python-flask-server/setup.py | mapleknight/molecular-data-provider | 111418ed7efab3c393c22116854fd322ff367fb9 | [
"MIT"
] | 8 | 2020-07-09T18:50:57.000Z | 2022-01-20T16:01:33.000Z | # coding: utf-8
import sys
from setuptools import setup, find_packages
NAME = "repurposing-hub"
VERSION = "2.0.0"
# To install the library, run the following
#
# python setup.py install
#
# prerequisite: setuptools
# http://pypi.python.org/pypi/setuptools
REQUIRES = [
"connexion>=2.0.2",
"swagger-ui-bundle>=0.0.2",
"python_dateutil>=2.6.0"
]
setup(
name=NAME,
version=VERSION,
description="Transformer API for the Repurposing Hub",
author_email="translator@broadinstitute.org",
url="",
keywords=["OpenAPI", "Transformer API for the Repurposing Hub"],
install_requires=REQUIRES,
packages=find_packages(),
package_data={'': ['openapi/openapi.yaml']},
include_package_data=True,
entry_points={
'console_scripts': ['openapi_server=openapi_server.__main__:main']},
long_description="""\
No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator)
"""
)
| 24.475 | 110 | 0.697651 |
b57c1d6308a889ebc336cab898dc99971501250e | 5,516 | py | Python | tools/accuracy_checker/accuracy_checker/representation/segmentation_representation.py | allnes/open_model_zoo | 693ba31b3b7671f5fb8ecf8f9b8d670cfec21bc3 | [
"Apache-2.0"
] | 1 | 2019-10-31T06:38:49.000Z | 2019-10-31T06:38:49.000Z | tools/accuracy_checker/accuracy_checker/representation/segmentation_representation.py | allnes/open_model_zoo | 693ba31b3b7671f5fb8ecf8f9b8d670cfec21bc3 | [
"Apache-2.0"
] | 6 | 2020-09-26T01:24:39.000Z | 2022-02-10T02:16:03.000Z | tools/accuracy_checker/accuracy_checker/representation/segmentation_representation.py | allnes/open_model_zoo | 693ba31b3b7671f5fb8ecf8f9b8d670cfec21bc3 | [
"Apache-2.0"
] | 2 | 2020-10-11T13:47:20.000Z | 2021-08-12T08:08:06.000Z | """
Copyright (c) 2019 Intel Corporation
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
from enum import Enum
import numpy as np
from .base_representation import BaseRepresentation
from ..data_readers import BaseReader
from ..utils import remove_difficult
class GTMaskLoader(Enum):
    """Back-ends available for reading ground-truth segmentation masks from disk."""
    PILLOW = 0
    OPENCV = 1
    SCIPY = 2
    NIFTI = 3
    NUMPY = 4
class SegmentationRepresentation(BaseRepresentation):
    """Common marker base class for segmentation annotations and predictions."""
    pass
class SegmentationAnnotation(SegmentationRepresentation):
    """Ground-truth segmentation mask, loaded lazily from disk on first access."""

    # Maps a GTMaskLoader enum value to the registered BaseReader provider name.
    LOADERS = {
        GTMaskLoader.PILLOW: 'pillow_imread',
        GTMaskLoader.OPENCV: 'opencv_imread',
        GTMaskLoader.SCIPY: 'scipy_imread',
        GTMaskLoader.NIFTI: 'nifti_reader',
        GTMaskLoader.NUMPY: 'numpy_reader'
    }

    def __init__(self, identifier, path_to_mask, mask_loader=GTMaskLoader.PILLOW):
        """
        Args:
            identifier: object identifier (e.g. image name).
            path_to_mask: path where segmentation mask should be loaded from. The path is relative to data source.
            mask_loader: back-end, used to load segmentation masks.
        """
        super().__init__(identifier)
        self._mask_path = path_to_mask
        self._mask_loader = mask_loader
        self._mask = None  # lazily populated cache of the decoded mask

    @property
    def mask(self):
        """Decoded mask as uint8 ndarray; reads the file on first access."""
        return self._mask if self._mask is not None else self._load_mask()

    @mask.setter
    def mask(self, value):
        self._mask = value

    def _load_mask(self):
        if self._mask is None:
            loader = BaseReader.provide(self.LOADERS.get(self._mask_loader), self.metadata['data_source'])
            if self._mask_loader == GTMaskLoader.PILLOW:
                # Ground-truth masks are label maps, not RGB images.
                loader.convert_to_rgb = False
            mask = loader.read(self._mask_path)
            # FIX: cache the decoded mask so repeated `mask` accesses do not
            # re-read and re-convert the file (previously the result was
            # returned without being stored, making the cache ineffective).
            self._mask = mask.astype(np.uint8)
        return self._mask
class SegmentationPrediction(SegmentationRepresentation):
    """Model output for semantic segmentation: per-class score map."""
    def __init__(self, identifiers, mask):
        """
        Args:
            identifiers: object identifier (e.g. image name).
            mask: array with shape (n_classes, height, width) of probabilities at each location.
        """
        super().__init__(identifiers)
        self.mask = mask
class BrainTumorSegmentationAnnotation(SegmentationAnnotation):
    """Brain-tumor segmentation ground truth; masks come from NIfTI files by default."""
    def __init__(self, identifier, path_to_mask, loader=GTMaskLoader.NIFTI, box=None):
        super().__init__(identifier, path_to_mask, loader)
        # Optional crop box; presumably applied downstream by a metric/adapter -- confirm.
        self.box = box
class BrainTumorSegmentationPrediction(SegmentationPrediction):
    """Prediction counterpart of BrainTumorSegmentationAnnotation."""
    pass
class CoCoInstanceSegmentationRepresentation(SegmentationRepresentation):
    """MS COCO style instance segmentation: one RLE/polygon mask per instance."""

    def __init__(self, identifier, mask, labels):
        try:
            # pylint: disable=W0611
            # Import performed only to fail fast when pycocotools is missing.
            import pycocotools.mask as maskUtils  # noqa: F401
        except ImportError:
            raise ValueError('can not create representation')
        super().__init__(identifier)
        self.raw_mask = mask      # list of polygons / uncompressed RLE / compressed RLE
        self.labels = labels      # per-instance category labels
        self._mask = None         # lazily converted RLE cache

    @property
    def mask(self):
        """Per-instance masks converted to compressed RLE (converted on first access)."""
        return self._mask if self._mask is not None else self._load_mask()

    def _load_mask(self):
        masks = []
        image_size = self.metadata['image_size']
        height, width, _ = image_size if len(np.shape(image_size)) == 1 else image_size[0]
        for mask in self.raw_mask:
            converted_mask = self._convert_mask(mask, height, width)
            masks.append(converted_mask)
        self._mask = masks
        return self._mask

    @staticmethod
    def _convert_mask(mask, height, width):
        # FIX: the previous implementation referenced `maskUtils`, which was
        # only a local variable of __init__ and therefore raised NameError here.
        import pycocotools.mask as maskUtils
        if isinstance(mask, list):
            # Polygon format: merge per-part RLEs into a single RLE.
            rles = maskUtils.frPyObjects(mask, height, width)
            rle = maskUtils.merge(rles)
        elif isinstance(mask['counts'], list):
            # uncompressed RLE
            rle = maskUtils.frPyObjects(mask, height, width)
        else:
            # already a compressed RLE
            rle = mask
        return rle

    @mask.setter
    def mask(self, value):
        self._mask = value

    @property
    def size(self):
        """Number of instances."""
        return len(self.raw_mask)

    @property
    def areas(self):
        """Per-instance mask areas; uses precomputed values from metadata when present."""
        # FIX: the previous implementation called `pycocotools.mask.area` even
        # though the `pycocotools` module name was never bound (NameError).
        import pycocotools.mask as maskUtils
        precomputed_areas = self.metadata.get('areas')
        if precomputed_areas:
            return precomputed_areas
        return [maskUtils.area(mask) for mask in self.mask]
class CoCoInstanceSegmentationAnnotation(CoCoInstanceSegmentationRepresentation):
    """Ground-truth COCO instance segmentation."""
    pass
class CoCocInstanceSegmentationPrediction(CoCoInstanceSegmentationRepresentation):
    """Predicted COCO instance segmentation with per-instance confidence scores.

    NOTE(review): class name keeps the original (misspelled) 'CoCoc' spelling
    because external code refers to it by this name.
    """
    def __init__(self, identifier, mask, labels, scores):
        super().__init__(identifier, mask, labels)
        self.scores = scores
    def remove(self, indexes):
        """Drop the instances at *indexes* from labels, masks and scores.

        NOTE(review): operates on the converted `mask` property, not on
        `raw_mask`, and relies on np.delete over a list of RLE dicts -- the
        ordering of these deletions is significant; confirm before changing.
        """
        self.labels = np.delete(self.labels, indexes)
        self.mask = np.delete(self.mask, indexes)
        self.scores = np.delete(self.scores, indexes)
        difficult_boxes = self.metadata.get('difficult_boxes')
        if not difficult_boxes:
            return
        # Re-index the 'difficult' markers to account for the removed instances.
        new_difficult_boxes = remove_difficult(difficult_boxes, indexes)
        self.metadata['difficult_boxes'] = new_difficult_boxes
| 29.340426 | 114 | 0.664068 |
2fd4bc355c51db7767cd09e0a1348ad23011c141 | 1,255 | py | Python | serial_json/examples.py | stevarino/serial_json | 661ff4e002e4e2f2fbadf7673f3f161d0a3445af | [
"MIT"
] | null | null | null | serial_json/examples.py | stevarino/serial_json | 661ff4e002e4e2f2fbadf7673f3f161d0a3445af | [
"MIT"
] | null | null | null | serial_json/examples.py | stevarino/serial_json | 661ff4e002e4e2f2fbadf7673f3f161d0a3445af | [
"MIT"
] | null | null | null | import argparse
from collections import OrderedDict
import json
import os.path as path
import resource
import time
import serial_json
# Registry of runnable examples: name -> data file (empty string means "run all").
# FIX: previously built via a list comprehension calling
# OrderedDict.__setitem__ for its side effects -- replaced with direct,
# order-preserving construction producing the identical mapping.
examples = OrderedDict([
    ('all', ''),
    ('us_house', 'ep-us-house.json'),
])
def run_example(example, parser=None):
    """Parse *example* (a JSON file under ./data) with *parser*, printing
    elapsed seconds and peak-RSS delta.

    NOTE(review): Python 2 code (print statement below). `resource` is
    Unix-only, and ru_maxrss units are platform-dependent (KiB on Linux,
    bytes on macOS) -- confirm before comparing numbers across machines.
    """
    with open(path.join(path.dirname(__file__), 'data', example)) as fp:
        start_mem = resource.getrusage(resource.RUSAGE_SELF).ru_maxrss
        start_time = time.time()
        # Drain the parser's iterator; we only care about timing/memory.
        for k in parser(fp):
            pass
        delta_m = resource.getrusage(resource.RUSAGE_SELF).ru_maxrss - start_mem
        delta_t = time.time() - start_time
        print delta_t, delta_m
if __name__ == '__main__':
    # CLI: `python examples.py <example> [--use_json]` -- compares the
    # serial_json parser against the stdlib json parser.
    parser = argparse.ArgumentParser()
    parser.add_argument('example', choices=examples.keys())
    parser.add_argument('--use_json', action='store_true')
    args = parser.parse_args()
    json_parser = serial_json.load
    if args.use_json:
        json_parser = json.load
    example = examples[args.example]
    if example:
        run_example(example, json_parser)
    else:
        # Empty file name ('all') means: run every registered example.
        for key in examples:
            if examples[key]:
                print key
                run_example(examples[key], json_parser)
| 26.702128 | 80 | 0.655777 |
46c25383494c22943a3bd8470ca98a25c6d58a40 | 1,143 | py | Python | tedygram/images/migrations/0003_auto_20180702_1808.py | wooheet/tedygram | 481c5d1fd44f12cf9f5c62e0f12dd3f80c0c1de4 | [
"MIT"
] | null | null | null | tedygram/images/migrations/0003_auto_20180702_1808.py | wooheet/tedygram | 481c5d1fd44f12cf9f5c62e0f12dd3f80c0c1de4 | [
"MIT"
] | null | null | null | tedygram/images/migrations/0003_auto_20180702_1808.py | wooheet/tedygram | 481c5d1fd44f12cf9f5c62e0f12dd3f80c0c1de4 | [
"MIT"
] | null | null | null | # Generated by Django 2.0.6 on 2018-07-02 09:08
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    """Auto-generated schema migration (2018-07-02).

    Orders Image by newest first, and makes the comment/creator/like foreign
    keys nullable with PROTECT on delete. Applied migrations should not be
    hand-edited.
    """
    dependencies = [
        ('images', '0002_auto_20180702_1619'),
    ]
    operations = [
        migrations.AlterModelOptions(
            name='image',
            options={'ordering': ['-created_at']},
        ),
        migrations.AlterField(
            model_name='comment',
            name='image',
            field=models.ForeignKey(null=True, on_delete=django.db.models.deletion.PROTECT, related_name='comments', to='images.Image'),
        ),
        migrations.AlterField(
            model_name='image',
            name='creator',
            field=models.ForeignKey(null=True, on_delete=django.db.models.deletion.PROTECT, related_name='images', to=settings.AUTH_USER_MODEL),
        ),
        migrations.AlterField(
            model_name='like',
            name='image',
            field=models.ForeignKey(null=True, on_delete=django.db.models.deletion.PROTECT, related_name='likes', to='images.Image'),
        ),
    ]
| 32.657143 | 144 | 0.622047 |
33c380b6c81b6d8cd1fadf7dbbe9fe8a433fa582 | 7,399 | py | Python | cupyx/scipy/sparse/dia.py | prkhrsrvstv1/cupy | ea86c8225b575af9d2855fb77a306cf86fd098ea | [
"MIT"
] | 6,180 | 2016-11-01T14:22:30.000Z | 2022-03-31T08:39:20.000Z | cupyx/scipy/sparse/dia.py | prkhrsrvstv1/cupy | ea86c8225b575af9d2855fb77a306cf86fd098ea | [
"MIT"
] | 6,281 | 2016-12-22T07:42:31.000Z | 2022-03-31T19:57:02.000Z | cupyx/scipy/sparse/dia.py | prkhrsrvstv1/cupy | ea86c8225b575af9d2855fb77a306cf86fd098ea | [
"MIT"
] | 829 | 2017-02-23T05:46:12.000Z | 2022-03-27T17:40:03.000Z | try:
import scipy.sparse
_scipy_available = True
except ImportError:
_scipy_available = False
import cupy
from cupy import _core
from cupyx.scipy.sparse import csc
from cupyx.scipy.sparse import data
from cupyx.scipy.sparse import _util
# TODO(leofang): The current implementation is CSC-based, which is troublesome
# on ROCm/HIP. We should convert it to CSR-based for portability.
class dia_matrix(data._data_matrix):
    """Sparse matrix with DIAgonal storage.

    Now it has only one initializer format below:

    ``dia_matrix((data, offsets))``

    Args:
        arg1: Arguments for the initializer.
        shape (tuple): Shape of a matrix. Its length must be two.
        dtype: Data type. It must be an argument of :class:`numpy.dtype`.
        copy (bool): If ``True``, copies of given arrays are always used.

    .. seealso::
        :class:`scipy.sparse.dia_matrix`
    """
    format = 'dia'
    def __init__(self, arg1, shape=None, dtype=None, copy=False):
        # Accept either a host scipy sparse matrix (converted to DIA) or a
        # (data, offsets) tuple plus an explicit shape.
        if _scipy_available and scipy.sparse.issparse(arg1):
            x = arg1.todia()
            data = x.data
            offsets = x.offsets
            shape = x.shape
            dtype = x.dtype
            copy = False
        elif isinstance(arg1, tuple):
            data, offsets = arg1
            if shape is None:
                raise ValueError('expected a shape argument')
        else:
            raise ValueError(
                'unrecognized form for dia_matrix constructor')
        data = cupy.array(data, dtype=dtype, copy=copy)
        data = cupy.atleast_2d(data)
        offsets = cupy.array(offsets, dtype='i', copy=copy)
        offsets = cupy.atleast_1d(offsets)
        if offsets.ndim != 1:
            raise ValueError('offsets array must have rank 1')
        if data.ndim != 2:
            raise ValueError('data array must have rank 2')
        if data.shape[0] != len(offsets):
            raise ValueError(
                'number of diagonals (%d) does not match the number of '
                'offsets (%d)'
                % (data.shape[0], len(offsets)))
        # Sorting adjacent-compares to detect duplicate diagonal offsets.
        sorted_offsets = cupy.sort(offsets)
        if (sorted_offsets[:-1] == sorted_offsets[1:]).any():
            raise ValueError('offset array contains duplicate values')
        self.data = data
        self.offsets = offsets
        if not _util.isshape(shape):
            raise ValueError('invalid shape (must be a 2-tuple of int)')
        self._shape = int(shape[0]), int(shape[1])
    def _with_data(self, data, copy=True):
        """Returns a matrix with the same sparsity structure as self,
        but with different data. By default the structure arrays are copied.
        """
        if copy:
            return dia_matrix((data, self.offsets.copy()), shape=self.shape)
        else:
            return dia_matrix((data, self.offsets), shape=self.shape)
    def get(self, stream=None):
        """Returns a copy of the array on host memory.

        Args:
            stream (cupy.cuda.Stream): CUDA stream object. If it is given, the
                copy runs asynchronously. Otherwise, the copy is synchronous.

        Returns:
            scipy.sparse.dia_matrix: Copy of the array on host memory.
        """
        if not _scipy_available:
            raise RuntimeError('scipy is not available')
        data = self.data.get(stream)
        offsets = self.offsets.get(stream)
        return scipy.sparse.dia_matrix((data, offsets), shape=self._shape)
    def get_shape(self):
        """Returns the shape of the matrix.

        Returns:
            tuple: Shape of the matrix.
        """
        return self._shape
    def getnnz(self, axis=None):
        """Returns the number of stored values, including explicit zeros.

        Args:
            axis: Not supported yet.

        Returns:
            int: The number of stored values.
        """
        if axis is not None:
            raise NotImplementedError(
                'getnnz over an axis is not implemented for DIA format')
        m, n = self.shape
        # Sum, per diagonal, the number of entries that actually fall inside
        # the m x n matrix (a positive offset shortens from the right, a
        # negative one from the top).
        nnz = _core.ReductionKernel(
            'int32 offsets, int32 m, int32 n', 'int32 nnz',
            'offsets > 0 ? min(m, n - offsets) : min(m + offsets, n)',
            'a + b', 'nnz = a', '0', 'dia_nnz')(self.offsets, m, n)
        return int(nnz)
    def toarray(self, order=None, out=None):
        """Returns a dense matrix representing the same value."""
        # Dense conversion is delegated to the CSC representation.
        return self.tocsc().toarray(order=order, out=out)
    def tocsc(self, copy=False):
        """Converts the matrix to Compressed Sparse Column format.

        Args:
            copy (bool): If ``False``, it shares data arrays as much as
                possible. Actually this option is ignored because all
                arrays in a matrix cannot be shared in dia to csc conversion.

        Returns:
            cupyx.scipy.sparse.csc_matrix: Converted matrix.
        """
        if self.data.size == 0:
            return csc.csc_matrix(self.shape, dtype=self.dtype)
        num_rows, num_cols = self.shape
        num_offsets, offset_len = self.data.shape
        # For each stored element compute its row index, and a mask selecting
        # entries that lie inside the matrix and are nonzero.
        row, mask = _core.ElementwiseKernel(
            'int32 offset_len, int32 offsets, int32 num_rows, '
            'int32 num_cols, T data',
            'int32 row, bool mask',
            '''
            int offset_inds = i % offset_len;
            row = offset_inds - offsets;
            mask = (row >= 0 && row < num_rows && offset_inds < num_cols
                    && data != T(0));
            ''',
            'cupyx_scipy_sparse_dia_tocsc')(offset_len, self.offsets[:, None],
                                            num_rows, num_cols, self.data)
        # Column pointers: cumulative count of surviving entries per column.
        # NOTE(review): this slice assumes offset_len <= num_cols; confirm for
        # matrices whose data width exceeds the column count.
        indptr = cupy.zeros(num_cols + 1, dtype='i')
        indptr[1: offset_len + 1] = cupy.cumsum(mask.sum(axis=0))
        indptr[offset_len + 1:] = indptr[offset_len]
        indices = row.T[mask.T].astype('i', copy=False)
        data = self.data.T[mask.T]
        return csc.csc_matrix(
            (data, indices, indptr), shape=self.shape, dtype=self.dtype)
    def tocsr(self, copy=False):
        """Converts the matrix to Compressed Sparse Row format.

        Args:
            copy (bool): If ``False``, it shares data arrays as much as
                possible. Actually this option is ignored because all
                arrays in a matrix cannot be shared in dia to csr conversion.

        Returns:
            cupyx.scipy.sparse.csc_matrix: Converted matrix.
        """
        # Implemented via CSC; see the module-level TODO about HIP/ROCm.
        return self.tocsc().tocsr()
    def diagonal(self, k=0):
        """Returns the k-th diagonal of the matrix.

        Args:
            k (int, optional): Which diagonal to get, corresponding to elements
                a[i, i+k]. Default: 0 (the main diagonal).

        Returns:
            cupy.ndarray : The k-th diagonal.
        """
        rows, cols = self.shape
        if k <= -rows or k >= cols:
            # Diagonal lies entirely outside the matrix.
            return cupy.empty(0, dtype=self.data.dtype)
        idx, = cupy.nonzero(self.offsets == k)
        first_col, last_col = max(0, k), min(rows + k, cols)
        if idx.size == 0:
            # Diagonal not stored -> it is all zeros.
            return cupy.zeros(last_col - first_col, dtype=self.data.dtype)
        return self.data[idx[0], first_col:last_col]
def isspmatrix_dia(x):
    """Checks if a given matrix is of DIA format.

    Returns:
        bool: Returns if ``x`` is :class:`cupyx.scipy.sparse.dia_matrix`.
    """
    return isinstance(x, dia_matrix)
| 33.631818 | 79 | 0.582917 |
1e041d6e230d85cc94929345a7d0a82b0d1561f1 | 1,466 | py | Python | aries_cloudagent/config/tests/test_argparse.py | baens/aries-cloudagent-python | 0ff9a490f60bb8de686c4e26fa9a0dd32db0cb33 | [
"Apache-2.0"
] | 7 | 2020-07-07T15:44:41.000Z | 2022-03-26T21:20:41.000Z | aries_cloudagent/config/tests/test_argparse.py | totemprotocol/aries-fl | dd78dcebc771971abfee301b80cdd5d246c14840 | [
"Apache-2.0"
] | 6 | 2021-03-10T20:05:19.000Z | 2022-02-27T05:41:09.000Z | aries_cloudagent/config/tests/test_argparse.py | totemprotocol/aries-fl | dd78dcebc771971abfee301b80cdd5d246c14840 | [
"Apache-2.0"
] | 2 | 2019-12-02T18:59:07.000Z | 2020-06-03T18:58:20.000Z | import itertools
from argparse import ArgumentParser
from asynctest import TestCase as AsyncTestCase, mock as async_mock
from .. import argparse
class TestArgParse(AsyncTestCase):
    """Smoke tests for the agent's CLI argument groups."""
    async def test_groups(self):
        """Test optional argument parsing."""
        parser = ArgumentParser()
        # Every registered group except TransportGroup should parse an
        # empty command line without error (all its arguments are optional).
        groups = (
            g
            for g in argparse.group.get_registered()
            if g is not argparse.TransportGroup
        )
        argparse.load_argument_groups(parser, *groups)
        parser.parse_args([])
    async def test_transport_settings(self):
        """Test required argument parsing."""
        parser = ArgumentParser()
        group = argparse.TransportGroup()
        group.add_arguments(parser)
        # With no transports given, the parser must reject the command line
        # (patched exit() so the test process is not terminated).
        with async_mock.patch.object(parser, "exit") as exit_parser:
            parser.parse_args([])
            exit_parser.assert_called_once()
        result = parser.parse_args(
            [
                "--inbound-transport",
                "http",
                "0.0.0.0",
                "80",
                "--outbound-transport",
                "http",
            ]
        )
        assert result.inbound_transports == [["http", "0.0.0.0", "80"]]
        assert result.outbound_transports == ["http"]
        # get_settings maps the parsed namespace onto config keys.
        settings = group.get_settings(result)
        assert settings.get("transport.inbound_configs") == [["http", "0.0.0.0", "80"]]
        assert settings.get("transport.outbound_configs") == ["http"]
| 28.192308 | 87 | 0.577763 |
f78c79ef8d72b6c67d0b5f4d8efa5b143576a45b | 1,273 | py | Python | mysite/urls.py | dariodip/django_polls | b79264b71f8b0581b9d922b4a145e0db4972215b | [
"MIT"
] | 1 | 2018-04-04T19:24:50.000Z | 2018-04-04T19:24:50.000Z | mysite/urls.py | dariodip/django_polls | b79264b71f8b0581b9d922b4a145e0db4972215b | [
"MIT"
] | null | null | null | mysite/urls.py | dariodip/django_polls | b79264b71f8b0581b9d922b4a145e0db4972215b | [
"MIT"
] | null | null | null | """mysite URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/1.10/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: url(r'^$', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: url(r'^$', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.conf.urls import url, include
2. Add a URL to urlpatterns: url(r'^blog/', include('blog.urls'))
"""
from django.conf.urls import url, include
from django.contrib import admin
from django.contrib.auth import get_user_model
from rest_framework import routers, serializers
class UserSerializer(serializers.HyperlinkedModelSerializer):
    """DRF serializer exposing basic user fields.

    NOTE(review): defined but never wired to a viewset/router below --
    confirm whether a UserViewSet registration was intended.
    """
    class Meta:
        model = get_user_model()
        fields = ('url', 'username', 'email', 'is_staff')
# NOTE(review): router is instantiated but nothing is registered on it and it
# is never referenced in urlpatterns -- dead code or unfinished wiring.
router = routers.DefaultRouter()
urlpatterns = [
    url(r'^', include('polls.urls'), name='index'),
    # NOTE(review): includes the same polls.urls a second time under /api/ --
    # confirm this duplication is intended.
    url(r'^api/', include('polls.urls', namespace='polls')),
    url(r'^admin/', admin.site.urls),
    url(r'^api-auth/', include('rest_framework.urls', namespace='rest_framework')),
]
| 33.5 | 83 | 0.700707 |
0b1b4ed3737bef3596ff4c5ea2f6ce29c08a44f7 | 121 | py | Python | .history/CourseLessions/FlowControlWithLoops/if_else_20210623204425.py | minefarmer/Complete-Coding-E-Degree | 7044d32b155d0fb2520c3234a0a4e3b2b44fa84b | [
"Unlicense"
] | null | null | null | .history/CourseLessions/FlowControlWithLoops/if_else_20210623204425.py | minefarmer/Complete-Coding-E-Degree | 7044d32b155d0fb2520c3234a0a4e3b2b44fa84b | [
"Unlicense"
] | null | null | null | .history/CourseLessions/FlowControlWithLoops/if_else_20210623204425.py | minefarmer/Complete-Coding-E-Degree | 7044d32b155d0fb2520c3234a0a4e3b2b44fa84b | [
"Unlicense"
] | null | null | null | """[If/Else]
The focus of this section is on controlling the flow of data and on coding with loops,
using if and else statements.
""" | 30.25 | 81 | 0.752066 |
26f917523e4517a292ab8ef6208def025c3b6580 | 2,409 | py | Python | cache.py | bzaczynski/autoautelion | 5f60b74c64eb29d162b0ba8c2f3386e24bb7a11b | [
"MIT"
] | null | null | null | cache.py | bzaczynski/autoautelion | 5f60b74c64eb29d162b0ba8c2f3386e24bb7a11b | [
"MIT"
] | 4 | 2018-12-27T13:32:30.000Z | 2021-04-21T14:19:33.000Z | cache.py | bzaczynski/autoautelion | 5f60b74c64eb29d162b0ba8c2f3386e24bb7a11b | [
"MIT"
] | null | null | null | """
Caching layer for Autelion status utilizing Redis.
Usage:
$ export REDIS_URL=redis://localhost
>>> import cache
>>> autelion = cache.get_autelion()
>>> if autelion is not None:
... print(autelion.status, autelion.updated_at)
...
>>> cache.set_autelion({'key1': 'value1', (...)})
"""
import os
import json
import collections
import logging
from datetime import datetime
import redis
# NOTE(review): basicConfig at import time configures the root logger as a
# side effect -- fine for an app entry point, surprising for a library module.
logging.basicConfig(level=logging.INFO)
logger = logging.getLogger(__name__)
# Cached state: `status` is the decoded JSON payload, `updated_at` the UTC
# datetime of the last cache write.
Autelion = collections.namedtuple('Autelion', 'status updated_at')
class RedisCache:
    """Thin wrapper that connects to Redis using the REDIS_URL environment variable."""

    def __init__(self):
        url = os.environ.get('REDIS_URL')
        if url is None:
            # Fail loudly when the deployment forgot to configure Redis.
            raise redis.ConnectionError('Undefined REDIS_URL variable')
        self._connection = redis.from_url(url)

    def __getattr__(self, name):
        """Forward unknown attribute access straight to the Redis connection."""
        return getattr(self._connection, name)
def get_autelion() -> 'Autelion | None':
    """Return cached Autelion state, or None on a cache miss or Redis/JSON error.

    FIX: the previous annotation claimed `-> Autelion` although the function
    also returns None (miss, Redis failure, bad JSON); the fall-through return
    is now explicit.
    """
    try:
        redis_cache = RedisCache()
        status, updated_at = redis_cache.mget([
            'autelion_status',
            'autelion_updated_at'
        ])
        if status is None:
            logger.info('Cache miss')
            return None
        try:
            # Stored as an ISO-8601 string; mget returns bytes.
            updated_at = datetime.fromisoformat(updated_at.decode('utf-8'))
        except (ValueError, AttributeError):
            # AttributeError covers a missing (None) timestamp value.
            logger.error('Unable to parse last update: "%s"', updated_at)
            updated_at = None
        return Autelion(json.loads(status), updated_at)
    except redis.RedisError as ex:
        logger.error('Could not read from Redis: %s', ex)
    except json.JSONDecodeError as ex:
        logger.error('Unable to decode JSON: %s', ex)
    return None
def set_autelion(status: dict) -> None:
    """Serialize status to JSON, add a UTC timestamp and store both in cache.

    Errors (unserializable status, Redis failure) are logged, not raised.
    """
    try:
        autelion = Autelion(status=status, updated_at=datetime.utcnow())
        logger.info('Updating cache')
        RedisCache().mset({
            'autelion_status': json.dumps(autelion.status),
            'autelion_updated_at': autelion.updated_at.isoformat(),
        })
    except TypeError as ex:
        # FIX: `ex` was bound but never logged, hiding the actual cause.
        logger.error('Unable to format status as JSON: %s', ex)
    except redis.RedisError as ex:
        logger.error('Could not write to Redis: %s', ex)
| 26.472527 | 75 | 0.649647 |
ba516b55a3397f92a0f58bd7b84021e2b6f82f2d | 122 | py | Python | server/__init__.py | mica-framework/cli | a5a851a73d7b9bd0431e9c8bb0c8fca401b32ccf | [
"MIT"
] | 5 | 2019-06-14T12:32:56.000Z | 2022-03-17T20:55:48.000Z | server/__init__.py | mica-framework/cli | a5a851a73d7b9bd0431e9c8bb0c8fca401b32ccf | [
"MIT"
] | null | null | null | server/__init__.py | mica-framework/cli | a5a851a73d7b9bd0431e9c8bb0c8fca401b32ccf | [
"MIT"
] | null | null | null | # import the modules
from .mica_server import * #FIXME we could make that dynamically editable within the config.yml file! | 61 | 101 | 0.803279 |
9f68c282003e4df5543640c10f30cd331ca4ef92 | 1,198 | py | Python | tests/test_viewer.py | quapka/javus | 577e0c2dabfaea39d7ffacd42100d8a5f4cd738c | [
"MIT"
] | 1 | 2020-09-22T01:38:21.000Z | 2020-09-22T01:38:21.000Z | tests/test_viewer.py | petrs/javus | 6927c824d5e6b574a6a323c87bd5aa117eca5b00 | [
"MIT"
] | null | null | null | tests/test_viewer.py | petrs/javus | 6927c824d5e6b574a6a323c87bd5aa117eca5b00 | [
"MIT"
] | 1 | 2020-07-26T07:20:47.000Z | 2020-07-26T07:20:47.000Z | import pytest
from javus.viewer import format_duration, add_whitespace_id
@pytest.mark.parametrize(
    "utc_timestamp, duration",
    [
        ("0", "0ms"),
        ("1", "1s"),
        ("0.123", "123ms"),
        ("59.0", "59s"),
        ("61", "01:01"),
        ("61.013", "01:01.013"),
    ],
)
def test_format_duration(utc_timestamp, duration):
    """format_duration renders a seconds-as-string value into ms / s / mm:ss[.mmm]."""
    assert format_duration(utc_timestamp) == duration
@pytest.mark.parametrize(
    "_id",
    [
        ("5ede2f24b69c4aa58f209a71"),
        ("5ede3f4ae7983ce6b81868f2"),
        ("5edfa555327223721fce9a8b"),
        ("5edfb1a58ebd9320dc42d50e"),
        ("5edfbbe291e4823727f4c62e"),
        ("5edfbbf14207c1387a7b7267"),
        ("5edfc44712c3d80d4b9f85d7"),
        ("5edfc594741a40e4fe12fd16"),
        ("5edfc6f429f8cc28f8d0ce97"),
        ("5edfc75f392ee85fd112be1c"),
        ("5ee1f79d513ea7911c3ec4a7"),
        ("5ee1fdee1667196caa667c81"),
        ("5ee203fc3225a7d137c750da"),
        ("5ee22329163233c47831cee6"),
        ("5ee223d1b91ae419621f2fd1"),
        ("5ee2368031d2420572a5e8fe"),
        ("5ee23fd32d2894e2e81f8ae5"),
    ],
)
def test_add_whitespace_id(_id):
    """add_whitespace_id only inserts spaces: stripping them must recover the id."""
    assert add_whitespace_id(_id).replace(" ", "") == _id
| 27.227273 | 59 | 0.608514 |
403d8f534912b34576857d8945d384beea1c4298 | 3,905 | py | Python | tests/api_tests.py | afqueiruga/torchdiffeq | c03c46e4d0fe0eba163fdcbc46b0c96947886712 | [
"MIT"
] | null | null | null | tests/api_tests.py | afqueiruga/torchdiffeq | c03c46e4d0fe0eba163fdcbc46b0c96947886712 | [
"MIT"
] | null | null | null | tests/api_tests.py | afqueiruga/torchdiffeq | c03c46e4d0fe0eba163fdcbc46b0c96947886712 | [
"MIT"
] | null | null | null | import unittest
import torch
import torchdiffeq
from problems import construct_problem
eps = 1e-12  # tolerance for comparing solver output against the analytic solution
# Run the whole suite in double precision, on GPU when one is available.
torch.set_default_dtype(torch.float64)
TEST_DEVICE = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
def max_abs(tensor):
    """Return the largest absolute value in *tensor* as a 0-dim tensor."""
    return tensor.abs().max()
class TestCollectionState(unittest.TestCase):
    """Checks that odeint accepts tuple-valued state: each tuple component is
    integrated independently and must match the analytic solution `sol`."""
    def test_dopri5(self):
        f, y0, t_points, sol = construct_problem(TEST_DEVICE)
        # Lift the scalar problem to a 2-tuple state.
        tuple_f = lambda t, y: (f(t, y[0]), f(t, y[1]))
        tuple_y0 = (y0, y0)
        tuple_y = torchdiffeq.odeint(tuple_f, tuple_y0, t_points, method='dopri5')
        max_error0 = (sol - tuple_y[0]).max()
        max_error1 = (sol - tuple_y[1]).max()
        self.assertLess(max_error0, eps)
        self.assertLess(max_error1, eps)
    def test_dopri5_gradient(self):
        f, y0, t_points, sol = construct_problem(TEST_DEVICE)
        tuple_f = lambda t, y: (f(t, y[0]), f(t, y[1]))
        # Gradcheck each tuple component separately.
        for i in range(2):
            func = lambda y0, t_points: torchdiffeq.odeint(tuple_f, (y0, y0), t_points, method='dopri5')[i]
            self.assertTrue(torch.autograd.gradcheck(func, (y0, t_points)))
    def test_adams(self):
        f, y0, t_points, sol = construct_problem(TEST_DEVICE)
        tuple_f = lambda t, y: (f(t, y[0]), f(t, y[1]))
        tuple_y0 = (y0, y0)
        tuple_y = torchdiffeq.odeint(tuple_f, tuple_y0, t_points, method='adams')
        max_error0 = (sol - tuple_y[0]).max()
        max_error1 = (sol - tuple_y[1]).max()
        self.assertLess(max_error0, eps)
        self.assertLess(max_error1, eps)
    def test_adams_gradient(self):
        f, y0, t_points, sol = construct_problem(TEST_DEVICE)
        tuple_f = lambda t, y: (f(t, y[0]), f(t, y[1]))
        for i in range(2):
            func = lambda y0, t_points: torchdiffeq.odeint(tuple_f, (y0, y0), t_points, method='adams')[i]
            self.assertTrue(torch.autograd.gradcheck(func, (y0, t_points)))
    def test_adaptive_heun(self):
        f, y0, t_points, sol = construct_problem(TEST_DEVICE)
        tuple_f = lambda t, y: (f(t, y[0]), f(t, y[1]))
        tuple_y0 = (y0, y0)
        tuple_y = torchdiffeq.odeint(tuple_f, tuple_y0, t_points, method='adaptive_heun')
        max_error0 = (sol - tuple_y[0]).max()
        max_error1 = (sol - tuple_y[1]).max()
        self.assertLess(max_error0, eps)
        self.assertLess(max_error1, eps)
    def test_adaptive_heun_gradient(self):
        f, y0, t_points, sol = construct_problem(TEST_DEVICE)
        tuple_f = lambda t, y: (f(t, y[0]), f(t, y[1]))
        for i in range(2):
            func = lambda y0, t_points: torchdiffeq.odeint(tuple_f, (y0, y0), t_points, method='adaptive_heun')[i]
            self.assertTrue(torch.autograd.gradcheck(func, (y0, t_points)))
class TestOpenSet(unittest.TestCase):
    """Verifies the `enforce_openset` option: with a right-hand side that
    jumps at t=1.0, RK4 must not evaluate f at the closed right endpoint.

    NOTE(review): f returns Python floats rather than tensors here --
    presumably odeint tolerates that; confirm if this test starts failing.
    """
    def test(self):
        def f(t,x):
            # Step function: 0 before t=1.0, 1 from t=1.0 on.
            if t<1.0:
                return 0.0
            else:
                return 1.0
        t_points = torch.Tensor([0.0,1.0])
        y0 = torch.Tensor([0.0])
        # This should pick up a bad 1.0 with the default
        ys = torchdiffeq.odeint(f, y0, t_points, method='rk4')
        self.assertNotEqual(ys[1].cpu().item(), 0.0)
        # This should be the same answer
        ys = torchdiffeq.odeint(f, y0, t_points, method='rk4',
                                options=dict(enforce_openset=False))
        self.assertNotEqual(ys[1].cpu().item(), 0.0)
        # This should be right
        ys = torchdiffeq.odeint(f, y0, t_points, method='rk4',
                                options=dict(enforce_openset=True))
        self.assertEqual(ys[1].cpu().item(), 0.0)
        # It doesn't matter with Euler
        ys = torchdiffeq.odeint(f, y0, t_points, method='euler',
                                options=dict(enforce_openset=False))
        self.assertEqual(ys[1].cpu().item(), 0.0)
if __name__ == '__main__':
    # Discover and run every TestCase in this module.
    unittest.main()
| 34.866071 | 114 | 0.595903 |
b082952854b5211c999e81ae828b9ffb7cf9fd60 | 1,656 | py | Python | settings/FreezeInterventionPanel_settings.py | bopopescu/Lauecollect | 60ae2b05ea8596ba0decf426e37aeaca0bc8b6be | [
"MIT"
] | null | null | null | settings/FreezeInterventionPanel_settings.py | bopopescu/Lauecollect | 60ae2b05ea8596ba0decf426e37aeaca0bc8b6be | [
"MIT"
] | 1 | 2019-10-22T21:28:31.000Z | 2019-10-22T21:39:12.000Z | settings/FreezeInterventionPanel_settings.py | bopopescu/Lauecollect | 60ae2b05ea8596ba0decf426e37aeaca0bc8b6be | [
"MIT"
] | 2 | 2019-06-06T15:06:46.000Z | 2020-07-20T02:03:22.000Z | Enabled.action = {
# NOTE(review): `Enabled` and `Active` are not defined in this file; they are
# presumably panel control objects injected by the hosting GUI code
# (FreezeInterventionPanel) that evaluates these declarative tables --
# confirm against the panel implementation.
# Statement to execute when the "Enabled" control is set to each value.
False: 'freeze_intervention.enabled = False',
True: 'freeze_intervention.enabled = True'}
# Widget defaults shown until the live value can be read.
Enabled.defaults = {
    'Enabled': False, 'Label': '?'
}
# Value-dependent widget properties; each entry is (value, condition string
# evaluated by the host against freeze_intervention state).
Enabled.properties = {
    'BackgroundColour': [
        ('green', 'freeze_intervention.enabled == False'),
        ('red', 'freeze_intervention.enabled == True'),
        # Grey when the state is unknown (neither True nor False).
        ('grey80', 'freeze_intervention.enabled not in [True,False]'),
    ],
    # Widget is interactive only once a definite state is known.
    'Enabled': [(True, 'freeze_intervention.enabled in [True,False]')],
    'Value': [
        (True, 'freeze_intervention.enabled == True'),
        (False, 'freeze_intervention.enabled == False'),
    ],
    'Label': [
        ('Enabled', 'freeze_intervention.enabled == True'),
        ('Disabled', 'freeze_intervention.enabled == False'),
        ('?', 'freeze_intervention.enabled not in [True,False]'),
    ]
}
# Same pattern for the "Active" control, keyed off freeze_intervention.active.
Active.action = {
False: 'freeze_intervention.active = False',
True: 'freeze_intervention.active = True'}
Active.defaults = {
    'Enabled': False, 'Label': '?'
}
Active.properties = {
    'BackgroundColour': [
        ('green', 'freeze_intervention.active == False'),
        ('red', 'freeze_intervention.active == True'),
        ('grey80', 'freeze_intervention.active not in [True,False]'),
    ],
    'Enabled': [(True, 'freeze_intervention.active in [True,False]')],
    'Value': [
        (True, 'freeze_intervention.active == True'),
        (False, 'freeze_intervention.active == False'),
    ],
    'Label': [
        ('Active', 'freeze_intervention.active == True'),
        ('Not active', 'freeze_intervention.active == False'),
        ('?', 'freeze_intervention.active not in [True,False]'),
    ]
}
| 34.5 | 71 | 0.603865 |
594c431c05231aad0a8671bb5b9de053a9fb036d | 465 | py | Python | app/user/migrations/0033_userreviewlist_event_host.py | Sovol2018/sovolo | 54250e42b4af3391d2f99690f45b93ab240563c2 | [
"MIT"
] | 2 | 2017-06-06T11:34:49.000Z | 2017-10-24T13:09:50.000Z | app/user/migrations/0033_userreviewlist_event_host.py | Sovol2018/sovolo | 54250e42b4af3391d2f99690f45b93ab240563c2 | [
"MIT"
] | 346 | 2016-08-09T20:50:57.000Z | 2018-08-28T06:52:17.000Z | app/user/migrations/0033_userreviewlist_event_host.py | hejob/sovolo | 8b73253d7bf0427c7ae0ebb6d8e3d70e118e8427 | [
"MIT"
] | 3 | 2017-11-27T14:07:57.000Z | 2018-08-13T15:51:01.000Z | # -*- coding: utf-8 -*-
# Generated by Django 1.11.4 on 2017-08-21 23:55
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
    """Add the nullable boolean `event_host` flag to the userreviewlist model."""

    # Must be applied after migration 0032 of the `user` app.
    dependencies = [
        ('user', '0032_auto_20170818_0930'),
    ]

    operations = [
        migrations.AddField(
            model_name='userreviewlist',
            name='event_host',
            # NOTE(review): NullBooleanField is deprecated in newer Django
            # releases (use BooleanField(null=True) there); fine for the
            # Django 1.11 this migration was generated with.
            field=models.NullBooleanField(default=False),
        ),
    ]
| 22.142857 | 57 | 0.627957 |
75862b3931d48174013e91fa5eaa87c102b0c989 | 6,621 | py | Python | resources/lib/lights.py | xcorp/script.kodi.hue.ambilight | 76c47903b3923484f3587d684d9e51d0488e9597 | [
"WTFPL"
] | 172 | 2016-06-17T08:14:00.000Z | 2021-11-28T01:19:30.000Z | resources/lib/lights.py | xcorp/script.kodi.hue.ambilight | 76c47903b3923484f3587d684d9e51d0488e9597 | [
"WTFPL"
] | 119 | 2016-06-14T17:09:02.000Z | 2021-12-27T22:08:41.000Z | resources/lib/lights.py | xcorp/script.kodi.hue.ambilight | 76c47903b3923484f3587d684d9e51d0488e9597 | [
"WTFPL"
] | 73 | 2016-06-15T08:58:06.000Z | 2021-06-18T07:44:21.000Z | import json
import requests
from tools import xbmclog
class Light(object):
    """State cache and REST client for a single Philips Hue light.

    Mirrors the bridge-reported state (hue/sat/bri/on) locally so redundant
    PUTs can be skipped, and remembers the state seen at construction time
    so it can be restored later.
    """

    def __init__(self, bridge_ip, username, light_id, spec):
        # `spec` is the per-light JSON dict from the bridge; the code below
        # reads its 'type', 'name' and 'state' keys -- confirm shape against
        # the caller that enumerates lights.
        self.bridge_ip = bridge_ip
        self.username = username
        self.light_id = light_id
        # Only these two bridge-reported types can render full color.
        self.fullspectrum = ((spec['type'] == 'Color Light') or
                             (spec['type'] == 'Extended Color Light'))
        self.livingwhite = False
        # NOTE: .encode() yields bytes -- Python 2-era convention.
        self.name = spec['name'].encode("utf-8")

        self.init_hue = None
        self.hue = None
        try:
            self.init_hue = spec['state']['hue']
            self.hue = self.init_hue
        except KeyError:
            # No 'hue' in state: treat as a white-only light.
            self.livingwhite = True

        self.init_sat = None
        self.sat = None
        try:
            self.init_sat = spec['state']['sat']
            self.sat = self.init_sat
        except KeyError:
            self.livingwhite = True

        # NOTE(review): unlike hue/sat, init_bri/bri are NOT pre-seeded to
        # None; if 'bri' is missing this instance has no .bri attribute and
        # set_state()'s `bri != self.bri` would raise AttributeError.
        try:
            self.init_bri = spec['state']['bri']
            self.bri = self.init_bri
        except KeyError:
            self.livingwhite = True

        self.init_on = spec['state']['on']
        self.on = self.init_on

        # Reused HTTP session for all state PUTs to the bridge.
        self.session = requests.Session()

    def set_state(self, hue=None, sat=None, bri=None, on=None,
                  transition_time=None):
        """PUT only the changed fields to the bridge and update the cache.

        Any argument left as None is not touched; color fields are ignored
        for white-only (livingwhite) lights.
        """
        state = {}
        if transition_time is not None:
            state['transitiontime'] = transition_time
        if on is not None and on != self.on:
            self.on = on
            state['on'] = on
        if hue is not None and not self.livingwhite and hue != self.hue:
            self.hue = hue
            state['hue'] = hue
        if sat is not None and not self.livingwhite and sat != self.sat:
            self.sat = sat
            state['sat'] = sat
        if bri is not None and bri != self.bri:
            self.bri = bri
            state['bri'] = bri
            # Hue specific
            # (bridge keeps emitting at minimum brightness unless told to
            # switch off; mirror on/off implicitly from the brightness edge,
            # but only when the caller did not set `on` explicitly)
            if bri <= 0 and self.on and on is None:
                self.on = False
                state['on'] = False
            if bri >= 1 and not self.on and on is None:
                self.on = True
                state['on'] = True

        data = json.dumps(state)
        try:
            endpoint = 'http://{}/api/{}/lights/{}/state'.format(
                self.bridge_ip, self.username, self.light_id)
            self.session.put(endpoint, data)
        except Exception:
            # Best-effort update: network errors are deliberately swallowed
            # (a dropped state change must not crash the caller).
            pass

    def restore_initial_state(self, transition_time=0):
        """Fade back to the state captured at construction / last save."""
        self.set_state(
            self.init_hue,
            self.init_sat,
            self.init_bri,
            self.init_on,
            transition_time
        )

    def save_state_as_initial(self):
        """Make the current cached state the new restore target."""
        self.init_hue = self.hue
        self.init_sat = self.sat
        self.init_bri = self.bri
        self.init_on = self.on

    def __repr__(self):
        return ('<Light({}) {} hue: {}, sat: {}, bri: {}, on: {}>'.format(
            self.name, self.light_id, self.hue, self.sat, self.bri, self.on))
class Controller(object):
    """Base class for light controllers driven by Kodi playback events.

    Subclasses must implement the on_playback_* hooks; this base provides
    bulk state changes over a configurable subset of `lights`.
    """

    def __init__(self, lights, settings):
        # lights: dict mapping light_id -> Light.
        # settings: addon settings object; this class reads dim_time,
        # proportional_dim_time, theater_start_bri and force_light_on --
        # confirm the full contract against the settings module.
        self.lights = lights
        self.settings = settings

    def on_playback_start(self):
        raise NotImplementedError(
            'on_playback_start must be implemented in the controller'
        )

    def on_playback_pause(self):
        raise NotImplementedError(
            'on_playback_pause must be implemented in the controller'
        )

    def on_playback_stop(self):
        raise NotImplementedError(
            'on_playback_stop must be implemented in the controller'
        )

    def set_state(self, hue=None, sat=None, bri=None, on=None,
                  transition_time=None, lights=None, force_on=True):
        """Apply the given state to the selected lights (all if lights=None)."""
        xbmclog(
            'Kodi Hue: In {}.set_state(hue={}, sat={}, bri={}, '
            'on={}, transition_time={}, lights={}, force_on={})'.format(
                self.__class__.__name__, hue, sat, bri, on, transition_time,
                lights, force_on
            )
        )
        for light in self._calculate_subgroup(lights):
            # Skip lights that were off initially unless forcing them on.
            if not force_on and not light.init_on:
                continue

            if bri:
                # Per-light fade duration: proportional to the brightness
                # delta, or the fixed configured dim time.  Note this
                # reassigns the `transition_time` parameter on each pass.
                if self.settings.proportional_dim_time:
                    transition_time = self._transition_time(light, bri)
                else:
                    transition_time = self.settings.dim_time

            light.set_state(
                hue=hue, sat=sat, bri=bri, on=on,
                transition_time=transition_time
            )

    def restore_initial_state(self, lights=None, force_on=True):
        """Fade the selected lights back to their saved initial state."""
        xbmclog(
            'Kodi Hue: In {}.restore_initial_state(lights={})'
            .format(self.__class__.__name__, lights)
        )
        for light in self._calculate_subgroup(lights):
            if not force_on and not light.init_on:
                continue

            transition_time = self.settings.dim_time
            if self.settings.proportional_dim_time:
                transition_time = self._transition_time(light, light.init_bri)

            light.restore_initial_state(
                transition_time
            )

    def save_state_as_initial(self, lights=None):
        """Snapshot the current state of the selected lights as the restore target."""
        xbmclog(
            'Kodi Hue: In {}.save_state_as_initial(lights={})'
            .format(self.__class__.__name__, lights)
        )
        for light in self._calculate_subgroup(lights):
            light.save_state_as_initial()

    def flash_lights(self):
        """Briefly switch the lights off and restore them (visual signal)."""
        xbmclog(
            'Kodi Hue: In {} flash_lights())'
            .format(self.__class__.__name__)
        )
        self.set_state(
            on=False,
            force_on=self.settings.force_light_on,
        )
        self.restore_initial_state(
            force_on=self.settings.force_light_on,
        )

    def _calculate_subgroup(self, lights=None):
        # None selects every managed light; otherwise filter by light_id.
        if lights is None:
            ret = self.lights.values()
        else:
            ret = [light for light in
                   self.lights.values() if light.light_id in lights]
        xbmclog(
            'Kodi Hue: In {}._calculate_subgroup'
            '(lights={}) returning {}'.format(
                self.__class__.__name__, lights, ret)
        )
        return ret

    def _transition_time(self, light, bri):
        """Scale the configured dim time by how far `light` must travel to `bri`.

        Returns the full dim_time when the initial-to-theater brightness span
        is zero (nothing to scale against).
        """
        time = 0

        difference = abs(float(bri) - light.bri)
        total = float(light.init_bri) - self.settings.theater_start_bri
        if total == 0:
            return self.settings.dim_time
        proportion = difference / total
        time = int(round(proportion * self.settings.dim_time))

        return time

    def __repr__(self):
        return ('<{} {}>'.format(self.__class__.__name__, self.lights))
a97a0eb66ca862296cb30e615671b52c97ea63ad | 3,534 | py | Python | pypureclient/flasharray/FA_2_4/models/resource_space_no_id.py | Flav-STOR-WL/py-pure-client | 03b889c997d90380ac5d6380ca5d5432792d3e89 | [
"BSD-2-Clause"
] | 14 | 2018-12-07T18:30:27.000Z | 2022-02-22T09:12:33.000Z | pypureclient/flasharray/FA_2_4/models/resource_space_no_id.py | Flav-STOR-WL/py-pure-client | 03b889c997d90380ac5d6380ca5d5432792d3e89 | [
"BSD-2-Clause"
] | 28 | 2019-09-17T21:03:52.000Z | 2022-03-29T22:07:35.000Z | pypureclient/flasharray/FA_2_4/models/resource_space_no_id.py | Flav-STOR-WL/py-pure-client | 03b889c997d90380ac5d6380ca5d5432792d3e89 | [
"BSD-2-Clause"
] | 15 | 2020-06-11T15:50:08.000Z | 2022-03-21T09:27:25.000Z | # coding: utf-8
"""
FlashArray REST API
No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen)
OpenAPI spec version: 2.4
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
import pprint
import re
import six
import typing
from ....properties import Property
if typing.TYPE_CHECKING:
from pypureclient.flasharray.FA_2_4 import models
class ResourceSpaceNoId(object):
    """Swagger-generated model for a resource's space metrics (no `id` field).

    Attributes:
      swagger_types (dict): The key is attribute name
                            and the value is attribute type.
      attribute_map (dict): The key is attribute name
                            and the value is json key in definition.
    """

    # Declared model fields and their swagger type names.
    swagger_types = {
        'name': 'str',
        'space': 'Space',
        'time': 'int'
    }

    # Model attribute name -> JSON key (identical here).
    attribute_map = {
        'name': 'name',
        'space': 'space',
        'time': 'time'
    }

    # No constructor arguments are mandatory for this model.
    required_args = {
    }

    def __init__(
        self,
        name=None,  # type: str
        space=None,  # type: models.Space
        time=None,  # type: int
    ):
        """
        Keyword args:
            name (str): A locally unique, system-generated name. The name cannot be modified.
            space (Space): Displays size and space consumption information.
            time (int)
        """
        # Only assign fields that were actually supplied; unset fields stay
        # absent (see __getattribute__ below).
        if name is not None:
            self.name = name
        if space is not None:
            self.space = space
        if time is not None:
            self.time = time

    def __setattr__(self, key, value):
        # Reject attributes that are not part of the declared model schema.
        if key not in self.attribute_map:
            raise KeyError("Invalid key `{}` for `ResourceSpaceNoId`".format(key))
        self.__dict__[key] = value

    def __getattribute__(self, item):
        # Fields may be class-level Property placeholders; surface those as
        # "attribute not set" so hasattr() reports only real values.
        value = object.__getattribute__(self, item)
        if isinstance(value, Property):
            raise AttributeError
        else:
            return value

    def to_dict(self):
        """Returns the model properties as a dict"""
        result = {}
        for attr, _ in six.iteritems(self.swagger_types):
            if hasattr(self, attr):
                value = getattr(self, attr)
                if isinstance(value, list):
                    # Recursively serialize list elements that are models.
                    result[attr] = list(map(
                        lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
                        value
                    ))
                elif hasattr(value, "to_dict"):
                    result[attr] = value.to_dict()
                elif isinstance(value, dict):
                    # Recursively serialize dict values that are models.
                    result[attr] = dict(map(
                        lambda item: (item[0], item[1].to_dict())
                        if hasattr(item[1], "to_dict") else item,
                        value.items()
                    ))
                else:
                    result[attr] = value
        if issubclass(ResourceSpaceNoId, dict):
            for key, value in self.items():
                result[key] = value
        return result

    def to_str(self):
        """Returns the string representation of the model"""
        return pprint.pformat(self.to_dict())

    def __repr__(self):
        """For `print` and `pprint`"""
        return self.to_str()

    def __eq__(self, other):
        """Returns true if both objects are equal"""
        if not isinstance(other, ResourceSpaceNoId):
            return False

        return self.__dict__ == other.__dict__

    def __ne__(self, other):
        """Returns true if both objects are not equal"""
        return not self == other
| 28.5 | 105 | 0.537634 |
fc0f89b83d59281a84ab6dea909d143a4622a2ae | 23,212 | py | Python | python/ray/tune/ray_trial_executor.py | wingman-ai/ray | 23e1ccc1ea5d7e5789628d3eb85ff6bc8d0a4359 | [
"Apache-2.0"
] | null | null | null | python/ray/tune/ray_trial_executor.py | wingman-ai/ray | 23e1ccc1ea5d7e5789628d3eb85ff6bc8d0a4359 | [
"Apache-2.0"
] | 4 | 2019-03-04T13:03:24.000Z | 2019-06-06T11:25:07.000Z | python/ray/tune/ray_trial_executor.py | wingman-ai/ray | 23e1ccc1ea5d7e5789628d3eb85ff6bc8d0a4359 | [
"Apache-2.0"
] | null | null | null | # coding: utf-8
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import logging
import math
import os
import random
import time
import traceback
import ray
from ray.tune.error import AbortTrialExecution
from ray.tune.logger import NoopLogger
from ray.tune.trial import Trial, Resources, Checkpoint
from ray.tune.trial_executor import TrialExecutor
from ray.tune.util import warn_if_slow
# Module-level logger for this executor.
logger = logging.getLogger(__name__)

RESOURCE_REFRESH_PERIOD = 0.5  # Refresh resources every 500 ms
# Warn at most once per this many seconds when the event loop is backlogged.
BOTTLENECK_WARN_PERIOD_S = 60
# A ray.wait() that takes longer than this counts as a "nontrivial" wait.
NONTRIVIAL_WAIT_TIME_THRESHOLD_S = 1e-3
class _LocalWrapper(object):
    """Wraps a result computed synchronously in Ray local mode.

    In local mode ``train.remote()`` hands back the result dict directly
    instead of an object id; boxing it keeps the keys of the executor's
    ``_running`` dict uniform (and hashable).
    """

    def __init__(self, result):
        # The raw result produced by the trainable in local mode.
        self._result = result

    def unwrap(self):
        """Returns the wrapped result."""
        return self._result
class RayTrialExecutor(TrialExecutor):
    """An implementation of TrialExecutor based on Ray.

    Runs each Trial's trainable as a Ray actor, tracks in-flight training
    futures in ``self._running``, and accounts for cluster resources
    committed to started trials.
    """

    def __init__(self,
                 queue_trials=False,
                 reuse_actors=False,
                 ray_auto_init=False,
                 refresh_period=RESOURCE_REFRESH_PERIOD):
        super(RayTrialExecutor, self).__init__(queue_trials)
        # Maps in-flight training future -> Trial.
        self._running = {}
        # Since trial resume after paused should not run
        # trial.train.remote(), thus no more new remote object id generated.
        # We use self._paused to store paused trials here.
        self._paused = {}
        self._reuse_actors = reuse_actors
        # At most one stopped actor is kept around for reuse.
        self._cached_actor = None
        self._avail_resources = Resources(cpu=0, gpu=0)
        self._committed_resources = Resources(cpu=0, gpu=0)
        self._resources_initialized = False
        self._refresh_period = refresh_period
        self._last_resource_refresh = float("-inf")
        self._last_nontrivial_wait = time.time()
        if not ray.is_initialized() and ray_auto_init:
            logger.info("Initializing Ray automatically."
                        "For cluster usage or custom Ray initialization, "
                        "call `ray.init(...)` before `tune.run`.")
            ray.init()

        if ray.is_initialized():
            self._update_avail_resources()

    def _setup_runner(self, trial, reuse_allowed):
        """Return a (possibly reused) remote trainable actor for `trial`."""
        if (self._reuse_actors and reuse_allowed
                and self._cached_actor is not None):
            logger.debug("Reusing cached runner {} for {}".format(
                self._cached_actor, trial.trial_id))
            existing_runner = self._cached_actor
            self._cached_actor = None
        else:
            # A cached actor that cannot be reused must be torn down first.
            if self._cached_actor:
                logger.debug(
                    "Cannot reuse cached runner {} for new trial".format(
                        self._cached_actor))
                self._cached_actor.stop.remote()
                self._cached_actor.__ray_terminate__.remote()
                self._cached_actor = None
            existing_runner = None
            # Bind the trial's resource request to the actor class.
            cls = ray.remote(
                num_cpus=trial.resources.cpu,
                num_gpus=trial.resources.gpu,
                resources=trial.resources.custom_resources)(
                    trial._get_trainable_cls())

        trial.init_logger()
        # We checkpoint metadata here to try mitigating logdir duplication
        self.try_checkpoint_metadata(trial)
        remote_logdir = trial.logdir

        if existing_runner:
            trial.runner = existing_runner
            if not self.reset_trial(trial, trial.config, trial.experiment_tag):
                raise AbortTrialExecution(
                    "Trial runner reuse requires reset_trial() to be "
                    "implemented and return True.")
            return existing_runner

        def logger_creator(config):
            # Set the working dir in the remote process, for user file writes
            if not os.path.exists(remote_logdir):
                os.makedirs(remote_logdir)
            if not ray.worker._mode() == ray.worker.LOCAL_MODE:
                os.chdir(remote_logdir)
            return NoopLogger(config, remote_logdir)

        # Logging for trials is handled centrally by TrialRunner, so
        # configure the remote runner to use a noop-logger.
        return cls.remote(config=trial.config, logger_creator=logger_creator)

    def _train(self, trial):
        """Start one iteration of training and save remote id."""
        assert trial.status == Trial.RUNNING, trial.status
        remote = trial.runner.train.remote()

        # Local Mode
        # (in local mode train.remote() returns the result dict itself;
        # wrap it so it can serve as a key in self._running)
        if isinstance(remote, dict):
            remote = _LocalWrapper(remote)

        self._running[remote] = trial

    def _start_trial(self, trial, checkpoint=None):
        """Starts trial and restores last result if trial was paused.

        Raises:
            ValueError if restoring from checkpoint fails.
        """
        prior_status = trial.status
        self.set_status(trial, Trial.RUNNING)
        trial.runner = self._setup_runner(
            trial,
            reuse_allowed=checkpoint is not None
            or trial._checkpoint.value is not None)
        if not self.restore(trial, checkpoint):
            if trial.status == Trial.ERROR:
                raise RuntimeError(
                    "Restore from checkpoint failed for Trial {}.".format(
                        str(trial)))

        previous_run = self._find_item(self._paused, trial)
        if (prior_status == Trial.PAUSED and previous_run):
            # If Trial was in flight when paused, self._paused stores result.
            self._paused.pop(previous_run[0])
            self._running[previous_run[0]] = trial
        else:
            self._train(trial)

    def _stop_trial(self, trial, error=False, error_msg=None,
                    stop_logger=True):
        """Stops this trial.

        Stops this trial, releasing all allocating resources. If stopping the
        trial fails, the run will be marked as terminated in error, but no
        exception will be thrown.

        Args:
            error (bool): Whether to mark this trial as terminated in error.
            error_msg (str): Optional error message.
            stop_logger (bool): Whether to shut down the trial logger.
        """

        if stop_logger:
            trial.close_logger()

        if error:
            self.set_status(trial, Trial.ERROR)
        else:
            self.set_status(trial, Trial.TERMINATED)

        try:
            trial.write_error_log(error_msg)
            if hasattr(trial, "runner") and trial.runner:
                # Cache the actor for reuse only on a clean stop.
                if (not error and self._reuse_actors
                        and self._cached_actor is None):
                    logger.debug("Reusing actor for {}".format(trial.runner))
                    self._cached_actor = trial.runner
                else:
                    logger.info(
                        "Destroying actor for trial {}. If your trainable is "
                        "slow to initialize, consider setting "
                        "reuse_actors=True to reduce actor creation "
                        "overheads.".format(trial))
                    trial.runner.stop.remote()
                    trial.runner.__ray_terminate__.remote()
        except Exception:
            logger.exception("Error stopping runner for Trial %s", str(trial))
            self.set_status(trial, Trial.ERROR)
        finally:
            trial.runner = None

    def start_trial(self, trial, checkpoint=None):
        """Starts the trial.

        Will not return resources if trial repeatedly fails on start.

        Args:
            trial (Trial): Trial to be started.
            checkpoint (Checkpoint): A Python object or path storing the state
                of trial.
        """
        self._commit_resources(trial.resources)
        try:
            self._start_trial(trial, checkpoint)
        except Exception as e:
            logger.exception("Error starting runner for Trial %s", str(trial))
            error_msg = traceback.format_exc()
            time.sleep(2)
            self._stop_trial(trial, error=True, error_msg=error_msg)
            if isinstance(e, AbortTrialExecution):
                return  # don't retry fatal Tune errors
            try:
                # This forces the trial to not start from checkpoint.
                trial.clear_checkpoint()
                logger.info(
                    "Trying to start runner for Trial %s without checkpoint.",
                    str(trial))
                self._start_trial(trial)
            except Exception:
                logger.exception(
                    "Error starting runner for Trial %s, aborting!",
                    str(trial))
                error_msg = traceback.format_exc()
                self._stop_trial(trial, error=True, error_msg=error_msg)
                # note that we don't return the resources, since they may
                # have been lost

    def _find_item(self, dictionary, item):
        # All keys whose value is `item` (identity, not equality).
        out = [rid for rid, t in dictionary.items() if t is item]
        return out

    def stop_trial(self, trial, error=False, error_msg=None, stop_logger=True):
        """Only returns resources if resources allocated."""
        prior_status = trial.status
        self._stop_trial(
            trial, error=error, error_msg=error_msg, stop_logger=stop_logger)
        if prior_status == Trial.RUNNING:
            logger.debug("Returning resources for Trial %s.", str(trial))
            self._return_resources(trial.resources)
            # Drop any still-pending futures for this trial.
            out = self._find_item(self._running, trial)
            for result_id in out:
                self._running.pop(result_id)

    def continue_training(self, trial):
        """Continues the training of this trial."""
        self._train(trial)

    def pause_trial(self, trial):
        """Pauses the trial.

        If trial is in-flight, preserves return value in separate queue
        before pausing, which is restored when Trial is resumed.
        """
        trial_future = self._find_item(self._running, trial)
        if trial_future:
            self._paused[trial_future[0]] = trial
        super(RayTrialExecutor, self).pause_trial(trial)

    def reset_trial(self, trial, new_config, new_experiment_tag):
        """Tries to invoke `Trainable.reset_config()` to reset trial.

        Args:
            trial (Trial): Trial to be reset.
            new_config (dict): New configuration for Trial
                trainable.
            new_experiment_tag (str): New experiment name
                for trial.

        Returns:
            True if `reset_config` is successful else False.
        """
        trial.experiment_tag = new_experiment_tag
        trial.config = new_config
        trainable = trial.runner
        with warn_if_slow("reset_config"):
            reset_val = ray.get(trainable.reset_config.remote(new_config))
        return reset_val

    def get_running_trials(self):
        """Returns the running trials."""
        return list(self._running.values())

    def get_next_available_trial(self):
        """Block until some in-flight future completes; return its Trial."""
        shuffled_results = list(self._running.keys())
        random.shuffle(shuffled_results)
        # Note: We shuffle the results because `ray.wait` by default returns
        # the first available result, and we want to guarantee that slower
        # trials (i.e. trials that run remotely) also get fairly reported.
        # See https://github.com/ray-project/ray/issues/4211 for details.
        start = time.time()
        [result_id], _ = ray.wait(shuffled_results)
        wait_time = time.time() - start
        if wait_time > NONTRIVIAL_WAIT_TIME_THRESHOLD_S:
            self._last_nontrivial_wait = time.time()
        # Warn (at most once per period) if the loop never really waits,
        # i.e. results are arriving faster than they can be processed.
        if time.time() - self._last_nontrivial_wait > BOTTLENECK_WARN_PERIOD_S:
            logger.warn(
                "Over the last {} seconds, the Tune event loop has been "
                "backlogged processing new results. Consider increasing your "
                "period of result reporting to improve performance.".format(
                    BOTTLENECK_WARN_PERIOD_S))
            self._last_nontrivial_wait = time.time()
        return self._running[result_id]

    def fetch_result(self, trial):
        """Fetches one result of the running trials.

        Returns:
            Result of the most recent trial training run."""
        trial_future = self._find_item(self._running, trial)
        if not trial_future:
            raise ValueError("Trial was not running.")
        self._running.pop(trial_future[0])
        with warn_if_slow("fetch_result"):
            result = ray.get(trial_future[0])

        # For local mode
        if isinstance(result, _LocalWrapper):
            result = result.unwrap()
        return result

    def _commit_resources(self, resources):
        # Add `resources` (including custom resources) to the committed total.
        committed = self._committed_resources
        all_keys = set(resources.custom_resources).union(
            set(committed.custom_resources))

        custom_resources = {
            k: committed.get(k) + resources.get_res_total(k)
            for k in all_keys
        }

        self._committed_resources = Resources(
            committed.cpu + resources.cpu_total(),
            committed.gpu + resources.gpu_total(),
            custom_resources=custom_resources)

    def _return_resources(self, resources):
        # Inverse of _commit_resources; totals must stay non-negative.
        committed = self._committed_resources
        all_keys = set(resources.custom_resources).union(
            set(committed.custom_resources))

        custom_resources = {
            k: committed.get(k) - resources.get_res_total(k)
            for k in all_keys
        }
        self._committed_resources = Resources(
            committed.cpu - resources.cpu_total(),
            committed.gpu - resources.gpu_total(),
            custom_resources=custom_resources)

        assert self._committed_resources.is_nonnegative(), (
            "Resource invalid: {}".format(resources))

    def _update_avail_resources(self, num_retries=5):
        """Poll the cluster for total resources, retrying on empty answers."""
        for i in range(num_retries):
            try:
                resources = ray.cluster_resources()
            except Exception:
                # TODO(rliaw): Remove this when local mode is fixed.
                # https://github.com/ray-project/ray/issues/4147
                logger.debug("Using resources for local machine.")
                resources = ray.services.check_and_update_resources(
                    None, None, None)
            if not resources:
                logger.warning(
                    "Cluster resources not detected or are 0. Retrying...")
                time.sleep(0.5)

        if not resources:
            # NOTE: This hides the possibility that Ray may be waiting for
            # clients to connect.
            resources.setdefault("CPU", 0)
            resources.setdefault("GPU", 0)
            logger.warning("Cluster resources cannot be detected or are 0. "
                           "You can resume this experiment by passing in "
                           "`resume=True` to `run`.")

        # Everything that is not CPU/GPU counts as a custom resource.
        resources = resources.copy()
        num_cpus = resources.pop("CPU", 0)
        num_gpus = resources.pop("GPU", 0)
        custom_resources = resources

        self._avail_resources = Resources(
            int(num_cpus), int(num_gpus), custom_resources=custom_resources)
        self._last_resource_refresh = time.time()
        self._resources_initialized = True

    def has_resources(self, resources):
        """Returns whether this runner has at least the specified resources.

        This refreshes the Ray cluster resources if the time since last update
        has exceeded self._refresh_period. This also assumes that the
        cluster is not resizing very frequently.
        """
        if time.time() - self._last_resource_refresh > self._refresh_period:
            self._update_avail_resources()

        currently_available = Resources.subtract(self._avail_resources,
                                                 self._committed_resources)

        have_space = (
            resources.cpu_total() <= currently_available.cpu
            and resources.gpu_total() <= currently_available.gpu and all(
                resources.get_res_total(res) <= currently_available.get(res)
                for res in resources.custom_resources))

        if have_space:
            return True

        can_overcommit = self._queue_trials

        if (resources.cpu_total() > 0 and currently_available.cpu <= 0) or \
           (resources.gpu_total() > 0 and currently_available.gpu <= 0) or \
           any((resources.get_res_total(res_name) > 0
                and currently_available.get(res_name) <= 0)
               for res_name in resources.custom_resources):
            can_overcommit = False  # requested resource is already saturated

        if can_overcommit:
            logger.warning(
                "Allowing trial to start even though the "
                "cluster does not have enough free resources. Trial actors "
                "may appear to hang until enough resources are added to the "
                "cluster (e.g., via autoscaling). You can disable this "
                "behavior by specifying `queue_trials=False` in "
                "ray.tune.run().")
            return True

        return False

    def debug_string(self):
        """Returns a human readable message for printing to the console."""
        if self._resources_initialized:
            status = "Resources requested: {}/{} CPUs, {}/{} GPUs".format(
                self._committed_resources.cpu, self._avail_resources.cpu,
                self._committed_resources.gpu, self._avail_resources.gpu)
            customs = ", ".join([
                "{}/{} {}".format(
                    self._committed_resources.get_res_total(name),
                    self._avail_resources.get_res_total(name), name)
                for name in self._avail_resources.custom_resources
            ])
            if customs:
                status += " ({})".format(customs)
            return status
        else:
            return "Resources requested: ?"

    def resource_string(self):
        """Returns a string describing the total resources available."""
        if self._resources_initialized:
            res_str = "{} CPUs, {} GPUs".format(self._avail_resources.cpu,
                                                self._avail_resources.gpu)
            if self._avail_resources.custom_resources:
                custom = ", ".join(
                    "{} {}".format(
                        self._avail_resources.get_res_total(name), name)
                    for name in self._avail_resources.custom_resources)
                res_str += " ({})".format(custom)
            return res_str
        else:
            return "? CPUs, ? GPUs"

    def on_step_begin(self):
        """Before step() called, update the available resources."""
        self._update_avail_resources()

    def save(self, trial, storage=Checkpoint.DISK):
        """Saves the trial's state to a checkpoint."""
        trial._checkpoint.storage = storage
        trial._checkpoint.last_result = trial.last_result
        if storage == Checkpoint.MEMORY:
            trial._checkpoint.value = trial.runner.save_to_object.remote()
        else:
            # Keeps only highest performing checkpoints if enabled
            if trial.keep_checkpoints_num:
                try:
                    last_attr_val = trial.last_result[
                        trial.checkpoint_score_attr]
                    if (trial.compare_checkpoints(last_attr_val)
                            and not math.isnan(last_attr_val)):
                        trial.best_checkpoint_attr_value = last_attr_val
                        # NOTE(review): this one-argument call matches the
                        # @staticmethod _checkpoint_and_erase(trial) below,
                        # but that definition is shadowed by the later
                        # _checkpoint_and_erase(self, subdir, trial), so this
                        # call would raise TypeError at runtime -- confirm
                        # which version is intended.
                        self._checkpoint_and_erase(trial)
                except KeyError:
                    logger.warning(
                        "Result dict has no key: {}. keep"
                        "_checkpoints_num flag will not work".format(
                            trial.checkpoint_score_attr))
            else:
                with warn_if_slow("save_to_disk"):
                    trial._checkpoint.value = ray.get(
                        trial.runner.save.remote())

        return trial._checkpoint.value

    # NOTE(review): this staticmethod is immediately shadowed by the
    # instance method of the same name defined below; as written it is dead
    # code. Rename one of the two (or delete the unused one).
    @staticmethod
    def _checkpoint_and_erase(trial):
        """Checkpoints the model and erases old checkpoints
        if needed.
        Parameters
        ----------
        trial : trial to save
        """
        with warn_if_slow("save_to_disk"):
            trial._checkpoint.value = ray.get(trial.runner.save.remote())

        # Evict the oldest checkpoint once the retention limit is reached.
        if len(trial.history) >= trial.keep_checkpoints_num:
            ray.get(trial.runner.delete_checkpoint.remote(trial.history[-1]))
            trial.history.pop()

        trial.history.insert(0, trial._checkpoint.value)

    def _checkpoint_and_erase(self, subdir, trial):
        """Checkpoints the model and erases old checkpoints
        if needed.
        Parameters
        ----------
        subdir string: either "" or "best"
        trial : trial to save
        """
        # NOTE(review): this definition shadows the @staticmethod above, yet
        # save() invokes _checkpoint_and_erase with a single argument --
        # these two cannot both be right; confirm intended behavior.
        with warn_if_slow("save_to_disk"):
            trial._checkpoint.value, folder_path = ray.get(trial.runner.save_checkpoint_relative.remote(subdir))

        # Per-subdir bounded history: evict the oldest entry at the limit.
        if trial.prefix[subdir]["limit"]:
            if len(trial.prefix[subdir]["history"]) == trial.prefix[subdir]["limit"]:
                ray.get(trial.runner.delete_checkpoint.remote(trial.prefix[subdir]["history"][-1]))
                trial.prefix[subdir]["history"].pop()

        trial.prefix[subdir]["history"].insert(0, folder_path)

    def restore(self, trial, checkpoint=None):
        """Restores training state from a given model checkpoint.

        This will also sync the trial results to a new location
        if restoring on a different node.
        """
        if checkpoint is None or checkpoint.value is None:
            checkpoint = trial._checkpoint
        if checkpoint is None or checkpoint.value is None:
            # Nothing to restore counts as success.
            return True
        if trial.runner is None:
            logger.error("Unable to restore - no runner.")
            self.set_status(trial, Trial.ERROR)
            return False
        try:
            value = checkpoint.value
            if checkpoint.storage == Checkpoint.MEMORY:
                assert type(value) != Checkpoint, type(value)
                trial.runner.restore_from_object.remote(value)
            else:
                worker_ip = ray.get(trial.runner.current_ip.remote())
                trial.sync_logger_to_new_location(worker_ip)
                with warn_if_slow("restore_from_disk"):
                    ray.get(trial.runner.restore.remote(value))
            trial.last_result = checkpoint.last_result
            return True
        except Exception:
            logger.exception("Error restoring runner for Trial %s.", trial)
            self.set_status(trial, Trial.ERROR)
            return False

    def export_trial_if_needed(self, trial):
        """Exports model of this trial based on trial.export_formats.

        Return:
            A dict that maps ExportFormats to successfully exported models.
        """
        if trial.export_formats and len(trial.export_formats) > 0:
            return ray.get(
                trial.runner.export_model.remote(trial.export_formats))
        return {}
| 39.678632 | 112 | 0.59844 |
166fb17bb239637c6b3e25974f130421bb13b016 | 1,845 | py | Python | gitInit.py | Garretming/slg_server | 1acc536153b6ccfa07f4b8137248dec9cbd327f5 | [
"MIT"
] | 2 | 2021-06-05T09:52:23.000Z | 2021-10-09T12:41:14.000Z | gitInit.py | Garretming/slg_server | 1acc536153b6ccfa07f4b8137248dec9cbd327f5 | [
"MIT"
] | null | null | null | gitInit.py | Garretming/slg_server | 1acc536153b6ccfa07f4b8137248dec9cbd327f5 | [
"MIT"
] | 2 | 2020-10-20T02:58:41.000Z | 2021-08-22T13:02:05.000Z | #!/usr/bin/env python3
#coding=utf-8
import os
import sys
def parseArgument():
    """Return a fresh copy of the command line arguments (argv[0] included).

    Expected invocation (per the original author's note): the arguments
    carry the commit repo group, the project name, and the commit message
    note; the indices actually used by the caller should be confirmed.

    Returns:
        list[str]: a new list, so callers may mutate it without touching
        ``sys.argv`` itself (same behavior as the original element-by-element
        copy loop).
    """
    return list(sys.argv)
if __name__ == '__main__':
    # Initialize this directory as a git repo, wire up remotes and
    # submodules, then commit and push.
    argus = parseArgument()
    path =os.getcwd()
    count = 0
    # Count the command line arguments (argv[0] included).
    for k, v in enumerate(argus):
        # print k, v
        count = count + 1
    # NOTE(review): indentation was lost in this copy; the if/else is placed
    # after the counting loop, which matches the apparent intent (branch on
    # the final argument count) -- confirm against the original file.
    # NOTE(review): `count >2` is redundant given `count >=4`; this
    # effectively tests len(sys.argv) >= 4.
    if count >2 and count >=4 :
        # commit message note
        mes = argus[1]
        # repository group name
        store = argus[2]
        # project name
        name = argus[3]
    else:
        # commit message note (default); string literal kept byte-identical
        # -- it is mojibake of the Chinese for "no note".
        mes = "ๆฒกๆๅคๆณจ"
        # repository group name (default)
        store = "Garretming"
        # project name: default to the current directory's basename
        name = os.path.basename(path)
    # NOTE(review): this remote is added *before* `rm -rf .git` wipes the
    # repo, and the GitHub repo name/remote below are hard-coded, ignoring
    # the `store`/`name` computed above -- confirm intent.
    os.system('git remote add origin git@gitlab.com:' + store +'/' + name +'.git')
    os.system('rm -rf .git')
    os.system('git init')
    # Create the GitHub repository via the (deprecated) basic-auth API.
    os.system('curl -u Garretming -d \'{"name":"slg_server","description":"slg_server is a server for slg games"}\' https://api.github.com/user/repos')
    os.system('git remote add origin git@github.com:Garretming/slg_server.git')
    # os.system('git submodule add git@gitlab.com:Clark8/mj_server.git mj_server')
    os.system('git submodule add https://github.com/cloudwu/skynet.git skynet')
    os.system('git submodule add https://github.com/simongog/sdsl-lite.git 3rd/sdsl-lite')
    os.system('git submodule add https://github.com/driedfruit/jenkins-minimal-perfect-hash.git 3rd/perfect-hash')
    os.system('git submodule add https://github.com/mpx/lua-cjson.git 3rd/cjson')
    os.system('git submodule add https://github.com/cloudwu/pbc.git 3rd/pbc')
    os.system('git add .')
    # os.system('git pull --rebase')
    os.system('git commit -m ' + '\"' + mes + '\"')
    # os.system('git stash')
    os.system('git push -u origin master')
| 27.132353 | 151 | 0.593496 |
2221763952e6b59598e578d279ca59d1d17ca8a3 | 125,291 | py | Python | venv/lib/python3.6/site-packages/ansible_collections/netapp/ontap/plugins/modules/na_ontap_volume.py | usegalaxy-no/usegalaxy | 75dad095769fe918eb39677f2c887e681a747f3a | [
"MIT"
] | 1 | 2020-01-22T13:11:23.000Z | 2020-01-22T13:11:23.000Z | venv/lib/python3.6/site-packages/ansible_collections/netapp/ontap/plugins/modules/na_ontap_volume.py | usegalaxy-no/usegalaxy | 75dad095769fe918eb39677f2c887e681a747f3a | [
"MIT"
] | 12 | 2020-02-21T07:24:52.000Z | 2020-04-14T09:54:32.000Z | venv/lib/python3.6/site-packages/ansible_collections/netapp/ontap/plugins/modules/na_ontap_volume.py | usegalaxy-no/usegalaxy | 75dad095769fe918eb39677f2c887e681a747f3a | [
"MIT"
] | null | null | null | #!/usr/bin/python
# (c) 2018-2021, NetApp, Inc
# GNU General Public License v3.0+
# (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
'''
na_ontap_volume
'''
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'certified'}
DOCUMENTATION = '''
module: na_ontap_volume
short_description: NetApp ONTAP manage volumes.
extends_documentation_fragment:
- netapp.ontap.netapp.na_ontap
version_added: 2.6.0
author: NetApp Ansible Team (@carchi8py) <ng-ansibleteam@netapp.com>
description:
- Create or destroy or modify volumes on NetApp ONTAP.
options:
state:
description:
- Whether the specified volume should exist or not.
choices: ['present', 'absent']
type: str
default: 'present'
name:
description:
- The name of the volume to manage.
type: str
required: true
vserver:
description:
- Name of the vserver to use.
type: str
required: true
from_name:
description:
- Name of the existing volume to be renamed to name.
type: str
version_added: 2.7.0
is_infinite:
type: bool
description:
Set True if the volume is an Infinite Volume.
Deleting an infinite volume is asynchronous.
default: false
is_online:
type: bool
description:
- Whether the specified volume is online, or not.
default: True
aggregate_name:
description:
- The name of the aggregate the flexvol should exist on.
- Cannot be set when using the na_application_template option.
type: str
nas_application_template:
description:
- additional options when using the application/applications REST API to create a volume.
- the module is using ZAPI by default, and switches to REST if any suboption is present.
- create a FlexVol by default.
- create a FlexGroup if C(auto_provision_as) is set and C(FlexCache) option is not present.
- create a FlexCache if C(flexcache) option is present.
type: dict
version_added: 20.12.0
suboptions:
flexcache:
description: whether to create a flexcache. If absent, a FlexVol or FlexGroup is created.
type: dict
suboptions:
dr_cache:
description:
- whether to use the same flexgroup msid as the origin.
- requires ONTAP 9.9 and REST.
- create only option, ignored if the flexcache already exists.
type: bool
version_added: 21.3.0
origin_svm_name:
description: the remote SVM for the flexcache.
type: str
required: true
origin_component_name:
description: the remote component for the flexcache.
type: str
required: true
cifs_access:
description:
- The list of CIFS access controls. You must provide I(user_or_group) or I(access) to enable CIFS access.
type: list
elements: dict
suboptions:
access:
description: The CIFS access granted to the user or group. Default is full_control.
type: str
choices: [change, full_control, no_access, read]
user_or_group:
description: The name of the CIFS user or group that will be granted access. Default is Everyone.
type: str
nfs_access:
description:
- The list of NFS access controls. You must provide I(host) or I(access) to enable NFS access.
- Mutually exclusive with export_policy option in nas_application_template.
type: list
elements: dict
suboptions:
access:
description: The NFS access granted. Default is rw.
type: str
choices: [none, ro, rw]
host:
description: The name of the NFS entity granted access. Default is 0.0.0.0/0.
type: str
storage_service:
description:
- The performance service level (PSL) for this volume
type: str
choices: ['value', 'performance', 'extreme']
tiering:
description:
- Cloud tiering policy (see C(tiering_policy) for a more complete description).
type: dict
suboptions:
control:
description: Storage tiering placement rules for the container.
choices: ['required', 'best_effort', 'disallowed']
type: str
policy:
description:
- Cloud tiering policy (see C(tiering_policy)).
- Must match C(tiering_policy) if both are present.
choices: ['all', 'auto', 'none', 'snapshot-only']
type: str
object_stores:
description: list of object store names for tiering.
type: list
elements: str
exclude_aggregates:
description:
- The list of aggregate names to exclude when creating a volume.
- Requires ONTAP 9.9.1 GA or later.
type: list
elements: str
version_added: 21.7.0
use_nas_application:
description:
- Whether to use the application/applications REST/API to create a volume.
- This will default to true if any other suboption is present.
type: bool
default: true
size:
description:
- The size of the volume in (size_unit). Required when C(state=present).
type: int
size_unit:
description:
- The unit used to interpret the size parameter.
choices: ['bytes', 'b', 'kb', 'mb', 'gb', 'tb', 'pb', 'eb', 'zb', 'yb']
type: str
default: 'gb'
size_change_threshold:
description:
- Percentage in size change to trigger a resize.
- When this parameter is greater than 0, a difference in size between what is expected and what is configured is ignored if it is below the threshold.
- For instance, the nas application allocates a larger size than specified to account for overhead.
- Set this to 0 for an exact match.
type: int
default: 10
version_added: 20.12.0
sizing_method:
description:
- Represents the method to modify the size of a FlexGroup.
- use_existing_resources - Increases or decreases the size of the FlexGroup by increasing or decreasing the size of the current FlexGroup resources.
- add_new_resources - Increases the size of the FlexGroup by adding new resources. This is limited to two new resources per available aggregate.
- This is only supported if REST is enabled (ONTAP 9.6 or later) and only for FlexGroups. ONTAP defaults to use_existing_resources.
type: str
choices: ['add_new_resources', 'use_existing_resources']
version_added: 20.12.0
type:
description:
- The volume type, either read-write (RW) or data-protection (DP).
type: str
export_policy:
description:
- Name of the export policy.
- Mutually exclusive with nfs_access suboption in nas_application_template.
type: str
aliases: ['policy']
junction_path:
description:
- Junction path of the volume.
- To unmount, use junction path C('').
type: str
space_guarantee:
description:
- Space guarantee style for the volume.
choices: ['none', 'file', 'volume']
type: str
percent_snapshot_space:
description:
- Amount of space reserved for snapshot copies of the volume.
type: int
volume_security_style:
description:
- The security style associated with this volume.
choices: ['mixed', 'ntfs', 'unified', 'unix']
type: str
encrypt:
type: bool
description:
- Whether or not to enable Volume Encryption.
- If not present, ONTAP defaults to false at volume creation.
- Changing encrypt value after creation requires ONTAP 9.3 or later.
version_added: 2.7.0
efficiency_policy:
description:
- Allows a storage efficiency policy to be set on volume creation.
type: str
version_added: 2.7.0
unix_permissions:
description:
- Unix permission bits in octal or symbolic format.
    - For example, 0 is equivalent to ------------, 777 is equivalent to ---rwxrwxrwx, both formats are accepted.
- The valid octal value ranges between 0 and 777 inclusive.
type: str
version_added: 2.8.0
group_id:
description:
- The UNIX group ID for the volume. The default value is 0 ('root').
type: int
version_added: '20.1.0'
user_id:
description:
- The UNIX user ID for the volume. The default value is 0 ('root').
type: int
version_added: '20.1.0'
snapshot_policy:
description:
- The name of the snapshot policy.
- The default policy name is 'default'.
- If present, this will set the protection_type when using C(nas_application_template).
type: str
version_added: 2.8.0
aggr_list:
description:
- an array of names of aggregates to be used for FlexGroup constituents.
type: list
elements: str
version_added: 2.8.0
aggr_list_multiplier:
description:
- The number of times to iterate over the aggregates listed with the aggr_list parameter when creating a FlexGroup.
type: int
version_added: 2.8.0
auto_provision_as:
description:
- Automatically provision a FlexGroup volume.
version_added: 2.8.0
choices: ['flexgroup']
type: str
snapdir_access:
description:
- This is an advanced option, the default is False.
- Enable the visible '.snapshot' directory that is normally present at system internal mount points.
- This value also turns on access to all other '.snapshot' directories in the volume.
type: bool
version_added: 2.8.0
atime_update:
description:
- This is an advanced option, the default is True.
- If false, prevent the update of inode access times when a file is read.
- This value is useful for volumes with extremely high read traffic,
since it prevents writes to the inode file for the volume from contending with reads from other files.
- This field should be used carefully.
- That is, use this field when you know in advance that the correct access time for inodes will not be needed for files on that volume.
type: bool
version_added: 2.8.0
wait_for_completion:
description:
- Set this parameter to 'true' for synchronous execution during create (wait until volume status is online)
- Set this parameter to 'false' for asynchronous execution
- For asynchronous, execution exits as soon as the request is sent, without checking volume status
type: bool
default: false
version_added: 2.8.0
time_out:
description:
- time to wait for Flexgroup creation, modification, or deletion in seconds.
- Error out if task is not completed in defined time.
- if 0, the request is asynchronous.
- default is set to 3 minutes.
default: 180
type: int
version_added: 2.8.0
language:
description:
- Language to use for Volume
- Default uses SVM language
- Possible values Language
- c POSIX
- ar Arabic
- cs Czech
- da Danish
- de German
- en English
- en_us English (US)
- es Spanish
- fi Finnish
- fr French
- he Hebrew
- hr Croatian
- hu Hungarian
- it Italian
- ja Japanese euc-j
- ja_v1 Japanese euc-j
- ja_jp.pck Japanese PCK (sjis)
- ja_jp.932 Japanese cp932
- ja_jp.pck_v2 Japanese PCK (sjis)
- ko Korean
- no Norwegian
- nl Dutch
- pl Polish
- pt Portuguese
- ro Romanian
- ru Russian
- sk Slovak
- sl Slovenian
- sv Swedish
- tr Turkish
- zh Simplified Chinese
- zh.gbk Simplified Chinese (GBK)
- zh_tw Traditional Chinese euc-tw
- zh_tw.big5 Traditional Chinese Big 5
- To use UTF-8 as the NFS character set, append '.UTF-8' to the language code
type: str
version_added: 2.8.0
qos_policy_group:
description:
- Specifies a QoS policy group to be set on volume.
type: str
version_added: 2.9.0
qos_adaptive_policy_group:
description:
- Specifies a QoS adaptive policy group to be set on volume.
type: str
version_added: 2.9.0
tiering_policy:
description:
- The tiering policy that is to be associated with the volume.
- This policy decides whether the blocks of a volume will be tiered to the capacity tier.
- snapshot-only policy allows tiering of only the volume snapshot copies not associated with the active file system.
- auto policy allows tiering of both snapshot and active file system user data to the capacity tier.
- backup policy on DP volumes allows all transferred user data blocks to start in the capacity tier.
- all is the REST equivalent for backup.
- When set to none, the Volume blocks will not be tiered to the capacity tier.
- If no value specified, the volume is assigned snapshot only by default.
- Requires ONTAP 9.4 or later.
choices: ['snapshot-only', 'auto', 'backup', 'none', 'all']
type: str
version_added: 2.9.0
space_slo:
description:
- Specifies the space SLO type for the volume. The space SLO type is the Service Level Objective for space management for the volume.
- The space SLO value is used to enforce existing volume settings so that sufficient space is set aside on the aggregate to meet the space SLO.
- This parameter is not supported on Infinite Volumes.
choices: ['none', 'thick', 'semi-thick']
type: str
version_added: 2.9.0
nvfail_enabled:
description:
- If true, the controller performs additional work at boot and takeover times if it finds that there has been any potential data loss in the volume's
constituents due to an NVRAM failure.
- The volume's constituents would be put in a special state called 'in-nvfailed-state' such that protocol access is blocked.
- This will cause the client applications to crash and thus prevent access to stale data.
- To get out of this situation, the admin needs to manually clear the 'in-nvfailed-state' on the volume's constituents.
type: bool
version_added: 2.9.0
vserver_dr_protection:
description:
- Specifies the protection type for the volume in a Vserver DR setup.
choices: ['protected', 'unprotected']
type: str
version_added: 2.9.0
comment:
description:
- Sets a comment associated with the volume.
type: str
version_added: 2.9.0
snapshot_auto_delete:
description:
- A dictionary for the auto delete options and values.
- Supported options include 'state', 'commitment', 'trigger', 'target_free_space', 'delete_order', 'defer_delete',
'prefix', 'destroy_list'.
- Option 'state' determines if the snapshot autodelete is currently enabled for the volume. Possible values are 'on' and 'off'.
- Option 'commitment' determines the snapshots which snapshot autodelete is allowed to delete to get back space.
Possible values are 'try', 'disrupt' and 'destroy'.
- Option 'trigger' determines the condition which starts the automatic deletion of snapshots.
Possible values are 'volume', 'snap_reserve' and DEPRECATED 'space_reserve'.
- Option 'target_free_space' determines when snapshot autodelete should stop deleting snapshots. Depending on the trigger,
snapshots are deleted till we reach the target free space percentage. Accepts int type.
- Option 'delete_order' determines if the oldest or newest snapshot is deleted first. Possible values are 'newest_first' and 'oldest_first'.
- Option 'defer_delete' determines which kind of snapshots to delete in the end. Possible values are 'scheduled', 'user_created',
'prefix' and 'none'.
- Option 'prefix' can be set to provide the prefix string for the 'prefix' value of the 'defer_delete' option.
The prefix string length can be 15 char long.
    - Option 'destroy_list' is a comma separated list of services which can be destroyed if the snapshot backing that service is deleted.
For 7-mode, the possible values for this option are a combination of 'lun_clone', 'vol_clone', 'cifs_share', 'file_clone' or 'none'.
For cluster-mode, the possible values for this option are a combination of 'lun_clone,file_clone' (for LUN clone and/or file clone),
'lun_clone,sfsr' (for LUN clone and/or sfsr), 'vol_clone', 'cifs_share', or 'none'.
type: dict
version_added: '20.4.0'
cutover_action:
description:
- Specifies the action to be taken for cutover.
- Possible values are 'abort_on_failure', 'defer_on_failure', 'force' and 'wait'. Default is 'defer_on_failure'.
choices: ['abort_on_failure', 'defer_on_failure', 'force', 'wait']
type: str
version_added: '20.5.0'
check_interval:
description:
- The amount of time in seconds to wait between checks of a volume to see if it has moved successfully.
default: 30
type: int
version_added: '20.6.0'
from_vserver:
description:
- The source vserver of the volume is rehosted.
type: str
version_added: '20.6.0'
auto_remap_luns:
description:
- Flag to control automatic map of LUNs.
type: bool
version_added: '20.6.0'
force_unmap_luns:
description:
- Flag to control automatic unmap of LUNs.
type: bool
version_added: '20.6.0'
force_restore:
description:
- If this field is set to "true", the Snapshot copy is restored even if the volume has one or more newer Snapshot
copies which are currently used as reference Snapshot copy by SnapMirror. If a restore is done in this
situation, this will cause future SnapMirror transfers to fail.
- Option should only be used along with snapshot_restore.
type: bool
version_added: '20.6.0'
preserve_lun_ids:
description:
- If this field is set to "true", LUNs in the volume being restored will remain mapped and their identities
preserved such that host connectivity will not be disrupted during the restore operation. I/O's to the LUN will
be fenced during the restore operation by placing the LUNs in an unavailable state. Once the restore operation
has completed, hosts will be able to resume I/O access to the LUNs.
- Option should only be used along with snapshot_restore.
type: bool
version_added: '20.6.0'
snapshot_restore:
description:
- Name of snapshot to restore from.
- Not supported on Infinite Volume.
type: str
version_added: '20.6.0'
compression:
description:
- Whether to enable compression for the volume (HDD and Flash Pool aggregates).
- If this option is not present, it is automatically set to true if inline_compression is true.
type: bool
version_added: '20.12.0'
inline_compression:
description:
- Whether to enable inline compression for the volume (HDD and Flash Pool aggregates, AFF platforms).
type: bool
version_added: '20.12.0'
'''
EXAMPLES = """
- name: Create FlexVol
na_ontap_volume:
state: present
name: ansibleVolume12
is_infinite: False
aggregate_name: ansible_aggr
size: 100
size_unit: mb
user_id: 1001
group_id: 2002
space_guarantee: none
tiering_policy: auto
export_policy: default
percent_snapshot_space: 60
qos_policy_group: max_performance_gold
vserver: ansibleVServer
wait_for_completion: True
space_slo: none
nvfail_enabled: False
comment: ansible created volume
hostname: "{{ netapp_hostname }}"
username: "{{ netapp_username }}"
password: "{{ netapp_password }}"
- name: Volume Delete
na_ontap_volume:
state: absent
name: ansibleVolume12
aggregate_name: ansible_aggr
vserver: ansibleVServer
hostname: "{{ netapp_hostname }}"
username: "{{ netapp_username }}"
password: "{{ netapp_password }}"
- name: Make FlexVol offline
na_ontap_volume:
state: present
name: ansibleVolume
is_infinite: False
is_online: False
vserver: ansibleVServer
hostname: "{{ netapp_hostname }}"
username: "{{ netapp_username }}"
password: "{{ netapp_password }}"
- name: Create Flexgroup volume manually
na_ontap_volume:
state: present
name: ansibleVolume
is_infinite: False
aggr_list: "{{ aggr_list }}"
aggr_list_multiplier: 2
size: 200
size_unit: mb
space_guarantee: none
export_policy: default
vserver: "{{ vserver }}"
hostname: "{{ netapp_hostname }}"
username: "{{ netapp_username }}"
password: "{{ netapp_password }}"
https: False
unix_permissions: 777
snapshot_policy: default
time_out: 0
- name: Create Flexgroup volume auto provision as flex group
na_ontap_volume:
state: present
name: ansibleVolume
is_infinite: False
auto_provision_as: flexgroup
size: 200
size_unit: mb
space_guarantee: none
export_policy: default
vserver: "{{ vserver }}"
hostname: "{{ netapp_hostname }}"
username: "{{ netapp_username }}"
password: "{{ netapp_password }}"
https: False
unix_permissions: 777
snapshot_policy: default
time_out: 0
- name: Create FlexVol with QoS adaptive
na_ontap_volume:
state: present
name: ansibleVolume15
is_infinite: False
aggregate_name: ansible_aggr
size: 100
size_unit: gb
space_guarantee: none
export_policy: default
percent_snapshot_space: 10
qos_adaptive_policy_group: extreme
vserver: ansibleVServer
wait_for_completion: True
hostname: "{{ netapp_hostname }}"
username: "{{ netapp_username }}"
password: "{{ netapp_password }}"
- name: Modify volume dr protection (vserver of the volume must be in a snapmirror relationship)
na_ontap_volume:
state: present
name: ansibleVolume
vserver_dr_protection: protected
vserver: "{{ vserver }}"
hostname: "{{ netapp_hostname }}"
username: "{{ netapp_username }}"
password: "{{ netapp_password }}"
https: False
- name: Modify volume with snapshot auto delete options
na_ontap_volume:
state: present
name: vol_auto_delete
snapshot_auto_delete:
state: "on"
commitment: try
defer_delete: scheduled
target_free_space: 30
destroy_list: lun_clone,vol_clone
delete_order: newest_first
aggregate_name: "{{ aggr }}"
vserver: "{{ vserver }}"
hostname: "{{ netapp_hostname }}"
username: "{{ netapp_username }}"
password: "{{ netapp_password }}"
https: False
- name: Move volume with force cutover action
na_ontap_volume:
name: ansible_vol
aggregate_name: aggr_ansible
cutover_action: force
vserver: "{{ vserver }}"
hostname: "{{ netapp_hostname }}"
username: "{{ netapp_username }}"
password: "{{ netapp_password }}"
https: false
- name: Rehost volume to another vserver auto remap luns
na_ontap_volume:
name: ansible_vol
from_vserver: ansible
auto_remap_luns: true
vserver: "{{ vserver }}"
hostname: "{{ netapp_hostname }}"
username: "{{ netapp_username }}"
password: "{{ netapp_password }}"
https: false
- name: Rehost volume to another vserver force unmap luns
na_ontap_volume:
name: ansible_vol
from_vserver: ansible
force_unmap_luns: true
vserver: "{{ vserver }}"
hostname: "{{ netapp_hostname }}"
username: "{{ netapp_username }}"
password: "{{ netapp_password }}"
https: false
- name: Snapshot restore volume
na_ontap_volume:
name: ansible_vol
vserver: ansible
snapshot_restore: 2020-05-24-weekly
force_restore: true
preserve_lun_ids: true
hostname: "{{ netapp_hostname }}"
username: "{{ netapp_username }}"
password: "{{ netapp_password }}"
https: true
validate_certs: false
- name: Volume create using application/applications nas template
na_ontap_volume:
state: present
name: ansibleVolume12
vserver: ansibleSVM
size: 100000000
size_unit: b
space_guarantee: none
language: es
percent_snapshot_space: 60
unix_permissions: ---rwxrwxrwx
snapshot_policy: default
efficiency_policy: default
comment: testing
nas_application_template:
nfs_access: # the mere presence of a suboption is enough to enable this new feature
- access: ro
- access: rw
host: 10.0.0.0/8
exclude_aggregates: aggr0
hostname: "{{ netapp_hostname }}"
username: "{{ netapp_username }}"
password: "{{ netapp_password }}"
https: true
validate_certs: false
"""
RETURN = """
"""
import time
import traceback
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils._text import to_native
import ansible_collections.netapp.ontap.plugins.module_utils.netapp as netapp_utils
from ansible_collections.netapp.ontap.plugins.module_utils.netapp_module import NetAppModule
from ansible_collections.netapp.ontap.plugins.module_utils.rest_application import RestApplication
from ansible_collections.netapp.ontap.plugins.module_utils.netapp import OntapRestAPI
from ansible_collections.netapp.ontap.plugins.module_utils import rest_volume
from ansible_collections.netapp.ontap.plugins.module_utils import rest_generic
HAS_NETAPP_LIB = netapp_utils.has_netapp_lib()
class NetAppOntapVolume(object):
'''Class with volume operations'''
    def __init__(self):
        '''Initialize module parameters, select REST vs ZAPI, and set up connections.'''
        # Multipliers used to convert the user-supplied size into bytes.
        self._size_unit_map = dict(
            bytes=1,
            b=1,
            kb=1024,
            mb=1024 ** 2,
            gb=1024 ** 3,
            tb=1024 ** 4,
            pb=1024 ** 5,
            eb=1024 ** 6,
            zb=1024 ** 7,
            yb=1024 ** 8
        )
        # Start from the common ONTAP connection options and add module options.
        self.argument_spec = netapp_utils.na_ontap_host_argument_spec()
        self.argument_spec.update(dict(
            state=dict(required=False, type='str', choices=['present', 'absent'], default='present'),
            name=dict(required=True, type='str'),
            vserver=dict(required=True, type='str'),
            from_name=dict(required=False, type='str'),
            is_infinite=dict(required=False, type='bool', default=False),
            is_online=dict(required=False, type='bool', default=True),
            size=dict(type='int', default=None),
            size_unit=dict(default='gb', choices=['bytes', 'b', 'kb', 'mb', 'gb', 'tb', 'pb', 'eb', 'zb', 'yb'], type='str'),
            sizing_method=dict(choices=['add_new_resources', 'use_existing_resources'], type='str'),
            aggregate_name=dict(type='str', default=None),
            type=dict(type='str', default=None),
            export_policy=dict(type='str', default=None, aliases=['policy']),
            junction_path=dict(type='str', default=None),
            space_guarantee=dict(choices=['none', 'file', 'volume'], default=None),
            percent_snapshot_space=dict(type='int', default=None),
            volume_security_style=dict(choices=['mixed', 'ntfs', 'unified', 'unix']),
            encrypt=dict(required=False, type='bool'),
            efficiency_policy=dict(required=False, type='str'),
            unix_permissions=dict(required=False, type='str'),
            group_id=dict(required=False, type='int'),
            user_id=dict(required=False, type='int'),
            snapshot_policy=dict(required=False, type='str'),
            aggr_list=dict(required=False, type='list', elements='str'),
            aggr_list_multiplier=dict(required=False, type='int'),
            snapdir_access=dict(required=False, type='bool'),
            atime_update=dict(required=False, type='bool'),
            auto_provision_as=dict(choices=['flexgroup'], required=False, type='str'),
            wait_for_completion=dict(required=False, type='bool', default=False),
            time_out=dict(required=False, type='int', default=180),
            language=dict(type='str', required=False),
            qos_policy_group=dict(required=False, type='str'),
            qos_adaptive_policy_group=dict(required=False, type='str'),
            nvfail_enabled=dict(type='bool', required=False),
            space_slo=dict(type='str', required=False, choices=['none', 'thick', 'semi-thick']),
            tiering_policy=dict(type='str', required=False, choices=['snapshot-only', 'auto', 'backup', 'none', 'all']),
            vserver_dr_protection=dict(type='str', required=False, choices=['protected', 'unprotected']),
            comment=dict(type='str', required=False),
            snapshot_auto_delete=dict(type='dict', required=False),
            cutover_action=dict(required=False, type='str', choices=['abort_on_failure', 'defer_on_failure', 'force', 'wait']),
            check_interval=dict(required=False, type='int', default=30),
            from_vserver=dict(required=False, type='str'),
            auto_remap_luns=dict(required=False, type='bool'),
            force_unmap_luns=dict(required=False, type='bool'),
            force_restore=dict(required=False, type='bool'),
            compression=dict(required=False, type='bool'),
            inline_compression=dict(required=False, type='bool'),
            preserve_lun_ids=dict(required=False, type='bool'),
            snapshot_restore=dict(required=False, type='str'),
            # Suboptions for the application/applications REST template path.
            nas_application_template=dict(type='dict', options=dict(
                use_nas_application=dict(type='bool', default=True),
                exclude_aggregates=dict(type='list', elements='str'),
                flexcache=dict(type='dict', options=dict(
                    dr_cache=dict(type='bool'),
                    origin_svm_name=dict(required=True, type='str'),
                    origin_component_name=dict(required=True, type='str')
                )),
                cifs_access=dict(type='list', elements='dict', options=dict(
                    access=dict(type='str', choices=['change', 'full_control', 'no_access', 'read']),
                    user_or_group=dict(type='str')
                )),
                nfs_access=dict(type='list', elements='dict', options=dict(
                    access=dict(type='str', choices=['none', 'ro', 'rw']),
                    host=dict(type='str')
                )),
                storage_service=dict(type='str', choices=['value', 'performance', 'extreme']),
                tiering=dict(type='dict', options=dict(
                    control=dict(type='str', choices=['required', 'best_effort', 'disallowed']),
                    policy=dict(type='str', choices=['all', 'auto', 'none', 'snapshot-only']),
                    object_stores=dict(type='list', elements='str')  # create only
                ))
            )),
            size_change_threshold=dict(type='int', default=10),
        ))
        self.module = AnsibleModule(
            argument_spec=self.argument_spec,
            mutually_exclusive=[
                ['space_guarantee', 'space_slo'], ['auto_remap_luns', 'force_unmap_luns']
            ],
            supports_check_mode=True
        )
        self.na_helper = NetAppModule(self.module)
        self.parameters = self.na_helper.check_and_set_parameters(self.module)
        # Internal state used while creating/modifying the volume.
        self.volume_style = None
        self.volume_created = False
        self.issues = []
        # Maps of module option names to ZAPI sis (storage efficiency) fields,
        # for get and set requests respectively.
        self.sis_keys2zapi_get = dict(
            efficiency_policy='policy',
            compression='is-compression-enabled',
            inline_compression='is-inline-compression-enabled')
        self.sis_keys2zapi_set = dict(
            efficiency_policy='policy-name',
            compression='enable-compression',
            inline_compression='enable-inline-compression')
        # Normalize size to bytes using the selected unit.
        if self.parameters.get('size'):
            self.parameters['size'] = self.parameters['size'] * \
                self._size_unit_map[self.parameters['size_unit']]
        # Validate snapshot_auto_delete keys early, before any API calls.
        if 'snapshot_auto_delete' in self.parameters:
            for key in self.parameters['snapshot_auto_delete']:
                if key not in ['commitment', 'trigger', 'target_free_space', 'delete_order', 'defer_delete',
                               'prefix', 'destroy_list', 'state']:
                    self.module.fail_json(msg="snapshot_auto_delete option '%s' is not valid." % key)
        # Options that force a fallback to ZAPI when present.
        # NOTE(review): 'encrypt-destination' and 'snapdir-access-enabled' use
        # dashes and do not match any key in argument_spec (parameters use
        # underscores, e.g. 'snapdir_access'), so they can never trigger the
        # fallback — looks like a latent bug; confirm intended names.
        unsupported_rest_properties = ['atime_update',
                                       'cutover_action',
                                       'encrypt-destination',
                                       'force_restore',
                                       'nvfail_enabled',
                                       'preserve_lun_ids',
                                       'snapdir-access-enabled',
                                       'snapshot_auto_delete',
                                       'space_slo',
                                       'vserver_dr_protection']
        self.rest_api = OntapRestAPI(self.module)
        used_unsupported_rest_properties = [x for x in unsupported_rest_properties if x in self.parameters]
        self.use_rest, error = self.rest_api.is_rest(used_unsupported_rest_properties)
        if error is not None:
            self.module.fail_json(msg=error)
        # REST support is beta: only honor it when use_rest is 'always'.
        if self.use_rest and self.parameters['use_rest'].lower() == 'auto':
            self.module.warn(
                'Falling back to ZAPI as REST support for na_ontap_volume is in beta and use_rest: auto. Set use_rest: always to force REST.')
            self.use_rest = False
        if not self.use_rest:
            if HAS_NETAPP_LIB is False:
                self.module.fail_json(
                    msg="the python NetApp-Lib module is required")
            elif self.parameters.get('sizing_method'):
                self.module.fail_json(msg="sizing_method is not supported with ZAPI. It can only be used with REST")
            else:
                # Two ZAPI connections: one scoped to the vserver, one cluster-wide.
                self.server = netapp_utils.setup_na_ontap_zapi(
                    module=self.module, vserver=self.parameters['vserver'])
                self.cluster = netapp_utils.setup_na_ontap_zapi(module=self.module)
        if self.use_rest:
            self.rest_errors()
            # Enforce minimum ONTAP versions for REST-only options.
            ontap_97_options = ['nas_application_template']
            if not self.rest_api.meets_rest_minimum_version(self.use_rest, 9, 7) and any(x in self.parameters for x in ontap_97_options):
                self.module.fail_json(msg='Error: %s' % self.rest_api.options_require_ontap_version(ontap_97_options, version='9.7'))
            if not self.rest_api.meets_rest_minimum_version(self.use_rest, 9, 9) and\
                    self.na_helper.safe_get(self.parameters, ['nas_application_template', 'flexcache', 'dr_cache']) is not None:
                self.module.fail_json(msg='Error: %s' % self.rest_api.options_require_ontap_version('flexcache: dr_cache', version='9.9'))
        # REST API for application/applications if needed
        self.rest_app = self.setup_rest_application()
def setup_rest_application(self):
use_application_template = self.na_helper.safe_get(self.parameters, ['nas_application_template', 'use_nas_application'])
rest_app = None
if use_application_template:
# consistency checks
# tiering policy is duplicated, make sure values are matching
tiering_policy_nas = self.na_helper.safe_get(self.parameters, ['nas_application_template', 'tiering', 'policy'])
tiering_policy = self.na_helper.safe_get(self.parameters, ['tiering_policy'])
if tiering_policy_nas is not None and tiering_policy is not None and tiering_policy_nas != tiering_policy:
msg = 'Conflict: if tiering_policy and nas_application_template tiering policy are both set, they must match.'
msg += ' Found "%s" and "%s".' % (tiering_policy, tiering_policy_nas)
self.module.fail_json(msg=msg)
# aggregate_name will force a move if present
if self.parameters.get('aggregate_name') is not None:
msg = 'Conflict: aggregate_name is not supported when application template is enabled.'\
' Found: aggregate_name: %s' % self.parameters['aggregate_name']
self.module.fail_json(msg=msg)
nfs_access = self.na_helper.safe_get(self.parameters, ['nas_application_template', 'nfs_access'])
if nfs_access is not None and self.na_helper.safe_get(self.parameters, ['export_policy']) is not None:
msg = 'Conflict: export_policy option and nfs_access suboption in nas_application_template are mutually exclusive.'
self.module.fail_json(msg=msg)
rest_app = RestApplication(self.rest_api, self.parameters['vserver'], self.parameters['name'])
return rest_app
def volume_get_iter(self, vol_name=None):
"""
Return volume-get-iter query results
:param vol_name: name of the volume
:return: NaElement
"""
volume_info = netapp_utils.zapi.NaElement('volume-get-iter')
volume_attributes = netapp_utils.zapi.NaElement('volume-attributes')
volume_id_attributes = netapp_utils.zapi.NaElement('volume-id-attributes')
volume_id_attributes.add_new_child('name', vol_name)
volume_id_attributes.add_new_child('vserver', self.parameters['vserver'])
volume_attributes.add_child_elem(volume_id_attributes)
query = netapp_utils.zapi.NaElement('query')
query.add_child_elem(volume_attributes)
volume_info.add_child_elem(query)
try:
result = self.server.invoke_successfully(volume_info, True)
except netapp_utils.zapi.NaApiError as error:
self.module.fail_json(msg='Error fetching volume %s : %s'
% (self.parameters['name'], to_native(error)),
exception=traceback.format_exc())
return result
def get_application(self):
if self.rest_app:
app, error = self.rest_app.get_application_details('nas')
self.na_helper.fail_on_error(error)
# flatten component list
comps = self.na_helper.safe_get(app, ['nas', 'application_components'])
if comps:
comp = comps[0]
app['nas'].pop('application_components')
app['nas'].update(comp)
return app['nas']
return None
    def get_volume(self, vol_name=None):
        """
        Return details about the volume.

        Dispatches to REST when enabled, otherwise parses the ZAPI
        volume-get-iter record into a flat dict of module-level option names.
        :param vol_name: name of the volume; defaults to self.parameters['name']
        :return: Details about the volume. None if not found.
        :rtype: dict
        """
        result = None
        if vol_name is None:
            vol_name = self.parameters['name']
        if self.use_rest:
            return self.get_volume_rest(vol_name)
        volume_info = self.volume_get_iter(vol_name)
        # num-records > 0 means the volume exists; otherwise fall through and return None
        if self.na_helper.zapi_get_value(volume_info, ['num-records'], convert_to=int, default=0) > 0:
            # extract values from volume record
            attrs = dict(
                # The keys are used to index a result dictionary, values are read from a ZAPI object indexed by key_list.
                # If required is True, an error is reported if a key in key_list is not found.
                # I'm not sure there is much value in omitnone, but it preserves backward compatibility
                # If omitnone is absent or False, a None value is recorded, if True, the key is not set
                encrypt=dict(key_list=['encrypt'], convert_to=bool, omitnone=True),
                tiering_policy=dict(key_list=['volume-comp-aggr-attributes', 'tiering-policy'], omitnone=True),
                export_policy=dict(key_list=['volume-export-attributes', 'policy']),
                aggregate_name=dict(key_list=['volume-id-attributes', 'containing-aggregate-name']),
                flexgroup_uuid=dict(key_list=['volume-id-attributes', 'flexgroup-uuid']),
                instance_uuid=dict(key_list=['volume-id-attributes', 'instance-uuid']),
                junction_path=dict(key_list=['volume-id-attributes', 'junction-path'], default=''),
                style_extended=dict(key_list=['volume-id-attributes', 'style-extended']),
                type=dict(key_list=['volume-id-attributes', 'type'], omitnone=True),
                comment=dict(key_list=['volume-id-attributes', 'comment']),
                atime_update=dict(key_list=['volume-performance-attributes', 'is-atime-update-enabled'], convert_to=bool),
                qos_policy_group=dict(key_list=['volume-qos-attributes', 'policy-group-name']),
                qos_adaptive_policy_group=dict(key_list=['volume-qos-attributes', 'adaptive-policy-group-name']),
                # style is not present if the volume is still offline or of type: dp
                volume_security_style=dict(key_list=['volume-security-attributes', 'style'], omitnone=True),
                group_id=dict(key_list=['volume-security-attributes', 'volume-security-unix-attributes', 'group-id'], convert_to=int, omitnone=True),
                unix_permissions=dict(key_list=['volume-security-attributes', 'volume-security-unix-attributes', 'permissions'], required=True),
                user_id=dict(key_list=['volume-security-attributes', 'volume-security-unix-attributes', 'user-id'], convert_to=int, omitnone=True),
                snapdir_access=dict(key_list=['volume-snapshot-attributes', 'snapdir-access-enabled'], convert_to=bool),
                snapshot_policy=dict(key_list=['volume-snapshot-attributes', 'snapshot-policy'], omitnone=True),
                percent_snapshot_space=dict(key_list=['volume-space-attributes', 'percentage-snapshot-reserve'], convert_to=int, omitnone=True),
                size=dict(key_list=['volume-space-attributes', 'size'], required=True, convert_to=int),
                space_guarantee=dict(key_list=['volume-space-attributes', 'space-guarantee']),
                space_slo=dict(key_list=['volume-space-attributes', 'space-slo']),
                nvfail_enabled=dict(key_list=['volume-state-attributes', 'is-nvfail-enabled'], convert_to=bool),
                is_online=dict(key_list=['volume-state-attributes', 'state'], required=True, convert_to='bool_online'),
                vserver_dr_protection=dict(key_list=['volume-vserver-dr-protection-attributes', 'vserver-dr-protection']),
            )
            volume_attributes = self.na_helper.zapi_get_value(volume_info, ['attributes-list', 'volume-attributes'], required=True)
            result = dict(name=vol_name)
            self.na_helper.zapi_get_attrs(volume_attributes, attrs, result)
            # pick the uuid matching the volume style: instance uuid for flexvol,
            # flexgroup uuid for flexgroup/flexgroup-constituent, None otherwise
            if result['style_extended'] == 'flexvol':
                result['uuid'] = result['instance_uuid']
            elif result['style_extended'] is not None and result['style_extended'].startswith('flexgroup'):
                result['uuid'] = result['flexgroup_uuid']
            else:
                result['uuid'] = None
            # snapshot_auto_delete options
            auto_delete = dict()
            attrs = dict(
                commitment=dict(key_list=['volume-snapshot-autodelete-attributes', 'commitment']),
                defer_delete=dict(key_list=['volume-snapshot-autodelete-attributes', 'defer-delete']),
                delete_order=dict(key_list=['volume-snapshot-autodelete-attributes', 'delete-order']),
                destroy_list=dict(key_list=['volume-snapshot-autodelete-attributes', 'destroy-list']),
                is_autodelete_enabled=dict(key_list=['volume-snapshot-autodelete-attributes', 'is-autodelete-enabled'], convert_to=bool),
                prefix=dict(key_list=['volume-snapshot-autodelete-attributes', 'prefix']),
                target_free_space=dict(key_list=['volume-snapshot-autodelete-attributes', 'target-free-space'], convert_to=int),
                trigger=dict(key_list=['volume-snapshot-autodelete-attributes', 'trigger']),
            )
            self.na_helper.zapi_get_attrs(volume_attributes, attrs, auto_delete)
            # the module expresses the autodelete enable flag as state: on/off
            if auto_delete['is_autodelete_enabled'] is not None:
                auto_delete['state'] = 'on' if auto_delete['is_autodelete_enabled'] else 'off'
                del auto_delete['is_autodelete_enabled']
            result['snapshot_auto_delete'] = auto_delete
            self.get_efficiency_info(result)
        return result
def wrap_fail_json(self, msg, exception=None):
for issue in self.issues:
self.module.warn(issue)
if self.volume_created:
msg = 'Volume created with success, with missing attributes: ' + msg
self.module.fail_json(msg=msg, exception=exception)
    def create_nas_application_component(self):
        '''Build the application-component dict for the nas REST template.

        Requires name and size; merges storage_service, flexcache, tiering,
        qos and export_policy options into the component.
        '''
        required_options = ('name', 'size')
        for option in required_options:
            if self.parameters.get(option) is None:
                self.module.fail_json(msg='Error: "%s" is required to create nas application.' % option)
        application_component = dict(
            name=self.parameters['name'],
            total_size=self.parameters['size'],
            share_count=1,  # 1 is the maximum value for nas
            scale_out=(self.volume_style == 'flexgroup'),
        )
        name = self.na_helper.safe_get(self.parameters, ['nas_application_template', 'storage_service'])
        if name is not None:
            application_component['storage_service'] = dict(name=name)
        flexcache = self.na_helper.safe_get(self.parameters, ['nas_application_template', 'flexcache'])
        if flexcache is not None:
            application_component['flexcache'] = dict(
                origin=dict(
                    svm=dict(name=flexcache['origin_svm_name']),
                    component=dict(name=flexcache['origin_component_name'])
                )
            )
            # scale_out should be absent or set to True for FlexCache
            del application_component['scale_out']
            dr_cache = self.na_helper.safe_get(self.parameters, ['nas_application_template', 'flexcache', 'dr_cache'])
            if dr_cache is not None:
                application_component['flexcache']['dr_cache'] = dr_cache
        tiering = self.na_helper.safe_get(self.parameters, ['nas_application_template', 'tiering'])
        if tiering is not None or self.parameters.get('tiering_policy') is not None:
            application_component['tiering'] = {}
            if tiering is None:
                tiering = {}
            # top-level tiering_policy acts as a default when the template did not set one
            # (setup_rest_application already rejected mismatched duplicates)
            if 'policy' not in tiering:
                tiering['policy'] = self.parameters.get('tiering_policy')
            for attr in ('control', 'policy', 'object_stores'):
                value = tiering.get(attr)
                # object stores are plain names in the module but dicts in the REST body
                if attr == 'object_stores' and value is not None:
                    value = [dict(name=x) for x in value]
                if value is not None:
                    application_component['tiering'][attr] = value
        if self.parameters.get('qos_policy') is not None:
            application_component['qos'] = {
                "policy": {
                    "name": self.parameters['qos_policy'],
                }
            }
        if self.parameters.get('export_policy') is not None:
            application_component['export_policy'] = {
                "name": self.parameters['export_policy'],
            }
        return application_component
def create_volume_body(self):
'''Create body for nas template'''
nas = dict(application_components=[self.create_nas_application_component()])
value = self.na_helper.safe_get(self.parameters, ['snapshot_policy'])
if value is not None:
nas['protection_type'] = dict(local_policy=value)
for attr in ('nfs_access', 'cifs_access'):
value = self.na_helper.safe_get(self.parameters, ['nas_application_template', attr])
if value is not None:
# we expect value to be a list of dicts, with maybe some empty entries
value = self.na_helper.filter_out_none_entries(value)
if value:
nas[attr] = value
for attr in ('exclude_aggregates',):
values = self.na_helper.safe_get(self.parameters, ['nas_application_template', attr])
if values:
nas[attr] = [dict(name=name) for name in values]
return self.rest_app.create_application_body("nas", nas)
def create_nas_application(self):
'''Use REST application/applications nas template to create a volume'''
body, error = self.create_volume_body()
self.na_helper.fail_on_error(error)
response, error = self.rest_app.create_application(body)
self.na_helper.fail_on_error(error)
return response
    def create_volume(self):
        '''Create ONTAP volume.

        Dispatch order: nas application template (REST), plain REST, async
        ZAPI for flexgroups, then the synchronous volume-create ZAPI.
        Optionally polls until the volume reports online.
        '''
        if self.rest_app:
            return self.create_nas_application()
        if self.use_rest:
            return self.create_volume_rest()
        if self.volume_style == 'flexgroup':
            return self.create_volume_async()

        options = self.create_volume_options()
        volume_create = netapp_utils.zapi.NaElement.create_node_with_children('volume-create', **options)
        try:
            self.server.invoke_successfully(volume_create, enable_tunneling=True)
        except netapp_utils.zapi.NaApiError as error:
            size_msg = ' of size %s' % self.parameters['size'] if self.parameters.get('size') is not None else ''
            self.module.fail_json(msg='Error provisioning volume %s%s: %s'
                                  % (self.parameters['name'], size_msg, to_native(error)),
                                  exception=traceback.format_exc())
        self.ems_log_event("volume-create")

        if self.parameters.get('wait_for_completion'):
            # round off time_out: poll every 10 seconds up to time_out seconds
            retries = (self.parameters['time_out'] + 5) // 10
            is_online = None
            errors = []
            while not is_online and retries > 0:
                try:
                    current = self.get_volume()
                    is_online = None if current is None else current['is_online']
                except KeyError as err:
                    # get_volume may receive incomplete data as the volume is being created
                    errors.append(repr(err))
                if not is_online:
                    time.sleep(10)
                retries = retries - 1
            if not is_online:
                # report every error seen while polling, plus the timeout itself
                errors.append("Timeout after %s seconds" % self.parameters['time_out'])
                self.module.fail_json(msg='Error waiting for volume %s to come online: %s'
                                      % (self.parameters['name'], str(errors)))
        return None
def create_volume_async(self):
'''
create volume async.
'''
options = self.create_volume_options()
volume_create = netapp_utils.zapi.NaElement.create_node_with_children('volume-create-async', **options)
if self.parameters.get('aggr_list'):
aggr_list_obj = netapp_utils.zapi.NaElement('aggr-list')
volume_create.add_child_elem(aggr_list_obj)
for aggr in self.parameters['aggr_list']:
aggr_list_obj.add_new_child('aggr-name', aggr)
try:
result = self.server.invoke_successfully(volume_create, enable_tunneling=True)
self.ems_log_event("volume-create")
except netapp_utils.zapi.NaApiError as error:
size_msg = ' of size %s' % self.parameters['size'] if self.parameters.get('size') is not None else ''
self.module.fail_json(msg='Error provisioning volume %s%s: %s'
% (self.parameters['name'], size_msg, to_native(error)),
exception=traceback.format_exc())
self.check_invoke_result(result, 'create')
return None
def create_volume_options(self):
'''Set volume options for create operation'''
options = {}
if self.volume_style == 'flexgroup':
options['volume-name'] = self.parameters['name']
if self.parameters.get('aggr_list_multiplier') is not None:
options['aggr-list-multiplier'] = str(self.parameters['aggr_list_multiplier'])
if self.parameters.get('auto_provision_as') is not None:
options['auto-provision-as'] = self.parameters['auto_provision_as']
if self.parameters.get('space_guarantee') is not None:
options['space-guarantee'] = self.parameters['space_guarantee']
else:
options['volume'] = self.parameters['name']
if self.parameters.get('aggregate_name') is None:
self.module.fail_json(msg='Error provisioning volume %s: aggregate_name is required'
% self.parameters['name'])
options['containing-aggr-name'] = self.parameters['aggregate_name']
if self.parameters.get('space_guarantee') is not None:
options['space-reserve'] = self.parameters['space_guarantee']
if self.parameters.get('size') is not None:
options['size'] = str(self.parameters['size'])
if self.parameters.get('snapshot_policy') is not None:
options['snapshot-policy'] = self.parameters['snapshot_policy']
if self.parameters.get('unix_permissions') is not None:
options['unix-permissions'] = self.parameters['unix_permissions']
if self.parameters.get('group_id') is not None:
options['group-id'] = str(self.parameters['group_id'])
if self.parameters.get('user_id') is not None:
options['user-id'] = str(self.parameters['user_id'])
if self.parameters.get('volume_security_style') is not None:
options['volume-security-style'] = self.parameters['volume_security_style']
if self.parameters.get('export_policy') is not None:
options['export-policy'] = self.parameters['export_policy']
if self.parameters.get('junction_path') is not None:
options['junction-path'] = self.parameters['junction_path']
if self.parameters.get('comment') is not None:
options['volume-comment'] = self.parameters['comment']
if self.parameters.get('type') is not None:
options['volume-type'] = self.parameters['type']
if self.parameters.get('percent_snapshot_space') is not None:
options['percentage-snapshot-reserve'] = str(self.parameters['percent_snapshot_space'])
if self.parameters.get('language') is not None:
options['language-code'] = self.parameters['language']
if self.parameters.get('qos_policy_group') is not None:
options['qos-policy-group-name'] = self.parameters['qos_policy_group']
if self.parameters.get('qos_adaptive_policy_group') is not None:
options['qos-adaptive-policy-group-name'] = self.parameters['qos_adaptive_policy_group']
if self.parameters.get('nvfail_enabled') is not None:
options['is-nvfail-enabled'] = str(self.parameters['nvfail_enabled'])
if self.parameters.get('space_slo') is not None:
options['space-slo'] = self.parameters['space_slo']
if self.parameters.get('tiering_policy') is not None:
options['tiering-policy'] = self.parameters['tiering_policy']
if self.parameters.get('encrypt') is not None:
options['encrypt'] = self.na_helper.get_value_for_bool(False, self.parameters['encrypt'], 'encrypt')
if self.parameters.get('vserver_dr_protection') is not None:
options['vserver-dr-protection'] = self.parameters['vserver_dr_protection']
if self.parameters['is_online']:
options['volume-state'] = 'online'
else:
options['volume-state'] = 'offline'
return options
def rest_unmount_volume(self, uuid, current):
"""
Unmount the volume using REST PATCH method.
"""
response = None
if current.get('junction_path'):
body = dict(nas=dict(path=''))
response, error = rest_volume.patch_volume(self.rest_api, uuid, body)
self.na_helper.fail_on_error(error)
return response
def rest_delete_volume(self, current):
"""
Delete the volume using REST DELETE method (it scrubs better than ZAPI).
"""
uuid = self.parameters['uuid']
if uuid is None:
self.module.fail_json(msg='Could not read UUID for volume %s' % self.parameters['name'])
self.rest_unmount_volume(uuid, current)
response, error = rest_volume.delete_volume(self.rest_api, uuid)
self.na_helper.fail_on_error(error)
return response
    def delete_volume(self, current):
        '''Delete ONTAP volume.

        Uses REST when enabled and the UUID is known.  For infinite volumes
        and flexgroups the volume must be taken offline first and the async
        destroy ZAPI is used; flexvols use volume-destroy with
        unmount-and-offline so no separate offline step is needed.
        '''
        if self.use_rest and self.parameters['uuid'] is not None:
            return self.rest_delete_volume(current)
        if self.parameters.get('is_infinite') or self.volume_style == 'flexgroup':
            if current['is_online']:
                # async destroy requires the volume to be offline already
                self.change_volume_state(call_from_delete_vol=True)
            volume_delete = netapp_utils.zapi.NaElement.create_node_with_children(
                'volume-destroy-async', **{'volume-name': self.parameters['name']})
        else:
            volume_delete = netapp_utils.zapi.NaElement.create_node_with_children(
                'volume-destroy', **{'name': self.parameters['name'], 'unmount-and-offline': 'true'})
        try:
            result = self.server.invoke_successfully(volume_delete, enable_tunneling=True)
            if self.parameters.get('is_infinite') or self.volume_style == 'flexgroup':
                # async ZAPI returns a job; wait/validate its completion
                self.check_invoke_result(result, 'delete')
            self.ems_log_event("volume-delete")
        except netapp_utils.zapi.NaApiError as error:
            self.module.fail_json(msg='Error deleting volume %s: %s'
                                  % (self.parameters['name'], to_native(error)),
                                  exception=traceback.format_exc())
    def move_volume(self, encrypt_destination=None):
        '''Move volume from source aggregate to destination aggregate.

        :param encrypt_destination: when set, requests (or disables) encryption
            of the destination copy.
        '''
        if self.use_rest:
            return self.move_volume_rest()
        volume_move = netapp_utils.zapi.NaElement.create_node_with_children(
            'volume-move-start', **{'source-volume': self.parameters['name'],
                                    'vserver': self.parameters['vserver'],
                                    'dest-aggr': self.parameters['aggregate_name']})
        if self.parameters.get('cutover_action'):
            volume_move.add_new_child('cutover-action', self.parameters['cutover_action'])
        if encrypt_destination is not None:
            volume_move.add_new_child('encrypt-destination', self.na_helper.get_value_for_bool(False, encrypt_destination))
        try:
            # volume move is a cluster-scoped operation
            self.cluster.invoke_successfully(volume_move,
                                             enable_tunneling=True)
            self.ems_log_event("volume-move")
        except netapp_utils.zapi.NaApiError as error:
            # on ZAPI failure, retry through the REST CLI passthrough (helps for MDV volumes).
            # NOTE(review): when REST is unavailable the retry returns False (not None),
            # so this still fails with the original ZAPI error as intended.
            rest_error = self.move_volume_with_rest_passthrough(encrypt_destination)
            if rest_error is not None:
                self.module.fail_json(msg='Error moving volume %s: %s - Retry failed with REST error: %s'
                                      % (self.parameters['name'], to_native(error), rest_error),
                                      exception=traceback.format_exc())
        if self.parameters.get('wait_for_completion'):
            self.wait_for_volume_move()
    def move_volume_with_rest_passthrough(self, encrypt_destination=None):
        '''Retry a volume move through the REST CLI passthrough.

        Returns False when REST is not in use (caller then reports the
        original ZAPI error), None on success, or the REST error otherwise.
        '''
        # MDV volume will fail on a move, but will work using the REST CLI pass through
        # vol move start -volume MDV_CRS_d6b0b313ff5611e9837100a098544e51_A -destination-aggregate data_a3 -vserver wmc66-a
        # if REST isn't available fail with the original error
        if not self.use_rest:
            return False
        # if REST exists let's try moving using the passthrough CLI
        api = 'private/cli/volume/move/start'
        body = {'destination-aggregate': self.parameters['aggregate_name'],
                }
        if encrypt_destination is not None:
            body['encrypt-destination'] = encrypt_destination
        query = {'volume': self.parameters['name'],
                 'vserver': self.parameters['vserver']
                 }
        dummy, error = self.rest_api.patch(api, body, query)
        return error
def check_volume_move_state(self, result):
volume_move_status = result.get_child_by_name('attributes-list').get_child_by_name('volume-move-info').get_child_content('state')
# We have 5 states that can be returned.
# warning and healthy are state where the move is still going so we don't need to do anything for thouse.
if volume_move_status == 'done':
return False
if volume_move_status in ['failed', 'alert']:
self.module.fail_json(msg='Error moving volume %s: %s' %
(self.parameters['name'], result.get_child_by_name('attributes-list').get_child_by_name('volume-move-info')
.get_child_by_name('details')))
return True
def wait_for_volume_move(self):
volume_move_iter = netapp_utils.zapi.NaElement('volume-move-get-iter')
volume_move_info = netapp_utils.zapi.NaElement('volume-move-info')
volume_move_info.add_new_child('volume', self.parameters['name'])
query = netapp_utils.zapi.NaElement('query')
query.add_child_elem(volume_move_info)
volume_move_iter.add_child_elem(query)
error = self.wait_for_task_completion(volume_move_iter, self.check_volume_move_state)
if error:
self.module.fail_json(msg='Error getting volume move status: %s' % (to_native(error)),
exception=traceback.format_exc())
def check_volume_encryption_conversion_state(self, result):
volume_encryption_conversion_status = result.get_child_by_name('attributes-list').get_child_by_name('volume-encryption-conversion-info')\
.get_child_content('status')
if volume_encryption_conversion_status == 'running':
return True
if volume_encryption_conversion_status == 'Not currently going on.':
return False
self.module.fail_json(msg='Error converting encryption for volume %s: %s' %
(self.parameters['name'], volume_encryption_conversion_status))
def wait_for_volume_encryption_conversion(self):
volume_encryption_conversion_iter = netapp_utils.zapi.NaElement('volume-encryption-conversion-get-iter')
volume_encryption_conversion_info = netapp_utils.zapi.NaElement('volume-encryption-conversion-info')
volume_encryption_conversion_info.add_new_child('volume', self.parameters['name'])
volume_encryption_conversion_info.add_new_child('vserver', self.parameters['vserver'])
query = netapp_utils.zapi.NaElement('query')
query.add_child_elem(volume_encryption_conversion_info)
volume_encryption_conversion_iter.add_child_elem(query)
error = self.wait_for_task_completion(volume_encryption_conversion_iter, self.check_volume_encryption_conversion_state)
if error:
self.module.fail_json(msg='Error getting volume encryption_conversion status: %s' % (to_native(error)),
exception=traceback.format_exc())
    def wait_for_task_completion(self, zapi_iter, check_state):
        '''Poll a ZAPI iterator until check_state reports completion.

        :param zapi_iter: the get-iter NaElement request to poll with.
        :param check_state: callable(result) -> bool; True means keep waiting.
        :return: None on completion (or when no records are found), or the
            NaApiError after 3 consecutive polling failures.
        '''
        waiting = True
        fail_count = 0
        while waiting:
            try:
                result = self.cluster.invoke_successfully(zapi_iter, enable_tunneling=True)
            except netapp_utils.zapi.NaApiError as error:
                # tolerate up to 3 consecutive transient failures before giving up
                if fail_count < 3:
                    fail_count += 1
                    time.sleep(self.parameters['check_interval'])
                    continue
                return error
            # no records: the task is gone (completed or never started)
            if int(result.get_child_content('num-records')) == 0:
                return None
            # reset fail count to 0 after any successful poll
            fail_count = 0
            waiting = check_state(result)
            if waiting:
                time.sleep(self.parameters['check_interval'])
def rename_volume(self):
"""
Rename the volume.
Note: 'is_infinite' needs to be set to True in order to rename an
Infinite Volume. Use time_out parameter to set wait time for rename completion.
"""
if self.use_rest:
return self.rename_volume_rest()
vol_rename_zapi, vol_name_zapi = ['volume-rename-async', 'volume-name'] if self.parameters['is_infinite']\
else ['volume-rename', 'volume']
volume_rename = netapp_utils.zapi.NaElement.create_node_with_children(
vol_rename_zapi, **{vol_name_zapi: self.parameters['from_name'],
'new-volume-name': str(self.parameters['name'])})
try:
result = self.server.invoke_successfully(volume_rename, enable_tunneling=True)
if vol_rename_zapi == 'volume-rename-async':
self.check_invoke_result(result, 'rename')
self.ems_log_event("volume-rename")
except netapp_utils.zapi.NaApiError as error:
self.module.fail_json(msg='Error renaming volume %s: %s'
% (self.parameters['name'], to_native(error)),
exception=traceback.format_exc())
def resize_volume(self):
"""
Re-size the volume.
Note: 'is_infinite' needs to be set to True in order to resize an
Infinite Volume.
"""
if self.use_rest:
return self.resize_volume_rest()
vol_size_zapi, vol_name_zapi = ['volume-size-async', 'volume-name']\
if (self.parameters['is_infinite'] or self.volume_style == 'flexgroup')\
else ['volume-size', 'volume']
volume_resize = netapp_utils.zapi.NaElement.create_node_with_children(
vol_size_zapi, **{vol_name_zapi: self.parameters['name'],
'new-size': str(self.parameters['size'])})
try:
result = self.server.invoke_successfully(volume_resize, enable_tunneling=True)
if vol_size_zapi == 'volume-size-async':
self.check_invoke_result(result, 'resize')
self.ems_log_event("volume-resize")
except netapp_utils.zapi.NaApiError as error:
self.module.fail_json(msg='Error re-sizing volume %s: %s'
% (self.parameters['name'], to_native(error)),
exception=traceback.format_exc())
return None
def start_encryption_conversion(self, encrypt_destination):
if encrypt_destination:
if self.rest_api:
return self.encryption_conversion_rest()
zapi = netapp_utils.zapi.NaElement.create_node_with_children(
'volume-encryption-conversion-start', **{'volume': self.parameters['name']})
try:
self.server.invoke_successfully(zapi, enable_tunneling=True)
self.ems_log_event("volume-encryption-conversion-start")
except netapp_utils.zapi.NaApiError as error:
self.module.fail_json(msg='Error enabling encryption for volume %s: %s'
% (self.parameters['name'], to_native(error)),
exception=traceback.format_exc())
if self.parameters.get('wait_for_completion'):
self.wait_for_volume_encryption_conversion()
else:
self.module.warn('disabling encryption requires cluster admin permissions.')
self.move_volume(encrypt_destination)
    def change_volume_state(self, call_from_delete_vol=False):
        """
        Change volume's state (offline/online).

        When taking a volume offline (or when called from delete), the volume
        is unmounted first.  Unmount and state-change errors are accumulated
        and reported together.
        :param call_from_delete_vol: force the offline path regardless of the
            desired is_online setting (used before an async destroy).
        """
        if self.use_rest:
            return self.change_volume_state_rest()
        if self.parameters['is_online'] and not call_from_delete_vol:    # Desired state is online, setup zapi APIs respectively
            vol_state_zapi, vol_name_zapi, action = ['volume-online-async', 'volume-name', 'online']\
                if (self.parameters['is_infinite'] or self.volume_style == 'flexgroup')\
                else ['volume-online', 'name', 'online']
        else:    # Desired state is offline, setup zapi APIs respectively
            vol_state_zapi, vol_name_zapi, action = ['volume-offline-async', 'volume-name', 'offline']\
                if (self.parameters['is_infinite'] or self.volume_style == 'flexgroup')\
                else ['volume-offline', 'name', 'offline']
        volume_unmount = netapp_utils.zapi.NaElement.create_node_with_children(
            'volume-unmount', **{'volume-name': self.parameters['name']})
        volume_change_state = netapp_utils.zapi.NaElement.create_node_with_children(
            vol_state_zapi, **{vol_name_zapi: self.parameters['name']})
        errors = []
        if not self.parameters['is_online'] or call_from_delete_vol:    # Unmount before offline
            try:
                self.server.invoke_successfully(volume_unmount, enable_tunneling=True)
            except netapp_utils.zapi.NaApiError as error:
                # keep going: the state change may still succeed; report at the end
                errors.append('Error unmounting volume %s: %s' % (self.parameters['name'], to_native(error)))
        try:
            result = self.server.invoke_successfully(volume_change_state, enable_tunneling=True)
            if self.volume_style == 'flexgroup' or self.parameters['is_infinite']:
                # async ZAPI returns a job; wait/validate its completion
                self.check_invoke_result(result, action)
            self.ems_log_event("change-state")
        except netapp_utils.zapi.NaApiError as error:
            state = "online" if self.parameters['is_online'] and not call_from_delete_vol else "offline"
            errors.append('Error changing the state of volume %s to %s: %s' % (self.parameters['name'], state, to_native(error)))
            self.module.fail_json(msg=', '.join(errors),
                                  exception=traceback.format_exc())
def create_volume_attribute(self, zapi_object, parent_attribute, attribute, value):
"""
:param parent_attribute:
:param child_attribute:
:param value:
:return:
"""
if isinstance(parent_attribute, str):
vol_attribute = netapp_utils.zapi.NaElement(parent_attribute)
vol_attribute.add_new_child(attribute, value)
zapi_object.add_child_elem(vol_attribute)
else:
zapi_object.add_new_child(attribute, value)
parent_attribute.add_child_elem(zapi_object)
    def volume_modify_attributes(self, params):
        """
        Modify volume attributes through volume-modify-iter[-async] (ZAPI) or REST.

        Builds one volume-attributes tree with only the sub-attributes that are
        configured, plus a query selecting the volume by name, then invokes the
        iter API and inspects the failure/success lists in the reply.

        :param params: the computed modify dict; a few fields (encrypt,
            volume_security_style, tiering_policy) are only sent when present in it
        """
        if self.use_rest:
            return self.volume_modify_attributes_rest(params)
        # flexgroup and infinite volumes must use the async flavor of the iter API
        if self.volume_style == 'flexgroup' or self.parameters['is_infinite']:
            vol_mod_iter = netapp_utils.zapi.NaElement('volume-modify-iter-async')
        else:
            vol_mod_iter = netapp_utils.zapi.NaElement('volume-modify-iter')
        attributes = netapp_utils.zapi.NaElement('attributes')
        vol_mod_attributes = netapp_utils.zapi.NaElement('volume-attributes')
        # Volume-attributes is split in to 25 sub categories
        if params and 'encrypt' in params:
            vol_mod_attributes.add_new_child('encrypt', self.na_helper.get_value_for_bool(False, self.parameters['encrypt']))
        # volume-space-attributes
        # note: the container is only attached to vol_mod_attributes by
        # create_volume_attribute when at least one of its fields is set
        vol_space_attributes = netapp_utils.zapi.NaElement('volume-space-attributes')
        if self.parameters.get('space_guarantee') is not None:
            self.create_volume_attribute(vol_space_attributes, vol_mod_attributes,
                                         'space-guarantee', self.parameters['space_guarantee'])
        if self.parameters.get('percent_snapshot_space') is not None:
            self.create_volume_attribute(vol_space_attributes, vol_mod_attributes,
                                         'percentage-snapshot-reserve', str(self.parameters['percent_snapshot_space']))
        if self.parameters.get('space_slo') is not None:
            self.create_volume_attribute(vol_space_attributes, vol_mod_attributes, 'space-slo', self.parameters['space_slo'])
        # volume-snapshot-attributes
        vol_snapshot_attributes = netapp_utils.zapi.NaElement('volume-snapshot-attributes')
        if self.parameters.get('snapshot_policy') is not None:
            self.create_volume_attribute(vol_snapshot_attributes, vol_mod_attributes,
                                         'snapshot-policy', self.parameters['snapshot_policy'])
        if self.parameters.get('snapdir_access') is not None:
            self.create_volume_attribute(vol_snapshot_attributes, vol_mod_attributes,
                                         'snapdir-access-enabled',
                                         self.na_helper.get_value_for_bool(False, self.parameters['snapdir_access'], 'snapdir_access'))
        # volume-export-attributes
        if self.parameters.get('export_policy') is not None:
            self.create_volume_attribute(vol_mod_attributes, 'volume-export-attributes',
                                         'policy', self.parameters['export_policy'])
        # volume-security-attributes
        if self.parameters.get('unix_permissions') is not None or self.parameters.get('group_id') is not None or self.parameters.get('user_id') is not None:
            vol_security_attributes = netapp_utils.zapi.NaElement('volume-security-attributes')
            vol_security_unix_attributes = netapp_utils.zapi.NaElement('volume-security-unix-attributes')
            if self.parameters.get('unix_permissions') is not None:
                self.create_volume_attribute(vol_security_unix_attributes, vol_security_attributes,
                                             'permissions', self.parameters['unix_permissions'])
            if self.parameters.get('group_id') is not None:
                self.create_volume_attribute(vol_security_unix_attributes, vol_security_attributes,
                                             'group-id', str(self.parameters['group_id']))
            if self.parameters.get('user_id') is not None:
                self.create_volume_attribute(vol_security_unix_attributes, vol_security_attributes,
                                             'user-id', str(self.parameters['user_id']))
            vol_mod_attributes.add_child_elem(vol_security_attributes)
        if params and params.get('volume_security_style') is not None:
            self.create_volume_attribute(vol_mod_attributes, 'volume-security-attributes',
                                         'style', self.parameters['volume_security_style'])
        # volume-performance-attributes
        if self.parameters.get('atime_update') is not None:
            self.create_volume_attribute(vol_mod_attributes, 'volume-performance-attributes',
                                         'is-atime-update-enabled', self.na_helper.get_value_for_bool(False, self.parameters['atime_update'], 'atime_update'))
        # volume-qos-attributes
        if self.parameters.get('qos_policy_group') is not None:
            self.create_volume_attribute(vol_mod_attributes, 'volume-qos-attributes',
                                         'policy-group-name', self.parameters['qos_policy_group'])
        if self.parameters.get('qos_adaptive_policy_group') is not None:
            self.create_volume_attribute(vol_mod_attributes, 'volume-qos-attributes',
                                         'adaptive-policy-group-name', self.parameters['qos_adaptive_policy_group'])
        # volume-comp-aggr-attributes
        if params and params.get('tiering_policy') is not None:
            self.create_volume_attribute(vol_mod_attributes, 'volume-comp-aggr-attributes',
                                         'tiering-policy', self.parameters['tiering_policy'])
        # volume-state-attributes
        if self.parameters.get('nvfail_enabled') is not None:
            self.create_volume_attribute(vol_mod_attributes, 'volume-state-attributes', 'is-nvfail-enabled', str(self.parameters['nvfail_enabled']))
        # volume-dr-protection-attributes
        if self.parameters.get('vserver_dr_protection') is not None:
            self.create_volume_attribute(vol_mod_attributes, 'volume-vserver-dr-protection-attributes',
                                         'vserver-dr-protection', self.parameters['vserver_dr_protection'])
        # volume-id-attributes
        if self.parameters.get('comment') is not None:
            self.create_volume_attribute(vol_mod_attributes, 'volume-id-attributes',
                                         'comment', self.parameters['comment'])
        # End of Volume-attributes sub attributes
        attributes.add_child_elem(vol_mod_attributes)
        # query: select the volume to modify by name
        query = netapp_utils.zapi.NaElement('query')
        vol_query_attributes = netapp_utils.zapi.NaElement('volume-attributes')
        self.create_volume_attribute(vol_query_attributes, 'volume-id-attributes',
                                     'name', self.parameters['name'])
        query.add_child_elem(vol_query_attributes)
        vol_mod_iter.add_child_elem(attributes)
        vol_mod_iter.add_child_elem(query)
        try:
            result = self.server.invoke_successfully(vol_mod_iter, enable_tunneling=True)
        except netapp_utils.zapi.NaApiError as error:
            error_msg = to_native(error)
            # tiering is only supported on more recent ONTAP versions
            if 'volume-comp-aggr-attributes' in error_msg:
                error_msg += ". Added info: tiering option requires 9.4 or later."
            self.wrap_fail_json(msg='Error modifying volume %s: %s'
                                    % (self.parameters['name'], error_msg),
                                exception=traceback.format_exc())
        self.ems_log_event("volume-modify")
        failures = result.get_child_by_name('failure-list')
        # handle error if modify space, policy, or unix-permissions parameter fails
        if failures is not None:
            error_msgs = [
                failures.get_child_by_name(return_info).get_child_content(
                    'error-message'
                )
                for return_info in (
                    'volume-modify-iter-info',
                    'volume-modify-iter-async-info',
                )
                if failures.get_child_by_name(return_info) is not None
            ]
            if error_msgs and any(x is not None for x in error_msgs):
                self.wrap_fail_json(msg="Error modifying volume %s: %s"
                                        % (self.parameters['name'], ' --- '.join(error_msgs)),
                                    exception=traceback.format_exc())
        if self.volume_style == 'flexgroup' or self.parameters['is_infinite']:
            # async flavor: the modification runs as a job; wait for it unless time_out is 0
            success = result.get_child_by_name('success-list')
            success = success.get_child_by_name('volume-modify-iter-async-info')
            results = {}
            for key in ('status', 'jobid'):
                if success and success.get_child_by_name(key):
                    results[key] = success[key]
            status = results.get('status')
            if status == 'in_progress' and 'jobid' in results:
                if self.parameters['time_out'] == 0:
                    return
                error = self.check_job_status(results['jobid'])
                if error is None:
                    return
                self.wrap_fail_json(msg='Error when modifying volume: %s' % error)
            self.wrap_fail_json(msg='Unexpected error when modifying volume: result is: %s' % str(result.to_string()))
def volume_mount(self):
"""
Mount an existing volume in specified junction_path
:return: None
"""
if self.use_rest:
return self.volume_mount_rest()
vol_mount = netapp_utils.zapi.NaElement('volume-mount')
vol_mount.add_new_child('volume-name', self.parameters['name'])
vol_mount.add_new_child('junction-path', self.parameters['junction_path'])
try:
self.server.invoke_successfully(vol_mount, enable_tunneling=True)
except netapp_utils.zapi.NaApiError as error:
self.module.fail_json(msg='Error mounting volume %s on path %s: %s'
% (self.parameters['name'], self.parameters['junction_path'],
to_native(error)), exception=traceback.format_exc())
def volume_unmount(self):
"""
Unmount an existing volume
:return: None
"""
if self.use_rest:
return self.volume_unmount_rest()
vol_unmount = netapp_utils.zapi.NaElement.create_node_with_children(
'volume-unmount', **{'volume-name': self.parameters['name']})
try:
self.server.invoke_successfully(vol_unmount, enable_tunneling=True)
except netapp_utils.zapi.NaApiError as error:
self.module.fail_json(msg='Error unmounting volume %s: %s'
% (self.parameters['name'], to_native(error)), exception=traceback.format_exc())
def modify_volume(self, modify):
'''Modify volume action'''
attributes = modify.keys()
# order matters here, if both is_online and mount in modify, must bring the volume online first.
if 'is_online' in attributes:
self.change_volume_state()
for attribute in attributes:
if attribute in ['space_guarantee', 'export_policy', 'unix_permissions', 'group_id', 'user_id', 'tiering_policy',
'snapshot_policy', 'percent_snapshot_space', 'snapdir_access', 'atime_update', 'volume_security_style',
'nvfail_enabled', 'space_slo', 'qos_policy_group', 'qos_adaptive_policy_group', 'vserver_dr_protection', 'comment']:
self.volume_modify_attributes(modify)
break
if 'snapshot_auto_delete' in attributes and not self.use_rest:
# Rest doesn't support any snapshot_auto_delete option other than is_autodelete_enabled. For now i've completely
# disabled this in rest
self.set_snapshot_auto_delete()
if 'junction_path' in attributes:
if modify.get('junction_path') == '':
self.volume_unmount()
else:
self.volume_mount()
if 'size' in attributes:
self.resize_volume()
if 'aggregate_name' in attributes:
# keep it last, as it may take some time
# handle change in encryption as part of the move
self.move_volume(self.parameters.get('encrypt'))
elif 'encrypt' in attributes:
self.start_encryption_conversion(self.parameters['encrypt'])
def compare_chmod_value(self, current):
"""
compare current unix_permissions to desire unix_permissions.
:return: True if the same, False it not the same or desire unix_permissions is not valid.
"""
desire = self.parameters
if current is None:
return False
unix_permissions = desire['unix_permissions']
if unix_permissions.isdigit():
return int(current['unix_permissions']) == int(unix_permissions)
if len(unix_permissions) != 12:
return False
if unix_permissions[:3] != '---':
return False
octal_value = ''
for i in range(3, len(unix_permissions), 3):
if unix_permissions[i] not in ['r', '-'] or unix_permissions[i + 1] not in ['w', '-']\
or unix_permissions[i + 2] not in ['x', '-']:
return False
group_permission = self.char_to_octal(unix_permissions[i:i + 3])
octal_value += str(group_permission)
return int(current['unix_permissions']) == int(octal_value)
def char_to_octal(self, chars):
"""
:param chars: Characters to be converted into octal values.
:return: octal value of the individual group permission.
"""
total = 0
if chars[0] == 'r':
total += 4
if chars[1] == 'w':
total += 2
if chars[2] == 'x':
total += 1
return total
def get_volume_style(self, current):
'''Get volume style, infinite or standard flexvol'''
if current is not None:
return current.get('style_extended')
if self.parameters.get('aggr_list') or self.parameters.get('aggr_list_multiplier') or self.parameters.get('auto_provision_as'):
if self.use_rest and self.parameters.get('auto_provision_as') and not self.parameters['aggr_list_multiplier']:
self.parameters['aggr_list_multiplier'] = 1
return 'flexgroup'
return None
    def get_job(self, jobid, server):
        """
        Get job details by id using the job-get ZAPI on the given server connection.

        :param jobid: job identifier to look up
        :param server: ZAPI connection to query (may differ from self.server,
            e.g. when the job is owned by the cluster vserver)
        :return: dict with 'job-progress', 'job-state' and 'job-completion'
            (None when absent), or None when the job does not exist
        """
        job_get = netapp_utils.zapi.NaElement('job-get')
        job_get.add_new_child('job-id', jobid)
        try:
            result = server.invoke_successfully(job_get, enable_tunneling=True)
        except netapp_utils.zapi.NaApiError as error:
            # error code 15661: object not found - report as 'no such job'
            if to_native(error.code) == "15661":
                # Not found
                return None
            self.wrap_fail_json(msg='Error fetching job info: %s' % to_native(error),
                                exception=traceback.format_exc())
        job_info = result.get_child_by_name('attributes').get_child_by_name('job-info')
        results = {
            'job-progress': job_info['job-progress'],
            'job-state': job_info['job-state']
        }
        # job-completion is only present once the job has finished
        if job_info.get_child_by_name('job-completion') is not None:
            results['job-completion'] = job_info['job-completion']
        else:
            results['job-completion'] = None
        return results
    def check_job_status(self, jobid):
        """
        Loop until job is complete, polling every 5 seconds up to
        self.parameters['time_out'] seconds.

        :param jobid: job identifier to poll
        :return: None on success, or an error string (job completion/progress
            message, 'timeout', or 'cannot locate job ...')
        """
        server = self.server
        sleep_time = 5
        time_out = self.parameters['time_out']
        results = self.get_job(jobid, server)
        error = 'timeout'
        while time_out > 0:
            results = self.get_job(jobid, server)
            # If running as cluster admin, the job is owned by cluster vserver
            # rather than the target vserver.
            if results is None and server == self.server:
                # NOTE(review): 'results' is reused here to hold the cserver name
                # before being passed to setup_na_ontap_zapi - intentional but fragile.
                results = netapp_utils.get_cserver(self.server)
                server = netapp_utils.setup_na_ontap_zapi(module=self.module, vserver=results)
                continue
            if results is None:
                error = 'cannot locate job with id: %d' % int(jobid)
                break
            if results['job-state'] in ('queued', 'running'):
                # still running: wait and decrement the remaining time budget
                time.sleep(sleep_time)
                time_out -= sleep_time
                continue
            if results['job-state'] in ('success', 'failure'):
                break
            else:
                self.wrap_fail_json(msg='Unexpected job status in: %s' % repr(results))
        if results is not None:
            if results['job-state'] == 'success':
                error = None
            elif results['job-state'] in ('queued', 'running'):
                error = 'job completion exceeded expected timer of: %s seconds' % \
                        self.parameters['time_out']
            elif results['job-completion'] is not None:
                error = results['job-completion']
            else:
                error = results['job-progress']
        return error
def check_invoke_result(self, result, action):
'''
check invoked api call back result.
'''
results = {}
for key in ('result-status', 'result-jobid'):
if result.get_child_by_name(key):
results[key] = result[key]
status = results.get('result-status')
if status == 'in_progress' and 'result-jobid' in results:
if self.parameters['time_out'] == 0:
return
error = self.check_job_status(results['result-jobid'])
if error is None:
return
else:
self.wrap_fail_json(msg='Error when %s volume: %s' % (action, error))
if status == 'failed':
self.wrap_fail_json(msg='Operation failed when %s volume.' % action)
def set_efficiency_attributes(self, options):
for key, attr in self.sis_keys2zapi_set.items():
value = self.parameters.get(key)
if value is not None:
if self.argument_spec[key]['type'] == 'bool':
value = self.na_helper.get_value_for_bool(False, value)
options[attr] = value
# ZAPI requires compression to be set for inline-compression
if options.get('enable-inline-compression') == 'true' and 'enable-compression' not in options:
options['enable-compression'] = 'true'
def set_efficiency_config(self):
'''Set efficiency policy and compression attributes'''
options = {'path': '/vol/' + self.parameters['name']}
efficiency_enable = netapp_utils.zapi.NaElement.create_node_with_children('sis-enable', **options)
try:
self.server.invoke_successfully(efficiency_enable, enable_tunneling=True)
except netapp_utils.zapi.NaApiError as error:
# Error 40043 denotes an Operation has already been enabled.
if to_native(error.code) != "40043":
self.wrap_fail_json(msg='Error enable efficiency on volume %s: %s'
% (self.parameters['name'], to_native(error)),
exception=traceback.format_exc())
self.set_efficiency_attributes(options)
efficiency_start = netapp_utils.zapi.NaElement.create_node_with_children('sis-set-config', **options)
try:
self.server.invoke_successfully(efficiency_start, enable_tunneling=True)
except netapp_utils.zapi.NaApiError as error:
self.wrap_fail_json(msg='Error setting up efficiency attributes on volume %s: %s'
% (self.parameters['name'], to_native(error)),
exception=traceback.format_exc())
def set_efficiency_config_async(self):
"""Set efficiency policy and compression attributes in asynchronous mode"""
options = {'volume-name': self.parameters['name']}
efficiency_enable = netapp_utils.zapi.NaElement.create_node_with_children('sis-enable-async', **options)
try:
result = self.server.invoke_successfully(efficiency_enable, enable_tunneling=True)
except netapp_utils.zapi.NaApiError as error:
self.wrap_fail_json(msg='Error enable efficiency on volume %s: %s'
% (self.parameters['name'], to_native(error)),
exception=traceback.format_exc())
self.check_invoke_result(result, 'enable efficiency on')
self.set_efficiency_attributes(options)
efficiency_start = netapp_utils.zapi.NaElement.create_node_with_children('sis-set-config-async', **options)
try:
result = self.server.invoke_successfully(efficiency_start, enable_tunneling=True)
except netapp_utils.zapi.NaApiError as error:
self.wrap_fail_json(msg='Error setting up efficiency attributes on volume %s: %s'
% (self.parameters['name'], to_native(error)),
exception=traceback.format_exc())
self.check_invoke_result(result, 'set efficiency policy on')
    def get_efficiency_info(self, return_value):
        """
        get the name of the efficiency policy assigned to volume, as well as compression values
        if attribute does not exist, set its value to None

        :param return_value: dict updated in place with one entry per key in
            self.sis_keys2zapi_get (initialized to None, then filled from the reply)
        :return: update return_value dict.
        """
        sis_info = netapp_utils.zapi.NaElement('sis-get-iter')
        sis_status_info = netapp_utils.zapi.NaElement('sis-status-info')
        sis_status_info.add_new_child('path', '/vol/' + self.parameters['name'])
        query = netapp_utils.zapi.NaElement('query')
        query.add_child_elem(sis_status_info)
        sis_info.add_child_elem(query)
        try:
            result = self.server.invoke_successfully(sis_info, True)
        except netapp_utils.zapi.NaApiError as error:
            # Don't error out if efficiency settings cannot be read. We'll fail if they need to be set.
            if error.message.startswith('Insufficient privileges: user ') and error.message.endswith(' does not have read access to this resource'):
                self.issues.append('cannot read volume efficiency options (as expected when running as vserver): %s' % to_native(error))
                return
            self.wrap_fail_json(msg='Error fetching efficiency policy for volume %s : %s'
                                    % (self.parameters['name'], to_native(error)),
                                exception=traceback.format_exc())
        # default every efficiency key to None, then overwrite with reported values
        for key in self.sis_keys2zapi_get:
            return_value[key] = None
        if result.get_child_by_name('num-records') and int(result.get_child_content('num-records')) >= 1:
            sis_attributes = result.get_child_by_name('attributes-list'). get_child_by_name('sis-status-info')
            for key, attr in self.sis_keys2zapi_get.items():
                value = sis_attributes.get_child_content(attr)
                # ZAPI reports booleans as 'true'/'false' strings; convert them back
                if self.argument_spec[key]['type'] == 'bool':
                    value = self.na_helper.get_value_for_bool(True, value)
                return_value[key] = value
def modify_volume_efficiency_config(self, efficiency_config_modify_value):
if self.use_rest:
return self.set_efficiency_rest()
if efficiency_config_modify_value == 'async':
self.set_efficiency_config_async()
else:
self.set_efficiency_config()
def set_snapshot_auto_delete(self):
options = {'volume': self.parameters['name']}
desired_options = self.parameters['snapshot_auto_delete']
for key, value in desired_options.items():
options['option-name'] = key
options['option-value'] = str(value)
snapshot_auto_delete = netapp_utils.zapi.NaElement.create_node_with_children('snapshot-autodelete-set-option', **options)
try:
self.server.invoke_successfully(snapshot_auto_delete, enable_tunneling=True)
except netapp_utils.zapi.NaApiError as error:
self.wrap_fail_json(msg='Error setting snapshot auto delete options for volume %s: %s'
% (self.parameters['name'], to_native(error)),
exception=traceback.format_exc())
def rehost_volume(self):
if self.use_rest:
self.module.fail_json(msg='ONTAP Rest API does not support Rehosting Volumes')
volume_rehost = netapp_utils.zapi.NaElement.create_node_with_children(
'volume-rehost', **{'vserver': self.parameters['from_vserver'],
'destination-vserver': self.parameters['vserver'],
'volume': self.parameters['name']})
if self.parameters.get('auto_remap_luns') is not None:
volume_rehost.add_new_child('auto-remap-luns', str(self.parameters['auto_remap_luns']))
if self.parameters.get('force_unmap_luns') is not None:
volume_rehost.add_new_child('force-unmap-luns', str(self.parameters['force_unmap_luns']))
try:
self.cluster.invoke_successfully(volume_rehost, enable_tunneling=True)
self.ems_log_event("volume-rehost")
except netapp_utils.zapi.NaApiError as error:
self.module.fail_json(msg='Error rehosting volume %s: %s'
% (self.parameters['name'], to_native(error)),
exception=traceback.format_exc())
def snapshot_restore_volume(self):
if self.use_rest:
return self.snapshot_restore_volume_rest()
snapshot_restore = netapp_utils.zapi.NaElement.create_node_with_children(
'snapshot-restore-volume', **{'snapshot': self.parameters['snapshot_restore'],
'volume': self.parameters['name']})
if self.parameters.get('force_restore') is not None:
snapshot_restore.add_new_child('force', str(self.parameters['force_restore']))
if self.parameters.get('preserve_lun_ids') is not None:
snapshot_restore.add_new_child('preserve-lun-ids', str(self.parameters['preserve_lun_ids']))
try:
self.server.invoke_successfully(snapshot_restore, enable_tunneling=True)
self.ems_log_event("snapshot-restore-volume")
except netapp_utils.zapi.NaApiError as error:
self.module.fail_json(msg='Error restoring volume %s: %s'
% (self.parameters['name'], to_native(error)),
exception=traceback.format_exc())
def adjust_size(self, current, after_create):
"""
ignore small change in size by resetting expectations
"""
if after_create:
# ignore change in size immediately after a create:
self.parameters['size'] = current['size']
elif self.parameters['size_change_threshold'] > 0:
if 'size' in current and current['size'] != 0 and self.parameters.get('size') is not None:
# ignore a less than XX% difference
resize = abs(current['size'] - self.parameters['size']) * 100.0 / current['size']
if resize < self.parameters['size_change_threshold']:
self.parameters['size'] = current['size']
if resize > 0.1:
self.module.warn('resize request ignored: %.1f%% is below the threshold: %d%%' % (resize, self.parameters['size_change_threshold']))
    def set_modify_dict(self, current, after_create=False):
        '''Fill modify dict with changes

        :param current: current volume attributes; 'snapshot_auto_delete' is popped
            out and diffed separately (its value is a dict)
        :param after_create: passed through to adjust_size
        :return: dict of attributes to change
        '''
        # snapshot_auto_delete's value is a dict, get_modified_attributes function doesn't support dict as value.
        auto_delete_info = current.pop('snapshot_auto_delete', None)
        # ignore small changes in size by adjusting self.parameters['size']
        self.adjust_size(current, after_create)
        modify = self.na_helper.get_modified_attributes(current, self.parameters)
        # changing type (rw/dp) would require destroying and recreating the volume
        if modify is not None and 'type' in modify:
            msg = "Error: changing a volume from one type to another is not allowed."
            msg += ' Current: %s, desired: %s.' % (current['type'], self.parameters['type'])
            self.module.fail_json(msg=msg)
        # same for the backend style (flexvol vs flexgroup)
        desired_style = self.get_volume_style(None)
        if desired_style is not None and desired_style != self.volume_style:
            msg = "Error: changing a volume from one backend to another is not allowed."
            msg += ' Current: %s, desired: %s.' % (self.volume_style, desired_style)
            self.module.fail_json(msg=msg)
        if self.parameters.get('snapshot_auto_delete') is not None:
            auto_delete_modify = self.na_helper.get_modified_attributes(auto_delete_info,
                                                                        self.parameters['snapshot_auto_delete'])
            if len(auto_delete_modify) > 0:
                # NOTE(review): assumes get_modified_attributes returned a dict here
                # (the earlier 'is not None' check suggests it can be None) - confirm
                modify['snapshot_auto_delete'] = auto_delete_modify
        return modify
def take_modify_actions(self, modify):
if modify.get('is_online'):
# when moving to online, include parameters that get does not return when volume is offline
for field in ['volume_security_style', 'group_id', 'user_id', 'percent_snapshot_space']:
if self.parameters.get(field) is not None:
modify[field] = self.parameters[field]
self.modify_volume(modify)
if any(modify.get(key) is not None for key in self.sis_keys2zapi_get):
if self.parameters.get('is_infinite') or self.volume_style == 'flexgroup':
efficiency_config_modify = 'async'
else:
efficiency_config_modify = 'sync'
self.modify_volume_efficiency_config(efficiency_config_modify)
""" MAPPING OF VOLUME FIELDS FROM ZAPI TO REST
ZAPI = REST
encrypt = encryption.enabled
volume-comp-aggr-attributes.tiering-policy = tiering.policy
'volume-export-attributes.policy' = nas.export_policy.name
'volume-id-attributes.containing-aggregate-name' = aggregates.name
'volume-id-attributes.flexgroup-uuid' = uuid (Only for FlexGroup volumes)
'volume-id-attributes.instance-uuid' = uuid (Only for FlexVols)
'volume-id-attributes.junction-path' = nas.path
'volume-id-attributes.style-extended' = style
'volume-id-attributes.type' = type
'volume-id-attributes.comment' = comment
'volume-performance-attributes.is-atime-update-enabled' == NO REST VERSION
volume-qos-attributes.policy-group-name' = qos.policy.name
'volume-qos-attributes.adaptive-policy-group-name' = qos.policy.name
'volume-security-attributes.style = nas.security_style
volume-security-attributes.volume-security-unix-attributes.group-id' = nas.gid
'volume-security-attributes.volume-security-unix-attributes.permissions' = nas.unix_permissions
'volume-security-attributes.volume-security-unix-attributes.user-id' = nas.uid
'volume-snapshot-attributes.snapdir-access-enabled' == NO REST VERSION
    'volume-snapshot-attributes.snapshot-policy' = snapshot_policy
volume-space-attributes.percentage-snapshot-reserve = space.snapshot.reserve_percent
volume-space-attributes.size' = space.size
'volume-space-attributes.space-guarantee' = guarantee.type
volume-space-attributes.space-slo' == NO REST VERSION
'volume-state-attributes.is-nvfail-enabled' == NO REST Version
'volume-state-attributes.state' = state
    'volume-vserver-dr-protection-attributes.vserver-dr-protection' == NO REST Version
volume-snapshot-autodelete-attributes.* None exist other than space.snapshot.autodelete_enabled
From get_efficiency_info function
efficiency_policy = efficiency.policy.name
compression = efficiency.compression
inline_compression = efficiency.compression
"""
def get_volume_rest(self, vol_name):
"""
This covers the zapi functions
get_volume
- volume_get_iter
- get_efficiency_info
"""
api = 'storage/volumes'
params = {'name': vol_name,
'svm.name': self.parameters['vserver'],
'fields': 'encryption.enabled,'
'tiering.policy,'
'nas.export_policy.name,'
'aggregates.name,'
'uuid,'
'nas.path,'
'style,'
'type,'
'comment,'
'qos.policy.name,'
'nas.security_style,'
'nas.gid,'
'nas.unix_permissions,'
'nas.uid,'
'snapshot_policy,'
'space.snapshot.reserve_percent,'
'space.size,'
'guarantee.type,'
'state,'
'efficiency.policy.name,'
'efficiency.compression'}
record, error = rest_generic.get_one_record(self.rest_api, api, params)
if error:
self.module.fail_json(msg=error)
if record:
return self.format_get_volume_rest(record)
return None
def rename_volume_rest(self):
# volume-rename-async and volume-rename are the same in rest
# Zapi you had to give the old and new name to change a volume.
# Rest you need the old UUID, and the new name only
current = self.get_volume_rest(self.parameters['from_name'])
body = {
'name': self.parameters['name']
}
dummy, error = self.volume_rest_patch(body, uuid=current['uuid'])
if error:
self.module.fail_json(msg='Error changing name of volume %s: %s' % (self.parameters['name'], to_native(error)),
exception=traceback.format_exc())
def snapshot_restore_volume_rest(self):
# Rest does not have force_restore or preserve_lun_id
current = self.get_volume()
self.parameters['uuid'] = current['uuid']
body = {
'restore_to.snapshot.name': self.parameters['snapshot_restore']
}
dummy, error = self.volume_rest_patch(body)
if error:
self.module.fail_json(msg='Error restoring snapshot %s in volume %s: %s' % (
self.parameters['snapshot_restore'],
self.parameters['name'],
to_native(error)), exception=traceback.format_exc())
def create_volume_rest(self):
body = self.create_volume_body_rest()
dummy, error = rest_generic.post_async(self.rest_api, 'storage/volumes', body, job_timeout=120)
if error:
self.module.fail_json(msg='Error creating volume %s: %s' % (self.parameters['name'], to_native(error)),
exception=traceback.format_exc())
def create_volume_body_rest(self):
body = {
'name': self.parameters['name'],
'svm.name': self.parameters['vserver']
}
# Zapi's Space-guarantee and space-reserve are the same thing in Rest
if self.parameters.get('space_guarantee') is not None:
body['guarantee.type'] = self.parameters['space_guarantee']
# TODO: Check to see if there a difference in rest between flexgroup or not. might need to throw error
body = self.aggregates_rest(body)
if self.parameters.get('size') is not None:
body['size'] = self.parameters['size']
if self.parameters.get('snapshot_policy') is not None:
body['snapshot_policy.name'] = self.parameters['snapshot_policy']
if self.parameters.get('unix_permissions') is not None:
body['nas.unix_permissions'] = self.parameters['unix_permissions']
if self.parameters.get('group_id') is not None:
body['nas.gid'] = self.parameters['group_id']
if self.parameters.get('user_id') is not None:
body['nas.uid'] = self.parameters['user_id']
if self.parameters.get('volume_security_style') is not None:
body['nas.security_style'] = self.parameters['volume_security_style']
if self.parameters.get('export_policy') is not None:
body['nas.export_policy.name'] = self.parameters['export_policy']
if self.parameters.get('junction_path') is not None:
body['nas.path'] = self.parameters['junction_path']
if self.parameters.get('comment') is not None:
body['comment'] = self.parameters['comment']
if self.parameters.get('type') is not None:
body['type'] = self.parameters['type']
if self.parameters.get('percent_snapshot_space') is not None:
body['space.snapshot.reserve_percent'] = self.parameters['percent_snapshot_space']
if self.parameters.get('language') is not None:
body['language'] = self.parameters['language']
if self.get_qos_policy_group() is not None:
body['qos.policy.name'] = self.get_qos_policy_group()
if self.parameters.get('tiering_policy') is not None:
body['tiering.policy'] = self.parameters['tiering_policy']
if self.parameters.get('encrypt') is not None:
body['encryption.enabled'] = self.parameters['encrypt']
body['state'] = 'online' if self.parameters['is_online'] else 'offline'
return body
def aggregates_rest(self, body):
if self.parameters.get('aggregate_name') is not None:
body['aggregates'] = [{'name': self.parameters['aggregate_name']}]
if self.parameters.get('aggr_list') is not None:
body['aggregates'] = [{'name': name} for name in self.parameters['aggr_list']]
if self.parameters.get('aggr_list_multiplier') is not None:
body['constituents_per_aggregate'] = self.parameters['aggr_list_multiplier']
return body
def volume_modify_attributes_rest(self, params):
body = self.modify_volume_body_rest(params)
dummy, error = self.volume_rest_patch(body)
if error:
self.module.fail_json(msg='Error modifying volume %s: %s' % (self.parameters['name'], to_native(error)),
exception=traceback.format_exc())
def modify_volume_body_rest(self, params):
body = {}
if self.parameters.get('space_guarantee') is not None:
body['guarantee.type'] = self.parameters['space_guarantee']
if self.parameters.get('percent_snapshot_space') is not None:
body['space.snapshot.reserve_percent'] = self.parameters['percent_snapshot_space']
if self.parameters.get('snapshot_policy') is not None:
body['snapshot_policy.name'] = self.parameters['snapshot_policy']
if self.parameters.get('export_policy') is not None:
body['nas.export_policy.name'] = self.parameters['export_policy']
if self.parameters.get('unix_permissions') is not None:
body['nas.unix_permissions'] = self.parameters['unix_permissions']
if self.parameters.get('group_id') is not None:
body['nas.gid'] = self.parameters['group_id']
if self.parameters.get('user_id') is not None:
body['nas.uid'] = self.parameters['user_id']
if params and params.get('volume_security_style') is not None:
body['nas.security_style'] = self.parameters['volume_security_style']
if self.get_qos_policy_group() is not None:
body['qos.policy.name'] = self.get_qos_policy_group()
if params and params.get('tiering_policy') is not None:
body['tiering.policy'] = self.parameters['tiering_policy']
# TODO: Check if this work. The Zapi to Rest doc dosn't metion it. The Rest API example though show it exists
if self.parameters.get('comment') is not None:
body['comment'] = self.parameters['comment']
return body
def change_volume_state_rest(self):
# TODO: check if call_from_delete_vol is needed in rest
body = {
'state': 'online' if self.parameters['is_online'] else 'offline',
'name': self.parameters['name'],
}
dummy, error = self.volume_rest_patch(body)
if error:
self.module.fail_json(msg='Error changing state of volume %s: %s' % (self.parameters['name'],
to_native(error)),
exception=traceback.format_exc())
def volume_unmount_rest(self):
    """Unmount the volume by clearing its junction path."""
    # A null nas.path removes the junction, which unmounts the volume.
    dummy, error = self.volume_rest_patch({'nas.path': None})
    if error:
        self.module.fail_json(msg='Error unmounting volume %s: %s' % (self.parameters['name'], to_native(error)),
                              exception=traceback.format_exc())
def volume_mount_rest(self):
    """Mount the volume at the configured junction path."""
    body = {'nas.path': self.parameters['junction_path']}
    dummy, error = self.volume_rest_patch(body)
    if error:
        self.module.fail_json(msg='Error mounting volume %s: %s' % (self.parameters['name'], to_native(error)),
                              exception=traceback.format_exc())
def set_efficiency_rest(self):
    """Apply efficiency policy and compression settings through REST."""
    body = {}
    policy = self.parameters.get('efficiency_policy')
    if policy is not None:
        body['efficiency.policy.name'] = policy
    compression = self.get_compression()
    if compression:
        body['efficiency.compression'] = compression
    dummy, error = self.volume_rest_patch(body)
    if error:
        self.module.fail_json(msg='Error set efficiency for volume %s: %s' % (self.parameters['name'], to_native(error)),
                              exception=traceback.format_exc())
def encryption_conversion_rest(self):
    """Start converting the volume to an encrypted volume.

    Setting 'encryption.enabled' to True is the REST equivalent of the
    ZAPI volume-encryption-conversion-start operation.
    """
    dummy, error = self.volume_rest_patch({'encryption.enabled': True})
    if error:
        self.module.fail_json(msg='Error enabling encryption for volume %s: %s' % (self.parameters['name'], to_native(error)),
                              exception=traceback.format_exc())
def resize_volume_rest(self):
    """Resize the volume; sizing_method (if set) is passed as a query parameter."""
    sizing_method = self.parameters.get('sizing_method')
    query = {'sizing_method': sizing_method} if sizing_method is not None else None
    dummy, error = self.volume_rest_patch({'size': self.parameters['size']}, query)
    if error:
        self.module.fail_json(msg='Error resizing volume %s: %s' % (self.parameters['name'], to_native(error)),
                              exception=traceback.format_exc())
def move_volume_rest(self):
    """Move the volume to the requested destination aggregate."""
    body = {'movement.destination_aggregate.name': self.parameters['aggregate_name']}
    dummy, error = self.volume_rest_patch(body)
    if error:
        self.module.fail_json(msg='Error moving volume %s: %s' % (self.parameters['name'], to_native(error)),
                              exception=traceback.format_exc())
def volume_rest_patch(self, body, query=None, uuid=None):
    """PATCH storage/volumes/<uuid>, defaulting to this volume's own uuid.

    Returns the (response, error) tuple produced by the generic REST helper.
    """
    target_uuid = uuid if uuid else self.parameters['uuid']
    return rest_generic.patch_async(self.rest_api, 'storage/volumes', target_uuid, body,
                                    query=query, job_timeout=120)
def get_qos_policy_group(self):
    """Return whichever QoS policy group option was supplied, or None.

    A regular policy group takes precedence over an adaptive one.
    """
    for option in ('qos_policy_group', 'qos_adaptive_policy_group'):
        value = self.parameters.get(option)
        if value is not None:
            return value
    return None
def get_compression(self):
    """Map the compression/inline_compression options onto a REST value.

    Returns 'both', 'background', 'inline', 'none', or None when neither
    option was supplied at all.
    """
    background = self.parameters.get('compression')
    inline = self.parameters.get('inline_compression')
    if background and inline:
        return 'both'
    if background:
        return 'background'
    if inline:
        return 'inline'
    # Both explicitly disabled (False, not merely absent) means 'none'.
    if background is False and inline is False:
        return 'none'
    return None
def rest_errors(self):
    """Fail early on option combinations the REST API cannot express."""
    # With REST, regular and adaptive QoS policy groups map onto the same
    # field, so setting both at once is ambiguous and must be rejected.
    if self.parameters.get('qos_policy_group') and self.parameters.get('qos_adaptive_policy_group'):
        self.module.fail_json(msg='Error: With Rest API qos_policy_group and qos_adaptive_policy_group are now '
                                  'the same thing, and cannot be set at the same time')
def format_get_volume_rest(self, record):
    """Normalize a REST volume record into the module's internal dict format.

    Keys and value types mirror what the ZAPI code path produces, so the
    rest of the module can compare current vs. desired state uniformly.
    """
    is_online = record.get('state') == 'online'
    # TODO FIX THIS!!!! ZAPI would only return a single aggr, REST can return more than 1.
    # For now only the first aggregate is reported; we need a way to show all aggrs.
    aggregates = record.get('aggregates', None)
    aggr_name = aggregates[0].get('name', None) if aggregates else None
    # Bug fix: keep the raw REST compression value ('both', 'background',
    # 'inline', 'none' or None).  It was previously wrapped in
    # none_to_bool(), which returns a bool -- and a bool is never contained
    # in ('both', 'background'), so the 'compression' and
    # 'inline_compression' flags below were always reported as False.
    rest_compression = self.na_helper.safe_get(record, ['efficiency', 'compression'])
    return {
        'name': record.get('name', None),
        'encrypt': self.na_helper.safe_get(record, ['encryption', 'enabled']),
        'tiering_policy': self.na_helper.safe_get(record, ['tiering', 'policy']),
        'export_policy': self.na_helper.safe_get(record, ['nas', 'export_policy', 'name']),
        'aggregate_name': aggr_name,
        'flexgroup_uuid': record.get('uuid', None),  # this might need some additional logic
        'instance_uuid': record.get('uuid', None),   # this might need some additional logic
        'junction_path': self.na_helper.safe_get(record, ['nas', 'path']),
        'style_extended': record.get('style', None),
        'type': record.get('type', None),
        'comment': record.get('comment', None),
        # REST merges regular and adaptive QoS policies into one field.
        'qos_policy_group': self.na_helper.safe_get(record, ['qos', 'policy', 'name']),
        'qos_adaptive_policy_group': self.na_helper.safe_get(record, ['qos', 'policy', 'name']),
        'volume_security_style': self.na_helper.safe_get(record, ['nas', 'security_style']),
        'group_id': self.na_helper.safe_get(record, ['nas', 'gid']),
        # REST returns an int while ZAPI returns a string; force a string here.
        # NOTE(review): this yields the literal string 'None' when the field
        # is absent -- confirm callers expect that.
        'unix_permissions': str(self.na_helper.safe_get(record, ['nas', 'unix_permissions'])),
        'user_id': self.na_helper.safe_get(record, ['nas', 'uid']),
        'snapshot_policy': self.na_helper.safe_get(record, ['snapshot_policy', 'name']),
        'percent_snapshot_space': self.na_helper.safe_get(record, ['space', 'snapshot', 'reserve_percent']),
        'size': self.na_helper.safe_get(record, ['space', 'size']),
        'space_guarantee': self.na_helper.safe_get(record, ['guarantee', 'type']),
        'is_online': is_online,
        'uuid': record.get('uuid', None),
        'efficiency_policy': self.na_helper.safe_get(record, ['efficiency', 'policy', 'name']),
        'compression': rest_compression in ('both', 'background'),
        'inline_compression': rest_compression in ('both', 'inline'),
    }
def none_to_bool(self, value):
    # True for any value other than the literal string 'none' -- note that
    # this includes None itself, which callers may not expect.
    return value != 'none'
def apply(self):
    '''Call create/modify/delete operations.

    Single entry point: determines which action (rename, rehost, snapshot
    restore, create, delete or modify) applies, performs it unless in
    check mode, and exits the module with the result.
    '''
    response = None
    modify_after_create = None
    current = self.get_volume()
    self.volume_style = self.get_volume_style(current)  # TODO: Check if this needed REST
    # aggregate_name targets a single aggregate, which cannot apply to FlexGroups.
    if self.volume_style == 'flexgroup' and self.parameters.get('aggregate_name') is not None:
        self.module.fail_json(msg='Error: aggregate_name option cannot be used with FlexGroups.')
    rename, rehost, snapshot_restore, cd_action, modify = None, None, None, None, None
    # rename and create are mutually exclusive
    if self.parameters.get('from_name'):
        rename = self.na_helper.is_rename_action(self.get_volume(self.parameters['from_name']), current)
    elif self.parameters.get('from_vserver'):
        rehost = True
        self.na_helper.changed = True
    elif self.parameters.get('snapshot_restore'):
        snapshot_restore = True
        self.na_helper.changed = True
    else:
        cd_action = self.na_helper.get_cd_action(current, self.parameters)
    if self.parameters.get('unix_permissions') is not None and (
        self.compare_chmod_value(current) or not self.parameters['is_online']
    ):
        # don't change if the values are the same
        # can't change permissions if not online
        del self.parameters['unix_permissions']
    if cd_action is None and rename is None and rehost is None and self.parameters['state'] == 'present':
        modify = self.set_modify_dict(current)
    if self.parameters.get('nas_application_template') is not None:
        application = self.get_application()
        changed = self.na_helper.changed
        modify_app = self.na_helper.get_modified_attributes(application, self.parameters.get('nas_application_template'))
        # restore current change state, as we ignore this
        if modify_app:
            self.na_helper.changed = changed
            self.module.warn('Modifying an app is not supported at present: ignoring: %s' % str(modify_app))
    if self.na_helper.changed and not self.module.check_mode:
        if rename:
            self.rename_volume()
        # REST DOES NOT have a volume-rehost equivalent
        if rehost:
            self.rehost_volume()
        if snapshot_restore:
            self.snapshot_restore_volume()
        if cd_action == 'create':
            response = self.create_volume()
            # if we create using ZAPI and modify only options are set (snapdir_access or atime_update), we need to run a modify.
            # The modify also takes care of efficiency (sis) parameters and snapshot_auto_delete.
            # If we create using REST application, some options are not available, we may need to run a modify.
            current = self.get_volume()
            if current:
                self.parameters['uuid'] = current['uuid']
                self.volume_created = True
                modify_after_create = self.set_modify_dict(current, after_create=True)
                if modify_after_create:
                    self.take_modify_actions(modify_after_create)
            # restore this, as set_modify_dict could set it to False
            self.na_helper.changed = True
        elif cd_action == 'delete':
            self.parameters['uuid'] = current['uuid']
            self.delete_volume(current)
        elif modify:
            self.parameters['uuid'] = current['uuid']
            self.take_modify_actions(modify)
    result = dict(
        changed=self.na_helper.changed
    )
    if response is not None:
        result['response'] = response
    if modify:
        result['modify'] = modify
    if modify_after_create:
        result['modify_after_create'] = modify_after_create
    self.module.exit_json(**result)
def ems_log_event(self, state):
    '''Send an autosupport (EMS) log event describing the operation.'''
    static_messages = {
        'volume-delete': "A Volume has been deleted",
        'volume-move': "A Volume has been moved",
        'volume-rename': "A Volume has been renamed",
        'volume-rehost': "A Volume has been rehosted",
        'snapshot-restore-volume': "A Volume has been restored by snapshot",
        'volume-change': "A Volume state has been changed",
    }
    # 'create' and 'volume-resize' need the size appended, so they cannot
    # live in the static table (size may be absent for other states).
    if state == 'create':
        message = "A Volume has been created, size: " + \
                  str(self.parameters['size']) + str(self.parameters['size_unit'])
    elif state == 'volume-resize':
        message = "A Volume has been resized to: " + \
                  str(self.parameters['size']) + str(self.parameters['size_unit'])
    else:
        message = static_messages.get(state, "na_ontap_volume has been called")
    netapp_utils.ems_log_event(
        "na_ontap_volume", self.server, event=message)
def main():
    '''Apply volume operations from playbook'''
    NetAppOntapVolume().apply()


if __name__ == '__main__':
    main()
| 49.153001 | 158 | 0.623197 |
6c5d95b1cae7b4c3e6135dcc2179641f02bfcfbf | 771 | py | Python | deepaugment/val_run.py | YaCpotato/B4Research | 065463c36a8f1a95c4197fe2c6ae903de4aae8c5 | [
"MIT"
] | 2 | 2020-04-20T03:21:33.000Z | 2020-04-30T14:24:43.000Z | deepaugment/val_run.py | YaCpotato/B4Research | 065463c36a8f1a95c4197fe2c6ae903de4aae8c5 | [
"MIT"
] | 6 | 2020-01-28T23:04:17.000Z | 2022-02-10T01:07:17.000Z | deepaugment/val_run.py | YaCpotato/B4ResearchDeepaugment | 065463c36a8f1a95c4197fe2c6ae903de4aae8c5 | [
"MIT"
] | null | null | null | import numpy as np
import os
from keras.preprocessing import image
from sklearn.model_selection import train_test_split
import sys
from os.path import dirname, realpath
# Make the repository root importable so the sibling top-level modules
# (run_full_model, build_features) imported below can be resolved.
file_path = realpath(__file__)
dir_of_file = dirname(file_path)
parent_dir_of_file = dirname(dir_of_file)
sys.path.insert(0, parent_dir_of_file)
from run_full_model import run_full_model
from build_features import DataOp
def main():
    """Train the full model on CIFAR-10 with a pre-searched augmentation policy."""
    X, y, input_shape = DataOp.load("cifar10")
    # NOTE(review): hard-coded, machine-specific policies path -- consider
    # making it a CLI argument or environment variable.
    run_full_model(
        X,
        y,
        test_proportion=0.1,
        model="wrn_28_10",
        epochs=100,
        batch_size=32,
        policies_path="/home/acb11354uz/B4researchMain/B4ResearchDeepaugment/reports/best_policies/top20_policies_cifar10_exp_2019-02-08_03-54_3000_iters.csv",
    )


if __name__ == "__main__":
    main()
| 24.09375 | 158 | 0.771725 |
f04f2ad8b1746611f93fc1a539ab9f6cfd5fbe5e | 225 | py | Python | tests/conftest.py | brettelliot/jinx | 3faa03c25b61594cfc902c122db091edff54b607 | [
"MIT"
] | 2 | 2020-05-17T02:50:18.000Z | 2020-06-26T20:56:41.000Z | tests/conftest.py | brettelliot/jinx | 3faa03c25b61594cfc902c122db091edff54b607 | [
"MIT"
] | null | null | null | tests/conftest.py | brettelliot/jinx | 3faa03c25b61594cfc902c122db091edff54b607 | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
"""
Dummy conftest.py for jinx.
If you don't know what this is for, just leave it empty.
Read more about conftest.py under:
https://pytest.org/latest/plugins.html
"""
# import pytest
| 20.454545 | 60 | 0.644444 |
36b134069a7e062ec6113d55e8d4924cedfd9e79 | 645 | py | Python | src/poetry/console/commands/cache/list.py | s-spindler/poetry | 27034d61127b6b4ee0974d3d356105e33cb91713 | [
"MIT"
] | null | null | null | src/poetry/console/commands/cache/list.py | s-spindler/poetry | 27034d61127b6b4ee0974d3d356105e33cb91713 | [
"MIT"
] | null | null | null | src/poetry/console/commands/cache/list.py | s-spindler/poetry | 27034d61127b6b4ee0974d3d356105e33cb91713 | [
"MIT"
] | null | null | null | from __future__ import annotations
from poetry.config.config import Config
from poetry.console.commands.command import Command
class CacheListCommand(Command):
    """Console command that lists the entries in Poetry's repository cache directory."""

    name = "cache list"
    description = "List Poetry's caches."

    def handle(self) -> int | None:
        cache_dir = Config.create().repository_cache_directory
        if cache_dir.exists():
            caches = sorted(cache_dir.iterdir())
            if caches:
                for cache in caches:
                    self.line(f"<info>{cache.name}</>")
                return 0

        self.line_error("<warning>No caches found</>")
        return None
| 28.043478 | 72 | 0.627907 |
915194e8b237a0e4aceaee6de18928def1da57c4 | 9,173 | py | Python | ros/src/waypoint_updater/waypoint_updater.py | philippmarcus/CarND-Capstone | 9be5846c4a4cbd6e6d32897b62bb25d43d7f1713 | [
"MIT"
] | null | null | null | ros/src/waypoint_updater/waypoint_updater.py | philippmarcus/CarND-Capstone | 9be5846c4a4cbd6e6d32897b62bb25d43d7f1713 | [
"MIT"
] | null | null | null | ros/src/waypoint_updater/waypoint_updater.py | philippmarcus/CarND-Capstone | 9be5846c4a4cbd6e6d32897b62bb25d43d7f1713 | [
"MIT"
] | null | null | null | #!/usr/bin/env python
import rospy
from geometry_msgs.msg import PoseStamped, TwistStamped
from styx_msgs.msg import Lane, Waypoint
from std_msgs.msg import Int32
import tf
import math
import numpy as np
'''
This node will publish waypoints from the car's current position to some `x` distance ahead.
As mentioned in the doc, you should ideally first implement a version which does not care
about traffic lights or obstacles.
Once you have created dbw_node, you will update this node to use the status of traffic lights too.
Please note that our simulator also provides the exact location of traffic lights and their
current status in `/vehicle/traffic_lights` message. You can use this message to build this node
as well as to verify your TL classifier.
TODO (for Yousuf and Aaron): Stopline location for each traffic light.
'''
LOOKAHEAD_WPS = 200 # Number of waypoints we will publish. You can change this number
WAYPOINT_HZ = 10  # Publish rate of /final_waypoints, in Hz
# Half of the configured deceleration limit, used to plan the slow-down profile.
# NOTE(review): decel_limit is presumably negative (see sqrt(0 - 2*a*d) in
# decelerate()) -- confirm against the dbw_node configuration.
MAX_DECELERATION = rospy.get_param('/dbw_node/decel_limit')* 0.5
HALT_DISTANCE = 2.5  # Buffer distance kept between the car and the stop point
MAX_SPEED_MiPH = rospy.get_param('/waypoint_loader/velocity')  # Target speed (miles per hour)
class WaypointUpdater(object):
    """ROS node publishing the next LOOKAHEAD_WPS waypoints ahead of the car.

    Subscribes to the car pose, the static base waypoints, the current
    velocity and the next traffic-light waypoint index, and publishes a
    speed-annotated /final_waypoints list at WAYPOINT_HZ.
    """

    def __init__(self):
        rospy.init_node('waypoint_updater')

        rospy.Subscriber('/current_pose', PoseStamped, self.pose_cb)
        rospy.Subscriber('/base_waypoints', Lane, self.waypoints_cb)
        # Needed for smooth slow down
        rospy.Subscriber('/current_velocity', TwistStamped, self.velocity_cb)

        # TODO: Add a subscriber for /traffic_waypoint and /obstacle_waypoint below
        rospy.Subscriber('/traffic_waypoint', Int32, self.traffic_wp_cb)

        self.final_waypoints_pub = rospy.Publisher('final_waypoints', Lane, queue_size=1)

        # TODO: Add other member variables you need below
        self.last_current_velocity = None  # latest TwistStamped from /current_velocity
        self.last_base_waypoints = None    # latest Lane from /base_waypoints
        self.last_current_pose = None      # latest PoseStamped from /current_pose
        self.traffic_wp = None             # waypoint index received on /traffic_waypoint

        self.loop()
        # NOTE(review): loop() only returns once rospy is shut down, so this
        # spin() is effectively never reached while the node is running.
        rospy.spin()

    def loop(self):
        """Publish waypoints at a fixed rate until the node shuts down."""
        pass  # NOTE(review): leftover no-op statement
        rate = rospy.Rate(WAYPOINT_HZ)
        while not rospy.is_shutdown():
            self.publish_next_waypoints()
            rate.sleep()

    def pose_cb(self, msg):
        # Cache the latest car pose.
        self.last_current_pose = msg

    def waypoints_cb(self, msg):
        # Cache the full list of base (track) waypoints.
        self.last_base_waypoints = msg

    def velocity_cb(self, data):
        # Cache the latest measured velocity.
        self.last_current_velocity = data

    def traffic_wp_cb(self, msg):
        # Cache the waypoint index published on /traffic_waypoint.
        self.traffic_wp = msg.data

    def closest_forward_waypoint(self, car):
        """Return the index of the closest base waypoint in front of the car, or -1."""
        # compute global heading angle of car
        quaternion = (car.pose.orientation.x, car.pose.orientation.y, car.pose.orientation.z, car.pose.orientation.w)
        _, _, car_yaw = tf.transformations.euler_from_quaternion(quaternion)

        # Find index of closest candidate waypoint
        closest_idx = -1
        closest_dist = 999999999999
        for idx in range(len(self.last_base_waypoints.waypoints)):
            wp = self.last_base_waypoints.waypoints[idx]
            # Check if it has a smaller distance than the observed WPs
            this_wp_dist = math.sqrt((wp.pose.pose.position.y - car.pose.position.y)**2 +
                                     (wp.pose.pose.position.x - car.pose.position.x)**2)
            if this_wp_dist < closest_dist:
                wp_glob_angle = math.atan2(wp.pose.pose.position.y - car.pose.position.y,
                                           wp.pose.pose.position.x - car.pose.position.x)
                # calculate the smallest difference between the two angles
                phi = min((2 * math.pi) - abs(wp_glob_angle - car_yaw), abs(car_yaw - wp_glob_angle))
                # Check if the wp is in front of the car (within +/- 90 deg of heading)
                if phi < math.pi/2:
                    closest_idx = idx
                    closest_dist = this_wp_dist
        return closest_idx

    def normal_speed(self, car, selected_waypoints):
        """Set every selected waypoint to the maximum configured speed (m/s)."""
        #print("NORMAL SPEED")
        # Update the speed of all waypoints to maximum speed
        max_speed = MAX_SPEED_MiPH * 1609.340 / (60. * 60.)  # mph -> m/s
        for i in range(len(selected_waypoints)):
            self.set_waypoint_velocity(selected_waypoints, i, max_speed)
        return selected_waypoints

    def decelerate(self, car, selected_waypoints, obstacle_id):
        """Lower waypoint speeds so the car stops HALT_DISTANCE before the obstacle."""
        rospy.loginfo("BRAKING")
        rospy.loginfo("obstacle_id={} \t len(selected_waypoints)={}".format(obstacle_id, len(selected_waypoints)))
        assert 0 <= obstacle_id <= len(selected_waypoints)
        car_vx = self.last_current_velocity.twist.linear.x
        car_vy = self.last_current_velocity.twist.linear.y
        car_speed = math.sqrt(car_vx **2 + car_vy**2)  # NOTE(review): computed but unused
        max_speed = MAX_SPEED_MiPH * 1609.340 / (60. * 60.)  # mph -> m/s

        # all waypoints after the obstacle to 0
        for i in range(obstacle_id, len(selected_waypoints)):
            self.set_waypoint_velocity(selected_waypoints, i, 0.0)

        # backwards from obstacle to car position
        for i in reversed(range(0, obstacle_id)):
            dist_to_obstacle = max(self.distance(selected_waypoints, i, obstacle_id) - HALT_DISTANCE, 0.0)
            # v = sqrt(2*|a|*d) profile; assumes MAX_DECELERATION is negative -- TODO confirm
            this_wp_speed = min(math.sqrt(0.0 - 2. * MAX_DECELERATION * dist_to_obstacle), max_speed)
            self.set_waypoint_velocity(selected_waypoints, i, this_wp_speed)
        return selected_waypoints

    def publish_next_waypoints(self):
        """Select the waypoints ahead, annotate speeds, and publish /final_waypoints."""
        # get the current angle and position of the car
        # Skip publishing until all subscriptions have delivered data.
        if self.last_current_pose is not None and \
                self.last_base_waypoints is not None and \
                self.traffic_wp is not None and \
                self.last_current_velocity is not None:
            # The car
            car = self.last_current_pose
            wp_count = len(self.last_base_waypoints.waypoints)

            # Select waypoints
            forward_wp_id = self.closest_forward_waypoint(car)
            selected_waypoints = []
            is_obstacle_ahead = False
            obstacle_id = -1
            WP_ids = []
            # Needed to allow the car to do several rounds
            for i in range(LOOKAHEAD_WPS):
                cur_wp_id = (forward_wp_id + i)%wp_count
                WP_ids.append(cur_wp_id)
                selected_waypoints.append(self.last_base_waypoints.waypoints[cur_wp_id])
                if cur_wp_id == self.traffic_wp:
                    is_obstacle_ahead = True
                    obstacle_id = i

            if is_obstacle_ahead:
                if obstacle_id < 0:
                    # EMERGENCY BRAKE
                    for i in range(len(selected_waypoints)):
                        self.set_waypoint_velocity(selected_waypoints, i, 0.0)
                else:
                    # Normal brake
                    selected_waypoints = self.decelerate(car, selected_waypoints, obstacle_id)
            else:
                selected_waypoints = self.normal_speed(car, selected_waypoints)

            rospy.loginfo("Closest car WP = {}. \t Obstacle WP = {}. Selected min WP = {}. Selected max WP = {}.".format(forward_wp_id,
                                                                                                                         obstacle_id,
                                                                                                                         min(WP_ids),
                                                                                                                         max(WP_ids)))
            if len(selected_waypoints) == 0:
                raise Exception("No waypoints selected. forward_wp_id={} \t \
                                self.traffic_wp={} \t is_obstacle_ahead={} \t \
                                obstacle_id={}".format(forward_wp_id,self.traffic_wp,is_obstacle_ahead,obstacle_id))

            # publish result
            lane = Lane()
            lane.header.frame_id = '/world'
            lane.header.stamp = rospy.Time(0)
            lane.waypoints = selected_waypoints
            self.final_waypoints_pub.publish(lane)
        pass

    def traffic_cb(self, msg):
        # TODO: Callback for /traffic_waypoint message. Implement
        pass

    def obstacle_cb(self, msg):
        # TODO: Callback for /obstacle_waypoint message. We will implement it later
        pass

    def get_waypoint_velocity(self, waypoint):
        # Linear x velocity stored on a single waypoint.
        return waypoint.twist.twist.linear.x

    def set_waypoint_velocity(self, waypoints, waypoint, velocity):
        # Set the linear x velocity of waypoints[waypoint] in place.
        waypoints[waypoint].twist.twist.linear.x = velocity

    def distance(self, waypoints, wp1, wp2):
        """Sum the Euclidean lengths of the path segments between wp1 and wp2."""
        dist = 0
        dl = lambda a, b: math.sqrt((a.x-b.x)**2 + (a.y-b.y)**2 + (a.z-b.z)**2)
        for i in range(wp1, wp2+1):
            dist += dl(waypoints[wp1].pose.pose.position, waypoints[i].pose.pose.position)
            wp1 = i
        return dist
if __name__ == '__main__':
    try:
        # Constructing the node starts the publishing loop (see __init__).
        WaypointUpdater()
    except rospy.ROSInterruptException:
        rospy.logerr('Could not start waypoint updater node.')
| 40.950893 | 137 | 0.59359 |
e8496d12800bb4540cb9f9c99f7481f6ed5bab67 | 15,662 | py | Python | {{cookiecutter.project_slug}}/config/settings/base.py | rq0net/cookiecutter-django | 9a94bc530f8fecadfc7846c68491832379b654b8 | [
"BSD-3-Clause"
] | null | null | null | {{cookiecutter.project_slug}}/config/settings/base.py | rq0net/cookiecutter-django | 9a94bc530f8fecadfc7846c68491832379b654b8 | [
"BSD-3-Clause"
] | 1 | 2021-11-17T03:36:35.000Z | 2021-11-17T03:36:35.000Z | {{cookiecutter.project_slug}}/config/settings/base.py | rq0net/cookiecutter-django | 9a94bc530f8fecadfc7846c68491832379b654b8 | [
"BSD-3-Clause"
] | null | null | null | """
Base settings to build other settings files upon.
"""
from pathlib import Path
import environ, os
from urllib.parse import parse_qs
ROOT_DIR = Path(__file__).resolve(strict=True).parent.parent.parent
# {{ cookiecutter.project_slug }}/
APPS_DIR = ROOT_DIR / "{{ cookiecutter.project_slug }}"
env = environ.Env()
READ_DOT_ENV_FILE = env.bool("DJANGO_READ_DOT_ENV_FILE", default=False)
if READ_DOT_ENV_FILE:
# OS environment variables take precedence over variables from .env
env.read_env(str(ROOT_DIR / ".env"))
# GENERAL
# ------------------------------------------------------------------------------
# https://docs.djangoproject.com/en/dev/ref/settings/#debug
DEBUG = env.bool("DJANGO_DEBUG", False)
# Local time zone. Choices are
# http://en.wikipedia.org/wiki/List_of_tz_zones_by_name
# though not all of them may be available with every OS.
# In Windows, this must be set to your system time zone.
TIME_ZONE = "{{ cookiecutter.timezone }}"
# https://docs.djangoproject.com/en/dev/ref/settings/#language-code
LANGUAGE_CODE = "en-us"
# https://docs.djangoproject.com/en/dev/ref/settings/#site-id
SITE_ID = 1
# https://docs.djangoproject.com/en/dev/ref/settings/#use-i18n
USE_I18N = True
# https://docs.djangoproject.com/en/dev/ref/settings/#use-l10n
USE_L10N = True
# https://docs.djangoproject.com/en/dev/ref/settings/#use-tz
USE_TZ = True
# https://docs.djangoproject.com/en/dev/ref/settings/#locale-paths
LOCALE_PATHS = [str(ROOT_DIR / "locale")]
# DATABASES
# ------------------------------------------------------------------------------
# https://docs.djangoproject.com/en/dev/ref/settings/#databases
config_options = {}
if env.str("POSTGRES_DB_PARAMETERS", None):
for k, v in parse_qs(env.str("POSTGRES_DB_PARAMETERS", None)).items():
config_options.update({k: v[0]})
DATABASES = {
"default": {
'ENGINE': 'django.db.backends.postgresql',
'NAME': env.str("POSTGRESQL_DATABASE", "gaius_{{ cookiecutter.project_slug }}"),
'USER': env.str("POSTGRESQL_DB_USER"),
'PASSWORD': env.str("POSTGRESQL_DB_PASS"),
'HOST': env.str("POSTGRESQL_DB_HOST"),
'PORT': env.str("POSTGRESQL_DB_PORT"),
'OPTIONS': config_options
},
"common": {
'ENGINE': 'django.db.backends.postgresql',
'NAME': env.str("POSTGRES_DB_COMMON", "gaius_common"),
'USER': env.str("POSTGRESQL_DB_USER"),
'PASSWORD': env.str("POSTGRESQL_DB_PASS"),
'HOST': env.str("POSTGRESQL_DB_HOST"),
'PORT': env.str("POSTGRESQL_DB_PORT"),
'OPTIONS': config_options
},
}
DATABASES["default"]["ATOMIC_REQUESTS"] = True
DATABASE_ROUTERS = ["common.dbrouter.AuthRouter", ]
# URLS
# ------------------------------------------------------------------------------
# https://docs.djangoproject.com/en/dev/ref/settings/#root-urlconf
ROOT_URLCONF = "config.urls"
# https://docs.djangoproject.com/en/dev/ref/settings/#wsgi-application
WSGI_APPLICATION = "config.wsgi.application"
# APPS
# ------------------------------------------------------------------------------
DJANGO_APPS = [
"django.contrib.auth",
"django.contrib.contenttypes",
"django.contrib.sessions",
"django.contrib.sites",
"django.contrib.messages",
"django.contrib.staticfiles",
# "django.contrib.humanize", # Handy template tags
"django.contrib.admin",
"django.forms",
]
THIRD_PARTY_APPS = [
"crispy_forms",
"crispy_bootstrap5",
{%- if cookiecutter.use_celery == 'y' %}
"django_celery_beat",
{%- endif %}
{%- if cookiecutter.use_drf == "y" %}
"rest_framework",
"rest_framework.authtoken",
"corsheaders",
{%- endif %}
]
LOCAL_APPS = [
# Your stuff: custom apps go here
]
# https://docs.djangoproject.com/en/dev/ref/settings/#installed-apps
INSTALLED_APPS = DJANGO_APPS + THIRD_PARTY_APPS + LOCAL_APPS
# MIGRATIONS
# ------------------------------------------------------------------------------
# https://docs.djangoproject.com/en/dev/ref/settings/#migration-modules
MIGRATION_MODULES = {"sites": "{{ cookiecutter.project_slug }}.contrib.sites.migrations"}
# AUTHENTICATION
# ------------------------------------------------------------------------------
# https://docs.djangoproject.com/en/dev/ref/settings/#authentication-backends
AUTHENTICATION_BACKENDS = [
"django.contrib.auth.backends.ModelBackend",
]
# PASSWORDS
# ------------------------------------------------------------------------------
# https://docs.djangoproject.com/en/dev/ref/settings/#password-hashers
PASSWORD_HASHERS = [
# https://docs.djangoproject.com/en/dev/topics/auth/passwords/#using-argon2-with-django
"django.contrib.auth.hashers.Argon2PasswordHasher",
"django.contrib.auth.hashers.PBKDF2PasswordHasher",
"django.contrib.auth.hashers.PBKDF2SHA1PasswordHasher",
"django.contrib.auth.hashers.BCryptSHA256PasswordHasher",
]
# https://docs.djangoproject.com/en/dev/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
"NAME": "django.contrib.auth.password_validation.UserAttributeSimilarityValidator"
},
{"NAME": "django.contrib.auth.password_validation.MinimumLengthValidator"},
{"NAME": "django.contrib.auth.password_validation.CommonPasswordValidator"},
{"NAME": "django.contrib.auth.password_validation.NumericPasswordValidator"},
]
# MIDDLEWARE
# ------------------------------------------------------------------------------
# https://docs.djangoproject.com/en/dev/ref/settings/#middleware
MIDDLEWARE = [
"django.middleware.security.SecurityMiddleware",
{%- if cookiecutter.use_drf == 'y' %}
"corsheaders.middleware.CorsMiddleware",
{%- endif %}
{%- if cookiecutter.use_whitenoise == 'y' %}
"whitenoise.middleware.WhiteNoiseMiddleware",
{%- endif %}
"django.contrib.sessions.middleware.SessionMiddleware",
"django.middleware.locale.LocaleMiddleware",
"django.middleware.common.CommonMiddleware",
"django.middleware.csrf.CsrfViewMiddleware",
"django.contrib.auth.middleware.AuthenticationMiddleware",
"django.contrib.messages.middleware.MessageMiddleware",
"django.middleware.common.BrokenLinkEmailsMiddleware",
"django.middleware.clickjacking.XFrameOptionsMiddleware",
]
# STATIC
# ------------------------------------------------------------------------------
# https://docs.djangoproject.com/en/dev/ref/settings/#static-root
STATIC_ROOT = str(ROOT_DIR / "staticfiles")
# https://docs.djangoproject.com/en/dev/ref/settings/#static-url
STATIC_URL = "/static/"
# https://docs.djangoproject.com/en/dev/ref/contrib/staticfiles/#std:setting-STATICFILES_DIRS
STATICFILES_DIRS = [str(APPS_DIR / "static")]
# https://docs.djangoproject.com/en/dev/ref/contrib/staticfiles/#staticfiles-finders
STATICFILES_FINDERS = [
"django.contrib.staticfiles.finders.FileSystemFinder",
"django.contrib.staticfiles.finders.AppDirectoriesFinder",
]
# MEDIA
# ------------------------------------------------------------------------------
# https://docs.djangoproject.com/en/dev/ref/settings/#media-root
MEDIA_ROOT = str(APPS_DIR / "media")
# https://docs.djangoproject.com/en/dev/ref/settings/#media-url
MEDIA_URL = "/media/"
# TEMPLATES
# ------------------------------------------------------------------------------
# https://docs.djangoproject.com/en/dev/ref/settings/#templates
TEMPLATES = [
{
# https://docs.djangoproject.com/en/dev/ref/settings/#std:setting-TEMPLATES-BACKEND
"BACKEND": "django.template.backends.django.DjangoTemplates",
# https://docs.djangoproject.com/en/dev/ref/settings/#template-dirs
"DIRS": [str(APPS_DIR / "templates")],
"OPTIONS": {
# https://docs.djangoproject.com/en/dev/ref/settings/#template-loaders
# https://docs.djangoproject.com/en/dev/ref/templates/api/#loader-types
"loaders": [
"django.template.loaders.filesystem.Loader",
"django.template.loaders.app_directories.Loader",
],
# https://docs.djangoproject.com/en/dev/ref/settings/#template-context-processors
"context_processors": [
"django.template.context_processors.debug",
"django.template.context_processors.request",
"django.contrib.auth.context_processors.auth",
"django.template.context_processors.i18n",
"django.template.context_processors.media",
"django.template.context_processors.static",
"django.template.context_processors.tz",
"django.contrib.messages.context_processors.messages",
"{{ cookiecutter.project_slug }}.utils.context_processors.settings_context",
],
},
}
]
# https://docs.djangoproject.com/en/dev/ref/settings/#form-renderer
FORM_RENDERER = "django.forms.renderers.TemplatesSetting"
# http://django-crispy-forms.readthedocs.io/en/latest/install.html#template-packs
CRISPY_TEMPLATE_PACK = "bootstrap5"
CRISPY_ALLOWED_TEMPLATE_PACKS = "bootstrap5"
# FIXTURES
# ------------------------------------------------------------------------------
# https://docs.djangoproject.com/en/dev/ref/settings/#fixture-dirs
FIXTURE_DIRS = (str(APPS_DIR / "fixtures"),)
# SECURITY
# ------------------------------------------------------------------------------
# https://docs.djangoproject.com/en/dev/ref/settings/#session-cookie-httponly
SESSION_COOKIE_HTTPONLY = True
# https://docs.djangoproject.com/en/dev/ref/settings/#csrf-cookie-httponly
CSRF_COOKIE_HTTPONLY = True
# https://docs.djangoproject.com/en/dev/ref/settings/#secure-browser-xss-filter
SECURE_BROWSER_XSS_FILTER = True
# https://docs.djangoproject.com/en/dev/ref/settings/#x-frame-options
X_FRAME_OPTIONS = "DENY"
# EMAIL
# ------------------------------------------------------------------------------
# https://docs.djangoproject.com/en/dev/ref/settings/#email-backend
EMAIL_BACKEND = env(
"DJANGO_EMAIL_BACKEND",
default="django.core.mail.backends.smtp.EmailBackend",
)
# https://docs.djangoproject.com/en/dev/ref/settings/#email-timeout
EMAIL_TIMEOUT = 5
# ADMIN
# ------------------------------------------------------------------------------
# Django Admin URL.
ADMIN_URL = "admin/"
# https://docs.djangoproject.com/en/dev/ref/settings/#admins
ADMINS = [("""{{cookiecutter.author_name}}""", "{{cookiecutter.email}}")]
# https://docs.djangoproject.com/en/dev/ref/settings/#managers
MANAGERS = ADMINS
# LOGGING
# ------------------------------------------------------------------------------
# https://docs.djangoproject.com/en/dev/ref/settings/#logging
# See https://docs.djangoproject.com/en/dev/topics/logging for
# more details on how to customize your logging configuration.
LOGGING = {
"version": 1,
"disable_existing_loggers": False,
"formatters": {
"verbose": {
"format": "%(levelname)s %(asctime)s %(module)s "
"%(process)d %(thread)d %(message)s"
}
},
"handlers": {
"console": {
"level": "DEBUG",
"class": "logging.StreamHandler",
"formatter": "verbose",
}
},
"root": {"level": "INFO", "handlers": ["console"]},
}
{% if cookiecutter.use_celery == 'y' -%}
# Celery
# ------------------------------------------------------------------------------
if USE_TZ:
    # http://docs.celeryproject.org/en/latest/userguide/configuration.html#std:setting-timezone
    CELERY_TIMEZONE = TIME_ZONE
# http://docs.celeryproject.org/en/latest/userguide/configuration.html#std:setting-broker_url
# Redis connection settings, taken from the environment with in-cluster
# defaults ("redis-master" service name).
# NOTE(review): the fallback REDIS_PASSWORD below is a credential hard-coded
# into the template -- confirm it is always overridden in real deployments.
REDIS_HOST = os.getenv('REDIS_HOST', 'redis-master')
REDIS_PASSWORD = os.getenv('REDIS_PASSWORD', 'hZg7kXzvPN')
REDIS_PORT = os.getenv('REDIS_PORT', '6379')
REDIS_DATABASE = os.getenv('REDIS_DATABASE', '1')
REDIS_DATABASE_CELERY = os.getenv('REDIS_DATABASE_CELERY', '0')
REDIS_DATABASE_IP_BLOCK = os.getenv('REDIS_DATABASE_IP_BLOCK', '15')
CELERY_BROKER_URL = f'redis://:{REDIS_PASSWORD}@{REDIS_HOST}:{REDIS_PORT}/{REDIS_DATABASE_CELERY}'
# http://docs.celeryproject.org/en/latest/userguide/configuration.html#std:setting-result_backend
# Task results are stored in the same Redis database as the broker.
CELERY_RESULT_BACKEND = CELERY_BROKER_URL
# http://docs.celeryproject.org/en/latest/userguide/configuration.html#std:setting-accept_content
CELERY_ACCEPT_CONTENT = ["json"]
# http://docs.celeryproject.org/en/latest/userguide/configuration.html#std:setting-task_serializer
CELERY_TASK_SERIALIZER = "json"
# http://docs.celeryproject.org/en/latest/userguide/configuration.html#std:setting-result_serializer
CELERY_RESULT_SERIALIZER = "json"
# http://docs.celeryproject.org/en/latest/userguide/configuration.html#task-time-limit
# TODO: set to whatever value is adequate in your circumstances
CELERY_TASK_TIME_LIMIT = 5 * 60
# http://docs.celeryproject.org/en/latest/userguide/configuration.html#task-soft-time-limit
# TODO: set to whatever value is adequate in your circumstances
CELERY_TASK_SOFT_TIME_LIMIT = 60
# http://docs.celeryproject.org/en/latest/userguide/configuration.html#beat-scheduler
# Periodic-task schedule lives in the database (django-celery-beat).
CELERY_BEAT_SCHEDULER = "django_celery_beat.schedulers:DatabaseScheduler"
{%- endif %}
{% if cookiecutter.use_compressor == 'y' -%}
# django-compressor
# ------------------------------------------------------------------------------
# https://django-compressor.readthedocs.io/en/latest/quickstart/#installation
INSTALLED_APPS += ["compressor"]
STATICFILES_FINDERS += ["compressor.finders.CompressorFinder"]
{%- endif %}
{% if cookiecutter.use_drf == "y" -%}
# django-rest-framework
# -------------------------------------------------------------------------------
# django-rest-framework - https://www.django-rest-framework.org/api-guide/settings/
REST_FRAMEWORK = {
    "DEFAULT_AUTHENTICATION_CLASSES": (
        "rest_framework.authentication.SessionAuthentication",
        "rest_framework.authentication.TokenAuthentication",
    ),
    "DEFAULT_PERMISSION_CLASSES": ("rest_framework.permissions.IsAuthenticated",),
}
# django-cors-headers - https://github.com/adamchainz/django-cors-headers#setup
# CORS headers are emitted only for URLs under /api/.
CORS_URLS_REGEX = r"^/api/.*$"
{%- endif %}
# django-rest-framework
# -------------------------------------------------------------------------------
# django-rest-framework - https://www.django-rest-framework.org/api-guide/settings/
# NOTE(review): this unconditional REST_FRAMEWORK definition duplicates -- and,
# when use_drf == "y", silently overrides -- the one defined inside the
# cookiecutter conditional above. Likely a merge leftover; confirm which copy
# should survive.
REST_FRAMEWORK = {
    "DEFAULT_AUTHENTICATION_CLASSES": [
        "rest_framework.authentication.SessionAuthentication",
        "rest_framework.authentication.TokenAuthentication",
    ],
    "DEFAULT_PERMISSION_CLASSES": ("rest_framework.permissions.IsAuthenticated",),
}
# KeyCloak
# ------------------------------------------------------------------------------
# Optional Keycloak (OpenID Connect) integration, toggled by the USE_KEYCLOAK
# environment variable (off by default).
USE_KEYCLOAK = env.bool("USE_KEYCLOAK", False)
# BUG FIX: env.bool() always returns a boolean (the default is False, never
# None), so the previous ``if USE_KEYCLOAK is not None:`` test was always true
# and the Keycloak stack was enabled unconditionally. Test truthiness instead.
if USE_KEYCLOAK:
    INSTALLED_APPS += [
        'django_keycloak.apps.KeycloakAppConfig',
        'caesar_user',
    ]
    # Session- and bearer-token middleware provided by django-keycloak.
    MIDDLEWARE += [
        'django_keycloak.middleware.BaseKeycloakMiddleware',
        'django_keycloak.middleware.KeycloakStatelessBearerAuthenticationMiddleware',
    ]
    PASSWORD_HASHERS = [
        'django_keycloak.hashers.PBKDF2SHA512PasswordHasher',
    ]
    AUTHENTICATION_BACKENDS = [
        'django.contrib.auth.backends.ModelBackend',
        'django_keycloak.auth.backends.KeycloakAuthorizationCodeBackend',
        'django_keycloak.auth.backends.KeycloakIDTokenAuthorizationBackend',
    ]
    # DRF should also accept Keycloak ID tokens.
    REST_FRAMEWORK['DEFAULT_AUTHENTICATION_CLASSES'] += [
        'django_keycloak.auth.authentication.KeycloakIDAuthentication',
    ]
    KEYCLOAK_OIDC_PROFILE_MODEL = 'django_keycloak.OpenIdConnectProfile'
    # Paths exempt from bearer-token enforcement (admin UI and API docs).
    KEYCLOAK_BEARER_AUTHENTICATION_EXEMPT_PATHS = [
        r'^admin/',
        r'^docs/',
    ]
    # KEYCLOAK_SKIP_SSL_VERIFY = True
    LOGIN_URL = 'keycloak_login'
# Your stuff...
# ------------------------------------------------------------------------------
| 38.960199 | 122 | 0.642957 |
d66c837827f6c00239af995a01af38d5b05292e8 | 591 | py | Python | test/vanilla/Expected/AcceptanceTests/BodyDateTime/bodydatetime/aio/__init__.py | Azure/autorest.azure-functions-python | b0896d8aec6b0fd6f0bcb12ea8e0489652dc2783 | [
"MIT"
] | 4 | 2020-10-22T20:35:38.000Z | 2021-12-21T07:29:01.000Z | test/vanilla/Expected/AcceptanceTests/BodyDateTime/bodydatetime/aio/__init__.py | Azure/autorest.azure-functions-python | b0896d8aec6b0fd6f0bcb12ea8e0489652dc2783 | [
"MIT"
] | 3 | 2020-09-09T15:16:15.000Z | 2021-12-20T15:25:18.000Z | test/vanilla/Expected/AcceptanceTests/BodyDateTime/bodydatetime/aio/__init__.py | Azure/autorest.azure-functions-python | b0896d8aec6b0fd6f0bcb12ea8e0489652dc2783 | [
"MIT"
] | 2 | 2020-11-10T07:16:23.000Z | 2020-12-30T11:03:14.000Z | # coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------

# Async package init: re-export the generated service client so callers can
# import it directly from this package instead of the private module.
from ._auto_rest_date_time_test_service_async import AutoRestDateTimeTestService

# Explicit public API of this package.
__all__ = ['AutoRestDateTimeTestService']
| 53.727273 | 94 | 0.602369 |
ee957607f208f3250964b4ad8c8aa78e951b27e9 | 177 | py | Python | AtCoder/ABC/129/a.py | ttyskg/ProgrammingCompetition | 65fb9e131803e4f1a1a6369e68ed1b504f08b00f | [
"MIT"
] | null | null | null | AtCoder/ABC/129/a.py | ttyskg/ProgrammingCompetition | 65fb9e131803e4f1a1a6369e68ed1b504f08b00f | [
"MIT"
] | null | null | null | AtCoder/ABC/129/a.py | ttyskg/ProgrammingCompetition | 65fb9e131803e4f1a1a6369e68ed1b504f08b00f | [
"MIT"
] | null | null | null | import sys
def main(line=None):
    """Return the sum of the two smallest whitespace-separated integers.

    :param line: Optional input line. When None (the default, and the
        original behavior) one line is read from standard input, so
        ``main()`` with no argument is fully backward compatible.
    :return: Sum of the two smallest integers on the line.
    """
    if line is None:
        line = sys.stdin.readline()
    # sorted() replaces the old local named ``input`` (which shadowed the
    # builtin) and the in-place list.sort() with a single expression.
    values = sorted(map(int, line.split()))
    return sum(values[:2])
# Script entry point: compute the answer and print it.
if __name__ == '__main__':
    print(main())
| 14.75 | 39 | 0.570621 |
d6d5b6196df7f109026b0580b8ab2bffc332738e | 1,431 | py | Python | tests/model/test_publication.py | pauloaugusto-dmf/blog_django | 7374e85dd4f0622aefbbb99d27ceb85f19fd1cd8 | [
"MIT"
] | 2 | 2021-12-31T22:14:31.000Z | 2021-12-31T22:14:34.000Z | tests/model/test_publication.py | pauloaugusto-dmf/blog_django | 7374e85dd4f0622aefbbb99d27ceb85f19fd1cd8 | [
"MIT"
] | null | null | null | tests/model/test_publication.py | pauloaugusto-dmf/blog_django | 7374e85dd4f0622aefbbb99d27ceb85f19fd1cd8 | [
"MIT"
] | null | null | null | import pytest
from publication.models import Topic, Post
from ..factories.publication import TopicFactory
from ..factories.users import UserFactory
pytestmark = pytest.mark.django_db
class TestTopicModel:
    """Unit tests for the Topic model."""

    def test_create_topic(self):
        """A topic persists the name it was created with."""
        created = Topic.objects.create(name="test_topic")
        assert created.name == "test_topic"

    def test___str__(self):
        """The string representation of a topic is its name."""
        created = Topic.objects.create(name="test_topic")
        assert str(created) == "test_topic"
class TestPostModel:
    """Unit tests for the Post model."""

    @staticmethod
    def _create_post():
        """Create a Post with a fresh topic and author.

        Centralizes the fixture that both tests below previously duplicated.
        Returns ``(post, topic, author)``.
        """
        topic = TopicFactory()
        author = UserFactory()
        post = Post.objects.create(
            title="test post",
            topic=topic,
            author=author,
            image="teste.jpg",
            alt="imagem de teste",
            article="artigo de teste",
        )
        return post, topic, author

    # Name kept as-is (including the historical "crete" typo) so external
    # test selection by name keeps working.
    def test_crete_post(self):
        """Every field passed to create() is persisted unchanged."""
        post, topic, author = self._create_post()
        assert post.title == "test post"
        assert post.topic == topic
        assert post.author == author
        assert post.image == "teste.jpg"
        assert post.alt == "imagem de teste"
        assert post.article == "artigo de teste"

    def test__str__(self):
        """The string representation of a post is its title."""
        post, _, _ = self._create_post()
        assert str(post) == "test post"
| 27 | 55 | 0.587002 |
69611718116b71b0be9875d7598daa3b82206a78 | 1,244 | py | Python | beautiful/event/models.py | wtl0442/bwg_real | 45007e89cb6c5099daea7a84b81433d1adb8e41d | [
"MIT"
] | null | null | null | beautiful/event/models.py | wtl0442/bwg_real | 45007e89cb6c5099daea7a84b81433d1adb8e41d | [
"MIT"
] | 9 | 2018-01-24T10:40:34.000Z | 2022-01-13T00:40:29.000Z | beautiful/event/models.py | wtl0442/bwg_real | 45007e89cb6c5099daea7a84b81433d1adb8e41d | [
"MIT"
] | 1 | 2018-05-05T08:54:55.000Z | 2018-05-05T08:54:55.000Z | from django.db import models
from django.urls import reverse
from django.conf import settings
from django_google_maps import fields as map_fields
# Create your models here.
class Tag(models.Model):
    """A short free-form label that can be attached to events."""

    # NOTE(review): default=None on a CharField that is neither null=True nor
    # blank=True means saving a Tag without an explicit name will hit the
    # database NOT NULL constraint -- confirm this default was intended.
    tag_name = models.CharField(max_length=10, default=None)

    def __str__(self):
        # Tags are displayed by name (admin lists, shell, templates).
        return self.tag_name
class Event(models.Model):
    """An event listing: title, place, schedule, description and tags.

    The verbose_name values are Korean admin-UI labels.
    NOTE(review): the Korean strings appear mis-encoded (decomposed jamo) in
    this copy of the source -- verify the original file's encoding before
    relying on them; they are preserved as found.
    """

    title = models.CharField(max_length=30, verbose_name='ํ์ฌ ์ด๋ฆ')
    place = models.CharField(max_length=30, verbose_name='ํ์ฌ ์ฅ์')
    date = models.DateField(verbose_name='ํ์ฌ ๋ ์ง')
    time = models.TimeField(verbose_name='ํ์ฌ ์๊ฐ')
    content = models.TextField(verbose_name='ํ์ฌ ์ค๋ช')
    tag2 = models.CharField(max_length=10, default=None, blank=True, null=True, verbose_name='ํ์ฌ ์ฑ๊ฒฉ ํ๊ทธ')
    # NOTE(review): null=True has no effect on ManyToManyField (Django ignores
    # it and warns with fields.W340); it can be dropped in a follow-up.
    tag = models.ManyToManyField(Tag, blank=True, null=True, verbose_name='ํ๊ทธ')

    def __str__(self):
        return self.title

    def get_absolute_url(self):
        # After create/update, redirect back to the event main/list view.
        return reverse('event:event_main')
# Create your models here.
class Googlemap(models.Model):
    """A named place with a Google-Maps address and geolocation."""

    place_name = models.CharField(max_length=30, verbose_name='์ฅ์ ์ด๋ฆ')
    # django_google_maps fields: a textual address plus a "lat,lng" string --
    # presumably filled by the package's admin map widget (confirm admin setup).
    address = map_fields.AddressField(max_length=200)
    geolocation = map_fields.GeoLocationField(max_length=100)

    def __str__(self):
        return self.place_name
| 29.619048 | 104 | 0.728296 |
b177199340efda9fc41813e5bac103e7802a6dfe | 3,907 | py | Python | republisher/management/commands/emailrepublishers.py | danielsinaga1/djangonewsfix | 04b84240daabb840c1e715fbade8ae15b3b0f22c | [
"BSD-3-Clause"
] | null | null | null | republisher/management/commands/emailrepublishers.py | danielsinaga1/djangonewsfix | 04b84240daabb840c1e715fbade8ae15b3b0f22c | [
"BSD-3-Clause"
] | 3 | 2020-02-12T01:23:34.000Z | 2021-06-10T21:49:25.000Z | republisher/management/commands/emailrepublishers.py | danielsinaga1/djangonewsfix | 04b84240daabb840c1e715fbade8ae15b3b0f22c | [
"BSD-3-Clause"
] | null | null | null | import datetime
from django.core.management.base import BaseCommand
from django.template.loader import render_to_string
from django.conf import settings
from django.utils import timezone
from django.contrib.sites.models import Site
from django.core.mail import send_mail
import sys
import html2text
from bs4 import BeautifulSoup as bs
from republisher.models import RepublisherArticle
def _absolutize_article(article, prefix):
    """Rewrite the article's root-relative URLs as absolute URLs.

    Mutates (in memory only -- the article is never saved) the
    ``cached_primary_image`` attribute and the ``<img src>`` / ``<a href>``
    attributes inside ``article.body`` so the republished copy does not
    contain paths that only resolve on our own site.

    :param article: The article whose image/body URLs are rewritten.
    :param prefix: Scheme+domain prefix, e.g. ``http://example.com``.
    """
    if article.cached_primary_image[0] == "/":
        article.cached_primary_image = prefix + article.cached_primary_image
    soup = bs(article.body, 'html.parser')
    for image in soup.find_all("img"):
        if image['src'][0] == '/':
            image['src'] = prefix + image['src']
    for link in soup.find_all("a"):
        # BUG FIX: the previous ``'href' in link`` tested membership in the
        # tag's *contents*, not its attributes, so relative hrefs were never
        # rewritten; Tag.has_attr() is the correct check.
        if link.has_attr('href') and len(link['href']) > 0 and \
                link['href'][0] == '/':
            link['href'] = prefix + link['href']
    article.body = str(soup)


def process():
    """Email every due, scheduled RepublisherArticle to its republisher.

    An article is due once it is published and its configured wait time
    (minutes after publication) has elapsed.  Each article's status is
    updated to "sent" or "failed" accordingly.

    :return: ``{"successes": <sent count>, "failures": <failed count>}``.
    """
    successes = 0
    failures = 0
    republisherarticles = RepublisherArticle.objects.filter(status="scheduled")
    for republisherarticle in republisherarticles:
        # Only notify once the article is published.
        if not republisherarticle.article.is_published():
            continue
        # Check that sufficient time has passed since publishing.
        dont_send_before = republisherarticle.article.published + \
            datetime.timedelta(minutes=republisherarticle.wait_time)
        if timezone.now() < dont_send_before:
            continue
        prefix = "http://" + Site.objects.all()[0].domain
        article = republisherarticle.article
        url = prefix + article.get_absolute_url()
        _absolutize_article(article, prefix)
        message = render_to_string('republisher/message.html',
                                   {'republisher':
                                    republisherarticle.republisher,
                                    'note': republisherarticle.note,
                                    'article': article,
                                    'url': url})
        subject = "Article from GroundUp: " + article.title
        email_addresses = republisherarticle.republisher.email_addresses
        email_list = [address.strip() for address in
                      email_addresses.split(",")]
        email_list.append(settings.REPUBLISHER_EMAIL_FROM)
        try:
            send_mail(subject, html2text.html2text(message),
                      settings.REPUBLISHER_EMAIL_FROM,
                      email_list, html_message=message,
                      fail_silently=False)
            republisherarticle.status = "sent"
            republisherarticle.save()
            successes = successes + 1
            print("EmailRepublishers: Sent: {}".
                  format(str(republisherarticle)))
        # BUG FIX: this was a bare ``except:`` which also swallowed
        # KeyboardInterrupt/SystemExit; catch Exception instead.
        except Exception:
            failures = failures + 1
            republisherarticle.status = "failed"
            republisherarticle.save()
            print("EmailRepublishers: Error: ", sys.exc_info()[0])
            print("EmailRepublishers: Failed send: {}".
                  format(str(republisherarticle)))
    return {"successes": successes, "failures": failures}
class Command(BaseCommand):
    """Management command that emails scheduled articles to republishers."""

    help = 'Sends articles to republishers'

    def add_arguments(self, parser):
        # This command takes no command-line options.
        pass

    def handle(self, *args, **options):
        """Run one processing pass and print a timestamped summary."""
        print("EmailRepublishers: {0}:".format(str(timezone.now())))
        success_dict = process()
        print("EmailRepublishers: Successful: {0}. Failed: {1}".
              format(success_dict["successes"], success_dict["failures"]))
| 43.411111 | 79 | 0.566931 |
155659aac50e89e56891d5602584c8bcc56c0046 | 1,024 | py | Python | src/pages/single_article_page.py | djeada/Responsive-Blog-Template | 3cf40a2f389d17acb49f82885947e825c921760e | [
"MIT"
] | null | null | null | src/pages/single_article_page.py | djeada/Responsive-Blog-Template | 3cf40a2f389d17acb49f82885947e825c921760e | [
"MIT"
] | null | null | null | src/pages/single_article_page.py | djeada/Responsive-Blog-Template | 3cf40a2f389d17acb49f82885947e825c921760e | [
"MIT"
] | 1 | 2022-02-02T13:03:24.000Z | 2022-02-02T13:03:24.000Z | import MySQLdb
from flask import Blueprint, render_template
from flask_mysqldb import MySQL
def construct_article_page(database: MySQL) -> Blueprint:
    """
    Constructs the single article page.

    :param database: The database object.
    :return: Single article page blueprint.
    """
    article_page = Blueprint("/article/<string:id>/", __name__)

    @article_page.route("/article/<string:id>/")
    def article(id: str) -> str:
        """
        Renders the article page.

        :param id: The article id in the database.
        :return: Rendered article page.
        """
        try:
            with database.connection.cursor() as cursor:
                # SECURITY FIX: ``id`` comes straight from the URL, so it must
                # be bound as a query parameter rather than interpolated into
                # the SQL string (SQL injection).
                cursor.execute("SELECT * FROM articles WHERE id = %s", (id,))
                retrieved_article = cursor.fetchone()
            if retrieved_article is None:
                # Robustness: a missing row previously rendered with
                # article=None; show the not-found message instead.
                return render_template("article.html", msg="Article not found")
            return render_template("article.html", article=retrieved_article)
        except MySQLdb._exceptions.OperationalError:
            return render_template("article.html", msg="Article not found")

    return article_page
| 30.117647 | 77 | 0.650391 |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.