hexsha stringlengths 40 40 | size int64 1 1.03M | ext stringclasses 10
values | lang stringclasses 1
value | max_stars_repo_path stringlengths 3 239 | max_stars_repo_name stringlengths 5 130 | max_stars_repo_head_hexsha stringlengths 40 78 | max_stars_repo_licenses listlengths 1 10 | max_stars_count int64 1 191k ⌀ | max_stars_repo_stars_event_min_datetime stringlengths 24 24 ⌀ | max_stars_repo_stars_event_max_datetime stringlengths 24 24 ⌀ | max_issues_repo_path stringlengths 3 239 | max_issues_repo_name stringlengths 5 130 | max_issues_repo_head_hexsha stringlengths 40 78 | max_issues_repo_licenses listlengths 1 10 | max_issues_count int64 1 67k ⌀ | max_issues_repo_issues_event_min_datetime stringlengths 24 24 ⌀ | max_issues_repo_issues_event_max_datetime stringlengths 24 24 ⌀ | max_forks_repo_path stringlengths 3 239 | max_forks_repo_name stringlengths 5 130 | max_forks_repo_head_hexsha stringlengths 40 78 | max_forks_repo_licenses listlengths 1 10 | max_forks_count int64 1 105k ⌀ | max_forks_repo_forks_event_min_datetime stringlengths 24 24 ⌀ | max_forks_repo_forks_event_max_datetime stringlengths 24 24 ⌀ | content stringlengths 1 1.03M | avg_line_length float64 1 958k | max_line_length int64 1 1.03M | alphanum_fraction float64 0 1 |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
7959ab19a11899c2ced3262c2e7ea3c70a7a5d4a | 7,106 | py | Python | kubernetes_asyncio/client/models/v1_subject_access_review.py | PidgeyBE/kubernetes_asyncio | 14d15dc309890253c26b6274a022e84441e05217 | [
"Apache-2.0"
] | null | null | null | kubernetes_asyncio/client/models/v1_subject_access_review.py | PidgeyBE/kubernetes_asyncio | 14d15dc309890253c26b6274a022e84441e05217 | [
"Apache-2.0"
] | null | null | null | kubernetes_asyncio/client/models/v1_subject_access_review.py | PidgeyBE/kubernetes_asyncio | 14d15dc309890253c26b6274a022e84441e05217 | [
"Apache-2.0"
] | null | null | null | # coding: utf-8
"""
Kubernetes
No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) # noqa: E501
OpenAPI spec version: v1.13.5
Generated by: https://openapi-generator.tech
"""
import pprint
import re # noqa: F401
import six
class V1SubjectAccessReview(object):
    """NOTE: This class is auto generated by OpenAPI Generator.
    Ref: https://openapi-generator.tech

    Do not edit the class manually.
    """

    """
    Attributes:
      openapi_types (dict): The key is attribute name
                            and the value is attribute type.
      attribute_map (dict): The key is attribute name
                            and the value is json key in definition.
    """
    # Maps python attribute name -> OpenAPI type string; used by the
    # generated (de)serialization machinery (see to_dict below).
    openapi_types = {
        'api_version': 'str',
        'kind': 'str',
        'metadata': 'V1ObjectMeta',
        'spec': 'V1SubjectAccessReviewSpec',
        'status': 'V1SubjectAccessReviewStatus'
    }

    # Maps python attribute name -> JSON key in the API definition.
    attribute_map = {
        'api_version': 'apiVersion',
        'kind': 'kind',
        'metadata': 'metadata',
        'spec': 'spec',
        'status': 'status'
    }

    def __init__(self, api_version=None, kind=None, metadata=None, spec=None, status=None):  # noqa: E501
        """V1SubjectAccessReview - a model defined in OpenAPI"""  # noqa: E501

        self._api_version = None
        self._kind = None
        self._metadata = None
        self._spec = None
        self._status = None
        self.discriminator = None

        # Optional fields are only assigned through their property setters
        # when explicitly provided.
        if api_version is not None:
            self.api_version = api_version
        if kind is not None:
            self.kind = kind
        if metadata is not None:
            self.metadata = metadata
        # `spec` is required: the property setter raises ValueError on None.
        self.spec = spec
        if status is not None:
            self.status = status

    @property
    def api_version(self):
        """Gets the api_version of this V1SubjectAccessReview.  # noqa: E501

        APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#resources  # noqa: E501

        :return: The api_version of this V1SubjectAccessReview.  # noqa: E501
        :rtype: str
        """
        return self._api_version

    @api_version.setter
    def api_version(self, api_version):
        """Sets the api_version of this V1SubjectAccessReview.

        APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#resources  # noqa: E501

        :param api_version: The api_version of this V1SubjectAccessReview.  # noqa: E501
        :type: str
        """
        self._api_version = api_version

    @property
    def kind(self):
        """Gets the kind of this V1SubjectAccessReview.  # noqa: E501

        Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#types-kinds  # noqa: E501

        :return: The kind of this V1SubjectAccessReview.  # noqa: E501
        :rtype: str
        """
        return self._kind

    @kind.setter
    def kind(self, kind):
        """Sets the kind of this V1SubjectAccessReview.

        Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#types-kinds  # noqa: E501

        :param kind: The kind of this V1SubjectAccessReview.  # noqa: E501
        :type: str
        """
        self._kind = kind

    @property
    def metadata(self):
        """Gets the metadata of this V1SubjectAccessReview.  # noqa: E501

        :return: The metadata of this V1SubjectAccessReview.  # noqa: E501
        :rtype: V1ObjectMeta
        """
        return self._metadata

    @metadata.setter
    def metadata(self, metadata):
        """Sets the metadata of this V1SubjectAccessReview.

        :param metadata: The metadata of this V1SubjectAccessReview.  # noqa: E501
        :type: V1ObjectMeta
        """
        self._metadata = metadata

    @property
    def spec(self):
        """Gets the spec of this V1SubjectAccessReview.  # noqa: E501

        :return: The spec of this V1SubjectAccessReview.  # noqa: E501
        :rtype: V1SubjectAccessReviewSpec
        """
        return self._spec

    @spec.setter
    def spec(self, spec):
        """Sets the spec of this V1SubjectAccessReview.

        :param spec: The spec of this V1SubjectAccessReview.  # noqa: E501
        :type: V1SubjectAccessReviewSpec
        """
        # `spec` is the only required field of this model.
        if spec is None:
            raise ValueError("Invalid value for `spec`, must not be `None`")  # noqa: E501
        self._spec = spec

    @property
    def status(self):
        """Gets the status of this V1SubjectAccessReview.  # noqa: E501

        :return: The status of this V1SubjectAccessReview.  # noqa: E501
        :rtype: V1SubjectAccessReviewStatus
        """
        return self._status

    @status.setter
    def status(self, status):
        """Sets the status of this V1SubjectAccessReview.

        :param status: The status of this V1SubjectAccessReview.  # noqa: E501
        :type: V1SubjectAccessReviewStatus
        """
        self._status = status

    def to_dict(self):
        """Returns the model properties as a dict"""
        result = {}

        for attr, _ in six.iteritems(self.openapi_types):
            value = getattr(self, attr)
            if isinstance(value, list):
                # Recursively serialize nested models held in lists.
                result[attr] = list(map(
                    lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
                    value
                ))
            elif hasattr(value, "to_dict"):
                result[attr] = value.to_dict()
            elif isinstance(value, dict):
                # Recursively serialize nested models held as dict values.
                result[attr] = dict(map(
                    lambda item: (item[0], item[1].to_dict())
                    if hasattr(item[1], "to_dict") else item,
                    value.items()
                ))
            else:
                result[attr] = value

        return result

    def to_str(self):
        """Returns the string representation of the model"""
        return pprint.pformat(self.to_dict())

    def __repr__(self):
        """For `print` and `pprint`"""
        return self.to_str()

    def __eq__(self, other):
        """Returns true if both objects are equal"""
        if not isinstance(other, V1SubjectAccessReview):
            return False

        return self.__dict__ == other.__dict__

    def __ne__(self, other):
        """Returns true if both objects are not equal"""
        return not self == other
| 32.009009 | 295 | 0.618491 |
7959ab340a01764236d466d7b8acbddb3ca0db8f | 1,813 | py | Python | data_import.py | schenkd/webdev-project | 8b2ae4396d5f3f059692021b07fe74394408dfea | [
"MIT"
] | 1 | 2017-04-27T09:36:17.000Z | 2017-04-27T09:36:17.000Z | data_import.py | schenkd/webdev-project | 8b2ae4396d5f3f059692021b07fe74394408dfea | [
"MIT"
] | null | null | null | data_import.py | schenkd/webdev-project | 8b2ae4396d5f3f059692021b07fe74394408dfea | [
"MIT"
] | null | null | null | # ~*~ encoding: utf-8 ~*~
from pymongo import MongoClient
from pandas import read_csv
from datetime import date
# Import the BfArM drug-shortage report CSV into MongoDB: one document
# per row, upserted into the dev.drug collection keyed on the ENR.
mongodb = MongoClient('192.168.178.82', 9999)
db = mongodb['dev']
drug_collection = db['drug']

# Load the CSV as {column -> {row_index -> value}} and drop the columns
# that are not imported.
drugs = read_csv('~/Dokumente/bfarm_lieferenpass_meldung.csv', delimiter=';', encoding='iso8859_2').to_dict()
for unused_column in ('Id', 'aktuelle Bescheidart', 'Meldungsart'):
    drugs.pop(unused_column, None)

# NOTE: parsing of the date columns ('Beginn Engpass', 'Ende Engpass',
# 'Datum der letzten Meldung') was disabled in the original script;
# reintroduce it here if `end`/`initial_report`/`last_report` fields are
# needed again.
for x in range(len(drugs['Verkehrsfähig'])):
    # Build a fresh document per row so no stale keys leak between rows
    # (the original reused one mutable dict across iterations).
    data = {
        # Substances come as a '; '-separated list.
        'substance': drugs['Wirkstoffe'][x].replace(' ', '').split(';'),
        # ENR (registration number) uniquely identifies the drug.
        'enr': int(drugs['Enr'][x]),
        'marketability': drugs['Verkehrsfähig'][x] == 'ja',
        'atc_code': drugs['ATC-Code'][x],
        # The PZN column may hold '-' (missing) or several ';'-separated
        # numbers; only the first one is kept.
        'pzn': int(drugs['PZN'][x].split(' ')[0].replace(';', '')) if drugs['PZN'][x] != '-' else None,
        'drug_title': drugs['Arzneimittelbezeichnung'][x],
        'hospital': drugs['Krankenhausrelevant'][x] == 'ja',
    }
    # Upsert keyed on the unique registration number.
    drug_collection.update_one({'enr': data['enr']}, {'$set': data}, upsert=True)
| 32.963636 | 109 | 0.607832 |
7959ab642e8506fd393c0133c60e28e816765cc1 | 4,891 | py | Python | src/bench/data/convert-raw-files.py | bitcoin-ce/bitcoin-ce | e6a0f3907a58ad4a0f67a30d6b8b3940f08fe2c9 | [
"MIT"
] | 1 | 2021-11-24T03:54:05.000Z | 2021-11-24T03:54:05.000Z | src/bench/data/convert-raw-files.py | 1Crazymoney/bitcoin-cash-node | 8f82823b3c5d4bcb401b0e4e6b464c1228f936e1 | [
"MIT"
] | null | null | null | src/bench/data/convert-raw-files.py | 1Crazymoney/bitcoin-cash-node | 8f82823b3c5d4bcb401b0e4e6b464c1228f936e1 | [
"MIT"
] | null | null | null | #!/usr/bin/env python3
# Copyright (c) 2019 The Bitcoin Core developers
# Copyright (c) 2019-2020 The Bitcoin developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
from glob import glob
from os.path import basename
# Collect every .raw fixture in the working directory; sort so the
# generated output is deterministic across runs.
names_raw = glob("*.raw")
print("Found " + str(len(names_raw)) + " .raw file(s) in working directory")
names_raw.sort()
names = []
for name_raw in names_raw:
    # foo/bar.raw -> foo/bar.cpp, with `name` reduced to the basename
    # for use as the C++ identifier.
    name = name_raw[:-4]
    name_cpp = name + ".cpp"
    name = basename(name)
    with open(name_raw, "rb") as file_raw, open(name_cpp, "w") as file_cpp:
        print("Converting " + name_raw + " to " + name_cpp + " ...")
        contents = file_raw.read()
        file_cpp.write("// DO NOT EDIT THIS FILE - it is machine-generated, use convert-raw-files.py to regenerate\n")
        file_cpp.write("\n")
        file_cpp.write("#include <cstdint>\n")
        file_cpp.write("#include <vector>\n")
        file_cpp.write("\n")
        # Emit the raw bytes as a single escaped C++ string literal.
        code = "static const unsigned char raw[] = \""
        prevX = -1
        for i in range(len(contents)):
            x = contents[i]
            # We use short escape sequences for control characters that have one.
            if x == 0x07:
                code += "\\a"
            elif x == 0x08:
                code += "\\b"
            elif x == 0x09:
                code += "\\t"
            elif x == 0x0a:
                code += "\\n"
            elif x == 0x0b:
                code += "\\v"
            elif x == 0x0c:
                code += "\\f"
            elif x == 0x0d:
                code += "\\r"
            # To avoid ending the C++ string, we escape quotation marks.
            elif x == 0x22:
                code += "\\\""
            # To avoid formation of unintended escape sequences, we escape backslashes.
            elif x == 0x5c:
                code += "\\\\"
            # To avoid C++ trigraph formation, we escape a question mark if the previous character was also a question mark.
            elif prevX == 0x3f and x == 0x3f:
                code += "\\?"
            # We display a character unescaped if it is ASCII, and not a control character.
            elif x >= 0x20 and x < 0x7f:
                code += chr(x)
            else:
                # This character can be omitted if it is the last character and it is null,
                # since we are allowed to read the terminating null added by the C++ compiler.
                last = i+1 == len(contents)
                if not last or x > 0x00:
                    # We use octal escape sequences for the rest, which have a length limit of three octal digits.
                    # One or two leading zeros in octal sequences can be omitted if the next character is not a digit.
                    # If the next character is a digit, it is cheaper to use all three octal digits here,
                    # than to escape the next character as well.
                    octalAbbr = last or contents[i+1] < 0x30 or contents[i+1] >= 0x3a
                    if octalAbbr and x < 0x08:
                        code += "\\" + str(x)
                    elif octalAbbr and x < 0x20:
                        code += "\\" + str(x // 8) + str(x % 8)
                    else:
                        code += "\\" + str(x // 64) + str(x // 8 % 8) + str(x % 8)
            prevX = x
        code += "\";\n"
        file_cpp.write(code)
        file_cpp.write("\n")
        file_cpp.write("namespace benchmark {\n")
        file_cpp.write("namespace data {\n")
        file_cpp.write("\n")
        # Expose the bytes as a std::vector inside benchmark::data.
        file_cpp.write("extern const std::vector<uint8_t> " + name + "(raw, raw + " + str(len(contents)) + ");\n")
        file_cpp.write("\n")
        file_cpp.write("} // namespace data\n")
        file_cpp.write("} // namespace benchmark\n")
    names.append(name)
if len(names):
    # Regenerate the shared header declaring every converted vector.
    name_h = "../data.h"
    with open(name_h, "w") as file_h:
        print("Writing " + str(len(names)) + " declaration(s) to " + name_h + " ...")
        file_h.write("// DO NOT EDIT THIS FILE - it is machine-generated, use data/convert-raw-files.py to regenerate\n")
        file_h.write("\n")
        file_h.write("#ifndef BITCOIN_BENCH_DATA_H\n")
        file_h.write("#define BITCOIN_BENCH_DATA_H\n")
        file_h.write("\n")
        file_h.write("#include <cstdint>\n")
        file_h.write("#include <vector>\n")
        file_h.write("\n")
        file_h.write("namespace benchmark {\n")
        file_h.write("namespace data {\n")
        file_h.write("\n")
        for name in names:
            file_h.write("extern const std::vector<uint8_t> " + name + ";\n")
        file_h.write("\n")
        file_h.write("} // namespace data\n")
        file_h.write("} // namespace benchmark\n")
        file_h.write("\n")
        file_h.write("#endif // BITCOIN_BENCH_DATA_H\n")
print("Done")
| 40.090164 | 124 | 0.532815 |
7959abc40b48eab1b2becc369fe69288a1768bee | 4,325 | py | Python | sa/profiles/Qtech/QSW2800/get_switchport.py | xUndero/noc | 9fb34627721149fcf7064860bd63887e38849131 | [
"BSD-3-Clause"
] | 1 | 2019-09-20T09:36:48.000Z | 2019-09-20T09:36:48.000Z | sa/profiles/Qtech/QSW2800/get_switchport.py | ewwwcha/noc | aba08dc328296bb0e8e181c2ac9a766e1ec2a0bb | [
"BSD-3-Clause"
] | null | null | null | sa/profiles/Qtech/QSW2800/get_switchport.py | ewwwcha/noc | aba08dc328296bb0e8e181c2ac9a766e1ec2a0bb | [
"BSD-3-Clause"
] | null | null | null | # -*- coding: utf-8 -*-
# ---------------------------------------------------------------------
# Qtech.QSW2800.get_switchport
# ---------------------------------------------------------------------
# Copyright (C) 2007-2019 The NOC Project
# See LICENSE for details
# ---------------------------------------------------------------------
# Python modules
import re
# NOC modules
from noc.sa.profiles.Generic.get_switchport import Script as BaseScript
from noc.sa.interfaces.igetswitchport import IGetSwitchport
from noc.core.validators import is_int
class Script(BaseScript):
    """Collects switchport (VLAN membership) information from
    Qtech QSW2800 devices by scraping several CLI show commands."""

    name = "Qtech.QSW2800.get_switchport"
    interface = IGetSwitchport

    # Matches `show interface` alias lines: interface name + description.
    rx_descr = re.compile(
        r"^\s+(?P<interface>\S+\d+(?:/\d+)?) is layer \d+ "
        r"port, alias name is (?P<description>.+?), "
        r"index is \d+$",
        re.MULTILINE,
    )
    # Matches one `show switchport interface` record: name, port type,
    # PVID and (optionally) the allowed-VLAN list for hybrid/trunk ports.
    rx_switchport = re.compile(
        r"(?P<interface>\S+\d+(/\d+)?)\n"
        r"Type :(?P<type>Universal|"
        r"Aggregation(?: member)?)\n"
        r"(?:Mac addr num: No limit\n)?"
        r"Mode :\S+\s*\nPort VID :(?P<pvid>\d+)\n"
        r"((?:Hybrid tag|Trunk) allowed Vlan:"
        r"\s+(?P<tags>\S+))?",
        re.MULTILINE,
    )
    # Matches interfaces with dot1q-tunnel (QinQ) enabled.
    rx_qinq_port = re.compile(
        r"^Interface (?P<interface>\S+):\n" r"\s+dot1q-tunnel is enable", re.MULTILINE
    )

    def execute_cli(self, **kwargs):
        """Returns the switchport description for every physical port and
        port-channel; port-channel member ports are skipped."""
        # Get portchannels
        pc_members = []
        portchannels = self.scripts.get_portchannel()
        for pch in portchannels:
            pc_members += pch["members"]
        # Collect interfaces with QinQ (dot1q-tunnel) enabled.
        qinq_ports = []
        cmd = self.cli("show dot1q-tunnel")
        for match in self.rx_qinq_port.finditer(cmd):
            qinq_ports += [match.group("interface")]
        # Get interfaces' status
        int_status = {}
        for istat in self.scripts.get_interface_status():
            int_status[istat["interface"]] = istat["status"]
        # Get tags
        # Get vlans
        vlans = set()
        cmd = self.cli("show vlan brief")
        for line in cmd.splitlines():
            for k in line.split():
                if is_int(k):
                    vlans.add(int(k))
        # vlans = [vlan["vlan_id"] for vlan in self.scripts.get_vlans()]
        # Get descriptions
        descr = {}
        cmd = self.cli("show interface | i alias")
        for match in self.rx_descr.finditer(cmd):
            # "(null)" means no alias is configured on the port.
            if match.group("description") != "(null)":
                descr[match.group("interface")] = match.group("description")
        result = []
        cmd = self.cli("show switchport interface")
        for match in self.rx_switchport.finditer(cmd):
            ifname = match.group("interface")
            # skip portchannels members
            if ifname in pc_members:
                continue
            pvid = int(match.group("pvid"))
            # initial data
            swp = {
                "interface": ifname,
                "status": int_status.get(ifname, False),
                "tagged": [],
                "untagged": pvid,
                "members": [],
                "802.1ad Tunnel": False,
            }
            # description
            if ifname in descr:
                swp["description"] = descr[ifname]
            # port-channel members
            if match.group("type") == "Aggregation":
                for pch in portchannels:
                    if pch["interface"] == ifname:
                        swp["members"] = pch["members"]
                        for mmbr in swp["members"]:  # if PC member is QinQ
                            if mmbr in qinq_ports:  # PC is QinQ too
                                swp["802.1ad Tunnel"] = True
                                break
                        break
            # tags
            if match.group("tags"):
                ma_group = match.group("tags").replace(";", ",")
                # "showOneSwitchPort" is CLI pagination noise, not a VLAN list.
                if "showOneSwitchPort" in ma_group:
                    continue
                # Only report VLANs that actually exist and are not the PVID.
                for tag in self.expand_rangelist(ma_group):
                    if tag in vlans and tag != pvid:
                        swp["tagged"] += [tag]
            # 802.1q and QinQ
            if ifname in qinq_ports:
                swp["802.1ad Tunnel"] = True
            if len(swp["tagged"]) > 0:
                swp["802.1Q Enabled"] = True
            result += [swp]
        return result
| 36.041667 | 86 | 0.482312 |
7959abcb3c357d16b4940fc8ac974c14057ca42c | 132 | py | Python | tastefulpy/__init__.py | mjschultz/django-tastefulpy | c81c7b32da16f9b181589a0311d9819718fdc960 | [
"BSD-3-Clause"
] | null | null | null | tastefulpy/__init__.py | mjschultz/django-tastefulpy | c81c7b32da16f9b181589a0311d9819718fdc960 | [
"BSD-3-Clause"
] | null | null | null | tastefulpy/__init__.py | mjschultz/django-tastefulpy | c81c7b32da16f9b181589a0311d9819718fdc960 | [
"BSD-3-Clause"
] | null | null | null | from __future__ import unicode_literals
# Package metadata: author credit and version info tuple.
__author__ = 'Daniel Lindsley & the Tastefulpy core team'
__version__ = (0, 12, 2, 'dev')
| 22 | 57 | 0.75 |
7959ac4c4faa09464693018d1fd2599eea85a398 | 340 | py | Python | chat/urls.py | aimengda/django-chat | ac4def489d6a568eacfd5dec3f159e47025365a6 | [
"MIT"
] | null | null | null | chat/urls.py | aimengda/django-chat | ac4def489d6a568eacfd5dec3f159e47025365a6 | [
"MIT"
] | 5 | 2021-03-30T13:51:39.000Z | 2021-09-22T19:14:56.000Z | chat/urls.py | aimengda/django-chat | ac4def489d6a568eacfd5dec3f159e47025365a6 | [
"MIT"
] | null | null | null | from django.urls import path
from . import views
urlpatterns = [
    # The site root and /login both serve the login view.
    # NOTE(review): both patterns carry name='login' — confirm that
    # reverse('login') resolves to the intended one of the two URLs.
    path('', views.user_login, name='login'),
    path(r'login', views.user_login, name='login'),
    path(r'logout', views.user_logout, name='logout'),
    path(r'homepage', views.homepage, name='homepage'),
    # Class-based view, dispatched through as_view().
    path(r'register', views.Register.as_view(), name="register")
]
7959ac9ee8333f19a09810c3d51a72fa1d720280 | 352 | py | Python | exceptions_FancyDivide2.py | medifle/python_6.00.1x | d40629f83e09b02cd4fc4e79e790d51d9b0ebf63 | [
"MIT"
] | 4 | 2015-10-27T15:42:33.000Z | 2018-03-08T07:16:26.000Z | exceptions_FancyDivide2.py | medifle/python_6.00.1x | d40629f83e09b02cd4fc4e79e790d51d9b0ebf63 | [
"MIT"
] | null | null | null | exceptions_FancyDivide2.py | medifle/python_6.00.1x | d40629f83e09b02cd4fc4e79e790d51d9b0ebf63 | [
"MIT"
] | null | null | null | def FancyDivide2(list_of_numbers, index):
try:
try:
denom = list_of_numbers[index]
for i in range(len(list_of_numbers)):
list_of_numbers[i] /= denom
finally:
raise Exception("1")
except Exception, e:
print (e)
print 'b'
print e | 27.076923 | 50 | 0.488636 |
7959ad959eafe8184a08d51fb10b03af4fa3b556 | 4,711 | py | Python | napari/_qt/qt_about_key_bindings.py | danielballan/napari | 9963d6bf52971f8f240b507be206ec682487fb4a | [
"BSD-3-Clause"
] | null | null | null | napari/_qt/qt_about_key_bindings.py | danielballan/napari | 9963d6bf52971f8f240b507be206ec682487fb4a | [
"BSD-3-Clause"
] | null | null | null | napari/_qt/qt_about_key_bindings.py | danielballan/napari | 9963d6bf52971f8f240b507be206ec682487fb4a | [
"BSD-3-Clause"
] | null | null | null | from qtpy.QtCore import Qt
from qtpy.QtWidgets import (
QDialog,
QVBoxLayout,
QHBoxLayout,
QTextEdit,
QComboBox,
)
from collections import OrderedDict
import napari
from ..utils.interactions import get_key_bindings_summary
class QtAboutKeyBindings(QDialog):
    """Qt dialog window for displaying keybinding information.

    Parameters
    ----------
    viewer : napari.components.ViewerModel
        Napari viewer containing the rendered scene, layers, and controls.

    Attributes
    ----------
    key_bindings_strs : collections.OrderedDict
        Ordered dictionary of hotkey shortcuts and associated key bindings.
        Dictionary keys include:
        - 'All active key bindings'
        - 'Image layer'
        - 'Labels layer'
        - 'Points layer'
        - 'Shapes layer'
        - 'Surface layer'
        - 'Vectors layer'
    layout : qtpy.QtWidgets.QVBoxLayout
        Layout of the widget.
    layerTypeComboBox : qtpy.QtWidgets.QComboBox
        Dropdown menu to select layer type.
    textEditBox : qtpy.QtWidgets.QTextEdit
        Text box widget containing table of key bindings information.
    viewer : napari.components.ViewerModel
        Napari viewer containing the rendered scene, layers, and controls.
    """

    # Sentinel combo-box entry showing the merged, currently-active keymap.
    ALL_ACTIVE_KEYBINDINGS = 'All active key bindings'

    def __init__(self, viewer, parent=None):
        super().__init__(parent=parent)

        self.viewer = viewer
        self.layout = QVBoxLayout()

        self.setWindowTitle('Keybindings')
        # Non-modal: the dialog stays open while the viewer remains usable.
        self.setWindowModality(Qt.NonModal)
        self.setLayout(self.layout)

        # stacked key bindings widgets
        self.textEditBox = QTextEdit()
        self.textEditBox.setTextInteractionFlags(Qt.TextSelectableByMouse)
        self.textEditBox.setMinimumWidth(360)

        # Can switch to a normal dict when our minimum Python is 3.7
        self.key_bindings_strs = OrderedDict()
        self.key_bindings_strs[self.ALL_ACTIVE_KEYBINDINGS] = ''
        col = self.viewer.palette['secondary']
        # Pre-render the per-layer-type keymap summaries once, up front.
        layers = [
            napari.layers.Image,
            napari.layers.Labels,
            napari.layers.Points,
            napari.layers.Shapes,
            napari.layers.Surface,
            napari.layers.Vectors,
        ]
        for layer in layers:
            if len(layer.class_keymap) == 0:
                text = 'No key bindings'
            else:
                text = get_key_bindings_summary(layer.class_keymap, col=col)
            self.key_bindings_strs[f"{layer.__name__} layer"] = text

        # layer type selection
        self.layerTypeComboBox = QComboBox()
        self.layerTypeComboBox.addItems(list(self.key_bindings_strs))
        self.layerTypeComboBox.activated[str].connect(self.change_layer_type)
        self.layerTypeComboBox.setCurrentText(self.ALL_ACTIVE_KEYBINDINGS)
        # self.change_layer_type(current_layer)
        layer_type_layout = QHBoxLayout()
        layer_type_layout.setContentsMargins(10, 5, 0, 0)
        layer_type_layout.addWidget(self.layerTypeComboBox)
        layer_type_layout.addStretch(1)
        layer_type_layout.setSpacing(0)
        self.layout.addLayout(layer_type_layout)
        self.layout.addWidget(self.textEditBox, 1)

        # Keep the summary in sync with the viewer's active layer/palette.
        self.viewer.events.active_layer.connect(self.update_active_layer)
        self.viewer.events.palette.connect(self.update_active_layer)
        self.update_active_layer()

    def change_layer_type(self, text):
        """Change layer type selected in dropdown menu.

        Parameters
        ----------
        text : str
            Dictionary key to access key bindings associated with the layer.
            Available keys include:
            - 'All active key bindings'
            - 'Image layer'
            - 'Labels layer'
            - 'Points layer'
            - 'Shapes layer'
            - 'Surface layer'
            - 'Vectors layer'
        """
        self.textEditBox.setHtml(self.key_bindings_strs[text])

    def update_active_layer(self, event=None):
        """Update the active layer and display key bindings for that layer type.

        Parameters
        ----------
        event : napari.utils.event.Event, optional
            The napari event that triggered this method, by default None.
        """
        col = self.viewer.palette['secondary']
        # Add class and instance viewer key bindings
        text = get_key_bindings_summary(self.viewer.active_keymap, col=col)
        # Update layer specific key bindings if all active are displayed
        self.key_bindings_strs[self.ALL_ACTIVE_KEYBINDINGS] = text
        if self.layerTypeComboBox.currentText() == self.ALL_ACTIVE_KEYBINDINGS:
            self.textEditBox.setHtml(text)
| 35.961832 | 80 | 0.652303 |
7959adbcf0fa0241760efbf738dea7cc9262173c | 765 | py | Python | apps/estoque/migrations/0004_auto_20170201_2231.py | SamuelsonH2T/erp | 5973c6b9f4ce4ddcdc47cdd809d9c3d9a5f0ef6e | [
"MIT"
] | null | null | null | apps/estoque/migrations/0004_auto_20170201_2231.py | SamuelsonH2T/erp | 5973c6b9f4ce4ddcdc47cdd809d9c3d9a5f0ef6e | [
"MIT"
] | null | null | null | apps/estoque/migrations/0004_auto_20170201_2231.py | SamuelsonH2T/erp | 5973c6b9f4ce4ddcdc47cdd809d9c3d9a5f0ef6e | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
# Generated by Django 1.10.5 on 2017-02-01 22:31
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('estoque', '0003_auto_20170201_2216'),
]
operations = [
migrations.AlterModelOptions(
name='estoque',
options={'verbose_name': 'Estoque', 'verbose_name_plural': 'Estoque de Produtos'},
),
migrations.AlterField(
model_name='prateleira',
name='codigo',
field=models.CharField(max_length=42),
),
migrations.AlterUniqueTogether(
name='estoque',
unique_together=set([('lote', 'prateleira')]),
),
]
| 26.37931 | 94 | 0.593464 |
7959b0263b10d68c7f34529049211675c59d0aa1 | 16,984 | py | Python | tensorflow/python/keras/utils/layer_utils.py | EricLi404/tensorflow | 23759800d89f7b5362c338d9a3fd72a6810c3e22 | [
"Apache-2.0"
] | 2 | 2020-09-08T15:04:52.000Z | 2020-09-08T15:04:54.000Z | tensorflow/python/keras/utils/layer_utils.py | EricLi404/tensorflow | 23759800d89f7b5362c338d9a3fd72a6810c3e22 | [
"Apache-2.0"
] | 2 | 2021-08-25T16:12:15.000Z | 2022-02-10T02:19:16.000Z | tensorflow/python/keras/utils/layer_utils.py | EricLi404/tensorflow | 23759800d89f7b5362c338d9a3fd72a6810c3e22 | [
"Apache-2.0"
] | 2 | 2019-03-07T05:54:18.000Z | 2019-05-16T20:31:25.000Z | # Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
# pylint: disable=protected-access
"""Utilities related to layer/model functionality.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import functools
import weakref
import numpy as np
import six
from tensorflow.python.keras import backend as K
from tensorflow.python.keras.utils.conv_utils import convert_kernel
from tensorflow.python.util import deprecation
from tensorflow.python.util import nest
from tensorflow.python.util.tf_export import keras_export
@keras_export('keras.utils.get_source_inputs')
def get_source_inputs(tensor, layer=None, node_index=None):
  """Returns the list of input tensors necessary to compute `tensor`.

  Output will always be a list of tensors
  (potentially with 1 element).

  Arguments:
      tensor: The tensor to start from.
      layer: Origin layer of the tensor. Will be
          determined via tensor._keras_history if not provided.
      node_index: Origin node index of the tensor.

  Returns:
      List of input tensors.
  """
  # Tensors without Keras metadata cannot be traced back; return as-is.
  if not hasattr(tensor, '_keras_history'):
    return tensor

  # NOTE(review): the (layer, node_index) pair is rederived from the
  # tensor's own history whenever `layer` is None OR `node_index` is
  # truthy, so an explicitly passed pair with node_index != 0 gets
  # overridden — confirm this is the intended behavior.
  if layer is None or node_index:
    layer, node_index, _ = tensor._keras_history
  if not layer._inbound_nodes:
    # The layer has no inbound history: `tensor` is itself a source.
    return [tensor]
  else:
    node = layer._inbound_nodes[node_index]
    if node.is_input:
      # Reached an Input layer, stop recursion.
      return nest.flatten(node.input_tensors)
    else:
      source_tensors = []
      # Recurse upstream through every inbound connection.
      for layer, node_index, _, tensor in node.iterate_inbound():
        previous_sources = get_source_inputs(tensor, layer, node_index)
        # Avoid input redundancy.
        for x in previous_sources:
          if all(x is not t for t in source_tensors):
            source_tensors.append(x)
      return source_tensors
def validate_string_arg(input_data,
                        allowable_strings,
                        layer_name,
                        arg_name,
                        allow_none=False,
                        allow_callables=False):
  """Checks that a string-based arg is one of the allowed values.

  Returns silently when `input_data` is acceptable (an allowed string,
  or — when the corresponding flag is set — `None` or a callable);
  otherwise raises `ValueError` describing what would have been accepted.
  """
  # The three acceptance conditions, evaluated in the same short-circuit
  # order as before: None, callable, then allowed string.
  accepted = (
      (allow_none and input_data is None) or
      (allow_callables and callable(input_data)) or
      (isinstance(input_data, six.string_types) and
       input_data in allowable_strings))
  if accepted:
    return
  # Build a human-readable description of every accepted form.
  allowed_args = '`None`, ' if allow_none else ''
  allowed_args += 'a `Callable`, ' if allow_callables else ''
  allowed_args += 'or one of the following values: %s' % (allowable_strings,)
  raise ValueError(("%s's %s arg received an invalid value %s. " +
                    'Allowed values are %s.') %
                   (layer_name, arg_name, input_data, allowed_args))
def count_params(weights):
  """Counts the total number of scalars composing the weights.

  Arguments:
    weights: An iterable containing the weights on which to compute params.

  Returns:
    The total number of scalars composing the weights.
  """
  # De-duplicate by object identity so that shared weight objects are
  # only counted once.
  deduped = {}
  for weight in weights:
    deduped[id(weight)] = weight
  total = 0
  for weight in deduped.values():
    # Unknown (None) dimensions are treated as 0, which zeroes out the
    # product for that weight.
    dims = [0 if dim is None else dim for dim in weight.shape.as_list()]
    total += np.prod(dims)
  return int(total)
def print_summary(model, line_length=None, positions=None, print_fn=None):
  """Prints a summary of a model.
  Arguments:
      model: Keras model instance.
      line_length: Total length of printed lines
          (e.g. set this to adapt the display to different
          terminal window sizes).
      positions: Relative or absolute positions of log elements in each line.
          If not provided, defaults to `[.33, .55, .67, 1.]`.
      print_fn: Print function to use.
          It will be called on each line of the summary.
          You can set it to a custom function
          in order to capture the string summary.
          It defaults to `print` (prints to stdout).
  """
  if print_fn is None:
    print_fn = print
  # Decide whether the model can be displayed as a plain layer stack
  # (no "Connected to" column) or needs the full graph layout.
  if model.__class__.__name__ == 'Sequential':
    sequential_like = True
  elif not model._is_graph_network:
    # We treat subclassed models as a simple sequence of layers, for logging
    # purposes.
    sequential_like = True
  else:
    sequential_like = True
    nodes_by_depth = model._nodes_by_depth.values()
    nodes = []
    for v in nodes_by_depth:
      if (len(v) > 1) or (len(v) == 1 and
                          len(nest.flatten(v[0].keras_inputs)) > 1):
        # if the model has multiple nodes
        # or if the nodes have multiple inbound_layers
        # the model is no longer sequential
        sequential_like = False
        break
      nodes += v
    if sequential_like:
      # search for shared layers
      for layer in model.layers:
        flag = False
        for node in layer._inbound_nodes:
          if node in nodes:
            if flag:
              # A second node of the same layer inside this network means the
              # layer is shared/reused, so the topology is not sequential.
              sequential_like = False
              break
            else:
              flag = True
        if not sequential_like:
          break
  if sequential_like:
    line_length = line_length or 65
    positions = positions or [.45, .85, 1.]
    if positions[-1] <= 1:
      # Relative positions: convert fractions of line_length to columns.
      positions = [int(line_length * p) for p in positions]
    # header names for the different log elements
    to_display = ['Layer (type)', 'Output Shape', 'Param #']
  else:
    line_length = line_length or 98
    positions = positions or [.33, .55, .67, 1.]
    if positions[-1] <= 1:
      positions = [int(line_length * p) for p in positions]
    # header names for the different log elements
    to_display = ['Layer (type)', 'Output Shape', 'Param #', 'Connected to']
    relevant_nodes = []
    for v in model._nodes_by_depth.values():
      relevant_nodes += v
  def print_row(fields, positions):
    # Renders one table row; each field is truncated/padded to its column.
    line = ''
    for i in range(len(fields)):
      if i > 0:
        line = line[:-1] + ' '
      line += str(fields[i])
      line = line[:positions[i]]
      line += ' ' * (positions[i] - len(line))
    print_fn(line)
  print_fn('Model: "{}"'.format(model.name))
  print_fn('_' * line_length)
  print_row(to_display, positions)
  print_fn('=' * line_length)
  def print_layer_summary(layer):
    """Prints a summary for a single layer.
    Arguments:
        layer: target layer.
    """
    try:
      output_shape = layer.output_shape
    except AttributeError:
      output_shape = 'multiple'
    except RuntimeError:  # output_shape unknown in Eager mode.
      output_shape = '?'
    name = layer.name
    cls_name = layer.__class__.__name__
    fields = [name + ' (' + cls_name + ')', output_shape, layer.count_params()]
    print_row(fields, positions)
  def print_layer_summary_with_connections(layer):
    """Prints a summary for a single layer (including topological connections).
    Arguments:
        layer: target layer.
    """
    try:
      output_shape = layer.output_shape
    except AttributeError:
      output_shape = 'multiple'
    connections = []
    for node in layer._inbound_nodes:
      if relevant_nodes and node not in relevant_nodes:
        # node is not part of the current network
        continue
      for inbound_layer, node_index, tensor_index, _ in node.iterate_inbound():
        connections.append('{}[{}][{}]'.format(inbound_layer.name, node_index,
                                               tensor_index))
    name = layer.name
    cls_name = layer.__class__.__name__
    if not connections:
      first_connection = ''
    else:
      first_connection = connections[0]
    fields = [
        name + ' (' + cls_name + ')', output_shape,
        layer.count_params(), first_connection
    ]
    print_row(fields, positions)
    if len(connections) > 1:
      # Additional inbound connections get their own rows with empty
      # layer/shape/param columns.
      for i in range(1, len(connections)):
        fields = ['', '', '', connections[i]]
        print_row(fields, positions)
  layers = model.layers
  for i in range(len(layers)):
    if sequential_like:
      print_layer_summary(layers[i])
    else:
      print_layer_summary_with_connections(layers[i])
    if i == len(layers) - 1:
      print_fn('=' * line_length)
    else:
      print_fn('_' * line_length)
  # `_collected_trainable_weights` exists after the model is compiled and
  # reflects the trainable weights captured at compile time.
  if hasattr(model, '_collected_trainable_weights'):
    trainable_count = count_params(model._collected_trainable_weights)
  else:
    trainable_count = count_params(model.trainable_weights)
  non_trainable_count = count_params(model.non_trainable_weights)
  print_fn('Total params: {:,}'.format(trainable_count + non_trainable_count))
  print_fn('Trainable params: {:,}'.format(trainable_count))
  print_fn('Non-trainable params: {:,}'.format(non_trainable_count))
  print_fn('_' * line_length)
def gather_trainable_weights(trainable, sub_layers, extra_variables):
  """Lists the trainable weights for an object with sub-layers.
  Args:
    trainable: Whether the object collecting the variables is trainable.
    sub_layers: A flat list of Layer objects owned by this object, to collect
      variables from.
    extra_variables: Any extra variables to include. Their `.trainable`
      property is used to categorize them.
  Returns:
    A list of collected trainable weights/variables. Empty if the owner
    itself is not trainable.
  """
  if not trainable:
    # A non-trainable owner exposes no trainable weights at all.
    return []
  collected = []
  for owned_layer in sub_layers:
    collected.extend(owned_layer.trainable_weights)
  # Layer weights come first, then the trainable extras, preserving order.
  collected.extend(v for v in extra_variables if v.trainable)
  return collected
def gather_non_trainable_weights(trainable, sub_layers, extra_variables):
  """Lists the non-trainable weights for an object with sub-layers.
  Args:
    trainable: Whether the object collecting the variables is trainable.
    sub_layers: A flat list of Layer objects owned by this object, to collect
      variables from.
    extra_variables: Any extra variables to include. Their `.trainable`
      property is used to categorize them.
  Returns:
    A list of collected non-trainable weights/variables. If the owner itself
    is not trainable, its sub-layers' trainable weights are reported here
    too (listed first).
  """
  extra_trainable = [v for v in extra_variables if v.trainable]
  extra_non_trainable = [v for v in extra_variables if not v.trainable]
  non_trainable = []
  for owned_layer in sub_layers:
    non_trainable.extend(owned_layer.non_trainable_weights)
  if trainable:
    return non_trainable + extra_non_trainable
  # Non-trainable owner: even the sub-layers' trainable weights count as
  # non-trainable from the outside; they are listed ahead of the rest.
  demoted = []
  for owned_layer in sub_layers:
    demoted.extend(owned_layer.trainable_weights)
  return demoted + extra_trainable + non_trainable + extra_non_trainable
@deprecation.deprecated('2020-06-23',
                        'The Theano kernel format is legacy; '
                        'this utility will be removed.')
@keras_export('keras.utils.convert_all_kernels_in_model')
def convert_all_kernels_in_model(model):
  """Converts all convolution kernels in a model from Theano to TensorFlow.
  Also works from TensorFlow to Theano.
  This is used for converting legacy Theano-saved model files.
  Arguments:
      model: target model for the conversion.
  """
  # SeparableConvolution is intentionally omitted: it is only supported by
  # TensorFlow, so it never needs Theano-format conversion.
  convertible = ('Conv1D', 'Conv2D', 'Conv3D', 'Conv2DTranspose')
  # Collect all (variable, converted value) pairs first so the assignment
  # happens in a single batched call.
  updates = [
      (layer.kernel, convert_kernel(K.get_value(layer.kernel)))
      for layer in model.layers
      if layer.__class__.__name__ in convertible
  ]
  K.batch_set_value(updates)
def convert_dense_weights_data_format(dense,
                                      previous_feature_map_shape,
                                      target_data_format='channels_first'):
  """Utility useful when changing a convnet's `data_format`.
  When porting the weights of a convnet from one data format to the other,
  if the convnet includes a `Flatten` layer (applied to the last
  convolutional feature map) followed by a `Dense` layer, the weights of
  that `Dense` layer should be updated to reflect the new dimension
  ordering.
  Arguments:
      dense: The target `Dense` layer.
      previous_feature_map_shape: A shape tuple of 3 integers,
          e.g. `(512, 7, 7)`. The shape of the convolutional
          feature map right before the `Flatten` layer that
          came before the target `Dense` layer.
      target_data_format: One of "channels_last", "channels_first".
          Set it "channels_last"
          if converting a "channels_first" model to "channels_last",
          or reciprocally.
  """
  assert target_data_format in {'channels_last', 'channels_first'}
  kernel, bias = dense.get_weights()
  # Each kernel column is a flattened feature map in the *source* layout;
  # pick the source shape and the axis permutation once, outside the loop.
  if target_data_format == 'channels_first':
    c, h, w = previous_feature_map_shape
    source_shape = (h, w, c)  # rows were flattened channels_last
    perm = (2, 0, 1)  # last -> first
  else:
    h, w, c = previous_feature_map_shape
    source_shape = (c, h, w)  # rows were flattened channels_first
    perm = (1, 2, 0)  # first -> last
  total = np.prod(previous_feature_map_shape)
  for col in range(kernel.shape[1]):
    column = kernel[:, col].reshape(source_shape)
    kernel[:, col] = np.reshape(np.transpose(column, perm), (total,))
  dense.set_weights([kernel, bias])
def is_builtin_layer(layer):
  """Returns True if `layer` is exported under its own Keras API name."""
  api_names = getattr(layer, '_keras_api_names', None)
  if not api_names:
    return False
  # Subclasses of `Layer` that are not exported inherit the export name
  # of the base layer class; those do not count as built-in.
  base_only = ('keras.layers.Layer',)
  if api_names == base_only or layer._keras_api_names_v1 == base_only:
    return False
  return True
def cached_per_instance(f):
  """Lightweight decorator for caching lazily constructed properties.

  Designed for properties which are expensive to compute and static over
  the life of a class instance; it provides no mechanism for cache
  invalidation, so it is best suited for lazily exposing derived properties
  of other static data.

  Results are stored in a `weakref.WeakKeyDictionary` keyed on the instance,
  which has two benefits over storing results as object attributes:
  it sidesteps any custom (and potentially expensive) `__setattr__`
  implementation on the class, and — unlike a plain dict cache — it does not
  keep the instances alive, so garbage collection is not blocked.

  Args:
    f: The function to cache.
  Returns:
    f decorated with simple caching behavior.
  """
  memo = weakref.WeakKeyDictionary()

  @functools.wraps(f)
  def wrapped(item):
    cached = memo.get(item)
    if cached is None:
      # NOTE: a computed value of `None` is never cached and will be
      # recomputed on every access.
      cached = f(item)
      memo[item] = cached
    return cached

  # Expose the cache so callers can inspect/clear it.
  wrapped.cache = memo
  return wrapped
| 33.698413 | 80 | 0.675224 |
7959b06cab4baf1fbff9ee7da7f17a96c8709dbe | 5,253 | py | Python | ToDo_CLI_App/todo.py | Dutta-SD/Python_Programs | f002dbd49c979a6d8b156f88003a79f364ff01da | [
"MIT"
] | 1 | 2021-01-04T07:16:05.000Z | 2021-01-04T07:16:05.000Z | ToDo_CLI_App/todo.py | Dutta-SD/Python_Programs | f002dbd49c979a6d8b156f88003a79f364ff01da | [
"MIT"
] | 2 | 2021-01-27T04:24:50.000Z | 2021-07-25T17:44:00.000Z | ToDo_CLI_App/todo.py | Dutta-SD/Python_Programs | f002dbd49c979a6d8b156f88003a79f364ff01da | [
"MIT"
] | null | null | null | '''
-----------------------------------------------
todo : CLI tool for managing daily tasks
-----------------------------------------------
__author__ : Sandip Dutta
-----------------------------------------------
Note : Some error occuring due to help function
formatting issues
'''
#------------ Dependencies--------------
import click
### Required dependency, install via
### pip install click
import os
from datetime import datetime
import sys
#---------------------------------------
## Main init function of todo cli
@click.group()
def todo_cli():
    # init function for cli
    # args : None
    # return None
    # Click group: the add/ls/del/done/report/help subcommands attach here.
    pass
## Add function - adds the task to todo list
@todo_cli.command('add', help ="$ add 'todo item' # Add a new todo")
@click.argument('todo_item', type = str, required = False)
def add(todo_item : str=None):
    """Append a todo item to todo.txt.

    Fix: previously a missing TODO string only printed an error but then
    fell through, writing the literal string "None" to the file and
    reporting success. Now we return early instead.
    """
    if todo_item is None:
        click.echo("Error: Missing todo string. Nothing added!")
        return
    with open('todo.txt', 'a') as todoDataFile:
        todoDataFile.write(f"{todo_item}\n")
    # Success confirmation
    click.echo(f"Added todo: \"{todo_item}\"")
## ls - Shows all the tasks remaining
@todo_cli.command('ls', help = "$ ls # Show remaining todos")
def ls():
    """List pending todos, highest-numbered (most recent) first."""
    with open('todo.txt', 'r') as todoDataFile:
        pending = todoDataFile.readlines()
    if not pending:
        # no tasks
        click.echo('There are no pending todos!')
        sys.exit(0)
    total = len(pending)
    # Most recently added entries are printed first, numbered from `total`
    # down to 1.
    for position, line in enumerate(reversed(pending)):
        text = line.rstrip("\n")
        print(f"[{total - position}] {text}")
# delete
@todo_cli.command('del', help='$ del NUMBER # Delete a todo')
@click.argument('task_number', type=int, required = False)
def delete(task_number : int):
    """Delete the todo with the given 1-based number from todo.txt.

    Fixes: (1) the file was truncated without rewinding first — after
    `readlines()` the position is at EOF, so `truncate(0)` followed by
    `writelines()` NUL-padded the file; (2) `list.remove(task)` deleted the
    first duplicate entry rather than the requested one.
    """
    if task_number is None:
        click.echo(f'Error: Missing NUMBER for deleting todo.')
        sys.exit(0)
    with open('todo.txt', 'r+') as todoDataFile:
        todoTasks = todoDataFile.readlines()
        numTasks = len(todoTasks)
        if task_number > numTasks or task_number < 1:
            # invalid number
            errorMessage = f"Error: todo #{task_number} does not exist. Nothing deleted."
            click.echo(errorMessage)
            sys.exit(0)
        # Drop exactly the requested entry by index.
        del todoTasks[task_number - 1]
        todoDataFile.seek(0)      # rewind: readlines() left us at EOF
        todoDataFile.truncate()   # now truncate at position 0
        todoDataFile.writelines(todoTasks)
    click.echo(f'Deleted todo #{task_number}')
@todo_cli.command('done', help = '$ done NUMBER # Mark task as Done')
@click.argument('task_number', type=int, required = False)
def done(task_number):
    """Move the todo with the given 1-based number from todo.txt to done.txt.

    Fixes: (1) the file was truncated without rewinding first — after
    `readlines()` the position is at EOF, so `truncate(0)` followed by
    `writelines()` NUL-padded the file; (2) `list.remove(task)` deleted the
    first duplicate entry rather than the requested one.
    """
    task = None  ## task to be moved to done.txt
    if task_number is None:
        click.echo(f'Error: Missing NUMBER for marking todo as done.')
        sys.exit(0)
    with open('todo.txt', 'r+') as todoDataFile:
        todoTasks = todoDataFile.readlines()
        numTasks = len(todoTasks)
        if task_number > numTasks or task_number < 1:
            # invalid number
            errorMessage = f"Error: todo #{task_number} does not exist."
            click.echo(errorMessage)
            sys.exit(0)
        task = todoTasks[task_number - 1]
        # Drop exactly the requested entry by index.
        del todoTasks[task_number - 1]
        todoDataFile.seek(0)      # rewind: readlines() left us at EOF
        todoDataFile.truncate()   # now truncate at position 0
        todoDataFile.writelines(todoTasks)
    with open('done.txt', 'a') as doneTasks:
        # Current utc time
        task_complete_date = datetime.utcnow().strftime("%Y-%m-%d")
        ## Write task in final format ("x DATE task")
        doneTasks.write(f"x {task_complete_date} {task}")
    click.echo(f"Marked todo #{task_number} as done.")
@todo_cli.command('report', help = '$ report # Statistics')
def report():
    """Print today's counts of pending and completed todos.

    Fix: the two file handles were previously opened and never closed;
    context managers guarantee they are released.
    """
    with open('todo.txt') as pendingTasksData:
        numPendingTasks = len(pendingTasksData.readlines())
    with open('done.txt') as completedTasksData:
        numCompletedTasks = len(completedTasksData.readlines())
    dateNow = datetime.utcnow().strftime("%Y-%m-%d")
    tasksStatsToDisplay = f"{dateNow} Pending : {numPendingTasks} Completed : {numCompletedTasks}"
    click.echo(tasksStatsToDisplay)
@todo_cli.command('help', help = "$ help # Show usage")
def give_help():
    # Shows help message
    # NOTE: the usage text below is emitted verbatim to stdout.
    help_text = '''Usage :-
$ ./todo add \"todo item\" # Add a new todo
$ ./todo ls # Show remaining todos
$ ./todo del NUMBER # Delete a todo
$ ./todo done NUMBER # Complete a todo
$ ./todo help # Show usage
$ ./todo report # Statistics'''
    click.echo(help_text)
if __name__ == '__main__':
    os.chdir('.') # change to current dir
    ## Make necessary files
    todo_file = 'todo.txt'
    done_file = 'done.txt'
    ## Make files
    # Append mode creates the files if absent without clobbering content.
    open(todo_file, 'a').close()
    open(done_file, 'a').close()
    # run cli
    todo_cli()
7959b0f8d6952080060f39f0a59866cebce9748d | 1,550 | py | Python | QFA/test/gqfa_test.py | gustawlippa/QFA | 7f1f8bd0d2c9cb9aeeeb924b2f002c9e849523be | [
"MIT"
] | 2 | 2021-01-30T23:14:36.000Z | 2021-02-17T01:41:56.000Z | QFA/test/gqfa_test.py | gustawlippa/QFA | 7f1f8bd0d2c9cb9aeeeb924b2f002c9e849523be | [
"MIT"
] | null | null | null | QFA/test/gqfa_test.py | gustawlippa/QFA | 7f1f8bd0d2c9cb9aeeeb924b2f002c9e849523be | [
"MIT"
] | null | null | null | import unittest
from QFA import GQFA
import numpy as np
from math import sqrt
class GQFATest(unittest.TestCase):
    """Checks GQFA word-acceptance probabilities on a small 4-state example."""
    def test_example(self):
        # example is the same as in GQFA.example()
        alphabet = 'a'
        # Unitary applied when reading 'a'.
        a_matrix = np.array([[1 / 2, 1 / 2, sqrt(1 / 2), 0],
                             [sqrt(1 / 2), -sqrt(1 / 2), 0, 0],
                             [1 / 2, 1 / 2, -sqrt(1 / 2), 0],
                             [0, 0, 0, 1]])
        # Unitary applied on the end-of-word marker.
        end_matrix = np.array([[0, 0, 0, 1],
                               [0, 0, 1, 0],
                               [1, 0, 0, 0],
                               [0, 1, 0, 0]])
        initial_state = np.array([[1], [0], [0], [0]])
        # Projector onto the accepting subspace (state index 2).
        measurement_acc = np.array([[0, 0, 0, 0],
                                    [0, 0, 0, 0],
                                    [0, 0, 1, 0],
                                    [0, 0, 0, 0]])
        # Projector onto the rejecting subspace (state index 3).
        measurement_rej = np.array([[0, 0, 0, 0],
                                    [0, 0, 0, 0],
                                    [0, 0, 0, 0],
                                    [0, 0, 0, 1]])
        measurements = [[measurement_acc, measurement_rej], [measurement_acc, measurement_rej]]
        gqfa = GQFA.GQFA(alphabet, initial_state, [a_matrix, end_matrix], measurements)
        # `process` returns (acceptance probability, numerical error bound);
        # the bound is used as the comparison tolerance.
        prob_a, err_a = gqfa.process('a')
        self.assertAlmostEqual(prob_a, 0.5, delta=err_a)
        prob_aa, err_aa = gqfa.process('aa')
        self.assertAlmostEqual(prob_aa, (5/8 + 1/(2*sqrt(2))), delta=err_aa)
if __name__ == '__main__':
    # Allow running this test module directly: `python gqfa_test.py`.
    unittest.main()
| 32.291667 | 95 | 0.413548 |
7959b1277d858496fb5954356e1cb9c651f1935b | 5,068 | py | Python | package/xarl/agents/xaddpg/xaddpg_tf_loss.py | proroklab/xaer | 9a59d1ec19ffd8037697aa7ffc43246d4c0c0e69 | [
"MIT"
] | 1 | 2021-10-16T16:48:56.000Z | 2021-10-16T16:48:56.000Z | package/xarl/agents/xaddpg/xaddpg_tf_loss.py | proroklab/xaer | 9a59d1ec19ffd8037697aa7ffc43246d4c0c0e69 | [
"MIT"
] | null | null | null | package/xarl/agents/xaddpg/xaddpg_tf_loss.py | proroklab/xaer | 9a59d1ec19ffd8037697aa7ffc43246d4c0c0e69 | [
"MIT"
] | null | null | null | from ray.rllib.agents.ddpg.ddpg_tf_policy import *
def xaddpg_actor_critic_loss(policy, model, _, train_batch):
	"""Builds the combined DDPG/TD3 actor and critic loss for XADDPG.

	Mirrors RLlib's DDPG TF loss, but weights both the TD errors (critic)
	and the Q-values (actor) by the per-sample priority weights in
	`train_batch[PRIO_WEIGHTS]`. Side effects: stores `actor_loss`,
	`critic_loss`, `td_error` and `q_t` on `policy` for the stats function,
	and sets `policy.target_q_func_vars`. Returns the sum of both losses
	(the two optimizers pick their parts apart later).
	"""
	twin_q = policy.config["twin_q"]
	gamma = policy.config["gamma"]
	n_step = policy.config["n_step"]
	use_huber = policy.config["use_huber"]
	huber_threshold = policy.config["huber_threshold"]
	l2_reg = policy.config["l2_reg"]
	input_dict = {
		"obs": train_batch[SampleBatch.CUR_OBS],
		"is_training": True,
	}
	input_dict_next = {
		"obs": train_batch[SampleBatch.NEXT_OBS],
		"is_training": True,
	}
	model_out_t, _ = model(input_dict, [], None)
	model_out_tp1, _ = model(input_dict_next, [], None)
	target_model_out_tp1, _ = policy.target_model(input_dict_next, [], None)
	policy.target_q_func_vars = policy.target_model.variables()
	# Policy network evaluation.
	policy_t = model.get_policy_output(model_out_t)
	policy_tp1 = \
		policy.target_model.get_policy_output(target_model_out_tp1)
	# Action outputs.
	if policy.config["smooth_target_policy"]:
		# TD3-style target policy smoothing: add clipped Gaussian noise to
		# the target action, then clip back into the action space.
		target_noise_clip = policy.config["target_noise_clip"]
		clipped_normal_sample = tf.clip_by_value(
			tf.random.normal(
				tf.shape(policy_tp1), stddev=policy.config["target_noise"]),
			-target_noise_clip, target_noise_clip)
		policy_tp1_smoothed = tf.clip_by_value(
			policy_tp1 + clipped_normal_sample,
			policy.action_space.low * tf.ones_like(policy_tp1),
			policy.action_space.high * tf.ones_like(policy_tp1))
	else:
		# No smoothing, just use deterministic actions.
		policy_tp1_smoothed = policy_tp1
	# Q-net(s) evaluation.
	# prev_update_ops = set(tf.get_collection(tf.GraphKeys.UPDATE_OPS))
	# Q-values for given actions & observations in given current
	q_t = model.get_q_values(model_out_t, train_batch[SampleBatch.ACTIONS])
	# Q-values for current policy (no noise) in given current state
	q_t_det_policy = model.get_q_values(model_out_t, policy_t)
	if twin_q:
		twin_q_t = model.get_twin_q_values(model_out_t,
										   train_batch[SampleBatch.ACTIONS])
	# Target q-net(s) evaluation.
	q_tp1 = policy.target_model.get_q_values(target_model_out_tp1,
											 policy_tp1_smoothed)
	if twin_q:
		twin_q_tp1 = policy.target_model.get_twin_q_values(
			target_model_out_tp1, policy_tp1_smoothed)
	q_t_selected = tf.squeeze(q_t, axis=len(q_t.shape) - 1)
	if twin_q:
		twin_q_t_selected = tf.squeeze(twin_q_t, axis=len(q_t.shape) - 1)
		# Clipped double-Q: use the minimum of the two target critics.
		q_tp1 = tf.minimum(q_tp1, twin_q_tp1)
	q_tp1_best = tf.squeeze(input=q_tp1, axis=len(q_tp1.shape) - 1)
	# Zero out the bootstrap value on terminal transitions.
	q_tp1_best_masked = \
		(1.0 - tf.cast(train_batch[SampleBatch.DONES], tf.float32)) * \
		q_tp1_best
	# Compute RHS of bellman equation.
	q_t_selected_target = tf.stop_gradient(train_batch[SampleBatch.REWARDS] +
										   gamma**n_step * q_tp1_best_masked)
	# Compute the error (potentially clipped).
	if twin_q:
		td_error = q_t_selected - q_t_selected_target
		twin_td_error = twin_q_t_selected - q_t_selected_target
		if use_huber:
			errors = huber_loss(td_error, huber_threshold) + \
				huber_loss(twin_td_error, huber_threshold)
		else:
			errors = 0.5 * tf.math.square(td_error) + \
				0.5 * tf.math.square(twin_td_error)
	else:
		td_error = q_t_selected - q_t_selected_target
		if use_huber:
			errors = huber_loss(td_error, huber_threshold)
		else:
			errors = 0.5 * tf.math.square(td_error)
	# XADDPG difference vs. vanilla DDPG: importance/priority weights scale
	# both the critic errors and the actor objective.
	prio_weights = tf.cast(train_batch[PRIO_WEIGHTS], tf.float32)
	critic_loss = tf.reduce_mean(prio_weights * errors)
	actor_loss = -tf.reduce_mean(prio_weights * q_t_det_policy)
	# Add l2-regularization if required.
	if l2_reg is not None:
		for var in policy.model.policy_variables():
			if "bias" not in var.name:
				actor_loss += (l2_reg * tf.nn.l2_loss(var))
		for var in policy.model.q_variables():
			if "bias" not in var.name:
				critic_loss += (l2_reg * tf.nn.l2_loss(var))
	# Model self-supervised losses.
	if policy.config["use_state_preprocessor"]:
		# Expand input_dict in case custom_loss' need them.
		input_dict[SampleBatch.ACTIONS] = train_batch[SampleBatch.ACTIONS]
		input_dict[SampleBatch.REWARDS] = train_batch[SampleBatch.REWARDS]
		input_dict[SampleBatch.DONES] = train_batch[SampleBatch.DONES]
		input_dict[SampleBatch.NEXT_OBS] = train_batch[SampleBatch.NEXT_OBS]
		if log_once("ddpg_custom_loss"):
			logger.warning(
				"You are using a state-preprocessor with DDPG and "
				"therefore, `custom_loss` will be called on your Model! "
				"Please be aware that DDPG now uses the ModelV2 API, which "
				"merges all previously separate sub-models (policy_model, "
				"q_model, and twin_q_model) into one ModelV2, on which "
				"`custom_loss` is called, passing it "
				"[actor_loss, critic_loss] as 1st argument. "
				"You may have to change your custom loss function to handle "
				"this.")
		[actor_loss, critic_loss] = model.custom_loss(
			[actor_loss, critic_loss], input_dict)
	# Store values for stats function.
	policy.actor_loss = actor_loss
	policy.critic_loss = critic_loss
	policy.td_error = td_error
	policy.q_t = q_t
	# Return one loss value (even though we treat them separately in our
	# 2 optimizers: actor and critic).
	return policy.critic_loss + policy.actor_loss
| 36.2 | 74 | 0.747238 |
7959b1e757836d74ff59259ac79d920874a8730c | 35,426 | py | Python | src/pymor/operators/fv.py | ManuelMBaumann/pymor | 9ad226a0a46c7ba30a18bdab27b8bbbfe8f83a31 | [
"Unlicense"
] | null | null | null | src/pymor/operators/fv.py | ManuelMBaumann/pymor | 9ad226a0a46c7ba30a18bdab27b8bbbfe8f83a31 | [
"Unlicense"
] | null | null | null | src/pymor/operators/fv.py | ManuelMBaumann/pymor | 9ad226a0a46c7ba30a18bdab27b8bbbfe8f83a31 | [
"Unlicense"
] | null | null | null | # This file is part of the pyMOR project (http://www.pymor.org).
# Copyright 2013-2019 pyMOR developers and contributors. All rights reserved.
# License: BSD 2-Clause License (http://opensource.org/licenses/BSD-2-Clause)
""" This module provides some operators for finite volume discretizations."""
import numpy as np
from scipy.sparse import coo_matrix, csc_matrix, dia_matrix
from pymor.core.defaults import defaults
from pymor.core.interfaces import ImmutableInterface, abstractmethod
from pymor.functions.interfaces import FunctionInterface
from pymor.grids.interfaces import AffineGridWithOrthogonalCentersInterface
from pymor.grids.boundaryinfos import SubGridBoundaryInfo
from pymor.grids.subgrid import SubGrid
from pymor.operators.basic import OperatorBase
from pymor.operators.constructions import ComponentProjection
from pymor.operators.numpy import NumpyMatrixBasedOperator, NumpyMatrixOperator
from pymor.parameters.base import Parametric
from pymor.tools.inplace import iadd_masked, isub_masked
from pymor.tools.quadratures import GaussQuadratures
from pymor.vectorarrays.numpy import NumpyVectorSpace
def FVVectorSpace(grid, id_='STATE'):
    """|VectorSpace| of finite volume DOF vectors: one DOF per codim-0 grid entity."""
    return NumpyVectorSpace(grid.size(0), id_)
class NumericalConvectiveFluxInterface(ImmutableInterface, Parametric):
    """Interface for numerical convective fluxes for finite volume schemes.
    Numerical fluxes defined by this interfaces are functions of
    the form `F(U_inner, U_outer, unit_outer_normal, edge_volume, mu)`.
    The flux evaluation is vectorized and happens in two stages:
    1. `evaluate_stage1` receives a |NumPy array| `U` of all values which
       appear as `U_inner` or `U_outer` for all edges the flux shall be
       evaluated at and returns a `tuple` of |NumPy arrays|
       each of the same length as `U`.
    2. `evaluate_stage2` receives the reordered `stage1_data` for each
       edge as well as the unit outer normal and the volume of the edges.
       `stage1_data` is given as follows: If `R_l` is `l`-th entry of the
       `tuple` returned by `evaluate_stage1`, the `l`-th entry `D_l` of
       of the `stage1_data` tuple has the shape `(num_edges, 2) + R_l.shape[1:]`.
       If for edge `k` the values `U_inner` and `U_outer` are the `i`-th
       and `j`-th value in the `U` array provided to `evaluate_stage1`,
       we have ::
           D_l[k, 0] == R_l[i], D_l[k, 1] == R_l[j].
    `evaluate_stage2` returns a |NumPy array| of the flux evaluations
    for each edge.
    """
    @abstractmethod
    def evaluate_stage1(self, U, mu=None):
        """Compute per-value data from `U` (see class docstring, stage 1)."""
        pass
    @abstractmethod
    def evaluate_stage2(self, stage1_data, unit_outer_normals, volumes, mu=None):
        """Compute the per-edge flux from reordered stage-1 data (stage 2)."""
        pass
class LaxFriedrichsFlux(NumericalConvectiveFluxInterface):
    """Lax-Friedrichs numerical flux.
    If `f` is the analytical flux, the Lax-Friedrichs flux `F` is given
    by::
      F(U_in, U_out, normal, vol) = vol * [normal⋅(f(U_in) + f(U_out))/2 + (U_in - U_out)/(2*λ)]
    Parameters
    ----------
    flux
        |Function| defining the analytical flux `f`.
    lxf_lambda
        The stabilization parameter `λ`.
    """
    def __init__(self, flux, lxf_lambda=1.0):
        self.flux = flux
        self.lxf_lambda = lxf_lambda
        self.build_parameter_type(flux)
    def evaluate_stage1(self, U, mu=None):
        # Stage 1: pass through the cell values and evaluate the analytical
        # flux once per value (trailing singleton axis for the function).
        return U, self.flux(U[..., np.newaxis], mu)
    def evaluate_stage2(self, stage1_data, unit_outer_normals, volumes, mu=None):
        U, F = stage1_data
        # Central average of the two flux values in normal direction plus the
        # Lax-Friedrichs stabilization term (U_in - U_out)/(2λ), both scaled
        # by the edge volume.
        return (np.sum(np.sum(F, axis=1) * unit_outer_normals, axis=1) * 0.5
                + (U[..., 0] - U[..., 1]) * (0.5 / self.lxf_lambda)) * volumes
class SimplifiedEngquistOsherFlux(NumericalConvectiveFluxInterface):
    """Engquist-Osher numerical flux. Simplified Implementation for special case.
    For the definition of the Engquist-Osher flux see :class:`EngquistOsherFlux`.
    This class provides a faster and more accurate implementation for the special
    case that `f(0) == 0` and the derivative of `f` only changes sign at `0`.
    Parameters
    ----------
    flux
        |Function| defining the analytical flux `f`.
    flux_derivative
        |Function| defining the analytical flux derivative `f'`.
    """
    def __init__(self, flux, flux_derivative):
        self.flux = flux
        self.flux_derivative = flux_derivative
        self.build_parameter_type(flux, flux_derivative)
    def evaluate_stage1(self, U, mu=None):
        # Stage 1: evaluate flux and flux derivative once per cell value.
        return self.flux(U[..., np.newaxis], mu), self.flux_derivative(U[..., np.newaxis], mu)
    def evaluate_stage2(self, stage1_data, unit_outer_normals, volumes, mu=None):
        F_edge, F_d_edge = stage1_data
        unit_outer_normals = unit_outer_normals[:, np.newaxis, :]
        # Project flux and derivative onto the edge normal.
        F_d_edge = np.sum(F_d_edge * unit_outer_normals, axis=2)
        F_edge = np.sum(F_edge * unit_outer_normals, axis=2)
        # Upwinding: keep the inner contribution only where the projected
        # derivative is non-negative, the outer one only where non-positive.
        F_edge[:, 0] = np.where(np.greater_equal(F_d_edge[:, 0], 0), F_edge[:, 0], 0)
        F_edge[:, 1] = np.where(np.less_equal(F_d_edge[:, 1], 0), F_edge[:, 1], 0)
        F_edge = np.sum(F_edge, axis=1)
        F_edge *= volumes
        return F_edge
class EngquistOsherFlux(NumericalConvectiveFluxInterface):
    """Engquist-Osher numerical flux.
    If `f` is the analytical flux, and `f'` its derivative, the Engquist-Osher flux is
    given by::
      F(U_in, U_out, normal, vol) = vol * [c^+(U_in, normal) + c^-(U_out, normal)]
                                         U_in
      c^+(U_in, normal)  = f(0)⋅normal + ∫   max(f'(s)⋅normal, 0) ds
                                         s=0
                           U_out
      c^-(U_out, normal) = ∫   min(f'(s)⋅normal, 0) ds
                           s=0
    The integrals are approximated by a composite Gauss quadrature.
    Parameters
    ----------
    flux
        |Function| defining the analytical flux `f`.
    flux_derivative
        |Function| defining the analytical flux derivative `f'`.
    gausspoints
        Number of Gauss quadrature points to be used for integration.
    intervals
        Number of subintervals to be used for integration.
    """
    def __init__(self, flux, flux_derivative, gausspoints=5, intervals=1):
        self.flux = flux
        self.flux_derivative = flux_derivative
        self.gausspoints = gausspoints
        self.intervals = intervals
        self.build_parameter_type(flux, flux_derivative)
        points, weights = GaussQuadratures.quadrature(npoints=self.gausspoints)
        points = points / intervals
        # Replicate the reference quadrature points on each of the
        # `intervals` subintervals of [0, 1].
        # NOTE: `np.float` was deprecated in NumPy 1.20 and removed in 1.24;
        # the builtin `float` (i.e. float64) is the documented replacement.
        points = ((np.arange(self.intervals, dtype=float)[:, np.newaxis] * (1 / intervals))
                  + points[np.newaxis, :]).ravel()
        weights = np.tile(weights, intervals) * (1 / intervals)
        self.points = points
        self.weights = weights
    def evaluate_stage1(self, U, mu=None):
        # Stage 1: evaluate f' at the quadrature points s = U * p, already
        # multiplied by |U| (substitution factor) and the quadrature weight.
        int_els = np.abs(U)[:, np.newaxis, np.newaxis]
        return [np.concatenate([self.flux_derivative(U[:, np.newaxis] * p, mu)[:, np.newaxis, :] * int_els * w
                                for p, w in zip(self.points, self.weights)], axis=1)]
    def evaluate_stage2(self, stage1_data, unit_outer_normals, volumes, mu=None):
        # f(0)⋅normal offset of the c^+ term.
        F0 = np.sum(self.flux.evaluate(np.array([[0.]]), mu=mu) * unit_outer_normals, axis=1)
        Fs = np.sum(stage1_data[0] * unit_outer_normals[:, np.newaxis, np.newaxis, :], axis=3)
        # Positive part for the inner value, negative part for the outer one.
        Fs[:, 0, :] = np.maximum(Fs[:, 0, :], 0)
        Fs[:, 1, :] = np.minimum(Fs[:, 1, :], 0)
        Fs = np.sum(np.sum(Fs, axis=2), axis=1) + F0
        Fs *= volumes
        return Fs
@defaults('delta')
def jacobian_options(delta=1e-7):
    """Default options for finite-difference Jacobian approximation (step size `delta`)."""
    return {'delta': delta}
class NonlinearAdvectionOperator(OperatorBase):
"""Nonlinear finite volume advection |Operator|.
The operator is of the form ::
L(u, mu)(x) = ∇ ⋅ f(u(x), mu)
Parameters
----------
grid
|Grid| for which to evaluate the operator.
boundary_info
|BoundaryInfo| determining the Dirichlet and Neumann boundaries.
numerical_flux
The :class:`NumericalConvectiveFlux <NumericalConvectiveFluxInterface>` to use.
dirichlet_data
|Function| providing the Dirichlet boundary values. If `None`, constant-zero
boundary is assumed.
solver_options
The |solver_options| for the operator.
name
The name of the operator.
"""
sid_ignore = OperatorBase.sid_ignore | {'_grid_data'}
linear = False
def __init__(self, grid, boundary_info, numerical_flux, dirichlet_data=None, solver_options=None,
space_id='STATE', name=None):
assert dirichlet_data is None or isinstance(dirichlet_data, FunctionInterface)
self.grid = grid
self.boundary_info = boundary_info
self.numerical_flux = numerical_flux
self.dirichlet_data = dirichlet_data
self.solver_options = solver_options
self.space_id = space_id
self.name = name
if (isinstance(dirichlet_data, FunctionInterface) and boundary_info.has_dirichlet
and not dirichlet_data.parametric):
self._dirichlet_values = self.dirichlet_data(grid.centers(1)[boundary_info.dirichlet_boundaries(1)])
self._dirichlet_values = self._dirichlet_values.ravel()
self._dirichlet_values_flux_shaped = self._dirichlet_values.reshape((-1, 1))
self.build_parameter_type(numerical_flux, dirichlet_data)
self.source = self.range = FVVectorSpace(grid, space_id)
self.add_with_arguments = self.add_with_arguments.union(f'numerical_flux_{arg}'
for arg in numerical_flux.with_arguments)
def with_(self, **kwargs):
assert 'numerical_flux' not in kwargs or not any(arg.startswith('numerical_flux_') for arg in kwargs)
num_flux_args = {arg[len('numerical_flux_'):]: kwargs.pop(arg)
for arg in list(kwargs) if arg.startswith('numerical_flux_')}
if num_flux_args:
kwargs['numerical_flux'] = self.numerical_flux.with_(**num_flux_args)
return super().with_(**kwargs)
def restricted(self, dofs):
source_dofs = np.setdiff1d(np.union1d(self.grid.neighbours(0, 0)[dofs].ravel(), dofs),
np.array([-1], dtype=np.int32),
assume_unique=True)
sub_grid = SubGrid(self.grid, entities=source_dofs)
sub_boundary_info = SubGridBoundaryInfo(sub_grid, self.grid, self.boundary_info)
op = self.with_(grid=sub_grid, boundary_info=sub_boundary_info, space_id=None,
name=f'{self.name}_restricted')
sub_grid_indices = sub_grid.indices_from_parent_indices(dofs, codim=0)
proj = ComponentProjection(sub_grid_indices, op.range)
return proj @ op, sub_grid.parent_indices(0)
def _fetch_grid_data(self):
# pre-fetch all grid-associated data to avoid searching the cache for each operator application
g = self.grid
bi = self.boundary_info
self._grid_data = dict(SUPE=g.superentities(1, 0),
SUPI=g.superentity_indices(1, 0),
VOLS0=g.volumes(0),
VOLS1=g.volumes(1),
BOUNDARIES=g.boundaries(1),
CENTERS=g.centers(1),
DIRICHLET_BOUNDARIES=bi.dirichlet_boundaries(1) if bi.has_dirichlet else None,
NEUMANN_BOUNDARIES=bi.neumann_boundaries(1) if bi.has_neumann else None)
self._grid_data.update(UNIT_OUTER_NORMALS=g.unit_outer_normals()[self._grid_data['SUPE'][:, 0],
self._grid_data['SUPI'][:, 0]])
    def apply(self, U, mu=None):
        """Evaluate the FV advection operator for each vector in `U`."""
        assert U in self.source
        mu = self.parse_parameter(mu)
        if not hasattr(self, '_grid_data'):
            self._fetch_grid_data()
        U = U.to_numpy()
        R = np.zeros((len(U), self.source.dim))
        bi = self.boundary_info
        gd = self._grid_data
        SUPE = gd['SUPE']
        VOLS0 = gd['VOLS0']
        VOLS1 = gd['VOLS1']
        BOUNDARIES = gd['BOUNDARIES']
        CENTERS = gd['CENTERS']
        DIRICHLET_BOUNDARIES = gd['DIRICHLET_BOUNDARIES']
        NEUMANN_BOUNDARIES = gd['NEUMANN_BOUNDARIES']
        UNIT_OUTER_NORMALS = gd['UNIT_OUTER_NORMALS']
        if bi.has_dirichlet:
            # precomputed Dirichlet values (set in __init__ when dirichlet_data
            # is non-parametric) take precedence over re-evaluation
            if hasattr(self, '_dirichlet_values'):
                dirichlet_values = self._dirichlet_values
            elif self.dirichlet_data is not None:
                dirichlet_values = self.dirichlet_data(CENTERS[DIRICHLET_BOUNDARIES], mu=mu)
            else:
                dirichlet_values = np.zeros_like(DIRICHLET_BOUNDARIES)
            F_dirichlet = self.numerical_flux.evaluate_stage1(dirichlet_values, mu)
        # NOTE: i == j for every iteration; the enumerate over range is redundant
        for i, j in enumerate(range(len(U))):
            Ui = U[j]
            Ri = R[i]
            F = self.numerical_flux.evaluate_stage1(Ui, mu)
            # gather stage-1 flux values for both cells adjacent to each edge
            F_edge = [f[SUPE] for f in F]
            # boundary edges have no second neighbour: mirror the inner value
            for f in F_edge:
                f[BOUNDARIES, 1] = f[BOUNDARIES, 0]
            if bi.has_dirichlet:
                for f, f_d in zip(F_edge, F_dirichlet):
                    f[DIRICHLET_BOUNDARIES, 1] = f_d
            NUM_FLUX = self.numerical_flux.evaluate_stage2(F_edge, UNIT_OUTER_NORMALS, VOLS1, mu)
            if bi.has_neumann:
                NUM_FLUX[NEUMANN_BOUNDARIES] = 0
            # scatter edge fluxes to the adjacent cells (masked: skips -1 indices)
            iadd_masked(Ri, NUM_FLUX, SUPE[:, 0])
            isub_masked(Ri, NUM_FLUX, SUPE[:, 1])
        R /= VOLS0
        return self.range.make_array(R)
    def jacobian(self, U, mu=None):
        """Assemble the Jacobian of the operator at `U` via central finite differences.

        The stage-1 flux is evaluated at U, U+delta and U-delta; the partial
        derivatives of the numerical flux w.r.t. the inner (index 0) and outer
        (index 1) cell values are then approximated by central differences and
        scattered into a sparse matrix.
        """
        assert U in self.source and len(U) == 1
        mu = self.parse_parameter(mu)
        if not hasattr(self, '_grid_data'):
            self._fetch_grid_data()
        U = U.to_numpy().ravel()
        g = self.grid
        bi = self.boundary_info
        gd = self._grid_data
        SUPE = gd['SUPE']
        VOLS0 = gd['VOLS0']
        VOLS1 = gd['VOLS1']
        BOUNDARIES = gd['BOUNDARIES']
        CENTERS = gd['CENTERS']
        DIRICHLET_BOUNDARIES = gd['DIRICHLET_BOUNDARIES']
        NEUMANN_BOUNDARIES = gd['NEUMANN_BOUNDARIES']
        UNIT_OUTER_NORMALS = gd['UNIT_OUTER_NORMALS']
        INNER = np.setdiff1d(np.arange(g.size(1)), BOUNDARIES)
        # finite difference step: solver option overrides the global default
        solver_options = self.solver_options
        delta = solver_options.get('jacobian_delta') if solver_options else None
        if delta is None:
            delta = jacobian_options()['delta']
        if bi.has_dirichlet:
            if hasattr(self, '_dirichlet_values'):
                dirichlet_values = self._dirichlet_values
            elif self.dirichlet_data is not None:
                dirichlet_values = self.dirichlet_data(CENTERS[DIRICHLET_BOUNDARIES], mu=mu)
            else:
                dirichlet_values = np.zeros_like(DIRICHLET_BOUNDARIES)
            F_dirichlet = self.numerical_flux.evaluate_stage1(dirichlet_values, mu)
        UP = U + delta
        UM = U - delta
        F = self.numerical_flux.evaluate_stage1(U, mu)
        FP = self.numerical_flux.evaluate_stage1(UP, mu)
        FM = self.numerical_flux.evaluate_stage1(UM, mu)
        del UP, UM
        F_edge = [f[SUPE] for f in F]
        FP_edge = [f[SUPE] for f in FP]
        FM_edge = [f[SUPE] for f in FM]
        del F, FP, FM
        # perturb only the inner (index 0) value: + delta
        F0P_edge = [f.copy() for f in F_edge]
        for f, ff in zip(F0P_edge, FP_edge):
            f[:, 0] = ff[:, 0]
            f[BOUNDARIES, 1] = f[BOUNDARIES, 0]
        if bi.has_dirichlet:
            for f, f_d in zip(F0P_edge, F_dirichlet):
                f[DIRICHLET_BOUNDARIES, 1] = f_d
        NUM_FLUX_0P = self.numerical_flux.evaluate_stage2(F0P_edge, UNIT_OUTER_NORMALS, VOLS1, mu)
        del F0P_edge
        # perturb only the inner (index 0) value: - delta
        F0M_edge = [f.copy() for f in F_edge]
        for f, ff in zip(F0M_edge, FM_edge):
            f[:, 0] = ff[:, 0]
            f[BOUNDARIES, 1] = f[BOUNDARIES, 0]
        if bi.has_dirichlet:
            for f, f_d in zip(F0M_edge, F_dirichlet):
                f[DIRICHLET_BOUNDARIES, 1] = f_d
        NUM_FLUX_0M = self.numerical_flux.evaluate_stage2(F0M_edge, UNIT_OUTER_NORMALS, VOLS1, mu)
        del F0M_edge
        # central difference: derivative of the flux w.r.t. the inner cell value
        D_NUM_FLUX_0 = (NUM_FLUX_0P - NUM_FLUX_0M)
        D_NUM_FLUX_0 /= (2 * delta)
        if bi.has_neumann:
            D_NUM_FLUX_0[NEUMANN_BOUNDARIES] = 0
        del NUM_FLUX_0P, NUM_FLUX_0M
        # perturb only the outer (index 1) value: + delta
        F1P_edge = [f.copy() for f in F_edge]
        for f, ff in zip(F1P_edge, FP_edge):
            f[:, 1] = ff[:, 1]
            f[BOUNDARIES, 1] = f[BOUNDARIES, 0]
        if bi.has_dirichlet:
            for f, f_d in zip(F1P_edge, F_dirichlet):
                f[DIRICHLET_BOUNDARIES, 1] = f_d
        NUM_FLUX_1P = self.numerical_flux.evaluate_stage2(F1P_edge, UNIT_OUTER_NORMALS, VOLS1, mu)
        del F1P_edge, FP_edge
        # perturb only the outer (index 1) value: - delta (reuses F_edge in place)
        F1M_edge = F_edge
        for f, ff in zip(F1M_edge, FM_edge):
            f[:, 1] = ff[:, 1]
            f[BOUNDARIES, 1] = f[BOUNDARIES, 0]
        if bi.has_dirichlet:
            for f, f_d in zip(F1M_edge, F_dirichlet):
                f[DIRICHLET_BOUNDARIES, 1] = f_d
        NUM_FLUX_1M = self.numerical_flux.evaluate_stage2(F1M_edge, UNIT_OUTER_NORMALS, VOLS1, mu)
        del F1M_edge, FM_edge
        # central difference: derivative of the flux w.r.t. the outer cell value
        D_NUM_FLUX_1 = (NUM_FLUX_1P - NUM_FLUX_1M)
        D_NUM_FLUX_1 /= (2 * delta)
        if bi.has_neumann:
            D_NUM_FLUX_1[NEUMANN_BOUNDARIES] = 0
        del NUM_FLUX_1P, NUM_FLUX_1M
        # scatter the edge-wise derivatives into COO triplets (rows I0, cols I1)
        I1 = np.hstack([SUPE[INNER, 0], SUPE[INNER, 0], SUPE[INNER, 1], SUPE[INNER, 1], SUPE[BOUNDARIES, 0]])
        I0 = np.hstack([SUPE[INNER, 0], SUPE[INNER, 1], SUPE[INNER, 0], SUPE[INNER, 1], SUPE[BOUNDARIES, 0]])
        V = np.hstack([D_NUM_FLUX_0[INNER], -D_NUM_FLUX_0[INNER], D_NUM_FLUX_1[INNER], -D_NUM_FLUX_1[INNER],
                       D_NUM_FLUX_0[BOUNDARIES]])
        A = coo_matrix((V, (I0, I1)), shape=(g.size(0), g.size(0)))
        A = csc_matrix(A).copy()  # See pymor.operators.cg.DiffusionOperatorP1 for why copy() is necessary
        # scale rows by the inverse cell volumes
        A = dia_matrix(([1. / VOLS0], [0]), shape=(g.size(0),) * 2) * A
        return NumpyMatrixOperator(A, source_id=self.source.id, range_id=self.range.id)
def nonlinear_advection_lax_friedrichs_operator(grid, boundary_info, flux, lxf_lambda=1.0,
                                                dirichlet_data=None, solver_options=None, name=None):
    """Create a :class:`NonlinearAdvectionOperator` equipped with a :class:`LaxFriedrichsFlux`."""
    return NonlinearAdvectionOperator(grid, boundary_info,
                                      LaxFriedrichsFlux(flux, lxf_lambda),
                                      dirichlet_data, solver_options, name=name)
def nonlinear_advection_simplified_engquist_osher_operator(grid, boundary_info, flux, flux_derivative,
                                                           dirichlet_data=None, solver_options=None, name=None):
    """Create a :class:`NonlinearAdvectionOperator` equipped with a :class:`SimplifiedEngquistOsherFlux`."""
    return NonlinearAdvectionOperator(grid, boundary_info,
                                      SimplifiedEngquistOsherFlux(flux, flux_derivative),
                                      dirichlet_data, solver_options, name=name)
def nonlinear_advection_engquist_osher_operator(grid, boundary_info, flux, flux_derivative, gausspoints=5, intervals=1,
                                                dirichlet_data=None, solver_options=None, name=None):
    """Create a :class:`NonlinearAdvectionOperator` equipped with an :class:`EngquistOsherFlux`."""
    engquist_osher_flux = EngquistOsherFlux(flux, flux_derivative, gausspoints=gausspoints, intervals=intervals)
    return NonlinearAdvectionOperator(grid, boundary_info, engquist_osher_flux,
                                      dirichlet_data, solver_options, name=name)
class LinearAdvectionLaxFriedrichs(NumpyMatrixBasedOperator):
    """Linear advection finite Volume |Operator| using Lax-Friedrichs flux.

    The operator is of the form ::

        L(u, mu)(x) = ∇ ⋅ (v(x, mu)⋅u(x))

    See :class:`LaxFriedrichsFlux` for the definition of the Lax-Friedrichs flux.

    Parameters
    ----------
    grid
        |Grid| over which to assemble the operator.
    boundary_info
        |BoundaryInfo| determining the Dirichlet and Neumann boundaries.
    velocity_field
        |Function| defining the velocity field `v`.
    lxf_lambda
        The stabilization parameter `λ`.
    solver_options
        The |solver_options| for the operator.
    name
        The name of the operator.
    """

    def __init__(self, grid, boundary_info, velocity_field, lxf_lambda=1.0, solver_options=None, name=None):
        self.grid = grid
        self.boundary_info = boundary_info
        self.velocity_field = velocity_field
        self.lxf_lambda = lxf_lambda
        self.solver_options = solver_options
        self.name = name
        self.build_parameter_type(velocity_field)
        self.source = self.range = FVVectorSpace(grid)

    def _assemble(self, mu=None):
        """Assemble the sparse operator matrix for the given parameter."""
        g = self.grid
        bi = self.boundary_info
        SUPE = g.superentities(1, 0)
        SUPI = g.superentity_indices(1, 0)
        assert SUPE.ndim == 2
        edge_volumes = g.volumes(1)
        boundary_edges = g.boundaries(1)
        inner_edges = np.setdiff1d(np.arange(g.size(1)), boundary_edges)
        # NOTE: np.int was deprecated in NumPy 1.20 and removed in 1.24;
        # the builtin int is the equivalent dtype specification.
        dirichlet_edges = bi.dirichlet_boundaries(1) if bi.has_dirichlet else np.array([], ndmin=1, dtype=int)
        neumann_edges = bi.neumann_boundaries(1) if bi.has_neumann else np.array([], ndmin=1, dtype=int)
        # boundary edges that are neither Dirichlet nor Neumann are treated as outflow
        outflow_edges = np.setdiff1d(boundary_edges, np.hstack([dirichlet_edges, neumann_edges]))
        # velocity component in direction of the edge's outer normal
        normal_velocities = np.einsum('ei,ei->e',
                                      self.velocity_field(g.centers(1), mu=mu),
                                      g.unit_outer_normals()[SUPE[:, 0], SUPI[:, 0]])
        nv_inner = normal_velocities[inner_edges]
        l_inner = np.ones_like(nv_inner) * (1. / self.lxf_lambda)
        # each inner edge contributes to four matrix entries (both adjacent cells)
        I0_inner = np.hstack([SUPE[inner_edges, 0], SUPE[inner_edges, 0], SUPE[inner_edges, 1], SUPE[inner_edges, 1]])
        I1_inner = np.hstack([SUPE[inner_edges, 0], SUPE[inner_edges, 1], SUPE[inner_edges, 0], SUPE[inner_edges, 1]])
        V_inner = np.hstack([nv_inner, nv_inner, -nv_inner, -nv_inner])
        V_inner += np.hstack([l_inner, -l_inner, -l_inner, l_inner])
        V_inner *= np.tile(0.5 * edge_volumes[inner_edges], 4)
        I_out = SUPE[outflow_edges, 0]
        V_out = edge_volumes[outflow_edges] * normal_velocities[outflow_edges]
        I_dir = SUPE[dirichlet_edges, 0]
        V_dir = edge_volumes[dirichlet_edges] * (0.5 * normal_velocities[dirichlet_edges] + 0.5 / self.lxf_lambda)
        I0 = np.hstack([I0_inner, I_out, I_dir])
        I1 = np.hstack([I1_inner, I_out, I_dir])
        V = np.hstack([V_inner, V_out, V_dir])
        A = coo_matrix((V, (I0, I1)), shape=(g.size(0), g.size(0)))
        A = csc_matrix(A).copy()  # See pymor.operators.cg.DiffusionOperatorP1 for why copy() is necessary
        # scale rows by the inverse cell volumes
        A = dia_matrix(([1. / g.volumes(0)], [0]), shape=(g.size(0),) * 2) * A
        return A
class L2Product(NumpyMatrixBasedOperator):
    """|Operator| representing the L2-product between finite volume functions.

    Parameters
    ----------
    grid
        The |Grid| for which to assemble the product.
    solver_options
        The |solver_options| for the operator.
    name
        The name of the product.
    """

    sparse = True

    def __init__(self, grid, solver_options=None, name=None):
        self.grid = grid
        self.solver_options = solver_options
        self.name = name
        self.source = self.range = FVVectorSpace(grid)

    def _assemble(self, mu=None):
        # FV basis functions are disjoint indicator functions, so the Gram
        # matrix is diagonal with the cell volumes on the diagonal.
        return dia_matrix((self.grid.volumes(0), [0]), shape=(self.grid.size(0),) * 2)
class ReactionOperator(NumpyMatrixBasedOperator):
    """Finite Volume reaction |Operator|.

    The operator is of the form ::

        L(u, mu)(x) = c(x, mu)⋅u(x)

    Parameters
    ----------
    grid
        The |Grid| for which to assemble the operator.
    reaction_coefficient
        The function 'c'
    solver_options
        The |solver_options| for the operator.
    name
        The name of the operator.
    """

    sparse = True

    def __init__(self, grid, reaction_coefficient, solver_options=None, name=None):
        assert reaction_coefficient.dim_domain == grid.dim and reaction_coefficient.shape_range == ()
        self.grid = grid
        self.reaction_coefficient = reaction_coefficient
        self.solver_options = solver_options
        self.name = name
        self.source = self.range = FVVectorSpace(grid)
        self.build_parameter_type(reaction_coefficient)

    def _assemble(self, mu=None):
        # reaction acts cell-locally on piecewise-constant functions, so the
        # matrix is diagonal with c evaluated at the cell centers
        coefficients = self.reaction_coefficient.evaluate(self.grid.centers(0), mu=mu)
        return dia_matrix((coefficients, [0]), shape=(self.grid.size(0),) * 2)
class NonlinearReactionOperator(OperatorBase):
    """Nonlinear finite volume reaction operator applying `reaction_function` pointwise."""

    # nonlinear by definition of reaction_function
    linear = False

    def __init__(self, grid, reaction_function, reaction_function_derivative=None, space_id='STATE', name=None):
        self.grid = grid
        self.reaction_function = reaction_function
        self.reaction_function_derivative = reaction_function_derivative
        self.build_parameter_type(reaction_function, reaction_function_derivative)
        self.space_id = space_id
        self.name = name
        self.source = self.range = FVVectorSpace(grid, space_id)

    def apply(self, U, ind=None, mu=None):
        """Apply the reaction function pointwise to (optionally indexed) vectors of `U`."""
        assert U in self.source
        R = U.to_numpy() if ind is None else U.to_numpy()[ind]
        # reaction_function expects a trailing axis for its (scalar) argument
        R = self.reaction_function.evaluate(R.reshape(R.shape + (1,)), mu=mu)
        return self.range.make_array(R)

    def jacobian(self, U, mu=None):
        """Return the (diagonal) Jacobian at `U`; requires `reaction_function_derivative`."""
        if self.reaction_function_derivative is None:
            raise NotImplementedError
        U = U.to_numpy()
        A = dia_matrix((self.reaction_function_derivative.evaluate(U.reshape(U.shape + (1,)), mu=mu), [0]),
                       shape=(self.grid.size(0),) * 2)
        return NumpyMatrixOperator(A, source_id=self.source.id, range_id=self.range.id)
class L2ProductFunctional(NumpyMatrixBasedOperator):
    """Finite volume functional representing the inner product with an L2-|Function|.

    Additionally, boundary conditions can be enforced by providing `dirichlet_data`
    and `neumann_data` functions.

    Parameters
    ----------
    grid
        |Grid| for which to assemble the functional.
    function
        The |Function| with which to take the inner product or `None`.
    boundary_info
        |BoundaryInfo| determining the Dirichlet and Neumann boundaries or `None`.
        If `None`, no boundary treatment is performed.
    dirichlet_data
        |Function| providing the Dirichlet boundary values. If `None`,
        constant-zero boundary is assumed.
    diffusion_function
        See :class:`DiffusionOperator`. Has to be specified in case `dirichlet_data`
        is given.
    diffusion_constant
        See :class:`DiffusionOperator`. Has to be specified in case `dirichlet_data`
        is given.
    neumann_data
        |Function| providing the Neumann boundary values. If `None`,
        constant-zero is assumed.
    order
        Order of the Gauss quadrature to use for numerical integration.
    name
        The name of the functional.
    """

    source = NumpyVectorSpace(1)
    sparse = False

    def __init__(self, grid, function=None, boundary_info=None, dirichlet_data=None, diffusion_function=None,
                 diffusion_constant=None, neumann_data=None, order=1, name=None):
        assert function is None or function.shape_range == ()
        self.range = FVVectorSpace(grid)
        self.grid = grid
        self.boundary_info = boundary_info
        self.function = function
        self.dirichlet_data = dirichlet_data
        self.diffusion_function = diffusion_function
        self.diffusion_constant = diffusion_constant
        self.neumann_data = neumann_data
        self.order = order
        self.name = name
        self.build_parameter_type(function, dirichlet_data, diffusion_function, neumann_data)

    def _assemble(self, mu=None):
        g = self.grid
        bi = self.boundary_info
        if self.function is not None:
            # evaluate function at all quadrature points
            # -> shape = (g.size(0), number of quadrature points)
            F = self.function(g.quadrature_points(0, order=self.order), mu=mu)
            _, w = g.reference_element.quadrature(order=self.order)
            # integrate the function over each element using the quadrature weights
            F_INTS = np.einsum('ei,e,i->e', F, g.integration_elements(0), w).ravel()
        else:
            F_INTS = np.zeros(g.size(0))
        # BUGFIX: compare neumann_data against None explicitly (as done below for
        # the actual evaluation) instead of relying on truthiness of the Function.
        if bi is not None and (bi.has_dirichlet and self.dirichlet_data is not None
                               or bi.has_neumann and self.neumann_data is not None):
            centers = g.centers(1)
            superentities = g.superentities(1, 0)
            superentity_indices = g.superentity_indices(1, 0)
            SE_I0 = superentities[:, 0]
            VOLS = g.volumes(1)
            FLUXES = np.zeros(g.size(1))
            if bi.has_dirichlet and self.dirichlet_data is not None:
                dirichlet_mask = bi.dirichlet_mask(1)
                SE_I0_D = SE_I0[dirichlet_mask]
                boundary_normals = g.unit_outer_normals()[SE_I0_D, superentity_indices[:, 0][dirichlet_mask]]
                BOUNDARY_DISTS = np.sum((centers[dirichlet_mask, :] - g.orthogonal_centers()[SE_I0_D, :])
                                        * boundary_normals,
                                        axis=-1)
                # BUGFIX: dirichlet_data is parametric (see build_parameter_type),
                # so mu must be forwarded to its evaluation.
                DIRICHLET_FLUXES = (VOLS[dirichlet_mask]
                                    * self.dirichlet_data(centers[dirichlet_mask], mu=mu) / BOUNDARY_DISTS)
                if self.diffusion_function is not None:
                    DIRICHLET_FLUXES *= self.diffusion_function(centers[dirichlet_mask], mu=mu)
                if self.diffusion_constant is not None:
                    DIRICHLET_FLUXES *= self.diffusion_constant
                FLUXES[dirichlet_mask] = DIRICHLET_FLUXES
            if bi.has_neumann and self.neumann_data is not None:
                neumann_mask = bi.neumann_mask(1)
                FLUXES[neumann_mask] -= VOLS[neumann_mask] * self.neumann_data(centers[neumann_mask], mu=mu)
            # accumulate boundary fluxes into the cells adjacent to the boundary edges
            F_INTS += np.bincount(SE_I0, weights=FLUXES, minlength=len(F_INTS))
        F_INTS /= g.volumes(0)
        return F_INTS.reshape((-1, 1))
class DiffusionOperator(NumpyMatrixBasedOperator):
    """Finite Volume Diffusion |Operator|.

    The operator is of the form ::

        (Lu)(x) = c ∇ ⋅ [ d(x) ∇ u(x) ]

    Parameters
    ----------
    grid
        The |Grid| over which to assemble the operator.
    boundary_info
        |BoundaryInfo| for the treatment of Dirichlet boundary conditions.
    diffusion_function
        The scalar-valued |Function| `d(x)`. If `None`, constant one is assumed.
    diffusion_constant
        The constant `c`. If `None`, `c` is set to one.
    solver_options
        The |solver_options| for the operator.
    name
        Name of the operator.
    """

    sparse = True

    def __init__(self, grid, boundary_info, diffusion_function=None, diffusion_constant=None, solver_options=None,
                 name=None):
        super().__init__()
        assert isinstance(grid, AffineGridWithOrthogonalCentersInterface)
        assert (diffusion_function is None
                or (isinstance(diffusion_function, FunctionInterface)
                    and diffusion_function.dim_domain == grid.dim
                    and diffusion_function.shape_range == ()))
        self.grid = grid
        self.boundary_info = boundary_info
        self.diffusion_function = diffusion_function
        self.diffusion_constant = diffusion_constant
        self.solver_options = solver_options
        self.name = name
        self.source = self.range = FVVectorSpace(grid)
        if diffusion_function is not None:
            self.build_parameter_type(diffusion_function)

    def _assemble(self, mu=None):
        grid = self.grid
        # compute the local coordinates of the codim-1 subentity centers in the reference element
        reference_element = grid.reference_element(0)
        subentity_embedding = reference_element.subentity_embedding(1)
        subentity_centers = (np.einsum('eij,j->ei',
                                       subentity_embedding[0], reference_element.sub_reference_element(1).center())
                             + subentity_embedding[1])
        # compute shift for periodic boundaries
        embeddings = grid.embeddings(0)
        superentities = grid.superentities(1, 0)
        superentity_indices = grid.superentity_indices(1, 0)
        boundary_mask = grid.boundary_mask(1)
        inner_mask = ~boundary_mask
        SE_I0 = superentities[:, 0]
        SE_I1 = superentities[:, 1]
        SE_I0_I = SE_I0[inner_mask]
        SE_I1_I = SE_I1[inner_mask]
        # SHIFTS = difference of the edge-center images as seen from both adjacent
        # cells; non-zero exactly for edges identified across periodic boundaries
        SHIFTS = (np.einsum('eij,ej->ei',
                            embeddings[0][SE_I0_I, :, :],
                            subentity_centers[superentity_indices[:, 0][inner_mask]])
                  + embeddings[1][SE_I0_I, :])
        SHIFTS -= (np.einsum('eij,ej->ei',
                             embeddings[0][SE_I1_I, :, :],
                             subentity_centers[superentity_indices[:, 1][inner_mask]])
                   + embeddings[1][SE_I1_I, :])
        # compute distances for gradient approximations
        centers = grid.centers(1)
        orthogonal_centers = grid.orthogonal_centers()
        VOLS = grid.volumes(1)
        INNER_DISTS = np.linalg.norm(orthogonal_centers[SE_I0_I, :] - orthogonal_centers[SE_I1_I, :] - SHIFTS,
                                     axis=1)
        del SHIFTS
        # assemble matrix
        FLUXES = VOLS[inner_mask] / INNER_DISTS
        if self.diffusion_function is not None:
            FLUXES *= self.diffusion_function(centers[inner_mask], mu=mu)
        if self.diffusion_constant is not None:
            FLUXES *= self.diffusion_constant
        del INNER_DISTS
        # each inner edge couples both adjacent cells: two off-diagonal and two diagonal entries
        FLUXES = np.concatenate((-FLUXES, -FLUXES, FLUXES, FLUXES))
        FLUXES_I0 = np.concatenate((SE_I0_I, SE_I1_I, SE_I0_I, SE_I1_I))
        FLUXES_I1 = np.concatenate((SE_I1_I, SE_I0_I, SE_I0_I, SE_I1_I))
        if self.boundary_info.has_dirichlet:
            dirichlet_mask = self.boundary_info.dirichlet_mask(1)
            SE_I0_D = SE_I0[dirichlet_mask]
            boundary_normals = grid.unit_outer_normals()[SE_I0_D, superentity_indices[:, 0][dirichlet_mask]]
            # distance of the cell's orthogonal center to the boundary edge
            BOUNDARY_DISTS = np.sum((centers[dirichlet_mask, :] - orthogonal_centers[SE_I0_D, :]) * boundary_normals,
                                    axis=-1)
            DIRICHLET_FLUXES = VOLS[dirichlet_mask] / BOUNDARY_DISTS
            if self.diffusion_function is not None:
                DIRICHLET_FLUXES *= self.diffusion_function(centers[dirichlet_mask], mu=mu)
            if self.diffusion_constant is not None:
                DIRICHLET_FLUXES *= self.diffusion_constant
            FLUXES = np.concatenate((FLUXES, DIRICHLET_FLUXES))
            FLUXES_I0 = np.concatenate((FLUXES_I0, SE_I0_D))
            FLUXES_I1 = np.concatenate((FLUXES_I1, SE_I0_D))
        A = coo_matrix((FLUXES, (FLUXES_I0, FLUXES_I1)), shape=(self.source.dim, self.source.dim))
        # scale rows by the inverse cell volumes
        A = (dia_matrix(([1. / grid.volumes(0)], [0]), shape=(grid.size(0),) * 2) * A).tocsc()
        return A
| 41.482436 | 119 | 0.629848 |
7959b22f2a8456c946ff2e50d7c94d7f6c7c62bc | 4,583 | py | Python | test_20201216.py | tjdalsckd/Hyc_wheelchair | 7279775fafbbafc6419d372ab2ea1199f1c3d033 | [
"Apache-2.0"
] | null | null | null | test_20201216.py | tjdalsckd/Hyc_wheelchair | 7279775fafbbafc6419d372ab2ea1199f1c3d033 | [
"Apache-2.0"
] | 1 | 2021-01-06T08:40:11.000Z | 2021-01-06T08:40:11.000Z | test_20201216.py | tjdalsckd/Hyc_wheelchair | 7279775fafbbafc6419d372ab2ea1199f1c3d033 | [
"Apache-2.0"
] | null | null | null | import sys, time
import threading
import keyboard
import numpy as np
from bledevice import scanble, BLEDevice
Device1 = BLEDevice("DD:43:89:16:43:81")
Device2 = BLEDevice("F4:82:B3:50:ED:55")
time.sleep(0.5)
sum_time = 0;
mean_time = 0;
count = 0;
STOP = 0
MOVE_FWD = 1
MOVE_BWD = 2
MOVE_FWR_R = 10
MOVE_FWR_L = 11
MOVE_R = 3
MOVE_L = 4
IDLE = 9
F = 5
S = 6
Mon = 7
Moff = 8
keycode = ""
def print_key():
    """Debug hook fired when a registered hotkey is pressed."""
    print("hotkey press")
direction = 1
state = STOP;
def print_state():
    """Report the current drive state; re-issue stop/idle commands while in those states."""
    global state
    if state == STOP:
        M_STOP()  # keep commanding zero speed while stopped
        print("\nSTOP")
    elif state == IDLE:
        M_IDLE()  # keep commanding the idle creep speed
        print("\nIDLE")
    else:
        labels = {
            MOVE_FWD: "MOVE_FORWARD",
            MOVE_BWD: "MOVE_BACKWARD",
            MOVE_R: "MOVE_RIGHT",
            MOVE_L: "MOVE_LEFT",
            F: "STOP",
            S: "STOP",
            Mon: "MOTOR_ON",
            Moff: "MOTOR_OFF",
        }
        # unknown states (e.g. MOVE_FWR_R / MOVE_FWR_L) print nothing,
        # exactly like the original if/elif chain
        if state in labels:
            print("\n" + labels[state])
def data_ON():
    """Announce that data streaming is enabled (BLE RUN-flag writes currently disabled)."""
    print("\nData ON")
    # Device1.writereq(0xd, '545457550D0A')  # RUN_flag
    # Device2.writereq(0xd, '545457550D0A')  # RUN_flag
def data_OFF():
    """Announce that data streaming is disabled (BLE RUN-flag writes currently disabled)."""
    print("\nData OFF")
    # Device1.writereq(0xd, '545446660D0A')  # RUN_flag
    # Device2.writereq(0xd, '545446660D0A')  # RUN_flag
def motor_OFF():
    """Disable both wheel motors via the BLE RUN-flag command."""
    global state
    global Device1
    global Device2
    state = Moff
    for device in (Device1, Device2):
        device.writereq(0xd, '545246680D0A')  # RUN_flag: motors off
def motor_ON():
    """Enable both wheel motors via the BLE RUN-flag command."""
    global state
    state = Mon
    for device in (Device1, Device2):
        device.writereq(0xd, '54524F5F0D0A')  # RUN_flag: motors on
def M_FWD():
    """Drive forward at 5 km/h, reversing wheel direction first if needed."""
    global state
    global direction
    state = MOVE_FWD
    # if we were driving backwards, stop, flip both wheels to CCW/forward
    # and re-enable the motors before commanding the new speed
    if direction == -1:
        M_STOP();
        time.sleep(0.2)
        Device1.writereq(0xd,'544443790D0A')#CCW forward
        Device2.writereq(0xd,'544443790D0A')#CCW forward
        direction = 1;
        motor_ON();
    M_IDLE()
    Device1.writereq(0xd,'545750590D0A')#5km/h
    Device2.writereq(0xd,'545750590D0A')#5km/h
def M_FWD_RIGHT():
    """Mark the drive state as forward-right (no speed command is issued here)."""
    global state
    print("\nM_FWD_RIGHT")
    state = MOVE_FWR_R
def M_FWD_LEFT():
    """Mark the drive state as forward-left (no speed command is issued here)."""
    global state
    print("\nM_FWD_LEFT")
    state = MOVE_FWR_L
def M_IDLE():
    """Drop both wheels to the 2 km/h idle creep speed."""
    global state
    state = IDLE
    for device in (Device1, Device2):
        device.writereq(0xd, '545714950D0A')  # 2 km/h
    time.sleep(0.01)
def M_BWD():
    """Drive backward at 0.8 km/h, reversing wheel direction first if needed."""
    global state
    global direction
    state = MOVE_BWD
    # if we were driving forwards, stop, flip both wheels to CW/backward
    # and re-enable the motors before commanding the new speed
    if direction == 1:
        M_STOP();
        time.sleep(0.2)
        Device1.writereq(0xd,'544457650D0A')#CW backward
        Device2.writereq(0xd,'544457650D0A')#CW backward
        direction = -1;
        motor_ON();
    M_IDLE()
    Device1.writereq(0xd,'545708A10D0A')#0.8km/h
    Device2.writereq(0xd,'545708A10D0A')#0.8km/h
def M_RIGHT():
    """Turn right by driving the wheels at different speeds.

    Device1 is slowed to 2 km/h while Device2 runs at 5 km/h
    (presumably Device1 is the right/inner wheel — confirm wiring).
    """
    global state
    state = MOVE_R
    Device1.writereq(0xd, '545714950D0A')  # 2 km/h
    Device2.writereq(0xd, '545732770D0A')  # 5 km/h
def M_LEFT():
    """Turn left by driving the wheels at different speeds (mirror of M_RIGHT)."""
    global state
    state = MOVE_L
    Device1.writereq(0xd, '545732770D0A')  # 5 km/h
    Device2.writereq(0xd, '545714950D0A')  # 2 km/h
def M_STOP():
    """Command zero speed on both wheels."""
    global state
    state = STOP
    for device in (Device1, Device2):
        device.writereq(0xd, '545700A90D0A')  # 0 km/h
def fFASTER():
    """Request a speed increase (BLE Spd_Up write currently disabled)."""
    global state
    state = F
    # Device1.writereq(0xd, '547575160D0A')  # Spd_Up
    # Device2.writereq(0xd, '547575160D0A')  # Spd_Up
def fSLOWER():
    """Request a speed decrease (BLE Spd_Down write currently disabled)."""
    global state
    state = S
    # Device1.writereq(0xd, '546464380D0A')  # Spd_Down
    # Device2.writereq(0xd, '546464380D0A')  # Spd_Down
def Desired_Speed(direction, desired):
    """Build (and report) the BLE speed command packet for a desired speed.

    Parameters
    ----------
    direction
        Drive direction flag (currently unused by the packet format).
    desired
        Desired speed byte in [0, 0xA9]; the packet checksum is 0xA9 - desired.

    Returns
    -------
    str
        The 12-character hex command, e.g. ``"545714950D0A"`` for ``desired=20``.

    Raises
    ------
    ValueError
        If `desired` is outside [0, 0xA9] (the checksum byte would underflow).
    """
    if not 0 <= desired <= 0xA9:
        raise ValueError(f"desired speed byte out of range: {desired}")
    print("Desired Speed = ", desired, '\n')
    # BUGFIX: zero-pad to two hex digits; the original format(desired, 'X')
    # followed by [0] + [1] indexing raised IndexError for any speed < 16.
    senddata = "5457" + format(desired, '02X') + format(0xA9 - desired, '02X') + "0D0A"
    # Device1.writereq(0xd, senddata)  # Desired Speed
    # Device2.writereq(0xd, senddata)  # Desired Speed
    print("Senddata = ", senddata, '\n')
    return senddata
# Keyboard bindings: WASD drive controls, space stops, esc kills the motors,
# 'r' re-enables them and 'o' requests a fixed desired speed of 20.
keyboard.add_hotkey('w', M_FWD)
keyboard.add_hotkey('a', M_LEFT)
keyboard.add_hotkey('s', M_BWD)
keyboard.add_hotkey('d', M_RIGHT)
keyboard.add_hotkey('w+a', M_FWD_LEFT)
keyboard.add_hotkey('w+d', M_FWD_RIGHT)
keyboard.add_hotkey('space', M_STOP)
keyboard.add_hotkey('esc', motor_OFF)
keyboard.add_hotkey('r', motor_ON)
keyboard.add_hotkey('o', Desired_Speed,args=(1,20))
if __name__ == "__main__":
    # Main loop: report the state ~10x per second and fall back to IDLE
    # after any one-shot command state (turn, faster/slower, motor on/off).
    while True:
        #M_IDLE()
        print_state()
        if state == STOP or state == IDLE:
            pass
        else:
            state = IDLE
        print("direction = ",direction);
        time.sleep(0.1)
        # disabled notification-timing benchmark (kept for reference)
        '''
        count = count+1;
        start = time.time()
        #data = Device1.notify();
        data = 'notify\n';
        sum_time = sum_time + time.time() - start;
        mean_time = sum_time/count;
        print("time :", time.time() - start,"mean_time : ",mean_time,"\n",);
        print(data)
        print("\n")
        '''
7959b311c4550391f3df7e45ff99ea45171b74f5 | 4,620 | py | Python | ico/tests/contracts/test_releasable.py | miohtama/Smart-Contracts | 8892e85d1c75994871a0fa14eb8c03016db39d88 | [
"Apache-2.0"
] | 1,148 | 2017-03-28T08:41:32.000Z | 2019-01-26T13:39:39.000Z | ico/tests/contracts/test_releasable.py | miohtama/Smart-Contracts | 8892e85d1c75994871a0fa14eb8c03016db39d88 | [
"Apache-2.0"
] | 117 | 2017-03-31T07:31:22.000Z | 2019-01-14T16:14:49.000Z | ico/tests/contracts/test_releasable.py | miohtama/Smart-Contracts | 8892e85d1c75994871a0fa14eb8c03016db39d88 | [
"Apache-2.0"
] | 494 | 2017-03-30T23:11:45.000Z | 2019-01-29T17:41:37.000Z | """Releasable token."""
import pytest
from eth_tester.exceptions import TransactionFailed
from web3.contract import Contract
def test_bad_released(token: Contract, team_multisig: str, malicious_address: str, empty_address: str):
    """Only release agent can make token transferable."""
    assert not token.functions.released().call()
    # Neither a malicious outsider nor the owner may release directly;
    # release has to go through the release agent process.
    for sender in (malicious_address, team_multisig):
        with pytest.raises(TransactionFailed):
            token.functions.releaseTokenTransfer().transact({"from": sender})
def test_released(released_token: Contract, customer: str, empty_address: str):
    """Released token is free to transfer."""
    assert released_token.functions.released().call()
def test_transfer(released_token: Contract, customer: str, empty_address: str):
    """ERC-20 compatible transfer() is available."""
    token = released_token
    amount = 5000
    balance_before = token.functions.balanceOf(customer).call()

    token.functions.transfer(empty_address, amount).transact({"from": customer})

    assert token.functions.balanceOf(customer).call() == balance_before - amount
    assert token.functions.balanceOf(empty_address).call() == amount

    # one Transfer event from this test plus the one emitted at token release
    events = token.events.Transfer().createFilter(fromBlock=0).get_all_entries()
    assert len(events) == 2
    args = events[-1]["args"]
    assert args["from"] == customer
    assert args["to"] == empty_address
    assert args["value"] == amount
def test_cannot_transfer(token: Contract, team_multisig, customer: str, customer_2: str):
    """Tokens cannot be transferred before they are released."""
    # BUGFIX: use the modern web3 contract API consistently with the rest of
    # the file; token.call().released() is the pre-v4 syntax and fails on
    # web3.py >= 4.
    assert not token.functions.released().call()

    # team_multisig is NOT on the whitelisted transfer agent list
    assert token.functions.transferAgents(team_multisig).call() == False

    with pytest.raises(TransactionFailed):
        token.functions.transfer(customer, 10000).transact({"from": team_multisig})

    # customer cannot transfer to customer 2 before release
    assert token.functions.transferAgents(customer).call() == False

    with pytest.raises(TransactionFailed):
        token.functions.transfer(customer_2, 10000).transact({"from": customer})
def test_not_enough_balance(released_token: Contract, customer: str, empty_address: str):
    """ERC-20 transfer fails if user exceeds his/her balance."""
    # one token more than the customer owns
    excessive = released_token.functions.balanceOf(customer).call() + 1
    with pytest.raises(TransactionFailed):
        released_token.functions.transfer(empty_address, excessive).transact({"from": customer})
def test_transfer_with_allowance(released_token: Contract, customer: str, empty_address: str, allowed_party):
    """Tokens can be transferred with ERC-20 allowance approval."""
    token = released_token
    amount = 5000
    token.events.Approval().createFilter(fromBlock=0)
    initial_balance = token.functions.balanceOf(customer).call()
    token.functions.approve(allowed_party, amount).transact({"from": customer})
    assert token.functions.allowance(customer, allowed_party).call() == amount

    events = token.events.Approval().createFilter(fromBlock=0).get_all_entries()
    assert len(events) > 0  # Edgeless gets 2 events, because one is needed to construct token
    e = events[-1]
    assert e["args"]["owner"] == customer
    assert e["args"]["spender"] == allowed_party
    assert e["args"]["value"] == amount

    # BUGFIX: token.transact(...).transferFrom(...) is the pre-v4 web3 syntax
    # and fails on web3.py >= 4; use the .functions API like the rest of the file.
    token.functions.transferFrom(customer, empty_address, amount).transact({"from": allowed_party})

    events = token.events.Transfer().createFilter(fromBlock=0).get_all_entries()
    assert len(events) == 1 + 1
    e = events[-1]
    assert e["args"]["to"] == empty_address
    assert e["args"]["from"] == customer
    assert e["args"]["value"] == amount

    assert token.functions.balanceOf(customer).call() == initial_balance - amount
    assert token.functions.balanceOf(empty_address).call() == amount
    # the allowance must be fully consumed
    assert token.functions.allowance(customer, allowed_party).call() == 0
def test_transfer_with_allowance_exceeded(released_token: Contract, customer: str, empty_address: str, allowed_party):
    """One cannot transfers more than approved allowance."""
    token = released_token
    approved = 5000
    token.functions.approve(allowed_party, approved).transact({"from": customer})
    # spending even one token over the allowance must revert
    with pytest.raises(TransactionFailed):
        token.functions.transferFrom(customer, empty_address, approved + 1).transact({"from": allowed_party})
7959b3ed0d0d780b3dd2127650c09ca43c2ad7e3 | 1,433 | py | Python | main.py | Imran95942/CrocodileGame | fa8e710a2a3d90a29b4126610147ad4442835f89 | [
"Unlicense"
] | null | null | null | main.py | Imran95942/CrocodileGame | fa8e710a2a3d90a29b4126610147ad4442835f89 | [
"Unlicense"
] | null | null | null | main.py | Imran95942/CrocodileGame | fa8e710a2a3d90a29b4126610147ad4442835f89 | [
"Unlicense"
] | null | null | null | from telegram.ext import Defaults
from telegram.ext import Updater
from config import BOT_TOKEN
from config import SUDO_USERS
updater = Updater(
token=1951787106:AAFbVL0eaJha8luz8IjPa8xBaftpoztWBU4,
defaults=Defaults(
parse_mode='HTML',
disable_web_page_preview=True,
quote=False,
run_async=True,
),
)
dp = updater.dispatcher
if __name__ == '__main__':
import os
import sys
from threading import Thread
from telegram import Update
from telegram.ext import CallbackContext, CommandHandler
from handlers import add_handlers
from helpers.filters import sudo_only
if '-r' in sys.argv:
for user in SUDO_USERS:
updater.bot.send_message(user, 'Restarted.')
def stop_and_restart(chat, msg):
updater.stop()
os.execl(
sys.executable,
sys.executable,
*sys.argv,
'-r',
f'{chat}_{msg}',
)
def restart(update: Update, context: CallbackContext):
update.effective_message.reply_text('Restarting...')
Thread(
target=stop_and_restart, args=(
update.effective_chat.id,
update.effective_message.message_id,
),
).start()
dp.add_handler(CommandHandler('r', restart, sudo_only))
add_handlers(dp)
updater.start_polling(drop_pending_updates=True)
updater.idle()
| 24.288136 | 60 | 0.637823 |
7959b41cc201437433b81e1cae33ea561ca1d47d | 930 | py | Python | src/tests/unit/security/permission/manager.py | wilsonGmn/pyrin | 25dbe3ce17e80a43eee7cfc7140b4c268a6948e0 | [
"BSD-3-Clause"
] | null | null | null | src/tests/unit/security/permission/manager.py | wilsonGmn/pyrin | 25dbe3ce17e80a43eee7cfc7140b4c268a6948e0 | [
"BSD-3-Clause"
] | null | null | null | src/tests/unit/security/permission/manager.py | wilsonGmn/pyrin | 25dbe3ce17e80a43eee7cfc7140b4c268a6948e0 | [
"BSD-3-Clause"
] | null | null | null | # -*- coding: utf-8 -*-
"""
permission manager module.
"""
from pyrin.database.services import get_current_store
from pyrin.security.permission.manager import PermissionManager as BasePermissionManager
from tests.unit.security.permission import PermissionPackage
from tests.unit.security.permission.models import PermissionEntity
class PermissionManager(BasePermissionManager):
    """
    permission manager class.
    """

    package_class = PermissionPackage

    def _exists(self, permission_id):
        """
        gets a value indicating that given permission exists in database.

        :param int permission_id: permission id.

        :rtype: bool
        """
        store = get_current_store()
        query = store.query(PermissionEntity.id).filter(PermissionEntity.id == permission_id)
        return query.count() > 0
7959b5636f1a4fc82ae644e9aea773d70cd51b6b | 23,612 | py | Python | simulation_ws/src/rl-agent/markov/environments/mars_env.py | ToxaIvchenko/AWS-JPL-OSR-Challenge-1 | 6acb5603d7300ac500fea98df6fcff08a386e0dd | [
"Apache-2.0"
] | null | null | null | simulation_ws/src/rl-agent/markov/environments/mars_env.py | ToxaIvchenko/AWS-JPL-OSR-Challenge-1 | 6acb5603d7300ac500fea98df6fcff08a386e0dd | [
"Apache-2.0"
] | null | null | null | simulation_ws/src/rl-agent/markov/environments/mars_env.py | ToxaIvchenko/AWS-JPL-OSR-Challenge-1 | 6acb5603d7300ac500fea98df6fcff08a386e0dd | [
"Apache-2.0"
] | null | null | null | from __future__ import print_function
import time
import boto3
import gym
import numpy as np
from gym import spaces
import PIL
from PIL import Image
import os
import random
import math
import sys
import rospy
from nav_msgs.msg import Odometry
from geometry_msgs.msg import Twist, Pose, Quaternion
from gazebo_msgs.srv import SetModelState, SetModelConfiguration
from gazebo_msgs.msg import ModelState, ContactsState
from sensor_msgs.msg import Image as sensor_image
from sensor_msgs.msg import LaserScan, Imu
from geometry_msgs.msg import Point
from std_msgs.msg import Float64
from std_msgs.msg import String
from PIL import Image
import queue
VERSION = "0.0.1"

# Camera frame size used as the observation space.
TRAINING_IMAGE_WIDTH = 160
TRAINING_IMAGE_HEIGHT = 120
TRAINING_IMAGE_SIZE = (TRAINING_IMAGE_WIDTH, TRAINING_IMAGE_HEIGHT)

LIDAR_SCAN_MAX_DISTANCE = 4.5  # Max distance Lidar scanner can measure
CRASH_DISTANCE = 0.8  # Min distance to obstacle (The LIDAR is in the center of the 1M Rover)

# Size of the image queue buffer, we want this to be one so that we consume 1 image
# at a time, but may want to change this as we add more algorithms
IMG_QUEUE_BUF_SIZE = 1

# Prevent unknown "stuck" scenarios with a kill switch (MAX_STEPS)
MAX_STEPS = 2000

# Destination Point
CHECKPOINT_X = -44.25
CHECKPOINT_Y = -4

# Initial position of the robot
INITIAL_POS_X = -0.170505086911
INITIAL_POS_Y = 0.114341186761
INITIAL_POS_Z = -0.0418765865136

INITIAL_ORIENT_X = 0.0135099011407
INITIAL_ORIENT_Y = 0.040927747122
INITIAL_ORIENT_Z = 0.0365547169101
INITIAL_ORIENT_W = 0.998401800258

# Initial straight-line distance to the checkpoint. math.hypot is the
# canonical Euclidean distance; sqrt of a sum of squares is never negative,
# so the previous abs() wrapper was redundant.
INITIAL_DISTANCE_TO_CHECKPOINT = math.hypot(CHECKPOINT_X - INITIAL_POS_X,
                                            CHECKPOINT_Y - INITIAL_POS_Y)

# SLEEP INTERVALS - a buffer to give Gazebo, RoS and the rl_agent to sync.
SLEEP_AFTER_RESET_TIME_IN_SECOND = 0.3
SLEEP_BETWEEN_ACTION_AND_REWARD_CALCULATION_TIME_IN_SECOND = 0.3  # LIDAR Scan is 5 FPS (0.2sec).
SLEEP_WAITING_FOR_IMAGE_TIME_IN_SECOND = 0.01
class MarsEnv(gym.Env):
    def __init__(self):
        """Initialize episodic state, Gym spaces, and all ROS publishers/subscribers."""
        self.x = INITIAL_POS_X # Current position of Rover
        self.y = INITIAL_POS_Y # Current position of Rover
        self.last_position_x = INITIAL_POS_X # Previous position of Rover
        self.last_position_y = INITIAL_POS_Y # Previous position of Rover
        #self.orientation = None
        self.aws_region = os.environ.get("AWS_REGION", "us-east-1") # Region for CloudWatch Metrics
        self.reward_in_episode = 0 # Global episodic reward variable
        self.steps = 0 # Global episodic step counter
        self.collision_threshold = sys.maxsize # current collision distance
        self.last_collision_threshold = sys.maxsize # previous collision distance
        self.collision = False # Episodic collision detector
        self.distance_travelled = 0 # Global episodic distance counter
        self.current_distance_to_checkpoint = INITIAL_DISTANCE_TO_CHECKPOINT # current distance to checkpoint
        self.last_distance_to_checkpoint = INITIAL_DISTANCE_TO_CHECKPOINT # distance at the previous step
        self.distance = 0 # Distance traveled since last step
        self.closer_to_checkpoint = False # Was last step closer to checkpoint?
        self.state = None # Observation space
        self.steering = 0
        self.throttle = 0
        self.power_supply_range = MAX_STEPS # Kill switch (power supply)

        # Imu Sensor readings (peak linear accelerations observed this episode)
        self.max_lin_accel_x = 0
        self.max_lin_accel_y = 0
        self.max_lin_accel_z = 0

        self.reached_waypoint_1 = False
        self.reached_waypoint_2 = False
        self.reached_waypoint_3 = False

        # action space -> steering angle, throttle
        self.action_space = spaces.Box(low=np.array([-1, 0]), high=np.array([+1, +3]), dtype=np.float32)

        # Create the observation space (raw RGB camera frames)
        self.observation_space = spaces.Box(low=0, high=255,
                                            shape=(TRAINING_IMAGE_SIZE[1], TRAINING_IMAGE_SIZE[0], 3),
                                            dtype=np.uint8)

        # Bounded queue of camera frames; consumed one at a time by set_next_state()
        self.image_queue = queue.Queue(IMG_QUEUE_BUF_SIZE)

        # ROS initialization
        self.ack_publisher = rospy.Publisher('/cmd_vel', Twist, queue_size=100)

        # ROS Subscriptions
        self.current_position_pub = rospy.Publisher('/current_position', Point, queue_size=3)
        self.distance_travelled_pub = rospy.Publisher('/distance_travelled', String, queue_size=3)

        # ################################################################################

        # Gazebo model state services (used by rover_reset)
        self.gazebo_model_state_service = rospy.ServiceProxy('/gazebo/set_model_state', SetModelState)
        self.gazebo_model_configuration_service = rospy.ServiceProxy('/gazebo/set_model_configuration', SetModelConfiguration)

        rospy.init_node('rl_coach', anonymous=True)

        # Subscribe to ROS topics and register callbacks
        rospy.Subscriber('/odom', Odometry, self.callback_pose)
        rospy.Subscriber('/scan', LaserScan, self.callback_scan)
        rospy.Subscriber('/robot_bumper', ContactsState, self.callback_collision)
        rospy.Subscriber('/camera/image_raw', sensor_image, self.callback_image)

        # IMU Sensors
        rospy.Subscriber('/imu/wheel_lb', Imu, self.callback_wheel_lb)
'''
DO NOT EDIT - Function called by rl_coach to instruct the agent to take an action
'''
def step(self, action):
# initialize rewards, next_state, done
self.reward = None
self.done = False
self.next_state = None
steering = float(action[0])
throttle = float(action[1])
self.steps += 1
self.send_action(steering, throttle)
time.sleep(SLEEP_BETWEEN_ACTION_AND_REWARD_CALCULATION_TIME_IN_SECOND)
self.call_reward_function(action)
info = {} # additional data, not to be used for training
return self.next_state, self.reward, self.done, info
'''
DO NOT EDIT - Function called at the conclusion of each episode to reset episodic values
'''
    def reset(self):
        """Gym reset hook: publish episode metrics, re-zero state, and return the first observation."""
        print('Total Episodic Reward=%.2f' % self.reward_in_episode,
              'Total Episodic Steps=%.2f' % self.steps)
        self.send_reward_to_cloudwatch(self.reward_in_episode)

        # Reset global episodic values
        self.reward = None
        self.done = False
        self.next_state = None
        self.ranges= None

        self.send_action(0, 0) # set the throttle to 0
        self.rover_reset()
        # Populates self.next_state with the starting observation
        self.call_reward_function([0, 0])

        return self.next_state
'''
DO NOT EDIT - Function called to send the agent's chosen action to the simulator (Gazebo)
'''
    def send_action(self, steering, throttle):
        """Publish a Twist command: linear.x carries throttle, angular.z carries steering."""
        speed = Twist()
        speed.linear.x = throttle
        speed.angular.z = steering
        self.ack_publisher.publish(speed)
'''
DO NOT EDIT - Function to reset the rover to the starting point in the world
'''
    def rover_reset(self):
        """Teleport the rover back to its start pose, zero every joint, and reset episodic counters."""
        # Reset Rover-related Episodic variables
        rospy.wait_for_service('gazebo/set_model_state')

        self.x = INITIAL_POS_X
        self.y = INITIAL_POS_Y

        # Put the Rover at the initial position
        model_state = ModelState()
        model_state.pose.position.x = INITIAL_POS_X
        model_state.pose.position.y = INITIAL_POS_Y
        model_state.pose.position.z = INITIAL_POS_Z
        model_state.pose.orientation.x = INITIAL_ORIENT_X
        model_state.pose.orientation.y = INITIAL_ORIENT_Y
        model_state.pose.orientation.z = INITIAL_ORIENT_Z
        model_state.pose.orientation.w = INITIAL_ORIENT_W
        # Zero out any residual velocity so the rover starts at rest
        model_state.twist.linear.x = 0
        model_state.twist.linear.y = 0
        model_state.twist.linear.z = 0
        model_state.twist.angular.x = 0
        model_state.twist.angular.y = 0
        model_state.twist.angular.z = 0
        model_state.model_name = 'rover'

        # List of joints to reset (this is all of them)
        joint_names_list = ["rocker_left_corner_lb",
                            "rocker_right_corner_rb",
                            "body_rocker_left",
                            "body_rocker_right",
                            "rocker_right_bogie_right",
                            "rocker_left_bogie_left",
                            "bogie_left_corner_lf",
                            "bogie_right_corner_rf",
                            "corner_lf_wheel_lf",
                            "imu_wheel_lf_joint",
                            "bogie_left_wheel_lm",
                            "imu_wheel_lm_joint",
                            "corner_lb_wheel_lb",
                            "imu_wheel_lb_joint",
                            "corner_rf_wheel_rf",
                            "imu_wheel_rf_joint",
                            "bogie_right_wheel_rm",
                            "imu_wheel_rm_joint",
                            "corner_rb_wheel_rb",
                            "imu_wheel_rb_joint"]

        # Angle to reset joints to
        joint_positions_list = [0 for _ in range(len(joint_names_list))]

        self.gazebo_model_state_service(model_state)
        self.gazebo_model_configuration_service(model_name='rover', urdf_param_name='rover_description', joint_names=joint_names_list, joint_positions=joint_positions_list)

        self.last_collision_threshold = sys.maxsize
        self.last_position_x = self.x
        self.last_position_y = self.y
        # Let Gazebo settle before resuming
        time.sleep(SLEEP_AFTER_RESET_TIME_IN_SECOND)

        # Re-zero episodic counters and flags
        self.distance_travelled = 0
        self.current_distance_to_checkpoint = INITIAL_DISTANCE_TO_CHECKPOINT
        self.steps = 0
        self.reward_in_episode = 0
        self.collision = False
        self.closer_to_checkpoint = False
        self.power_supply_range = MAX_STEPS
        self.reached_waypoint_1 = False
        self.reached_waypoint_2 = False
        self.reached_waypoint_3 = False
        self.max_lin_accel_x = 0
        self.max_lin_accel_y = 0
        self.max_lin_accel_z = 0

        # First clear the queue so that we set the state to the start image
        _ = self.image_queue.get(block=True, timeout=None)
        self.set_next_state()
'''
DO NOT EDIT - Function to find the distance between the rover and nearest object within 4.5M via LIDAR
'''
def get_distance_to_object(self):
while not self.ranges:
time.sleep(SLEEP_WAITING_FOR_IMAGE_TIME_IN_SECOND)
size = len(self.ranges)
x = np.linspace(0, size - 1, 360)
xp = np.arange(size)
val = np.clip(np.interp(x, xp, self.ranges), 0, LIDAR_SCAN_MAX_DISTANCE)
val[np.isnan(val)] = LIDAR_SCAN_MAX_DISTANCE
# Find min distance
self.collision_threshold = np.amin(val)
'''
DO NOT EDIT - Function to resize the image from the camera and set observation_space
'''
    def set_next_state(self):
        """Consume one camera frame from the queue; store it resized in self.next_state."""
        try:
            # Make sure the first image is the starting image
            image_data = self.image_queue.get(block=True, timeout=None)
            # Read the image and resize to get the state
            image = Image.frombytes('RGB', (image_data.width, image_data.height), image_data.data, 'raw', 'RGB', 0, 1)
            # NOTE(review): Image.ANTIALIAS is deprecated/removed in newer
            # Pillow releases (Image.LANCZOS is the replacement) — confirm
            # the pinned Pillow version for this simulator.
            image = image.resize((TRAINING_IMAGE_WIDTH,TRAINING_IMAGE_HEIGHT), PIL.Image.ANTIALIAS)
            # TODO - can we crop this image to get additional savings?
            self.next_state = np.array(image)
        except Exception as err:
            print("Error!::set_next_state:: {}".format(err))
'''
DO NOT EDIT - Reward Function buffer
'''
    def call_reward_function(self, action):
        """Refresh sensor state, compute the step reward/done, and log step telemetry."""
        self.get_distance_to_object() #<-- Also evaluate for sideswipe and collistion damage

        # Get the observation
        self.set_next_state()

        # reduce power supply range
        self.power_supply_range = MAX_STEPS - self.steps

        # calculate reward
        reward, done = self.reward_function()

        # Accumulate reward for the episode
        self.reward_in_episode += reward

        # Get average Imu reading (of the peak accelerations seen so far)
        if self.max_lin_accel_x > 0 or self.max_lin_accel_y > 0 or self.max_lin_accel_z > 0:
            avg_imu = (self.max_lin_accel_x + self.max_lin_accel_y + self.max_lin_accel_z) / 3
        else:
            avg_imu = 0

        print('Step:%.2f' % self.steps,
              'Steering:%f' % action[0],
              'R:%.2f' % reward,                                      # Reward
              'DTCP:%f' % self.current_distance_to_checkpoint,        # Distance to Check Point
              'DT:%f' % self.distance_travelled,                      # Distance Travelled
              'CT:%.2f' % self.collision_threshold,                   # Collision Threshold
              'CTCP:%f' % self.closer_to_checkpoint,                  # Is closer to checkpoint
              'PSR: %f' % self.power_supply_range,                    # Steps remaining in Episode
              'IMU: %f' % avg_imu)

        self.reward = reward
        self.done = done

        self.last_position_x = self.x
        self.last_position_y = self.y
'''
EDIT - but do not change the function signature.
Must return a reward value as a float
Must return a boolean value indicating if episode is complete
Must be returned in order of reward, done
'''
def reward_function(self):
'''
:return: reward as float
done as boolean
'''
# Corner boundaries of the world (in Meters)
STAGE_X_MIN = -44.0
STAGE_Y_MIN = -25.0
STAGE_X_MAX = 15.0
STAGE_Y_MAX = 22.0
GUIDERAILS_X_MIN = -46
GUIDERAILS_X_MAX = 1
GUIDERAILS_Y_MIN = -14
GUIDERAILS_Y_MAX = 8
# WayPoints to checkpoint
'''
WAYPOINT_1_X = -10
WAYPOINT_1_Y = -4
WAYPOINT_2_X = -17
WAYPOINT_2_Y = 3
WAYPOINT_3_X = -34
WAYPOINT_3_Y = 3
# REWARD Multipliers
FINISHED_REWARD = 10000
WAYPOINT_1_REWARD = 1000
WAYPOINT_2_REWARD = 2000
WAYPOINT_3_REWARD = 3000
base_reward = 2
multiplier = 0
done = False
'''
reward = 0
if self.steps > 0:
# Check for episode ending events first
# ###########################################
# Has LIDAR registered a hit
if self.collision_threshold <= CRASH_DISTANCE:
print("Rover has sustained sideswipe damage")
self.last_distance_to_checkpoint = INITIAL_DISTANCE_TO_CHECKPOINT
self.distance = 0
return 0, True # No reward
# Have the gravity sensors registered too much G-force
if self.collision:
print("Rover has collided with an object")
self.last_distance_to_checkpoint = INITIAL_DISTANCE_TO_CHECKPOINT
self.distance = 0
return 0, True # No reward
# Has the rover reached the max steps
if self.power_supply_range < 1:
print("Rover's power supply has been drained (MAX Steps reached")
self.last_distance_to_checkpoint = INITIAL_DISTANCE_TO_CHECKPOINT
self.distance = 0
return 0, True # No reward
# Has the Rover reached the destination
if math.hypot(self.last_position_x - CHECKPOINT_X, self.last_position_y - CHECKPOINT_Y) <= 1.0:
print("Congratulations! The rover has reached the checkpoint!")
reward = 10000 * INITIAL_DISTANCE_TO_CHECKPOINT / self.steps # <-- incentivize to reach checkpoint in fewest steps
self.last_distance_to_checkpoint = INITIAL_DISTANCE_TO_CHECKPOINT
self.distance = 0
return reward, True
# If it has not reached the check point is it still on the map?
if self.x < (GUIDERAILS_X_MIN - .45) or self.x > (GUIDERAILS_X_MAX + .45):
print("Rover has left the mission map!")
self.last_distance_to_checkpoint = INITIAL_DISTANCE_TO_CHECKPOINT
self.distance = 0
return 0, True
if self.y < (GUIDERAILS_Y_MIN - .45) or self.y > (GUIDERAILS_Y_MAX + .45):
print("Rover has left the mission map!")
self.last_distance_to_checkpoint = INITIAL_DISTANCE_TO_CHECKPOINT
self.distance = 0
return 0, True
# Distance travelled since last step
self.distance = math.hypot(self.x - self.last_position_x, self.y - self.last_position_y)
# Got stuck because of unknown reason
if self.steps > 10 and self.distance < 0.02:
print("Rover got stuck")
self.last_distance_to_checkpoint = INITIAL_DISTANCE_TO_CHECKPOINT
self.distance = 0
return 0, True
# Distance to objects: (1 - 0), higher is better
x0 = self.collision_threshold / LIDAR_SCAN_MAX_DISTANCE
# Power left: (1 - 0), higher is better
x1 = self.power_supply_range / MAX_STEPS
# Direction and optimal trajectory: (1 - -1), higher is better
x2 = (self.last_distance_to_checkpoint - self.current_distance_to_checkpoint ) / self.distance
# Distance to checkpoint multiplier (closer to checkpoint - higher ratio)
x3 = INITIAL_DISTANCE_TO_CHECKPOINT / ( self.current_distance_to_checkpoint + 1 )
# Reversed average maximum IMU acceleration, higher is better
x4 = 30 / (self.max_lin_accel_x + self.max_lin_accel_y + self.max_lin_accel_z)
reward = x0 * x1 * x2 * x3 * x4
self.last_distance_to_checkpoint = self.current_distance_to_checkpoint
return reward, False
'''
DO NOT EDIT - Function to receive LIDAR data from a ROSTopic
'''
    def callback_scan(self, data):
        """Cache the latest LaserScan ranges for get_distance_to_object()."""
        self.ranges = data.ranges
'''
DO NOT EDIT - Function to receive image data from the camera RoSTopic
'''
    def callback_image(self, data):
        """Push the latest camera frame onto the bounded queue; drop it if the queue is full."""
        try:
            self.image_queue.put_nowait(data)
        except queue.Full:
            # An unconsumed frame is already queued; discard this one.
            pass
        except Exception as ex:
            print("Error! {}".format(ex))
'''
DO NOT EDIT - Function to receive IMU data from the Rover wheels
'''
def callback_wheel_lb(self, data):
lin_accel_x = data.linear_acceleration.x
lin_accel_y = data.linear_acceleration.y
lin_accel_z = data.linear_acceleration.z
if lin_accel_x > self.max_lin_accel_x:
self.max_lin_accel_x = lin_accel_x
if lin_accel_y > self.max_lin_accel_y:
self.max_lin_accel_y = lin_accel_y
if lin_accel_z > self.max_lin_accel_z:
self.max_lin_accel_z = lin_accel_z
'''
DO NOT EDIT - Function to receive Position/Orientation data from a ROSTopic
'''
def callback_pose(self, data):
#self.orientation = data.pose.pose.orientation
self.linear_trajectory = data.twist.twist.linear
self.angular_trajectory = data.twist.twist.angular
new_position = data.pose.pose.position
p = Point(new_position.x, new_position.y, new_position.z)
# Publish current position
self.current_position_pub.publish(p)
# Calculate total distance travelled
dist = math.hypot(new_position.x - self.x, new_position.y - self.y)
self.distance_travelled += dist
# Calculate the distance to checkpoint
new_distance_to_checkpoint = Float64
new_distance_to_checkpoint.data = abs(math.sqrt(((new_position.x - CHECKPOINT_X) ** 2) +
(new_position.y - CHECKPOINT_Y) ** 2))
if new_distance_to_checkpoint.data < self.current_distance_to_checkpoint:
self.closer_to_checkpoint = True
else:
self.closer_to_checkpoint = False
# Update the distance to checkpoint
self.current_distance_to_checkpoint = new_distance_to_checkpoint.data
# update the current position
self.x = new_position.x
self.y = new_position.y
'''
DO NOT EDIT - Function to receive Collision data from a ROSTopic
'''
def callback_collision(self, data):
# Listen for a collision with anything in the environment
collsion_states = data.states
if len(collsion_states) > 0:
self.collision = True
'''
DO NOT EDIT - Function to wrote episodic rewards to CloudWatch
'''
    def send_reward_to_cloudwatch(self, reward):
        """Publish episodic reward, step count, and checkpoint distance as CloudWatch metrics."""
        try:
            session = boto3.session.Session()
            cloudwatch_client = session.client('cloudwatch', region_name=self.aws_region)
            cloudwatch_client.put_metric_data(
                MetricData=[
                    {
                        'MetricName': 'Episode_Reward',
                        'Unit': 'None',
                        'Value': reward
                    },
                    {
                        'MetricName': 'Episode_Steps',
                        'Unit': 'None',
                        'Value': self.steps,
                    },
                    {
                        'MetricName': 'DistanceToCheckpoint',
                        'Unit': 'None',
                        'Value': self.current_distance_to_checkpoint
                    }
                ],
                Namespace='AWS_NASA_JPL_OSR_Challenge'
            )
        except Exception as err:
            # Metrics are best-effort; never kill training over a telemetry failure.
            print("Error in the send_reward_to_cloudwatch function: {}".format(err))
'''
DO NOT EDIT - Inheritance class to convert discrete actions to continuous actions
'''
class MarsDiscreteEnv(MarsEnv):
    """Discrete-action wrapper around MarsEnv: left / right / straight at full throttle."""

    # (steering, throttle) pairs keyed by discrete action index.
    _ACTION_MAP = {
        0: (1.0, 3.00),   # turn left
        1: (-1.0, 3.00),  # turn right
        2: (0, 3.00),     # straight
    }

    def __init__(self):
        MarsEnv.__init__(self)
        print("New Martian Gym environment created...")

        # actions -> straight, left, right
        self.action_space = spaces.Discrete(3)

    def step(self, action):
        """Translate a discrete action into the continuous [steering, throttle] pair."""
        if action not in self._ACTION_MAP:  # should not be here
            raise ValueError("Invalid action")
        steering, throttle = self._ACTION_MAP[action]
        return super().step([steering, throttle])
| 38.083871 | 172 | 0.590505 |
7959b6c0b54d56922b9fa277d445fc729455b3d1 | 24,234 | py | Python | plasmapy/particles/ionization_state.py | cacsphysics/PlasmaPy | fbf7f2654e27f14fe696048773c9cae3b377ca3a | [
"MIT",
"BSD-2-Clause-Patent",
"BSD-2-Clause",
"BSD-3-Clause"
] | null | null | null | plasmapy/particles/ionization_state.py | cacsphysics/PlasmaPy | fbf7f2654e27f14fe696048773c9cae3b377ca3a | [
"MIT",
"BSD-2-Clause-Patent",
"BSD-2-Clause",
"BSD-3-Clause"
] | null | null | null | plasmapy/particles/ionization_state.py | cacsphysics/PlasmaPy | fbf7f2654e27f14fe696048773c9cae3b377ca3a | [
"MIT",
"BSD-2-Clause-Patent",
"BSD-2-Clause",
"BSD-3-Clause"
] | null | null | null | """
Objects for storing ionization state data for a single element or for
a single ionization level.
"""
__all__ = ["IonizationState", "State"]
import astropy.units as u
import collections
import numpy as np
import warnings
from numbers import Integral, Real
from typing import List, Optional, Union
from plasmapy.particles.decorators import particle_input
from plasmapy.particles.exceptions import AtomicError, ChargeError, InvalidParticleError
from plasmapy.particles.particle_class import Particle
from plasmapy.utils.decorators import validate_quantities
# Shared error-message text for number-density validation.
_number_density_errmsg = (
    "Number densities must be Quantity objects with units of inverse volume."
)

# TODO: Change `State` into a class with validations for all of the
# TODO: attributes.

#: Named tuple class for representing an ionization state (`collections.namedtuple`).
State = collections.namedtuple(
    "State",
    ["integer_charge", "ionic_fraction", "ionic_symbol", "number_density"],
)
class IonizationState:
"""
Representation of the ionization state distribution of a single
element or isotope.
Parameters
----------
particle: str, integer, or ~plasmapy.particles.Particle
A `str` or `~plasmapy.particles.Particle` instance representing
an element or isotope, or an integer representing the atomic
number of an element.
ionic_fractions: ~numpy.ndarray, list, tuple, or ~astropy.units.Quantity; optional
The ionization fractions of an element, where the indices
correspond to integer charge. This argument should contain the
atomic number plus one items, and must sum to one within an
absolute tolerance of ``tol`` if dimensionless. Alternatively,
this argument may be a `~astropy.units.Quantity` that represents
the number densities of each neutral/ion.
T_e: ~astropy.units.Quantity, keyword-only, optional
The electron temperature or thermal energy per particle.
n_elem: ~astropy.units.Quantity, keyword-only, optional
The number density of the element, including neutrals and all
ions.
tol: float or integer, keyword-only, optional
The absolute tolerance used by `~numpy.isclose` when testing
normalizations and making comparisons. Defaults to ``1e-15``.
Raises
------
~plasmapy.utils.AtomicError
If the ionic fractions are not normalized or contain invalid
values, or if number density information is provided through
both ``ionic_fractions`` and ``n_elem``.
~plasmapy.utils.InvalidParticleError
If the particle is invalid.
Examples
--------
>>> states = IonizationState('H', [0.6, 0.4], n_elem=1*u.cm**-3, T_e=11000*u.K)
>>> states.ionic_fractions[0] # fraction of hydrogen that is neutral
0.6
>>> states.ionic_fractions[1] # fraction of hydrogen that is ionized
0.4
>>> states.n_e # electron number density
<Quantity 400000. 1 / m3>
>>> states.n_elem # element number density
<Quantity 1000000. 1 / m3>
Notes
-----
Calculation of collisional ionization equilibrium has not yet been
implemented.
"""
# TODO: Allow this class to (optionally?) handle negatively charged
# TODO: ions. There are instances where singly negatively charged
# TODO: ions are important in astrophysical plasmas, such as H- in
# TODO: the atmospheres of relatively cool stars. There may be some
# TODO: rare situations where doubly negatively charged ions show up
# TODO: too, but triply negatively charged ions are very unlikely.
# TODO: Add in functionality to find equilibrium ionization states.
    @validate_quantities(T_e={"equivalencies": u.temperature_energy()})
    @particle_input(require="element", exclude="ion")
    def __init__(
        self,
        particle: Particle,
        ionic_fractions=None,
        *,
        T_e: u.K = np.nan * u.K,
        kappa: Real = np.inf,
        n_elem: u.m ** -3 = np.nan * u.m ** -3,
        tol: Union[float, int] = 1e-15,
    ):
        """Initialize an `~plasmapy.particles.IonizationState` instance."""
        self._particle_instance = particle

        try:
            # Property setters below perform all validation.
            self.tol = tol
            self.T_e = T_e
            self.kappa = kappa

            # The element density may come either from n_elem or from
            # passing ionic_fractions as number densities — not both.
            if (
                not np.isnan(n_elem)
                and isinstance(ionic_fractions, u.Quantity)
                and ionic_fractions.si.unit == u.m ** -3
            ):
                raise AtomicError(
                    "Cannot simultaneously provide number density "
                    "through both n_elem and ionic_fractions."
                )

            self.n_elem = n_elem
            self.ionic_fractions = ionic_fractions

            if ionic_fractions is None and not np.isnan(self.T_e):
                warnings.warn(
                    "Collisional ionization equilibration has not yet "
                    "been implemented in IonizationState; cannot set "
                    "ionic fractions."
                )

        except Exception as exc:
            raise AtomicError(
                f"Unable to create IonizationState instance for "
                f"{particle.particle}."
            ) from exc
def __str__(self) -> str:
return f"<IonizationState instance for {self.base_particle}>"
def __repr__(self) -> str:
return self.__str__()
    def __getitem__(self, value) -> State:
        """
        Return information for a single ionization level.

        ``value`` may be an integer charge (0..atomic_number) or anything
        convertible to a `Particle` of the same element/isotope that
        carries charge information.
        """
        if isinstance(value, slice):
            raise TypeError("IonizationState instances cannot be sliced.")

        if isinstance(value, Integral) and 0 <= value <= self.atomic_number:
            # Fast path: direct lookup by integer charge.
            result = State(
                value,
                self.ionic_fractions[value],
                self.ionic_symbols[value],
                self.number_densities[value],
            )
        else:
            if not isinstance(value, Particle):
                try:
                    value = Particle(value)
                except InvalidParticleError as exc:
                    raise InvalidParticleError(
                        f"{value} is not a valid integer charge or " f"particle."
                    ) from exc

            # The particle must match this element/isotope and have a charge.
            same_element = value.element == self.element
            same_isotope = value.isotope == self.isotope
            has_charge_info = value.is_category(any_of=["charged", "uncharged"])

            if same_element and same_isotope and has_charge_info:
                Z = value.integer_charge
                result = State(
                    Z,
                    self.ionic_fractions[Z],
                    self.ionic_symbols[Z],
                    self.number_densities[Z],
                )
            else:
                if not same_element or not same_isotope:
                    raise AtomicError("Inconsistent element or isotope.")
                elif not has_charge_info:
                    raise ChargeError("No integer charge provided.")
        return result
    def __setitem__(self, key, value):
        """Disallow per-level assignment; fractions must be set together (normalization)."""
        raise NotImplementedError(
            "Item assignment of an IonizationState instance is not "
            "allowed because the ionic fractions for different "
            "ionization levels must be set simultaneously due to the "
            "normalization constraint."
        )
    def __iter__(self):
        """Initialize an instance prior to iteration."""
        # NOTE(review): iteration state lives on the instance itself, so
        # two concurrent iterations over the same object would interfere.
        self._charge_index = 0
        return self

    def __next__(self):
        """
        Return a `~plasmapy.particles.State` instance that contains
        information about a particular ionization level.
        """
        if self._charge_index <= self.atomic_number:
            result = State(
                self._charge_index,
                self._ionic_fractions[self._charge_index],
                self.ionic_symbols[self._charge_index],
                self.number_densities[self._charge_index],
            )
            self._charge_index += 1
            return result
        else:
            # Clean up the iteration cursor once exhausted.
            del self._charge_index
            raise StopIteration
    def __eq__(self, other):
        """
        Return `True` if the ionic fractions, number density scaling
        factor (if set), and electron temperature (if set) are all
        equal, and `False` otherwise.

        Raises
        ------
        TypeError
            If ``other`` is not an `~plasmapy.particles.IonizationState`
            instance.

        AtomicError
            If ``other`` corresponds to a different element or isotope.

        Examples
        --------
        >>> IonizationState('H', [1, 0], tol=1e-6) == IonizationState('H', [1, 1e-6], tol=1e-6)
        True
        >>> IonizationState('H', [1, 0], tol=1e-8) == IonizationState('H', [1, 1e-6], tol=1e-5)
        False

        """
        if not isinstance(other, IonizationState):
            raise TypeError(
                "An instance of the IonizationState class may only be "
                "compared with another IonizationState instance."
            )

        same_element = self.element == other.element
        same_isotope = self.isotope == other.isotope

        if not same_element or not same_isotope:
            raise AtomicError(
                "An instance of the IonizationState class may only be "
                "compared with another IonizationState instance if "
                "both correspond to the same element and/or isotope."
            )

        # Use the tighter of the two tolerances. For thermodynamic
        # quantities, use it as a relative tolerance because the values
        # may substantially depart from order unity.
        min_tol = np.min([self.tol, other.tol])

        # NaN temperatures/densities on both sides count as "equal" (unset).
        same_T_e = (
            np.isnan(self.T_e)
            and np.isnan(other.T_e)
            or u.allclose(self.T_e, other.T_e, rtol=min_tol * u.K, atol=0 * u.K)
        )

        same_n_elem = (
            np.isnan(self.n_elem)
            and np.isnan(other.n_elem)
            or u.allclose(
                self.n_elem, other.n_elem, rtol=min_tol * u.m ** -3, atol=0 * u.m ** -3
            )
        )

        # For the next line, recall that np.nan == np.nan is False (sigh)
        same_fractions = np.any(
            [
                np.allclose(
                    self.ionic_fractions, other.ionic_fractions, rtol=0, atol=min_tol
                ),
                np.all(np.isnan(self.ionic_fractions))
                and np.all(np.isnan(other.ionic_fractions)),
            ]
        )

        return np.all(
            [same_element, same_isotope, same_T_e, same_n_elem, same_fractions]
        )
    @property
    def ionic_fractions(self) -> np.ndarray:
        """
        Return the ionic fractions, where the index corresponds to
        the integer charge.

        Examples
        --------
        >>> hydrogen_states = IonizationState('H', [0.9, 0.1])
        >>> hydrogen_states.ionic_fractions
        array([0.9, 0.1])

        """
        return self._ionic_fractions
    @ionic_fractions.setter
    def ionic_fractions(self, fractions):
        """
        Set the ionic fractions, while checking that the new values are
        valid and normalized to one.

        Accepts either dimensionless fractions (must sum to one within
        ``self.tol``) or number densities (a `~astropy.units.Quantity`
        convertible to m**-3), in which case ``n_elem`` is derived too.
        """
        if fractions is None or np.all(np.isnan(fractions)):
            # Unset: fill with NaNs, one slot per charge state (0..Z).
            self._ionic_fractions = np.full(
                self.atomic_number + 1, np.nan, dtype=np.float64
            )
            return

        try:
            if np.min(fractions) < 0:
                raise AtomicError("Cannot have negative ionic fractions.")

            if len(fractions) != self.atomic_number + 1:
                raise AtomicError(
                    "The length of ionic_fractions must be "
                    f"{self.atomic_number + 1}."
                )

            if isinstance(fractions, u.Quantity):
                # Number densities were supplied; normalize to fractions.
                fractions = fractions.to(u.m ** -3)
                self.n_elem = np.sum(fractions)
                self._ionic_fractions = np.array(fractions / self.n_elem)
            else:
                fractions = np.array(fractions, dtype=np.float64)
                sum_of_fractions = np.sum(fractions)
                all_nans = np.all(np.isnan(fractions))

                if not all_nans:
                    if np.any(fractions < 0) or np.any(fractions > 1):
                        raise AtomicError("Ionic fractions must be between 0 and 1.")

                    if not np.isclose(sum_of_fractions, 1, rtol=0, atol=self.tol):
                        raise AtomicError("Ionic fractions must sum to one.")

                self._ionic_fractions = fractions

        except Exception as exc:
            raise AtomicError(
                f"Unable to set ionic fractions of {self.element} " f"to {fractions}."
            ) from exc
def _is_normalized(self, tol: Optional[Real] = None) -> bool:
"""
Return `True` if the sum of the ionization fractions is equal to
one within the allowed tolerance, and `False` otherwise.
"""
tol = tol if tol is not None else self.tol
if not isinstance(tol, Real):
raise TypeError("tol must be an int or float.")
if not 0 <= tol < 1:
raise ValueError("Need 0 <= tol < 1.")
total = np.sum(self._ionic_fractions)
return np.isclose(total, 1, atol=tol, rtol=0)
def normalize(self) -> None:
"""
Normalize the ionization state distribution (if set) so that the
sum becomes equal to one.
"""
self._ionic_fractions = self._ionic_fractions / np.sum(self._ionic_fractions)
    @property
    def equil_ionic_fractions(self, T_e: u.K = None):
        """
        Return the equilibrium ionic fractions for temperature ``T_e``
        or the temperature set in the IonizationState instance. Not
        implemented.
        """
        # NOTE(review): a property getter cannot receive the extra ``T_e``
        # argument, so callers can never pass it — revisit the signature
        # when this is implemented.
        raise NotImplementedError
    @validate_quantities(equivalencies=u.temperature_energy())
    def equilibrate(self, T_e: u.K = np.nan * u.K):
        """
        Set the ionic fractions to collisional ionization equilibrium
        for temperature ``T_e``. Not implemented.
        """
        # self.ionic_fractions = self.equil_ionic_fractions
        raise NotImplementedError
    @property
    @validate_quantities
    def n_e(self) -> u.m ** -3:
        """
        Return the electron number density assuming a single species
        plasma (sum over charge states of density times charge).
        """
        return np.sum(self._n_elem * self.ionic_fractions * self.integer_charges)
    @property
    @validate_quantities
    def n_elem(self) -> u.m ** -3:
        """Return the total number density of neutrals and all ions."""
        return self._n_elem.to(u.m ** -3)

    @n_elem.setter
    @validate_quantities
    def n_elem(self, value: u.m ** -3):
        """Set the number density of neutrals and all ions."""
        if value < 0 * u.m ** -3:
            raise AtomicError
        # NOTE(review): a value of exactly 0 m**-3 falls through every
        # branch below and leaves ``_n_elem`` unset — confirm whether zero
        # should raise instead.
        if 0 * u.m ** -3 < value <= np.inf * u.m ** -3:
            self._n_elem = value.to(u.m ** -3)
        elif np.isnan(value):
            self._n_elem = np.nan * u.m ** -3
    @property
    @validate_quantities
    def number_densities(self) -> u.m ** -3:
        """Return the number densities for each state."""
        try:
            return (self.n_elem * self.ionic_fractions).to(u.m ** -3)
        except Exception:
            # No density information is available; return all-NaN densities.
            return np.full(self.atomic_number + 1, np.nan) * u.m ** -3

    @number_densities.setter
    @validate_quantities
    def number_densities(self, value: u.m ** -3):
        """Set the number densities for each state."""
        if np.any(value.value < 0):
            raise AtomicError("Number densities cannot be negative.")
        if len(value) != self.atomic_number + 1:
            raise AtomicError(
                f"Incorrect number of charge states for " f"{self.base_particle}"
            )
        value = value.to(u.m ** -3)

        # Split the densities into a total density and fractional abundances.
        self._n_elem = value.sum()
        self._ionic_fractions = value / self._n_elem
    @property
    @validate_quantities(equivalencies=u.temperature_energy())
    def T_e(self) -> u.K:
        """Return the electron temperature (kelvin)."""
        if self._T_e is None:
            raise AtomicError("No electron temperature has been specified.")
        return self._T_e.to(u.K, equivalencies=u.temperature_energy())

    @T_e.setter
    @validate_quantities(equivalencies=u.temperature_energy())
    def T_e(self, value: u.K):
        """Set the electron temperature (accepts thermal energy per particle too)."""
        try:
            value = value.to(u.K, equivalencies=u.temperature_energy())
        except (AttributeError, u.UnitsError, u.UnitConversionError):
            raise AtomicError("Invalid temperature.") from None
        else:
            if value < 0 * u.K:
                raise AtomicError("T_e cannot be negative.")
        self._T_e = value
    @property
    def kappa(self) -> np.real:
        """
        Return the kappa parameter for a kappa distribution function
        for electrons.

        The value of ``kappa`` must be greater than ``1.5`` in order to
        have a valid distribution function. If ``kappa`` equals
        `~numpy.inf`, then the distribution function reduces to a
        Maxwellian.

        """
        return self._kappa

    @kappa.setter
    def kappa(self, value: Real):
        """
        Set the kappa parameter for a kappa distribution function for
        electrons. The value must be between ``1.5`` and `~numpy.inf`.
        """
        kappa_errmsg = "kappa must be a real number greater than 1.5"
        if not isinstance(value, Real):
            raise TypeError(kappa_errmsg)
        if value <= 1.5:
            raise ValueError(kappa_errmsg)
        self._kappa = np.real(value)
@property
def element(self) -> str:
    """Return the atomic symbol of the element (e.g. ``"He"``)."""
    return self._particle_instance.element

@property
def isotope(self) -> Optional[str]:
    """
    Return the isotope symbol for an isotope, or `None` if the
    particle is not an isotope.
    """
    return self._particle_instance.isotope

@property
def base_particle(self) -> str:
    """Return the symbol of the element or isotope."""
    # Prefer the more specific isotope symbol when one exists.
    return self.isotope if self.isotope else self.element

@property
def atomic_number(self) -> int:
    """Return the atomic number of the element."""
    return self._particle_instance.atomic_number
@property
def _particle_instances(self) -> List[Particle]:
    """
    Return a list of the `~plasmapy.particles.Particle` class
    instances, one per charge state from neutral (Z=0) up to the
    fully stripped ion (Z=atomic_number).
    """
    base_symbol = self._particle_instance.particle
    particles = []
    for charge in range(self.atomic_number + 1):
        particles.append(Particle(base_symbol, Z=charge))
    return particles
@property
def ionic_symbols(self) -> List[str]:
    """Return the list of ionic symbols, one per charge state."""
    symbols = []
    for particle in self._particle_instances:
        symbols.append(particle.ionic_symbol)
    return symbols
@property
def integer_charges(self) -> np.ndarray:
    """Return an array of the integer charges from 0 to the atomic number.

    Uses the builtin ``int`` as the dtype: the ``np.int`` alias was an
    alias *for* builtin ``int``, was deprecated in NumPy 1.20, and was
    removed in NumPy 1.24, so ``dtype=np.int`` raises AttributeError on
    current NumPy.
    """
    return np.arange(0, self.atomic_number + 1, dtype=int)
@property
def Z_mean(self) -> np.float64:
    """Return the mean integer charge, weighted by the ionic fractions.

    Raises
    ------
    ChargeError
        If any ionic fraction is NaN, i.e. no ionization information
        is available.
    """
    # BUG FIX: the original used ``np.nan in self.ionic_fractions``,
    # which tests membership via equality — and NaN != NaN — so the
    # guard could never trigger.  Test with np.isnan instead.
    if np.any(np.isnan(self.ionic_fractions)):
        raise ChargeError(
            "Z_mean cannot be found because no ionic fraction "
            f"information is available for {self.base_particle}."
        )
    return np.sum(self.ionic_fractions * np.arange(self.atomic_number + 1))
@property
def Z_rms(self) -> np.float64:
    """Return the root-mean-square integer charge."""
    charges = np.arange(self.atomic_number + 1)
    mean_square_charge = np.sum(self.ionic_fractions * charges ** 2)
    return np.sqrt(mean_square_charge)
@property
def Z_most_abundant(self) -> List[Integral]:
    """
    Return a `list` of the integer charges whose ionic fraction is
    the maximum (there may be ties).

    Examples
    --------
    >>> He = IonizationState('He', [0.2, 0.5, 0.3])
    >>> He.Z_most_abundant
    [1]
    >>> Li = IonizationState('Li', [0.4, 0.4, 0.2, 0.0])
    >>> Li.Z_most_abundant
    [0, 1]
    """
    fractions = self.ionic_fractions
    if np.any(np.isnan(fractions)):
        raise AtomicError(
            f"Cannot find most abundant ion of {self.base_particle} "
            f"because the ionic fractions have not been defined."
        )
    peak = fractions.max()
    return np.flatnonzero(fractions == peak).tolist()
@property
def tol(self) -> Real:
    """Return the absolute tolerance used when comparing ionic fractions."""
    return self._tol
@tol.setter
def tol(self, atol: Real):
    """Set the absolute tolerance, which must satisfy ``0 <= tol < 1``."""
    if not isinstance(atol, Real):
        raise TypeError("The attribute tol must be a real number.")
    if not (0 <= atol < 1):
        raise ValueError("Need 0 <= tol < 1.")
    self._tol = atol
def _get_states_info(self, minimum_ionic_fraction=0.01) -> List[str]:
    """
    Return a `list` containing the ion symbol, ionic fraction, and
    (if available) the number density for that ion.

    Only states whose ionic fraction exceeds *minimum_ionic_fraction*
    are included.  Iterating over ``self`` yields one state object per
    charge state.
    """
    states_info = []
    for state in self:
        if state.ionic_fraction > minimum_ionic_fraction:
            state_info = ""
            symbol = state.ionic_symbol
            if state.integer_charge < 10:
                # Pad single-digit charges so the columns line up.
                symbol = symbol[:-2] + " " + symbol[-2:]
            fraction = "{:.3f}".format(state.ionic_fraction)
            state_info += f"{symbol}: {fraction}"
            # Number densities are only meaningful when n_elem is finite.
            if np.isfinite(self.n_elem):
                value = "{:.2e}".format(state.number_density.si.value)
                state_info += f" n_i = {value} m**-3"
            states_info.append(state_info)
    return states_info
def info(self, minimum_ionic_fraction: Real = 0.01) -> None:
    """
    Print quicklook information for an
    `~plasmapy.particles.IonizationState` instance.

    Parameters
    ----------
    minimum_ionic_fraction: Real
        If the ionic fraction for a particular ionization state is
        below this level, then information for it will not be
        printed. Defaults to 0.01.

    Example
    -------
    >>> He_states = IonizationState(
    ...     'He',
    ...     [0.941, 0.058, 0.001],
    ...     T_e = 5.34 * u.K,
    ...     kappa = 4.05,
    ...     n_elem = 5.51e19 * u.m ** -3,
    ... )
    >>> He_states.info()
    IonizationState instance for He with Z_mean = 0.06
    ----------------------------------------------------------------
    He 0+: 0.941 n_i = 5.18e+19 m**-3
    He 1+: 0.058 n_i = 3.20e+18 m**-3
    ----------------------------------------------------------------
    n_elem = 5.51e+19 m**-3
    n_e = 3.31e+18 m**-3
    T_e = 5.34e+00 K
    kappa = 4.05
    ----------------------------------------------------------------
    """
    separator_line = [64 * "-"]
    scientific = "{:.2e}"
    floaty = "{:.2f}"
    # NOTE(review): these format calls access n_elem/n_e/T_e/Z_mean
    # eagerly, so info() will raise if any of them is unset — confirm
    # callers always configure the state first.
    n_elem = scientific.format(self.n_elem.value)
    n_e = scientific.format(self.n_e.value)
    T_e = scientific.format(self.T_e.value)
    kappa = floaty.format(self.kappa)
    Z_mean = floaty.format(self.Z_mean)
    output = [
        f"IonizationState instance for {self.base_particle} with Z_mean = {Z_mean}"
    ]
    attributes = []
    # The per-state table is only printed when fractions are defined.
    if not np.all(np.isnan(self.ionic_fractions)):
        output += separator_line
        output += self._get_states_info(minimum_ionic_fraction)
        output += separator_line
    if not np.isnan(self.n_elem):
        attributes.append(f"n_elem = {n_elem} m**-3")
        attributes.append(f"n_e = {n_e} m**-3")
    if not np.isnan(self.T_e):
        attributes.append(f"T_e = {T_e} K")
    if np.isfinite(self.kappa):
        attributes.append(f"kappa = {kappa}")
    if attributes:
        attributes += separator_line
    output += attributes
    for line in output:
        print(line)
| 34.919308 | 95 | 0.576628 |
7959b706d50a696318ae018d00fd3286ed4f4d97 | 1,254 | py | Python | setup.py | jmoon1506/pytaridx | 8cb8e23c69ce4c611ac8ade76ecb4f0dd634d476 | [
"MIT"
] | 1 | 2021-11-11T18:51:10.000Z | 2021-11-11T18:51:10.000Z | setup.py | jmoon1506/pytaridx | 8cb8e23c69ce4c611ac8ade76ecb4f0dd634d476 | [
"MIT"
] | 1 | 2021-11-11T19:40:54.000Z | 2021-11-11T19:40:54.000Z | setup.py | jmoon1506/pytaridx | 8cb8e23c69ce4c611ac8ade76ecb4f0dd634d476 | [
"MIT"
] | 1 | 2021-11-11T18:47:00.000Z | 2021-11-11T18:47:00.000Z | from setuptools import setup, find_packages
# Build the PyPI long description from the README.
with open("README.md", "r", encoding="utf-8") as fh:
    long_description = fh.read()

# Package metadata for pytaridx (indexed tar archives).
setup(name='pytaridx',
      description='A package for creating, reading from, and writing to indexed tar archives.',
      long_description=long_description,
      long_description_content_type="text/markdown",
      version='1.0.2',
      author='Tomas Oppelstrup',
      author_email='oppelstrup2@llnl.gov',
      # SPDX-License-Identifier: MIT
      license='MIT',
      # Installs a `pytaridx` console command that dispatches to main().
      entry_points={
          'console_scripts': [
              'pytaridx = pytaridx.main:main',
          ]
      },
      ## Put final released URL here:
      url='https://github.com/LLNL/pytaridx',
      packages=find_packages(),
      install_requires=[],
      classifiers=[
          'Development Status :: 4 - Beta',
          "License :: OSI Approved :: MIT License",
          'Intended Audience :: Developers',
          'Operating System :: Unix',
          'Programming Language :: Python',
          'Programming Language :: Python :: 2.7',
          'Programming Language :: Python :: 3',
          'Programming Language :: Python :: 3.5',
          'Programming Language :: Python :: 3.6',
          'Programming Language :: Python :: 3.7',
      ],
      )
| 33.891892 | 95 | 0.601276 |
7959b72f7004d25d4d90bf7ed74fcd00cd82615a | 167 | py | Python | onmt/encoders/__init__.py | kolk/qa_factoid2natural | ccdd0096217c8e88b148f353f0c89628b85f9c4d | [
"MIT"
] | 4 | 2019-11-28T17:49:19.000Z | 2022-02-23T17:07:08.000Z | onmt/encoders/__init__.py | kolk/qa_factoid2natural | ccdd0096217c8e88b148f353f0c89628b85f9c4d | [
"MIT"
] | 5 | 2019-11-28T17:49:09.000Z | 2022-02-28T16:37:17.000Z | onmt/encoders/__init__.py | kolk/qa_factoid2natural | ccdd0096217c8e88b148f353f0c89628b85f9c4d | [
"MIT"
] | null | null | null | """Module defining encoders."""
from onmt.encoders.encoder import EncoderBase
from onmt.encoders.rnn_encoder import RNNEncoder
# BUG FIX: the original `["EncoderBase" "RNNEncoder"]` relied on implicit
# string concatenation and exported the single bogus name
# "EncoderBaseRNNEncoder"; a comma separates the two intended exports.
__all__ = ["EncoderBase", "RNNEncoder"]
| 27.833333 | 48 | 0.796407 |
7959b818da13fae42beaa05d8d22794e80bb0330 | 369 | py | Python | slixmpp/plugins/xep_0325/stanza/base.py | marconfus/slixmpp | bcf186f42dc31d360e0a0af8a4b3aaf1e0b212aa | [
"BSD-3-Clause"
] | null | null | null | slixmpp/plugins/xep_0325/stanza/base.py | marconfus/slixmpp | bcf186f42dc31d360e0a0af8a4b3aaf1e0b212aa | [
"BSD-3-Clause"
] | 1 | 2021-02-24T07:58:40.000Z | 2021-02-24T07:58:40.000Z | slixmpp/plugins/xep_0325/stanza/base.py | marconfus/slixmpp | bcf186f42dc31d360e0a0af8a4b3aaf1e0b212aa | [
"BSD-3-Clause"
] | null | null | null | """
Slixmpp: The Slick XMPP Library
Implementation of xeps for Internet of Things
http://wiki.xmpp.org/web/Tech_pages/IoT_systems
Copyright (C) 2013 Sustainable Innovation, Joachim.lindborg@sust.se, bjorn.westrom@consoden.se
This file is part of Slixmpp.
See the file LICENSE for copying permission.
"""
from slixmpp.xmlstream import ET
pass
| 26.357143 | 98 | 0.742547 |
7959b824f54bb299c1dcc8aa712aeb0240c0b2f0 | 324 | py | Python | users/migrations/0005_remove_user_username.py | serajushsalekin/Custom-AbstractUser-User | 7a95b4402005f9088144d1f0a05116ec95b30a72 | [
"MIT"
] | null | null | null | users/migrations/0005_remove_user_username.py | serajushsalekin/Custom-AbstractUser-User | 7a95b4402005f9088144d1f0a05116ec95b30a72 | [
"MIT"
] | 6 | 2020-06-05T20:04:45.000Z | 2021-09-22T18:06:48.000Z | users/migrations/0005_remove_user_username.py | serajushsalekin/Custom-AbstractUser-User | 7a95b4402005f9088144d1f0a05116ec95b30a72 | [
"MIT"
] | null | null | null | # Generated by Django 3.0 on 2019-12-12 09:38
from django.db import migrations
class Migration(migrations.Migration):
    """Drop the ``username`` field from the custom ``users.User`` model."""

    dependencies = [
        ('users', '0004_remove_user_is_man'),
    ]

    operations = [
        migrations.RemoveField(
            model_name='user',
            name='username',
        ),
    ]
| 18 | 45 | 0.589506 |
7959b83b95f5c56c73388aaa877a9cb14a06973f | 1,198 | py | Python | LearnGUItkinter/chapter-9/gui_9_5.py | eastsheng/LearningPythonGUI | 79f62f235cefab84b10a5159ecc81f0656b1d611 | [
"MIT"
] | 1 | 2021-09-08T07:54:46.000Z | 2021-09-08T07:54:46.000Z | LearnGUItkinter/chapter-9/gui_9_5.py | eastsheng/LearningPythonGUI | 79f62f235cefab84b10a5159ecc81f0656b1d611 | [
"MIT"
] | null | null | null | LearnGUItkinter/chapter-9/gui_9_5.py | eastsheng/LearningPythonGUI | 79f62f235cefab84b10a5159ecc81f0656b1d611 | [
"MIT"
] | null | null | null | from tkinter import *
from tkinter.colorchooser import *
def bgUpdate(source):  # Tk passes the new slider value here; the name is arbitrary
    """Repaint the root window background from the three RGB sliders."""
    r = rSlider.get()
    g = gSlider.get()
    b = bSlider.get()
    print("R=%d,G=%d,B=%d" % (r,g,b))
    # Compose a Tk colour string of the form "#rrggbb".
    myColor = "#%02x%02x%02x" %(r,g,b)
    root.config(bg=myColor)
def bgChoose():
    """Set the window background via the colour-chooser dialog."""
    # askcolor() returns ((r, g, b), "#rrggbb"); index 1 is the hex string.
    # NOTE(review): cancelling the dialog yields (None, None), so
    # myColor[1] would be None here — confirm desired behaviour.
    myColor = askcolor()
    print(type(myColor),myColor)
    root.config(bg=myColor[1])
def printInfo():
    """Print the current value of each of the three spinboxes."""
    for spinbox in (spin1, spin2, spin3):
        print(spinbox.get())
# Build the demo window: three RGB sliders, a colour-chooser button,
# and three spinboxes that print their value when changed.
root=Tk()
root.title("Scale")
root.geometry("360x360")
fm = Frame(root)
fm.pack()
# One slider per colour channel; bgUpdate repaints the window on change.
rSlider = Scale(fm,from_=0,to=255,command=bgUpdate)
gSlider = Scale(fm,from_=0,to=255,command=bgUpdate)
bSlider = Scale(fm,from_=0,to=255,command=bgUpdate)
bSlider.set(120)
rSlider.grid(row=0,column=0)
gSlider.grid(row=0,column=1)
bSlider.grid(row=0,column=3)
btn = Button(fm,text="Select Color",command=bgChoose)
btn.grid(row=1,column=1)
# Numeric spinbox (0..30, step 1).
spin1 = Spinbox(root,from_=0,to=30,increment=1,
                command=printInfo)
spin1.pack(padx=10,pady=20)
# Spinbox with an explicit list of values.
spin2 = Spinbox(root,values=(10,12,23,56),
                command=printInfo)
spin2.pack(padx=10,pady=20)
cities = ("新加坡",'澳大利亚',"小日本")
spin3 = Spinbox(root,values=cities,
                command=printInfo)
spin3.pack(padx=10,pady=20)
root.mainloop()
7959b8c7e6d8b3a250a5da83a22742c15c783d91 | 1,721 | py | Python | scripts/CheckoutCidTools.py | SickScan/ssbl | b7338253ac653853f8f785e9275270a808d557ed | [
"Apache-2.0"
] | 1 | 2021-04-03T03:36:00.000Z | 2021-04-03T03:36:00.000Z | scripts/CheckoutCidTools.py | SickScan/ssbl | b7338253ac653853f8f785e9275270a808d557ed | [
"Apache-2.0"
] | 1 | 2020-06-23T11:14:29.000Z | 2020-06-23T11:14:29.000Z | scripts/CheckoutCidTools.py | SickScan/sick_scan_base | b7338253ac653853f8f785e9275270a808d557ed | [
"Apache-2.0"
] | null | null | null | #!/usr/bin/env python3
# This file is derived from the work of
# Francesco Conti <f.conti@unibo.it>
# https://github.com/pulp-platform/bigpulp/blob/master/ipstools_cfg.py
REPOSITORY = "github.com/SickScan/CID-Tools.git"
CHECKOUT_DIR = '../src/CID-Tools'
COMMIT = '' #leave this blank to checkout HEAD
import argparse
import sys,os,subprocess
class tcolors:
    """ANSI escape sequences for coloured terminal output."""
    OK = '\033[92m'       # green
    WARNING = '\033[93m'  # yellow
    ERROR = '\033[91m'    # red
    ENDC = '\033[0m'      # reset to default colour
def execute(cmd, silent=False):
    """Run *cmd* and return its exit status.

    Parameters
    ----------
    cmd : str
        Command line, split naively on whitespace — arguments must not
        themselves contain spaces.
    silent : bool
        When True, discard the command's stdout.

    Uses subprocess.DEVNULL instead of manually opening os.devnull: the
    original leaked the devnull file object if subprocess.call raised,
    and this is simpler and equivalent.
    """
    stdout = subprocess.DEVNULL if silent else None
    return subprocess.call(cmd.split(), stdout=stdout)
def execute_out(cmd, silent=False):
    """Run *cmd* (split on whitespace) and return its captured stdout as bytes.

    The *silent* flag is accepted for signature parity with execute()
    but has no effect here.
    """
    proc = subprocess.Popen(cmd.split(), stdout=subprocess.PIPE)
    stdout_data, _ = proc.communicate()
    return stdout_data
if __name__ == "__main__":
    argParser = argparse.ArgumentParser(description='Checkout CID tools')
    argParser.add_argument('-u','--user', help='Github user with access rights for th CID Tools repo', required=True)
    argParser.add_argument('-p','--password', help='Password', required=True)
    args = argParser.parse_args()
    # NOTE(review): currentWorkDir is never used; cwd below serves that role.
    currentWorkDir = os.getcwd()
    if not os.path.exists(CHECKOUT_DIR):
        # Clone on first run.  NOTE(review): credentials are embedded in
        # the clone URL and passed on the command line, so they are
        # visible in the process list — consider a credential helper.
        execute("git clone https://{}:{}@{} {}".format(args.user, args.password, REPOSITORY, CHECKOUT_DIR))
    elif not os.path.isdir(CHECKOUT_DIR):
        sys.exit("Error: '{}' exists but is not a directory!".format(CHECKOUT_DIR))
    # Update an existing checkout and pin to COMMIT when one is configured.
    cwd = os.getcwd()
    os.chdir(CHECKOUT_DIR)
    execute("git fetch --all", silent=True)
    if 0 != len(COMMIT):
        execute("git checkout {}".format(COMMIT))
    os.chdir(cwd)
| 27.31746 | 117 | 0.644974 |
7959b921404d3a18d402a67913972464be75d0af | 8,784 | py | Python | grand/backends/gremlin.py | aplbrain/grand | d85669df17a40834a13478ae200e984e13b41650 | [
"Apache-2.0"
] | 31 | 2020-10-16T16:46:02.000Z | 2022-03-04T20:45:05.000Z | grand/backends/gremlin.py | aplbrain/grand | d85669df17a40834a13478ae200e984e13b41650 | [
"Apache-2.0"
] | 15 | 2020-10-15T16:28:49.000Z | 2022-02-10T16:41:32.000Z | grand/backends/gremlin.py | aplbrain/grand | d85669df17a40834a13478ae200e984e13b41650 | [
"Apache-2.0"
] | null | null | null | """
https://tinkerpop.apache.org/docs/current/reference/
"""
from typing import Hashable, Generator, Iterable
import time
import pandas as pd
from gremlin_python.structure.graph import Graph
from gremlin_python.process.graph_traversal import __, GraphTraversalSource
from gremlin_python.driver.driver_remote_connection import DriverRemoteConnection
from .backend import Backend
# Reserved property/label names used to store grand's own node IDs and
# edge/vertex labels inside the gremlin graph.
ID = "__id"
EDGE_NAME = "__edge"
NODE_NAME = "__node"
def _node_to_metadata(n):
    """Normalize a gremlin valueMap into a plain dict keyed by strings.

    Non-string keys (such as gremlin ``T`` enum members) are replaced
    by their ``.name`` attribute; string keys pass through unchanged.
    """
    metadata = {}
    for key, value in n.items():
        if isinstance(key, str):
            metadata[key] = value
        else:
            metadata[key.name] = value
    return metadata
class GremlinBackend(Backend):
    """
    A backend instance for Gremlin-compatible graph databases.
    """

    def __init__(self, graph: GraphTraversalSource, directed: bool = True):
        """
        Create a new Backend instance wrapping a Gremlin endpoint.
        Arguments:
            graph (GraphTraversalSource): The gremlin traversal source
                to issue all queries through
            directed (bool: True): Accepted for API compatibility.
                NOTE(review): this argument is never stored or used —
                the backend always reports itself as directed.
        Returns:
            None
        """
        self._g = graph

    def is_directed(self) -> bool:
        """
        Return True if the backend graph is directed.
        The Gremlin-backed datastore is always directed.
        Arguments:
            None
        Returns:
            bool: True if the backend graph is directed.
        """
        return True

    def add_node(self, node_name: Hashable, metadata: dict):
        """
        Add a new node to the graph (or update an existing node's
        properties).
        Arguments:
            node_name (Hashable): The ID of the node
            metadata (dict: None): An optional dictionary of metadata
        Returns:
            Hashable: The ID of this node, as inserted
        """
        if self.has_node(node_name):
            # Retrieve the existing node; we will update the props.
            v = self._g.V().has(ID, node_name)
        else:
            v = self._g.addV().property(ID, node_name)
        for key, val in metadata.items():
            v = v.property(key, val)
        # toList() forces traversal execution; [0] is the vertex itself.
        return v.toList()[0]

    def get_node_by_id(self, node_name: Hashable):
        """
        Return the data associated with a node.
        Arguments:
            node_name (Hashable): The node ID to look up
        Returns:
            dict: The metadata associated with this node
        Raises:
            KeyError: If no node with this ID exists.
        """
        try:
            return _node_to_metadata(
                self._g.V().has(ID, node_name).valueMap(True).toList()[0]
            )
        except IndexError as e:
            # Empty result list means the node does not exist; present
            # that as the conventional KeyError.
            raise KeyError() from e

    def has_node(self, u: Hashable) -> bool:
        """
        Return True if a node with the given ID exists.
        Arguments:
            u (Hashable): The node ID to look up
        Returns:
            bool: Whether the node exists
        """
        try:
            self.get_node_by_id(u)
            return True
        except KeyError:
            return False

    def remove_node(self, node_name: Hashable):
        """
        Remove a node (the drop traversal's result list, normally empty).
        Arguments:
            node_name (Hashable): The node ID to look up
        Returns:
            list: The (empty) result of the drop traversal
        """
        return self._g.V().has(ID, node_name).drop().toList()

    def all_nodes_as_iterable(self, include_metadata: bool = False) -> Generator:
        """
        Get a generator of all of the nodes in this graph.
        Arguments:
            include_metadata (bool: False): Whether to include node metadata in
                the response
        Returns:
            Generator: A generator of all nodes (arbitrary sort)
        """
        if include_metadata:
            # Each element is a one-entry dict {node_id: metadata}.
            return iter(
                [
                    {n[ID][0]: _node_to_metadata(n)}
                    for n in self._g.V().valueMap(True).toList()
                ]
            )
        else:
            return iter([n[ID] for n in self._g.V().project(ID).by(ID).toList()])

    def add_edge(self, u: Hashable, v: Hashable, metadata: dict):
        """
        Add a new edge to the graph between two nodes.
        If the graph is directed, this edge will start (source) at the `u` node
        and end (target) at the `v` node.
        Missing endpoint nodes are created on demand.
        Arguments:
            u (Hashable): The source node ID
            v (Hashable): The target node ID
            metadata (dict): Optional metadata to associate with the edge
        Returns:
            Hashable: The edge ID, as inserted.
        """
        try:
            # get_edge_by_id raises IndexError when the edge is absent.
            self.get_edge_by_id(u, v)
            e = self._g.V().has(ID, u).outE().as_("e").inV().has(ID, v).select("e")
        except IndexError:
            if not self.has_node(u):
                self.add_node(u, {})
            if not self.has_node(v):
                self.add_node(v, {})
            e = (
                self._g.V()
                .has(ID, u)
                .addE(EDGE_NAME)
                .as_("e")
                .to(__.V().has(ID, v))
                .select("e")
            )
        for key, val in metadata.items():
            e = e.property(key, val)
        return e.toList()

    def all_edges_as_iterable(self, include_metadata: bool = False) -> Generator:
        """
        Get a list of all edges in this graph, arbitrary sort.
        Arguments:
            include_metadata (bool: False): Whether to include edge metadata
        Returns:
            Generator: A generator of all edges (arbitrary sort)
        """
        if include_metadata:
            return iter(
                [
                    (e["source"], e["target"], _node_to_metadata(e["properties"]))
                    for e in (
                        self._g.V()
                        .outE()
                        .project("target", "source", "properties")
                        .by(__.inV().values(ID))
                        .by(__.outV().values(ID))
                        .by(__.valueMap(True))
                        .toList()
                    )
                ]
            )
        return iter(
            [
                (e["source"], e["target"])
                for e in self._g.V()
                .outE()
                .project("target", "source")
                .by(__.inV().values(ID))
                .by(__.outV().values(ID))
                .toList()
            ]
        )

    def get_edge_by_id(self, u: Hashable, v: Hashable):
        """
        Get an edge by its source and target IDs.
        Arguments:
            u (Hashable): The source node ID
            v (Hashable): The target node ID
        Returns:
            The first property of this edge.
            NOTE(review): the trailing [0] returns only the first
            element of the properties list, not a metadata dict —
            confirm callers only rely on the IndexError behaviour.
        Raises:
            IndexError: If no such edge exists.
        """
        return (
            self._g.V()
            .has(ID, u)
            .outE()
            .as_("e")
            .inV()
            .has(ID, v)
            .select("e")
            .properties()
            .toList()
        )[0]

    def get_node_neighbors(
        self, u: Hashable, include_metadata: bool = False
    ) -> Generator:
        """
        Get a generator of all downstream nodes from this node.
        Arguments:
            u (Hashable): The source node ID
        Returns:
            Generator (or dict of target-id -> edge metadata when
            include_metadata is True)
        """
        if include_metadata:
            return {
                e["target"]: _node_to_metadata(e["properties"])
                for e in (
                    self._g.V()
                    .has(ID, u)
                    .outE()
                    .project("target", "source", "properties")
                    .by(__.inV().values(ID))
                    .by(__.outV().values(ID))
                    .by(__.valueMap(True))
                    .toList()
                )
            }
        return self._g.V().has(ID, u).out().values(ID).toList()

    def get_node_predecessors(
        self, u: Hashable, include_metadata: bool = False
    ) -> Generator:
        """
        Get a generator of all upstream nodes into this node.
        Arguments:
            u (Hashable): The target node ID
        Returns:
            Generator (or dict of source-id -> projected edge record
            when include_metadata is True)
        """
        if include_metadata:
            return {
                e["source"]: e
                for e in (
                    self._g.V()
                    .has(ID, u)
                    .inE()
                    .project("target", "source", "properties")
                    .by(__.inV().values(ID))
                    .by(__.outV().values(ID))
                    .by(__.valueMap(True))
                    .toList()
                )
            }
        return self._g.V().out().has(ID, u).values(ID).toList()

    def get_node_count(self) -> Iterable:
        """
        Get an integer count of the number of nodes in this graph.
        Arguments:
            None
        Returns:
            int: The count of nodes
        """
        return self._g.V().count().toList()[0]

    def teardown(self) -> None:
        # Drop every vertex (and with them, every edge) in the graph.
        self._g.V().drop().toList()
| 27.45 | 83 | 0.493056 |
7959b92b68a374d6ea518cc4c55d747de110d072 | 9,787 | py | Python | padre/handlers/gerrit.py | krislindgren/padre | 56e3342a953fdc472adc11ce301acabf6c595760 | [
"MIT"
] | null | null | null | padre/handlers/gerrit.py | krislindgren/padre | 56e3342a953fdc472adc11ce301acabf6c595760 | [
"MIT"
] | null | null | null | padre/handlers/gerrit.py | krislindgren/padre | 56e3342a953fdc472adc11ce301acabf6c595760 | [
"MIT"
] | null | null | null | import json
import logging
import re
import munch
from oslo_utils import reflection
import requests
from padre import channel as c
from padre import handler
from padre import matchers
from padre import utils
LOG = logging.getLogger(__name__)
def _filter_by_project(ok_projects, event):
    """Return True when the event's gerrit project is allowed.

    A configured project of ``"*"`` matches any project.
    """
    event_project = event.change.project
    for candidate in ok_projects:
        if candidate == event_project or candidate == "*":
            return True
    return False
def _filter_by_email(known_emails, email_suffixes, event):
    """Return True when any email on the event passes the filters.

    Gathers the owner/author/uploader emails from the event, then
    matches them against an explicit allow-list and a list of allowed
    suffixes ("*" matches everything).

    NOTE(review): an empty (or all-blank) suffix list makes the
    function return True regardless of ``known_emails`` — confirm that
    "no suffixes configured" is meant to allow everything.
    """
    incoming_emails = []
    incoming_emails.append(event.change.owner.email)
    incoming_emails.append(event.patch_set.author.email)
    incoming_emails.append(event.patch_set.uploader.email)
    incoming_emails.append(event.uploader.email)
    # De-duplicate and drop unset addresses.
    incoming_emails = set(email for email in incoming_emails
                          if email is not None)
    send_message = False
    if any(e in known_emails for e in incoming_emails):
        send_message = True
    email_suffixes = [e.strip() for e in email_suffixes if e.strip()]
    if len(email_suffixes) == 0:
        send_message = True
    else:
        for ok_suffix in email_suffixes:
            if ok_suffix == "*":
                send_message = True
            else:
                for in_email in incoming_emails:
                    if in_email.endswith(ok_suffix):
                        send_message = True
    return send_message
class Unfurler(handler.TriggeredHandler):
    """Expands bare gerrit change links in slack messages into rich
    attachments (subject, project, diff stats, commit message)."""

    handles_what = {
        'channel_matcher': matchers.match_channel(c.BROADCAST),
        'message_matcher': matchers.match_slack("message"),
    }
    template_subdir = 'gerrit'
    config_section = 'gerrit'
    # Feature flag: unfurling is off unless `unfurl.enabled` is set.
    config_on_off = ("unfurl.enabled", False)
    # Gerrit REST URL used to fetch a change with its current revision.
    change_url_tpl = ("%(base)s://%(host)s/changes/%(change_id)s"
                      "?o=CURRENT_COMMIT&o=CURRENT_REVISION")
    # Jinja template for the attachment pretext (slack mrkdwn).
    change_msg_tpl = ("`{{ change.subject }}` in"
                      " project `{{ change.project }}`"
                      " ({{ change.insertions }}|{{ change.deletions }}).")

    @classmethod
    def _find_matches(cls, message_text, config):
        """Scan *message_text* for gerrit change URLs on configured hosts.

        Returns a list of munches with host, change_id, url and
        is_secure (https vs http) fields.
        """
        matches = []
        expand_for = []
        try:
            expand_for = list(config.unfurl.expand_for)
        except AttributeError:
            # No hosts configured; nothing to expand.
            pass
        for tmp_host in expand_for:
            # Old-style "/#/c/<id>" and new-style "/<id>" change URLs.
            pats = [
                r"(https://|http://)" + tmp_host + r"/#/c/(\d+)[/]?",
                r"(https://|http://)" + tmp_host + r"/(\d+)[/]?",
            ]
            for pat in pats:
                for m in re.finditer(pat, message_text):
                    # group(1) is the scheme, group(2) the change number.
                    match = munch.Munch({
                        'host': tmp_host,
                        'change_id': int(m.group(2)),
                        'url': m.group(0),
                    })
                    if m.group(1) == "https://":
                        match.is_secure = True
                    else:
                        match.is_secure = False
                    matches.append(match)
        return matches

    def _fetch_change(self, match, call_timeout):
        """Fetch one change from the gerrit REST API.

        Returns a munch on success or None on any fetch/parse failure
        (failures are logged, never raised).
        """
        base = "http"
        if match.is_secure:
            base += "s"
        change_url = self.change_url_tpl % {
            'base': base,
            'host': match.host,
            'change_id': match.change_id,
        }
        change = None
        try:
            req = requests.get(change_url, timeout=call_timeout)
            req.raise_for_status()
        except requests.RequestException:
            LOG.warning("Failed fetch of change %s from '%s'",
                        match.change_id, change_url, exc_info=True)
        else:
            # Rip off the header gerrit responses start with.
            body_lines = req.text.split("\n")[1:]
            body = "\n".join(body_lines)
            try:
                change = json.loads(body)
                if not isinstance(change, dict):
                    raise TypeError(
                        "%s is not a dict" % reflection.get_class_name(change))
            except (ValueError, TypeError):
                LOG.warning("Received invalid json content from result"
                            " of call to %s", change_url, exc_info=True)
            else:
                LOG.debug("Received %s", change)
                change = munch.munchify(change)
        return change

    @classmethod
    def handles(cls, message, channel, config):
        """Return a handler match when the message contains gerrit links.

        Thread replies are ignored (only top-level messages unfurl).
        """
        channel_matcher = cls.handles_what['channel_matcher']
        if not channel_matcher(channel):
            return None
        message_matcher = cls.handles_what['message_matcher']
        if (not message_matcher(message, cls, only_to_me=False) or
                message.body.thread_ts):
            return None
        message_text = message.body.text_no_links
        matches = cls._find_matches(message_text, config)
        if not matches:
            return None
        return handler.ExplicitHandlerMatch(arguments={
            'matches': matches,
        })

    @staticmethod
    def _find_author(change):
        """Pick the first usable author string from the change.

        Prefers the change owner's name/email/username, then falls back
        to the current revision's committer fields.
        """
        maybe_author = []
        if hasattr(change, 'owner') and change.owner:
            maybe_author.extend([
                change.owner.get("name"),
                change.owner.get("email"),
                change.owner.get("username"),
            ])
        rev = change.revisions[change.current_revision]
        if hasattr(rev, "commit") and rev.commit:
            committer = rev.commit.get("committer", {})
            maybe_author.extend([
                committer.get("name"),
                committer.get("email"),
                committer.get("username"),
            ])
        author = None
        for a in maybe_author:
            if a:
                author = a
                break
        return author

    def _run(self, matches=None):
        """Fetch each unique matched change and post it as a threaded
        attachment reply; de-duplicates (host, change_id) pairs."""
        if not matches:
            matches = []
        seen_changes = set()
        replier = self.message.reply_attachments
        for m in matches:
            # Stop early if the bot is shutting down.
            if self.dead.is_set():
                break
            if m.change_id <= 0:
                continue
            m_ident = (m.host, m.change_id)
            if m_ident in seen_changes:
                continue
            seen_changes.add(m_ident)
            LOG.debug("Trying to unfurl '%s'", m.url)
            change = self._fetch_change(m, self.config.unfurl.call_timeout)
            if change is not None:
                attachment = {
                    'fallback': change.subject,
                    'pretext': utils.render_template(
                        self.change_msg_tpl, {'change': change}),
                    'link': m.url,
                    'footer': "Gerrit",
                    'mrkdwn_in': ["pretext"],
                    'footer_icon': ("https://upload.wikimedia.org/"
                                    "wikipedia/commons/thumb/4/4d/"
                                    "Gerrit_icon.svg/"
                                    "52px-Gerrit_icon.svg.png"),
                }
                author = self._find_author(change)
                if author:
                    attachment['author_name'] = author
                rev = change.revisions[change.current_revision]
                if rev.commit and rev.commit.message:
                    attachment['text'] = rev.commit.message.strip()
                replier(channel=self.message.body.channel,
                        log=LOG, thread_ts=self.message.body.ts,
                        attachments=[attachment],
                        link_names=False, as_user=True,
                        unfurl_links=False)
class PatchSetCreatedHandler(handler.Handler):
    """Handlers incoming gerrit patch set created events (not from users)."""

    config_section = 'gerrit'
    template_subdir = 'gerrit'
    handles_what = {
        'channel_matcher': matchers.match_channel(c.BROADCAST),
        'message_matcher': matchers.match_gerrit("patchset-created"),
    }
    requires_slack_sender = True

    @staticmethod
    def _passes_filters(target, what):
        """Return True when the event passes both the email and project
        filters configured for *target* (a channel config entry)."""
        passes = _filter_by_email(target.get("emails", []),
                                  target.get("email_suffixes", []),
                                  what)
        if not passes:
            return False
        passes = _filter_by_project(target.get("projects", []), what)
        if not passes:
            return False
        return True

    def _run(self):
        """Announce the new patch set to every configured channel whose
        filters match, optionally with the full commit message."""
        what = self.message.body
        targets = []
        for target in self.config.get('channels', []):
            if self._passes_filters(target, what):
                targets.append(target)
        if targets:
            attachment = {
                'pretext': self.render_template("change", what),
                'mrkdwn_in': ["pretext"],
            }
            # Expanded variant additionally carries the commit message.
            expanded_attachment = attachment.copy()
            expanded_attachment.update({
                'text': what.change.commit_message.strip(),
                'footer': "OpenStack Gerrit",
                'footer_icon': ("https://upload.wikimedia.org/"
                                "wikipedia/commons/thumb/4/4d/"
                                "Gerrit_icon.svg/52px-Gerrit_icon.svg.png"),
            })
            for target in targets:
                if self.dead.is_set():
                    break
                if target.get("expand", True):
                    tmp_attachment = expanded_attachment
                else:
                    tmp_attachment = attachment
                self.bot.slack_sender.post_send(
                    channel=target.channel,
                    text=' ', attachments=[tmp_attachment],
                    link_names=True, as_user=True,
                    unfurl_links=False, log=LOG)
| 36.655431 | 79 | 0.526821 |
7959b981837542836184418f5fe3aab78ac12629 | 21,147 | py | Python | test/test_punt.py | mnaser/vpp | 8934a04596d1421c35b194949b2027ca1fe71aef | [
"Apache-2.0"
] | null | null | null | test/test_punt.py | mnaser/vpp | 8934a04596d1421c35b194949b2027ca1fe71aef | [
"Apache-2.0"
] | null | null | null | test/test_punt.py | mnaser/vpp | 8934a04596d1421c35b194949b2027ca1fe71aef | [
"Apache-2.0"
] | null | null | null | #!/usr/bin/env python
import binascii
import random
import socket
import unittest
import os
import scapy.layers.inet6 as inet6
import threading
import struct
from struct import unpack, unpack_from
from util import ppp, ppc
from re import compile
from scapy.packet import Raw
from scapy.layers.l2 import Ether
from scapy.layers.inet import IP, UDP, ICMP
from scapy.layers.inet6 import IPv6, ICMPv6DestUnreach
from framework import VppTestCase, VppTestRunner
# Format MAC Address
def get_mac_addr(bytes_addr):
    """Format a 6-byte MAC address as 'aa:bb:cc:dd:ee:ff'.

    Iterates over a bytearray so each element is an int on both
    Python 2 and Python 3; the original ``ord(b)`` raises TypeError on
    Python 3, where iterating bytes already yields ints.
    """
    return ':'.join('%02x' % b for b in bytearray(bytes_addr))
# Format IP Address
def ipv4(bytes_addr):
    """Format a 4-byte IPv4 address as a dotted quad, e.g. '192.168.0.1'.

    Iterates over a bytearray so each element is an int on both
    Python 2 and Python 3; the original ``ord(b)`` raises TypeError on
    Python 3, where iterating bytes already yields ints.
    """
    return '.'.join('%d' % b for b in bytearray(bytes_addr))
# Unpack Ethernet Frame
def ethernet_frame(data):
    """Split a raw Ethernet frame into (dest_mac, src_mac, proto, payload).

    The EtherType is byte-swapped with htons so callers can compare it
    against host-order constants (e.g. 8 for IPv4 on little-endian).
    """
    header, payload = data[:14], data[14:]
    dest_mac, src_mac, proto = struct.unpack('! 6s 6s H', header)
    return dest_mac, src_mac, socket.htons(proto), payload
# Unpack IPv4 Packets
def ipv4_packet(data):
    """Extract (protocol, source, destination, payload) from an IPv4 header.

    Assumes a fixed 20-byte header (IP options are not handled).
    """
    proto, src, target = struct.unpack_from('! 8x 1x B 2x 4s 4s', data)
    return proto, src, target, data[20:]
# Unpack IPv6 Packets
def ipv6_packet(data):
    """Extract (next_header, source, destination, payload) from an IPv6 header.

    The fixed 40-byte base header is assumed; extension headers are not
    walked.
    """
    nh, src, target = struct.unpack_from('! 6x B 1x 16s 16s', data)
    return nh, src, target, data[40:]
# Unpacks any UDP Packet
def udp_seg(data):
    """Split a UDP header into (src_port, dest_port, size, payload).

    NOTE(review): the '2x' in the format skips the UDP length field, so
    `size` is actually read from the checksum position — confirm intent.
    """
    src_port, dest_port, size = struct.unpack_from('! H H 2x H', data)
    return src_port, dest_port, size, data[8:]
# Unpacks any TCP Packet
def tcp_seg(data):
    """Split a TCP segment into (src_port, dest_port, seq, payload).

    The payload offset is computed from the data-offset nibble (upper
    four bits of the flags word), which counts 32-bit header words.
    """
    src_port, dest_port, seq, flag = struct.unpack_from('! H H L 4x H', data)
    header_len = (flag >> 12) * 4
    return src_port, dest_port, seq, data[header_len:]
def receivePackets(sock, counters):
    """Receive punted packets from *sock* forever, recording the UDP
    destination ports seen into the *counters* dict (value 0).

    Each datagram starts with an 8-byte punt-socket metadata header,
    followed by the raw Ethernet frame.  TCP segments are parsed but
    not counted.  NOTE(review): the TCP branches also call udp_seg()
    (tcp_seg appears unused here) — confirm that is intentional.
    """
    # Wait for some packets on socket
    while True:
        data = sock.recv(65536)
        # punt socket metadata
        # packet_desc = data[0:8]
        # Ethernet
        _, _, eth_proto, data = ethernet_frame(data[8:])
        # Ipv4 (0x0800 byte-swapped by htons on little-endian == 8)
        if eth_proto == 8:
            proto, _, _, data = ipv4_packet(data)
            # TCP
            if proto == 6:
                _, dst_port, _, data = udp_seg(data)
            # UDP
            elif proto == 17:
                _, dst_port, _, data = udp_seg(data)
                counters[dst_port] = 0
        # Ipv6 (0x86dd byte-swapped)
        elif eth_proto == 0xdd86:
            nh, _, _, data = ipv6_packet(data)
            # TCP
            if nh == 6:
                _, dst_port, _, data = udp_seg(data)
            # UDP
            elif nh == 17:
                _, dst_port, _, data = udp_seg(data)
                counters[dst_port] = 0
class serverSocketThread(threading.Thread):
    """Thread that binds a unix datagram socket and records the UDP
    destination ports of every punted packet it receives.

    Attributes set in __init__:
        threadID: opaque thread identifier (unused internally)
        sockName: filesystem path of the unix socket to bind
        sock:     the bound socket, populated by run()
        counters: shared dict receivePackets() records ports into
    """

    def __init__(self, threadID, sockName, counters):
        threading.Thread.__init__(self)
        self.threadID = threadID
        self.sockName = sockName
        self.sock = None
        self.counters = counters

    def run(self):
        self.sock = socket.socket(socket.AF_UNIX, socket.SOCK_DGRAM)
        try:
            # Remove a stale socket file from a previous run.
            os.unlink(self.sockName)
        except OSError:
            # Only ignore "file missing"-style errors; the original bare
            # except also swallowed KeyboardInterrupt/SystemExit.
            pass
        self.sock.bind(self.sockName)
        receivePackets(self.sock, self.counters)
class TestPuntSocket(VppTestCase):
    """ Punt Socket """

    # NOTE(review): these are class-level mutable attributes, shared by
    # every instance/subclass — confirm that is intended (tearDown does
    # clear sock_servers in place).
    ports = [1111, 2222, 3333, 4444]
    sock_servers = list()
    portsCheck = dict()
    nr_packets = 256

    @classmethod
    def setUpClass(cls):
        super(TestPuntSocket, cls).setUpClass()

    @classmethod
    def tearDownClass(cls):
        super(TestPuntSocket, cls).tearDownClass()

    @classmethod
    def setUpConstants(cls):
        # Pass a punt-socket stanza into the VPP startup configuration.
        cls.extra_vpp_punt_config = [
            "punt", "{", "socket", cls.tempdir+"/socket_punt", "}"]
        super(TestPuntSocket, cls).setUpConstants()

    def setUp(self):
        super(TestPuntSocket, self).setUp()
        random.seed()
        # Two packet-generator interfaces, both brought up.
        self.create_pg_interfaces(range(2))
        for i in self.pg_interfaces:
            i.admin_up()

    def tearDown(self):
        # Drop references to the socket-server threads between tests.
        del self.sock_servers[:]
        super(TestPuntSocket, self).tearDown()

    def socket_client_create(self, sock_name, id=None):
        """Start a serverSocketThread listening on *sock_name*."""
        thread = serverSocketThread(id, sock_name, self.portsCheck)
        self.sock_servers.append(thread)
        thread.start()

    def socket_client_close(self):
        """Close every socket opened by the server threads."""
        for thread in self.sock_servers:
            thread.sock.close()
class TestIP4PuntSocket(TestPuntSocket):
""" Punt Socket for IPv4 """
@classmethod
def setUpClass(cls):
    super(TestIP4PuntSocket, cls).setUpClass()

@classmethod
def tearDownClass(cls):
    super(TestIP4PuntSocket, cls).tearDownClass()

def setUp(self):
    # Configure IPv4 addresses and resolve ARP on every pg interface.
    super(TestIP4PuntSocket, self).setUp()
    for i in self.pg_interfaces:
        i.config_ip4()
        i.resolve_arp()

def tearDown(self):
    # Undo the IPv4 configuration and bring the interfaces down.
    super(TestIP4PuntSocket, self).tearDown()
    for i in self.pg_interfaces:
        i.unconfig_ip4()
        i.admin_down()
def test_punt_socket_dump(self):
""" Punt socket registration/deregistration"""
punts = self.vapi.punt_socket_dump(is_ip6=0)
self.assertEqual(len(punts), 0)
#
# configure a punt socket
#
self.vapi.punt_socket_register(1111, self.tempdir+"/socket_punt_1111")
self.vapi.punt_socket_register(2222, self.tempdir+"/socket_punt_2222")
punts = self.vapi.punt_socket_dump(is_ip6=0)
self.assertEqual(len(punts), 2)
self.assertEqual(punts[0].punt.l4_port, 1111)
self.assertEqual(punts[1].punt.l4_port, 2222)
#
# deregister a punt socket
#
self.vapi.punt_socket_deregister(1111)
punts = self.vapi.punt_socket_dump(is_ip6=0)
self.assertEqual(len(punts), 1)
#
# configure a punt socket again
#
self.vapi.punt_socket_register(1111, self.tempdir+"/socket_punt_1111")
self.vapi.punt_socket_register(3333, self.tempdir+"/socket_punt_3333")
punts = self.vapi.punt_socket_dump(is_ip6=0)
self.assertEqual(len(punts), 3)
#
# deregister all punt socket
#
self.vapi.punt_socket_deregister(1111)
self.vapi.punt_socket_deregister(2222)
self.vapi.punt_socket_deregister(3333)
punts = self.vapi.punt_socket_dump(is_ip6=0)
self.assertEqual(len(punts), 0)
def test_punt_socket_traffic_single_port_single_socket(self):
""" Punt socket traffic single port single socket"""
port = self.ports[0]
p = (Ether(src=self.pg0.remote_mac,
dst=self.pg0.local_mac) /
IP(src=self.pg0.remote_ip4, dst=self.pg0.local_ip4) /
UDP(sport=9876, dport=port) /
Raw('\xa5' * 100))
pkts = p * self.nr_packets
self.portsCheck[port] = self.nr_packets
punts = self.vapi.punt_socket_dump(is_ip6=0)
self.assertEqual(len(punts), 0)
#
# expect ICMP - port unreachable for all packets
#
self.vapi.cli("clear trace")
self.pg0.add_stream(pkts)
self.pg_enable_capture(self.pg_interfaces)
self.pg_start()
# FIXME - when punt socket deregister is implemented
# rx = self.pg0.get_capture(self.nr_packets)
# for p in rx:
# self.assertEqual(int(p[IP].proto), 1) # ICMP
# self.assertEqual(int(p[ICMP].code), 3) # unreachable
#
# configure a punt socket
#
self.socket_client_create(self.tempdir+"/socket_" + str(port))
self.vapi.punt_socket_register(port, self.tempdir+"/socket_" +
str(port))
punts = self.vapi.punt_socket_dump(is_ip6=0)
self.assertEqual(len(punts), 1)
self.logger.debug("Sending %s packets to port %d",
str(self.portsCheck[port]), port)
#
# expect punt socket and no packets on pg0
#
self.vapi.cli("clear errors")
self.vapi.cli("clear trace")
self.pg0.add_stream(pkts)
self.pg_enable_capture(self.pg_interfaces)
self.pg_start()
self.pg0.get_capture(0)
self.logger.info(self.vapi.cli("show trace"))
self.socket_client_close()
self.assertEqual(self.portsCheck[port], 0)
#
# remove punt socket. expect ICMP - port unreachable for all packets
#
self.vapi.punt_socket_deregister(port)
punts = self.vapi.punt_socket_dump(is_ip6=0)
self.assertEqual(len(punts), 0)
self.pg0.add_stream(pkts)
self.pg_enable_capture(self.pg_interfaces)
self.pg_start()
# FIXME - when punt socket deregister is implemented
# self.pg0.get_capture(nr_packets)
def test_punt_socket_traffic_multi_port_multi_sockets(self):
""" Punt socket traffic multi ports and multi sockets"""
for p in self.ports:
self.portsCheck[p] = 0
#
# create stream with random pakets count per given ports
#
pkts = list()
for _ in range(0, self.nr_packets):
# choose port from port list
p = random.choice(self.ports)
pkts.append((
Ether(src=self.pg0.remote_mac,
dst=self.pg0.local_mac) /
IP(src=self.pg0.remote_ip4, dst=self.pg0.local_ip4) /
UDP(sport=9876, dport=p) /
Raw('\xa5' * 100)))
self.portsCheck[p] += 1
#
# no punt socket
#
punts = self.vapi.punt_socket_dump(is_ip6=0)
self.assertEqual(len(punts), 0)
#
# configure a punt socket
#
for p in self.ports:
self.socket_client_create(self.tempdir+"/socket_" + str(p))
self.vapi.punt_socket_register(p, self.tempdir+"/socket_" + str(p))
punts = self.vapi.punt_socket_dump(is_ip6=0)
self.assertEqual(len(punts), len(self.ports))
for p in self.ports:
self.logger.debug("Sending %s packets to port %d",
str(self.portsCheck[p]), p)
#
# expect punt socket and no packets on pg0
#
self.vapi.cli("clear errors")
self.vapi.cli("clear trace")
self.pg0.add_stream(pkts)
self.pg_enable_capture(self.pg_interfaces)
self.pg_start()
self.pg0.get_capture(0)
self.logger.info(self.vapi.cli("show trace"))
self.socket_client_close()
for p in self.ports:
self.assertEqual(self.portsCheck[p], 0)
self.vapi.punt_socket_deregister(p)
punts = self.vapi.punt_socket_dump(is_ip6=0)
self.assertEqual(len(punts), 0)
def test_punt_socket_traffic_multi_ports_single_socket(self):
""" Punt socket traffic multi ports and single socket"""
for p in self.ports:
self.portsCheck[p] = 0
#
# create stream with random pakets count per given ports
#
pkts = list()
for _ in range(0, self.nr_packets):
# choose port from port list
p = random.choice(self.ports)
pkts.append((
Ether(src=self.pg0.remote_mac,
dst=self.pg0.local_mac) /
IP(src=self.pg0.remote_ip4, dst=self.pg0.local_ip4) /
UDP(sport=9876, dport=p) /
Raw('\xa5' * 100)))
self.portsCheck[p] += 1
#
# no punt socket
#
punts = self.vapi.punt_socket_dump(is_ip6=0)
self.assertEqual(len(punts), 0)
# configure a punt socket
#
self.socket_client_create(self.tempdir+"/socket_multi")
for p in self.ports:
self.vapi.punt_socket_register(p, self.tempdir+"/socket_multi")
punts = self.vapi.punt_socket_dump(is_ip6=0)
self.assertEqual(len(punts), len(self.ports))
for p in self.ports:
self.logger.debug("Sending %s packets to port %d",
str(self.portsCheck[p]), p)
#
# expect punt socket and no packets on pg0
#
self.vapi.cli("clear errors")
self.vapi.cli("clear trace")
self.pg0.add_stream(pkts)
self.pg_enable_capture(self.pg_interfaces)
self.pg_start()
self.pg0.get_capture(0)
self.logger.info(self.vapi.cli("show trace"))
self.socket_client_close()
for p in self.ports:
self.assertEqual(self.portsCheck[p], 0)
self.vapi.punt_socket_deregister(p)
punts = self.vapi.punt_socket_dump(is_ip6=0)
self.assertEqual(len(punts), 0)
class TestIP6PuntSocket(TestPuntSocket):
    """ Punt Socket for IPv6

    IPv6 variant of the punt-socket tests; mirrors TestIP4PuntSocket but
    registers with is_ip4=0 and dumps with is_ip6=1.
    """
    @classmethod
    def setUpClass(cls):
        super(TestIP6PuntSocket, cls).setUpClass()
    @classmethod
    def tearDownClass(cls):
        super(TestIP6PuntSocket, cls).tearDownClass()
    def setUp(self):
        super(TestIP6PuntSocket, self).setUp()
        # give both pg interfaces an IPv6 address and resolve neighbors
        for i in self.pg_interfaces:
            i.config_ip6()
            i.resolve_ndp()
    def tearDown(self):
        super(TestIP6PuntSocket, self).tearDown()
        for i in self.pg_interfaces:
            i.unconfig_ip6()
            i.admin_down()
    def test_punt_socket_dump(self):
        """ Punt socket registration """
        punts = self.vapi.punt_socket_dump(is_ip6=1)
        self.assertEqual(len(punts), 0)
        #
        # configure a punt socket
        #
        self.vapi.punt_socket_register(1111, self.tempdir+"/socket_1111",
                                       is_ip4=0)
        self.vapi.punt_socket_register(2222, self.tempdir+"/socket_2222",
                                       is_ip4=0)
        punts = self.vapi.punt_socket_dump(is_ip6=1)
        self.assertEqual(len(punts), 2)
        self.assertEqual(punts[0].punt.l4_port, 1111)
        self.assertEqual(punts[1].punt.l4_port, 2222)
        #
        # deregister a punt socket
        #
        self.vapi.punt_socket_deregister(1111, is_ip4=0)
        punts = self.vapi.punt_socket_dump(is_ip6=1)
        self.assertEqual(len(punts), 1)
        #
        # configure a punt socket again
        #
        self.vapi.punt_socket_register(1111, self.tempdir+"/socket_1111",
                                       is_ip4=0)
        punts = self.vapi.punt_socket_dump(is_ip6=1)
        self.assertEqual(len(punts), 2)
        #
        # deregister all punt sockets
        # NOTE(review): 3333 is never registered in this test; deregistering
        # it is presumably a harmless no-op copied from the IPv4 variant --
        # confirm against the punt_socket_deregister API semantics.
        #
        self.vapi.punt_socket_deregister(1111, is_ip4=0)
        self.vapi.punt_socket_deregister(2222, is_ip4=0)
        self.vapi.punt_socket_deregister(3333, is_ip4=0)
        punts = self.vapi.punt_socket_dump(is_ip6=1)
        self.assertEqual(len(punts), 0)
    def test_punt_socket_traffic_single_port_single_socket(self):
        """ Punt socket traffic single port single socket"""
        port = self.ports[0]
        p = (Ether(src=self.pg0.remote_mac,
                   dst=self.pg0.local_mac) /
             IPv6(src=self.pg0.remote_ip6, dst=self.pg0.local_ip6) /
             inet6.UDP(sport=9876, dport=port) /
             Raw('\xa5' * 100))
        # scapy '*' replicates the same packet nr_packets times
        pkts = p * self.nr_packets
        self.portsCheck[port] = self.nr_packets
        punts = self.vapi.punt_socket_dump(is_ip6=1)
        self.assertEqual(len(punts), 0)
        #
        # expect ICMPv6 - destination unreachable for all packets
        #
        self.vapi.cli("clear trace")
        self.pg0.add_stream(pkts)
        self.pg_enable_capture(self.pg_interfaces)
        self.pg_start()
        # FIXME - when punt socket deregister is implemented
        # rx = self.pg0.get_capture(self.nr_packets)
        # for p in rx:
        #     self.assertEqual(int(p[IPv6].nh), 58)                # ICMPv6
        #     self.assertEqual(int(p[ICMPv6DestUnreach].code),4)  # unreachable
        #
        # configure a punt socket
        #
        self.socket_client_create(self.tempdir+"/socket_" + str(port))
        self.vapi.punt_socket_register(port, self.tempdir+"/socket_" +
                                       str(port), is_ip4=0)
        punts = self.vapi.punt_socket_dump(is_ip6=1)
        self.assertEqual(len(punts), 1)
        self.logger.debug("Sending %s packets to port %d",
                          str(self.portsCheck[port]), port)
        #
        # expect punt socket and no packets on pg0
        #
        self.vapi.cli("clear errors")
        self.vapi.cli("clear trace")
        self.pg0.add_stream(pkts)
        self.pg_enable_capture(self.pg_interfaces)
        self.pg_start()
        self.pg0.get_capture(0)
        self.logger.info(self.vapi.cli("show trace"))
        self.socket_client_close()
        # the socket server must have consumed every packet for this port
        self.assertEqual(self.portsCheck[port], 0)
        #
        # remove punt socket. expect ICMP - dest. unreachable for all packets
        #
        self.vapi.punt_socket_deregister(port, is_ip4=0)
        punts = self.vapi.punt_socket_dump(is_ip6=1)
        self.assertEqual(len(punts), 0)
        self.pg0.add_stream(pkts)
        self.pg_enable_capture(self.pg_interfaces)
        self.pg_start()
        # FIXME - when punt socket deregister is implemented
        # self.pg0.get_capture(nr_packets)
    def test_punt_socket_traffic_multi_port_multi_sockets(self):
        """ Punt socket traffic multi ports and multi sockets"""
        for p in self.ports:
            self.portsCheck[p] = 0
        #
        # create stream with random packets count per given ports
        #
        pkts = list()
        for _ in range(0, self.nr_packets):
            # choose port from port list
            p = random.choice(self.ports)
            pkts.append((
                Ether(src=self.pg0.remote_mac,
                      dst=self.pg0.local_mac) /
                IPv6(src=self.pg0.remote_ip6, dst=self.pg0.local_ip6) /
                inet6.UDP(sport=9876, dport=p) /
                Raw('\xa5' * 100)))
            self.portsCheck[p] += 1
        #
        # no punt socket
        #
        punts = self.vapi.punt_socket_dump(is_ip6=1)
        self.assertEqual(len(punts), 0)
        #
        # configure a punt socket per port
        #
        for p in self.ports:
            self.socket_client_create(self.tempdir+"/socket_" + str(p))
            self.vapi.punt_socket_register(p, self.tempdir+"/socket_" + str(p),
                                           is_ip4=0)
        punts = self.vapi.punt_socket_dump(is_ip6=1)
        self.assertEqual(len(punts), len(self.ports))
        for p in self.ports:
            self.logger.debug("Sending %s packets to port %d",
                              str(self.portsCheck[p]), p)
        #
        # expect punt socket and no packets on pg0
        #
        self.vapi.cli("clear errors")
        self.vapi.cli("clear trace")
        self.pg0.add_stream(pkts)
        self.pg_enable_capture(self.pg_interfaces)
        self.pg_start()
        self.pg0.get_capture(0)
        self.logger.info(self.vapi.cli("show trace"))
        self.socket_client_close()
        # every per-port counter must have been drained by its server
        for p in self.ports:
            self.assertEqual(self.portsCheck[p], 0)
            self.vapi.punt_socket_deregister(p, is_ip4=0)
        punts = self.vapi.punt_socket_dump(is_ip6=1)
        self.assertEqual(len(punts), 0)
    def test_punt_socket_traffic_multi_ports_single_socket(self):
        """ Punt socket traffic multi ports and single socket"""
        for p in self.ports:
            self.portsCheck[p] = 0
        #
        # create stream with random packets count per given ports
        #
        pkts = list()
        for _ in range(0, self.nr_packets):
            # choose port from port list
            p = random.choice(self.ports)
            pkts.append((
                Ether(src=self.pg0.remote_mac,
                      dst=self.pg0.local_mac) /
                IPv6(src=self.pg0.remote_ip6, dst=self.pg0.local_ip6) /
                inet6.UDP(sport=9876, dport=p) /
                Raw('\xa5' * 100)))
            self.portsCheck[p] += 1
        #
        # no punt socket
        #
        punts = self.vapi.punt_socket_dump(is_ip6=1)
        self.assertEqual(len(punts), 0)
        #
        # configure a single punt socket shared by all ports
        #
        self.socket_client_create(self.tempdir+"/socket_multi")
        for p in self.ports:
            self.vapi.punt_socket_register(p, self.tempdir+"/socket_multi",
                                           is_ip4=0)
        punts = self.vapi.punt_socket_dump(is_ip6=1)
        self.assertEqual(len(punts), len(self.ports))
        for p in self.ports:
            self.logger.debug("Send %s packets to port %d",
                              str(self.portsCheck[p]), p)
        #
        # expect punt socket and no packets on pg0
        #
        self.vapi.cli("clear errors")
        self.vapi.cli("clear trace")
        self.pg0.add_stream(pkts)
        self.pg_enable_capture(self.pg_interfaces)
        self.pg_start()
        self.pg0.get_capture(0)
        self.logger.info(self.vapi.cli("show trace"))
        self.socket_client_close()
        for p in self.ports:
            self.assertEqual(self.portsCheck[p], 0)
            self.vapi.punt_socket_deregister(p, is_ip4=0)
        punts = self.vapi.punt_socket_dump(is_ip6=1)
        self.assertEqual(len(punts), 0)
# Run under VPP's test runner when executed directly.
if __name__ == '__main__':
    unittest.main(testRunner=VppTestRunner)
| 32.434049 | 79 | 0.587081 |
7959bb09a3ee2d73900bc7ab52d5a9fc5a87f062 | 10,010 | py | Python | My_data.py | jw787/- | 4fd957095e21b557bad4640fc75658d2037ca21a | [
"Apache-2.0"
] | 2 | 2020-03-27T02:25:22.000Z | 2020-03-27T04:07:36.000Z | My_data.py | KyanChen/FaceKeypointsDetection | bb4dc73fc8a3ffb8fc507b8e10a00dd6b2ca3250 | [
"Apache-2.0"
] | null | null | null | My_data.py | KyanChen/FaceKeypointsDetection | bb4dc73fc8a3ffb8fc507b8e10a00dd6b2ca3250 | [
"Apache-2.0"
] | 1 | 2020-12-14T07:24:39.000Z | 2020-12-14T07:24:39.000Z | import numpy as np
import cv2
import torch
from torchvision import transforms
from torch.utils.data import Dataset
from PIL import Image, ImageDraw
import os
import pandas as pd
import matplotlib.pyplot as plt
train_boarder = 112
class FaceLandmarksDataset(Dataset):
    """Face-landmark dataset that augments the index with negative samples.

    Reads a CSV index (columns: ``path``, ``rect``, ``points``) describing
    face crops, labels every row class 1, then appends randomly generated
    non-face rectangles (class 0, all-zero landmarks) -- two per CSV row.

    :param data_file: path to the CSV index
    :param transform: optional callable applied to each sample dict
    """
    def __init__(self, data_file, transform=None):
        self.transform = transform
        if not os.path.exists(data_file):
            print(data_file + " does not exist!")
        self.file_info = pd.read_csv(data_file, index_col=0)
        # every CSV row is a face -> positive class
        self.file_info['class'] = 1
        # two negative (non-face) samples per positive row
        self.negative_samples = self.get_negative_samples(2)
        self.file_info = pd.concat([self.file_info, self.negative_samples])
        self.size = len(self.file_info)

    def __len__(self):
        return self.size

    def __getitem__(self, idx):
        data = self.file_info.iloc[idx]
        img_name = data['path']
        # np.int was removed in NumPy 1.20 -- use the builtin int
        rect = np.array(eval(data['rect']), dtype=int)
        points = eval(data['points'])
        class_ = data['class']
        # imdecode + fromfile instead of cv2.imread: handles non-ASCII paths
        img = cv2.imdecode(np.fromfile(img_name, dtype=np.uint8), cv2.IMREAD_COLOR)
        img_crop = img[rect[1]:rect[3], rect[0]:rect[2], :]  # face crop
        if class_ == 1:
            # landmarks are stored in full-image coordinates; translate
            # into the crop's local frame (rect[0:2] is the top-left corner)
            landmarks = np.array(points).astype(np.float32)
            landmarks = landmarks - rect[0:2]
        else:
            # negative sample: no face -> all-zero landmark placeholder
            landmarks = np.zeros((21, 2), dtype=np.float32)
        sample = {'image': img_crop, 'landmarks': landmarks, 'label': class_}
        if self.transform:
            sample = self.transform(sample)
        return sample

    def get_negative_samples(self, negative_num):
        """Generate up to *negative_num* non-face rectangles per image row.

        Candidate rectangles are rejected when they overlap any face box
        too much (IoU > 0.4), lie strictly inside a face box, or have an
        extreme aspect ratio.  Returns a DataFrame shaped like file_info
        with class 0 and dummy points.
        """
        def get_iou(rect, rects):
            # intersection corners: max of top-lefts, min of bottom-rights
            LT = np.maximum(rect[:2], rects[:, :2])
            # BUGFIX: was np.maximum, which computes a union-like box and
            # yields a wrong (inflated) intersection area
            RB = np.minimum(rect[2:], rects[:, 2:])
            overlap_wh = RB - LT
            overlap_wh[overlap_wh < 0] = 0
            intersection = overlap_wh[:, 0] * overlap_wh[:, 1]
            area_rect = (rect[2] - rect[0]) * (rect[3] - rect[1])
            area_rects = (rects[:, 2] - rects[:, 0]) * (rects[:, 3] - rects[:, 1])
            iou_ = intersection / (1e-10 + area_rect + area_rects - intersection)
            return iou_

        def is_inclusion_relation(rect, rects):
            # True when rect lies strictly inside any row of rects
            flag_w = rect[:2] > rects[:, :2]
            flag_h = rect[2:] < rects[:, 2:]
            flag_wh = np.concatenate((flag_w, flag_h), axis=1)
            return np.any(np.all(flag_wh, axis=1))

        negative_data_info = {'path': [], 'rect': []}
        for _, rows_data in self.file_info.iterrows():
            img_path = rows_data['path']
            img = cv2.imdecode(np.fromfile(img_path, dtype=np.uint8), cv2.IMREAD_COLOR)
            height, width, _ = img.shape
            # collect every face rect belonging to this image
            rects_in_same_img_dataframe = self.file_info[self.file_info['path'] == img_path]
            rects = []
            for _, rect_data in rects_in_same_img_dataframe.iterrows():
                rects += eval(rect_data['rect'])
            rects = np.array(rects).astype(int).reshape(-1, 4)
            wh = rects[:, 2:] - rects[:, 0:2]
            max_wh = np.max(wh, 0)
            min_wh = np.min(wh, 0)
            # give up after a bounded number of attempts per image
            try_times_threshold = 200
            gen_valid_rect_num = 0
            for _ in range(try_times_threshold):
                # candidate size between ~half the smallest and the largest face
                gen_w = np.random.randint(int(max(0.5 * min_wh[0], 2)) - 1, max_wh[0])
                gen_h = np.random.randint(int(max(0.5 * min_wh[1], 2)) - 1, max_wh[1])
                # reject extreme aspect ratios (outside 6:10 .. 10:6)
                if gen_w / gen_h < 6 / 10 or gen_w / gen_h > 10 / 6:
                    continue
                gen_left = np.random.randint(0, width - gen_w)
                gen_top = np.random.randint(0, height - gen_h)
                gen_right = gen_left + gen_w
                gen_bottom = gen_top + gen_h
                gen_rect = [gen_left, gen_top, gen_right, gen_bottom]
                iou = get_iou(np.array(gen_rect), rects)
                if np.any(iou > 0.4):
                    continue
                if is_inclusion_relation(np.array(gen_rect), rects):
                    continue
                gen_valid_rect_num += 1
                if gen_valid_rect_num > negative_num:
                    break
                negative_data_info['path'].append(rows_data['path'])
                negative_data_info['rect'].append(str(gen_rect))
        data = pd.DataFrame(negative_data_info)
        data['points'] = str([0, 0])
        data['class'] = 0
        return data
class Normalize(object):
    """Resize the sample image to train_boarder x train_boarder (112 x 112).

    Landmark coordinates of positive samples are rescaled to the resized
    image; negative samples (label 0) keep their all-zero landmarks.
    """
    def __call__(self, sample):
        label = sample['label']
        landmarks = sample['landmarks']
        image = sample['image']
        src_h, src_w, _ = image.shape
        resized = cv2.resize(image, (train_boarder, train_boarder))
        if label:
            # scale x by the width ratio and y by the height ratio (in place)
            landmarks[:, 0] = landmarks[:, 0] * train_boarder / src_w
            landmarks[:, 1] = landmarks[:, 1] * train_boarder / src_h
        return {'image': resized, 'landmarks': landmarks, 'label': label}
class RandomHorizontalFlip(object):
    """Mirror the sample left-right with probability ``p``.

    Args:
        p (float): probability of the image being flipped. Default 0.5.
    """
    def __init__(self, p=0.5):
        self.p = p

    def __call__(self, sample):
        image = sample['image']
        landmarks = sample['landmarks']
        label = sample['label']
        flip_now = np.random.random() < self.p
        if flip_now:
            image = image[:, ::-1].copy()
            if label:
                # mirror the x coordinates around the (resized) image width
                landmarks[:, 0] = train_boarder - landmarks[:, 0]
        return {'image': image, 'landmarks': landmarks, 'label': label}
class RandomRotate(object):
    """Randomly rotate the sample within [-a, a) degrees.

    Args:
        p (float): rotation happens when a uniform draw exceeds p.
            Default value = 0.5
        a (int): angle limit, default 5 degrees
    """
    def __init__(self, p=0.5, a=5):
        self.p = p
        self.angle = a

    def __call__(self, sample):
        image = sample['image']
        landmarks = sample['landmarks']
        label = sample['label']
        if np.random.random() > self.p:
            # pick a rotation angle and rotate about the image center
            theta = np.random.randint(-self.angle, self.angle)
            rows, cols, _ = image.shape
            pivot = (cols // 2, rows // 2)
            M = cv2.getRotationMatrix2D(pivot, theta, 1.0)
            image = cv2.warpAffine(image, M, (cols, rows))
            if label == 1:
                # apply the same affine transform to every landmark
                homogeneous = np.insert(landmarks, obj=2, values=1, axis=1)
                landmarks = np.asarray([np.matmul(M, pt) for pt in homogeneous])
        # downstream transforms expect float32 pixels
        image = np.asarray(image, dtype=np.float32)
        return {'image': image, 'landmarks': landmarks, 'label': label}
class ToTensor(object):
    """Convert the sample's ndarrays to torch tensors.

    The image goes from H x W x C to C x H x W and is scaled to [0, 1];
    landmarks are flattened into a 1-D float tensor; the label becomes a
    single-element float tensor.
    """
    def channel_norm(self, img):
        # zero-mean / unit-variance normalisation (kept for reference;
        # __call__ currently uses plain [0, 1] scaling instead)
        mean = np.mean(img)
        std = np.std(img)
        return (img - mean) / (std + 0.0000001)

    def __call__(self, sample):
        # numpy image: H x W x C  ->  torch image: C x H x W
        image = sample['image'] / 255.0
        image = image.transpose((2, 0, 1))
        flat_landmarks = sample['landmarks'].reshape(-1)
        return {'image': torch.from_numpy(image).float(),
                'landmarks': torch.from_numpy(flat_landmarks).float(),
                'label': torch.from_numpy(np.array([sample['label']])).float()}
def get_train_val_data():
    """Build the train and validation datasets with their transforms.

    Training adds random flip/rotation augmentation on top of the
    resize + tensor conversion used for validation.

    Returns:
        (train_dataset, test_dataset) tuple of FaceLandmarksDataset.
    """
    train_pipeline = transforms.Compose([
        Normalize(),                 # resize + landmark rescale
        RandomHorizontalFlip(0.5),   # mirror augmentation
        RandomRotate(0.25, 5),       # small random rotation
        ToTensor(),                  # NxCxHxW float tensors
    ])
    val_pipeline = transforms.Compose([
        Normalize(),
        ToTensor(),
    ])
    train_dataset = FaceLandmarksDataset('train_data.csv',
                                         transform=train_pipeline)
    test_dataset = FaceLandmarksDataset('val_data.csv',
                                        transform=val_pipeline)
    return train_dataset, test_dataset
def _test_My_data():
    """Visual smoke test: draw landmarks on the first 10 training samples.

    NOTE(review): train_loader.dataset[i] applies the transform pipeline,
    so 'image'/'landmarks' are torch tensors here, yet the code below
    calls numpy-style .astype on them -- this looks broken unless it was
    run with the transforms disabled. Confirm before relying on it.
    """
    train_set, val_set = get_train_val_data()
    train_loader = torch.utils.data.DataLoader(train_set, batch_size=256, shuffle=True)
    valid_loader = torch.utils.data.DataLoader(val_set, batch_size=256)
    data_loaders = {'train': train_loader, 'val': valid_loader}
    for i in range(0,10):
        sample = train_loader.dataset[i]
        img = Image.fromarray(sample['image'].astype('uint8'))
        points = sample['landmarks']
        class_ = sample['label']
        # pair up into (x, y) coordinates and draw them in red
        landmarks = points.astype('float').reshape(-1, 2)
        draw = ImageDraw.Draw(img)
        x = landmarks[:, 0]
        y = landmarks[:, 1]
        points_zip = list(zip(x, y))
        draw.point(points_zip, (255, 0, 0))
        # img.save(r'H:\DataSet\慧科\人脸关键点检测\result\{:d}.jpg'.format(index))
        plt.imshow(img)
        plt.show()
train_set, val_set = get_train_val_data() | 38.35249 | 108 | 0.570829 |
7959bba7bfc04ebef2c9a0e7a1eb4a0528dd0acb | 314 | py | Python | aitlas/datasets/dfc15_multilabel.py | alex-hayhoe/aitlas-docker | 57686f9c18f28c884511fc0c84618506cbf61eae | [
"MIT"
] | null | null | null | aitlas/datasets/dfc15_multilabel.py | alex-hayhoe/aitlas-docker | 57686f9c18f28c884511fc0c84618506cbf61eae | [
"MIT"
] | null | null | null | aitlas/datasets/dfc15_multilabel.py | alex-hayhoe/aitlas-docker | 57686f9c18f28c884511fc0c84618506cbf61eae | [
"MIT"
] | null | null | null | from .multilabel_classification import MultiLabelClassificationDataset
LABELS = ["impervious", "water", "clutter", "vegetation", "building", "tree", "boat", "car"]
class DFC15MultiLabelDataset(MultiLabelClassificationDataset):
url = "https://github.com/Hua-YS/DFC15-Multilabel-Dataset"
labels = LABELS
| 31.4 | 92 | 0.754777 |
7959bcb085a0ab9183540da0cacafe89ff5f9c64 | 9,345 | py | Python | models/CaptionModel.py | gstoica27/object_relation_transformer | dc41a88c3e2c01677347edfd3fb5479181388ff8 | [
"MIT"
] | null | null | null | models/CaptionModel.py | gstoica27/object_relation_transformer | dc41a88c3e2c01677347edfd3fb5479181388ff8 | [
"MIT"
] | null | null | null | models/CaptionModel.py | gstoica27/object_relation_transformer | dc41a88c3e2c01677347edfd3fb5479181388ff8 | [
"MIT"
] | null | null | null | # This file contains ShowAttendTell and AllImg model
# ShowAttendTell is from Show, Attend and Tell: Neural Image Caption Generation with Visual Attention
# https://arxiv.org/abs/1502.03044
# AllImg is a model where
# img feature is concatenated with word embedding at every time step as the input of lstm
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.autograd import *
import misc.utils as utils
from functools import reduce
class CaptionModel(nn.Module):
    """Base class for captioning models.

    forward() dispatches to ``self._<mode>`` and beam_search() implements
    diverse beam search over log-probabilities produced by the subclass.
    Subclasses must provide ``self.seq_length`` and
    ``self.get_logprobs_state`` -- not visible in this file.
    """
    def __init__(self):
        super(CaptionModel, self).__init__()
    def debugger(self, mode, *args, **kwargs):
        # development hook: drop straight into pdb
        import pdb; pdb.set_trace()
    # implements beam search
    # calls beam_step and returns the final set of beams
    # augments log-probabilities with diversity terms when number of groups > 1
    # @torch.jit.script_method
    def forward(self, *args, **kwargs):
        # dispatch on the 'mode' kwarg, e.g. mode='sample' -> self._sample(...)
        # import pdb;pdb.set_trace()
        mode = kwargs.get('mode', 'forward')
        if 'mode' in kwargs:
            del kwargs['mode']
        return getattr(self, '_'+mode)(*args, **kwargs)
    def beam_search(self, init_state, init_logprobs, *args, **kwargs):
        """Diverse beam search: beams are split into group_size groups and
        each group is penalised for re-using words chosen by earlier groups
        at the same time step. Returns a list of finished beams, each a dict
        with keys 'seq', 'logps', 'unaug_p' and 'p'.
        """
        # function computes the similarity score to be augmented
        def add_diversity(beam_seq_table, logprobsf, t, divm, diversity_lambda, bdash):
            local_time = t - divm
            unaug_logprobsf = logprobsf.clone()
            # subtract diversity_lambda for every word already picked by an
            # earlier group at this (local) time step
            for prev_choice in range(divm):
                prev_decisions = beam_seq_table[prev_choice][local_time]
                for sub_beam in range(bdash):
                    for prev_labels in range(bdash):
                        logprobsf[sub_beam][prev_decisions[prev_labels]] = logprobsf[sub_beam][prev_decisions[prev_labels]] - diversity_lambda
            return unaug_logprobsf
        # does one step of classical beam search
        def beam_step(logprobsf, unaug_logprobsf, beam_size, t, beam_seq, beam_seq_logprobs, beam_logprobs_sum, state):
            #INPUTS:
            #logprobsf: probabilities augmented after diversity
            #beam_size: obvious
            #t        : time instant
            #beam_seq : tensor contanining the beams
            #beam_seq_logprobs: tensor contanining the beam logprobs
            #beam_logprobs_sum: tensor contanining joint logprobs
            #OUPUTS:
            #beam_seq : tensor containing the word indices of the decoded captions
            #beam_seq_logprobs : log-probability of each decision made, same size as beam_seq
            #beam_logprobs_sum : joint log-probability of each beam
            ys,ix = torch.sort(logprobsf,1,True)
            candidates = []
            cols = min(beam_size, ys.size(1))
            rows = beam_size
            if t == 0:
                # at t=0 all beams are identical, so expand only one row
                rows = 1
            for c in range(cols): # for each column (word, essentially)
                for q in range(rows): # for each beam expansion
                    #compute logprob of expanding beam q with word in (sorted) position c
                    local_logprob = ys[q,c].item()
                    candidate_logprob = beam_logprobs_sum[q] + local_logprob
                    local_unaug_logprob = unaug_logprobsf[q,ix[q,c]]
                    candidates.append({'c':ix[q,c], 'q':q, 'p':candidate_logprob, 'r':local_unaug_logprob})
            candidates = sorted(candidates,  key=lambda x: -x['p'])
            new_state = [_.clone() for _ in state]
            #beam_seq_prev, beam_seq_logprobs_prev
            if t >= 1:
                # we'll need these as reference when we fork beams around
                beam_seq_prev = beam_seq[:t].clone()
                beam_seq_logprobs_prev = beam_seq_logprobs[:t].clone()
            for vix in range(beam_size):
                v = candidates[vix]
                #fork beam index q into index vix
                if t >= 1:
                    beam_seq[:t, vix] = beam_seq_prev[:, v['q']]
                    beam_seq_logprobs[:t, vix] = beam_seq_logprobs_prev[:, v['q']]
                #rearrange recurrent states
                for state_ix in range(len(new_state)):
                    #  copy over state in previous beam q to new beam at vix
                    new_state[state_ix][:, vix] = state[state_ix][:, v['q']] # dimension one is time step
                #append new end terminal at the end of this beam
                beam_seq[t, vix] = v['c'] # c'th word is the continuation
                beam_seq_logprobs[t, vix] = v['r'] # the raw logprob here
                beam_logprobs_sum[vix] = v['p'] # the new (sum) logprob along this beam
            state = new_state
            return beam_seq,beam_seq_logprobs,beam_logprobs_sum,state,candidates
        # Start diverse_beam_search
        opt = kwargs['opt']
        beam_size = opt.get('beam_size', 10)
        group_size = opt.get('group_size', 1)
        diversity_lambda = opt.get('diversity_lambda', 0.5)
        decoding_constraint = opt.get('decoding_constraint', 0)
        max_ppl = opt.get('max_ppl', 0)
        bdash = beam_size // group_size # beam per group
        # INITIALIZATIONS
        beam_seq_table = [torch.LongTensor(self.seq_length, bdash).zero_() for _ in range(group_size)]
        beam_seq_logprobs_table = [torch.FloatTensor(self.seq_length, bdash).zero_() for _ in range(group_size)]
        beam_logprobs_sum_table = [torch.zeros(bdash) for _ in range(group_size)]
        # logprobs # logprobs predicted in last time step, shape (beam_size, vocab_size+1)
        done_beams_table = [[] for _ in range(group_size)]
        state_table = [list(torch.unbind(_)) for _ in torch.stack(init_state).chunk(group_size, 2)]
        logprobs_table = list(init_logprobs.chunk(group_size, 0))
        # END INIT
        # Chunk elements in the args
        args = list(args)
        args = [_.chunk(group_size) if _ is not None else [None]*group_size for _ in args]
        args = [[args[i][j] for i in range(len(args))] for j in range(group_size)]
        # groups run staggered in time: group divm starts at step divm
        for t in range(self.seq_length + group_size - 1):
            for divm in range(group_size):
                if t >= divm and t <= self.seq_length + divm - 1:
                    # add diversity
                    logprobsf = logprobs_table[divm].data.float()
                    # suppress previous word
                    if decoding_constraint and t-divm > 0:
                        logprobsf.scatter_(1, beam_seq_table[divm][t-divm-1].unsqueeze(1).cuda(), float('-inf'))
                    # suppress UNK tokens in the decoding
                    logprobsf[:,logprobsf.size(1)-1] = logprobsf[:, logprobsf.size(1)-1] - 1000
                    # diversity is added here
                    # the function directly modifies the logprobsf values and hence, we need to return
                    # the unaugmented ones for sorting the candidates in the end. # for historical
                    # reasons :-)
                    unaug_logprobsf = add_diversity(beam_seq_table,logprobsf,t,divm,diversity_lambda,bdash)
                    # infer new beams
                    beam_seq_table[divm],\
                    beam_seq_logprobs_table[divm],\
                    beam_logprobs_sum_table[divm],\
                    state_table[divm],\
                    candidates_divm = beam_step(logprobsf,
                                                unaug_logprobsf,
                                                bdash,
                                                t-divm,
                                                beam_seq_table[divm],
                                                beam_seq_logprobs_table[divm],
                                                beam_logprobs_sum_table[divm],
                                                state_table[divm])
                    # if time's up... or if end token is reached then copy beams
                    for vix in range(bdash):
                        if beam_seq_table[divm][t-divm,vix] == 0 or t == self.seq_length + divm - 1:
                            final_beam = {
                                'seq': beam_seq_table[divm][:, vix].clone(),
                                'logps': beam_seq_logprobs_table[divm][:, vix].clone(),
                                'unaug_p': beam_seq_logprobs_table[divm][:, vix].sum().item(),
                                'p': beam_logprobs_sum_table[divm][vix].item()
                            }
                            if max_ppl:
                                # normalise by length -> max-perplexity criterion
                                final_beam['p'] = final_beam['p'] / (t-divm+1)
                            done_beams_table[divm].append(final_beam)
                            # don't continue beams from finished sequences
                            beam_logprobs_sum_table[divm][vix] = -1000
                    # move the current group one step forward in time
                    it = beam_seq_table[divm][t-divm]
                    logprobs_table[divm], state_table[divm] = self.get_logprobs_state(it.cuda(), *(args[divm] + [state_table[divm]]))
        # all beams are sorted by their log-probabilities
        done_beams_table = [sorted(done_beams_table[i], key=lambda x: -x['p'])[:bdash] for i in range(group_size)]
        done_beams = reduce(lambda a,b:a+b, done_beams_table)
        return done_beams
| 51.065574 | 142 | 0.571001 |
7959bd10f184170b7efffe25bddaf3d8771ede7b | 1,086 | py | Python | OOP/oop_terms/super().py | danielkpodo/python-zero-to-mastery | d39468f48211bc82e4e2613745d9107d433e05af | [
"MIT"
] | null | null | null | OOP/oop_terms/super().py | danielkpodo/python-zero-to-mastery | d39468f48211bc82e4e2613745d9107d433e05af | [
"MIT"
] | null | null | null | OOP/oop_terms/super().py | danielkpodo/python-zero-to-mastery | d39468f48211bc82e4e2613745d9107d433e05af | [
"MIT"
] | null | null | null | # super() is referrin to the super class or the class from which the child inherits
# it helps us to inherit attr of the paret class
# To do this you pass the parameters you want to inherit to the __init__ func and call super
# To do this we do super__init__(the attibutes we want to inherit)
# super side in child takes the self argument
# when using the super ignore the self attr
class Programmer:
    """A programmer with a username, years of experience and a language."""

    def __init__(self, username, years, language):
        self.username, self.years, self.language = username, years, language

    def describe_programmer(self):
        """Return a one-line summary of this programmer's profile."""
        return f"{self.username} knows {self.language} and has {self.years}yrs experience"
class JuniorDeveloper(Programmer):
    """A Programmer subclass that additionally records a skill level."""

    def __init__(self, username, years, language, skill):
        # reuse the parent's attribute initialization
        super().__init__(username, years, language)
        self.skill = skill

    def technical_skills(self):
        """Return a welcome message mentioning the skill level."""
        return f"Welcome {self.username} you are a {self.skill} programmer"
# Quick demonstration: subclass method plus an inherited attribute.
junior_dev = JuniorDeveloper("narh", 1, "Python", 'Proficient')
print(junior_dev.technical_skills())
print(junior_dev.username)
| 36.2 | 92 | 0.723757 |
7959bfd3402a6589091b8fbb49fb80d8a1bd37df | 383 | py | Python | events/migrations/0040_tournament_games.py | RVHowarth/warwick_gg | a8a1a8f902dad76ba77025839c49fc34178af2b3 | [
"MIT"
] | 5 | 2018-03-08T13:02:07.000Z | 2020-04-09T13:36:20.000Z | events/migrations/0040_tournament_games.py | RVHowarth/warwick_gg | a8a1a8f902dad76ba77025839c49fc34178af2b3 | [
"MIT"
] | 15 | 2018-05-29T13:22:40.000Z | 2022-03-11T23:20:32.000Z | events/migrations/0040_tournament_games.py | RVHowarth/warwick_gg | a8a1a8f902dad76ba77025839c49fc34178af2b3 | [
"MIT"
] | 7 | 2018-05-26T15:15:43.000Z | 2020-01-04T20:24:33.000Z | # Generated by Django 2.2.1 on 2019-05-16 10:26
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('events', '0039_auto_20190516_1123'),
]
operations = [
migrations.AddField(
model_name='tournament',
name='games',
field=models.TextField(blank=True),
),
]
| 20.157895 | 47 | 0.5953 |
7959c03bfa52facdb397d5f08bc28a82623a46cd | 1,359 | py | Python | sklearnex/_utils.py | fschlimb/daal4py | 51015148f92db728f23e8e4628c393dff2df23eb | [
"Apache-2.0"
] | 1 | 2021-08-13T13:39:17.000Z | 2021-08-13T13:39:17.000Z | sklearnex/_utils.py | raoberman/daal4py | 65e74dd90342bebbfbb51f1057db9a78ec818b9c | [
"Apache-2.0"
] | null | null | null | sklearnex/_utils.py | raoberman/daal4py | 65e74dd90342bebbfbb51f1057db9a78ec818b9c | [
"Apache-2.0"
] | null | null | null | #!/usr/bin/env python
#===============================================================================
# Copyright 2021 Intel Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#===============================================================================
def set_sklearn_ex_verbose():
import logging
import warnings
import os
import sys
logLevel = os.environ.get("SKLEARNEX_VERBOSE")
try:
if logLevel is not None:
logging.basicConfig(
stream=sys.stdout,
format='SKLEARNEX %(levelname)s: %(message)s', level=logLevel.upper())
except Exception:
warnings.warn('Unknown level "{}" for logging.\n'
'Please, use one of "CRITICAL", "ERROR", '
'"WARNING", "INFO", "DEBUG".'.format(logLevel))
| 41.181818 | 87 | 0.573216 |
7959c106f92841a5302bd4955304eb3abb723f0b | 4,534 | py | Python | Kmeans/kmeans_cluster.py | johnny161/Text-Clustering | d3eb7cebfb7679d10070e5ba20096631c92bb673 | [
"Apache-2.0"
] | null | null | null | Kmeans/kmeans_cluster.py | johnny161/Text-Clustering | d3eb7cebfb7679d10070e5ba20096631c92bb673 | [
"Apache-2.0"
] | null | null | null | Kmeans/kmeans_cluster.py | johnny161/Text-Clustering | d3eb7cebfb7679d10070e5ba20096631c92bb673 | [
"Apache-2.0"
] | null | null | null | # -*- coding: utf8 -*-
import os, sys
from sklearn.cluster import KMeans
from sklearn import feature_extraction
from sklearn.feature_extraction.text import TfidfTransformer
from sklearn.feature_extraction.text import CountVectorizer
from sklearn.metrics import silhouette_score
from sklearn.manifold import TSNE
import matplotlib.pyplot as plt
import numpy as np
'''vectorize the input documents'''
def tfidf_vector(corpus_path):
corpus_train=[]
#
target_train=[]
for line in open(corpus_path):
line = line.strip().split('\t')
if len(line) == 2:
words = line[1]
category = line[0]
target_train.append(category)
corpus_train.append(words)
print ("build train-corpus done!!")
count_v1 = CountVectorizer(max_df = 0.4, min_df = 0.01)
counts_train = count_v1.fit_transform(corpus_train)
word_dict = {}
for index, word in enumerate(count_v1.get_feature_names()):#出现3次以上的关键词
word_dict[index] = word
print ("the shape of train is " + repr(counts_train.shape))
tfidftransformer = TfidfTransformer()
tfidf_train = tfidftransformer.fit_transform(counts_train)
return tfidf_train, word_dict
'''topic cluster'''
def cluster_kmeans(tfidf_train, word_dict, cluster_docs, cluster_keywords, num_cluster):
f_docs = open(cluster_docs, 'w+')
km = KMeans(n_clusters = num_clusters)
km.fit(tfidf_train)
clusters = km.labels_.tolist()
cluster_dict = {}
order_centroids = km.cluster_centers_.argsort()[:, ::-1]
doc = 1
for cluster in clusters:
f_docs.write(str(doc) + ',' + str(cluster) + '\n')
doc += 1
if cluster not in cluster_dict:
cluster_dict[cluster] = 1
else:
cluster_dict[cluster] += 1
f_docs.close()
for idx in range(num_cluster): # 每个聚类的数量
print ("cluster" + str(idx + 1) + ': ' + str(cluster_dict[idx]))
cluster = 1
f_clusterwords = open(cluster_keywords, 'w+')
for ind in order_centroids: # 每个聚类选 50 个词
words = []
for index in ind[:10]:
words.append(word_dict[index])
print (cluster,','.join(words))
f_clusterwords.write(str(cluster) + '\t' + ','.join(words) + '\n')
cluster += 1
print ('*****' * 5)
f_clusterwords.close()
visualization(tfidf_train.toarray(), km.labels_)
'''select the best cluster num'''
def best_kmeans(tfidf_matrix, word_dict):
import matplotlib.pyplot as plt
# from matplotlib.font_manager import FontProperties
from scipy.spatial.distance import cdist
import numpy as np
K = range(1, 50)
meandistortions = []
for k in K:
print (k, '****'*5)
kmeans = KMeans(n_clusters = k)
kmeans.fit(tfidf_matrix)
meandistortions.append(sum(np.min(cdist(tfidf_matrix.toarray(), kmeans.cluster_centers_, 'euclidean'), axis=1)) /\
tfidf_matrix.shape[0])
plt.plot(K, meandistortions, 'bx-')
plt.grid(True)
plt.xlabel('Number of clusters')
plt.ylabel('Average within-cluster sum of squares')
plt.title('Eibow for Kmeans clustering')
plt.show()
'''calculate Silhouette Coefficient'''
def cal_silhouette_coef(tfidf_train):
weight = tfidf_train.toarray()
Scores = []
for k in range(2, 50):
km = KMeans(n_clusters = k)
km.fit(weight)
Scores.append(silhouette_score(weight, km.labels_, metric='euclidean'))
X = range(2, 50)
plt.xlabel('K-value')
plt.ylabel('Silhouette-Coefficient')
plt.plot(X, Scores, 'o-')
plt.show()
'''visualization'''
def visualization(tfidf_train, labels_):
tsne = TSNE(n_components=2)
decomposition_data = tsne.fit_transform(tfidf_train)
x = []
y = []
for i in decomposition_data:
x.append(i[0])
y.append(i[1])
fig = plt.figure(figsize=(10, 10))
ax = plt.axes()
plt.scatter(x, y, c=labels_, marker="x")
plt.title("k = 15")
plt.xticks(())
plt.yticks(())
plt.show()
plt.savefig('./figure/sample.png', aspect=1)
if __name__ == '__main__':
corpus_train = "./corpus_train.txt"
cluster_docs = "./cluster_result_document.txt"
cluster_keywords = "./cluster_result_keyword.txt"
num_clusters = 15
tfidf_train, word_dict = tfidf_vector(corpus_train)
# cal_silhouette_coef(tfidf_train) # judge which K-value to take
# best_kmeans(tfidf_train, word_dict)
cluster_kmeans(tfidf_train, word_dict, cluster_docs, cluster_keywords, num_clusters)
| 31.706294 | 122 | 0.651301 |
7959c2d3d96671730e36bf9df3eefdc116b8b7f4 | 2,687 | py | Python | tune_hyperparameters.py | jessvb/zhorai-speech-rec | 64fe2589fa8ebbf62c133e91ff9a30728831f922 | [
"CC-BY-4.0"
] | null | null | null | tune_hyperparameters.py | jessvb/zhorai-speech-rec | 64fe2589fa8ebbf62c133e91ff9a30728831f922 | [
"CC-BY-4.0"
] | null | null | null | tune_hyperparameters.py | jessvb/zhorai-speech-rec | 64fe2589fa8ebbf62c133e91ff9a30728831f922 | [
"CC-BY-4.0"
] | null | null | null | ##########################################################
# pytorch-kaldi v.0.1
# Mirco Ravanelli, Titouan Parcollet
# Mila, University of Montreal
# October 2018
#
# Description:
# This scripts generates config files with the random hyperparamters specified by the user.
# python tune_hyperparameters.py cfg_file out_folder N_exp hyperparameters_spec
# e.g., python tune_hyperparameters.py cfg/TIMIT_MLP_mfcc.cfg exp/TIMIT_MLP_mfcc_tuning 10 arch_lr=randfloat(0.001,0.01) batch_size_train=randint(32,256) dnn_act=choose_str{relu,relu,relu,relu,softmax|tanh,tanh,tanh,tanh,softmax}
##########################################################
from random import randint
import random
import re
from optparse import OptionParser
import os
parser=OptionParser()
(options,args)=parser.parse_args()
cfg_file=args[0]
output_folder=args[1]
N_exp=int(args[2])
hyperparam_list=args[3:]
seed=1234
print('Generating config file for hyperparameter tuning...')
if not os.path.exists(output_folder):
os.makedirs(output_folder)
random.seed(seed)
for i in range(N_exp):
cfg_file_out=output_folder+'/exp'+str(i)+'.cfg'
cfg_out=open(cfg_file_out, 'w')
for line in open(cfg_file):
key=line.split('=')[0]
if key=='out_folder':
line='out_folder='+output_folder+'/exp'+str(i)+'\n'
hyper_found=False
for hyperparam in hyperparam_list:
key_hyper=hyperparam.split('=')[0]
if key==key_hyper:
if "randint" in hyperparam:
[lower,higher] = re.search('randint\((.+?)\)', hyperparam).group(1).split(',')
value_hyper=randint(int(lower), int(higher))
hyper_found=True
if "randfloat" in hyperparam:
[lower,higher] = re.search('randfloat\((.+?)\)', hyperparam).group(1).split(',')
value_hyper=random.uniform(float(lower), float(higher))
hyper_found=True
if "choose_str" in hyperparam:
value_hyper = random.choice(re.search('\{(.+?)\}', hyperparam).group(1).split('|'))
hyper_found=True
if "choose_int" in hyperparam:
value_hyper = int(random.choice(re.search('\{(.+?)\}', hyperparam).group(1).split('|')))
hyper_found=True
if "choose_float" in hyperparam:
value_hyper = float(random.choice(re.search('\{(.+?)\}', hyperparam).group(1).split('|')))
hyper_found=True
line_out=key+'='+str(value_hyper)+'\n'
if not(hyper_found):
line_out=line
cfg_out.write(line_out)
print('Done %s'%cfg_file_out)
cfg_out.close()
| 28.892473 | 229 | 0.605136 |
7959c2e5c47fa9d06bed7e14580eeff880503c93 | 5,162 | py | Python | mmpose/models/keypoint_heads/top_down_simple_head.py | filipkro/mmpose | b4b6eda3fe3c2470ab0e44936f4bf7f82db6d3e4 | [
"Apache-2.0"
] | 1 | 2020-09-22T03:39:47.000Z | 2020-09-22T03:39:47.000Z | mmpose/models/keypoint_heads/top_down_simple_head.py | filipkro/mmpose | b4b6eda3fe3c2470ab0e44936f4bf7f82db6d3e4 | [
"Apache-2.0"
] | null | null | null | mmpose/models/keypoint_heads/top_down_simple_head.py | filipkro/mmpose | b4b6eda3fe3c2470ab0e44936f4bf7f82db6d3e4 | [
"Apache-2.0"
] | 1 | 2021-07-13T03:42:27.000Z | 2021-07-13T03:42:27.000Z | import torch.nn as nn
from mmcv.cnn import (build_conv_layer, build_upsample_layer, constant_init,
normal_init)
from ..registry import HEADS
@HEADS.register_module()
class TopDownSimpleHead(nn.Module):
"""Top-down model head of simple baseline paper ref: Bin Xiao. ``Simple
Baselines for Human Pose Estimation and Tracking.''.
TopDownSimpleHead is consisted of (>=0) number of deconv layers
and a simple conv2d layer.
Args:
in_channels (int): Number of input channels
out_channels (int): Number of output channels
num_deconv_layers (int): Number of deconv layers.
num_deconv_layers should >= 0. Note that 0 means
no deconv layers.
num_deconv_filters (list|tuple): Number of filters.
If num_deconv_layers > 0, the length of
num_deconv_kernels (list|tuple): Kernel sizes.
"""
def __init__(self,
in_channels,
out_channels,
num_deconv_layers=3,
num_deconv_filters=(256, 256, 256),
num_deconv_kernels=(4, 4, 4),
extra=None):
super().__init__()
self.in_channels = in_channels
if extra is not None and not isinstance(extra, dict):
raise TypeError('extra should be dict or None.')
if num_deconv_layers > 0:
self.deconv_layers = self._make_deconv_layer(
num_deconv_layers,
num_deconv_filters,
num_deconv_kernels,
)
elif num_deconv_layers == 0:
self.deconv_layers = nn.Identity()
else:
raise ValueError(
f'num_deconv_layers ({num_deconv_layers}) should >= 0.')
identity_final_layer = False
if extra is not None and 'final_conv_kernel' in extra:
assert extra['final_conv_kernel'] in [0, 1, 3]
if extra['final_conv_kernel'] == 3:
padding = 1
elif extra['final_conv_kernel'] == 1:
padding = 0
else:
# 0 for Identity mapping.
identity_final_layer = True
kernel_size = extra['final_conv_kernel']
else:
kernel_size = 1
padding = 0
if identity_final_layer:
self.final_layer = nn.Identity()
else:
self.final_layer = build_conv_layer(
cfg=dict(type='Conv2d'),
in_channels=num_deconv_filters[-1]
if num_deconv_layers > 0 else in_channels,
out_channels=out_channels,
kernel_size=kernel_size,
stride=1,
padding=padding)
def forward(self, x):
"""Forward function."""
if isinstance(x, list):
x = x[0]
x = self.deconv_layers(x)
x = self.final_layer(x)
return x
def _make_deconv_layer(self, num_layers, num_filters, num_kernels):
"""Make deconv layers."""
if num_layers != len(num_filters):
error_msg = f'num_layers({num_layers}) ' \
f'!= length of num_filters({len(num_filters)})'
raise ValueError(error_msg)
if num_layers != len(num_kernels):
error_msg = f'num_layers({num_layers}) ' \
f'!= length of num_kernels({len(num_kernels)})'
raise ValueError(error_msg)
layers = []
for i in range(num_layers):
kernel, padding, output_padding = \
self._get_deconv_cfg(num_kernels[i])
planes = num_filters[i]
layers.append(
build_upsample_layer(
dict(type='deconv'),
in_channels=self.in_channels,
out_channels=planes,
kernel_size=kernel,
stride=2,
padding=padding,
output_padding=output_padding,
bias=False))
layers.append(nn.BatchNorm2d(planes))
layers.append(nn.ReLU(inplace=True))
self.in_channels = planes
return nn.Sequential(*layers)
def _get_deconv_cfg(self, deconv_kernel):
"""Get configurations for deconv layers."""
if deconv_kernel == 4:
padding = 1
output_padding = 0
elif deconv_kernel == 3:
padding = 1
output_padding = 1
elif deconv_kernel == 2:
padding = 0
output_padding = 0
else:
raise ValueError(f'Not supported num_kernels ({deconv_kernel}).')
return deconv_kernel, padding, output_padding
def init_weights(self):
"""Initialize model weights."""
for name, m in self.deconv_layers.named_modules():
if isinstance(m, nn.ConvTranspose2d):
normal_init(m, std=0.001)
elif isinstance(m, nn.BatchNorm2d):
constant_init(m, 1)
for m in self.final_layer.modules():
if isinstance(m, nn.Conv2d):
normal_init(m, std=0.001, bias=0)
| 35.115646 | 77 | 0.555211 |
7959c3d1ad30a094a622f0433c7549072e248f96 | 4,203 | py | Python | classification/models/pointnet_cls.py | asafmanor/SampleNet | b55e2b336d54db31a2d689abede2c3d049275d97 | [
"Unlicense"
] | 283 | 2019-12-07T15:20:06.000Z | 2022-03-30T19:13:43.000Z | classification/models/pointnet_cls.py | asafmanor/SampleNet | b55e2b336d54db31a2d689abede2c3d049275d97 | [
"Unlicense"
] | 12 | 2020-04-10T17:41:23.000Z | 2022-03-22T22:01:28.000Z | classification/models/pointnet_cls.py | asafmanor/SampleNet | b55e2b336d54db31a2d689abede2c3d049275d97 | [
"Unlicense"
] | 34 | 2019-12-15T15:24:10.000Z | 2022-03-12T16:07:20.000Z | from __future__ import print_function
import tensorflow as tf
import numpy as np
import math
import sys
import os
BASE_DIR = os.path.dirname(os.path.abspath(__file__))
sys.path.append(BASE_DIR)
sys.path.append(os.path.join(BASE_DIR, "../utils"))
import tf_util
from transform_nets import input_transform_net, feature_transform_net
def placeholder_inputs(batch_size, num_point):
pointclouds_pl = tf.placeholder(tf.float32, shape=(batch_size, num_point, 3))
labels_pl = tf.placeholder(tf.int32, shape=(batch_size))
return pointclouds_pl, labels_pl
def get_model(point_cloud, is_training, bn_decay=None):
""" Classification PointNet, input is BxNx3, output Bx40 """
batch_size = point_cloud.get_shape()[0].value
num_point = point_cloud.get_shape()[1].value
end_points = {}
with tf.variable_scope("transform_net1") as sc:
transform = input_transform_net(point_cloud, is_training, bn_decay, K=3)
point_cloud_transformed = tf.matmul(point_cloud, transform)
input_image = tf.expand_dims(point_cloud_transformed, -1)
net = tf_util.conv2d(
input_image,
64,
[1, 3],
padding="VALID",
stride=[1, 1],
bn=True,
is_training=is_training,
scope="conv1",
bn_decay=bn_decay,
)
net = tf_util.conv2d(
net,
64,
[1, 1],
padding="VALID",
stride=[1, 1],
bn=True,
is_training=is_training,
scope="conv2",
bn_decay=bn_decay,
)
with tf.variable_scope("transform_net2") as sc:
transform = feature_transform_net(net, is_training, bn_decay, K=64)
end_points["transform"] = transform
net_transformed = tf.matmul(tf.squeeze(net, axis=[2]), transform)
net_transformed = tf.expand_dims(net_transformed, [2])
net = tf_util.conv2d(
net_transformed,
64,
[1, 1],
padding="VALID",
stride=[1, 1],
bn=True,
is_training=is_training,
scope="conv3",
bn_decay=bn_decay,
)
net = tf_util.conv2d(
net,
128,
[1, 1],
padding="VALID",
stride=[1, 1],
bn=True,
is_training=is_training,
scope="conv4",
bn_decay=bn_decay,
)
net = tf_util.conv2d(
net,
1024,
[1, 1],
padding="VALID",
stride=[1, 1],
bn=True,
is_training=is_training,
scope="conv5",
bn_decay=bn_decay,
)
end_points["critical_set_idx"] = tf.arg_max(net, 1)
# Symmetric function: max pooling
net = tf_util.max_pool2d(net, [num_point, 1], padding="VALID", scope="maxpool")
end_points["GFV"] = net
net = tf.reshape(net, [batch_size, -1])
net = tf_util.fully_connected(
net, 512, bn=True, is_training=is_training, scope="fc1", bn_decay=bn_decay
)
net = tf_util.dropout(net, keep_prob=0.7, is_training=is_training, scope="dp1")
net = tf_util.fully_connected(
net, 256, bn=True, is_training=is_training, scope="fc2", bn_decay=bn_decay
)
net = tf_util.dropout(net, keep_prob=0.7, is_training=is_training, scope="dp2")
end_points["retrieval_vectors"] = net
net = tf_util.fully_connected(net, 40, activation_fn=None, scope="fc3")
return net, end_points
def get_loss(pred, label, end_points, reg_weight=0.001):
""" pred: B*NUM_CLASSES,
label: B, """
loss = tf.nn.sparse_softmax_cross_entropy_with_logits(logits=pred, labels=label)
classify_loss = tf.reduce_mean(loss)
tf.summary.scalar("classify loss", classify_loss)
# Enforce the transformation as orthogonal matrix
transform = end_points["transform"] # BxKxK
K = transform.get_shape()[1].value
mat_diff = tf.matmul(transform, tf.transpose(transform, perm=[0, 2, 1]))
mat_diff -= tf.constant(np.eye(K), dtype=tf.float32)
mat_diff_loss = tf.nn.l2_loss(mat_diff)
tf.summary.scalar("mat loss", mat_diff_loss)
return classify_loss + mat_diff_loss * reg_weight
if __name__ == "__main__":
with tf.Graph().as_default():
inputs = tf.zeros((1, 1024, 3))
outputs = get_model(inputs, tf.constant(True))
print(outputs)
| 30.021429 | 84 | 0.643112 |
7959c5a2dc9493c9cb8af7ae30f5515064b66a62 | 636 | py | Python | c2cwsgiutils/broadcast/interface.py | arnaud-morvan/c2cwsgiutils | aa06b77b247bd8969b88225ee3ea109886aefeac | [
"BSD-2-Clause-FreeBSD"
] | null | null | null | c2cwsgiutils/broadcast/interface.py | arnaud-morvan/c2cwsgiutils | aa06b77b247bd8969b88225ee3ea109886aefeac | [
"BSD-2-Clause-FreeBSD"
] | null | null | null | c2cwsgiutils/broadcast/interface.py | arnaud-morvan/c2cwsgiutils | aa06b77b247bd8969b88225ee3ea109886aefeac | [
"BSD-2-Clause-FreeBSD"
] | null | null | null | from abc import abstractmethod
from typing import Optional, Callable, Mapping, Any, List
class BaseBroadcaster(object):
"""
Interface definition for message broadcasting implementation
"""
@abstractmethod
def subscribe(self, channel: str, callback: Callable[..., Any]) -> None:
pass # pragma: no cover
@abstractmethod
def unsubscribe(self, channel: str) -> None:
pass # pragma: no cover
@abstractmethod
def broadcast(self, channel: str, params: Mapping[str, Any], expect_answers: bool,
timeout: float) -> Optional[List[Any]]:
pass # pragma: no cover
| 28.909091 | 86 | 0.657233 |
7959c5af70e3e24ffd8db449276c584a04cfe6c5 | 5,378 | py | Python | source/infrastructure/personalize/aws_lambda/functions/create_dataset_import_job.py | turnoutnow/maintaining-personalized-experiences-with-machine-learning | b45588c094734cce70198811890a28e65b8e39e1 | [
"Apache-2.0"
] | null | null | null | source/infrastructure/personalize/aws_lambda/functions/create_dataset_import_job.py | turnoutnow/maintaining-personalized-experiences-with-machine-learning | b45588c094734cce70198811890a28e65b8e39e1 | [
"Apache-2.0"
] | null | null | null | source/infrastructure/personalize/aws_lambda/functions/create_dataset_import_job.py | turnoutnow/maintaining-personalized-experiences-with-machine-learning | b45588c094734cce70198811890a28e65b8e39e1 | [
"Apache-2.0"
] | null | null | null | # ######################################################################################################################
# Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. #
# #
# Licensed under the Apache License, Version 2.0 (the "License"). You may not use this file except in compliance #
# with the License. You may obtain a copy of the License at #
# #
# http://www.apache.org/licenses/LICENSE-2.0 #
# #
# Unless required by applicable law or agreed to in writing, software distributed under the License is distributed #
# on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for #
# the specific language governing permissions and limitations under the License. #
# ######################################################################################################################
from pathlib import Path
from typing import Optional
import aws_cdk.aws_iam as iam
from aws_cdk.aws_s3 import IBucket
from aws_cdk.aws_stepfunctions import IChainable
from aws_cdk.core import Construct, Aws
from aws_solutions.cdk.stepfunctions.solutionstep import SolutionStep
class CreateDatasetImportJob(SolutionStep):
def __init__(
self,
scope: Construct,
id: str,
personalize_bucket: IBucket,
layers=None,
failure_state: Optional[IChainable] = None,
):
self.personalize_bucket = personalize_bucket
self.personalize_role = iam.Role(
scope,
"PersonalizeS3ReadRole",
description="Grants Amazon Personalize access to read from S3",
assumed_by=iam.ServicePrincipal("personalize.amazonaws.com"),
inline_policies={
"PersonalizeS3ReadPolicy": iam.PolicyDocument(
statements=[
iam.PolicyStatement(
effect=iam.Effect.ALLOW,
actions=[
"s3:GetObject",
"s3:ListBucket",
],
resources=[
personalize_bucket.arn_for_objects("*"),
personalize_bucket.bucket_arn,
],
)
]
)
},
)
personalize_bucket.add_to_resource_policy(
iam.PolicyStatement(
effect=iam.Effect.ALLOW,
actions=[
"s3:GetObject",
"s3:ListBucket",
],
resources=[
personalize_bucket.arn_for_objects("*"),
personalize_bucket.bucket_arn,
],
principals=[iam.ServicePrincipal("personalize.amazonaws.com")],
)
)
super().__init__(
scope,
id,
layers=layers,
failure_state=failure_state,
entrypoint=(
Path(__file__).absolute().parents[4]
/ "aws_lambda"
/ "create_dataset_import_job"
/ "handler.py"
),
libraries=[Path(__file__).absolute().parents[4] / "aws_lambda" / "shared"],
)
def _set_permissions(self):
# personalize resource permissions
self.function.add_to_role_policy(
statement=iam.PolicyStatement(
actions=[
"personalize:DescribeDatasetGroup",
"personalize:DescribeSchema",
"personalize:DescribeDataset",
"personalize:CreateDatasetImportJob",
"personalize:DescribeDatasetImportJob",
"personalize:ListDatasetImportJobs",
],
effect=iam.Effect.ALLOW,
resources=[
f"arn:{Aws.PARTITION}:personalize:{Aws.REGION}:{Aws.ACCOUNT_ID}:dataset-group/*",
f"arn:{Aws.PARTITION}:personalize:{Aws.REGION}:{Aws.ACCOUNT_ID}:schema/*",
f"arn:{Aws.PARTITION}:personalize:{Aws.REGION}:{Aws.ACCOUNT_ID}:dataset/*",
f"arn:{Aws.PARTITION}:personalize:{Aws.REGION}:{Aws.ACCOUNT_ID}:dataset-import-job/*",
],
)
)
self.personalize_bucket.grant_read(self.function, "train/*")
# passrole permissions
self.function.add_to_role_policy(
statement=iam.PolicyStatement(
effect=iam.Effect.ALLOW,
actions=["iam:PassRole"],
resources=[self.personalize_role.role_arn],
)
)
self.function.add_environment("ROLE_ARN", self.personalize_role.role_arn)
| 45.576271 | 120 | 0.465415 |
7959c60e66e1885dfd84d642b38c83b671bd91b4 | 2,714 | py | Python | base/site-packages/pymongo/__init__.py | edisonlz/fastor | 342078a18363ac41d3c6b1ab29dbdd44fdb0b7b3 | [
"Apache-2.0"
] | 285 | 2019-12-23T09:50:21.000Z | 2021-12-08T09:08:49.000Z | base/site-packages/pymongo/__init__.py | jeckun/fastor | 342078a18363ac41d3c6b1ab29dbdd44fdb0b7b3 | [
"Apache-2.0"
] | null | null | null | base/site-packages/pymongo/__init__.py | jeckun/fastor | 342078a18363ac41d3c6b1ab29dbdd44fdb0b7b3 | [
"Apache-2.0"
] | 9 | 2019-12-23T12:59:25.000Z | 2022-03-15T05:12:11.000Z | # Copyright 2009-2012 10gen, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Python driver for MongoDB."""
ASCENDING = 1
"""Ascending sort order."""
DESCENDING = -1
"""Descending sort order."""
GEO2D = "2d"
"""Index specifier for a 2-dimensional `geospatial index`_.
.. versionadded:: 1.5.1
.. note:: Geo-spatial indexing requires server version **>= 1.3.3**.
.. _geospatial index: http://docs.mongodb.org/manual/core/geospatial-indexes/
"""
GEOHAYSTACK = "geoHaystack"
"""Index specifier for a 2-dimensional `haystack index`_.
.. versionadded:: 2.1
.. note:: Geo-spatial indexing requires server version **>= 1.5.6**.
.. _haystack index: http://docs.mongodb.org/manual/core/geospatial-indexes/#haystack-indexes
"""
GEOSPHERE = "2dsphere"
"""Index specifier for a `spherical geospatial index`_.
.. versionadded:: 2.5
.. note:: 2dsphere indexing requires server version **>= 2.4.0**.
.. _spherical geospatial index: http://docs.mongodb.org/manual/release-notes/2.4/#new-geospatial-indexes-with-geojson-and-improved-spherical-geometry
"""
HASHED = "hashed"
"""Index specifier for a `hashed index`_.
.. versionadded:: 2.5
.. note:: hashed indexing requires server version **>= 2.4.0**.
.. _hashed index: http://docs.mongodb.org/manual/release-notes/2.4/#new-hashed-index-and-sharding-with-a-hashed-shard-key
"""
OFF = 0
"""No database profiling."""
SLOW_ONLY = 1
"""Only profile slow operations."""
ALL = 2
"""Profile all operations."""
version_tuple = (2, 5, 2)
def get_version_string():
if isinstance(version_tuple[-1], basestring):
return '.'.join(map(str, version_tuple[:-1])) + version_tuple[-1]
return '.'.join(map(str, version_tuple))
version = get_version_string()
"""Current version of PyMongo."""
from pymongo.connection import Connection
from pymongo.mongo_client import MongoClient
from pymongo.mongo_replica_set_client import MongoReplicaSetClient
from pymongo.replica_set_connection import ReplicaSetConnection
from pymongo.read_preferences import ReadPreference
def has_c():
"""Is the C extension installed?
.. versionadded:: 1.5
"""
try:
from pymongo import _cmessage
return True
except ImportError:
return False
| 28.270833 | 149 | 0.722918 |
7959c6476d53bd4c7ed86fcc5fa8785f3dd7e237 | 2,783 | py | Python | arch/task_manager/apps/machine_learning_model.py | ZZIQIN/FATE | cc6783927564cbb15c067d5010f1cdf82a5de20a | [
"Apache-2.0"
] | null | null | null | arch/task_manager/apps/machine_learning_model.py | ZZIQIN/FATE | cc6783927564cbb15c067d5010f1cdf82a5de20a | [
"Apache-2.0"
] | null | null | null | arch/task_manager/apps/machine_learning_model.py | ZZIQIN/FATE | cc6783927564cbb15c067d5010f1cdf82a5de20a | [
"Apache-2.0"
] | null | null | null | #
# Copyright 2019 The FATE Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from arch.api.utils import file_utils
from flask import Flask, request
from arch.task_manager.settings import server_conf
from arch.task_manager.utils import publish_model
from arch.task_manager.job_manager import generate_job_id
from arch.task_manager.utils.api_utils import get_json_result, federated_api
from arch.api.version_control.control import version_history
from arch.api import eggroll
from arch.task_manager.settings import WORK_MODE, logger, SERVINGS, PARTY_ID
import json
manager = Flask(__name__)
@manager.errorhandler(500)
def internal_server_error(e):
logger.exception(e)
return get_json_result(status=100, msg=str(e))
@manager.route('/load', methods=['POST'])
def load_model():
request_config = request.json
_job_id = generate_job_id()
all_party = set()
for _party_ids in request_config.get('role').values():
all_party.update(set(_party_ids))
for _party_id in all_party:
st, msg = federated_api(job_id=_job_id,
method='POST',
url='/model/load/do',
party_id=_party_id,
json_body=request_config)
return get_json_result(job_id=_job_id)
@manager.route('/load/do', methods=['POST'])
def do_load_model():
request_data = request.json
request_data["servings"] = server_conf.get("servers", {}).get("servings", [])
publish_model.load_model(config_data=request_data)
return get_json_result()
@manager.route('/online', methods=['POST'])
def publish_model_online():
request_config = request.json
if not request_config.get('servings'):
# get my party all servings
request_config['servings'] = SERVINGS
publish_model.publish_online(config_data=request_config)
return get_json_result()
@manager.route('/version', methods=['POST'])
def query_model_version_history():
request_data = request.json
config = file_utils.load_json_conf(request_data.get("config_path"))
eggroll.init(mode=WORK_MODE)
history = version_history(data_table_namespace=config.get("namespace"))
return get_json_result(msg=json.dumps(history))
| 36.618421 | 81 | 0.719727 |
7959c706e99995ff510d2c58c84ad3521165012f | 5,609 | py | Python | core/face_processing.py | ArtyDev57/face_recon | c0a79b3fe41e0db37cb13ce54e17bef8f8dbf685 | [
"MIT"
] | 4 | 2020-05-22T03:17:03.000Z | 2021-07-29T04:24:02.000Z | core/face_processing.py | ArtyDev57/face_recon | c0a79b3fe41e0db37cb13ce54e17bef8f8dbf685 | [
"MIT"
] | null | null | null | core/face_processing.py | ArtyDev57/face_recon | c0a79b3fe41e0db37cb13ce54e17bef8f8dbf685 | [
"MIT"
] | 1 | 2020-10-01T11:58:05.000Z | 2020-10-01T11:58:05.000Z | import cv2
import numpy as np
import math
from database import list_know_people_by_id, insert_people_access
from os import path, getcwd, mkdir
from datetime import datetime
import core.face_detection as fd
import face_recognition as fr
class faceproc:
    """Face detection + recognition pipeline for camera frames.

    Known-face encodings/names must be supplied via the setters before
    calling `detect_face_and_recognition`.  Frames are assumed to have been
    downscaled by `resize_frame` before detection; box coordinates are
    scaled back up when drawing/saving.
    """

    def __init__(self, resize_frame=4, camera_id=None):
        # Encodings / display ids of known people (set via setters below).
        self.known_face_encodings = None
        self.known_face_names = None
        # Downscale factor used on frames before detection.
        self.resize_frame = resize_frame
        # Camera identifier stored with access records in the database.
        self.camera_id = camera_id

    # detect faces and compare each one against the known encodings
    def detect_face_and_recognition(self, rgb_image=None):
        """Detect faces in `rgb_image` and match them against known faces.

        Returns (face_locations, face_predictions); each prediction is a
        (known-person id, accuracy-percent) tuple, ('Unknown', 0) when no
        match reaches the 80% confidence threshold.
        """
        if self.known_face_encodings is None or self.known_face_names is None or rgb_image is None:
            # fix: message previously listed known_face_encodings twice
            raise AttributeError("known_face_encodings, known_face_names, rgb_image is None")
        face_predictions = []
        # detect face bounding boxes
        face_locations = fd.face_locations(rgb_image)
        # one 128-d encoding per detected face
        face_encode = fr.face_encodings(rgb_image, face_locations)
        for f_encode in face_encode:
            # boolean match per known encoding
            matches = fr.compare_faces(self.known_face_encodings, f_encode)
            name = 'Unknown'
            acc_percent = 0
            # distances to every known encoding; smaller is more similar
            face_distance = fr.face_distance(self.known_face_encodings, f_encode)
            best_match_index = np.argmin(face_distance)
            if matches[best_match_index]:
                # convert the best distance to a percent confidence
                acc = math.floor(self.__face_distance_to_conf(face_distance[best_match_index]) * 100)
                # accept only matches with >= 80% confidence
                if acc >= 80:
                    name = self.known_face_names[best_match_index]
                    acc_percent = acc
            face_predictions.append((name, acc_percent))
        return face_locations, face_predictions

    # draw recognition results onto the full-resolution frame
    def show_face_recognition(self, frame=None, face_locations=None, face_predictions=None):
        """Draw labelled boxes for every (location, prediction) pair.

        Known faces get a blue box with the person's display name (looked up
        in the database); unknown faces get a red box labelled 'Unknown 0%'.
        """
        for (top, right, bottom, left), (kp_id, acc_percent) in zip(face_locations, face_predictions):
            # map detection coordinates back to the original frame size
            top *= self.resize_frame
            right *= self.resize_frame
            bottom *= self.resize_frame
            left *= self.resize_frame
            face_box_color = (0, 0, 255)  # red (BGR) for unknown
            if acc_percent > 0:
                face_box_color = (255, 0, 0)  # blue (BGR) for known
            cv2.rectangle(frame, (left, top), (right, bottom), face_box_color, 2)
            name = kp_id
            if acc_percent > 0:
                # resolve the display name from the database record
                know_people = list_know_people_by_id(kp_id)
                if len(know_people) > 0:
                    person = know_people[0]
                    name = person[1]
            label_str = "{name} {percent}%".format(name=name, percent=acc_percent)
            (w, h), _ = cv2.getTextSize(label_str, cv2.FONT_HERSHEY_DUPLEX, 0.5, 1)
            cv2.rectangle(frame, (left, bottom - 35), (right, bottom), face_box_color, cv2.FILLED)
            cv2.putText(frame, label_str, (left + 6, bottom - h), cv2.FONT_HERSHEY_DUPLEX, 0.5, (255, 255, 255), 1)
        return frame

    # persist unknown faces (full frame + crop) and log the access
    def save_face(self, frame, face_locations, face_predictions):
        """Save a crop and the full frame for every unrecognised face and
        record the access in the database."""
        # images are stored under <cwd>/images
        _image_path = path.join(getcwd(), 'images')
        if not path.exists(_image_path):
            mkdir(_image_path)
        for (top, right, bottom, left), (kp_id, acc_percent) in zip(face_locations, face_predictions):
            top *= self.resize_frame
            right *= self.resize_frame
            bottom *= self.resize_frame
            left *= self.resize_frame
            # only unknown people are captured
            if acc_percent <= 0:
                crop_face = frame[top:bottom, left:right]
                # fix: strftime('%s') is a non-portable glibc extension
                # (fails on Windows); use the timestamp directly instead.
                ts = int(datetime.now().timestamp())
                cap_full_image_name = "cap_full_img-{}.jpg".format(ts)
                cap_face_image_name = "cap_face_image-{}.jpg".format(ts)
                cap_full_image_path = path.join(_image_path, cap_full_image_name)
                cap_face_image_path = path.join(_image_path, cap_face_image_name)
                try:
                    # save both images, then record the access
                    cv2.imwrite(cap_face_image_path, crop_face)
                    cv2.imwrite(cap_full_image_path, frame.copy())
                    insert_people_access(kp_id, self.camera_id, cap_full_image_name, cap_face_image_name)
                except Exception:
                    # fix: bare `except:` also swallowed KeyboardInterrupt/
                    # SystemExit; best-effort per-face behaviour is kept.
                    continue

    def set_face_encoding(self, face_encodings=None):
        """Set the list of known face encodings."""
        self.known_face_encodings = face_encodings

    def set_face_names(self, face_names):
        """Set the ids/names parallel to the known encodings."""
        self.known_face_names = face_names

    def set_resize_image(self, resize_img):
        """Set the frame downscale factor."""
        self.resize_frame = resize_img

    def __face_distance_to_conf(self, face_distance, face_match_threshold=0.6):
        """Convert a face distance into a confidence in [0, 1].

        Linear below/above the threshold, with a non-linear boost applied to
        confident (close) matches.
        """
        if face_distance > face_match_threshold:
            span = (1.0 - face_match_threshold)  # renamed: shadowed builtin `range`
            linear_val = (1.0 - face_distance) / (span * 2.0)
            return linear_val
        else:
            span = face_match_threshold
            linear_val = 1.0 - (face_distance / (span * 2.0))
            return linear_val + ((1.0 - linear_val) * math.pow((linear_val - 0.5) * 2, 0.2))
| 40.064286 | 115 | 0.622571 |
7959c745aa3b3791d6ac02b483b733af520c8d59 | 11,676 | py | Python | keras_frcnn/pascal_voc.py | Heyjuke58/frcnn-wind-turbine-detection | 29311020188d3a26c8935cae710bd2c5013653ab | [
"Apache-2.0"
] | null | null | null | keras_frcnn/pascal_voc.py | Heyjuke58/frcnn-wind-turbine-detection | 29311020188d3a26c8935cae710bd2c5013653ab | [
"Apache-2.0"
] | null | null | null | keras_frcnn/pascal_voc.py | Heyjuke58/frcnn-wind-turbine-detection | 29311020188d3a26c8935cae710bd2c5013653ab | [
"Apache-2.0"
] | null | null | null | # -*- coding: utf-8 -*-
import os
import pickle
import numpy as np
import xml.etree.ElementTree as ET
class pascal_voc_util(object):
    """Helper around the PASCAL VOC 2007 devkit directory layout.

    Loads the 'test' image-set index at construction and provides the
    evaluation helpers (`_write_voc_results_file`, `_do_python_eval`).

    Fixes over the previous revision: `__init__` now also defines the
    attributes the other methods actually reference (`_devkit_path`,
    `_year`, `_classes`, `_image_set`, `image_index`) — previously those
    methods raised AttributeError when called.
    """

    def __init__(self, devkit_path):
        # 20 object categories plus the implicit background class.
        self.classes = ('__background__',  # always index 0
                        'aeroplane', 'bicycle', 'bird', 'boat',
                        'bottle', 'bus', 'car', 'cat', 'chair',
                        'cow', 'diningtable', 'dog', 'horse',
                        'motorbike', 'person', 'pottedplant',
                        'sheep', 'sofa', 'train', 'tvmonitor')
        self._devkit_path = devkit_path
        self._data_path = os.path.join(devkit_path, 'VOC2007')
        self._image_ext = '.jpg'
        # both spellings are referenced across the methods below
        self.year = self._year = "2007"
        self._classes = self.classes
        self._image_set = "test"
        assert os.path.exists(self._data_path), \
            'Path does not exist: {}'.format(self._data_path)
        self._image_index = self._load_image_set_index()
        self.image_index = self._image_index

    def _load_image_set_index(self):
        """
        Load the indexes listed in this dataset's image set file.
        """
        # Example path to image set file:
        # self._devkit_path + /VOCdevkit2007/VOC2007/ImageSets/Main/val.txt
        image_set_file = os.path.join(self._data_path, 'ImageSets', 'Main',
                                      "test" + '.txt')
        assert os.path.exists(image_set_file), \
            'Path does not exist: {}'.format(image_set_file)
        with open(image_set_file) as f:
            image_index = [x.strip() for x in f.readlines()]
        return image_index

    def _write_voc_results_file(self, all_boxes):
        """Write `all_boxes` (per-class, per-image detection arrays) in the
        official VOC results-file format, one file per class."""
        for cls_ind, cls in enumerate(self.classes):
            if cls == '__background__':
                continue
            print('Writing {} VOC results file'.format(cls))
            filename = self._get_voc_results_file_template().format(cls)
            with open(filename, 'wt') as f:
                for im_ind, index in enumerate(self.image_index):
                    dets = all_boxes[cls_ind][im_ind]
                    # fix: `dets == []` is an elementwise comparison when
                    # `dets` is an ndarray; test emptiness by length instead
                    if len(dets) == 0:
                        continue
                    # the VOCdevkit expects 1-based indices
                    for k in range(dets.shape[0]):
                        f.write('{:s} {:.3f} {:.1f} {:.1f} {:.1f} {:.1f}\n'.
                                format(index, dets[k, -1],
                                       dets[k, 0] + 1, dets[k, 1] + 1,
                                       dets[k, 2] + 1, dets[k, 3] + 1))

    def _get_voc_results_file_template(self):
        """Return the per-class results-file path template.

        NOTE(review): `self._get_comp_id()` is not defined in this class —
        presumably supplied by a subclass or mixin; confirm before calling.
        """
        # VOCdevkit/results/VOC2007/Main/<comp_id>_det_test_aeroplane.txt
        filename = self._get_comp_id() + '_det_' + self._image_set + '_{:s}.txt'
        filedir = os.path.join(self._devkit_path, 'results', 'VOC' + self._year, 'Main')
        if not os.path.exists(filedir):
            os.makedirs(filedir)
        path = os.path.join(filedir, filename)
        return path

    def _do_python_eval(self, output_dir='output'):
        """Run `voc_eval` for every class, print per-class and mean AP, and
        pickle each class's PR curve under `output_dir`."""
        if not os.path.isdir("output"):
            os.mkdir("output")
        annopath = os.path.join(
            self._devkit_path,
            'VOC' + self._year,
            'Annotations',
            '{:s}.xml')
        imagesetfile = os.path.join(
            self._devkit_path,
            'VOC' + self._year,
            'ImageSets',
            'Main',
            self._image_set + '.txt')
        cachedir = os.path.join(self._devkit_path, 'annotations_cache')
        aps = []
        # The PASCAL VOC metric changed in 2010 (11-point AP before that)
        use_07_metric = True if int(self._year) < 2010 else False
        print('VOC07 metric? ' + ('Yes' if use_07_metric else 'No'))
        if not os.path.isdir(output_dir):
            os.mkdir(output_dir)
        for i, cls in enumerate(self._classes):
            if cls == '__background__':
                continue
            filename = self._get_voc_results_file_template().format(cls)
            rec, prec, ap = voc_eval(
                filename, annopath, imagesetfile, cls, cachedir, ovthresh=0.5,
                use_07_metric=use_07_metric)
            aps += [ap]
            print('AP for {} = {:.4f}'.format(cls, ap))
            with open(os.path.join(output_dir, cls + '_pr.pkl'), 'wb') as f:
                pickle.dump({'rec': rec, 'prec': prec, 'ap': ap}, f)
        print('Mean AP = {:.4f}'.format(np.mean(aps)))
        print('~~~~~~~~')
        print('Results:')
        for ap in aps:
            print('{:.3f}'.format(ap))
        print('{:.3f}'.format(np.mean(aps)))
        print('~~~~~~~~')
        print('')
        print('--------------------------------------------------------------')
        print('Results computed with the **unofficial** Python eval code.')
        print('Results should be very close to the official MATLAB eval code.')
        print('Recompute with `./tools/reval.py --matlab ...` for your paper.')
        print('-- Thanks, The Management')
        print('--------------------------------------------------------------')
def parse_rec(filename):
    """Parse a PASCAL VOC annotation XML file.

    Returns a list of dicts, one per <object> element, with keys
    'name', 'pose', 'truncated', 'difficult' and 'bbox'
    ([xmin, ymin, xmax, ymax] as ints).
    """
    root = ET.parse(filename)
    parsed = []
    for node in root.findall('object'):
        box = node.find('bndbox')
        parsed.append({
            'name': node.find('name').text,
            'pose': node.find('pose').text,
            'truncated': int(node.find('truncated').text),
            'difficult': int(node.find('difficult').text),
            'bbox': [int(box.find(tag).text)
                     for tag in ('xmin', 'ymin', 'xmax', 'ymax')],
        })
    return parsed
def voc_ap(rec, prec, use_07_metric=False):
    """Compute PASCAL VOC average precision from recall/precision arrays.

    With `use_07_metric` True, the 11-point interpolation of the VOC 2007
    devkit is used; otherwise the exact area under the interpolated PR
    curve (the post-2010 definition).
    """
    if use_07_metric:
        # 11-point interpolation: average the max precision attained at
        # recall thresholds 0.0, 0.1, ..., 1.0.
        ap = 0.
        for threshold in np.arange(0., 1.1, 0.1):
            above = prec[rec >= threshold]
            ap += (np.max(above) if above.size else 0.) / 11.
        return ap
    # Exact AUC: pad with sentinel points, take the running-max envelope of
    # the precision from the right, then sum rectangle areas wherever the
    # recall actually moves.
    mrec = np.concatenate(([0.], rec, [1.]))
    mpre = np.concatenate(([0.], prec, [0.]))
    mpre = np.maximum.accumulate(mpre[::-1])[::-1]
    changed = np.where(mrec[1:] != mrec[:-1])[0]
    return np.sum((mrec[changed + 1] - mrec[changed]) * mpre[changed + 1])
def voc_eval(detpath,
             annopath,
             imagesetfile,
             classname,
             cachedir,
             ovthresh=0.5,
             use_07_metric=False):
    """
    rec, prec, ap = voc_eval(detpath, annopath, imagesetfile, classname,
                             cachedir, [ovthresh], [use_07_metric])

    Top level function that does the PASCAL VOC evaluation.

    detpath: Path to detections;
        detpath.format(classname) should produce the detection results file.
    annopath: Path to annotations;
        annopath.format(imagename) should be the xml annotations file.
    imagesetfile: Text file containing the list of images, one per line.
    classname: Category name to evaluate.
    cachedir: Directory for caching the parsed annotations.
    ovthresh: IoU threshold for a detection to count as a true positive
        (default 0.5).
    use_07_metric: Whether to use VOC07's 11-point AP computation
        (default False).

    Returns (recall array, precision array, average precision).
    """
    # ---- load ground truth, caching parsed annotations on disk ----
    if not os.path.isdir(cachedir):
        os.mkdir(cachedir)
    # NOTE(review): `imagesetfile` is usually a full path, so the cache file
    # name embeds that whole path; kept as-is for backward compatibility.
    cachefile = os.path.join(cachedir, '%s_annots.pkl' % imagesetfile)
    # read the list of image ids
    with open(imagesetfile, 'r') as f:
        lines = f.readlines()
    imagenames = [x.strip() for x in lines]

    if not os.path.isfile(cachefile):
        # parse every annotation file (writing the cache is disabled)
        recs = {}
        for i, imagename in enumerate(imagenames):
            recs[imagename] = parse_rec(annopath.format(imagename))
            if i % 100 == 0:
                print('Reading annotation for {:d}/{:d}'.format(
                    i + 1, len(imagenames)))
        # save
        #print('Saving cached annotations to {:s}'.format(cachefile))
        #with open(cachefile, 'wb') as f:
        #    pickle.dump(recs, f)
    else:
        # load the cached annotations
        with open(cachefile, 'rb') as f:
            try:
                recs = pickle.load(f)
            except Exception:
                # python2-era caches need an explicit bytes encoding
                # (narrowed from a bare `except:`)
                recs = pickle.load(f, encoding='bytes')

    # ---- extract the ground-truth objects of this class, per image ----
    class_recs = {}
    npos = 0
    for imagename in imagenames:
        R = [obj for obj in recs[imagename] if obj['name'] == classname]
        bbox = np.array([x['bbox'] for x in R])
        # fix: `np.bool` was removed in NumPy >= 1.24; use the builtin bool
        difficult = np.array([x['difficult'] for x in R]).astype(bool)
        det = [False] * len(R)
        # "difficult" objects are excluded from the positive count
        npos = npos + sum(~difficult)
        class_recs[imagename] = {'bbox': bbox,
                                 'difficult': difficult,
                                 'det': det}

    # ---- read the detections for this class ----
    detfile = detpath.format(classname)
    with open(detfile, 'r') as f:
        lines = f.readlines()

    splitlines = [x.strip().split(' ') for x in lines]
    image_ids = [x[0] for x in splitlines]
    confidence = np.array([float(x[1]) for x in splitlines])
    BB = np.array([[float(z) for z in x[2:]] for x in splitlines])

    nd = len(image_ids)
    tp = np.zeros(nd)
    fp = np.zeros(nd)

    if BB.shape[0] > 0:
        # sort detections by decreasing confidence
        sorted_ind = np.argsort(-confidence)
        BB = BB[sorted_ind, :]
        image_ids = [image_ids[x] for x in sorted_ind]

        # go down the detections and mark TPs and FPs
        for d in range(nd):
            # NOTE(review): the id is sliced out of a longer string, e.g.
            # ".../000001.jpg" -> "000001"; this is dataset specific.
            # (renamed from `id`, which shadowed the builtin)
            img_id = image_ids[d][-10:-4]
            try:
                R = class_recs[img_id]
            except KeyError:
                # narrowed from a bare `except:` — only a missing image id
                # is expected here
                print("det not found")
                continue
            bb = BB[d, :].astype(float)
            ovmax = -np.inf
            BBGT = R['bbox'].astype(float)

            if BBGT.size > 0:
                # intersection with every ground-truth box
                ixmin = np.maximum(BBGT[:, 0], bb[0])
                iymin = np.maximum(BBGT[:, 1], bb[1])
                ixmax = np.minimum(BBGT[:, 2], bb[2])
                iymax = np.minimum(BBGT[:, 3], bb[3])
                iw = np.maximum(ixmax - ixmin + 1., 0.)
                ih = np.maximum(iymax - iymin + 1., 0.)
                inters = iw * ih

                # union
                uni = ((bb[2] - bb[0] + 1.) * (bb[3] - bb[1] + 1.) +
                       (BBGT[:, 2] - BBGT[:, 0] + 1.) *
                       (BBGT[:, 3] - BBGT[:, 1] + 1.) - inters)

                overlaps = inters / uni
                ovmax = np.max(overlaps)
                jmax = np.argmax(overlaps)

            if ovmax > ovthresh:
                if not R['difficult'][jmax]:
                    if not R['det'][jmax]:
                        # first sufficiently-overlapping detection: TP
                        tp[d] = 1.
                        R['det'][jmax] = 1
                    else:
                        # duplicate detection of an already-matched object
                        fp[d] = 1.
            else:
                fp[d] = 1.

    # ---- compute the precision / recall curves and AP ----
    fp = np.cumsum(fp)
    tp = np.cumsum(tp)
    rec = tp / float(npos)
    # avoid divide by zero in case the first detection matches a difficult
    # ground truth
    prec = tp / np.maximum(tp + fp, np.finfo(np.float64).eps)
    ap = voc_ap(rec, prec, use_07_metric)

    return rec, prec, ap
7959c75960a356c8ef0d8488954adb7451b1fed3 | 4,915 | py | Python | rgw/v2/tests/s3_swift/swift_stats.py | viduship/ceph-qe-scripts | 886619fa6600c24cbf989d65868951b9c3decd72 | [
"MIT"
] | 6 | 2019-04-12T17:45:44.000Z | 2021-09-14T19:59:05.000Z | rgw/v2/tests/s3_swift/swift_stats.py | viduship/ceph-qe-scripts | 886619fa6600c24cbf989d65868951b9c3decd72 | [
"MIT"
] | 111 | 2019-12-10T10:41:08.000Z | 2022-03-31T11:42:30.000Z | rgw/v2/tests/s3_swift/swift_stats.py | viduship/ceph-qe-scripts | 886619fa6600c24cbf989d65868951b9c3decd72 | [
"MIT"
] | 23 | 2019-05-30T19:48:25.000Z | 2022-03-24T17:07:19.000Z | """
swift_stats - Test swift stat command is working for more than 1000 buckets
Usage: swift_stats.py -c <input_yaml>
<input_yaml>
swift_stats.yaml
Operation:
Create tenanted user
Set max bucket count to 2000
Create number of buckets mentioned in swift_stats.yaml
Check swift stat command executing and giving status
"""
import os
import sys
sys.path.append(os.path.abspath(os.path.join(__file__, "../../../..")))
import argparse
import logging
import traceback
import v2.lib.resource_op as swiftlib
import v2.utils.utils as utils
from v2.lib.admin import UserMgmt
from v2.lib.exceptions import RGWBaseException, TestExecError
from v2.lib.resource_op import Config
from v2.lib.s3.write_io_info import BasicIOInfoStructure, IOInfoInitialize
from v2.lib.swift.auth import Auth
from v2.utils.log import configure_logging
from v2.utils.test_desc import AddTestInfo
log = logging.getLogger()
def test_exec(config):
    """Create a tenanted RGW user with a raised bucket quota, create
    `config.container_count` swift containers, then verify that `swift stat`
    reports exactly that many containers.  Exits the process with 0 on
    success and 1 on failure, deleting the user (and data) either way.

    NOTE(review): relies on the module-global `test_info` created in the
    `__main__` block below — confirm this is never called standalone.
    """
    # initialize the IO-info bookkeeping used by the test framework
    io_info_initialize = IOInfoInitialize()
    basic_io_structure = BasicIOInfoStructure()
    io_info_initialize.initialize(basic_io_structure.initial())
    umgmt = UserMgmt()
    # preparing data
    user_names = ["tuffy", "scooby", "max"]
    tenant = "tenant"
    # create the tenanted user plus a swift subuser for authentication
    tenant_user_info = umgmt.create_tenant_user(
        tenant_name=tenant, user_id=user_names[0], displayname=user_names[0]
    )
    user_info = umgmt.create_subuser(tenant_name=tenant, user_id=user_names[0])
    # enable the per-user quota, then raise max_buckets to 2000 so that more
    # than the default 1000 containers can be created
    cmd = "radosgw-admin quota enable --quota-scope=user --uid={uid} --tenant={tenant}".format(
        uid=user_names[0], tenant=tenant
    )
    enable_user_quota = utils.exec_shell_cmd(cmd)
    cmd = "radosgw-admin quota set --quota-scope=user --uid={uid} --tenant={tenant} --max_buckets=2000".format(
        uid=user_names[0], tenant=tenant
    )
    max_bucket = utils.exec_shell_cmd(cmd)
    auth = Auth(user_info)
    rgw = auth.do_auth()
    # create the requested number of swift containers
    for cc in range(config.container_count):
        container_name = utils.gen_bucket_name_from_userid(
            user_info["user_id"], rand_no=cc
        )
        container = swiftlib.resource_op(
            {"obj": rgw, "resource": "put_container", "args": [container_name]}
        )
        if container is False:
            raise TestExecError("Resource execution failed: container creation faield")
    # run `swift stat` against the local radosgw endpoint
    host, ip = utils.get_hostname_ip()
    port = utils.get_radosgw_port_no()
    hostname = str(ip) + ":" + str(port)
    cmd = "swift -A http://{hostname}/auth/1.0 -U '{uid}' -K '{key}' stat".format(
        hostname=hostname, uid=user_info["user_id"], key=user_info["key"]
    )
    swift_cmd = utils.exec_shell_cmd(cmd)
    # parse the "Key: Value" stat output into a dict by stripping spaces and
    # turning newlines into ':' separators
    # NOTE(review): this parsing assumes no value contains ':' or spaces.
    swift_cmd = swift_cmd.replace(" ", "")
    swift_cmd = swift_cmd.replace("\n", ":")
    li = list(swift_cmd.split(":"))
    res_dct = {li[i]: li[i + 1] for i in range(0, len(li) - 1, 2)}

    # pass iff `swift stat` reports exactly the expected container count;
    # in both branches the user (and its data) is purged first
    if int(res_dct["Containers"]) == config.container_count:
        cmd = "radosgw-admin user rm --uid={uid} --tenant={tenant} --purge-data".format(
            uid=user_names[0], tenant=tenant
        )
        delete_user_bucket = utils.exec_shell_cmd(cmd)
        test_info.success_status("test passed")
        sys.exit(0)
    else:
        cmd = "radosgw-admin user rm --uid={uid} --tenant={tenant} --purge-data".format(
            uid=user_names[0], tenant=tenant
        )
        delete_user_bucket = utils.exec_shell_cmd(cmd)
        test_info.failed_status("test failed")
        sys.exit(1)
if __name__ == "__main__":
    # Script entry point: parse CLI args, configure logging, run the test.
    test_info = AddTestInfo("swift stats")
    try:
        # ensure the shared test_data directory exists at the project root
        project_dir = os.path.abspath(os.path.join(__file__, "../../.."))
        test_data_dir = "test_data"
        TEST_DATA_PATH = os.path.join(project_dir, test_data_dir)
        log.info("TEST_DATA_PATH: %s" % TEST_DATA_PATH)
        if not os.path.exists(TEST_DATA_PATH):
            log.info("test data dir not exists, creating.. ")
            os.makedirs(TEST_DATA_PATH)
        parser = argparse.ArgumentParser(description="RGW S3 Automation")
        parser.add_argument("-c", dest="config", help="RGW Test yaml configuration")
        parser.add_argument(
            "-log_level",
            dest="log_level",
            help="Set Log Level [DEBUG, INFO, WARNING, ERROR, CRITICAL]",
            default="info",
        )
        args = parser.parse_args()
        yaml_file = args.config
        # fix: logging was configured twice with identical arguments
        # (the log_f_name/configure_logging pair was duplicated verbatim)
        log_f_name = os.path.basename(os.path.splitext(yaml_file)[0])
        configure_logging(f_name=log_f_name, set_level=args.log_level.upper())
        config = Config(yaml_file)
        config.read()
        test_exec(config)
        test_info.success_status("test passed")
        sys.exit(0)
    except (RGWBaseException, Exception) as e:
        log.info(e)
        log.info(traceback.format_exc())
        test_info.failed_status("test failed")
        sys.exit(1)
| 35.875912 | 111 | 0.661851 |
7959c7ab574837bafc86e295ed47f00567e4e0b9 | 341 | py | Python | week_functs.py | digitaljosh/meal-planner | 53193fc49a5f10867e43068622961acfdd8dd762 | [
"Unlicense",
"MIT"
] | 12 | 2018-12-29T20:42:02.000Z | 2022-02-19T21:01:22.000Z | week_functs.py | digitaljosh/meal-planner | 53193fc49a5f10867e43068622961acfdd8dd762 | [
"Unlicense",
"MIT"
] | 62 | 2018-02-01T20:40:28.000Z | 2021-02-07T10:44:55.000Z | week_functs.py | digitaljosh/meal-planner | 53193fc49a5f10867e43068622961acfdd8dd762 | [
"Unlicense",
"MIT"
] | 7 | 2018-02-02T00:31:54.000Z | 2021-05-31T15:50:26.000Z | import datetime
def get_today_string():
    """Return today's date formatted as MM/DD."""
    return datetime.datetime.now().strftime("%m/%d")
def get_week_from_string():
    """Return the date one week from today, formatted as MM/DD."""
    next_week = datetime.datetime.today() + datetime.timedelta(days=7)
    return next_week.strftime("%m/%d")
| 24.357143 | 70 | 0.709677 |
7959c84533a78fdb318d8f3430e371da2cafcf85 | 875 | py | Python | view/test.py | LianGee/zed | 0838eec03733a26705126d96dfb59af6bdf19a9e | [
"MIT"
] | null | null | null | view/test.py | LianGee/zed | 0838eec03733a26705126d96dfb59af6bdf19a9e | [
"MIT"
] | null | null | null | view/test.py | LianGee/zed | 0838eec03733a26705126d96dfb59af6bdf19a9e | [
"MIT"
] | null | null | null | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# @File : test.py
# @Author: zaoshu
# @Date : 2020-02-12
# @Desc :
from flask import Blueprint, request
from common.response import Response
from service.qiniu_service import QiniuService
test_bp = Blueprint('test', __name__)
@test_bp.route('/upload/img', methods=['POST'])
def upload_img():
    """Accept a multipart image upload, store it via QiniuService and return
    the resulting URL (empty string when no file was sent).

    Fix: the extension was taken with `filename.split('.')[1]`, which raised
    IndexError for dot-less filenames, picked the wrong token for names like
    "a.tar.gz", and rejected upper-case extensions.  The last suffix is now
    used, case-insensitively.
    """
    files = request.files
    file = files.get('file')
    url = ''
    if file is not None:
        ext = file.filename.rsplit('.', 1)[-1].lower() if '.' in file.filename else ''
        if ext not in ['png', 'jpg', 'jpeg', 'bmp', 'gif']:
            return Response.failed(msg='图片格式错误')
        url = QiniuService.upload_img(file.read())
    return Response.success(url)
@test_bp.route('/upload/doc', methods=['POST'])
def upload_doc():
    """Accept a JSON body with a non-empty 'doc' field, store it via
    QiniuService and return the result.

    Fix: request validation previously used `assert`, which is stripped
    under `python -O` and turns into a 500 on bad input; it is now an
    explicit check returning a failed response.
    """
    args = request.json
    doc = args.get('doc')
    if doc is None or len(doc) == 0:
        return Response.failed(msg='doc is required')
    return Response.success(QiniuService.upload_doc(doc))
| 26.515152 | 83 | 0.646857 |
7959cab026ee87ac8a13f63d594d31e7f2d4f3ea | 1,451 | py | Python | IRIS_data_download/IRIS_download_support/obspy/scripts/flinnengdahl.py | earthinversion/Fnet_IRIS_data_automated_download | 09a6e0c992662feac95744935e038d1c68539fa1 | [
"MIT"
] | 2 | 2020-03-05T01:03:01.000Z | 2020-12-17T05:04:07.000Z | IRIS_data_download/IRIS_download_support/obspy/scripts/flinnengdahl.py | earthinversion/Fnet_IRIS_data_automated_download | 09a6e0c992662feac95744935e038d1c68539fa1 | [
"MIT"
] | 4 | 2021-03-31T19:25:55.000Z | 2021-12-13T20:32:46.000Z | IRIS_data_download/IRIS_download_support/obspy/scripts/flinnengdahl.py | earthinversion/Fnet_IRIS_data_automated_download | 09a6e0c992662feac95744935e038d1c68539fa1 | [
"MIT"
] | 2 | 2020-09-08T19:33:40.000Z | 2021-04-05T09:47:50.000Z | #!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
Get the Flinn-Engdahl region name from longitude and latitude.
"""
from __future__ import (absolute_import, division, print_function,
unicode_literals)
from future.builtins import * # NOQA
from argparse import ArgumentParser
from obspy import __version__
from obspy.geodetics import FlinnEngdahl
def main(argv=None):
parser = ArgumentParser(prog='obspy-flinn-engdahl',
description=__doc__.strip())
parser.add_argument('-V', '--version', action='version',
version='%(prog)s ' + __version__)
parser.add_argument('longitude', type=float,
help='Longitude (in degrees) of point. Positive for '
'East, negative for West.')
parser.add_argument('latitude', type=float,
help='Latitude (in degrees) of point. Positive for '
'North, negative for South.')
args = parser.parse_args(argv)
flinn_engdahl = FlinnEngdahl()
print(flinn_engdahl.get_region(args.longitude, args.latitude))
if __name__ == '__main__':
# It is not possible to put the code of main directly here.
# This script is automatically installed with name obspy-... by
# setup.py to the Scripts or bin directory of your Python distribution.
# setup.py needs a function to which its scripts can be linked.
main()
| 37.205128 | 77 | 0.63887 |
7959cadd71b9aea7eeac2fc9705ed08e6b1077ab | 1,989 | py | Python | aiida_exciting/commands/lapwbasis.py | electronic-structure/aiida-exciting | c7f4d2d2370dc0b5cb1270883ff24d4880a11dad | [
"MIT"
] | 1 | 2017-06-25T18:25:12.000Z | 2017-06-25T18:25:12.000Z | aiida_exciting/commands/lapwbasis.py | electronic-structure/aiida-exciting | c7f4d2d2370dc0b5cb1270883ff24d4880a11dad | [
"MIT"
] | null | null | null | aiida_exciting/commands/lapwbasis.py | electronic-structure/aiida-exciting | c7f4d2d2370dc0b5cb1270883ff24d4880a11dad | [
"MIT"
] | null | null | null | import click
@click.group()
def lapwbasis():
"""Help for lapwbasis command"""
@lapwbasis.command('upload')
@click.option('--fmt', type=str, help='Format of the species: \"xml\" or \"json\""', required=True)
@click.option('--name', type=str, help='Name of the LAPW basis set', required=True)
@click.option('--description', type=str, help='Description of the set', required=False)
@click.argument('path', type=str, required=True)
def upload_command(path, fmt, name, description):
"""Upload a new set of LAPW basis files"""
import os.path
stop_if_existing = False
if not fmt in ["xml", "json"]:
print >> sys.stderr, ("wrong species format: %s"%parsed_args.fmt)
sys.exit(1)
folder = os.path.abspath(path)
if (not os.path.isdir(folder)):
print >> sys.stderr, 'Cannot find directory: ' + folder
sys.exit(1)
from aiida import load_dbenv
load_dbenv()
import aiida_exciting.data.lapwbasis as lapwbasis
if not description: description=""
files_found, files_uploaded = lapwbasis.upload_family(folder, name, description, fmt, stop_if_existing)
print "Species files found: {}. New files uploaded: {}".format(files_found, files_uploaded)
@lapwbasis.command('list')
def list_command():
"""List the uploaded sets of LAPW basis files"""
with_description = True
from aiida import load_dbenv
load_dbenv()
from aiida.orm import DataFactory
LapwbasisData = DataFactory('exciting.lapwbasis')
groups = LapwbasisData.get_lapwbasis_groups()
if groups:
for g in groups:
sp = LapwbasisData.query(dbgroups=g.dbgroup).distinct()
num_sp = sp.count()
if with_description:
description_string = ": {}".format(g.description)
else:
description_string = ""
print "* {} [{} species]{}".format(g.name, num_sp, description_string)
else:
print "No LAPW basis sets were found."
| 31.571429 | 107 | 0.651584 |
7959cafbf8c85d480fbb1e25dd02e32c3d0f8cf6 | 2,300 | py | Python | webapp/src/routes/auth_routes.py | muctadir/labeling-machine | eb6dde48457715d2aa8a304b2686a8eec8a809ae | [
"MIT"
] | null | null | null | webapp/src/routes/auth_routes.py | muctadir/labeling-machine | eb6dde48457715d2aa8a304b2686a8eec8a809ae | [
"MIT"
] | null | null | null | webapp/src/routes/auth_routes.py | muctadir/labeling-machine | eb6dde48457715d2aa8a304b2686a8eec8a809ae | [
"MIT"
] | null | null | null | from flask import request, redirect, url_for, flash
from flask_login import login_user, logout_user
from sqlalchemy import select
from werkzeug.security import check_password_hash
from src import app, db
from src.database.models import User
from src.helper.tools_common import string_none_or_empty
@app.route("/signin", methods=['GET', 'POST'])
def signin():
if request.method == 'POST':
if string_none_or_empty(request.form['user']) or string_none_or_empty(request.form['password']):
return redirect(url_for('index'))
username = request.form['user'].strip()
password = request.form['password']
user = db.session.execute(select(User).where(User.username == username)).scalar()
if user is not None and check_password_hash(user.password, password):
login_user(user, force=True)
else:
flash('Authentication failed!!!', category='error')
return redirect(url_for('index'))
else:
return "Not POST!"
# Todo: signup currently disabled
# @app.route("/signup", methods=['GET', 'POST'])
# def signup():
# if request.method == 'GET':
# if 'new_user_username' not in session:
# # Try to log-in from home
# return redirect(url_for('index'))
# else:
# tmp = session['new_user_username'] # Passed username from home page
# session.pop('new_user_username', None)
# return render_template('common_pages/signup.html', username=tmp)
# else:
# username = request.form['name']
# gender = request.form['gender']
# education = request.form['education']
# occupation = request.form['occupation']
# affiliation = request.form['affiliation']
# xp = request.form['years_xp']
# user_item = User(username=username, gender=gender, education=education, occupation=occupation,
# affiliation=affiliation, years_xp=xp)
# db.session.add(user_item)
# db.session.commit()
# sign_in(username)
# return redirect(url_for('index'))
@app.route("/signout", methods=['GET', 'POST'])
def signout():
if request.method == 'GET':
logout_user()
return redirect(url_for('index'))
else:
return "Not GET!"
| 36.507937 | 104 | 0.633478 |
7959cbeec367f450e1667a84274e190f99e7ed69 | 675 | py | Python | learning_object/collections/manager/collections/get_one.py | dsvalenciah/ROAp | 24cbff0e719c5009ec1f1e7190924d4d9297e992 | [
"MIT"
] | 4 | 2018-04-23T00:04:01.000Z | 2018-10-28T22:56:51.000Z | learning_object/collections/manager/collections/get_one.py | dsvalenciah/ROAp | 24cbff0e719c5009ec1f1e7190924d4d9297e992 | [
"MIT"
] | 23 | 2017-12-22T08:27:35.000Z | 2021-12-13T19:57:35.000Z | learning_object/collections/manager/collections/get_one.py | dsvalenciah/ROAp | 24cbff0e719c5009ec1f1e7190924d4d9297e992 | [
"MIT"
] | 1 | 2020-06-03T02:07:26.000Z | 2020-06-03T02:07:26.000Z | from ..exceptions import CollectionNotFoundError
def get_one(db_client, collection_id, user):
    """Fetch one collection document by id and annotate it — and each of its
    sub-collections — with the number of learning objects it contains.

    Raises CollectionNotFoundError when no document matches `collection_id`.
    """
    # NOTE(review): `_` is taken from user['language'] and then *called* as a
    # gettext-style translator below — confirm 'language' really holds a
    # translation function and not a plain language-code string.
    _ = user.get('language')
    collection = db_client.locollection.find_one({
        '_id': collection_id
    })
    if not collection:
        raise CollectionNotFoundError(_('Collection not found'))
    # count of learning objects attached directly to this collection
    lo_quantity = db_client.learning_objects.find({ 'collection_id': collection_id }).count()
    collection.update({'lo_quantity': lo_quantity})
    # annotate every sub-collection with its own learning-object count
    for sub_collection in collection.get('sub_collections'):
        sub_collection.update({'lo_quantity': db_client.learning_objects.find({'sub_collection_id': sub_collection.get('id_')}).count()})
    return collection
| 33.75 | 141 | 0.72 |
7959cc531f4f7d5dac123866c9d0cb09ec8f05b9 | 14,891 | py | Python | fastai/text/models/transformer.py | vettukal/fastai | cfe5eba3635737f0a3b2ed9fb0e03e0e153e1b37 | [
"Apache-2.0"
] | null | null | null | fastai/text/models/transformer.py | vettukal/fastai | cfe5eba3635737f0a3b2ed9fb0e03e0e153e1b37 | [
"Apache-2.0"
] | null | null | null | fastai/text/models/transformer.py | vettukal/fastai | cfe5eba3635737f0a3b2ed9fb0e03e0e153e1b37 | [
"Apache-2.0"
] | null | null | null | from ...torch_core import *
from ...layers import *
from .awd_lstm import RNNDropout, LinearDecoder, SequentialRNN
__all__ = ['Activation', 'PositionalEncoding', 'GeLU', 'Swish', 'feed_forward', 'MultiHeadAttention', 'MultiHeadRelativeAttention',
'DecoderLayer', 'Transformer', 'TransformerXL', 'get_transformer_lm', 'get_transformerXL_lm']
# Activation functions selectable in `feed_forward` (mapped to modules via `_activ_func`).
Activation = Enum('Activation', 'ReLU Swish GeLU')
class PositionalEncoding(nn.Module):
    "Sinusoidal positional encoding: for each position, d/2 sine then d/2 cosine features."

    def __init__(self, d:int):
        super().__init__()
        # inverse frequencies 1/10000^(2i/d), registered as a (non-trainable)
        # buffer so it moves with the module across devices
        self.register_buffer('freq', 1 / (10000 ** (torch.arange(0., d, 2.)/d)))

    def forward(self, pos:Tensor, bs:int=None):
        "Encode the 1-d tensor of positions `pos` into a (len(pos), d) tensor. `bs` is unused."
        # fix: torch.ger is a deprecated alias of torch.outer
        inp = torch.outer(pos, self.freq)
        enc = torch.cat([inp.sin(), inp.cos()], dim=-1)
        return enc
class GeLU(nn.Module):
    "Tanh-approximated Gaussian Error Linear Unit activation."

    def forward(self, x):
        inner = math.sqrt(2 / math.pi) * (x + 0.044715 * x.pow(3))
        return 0.5 * x * (1 + torch.tanh(inner))
class Swish(nn.Module):
    "Swish (SiLU) activation: x * sigmoid(x)."

    def forward(self, x):
        return torch.sigmoid(x) * x
# Map each `Activation` member to a ready-to-use module instance.
# Fix: `Swish` was stored as the class itself (missing parentheses) unlike the
# instantiated ReLU/GeLU entries; `feed_forward` appends these values as
# layers, so each must be an instance.
_activ_func = {Activation.ReLU:nn.ReLU(inplace=True), Activation.GeLU:GeLU(), Activation.Swish: Swish()}
def feed_forward(d_model:int, d_ff:int, p_ff:float=0., act:Activation=Activation.ReLU, double_drop:bool=True):
    "Position-wise feed-forward sublayer: Linear -> activation [-> Dropout] -> Linear -> Dropout, with residual merge and LayerNorm."
    expand = [nn.Linear(d_model, d_ff), _activ_func[act]]
    if double_drop:
        expand.append(nn.Dropout(p_ff))
    project = [nn.Linear(d_ff, d_model), nn.Dropout(p_ff), MergeLayer(), nn.LayerNorm(d_model)]
    return SequentialEx(*expand, *project)
class MultiHeadAttention(nn.Module):
"MutiHeadAttention."
def __init__(self, n_heads:int, d_model:int, d_head:int, p_res:float=0., p_att:float=0., bias:bool=True,
scale:bool=True):
super().__init__()
self.n_heads,self.d_head,self.scale = n_heads,d_head,scale
self.attention = nn.Linear(d_model, 3 * n_heads * d_head, bias=bias)
self.out = nn.Linear(n_heads * d_head, d_model, bias=bias)
self.drop_att,self.drop_res = nn.Dropout(p_att),nn.Dropout(p_res)
self.ln = nn.LayerNorm(d_model)
def forward(self, x:Tensor, mask:Tensor=None, **kwargs):
return self.ln(x + self.drop_res(self.out(self._apply_attention(x, mask=mask, **kwargs))))
def _apply_attention(self, x:Tensor, mask:Tensor=None):
bs,x_len = x.size(0),x.size(1)
wq,wk,wv = torch.chunk(self.attention(x), 3, dim=-1)
wq,wk,wv = map(lambda x:x.view(bs, x.size(1), self.n_heads, self.d_head), (wq,wk,wv))
wq,wk,wv = wq.permute(0, 2, 1, 3),wk.permute(0, 2, 3, 1),wv.permute(0, 2, 1, 3)
attn_score = torch.matmul(wq, wk)
if self.scale: attn_score = attn_score.div_(self.d_head ** 0.5)
if mask is not None:
attn_score = attn_score.float().masked_fill(mask, -float('inf')).type_as(attn_score)
attn_prob = self.drop_att(F.softmax(attn_score, dim=-1))
attn_vec = torch.matmul(attn_prob, wv)
return attn_vec.permute(0, 2, 1, 3).contiguous().contiguous().view(bs, x_len, -1)
def _attention_einsum(self, x, mask=None):
# Permute and matmul is a little bit faster but this implementation is more readable
bs,x_len = x.size(0),x.size(1)
wq,wk,wv = torch.chunk(self.attention(x), 3, dim=-1)
wq,wk,wv = map(lambda x:x.view(bs, x.size(1), self.n_heads, self.d_head), (wq,wk,wv))
attn_score = torch.einsum('bind,bjnd->bijn', (wq, wk))
if self.scale: attn_score = attn_score.mul_(1/(self.d_head ** 0.5))
if mask is not None:
attn_score = attn_score.float().masked_fill(mask, -float('inf')).type_as(attn_score)
attn_prob = self.drop_att(F.softmax(attn_score, dim=2))
attn_vec = torch.einsum('bijn,bjnd->bind', (attn_prob, wv))
return attn_vec.contiguous().view(bs, x_len, -1)
#def _line_shift1(x:Tensor, mask:bool=False):
# "Shift the line i of `x` by p-i elements to the left, is `mask` puts 0s on the diagonal."
# bs,n,p,nh = x.size()
# x_pad = torch.cat([x.new_zeros(bs,n,1,nh), x], dim=2)
# x_shift = x_pad.view(bs,p + 1,n,nh)[:,1:].view_as(x)
# if mask: x_shift.mul_(torch.tril(x.new_ones(n,p), p-n)[None,:,:,None])
# return x_shift
def _line_shift(x:Tensor, mask:bool=False):
"Shift the line i of `x` by p-i elements to the left, is `mask` puts 0s on the diagonal."
bs,nh,n,p = x.size()
x_pad = torch.cat([x.new_zeros(bs,nh,n,1), x], dim=3)
x_shift = x_pad.view(bs,nh,p + 1,n)[:,:,1:].view_as(x)
if mask: x_shift.mul_(torch.tril(x.new_ones(n,p), p-n)[None,None,])
return x_shift
class MultiHeadRelativeAttention(MultiHeadAttention):
    "MultiHeadAttention with relative positional encoding(Transformer-XL style)."
    def __init__(self, n_heads:int, d_model:int, d_head:int, p_res:float=0., p_att:float=0., bias:bool=True,
                 scale:bool=True):
        super().__init__(n_heads, d_model, d_head, p_res=p_res, p_att=p_att, bias=bias, scale=scale)
        #Separate projection for the relative positional encodings(W_kR in the paper).
        self.r_attn = nn.Linear(d_model, n_heads * d_head, bias=bias)
    def _apply_attention(self, x:Tensor, r:Tensor=None, u:Tensor=None, v:Tensor=None, mask:Tensor=None, mem:Tensor=None):
        "Attention with a content term(AC) and a relative-position term(BD); `mem` prepends cached hidden states."
        #Notations from the paper: x input, r vector of relative distance between two elements, u and v learnable
        #parameters of the model common between all layers, mask to avoid cheating and mem the previous hidden states.
        bs,x_len,seq_len = x.size(0),x.size(1),r.size(0)
        context = x if mem is None else torch.cat([mem, x], dim=1)
        wq,wk,wv = torch.chunk(self.attention(context), 3, dim=-1)
        #Queries only come from the current segment; keys/values cover memory + segment.
        wq = wq[:,-x_len:]
        wq,wk,wv = map(lambda x:x.view(bs, x.size(1), self.n_heads, self.d_head), (wq,wk,wv))
        wq,wk,wv = wq.permute(0, 2, 1, 3),wk.permute(0, 2, 3, 1),wv.permute(0, 2, 1, 3)
        wkr = self.r_attn(r)
        wkr = wkr.view(seq_len, self.n_heads, self.d_head)
        wkr = wkr.permute(1,2,0)
        #### compute attention score(AC is (a) + (c) and BD is (b) + (d) in the paper)
        AC = torch.matmul(wq+u,wk)
        BD = _line_shift(torch.matmul(wq+v, wkr))
        attn_score = (AC + BD).mul_(1/(self.d_head ** 0.5))
        if mask is not None:
            attn_score = attn_score.float().masked_fill(mask, -float('inf')).type_as(attn_score)
        #FIX: attention dropout was missing here, inconsistent with the parent class and the einsum path below.
        attn_prob = self.drop_att(F.softmax(attn_score, dim=-1))
        attn_vec = torch.matmul(attn_prob, wv)
        return attn_vec.permute(0, 2, 1, 3).contiguous().view(bs, x_len, -1)
    @staticmethod
    def _line_shift_bnpnh(x:Tensor):
        "Shift line i of `x`(layout bs,n,p,nh) by p-i elements to the left — einsum-layout variant of `_line_shift`."
        bs,n,p,nh = x.size()
        x_pad = torch.cat([x.new_zeros(bs,n,1,nh), x], dim=2)
        return x_pad.view(bs,p + 1,n,nh)[:,1:].view_as(x)
    def _attention_einsum(self, x:Tensor, r:Tensor=None, u:Tensor=None, v:Tensor=None, mask:Tensor=None, mem:Tensor=None):
        "Einsum implementation: more readable, a little slower than permute+matmul."
        bs,x_len,seq_len = x.size(0),x.size(1),r.size(0)
        context = x if mem is None else torch.cat([mem, x], dim=1)
        wq,wk,wv = torch.chunk(self.attention(context), 3, dim=-1)
        wq = wq[:,-x_len:]
        wkr = self.r_attn(r)
        wq,wk,wv = map(lambda x:x.view(bs, x.size(1), self.n_heads, self.d_head), (wq,wk,wv))
        wkr = wkr.view(seq_len, self.n_heads, self.d_head)
        #### compute attention score(AC is (a) + (c) and BD is (b) + (d) in the paper)
        AC = torch.einsum('bind,bjnd->bijn', (wq+u, wk))
        #FIX: this used to call `_line_shift1`, which only exists as commented-out code at module
        #level and therefore raised NameError; its logic now lives in `_line_shift_bnpnh` above.
        BD = self._line_shift_bnpnh(torch.einsum('bind,jnd->bijn', (wq+v, wkr)))
        attn_score = (AC + BD).mul_(1/(self.d_head ** 0.5))
        if mask is not None:
            attn_score = attn_score.float().masked_fill(mask, -float('inf')).type_as(attn_score)
        attn_prob = self.drop_att(F.softmax(attn_score, dim=2))
        attn_vec = torch.einsum('bijn,bjnd->bind', (attn_prob, wv))
        return attn_vec.contiguous().view(bs, x_len, -1)
class DecoderLayer(nn.Module):
    "One decoder block: multi-head(optionally relative) attention followed by a position-wise feed-forward."
    #Can't use Sequential directly cause more than one input...
    def __init__(self, n_heads:int, d_model:int, d_head:int, d_inner:int, p_res:float=0., p_att:float=0., p_ff:float=0.,
                 bias:bool=True, scale:bool=True, act:Activation=Activation.ReLU, double_drop:bool=True,
                 attn_cls:Callable=MultiHeadAttention):
        super().__init__()
        self.mhra = attn_cls(n_heads, d_model, d_head, p_res=p_res, p_att=p_att, bias=bias, scale=scale)
        self.ff = feed_forward(d_model, d_inner, p_ff, act=act, double_drop=double_drop)
    def forward(self, x:Tensor, mask:Tensor=None, **kwargs):
        attn_out = self.mhra(x, mask=mask, **kwargs)
        return self.ff(attn_out)
class Transformer(nn.Module):
    "Decoder-only Transformer language model with absolute(learned or sinusoidal) positional encoding."
    def __init__(self, vocab_sz:int, ctx_len:int, n_layers:int, n_heads:int, d_model:int, d_head:int, d_inner:int,
                 p_res:float=0., p_att:float=0., p_ff:float=0., p_emb:float=0., bias:bool=True, scale:bool=True,
                 act:Activation=Activation.ReLU, double_drop:bool=True, attn_cls:Callable=MultiHeadAttention,
                 learned_pos_enc:bool=True):
        super().__init__()
        self.embedding = nn.Embedding(vocab_sz, d_model)
        if learned_pos_enc: self.pos_enc = nn.Embedding(ctx_len, d_model)
        else:               self.pos_enc = PositionalEncoding(d_model)
        self.drop_emb = nn.Dropout(p_emb)
        decoder_blocks = [DecoderLayer(n_heads, d_model, d_head, d_inner, p_res=p_res, p_att=p_att,
                                       p_ff=p_ff, bias=bias, scale=scale, act=act, double_drop=double_drop,
                                       attn_cls=attn_cls) for _ in range(n_layers)]
        self.layers = nn.ModuleList(decoder_blocks)
    def reset(self):
        "Stateless model: nothing to reset."
        pass
    def forward(self, x):
        bs, x_len = x.size()
        positions = torch.arange(0, x_len, device=x.device, dtype=x.dtype)
        h = self.drop_emb(self.embedding(x) + self.pos_enc(positions)[None]) #.mul_(self.d_model ** 0.5)
        #Causal mask: position i may only attend to positions <= i.
        mask = torch.triu(x.new_ones(x_len, x_len), diagonal=1).byte()[None,None]
        #[:,None,None] for einsum implementation of attention
        for layer in self.layers: h = layer(h, mask=mask)
        return ([h],[h]) #For the LinearDecoder
class TransformerXL(nn.Module):
    "Transformer-XL: decoder-only transformer with segment-level memory(`mem_len` cached positions per layer) and relative positional encoding."
    def __init__(self, vocab_sz:int, ctx_len:int, n_layers:int, n_heads:int, d_model:int, d_head:int, d_inner:int,
                 p_res:float=0., p_att:float=0., p_ff:float=0., p_emb:float=0., bias:bool=False, scale:bool=True,
                 act:Activation=Activation.ReLU, double_drop:bool=True, attn_cls:Callable=MultiHeadRelativeAttention,
                 learned_pos_enc:bool=False, mem_len:int=0):
        super().__init__()
        self.embedding = nn.Embedding(vocab_sz, d_model)
        self.pos_enc = nn.Embedding(ctx_len, d_model) if learned_pos_enc else PositionalEncoding(d_model)
        self.drop_emb = nn.Dropout(p_emb)
        #u and v are the content/position biases shared by all layers(passed into each layer in forward).
        self.u = nn.Parameter(torch.Tensor(n_heads, 1, d_head)) #Remove 1 for einsum implementation of attention
        self.v = nn.Parameter(torch.Tensor(n_heads, 1, d_head)) #Remove 1 for einsum implementation of attention
        self.mem_len,self.n_layers,self.d_model = mem_len,n_layers,d_model
        #Allocate the empty memory now(before self.layers exists) so the first forward can read it.
        if self.mem_len > 0: self.reset()
        self.layers = nn.ModuleList([DecoderLayer(n_heads, d_model, d_head, d_inner, p_res=p_res, p_att=p_att,
                                                  p_ff=p_ff, bias=bias, scale=scale, act=act, double_drop=double_drop,
                                                  attn_cls=attn_cls) for k in range(n_layers)])
    def reset(self):
        "Empty the hidden-state memory: one empty tensor per layer plus one for the embedding output."
        self.hidden = [next(self.parameters()).data.new(0) for i in range(self.n_layers+1)]
    def _update_mems(self, hids):
        "Append this segment's hidden states to the memory and trim it to the last `mem_len` positions."
        if not getattr(self, 'hidden', False): return None
        assert len(hids) == len(self.hidden), 'len(hids) != len(self.hidden)'
        #Memory is a constant(no grad flows back through previous segments).
        with torch.no_grad():
            for i in range(len(hids)):
                cat = torch.cat([self.hidden[i], hids[i]], dim=1)
                self.hidden[i] = cat[:,-self.mem_len:].detach()
    def forward(self, x):
        #x: (bs, x_len) token ids.
        bs,x_len = x.size()
        inp = self.drop_emb(self.embedding(x)) #.mul_(self.d_model ** 0.5)
        #m_len: number of cached positions available in addition to the current segment.
        m_len = self.hidden[0].size(1) if hasattr(self, 'hidden') and len(self.hidden[0].size()) > 1 else 0
        seq_len = m_len + x_len
        #Causal mask over memory + segment; the memory part is never masked out.
        mask = torch.triu(x.new_ones(x_len, seq_len), diagonal=1+m_len).byte()[None,None]
        #[:,None,None] for einsum implementation of attention
        hids = []
        #Relative distances, farthest(seq_len-1) first down to 0.
        pos = torch.arange(seq_len-1, -1, -1, device=inp.device, dtype=inp.dtype)
        pos_enc = self.pos_enc(pos)
        hids.append(inp)
        for i, layer in enumerate(self.layers):
            mem = self.hidden[i] if self.mem_len > 0 else None
            inp = layer(inp, r=pos_enc, u=self.u, v=self.v, mask=mask, mem=mem)
            hids.append(inp)
        core_out = inp[:,-x_len:]
        self._update_mems(hids)
        return [core_out], (self.hidden if self.mem_len > 0 else [core_out])
def init_transformer(m):
    "Initialize module `m` for transformer training: N(0, 0.02) weights and zero biases."
    cls_name = m.__class__.__name__
    if 'Linear' in cls_name:
        #Covers nn.Linear and any custom *Linear* layer.
        if getattr(m, 'weight', None) is not None: nn.init.normal_(m.weight, 0., 0.02)
        if getattr(m, 'bias', None) is not None: nn.init.constant_(m.bias, 0.)
    elif 'LayerNorm' in cls_name:
        #LayerNorm gains are centered on 1 instead of 0.
        if getattr(m, 'weight', None) is not None: nn.init.normal_(m.weight, 1., 0.02)
        if getattr(m, 'bias', None) is not None: nn.init.constant_(m.bias, 0.)
    elif 'TransformerXL' in cls_name:
        #Global relative-attention biases u and v.
        if hasattr(m, 'u'): nn.init.normal_(m.u, 0., 0.02)
        if hasattr(m, 'v'): nn.init.normal_(m.v, 0., 0.02)
def get_transformer_lm(vocab_sz:int, ctx_len:int, n_layers:int=12, n_heads:int=12, d_model:int=768, d_head:int=64,
                       d_inner:int=3072, p_res:float=0., p_att:float=0., p_ff:float=0., p_emb:float=0., p_out:float=0.,
                       bias:bool=True, scale:bool=True, act:Activation=Activation.ReLU, double_drop:bool=True,
                       tie_weights:bool=True, out_bias:bool=True):
    "Build a Transformer language model: encoder plus a(optionally weight-tied) linear decoder head."
    encoder = Transformer(vocab_sz, ctx_len, n_layers, n_heads, d_model, d_head, d_inner, p_res=p_res, p_att=p_att,
                          p_ff=p_ff, p_emb=p_emb, bias=bias, scale=scale, act=act, double_drop=double_drop)
    #Share the embedding matrix with the output projection when `tie_weights` is True.
    embedding_to_tie = encoder.embedding if tie_weights else None
    decoder = LinearDecoder(vocab_sz, d_model, output_p=p_out, tie_encoder=embedding_to_tie, bias=out_bias)
    return SequentialRNN(encoder, decoder).apply(init_transformer)
def get_transformerXL_lm(vocab_sz:int, ctx_len:int, n_layers:int=12, n_heads:int=12, d_model:int=768, d_head:int=64,
                         d_inner:int=3072, p_res:float=0., p_att:float=0., p_ff:float=0., p_emb:float=0., p_out:float=0.,
                         bias:bool=False, scale:bool=True, act:Activation=Activation.ReLU, double_drop:bool=True,
                         tie_weights:bool=True, out_bias:bool=True, mem_len:int=0):
    "Build a Transformer-XL language model: encoder with `mem_len` memory plus a(optionally tied) linear decoder."
    encoder = TransformerXL(vocab_sz, ctx_len, n_layers, n_heads, d_model, d_head, d_inner, p_res=p_res, p_att=p_att,
                            p_ff=p_ff, p_emb=p_emb, bias=bias, scale=scale, act=act, double_drop=double_drop, mem_len=mem_len)
    #Share the embedding matrix with the output projection when `tie_weights` is True.
    embedding_to_tie = encoder.embedding if tie_weights else None
    decoder = LinearDecoder(vocab_sz, d_model, output_p=p_out, tie_encoder=embedding_to_tie, bias=out_bias)
    return SequentialRNN(encoder, decoder).apply(init_transformer)
7959cdd16280948ca25a6b6cbc47967e57ecfd1a | 3,632 | py | Python | pydrawing/modules/beautifiers/oilpainting/oilpainting.py | CharlesPikachu/pydrawing | be95378a5667ea345f2a3760f8814dff255ebe15 | [
"MIT"
] | 93 | 2022-01-18T01:42:58.000Z | 2022-03-18T18:42:55.000Z | pydrawing/modules/beautifiers/oilpainting/oilpainting.py | CharlesPikachu/pydrawing | be95378a5667ea345f2a3760f8814dff255ebe15 | [
"MIT"
] | null | null | null | pydrawing/modules/beautifiers/oilpainting/oilpainting.py | CharlesPikachu/pydrawing | be95378a5667ea345f2a3760f8814dff255ebe15 | [
"MIT"
] | 1 | 2022-02-17T04:36:17.000Z | 2022-02-17T04:36:17.000Z | '''
Function:
照片油画化
Author:
Charles
微信公众号:
Charles的皮卡丘
'''
import cv2
import random
import numpy as np
from scipy import ndimage
from ..base import BaseBeautifier
'''Photo oil-painting effect'''
class OilpaintingBeautifier(BaseBeautifier):
    '''Turn a photo into an oil painting by drawing brush-stroke ellipses along image gradients.'''
    def __init__(self, brush_width=5, palette=0, edge_operator='sobel', **kwargs):
        super(OilpaintingBeautifier, self).__init__(**kwargs)
        assert edge_operator in ['scharr', 'prewitt', 'sobel', 'roberts']
        # brush_width: base half-height of each brush ellipse, in pixels
        self.brush_width = brush_width
        # palette: 0 keeps true colors, otherwise the quantization step of the color palette
        self.palette = palette
        # edge_operator: which operator estimates the image gradients
        self.edge_operator = edge_operator
    def iterimage(self, image):
        '''Render one BGR image as an oil painting and return the painted canvas.'''
        # Gradient smoothing kernel: odd size, proportional to the image height.
        r = 2 * int(image.shape[0] / 50) + 1
        gx, gy = self.getgradient(cv2.cvtColor(image, cv2.COLOR_BGR2GRAY), (r, r), self.edge_operator)
        # gh: damped gradient magnitude(controls stroke length); ga: stroke angle in degrees.
        gh = np.sqrt(np.sqrt(np.square(gx) + np.square(gy)))
        ga = (np.arctan2(gy, gx) / np.pi) * 180 + 90
        # Paint on a median-blurred copy so pixels missed by strokes still look smooth.
        canvas = cv2.medianBlur(image, 11)
        order = self.getdraworder(image.shape[0], image.shape[1], scale=self.brush_width * 2)
        # Draw one ellipse per anchor point.
        # FIX: `np.float` was removed in NumPy 1.24; the builtin `float` is the documented replacement.
        colors = np.array(image, dtype=float)
        for i, (y, x) in enumerate(order):
            length = int(round(self.brush_width + self.brush_width * gh[y, x]))
            if self.palette != 0:
                # Quantize each BGR channel to the palette step, with slight random jitter.
                color = np.array([round(colors[y, x][0] / self.palette) * self.palette + random.randint(-5, 5), \
                                  round(colors[y, x][1] / self.palette) * self.palette + random.randint(-5, 5), \
                                  round(colors[y, x][2] / self.palette) * self.palette + random.randint(-5, 5)], dtype=float)
            else:
                color = colors[y, x]
            cv2.ellipse(canvas, (x, y), (length, self.brush_width), ga[y, x], 0, 360, color, -1, cv2.LINE_AA)
        # Return the painted result.
        return canvas
    def getdraworder(self, h, w, scale):
        '''Return jittered grid positions(y, x) covering an h*w image, one roughly every `scale` pixels.'''
        order = []
        for i in range(0, h, scale):
            for j in range(0, w, scale):
                y = random.randint(-scale // 2, scale // 2) + i
                x = random.randint(-scale // 2, scale // 2) + j
                order.append((y % h, x % w))
        return order
    def prewitt(self, img):
        '''Prewitt operator: return the(x, y) gradient images of grayscale `img`.'''
        img_gaussian = cv2.GaussianBlur(img, (3, 3), 0)
        kernelx = np.array([[1, 1, 1], [0, 0, 0], [-1, -1, -1]])
        kernely = np.array([[-1, 0, 1], [-1, 0, 1], [-1, 0, 1]])
        img_prewittx = cv2.filter2D(img_gaussian, -1, kernelx)
        img_prewitty = cv2.filter2D(img_gaussian, -1, kernely)
        # NOTE(review): `//` floor-divides by a float here; `/` may have been intended.
        # Kept as-is to preserve the original visual output -- confirm with the author.
        return img_prewittx // 15.36, img_prewitty // 15.36
    def roberts(self, img):
        '''Roberts cross operator: return the(vertical, horizontal) gradient images of `img`.'''
        roberts_cross_v = np.array([[0, 0, 0], [0, 1, 0], [0, 0, -1]])
        roberts_cross_h = np.array([[0, 0, 0], [0, 0, 1], [0, -1, 0]])
        vertical = ndimage.convolve(img, roberts_cross_v)
        horizontal = ndimage.convolve(img, roberts_cross_h)
        return vertical // 50.0, horizontal // 50.0
    def getgradient(self, img_o, ksize, edge_operator):
        '''Compute smoothed(X, Y) gradients of grayscale `img_o` using the selected edge operator.'''
        if edge_operator == 'scharr':
            X = cv2.Scharr(img_o, cv2.CV_32F, 1, 0) / 50.0
            Y = cv2.Scharr(img_o, cv2.CV_32F, 0, 1) / 50.0
        elif edge_operator == 'prewitt':
            X, Y = self.prewitt(img_o)
        elif edge_operator == 'sobel':
            X = cv2.Sobel(img_o, cv2.CV_32F, 1, 0, ksize=5) / 50.0
            Y = cv2.Sobel(img_o, cv2.CV_32F, 0, 1, ksize=5) / 50.0
        elif edge_operator == 'roberts':
            X, Y = self.roberts(img_o)
        X = cv2.GaussianBlur(X, ksize, 0)
        Y = cv2.GaussianBlur(Y, ksize, 0)
        return X, Y
7959ce711bcc6dd549f1388d36f2f193bdb66264 | 847 | py | Python | docs/conf.py | palewire/geomac-wildfires | 178f3800c59435b6ba071d92a998beb6190fa0f2 | [
"MIT"
] | null | null | null | docs/conf.py | palewire/geomac-wildfires | 178f3800c59435b6ba071d92a998beb6190fa0f2 | [
"MIT"
] | 7 | 2021-11-30T16:19:03.000Z | 2021-11-30T16:35:37.000Z | docs/conf.py | palewire/geomac-wildfires | 178f3800c59435b6ba071d92a998beb6190fa0f2 | [
"MIT"
] | 2 | 2021-12-01T01:41:36.000Z | 2021-12-02T00:00:06.000Z | """Configure Sphinx configuration."""
import os
import sys
from datetime import datetime
# Insert the parent directory into the path
sys.path.insert(0, os.path.abspath(".."))
extensions = [
"myst_parser",
]
templates_path = ["_templates"]
source_suffix = ".rst"
master_doc = "index"
project = "nifc-wildfires"
year = datetime.now().year
copyright = f"{year} Ben Welsh"
exclude_patterns = ["_build"]
html_theme = "alabaster"
html_sidebars = {
"**": [
# "about.html",
# "navigation.html",
"relations.html",
"searchbox.html",
"donate.html",
]
}
html_theme_options = {
"canonical_url": f"https://palewi.re/docs/{project}/",
"show_powered_by": False,
"show_relbar_bottom": True,
}
html_static_path = ["_static"]
html_css_files = [
"css/custom.css",
]
pygments_style = "sphinx"
| 19.25 | 58 | 0.649351 |
7959ce7c3b827672afff59ea43648e3c65fc7b28 | 129,118 | py | Python | python/ccxt/async_support/gateio.py | xsmedjax/ccxt | ef505e625fdced258c0745d8285abdfccde6af2b | [
"MIT"
] | null | null | null | python/ccxt/async_support/gateio.py | xsmedjax/ccxt | ef505e625fdced258c0745d8285abdfccde6af2b | [
"MIT"
] | null | null | null | python/ccxt/async_support/gateio.py | xsmedjax/ccxt | ef505e625fdced258c0745d8285abdfccde6af2b | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
# PLEASE DO NOT EDIT THIS FILE, IT IS GENERATED AND WILL BE OVERWRITTEN:
# https://github.com/ccxt/ccxt/blob/master/CONTRIBUTING.md#how-to-contribute-code
from ccxt.async_support.base.exchange import Exchange
import hashlib
from ccxt.base.errors import ExchangeError
from ccxt.base.errors import AuthenticationError
from ccxt.base.errors import PermissionDenied
from ccxt.base.errors import AccountSuspended
from ccxt.base.errors import ArgumentsRequired
from ccxt.base.errors import BadRequest
from ccxt.base.errors import BadSymbol
from ccxt.base.errors import InsufficientFunds
from ccxt.base.errors import InvalidOrder
from ccxt.base.errors import OrderNotFound
from ccxt.base.errors import RateLimitExceeded
from ccxt.base.errors import ExchangeNotAvailable
from ccxt.base.decimal_to_precision import TICK_SIZE
from ccxt.base.precise import Precise
class gateio(Exchange):
def describe(self):
return self.deep_extend(super(gateio, self).describe(), {
'id': 'gateio',
'name': 'Gate.io',
'countries': ['KR'],
'rateLimit': 10 / 3, # 300 requests per second or 3.33ms
'version': 'v4',
'certified': True,
'pro': True,
'urls': {
'logo': 'https://user-images.githubusercontent.com/1294454/31784029-0313c702-b509-11e7-9ccc-bc0da6a0e435.jpg',
'doc': 'https://www.gate.io/docs/apiv4/en/index.html',
'www': 'https://gate.io/',
'api': {
'public': 'https://api.gateio.ws/api/v4',
'private': 'https://api.gateio.ws/api/v4',
},
'referral': {
'url': 'https://www.gate.io/ref/2436035',
'discount': 0.2,
},
},
'has': {
'margin': True,
'swap': True,
'future': True,
'cancelAllOrders': True,
'cancelOrder': True,
'createMarketOrder': False,
'createOrder': True,
'fetchBalance': True,
'fetchBorrowRate': False,
'fetchBorrowRateHistory': False,
'fetchBorrowRates': False,
'fetchClosedOrders': True,
'fetchCurrencies': True,
'fetchDepositAddress': True,
'fetchDeposits': True,
'fetchFundingFees': True,
'fetchFundingHistory': True,
'fetchFundingRate': True,
'fetchFundingRateHistory': True,
'fetchFundingRates': True,
'fetchIndexOHLCV': True,
'fetchMarkets': True,
'fetchMarkOHLCV': True,
'fetchMyTrades': True,
'fetchNetworkDepositAddress': True,
'fetchOHLCV': True,
'fetchOpenOrders': True,
'fetchOrder': True,
'fetchOrderBook': True,
'fetchOrdersByStatus': True,
'fetchPositions': True,
'fetchPremiumIndexOHLCV': False,
'fetchTicker': True,
'fetchTickers': True,
'fetchTime': False,
'fetchTrades': True,
'fetchTradingFees': True,
'fetchWithdrawals': True,
'setLeverage': True,
'transfer': True,
'withdraw': True,
},
'api': {
'public': {
'spot': {
'get': {
'currencies': 1,
'currencies/{currency}': 1,
'currency_pairs': 1,
'currency_pairs/{currency_pair}': 1,
'tickers': 1,
'order_book': 1,
'trades': 1,
'candlesticks': 1,
},
},
'margin': {
'get': {
'currency_pairs': 1,
'currency_pairs/{currency_pair}': 1,
'cross/currencies': 1,
'cross/currencies/{currency}': 1,
'funding_book': 1,
},
},
'futures': {
'get': {
'{settle}/contracts': 1.5,
'{settle}/contracts/{contract}': 1.5,
'{settle}/order_book': 1.5,
'{settle}/trades': 1.5,
'{settle}/candlesticks': 1.5,
'{settle}/tickers': 1.5,
'{settle}/funding_rate': 1.5,
'{settle}/insurance': 1.5,
'{settle}/contract_stats': 1.5,
'{settle}/liq_orders': 1.5,
},
},
'delivery': {
'get': {
'{settle}/contracts': 1.5,
'{settle}/contracts/{contract}': 1.5,
'{settle}/order_book': 1.5,
'{settle}/trades': 1.5,
'{settle}/candlesticks': 1.5,
'{settle}/tickers': 1.5,
'{settle}/insurance': 1.5,
},
},
'options': {
'get': {
'underlyings': 1.5,
'expirations': 1.5,
'contracts': 1.5,
'contracts/{contract}': 1.5,
'settlements': 1.5,
'settlements/{contract}': 1.5,
'order_book': 1.5,
'tickers': 1.5,
'underlying/tickers/{underlying}': 1.5,
'candlesticks': 1.5,
'underlying/candlesticks': 1.5,
'trades': 1.5,
},
},
},
'private': {
'withdrawals': {
'post': {
'': 3000, # 3000 = 10 seconds
},
'delete': {
'{withdrawal_id}': 300,
},
},
'wallet': {
'get': {
'deposit_address': 300,
'withdrawals': 300,
'deposits': 300,
'sub_account_transfers': 300,
'withdraw_status': 300,
'sub_account_balances': 300,
'fee': 300,
},
'post': {
'transfers': 300,
'sub_account_transfers': 300,
},
},
'spot': {
'get': {
'accounts': 1,
'open_orders': 1,
'orders': 1,
'orders/{order_id}': 1,
'my_trades': 1,
'price_orders': 1,
'price_orders/{order_id}': 1,
},
'post': {
'batch_orders': 1,
'orders': 1,
'cancel_batch_orders': 1,
'price_orders': 1,
},
'delete': {
'orders': 1,
'orders/{order_id}': 1,
'price_orders': 1,
'price_orders/{order_id}': 1,
},
},
'margin': {
'get': {
'accounts': 1.5,
'account_book': 1.5,
'funding_accounts': 1.5,
'loans': 1.5,
'loans/{loan_id}': 1.5,
'loans/{loan_id}/repayment': 1.5,
'loan_records': 1.5,
'loan_records/{load_record_id}': 1.5,
'auto_repay': 1.5,
'transferable': 1.5,
'cross/accounts': 1.5,
'cross/account_book': 1.5,
'cross/loans': 1.5,
'cross/loans/{loan_id}': 1.5,
'cross/loans/repayments': 1.5,
'cross/transferable': 1.5,
},
'post': {
'loans': 1.5,
'merged_loans': 1.5,
'loans/{loan_id}/repayment': 1.5,
'auto_repay': 1.5,
'cross/loans': 1.5,
'cross/loans/repayments': 1.5,
},
'patch': {
'loans/{loan_id}': 1.5,
'loan_records/{loan_record_id}': 1.5,
},
'delete': {
'loans/{loan_id}': 1.5,
},
},
'futures': {
'get': {
'{settle}/accounts': 1.5,
'{settle}/account_book': 1.5,
'{settle}/positions': 1.5,
'{settle}/positions/{contract}': 1.5,
'{settle}/orders': 1.5,
'{settle}/orders/{order_id}': 1.5,
'{settle}/my_trades': 1.5,
'{settle}/position_close': 1.5,
'{settle}/liquidates': 1.5,
'{settle}/price_orders': 1.5,
'{settle}/price_orders/{order_id}': 1.5,
},
'post': {
'{settle}/positions/{contract}/margin': 1.5,
'{settle}/positions/{contract}/leverage': 1.5,
'{settle}/positions/{contract}/risk_limit': 1.5,
'{settle}/dual_mode': 1.5,
'{settle}/dual_comp/positions/{contract}': 1.5,
'{settle}/dual_comp/positions/{contract}/margin': 1.5,
'{settle}/dual_comp/positions/{contract}/leverage': 1.5,
'{settle}/dual_comp/positions/{contract}/risk_limit': 1.5,
'{settle}/orders': 1.5,
'{settle}/price_orders': 1.5,
},
'delete': {
'{settle}/orders': 1.5,
'{settle}/orders/{order_id}': 1.5,
'{settle}/price_orders': 1.5,
'{settle}/price_orders/{order_id}': 1.5,
},
},
'delivery': {
'get': {
'{settle}/accounts': 1.5,
'{settle}/account_book': 1.5,
'{settle}/positions': 1.5,
'{settle}/positions/{contract}': 1.5,
'{settle}/orders': 1.5,
'{settle}/orders/{order_id}': 1.5,
'{settle}/my_trades': 1.5,
'{settle}/position_close': 1.5,
'{settle}/liquidates': 1.5,
'{settle}/price_orders': 1.5,
'{settle}/price_orders/{order_id}': 1.5,
},
'post': {
'{settle}/positions/{contract}/margin': 1.5,
'{settle}/positions/{contract}/leverage': 1.5,
'{settle}/positions/{contract}/risk_limit': 1.5,
'{settle}/orders': 1.5,
'{settle}/price_orders': 1.5,
},
'delete': {
'{settle}/orders': 1.5,
'{settle}/orders/{order_id}': 1.5,
'{settle}/price_orders': 1.5,
'{settle}/price_orders/{order_id}': 1.5,
},
},
'options': {
'get': {
'accounts': 1.5,
'account_book': 1.5,
'positions': 1.5,
'positions/{contract}': 1.5,
'position_close': 1.5,
'orders': 1.5,
'orders/{order_id}': 1.5,
'my_trades': 1.5,
},
'post': {
'orders': 1.5,
},
'delete': {
'orders': 1.5,
'orders/{order_id}': 1.5,
},
},
},
},
'timeframes': {
'10s': '10s',
'1m': '1m',
'5m': '5m',
'15m': '15m',
'30m': '30m',
'1h': '1h',
'4h': '4h',
'8h': '8h',
'1d': '1d',
'7d': '7d',
},
# copied from gateiov2
'commonCurrencies': {
'88MPH': 'MPH',
'BIFI': 'Bitcoin File',
'BOX': 'DefiBox',
'BTCBEAR': 'BEAR',
'BTCBULL': 'BULL',
'BYN': 'BeyondFi',
'EGG': 'Goose Finance',
'GTC': 'Game.com', # conflict with Gitcoin and Gastrocoin
'GTC_HT': 'Game.com HT',
'GTC_BSC': 'Game.com BSC',
'HIT': 'HitChain',
'MM': 'Million', # conflict with MilliMeter
'MPH': 'Morpher', # conflict with 88MPH
'RAI': 'Rai Reflex Index', # conflict with RAI Finance
'SBTC': 'Super Bitcoin',
'TNC': 'Trinity Network Credit',
'TON': 'TONToken',
'VAI': 'VAIOT',
},
'requiredCredentials': {
'apiKey': True,
'secret': True,
},
'options': {
'createOrder': {
'expiration': 86400, # for conditional orders
},
'networks': {
'TRC20': 'TRX',
'ERC20': 'ETH',
'BEP20': 'BSC',
},
'accountsByType': {
'spot': 'spot',
'margin': 'margin',
'future': 'futures',
'futures': 'futures',
'delivery': 'delivery',
},
'defaultType': 'spot',
'swap': {
'fetchMarkets': {
'settlementCurrencies': ['usdt', 'btc'],
},
},
'future': {
'fetchMarkets': {
'settlementCurrencies': ['usdt', 'btc'],
},
},
},
'precisionMode': TICK_SIZE,
'fees': {
'trading': {
'tierBased': True,
'feeSide': 'get',
'percentage': True,
'maker': self.parse_number('0.002'),
'taker': self.parse_number('0.002'),
'tiers': {
# volume is in BTC
'maker': [
[self.parse_number('0'), self.parse_number('0.002')],
[self.parse_number('1.5'), self.parse_number('0.00185')],
[self.parse_number('3'), self.parse_number('0.00175')],
[self.parse_number('6'), self.parse_number('0.00165')],
[self.parse_number('12.5'), self.parse_number('0.00155')],
[self.parse_number('25'), self.parse_number('0.00145')],
[self.parse_number('75'), self.parse_number('0.00135')],
[self.parse_number('200'), self.parse_number('0.00125')],
[self.parse_number('500'), self.parse_number('0.00115')],
[self.parse_number('1250'), self.parse_number('0.00105')],
[self.parse_number('2500'), self.parse_number('0.00095')],
[self.parse_number('3000'), self.parse_number('0.00085')],
[self.parse_number('6000'), self.parse_number('0.00075')],
[self.parse_number('11000'), self.parse_number('0.00065')],
[self.parse_number('20000'), self.parse_number('0.00055')],
[self.parse_number('40000'), self.parse_number('0.00055')],
[self.parse_number('75000'), self.parse_number('0.00055')],
],
'taker': [
[self.parse_number('0'), self.parse_number('0.002')],
[self.parse_number('1.5'), self.parse_number('0.00195')],
[self.parse_number('3'), self.parse_number('0.00185')],
[self.parse_number('6'), self.parse_number('0.00175')],
[self.parse_number('12.5'), self.parse_number('0.00165')],
[self.parse_number('25'), self.parse_number('0.00155')],
[self.parse_number('75'), self.parse_number('0.00145')],
[self.parse_number('200'), self.parse_number('0.00135')],
[self.parse_number('500'), self.parse_number('0.00125')],
[self.parse_number('1250'), self.parse_number('0.00115')],
[self.parse_number('2500'), self.parse_number('0.00105')],
[self.parse_number('3000'), self.parse_number('0.00095')],
[self.parse_number('6000'), self.parse_number('0.00085')],
[self.parse_number('11000'), self.parse_number('0.00075')],
[self.parse_number('20000'), self.parse_number('0.00065')],
[self.parse_number('40000'), self.parse_number('0.00065')],
[self.parse_number('75000'), self.parse_number('0.00065')],
],
},
},
'swap': {
'tierBased': True,
'feeSide': 'base',
'percentage': True,
'maker': self.parse_number('0.0'),
'taker': self.parse_number('0.0005'),
'tiers': {
'maker': [
[self.parse_number('0'), self.parse_number('0.0000')],
[self.parse_number('1.5'), self.parse_number('-0.00005')],
[self.parse_number('3'), self.parse_number('-0.00005')],
[self.parse_number('6'), self.parse_number('-0.00005')],
[self.parse_number('12.5'), self.parse_number('-0.00005')],
[self.parse_number('25'), self.parse_number('-0.00005')],
[self.parse_number('75'), self.parse_number('-0.00005')],
[self.parse_number('200'), self.parse_number('-0.00005')],
[self.parse_number('500'), self.parse_number('-0.00005')],
[self.parse_number('1250'), self.parse_number('-0.00005')],
[self.parse_number('2500'), self.parse_number('-0.00005')],
[self.parse_number('3000'), self.parse_number('-0.00008')],
[self.parse_number('6000'), self.parse_number('-0.01000')],
[self.parse_number('11000'), self.parse_number('-0.01002')],
[self.parse_number('20000'), self.parse_number('-0.01005')],
[self.parse_number('40000'), self.parse_number('-0.02000')],
[self.parse_number('75000'), self.parse_number('-0.02005')],
],
'taker': [
[self.parse_number('0'), self.parse_number('0.00050')],
[self.parse_number('1.5'), self.parse_number('0.00048')],
[self.parse_number('3'), self.parse_number('0.00046')],
[self.parse_number('6'), self.parse_number('0.00044')],
[self.parse_number('12.5'), self.parse_number('0.00042')],
[self.parse_number('25'), self.parse_number('0.00040')],
[self.parse_number('75'), self.parse_number('0.00038')],
[self.parse_number('200'), self.parse_number('0.00036')],
[self.parse_number('500'), self.parse_number('0.00034')],
[self.parse_number('1250'), self.parse_number('0.00032')],
[self.parse_number('2500'), self.parse_number('0.00030')],
[self.parse_number('3000'), self.parse_number('0.00030')],
[self.parse_number('6000'), self.parse_number('0.00030')],
[self.parse_number('11000'), self.parse_number('0.00030')],
[self.parse_number('20000'), self.parse_number('0.00030')],
[self.parse_number('40000'), self.parse_number('0.00030')],
[self.parse_number('75000'), self.parse_number('0.00030')],
],
},
},
},
# https://www.gate.io/docs/apiv4/en/index.html#label-list
'exceptions': {
'exact': {
'INVALID_PARAM_VALUE': BadRequest,
'INVALID_PROTOCOL': BadRequest,
'INVALID_ARGUMENT': BadRequest,
'INVALID_REQUEST_BODY': BadRequest,
'MISSING_REQUIRED_PARAM': ArgumentsRequired,
'BAD_REQUEST': BadRequest,
'INVALID_CONTENT_TYPE': BadRequest,
'NOT_ACCEPTABLE': BadRequest,
'METHOD_NOT_ALLOWED': BadRequest,
'NOT_FOUND': ExchangeError,
'INVALID_CREDENTIALS': AuthenticationError,
'INVALID_KEY': AuthenticationError,
'IP_FORBIDDEN': AuthenticationError,
'READ_ONLY': PermissionDenied,
'INVALID_SIGNATURE': AuthenticationError,
'MISSING_REQUIRED_HEADER': AuthenticationError,
'REQUEST_EXPIRED': AuthenticationError,
'ACCOUNT_LOCKED': AccountSuspended,
'FORBIDDEN': PermissionDenied,
'SUB_ACCOUNT_NOT_FOUND': ExchangeError,
'SUB_ACCOUNT_LOCKED': AccountSuspended,
'MARGIN_BALANCE_EXCEPTION': ExchangeError,
'MARGIN_TRANSFER_FAILED': ExchangeError,
'TOO_MUCH_FUTURES_AVAILABLE': ExchangeError,
'FUTURES_BALANCE_NOT_ENOUGH': InsufficientFunds,
'ACCOUNT_EXCEPTION': ExchangeError,
'SUB_ACCOUNT_TRANSFER_FAILED': ExchangeError,
'ADDRESS_NOT_USED': ExchangeError,
'TOO_FAST': RateLimitExceeded,
'WITHDRAWAL_OVER_LIMIT': ExchangeError,
'API_WITHDRAW_DISABLED': ExchangeNotAvailable,
'INVALID_WITHDRAW_ID': ExchangeError,
'INVALID_WITHDRAW_CANCEL_STATUS': ExchangeError,
'INVALID_PRECISION': InvalidOrder,
'INVALID_CURRENCY': BadSymbol,
'INVALID_CURRENCY_PAIR': BadSymbol,
'POC_FILL_IMMEDIATELY': ExchangeError,
'ORDER_NOT_FOUND': OrderNotFound,
'ORDER_CLOSED': InvalidOrder,
'ORDER_CANCELLED': InvalidOrder,
'QUANTITY_NOT_ENOUGH': InvalidOrder,
'BALANCE_NOT_ENOUGH': InsufficientFunds,
'MARGIN_NOT_SUPPORTED': InvalidOrder,
'MARGIN_BALANCE_NOT_ENOUGH': InsufficientFunds,
'AMOUNT_TOO_LITTLE': InvalidOrder,
'AMOUNT_TOO_MUCH': InvalidOrder,
'REPEATED_CREATION': InvalidOrder,
'LOAN_NOT_FOUND': OrderNotFound,
'LOAN_RECORD_NOT_FOUND': OrderNotFound,
'NO_MATCHED_LOAN': ExchangeError,
'NOT_MERGEABLE': ExchangeError,
'NO_CHANGE': ExchangeError,
'REPAY_TOO_MUCH': ExchangeError,
'TOO_MANY_CURRENCY_PAIRS': InvalidOrder,
'TOO_MANY_ORDERS': InvalidOrder,
'MIXED_ACCOUNT_TYPE': InvalidOrder,
'AUTO_BORROW_TOO_MUCH': ExchangeError,
'TRADE_RESTRICTED': InsufficientFunds,
'USER_NOT_FOUND': ExchangeError,
'CONTRACT_NO_COUNTER': ExchangeError,
'CONTRACT_NOT_FOUND': BadSymbol,
'RISK_LIMIT_EXCEEDED': ExchangeError,
'INSUFFICIENT_AVAILABLE': InsufficientFunds,
'LIQUIDATE_IMMEDIATELY': InvalidOrder,
'LEVERAGE_TOO_HIGH': InvalidOrder,
'LEVERAGE_TOO_LOW': InvalidOrder,
'ORDER_NOT_OWNED': ExchangeError,
'ORDER_FINISHED': ExchangeError,
'POSITION_CROSS_MARGIN': ExchangeError,
'POSITION_IN_LIQUIDATION': ExchangeError,
'POSITION_IN_CLOSE': ExchangeError,
'POSITION_EMPTY': InvalidOrder,
'REMOVE_TOO_MUCH': ExchangeError,
'RISK_LIMIT_NOT_MULTIPLE': ExchangeError,
'RISK_LIMIT_TOO_HIGH': ExchangeError,
'RISK_LIMIT_TOO_lOW': ExchangeError,
'PRICE_TOO_DEVIATED': InvalidOrder,
'SIZE_TOO_LARGE': InvalidOrder,
'SIZE_TOO_SMALL': InvalidOrder,
'PRICE_OVER_LIQUIDATION': InvalidOrder,
'PRICE_OVER_BANKRUPT': InvalidOrder,
'ORDER_POC_IMMEDIATE': InvalidOrder,
'INCREASE_POSITION': InvalidOrder,
'CONTRACT_IN_DELISTING': ExchangeError,
'INTERNAL': ExchangeError,
'SERVER_ERROR': ExchangeError,
'TOO_BUSY': ExchangeNotAvailable,
},
},
'broad': {},
})
async def fetch_markets(self, params={}):
    """
    Retrieve all markets listed on the exchange and return them as a list of
    unified market structures.

    :param dict params: extra parameters merged into the request
    :param str params['type']: 'spot', 'margin', 'swap' or 'future'; defaults to
        self.options['fetchMarkets']['defaultType'] or 'spot'
    :param str params['settle']: settlement currency for contract markets
        (otherwise self.options[type]['fetchMarkets']['settlementCurrencies'] is used)
    :returns list: a list of unified market structures
    """
    defaultType = self.safe_string_2(self.options, 'fetchMarkets', 'defaultType', 'spot')
    type = self.safe_string(params, 'type', defaultType)
    query = self.omit(params, 'type')
    spot = (type == 'spot')
    margin = (type == 'margin')
    future = (type == 'future')
    swap = (type == 'swap')
    option = (type == 'option')
    # NOTE(review): 'option' is consulted below but is rejected here, so
    # type == 'option' always raises — confirm whether options are supported
    if not spot and not margin and not future and not swap:
        raise ExchangeError(self.id + " does not support '" + type + "' type, set exchange.options['defaultType'] to " + "'spot', 'margin', 'swap' or 'future'")  # eslint-disable-line quotes
    response = None
    result = []
    method = self.get_supported_mapping(type, {
        'spot': 'publicSpotGetCurrencyPairs',
        'margin': 'publicMarginGetCurrencyPairs',
        'swap': 'publicFuturesGetSettleContracts',
        'future': 'publicDeliveryGetSettleContracts',
    })
    if swap or future or option:
        # contract endpoints are per settlement currency, so query once per settle
        settlementCurrencies = self.get_settlement_currencies(type, 'fetchMarkets')
        for c in range(0, len(settlementCurrencies)):
            settleId = settlementCurrencies[c]
            query['settle'] = settleId
            response = await getattr(self, method)(query)
            # Perpetual swap
            # [
            #     {
            #         "name": "BTC_USDT",
            #         "type": "direct",
            #         "quanto_multiplier": "0.0001",
            #         "ref_discount_rate": "0",
            #         "order_price_deviate": "0.5",
            #         "maintenance_rate": "0.005",
            #         "mark_type": "index",
            #         "last_price": "38026",
            #         "mark_price": "37985.6",
            #         "index_price": "37954.92",
            #         "funding_rate_indicative": "0.000219",
            #         "mark_price_round": "0.01",
            #         "funding_offset": 0,
            #         "in_delisting": False,
            #         "risk_limit_base": "1000000",
            #         "interest_rate": "0.0003",
            #         "order_price_round": "0.1",
            #         "order_size_min": 1,
            #         "ref_rebate_rate": "0.2",
            #         "funding_interval": 28800,
            #         "risk_limit_step": "1000000",
            #         "leverage_min": "1",
            #         "leverage_max": "100",
            #         "risk_limit_max": "8000000",
            #         "maker_fee_rate": "-0.00025",
            #         "taker_fee_rate": "0.00075",
            #         "funding_rate": "0.002053",
            #         "order_size_max": 1000000,
            #         "funding_next_apply": 1610035200,
            #         "short_users": 977,
            #         "config_change_time": 1609899548,
            #         "trade_size": 28530850594,
            #         "position_size": 5223816,
            #         "long_users": 455,
            #         "funding_impact_value": "60000",
            #         "orders_limit": 50,
            #         "trade_id": 10851092,
            #         "orderbook_id": 2129638396
            #     }
            # ]
            #
            # Delivery Futures
            # [
            #     {
            #         "name": "BTC_USDT_20200814",
            #         "underlying": "BTC_USDT",
            #         "cycle": "WEEKLY",
            #         "type": "direct",
            #         "quanto_multiplier": "0.0001",
            #         "mark_type": "index",
            #         "last_price": "9017",
            #         "mark_price": "9019",
            #         "index_price": "9005.3",
            #         "basis_rate": "0.185095",
            #         "basis_value": "13.7",
            #         "basis_impact_value": "100000",
            #         "settle_price": "0",
            #         "settle_price_interval": 60,
            #         "settle_price_duration": 1800,
            #         "settle_fee_rate": "0.0015",
            #         "expire_time": 1593763200,
            #         "order_price_round": "0.1",
            #         "mark_price_round": "0.1",
            #         "leverage_min": "1",
            #         "leverage_max": "100",
            #         "maintenance_rate": "1000000",
            #         "risk_limit_base": "140.726652109199",
            #         "risk_limit_step": "1000000",
            #         "risk_limit_max": "8000000",
            #         "maker_fee_rate": "-0.00025",
            #         "taker_fee_rate": "0.00075",
            #         "ref_discount_rate": "0",
            #         "ref_rebate_rate": "0.2",
            #         "order_price_deviate": "0.5",
            #         "order_size_min": 1,
            #         "order_size_max": 1000000,
            #         "orders_limit": 50,
            #         "orderbook_id": 63,
            #         "trade_id": 26,
            #         "trade_size": 435,
            #         "position_size": 130,
            #         "config_change_time": 1593158867,
            #         "in_delisting": False
            #     }
            # ]
            #
            for i in range(0, len(response)):
                market = response[i]
                # contract ids look like BASE_QUOTE or BASE_QUOTE_YYYYMMDD(delivery)
                id = self.safe_string(market, 'name')
                parts = id.split('_')
                baseId = self.safe_string(parts, 0)
                quoteId = self.safe_string(parts, 1)
                date = self.safe_string(parts, 2)
                base = self.safe_currency_code(baseId)
                quote = self.safe_currency_code(quoteId)
                settle = self.safe_currency_code(settleId)
                linear = quote == settle
                inverse = base == settle
                expiry = self.safe_timestamp(market, 'expire_time')
                symbol = ''
                if date is not None:
                    # dated delivery contract: BASE/QUOTE:SETTLE-YYMMDD
                    symbol = base + '/' + quote + ':' + settle + '-' + self.yymmdd(expiry, '')
                else:
                    # perpetual swap: BASE/QUOTE:SETTLE
                    symbol = base + '/' + quote + ':' + settle
                # min/max allowed order price is mark price +/- order_price_deviate
                priceDeviate = self.safe_string(market, 'order_price_deviate')
                markPrice = self.safe_string(market, 'mark_price')
                minMultiplier = Precise.string_sub('1', priceDeviate)
                maxMultiplier = Precise.string_add('1', priceDeviate)
                minPrice = Precise.string_mul(minMultiplier, markPrice)
                maxPrice = Precise.string_mul(maxMultiplier, markPrice)
                takerPercent = self.safe_string(market, 'taker_fee_rate')
                makerPercent = self.safe_string(market, 'maker_fee_rate', takerPercent)
                pricePrecision = self.safe_number(market, 'order_price_round')
                # Fee is in %, so divide by 100
                taker = self.parse_number(Precise.string_div(takerPercent, '100'))
                maker = self.parse_number(Precise.string_div(makerPercent, '100'))
                result.append({
                    'info': market,
                    'id': id,
                    'symbol': symbol,
                    'base': base,
                    'quote': quote,
                    'settle': settle,
                    'baseId': baseId,
                    'quoteId': quoteId,
                    'settleId': settleId,
                    'type': type,
                    'spot': spot,
                    'margin': margin,
                    'swap': swap,
                    'future': future,
                    'option': option,
                    'active': True,
                    'contract': True,
                    'linear': linear,
                    'inverse': inverse,
                    'taker': taker,
                    'maker': maker,
                    'contractSize': self.safe_number(market, 'quanto_multiplier'),
                    'expiry': expiry,
                    'expiryDatetime': self.iso8601(expiry),
                    'strike': None,
                    'optionType': None,
                    'precision': {
                        'amount': self.parse_number('1'),  # contracts trade in whole numbers of contracts
                        'price': pricePrecision,
                    },
                    'limits': {
                        'leverage': {
                            'min': self.safe_number(market, 'leverage_min'),
                            'max': self.safe_number(market, 'leverage_max'),
                        },
                        'amount': {
                            'min': self.safe_number(market, 'order_size_min'),
                            'max': self.safe_number(market, 'order_size_max'),
                        },
                        'price': {
                            'min': minPrice,
                            'max': maxPrice,
                        },
                        'cost': {
                            'min': None,
                            'max': None,
                        },
                    },
                })
    else:
        response = await getattr(self, method)(query)
        #
        # Spot
        # [
        #     {
        #         "id": "DEGO_USDT",
        #         "base": "DEGO",
        #         "quote": "USDT",
        #         "fee": "0.2",
        #         "min_quote_amount": "1",
        #         "amount_precision": "4",
        #         "precision": "4",
        #         "trade_status": "tradable",
        #         "sell_start": "0",
        #         "buy_start": "0"
        #     }
        # ]
        #
        # Margin
        # [
        #     {
        #         "id": "ETH_USDT",
        #         "base": "ETH",
        #         "quote": "USDT",
        #         "leverage": 3,
        #         "min_base_amount": "0.01",
        #         "min_quote_amount": "100",
        #         "max_quote_amount": "1000000"
        #     }
        # ]
        #
        for i in range(0, len(response)):
            market = response[i]
            id = self.safe_string(market, 'id')
            spot = (type == 'spot')
            baseId, quoteId = id.split('_')
            base = self.safe_currency_code(baseId)
            quote = self.safe_currency_code(quoteId)
            symbol = base + '/' + quote
            takerPercent = self.safe_string(market, 'fee')
            makerPercent = self.safe_string(market, 'maker_fee_rate', takerPercent)
            # precisions come back as decimal-place counts and are converted to tick sizes
            amountPrecisionString = self.safe_string(market, 'amount_precision')
            pricePrecisionString = self.safe_string(market, 'precision')
            amountPrecision = self.parse_number(self.parse_precision(amountPrecisionString))
            pricePrecision = self.parse_number(self.parse_precision(pricePrecisionString))
            tradeStatus = self.safe_string(market, 'trade_status')
            result.append({
                'id': id,
                'symbol': symbol,
                'base': base,
                'quote': quote,
                'settle': None,
                'baseId': baseId,
                'quoteId': quoteId,
                'settleId': None,
                'type': type,
                'spot': spot,
                'margin': margin,
                'swap': False,
                'future': False,
                'option': False,
                'active': tradeStatus == 'tradable',
                'contract': False,
                'linear': None,
                'inverse': None,
                # Fee is in %, so divide by 100
                'taker': self.parse_number(Precise.string_div(takerPercent, '100')),
                'maker': self.parse_number(Precise.string_div(makerPercent, '100')),
                'contractSize': None,
                'expiry': None,
                'expiryDatetime': None,
                'strike': None,
                'optionType': None,
                'precision': {
                    'amount': amountPrecision,
                    'price': pricePrecision,
                },
                'limits': {
                    'leverage': {
                        'min': self.parse_number('1'),
                        # NOTE(review): margin response documents the field as
                        # 'leverage', not 'lever' - confirm against the API
                        'max': self.safe_number(market, 'lever', 1),
                    },
                    'amount': {
                        'min': amountPrecision,
                        'max': None,
                    },
                    'price': {
                        'min': pricePrecision,
                        'max': None,
                    },
                    'cost': {
                        'min': self.safe_number(market, 'min_quote_amount'),
                        'max': None,
                    },
                },
                'info': market,
            })
    return result
def prepare_request(self, market):
    """
    Build the base request payload for a unified market structure.

    Contract markets(swap/future) are addressed by 'contract' + 'settle',
    spot/margin markets by 'currency_pair'.

    :param dict market: a unified market structure
    :returns dict: the request skeleton for the corresponding API family
    """
    if market['contract']:
        return {'contract': market['id'], 'settle': market['settleId']}
    return {'currency_pair': market['id']}
def get_settlement_currencies(self, type, method):
    """
    Look up the configured settlement currencies for a contract market type.

    Reads self.options[type][method]['settlementCurrencies'](a list of
    lowercase ids), falling back to ['usdt'] for swaps and ['btc'] otherwise.

    :param str type: 'swap' or 'future'
    :param str method: the calling unified method name, e.g. 'fetchMarkets'
    :returns list: settlement currency ids
    """
    typeOptions = self.safe_value(self.options, type, {})
    methodOptions = self.safe_value(typeOptions, method, {})
    fallback = ['usdt'] if (type == 'swap') else ['btc']
    return self.safe_value(methodOptions, 'settlementCurrencies', fallback)
async def fetch_currencies(self, params={}):
    """
    Fetch all currencies supported by the exchange as unified currency structures.

    :param dict params: extra parameters passed through to the endpoint
    :returns dict: a dictionary of unified currency structures keyed by code
    """
    response = await self.publicSpotGetCurrencies(params)
    #
    #     {
    #       "currency": "BCN",
    #       "delisted": False,
    #       "withdraw_disabled": True,
    #       "withdraw_delayed": False,
    #       "deposit_disabled": True,
    #       "trade_disabled": False
    #     }
    #
    result = {}
    # TODO: remove magic constants
    amountPrecision = self.parse_number('1e-6')
    for i in range(0, len(response)):
        entry = response[i]
        currencyId = self.safe_string(entry, 'currency')
        currencyIdLower = self.safe_string_lower(entry, 'currency')
        code = self.safe_currency_code(currencyId)
        delisted = self.safe_value(entry, 'delisted')
        withdraw_disabled = self.safe_value(entry, 'withdraw_disabled')
        # fixed: was reading the non-existent 'disabled_disabled' key, which made
        # deposit status always None and could mark dead currencies as active
        deposit_disabled = self.safe_value(entry, 'deposit_disabled')
        trade_disabled = self.safe_value(entry, 'trade_disabled')
        # a currency is active unless every capability is disabled
        active = not (delisted and withdraw_disabled and deposit_disabled and trade_disabled)
        result[code] = {
            'id': currencyId,
            'lowerCaseId': currencyIdLower,
            'name': None,
            'code': code,
            'precision': amountPrecision,
            'info': entry,
            'active': active,
            'fee': None,
            'fees': [],
            'limits': self.limits,
        }
    return result
async def fetch_funding_rate(self, symbol, params={}):
    """
    Fetch the current funding rate for a single perpetual swap market.

    :param str symbol: unified market symbol of a swap contract
    :param dict params: extra parameters merged into the request
    :raises BadRequest: if the symbol is not a swap market
    :returns dict: a funding rate structure(see parse_funding_rate)
    """
    await self.load_markets()
    market = self.market(symbol)
    if not market['swap']:
        raise BadRequest('Funding rates only exist for swap contracts')
    request = self.prepare_request(market)
    response = await self.publicFuturesGetSettleContractsContract(self.extend(request, params))
    #
    # the endpoint returns a contract structure with fields such as
    #
    #     {
    #         "name": "BTC_USDT",
    #         "mark_price": "37985.6",
    #         "index_price": "37954.92",
    #         "interest_rate": "0.0003",
    #         "funding_rate": "0.002053",
    #         "funding_rate_indicative": "0.000219",
    #         "funding_next_apply": 1610035200,
    #         ...
    #     }
    #
    return self.parse_funding_rate(response)
async def fetch_funding_rates(self, symbols=None, params={}):
    """
    Fetch funding rates for all perpetual swap contracts of one settlement currency.

    :param list symbols: optional unified symbols to filter the result by
    :param dict params: extra parameters merged into the request
    :param str params['settle']: the settlement currency('usdt' or 'btc')
    :returns dict: funding rate structures indexed by symbol
    """
    await self.load_markets()
    request = {
        'settle': self.safe_string_lower(params, 'settle'),
    }
    response = await self.publicFuturesGetSettleContracts(self.extend(request, params))
    #
    # returns a list of contract structures with fields such as
    #
    #     {
    #         "name": "BTC_USDT",
    #         "mark_price": "37985.6",
    #         "index_price": "37954.92",
    #         "interest_rate": "0.0003",
    #         "funding_rate": "0.002053",
    #         "funding_rate_indicative": "0.000219",
    #         "funding_next_apply": 1610035200,
    #         ...
    #     }
    #
    rates = self.parse_funding_rates(response)
    return self.filter_by_array(rates, 'symbol', symbols)
def parse_funding_rate(self, contract, market=None):
    """
    Parse an exchange contract structure into a unified funding rate structure.

    :param dict contract: raw contract structure from the futures contracts endpoint
    :param dict market: optional unified market used to resolve the symbol
    :returns dict: a unified funding rate structure

    Relevant fields of the raw structure:

        {
            "name": "BTC_USDT",
            "mark_price": "37985.6",
            "index_price": "37954.92",
            "interest_rate": "0.0003",
            "funding_rate": "0.002053",
            "funding_rate_indicative": "0.000219",
            "funding_next_apply": 1610035200,   # unix seconds
            ...
        }
    """
    marketId = self.safe_string(contract, 'name')
    symbol = self.safe_symbol(marketId, market)
    markPrice = self.safe_number(contract, 'mark_price')
    indexPrice = self.safe_number(contract, 'index_price')
    interestRate = self.safe_number(contract, 'interest_rate')
    fundingRate = self.safe_string(contract, 'funding_rate')
    # fixed: safe_integer(...) * 1000 raised TypeError when 'funding_next_apply'
    # was missing; safe_timestamp converts seconds to ms and tolerates None
    nextFundingTime = self.safe_timestamp(contract, 'funding_next_apply')
    fundingRateIndicative = self.safe_number(contract, 'funding_rate_indicative')
    return {
        'info': contract,
        'symbol': symbol,
        'markPrice': markPrice,
        'indexPrice': indexPrice,
        'interestRate': interestRate,
        'estimatedSettlePrice': None,
        'timestamp': None,
        'datetime': None,
        'previousFundingRate': fundingRate,
        'nextFundingRate': fundingRateIndicative,
        'previousFundingTimestamp': None,
        'nextFundingTimestamp': nextFundingTime,
        'previousFundingDatetime': None,
        'nextFundingDatetime': self.iso8601(nextFundingTime),
    }
async def fetch_network_deposit_address(self, code, params={}):
    """
    Fetch deposit addresses for a currency, one per supported chain/network.

    :param str code: unified currency code
    :param dict params: extra parameters merged into the request
    :returns dict: address structures keyed by network id
    """
    await self.load_markets()
    currency = self.currency(code)
    request = {
        'currency': currency['id'],
    }
    response = await self.privateWalletGetDepositAddress(self.extend(request, params))
    code = self.safe_currency_code(self.safe_string(response, 'currency'))
    chains = self.safe_value(response, 'multichain_addresses')
    result = {}
    for i in range(0, len(chains)):
        chain = chains[i]
        #
        #     {
        #         "chain": "ETH",
        #         "address": "0x359a697945E79C7e17b634675BD73B33324E9408",
        #         "payment_id": "",
        #         "payment_name": "",
        #         "obtain_failed": "0"
        #     }
        #
        # skip chains for which the exchange could not provide an address
        if self.safe_integer(chain, 'obtain_failed'):
            continue
        network = self.safe_string(chain, 'chain')
        tag = self.safe_string(chain, 'payment_id')
        # an empty payment_id means no tag/memo is required
        tag = tag if len(tag) else None
        result[network] = {
            'info': chain,
            'code': code,
            'address': self.safe_string(chain, 'address'),
            'tag': tag,
        }
    return result
async def fetch_deposit_address(self, code, params={}):
    """
    Fetch the primary deposit address for a currency.

    The exchange packs an optional tag/memo into the 'address' field after a
    space(e.g. "rHcFoo... 391331007"), which is split apart here.

    :param str code: unified currency code
    :param dict params: extra parameters merged into the request
    :returns dict: a unified address structure
    """
    await self.load_markets()
    currency = self.currency(code)
    request = {
        'currency': currency['id'],
    }
    response = await self.privateWalletGetDepositAddress(self.extend(request, params))
    #
    #     {
    #         "currency": "XRP",
    #         "address": "rHcFoo6a9qT5NHiVn1THQRhsEGcxtYCV4d 391331007",
    #         "multichain_addresses": [
    #             {
    #                 "chain": "XRP",
    #                 "address": "rHcFoo6a9qT5NHiVn1THQRhsEGcxtYCV4d",
    #                 "payment_id": "391331007",
    #                 "payment_name": "Tag",
    #                 "obtain_failed": 0
    #             }
    #         ]
    #     }
    #
    code = self.safe_currency_code(self.safe_string(response, 'currency'))
    addressField = self.safe_string(response, 'address')
    address = addressField
    tag = None
    if addressField.find(' ') >= 0:
        pieces = addressField.split(' ')
        address = pieces[0]
        tag = pieces[1]
    return {
        'info': response,
        'code': code,
        'address': address,
        'tag': tag,
        'network': None,
    }
async def fetch_trading_fees(self, params={}):
    """
    Fetch the account's trading fee rates and apply them to every loaded symbol.

    :param dict params: extra parameters merged into the request
    :returns dict: fee structures indexed by market symbol
    """
    await self.load_markets()
    response = await self.privateWalletGetFee(params)
    #
    #     {
    #       "user_id": 1486602,
    #       "taker_fee": "0.002",
    #       "maker_fee": "0.002",
    #       "gt_discount": True,
    #       "gt_taker_fee": "0.0015",
    #       "gt_maker_fee": "0.0015",
    #       "loan_fee": "0.18",
    #       "point_type": "0",
    #       "futures_taker_fee": "0.0005",
    #       "futures_maker_fee": "0"
    #     }
    #
    takerFee = self.safe_number(response, 'taker_fee')
    makerFee = self.safe_number(response, 'maker_fee')
    # the endpoint reports one account-wide rate, replicated per symbol
    fees = {}
    for i in range(0, len(self.symbols)):
        symbol = self.symbols[i]
        fees[symbol] = {
            'maker': makerFee,
            'taker': takerFee,
            'info': response,
            'symbol': symbol,
        }
    return fees
async def fetch_funding_fees(self, params={}):
    """
    Fetch withdrawal fee schedules for all currencies.

    :param dict params: extra parameters merged into the request
    :returns dict: {'info': ..., 'withdraw': fees per code per chain, 'deposit': {}}
    """
    await self.load_markets()
    response = await self.privateWalletGetWithdrawStatus(params)
    #
    #     {
    #       "currency": "MTN",
    #       "name": "Medicalchain",
    #       "name_cn": "Medicalchain",
    #       "deposit": "0",
    #       "withdraw_percent": "0%",
    #       "withdraw_fix": "900",
    #       "withdraw_day_limit": "500000",
    #       "withdraw_day_limit_remain": "500000",
    #       "withdraw_amount_mini": "900.1",
    #       "withdraw_eachtime_limit": "90000000000",
    #       "withdraw_fix_on_chains": {
    #           "ETH": "900"
    #       }
    #     }
    #
    withdrawFees = {}
    for i in range(0, len(response)):
        entry = response[i]
        code = self.safe_currency_code(self.safe_string(entry, 'currency'))
        withdrawFees[code] = {}
        # prefer the per-chain fee map; fall back to the flat fee keyed by code
        withdrawFix = self.safe_value(entry, 'withdraw_fix_on_chains')
        if withdrawFix is None:
            withdrawFix = {}
            withdrawFix[code] = self.safe_number(entry, 'withdraw_fix')
        chainKeys = list(withdrawFix.keys())
        for j in range(0, len(chainKeys)):
            chainKey = chainKeys[j]
            withdrawFees[code][chainKey] = self.parse_number(withdrawFix[chainKey])
    return {
        'info': response,
        'withdraw': withdrawFees,
        'deposit': {},
    }
async def fetch_funding_history(self, symbol=None, since=None, limit=None, params={}):
    """
    Fetch the history of funding payments for a contract market.

    :param str symbol: unified market symbol(required)
    :param int since: earliest timestamp in ms to fetch entries for
    :param int limit: maximum number of entries to return
    :param dict params: extra parameters merged into the request
    :raises ArgumentsRequired: if symbol is None
    :returns list: funding history entries sorted by timestamp
    """
    if symbol is None:
        raise ArgumentsRequired(self.id + ' fetchFundingHistory() requires a symbol argument')
    await self.load_markets()
    # defaultType = 'future'
    market = self.market(symbol)
    request = self.prepare_request(market)
    # account book types: 'dnw' 'pnl' 'fee' 'refr' 'fund' 'point_dnw' 'point_fee' 'point_refr'
    request['type'] = 'fund'
    if since is not None:
        request['from'] = since
    if limit is not None:
        request['limit'] = limit
    method = self.get_supported_mapping(market['type'], {
        'swap': 'privateFuturesGetSettleAccountBook',
        'future': 'privateDeliveryGetSettleAccountBook',
    })
    response = await getattr(self, method)(self.extend(request, params))
    entries = []
    for i in range(0, len(response)):
        item = response[i]
        ts = self.safe_timestamp(item, 'time')
        entries.append({
            'info': item,
            'symbol': symbol,
            'code': self.safe_currency_code(self.safe_string(item, 'text')),
            'timestamp': ts,
            'datetime': self.iso8601(ts),
            'id': None,
            'amount': self.safe_number(item, 'change'),
        })
    ordered = self.sort_by(entries, 'timestamp')
    return self.filter_by_symbol_since_limit(ordered, symbol, since, limit)
async def fetch_order_book(self, symbol, limit=None, params={}):
    """
    Fetch the order book for a market.

    Spot/margin books use [price, amount] arrays with millisecond timestamps;
    contract books use {"p": price, "s": size} dicts with second timestamps.

    :param str symbol: unified market symbol
    :param int limit: maximum depth per side(default 10, max 100)
    :param dict params: extra parameters merged into the request,
        e.g. 'interval'(aggregation, default '0') or 'with_id'
    :returns dict: a unified order book structure
    """
    await self.load_markets()
    market = self.market(symbol)
    request = self.prepare_request(market)
    if limit is not None:
        request['limit'] = limit  # default 10, max 100
    method = self.get_supported_mapping(market['type'], {
        'spot': 'publicSpotGetOrderBook',
        'margin': 'publicSpotGetOrderBook',
        'swap': 'publicFuturesGetSettleOrderBook',
        'future': 'publicDeliveryGetSettleOrderBook',
    })
    response = await getattr(self, method)(self.extend(request, params))
    #
    # spot/margin:
    #     {
    #         "current": 1634345973275,            # ms
    #         "update": 1634345973271,
    #         "asks": [["2.2241", "12449.827"], ...],
    #         "bids": [["2.2236", "32.465"], ...]
    #     }
    #
    # swap/future:
    #     {
    #         "current": 1634350208.745,           # seconds
    #         "asks": [{"s": 24909, "p": "61264.8"}, ...],
    #         "bids": [{"s": 41844, "p": "61264.7"}, ...],
    #         "update": 1634350208.724
    #     }
    #
    isSpotOrMargin = market['spot'] or market['margin']
    timestamp = self.safe_integer(response, 'current')
    if not isSpotOrMargin:
        timestamp = timestamp * 1000  # convert contract seconds to ms
    if isSpotOrMargin:
        return self.parse_order_book(response, symbol, timestamp, 'bids', 'asks', 0, 1)
    return self.parse_order_book(response, symbol, timestamp, 'bids', 'asks', 'p', 's')
async def fetch_ticker(self, symbol, params={}):
    """
    Fetch the ticker for a single market.

    :param str symbol: unified market symbol
    :param dict params: extra parameters merged into the request
    :returns dict: a unified ticker structure
    """
    await self.load_markets()
    market = self.market(symbol)
    method = self.get_supported_mapping(market['type'], {
        'spot': 'publicSpotGetTickers',
        'margin': 'publicSpotGetTickers',
        'swap': 'publicFuturesGetSettleTickers',
        'future': 'publicDeliveryGetSettleTickers',
    })
    request = self.prepare_request(market)
    response = await getattr(self, method)(self.extend(request, params))
    # the endpoint returns a one-element list for a single market
    return self.parse_ticker(self.safe_value(response, 0), market)
def parse_ticker(self, ticker, market=None):
    """
    Parse a raw ticker into a unified ticker structure.

    Spot tickers carry 'currency_pair', 'lowest_ask', 'highest_bid',
    'base_volume'/'quote_volume'; contract tickers carry 'contract' and
    'volume_24h_base'/'volume_24h_quote'.

    :param dict ticker: raw ticker from the spot or contract tickers endpoint
    :param dict market: optional unified market used to resolve the symbol
    :returns dict: a unified ticker structure
    """
    #
    # spot:
    #     {
    #         "currency_pair": "KFC_USDT",
    #         "last": "7.255",
    #         "lowest_ask": "7.298",
    #         "highest_bid": "7.218",
    #         "change_percentage": "-1.18",
    #         "base_volume": "1219.053687865",
    #         "quote_volume": "8807.40299875455",
    #         "high_24h": "7.262",
    #         "low_24h": "7.095"
    #     }
    #
    # swap/future:
    #     {
    #         "contract": "BTC_USDT",
    #         "last": "6432",
    #         "low_24h": "6278",
    #         "high_24h": "6790",
    #         "change_percentage": "4.43",
    #         "volume_24h_base": "28613220",
    #         "volume_24h_quote": "184040233284",
    #         "mark_price": "6534",
    #         "funding_rate": "0.0001",
    #         "index_price": "6531",
    #         ...
    #     }
    #
    marketId = self.safe_string_2(ticker, 'currency_pair', 'contract')
    lastPrice = self.safe_number(ticker, 'last')
    return self.safe_ticker({
        'symbol': self.safe_symbol(marketId, market),
        'timestamp': None,
        'datetime': None,
        'high': self.safe_number(ticker, 'high_24h'),
        'low': self.safe_number(ticker, 'low_24h'),
        'bid': self.safe_number(ticker, 'highest_bid'),
        'bidVolume': None,
        'ask': self.safe_number(ticker, 'lowest_ask'),
        'askVolume': None,
        'vwap': None,
        'open': None,
        'close': lastPrice,
        'last': lastPrice,
        'previousClose': None,
        'change': None,
        'percentage': self.safe_number(ticker, 'change_percentage'),
        'average': None,
        'baseVolume': self.safe_number_2(ticker, 'base_volume', 'volume_24h_base'),
        'quoteVolume': self.safe_number_2(ticker, 'quote_volume', 'volume_24h_quote'),
        'info': ticker,
    }, market)
async def fetch_tickers(self, symbols=None, params={}):
    """
    Fetch tickers for all markets of one type.

    :param list symbols: optional unified symbols to filter the result by
    :param dict params: extra parameters merged into the request
    :param str params['type']: 'spot', 'margin', 'swap' or 'future'
    :param str params['settle']: settlement currency for contract tickers
    :returns dict: unified tickers indexed by symbol
    """
    await self.load_markets()
    defaultType = self.safe_string_2(self.options, 'fetchTickers', 'defaultType', 'spot')
    type = self.safe_string(params, 'type', defaultType)
    params = self.omit(params, 'type')
    method = self.get_supported_mapping(type, {
        'spot': 'publicSpotGetTickers',
        'margin': 'publicSpotGetTickers',
        'swap': 'publicFuturesGetSettleTickers',
        'future': 'publicDeliveryGetSettleTickers',
    })
    isSwap = type == 'swap'
    isFuture = type == 'future'
    request = {}
    if isSwap or isFuture:
        # contract tickers are scoped by settlement currency
        fallbackSettle = 'usdt' if isSwap else 'btc'
        request['settle'] = self.safe_string_lower(params, 'settle', fallbackSettle)
    response = await getattr(self, method)(self.extend(request, params))
    return self.parse_tickers(response, symbols)
def fetch_balance_helper(self, entry):
    """
    Convert one raw balance entry into a unified account structure.

    :param dict entry: raw balance dict with 'available' and either
        'locked'(spot/margin) or 'position_margin'(contract)
    :returns dict: a unified account structure with 'free' and 'used' set
    """
    account = self.account()
    account['free'] = self.safe_string(entry, 'available')
    account['used'] = self.safe_string_2(entry, 'locked', 'position_margin')
    return account
async def fetch_balance(self, params={}):
    """
    Fetch account balances for one account type.

    :param dict params: extra parameters merged into the request
    :param str params['type']: 'spot', 'margin', 'swap' or 'future';
        defaults to self.options['fetchBalance']['defaultType'] or 'spot'
    :param str params['settle']: settle currency('usdt' or 'btc') for
        perpetual swap and delivery future accounts
    :returns dict: for margin, a dict of per-symbol balance structures;
        otherwise a unified balance structure
    """
    # :param params.type: spot, margin, crossMargin, swap or future
    # :param params.settle: Settle currency(usdt or btc) for perpetual swap and future
    await self.load_markets()
    defaultType = self.safe_string_2(self.options, 'fetchBalance', 'defaultType', 'spot')
    type = self.safe_string(params, 'type', defaultType)
    params = self.omit(params, 'type')
    swap = type == 'swap'
    future = type == 'future'
    method = self.get_supported_mapping(type, {
        'spot': 'privateSpotGetAccounts',
        'margin': 'privateMarginGetAccounts',
        'swap': 'privateFuturesGetSettleAccounts',
        'future': 'privateDeliveryGetSettleAccounts',
    })
    request = {}
    response = []
    if swap or future:
        # contract endpoints return a single account object; wrap it in a
        # list so the parsing loop below handles all account types uniformly
        defaultSettle = 'usdt' if swap else 'btc'
        request['settle'] = self.safe_string_lower(params, 'settle', defaultSettle)
        response_item = await getattr(self, method)(self.extend(request, params))
        response = [response_item]
    else:
        response = await getattr(self, method)(self.extend(request, params))
    # Spot
    #
    #     [
    #         {
    #             "currency": "DBC",
    #             "available": "0",
    #             "locked": "0"
    #         },
    #         ...
    #     ]
    #
    # Margin
    #
    #     [
    #         {
    #             "currency_pair":"DOGE_USDT",
    #             "locked":false,
    #             "risk":"9999.99",
    #             "base": {
    #                 "currency":"DOGE",
    #                 "available":"0",
    #                 "locked":"0",
    #                 "borrowed":"0",
    #                 "interest":"0"
    #             },
    #             "quote": {
    #                 "currency":"USDT",
    #                 "available":"0.73402",
    #                 "locked":"0",
    #                 "borrowed":"0",
    #                 "interest":"0"
    #             }
    #         },
    #         ...
    #     ]
    #
    # Perpetual Swap
    #
    #     {
    #         order_margin: "0",
    #         point: "0",
    #         bonus: "0",
    #         history: {
    #             dnw: "2.1321",
    #             pnl: "11.5351",
    #             refr: "0",
    #             point_fee: "0",
    #             fund: "-0.32340576684",
    #             bonus_dnw: "0",
    #             point_refr: "0",
    #             bonus_offset: "0",
    #             fee: "-0.20132775",
    #             point_dnw: "0",
    #         },
    #         unrealised_pnl: "13.315100000006",
    #         total: "12.51345151332",
    #         available: "0",
    #         in_dual_mode: False,
    #         currency: "USDT",
    #         position_margin: "12.51345151332",
    #         user: "6333333",
    #     }
    #
    # Delivery Future
    #
    #     {
    #         order_margin: "0",
    #         point: "0",
    #         history: {
    #             dnw: "1",
    #             pnl: "0",
    #             refr: "0",
    #             point_fee: "0",
    #             point_dnw: "0",
    #             settle: "0",
    #             settle_fee: "0",
    #             point_refr: "0",
    #             fee: "0",
    #         },
    #         unrealised_pnl: "0",
    #         total: "1",
    #         available: "1",
    #         currency: "USDT",
    #         position_margin: "0",
    #         user: "6333333",
    #     }
    #
    margin = type == 'margin'
    result = {
        'info': response,
    }
    for i in range(0, len(response)):
        entry = response[i]
        if margin:
            # margin entries are per trading pair with nested base/quote balances
            marketId = self.safe_string(entry, 'currency_pair')
            symbol = self.safe_symbol(marketId, None, '_')
            base = self.safe_value(entry, 'base', {})
            quote = self.safe_value(entry, 'quote', {})
            baseCode = self.safe_currency_code(self.safe_string(base, 'currency', {}))
            quoteCode = self.safe_currency_code(self.safe_string(quote, 'currency', {}))
            subResult = {}
            subResult[baseCode] = self.fetch_balance_helper(base)
            subResult[quoteCode] = self.fetch_balance_helper(quote)
            result[symbol] = self.safe_balance(subResult)
        else:
            code = self.safe_currency_code(self.safe_string(entry, 'currency', {}))
            result[code] = self.fetch_balance_helper(entry)
    # margin results are already balanced per symbol above
    return result if margin else self.safe_balance(result)
async def fetch_ohlcv(self, symbol, timeframe='1m', since=None, limit=None, params={}):
    """
    Fetch OHLCV candles for a market.

    :param str symbol: unified market symbol
    :param str timeframe: unified timeframe key from self.timeframes
    :param int since: earliest candle timestamp in ms
    :param int limit: maximum number of candles(capped at 1999 for contracts,
        1000 for spot/margin)
    :param dict params: extra parameters merged into the request
    :param str params['price']: 'mark' or 'index' to fetch mark/index price
        candles for contract markets
    :returns list: a list of OHLCV lists(see parse_ohlcv)
    """
    await self.load_markets()
    market = self.market(symbol)
    price = self.safe_string(params, 'price')
    request = self.prepare_request(market)
    request['interval'] = self.timeframes[timeframe]
    method = 'publicSpotGetCandlesticks'
    if market['contract']:
        maxLimit = 1999
        limit = maxLimit if (limit is None) else min(limit, maxLimit)
        if market['future']:
            method = 'publicDeliveryGetSettleCandlesticks'
        elif market['swap']:
            method = 'publicFuturesGetSettleCandlesticks'
        # mark/index candles use a prefixed contract id, e.g. 'mark_BTC_USDT'
        isMark = (price == 'mark')
        isIndex = (price == 'index')
        if isMark or isIndex:
            request['contract'] = price + '_' + market['id']
            params = self.omit(params, 'price')
    else:
        maxLimit = 1000
        limit = maxLimit if (limit is None) else min(limit, maxLimit)
    request['limit'] = limit
    if since is not None:
        # derive the window end from since + limit * timeframe, clamped to now
        duration = self.parse_timeframe(timeframe)
        request['from'] = int(since / 1000)
        toTimestamp = self.sum(request['from'], limit * duration - 1)
        currentTimestamp = self.seconds()
        request['to'] = min(toTimestamp, currentTimestamp)
    response = await getattr(self, method)(self.extend(request, params))
    return self.parse_ohlcvs(response, market, timeframe, since, limit)
async def fetch_mark_ohlcv(self, symbol, timeframe='1m', since=None, limit=None, params={}):
    """
    Fetch mark-price candles by delegating to fetch_ohlcv with price='mark'.
    """
    return await self.fetch_ohlcv(symbol, timeframe, since, limit, self.extend({'price': 'mark'}, params))
async def fetch_funding_rate_history(self, symbol=None, since=None, limit=None, params={}):
    """
    Fetch historical funding rates for a perpetual swap market.

    :param str symbol: unified market symbol(required, must be a swap)
    :param int since: earliest timestamp in ms to return entries for
    :param int limit: maximum number of entries to return
    :param dict params: extra parameters merged into the request
    :raises ArgumentsRequired: if symbol is None
    :raises BadRequest: if the symbol is not a swap market
    :returns list: funding rate history entries sorted by timestamp
    """
    if symbol is None:
        raise ArgumentsRequired(self.id + ' fetchFundingRateHistory() requires a symbol argument')
    await self.load_markets()
    market = self.market(symbol)
    if not market['swap']:
        raise BadRequest('Funding rates only exist for swap contracts')
    request = {
        'contract': market['id'],
        'settle': market['settleId'],
    }
    if limit is not None:
        request['limit'] = limit
    method = 'publicFuturesGetSettleFundingRate'
    response = await getattr(self, method)(self.extend(request, params))
    #
    #     {
    #         "r": "0.00063521",
    #         "t": "1621267200000",
    #     }
    #
    history = []
    for i in range(0, len(response)):
        item = response[i]
        ts = self.safe_timestamp(item, 't')
        history.append({
            'info': item,
            'symbol': symbol,
            'fundingRate': self.safe_number(item, 'r'),
            'timestamp': ts,
            'datetime': self.iso8601(ts),
        })
    ordered = self.sort_by(history, 'timestamp')
    return self.filter_by_symbol_since_limit(ordered, symbol, since, limit)
async def fetch_index_ohlcv(self, symbol, timeframe='1m', since=None, limit=None, params={}):
    """
    Fetch index-price candles by delegating to fetch_ohlcv with price='index'.
    """
    return await self.fetch_ohlcv(symbol, timeframe, since, limit, self.extend({'price': 'index'}, params))
def parse_ohlcv(self, ohlcv, market=None):
    """
    Parse one raw candle into [timestamp, open, high, low, close, volume].

    Spot candles arrive as arrays; mark/index price candles arrive as dicts
    (which carry no volume, so volume parses to None).

    :param list|dict ohlcv: one raw candle
    :param dict market: unused, kept for interface compatibility
    :returns list: a unified OHLCV list
    """
    if isinstance(ohlcv, list):
        # spot layout: [unix seconds, volume, close, high, low, open]
        return [
            self.safe_timestamp(ohlcv, 0),
            self.safe_number(ohlcv, 5),
            self.safe_number(ohlcv, 3),
            self.safe_number(ohlcv, 4),
            self.safe_number(ohlcv, 2),
            self.safe_number(ohlcv, 1),
        ]
    # mark/index layout: {"t": unix seconds, "o": open, "h": high, "c": close, "l": low}
    return [
        self.safe_timestamp(ohlcv, 't'),
        self.safe_number(ohlcv, 'o'),
        self.safe_number(ohlcv, 'h'),
        self.safe_number(ohlcv, 'l'),
        self.safe_number(ohlcv, 'c'),
        self.safe_number(ohlcv, 'v'),  # absent for mark/index -> None
    ]
    async def fetch_trades(self, symbol, since=None, limit=None, params={}):
        """
        Fetch recent public trades for a market (spot, margin, swap or future).

        :param str symbol: unified market symbol
        :param int since: earliest trade timestamp in milliseconds; only applied for contract markets (sent as seconds)
        :param int limit: maximum number of trades to return (default 100, max 1000)
        :param dict params: extra parameters passed through to the endpoint
        :returns: a list of unified trade structures, parsed by parse_trade
        """
        await self.load_markets()
        market = self.market(symbol)
        #
        # spot
        #
        #     request = {
        #         'currency_pair': market['id'],
        #         'limit': limit, # maximum number of records to be returned in a single list
        #         'last_id': 'id', # specify list staring point using the id of last record in previous list-query results
        #         'reverse': False, # True to retrieve records where id is smaller than the specified last_id, False to retrieve records where id is larger than the specified last_id
        #     }
        #
        # swap, future
        #
        #     request = {
        #         'settle': market['settleId'],
        #         'contract': market['id'],
        #         'limit': limit, # maximum number of records to be returned in a single list
        #         'last_id': 'id', # specify list staring point using the id of last record in previous list-query results
        #         'from': since / 1000), # starting time in seconds, if not specified, to and limit will be used to limit response items
        #         'to': self.seconds(), # end time in seconds, default to current time
        #     }
        #
        # prepare_request fills in currency_pair or contract+settle depending on market type
        request = self.prepare_request(market)
        method = self.get_supported_mapping(market['type'], {
            'spot': 'publicSpotGetTrades',
            'margin': 'publicSpotGetTrades',
            'swap': 'publicFuturesGetSettleTrades',
            'future': 'publicDeliveryGetSettleTrades',
        })
        if limit is not None:
            request['limit'] = limit  # default 100, max 1000
        if since is not None and (market['contract']):
            request['from'] = int(since / 1000)
        response = await getattr(self, method)(self.extend(request, params))
        #
        # spot
        #
        #     [
        #         {
        #             id: "1852958144",
        #             create_time: "1634673259",
        #             create_time_ms: "1634673259378.105000",
        #             currency_pair: "ADA_USDT",
        #             side: "sell",
        #             amount: "307.078",
        #             price: "2.104",
        #         }
        #     ]
        #
        # perpetual swap
        #
        #     [
        #         {
        #              size: "2",
        #              id: "2522911",
        #              create_time_ms: "1634673380.182",
        #              create_time: "1634673380.182",
        #              contract: "ADA_USDT",
        #              price: "2.10486",
        #         }
        #     ]
        #
        return self.parse_trades(response, market, since, limit)
    async def fetch_my_trades(self, symbol=None, since=None, limit=None, params={}):
        """
        Fetch trades made by the authenticated account for a market.

        :param str symbol: unified market symbol (required: self.market(symbol) raises if None)
        :param int since: earliest trade timestamp in milliseconds (sent to the API as seconds)
        :param int limit: maximum number of trades to return (default 100, max 1000)
        :param dict params: extra parameters passed through to the endpoint
        :returns: a list of unified trade structures, parsed by parse_trade
        """
        await self.load_markets()
        market = self.market(symbol)
        #
        #     request = {
        #         'currency_pair': market['id'],
        #         # 'limit': limit,
        #         # 'page': 0,
        #         # 'order_id': 'Order ID',
        #         # 'account': 'spot', # default to spot and margin account if not specified, set to cross_margin to operate against margin account
        #         # 'from': since, # default to 7 days before current time
        #         # 'to': self.milliseconds(), # default to current time
        #     }
        #
        request = self.prepare_request(market)
        if limit is not None:
            request['limit'] = limit  # default 100, max 1000
        if since is not None:
            request['from'] = int(since / 1000)
            # request['to'] = since + 7 * 24 * 60 * 60
        method = self.get_supported_mapping(market['type'], {
            'spot': 'privateSpotGetMyTrades',
            'margin': 'privateSpotGetMyTrades',
            'swap': 'privateFuturesGetSettleMyTrades',
            'future': 'privateDeliveryGetSettleMyTrades',
        })
        response = await getattr(self, method)(self.extend(request, params))
        # SPOT
        # [{
        #     id: "1851927191",
        #     create_time: "1634333360",
        #     create_time_ms: "1634333360359.901000",
        #     currency_pair: "BTC_USDT",
        #     side: "buy",
        #     role: "taker",
        #     amount: "0.0001",
        #     price: "62547.51",
        #     order_id: "93475897349",
        #     fee: "2e-07",
        #     fee_currency: "BTC",
        #     point_fee: "0",
        #     gt_fee: "0",
        #   }]
        # Perpetual Swap
        # [{
        #     size: "-13",
        #     order_id: "79723658958",
        #     id: "47612669",
        #     role: "taker",
        #     create_time: "1634600263.326",
        #     contract: "BTC_USDT",
        #     price: "61987.8",
        # }]
        return self.parse_trades(response, market, since, limit)
def parse_trade(self, trade, market=None):
#
# public
#
# {
# "id": "1334253759",
# "create_time": "1626342738",
# "create_time_ms": "1626342738331.497000",
# "currency_pair": "BTC_USDT",
# "side": "sell",
# "amount": "0.0022",
# "price": "32452.16"
# }
#
# private
#
# {
# "id": "218087755",
# "create_time": "1578958740",
# "create_time_ms": "1578958740122.710000",
# "currency_pair": "BTC_USDT",
# "side": "sell",
# "role": "taker",
# "amount": "0.0004",
# "price": "8112.77",
# "order_id": "8445563839",
# "fee": "0.006490216",
# "fee_currency": "USDT",
# "point_fee": "0",
# "gt_fee": "0"
# }
#
id = self.safe_string(trade, 'id')
timestampStringContract = self.safe_string(trade, 'create_time')
timestampString = self.safe_string_2(trade, 'create_time_ms', 'time', timestampStringContract)
timestamp = None
if timestampString.find('.') > 0:
milliseconds = timestampString.split('.')
timestamp = int(milliseconds[0])
if market['contract']:
timestamp = timestamp * 1000
marketId = self.safe_string_2(trade, 'currency_pair', 'contract')
symbol = self.safe_symbol(marketId, market)
amountString = self.safe_string_2(trade, 'amount', 'size')
priceString = self.safe_string(trade, 'price')
costString = Precise.string_abs(Precise.string_mul(amountString, priceString))
price = self.parse_number(priceString)
cost = self.parse_number(costString)
contractSide = 'sell' if Precise.string_lt(amountString, '0') else 'buy'
amountString = Precise.string_abs(amountString)
amount = self.parse_number(amountString)
side = self.safe_string(trade, 'side', contractSide)
orderId = self.safe_string(trade, 'order_id')
gtFee = self.safe_string(trade, 'gt_fee')
feeCurrency = None
feeCost = None
if gtFee == '0':
feeCurrency = self.safe_string(trade, 'fee_currency')
feeCost = self.safe_number(trade, 'fee')
else:
feeCurrency = 'GT'
feeCost = self.parse_number(gtFee)
fee = {
'cost': feeCost,
'currency': feeCurrency,
}
takerOrMaker = self.safe_string(trade, 'role')
return {
'info': trade,
'id': id,
'timestamp': timestamp,
'datetime': self.iso8601(timestamp),
'symbol': symbol,
'order': orderId,
'type': None,
'side': side,
'takerOrMaker': takerOrMaker,
'price': price,
'amount': amount,
'cost': cost,
'fee': fee,
}
async def fetch_deposits(self, code=None, since=None, limit=None, params={}):
await self.load_markets()
request = {}
currency = None
if code is not None:
currency = self.currency(code)
request['currency'] = currency['id']
if limit is not None:
request['limit'] = limit
if since is not None:
request['from'] = int(since / 1000)
request['to'] = since + 30 * 24 * 60 * 60
response = await self.privateWalletGetDeposits(self.extend(request, params))
return self.parse_transactions(response, currency)
async def fetch_withdrawals(self, code=None, since=None, limit=None, params={}):
await self.load_markets()
request = {}
currency = None
if code is not None:
currency = self.currency(code)
request['currency'] = currency['id']
if limit is not None:
request['limit'] = limit
if since is not None:
request['from'] = int(since / 1000)
request['to'] = since + 30 * 24 * 60 * 60
response = await self.privateWalletGetWithdrawals(self.extend(request, params))
return self.parse_transactions(response, currency)
    async def withdraw(self, code, amount, address, tag=None, params={}):
        """
        Create a withdrawal request.

        :param str code: unified currency code
        :param float amount: amount to withdraw
        :param str address: destination address
        :param str tag: optional memo/tag, sent as the 'memo' field
        :param dict params: extra parameters; 'network' selects the chain (e.g. ERC20)
        :returns dict: a minimal unified transaction structure with the withdrawal id
        """
        tag, params = self.handle_withdraw_tag_and_params(tag, params)
        self.check_address(address)
        await self.load_markets()
        currency = self.currency(code)
        request = {
            'currency': currency['id'],
            'address': address,
            'amount': self.currency_to_precision(code, amount),
        }
        if tag is not None:
            request['memo'] = tag
        networks = self.safe_value(self.options, 'networks', {})
        network = self.safe_string_upper(params, 'network')  # self line allows the user to specify either ERC20 or ETH
        network = self.safe_string_lower(networks, network, network)  # handle ETH>ERC20 alias
        if network is not None:
            request['chain'] = network
            params = self.omit(params, 'network')
        response = await self.privateWithdrawalsPost(self.extend(request, params))
        #
        #     {
        #       "id": "w13389675",
        #       "currency": "USDT",
        #       "amount": "50",
        #       "address": "TUu2rLFrmzUodiWfYki7QCNtv1akL682p1",
        #       "memo": null
        #     }
        #
        currencyId = self.safe_string(response, 'currency')
        id = self.safe_string(response, 'id')
        return {
            'info': response,
            'id': id,
            'code': self.safe_currency_code(currencyId),
            'amount': self.safe_number(response, 'amount'),
            'address': self.safe_string(response, 'address'),
            'tag': self.safe_string(response, 'memo'),
        }
def parse_transaction_status(self, status):
statuses = {
'PEND': 'pending',
'REQUEST': 'pending',
'DMOVE': 'pending',
'CANCEL': 'failed',
'DONE': 'ok',
}
return self.safe_string(statuses, status, status)
def parse_transaction_type(self, type):
types = {
'd': 'deposit',
'w': 'withdrawal',
}
return self.safe_string(types, type, type)
def parse_transaction(self, transaction, currency=None):
#
# deposits
#
# {
# "id": "d33361395",
# "currency": "USDT_TRX",
# "address": "TErdnxenuLtXfnMafLbfappYdHtnXQ5U4z",
# "amount": "100",
# "txid": "ae9374de34e558562fe18cbb1bf9ab4d9eb8aa7669d65541c9fa2a532c1474a0",
# "timestamp": "1626345819",
# "status": "DONE",
# "memo": ""
# }
#
# withdrawals
id = self.safe_string(transaction, 'id')
type = None
if id is not None:
type = self.parse_transaction_type(id[0])
currencyId = self.safe_string(transaction, 'currency')
code = self.safe_currency_code(currencyId)
amount = self.safe_number(transaction, 'amount')
txid = self.safe_string(transaction, 'txid')
rawStatus = self.safe_string(transaction, 'status')
status = self.parse_transaction_status(rawStatus)
address = self.safe_string(transaction, 'address')
fee = self.safe_number(transaction, 'fee')
tag = self.safe_string(transaction, 'memo')
if tag == '':
tag = None
timestamp = self.safe_timestamp(transaction, 'timestamp')
return {
'info': transaction,
'id': id,
'txid': txid,
'currency': code,
'amount': amount,
'network': None,
'address': address,
'addressTo': None,
'addressFrom': None,
'tag': tag,
'tagTo': None,
'tagFrom': None,
'status': status,
'type': type,
'timestamp': timestamp,
'datetime': self.iso8601(timestamp),
'updated': None,
'fee': fee,
}
    async def create_order(self, symbol, type, side, amount, price=None, params={}):
        """
        Create an order on a spot, margin, swap or future market.

        Four request shapes are built depending on (contract vs spot) and whether a
        stopPrice/trigger is supplied (regular order vs conditional "price order").

        :param str symbol: unified market symbol
        :param str type: 'limit' or 'market' (market only supported for contracts, via tif=ioc)
        :param str side: 'buy' or 'sell'
        :param float amount: order amount; for contracts it is sent as a signed integer size
        :param float price: order price, required for limit orders
        :param dict params: extra parameters; notable keys: 'stopPrice'/'trigger' (conditional
            orders), 'reduceOnly'/'reduce_only', 'timeInForce'/'tif'/'time_in_force',
            'account' (spot/margin/cross_margin), 'text'/'clientOrderId', 'expiration'
        :returns dict: a unified order structure
        """
        await self.load_markets()
        market = self.market(symbol)
        contract = market['contract']
        stopPrice = self.safe_number(params, 'stopPrice')
        methodTail = 'Orders'
        reduceOnly = self.safe_value_2(params, 'reduce_only', 'reduceOnly')
        defaultTimeInForce = self.safe_value_2(params, 'tif', 'time_in_force', 'gtc')
        timeInForce = self.safe_value(params, 'timeInForce', defaultTimeInForce)
        params = self.omit(params, ['stopPrice', 'reduce_only', 'reduceOnly', 'tif', 'time_in_force', 'timeInForce'])
        isLimitOrder = (type == 'limit')
        isMarketOrder = (type == 'market')
        if isLimitOrder and price is None:
            raise ArgumentsRequired(self.id + ' createOrder() requires a price argument for ' + type + ' orders')
        if contract:
            # contract size is a signed integer: positive = long/buy, negative = short/sell
            amountToPrecision = self.amount_to_precision(symbol, amount)
            signedAmount = Precise.string_neg(amountToPrecision) if (side == 'sell') else amountToPrecision
            amount = int(signedAmount)
            if isMarketOrder:
                # contract market orders are expressed as price=0 with tif=ioc
                timeInForce = 'ioc'
                price = 0
        elif not isLimitOrder:
            # Gateio doesn't have market orders for spot
            raise InvalidOrder(self.id + ' createOrder() does not support ' + type + ' orders for ' + market['type'] + ' markets')
        request = None
        trigger = self.safe_value(params, 'trigger')
        if stopPrice is None and trigger is None:
            if contract:
                # contract order
                request = {
                    'contract': market['id'],  # filled in prepareRequest above
                    'size': amount,  # int64, positive = bid, negative = ask
                    # 'iceberg': 0,  # int64, display size for iceberg order, 0 for non-iceberg, note that you will have to pay the taker fee for the hidden size
                    'price': self.price_to_precision(symbol, price),  # 0 for market order with tif set as ioc
                    # 'close': False,  # True to close the position, with size set to 0
                    # 'reduce_only': False,  # St as True to be reduce-only order
                    # 'tif': 'gtc',  # gtc, ioc, poc PendingOrCancelled == postOnly order
                    # 'text': clientOrderId,  # 't-abcdef1234567890',
                    # 'auto_size': '',  # close_long, close_short, note size also needs to be set to 0
                    'settle': market['settleId'],  # filled in prepareRequest above
                }
                if reduceOnly is not None:
                    request['reduce_only'] = reduceOnly
                if timeInForce is not None:
                    request['tif'] = timeInForce
            else:
                options = self.safe_value(self.options, 'createOrder', {})
                defaultAccount = self.safe_string(options, 'account', 'spot')
                account = self.safe_string(params, 'account', defaultAccount)
                params = self.omit(params, 'account')
                # spot order
                request = {
                    # 'text': clientOrderId,  # 't-abcdef1234567890',
                    'currency_pair': market['id'],  # filled in prepareRequest above
                    'type': type,
                    'account': account,  # 'spot', 'margin', 'cross_margin'
                    'side': side,
                    'amount': self.amount_to_precision(symbol, amount),
                    'price': self.price_to_precision(symbol, price),
                    # 'time_in_force': 'gtc',  # gtc, ioc, poc PendingOrCancelled == postOnly order
                    # 'iceberg': 0,  # amount to display for the iceberg order, null or 0 for normal orders, set to -1 to hide the order completely
                    # 'auto_borrow': False,  # used in margin or cross margin trading to allow automatic loan of insufficient amount if balance is not enough
                    # 'auto_repay': False,  # automatic repayment for automatic borrow loan generated by cross margin order, diabled by default
                }
                if timeInForce is not None:
                    request['time_in_force'] = timeInForce
            clientOrderId = self.safe_string_2(params, 'text', 'clientOrderId')
            if clientOrderId is not None:
                # user-defined, must follow the rules if not empty
                # prefixed with t-
                # no longer than 28 bytes without t- prefix
                # can only include 0-9, A-Z, a-z, underscores(_), hyphens(-) or dots(.)
                if len(clientOrderId) > 28:
                    raise BadRequest(self.id + ' createOrder() clientOrderId or text param must be up to 28 characters')
                params = self.omit(params, ['text', 'clientOrderId'])
                if clientOrderId[0] != 't':
                    clientOrderId = 't-' + clientOrderId
                request['text'] = clientOrderId
        else:
            if contract:
                # contract conditional order
                rule = 1 if (side == 'sell') else 2
                request = {
                    'initial': {
                        'contract': market['id'],
                        'size': amount,  # positive = buy, negative = sell, set to 0 to close the position
                        'price': self.price_to_precision(symbol, price),  # set to 0 to use market price
                        # 'close': False,  # set to True if trying to close the position
                        # 'tif': 'gtc',  # gtc, ioc, if using market price, only ioc is supported
                        # 'text': clientOrderId,  # web, api, app
                        # 'reduce_only': False,
                    },
                    'trigger': {
                        # 'strategy_type': 0,  # 0 = by price, 1 = by price gap, only 0 is supported currently
                        # 'price_type': 0,  # 0 latest deal price, 1 mark price, 2 index price
                        'price': self.price_to_precision(symbol, stopPrice),  # price or gap
                        'rule': rule,  # 1 means price_type >= price, 2 means price_type <= price
                        # 'expiration': expiration, how many seconds to wait for the condition to be triggered before cancelling the order
                    },
                    'settle': market['settleId'],
                }
                expiration = self.safe_integer(params, 'expiration')
                if expiration is not None:
                    request['trigger']['expiration'] = expiration
                    params = self.omit(params, 'expiration')
                if reduceOnly is not None:
                    request['initial']['reduce_only'] = reduceOnly
                if timeInForce is not None:
                    request['initial']['tif'] = timeInForce
            else:
                # spot conditional order
                options = self.safe_value(self.options, 'createOrder', {})
                defaultAccount = self.safe_string(options, 'account', 'normal')
                account = self.safe_string(params, 'account', defaultAccount)
                params = self.omit(params, 'account')
                defaultExpiration = self.safe_integer(options, 'expiration')
                expiration = self.safe_integer(params, 'expiration', defaultExpiration)
                rule = '>=' if (side == 'sell') else '<='
                triggerPrice = self.safe_value(trigger, 'price', stopPrice)
                request = {
                    'trigger': {
                        'price': self.price_to_precision(symbol, triggerPrice),
                        'rule': rule,  # >= triggered when market price larger than or equal to price field, <= triggered when market price less than or equal to price field
                        'expiration': expiration,  # required, how long(in seconds) to wait for the condition to be triggered before cancelling the order
                    },
                    'put': {
                        'type': type,
                        'side': side,
                        'price': self.price_to_precision(symbol, price),
                        'amount': self.amount_to_precision(symbol, amount),
                        'account': account,  # normal, margin
                        'time_in_force': timeInForce,  # gtc, ioc for taker only
                    },
                    'market': market['id'],
                }
            # conditional orders go to the PriceOrders family of endpoints
            methodTail = 'PriceOrders'
        method = self.get_supported_mapping(market['type'], {
            'spot': 'privateSpotPost' + methodTail,
            'margin': 'privateSpotPost' + methodTail,
            'swap': 'privateFuturesPostSettle' + methodTail,
            'future': 'privateDeliveryPostSettle' + methodTail,
        })
        response = await getattr(self, method)(self.deep_extend(request, params))
        #
        # spot
        #
        #     {
        #         "id":"95282841887",
        #         "text":"apiv4",
        #         "create_time":"1637383156",
        #         "update_time":"1637383156",
        #         "create_time_ms":1637383156017,
        #         "update_time_ms":1637383156017,
        #         "status":"open",
        #         "currency_pair":"ETH_USDT",
        #         "type":"limit",
        #         "account":"spot",
        #         "side":"buy",
        #         "amount":"0.01",
        #         "price":"3500",
        #         "time_in_force":"gtc",
        #         "iceberg":"0",
        #         "left":"0.01",
        #         "fill_price":"0",
        #         "filled_total":"0",
        #         "fee":"0",
        #         "fee_currency":"ETH",
        #         "point_fee":"0",
        #         "gt_fee":"0",
        #         "gt_discount":false,
        #         "rebated_fee":"0",
        #         "rebated_fee_currency":"USDT"
        #     }
        #
        # spot conditional
        #
        #     {"id":5891843}
        #
        # future and perpetual swaps
        #
        #     {
        #         "id":95938572327,
        #         "contract":"ETH_USDT",
        #         "mkfr":"0",
        #         "tkfr":"0.0005",
        #         "tif":"gtc",
        #         "is_reduce_only":false,
        #         "create_time":1637384600.08,
        #         "price":"3000",
        #         "size":1,
        #         "refr":"0",
        #         "left":1,
        #         "text":"api",
        #         "fill_price":"0",
        #         "user":2436035,
        #         "status":"open",
        #         "is_liq":false,
        #         "refu":0,
        #         "is_close":false,
        #         "iceberg":0
        #     }
        #
        # futures and perpetual swaps conditionals
        #
        #     {"id":7615567}
        #
        return self.parse_order(response, market)
def parse_order_status(self, status):
statuses = {
'filled': 'closed',
'cancelled': 'canceled',
'liquidated': 'closed',
}
return self.safe_string(statuses, status, status)
    def parse_order(self, order, market=None):
        """
        Parse a raw spot or contract order into the unified order structure.

        :param dict order: the raw order as returned by the API (see sample below)
        :param dict market: the corresponding unified market, or None if unknown
        :returns dict: a unified order structure (via self.safe_order)
        """
        #
        # createOrder, spot
        #
        #     {
        #       "id": "62364648575",
        #       "text": "apiv4",
        #       "create_time": "1626354834",
        #       "update_time": "1626354834",
        #       "create_time_ms": "1626354833544",
        #       "update_time_ms": "1626354833544",
        #       "status": "open",
        #       "currency_pair": "BTC_USDT",
        #       "type": "limit",
        #       "account": "spot",
        #       "side": "buy",
        #       "amount": "0.0001",
        #       "price": "30000",
        #       "time_in_force": "gtc",
        #       "iceberg": "0",
        #       "left": "0.0001",
        #       "fill_price": "0",
        #       "filled_total": "0",
        #       "fee": "0",
        #       "fee_currency": "BTC",
        #       "point_fee": "0",
        #       "gt_fee": "0",
        #       "gt_discount": True,
        #       "rebated_fee": "0",
        #       "rebated_fee_currency": "USDT"
        #     }
        #
        #
        id = self.safe_string(order, 'id')
        clientOrderId = self.safe_string(order, 'text')
        marketId = self.safe_string_2(order, 'currency_pair', 'contract')
        symbol = self.safe_symbol(marketId, market)
        # prefer the millisecond fields when present, fall back to second-resolution ones
        timestamp = self.safe_timestamp(order, 'create_time')
        timestamp = self.safe_integer(order, 'create_time_ms', timestamp)
        lastTradeTimestamp = self.safe_timestamp(order, 'update_time')
        lastTradeTimestamp = self.safe_integer(order, 'update_time_ms', lastTradeTimestamp)
        # contract orders use a signed 'size' instead of 'amount'
        amountRaw = self.safe_string_2(order, 'amount', 'size')
        amount = Precise.string_abs(amountRaw)
        price = self.safe_string(order, 'price')
        # average = self.safe_string(order, 'fill_price')
        remaining = self.safe_string(order, 'left')
        cost = self.safe_string(order, 'filled_total')  # same as filled_price
        rawStatus = None
        side = None
        contract = self.safe_value(market, 'contract')
        if contract:
            # contract orders encode the side in the sign of the size
            if amount:
                side = 'buy' if Precise.string_gt(amountRaw, '0') else 'sell'
            else:
                side = None
            rawStatus = self.safe_string(order, 'finish_as', 'open')
        else:
            # open, closed, cancelled - almost already ccxt unified!
            rawStatus = self.safe_string(order, 'status')
            side = self.safe_string(order, 'side')
        status = self.parse_order_status(rawStatus)
        type = self.safe_string(order, 'type')
        timeInForce = self.safe_string_upper_2(order, 'time_in_force', 'tif')
        # collect all fee components: GT token fee, regular fee, rebates and
        # contract maker/taker fee rates
        fees = []
        gtFee = self.safe_number(order, 'gt_fee')
        if gtFee:
            fees.append({
                'currency': 'GT',
                'cost': gtFee,
            })
        fee = self.safe_number(order, 'fee')
        if fee:
            fees.append({
                'currency': self.safe_currency_code(self.safe_string(order, 'fee_currency')),
                'cost': fee,
            })
        rebate = self.safe_string(order, 'rebated_fee')
        if rebate:
            fees.append({
                'currency': self.safe_currency_code(self.safe_string(order, 'rebated_fee_currency')),
                'cost': self.parse_number(Precise.string_neg(rebate)),
            })
        mkfr = self.safe_number(order, 'mkfr')
        tkfr = self.safe_number(order, 'tkfr')
        if mkfr:
            # NOTE(review): this reads 'settleId' from the order dict, while the tkfr
            # branch below reads it from the market - looks asymmetric, confirm
            fees.append({
                'currency': self.safe_currency_code(self.safe_string(order, 'settleId')),
                'cost': mkfr,
            })
        if tkfr:
            fees.append({
                'currency': self.safe_currency_code(self.safe_string(market, 'settleId')),
                'cost': tkfr,
            })
        return self.safe_order({
            'id': id,
            'clientOrderId': clientOrderId,
            'timestamp': timestamp,
            'datetime': self.iso8601(timestamp),
            'lastTradeTimestamp': lastTradeTimestamp,
            'status': status,
            'symbol': symbol,
            'type': type,
            'timeInForce': timeInForce,
            'postOnly': None,
            'side': side,
            'price': price,
            'stopPrice': None,
            'average': None,
            'amount': amount,
            'cost': cost,
            'filled': None,
            'remaining': remaining,
            'fee': None,
            'fees': fees,
            'trades': None,
            'info': order,
        }, market)
async def fetch_order(self, id, symbol=None, params={}):
if symbol is None:
raise ArgumentsRequired(self.id + ' fetchOrder() requires a symbol argument')
await self.load_markets()
market = self.market(symbol)
request = {
'order_id': id,
}
if market['spot'] or market['margin']:
request['currency_pair'] = market['id']
else:
request['settle'] = market['settleId']
method = self.get_supported_mapping(market['type'], {
'spot': 'privateSpotGetOrdersOrderId',
'margin': 'privateSpotGetOrdersOrderId',
'swap': 'privateFuturesGetSettleOrdersOrderId',
'future': 'privateDeliveryGetSettlePriceOrdersOrderId',
})
response = await getattr(self, method)(self.extend(request, params))
return self.parse_order(response, market)
async def fetch_open_orders(self, symbol=None, since=None, limit=None, params={}):
await self.load_markets()
defaultType = self.safe_string_2(self.options, 'fetchMarkets', 'defaultType', 'spot')
type = self.safe_string(params, 'type', defaultType)
if symbol is None and (type == 'spot') or type == 'margin' or type == 'cross_margin':
request = {
# 'page': 1,
# 'limit': limit,
'account': type, # spot/margin(default), cross_margin
}
if limit is not None:
request['limit'] = limit
response = await self.privateSpotGetOpenOrders(self.extend(request, params))
#
# [
# {
# "currency_pair": "ETH_BTC",
# "total": 1,
# "orders": [
# {
# "id": "12332324",
# "text": "t-123456",
# "create_time": "1548000000",
# "update_time": "1548000100",
# "currency_pair": "ETH_BTC",
# "status": "open",
# "type": "limit",
# "account": "spot",
# "side": "buy",
# "amount": "1",
# "price": "5.00032",
# "time_in_force": "gtc",
# "left": "0.5",
# "filled_total": "2.50016",
# "fee": "0.005",
# "fee_currency": "ETH",
# "point_fee": "0",
# "gt_fee": "0",
# "gt_discount": False,
# "rebated_fee": "0",
# "rebated_fee_currency": "BTC"
# }
# ]
# },
# ...
# ]
#
allOrders = []
for i in range(0, len(response)):
entry = response[i]
orders = self.safe_value(entry, 'orders', [])
parsed = self.parse_orders(orders, None, since, limit)
allOrders = self.array_concat(allOrders, parsed)
return self.filter_by_since_limit(allOrders, since, limit)
return await self.fetch_orders_by_status('open', symbol, since, limit, params)
async def fetch_closed_orders(self, symbol=None, since=None, limit=None, params={}):
return await self.fetch_orders_by_status('finished', symbol, since, limit, params)
    async def fetch_orders_by_status(self, status, symbol=None, since=None, limit=None, params={}):
        """
        Fetch orders filtered by a raw exchange status('open', 'finished', ...).

        :param str status: raw status value sent to the API
        :param str symbol: unified market symbol (required)
        :param int since: earliest order timestamp in milliseconds; only applied for
            spot/margin markets (sent as seconds in the 'start' field)
        :param int limit: maximum number of orders to return
        :param dict params: extra parameters passed through to the endpoint
        :returns: a list of unified order structures
        :raises ArgumentsRequired: if symbol is None
        """
        if symbol is None:
            raise ArgumentsRequired(self.id + ' fetchOrdersByStatus requires a symbol argument')
        await self.load_markets()
        market = self.market(symbol)
        request = self.prepare_request(market)
        request['status'] = status
        if limit is not None:
            request['limit'] = limit
        if since is not None and (market['spot'] or market['margin']):
            request['start'] = int(since / 1000)
        method = self.get_supported_mapping(market['type'], {
            'spot': 'privateSpotGetOrders',
            'margin': 'privateSpotGetOrders',
            'swap': 'privateFuturesGetSettleOrders',
            'future': 'privateDeliveryGetSettleOrders',
        })
        if market['type'] == 'margin' or market['type'] == 'cross_margin':
            request['account'] = market['type']
        response = await getattr(self, method)(self.extend(request, params))
        # SPOT
        # {
        #     "id":"8834234273",
        #     "text": "3",
        #     "create_time": "1635406193",
        #     "update_time": "1635406193",
        #     "create_time_ms": 1635406193361,
        #     "update_time_ms": 1635406193361,
        #     "status": "closed",
        #     "currency_pair": "BTC_USDT",
        #     "type": "limit",
        #     "account": "spot",
        #     "side": "sell",
        #     "amount": "0.0002",
        #     "price": "58904.01",
        #     "time_in_force":"gtc",
        #     "iceberg": "0",
        #     "left": "0.0000",
        #     "fill_price": "11.790516",
        #     "filled_total": "11.790516",
        #     "fee": "0.023581032",
        #     "fee_currency": "USDT",
        #     "point_fee": "0",
        #     "gt_fee": "0",
        #     "gt_discount": False,
        #     "rebated_fee_currency": "BTC"
        # }
        # Perpetual Swap
        # {
        #     "status": "finished",
        #     "size":-1,
        #     "left":0,
        #     "id":82750739203,
        #     "is_liq":false,
        #     "is_close":false,
        #     "contract": "BTC_USDT",
        #     "text": "web",
        #     "fill_price": "60721.3",
        #     "finish_as": "filled",
        #     "iceberg":0,
        #     "tif": "ioc",
        #     "is_reduce_only":true,
        #     "create_time": 1635403475.412,
        #     "finish_time": 1635403475.4127,
        #     "price": "0"
        # }
        return self.parse_orders(response, market, since, limit)
    async def cancel_order(self, id, symbol=None, params={}):
        """
        Cancel a single order by id.

        :param str id: the order id
        :param str symbol: unified market symbol (required)
        :param dict params: extra parameters passed through to the endpoint
        :returns dict: the canceled order as a unified order structure
        :raises ArgumentsRequired: if symbol is None
        """
        if symbol is None:
            raise ArgumentsRequired(self.id + ' cancelOrder() requires a symbol argument')
        await self.load_markets()
        market = self.market(symbol)
        request = {
            'order_id': id,
        }
        if market['contract']:
            request['settle'] = market['settleId']
        else:
            request['currency_pair'] = market['id']
        method = self.get_supported_mapping(market['type'], {
            'spot': 'privateSpotDeleteOrdersOrderId',
            'margin': 'privateSpotDeleteOrdersOrderId',
            'swap': 'privateFuturesDeleteSettleOrdersOrderId',
            'future': 'privateDeliveryDeleteSettleOrdersOrderId',
        })
        response = await getattr(self, method)(self.extend(request, params))
        #
        # spot
        #
        #     {
        #         "id":"95282841887",
        #         "text":"apiv4",
        #         "create_time":"1637383156",
        #         "update_time":"1637383235",
        #         "create_time_ms":1637383156017,
        #         "update_time_ms":1637383235085,
        #         "status":"cancelled",
        #         "currency_pair":"ETH_USDT",
        #         "type":"limit",
        #         "account":"spot",
        #         "side":"buy",
        #         "amount":"0.01",
        #         "price":"3500",
        #         "time_in_force":"gtc",
        #         "iceberg":"0",
        #         "left":"0.01",
        #         "fill_price":"0",
        #         "filled_total":"0",
        #         "fee":"0",
        #         "fee_currency":"ETH",
        #         "point_fee":"0",
        #         "gt_fee":"0",
        #         "gt_discount":false,
        #         "rebated_fee":"0",
        #         "rebated_fee_currency":"USDT"
        #     }
        #
        # spot conditional
        #
        #     {
        #         "market":"ETH_USDT",
        #         "user":2436035,
        #         "trigger":{
        #             "price":"3500",
        #             "rule":"\u003c=",
        #             "expiration":86400
        #         },
        #         "put":{
        #             "type":"limit",
        #             "side":"buy",
        #             "price":"3500",
        #             "amount":"0.01000000000000000000",
        #             "account":"normal",
        #             "time_in_force":"gtc"
        #         },
        #         "id":5891843,
        #         "ctime":1637382379,
        #         "ftime":1637382673,
        #         "status":"canceled"
        #     }
        #
        # perpetual swaps
        #
        #     {
        #         id: "82241928192",
        #         contract: "BTC_USDT",
        #         mkfr: "0",
        #         tkfr: "0.0005",
        #         tif: "gtc",
        #         is_reduce_only: False,
        #         create_time: "1635196145.06",
        #         finish_time: "1635196233.396",
        #         price: "61000",
        #         size: "4",
        #         refr: "0",
        #         left: "4",
        #         text: "web",
        #         fill_price: "0",
        #         user: "6693577",
        #         finish_as: "cancelled",
        #         status: "finished",
        #         is_liq: False,
        #         refu: "0",
        #         is_close: False,
        #         iceberg: "0",
        #     }
        #
        return self.parse_order(response, market)
async def cancel_all_orders(self, symbol=None, params={}):
await self.load_markets()
request = {}
market = None
if symbol is not None:
market = self.market(symbol)
request['symbol'] = market['id']
return await self.privateSpotDeleteOrders(self.extend(request, params))
    async def transfer(self, code, amount, fromAccount, toAccount, params={}):
        """
        Transfer funds between accounts(spot, margin, futures, delivery, ...).

        :param str code: unified currency code
        :param float amount: amount to transfer
        :param str fromAccount: source account type(resolved via options['accountsByType'])
        :param str toAccount: destination account type
        :param dict params: extra parameters passed through to the endpoint
        :returns dict: a minimal unified transfer structure
        :raises ExchangeError: if either account type is unknown
        """
        await self.load_markets()
        currency = self.currency(code)
        accountsByType = self.safe_value(self.options, 'accountsByType', {})
        fromId = self.safe_string(accountsByType, fromAccount, fromAccount)
        toId = self.safe_string(accountsByType, toAccount, toAccount)
        if fromId is None:
            keys = list(accountsByType.keys())
            raise ExchangeError(self.id + ' fromAccount must be one of ' + ', '.join(keys))
        if toId is None:
            keys = list(accountsByType.keys())
            raise ExchangeError(self.id + ' toAccount must be one of ' + ', '.join(keys))
        truncated = self.currency_to_precision(code, amount)
        request = {
            'currency': currency['id'],
            'from': fromId,
            'to': toId,
            'amount': truncated,
        }
        # transfers into contract accounts additionally require the settle currency
        if (toId == 'future') or (toId == 'delivery'):
            request['settle'] = currency['lowerCaseId']
        response = await self.privateWalletPostTransfers(self.extend(request, params))
        #
        # according to the docs
        #
        #     {
        #       "currency": "BTC",
        #       "from": "spot",
        #       "to": "margin",
        #       "amount": "1",
        #       "currency_pair": "BTC_USDT"
        #     }
        #
        # actual response
        #
        # POST https://api.gateio.ws/api/v4/wallet/transfers 204 No Content
        #
        return {
            'info': response,
            'from': fromId,
            'to': toId,
            'amount': truncated,
            'code': code,
        }
async def set_leverage(self, leverage, symbol=None, params={}):
if symbol is None:
raise ArgumentsRequired(self.id + ' setLeverage() requires a symbol argument')
# WARNING: THIS WILL INCREASE LIQUIDATION PRICE FOR OPEN ISOLATED LONG POSITIONS
# AND DECREASE LIQUIDATION PRICE FOR OPEN ISOLATED SHORT POSITIONS
if (leverage < 0) or (leverage > 100):
raise BadRequest(self.id + ' leverage should be between 1 and 100')
await self.load_markets()
market = self.market(symbol)
method = self.get_supported_mapping(market['type'], {
'swap': 'privateFuturesPostSettlePositionsContractLeverage',
'future': 'privateDeliveryPostSettlePositionsContractLeverage',
})
request = self.prepare_request(market)
request['query'] = {
'leverage': str(leverage),
}
if 'cross_leverage_limit' in params:
if leverage != 0:
raise BadRequest(self.id + ' cross margin leverage(valid only when leverage is 0)')
request['cross_leverage_limit'] = str(params['cross_leverage_limit'])
params = self.omit(params, 'cross_leverage_limit')
response = await getattr(self, method)(self.extend(request, params))
#
# {
# "value":"0",
# "leverage":"5",
# "mode":"single",
# "realised_point":"0",
# "contract":"BTC_USDT",
# "entry_price":"0",
# "mark_price":"62035.86",
# "history_point":"0",
# "realised_pnl":"0",
# "close_order":null,
# "size":0,
# "cross_leverage_limit":"0",
# "pending_orders":0,
# "adl_ranking":6,
# "maintenance_rate":"0.005",
# "unrealised_pnl":"0",
# "user":2436035,
# "leverage_max":"100",
# "history_pnl":"0",
# "risk_limit":"1000000",
# "margin":"0",
# "last_close_pnl":"0",
# "liq_price":"0"
# }
#
return response
    def parse_position(self, position, market=None):
        """
        Parse a raw contract position into the unified position structure.

        :param dict position: the raw position as returned by the API (see sample below)
        :param dict market: the corresponding unified market, or None if unknown
        :returns dict: a unified position structure
        """
        #
        #     {
        #         value: "12.475572",
        #         leverage: "0",
        #         mode: "single",
        #         realised_point: "0",
        #         contract: "BTC_USDT",
        #         entry_price: "62422.6",
        #         mark_price: "62377.86",
        #         history_point: "0",
        #         realised_pnl: "-0.00624226",
        #         close_order:  null,
        #         size: "2",
        #         cross_leverage_limit: "25",
        #         pending_orders: "0",
        #         adl_ranking: "5",
        #         maintenance_rate: "0.005",
        #         unrealised_pnl: "-0.008948",
        #         user: "663337",
        #         leverage_max: "100",
        #         history_pnl: "14.98868396636",
        #         risk_limit: "1000000",
        #         margin: "0.740721495056",
        #         last_close_pnl: "-0.041996015",
        #         liq_price: "59058.58"
        #     }
        #
        contract = self.safe_string(position, 'contract')
        market = self.safe_market(contract, market)
        size = self.safe_string(position, 'size')
        # the sign of the size encodes the position direction
        side = None
        if Precise.string_gt(size, '0'):
            side = 'buy'
        elif Precise.string_lt(size, '0'):
            side = 'sell'
        maintenanceRate = self.safe_string(position, 'maintenance_rate')
        notional = self.safe_string(position, 'value')
        leverage = self.safe_string(position, 'leverage')
        unrealisedPnl = self.safe_string(position, 'unrealised_pnl')
        # Initial Position Margin = ( Position Value / Leverage ) + Close Position Fee
        # *The default leverage under the full position is the highest leverage in the market.
        # *Trading fee is charged as Taker Fee Rate(0.075%).
        # NOTE(review): the taker fee rate is hard-coded here rather than taken
        # from the market's fee schedule - confirm it matches the account tier
        takerFee = '0.00075'
        feePaid = Precise.string_mul(takerFee, notional)
        initialMarginString = Precise.string_add(Precise.string_div(notional, leverage), feePaid)
        percentage = Precise.string_mul(Precise.string_div(unrealisedPnl, initialMarginString), '100')
        return {
            'info': position,
            'symbol': self.safe_string(market, 'symbol'),
            'timestamp': None,
            'datetime': None,
            'initialMargin': self.parse_number(initialMarginString),
            'initialMarginPercentage': self.parse_number(Precise.string_div(initialMarginString, notional)),
            'maintenanceMargin': self.parse_number(Precise.string_mul(maintenanceRate, notional)),
            'maintenanceMarginPercentage': self.parse_number(maintenanceRate),
            'entryPrice': self.safe_number(position, 'entry_price'),
            'notional': self.parse_number(notional),
            'leverage': self.safe_number(position, 'leverage'),
            'unrealizedPnl': self.parse_number(unrealisedPnl),
            'contracts': self.parse_number(size),
            'contractSize': self.safe_value(market, 'contractSize'),
            # realisedPnl: position['realised_pnl'],
            'marginRatio': None,
            'liquidationPrice': self.safe_number(position, 'liq_price'),
            'markPrice': self.safe_number(position, 'mark_price'),
            'collateral': self.safe_number(position, 'margin'),
            'marginType': None,
            'side': side,
            'percentage': self.parse_number(percentage),
        }
def parse_positions(self, positions):
result = []
for i in range(0, len(positions)):
result.append(self.parse_position(positions[i]))
return result
    async def fetch_positions(self, symbols=None, params={}):
        """Fetch all open derivative positions for one settle currency.

        Picks the endpoint from the market type ('swap' -> futures,
        'future' -> delivery), defaults the settle currency to 'usdt' for
        swaps and 'btc' otherwise, then parses and filters the response.

        :param symbols: Not used by Gateio for the request itself; only used
            to filter the parsed result.
        :param params:
            settle: The currency that derivative contracts are settled in
            type: 'swap' or 'future' (overrides options['fetchPositions']['defaultType'])
            Other exchange specific params
        """
        await self.load_markets()
        defaultType = self.safe_string_2(self.options, 'fetchPositions', 'defaultType', 'swap')
        type = self.safe_string(params, 'type', defaultType)
        # map the unified market type onto the implicit API method name
        method = self.get_supported_mapping(type, {
            'swap': 'privateFuturesGetSettlePositions',
            'future': 'privateDeliveryGetSettlePositions',
        })
        defaultSettle = 'usdt' if (type == 'swap') else 'btc'
        settle = self.safe_string_lower(params, 'settle', defaultSettle)
        request = {
            'settle': settle,
        }
        response = await getattr(self, method)(request)
        #
        # example response:
        #
        # [
        #     {
        #         value: "12.475572",
        #         leverage: "0",
        #         mode: "single",
        #         realised_point: "0",
        #         contract: "BTC_USDT",
        #         entry_price: "62422.6",
        #         mark_price: "62377.86",
        #         history_point: "0",
        #         realised_pnl: "-0.00624226",
        #         close_order: null,
        #         size: "2",
        #         cross_leverage_limit: "25",
        #         pending_orders: "0",
        #         adl_ranking: "5",
        #         maintenance_rate: "0.005",
        #         unrealised_pnl: "-0.008948",
        #         user: "6693577",
        #         leverage_max: "100",
        #         history_pnl: "14.98868396636",
        #         risk_limit: "1000000",
        #         margin: "0.740721495056",
        #         last_close_pnl: "-0.041996015",
        #         liq_price: "59058.58"
        #     }
        # ]
        #
        result = self.parse_positions(response)
        # note: symbols only filters the parsed positions, it is not sent to the API
        return self.filter_by_array(result, 'symbol', symbols, False)
    def sign(self, path, api=[], method='GET', params={}, headers=None, body=None):
        """Build the final URL, body and auth headers for a request.

        Public endpoints only get the query string appended. Private
        endpoints are signed with HMAC-SHA512 over
        ``METHOD\\n/api/<version>/<path>\\n<query>\\n<sha512(body)>\\n<timestamp>``
        and authenticated via the KEY / Timestamp / SIGN headers.
        """
        authentication = api[0]  # public, private
        type = api[1]  # spot, margin, future, delivery
        query = self.omit(params, self.extract_params(path))
        path = self.implode_params(path, params)
        endPart = '' if (path == '') else ('/' + path)
        entirePath = '/' + type + endPart
        url = self.urls['api'][authentication] + entirePath
        if authentication == 'public':
            if query:
                url += '?' + self.urlencode(query)
        else:
            queryString = ''
            if (method == 'GET') or (method == 'DELETE'):
                # GET/DELETE carry all params in the query string, no body
                if query:
                    queryString = self.urlencode(query)
                    url += '?' + queryString
            else:
                # other methods: the 'query' sub-dict goes into the URL,
                # everything else is JSON-encoded into the request body
                urlQueryParams = self.safe_value(query, 'query', {})
                if urlQueryParams:
                    queryString = self.urlencode(urlQueryParams)
                    url += '?' + queryString
                query = self.omit(query, 'query')
                body = self.json(query)
            bodyPayload = '' if (body is None) else body
            # the signature covers the sha512 hash of the body, not the raw body
            bodySignature = self.hash(self.encode(bodyPayload), 'sha512')
            timestamp = self.seconds()
            timestampString = str(timestamp)
            signaturePath = '/api/' + self.version + entirePath
            payloadArray = [method.upper(), signaturePath, queryString, bodySignature, timestampString]
            # eslint-disable-next-line quotes
            payload = "\n".join(payloadArray)
            signature = self.hmac(self.encode(payload), self.encode(self.secret), hashlib.sha512)
            headers = {
                'KEY': self.apiKey,
                'Timestamp': timestampString,
                'SIGN': signature,
                'Content-Type': 'application/json',
            }
        return {'url': url, 'method': method, 'body': body, 'headers': headers}
    def handle_errors(self, code, reason, url, method, headers, body, response, requestHeaders, requestBody):
        """Raise a mapped ccxt exception when the response carries an error label.

        Responses with a ``label`` field are errors; the label is matched
        against ``self.exceptions['exact']`` and, failing an exact match,
        a generic ExchangeError is raised. Successful responses fall through.
        """
        if response is None:
            return
        #
        # example error payloads:
        #
        # {"label":"ORDER_NOT_FOUND","message":"Order not found"}
        # {"label":"INVALID_PARAM_VALUE","message":"invalid argument: status"}
        # {"label":"INVALID_PARAM_VALUE","message":"invalid argument: Trigger.rule"}
        # {"label":"INVALID_PARAM_VALUE","message":"invalid argument: trigger.expiration invalid range"}
        # {"label":"INVALID_ARGUMENT","detail":"invalid size"}
        #
        label = self.safe_string(response, 'label')
        if label is not None:
            feedback = self.id + ' ' + body
            self.throw_exactly_matched_exception(self.exceptions['exact'], label, feedback)
            # no exact match for this label: raise the generic error
            raise ExchangeError(feedback)
| 43.57678 | 194 | 0.456544 |
7959cf2a494187c8a5efee158b7c05471b6b6871 | 3,983 | py | Python | leapp/exceptions.py | dhodovsk/leapp | bcd6580a19dabd132b3da8bcf2ed61fa8864ef18 | [
"Apache-2.0"
] | 29 | 2019-05-29T05:34:52.000Z | 2022-03-14T19:09:34.000Z | leapp/exceptions.py | dhodovsk/leapp | bcd6580a19dabd132b3da8bcf2ed61fa8864ef18 | [
"Apache-2.0"
] | 373 | 2018-11-21T11:41:49.000Z | 2022-03-31T11:40:56.000Z | leapp/exceptions.py | dhodovsk/leapp | bcd6580a19dabd132b3da8bcf2ed61fa8864ef18 | [
"Apache-2.0"
] | 27 | 2018-11-26T17:14:15.000Z | 2022-03-10T13:30:50.000Z | class LeappError(Exception):
    def __init__(self, message):
        """Store *message* both on the base Exception and as an attribute.

        :param message: human readable description of the error
        :type message: str
        """
        super(LeappError, self).__init__(message)
        # kept as an attribute so callers can read err.message directly
        self.message = message
class RepositoryConfigurationError(LeappError):
    """Marker subclass of LeappError; no behavior beyond the base class."""
    pass
class CannotConsumeErrorMessages(LeappError):
    """Raised with the fixed message below when error messages are consumed."""
    def __init__(self):
        super(CannotConsumeErrorMessages, self).__init__("Actors cannot consume error messages.")
class InvalidTopicItemError(LeappError):
    """Marker subclass of LeappError; no behavior beyond the base class."""
    pass
class InvalidTopicDefinitionError(LeappError):
    """Marker subclass of LeappError; no behavior beyond the base class."""
    pass
class InvalidTagDefinitionError(LeappError):
    """Marker subclass of LeappError; no behavior beyond the base class."""
    pass
class MissingActorAttributeError(LeappError):
    """Marker subclass of LeappError; no behavior beyond the base class."""
    pass
class WrongAttributeTypeError(LeappError):
    """Marker subclass of LeappError; no behavior beyond the base class."""
    pass
class ModelDefinitionError(LeappError):
    """Marker subclass of LeappError; no behavior beyond the base class."""
    pass
class TagFilterUsageError(LeappError):
    """Marker subclass of LeappError; no behavior beyond the base class."""
    pass
class CyclingDependenciesError(LeappError):
    """Marker subclass of LeappError; no behavior beyond the base class."""
    pass
class UnsupportedDefinitionKindError(LeappError):
    """Marker subclass of LeappError; no behavior beyond the base class."""
    pass
class ModuleNameAlreadyExistsError(LeappError):
    """Marker subclass of LeappError; no behavior beyond the base class."""
    pass
class ActorInspectionFailedError(LeappError):
    """Marker subclass of LeappError; no behavior beyond the base class."""
    pass
class MultipleActorsError(LeappError):
    """Raised when more than one actor is found at a single path."""
    def __init__(self, path):
        # path: the directory where multiple actors were discovered
        super(MultipleActorsError, self).__init__(
            'Multiple actors found in {path}. Inspection failed'.format(path=path))
class MultipleConfigActorsError(LeappError):
    """Raised when a workflow declares more than one config actor."""
    def __init__(self, config_actors):
        # config_actors: the offending collection of detected config actors
        super(MultipleConfigActorsError, self).__init__(
            'Multiple config actors detected: {config_actors}. '
            'Only one config actor per workflow is allowed'.format(config_actors=config_actors))
class WorkflowConfigNotAvailable(LeappError):
    """Raised when an actor needs the workflow configuration model but no actor produced it."""
    def __init__(self, actor):
        # TODO(mreznik): Current implementation of the workflow congiguration is problematic when used
        # with snactor. See https://github.com/oamg/leapp/issues/530
        super(WorkflowConfigNotAvailable, self).__init__(
            'Actor {actor} relies on workflow configuration model which '
            'must be produced by a specific actor'.format(actor=actor))
class RepoItemPathDoesNotExistError(LeappError):
    """Raised when a repository item's path cannot be found on disk."""
    def __init__(self, kind, rel_path, full_path):
        # kind: the item kind label used in the message; rel_path/full_path locate it
        super(RepoItemPathDoesNotExistError, self).__init__(
            'Could not find {kind} item with relative path: {rel_path} at {full_path}'.format(
                kind=kind, rel_path=rel_path, full_path=full_path))
class ActorDiscoveryExecutionError(LeappError):
    """Marker subclass of LeappError; no behavior beyond the base class."""
    pass
class UsageError(LeappError):
    """Marker subclass of LeappError; no behavior beyond the base class."""
    pass
class CommandError(LeappError):
    """Marker subclass of LeappError; no behavior beyond the base class."""
    pass
class CommandDefinitionError(LeappError):
    """Marker subclass of LeappError; no behavior beyond the base class."""
    pass
class LeappRuntimeError(LeappError):
    """Marker subclass of LeappError; no behavior beyond the base class."""
    pass
class StopActorExecution(Exception):
    """ This exception is used to gracefully stop execution of actor, but allows the workflow to continue. """
class StopActorExecutionError(LeappError):
    """
    This exception is used to gracefully stop execution of actor and it will call
    :py:func:`leapp.actors.Actor.report_error`.
    """
    # import here to break import cycle
    from leapp.models.error_severity import ErrorSeverity  # pylint: disable=import-outside-toplevel
    def __init__(self, message, severity=ErrorSeverity.ERROR, details=None):
        """
        :param message: A message to print the possible error
        :type message: str
        :param severity: Severity of the error default :py:attr:`leapp.messaging.errors.ErrorSeverity.ERROR`
        :type severity: str with defined values from :py:attr:`leapp.messaging.errors.ErrorSeverity.ERROR`
        :param details: A dictionary where additional context information is passed along with the error
        :type details: dict
        """
        super(StopActorExecutionError, self).__init__(message)
        # severity and details are stored for later reporting by the framework
        self.severity = severity
        self.details = details
class RequestStopAfterPhase(LeappError):
    """
    This exception is used to gracefully stop the current actor and request the stop of the workflow execution after
    the current phase.
    """
    def __init__(self):
        # fixed message; callers raise this without arguments
        super(RequestStopAfterPhase, self).__init__('Stop after phase has been requested.')
| 27.853147 | 116 | 0.728597 |
7959cf6640300aaa19d4d3dd4942feaf9152fdfc | 5,966 | py | Python | tests/helpers/test_check_config.py | dlintott/core | a6c83cc46a34084fdc4c0e7221b6ba493f82cbac | [
"Apache-2.0"
] | 2 | 2021-05-19T19:05:08.000Z | 2021-06-06T06:51:05.000Z | tests/helpers/test_check_config.py | jrhubott/core | 89fe232643134f283c041537e9f6841f47dc1c5e | [
"Apache-2.0"
] | 52 | 2020-07-23T07:15:00.000Z | 2022-03-31T06:01:47.000Z | tests/helpers/test_check_config.py | jrhubott/core | 89fe232643134f283c041537e9f6841f47dc1c5e | [
"Apache-2.0"
] | 2 | 2017-10-13T21:54:28.000Z | 2018-02-24T23:48:21.000Z | """Test check_config helper."""
import logging
from homeassistant.config import YAML_CONFIG_FILE
from homeassistant.helpers.check_config import (
CheckConfigError,
async_check_ha_config_file,
)
from tests.async_mock import Mock, patch
from tests.common import mock_platform, patch_yaml_files
_LOGGER = logging.getLogger(__name__)
BASE_CONFIG = (
"homeassistant:\n"
" name: Home\n"
" latitude: -26.107361\n"
" longitude: 28.054500\n"
" elevation: 1600\n"
" unit_system: metric\n"
" time_zone: GMT\n"
"\n\n"
)
BAD_CORE_CONFIG = "homeassistant:\n unit_system: bad\n\n\n"
def log_ha_config(conf):
    """Log the returned config."""
    _LOGGER.debug("CONFIG - %s lines - %s errors", len(conf), len(conf.errors))
    for idx, (key, val) in enumerate(conf.items()):
        _LOGGER.debug("#%s - %s: %s", idx, key, val)
    for idx, err in enumerate(conf.errors):
        _LOGGER.debug("error[%s] = %s", idx, err)
async def test_bad_core_config(hass):
    """Test a bad core config setup."""
    files = {YAML_CONFIG_FILE: BAD_CORE_CONFIG}
    # pretend the config file exists and serve the bad YAML from memory
    with patch("os.path.isfile", return_value=True), patch_yaml_files(files):
        res = await async_check_ha_config_file(hass)
        log_ha_config(res)
        # the invalid `unit_system` value must be reported against the core domain
        assert isinstance(res.errors[0].message, str)
        assert res.errors[0].domain == "homeassistant"
        assert res.errors[0].config == {"unit_system": "bad"}
        # Only 1 error expected
        res.errors.pop(0)
        assert not res.errors
async def test_config_platform_valid(hass):
"""Test a valid platform setup."""
files = {YAML_CONFIG_FILE: BASE_CONFIG + "light:\n platform: demo"}
with patch("os.path.isfile", return_value=True), patch_yaml_files(files):
res = await async_check_ha_config_file(hass)
log_ha_config(res)
assert res.keys() == {"homeassistant", "light"}
assert res["light"] == [{"platform": "demo"}]
assert not res.errors
async def test_component_platform_not_found(hass):
"""Test errors if component or platform not found."""
# Make sure they don't exist
files = {YAML_CONFIG_FILE: BASE_CONFIG + "beer:"}
with patch("os.path.isfile", return_value=True), patch_yaml_files(files):
res = await async_check_ha_config_file(hass)
log_ha_config(res)
assert res.keys() == {"homeassistant"}
assert res.errors[0] == CheckConfigError(
"Component error: beer - Integration 'beer' not found.", None, None
)
# Only 1 error expected
res.errors.pop(0)
assert not res.errors
async def test_component_platform_not_found_2(hass):
"""Test errors if component or platform not found."""
# Make sure they don't exist
files = {YAML_CONFIG_FILE: BASE_CONFIG + "light:\n platform: beer"}
with patch("os.path.isfile", return_value=True), patch_yaml_files(files):
res = await async_check_ha_config_file(hass)
log_ha_config(res)
assert res.keys() == {"homeassistant", "light"}
assert res["light"] == []
assert res.errors[0] == CheckConfigError(
"Platform error light.beer - Integration 'beer' not found.", None, None
)
# Only 1 error expected
res.errors.pop(0)
assert not res.errors
async def test_package_invalid(hass):
"""Test a valid platform setup."""
files = {
YAML_CONFIG_FILE: BASE_CONFIG + (" packages:\n p1:\n" ' group: ["a"]')
}
with patch("os.path.isfile", return_value=True), patch_yaml_files(files):
res = await async_check_ha_config_file(hass)
log_ha_config(res)
assert res.errors[0].domain == "homeassistant.packages.p1.group"
assert res.errors[0].config == {"group": ["a"]}
# Only 1 error expected
res.errors.pop(0)
assert not res.errors
assert res.keys() == {"homeassistant"}
async def test_bootstrap_error(hass):
"""Test a valid platform setup."""
files = {YAML_CONFIG_FILE: BASE_CONFIG + "automation: !include no.yaml"}
with patch("os.path.isfile", return_value=True), patch_yaml_files(files):
res = await async_check_ha_config_file(hass)
log_ha_config(res)
assert res.errors[0].domain is None
# Only 1 error expected
res.errors.pop(0)
assert not res.errors
async def test_automation_config_platform(hass):
"""Test automation async config."""
files = {
YAML_CONFIG_FILE: BASE_CONFIG
+ """
automation:
use_blueprint:
path: test_event_service.yaml
input:
trigger_event: blueprint_event
service_to_call: test.automation
input_datetime:
""",
hass.config.path(
"blueprints/automation/test_event_service.yaml"
): """
blueprint:
name: "Call service based on event"
domain: automation
input:
trigger_event:
service_to_call:
trigger:
platform: event
event_type: !input trigger_event
action:
service: !input service_to_call
""",
}
with patch("os.path.isfile", return_value=True), patch_yaml_files(files):
res = await async_check_ha_config_file(hass)
assert len(res.get("automation", [])) == 1
assert len(res.errors) == 0
assert "input_datetime" in res
async def test_config_platform_raise(hass):
"""Test bad config validation platform."""
mock_platform(
hass,
"bla.config",
Mock(async_validate_config=Mock(side_effect=Exception("Broken"))),
)
files = {
YAML_CONFIG_FILE: BASE_CONFIG
+ """
bla:
value: 1
""",
}
with patch("os.path.isfile", return_value=True), patch_yaml_files(files):
res = await async_check_ha_config_file(hass)
assert len(res.errors) == 1
err = res.errors[0]
assert err.domain == "bla"
assert err.message == "Unexpected error calling config validator: Broken"
assert err.config == {"value": 1}
| 30.594872 | 87 | 0.645659 |
7959cfc22b2162c3fc1e7cac03063926ad543944 | 4,139 | py | Python | examples/app/movies/main.py | goncaloperes/bokeh | b857d2d17d7c19779bb0a7be2601d8238fb1d5e9 | [
"BSD-3-Clause"
] | 1 | 2021-04-09T02:57:29.000Z | 2021-04-09T02:57:29.000Z | examples/app/movies/main.py | goncaloperes/bokeh | b857d2d17d7c19779bb0a7be2601d8238fb1d5e9 | [
"BSD-3-Clause"
] | 1 | 2021-03-01T14:04:56.000Z | 2021-03-01T14:04:56.000Z | examples/app/movies/main.py | goncaloperes/bokeh | b857d2d17d7c19779bb0a7be2601d8238fb1d5e9 | [
"BSD-3-Clause"
] | null | null | null | import sqlite3 as sql
from os.path import dirname, join
import numpy as np
import pandas.io.sql as psql
from bokeh.io import curdoc
from bokeh.layouts import column, row
from bokeh.models import ColumnDataSource, Div, Select, Slider, TextInput
from bokeh.plotting import figure
from bokeh.sampledata.movies_data import movie_path
conn = sql.connect(movie_path)
query = open(join(dirname(__file__), 'query.sql')).read()
movies = psql.read_sql(query, conn)
movies["color"] = np.where(movies["Oscars"] > 0, "orange", "grey")
movies["alpha"] = np.where(movies["Oscars"] > 0, 0.9, 0.25)
movies.fillna(0, inplace=True) # just replace missing values with zero
movies["revenue"] = movies.BoxOffice.apply(lambda x: '{:,d}'.format(int(x)))
with open(join(dirname(__file__), "razzies-clean.csv")) as f:
razzies = f.read().splitlines()
movies.loc[movies.imdbID.isin(razzies), "color"] = "purple"
movies.loc[movies.imdbID.isin(razzies), "alpha"] = 0.9
axis_map = {
"Tomato Meter": "Meter",
"Numeric Rating": "numericRating",
"Number of Reviews": "Reviews",
"Box Office (dollars)": "BoxOffice",
"Length (minutes)": "Runtime",
"Year": "Year",
}
desc = Div(text=open(join(dirname(__file__), "description.html")).read(), sizing_mode="stretch_width")
# Create Input controls
reviews = Slider(title="Minimum number of reviews", value=80, start=10, end=300, step=10)
min_year = Slider(title="Year released", start=1940, end=2014, value=1970, step=1)
max_year = Slider(title="End Year released", start=1940, end=2014, value=2014, step=1)
oscars = Slider(title="Minimum number of Oscar wins", start=0, end=4, value=0, step=1)
boxoffice = Slider(title="Dollars at Box Office (millions)", start=0, end=800, value=0, step=1)
genre = Select(title="Genre", value="All",
options=open(join(dirname(__file__), 'genres.txt')).read().split())
director = TextInput(title="Director name contains")
cast = TextInput(title="Cast names contains")
x_axis = Select(title="X Axis", options=sorted(axis_map.keys()), value="Tomato Meter")
y_axis = Select(title="Y Axis", options=sorted(axis_map.keys()), value="Number of Reviews")
# Create Column Data Source that will be used by the plot
source = ColumnDataSource(data=dict(x=[], y=[], color=[], title=[], year=[], revenue=[], alpha=[]))
TOOLTIPS=[
("Title", "@title"),
("Year", "@year"),
("$", "@revenue")
]
p = figure(plot_height=600, plot_width=700, title="", toolbar_location=None, tooltips=TOOLTIPS, sizing_mode="scale_both")
p.circle(x="x", y="y", source=source, size=7, color="color", line_color=None, fill_alpha="alpha")
def select_movies():
    """Return the subset of ``movies`` matching the current widget values.

    Applies the numeric range filters (reviews, box office, year range,
    Oscar wins), then the optional genre / director / cast substring
    filters read from the corresponding input widgets.
    """
    genre_val = genre.value
    director_val = director.value.strip()
    cast_val = cast.value.strip()
    selected = movies[
        (movies.Reviews >= reviews.value) &
        (movies.BoxOffice >= (boxoffice.value * 1e6)) &
        (movies.Year >= min_year.value) &
        (movies.Year <= max_year.value) &
        (movies.Oscars >= oscars.value)
    ]
    # `na=False` makes non-string cells (NaNs were replaced with 0 at load
    # time) fail the filter, which is what the previous `== True` comparison
    # achieved implicitly; this is the pandas-documented way to do it.
    if genre_val != "All":
        selected = selected[selected.Genre.str.contains(genre_val, na=False)]
    if director_val != "":
        selected = selected[selected.Director.str.contains(director_val, na=False)]
    if cast_val != "":
        selected = selected[selected.Cast.str.contains(cast_val, na=False)]
    return selected
def update():
    """Re-run the filters and push the selected rows into the plot's data source."""
    df = select_movies()
    # resolve the human-readable axis labels to dataframe column names
    x_name = axis_map[x_axis.value]
    y_name = axis_map[y_axis.value]
    p.xaxis.axis_label = x_axis.value
    p.yaxis.axis_label = y_axis.value
    p.title.text = "%d movies selected" % len(df)
    # replacing source.data triggers a client-side redraw
    source.data = dict(
        x=df[x_name],
        y=df[y_name],
        color=df["color"],
        title=df["Title"],
        year=df["Year"],
        revenue=df["revenue"],
        alpha=df["alpha"],
    )
controls = [reviews, boxoffice, genre, min_year, max_year, oscars, director, cast, x_axis, y_axis]
for control in controls:
control.on_change('value', lambda attr, old, new: update())
inputs = column(*controls, width=320)
l = column(desc, row(inputs, p), sizing_mode="scale_both")
update() # initial load of the data
curdoc().add_root(l)
curdoc().title = "Movies"
| 36.307018 | 121 | 0.672385 |
7959d284b7b823ed4d6d63ab7195da688ba24dd5 | 2,618 | py | Python | bigchaindb/events.py | innoprenuer/bigchaindb | 32b64ccc2a208f38162566f3e088ad49baced79f | [
"Apache-2.0"
] | 1 | 2019-05-31T14:06:02.000Z | 2019-05-31T14:06:02.000Z | bigchaindb/events.py | innoprenuer/bigchaindb | 32b64ccc2a208f38162566f3e088ad49baced79f | [
"Apache-2.0"
] | null | null | null | bigchaindb/events.py | innoprenuer/bigchaindb | 32b64ccc2a208f38162566f3e088ad49baced79f | [
"Apache-2.0"
] | 1 | 2019-08-28T23:38:52.000Z | 2019-08-28T23:38:52.000Z | from queue import Empty
from collections import defaultdict
from multiprocessing import Queue
POISON_PILL = 'POISON_PILL'
class EventTypes:
    """Container class that holds all the possible
    events BigchainDB manages.
    """
    # If you add a new Event Type, make sure to add it
    # to the docs in docs/server/source/event-plugin-api.rst
    # ~0 has every bit set, so ALL matches any event type in a bitwise AND
    ALL = ~0
    BLOCK_VALID = 1
    BLOCK_INVALID = 2
    # NEW_EVENT = 4
    # NEW_EVENT = 8
    # NEW_EVENT = 16...
class Event:
    """An Event."""
    def __init__(self, event_type, event_data):
        """Creates a new event.
        Args:
            event_type (int): the type of the event, see
                :class:`~bigchaindb.events.EventTypes`
            event_data (obj): the data of the event.
        """
        # type is a bitmask value from EventTypes; data is the event payload
        self.type = event_type
        self.data = event_data
class Exchange:
    """Fan events out from a single publisher queue to subscriber queues."""

    def __init__(self):
        self.publisher_queue = Queue()
        self.started_queue = Queue()
        # event-type bitmask -> list of subscriber queues
        self.queues = defaultdict(list)

    def get_publisher_queue(self):
        """Return the queue that the publisher writes events into.

        Returns:
            a :class:`multiprocessing.Queue`.
        """
        return self.publisher_queue

    def get_subscriber_queue(self, event_types=None):
        """Register and return a fresh queue for the given event types.

        Returns:
            a :class:`multiprocessing.Queue`.
        Raises:
            RuntimeError if called after `run`
        """
        try:
            self.started_queue.get_nowait()
        except Empty:
            pass
        else:
            # a marker in started_queue means run() was already called
            raise RuntimeError('Cannot create a new subscriber queue while Exchange is running.')
        mask = EventTypes.ALL if event_types is None else event_types
        subscriber_queue = Queue()
        self.queues[mask].append(subscriber_queue)
        return subscriber_queue

    def dispatch(self, event):
        """Send *event* to every subscriber whose type mask matches it.

        Args
            event (:class:`~bigchaindb.events.EventTypes`): the event to
                dispatch to all the subscribers.
        """
        for mask, subscriber_queues in self.queues.items():
            if not (event.type & mask):
                continue
            for subscriber_queue in subscriber_queues:
                subscriber_queue.put(event)

    def run(self):
        """Consume the publisher queue until the poison pill arrives."""
        self.started_queue.put('STARTED')
        while True:
            event = self.publisher_queue.get()
            if event == POISON_PILL:
                return
            self.dispatch(event)
| 24.933333 | 97 | 0.579832 |
7959d540d2df46677f26bd94b551d930688cace8 | 5,317 | py | Python | BattleBombRoyale/tests/test_ready_ok.py | iconation/BattleBombRoyale | 682b4e67212a2478a2ef0c01e29acec775210075 | [
"Apache-2.0"
] | null | null | null | BattleBombRoyale/tests/test_ready_ok.py | iconation/BattleBombRoyale | 682b4e67212a2478a2ef0c01e29acec775210075 | [
"Apache-2.0"
] | null | null | null | BattleBombRoyale/tests/test_ready_ok.py | iconation/BattleBombRoyale | 682b4e67212a2478a2ef0c01e29acec775210075 | [
"Apache-2.0"
] | null | null | null | import os
from iconsdk.builder.transaction_builder import DeployTransactionBuilder
from iconsdk.builder.call_builder import CallBuilder
from iconsdk.icon_service import IconService
from iconsdk.libs.in_memory_zip import gen_deploy_data_content
from iconsdk.providers.http_provider import HTTPProvider
from iconsdk.signed_transaction import SignedTransaction
from tbears.libs.icon_integrate_test import IconIntegrateTestBase, SCORE_INSTALL_ADDRESS
from BattleBombRoyale.tests.utils import *
DIR_PATH = os.path.abspath(os.path.dirname(__file__))
class TestBattleBombRoyale(IconIntegrateTestBase):
TEST_HTTP_ENDPOINT_URI_V3 = "http://127.0.0.1:9000/api/v3"
SCORE_PROJECT= os.path.abspath(os.path.join(DIR_PATH, '..'))
_PARTICIPATION_COST = 1 * 10**18
def setUp(self):
super().setUp()
self.icon_service = None
# if you want to send request to network, uncomment next line and set self.TEST_HTTP_ENDPOINT_URI_V3
# self.icon_service = IconService(HTTPProvider(self.TEST_HTTP_ENDPOINT_URI_V3))
# install SCORE
self._score_address = self._deploy_score()['scoreAddress']
self._j1 = self._wallet_array[0]
self._j2 = self._wallet_array[1]
self._j3 = self._wallet_array[2]
self._j4 = self._wallet_array[3]
self._spectator = self._wallet_array[9]
for wallet in self._wallet_array:
icx_transfer_call(super(), self._test1, wallet.get_address(), 100 * 10**18, self.icon_service)
# OK
result = transaction_call_success(super(),
from_=self._j1,
to_=self._score_address,
method="create_game",
icon_service=self.icon_service,
value=self._PARTICIPATION_COST
)
self._token = result['txHash']
# OK
result = transaction_call_success(super(),
from_=self._j2,
to_=self._score_address,
method="join_game",
params={'token': self._token},
icon_service=self.icon_service,
value=self._PARTICIPATION_COST
)
    def ready_ask(self):
        """Helper: have player 1 start the ready countdown via `ready_ask`."""
        # OK - the SCORE call is expected to succeed
        result = transaction_call_success(super(),
                                          from_=self._j1,
                                          to_=self._score_address,
                                          method="ready_ask",
                                          icon_service=self.icon_service
                                          )
    def _deploy_score(self, to: str = SCORE_INSTALL_ADDRESS) -> dict:
        """Deploy the SCORE under test and return the transaction result.

        :param to: deploy target address (install address by default)
        :return: the processed transaction result dict (contains 'scoreAddress')
        """
        # Generates an instance of transaction for deploying SCORE.
        transaction = DeployTransactionBuilder() \
            .from_(self._test1.get_address()) \
            .to(to) \
            .step_limit(100_000_000_000) \
            .nid(3) \
            .nonce(100) \
            .content_type("application/zip") \
            .content(gen_deploy_data_content(self.SCORE_PROJECT)) \
            .build()
        # Returns the signed transaction object having a signature
        signed_transaction = SignedTransaction(transaction, self._test1)
        # process the transaction in local
        result = self.process_transaction(signed_transaction, self.icon_service)
        # deployment must succeed and yield the new SCORE address
        self.assertTrue('status' in result)
        self.assertEqual(1, result['status'])
        self.assertTrue('scoreAddress' in result)
        return result
# ===============================================================
def test_ready_ok_ok(self):
self.ready_ask()
# OK
result = transaction_call_success(super(),
from_=self._j2,
to_=self._score_address,
method="ready_ok",
icon_service=self.icon_service
)
def test_ready_ok_PLAYER_IS_NOT_REGISTERED (self):
self.ready_ask()
# Fail
result = transaction_call_error(super(),
from_=self._j3,
to_=self._score_address,
method="ready_ok",
icon_service=self.icon_service
)
self.assertEqual(result['failure']['message'], 'PLAYER_IS_NOT_REGISTERED')
def test_ready_ok_GAME_ALREADY_STARTED (self):
self.ready_ask()
# OK
result = transaction_call_success(super(),
from_=self._j2,
to_=self._score_address,
method="ready_ok",
icon_service=self.icon_service
)
# OK
result = transaction_call_success(super(),
from_=self._j1,
to_=self._score_address,
method="start_game",
icon_service=self.icon_service
)
# Fail
result = transaction_call_error(super(),
from_=self._j2,
to_=self._score_address,
method="ready_ok",
icon_service=self.icon_service
)
self.assertEqual(result['failure']['message'], 'GAME_ALREADY_STARTED')
def test_ready_ok_START_COUNTDOWN_NOT_STARTED (self):
# Fail
result = transaction_call_error(super(),
from_=self._j2,
to_=self._score_address,
method="ready_ok",
icon_service=self.icon_service
)
self.assertEqual(result['failure']['message'], 'START_COUNTDOWN_NOT_STARTED')
| 35.684564 | 109 | 0.602219 |
7959d5e5374bbada832c35a6a43296b39d9ea26f | 12,990 | py | Python | setup.py | stillmatic/onnx | 8d5eb62d5299f6dcb6ac787f0ea8e6cf5b8331a7 | [
"Apache-2.0"
] | null | null | null | setup.py | stillmatic/onnx | 8d5eb62d5299f6dcb6ac787f0ea8e6cf5b8331a7 | [
"Apache-2.0"
] | null | null | null | setup.py | stillmatic/onnx | 8d5eb62d5299f6dcb6ac787f0ea8e6cf5b8331a7 | [
"Apache-2.0"
] | null | null | null | # SPDX-License-Identifier: Apache-2.0
from __future__ import (absolute_import, division, print_function,
unicode_literals)
import glob
import multiprocessing
import os
import platform
import shlex
import subprocess
import sys
from collections import namedtuple
from contextlib import contextmanager
from datetime import date
from distutils import log, sysconfig
from distutils.spawn import find_executable
from textwrap import dedent
import setuptools
import setuptools.command.build_ext
import setuptools.command.build_py
import setuptools.command.develop
TOP_DIR = os.path.realpath(os.path.dirname(__file__))
SRC_DIR = os.path.join(TOP_DIR, 'onnx')
TP_DIR = os.path.join(TOP_DIR, 'third_party')
CMAKE_BUILD_DIR = os.path.join(TOP_DIR, '.setuptools-cmake-build')
PACKAGE_NAME = 'onnx'
WINDOWS = (os.name == 'nt')
CMAKE = find_executable('cmake3') or find_executable('cmake')
MAKE = find_executable('make')
install_requires = []
setup_requires = []
tests_require = []
extras_require = {}
################################################################################
# Global variables for controlling the build variant
################################################################################
# Default value is set to TRUE\1 to keep the settings same as the current ones.
# However going forward the recomemded way to is to set this to False\0
ONNX_ML = not bool(os.getenv('ONNX_ML') == '0')
ONNX_VERIFY_PROTO3 = bool(os.getenv('ONNX_VERIFY_PROTO3') == '1')
ONNX_NAMESPACE = os.getenv('ONNX_NAMESPACE', 'onnx')
ONNX_BUILD_TESTS = bool(os.getenv('ONNX_BUILD_TESTS') == '1')
ONNX_DISABLE_EXCEPTIONS = bool(os.getenv('ONNX_DISABLE_EXCEPTIONS') == '1')
USE_MSVC_STATIC_RUNTIME = bool(os.getenv('USE_MSVC_STATIC_RUNTIME', '0') == '1')
DEBUG = bool(os.getenv('DEBUG', '0') == '1')
COVERAGE = bool(os.getenv('COVERAGE', '0') == '1')
################################################################################
# Version
################################################################################
try:
git_version = subprocess.check_output(['git', 'rev-parse', 'HEAD'],
cwd=TOP_DIR).decode('ascii').strip()
except (OSError, subprocess.CalledProcessError):
git_version = None
with open(os.path.join(TOP_DIR, 'VERSION_NUMBER')) as version_file:
VERSION_NUMBER = version_file.read().strip()
if '--weekly_build' in sys.argv:
today_number = date.today().strftime("%Y%m%d")
VERSION_NUMBER += '.dev' + today_number
PACKAGE_NAME = 'onnx-weekly'
sys.argv.remove('--weekly_build')
VersionInfo = namedtuple('VersionInfo', ['version', 'git_version'])(
version=VERSION_NUMBER,
git_version=git_version
)
################################################################################
# Pre Check
################################################################################
assert CMAKE, 'Could not find "cmake" executable!'
################################################################################
# Utilities
################################################################################
@contextmanager
def cd(path):
    """Temporarily change the working directory to *path*.

    Only absolute paths are accepted; the previous working directory is
    restored on exit, even if the body raises.
    """
    if not os.path.isabs(path):
        raise RuntimeError('Can only cd to absolute path, got: {}'.format(path))
    previous = os.getcwd()
    os.chdir(path)
    try:
        yield
    finally:
        os.chdir(previous)
################################################################################
# Customized commands
################################################################################
class ONNXCommand(setuptools.Command):
    """Base class for the custom setup.py commands below; no options."""
    user_options = []
    def initialize_options(self):
        # no options to initialize
        pass
    def finalize_options(self):
        # no options to finalize
        pass
class create_version(ONNXCommand):
def run(self):
with open(os.path.join(SRC_DIR, 'version.py'), 'w') as f:
f.write(dedent('''\
# This file is generated by setup.py. DO NOT EDIT!
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
version = '{version}'
git_version = '{git_version}'
'''.format(**dict(VersionInfo._asdict()))))
class cmake_build(setuptools.Command):
"""
Compiles everything when `python setupmnm.py build` is run using cmake.
Custom args can be passed to cmake by specifying the `CMAKE_ARGS`
environment variable.
The number of CPUs used by `make` can be specified by passing `-j<ncpus>`
to `setup.py build`. By default all CPUs are used.
"""
user_options = [
(str('jobs='), str('j'), str('Specifies the number of jobs to use with make'))
]
built = False
def initialize_options(self):
self.jobs = None
def finalize_options(self):
if sys.version_info[0] >= 3:
self.set_undefined_options('build', ('parallel', 'jobs'))
if self.jobs is None and os.getenv("MAX_JOBS") is not None:
self.jobs = os.getenv("MAX_JOBS")
self.jobs = multiprocessing.cpu_count() if self.jobs is None else int(self.jobs)
def run(self):
if cmake_build.built:
return
cmake_build.built = True
if not os.path.exists(CMAKE_BUILD_DIR):
os.makedirs(CMAKE_BUILD_DIR)
with cd(CMAKE_BUILD_DIR):
build_type = 'Release'
# configure
cmake_args = [
CMAKE,
'-DPYTHON_INCLUDE_DIR={}'.format(sysconfig.get_python_inc()),
'-DPYTHON_EXECUTABLE={}'.format(sys.executable),
'-DBUILD_ONNX_PYTHON=ON',
'-DCMAKE_EXPORT_COMPILE_COMMANDS=ON',
'-DONNX_NAMESPACE={}'.format(ONNX_NAMESPACE),
'-DPY_EXT_SUFFIX={}'.format(sysconfig.get_config_var('EXT_SUFFIX') or ''),
]
if COVERAGE:
cmake_args.append('-DONNX_COVERAGE=ON')
if COVERAGE or DEBUG:
# in order to get accurate coverage information, the
# build needs to turn off optimizations
build_type = 'Debug'
cmake_args.append('-DCMAKE_BUILD_TYPE=%s' % build_type)
if WINDOWS:
cmake_args.extend([
# we need to link with libpython on windows, so
# passing python version to window in order to
# find python in cmake
'-DPY_VERSION={}'.format('{0}.{1}'.format(*sys.version_info[:2])),
])
if USE_MSVC_STATIC_RUNTIME:
cmake_args.append('-DONNX_USE_MSVC_STATIC_RUNTIME=ON')
if platform.architecture()[0] == '64bit':
cmake_args.extend(['-A', 'x64', '-T', 'host=x64'])
else:
cmake_args.extend(['-A', 'Win32', '-T', 'host=x86'])
if ONNX_ML:
cmake_args.append('-DONNX_ML=1')
if ONNX_VERIFY_PROTO3:
cmake_args.append('-DONNX_VERIFY_PROTO3=1')
if ONNX_BUILD_TESTS:
cmake_args.append('-DONNX_BUILD_TESTS=ON')
if ONNX_DISABLE_EXCEPTIONS:
cmake_args.append('-DONNX_DISABLE_EXCEPTIONS=ON')
if 'CMAKE_ARGS' in os.environ:
extra_cmake_args = shlex.split(os.environ['CMAKE_ARGS'])
# prevent crossfire with downstream scripts
del os.environ['CMAKE_ARGS']
log.info('Extra cmake args: {}'.format(extra_cmake_args))
cmake_args.extend(extra_cmake_args)
cmake_args.append(TOP_DIR)
log.info('Using cmake args: {}'.format(cmake_args))
if '-DONNX_DISABLE_EXCEPTIONS=ON' in cmake_args:
raise RuntimeError("-DONNX_DISABLE_EXCEPTIONS=ON option is only available for c++ builds. Python binding require exceptions to be enabled.")
subprocess.check_call(cmake_args)
build_args = [CMAKE, '--build', os.curdir]
if WINDOWS:
build_args.extend(['--config', build_type])
build_args.extend(['--', '/maxcpucount:{}'.format(self.jobs)])
else:
build_args.extend(['--', '-j', str(self.jobs)])
subprocess.check_call(build_args)
class build_py(setuptools.command.build_py.build_py):
    """build_py that runs the CMake build first and pulls generated sources in."""

    def run(self):
        self.run_command('create_version')
        self.run_command('cmake_build')
        # Mirror every CMake-generated onnx/*.py and onnx/*.pyi file back into
        # the source tree so the stock build_py copy step picks them up.
        generated_python_files = []
        for pattern in ('*.py', '*.pyi'):
            generated_python_files.extend(
                glob.glob(os.path.join(CMAKE_BUILD_DIR, 'onnx', pattern)))
        for src_path in generated_python_files:
            dst_path = os.path.join(TOP_DIR, os.path.relpath(src_path, CMAKE_BUILD_DIR))
            self.copy_file(src_path, dst_path)
        return setuptools.command.build_py.build_py.run(self)
class develop(setuptools.command.develop.develop):
    """develop (editable install) that ensures build_py -- and thus the
    CMake build -- has run before the normal develop steps."""

    def run(self):
        self.run_command('build_py')
        super(develop, self).run()
class build_ext(setuptools.command.build_ext.build_ext):
    """build_ext that copies the CMake-built extension binary into place
    instead of compiling anything itself."""

    def run(self):
        self.run_command('cmake_build')
        setuptools.command.build_ext.build_ext.run(self)

    def build_extensions(self):
        for extension in self.extensions:
            full_name = self.get_ext_fullname(extension.name)
            base_name = os.path.basename(self.get_ext_filename(full_name))
            src_dir = CMAKE_BUILD_DIR
            if os.name == 'nt':
                # MSVC multi-config generators nest binaries one level deeper;
                # prefer Debug over Release, matching the original lookup order.
                for config in ("Debug", "Release"):
                    candidate = os.path.join(src_dir, config)
                    if os.path.exists(candidate):
                        src_dir = candidate
                        break
            src = os.path.join(src_dir, base_name)
            dst = os.path.join(os.path.realpath(self.build_lib), "onnx", base_name)
            self.copy_file(src, dst)
class mypy_type_check(ONNXCommand):
    description = 'Run MyPy type checker'

    def run(self):
        """Run the repo's mypy wrapper script and exit with its return code."""
        script_path = os.path.join(
            os.path.dirname(os.path.abspath(__file__)), "tools/mypy-onnx.py")
        onnx_script = os.path.realpath(script_path)
        returncode = subprocess.call([sys.executable, onnx_script])
        sys.exit(returncode)
# Map setuptools command names to the customized command classes above.
cmdclass = {
    'create_version': create_version,
    'cmake_build': cmake_build,
    'build_py': build_py,
    'develop': develop,
    'build_ext': build_ext,
    'typecheck': mypy_type_check,
}
################################################################################
# Extensions
################################################################################
# Placeholder extension: the real binary is produced by CMake and copied into
# place by the build_ext command above.
ext_modules = [
    setuptools.Extension(
        name=str('onnx.onnx_cpp2py_export'),
        sources=[])
]
################################################################################
# Packages
################################################################################
# no need to do fancy stuff so far
packages = setuptools.find_packages()
# Locate requirements.txt in the current working directory first, then fall
# back to the directory containing this setup.py.
requirements_file = "requirements.txt"
requirements_path = os.path.join(os.getcwd(), requirements_file)
if not os.path.exists(requirements_path):
    this = os.path.dirname(__file__)
    requirements_path = os.path.join(this, requirements_file)
    if not os.path.exists(requirements_path):
        raise FileNotFoundError("Unable to find " + requirements_file)
with open(requirements_path) as f:
    install_requires = f.read().splitlines()
################################################################################
# Test
################################################################################
setup_requires.append('pytest-runner')
tests_require.append('pytest')
tests_require.append('nbval')
tests_require.append('tabulate')
extras_require["mypy"] = ["mypy==0.910"]
################################################################################
# Final
################################################################################
# Read the long description with a context manager so the file handle is
# closed deterministically; the previous open("README.md").read() inline in
# the setup() call leaked the handle (ResourceWarning under -W error).
with open("README.md") as readme_file:
    long_description = readme_file.read()
setuptools.setup(
    name=PACKAGE_NAME,
    version=VersionInfo.version,
    description="Open Neural Network Exchange",
    long_description=long_description,
    long_description_content_type="text/markdown",
    ext_modules=ext_modules,
    cmdclass=cmdclass,
    packages=packages,
    license='Apache License v2.0',
    include_package_data=True,
    install_requires=install_requires,
    setup_requires=setup_requires,
    tests_require=tests_require,
    extras_require=extras_require,
    author='ONNX',
    author_email='onnx-technical-discuss@lists.lfai.foundation',
    url='https://github.com/onnx/onnx',
    entry_points={
        'console_scripts': [
            'check-model = onnx.bin.checker:check_model',
            'check-node = onnx.bin.checker:check_node',
            'backend-test-tools = onnx.backend.test.cmd_tools:main',
        ]
    },
)
| 36.183844 | 156 | 0.564973 |
7959d8f859c9a269a59bb43816e40eac056a521c | 4,225 | py | Python | utils/type-layout-fuzzer.py | gandhi56/swift | 2d851ff61991bb8964079661339671c2fd21d88a | [
"Apache-2.0"
] | 72,551 | 2015-12-03T16:45:13.000Z | 2022-03-31T18:57:59.000Z | utils/type-layout-fuzzer.py | gandhi56/swift | 2d851ff61991bb8964079661339671c2fd21d88a | [
"Apache-2.0"
] | 39,352 | 2015-12-03T16:55:06.000Z | 2022-03-31T23:43:41.000Z | utils/type-layout-fuzzer.py | gandhi56/swift | 2d851ff61991bb8964079661339671c2fd21d88a | [
"Apache-2.0"
] | 13,845 | 2015-12-03T16:45:13.000Z | 2022-03-31T11:32:29.000Z | #!/usr/bin/env python
# This script outputs a Swift source with randomly-generated type definitions,
# which can be used for ABI or layout algorithm fuzzing.
# TODO: generate types with generics, existentials, compositions
from __future__ import print_function
import random
import sys
# Limits on recursion depth of type references and members per type.
maxDepth = 5
maxMembers = 5
# Worklist of (name, definer) pairs; definers may append more entries while
# running, and the driver loop at the bottom drains it.
typesDefined = []
# Names of classes emitted so far, so other types can reference/inherit them.
classesDefined = []
# Index of the next worklist entry to process.
nextToDefine = 0
objcInterop = False
if len(sys.argv) >= 2:
    if sys.argv[1] == "--objc":
        objcInterop = True
    if sys.argv[1] == "--help":
        print("Usage: " + sys.argv[0] + " [--objc]", file=sys.stderr)
        print("", file=sys.stderr)
        print(" --objc Include ObjC-interop types", file=sys.stderr)
        sys.exit(2)
# Seed from system entropy; output differs on every run by design.
random.seed()
if objcInterop:
    print("import Foundation")
    print()
def randomTypeList(depth):
    """Emit a parenthesized, comma-separated list of up to maxMembers random
    member types, recursing one level deeper for each member."""
    member_count = random.randint(0, maxMembers)
    members = [randomTypeReference(depth + 1) for _ in range(member_count)]
    return "(" + ", ".join(members) + ")"
def randomTypeReference(depth):
    """Emit one random type reference; recursion stops at maxDepth.

    The kind list is weighted 5/8 toward leaves so references terminate.
    """
    def nominal_kind():
        global typesDefined
        may_define_new = depth < maxDepth
        # Picking an index == len(classesDefined) means "define a new type".
        upper = len(classesDefined) if may_define_new else len(classesDefined) - 1
        pick = random.randint(0, upper)
        if pick < len(classesDefined):
            return classesDefined[pick]
        new_name = "T" + str(len(typesDefined))
        def define_related_type(name):
            defineRandomNominalType(name, depth)
        typesDefined.append((new_name, define_related_type))
        return new_name

    def tuple_kind():
        return randomTypeList(depth + 1)

    def metatype_kind():
        return "(" + randomTypeReference(depth + 1) + ").Type"

    def leaf_kind():
        choices = ["Int", "String", "Int8", "Int16", "Int32", "Int64",
                   "(() -> ())", "(@convention(c) () -> ())", "AnyObject"]
        if objcInterop:
            choices += ["NSObject", "(@convention(block) () -> ())"]
        return random.choice(choices)

    if depth < maxDepth:
        kinds = [nominal_kind, tuple_kind, metatype_kind] + [leaf_kind] * 5
    else:
        kinds = [leaf_kind]
    return random.choice(kinds)()
def defineRandomFields(depth, basename):
    """Print between 0 and maxMembers stored properties with random types,
    named basename0, basename1, ..."""
    for index in range(random.randint(0, maxMembers)):
        print(" var " + basename + str(index) + ": " +
              randomTypeReference(depth + 1))
def defineRandomClass(name, depth):
    """Print a random Swift class named *name*; may enqueue a base class on
    the worklist (typesDefined) if the inheritance roll picks one."""
    global classesDefined
    classesDefined.append(name)
    print("class " + name, end="")
    def inheritNSObject():
        print(": NSObject", end="")
    def inheritsOtherClass():
        print(": ", end="")
        # Shadows the outer `name` deliberately: this is the base class name,
        # which is enqueued for later definition by the driver loop.
        name = "T" + str(len(typesDefined))
        def defineRandomBaseClass(name):
            defineRandomClass(name, depth)
        typesDefined.append((name, defineRandomBaseClass))
        print(name, end="")
    def inheritsNothing():
        pass
    inheritances = [inheritsNothing]
    if depth == 0:
        # The contents of classes are interesting only for top-level type
        inheritances += [inheritsOtherClass]
        if objcInterop:
            inheritances += [inheritNSObject]
    random.choice(inheritances)()
    print(" {")
    # Prevent errors about lack of initializers
    print(" init(" + name + ": ()) { fatalError() }")
    # The contents of classes are interesting only for top-level type
    if depth == 0:
        defineRandomFields(depth, "x" + name)
    print("}")
    print()
def defineRandomNominalType(name, depth=0):
    """Print one random nominal type (struct, class, or enum) named *name*."""
    def emit_struct():
        print("struct " + name + " {")
        defineRandomFields(depth, "x")
        print("}")
        print()

    def emit_class():
        defineRandomClass(name, depth)

    def emit_enum():
        # TODO: indirect cases
        print("enum " + name + " {")
        for case_index in range(random.randint(0, maxMembers)):
            print(" case x" + str(case_index) + randomTypeList(depth + 1))
        print("}")
        print()

    # Order matters for reproducibility of the random choice sequence.
    return random.choice([emit_struct, emit_class, emit_enum])()
# Seed the worklist with one root type, then drain it. Definers may append
# new (name, definer) pairs while running, so the list can grow as we go;
# iterating by index (rather than a for-loop over the list) handles that.
typesDefined.append(("Generated", defineRandomNominalType))
while nextToDefine < len(typesDefined):
    name, definer = typesDefined[nextToDefine]
    definer(name)
    nextToDefine += 1
7959d940a11946c564524bec99faf74be21dd564 | 2,182 | py | Python | src/richie/apps/persons/cms_plugins.py | sampaccoud/richie | 3d222aedab0636a84011dced568c5dcd48fc5b15 | [
"MIT"
] | null | null | null | src/richie/apps/persons/cms_plugins.py | sampaccoud/richie | 3d222aedab0636a84011dced568c5dcd48fc5b15 | [
"MIT"
] | null | null | null | src/richie/apps/persons/cms_plugins.py | sampaccoud/richie | 3d222aedab0636a84011dced568c5dcd48fc5b15 | [
"MIT"
] | null | null | null | """
Person CMS plugin
"""
from collections import defaultdict
from django.utils.translation import ugettext_lazy as _
from cms.plugin_base import CMSPluginBase
from cms.plugin_pool import plugin_pool
from cms.utils import get_language_from_request
from .models import PersonPluginModel
class PageExtensionPluginMixin:
    """
    A mixin to insert the plugins included in a page into another page's
    render context.

    A plugin represents a page inside another page: the content it renders is
    built from the plugins placed in the placeholders of the original page.
    This lets projects customize each page and plugin without touching models
    or database schemas.
    """

    def render(self, context, instance, current_placeholder):
        """
        Add to the plugin template context a dictionary mapping placeholder
        slots to the plugin instances of the page this plugin represents.
        """
        context = super().render(context, instance, current_placeholder)
        request_language = get_language_from_request(context["request"])
        plugins_by_slot = defaultdict(list)
        # Use "get_placeholders" to benefit from the cache mechanism
        for placeholder in instance.page.get_placeholders():
            if placeholder.slot == "maincontent":
                # Only the specific placeholders feed the plugin content
                continue
            plugins_by_slot[placeholder.slot].extend(
                plugin.get_bound_plugin()
                for plugin in placeholder.get_plugins(language=request_language)
            )
        context.update({"page": instance.page, "related_plugins": plugins_by_slot})
        return context
@plugin_pool.register_plugin
class PersonPlugin(PageExtensionPluginMixin, CMSPluginBase):
    """
    Person plugin displays a person's information on other pages
    """
    # Plugin model holding the link to the person page being represented.
    model = PersonPluginModel
    # Group under which the plugin is listed in the CMS plugin picker.
    module = _("Persons")
    render_template = "persons/plugins/person.html"
    # Rendered output may be cached by django CMS.
    cache = True
7959d9dfda9a9f3bb85c4196b604a21b5a4f682f | 22,590 | py | Python | corporate/lib/stripe.py | fearless0307/zulip | 378d14af7ea73a9a83c7245706cd918bec5a37bf | [
"Apache-2.0"
] | 2 | 2019-04-24T15:22:52.000Z | 2020-01-18T11:01:31.000Z | corporate/lib/stripe.py | fearless0307/zulip | 378d14af7ea73a9a83c7245706cd918bec5a37bf | [
"Apache-2.0"
] | 10 | 2019-02-26T11:10:42.000Z | 2019-02-26T14:30:24.000Z | corporate/lib/stripe.py | fearless0307/zulip | 378d14af7ea73a9a83c7245706cd918bec5a37bf | [
"Apache-2.0"
] | 1 | 2020-01-07T15:49:54.000Z | 2020-01-07T15:49:54.000Z | from datetime import datetime
from decimal import Decimal
from functools import wraps
import logging
import math
import os
from typing import Any, Callable, Dict, Optional, TypeVar, Tuple, cast
import ujson
from django.conf import settings
from django.db import transaction
from django.utils.translation import ugettext as _
from django.utils.timezone import now as timezone_now
from django.core.signing import Signer
import stripe
from zerver.lib.logging_util import log_to_file
from zerver.lib.timestamp import datetime_to_timestamp, timestamp_to_datetime
from zerver.lib.utils import generate_random_token
from zerver.models import Realm, UserProfile, RealmAuditLog
from corporate.models import Customer, CustomerPlan, LicenseLedger, \
get_active_plan
from zproject.settings import get_secret
# Stripe credentials come from Zulip's secrets store; setting stripe.api_key
# configures the stripe library globally for this process.
STRIPE_PUBLISHABLE_KEY = get_secret('stripe_publishable_key')
stripe.api_key = get_secret('stripe_secret_key')
# Billing events get their own log file (under the dev log dir in development).
BILLING_LOG_PATH = os.path.join('/var/log/zulip'
                                if not settings.DEVELOPMENT
                                else settings.DEVELOPMENT_LOG_DIRECTORY,
                                'billing.log')
billing_logger = logging.getLogger('corporate.stripe')
log_to_file(billing_logger, BILLING_LOG_PATH)
# Route the stripe library's own logger to the same file.
log_to_file(logging.getLogger('stripe'), BILLING_LOG_PATH)
# Type variable used by the catch_stripe_errors decorator below.
CallableT = TypeVar('CallableT', bound=Callable[..., Any])
MIN_INVOICED_LICENSES = 30
# Days a customer has to pay an invoice sent with billing='send_invoice'.
DEFAULT_INVOICE_DAYS_UNTIL_DUE = 30
def get_seat_count(realm: Realm) -> int:
    """Billable seats for a realm: all active non-guest humans, with guests
    counting one seat per five (whichever total is larger)."""
    active_humans = UserProfile.objects.filter(
        realm=realm, is_active=True, is_bot=False)
    non_guests = active_humans.filter(is_guest=False).count()
    guests = active_humans.filter(is_guest=True).count()
    return max(non_guests, math.ceil(guests / 5))
def sign_string(string: str) -> Tuple[str, str]:
    """Sign *string* with a freshly generated random salt.

    Returns (signed_string, salt); the salt is needed to unsign later.
    """
    salt = generate_random_token(64)
    return Signer(salt=salt).sign(string), salt
def unsign_string(signed_string: str, salt: str) -> str:
    """Inverse of sign_string: verify the signature (made with *salt*) and
    return the original string."""
    return Signer(salt=salt).unsign(signed_string)
# Be extremely careful changing this function. Historical billing periods
# are not stored anywhere, and are just computed on the fly using this
# function. Any change you make here should return the same value (or be
# within a few seconds) for basically any value from when the billing system
# went online to within a year from now.
def add_months(dt: datetime, months: int) -> datetime:
    """Return *dt* shifted forward by *months* months, clamping the day to
    the target month's length (e.g. Jan 31 + 1 month -> Feb 28)."""
    assert(months >= 0)
    # It's fine that the max day in Feb is 28 for leap years.
    MAX_DAY_FOR_MONTH = {1: 31, 2: 28, 3: 31, 4: 30, 5: 31, 6: 30,
                         7: 31, 8: 31, 9: 30, 10: 31, 11: 30, 12: 31}
    months_from_january = dt.month - 1 + months
    year = dt.year + months_from_january // 12
    month = months_from_january % 12 + 1
    day = min(dt.day, MAX_DAY_FOR_MONTH[month])
    # datetimes don't support leap seconds, so don't need to worry about those
    return dt.replace(year=year, month=month, day=day)
def next_month(billing_cycle_anchor: datetime, dt: datetime) -> datetime:
    """Return the first anchor-aligned monthly boundary falling 20-40 days
    after *dt*; raises AssertionError if no candidate qualifies."""
    estimated_months = round((dt - billing_cycle_anchor).days * 12. / 365)
    for candidate_months in range(max(estimated_months - 1, 0), estimated_months + 2):
        proposed_next_month = add_months(billing_cycle_anchor, candidate_months)
        if 20 < (proposed_next_month - dt).days < 40:
            return proposed_next_month
    raise AssertionError('Something wrong in next_month calculation with '
                         'billing_cycle_anchor: %s, dt: %s' % (billing_cycle_anchor, dt))
# TODO take downgrade into account
def next_renewal_date(plan: CustomerPlan, event_time: datetime) -> datetime:
    """Return the first billing-period boundary strictly after *event_time*,
    stepping forward from the plan's billing cycle anchor."""
    months_per_period = {
        CustomerPlan.ANNUAL: 12,
        CustomerPlan.MONTHLY: 1,
    }[plan.billing_schedule]
    boundary = plan.billing_cycle_anchor
    periods_elapsed = 1
    while boundary <= event_time:
        boundary = add_months(plan.billing_cycle_anchor, months_per_period * periods_elapsed)
        periods_elapsed += 1
    return boundary
# TODO take downgrade into account
def next_invoice_date(plan: CustomerPlan) -> datetime:
    """Return the first invoicing boundary strictly after the plan's current
    next_invoice_date.

    Automanaged plans are invoiced monthly regardless of billing schedule.
    """
    months_per_period = {
        CustomerPlan.ANNUAL: 12,
        CustomerPlan.MONTHLY: 1,
    }[plan.billing_schedule]
    if plan.automanage_licenses:
        months_per_period = 1
    boundary = plan.billing_cycle_anchor
    periods_elapsed = 1
    while boundary <= plan.next_invoice_date:
        boundary = add_months(plan.billing_cycle_anchor, months_per_period * periods_elapsed)
        periods_elapsed += 1
    return boundary
def renewal_amount(plan: CustomerPlan, event_time: datetime) -> Optional[int]:  # nocoverage: TODO
    """Amount in cents due at the plan's next renewal, or None if the number
    of licenses at renewal is not yet known."""
    if plan.fixed_price is not None:
        return plan.fixed_price
    latest_ledger_entry = add_plan_renewal_to_license_ledger_if_needed(plan, event_time)
    if latest_ledger_entry.licenses_at_next_renewal is None:
        return None
    assert(plan.price_per_license is not None)  # for mypy
    return latest_ledger_entry.licenses_at_next_renewal * plan.price_per_license
class BillingError(Exception):
    """Base class for billing failures; carries a user-facing message."""
    # error messages
    CONTACT_SUPPORT = _("Something went wrong. Please contact %s." % (settings.ZULIP_ADMINISTRATOR,))
    TRY_RELOADING = _("Something went wrong. Please reload the page.")
    # description is used only for tests
    def __init__(self, description: str, message: str=CONTACT_SUPPORT) -> None:
        self.description = description
        self.message = message
# Raised by catch_stripe_errors for stripe.error.CardError.
class StripeCardError(BillingError):
    pass
# Raised by catch_stripe_errors for rate-limit and API connection errors.
class StripeConnectionError(BillingError):
    pass
def catch_stripe_errors(func: CallableT) -> CallableT:
    """Decorator translating stripe.error.* exceptions into BillingError
    subclasses, logging the Stripe error details along the way."""
    @wraps(func)
    def wrapped(*args: Any, **kwargs: Any) -> Any:
        # In development (but not tests), fail loudly if Stripe isn't configured.
        if settings.DEVELOPMENT and not settings.TEST_SUITE:  # nocoverage
            if STRIPE_PUBLISHABLE_KEY is None:
                raise BillingError('missing stripe config', "Missing Stripe config. "
                                   "See https://zulip.readthedocs.io/en/latest/subsystems/billing.html.")
        try:
            return func(*args, **kwargs)
        # See https://stripe.com/docs/api/python#error_handling, though
        # https://stripe.com/docs/api/ruby#error_handling suggests there are additional fields, and
        # https://stripe.com/docs/error-codes gives a more detailed set of error codes
        except stripe.error.StripeError as e:
            err = e.json_body.get('error', {})
            billing_logger.error("Stripe error: %s %s %s %s" % (
                e.http_status, err.get('type'), err.get('code'), err.get('param')))
            # Card errors are the user's problem; pass Stripe's message through.
            if isinstance(e, stripe.error.CardError):
                # TODO: Look into i18n for this
                raise StripeCardError('card error', err.get('message'))
            if isinstance(e, stripe.error.RateLimitError) or \
                    isinstance(e, stripe.error.APIConnectionError):  # nocoverage TODO
                raise StripeConnectionError(
                    'stripe connection error',
                    _("Something went wrong. Please wait a few seconds and try again."))
            # Anything else (invalid request, auth, ...) is our bug or a Stripe
            # outage; surface the generic contact-support message.
            raise BillingError('other stripe error', BillingError.CONTACT_SUPPORT)
    return wrapped  # type: ignore # https://github.com/python/mypy/issues/1927
@catch_stripe_errors
def stripe_get_customer(stripe_customer_id: str) -> stripe.Customer:
    # Expand default_source so callers can inspect the card without a second API call.
    return stripe.Customer.retrieve(stripe_customer_id, expand=["default_source"])
@catch_stripe_errors
def do_create_stripe_customer(user: UserProfile, stripe_token: Optional[str]=None) -> Customer:
    """Create a Stripe customer for the user's realm and mirror it locally.

    Writes RealmAuditLog rows for the creation (and for the card attachment
    when a token is supplied) and marks the user as the billing admin.
    """
    realm = user.realm
    # We could do a better job of handling race conditions here, but if two
    # people from a realm try to upgrade at exactly the same time, the main
    # bad thing that will happen is that we will create an extra stripe
    # customer that we can delete or ignore.
    stripe_customer = stripe.Customer.create(
        description="%s (%s)" % (realm.string_id, realm.name),
        email=user.email,
        metadata={'realm_id': realm.id, 'realm_str': realm.string_id},
        source=stripe_token)
    # Use Stripe's creation timestamp so our audit log matches their records.
    event_time = timestamp_to_datetime(stripe_customer.created)
    with transaction.atomic():
        RealmAuditLog.objects.create(
            realm=user.realm, acting_user=user, event_type=RealmAuditLog.STRIPE_CUSTOMER_CREATED,
            event_time=event_time)
        if stripe_token is not None:
            RealmAuditLog.objects.create(
                realm=user.realm, acting_user=user, event_type=RealmAuditLog.STRIPE_CARD_CHANGED,
                event_time=event_time)
        customer, created = Customer.objects.update_or_create(realm=realm, defaults={
            'stripe_customer_id': stripe_customer.id})
        user.is_billing_admin = True
        user.save(update_fields=["is_billing_admin"])
    return customer
@catch_stripe_errors
def do_replace_payment_source(user: UserProfile, stripe_token: str) -> stripe.Customer:
    """Replace the Stripe payment source for the user's realm and audit-log it."""
    customer = Customer.objects.get(realm=user.realm)
    stripe_customer = stripe_get_customer(customer.stripe_customer_id)
    stripe_customer.source = stripe_token
    # Deletes existing card: https://stripe.com/docs/api#update_customer-source
    # This can also have other side effects, e.g. it will try to pay certain past-due
    # invoices: https://stripe.com/docs/api#update_customer
    updated_stripe_customer = stripe.Customer.save(stripe_customer)
    RealmAuditLog.objects.create(
        realm=user.realm, acting_user=user, event_type=RealmAuditLog.STRIPE_CARD_CHANGED,
        event_time=timezone_now())
    return updated_stripe_customer
# event_time should roughly be timezone_now(). Not designed to handle
# event_times in the past or future
# TODO handle downgrade
def add_plan_renewal_to_license_ledger_if_needed(plan: CustomerPlan, event_time: datetime) -> LicenseLedger:
    """If a billing-period boundary has passed, write the renewal ledger entry.

    Returns the most recent LicenseLedger entry: either the freshly created
    renewal entry, or the existing latest entry if no boundary was crossed.
    """
    last_ledger_entry = LicenseLedger.objects.filter(plan=plan).order_by('-id').first()
    # NOTE(review): assumes every plan already has at least one is_renewal
    # ledger entry (created at upgrade time) -- .first() would otherwise be
    # None and raise AttributeError. Confirm against callers.
    last_renewal = LicenseLedger.objects.filter(plan=plan, is_renewal=True) \
        .order_by('-id').first().event_time
    plan_renewal_date = next_renewal_date(plan, last_renewal)
    if plan_renewal_date <= event_time:
        # Carry licenses_at_next_renewal forward as the new period's license count.
        return LicenseLedger.objects.create(
            plan=plan, is_renewal=True, event_time=plan_renewal_date,
            licenses=last_ledger_entry.licenses_at_next_renewal,
            licenses_at_next_renewal=last_ledger_entry.licenses_at_next_renewal)
    return last_ledger_entry
# Returns Customer instead of stripe_customer so that we don't make a Stripe
# API call if there's nothing to update
def update_or_create_stripe_customer(user: UserProfile, stripe_token: Optional[str]=None) -> Customer:
    """Ensure the user's realm has a Customer with a Stripe id, optionally
    replacing the payment source; returns the (possibly new) Customer.

    Returns the local Customer rather than the stripe.Customer so we skip a
    Stripe API call when there is nothing to update.
    """
    existing_customer = Customer.objects.filter(realm=user.realm).first()
    if existing_customer is None or existing_customer.stripe_customer_id is None:
        return do_create_stripe_customer(user, stripe_token=stripe_token)
    if stripe_token is not None:
        do_replace_payment_source(user, stripe_token)
    return existing_customer
def compute_plan_parameters(
        automanage_licenses: bool, billing_schedule: int,
        discount: Optional[Decimal]) -> Tuple[datetime, datetime, datetime, int]:
    """Compute (billing_cycle_anchor, next_invoice_date, period_end,
    price_per_license) for a new plan starting now.

    *discount* is a percentage (e.g. Decimal(25) for 25% off).
    """
    # Everything in Stripe is stored as timestamps with 1 second resolution,
    # so standardize on 1 second resolution.
    # TODO talk about leapseconds?
    billing_cycle_anchor = timezone_now().replace(microsecond=0)
    if billing_schedule == CustomerPlan.ANNUAL:
        # TODO use variables to account for Zulip Plus
        price_per_license = 8000
        period_end = add_months(billing_cycle_anchor, 12)
    elif billing_schedule == CustomerPlan.MONTHLY:
        price_per_license = 800
        period_end = add_months(billing_cycle_anchor, 1)
    else:
        raise AssertionError('Unknown billing_schedule: {}'.format(billing_schedule))
    if discount is not None:
        # There are no fractional cents in Stripe, so round down to nearest integer.
        price_per_license = int(float(price_per_license * (1 - discount / 100)) + .00001)
    next_invoice_date = period_end
    if automanage_licenses:
        # Automanaged plans are invoiced monthly regardless of schedule.
        next_invoice_date = add_months(billing_cycle_anchor, 1)
    return billing_cycle_anchor, next_invoice_date, period_end, price_per_license
# Only used for cloud signups
@catch_stripe_errors
def process_initial_upgrade(user: UserProfile, licenses: int, automanage_licenses: bool,
                            billing_schedule: int, stripe_token: Optional[str]) -> None:
    """Upgrade the user's realm to Zulip Standard.

    Charges the card (if a token was given), creates the CustomerPlan and its
    initial LicenseLedger entry, creates/finalizes the Stripe invoice, and
    bumps the realm's plan type.
    """
    realm = user.realm
    customer = update_or_create_stripe_customer(user, stripe_token=stripe_token)
    if CustomerPlan.objects.filter(customer=customer, status=CustomerPlan.ACTIVE).exists():
        # Unlikely race condition from two people upgrading (clicking "Make payment")
        # at exactly the same time. Doesn't fully resolve the race condition, but having
        # a check here reduces the likelihood.
        billing_logger.warning(
            "Customer {} trying to upgrade, but has an active subscription".format(customer))
        raise BillingError('subscribing with existing subscription', BillingError.TRY_RELOADING)
    billing_cycle_anchor, next_invoice_date, period_end, price_per_license = compute_plan_parameters(
        automanage_licenses, billing_schedule, customer.default_discount)
    # The main design constraint in this function is that if you upgrade with a credit card, and the
    # charge fails, everything should be rolled back as if nothing had happened. This is because we
    # expect frequent card failures on initial signup.
    # Hence, if we're going to charge a card, do it at the beginning, even if we later may have to
    # adjust the number of licenses.
    charge_automatically = stripe_token is not None
    if charge_automatically:
        stripe_charge = stripe.Charge.create(
            amount=price_per_license * licenses,
            currency='usd',
            customer=customer.stripe_customer_id,
            description="Upgrade to Zulip Standard, ${} x {}".format(price_per_license/100, licenses),
            receipt_email=user.email,
            statement_descriptor='Zulip Standard')
        # Not setting a period start and end, but maybe we should? Unclear what will make things
        # most similar to the renewal case from an accounting perspective.
        # Negative invoice item offsets the charge already made above.
        stripe.InvoiceItem.create(
            amount=price_per_license * licenses * -1,
            currency='usd',
            customer=customer.stripe_customer_id,
            description="Payment (Card ending in {})".format(cast(stripe.Card, stripe_charge.source).last4),
            discountable=False)
    # TODO: The correctness of this relies on user creation, deactivation, etc being
    # in a transaction.atomic() with the relevant RealmAuditLog entries
    with transaction.atomic():
        # billed_licenses can greater than licenses if users are added between the start of
        # this function (process_initial_upgrade) and now
        billed_licenses = max(get_seat_count(realm), licenses)
        plan_params = {
            'automanage_licenses': automanage_licenses,
            'charge_automatically': charge_automatically,
            'price_per_license': price_per_license,
            'discount': customer.default_discount,
            'billing_cycle_anchor': billing_cycle_anchor,
            'billing_schedule': billing_schedule,
            'tier': CustomerPlan.STANDARD}
        plan = CustomerPlan.objects.create(
            customer=customer,
            next_invoice_date=next_invoice_date,
            **plan_params)
        ledger_entry = LicenseLedger.objects.create(
            plan=plan,
            is_renewal=True,
            event_time=billing_cycle_anchor,
            licenses=billed_licenses,
            licenses_at_next_renewal=billed_licenses)
        plan.invoiced_through = ledger_entry
        plan.save(update_fields=['invoiced_through'])
        RealmAuditLog.objects.create(
            realm=realm, acting_user=user, event_time=billing_cycle_anchor,
            event_type=RealmAuditLog.CUSTOMER_PLAN_CREATED,
            extra_data=ujson.dumps(plan_params))
    stripe.InvoiceItem.create(
        currency='usd',
        customer=customer.stripe_customer_id,
        description='Zulip Standard',
        discountable=False,
        period = {'start': datetime_to_timestamp(billing_cycle_anchor),
                  'end': datetime_to_timestamp(period_end)},
        quantity=billed_licenses,
        unit_amount=price_per_license)
    if charge_automatically:
        billing_method = 'charge_automatically'
        days_until_due = None
    else:
        billing_method = 'send_invoice'
        days_until_due = DEFAULT_INVOICE_DAYS_UNTIL_DUE
    stripe_invoice = stripe.Invoice.create(
        auto_advance=True,
        billing=billing_method,
        customer=customer.stripe_customer_id,
        days_until_due=days_until_due,
        statement_descriptor='Zulip Standard')
    stripe.Invoice.finalize_invoice(stripe_invoice)
    # Local import to avoid a circular import with zerver.lib.actions.
    from zerver.lib.actions import do_change_plan_type
    do_change_plan_type(realm, Realm.STANDARD)
def update_license_ledger_for_automanaged_plan(realm: Realm, plan: CustomerPlan,
                                               event_time: datetime) -> None:
    """Record the realm's current seat usage in the plan's license ledger."""
    latest_entry = add_plan_renewal_to_license_ledger_if_needed(plan, event_time)
    # todo: handle downgrade, where licenses_at_next_renewal should be 0
    licenses_at_next_renewal = get_seat_count(realm)
    # Licenses within a billing period can only ever grow.
    LicenseLedger.objects.create(
        plan=plan, event_time=event_time,
        licenses=max(licenses_at_next_renewal, latest_entry.licenses),
        licenses_at_next_renewal=licenses_at_next_renewal)
def update_license_ledger_if_needed(realm: Realm, event_time: datetime) -> None:
    """Update the license ledger for the realm's active plan, if the realm
    has a customer with an active, automanaged plan; otherwise do nothing."""
    customer = Customer.objects.filter(realm=realm).first()
    if customer is None:
        return
    plan = get_active_plan(customer)
    if plan is not None and plan.automanage_licenses:
        update_license_ledger_for_automanaged_plan(realm, plan, event_time)
def invoice_plan(plan: CustomerPlan, event_time: datetime) -> None:
    """Create Stripe invoice items for all uninvoiced ledger entries up to
    *event_time*, finalize an invoice if anything was billed, and advance
    plan.next_invoice_date."""
    # STARTED means a previous invoicing run crashed mid-way; don't guess.
    if plan.invoicing_status == CustomerPlan.STARTED:
        raise NotImplementedError('Plan with invoicing_status==STARTED needs manual resolution.')
    add_plan_renewal_to_license_ledger_if_needed(plan, event_time)
    assert(plan.invoiced_through is not None)
    licenses_base = plan.invoiced_through.licenses
    invoice_item_created = False
    for ledger_entry in LicenseLedger.objects.filter(plan=plan, id__gt=plan.invoiced_through.id,
                                                     event_time__lte=event_time).order_by('id'):
        price_args = {}  # type: Dict[str, int]
        if ledger_entry.is_renewal:
            if plan.fixed_price is not None:
                price_args = {'amount': plan.fixed_price}
            else:
                assert(plan.price_per_license is not None)  # needed for mypy
                price_args = {'unit_amount': plan.price_per_license,
                              'quantity': ledger_entry.licenses}
            description = "Zulip Standard - renewal"
        elif ledger_entry.licenses != licenses_base:
            assert(plan.price_per_license)
            last_renewal = LicenseLedger.objects.filter(
                plan=plan, is_renewal=True, event_time__lte=ledger_entry.event_time) \
                .order_by('-id').first().event_time
            period_end = next_renewal_date(plan, ledger_entry.event_time)
            # Prorate the extra licenses over the remainder of the period.
            proration_fraction = (period_end - ledger_entry.event_time) / (period_end - last_renewal)
            price_args = {'unit_amount': int(plan.price_per_license * proration_fraction + .5),
                          'quantity': ledger_entry.licenses - licenses_base}
            description = "Additional license ({} - {})".format(
                ledger_entry.event_time.strftime('%b %-d, %Y'), period_end.strftime('%b %-d, %Y'))
        if price_args:
            # Mark STARTED before the Stripe call so a crash here is visible
            # (and refuses further automatic invoicing, per the check above).
            plan.invoiced_through = ledger_entry
            plan.invoicing_status = CustomerPlan.STARTED
            plan.save(update_fields=['invoicing_status', 'invoiced_through'])
            # Idempotency key makes a retried Stripe call safe after a crash.
            idempotency_key = 'ledger_entry:{}'.format(ledger_entry.id)  # type: Optional[str]
            if settings.TEST_SUITE:
                idempotency_key = None
            stripe.InvoiceItem.create(
                currency='usd',
                customer=plan.customer.stripe_customer_id,
                description=description,
                discountable=False,
                period = {'start': datetime_to_timestamp(ledger_entry.event_time),
                          'end': datetime_to_timestamp(next_renewal_date(plan, ledger_entry.event_time))},
                idempotency_key=idempotency_key,
                **price_args)
            invoice_item_created = True
        plan.invoiced_through = ledger_entry
        plan.invoicing_status = CustomerPlan.DONE
        plan.save(update_fields=['invoicing_status', 'invoiced_through'])
        licenses_base = ledger_entry.licenses
    if invoice_item_created:
        if plan.charge_automatically:
            billing_method = 'charge_automatically'
            days_until_due = None
        else:
            billing_method = 'send_invoice'
            days_until_due = DEFAULT_INVOICE_DAYS_UNTIL_DUE
        stripe_invoice = stripe.Invoice.create(
            auto_advance=True,
            billing=billing_method,
            customer=plan.customer.stripe_customer_id,
            days_until_due=days_until_due,
            statement_descriptor='Zulip Standard')
        stripe.Invoice.finalize_invoice(stripe_invoice)
    plan.next_invoice_date = next_invoice_date(plan)
    plan.save(update_fields=['next_invoice_date'])
def invoice_plans_as_needed(event_time: datetime) -> None:
    """Invoice every plan whose next_invoice_date has arrived."""
    due_plans = CustomerPlan.objects.filter(next_invoice_date__lte=event_time)
    for due_plan in due_plans:
        invoice_plan(due_plan, event_time)
def attach_discount_to_realm(realm: Realm, discount: Decimal) -> None:
    """Set the default discount percentage on the realm's Customer,
    creating the Customer row if it does not exist yet."""
    Customer.objects.update_or_create(
        realm=realm, defaults={'default_discount': discount})
def process_downgrade(user: UserProfile) -> None:  # nocoverage
    # Intentionally a no-op for now; downgrades are handled manually.
    pass
def estimate_annual_recurring_revenue_by_realm() -> Dict[str, int]:  # nocoverage
    """Estimate annual recurring revenue in whole dollars, keyed by realm
    string_id, over all active plans."""
    annual_revenue = {}
    active_plans = CustomerPlan.objects.filter(
        status=CustomerPlan.ACTIVE).select_related('customer__realm')
    for plan in active_plans:
        # TODO: figure out what to do for plans that don't automatically
        # renew, but which probably will renew
        renewal_cents = renewal_amount(plan, timezone_now()) or 0
        if plan.billing_schedule == CustomerPlan.MONTHLY:
            renewal_cents *= 12
        # TODO: Decimal stuff
        annual_revenue[plan.customer.realm.string_id] = int(renewal_cents / 100)
    return annual_revenue
| 48.269231 | 108 | 0.698008 |
7959da76e520357b198a9a8187fd142bb3b87f2d | 12,140 | py | Python | splunk_eventgen/lib/generatorplugin.py | hexecute/eventgen | 9978ef0725ad63a717e0019c6b30c5a5d9086fe1 | [
"Apache-2.0"
] | null | null | null | splunk_eventgen/lib/generatorplugin.py | hexecute/eventgen | 9978ef0725ad63a717e0019c6b30c5a5d9086fe1 | [
"Apache-2.0"
] | 1 | 2019-06-28T01:40:16.000Z | 2019-06-28T01:40:16.000Z | splunk_eventgen/lib/generatorplugin.py | hexecute/eventgen | 9978ef0725ad63a717e0019c6b30c5a5d9086fe1 | [
"Apache-2.0"
] | null | null | null | from __future__ import division
import datetime
import logging
import logging.handlers
import pprint
import time
import random
import urllib
from xml.dom import minidom
from xml.parsers.expat import ExpatError
import httplib2
from eventgenoutput import Output
from eventgentimestamp import EventgenTimestamp
from timeparser import timeParser
class GeneratorPlugin(object):
sampleLines = None
sampleDict = None
    def __init__(self, sample):
        # Sample object this generator draws its configuration/events from.
        self._sample = sample
        self._setup_logging()
    def __str__(self):
        """Debug helper; the pretty-printed dump is disabled, so this
        currently returns an empty string."""
        # Eliminate recursive going back to parent
        # temp = dict([(key, value) for (key, value) in self.__dict__.items() if key != '_c'])
        # return pprint.pformat(temp)
        return ""
    def __repr__(self):
        # Delegate to __str__ so repr and str stay in sync.
        return self.__str__()
def __getstate__(self):
temp = self.__dict__
if getattr(self, 'logger', None):
temp.pop('logger', None)
return temp
def __setstate__(self, d):
self.__dict__ = d
self._setup_logging()
def build_events(self, eventsDict, startTime, earliest, latest, ignore_tokens=False):
"""Ready events for output by replacing tokens and updating the output queue"""
# Replace tokens first so that perDayVolume evaluates the correct event length
send_objects = self.replace_tokens(eventsDict, earliest, latest, ignore_tokens=ignore_tokens)
try:
self._out.bulksend(send_objects)
self._sample.timestamp = None
except Exception as e:
self.logger.exception("Exception {} happened.".format(type(e)))
raise e
try:
# TODO: Change this logic so that we don't lose all events if an exception is hit (try/except/break?)
endTime = datetime.datetime.now()
timeDiff = endTime - startTime
timeDiffFrac = "%d.%06d" % (timeDiff.seconds, timeDiff.microseconds)
self.logger.debug("Interval complete, flushing feed")
self._out.flush(endOfInterval=True)
self.logger.debug("Generation of sample '%s' in app '%s' completed in %s seconds." %
(self._sample.name, self._sample.app, timeDiffFrac))
except Exception as e:
self.logger.exception("Exception {} happened.".format(type(e)))
raise e
def _setup_logging(self):
self.logger = logging.getLogger('eventgen')
def updateConfig(self, config, outqueue):
self.config = config
self.outputQueue = outqueue
# TODO: Figure out if this maxQueueLength needs to even be set here. I think this should exist on the output
# process and the generator shouldn't have anything to do with this.
self.outputPlugin = self.config.getPlugin('output.' + self._sample.outputMode, self._sample)
if self._sample.maxQueueLength == 0:
self._sample.maxQueueLength = self.outputPlugin.MAXQUEUELENGTH
# Output = output process, not the plugin. The plugin is loaded by the output process.
self._out = Output(self._sample)
self._out.updateConfig(self.config)
if self.outputPlugin.useOutputQueue or self.config.useOutputQueue:
self._out._update_outputqueue(self.outputQueue)
def updateCounts(self, sample=None, count=None, start_time=None, end_time=None):
if sample:
self._sample = sample
self.count = count
self.start_time = start_time
self.end_time = end_time
def setOutputMetadata(self, event):
# self.logger.debug("Sample Index: %s Host: %s Source: %s Sourcetype: %s" %
# (self.index, self.host, self.source, self.sourcetype))
# self.logger.debug("Event Index: %s Host: %s Source: %s Sourcetype: %s" %
# (sampleDict[x]['index'], sampleDict[x]['host'], sampleDict[x]['source'],
# sampleDict[x]['sourcetype']))
if self._sample.sampletype == 'csv' and (event['index'] != self._sample.index
or event['host'] != self._sample.host
or event['source'] != self._sample.source
or event['sourcetype'] != self._sample.sourcetype):
self._sample.index = event['index']
self._sample.host = event['host']
# Allow randomizing the host:
if self._sample.hostToken:
self.host = self._sample.hostToken.replace(self.host)
self._sample.source = event['source']
self._sample.sourcetype = event['sourcetype']
self.logger.debug("Setting CSV parameters. index: '%s' host: '%s' source: '%s' sourcetype: '%s'" %
(self._sample.index, self._sample.host, self._sample.source, self._sample.sourcetype))
def setupBackfill(self):
"""
Called by non-queueable plugins or by the timer to setup backfill times per config or based on a Splunk Search
"""
s = self._sample
if s.backfill is not None:
try:
s.backfillts = timeParser(s.backfill, timezone=s.timezone)
self.logger.info("Setting up backfill of %s (%s)" % (s.backfill, s.backfillts))
except Exception as ex:
self.logger.error("Failed to parse backfill '%s': %s" % (s.backfill, ex))
raise
if s.backfillSearch is not None:
if s.backfillSearchUrl is None:
try:
s.backfillSearchUrl = c.getSplunkUrl(s)[0] # noqa, we update c in the globals() dict
except ValueError:
self.logger.error(
"Backfill Search URL not specified for sample '%s', not running backfill search" % s.name)
if not s.backfillSearch.startswith('search'):
s.backfillSearch = 'search ' + s.backfillSearch
s.backfillSearch += '| head 1 | table _time'
if s.backfillSearchUrl is not None:
self.logger.debug(
"Searching Splunk URL '%s/services/search/jobs' with search '%s' with sessionKey '%s'" %
(s.backfillSearchUrl, s.backfillSearch, s.sessionKey))
results = httplib2.Http(disable_ssl_certificate_validation=True).request(
s.backfillSearchUrl + '/services/search/jobs', 'POST', headers={
'Authorization': 'Splunk %s' % s.sessionKey}, body=urllib.urlencode({
'search': s.backfillSearch, 'earliest_time': s.backfill, 'exec_mode': 'oneshot'}))[1]
try:
temptime = minidom.parseString(results).getElementsByTagName('text')[0].childNodes[0].nodeValue
# self.logger.debug("Time returned from backfill search: %s" % temptime)
# Results returned look like: 2013-01-16T10:59:15.411-08:00
# But the offset in time can also be +, so make sure we strip that out first
if len(temptime) > 0:
if temptime.find('+') > 0:
temptime = temptime.split('+')[0]
temptime = '-'.join(temptime.split('-')[0:3])
s.backfillts = datetime.datetime.strptime(temptime, '%Y-%m-%dT%H:%M:%S.%f')
self.logger.debug("Backfill search results: '%s' value: '%s' time: '%s'" %
(pprint.pformat(results), temptime, s.backfillts))
except (ExpatError, IndexError):
pass
if s.end is not None:
parsed = False
try:
s.end = int(s.end)
s.endts = None
parsed = True
except ValueError:
self.logger.debug("Failed to parse end '%s' for sample '%s', treating as end time" % (s.end, s.name))
if not parsed:
try:
s.endts = timeParser(s.end, timezone=s.timezone)
self.logger.info("Ending generation at %s (%s)" % (s.end, s.endts))
except Exception as ex:
self.logger.error(
"Failed to parse end '%s' for sample '%s', treating as number of executions" % (s.end, s.name))
raise
def run(self, output_counter=None):
if output_counter is not None and hasattr(self.config, 'outputCounter') and self.config.outputCounter:
# Use output_counter to calculate throughput
self._out.setOutputCounter(output_counter)
self.gen(count=self.count, earliest=self.start_time, latest=self.end_time, samplename=self._sample.name)
# TODO: Make this some how handle an output queue and support intervals and a master queue
# Just double check to see if there's something in queue to flush out at the end of run
if len(self._out._queue) > 0:
self.logger.debug("Queue is not empty, flush out at the end of each run")
self._out.flush()
def replace_tokens(self, eventsDict, earliest, latest, ignore_tokens=False):
"""Iterate event tokens and replace them. This will help calculations for event size when tokens are used."""
eventcount = 0
send_events = []
total_count = len(eventsDict)
index = None
if total_count > 0:
index = random.choice(self._sample.index_list) if len(self._sample.index_list) else eventsDict[0]['index']
for targetevent in eventsDict:
event = targetevent["_raw"]
# Maintain state for every token in a given event, Hash contains keys for each file name which is
# assigned a list of values picked from a random line in that file
mvhash = {}
host = targetevent['host']
if hasattr(self._sample, "sequentialTimestamp") and self._sample.sequentialTimestamp and \
self._sample.generator != 'perdayvolumegenerator':
pivot_timestamp = EventgenTimestamp.get_sequential_timestamp(earliest, latest, eventcount, total_count)
else:
pivot_timestamp = EventgenTimestamp.get_random_timestamp(earliest, latest)
# Iterate tokens
if not ignore_tokens:
for token in self._sample.tokens:
token.mvhash = mvhash
event = token.replace(event, et=earliest, lt=latest, s=self._sample,
pivot_timestamp=pivot_timestamp)
if token.replacementType == 'timestamp' and self._sample.timeField != '_raw':
self._sample.timestamp = None
token.replace(targetevent[self._sample.timeField], et=self._sample.earliestTime(),
lt=self._sample.latestTime(), s=self._sample, pivot_timestamp=pivot_timestamp)
if self._sample.hostToken:
# clear the host mvhash every time, because we need to re-randomize it
self._sample.hostToken.mvhash = {}
if self._sample.hostToken:
host = self._sample.hostToken.replace(host, s=self._sample)
try:
time_val = int(time.mktime(pivot_timestamp.timetuple()))
except Exception:
time_val = int(time.mktime(self._sample.now().timetuple()))
temp_event = {
'_raw': event, 'index': index, 'host': host, 'hostRegex': self._sample.hostRegex,
'source': targetevent['source'], 'sourcetype': targetevent['sourcetype'], '_time': time_val}
send_events.append(temp_event)
return send_events
def load():
    """Entry point used by eventgen's plugin loader: return the plugin class."""
    return GeneratorPlugin
| 50.373444 | 119 | 0.583526 |
7959dad25b40e0eb724649109c434a7da8004a4e | 29,067 | py | Python | lib/spack/spack/compilers/__init__.py | robertu94/spack | 4cf1a9620216f0c5f3db691ce1fe629484742918 | [
"ECL-2.0",
"Apache-2.0",
"MIT-0",
"MIT"
] | null | null | null | lib/spack/spack/compilers/__init__.py | robertu94/spack | 4cf1a9620216f0c5f3db691ce1fe629484742918 | [
"ECL-2.0",
"Apache-2.0",
"MIT-0",
"MIT"
] | null | null | null | lib/spack/spack/compilers/__init__.py | robertu94/spack | 4cf1a9620216f0c5f3db691ce1fe629484742918 | [
"ECL-2.0",
"Apache-2.0",
"MIT-0",
"MIT"
] | null | null | null | # Copyright 2013-2021 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
"""This module contains functions related to finding compilers on the
system and configuring Spack to use multiple compilers.
"""
import collections
import functools
import itertools
import multiprocessing.pool
import os
from typing import Dict  # novm
import six
import archspec.cpu
import llnl.util.filesystem as fs
import llnl.util.lang
import llnl.util.tty as tty
import spack.architecture
import spack.compiler
import spack.config
import spack.error
import spack.paths
import spack.spec
from spack.util.environment import get_path
from spack.util.naming import mod_to_class
#: Compiler attributes serialized under 'paths' in compilers.yaml.
_path_instance_vars = ['cc', 'cxx', 'f77', 'fc']
#: Per-language flag attributes serialized under 'flags' in compilers.yaml.
_flags_instance_vars = ['cflags', 'cppflags', 'cxxflags', 'fflags']
_other_instance_vars = ['modules', 'operating_system', 'environment',
                        'implicit_rpaths', 'extra_rpaths']
#: Module-level cache of the merged compiler configuration (list of dicts).
_cache_config_file = []
# TODO: Caches at module level make it difficult to mock configurations in
# TODO: unit tests. It might be worth reworking their implementation.
#: cache of compilers constructed from config data, keyed by config entry id.
_compiler_cache = {}  # type: Dict[str, spack.compiler.Compiler]
#: Compilers whose providing package has a different name than the compiler.
_compiler_to_pkg = {
    'clang': 'llvm+clang',
    'oneapi': 'intel-oneapi-compilers'
}
def pkg_spec_for_compiler(cspec):
    """Return the spec of the package that provides the compiler."""
    # Some compilers are provided by a package with a different name.
    pkg_name = _compiler_to_pkg.get(cspec.name, cspec.name)
    return spack.spec.Spec('%s@%s' % (pkg_name, cspec.versions))
def _auto_compiler_spec(function):
    """Decorator: coerce the first argument into a CompilerSpec.

    Lets callers pass a plain string (or anything CompilerSpec accepts)
    where a CompilerSpec is expected.
    """
    # functools.wraps preserves the wrapped function's __name__/__doc__,
    # which the bare wrapper previously clobbered (hurting introspection
    # and generated documentation).
    @functools.wraps(function)
    def converter(cspec_like, *args, **kwargs):
        if not isinstance(cspec_like, spack.spec.CompilerSpec):
            cspec_like = spack.spec.CompilerSpec(cspec_like)
        return function(cspec_like, *args, **kwargs)
    return converter
def _to_dict(compiler):
    """Return a dict version of compiler suitable to insert in YAML."""
    paths = {attr: getattr(compiler, attr, None)
             for attr in _path_instance_vars}
    flags = {fname: fvals for fname, fvals in compiler.flags}
    flags.update({attr: getattr(compiler, attr, None)
                  for attr in _flags_instance_vars
                  if hasattr(compiler, attr)})
    # Key order matches what is written to compilers.yaml.
    entry = {
        'spec': str(compiler.spec),
        'paths': paths,
        'flags': flags,
        'operating_system': str(compiler.operating_system),
        'target': str(compiler.target),
        'modules': compiler.modules or [],
        'environment': compiler.environment or {},
        'extra_rpaths': compiler.extra_rpaths or [],
    }
    # Optional entries are emitted only when they carry information.
    if compiler.enable_implicit_rpaths is not None:
        entry['implicit_rpaths'] = compiler.enable_implicit_rpaths
    if compiler.alias:
        entry['alias'] = compiler.alias
    return {'compiler': entry}
def get_compiler_config(scope=None, init_config=True):
    """Return the compiler configuration for the specified architecture.

    Arguments:
        scope: configuration scope to read.
        init_config: if True and no compilers are configured, run a
            compiler search and persist the results before returning.
    """
    def init_compiler_config():
        """Compiler search used when Spack has no compilers."""
        compilers = find_compilers()
        compilers_dict = []
        for compiler in compilers:
            compilers_dict.append(_to_dict(compiler))
        spack.config.set('compilers', compilers_dict, scope=scope)
    config = spack.config.get('compilers', scope=scope)
    # Update the configuration if there are currently no compilers
    # configured. Avoid updating automatically if there ARE site
    # compilers configured but no user ones.
    if not config and init_config:
        if scope is None:
            # We know no compilers were configured in any scope.
            init_compiler_config()
            config = spack.config.get('compilers', scope=scope)
        elif scope == 'user':
            # Check the site config and update the user config if
            # nothing is configured at the site level.
            site_config = spack.config.get('compilers', scope='site')
            sys_config = spack.config.get('compilers', scope='system')
            if not site_config and not sys_config:
                init_compiler_config()
                config = spack.config.get('compilers', scope=scope)
        return config
    elif config:
        return config
    else:
        return []  # Return empty list which we will later append to.
def compiler_config_files():
    """Return config filenames for every file scope that defines compilers."""
    config = spack.config.config
    return [
        config.get_config_filename(scope.name, 'compilers')
        for scope in config.file_scopes
        if config.get('compilers', scope=scope.name)
    ]
def add_compilers_to_config(compilers, scope=None, init_config=True):
    """Add compilers to the config for the specified architecture.

    Arguments:
        compilers: a list of Compiler objects.
        scope: configuration scope to modify.
    """
    global _cache_config_file
    compiler_config = get_compiler_config(scope, init_config)
    compiler_config.extend(_to_dict(compiler) for compiler in compilers)
    # Keep the module-level cache in sync with what we are about to write.
    _cache_config_file = compiler_config
    spack.config.set('compilers', compiler_config, scope=scope)
@_auto_compiler_spec
def remove_compiler_from_config(compiler_spec, scope=None):
    """Remove compilers from the config, by spec.

    Arguments:
        compiler_spec: CompilerSpec (or string) identifying what to remove.
        scope: configuration scope to modify.

    Raises:
        CompilerSpecInsufficientlySpecificError: if no configured compiler
            matches ``compiler_spec``.
    """
    # Need a better way for this
    global _cache_config_file
    compiler_config = get_compiler_config(scope)
    config_length = len(compiler_config)
    filtered_compiler_config = [
        comp for comp in compiler_config
        if spack.spec.CompilerSpec(comp['compiler']['spec']) != compiler_spec]
    # Update the cache for changes
    _cache_config_file = filtered_compiler_config
    if len(filtered_compiler_config) == config_length:  # No items removed
        # BUG FIX: the exception was previously constructed but never
        # raised, so a spec matching nothing was silently ignored.
        raise CompilerSpecInsufficientlySpecificError(compiler_spec)
    spack.config.set('compilers', filtered_compiler_config, scope=scope)
def all_compilers_config(scope=None, init_config=True):
    """Return a set of specs for all the compiler versions currently
       available to build with.  These are instances of CompilerSpec.
    """
    # Lazily populate, then reuse, the module-level cache of the merged
    # compiler configuration so the file is not re-read on every call.
    global _cache_config_file
    if not _cache_config_file:
        _cache_config_file = get_compiler_config(scope, init_config)
    return _cache_config_file
def all_compiler_specs(scope=None, init_config=True):
    """Return compiler specs from the merged config."""
    entries = all_compilers_config(scope, init_config)
    return [spack.spec.CompilerSpec(entry['compiler']['spec'])
            for entry in entries]
def find_compilers(path_hints=None):
    """Returns the list of compilers found in the paths given as arguments.

    Args:
        path_hints (list or None): list of path hints where to look for.
            A sensible default based on the ``PATH`` environment variable
            will be used if the value is None

    Returns:
        List of compilers found
    """
    if path_hints is None:
        path_hints = get_path('PATH')
    default_paths = fs.search_paths_for_executables(*path_hints)
    # To detect the version of the compilers, we dispatch a certain number
    # of function calls to different workers. Here we construct the list
    # of arguments for each call.
    arguments = []
    for o in all_os_classes():
        search_paths = getattr(o, 'compiler_search_paths', default_paths)
        arguments.extend(arguments_to_detect_version_fn(o, search_paths))
    # Here we map the function arguments to the corresponding calls
    # (map() blocks until all results are in, so close() is enough cleanup).
    tp = multiprocessing.pool.ThreadPool()
    try:
        detected_versions = tp.map(detect_version, arguments)
    finally:
        tp.close()
    def valid_version(item):
        # Keep only (value, None) results; log each detection error.
        value, error = item
        if error is None:
            return True
        try:
            # This will fail on Python 2.6 if a non ascii
            # character is in the error
            tty.debug(error)
        except UnicodeEncodeError:
            pass
        return False
    def remove_errors(item):
        # Strip the (always-None here) error component from a result tuple.
        value, _ = item
        return value
    return make_compiler_list(
        map(remove_errors, filter(valid_version, detected_versions))
    )
def supported_compilers():
    """Return a set of names of compilers supported by Spack.

    See available_compilers() to get a list of all the available
    versions of supported compilers.
    """
    module_names = llnl.util.lang.list_modules(spack.paths.compilers_path)
    # The module is named `apple_clang` (a valid python identifier), but
    # the compiler itself is called `apple-clang`.
    return sorted('apple-clang' if name == 'apple_clang' else name
                  for name in module_names)
@_auto_compiler_spec
def supported(compiler_spec):
    """Test if a particular compiler is supported."""
    supported_names = supported_compilers()
    return compiler_spec.name in supported_names
@_auto_compiler_spec
def find(compiler_spec, scope=None, init_config=True):
    """Return specs of available compilers that match the supplied
       compiler spec.  Return an empty list if nothing found."""
    candidates = all_compiler_specs(scope, init_config)
    return [spec for spec in candidates if spec.satisfies(compiler_spec)]
@_auto_compiler_spec
def find_specs_by_arch(compiler_spec, arch_spec, scope=None, init_config=True):
    """Return specs of available compilers that match the supplied
       compiler spec.  Return an empty list if nothing found."""
    matching = compilers_for_spec(compiler_spec, arch_spec, scope, True,
                                  init_config)
    return [compiler.spec for compiler in matching]
def all_compilers(scope=None):
    """Return Compiler objects for every entry in the compiler config."""
    return [_compiler_from_config_entry(entry['compiler'])
            for entry in get_compiler_config(scope)]
@_auto_compiler_spec
def compilers_for_spec(compiler_spec, arch_spec=None, scope=None,
                       use_cache=True, init_config=True):
    """This gets all compilers that satisfy the supplied CompilerSpec.
       Returns an empty list if none are found.
    """
    # The cached reader and the direct reader share a signature.
    reader = all_compilers_config if use_cache else get_compiler_config
    config = reader(scope, init_config)
    compilers = []
    for cspec in set(find(compiler_spec, scope, init_config)):
        compilers.extend(get_compilers(config, cspec, arch_spec))
    return compilers
def compilers_for_arch(arch_spec, scope=None):
    """Return all configured compilers matching the given architecture."""
    return list(get_compilers(all_compilers_config(scope),
                              arch_spec=arch_spec))
class CacheReference(object):
    """This acts as a hashable reference to any object (regardless of whether
       the object itself is hashable) and also prevents the object from being
       garbage-collected (so if two CacheReference objects are equal, they
       will refer to the same object, since it will not have been gc'ed since
       the creation of the first CacheReference).
    """
    def __init__(self, val):
        # Keep a strong reference so id(val) stays valid for our lifetime.
        self.val = val
        self.id = id(val)
    def __hash__(self):
        return self.id
    def __eq__(self, other):
        if not isinstance(other, CacheReference):
            return False
        return self.id == other.id
def compiler_from_dict(items):
    """Instantiate a Compiler from one compilers.yaml 'compiler' entry.

    Raises InvalidCompilerConfigurationError if any of the required
    path entries (cc/cxx/f77/fc) is missing.
    """
    cspec = spack.spec.CompilerSpec(items['spec'])
    os = items.get('operating_system', None)
    target = items.get('target', None)
    if not ('paths' in items and
            all(n in items['paths'] for n in _path_instance_vars)):
        raise InvalidCompilerConfigurationError(cspec)
    cls = class_for_compiler_name(cspec.name)
    compiler_paths = []
    for c in _path_instance_vars:
        compiler_path = items['paths'][c]
        # YAML may carry the literal string 'None' for an absent compiler.
        if compiler_path != 'None':
            compiler_paths.append(compiler_path)
        else:
            compiler_paths.append(None)
    mods = items.get('modules')
    if mods == 'None':
        mods = []
    alias = items.get('alias', None)
    compiler_flags = items.get('flags', {})
    environment = items.get('environment', {})
    extra_rpaths = items.get('extra_rpaths', [])
    implicit_rpaths = items.get('implicit_rpaths', None)
    # Starting with c22a145, 'implicit_rpaths' was a list. Now it is a
    # boolean which can be set by the user to disable all automatic
    # RPATH insertion of compiler libraries
    if implicit_rpaths is not None and not isinstance(implicit_rpaths, bool):
        implicit_rpaths = None
    return cls(cspec, os, target, compiler_paths, mods, alias,
               environment, extra_rpaths,
               enable_implicit_rpaths=implicit_rpaths,
               **compiler_flags)
def _compiler_from_config_entry(items):
    """Note this is intended for internal use only.  To avoid re-parsing
       the same config dictionary this keeps track of its location in
       memory.  If you provide the same dictionary twice it will return
       the same Compiler object (regardless of whether the dictionary
       entries have changed).
    """
    # CacheReference keys on the dict's identity, not its contents.
    key = CacheReference(items)
    if key not in _compiler_cache:
        _compiler_cache[key] = compiler_from_dict(items)
    return _compiler_cache[key]
def get_compilers(config, cspec=None, arch_spec=None):
    """Filter config entries by spec/arch and build Compiler objects.

    ``config`` is a list of {'compiler': {...}} dicts; ``cspec`` and
    ``arch_spec`` are optional filters.
    """
    compilers = []
    for items in config:
        items = items['compiler']
        if cspec and items['spec'] != str(cspec):
            continue
        # If an arch spec is given, confirm that this compiler
        # is for the given operating system
        os = items.get('operating_system', None)
        if arch_spec and os != arch_spec.os:
            continue
        # If an arch spec is given, confirm that this compiler
        # is for the given target. If the target is 'any', match
        # any given arch spec. If the compiler has no assigned
        # target this is an old compiler config file, skip this logic.
        target = items.get('target', None)
        try:
            current_target = archspec.cpu.TARGETS[str(arch_spec.target)]
            family = str(current_target.family)
        except KeyError:
            # TODO: Check if this exception handling makes sense, or if we
            # TODO: need to change / refactor tests
            family = arch_spec.target
        except AttributeError:
            # arch_spec is None: str(None.target) above raised, and the
            # target filter below is skipped entirely.
            assert arch_spec is None
        if arch_spec and target and (target != family and target != 'any'):
            # If the family of the target is the family we are seeking,
            # there's an error in the underlying configuration
            if archspec.cpu.TARGETS[target].family == family:
                msg = ('the "target" field in compilers.yaml accepts only '
                       'target families [replace "{0}" with "{1}"'
                       ' in "{2}" specification]')
                msg = msg.format(str(target), family, items.get('spec', '??'))
                raise ValueError(msg)
            continue
        compilers.append(_compiler_from_config_entry(items))
    return compilers
@_auto_compiler_spec
def compiler_for_spec(compiler_spec, arch_spec):
    """Get the compiler that satisfies compiler_spec.  compiler_spec must
       be concrete.

    Raises NoCompilerForSpecError when nothing matches; when several
    compilers match, the first one is returned and a debug message is
    emitted.
    """
    assert compiler_spec.concrete
    assert arch_spec.concrete
    compilers = compilers_for_spec(compiler_spec, arch_spec=arch_spec)
    if len(compilers) < 1:
        raise NoCompilerForSpecError(compiler_spec, arch_spec.os)
    if len(compilers) > 1:
        # BUG FIX: the two pieces used to be joined without a separator,
        # producing e.g. "...gcc@9.3.0for architecture...".
        msg = 'Multiple definitions of compiler %s' % compiler_spec
        msg += ' for architecture %s:\n %s' % (arch_spec, compilers)
        tty.debug(msg)
    return compilers[0]
@_auto_compiler_spec
def get_compiler_duplicates(compiler_spec, arch_spec):
    """Map config filenames to the matching compilers each one defines."""
    config = spack.config.config
    duplicates_by_file = {}
    for scope in config.scopes:
        found = compilers_for_spec(compiler_spec, arch_spec=arch_spec,
                                   scope=scope, use_cache=False)
        if found:
            filename = config.get_config_filename(scope, 'compilers')
            duplicates_by_file[filename] = found
    return duplicates_by_file
@llnl.util.lang.memoized
def class_for_compiler_name(compiler_name):
    """Given a compiler module name, get the corresponding Compiler class."""
    assert supported(compiler_name)
    # Hack to be able to call the compiler `apple-clang` while still
    # using a valid python name for the module
    submodule_name = compiler_name
    if compiler_name == 'apple-clang':
        submodule_name = compiler_name.replace('-', '_')
    module_name = '.'.join(['spack', 'compilers', submodule_name])
    # fromlist forces __import__ to return the leaf submodule instead of
    # the top-level 'spack' package.
    module_obj = __import__(module_name, fromlist=[None])
    cls = getattr(module_obj, mod_to_class(compiler_name))
    # make a note of the name in the module so we can get to it easily.
    cls.name = compiler_name
    return cls
def all_os_classes():
    """
    Return the list of classes for all operating systems available on
    this platform
    """
    platform = spack.architecture.platform()
    return list(platform.operating_sys.values())
def all_compiler_types():
    """Return the Compiler subclass for every supported compiler name."""
    return [class_for_compiler_name(name) for name in supported_compilers()]
#: Gathers the attribute values by which a detected compiler is considered
#: unique in Spack.
#:
#: - os: the operating system
#: - compiler_name: the name of the compiler (e.g. 'gcc', 'clang', etc.)
#: - version: the version of the compiler
#:
CompilerID = collections.namedtuple(
    'CompilerID', ['os', 'compiler_name', 'version']
)
#: Variations on a matched compiler name (the regexp groups captured around
#: the base compiler name by the per-language search regexps)
NameVariation = collections.namedtuple('NameVariation', ['prefix', 'suffix'])
#: Groups together the arguments needed by `detect_version`. The four entries
#: in the tuple are:
#:
#: - id: An instance of the CompilerID named tuple (version can be set to None
#:   as it will be detected later)
#: - variation: a NameVariation for file being tested
#: - language: compiler language being tested (one of 'cc', 'cxx', 'fc', 'f77')
#: - path: full path to the executable being tested
#:
DetectVersionArgs = collections.namedtuple(
    'DetectVersionArgs', ['id', 'variation', 'language', 'path']
)
def arguments_to_detect_version_fn(operating_system, paths):
    """Returns a list of DetectVersionArgs tuples to be used in a
    corresponding function to detect compiler versions.

    The ``operating_system`` instance can customize the behavior of this
    function by providing a method called with the same name.

    Args:
        operating_system (spack.operating_systems.OperatingSystem): the operating system
            on which we are looking for compilers
        paths: paths to search for compilers

    Returns:
        List of DetectVersionArgs tuples. Each item in the list will be later
        mapped to the corresponding function call to detect the version of the
        compilers in this OS.
    """
    def _default(search_paths):
        # Cross every candidate executable with every compiler/language
        # regexp; a regexp match yields one DetectVersionArgs to probe.
        command_arguments = []
        files_to_be_tested = fs.files_in(*search_paths)
        for compiler_name in spack.compilers.supported_compilers():
            compiler_cls = class_for_compiler_name(compiler_name)
            for language in ('cc', 'cxx', 'f77', 'fc'):
                # Select only the files matching a regexp
                for (file, full_path), regexp in itertools.product(
                        files_to_be_tested,
                        compiler_cls.search_regexps(language)
                ):
                    match = regexp.match(file)
                    if match:
                        compiler_id = CompilerID(
                            operating_system, compiler_name, None
                        )
                        detect_version_args = DetectVersionArgs(
                            id=compiler_id,
                            variation=NameVariation(*match.groups()),
                            language=language, path=full_path
                        )
                        command_arguments.append(detect_version_args)
        return command_arguments
    # The OS object may override the whole argument-construction strategy.
    fn = getattr(
        operating_system, 'arguments_to_detect_version_fn', _default
    )
    return fn(paths)
def detect_version(detect_version_args):
    """Computes the version of a compiler and adds it to the information
    passed as input.

    As this function is meant to be executed by worker processes it won't
    raise any exception but instead will return a (value, error) tuple that
    needs to be checked by the code dispatching the calls.

    Args:
        detect_version_args (DetectVersionArgs): information on the
            compiler for which we should detect the version.

    Returns:
        A ``(DetectVersionArgs, error)`` tuple. If ``error`` is ``None`` the
        version of the compiler was computed correctly and the first argument
        of the tuple will contain it. Otherwise ``error`` is a string
        containing an explanation on why the version couldn't be computed.
    """
    def _default(fn_args):
        compiler_id = fn_args.id
        language = fn_args.language
        compiler_cls = class_for_compiler_name(compiler_id.compiler_name)
        path = fn_args.path
        # Get compiler names and the callback to detect their versions
        # (e.g. Compiler.cc_version for language 'cc').
        callback = getattr(compiler_cls, '{0}_version'.format(language))
        try:
            version = callback(path)
            if version and six.text_type(version).strip() \
                    and version != 'unknown':
                value = fn_args._replace(
                    id=compiler_id._replace(version=version)
                )
                return value, None
            error = "Couldn't get version for compiler {0}".format(path)
        except spack.util.executable.ProcessError as e:
            error = "Couldn't get version for compiler {0}\n".format(path) + \
                six.text_type(e)
        except Exception as e:
            # Catching "Exception" here is fine because it just
            # means something went wrong running a candidate executable.
            error = "Error while executing candidate compiler {0}" \
                    "\n{1}: {2}".format(path, e.__class__.__name__,
                                        six.text_type(e))
        return None, error
    # The OS object may supply its own detection routine.
    operating_system = detect_version_args.id.os
    fn = getattr(operating_system, 'detect_version', _default)
    return fn(detect_version_args)
def make_compiler_list(detected_versions):
    """Process a list of detected versions and turn them into a list of
    compiler specs.

    Args:
        detected_versions (list): list of DetectVersionArgs containing a
            valid version

    Returns:
        list: list of Compiler objects
    """
    group_fn = lambda x: (x.id, x.variation, x.language)
    # groupby() requires its input to be sorted by the same key.
    sorted_compilers = sorted(detected_versions, key=group_fn)
    # Gather items in a dictionary by the id, name variation and language
    compilers_d = {}
    for sort_key, group in itertools.groupby(sorted_compilers, key=group_fn):
        compiler_id, name_variation, language = sort_key
        by_compiler_id = compilers_d.setdefault(compiler_id, {})
        by_name_variation = by_compiler_id.setdefault(name_variation, {})
        by_name_variation[language] = next(x.path for x in group)
    def _default_make_compilers(cmp_id, paths):
        # Build a single Compiler from the chosen cc/cxx/f77/fc paths.
        operating_system, compiler_name, version = cmp_id
        compiler_cls = spack.compilers.class_for_compiler_name(compiler_name)
        spec = spack.spec.CompilerSpec(compiler_cls.name, version)
        paths = [paths.get(x, None) for x in ('cc', 'cxx', 'f77', 'fc')]
        target = archspec.cpu.host()
        compiler = compiler_cls(
            spec, operating_system, str(target.family), paths
        )
        return [compiler]
    # For compilers with the same compiler id:
    #
    # - Prefer with C compiler to without
    # - Prefer with C++ compiler to without
    # - Prefer no variations to variations (e.g., clang to clang-gpu)
    #
    sort_fn = lambda variation: (
        'cc' not in by_compiler_id[variation],  # None last
        'cxx' not in by_compiler_id[variation],  # None last
        getattr(variation, 'prefix', None),
        getattr(variation, 'suffix', None),
    )
    compilers = []
    for compiler_id, by_compiler_id in compilers_d.items():
        ordered = sorted(by_compiler_id, key=sort_fn)
        selected_variation = ordered[0]
        selected = by_compiler_id[selected_variation]
        # fill any missing parts from subsequent entries
        for lang in ['cxx', 'f77', 'fc']:
            if lang not in selected:
                next_lang = next((
                    by_compiler_id[v][lang] for v in ordered
                    if lang in by_compiler_id[v]), None)
                if next_lang:
                    selected[lang] = next_lang
        operating_system, _, _ = compiler_id
        # The OS object may override how Compiler objects are constructed.
        make_compilers = getattr(
            operating_system, 'make_compilers', _default_make_compilers)
        compilers.extend(make_compilers(compiler_id, selected))
    return compilers
def is_mixed_toolchain(compiler):
    """Returns True if the current compiler is a mixed toolchain,
    False otherwise.

    Args:
        compiler (spack.compiler.Compiler): a valid compiler object
    """
    cc = os.path.basename(compiler.cc or '')
    cxx = os.path.basename(compiler.cxx or '')
    f77 = os.path.basename(compiler.f77 or '')
    fc = os.path.basename(compiler.fc or '')
    toolchains = set()
    for compiler_cls in all_compiler_types():
        # Inspect all the compiler toolchain we know. If a compiler is the
        # only compiler supported there it belongs to that toolchain.
        def name_matches(name, name_list):
            # This is such that 'gcc' matches variations
            # like 'gcc-9' etc that are found in distros
            name, _, _ = name.partition('-')
            return len(name_list) == 1 and name and name in name_list
        if any([
            name_matches(cc, compiler_cls.cc_names),
            name_matches(cxx, compiler_cls.cxx_names),
            name_matches(f77, compiler_cls.f77_names),
            name_matches(fc, compiler_cls.fc_names)
        ]):
            tty.debug("[TOOLCHAIN] MATCH {0}".format(compiler_cls.__name__))
            toolchains.add(compiler_cls.__name__)
    if len(toolchains) > 1:
        # Clang-family frontends are considered a single toolchain,
        # not a mix.
        if toolchains == set(['Clang', 'AppleClang', 'Aocc']):
            return False
        tty.debug("[TOOLCHAINS] {0}".format(toolchains))
        return True
    return False
class InvalidCompilerConfigurationError(spack.error.SpackError):
    """Raised when a compilers.yaml entry is missing required path fields."""
    def __init__(self, compiler_spec):
        message = "Invalid configuration for [compiler \"%s\"]: " % compiler_spec
        details = ("Compiler configuration must contain entries for all compilers: %s"
                   % _path_instance_vars)
        super(InvalidCompilerConfigurationError, self).__init__(message, details)
class NoCompilersError(spack.error.SpackError):
    """Raised when Spack cannot find any compiler at all."""

    def __init__(self):
        message = "Spack could not find any compilers!"
        super(NoCompilersError, self).__init__(message)
class NoCompilerForSpecError(spack.error.SpackError):
    """Raised when no configured compiler satisfies a compiler spec."""

    def __init__(self, compiler_spec, target):
        message = (
            "No compilers for operating system %s satisfy spec %s"
            % (target, compiler_spec)
        )
        super(NoCompilerForSpecError, self).__init__(message)
class CompilerDuplicateError(spack.error.SpackError):
    """Raised when the same compiler spec appears more than once in config.

    The error message lists every configuration file that contains a
    duplicate entry together with how many times it occurs there.
    """

    def __init__(self, compiler_spec, arch_spec):
        config_file_to_duplicates = get_compiler_duplicates(
            compiler_spec, arch_spec)
        duplicate_table = list(
            (x, len(y)) for x, y in config_file_to_duplicates.items())

        # PEP 8 (E731): use named functions instead of assigned lambdas.
        def descriptor(num):
            # Pluralize "time" according to the duplicate count.
            return 'time' if num == 1 else 'times'

        def duplicate_msg(cfgfile, count):
            return "{0}: {1} {2}".format(cfgfile, str(count), descriptor(count))

        msg = (
            "Compiler configuration contains entries with duplicate" +
            " specification ({0}, {1})".format(compiler_spec, arch_spec) +
            " in the following files:\n\t" +
            '\n\t'.join(duplicate_msg(x, y) for x, y in duplicate_table))
        super(CompilerDuplicateError, self).__init__(msg)
class CompilerSpecInsufficientlySpecificError(spack.error.SpackError):
    """Raised when a compiler spec matches more than one compiler."""

    def __init__(self, compiler_spec):
        message = "Multiple compilers satisfy spec %s" % compiler_spec
        super(CompilerSpecInsufficientlySpecificError, self).__init__(message)
| 36.470514 | 88 | 0.659236 |
7959db9d2d804c9a4cddaa10f70f96fa1e335b97 | 47,594 | py | Python | compilador/vm/virtual_machine.py | Nombre-Pendiente/Super-Compi | 3f2a8e0219b04863fbf78d03aba782d235ccb11a | [
"MIT"
] | 6 | 2021-05-20T16:01:45.000Z | 2021-05-27T18:48:57.000Z | compilador/vm/virtual_machine.py | Nombre-Pendiente/Super-Compi | 3f2a8e0219b04863fbf78d03aba782d235ccb11a | [
"MIT"
] | 1 | 2021-05-18T14:44:04.000Z | 2021-05-18T14:44:04.000Z | compilador/vm/virtual_machine.py | Nombre-Pendiente/Super-Compi | 3f2a8e0219b04863fbf78d03aba782d235ccb11a | [
"MIT"
] | null | null | null | from router_solver import *
import compilador.vm.memory_segment
from compilador.vm.memory_segment import *
import compilador.objects.function_table
from compilador.objects.function_table import *
import compilador.objects.variable_tables
from compilador.objects.variable_tables import *
import compilador.objects.quadruple
from compilador.objects.quadruple import *
import compilador.objects.semantic_table
from compilador.objects.semantic_table import *
import game_engine.instruction
from game_engine.instruction import *
# CLASE VIRTUAL MACHINE
# Objeto que guarda segmentos de memoria y ejecuta cuadruplos
class VirtualMachine(object):
####################### INITS #######################
    def __init__(self, global_size, constant_size, local_size, func_table=None):
        """Create a VM with global, constant and local memory regions.

        Args:
            global_size: cells reserved for global variables.
            constant_size: cells reserved for constants.
            local_size: total cells shared by all function segments.
            func_table: optional function table; when given, local segments
                are built and all declared symbols are loaded into memory.
        """
        self.__total_size = (
            global_size + constant_size + local_size
        )  # total VM size in cells
        self.func_table = func_table  # function table
        self.global_segment = MemorySegment(
            "Global Segment", global_size, 0
        )  # global memory segment starts at address 0
        self.constant_segment = MemorySegment(
            "Constant Segment",
            constant_size,
            global_size,  # constants start right after the global segment
        )
        self.declared_symbols = []  # every symbol registered in the VM
        self.next_function_segment = (
            []
        )  # stack holding the next free start address in local memory
        if func_table:
            local_size_memory = global_size + constant_size
            # Build the per-function local memory segments.
            self.local_segment = self.__build_local_segment(
                local_size, global_size + constant_size
            )
            # Number of segments currently living in local memory.
            self.local_functions = len(self.local_segment)
            # Load the function-table symbols into their segments.
            self.__func_table_assign_memory()
        else:
            self.local_segment = None
            self.local_functions = 0
# Genera memoria local
def __build_local_segment(
self,
local_size,
local_start_direction,
):
# Revisa cuantas funciones hay y divide los segmentos locales entre ello
num_local_segments = len(self.func_table.functions)
if not num_local_segments:
return []
# Genera direcciones de inicio de segmento local y tamaño de cada uno
local_segment_size = local_size // num_local_segments
local_memory_size = local_size // num_local_segments
start_direction = local_start_direction
# Crea segmento de memoria del main
segments = []
segments.append(MemorySegment("main", local_segment_size, start_direction))
# Guarda sigueinte dirección disponible y la guarda
start_direction += local_memory_size
self.next_function_segment.append(start_direction)
# Regresa lista de segmentos de memoria con segmento de memoria del main
return segments
# Mete las tablas de variables a su segmento de memoria
def __func_table_assign_memory(self):
functions = self.func_table.functions
tables_init = ["Global Segment", "Constant Segment", "main"]
# Para el segmento global, constante y el main
for ft in functions:
if ft in tables_init:
# Saca su tabla de variables
var_tab = functions[ft]["vt"]
# Saca el diccionario de simbolos en la tabla
vars = var_tab.variables
# Inserta cada simbolo en la tabla a su segmento
for k, v in vars.items():
self.insert_symbol_in_segment(ft, v)
# Genera segmento de memoria de función para instancia de función
    def __function_instance(self, func_name):
        """Allocate a fresh local memory segment for one call of *func_name*.

        Returns the unique segment name "<func_name>-<start_address>".
        Exits the process if local memory would be exceeded.
        """
        function_object = self.func_table.functions
        # Size from the table times the number of variable type classes (7).
        function_size = function_object[func_name]["s"] * 7
        # Take the next free start address off the stack.
        start_direction = self.next_function_segment.pop()
        # Check there is room left in local memory for this instance.
        if function_size + start_direction < self.__total_size:
            # Build the unique instance name and append its segment
            # to the local segment list.
            name = str(func_name) + "-" + str(start_direction)
            self.local_segment.append(
                MemorySegment(name, function_size, start_direction)
            )
            # Push the start address of the next segment back on the stack.
            start_direction += function_size
            self.next_function_segment.append(start_direction)
            # Fetch the symbols from the function's variable table.
            var_tab = function_object[func_name]["vt"]
            vars = var_tab.variables
            # Insert each variable into the new segment.
            for k, v in vars.items():
                self.insert_symbol_in_segment(name, v)
            # Return the unique instance name.
            return name
        else:
            print("ERROR: Local Memory exceded, can't instance " + func_name)
            sys.exit()
# Busca una función en el segmento local
def __find_function_segment(self, func_name):
for func_segment in self.local_segment:
if func_segment.name == func_name:
return func_segment
return None
# Inserta un simbolo en el segmento indicado
def insert_symbol_in_segment(self, segment_name, symbol):
self.declared_symbols.append(symbol)
# Si el segmento es el global
if segment_name == "Global Segment":
return self.global_segment.insert_symbol(symbol)
# Si el segmento es el constante
elif segment_name == "Constant Segment":
return self.constant_segment.insert_symbol(symbol)
# Busca en el segmento local
else:
function_segment = self.__find_function_segment(segment_name)
# The function was not found
if function_segment == None:
return False
# Inserta a memoria
return function_segment.insert_symbol(symbol)
# Cuando se genera la dirección del indice de un arreglo
def modify_address_symbol(self, array_access, result_value):
# Inserta en segmento global
segment_name = array_access.scope
if segment_name == "Global Segment":
return self.global_segment.modify_address(array_access, result_value)
# Inserta en segmento constante
elif segment_name == "Constant Segment":
return self.constant_segment.modify_address(array_access, result_value)
# Busca en el segmento local
else:
function_segment = self.__find_function_segment(segment_name)
# The function was not found
if function_segment == None:
return False
# Inserta simbolo a dirección indicada
return function_segment.modify_address(array_access, result_value)
# Regresa segmento de memoria al que le pertenece esa dirección
def __get_local_segment(self, direction):
current_segment_direction = (
self.global_segment.size + self.constant_segment.size
)
for func in self.local_segment:
func_size = func.size + func.initial_position - 1
if direction <= func_size:
return func
# Regresa el simbolo en una dirección
def get_direction_symbol(self, direction):
global_size = self.global_segment.size
constant_size = self.constant_segment.size
# Direction en Global Segment
if direction < global_size:
return self.global_segment.search_symbol(direction)
# Direction en Constant Segment
elif direction < global_size + constant_size:
return self.constant_segment.search_symbol(direction)
# Direction excede tamaño de memoria
elif direction > self.__total_size:
print("ERROR: Address excedes memory size")
sys.exit()
# Direction en Local Segment
else:
segment = self.__get_local_segment(direction)
return segment.search_symbol(direction)
# Regresa el valor en una dirección
def get_direction_value(self, direction):
global_size = self.global_segment.size
constant_size = self.constant_segment.size
# Direction en Global Segment
if direction < global_size:
return self.global_segment.search_value(direction)
# Direction en Constant Segment
elif direction < global_size + constant_size:
return self.constant_segment.search_value(direction)
# Direction excede tamaño de memoria
elif direction > self.__total_size:
print("ERROR: Address excedes memory size")
sys.exit()
# Direction en Local Segment
else:
segment = self.__get_local_segment(direction)
return segment.search_value(direction)
# Modifica el valor en una dirección de memoria
def modify_direction_value(self, direction, value):
global_size = self.global_segment.size
constant_size = self.constant_segment.size
# Direction en Global Segment
if direction < global_size:
self.global_segment.modify_value(direction, value)
# Direction en Constant Segment
elif direction < global_size + constant_size:
self.constant_segment.modify_value(direction, value)
# Direction excede tamaño de memoria
elif direction > self.__total_size:
print("ERROR: Address excedes memory size")
sys.exit()
# Direction en Local Segment
else:
segment = self.__get_local_segment(direction)
segment.modify_value(direction, value)
################## FUNCTION CALL PREPARATION ##################
# Regresa un diccionario con valores de variables en segmento actual
def __save_local_scope(self, scope):
f_name = scope[1]
f_unique = scope[0]
segment = self.__find_function_segment(f_unique)
return segment.save_local_memory()
# Regresa los valores guardados a su dirección
def __unfreeze_local_scope(self, scope, frozen_memory):
f_name = scope[1]
f_unique = scope[0]
segment = self.__find_function_segment(f_unique)
segment.backtrack_memory(frozen_memory)
# Borra un segmento de memoria cuando termina de usarse
def __erase_local_instance(self):
# Saca el segmento de memoria de la lista
local_segment = self.local_segment.pop()
# Saca el valor de la siguiente dirección
new_next = self.next_function_segment.pop()
# Cambia la siguiente dirección a la nueva
new_next = new_next - local_segment.size
# Borra memoria local
local_segment.erase_local_memory()
# Guarda nueva dirección
self.next_function_segment.append(new_next)
########################### RESOLVE ###########################
# ......................... ARREGLOS ......................... #
# Resuelve cuadruplo de instrucción VER
def __resolve_ver(self, dir_opnd_1, dir_opnd_2, dir_result):
# Valor en dirección de indice a accesar
val_opnd_1 = self.get_direction_value(dir_opnd_1)
# Valor en dirección de limite inferior
val_opnd_2 = self.get_direction_value(dir_opnd_2)
# Valor en dirección de limite inferior
result = self.get_direction_value(dir_result)
# Se valida que el indice tenga un valor
if val_opnd_1 == None or val_opnd_1 == "null":
sym_opnd_1 = self.get_direction_symbol(dir_opnd_1).name
print("ERROR: variable " + str(sym_opnd_1) + " has no assigned value")
sys.exit()
# Se valida que se hayan encontrado los valores de los limites
if (
val_opnd_2 == None
or val_opnd_2 == "null"
or result == None
or val_opnd_2 == "null"
):
print("ERROR: array missing dimension value")
sys.exit()
# Se valida que el valor del indice este entre los limites
if not (val_opnd_1 >= val_opnd_2) and (val_opnd_1 <= result):
print("ERROR: Trying to acces an index that is out of bounds")
sys.exit()
# Resuelve la operación de agregar el desplazamiento a la dirección base
    def __resolve_address_op(
        self, operation, dir_opnd_1, dir_opnd_2, dir_result, parent_name, index
    ):
        """Add an array offset to a base address and register the element.

        Args:
            operation: opcode; only "ADD" performs the address math.
            dir_opnd_1: address of the offset (displacement) value.
            dir_opnd_2: global base address of the array symbol.
            dir_result: address of the temp that receives the computed address.
            parent_name: source-level name of the array.
            index: index value, used only to label the element symbol.
        """
        # Displacement value.
        val_opnd_1 = self.get_direction_value(dir_opnd_1)
        # Symbol of the array itself.
        parent_sym = self.get_direction_symbol(dir_opnd_2)
        # The array's address inside its own segment.
        parent_dir = parent_sym.segment_direction
        # Symbol that will hold the computed address.
        result = self.get_direction_symbol(dir_result)
        # Both the offset and the array must be resolvable.
        if val_opnd_1 == None or val_opnd_1 == "null":
            sym_opnd_1 = self.get_direction_symbol(dir_opnd_1).name
            print("ERROR: variable " + str(sym_opnd_1) + " has no assigned value")
            sys.exit()
        if dir_opnd_2 == None or parent_dir == None:
            print("ERROR: Variable " + str(parent_sym.name) + " has not been declared")
            sys.exit()
        # Only addition is supported for address arithmetic.
        if operation == "ADD":
            # Global base address + displacement.
            result_value = val_opnd_1 + int(dir_opnd_2)
            # Segment-relative base address + displacement.
            child_dir = val_opnd_1 + int(parent_dir)
            # Store the computed address in the temp symbol and in memory.
            result.value = result_value
            self.modify_direction_value(dir_result, result_value)
            # Create a symbol representing the accessed array element.
            array_access = Symbol(
                str(parent_name) + "[ " + str(index) + " ]",
                parent_sym.type,
                parent_sym.scope,
            )
            # Register the element symbol at its segment-relative address.
            self.modify_address_symbol(array_access, child_dir)
# ......................... FUNCIONES ......................... #
# Resuelve la asignación a parametros
def __resolve_param(self, dir_operand, index_result, func_name):
# Parametro que se manda
val_operand = self.get_direction_value(dir_operand)
# Indice de parametro que se busca
result = int(index_result) - 1
real_func_name = func_name[1]
memory_func_name = func_name[0]
# Busca la lista de parametros en la tabla
param_searching = self.func_table.functions[real_func_name]["p"]
# Se valida que el indice que buscamos este en la lista
if result < 0 or result > len(param_searching):
print(
"ERROR: "
+ str(index_result)
+ " is not a valid parameter index for function "
+ str(real_func_name)
)
sys.exit()
# Agarra el nombre del parametro y lo busca en la tabla de variables
param_searching = param_searching[result].name
param_in_vartable = self.func_table.functions[real_func_name]["vt"]
param_in_vartable = param_in_vartable.variables[param_searching]
# Modifica el valor del parametro al valor que se mando
self.modify_direction_value(param_in_vartable.global_direction, val_operand)
param_in_vartable.value = val_operand
# Asigna valor de retorno a la variable de la función en la tabla global
def __resolve_return(self, dir_operand, dir_result):
val_operand = self.get_direction_value(dir_operand)
val_result = self.get_direction_symbol(dir_result)
self.modify_direction_value(dir_result, val_operand)
val_result.value = val_operand
# ......................... OPERACIONES ......................... #
# Resuelve operaciones aritmeticas y booleanas
    def __resolve_op(self, operation, dir_opnd_1, dir_opnd_2, dir_result):
        """Execute an arithmetic/relational/logical quad and store the result.

        Args:
            operation: opcode (ADD, SUB, MUL, DIV, MOD, LT, GT, LTE, GTE,
                BEQ, BNEQ, OR, AND).
            dir_opnd_1: global address of the left operand.
            dir_opnd_2: global address of the right operand.
            dir_result: global address receiving the result.

        The result is written both to memory and to the result symbol.
        """
        sym_opnd_1 = self.get_direction_symbol(dir_opnd_1)
        sym_opnd_2 = self.get_direction_symbol(dir_opnd_2)
        sym_result = self.get_direction_symbol(dir_result)
        type_op_1 = sym_opnd_1.type
        type_op_2 = sym_opnd_2.type
        val_opnd_1 = self.get_direction_value(dir_opnd_1)
        val_opnd_2 = self.get_direction_value(dir_opnd_2)
        # Operations that tolerate missing operand values: equality and
        # logical ops treat "null" as Python None.
        if operation == "BEQ":
            result_value = val_opnd_1 == val_opnd_2
            sym_result.value = val_opnd_1 == val_opnd_2
        elif operation == "BNEQ":
            result_value = val_opnd_1 != val_opnd_2
            sym_result.value = val_opnd_1 != val_opnd_2
        elif operation == "OR":
            if val_opnd_1 == "null":
                val_opnd_1 = None
            if val_opnd_2 == "null":
                val_opnd_2 = None
            result_value = val_opnd_1 or val_opnd_2
            sym_result.value = val_opnd_1 or val_opnd_2
        elif operation == "AND":
            if val_opnd_1 == "null":
                val_opnd_1 = None
            if val_opnd_2 == "null":
                val_opnd_2 = None
            result_value = val_opnd_1 and val_opnd_2
            sym_result.value = val_opnd_1 and val_opnd_2
        else:
            # All remaining operations require both operands to have values.
            if val_opnd_1 == None or val_opnd_1 == "null":
                sym_opnd_1 = sym_opnd_1.name
                print("ERROR: variable " + str(sym_opnd_1) + " has no assigned value")
                sys.exit()
            if val_opnd_2 == None or val_opnd_2 == "null":
                sym_opnd_2 = sym_opnd_2.name
                print("ERROR: variable " + str(sym_opnd_2) + " has no assigned value")
                sys.exit()
            # CHAR values are stored quoted ("'c'"), so index 1 is the
            # character itself; coerce to its code point for numeric ops.
            if type_op_1 == "CHAR" and (type_op_2 == "INT" or type_op_2 == "FLT"):
                val_opnd_1 = ord(val_opnd_1[1])
            elif type_op_2 == "CHAR" and (type_op_1 == "INT" or type_op_1 == "FLT"):
                val_opnd_2 = ord(val_opnd_2[1])
            # +
            if operation == "ADD":
                # STR + STR drops the closing/opening quotes at the seam.
                if type_op_1 == "STR" and type_op_2 == "STR":
                    if val_opnd_1[0] != val_opnd_2[0]:
                        val_opnd_2[-1] = val_opnd_1[-1]
                    result_value = val_opnd_1[:-1] + val_opnd_2[1:]
                    sym_result.value = val_opnd_1[:-1] + val_opnd_2[1:]
                else:
                    result_value = val_opnd_1 + val_opnd_2
                    sym_result.value = val_opnd_1 + val_opnd_2
            # -
            elif operation == "SUB":
                result_value = val_opnd_1 - val_opnd_2
                sym_result.value = val_opnd_1 - val_opnd_2
            # *
            elif operation == "MUL":
                result_value = val_opnd_1 * val_opnd_2
                sym_result.value = val_opnd_1 * val_opnd_2
            # /
            elif operation == "DIV":
                if val_opnd_2 == 0:
                    print("ERROR: Trying to divide by cero")
                    sys.exit()
                result_value = val_opnd_1 / val_opnd_2
                sym_result.value = val_opnd_1 / val_opnd_2
            # %
            elif operation == "MOD":
                if val_opnd_2 == 0:
                    print("ERROR: Trying to divide by cero")
                    sys.exit()
                result_value = val_opnd_1 % val_opnd_2
                sym_result.value = val_opnd_1 % val_opnd_2
            # <
            elif operation == "LT":
                result_value = val_opnd_1 < val_opnd_2
                sym_result.value = val_opnd_1 < val_opnd_2
            # >
            elif operation == "GT":
                result_value = val_opnd_1 > val_opnd_2
                sym_result.value = val_opnd_1 > val_opnd_2
            # <=
            elif operation == "LTE":
                result_value = val_opnd_1 <= val_opnd_2
                sym_result.value = val_opnd_1 <= val_opnd_2
            # >=
            elif operation == "GTE":
                result_value = val_opnd_1 >= val_opnd_2
                sym_result.value = val_opnd_1 >= val_opnd_2
        # Write the result into its memory cell.
        self.modify_direction_value(dir_result, result_value)
# Resuelve operaciones de asignación y asignación compuesta
    def __resolve_eq(self, assign_op, dir_opnd, dir_result):
        """Execute an assignment or compound-assignment quad.

        Args:
            assign_op: EQ, ADDEQ, SUBEQ, MULEQ, DIVEQ or MODEQ.
            dir_opnd: address of the right-hand-side value.
            dir_result: address of the receiving variable.

        Compound assignments require both sides to already have values;
        plain EQ does not. Result is written to memory and to the symbol.
        """
        val_operand = self.get_direction_value(dir_opnd)
        result = self.get_direction_symbol(dir_result)
        result_value = self.get_direction_value(dir_result)
        # Compound assignment needs existing values on both sides.
        if assign_op != "EQ" and (val_operand == None or val_operand == "null"):
            sym_opnd = self.get_direction_symbol(dir_opnd).name
            print("ERROR: variable " + str(sym_opnd) + " has no assigned value")
            sys.exit()
        if assign_op != "EQ" and (result_value == None or result_value == "null"):
            result = result.name
            print("ERROR: variable " + str(result) + " has no assigned value")
            sys.exit()
        # =
        if assign_op == "EQ":
            result_value = val_operand
            result.value = val_operand
        # +=
        elif assign_op == "ADDEQ":
            result_value += val_operand
            result.value += val_operand
        # -=
        elif assign_op == "SUBEQ":
            result_value -= val_operand
            result.value -= val_operand
        # *=
        elif assign_op == "MULEQ":
            result_value *= val_operand
            result.value *= val_operand
        # /=
        elif assign_op == "DIVEQ":
            # Guard against division by zero.
            if val_operand == 0:
                print("ERROR: Trying to divide by cero")
                sys.exit()
            result_value /= val_operand
            result.value /= val_operand
        # %=
        elif assign_op == "MODEQ":
            # Guard against division by zero.
            if val_operand == 0:
                print("ERROR: Trying to divide by cero")
                sys.exit()
            result_value %= val_operand
            result.value %= val_operand
        # Write the final value into the receiving variable's cell.
        self.modify_direction_value(dir_result, result_value)
# Resuelve operaciones de NOT
    def __resolve_not(self, dir_operand, dir_result):
        """Execute a NOT quad with the language's truthiness rules.

        Any non-BOOL, non-zero assigned value negates to False; None,
        "null" or 0 negate to True; BOOL values use plain negation.
        """
        sym_operand = self.get_direction_symbol(dir_operand)
        val_operand = self.get_direction_value(dir_operand)
        result = self.get_direction_symbol(dir_result)
        # A non-boolean value that is set and non-zero is truthy -> NOT is False.
        if (
            val_operand != None
            and val_operand != "null"
            and sym_operand.type != "BOOL"
            and val_operand != 0
        ):
            result_value = False
            result.value = False
        # None, "null" or 0 are falsy -> NOT is True.
        elif val_operand == None or val_operand == "null" or val_operand == 0:
            result_value = True
            result.value = True
        else:
            # Already boolean: plain logical negation.
            result_value = not val_operand
            result.value = not val_operand
        # Store the result.
        self.modify_direction_value(dir_result, result_value)
# ......................... INPUT / OUTPUT ......................... #
# Imprime expresión que se busca
def __resolve_write(self, dir_result):
if dir_result == "empty":
print()
else:
result_value = self.get_direction_value(dir_result)
print(result_value)
# Asigna input de usuario a dirección
    def __resolve_read(self, dir_result):
        """Execute a READ quad: coerce user input to the target's type.

        Reads a line from stdin and converts it according to the type of
        the symbol at *dir_result* (INT, FLT, CHAR, STR or BOOL); exits
        the process on an invalid conversion.
        """
        user_input = input()
        symbol = self.get_direction_symbol(dir_result)
        # INT target: strip spaces and convert to int.
        if symbol.type == "INT":
            user_input = user_input.replace(" ", "")
            try:
                user_input = int(user_input)
            except:
                print("ERROR: Not a valid INT input")
                sys.exit()
            self.modify_direction_value(dir_result, user_input)
            symbol.value = user_input
        # FLT target: strip spaces and convert to float.
        elif symbol.type == "FLT":
            user_input = user_input.replace(" ", "")
            try:
                user_input = float(user_input)
            except:
                print("ERROR: Not a valid FLT input")
                sys.exit()
            self.modify_direction_value(dir_result, user_input)
            symbol.value = user_input
        # CHAR target: must be a single character, stored quoted ('c').
        elif symbol.type == "CHAR":
            user_input = user_input.replace(" ", "")
            if len(user_input) > 1:
                print("ERROR: Not a valid CHAR input")
                sys.exit()
            try:
                user_input = str(user_input[0])
                user_input = "'" + user_input + "'"
            except:
                print("ERROR: Not a valid CHAR input")
                sys.exit()
            self.modify_direction_value(dir_result, user_input)
            symbol.value = user_input
        # STR target: store the whole line wrapped in double quotes.
        elif symbol.type == "STR":
            try:
                user_input = str(user_input)
                user_input = '"' + user_input + '"'
            except:
                print("ERROR: Not a valid STR input")
                sys.exit()
            self.modify_direction_value(dir_result, user_input)
            symbol.value = user_input
        # BOOL target.
        elif symbol.type == "BOOL":
            user_input = user_input.replace(" ", "")
            booleans = {"true": True, "false": False, "0": False}
            # Accept the literal words / zero first.
            if user_input not in booleans:
                # Otherwise try an integer: positive -> True, else False.
                try:
                    user_input = int(user_input)
                    user_input = True if user_input > 0 else False
                except:
                    print("ERROR: Not a valid BOOL input")
                    sys.exit()
            else:
                user_input = booleans[user_input]
            # Store the converted value.
            self.modify_direction_value(dir_result, user_input)
            symbol.value = user_input
# ......................... MTD OBJ ......................... #
def __resolve_frog_method(self, operation, dir_frog, dir_result):
# Diccionario de accesorios de rana
valid_hats = {
'"cowboy"': 1,
'"cool"': 2,
'"shoes"': 3,
'"makeup"': 4,
"'cowboy'": 1,
"'cool'": 2,
"'shoes'": 3,
"'makeup'": 4,
}
# Se busca el valor / nombre del objeto
frog = self.get_direction_value(dir_frog)
# Si la operaicón es de cambiar atributo
if operation == "hat":
# Valida que este en diccionario y si no es el default
hat = self.get_direction_value(dir_result)
if hat not in valid_hats:
hat = 0
else:
hat = valid_hats[hat]
# Regresa instrucción
return Instruction(frog, operation, hat)
else:
# Regresa instrucción de operando
times = self.get_direction_value(dir_result)
return Instruction(frog, operation, times)
########################### MAIN ###########################
# Imprime la memoria para debugging
def __print_all_memory(self):
self.global_segment.print_memory_segment()
self.constant_segment.print_memory_segment()
for segment in self.local_segment:
segment.print_memory_segment()
# Itera sobre los quadruplos y resuelve la instrucción
def run(self, quad_dir):
era = False # Avisa si estamos llamando a una función
running = True # Lee mientras no lleguemos al ENDOF
instruction = 1 # Inicia en el primer cuadruplo
saved_positions = [] # Guarda indice cuando se llama una función
saved_functions = (
[]
) # Stack con nombre de función y nombre unico de su espacio de meoria
game_instructions = [] # Guarda las instrucciones del juego
frozen_memory = (
[]
) # Guarda diccionario de direcciones + su valor antes de hacer llamada
index_accessed = [] # Guarda indice de dimension a accesar
# Mientras no sea ENDOF
while running:
# Saca cuadruplo en dirección actual y el nombre / tipo del operando
curr_quad = quad_dir[instruction]
operation = curr_quad.operator.name
curr_type = curr_quad.operator.type
# Si es una expresión aritmetica o booleana
if curr_type in ["operation", "comparison", "matching"]:
# Si es una expresión normal
if type(curr_quad.operand_2) == Symbol:
# Checa si el operando_1 es una dirección
if curr_quad.operand_1.address_flag:
# Si es el caso busca el valor en la dirección
dir_opnd_1 = self.get_direction_symbol(
curr_quad.operand_1.value
)
dir_opnd_1 = dir_opnd_1.global_direction
else:
# Si no solo asigna su dirección
dir_opnd_1 = curr_quad.operand_1.global_direction
# Checa si el operando_2 es una dirección
if curr_quad.operand_2.address_flag:
# Si es el caso busca el valor en la dirección
dir_opnd_2 = self.get_direction_symbol(
curr_quad.operand_2.value
)
dir_opnd_2 = dir_opnd_2.global_direction
else:
# Si no solo asigna su dirección
dir_opnd_2 = curr_quad.operand_2.global_direction
# Agarra simbolo de resultado
result_id = curr_quad.result_id
# Si el simbolo no tiene dirección de memoria
if result_id.global_direction == None:
if len(saved_functions) > 0:
# Busca nombre del contexto actual
f = saved_functions[-1]
f_name = f[1]
f_address = f[0]
else:
f_name = ""
f_address = ""
# si la variable es del scope de la función actual
if result_id.scope == f_name:
# Se inserta en segmento actual
self.insert_symbol_in_segment(f_address, result_id)
else:
# Se inserta en su propio scope
self.insert_symbol_in_segment(result_id.scope, result_id)
# Consigue su dirección
dir_result = result_id.global_direction
# Resuelve operación
self.__resolve_op(operation, dir_opnd_1, dir_opnd_2, dir_result)
# Cuando la operación tiene un BASE ADDRESS como operando
else:
# Dirección operando de desplazamiento
dir_opnd_1 = curr_quad.operand_1.global_direction
# Dirección de simbolo padre de dirección base
dir_opnd_2 = curr_quad.operand_2.symbol.global_direction
# Nombre del simbolo padre de la dirección base
parent_name = curr_quad.operand_2.parent
# Agarra simbolo de resultado
result_id = curr_quad.result_id
# Si el simbolo no tiene dirección de memoria
if result_id.global_direction == None:
if len(saved_functions) > 0:
# Busca nombre del contexto actual
f = saved_functions[-1]
f_name = f[1]
f_address = f[0]
else:
f_name = ""
f_address = ""
# si la variable es del scope de la función actual
if result_id.scope == f_name:
# Se inserta en segmento actual
self.insert_symbol_in_segment(f_address, result_id)
else:
# Se inserta en su propio scope
self.insert_symbol_in_segment(result_id.scope, result_id)
# Consigue su dirección
dir_result = result_id.global_direction
# Resuelve operación de dirección
self.__resolve_address_op(
operation,
dir_opnd_1,
dir_opnd_2,
dir_result,
parent_name,
index_accessed.pop(),
)
# Si es una expresión de asignación o asignación compuesta
elif operation in set.union(SemanticTable.assignment_operations_op, {"EQ"}):
# Si estamos haciendo un read lo llama
if operation == "EQ" and curr_quad.operand_1.name == "READ":
dir_result = curr_quad.result_id.global_direction
self.__resolve_read(dir_result)
# Si estamos asignando a un atributo objeto
if curr_quad.result_id.object_atr_flag:
# Genera instrucción
game_instructions.append(
self.__resolve_frog_method(
"hat",
curr_quad.result_id.object_atr_flag.global_direction,
dir_result,
)
)
else:
operand_1 = curr_quad.operand_1
result_id = curr_quad.result_id
# Si el simbolo no tiene dirección de memoria
if result_id.global_direction == None:
if len(saved_functions) > 0:
# Busca nombre del contexto actual
f = saved_functions[-1]
f_name = f[1]
f_address = f[0]
else:
f_name = ""
f_address = ""
# si la variable es del scope de la función actual
if result_id.scope == f_name:
# Se inserta en segmento actual
self.insert_symbol_in_segment(f_address, result_id)
else:
# Se inserta en su propio scope
self.insert_symbol_in_segment(result_id.scope, result_id)
# Checa si el resultado es una dirección
if result_id.address_flag:
# Si es el caso busca el valor en la dirección
dir_result = self.get_direction_symbol(result_id.value)
dir_result = dir_result.global_direction
else:
# Si no solo asigna su dirección
dir_result = result_id.global_direction
# Checa si el operando_1 es una dirección
if operand_1.address_flag:
# Si es el caso busca el valor en la dirección
dir_operand = self.get_direction_symbol(operand_1.value)
dir_operand = dir_operand.global_direction
else:
# Si no solo asigna su dirección
dir_operand = operand_1.global_direction
# Resuelve operación
self.__resolve_eq(operation, dir_operand, dir_result)
# Si estamos asignando a un atributo objeto
if result_id.object_atr_flag:
# Genera instrucción
game_instructions.append(
self.__resolve_frog_method(
"hat",
result_id.object_atr_flag.global_direction,
dir_result,
)
)
# Si es una expresión de not
elif operation == "NOT":
operand_1 = curr_quad.operand_1
result_id = curr_quad.result_id
# Si el simbolo no tiene dirección de memoria
if result_id.global_direction == None:
if len(saved_functions) > 0:
# Busca nombre del contexto actual
f = saved_functions[-1]
f_name = f[1]
f_address = f[0]
else:
f_name = ""
f_address = ""
# si la variable es del scope de la función actual
if result_id.scope == f_name:
# Se inserta en segmento actual
self.insert_symbol_in_segment(f_address, result_id)
else:
# Se inserta en su propio scope
self.insert_symbol_in_segment(result_id.scope, result_id)
# Checa si el resultado es una dirección
if result_id.address_flag:
# Si es el caso busca el valor en la dirección
dir_result = self.get_direction_symbol(result_id.value)
dir_result = dir_result.global_direction
else:
# Si no solo asigna su dirección
dir_result = result_id.global_direction
# Checa si el operando_1 es una dirección
if operand_1.address_flag:
# Si es el caso busca el valor en la dirección
dir_operand = self.get_direction_symbol(operand_1.value)
dir_operand = dir_operand.global_direction
else:
# Si no solo asigna su dirección
dir_operand = operand_1.global_direction
# Resuelve operación
self.__resolve_not(dir_operand, dir_result)
# Si es una operación write
elif operation == "WRITE":
# Si no es un write sin expresión
if curr_quad.result_id.name != "empty":
# Checa si el resultado es una dirección
if curr_quad.result_id.address_flag:
# Si es el caso busca el valor en la dirección
dir_result = self.get_direction_symbol(
curr_quad.result_id.value
)
dir_result = dir_result.global_direction
else:
# Si no solo asigna su dirección
dir_result = curr_quad.result_id.global_direction
# Resuelve operación
self.__resolve_write(dir_result)
else:
# Resuelve operación
self.__resolve_write(curr_quad.result_id.name)
# Si es una instrucción GOTO
elif operation == "GOTO":
# Nos movemos al cuadruplo de ese indice
instruction = curr_quad.result_id.name
continue
# Si es una instrucción GOTOF
elif operation == "GOTOF":
# Si la expresión es verdadera avanzamos uno
if self.get_direction_value(curr_quad.operand_1.global_direction):
instruction += 1
continue
else:
# Si no vamos al cuadruplo del indice
instruction = curr_quad.result_id.name
continue
# Si es una instrucción VER
elif operation == "VER":
# Checa si el operando_1 es una dirección
if curr_quad.operand_1.address_flag:
# Si es el caso busca el valor en la dirección
dir_opnd_1 = self.get_direction_symbol(curr_quad.operand_1.value)
dir_opnd_1 = dir_opnd_1.global_direction
else:
# Si no solo asigna su dirección
dir_opnd_1 = curr_quad.operand_1.global_direction
dir_opnd_2 = curr_quad.operand_2.global_direction
result_id = curr_quad.result_id.global_direction
# Resuelve instrucción
self.__resolve_ver(dir_opnd_1, dir_opnd_2, result_id)
# Guarda el valor del indice a accesar
index_accessed.append(self.get_direction_value(dir_opnd_1))
# Si es una instrucción VER
elif operation == "ERA":
# Si estamos en main y no hay una llamada activa
if curr_quad.operator.scope == "main" and not era:
# Guarda main como el scope anterior
saved_functions.append(["main", "main"])
# Agrega los valores en memoria actuales a la memoria congelada
frozen_memory.append(self.__save_local_scope(saved_functions[-1]))
# Sacamos el nombre de la función
function_name = curr_quad.operand_1.name
# Generamos su espacio de memoria
name = self.__function_instance(function_name)
# Guardamos el nombre de la función y el nombre de su scope
saved_functions.append([name, function_name])
# Indicamos inicio de llamada
era = True
# si es una instrucción PARAM
elif operation == "PARAM":
# Checa si el operando_1 es una dirección
if curr_quad.operand_1.address_flag:
# Si es el caso busca el valor en la dirección
dir_operand = self.get_direction_symbol(curr_quad.operand_1.value)
dir_operand = dir_operand.global_direction
else:
# Si no solo asigna su dirección
dir_operand = curr_quad.operand_1.global_direction
# Saca el indice del parametro que queremos accesar
dir_result = curr_quad.result_id.name
# Sacamos los datos de la función que se esta llamando
func_name = saved_functions[-1]
# Asignamos valores a parametro
self.__resolve_param(dir_operand, dir_result, func_name)
# Instrucción de tipo GOSUB
elif operation == "GOSUB":
# Guarda la posición a la que se regresa dspues de la llamada
saved_positions.append(instruction + 1)
# Va al indice de la función
instruction = curr_quad.result_id.name
continue
# Instrucción de tipo RETURN
elif operation == "RETURN":
# Si existe valor de retorno
if curr_quad.operand_1 and curr_quad.result_id:
# Checa si el operando_1 es una dirección
if curr_quad.operand_1.address_flag:
# Si es el caso busca el valor en la dirección
dir_operand = self.get_direction_symbol(
curr_quad.operand_1.value
)
dir_operand = dir_opnd_1.global_direction
else:
# Si no solo asigna su dirección
dir_operand = curr_quad.operand_1.global_direction
# Saca la dirección de la variable de la función
dir_result = curr_quad.result_id.global_direction
# Resuelve asignación
self.__resolve_return(dir_operand, dir_result)
else:
# Si es VOID pasamos a la siguiente instrucción
instruction += 1
continue
# Instrucción de tipo ENDFUNC
elif operation == "ENDFUNC":
# Cambia el indice a la posición que guardamos
instruction = saved_positions.pop()
# Borra instancia local
self.__erase_local_instance()
# Saca la función del stack de llamadas
saved_functions.pop()
# Vuelve a asignar los valores que congelamos de la instancia anterior
self.__unfreeze_local_scope(saved_functions[-1], frozen_memory.pop())
# Indica que se acabo la llamada
era = False
continue
# Insutrucción tipo METODO OBJETO
elif curr_type == "obj_method":
# Checa si el operando_1 es una dirección
if curr_quad.operand_1.address_flag:
# Si es el caso busca el valor en la dirección
dir_frog = self.get_direction_symbol(curr_quad.operand_1.value)
dir_frog = dir_frog.global_direction
else:
# Si no solo asigna su dirección
dir_frog = curr_quad.operand_1.global_direction
dir_result = curr_quad.result_id.global_direction
# Genera instrucción
game_instructions.append(
self.__resolve_frog_method(operation, dir_frog, dir_result)
)
# Acaba la iteración de cuadruplos
elif operation == "ENDOF":
running = False
continue
# Se mueve a la siguiente instrucción
instruction += 1
# Valida que sea valida y si no acaba
if instruction > len(quad_dir):
running = False
# Regresa instrucciónes acumuladas al juego
return game_instructions
| 43.704316 | 98 | 0.560995 |
7959dbcd49fd1956b30eaf89781c71e8f80038eb | 8,117 | py | Python | doc/conf.py | hydratk/hydratk-ext-testenv | 90eea9c460cc206781154cb541ed0fb8b2b292f3 | [
"BSD-3-Clause"
] | null | null | null | doc/conf.py | hydratk/hydratk-ext-testenv | 90eea9c460cc206781154cb541ed0fb8b2b292f3 | [
"BSD-3-Clause"
] | null | null | null | doc/conf.py | hydratk/hydratk-ext-testenv | 90eea9c460cc206781154cb541ed0fb8b2b292f3 | [
"BSD-3-Clause"
] | null | null | null | # -*- coding: utf-8 -*-
#
# HydraTK TestEnv extension documentation build configuration file
# All configuration values have a default values that are commented out
# serve to show the default.
import sys
import os
import sphinx_rtd_theme
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#sys.path.insert(0, os.path.abspath('.'))
sys.path.append('../src')
# -- General configuration -----------------------------------------------
autodoc_default_flags = ['members', 'private-members', 'special-members']
autodoc_mock_imports = [
'web'
]
# If your documentation needs a minimal Sphinx version, state it here.
#needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be extensions
# coming with Sphinx (named 'sphinx.ext.*') or your custom ones.
extensions = ['sphinx.ext.autodoc', 'sphinx.ext.doctest', 'sphinx.ext.intersphinx', 'sphinx.ext.todo', 'sphinx.ext.pngmath',
'sphinx.ext.napoleon', 'sphinx.ext.graphviz', 'sphinx.ext.inheritance_diagram', 'sphinxcontrib.mscgen']
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix of source filenames.
source_suffix = '.rst'
# The encoding of source files.
#source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = u'hydratk-ext-testenv'
copyright = u'2015-2018, HydraTK Team'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = '0.2.3'
# The full version, including alpha/beta/rc tags.
release = '0.2.3'
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = ['_build']
# The reST default role (used for this markup: `text`) to use for all documents.
#default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []
# -- Options for HTML output ---------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
# html_theme = 'default'
html_theme = 'sphinx_rtd_theme'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
html_theme_path = [sphinx_rtd_theme.get_html_theme_path()]
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
#html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
#html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
#html_logo = None
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
#html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
#html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
#html_additional_pages = {}
# If false, no module index is generated.
#html_domain_indices = True
# If false, no index is generated.
#html_use_index = True
# If true, the index is split into individual pages for each letter.
#html_split_index = False
# If true, links to the reST sources are added to the pages.
#html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
#html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
#html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = None
# Output file base name for HTML help builder.
htmlhelp_basename = 'Hydradoc'
# -- Options for LaTeX output --------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#'preamble': '',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title, author, documentclass [howto/manual]).
latex_documents = [
('index', 'Hydra.tex', u'Hydra Documentation',
u'Hydra Toolkit Team', 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
#latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#latex_use_parts = False
# If true, show page references after internal links.
#latex_show_pagerefs = False
# If true, show URL addresses after external links.
#latex_show_urls = False
# Documents to append as an appendix to all manuals.
#latex_appendices = []
# If false, no module index is generated.
#latex_domain_indices = True
# -- Options for manual page output --------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
('index', 'hydra', u'Hydra Documentation',
[u'Hydra Toolkit Team'], 1)
]
# If true, show URL addresses after external links.
#man_show_urls = False
# -- Options for Texinfo output ------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
('index', 'Hydra', u'Hydra Documentation',
u'Hydra Toolkit Team', 'Hydra', 'One line description of project.',
'Miscellaneous'),
]
# Documents to append as an appendix to all manuals.
#texinfo_appendices = []
# If false, no module index is generated.
#texinfo_domain_indices = True
# How to display URL addresses: 'footnote', 'no', or 'inline'.
#texinfo_show_urls = 'footnote'
# Example configuration for intersphinx: refer to the Python standard library.
intersphinx_mapping = {'./': 'objects.inv'}
| 32.468 | 124 | 0.714673 |
7959dcf46909f103b5b82b452b29ad5e352348fa | 5,741 | py | Python | musicdl/musicdl.py | jerrysx/stunning-octo-train | 97d6254c9427046fef5d2ef1e65297cf04397728 | [
"MIT"
] | null | null | null | musicdl/musicdl.py | jerrysx/stunning-octo-train | 97d6254c9427046fef5d2ef1e65297cf04397728 | [
"MIT"
] | null | null | null | musicdl/musicdl.py | jerrysx/stunning-octo-train | 97d6254c9427046fef5d2ef1e65297cf04397728 | [
"MIT"
] | null | null | null | '''
Function:
音乐下载器
Author:
Charles
微信公众号:
Charles的皮卡丘
'''
import sys
if __name__ == '__main__': from modules import *
else: from .modules import *
'''basic info'''
BASICINFO = '''************************************************************
Function: 音乐下载器 V2.1.8
Author: Charles
微信公众号: Charles的皮卡丘
操作帮助:
输入r: 重新初始化程序(即返回主菜单)
输入q: 退出程序
下载多首歌曲: 选择想要下载的歌曲时,输入{1,2,5}可同时下载第1,2,5首歌
歌曲保存路径:
当前路径下的%s文件夹内
************************************************************'''
'''音乐下载器'''
class musicdl():
    """Interactive music downloader.

    Wraps one client per supported music service (classes imported from
    ``modules``); every client exposes ``search`` and ``download`` with a
    common interface, which lets the per-source dispatch below be
    table-driven instead of a long copy-pasted if/elif chain per source.
    """

    # Supported sources, in the order results are collected and displayed.
    # Each name is simultaneously the client class name (from ``modules``)
    # and the attribute name the client instance is bound to on ``self``.
    SOURCES = ('baiduFlac', 'kugou', 'kuwo', 'netease', 'qianqian', 'qq',
               'migu', 'xiami', 'joox')

    def __init__(self, configpath=None, config=None, **kwargs):
        """Load configuration, set up logging and all source clients.

        configpath: path of the JSON config file (default 'config.json').
        config:     pre-loaded config dict; takes precedence over configpath.
        """
        if config is None:
            # Bug fix: *configpath* used to be ignored and 'config.json' was
            # always loaded; honor the caller-supplied path when present.
            config = loadConfig(configpath if configpath else 'config.json')
        self.config = config
        self.logger_handle = Logger(self.config['logfilepath'])
        self.initializeAllSources()

    # 非开发人员外部调用 -- public entry point for end users
    def run(self, target_srcs=None):
        """Interactive loop: prompt for a keyword, search the requested
        sources, print a numbered result table, download selected tracks.

        target_srcs: optional list of source names to query (defaults to
        every supported source).
        """
        while True:
            print(BASICINFO % self.config.get('savedir'))
            # -- search --
            keyword = self.dealInput('请输入歌曲搜索的关键词: ')
            srcs = list(self.SOURCES) if target_srcs is None else target_srcs
            search_results = self.search(keyword, srcs)
            # -- print a numbered table of all hits --
            title = ['序号', '歌手', '歌名', '大小', '时长', '专辑', '来源']
            items = []
            records = {}
            idx = 0
            for values in search_results.values():
                for value in values:
                    items.append([str(idx), value['singers'], value['songname'],
                                  value['filesize'], value['duration'],
                                  value['album'], value['source']])
                    records[str(idx)] = value
                    idx += 1
            printTable(title, items)
            # -- download the user-selected entries --
            selection = self.dealInput('请输入想要下载的音乐编号: ')
            songinfos = []
            for num in selection.split(','):
                songinfo = records.get(num, '')
                if songinfo:
                    songinfos.append(songinfo)
            self.download(songinfos)

    # 音乐搜索 -- query every requested source
    def search(self, keyword, target_srcs):
        """Search *keyword* on each source listed in *target_srcs*.

        Returns {source_name: [songinfo, ...]}.  A source that raises is
        logged and skipped so one broken backend cannot abort the search.
        """
        search_results = {}
        for src in self.SOURCES:
            if src not in target_srcs:
                continue
            try:
                search_results[src] = getattr(self, src).search(keyword)
            except Exception as err:
                self.logger_handle.error(str(err), True)
                self.logger_handle.warning('无法在%s中搜索 ——> %s' % (src, keyword))
        return search_results

    # 音乐下载 -- dispatch each song to the client of its origin
    def download(self, songinfos):
        """Download every songinfo dict via the client named by its 'source'
        key; unknown sources are silently ignored (matching the original
        elif chain, which had no else branch)."""
        for songinfo in songinfos:
            source = songinfo['source']
            if source in self.SOURCES:
                getattr(self, source).download([songinfo])

    # 初始化所有支持的搜索/下载源
    def initializeAllSources(self):
        """(Re-)instantiate one client per supported source and bind it to
        ``self`` under the source's name."""
        for src in self.SOURCES:
            # The client classes come in via ``from .modules import *`` and
            # are therefore reachable through this module's globals by name.
            setattr(self, src, globals()[src](self.config, self.logger_handle))

    # 处理用户输入
    def dealInput(self, tip=''):
        """Prompt the user.  'q' quits the program, 'r' re-initializes the
        sources and restarts the interactive loop, anything else is
        returned verbatim."""
        user_input = input(tip)
        if user_input.lower() == 'q':
            self.logger_handle.info('ByeBye')
            sys.exit()
        elif user_input.lower() == 'r':
            # NOTE(review): this recurses into run(); repeated 'r' inputs
            # grow the call stack, exactly as the original implementation did.
            self.initializeAllSources()
            self.run()
        else:
            return user_input
'''run'''
if __name__ == '__main__':
dl_client = musicdl('config.json')
dl_client.run() | 34.377246 | 141 | 0.662254 |
7959dd531e9d6edc3283266e3e9e10fc8f259e05 | 8,227 | py | Python | cmt/util/evaluation.py | erichilarysmithsr/CrisisMappingToolkit | 33eb4f158cf7ae4c3e58025b2639186d17fe8d01 | [
"Apache-2.0"
] | 2 | 2017-11-30T18:45:59.000Z | 2018-04-08T16:47:43.000Z | cmt/util/evaluation.py | erichilarysmithsr/CrisisMappingToolkit | 33eb4f158cf7ae4c3e58025b2639186d17fe8d01 | [
"Apache-2.0"
] | null | null | null | cmt/util/evaluation.py | erichilarysmithsr/CrisisMappingToolkit | 33eb4f158cf7ae4c3e58025b2639186d17fe8d01 | [
"Apache-2.0"
] | 1 | 2021-09-09T06:03:44.000Z | 2021-09-09T06:03:44.000Z | # -----------------------------------------------------------------------------
# Copyright * 2014, United States Government, as represented by the
# Administrator of the National Aeronautics and Space Administration. All
# rights reserved.
#
# The Crisis Mapping Toolkit (CMT) v1 platform is licensed under the Apache
# License, Version 2.0 (the "License"); you may not use this file except in
# compliance with the License. You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0.
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations under
# the License.
# -----------------------------------------------------------------------------
import ee
import threading
import functools
import time
import cmt.util.miscUtilities
#import cmt.mapclient_qt
def countNumBlobs(classifiedImage, region, maxBlobSize, evalResolution=500):
    '''Count connected components ("blobs") of on and off pixels in a
       classified Earth Engine image over the given region.

       Returns (numOnBlobs, numOffBlobs).  maxBlobSize caps the component
       size passed to connectedComponents -- presumably in pixels (the
       original comment was itself unsure).
    '''
    kernel = ee.Kernel.square(3)
    counts = []
    # First pass counts blobs of "on" pixels, second pass the inverted image.
    for image in (classifiedImage, classifiedImage.Not()):
        blobs = image.connectedComponents(kernel, maxBlobSize).select('b1')
        vectors = blobs.reduceToVectors(scale=evalResolution, geometry=region,
                                        geometryType='centroid', bestEffort=True)
        counts.append(len(vectors.getInfo()['features']))
    return (counts[0], counts[1])
def evaluate_result_quality(resultIn, region):
    '''Try to appraise the quality of a result without access to ground truth data!

       Heuristic score in [0, 1]: a chain of pass/fail sanity checks (each
       returning 0.0 on failure) against the MODIS MOD44W permanent water
       mask, followed by penalty-based scoring.
       - resultIn: ee.Image water classification; fractional values are
         rounded to 0/1 before evaluation.
       - region:   ee.Geometry over which the result is evaluated.
    '''
    EVAL_RESOLUTION = 500
    # Permanent water mask reference; band renamed to 'b1' to match results.
    waterMask = ee.Image("MODIS/MOD44W/MOD44W_005_2000_02_24").select(['water_mask'], ['b1'])
    # Check percentage of region classified as true
    result = resultIn.round().uint8() # Eliminate fractional inputs
    fillCount = result.reduceRegion(ee.Reducer.mean(), region, EVAL_RESOLUTION)
    percentClassified = fillCount.getInfo()['b1']
    #print 'percentClassified = ' + str(percentClassified)
    # Too much or too little fill generally indicates a bad match
    MAX_FILL_PERCENT = 0.95
    MIN_FILL_PERCENT = 0.05
    if (percentClassified < MIN_FILL_PERCENT) or (percentClassified > MAX_FILL_PERCENT):
        return 0.0
    # Make sure enough of the water mask has been filled in
    MIN_PERCENT_MASK_FILL = 0.60
    filledWaterMask = waterMask.And(result)
    filledWaterCount = filledWaterMask.reduceRegion(ee.Reducer.sum(), region, EVAL_RESOLUTION).getInfo()['b1']
    waterMaskCount = waterMask.reduceRegion(ee.Reducer.sum(), region, EVAL_RESOLUTION).getInfo()['b1']
    if waterMaskCount == 0: # Can't do much without the water mask!
        return 1.0 # Give it the benefit of the doubt.
    waterMaskPercentFill = filledWaterCount / waterMaskCount
    #print 'Water mask percent fill = ' + str(waterMaskPercentFill)
    if waterMaskPercentFill < MIN_PERCENT_MASK_FILL:
        return 0.0
    # Count up the number of islands smaller than a certain size
    MAX_SPECK_SIZE = 150 # In pixels?
    (waterSpecks, landSpecks) = countNumBlobs(result, region, MAX_SPECK_SIZE, EVAL_RESOLUTION)
    #print 'Found ' + str(waterSpecks) + ' water specks'
    # Count up the number of islands in the water mask -> Only need to do this once!
    (waterMaskSpecks, landMaskSpecks) = countNumBlobs(waterMask, region, MAX_SPECK_SIZE, EVAL_RESOLUTION)
    #print 'Found ' + str(waterMaskSpecks) + ' water mask specks'
    # Floods tend to reduce the number of isolated water bodies, not increase them.
    MAX_RATIO = 10
    # NOTE(review): blob counts are ints (len() results), so under Python 2
    # these divisions truncate -- ratios are floored, not exact; also a zero
    # waterMaskSpecks/landMaskSpecks would raise ZeroDivisionError. Confirm.
    waterSpeckRatio = waterSpecks / waterMaskSpecks
    landSpeckRatio = landSpecks / landMaskSpecks
    #print 'waterSpeckRatio = ' + str(waterSpeckRatio)
    #print 'landSpeckRatio = ' + str(landSpeckRatio)
    if (waterSpeckRatio > MAX_RATIO) or (landSpeckRatio > MAX_RATIO):
        return 0
    # At this point all of the pass/fail checks have passed.
    # Compute a final percentage by assessing some penalties:
    # up to 0.4 off for under-filling the permanent water mask ...
    score = 1.0
    penalty = min(max(1.0 - waterMaskPercentFill, 0), 0.4)
    score -= penalty
    # ... and up to 0.3 off for each speck-count ratio above 1.
    penalty = min(max(waterSpeckRatio - 1.0, 0)/10.0, 0.3)
    score -= penalty
    penalty = min(max(landSpeckRatio - 1.0, 0)/10.0, 0.3)
    score -= penalty
    return score
def evaluate_approach(result, ground_truth, region, fractional=False):
    '''Compare result to ground truth in region and compute precision and recall.

       Returns (precision, recall, eval_points, no_truth_result) where
       eval_points is the number of random sample points actually used and
       no_truth_result is currently hard-coded to 0 (see end of function).
       NOTE(review): Python 2 only -- uses print statements and the
       `except Exception,e` syntax.
    '''
    # Restrict ground truth to pixels where both images have valid data.
    ground_truth = ground_truth.mask(ground_truth.mask().And(result.mask()))
    # TODO: Fix this!
    if fractional: # Apply a MODIS pixel sized smoothing kernel ground truth
        ground_truth = ground_truth.convolve(ee.Kernel.square(250, 'meters', True))
    # Correct detections mean water detected in the same location.
    # - This does not include correct non-detections!
    correct = ground_truth.min(result)
    # Keep reducing the evaluation resolution until Earth Engine finishes without timing out
    MIN_EVAL_POINTS = 5000
    eval_points = 60000
    while True:
        try:
            # This probably works now
            #correct_sum = correct.reduceRegion( ee.Reducer.sum(), region, eval_res, 'EPSG:4326' ).getInfo()['b1'] # Correct detections
            #result_sum = result.reduceRegion( ee.Reducer.sum(), region, eval_res, 'EPSG:4326' ).getInfo()['b1'] # Total detections
            #truth_sum = ground_truth.reduceRegion(ee.Reducer.sum(), region, eval_res, 'EPSG:4326' ).getInfo()['b1'] # Total water
            # Evaluate the results at a large number of random sample points
            correct_sum = ee.data.getValue({'image': correct.stats( eval_points, region, 'EPSG:4326').serialize(), 'fields': 'b1'})['properties']['b1']['values']['sum']
            result_sum = ee.data.getValue({'image': result.stats( eval_points, region, 'EPSG:4326').serialize(), 'fields': 'b1'})['properties']['b1']['values']['sum']
            truth_sum = ee.data.getValue({'image': ground_truth.stats(eval_points, region, 'EPSG:4326').serialize(), 'fields': 'b1'})['properties']['b1']['values']['sum']
            break # Quit the loop if the calculations were successful
        except Exception,e: # On failure coarsen the resolution and try again
            print str(e)
            eval_points /= 2
            if eval_points < MIN_EVAL_POINTS:
                # NOTE(review): eval_points has already been halved above, so
                # str(eval_points*2) reports the sample count that just failed.
                raise Exception('Unable to evaluate results at resolution ' + str(eval_points*2))
    # Compute ratios, avoiding divide by zero.
    precision = 1.0 if (result_sum == 0.0) else (correct_sum / result_sum)
    recall = 1.0 if (truth_sum == 0.0) else (correct_sum / truth_sum)
    # Ratios above 1.0 should be impossible; dump the raw sums for debugging.
    if (precision > 1.0) or (recall > 1.0):
        print 'EVALUATION_ERROR'
        print 'correct_sum = ' + str(correct_sum)
        print 'result_sum = ' + str(result_sum)
        print 'truth_sum = ' + str(truth_sum)
    #cmt.mapclient_qt.addToMap(correct, {}, 'CORRECT')
    ## A test of our result evaluation that does not depend on the ground truth!
    #no_truth_result = evaluate_result_quality(result, region)
    no_truth_result = 0 # For now skip calculating this to reduce the computation time
    return (precision, recall, eval_points, no_truth_result)
def evaluate_approach_thread(evaluation_function, result, ground_truth, region, fractional=False):
    '''Computes precision and recall of the given result/ground truth pair, then passes the result to the input function.

       NOTE(review): the name and the module-level `import threading` suggest
       waitForEeResult runs the evaluation asynchronously and calls
       evaluation_function with evaluate_approach's result tuple when done --
       confirm against cmt.util.miscUtilities.waitForEeResult.
    '''
    cmt.util.miscUtilities.waitForEeResult(functools.partial(evaluate_approach, result=result, ground_truth=ground_truth,
                                                             region=region, fractional=fractional), evaluation_function)
| 50.472393 | 172 | 0.674973 |
7959de44fbf68e5a61138f7e58255d806172fff8 | 1,316 | py | Python | azure-mgmt-network/azure/mgmt/network/v2018_02_01/models/express_route_circuits_routes_table_summary_list_result_py3.py | JonathanGailliez/azure-sdk-for-python | f0f051bfd27f8ea512aea6fc0c3212ee9ee0029b | [
"MIT"
] | 1 | 2021-09-07T18:36:04.000Z | 2021-09-07T18:36:04.000Z | azure-mgmt-network/azure/mgmt/network/v2018_02_01/models/express_route_circuits_routes_table_summary_list_result_py3.py | JonathanGailliez/azure-sdk-for-python | f0f051bfd27f8ea512aea6fc0c3212ee9ee0029b | [
"MIT"
] | 2 | 2019-10-02T23:37:38.000Z | 2020-10-02T01:17:31.000Z | azure-mgmt-network/azure/mgmt/network/v2018_02_01/models/express_route_circuits_routes_table_summary_list_result_py3.py | JonathanGailliez/azure-sdk-for-python | f0f051bfd27f8ea512aea6fc0c3212ee9ee0029b | [
"MIT"
] | 1 | 2018-08-28T14:36:47.000Z | 2018-08-28T14:36:47.000Z | # coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
from msrest.serialization import Model
class ExpressRouteCircuitsRoutesTableSummaryListResult(Model):
"""Response for ListRoutesTable associated with the Express Route Circuits
API.
:param value: A list of the routes table.
:type value:
list[~azure.mgmt.network.v2018_02_01.models.ExpressRouteCircuitRoutesTableSummary]
:param next_link: The URL to get the next set of results.
:type next_link: str
"""
_attribute_map = {
'value': {'key': 'value', 'type': '[ExpressRouteCircuitRoutesTableSummary]'},
'next_link': {'key': 'nextLink', 'type': 'str'},
}
def __init__(self, *, value=None, next_link: str=None, **kwargs) -> None:
super(ExpressRouteCircuitsRoutesTableSummaryListResult, self).__init__(**kwargs)
self.value = value
self.next_link = next_link
| 37.6 | 88 | 0.630699 |
7959df01c16494fcc1ea5fab4424bda5c53fff46 | 25,009 | py | Python | btclib/tests/test_ssa.py | giubby84/btclib | 0dd7e4e8ca43451a03b577fd7ec95715a1a21711 | [
"MIT"
] | null | null | null | btclib/tests/test_ssa.py | giubby84/btclib | 0dd7e4e8ca43451a03b577fd7ec95715a1a21711 | [
"MIT"
] | null | null | null | btclib/tests/test_ssa.py | giubby84/btclib | 0dd7e4e8ca43451a03b577fd7ec95715a1a21711 | [
"MIT"
] | null | null | null | #!/usr/bin/env python3
# Copyright (C) 2017-2020 The btclib developers
#
# This file is part of btclib. It is subject to the license terms in the
# LICENSE file found in the top-level directory of this distribution.
#
# No part of btclib including this file, may be copied, modified, propagated,
# or distributed except according to the terms contained in the LICENSE file.
"Tests for `btclib.ssa` module."
import csv
import secrets
from hashlib import sha256 as hf
from os import path
from typing import List
import pytest
from btclib import ssa
from btclib.alias import INF, Point
from btclib.bip32 import BIP32KeyData
from btclib.curve import CURVES, double_mult, mult
from btclib.curvegroup import _mult
from btclib.numbertheory import mod_inv
from btclib.pedersen import second_generator
from btclib.secpoint import bytes_from_point
from btclib.tests.test_curve import low_card_curves
from btclib.utils import int_from_bits
def test_signature() -> None:
    """Round-trip sign/verify plus a battery of failure-mode checks.

    Exercises btclib's BIP340 Schnorr implementation: a valid signature
    must verify in every serialization form, and each malformed input must
    fail `verify` quietly while `assert_as_valid` raises the documented
    error (matched by message).
    """
    ec = CURVES["secp256k1"]
    msg = "Satoshi Nakamoto"
    q, x_Q = ssa.gen_keys(0x01)
    sig = ssa.sign(msg, q)
    # happy path: tuple form, serialized bytes, and hex string all validate
    assert ssa.verify(msg, x_Q, sig)
    assert sig == ssa.deserialize(sig)
    ssa.assert_as_valid(msg, x_Q, sig)
    ssa.assert_as_valid(msg, x_Q, ssa.serialize(*sig))
    ssa.assert_as_valid(msg, x_Q, ssa.serialize(*sig).hex())
    # wrong message: verify returns False, assert_as_valid raises
    msg_fake = "Craig Wright"
    assert not ssa.verify(msg_fake, x_Q, sig)
    err_msg = "signature verification failed"
    with pytest.raises(AssertionError, match=err_msg):
        ssa.assert_as_valid(msg_fake, x_Q, sig)
    # wrong public key (derived from a different private key)
    _, x_Q_fake = ssa.gen_keys(0x02)
    assert not ssa.verify(msg, x_Q_fake, sig)
    err_msg = "y_K is not a quadratic residue"
    with pytest.raises(RuntimeError, match=err_msg):
        ssa.assert_as_valid(msg, x_Q_fake, sig)
    # another wrong public key, failing with a different error
    _, x_Q_fake = ssa.gen_keys(0x4)
    assert not ssa.verify(msg, x_Q_fake, sig)
    err_msg = "signature verification failed"
    with pytest.raises(AssertionError, match=err_msg):
        ssa.assert_as_valid(msg, x_Q_fake, sig)
    # the point at infinity is not a valid BIP340 public key
    err_msg = "not a BIP340 public key"
    with pytest.raises(ValueError, match=err_msg):
        ssa.assert_as_valid(msg, INF, sig)  # type: ignore
    with pytest.raises(ValueError, match=err_msg):
        ssa.point_from_bip340pubkey(INF)  # type: ignore
    # wrong curve: secp224k1 has p % 4 != 3, unsupported by BIP340
    assert not ssa.verify(msg, x_Q, sig, CURVES["secp224k1"], hf)
    err_msg = "field prime is not equal to 3 mod 4: "
    with pytest.raises(ValueError, match=err_msg):
        ssa.assert_as_valid(msg, x_Q, sig, CURVES["secp224k1"], hf)
    # malformed signature: 3-tuple instead of (r, s)
    sig_fake = (sig[0], sig[1], sig[1])
    assert not ssa.verify(msg, x_Q, sig_fake)  # type: ignore
    err_msg = "too many values to unpack "
    with pytest.raises(ValueError, match=err_msg):
        ssa.assert_as_valid(msg, x_Q, sig_fake)  # type: ignore
    # r out of range (>= field prime)
    sig_invalid = ec.p, sig[1]
    assert not ssa.verify(msg, x_Q, sig_invalid)
    err_msg = "x-coordinate not in 0..p-1: "
    with pytest.raises(ValueError, match=err_msg):
        ssa.assert_as_valid(msg, x_Q, sig_invalid)
    # s out of range (>= group order)
    sig_invalid = sig[0], ec.p
    assert not ssa.verify(msg, x_Q, sig_invalid)
    err_msg = "scalar s not in 0..n-1: "
    with pytest.raises(ValueError, match=err_msg):
        ssa.assert_as_valid(msg, x_Q, sig_invalid)
    # the low-level _-prefixed API requires an exactly 32-byte message hash
    m_fake = b"\x00" * 31
    err_msg = "invalid size: 31 bytes instead of 32"
    with pytest.raises(ValueError, match=err_msg):
        ssa._assert_as_valid(m_fake, x_Q, sig)
    with pytest.raises(ValueError, match=err_msg):
        ssa._sign(m_fake, q)
    # private key must be in 1..n-1
    err_msg = "private key not in 1..n-1: "
    with pytest.raises(ValueError, match=err_msg):
        ssa.sign(msg, 0)
    # ephemeral key not in 1..n-1
    err_msg = "private key not in 1..n-1: "
    with pytest.raises(ValueError, match=err_msg):
        ssa.sign(msg, q, 0)
    with pytest.raises(ValueError, match=err_msg):
        ssa.sign(msg, q, ec.n)
    # module-private helper (no name mangling applies outside a class body)
    err_msg = "invalid zero challenge"
    with pytest.raises(ValueError, match=err_msg):
        ssa.__recover_pubkey(0, sig[0], sig[1], ec)
def test_bip340_vectors() -> None:
    """Run the official BIP340 (Schnorr) test vectors.
    https://github.com/bitcoin/bips/blob/master/bip-0340/test-vectors.csv
    """
    csv_path = path.join(path.dirname(__file__), "test_data",
                         "bip340_test_vectors.csv")
    with open(csv_path, newline="") as csv_file:
        rows = csv.reader(csv_file)
        # skip the column headers, failing if there are not exactly 7 columns
        _, _, _, _, _, _, _ = next(rows)
        for index, seckey, pubkey, m, sig, result, comment in rows:
            err_msg = f"Test vector #{int(index)}"
            if seckey != "":
                # key generation and signing must reproduce the vector
                pubkey_actual = ssa.gen_keys(seckey)[1]
                assert pubkey == hex(pubkey_actual).upper()[2:], err_msg
                sig_actual = ssa.serialize(*ssa._sign(m, seckey))
                assert sig == sig_actual.hex().upper(), err_msg
            if comment:
                err_msg += ": " + comment
            # TODO what's wrong with xor-ing ?
            # assert (result == "TRUE") ^ ssa._verify(m, pubkey, sig), err_msg
            if result == "TRUE":
                assert ssa._verify(m, pubkey, sig), err_msg
            else:
                assert not ssa._verify(m, pubkey, sig), err_msg
def test_point_from_bip340pubkey() -> None:
    """Every supported public key representation must map to the same point."""
    q, x_Q = ssa.gen_keys()
    P = mult(q)
    x_Q_bytes = x_Q.to_bytes(32, byteorder="big")
    # x-only key as int, bytes, and hex-string
    assert ssa.point_from_bip340pubkey(x_Q) == P
    assert ssa.point_from_bip340pubkey(x_Q_bytes) == P
    assert ssa.point_from_bip340pubkey(x_Q_bytes.hex()) == P
    # tuple Point
    assert ssa.point_from_bip340pubkey(P) == P
    # SEC keys: compressed (33 bytes) and uncompressed (65 bytes),
    # both raw bytes and hex-string
    for compressed in (True, False):
        sec_key = bytes_from_point(P, compressed=compressed)
        assert ssa.point_from_bip340pubkey(sec_key) == P
        assert ssa.point_from_bip340pubkey(sec_key.hex()) == P
    xpub_data = BIP32KeyData.deserialize(
        "xpub6H1LXWLaKsWFhvm6RVpEL9P4KfRZSW7abD2ttkWP3SSQvnyA8FSVqNTEcYFgJS2UaFcxupHiYkro49S8yGasTvXEYBVPamhGW6cFJodrTHy"
    )
    xpub_data.key = bytes_from_point(P)
    # BIP32KeyData object
    assert ssa.point_from_bip340pubkey(xpub_data) == P
    # BIP32 key, serialized (bytes) and decoded (str)
    xpub = xpub_data.serialize()
    assert ssa.point_from_bip340pubkey(xpub) == P
    assert ssa.point_from_bip340pubkey(xpub.decode("ascii")) == P
def test_low_cardinality() -> None:
    "test low-cardinality curves for all msg/key pairs."
    # ec.n has to be prime to sign
    test_curves = [
        low_card_curves["ec13_11"],
        low_card_curves["ec13_19"],
        low_card_curves["ec17_13"],
        low_card_curves["ec17_23"],
        low_card_curves["ec19_13"],
        low_card_curves["ec19_23"],
        low_card_curves["ec23_19"],
        low_card_curves["ec23_31"],
    ]
    # only low cardinality test curves or it would take forever
    for ec in test_curves:
        # BIP340 Schnorr only applies to curve whose prime p = 3 %4
        if not ec.pIsThreeModFour:
            err_msg = "field prime is not equal to 3 mod 4: "
            with pytest.raises(ValueError, match=err_msg):
                ssa._sign(32 * b"\x00", 1, None, ec)
            continue
        for q in range(1, ec.n // 2):  # all possible private keys
            QJ = _mult(q, ec.GJ, ec)  # public key
            x_Q = ec._x_aff_from_jac(QJ)
            # if Q has no square y, negate the key pair (BIP340 convention)
            if not ec.has_square_y(QJ):
                q = ec.n - q
                QJ = ec.negate_jac(QJ)
            for k in range(1, ec.n // 2):  # all possible ephemeral keys
                RJ = _mult(k, ec.GJ, ec)
                r = ec._x_aff_from_jac(RJ)
                # same square-y normalization for the nonce point R
                if not ec.has_square_y(RJ):
                    k = ec.n - k
                for e in range(ec.n):  # all possible challenges
                    # expected s = k + e*q (mod n)
                    s = (k + e * q) % ec.n
                    sig = ssa.__sign(e, q, k, r, ec)
                    assert (r, s) == sig
                    # valid signature must validate
                    ssa.__assert_as_valid(e, QJ, r, s, ec)
                    # if e == 0 then the sig is valid for all {q, Q}
                    # no public key can be recovered
                    if e == 0:
                        err_msg = "invalid zero challenge"
                        with pytest.raises(ValueError, match=err_msg):
                            ssa.__recover_pubkey(e, r, s, ec)
                    else:
                        assert x_Q == ssa.__recover_pubkey(e, r, s, ec)
def test_crack_prvkey() -> None:
    """Reusing the same nonce for two messages leaks the private key."""
    ec = CURVES["secp256k1"]
    q = 0x19E14A7B6A307F426A94F8114701E7C8E774E7F9A47E2C2035DB29A206321725
    x_Q = mult(q)[0]
    msg1 = hf("Paolo is afraid of ephemeral random numbers".encode()).digest()
    msg2 = hf("and Paolo is right to be afraid".encode()).digest()
    k, _ = ssa._det_nonce(msg1, q)
    sig1 = ssa._sign(msg1, q, k)
    # second signature deliberately reuses the very same nonce k
    sig2 = ssa._sign(msg2, q, k)
    # the attack recovers q and k up to sign
    qc, kc = ssa._crack_prvkey(msg1, sig1, msg2, sig2, x_Q)
    assert q in (qc, ec.n - qc)
    assert k in (kc, ec.n - kc)
    # signatures with different r values cannot be exploited
    with pytest.raises(ValueError, match="not the same r in signatures"):
        ssa._crack_prvkey(msg1, sig1, msg2, (16, sig1[1]), x_Q)
    # identical signatures give no extra information
    with pytest.raises(ValueError, match="identical signatures"):
        ssa._crack_prvkey(msg1, sig1, msg1, sig1, x_Q)
def test_batch_validation() -> None:
    """Batch verification: a valid batch passes; any tampering or
    length mismatch fails / raises."""
    ec = CURVES["secp256k1"]
    hsize = hf().digest_size
    hlen = hsize * 8
    ms = []
    Qs = []
    sigs = []
    ms.append(secrets.randbits(hlen).to_bytes(hsize, "big"))
    q = 1 + secrets.randbelow(ec.n - 1)
    # bytes version
    Qs.append(mult(q, ec.G, ec)[0])
    sigs.append(ssa._sign(ms[0], q, None, ec, hf))
    # test with only 1 sig
    ssa._batch_verify(ms, Qs, sigs, ec, hf)
    # grow the batch with three more random (msg, key, sig) triplets
    for _ in range(3):
        m = secrets.randbits(hlen).to_bytes(hsize, "big")
        ms.append(m)
        q = 1 + secrets.randbelow(ec.n - 1)
        # Point version
        Qs.append(mult(q, ec.G, ec)[0])
        sigs.append(ssa._sign(m, q, None, ec, hf))
    ssa._batch_verify(ms, Qs, sigs, ec, hf)
    assert ssa.batch_verify(ms, Qs, sigs, ec, hf)
    # append a mismatched triplet: sig[1] does not sign ms[0] for Qs[0]
    ms.append(ms[0])
    sigs.append(sigs[1])
    Qs.append(Qs[0])
    assert not ssa.batch_verify(ms, Qs, sigs, ec, hf)
    err_msg = "signature verification precondition failed"
    with pytest.raises(ValueError, match=err_msg):
        ssa._batch_verify(ms, Qs, sigs, ec, hf)
    sigs[-1] = sigs[0]  # valid again
    # truncated message: wrong size must raise
    ms[-1] = ms[0][:-1]
    err_msg = "invalid size: 31 bytes instead of 32"
    with pytest.raises(ValueError, match=err_msg):
        ssa._batch_verify(ms, Qs, sigs, ec, hf)
    ms[-1] = ms[0]  # valid again
    ms.append(ms[0])  # add extra message
    err_msg = "mismatch between number of pubkeys "
    with pytest.raises(ValueError, match=err_msg):
        ssa._batch_verify(ms, Qs, sigs, ec, hf)
    ms.pop()  # valid again
    sigs.append(sigs[0])  # add extra sig
    err_msg = "mismatch between number of pubkeys "
    with pytest.raises(ValueError, match=err_msg):
        ssa._batch_verify(ms, Qs, sigs, ec, hf)
    sigs.pop()  # valid again
    # batch verification also requires p = 3 mod 4
    err_msg = "field prime is not equal to 3 mod 4: "
    with pytest.raises(ValueError, match=err_msg):
        ssa._batch_verify(ms, Qs, sigs, CURVES["secp224k1"], hf)
def test_musig() -> None:
    """testing 3-of-3 MuSig.

    https://github.com/ElementsProject/secp256k1-zkp/blob/secp256k1-zkp/src/modules/musig/musig.md
    https://blockstream.com/2019/02/18/musig-a-new-multisignature-standard/
    https://eprint.iacr.org/2018/068
    https://blockstream.com/2018/01/23/musig-key-aggregation-schnorr-signatures.html
    https://medium.com/@snigirev.stepan/how-schnorr-signatures-may-improve-bitcoin-91655bcb4744
    """
    ec = CURVES["secp256k1"]
    m = hf(b"message to sign").digest()
    # the signers private and public keys,
    # including both the curve Point and the BIP340-Schnorr public key
    q1, x_Q1_int = ssa.gen_keys()
    x_Q1 = x_Q1_int.to_bytes(ec.psize, "big")
    q2, x_Q2_int = ssa.gen_keys()
    x_Q2 = x_Q2_int.to_bytes(ec.psize, "big")
    q3, x_Q3_int = ssa.gen_keys()
    x_Q3 = x_Q3_int.to_bytes(ec.psize, "big")
    # (non interactive) key setup
    # this is MuSig core: the rest is just Schnorr signature additivity
    # 1. lexicographic sorting of public keys
    keys: List[bytes] = list()
    keys.append(x_Q1)
    keys.append(x_Q2)
    keys.append(x_Q3)
    keys.sort()
    # 2. coefficients
    prefix = b"".join(keys)
    a1 = int_from_bits(hf(prefix + x_Q1).digest(), ec.nlen) % ec.n
    a2 = int_from_bits(hf(prefix + x_Q2).digest(), ec.nlen) % ec.n
    a3 = int_from_bits(hf(prefix + x_Q3).digest(), ec.nlen) % ec.n
    # 3. aggregated public key
    Q1 = mult(q1)
    Q2 = mult(q2)
    Q3 = mult(q3)
    Q = ec.add(double_mult(a1, Q1, a2, Q2), mult(a3, Q3))
    # negating every coefficient a_i negates Q = sum(a_i * Q_i),
    # enforcing the square-y convention on the aggregated key
    if not ec.has_square_y(Q):
        # print("Q has been negated")
        a1 = ec.n - a1  # pragma: no cover
        a2 = ec.n - a2  # pragma: no cover
        a3 = ec.n - a3  # pragma: no cover
    # ready to sign: nonces and nonce commitments
    k1, _ = ssa.gen_keys()
    K1 = mult(k1)
    k2, _ = ssa.gen_keys()
    K2 = mult(k2)
    k3, _ = ssa.gen_keys()
    K3 = mult(k3)
    # exchange {K_i} (interactive)
    # computes s_i (non interactive)
    # WARNING: signers must exchange the nonces commitments {K_i}
    # before sharing {s_i}
    # same for all signers
    K = ec.add(ec.add(K1, K2), K3)
    # same square-y normalization for the aggregated nonce point
    if not ec.has_square_y(K):
        k1 = ec.n - k1  # pragma: no cover
        k2 = ec.n - k2  # pragma: no cover
        k3 = ec.n - k3  # pragma: no cover
    r = K[0]
    e = ssa._challenge(m, Q[0], r, ec, hf)
    # partial signatures: s_i = k_i + e * a_i * q_i (mod n)
    s1 = (k1 + e * a1 * q1) % ec.n
    s2 = (k2 + e * a2 * q2) % ec.n
    s3 = (k3 + e * a3 * q3) % ec.n
    # exchange s_i (interactive)
    # finalize signature (non interactive)
    s = (s1 + s2 + s3) % ec.n
    sig = r, s
    # check signature is valid
    ssa._assert_as_valid(m, Q[0], sig, ec, hf)
def test_threshold() -> None:
    """testing 2-of-3 threshold signature (Pedersen secret sharing).

    Three dealers each distribute shares of their secret with Pedersen
    commitments; afterwards any two of the three signers (here: one and
    three) can produce a valid Schnorr signature for the aggregated key.
    """
    ec = CURVES["secp256k1"]
    # parameters
    m = 2
    H = second_generator(ec, hf)
    # FIRST PHASE: key pair generation ###################################
    # 1.1 signer one acting as the dealer
    commits1: List[Point] = list()
    q1, _ = ssa.gen_keys()
    q1_prime, _ = ssa.gen_keys()
    commits1.append(double_mult(q1_prime, H, q1, ec.G))
    # sharing polynomials
    f1 = [q1]
    f1_prime = [q1_prime]
    for i in range(1, m):
        f1.append(ssa.gen_keys()[0])
        f1_prime.append(ssa.gen_keys()[0])
        commits1.append(double_mult(f1_prime[i], H, f1[i], ec.G))
    # shares of the secret: the polynomials evaluated at the signer index
    alpha12 = 0  # share of q1 belonging to signer two
    alpha12_prime = 0
    alpha13 = 0  # share of q1 belonging to signer three
    alpha13_prime = 0
    for i in range(m):
        alpha12 += (f1[i] * pow(2, i)) % ec.n
        alpha12_prime += (f1_prime[i] * pow(2, i)) % ec.n
        alpha13 += (f1[i] * pow(3, i)) % ec.n
        alpha13_prime += (f1_prime[i] * pow(3, i)) % ec.n
    # signer two verifies consistency of his share
    RHS = INF
    for i in range(m):
        RHS = ec.add(RHS, mult(pow(2, i), commits1[i]))
    t = double_mult(alpha12_prime, H, alpha12, ec.G)
    assert t == RHS, "signer one is cheating"
    # signer three verifies consistency of his share
    RHS = INF
    for i in range(m):
        RHS = ec.add(RHS, mult(pow(3, i), commits1[i]))
    t = double_mult(alpha13_prime, H, alpha13, ec.G)
    assert t == RHS, "signer one is cheating"
    # 1.2 signer two acting as the dealer
    commits2: List[Point] = list()
    q2, _ = ssa.gen_keys()
    q2_prime, _ = ssa.gen_keys()
    commits2.append(double_mult(q2_prime, H, q2, ec.G))
    # sharing polynomials
    f2 = [q2]
    f2_prime = [q2_prime]
    for i in range(1, m):
        f2.append(ssa.gen_keys()[0])
        f2_prime.append(ssa.gen_keys()[0])
        commits2.append(double_mult(f2_prime[i], H, f2[i], ec.G))
    # shares of the secret
    alpha21 = 0  # share of q2 belonging to signer one
    alpha21_prime = 0
    alpha23 = 0  # share of q2 belonging to signer three
    alpha23_prime = 0
    for i in range(m):
        alpha21 += (f2[i] * pow(1, i)) % ec.n
        alpha21_prime += (f2_prime[i] * pow(1, i)) % ec.n
        alpha23 += (f2[i] * pow(3, i)) % ec.n
        alpha23_prime += (f2_prime[i] * pow(3, i)) % ec.n
    # signer one verifies consistency of his share
    RHS = INF
    for i in range(m):
        RHS = ec.add(RHS, mult(pow(1, i), commits2[i]))
    t = double_mult(alpha21_prime, H, alpha21, ec.G)
    assert t == RHS, "signer two is cheating"
    # signer three verifies consistency of his share
    RHS = INF
    for i in range(m):
        RHS = ec.add(RHS, mult(pow(3, i), commits2[i]))
    t = double_mult(alpha23_prime, H, alpha23, ec.G)
    assert t == RHS, "signer two is cheating"
    # 1.3 signer three acting as the dealer
    commits3: List[Point] = list()
    q3, _ = ssa.gen_keys()
    q3_prime, _ = ssa.gen_keys()
    commits3.append(double_mult(q3_prime, H, q3, ec.G))
    # sharing polynomials
    f3 = [q3]
    f3_prime = [q3_prime]
    for i in range(1, m):
        f3.append(ssa.gen_keys()[0])
        f3_prime.append(ssa.gen_keys()[0])
        commits3.append(double_mult(f3_prime[i], H, f3[i], ec.G))
    # shares of the secret
    alpha31 = 0  # share of q3 belonging to signer one
    alpha31_prime = 0
    alpha32 = 0  # share of q3 belonging to signer two
    alpha32_prime = 0
    for i in range(m):
        alpha31 += (f3[i] * pow(1, i)) % ec.n
        alpha31_prime += (f3_prime[i] * pow(1, i)) % ec.n
        alpha32 += (f3[i] * pow(2, i)) % ec.n
        alpha32_prime += (f3_prime[i] * pow(2, i)) % ec.n
    # signer one verifies consistency of his share
    RHS = INF
    for i in range(m):
        RHS = ec.add(RHS, mult(pow(1, i), commits3[i]))
    t = double_mult(alpha31_prime, H, alpha31, ec.G)
    assert t == RHS, "signer three is cheating"
    # signer two verifies consistency of his share
    RHS = INF
    for i in range(m):
        RHS = ec.add(RHS, mult(pow(2, i), commits3[i]))
    t = double_mult(alpha32_prime, H, alpha32, ec.G)
    assert t == RHS, "signer three is cheating"
    # shares of the secret key q = q1 + q2 + q3
    alpha1 = (alpha21 + alpha31) % ec.n
    alpha2 = (alpha12 + alpha32) % ec.n
    alpha3 = (alpha13 + alpha23) % ec.n
    for i in range(m):
        alpha1 += (f1[i] * pow(1, i)) % ec.n
        alpha2 += (f2[i] * pow(2, i)) % ec.n
        alpha3 += (f3[i] * pow(3, i)) % ec.n
    # 1.4 it's time to recover the public key
    # each participant i = 1, 2, 3 shares Qi as follows
    # Q = Q1 + Q2 + Q3 = (q1 + q2 + q3) G
    A1: List[Point] = list()
    A2: List[Point] = list()
    A3: List[Point] = list()
    for i in range(m):
        A1.append(mult(f1[i]))
        A2.append(mult(f2[i]))
        A3.append(mult(f3[i]))
    # signer one checks others' values
    RHS2 = INF
    RHS3 = INF
    for i in range(m):
        RHS2 = ec.add(RHS2, mult(pow(1, i), A2[i]))
        RHS3 = ec.add(RHS3, mult(pow(1, i), A3[i]))
    assert mult(alpha21) == RHS2, "signer two is cheating"
    assert mult(alpha31) == RHS3, "signer three is cheating"
    # signer two checks others' values
    RHS1 = INF
    RHS3 = INF
    for i in range(m):
        RHS1 = ec.add(RHS1, mult(pow(2, i), A1[i]))
        RHS3 = ec.add(RHS3, mult(pow(2, i), A3[i]))
    assert mult(alpha12) == RHS1, "signer one is cheating"
    assert mult(alpha32) == RHS3, "signer three is cheating"
    # signer three checks others' values
    RHS1 = INF
    RHS2 = INF
    for i in range(m):
        RHS1 = ec.add(RHS1, mult(pow(3, i), A1[i]))
        RHS2 = ec.add(RHS2, mult(pow(3, i), A2[i]))
    assert mult(alpha13) == RHS1, "signer one is cheating"
    assert mult(alpha23) == RHS2, "signer two is cheating"
    # commitment at the global sharing polynomial
    A: List[Point] = list()
    for i in range(m):
        A.append(ec.add(A1[i], ec.add(A2[i], A3[i])))
    # aggregated public key
    Q = A[0]
    if not ec.has_square_y(Q):
        # print('Q has been negated')
        A[1] = ec.negate(A[1])  # pragma: no cover
        alpha1 = ec.n - alpha1  # pragma: no cover
        alpha2 = ec.n - alpha2  # pragma: no cover
        alpha3 = ec.n - alpha3  # pragma: no cover
        Q = ec.negate(Q)  # pragma: no cover
    # SECOND PHASE: generation of the nonces' pair ######################
    # Assume signer one and three want to sign
    msg = "message to sign"
    # 2.1 signer one acting as the dealer
    commits1 = []
    k1, _ = ssa.det_nonce(msg, q1, ec, hf)
    k1_prime, _ = ssa.det_nonce(msg, q1_prime, ec, hf)
    commits1.append(double_mult(k1_prime, H, k1, ec.G))
    # sharing polynomials
    f1 = [k1]
    f1_prime = [k1_prime]
    for i in range(1, m):
        f1.append(ssa.gen_keys()[0])
        f1_prime.append(ssa.gen_keys()[0])
        commits1.append(double_mult(f1_prime[i], H, f1[i], ec.G))
    # shares of the secret
    beta13 = 0  # share of k1 belonging to signer three
    beta13_prime = 0
    for i in range(m):
        beta13 += (f1[i] * pow(3, i)) % ec.n
        beta13_prime += (f1_prime[i] * pow(3, i)) % ec.n
    # signer three verifies consistency of his share
    RHS = INF
    for i in range(m):
        RHS = ec.add(RHS, mult(pow(3, i), commits1[i]))
    t = double_mult(beta13_prime, H, beta13, ec.G)
    assert t == RHS, "signer one is cheating"
    # 2.2 signer three acting as the dealer
    commits3 = []
    k3, _ = ssa.det_nonce(msg, q3, ec, hf)
    k3_prime, _ = ssa.det_nonce(msg, q3_prime, ec, hf)
    commits3.append(double_mult(k3_prime, H, k3, ec.G))
    # sharing polynomials
    f3 = [k3]
    f3_prime = [k3_prime]
    for i in range(1, m):
        f3.append(ssa.gen_keys()[0])
        f3_prime.append(ssa.gen_keys()[0])
        commits3.append(double_mult(f3_prime[i], H, f3[i], ec.G))
    # shares of the secret
    beta31 = 0  # share of k3 belonging to signer one
    beta31_prime = 0
    for i in range(m):
        beta31 += (f3[i] * pow(1, i)) % ec.n
        beta31_prime += (f3_prime[i] * pow(1, i)) % ec.n
    # signer one verifies consistency of his share
    RHS = INF
    for i in range(m):
        RHS = ec.add(RHS, mult(pow(1, i), commits3[i]))
    t = double_mult(beta31_prime, H, beta31, ec.G)
    assert t == RHS, "signer three is cheating"
    # 2.3 shares of the secret nonce
    beta1 = beta31 % ec.n
    beta3 = beta13 % ec.n
    for i in range(m):
        beta1 += (f1[i] * pow(1, i)) % ec.n
        beta3 += (f3[i] * pow(3, i)) % ec.n
    # 2.4 it's time to recover the public nonce
    # each participant i = 1, 3 shares Qi as follows
    B1: List[Point] = list()
    B3: List[Point] = list()
    for i in range(m):
        B1.append(mult(f1[i]))
        B3.append(mult(f3[i]))
    # signer one checks values from signer three
    RHS3 = INF
    for i in range(m):
        RHS3 = ec.add(RHS3, mult(pow(1, i), B3[i]))
    assert mult(beta31) == RHS3, "signer three is cheating"
    # signer three checks values from signer one
    RHS1 = INF
    for i in range(m):
        RHS1 = ec.add(RHS1, mult(pow(3, i), B1[i]))
    assert mult(beta13) == RHS1, "signer one is cheating"
    # commitment at the global sharing polynomial
    B: List[Point] = list()
    for i in range(m):
        B.append(ec.add(B1[i], B3[i]))
    # aggregated public nonce
    K = B[0]
    if not ec.has_square_y(K):
        # print('K has been negated')
        B[1] = ec.negate(B[1])  # pragma: no cover
        beta1 = ec.n - beta1  # pragma: no cover
        beta3 = ec.n - beta3  # pragma: no cover
        K = ec.negate(K)  # pragma: no cover
    # PHASE THREE: signature generation ###
    # partial signatures
    e = ssa.challenge(msg, Q[0], K[0], ec, hf)
    gamma1 = (beta1 + e * alpha1) % ec.n
    gamma3 = (beta3 + e * alpha3) % ec.n
    # each participant verifies the other partial signatures
    # signer one
    RHS3 = ec.add(K, mult(e, Q))
    for i in range(1, m):
        temp = double_mult(pow(3, i), B[i], e * pow(3, i), A[i])
        RHS3 = ec.add(RHS3, temp)
    assert mult(gamma3) == RHS3, "signer three is cheating"
    # signer three
    RHS1 = ec.add(K, mult(e, Q))
    for i in range(1, m):
        temp = double_mult(pow(1, i), B[i], e * pow(1, i), A[i])
        RHS1 = ec.add(RHS1, temp)
    assert mult(gamma1) == RHS1, "signer one is cheating"
    # PHASE FOUR: aggregating the signature ###
    # Lagrange interpolation coefficients (at zero) for the points {1, 3}
    omega1 = 3 * mod_inv(3 - 1, ec.n) % ec.n
    omega3 = 1 * mod_inv(1 - 3, ec.n) % ec.n
    sigma = (gamma1 * omega1 + gamma3 * omega3) % ec.n
    sig = K[0], sigma
    assert ssa.verify(msg, Q[0], sig)
    # ADDITIONAL PHASE: reconstruction of the private key ###
    secret = (omega1 * alpha1 + omega3 * alpha3) % ec.n
    assert (q1 + q2 + q3) % ec.n in (secret, ec.n - secret)
| 35.125 | 121 | 0.604103 |
7959e2bda4fa28ff6360907eb4bce7266d39d13d | 747 | py | Python | code/bubble_sort.py | Rustam-Z/data-structures-and-algorithms | 0ed253c433198fb6fa6d609a806f4ae7e820af06 | [
"MIT"
] | 6 | 2021-09-19T11:01:27.000Z | 2021-11-11T08:53:31.000Z | code/bubble_sort.py | Rustam-Z/data-structures-and-algorithms | 0ed253c433198fb6fa6d609a806f4ae7e820af06 | [
"MIT"
] | null | null | null | code/bubble_sort.py | Rustam-Z/data-structures-and-algorithms | 0ed253c433198fb6fa6d609a806f4ae7e820af06 | [
"MIT"
] | 1 | 2021-12-20T13:25:12.000Z | 2021-12-20T13:25:12.000Z | """
Optimized Bubble sort algorithm implementation.
Time Complexity: O(n^2)
Best O(n)
Worst O(n^2)
Average O(n^2)
Space Complexity: O(1)
"""
def bubble_sort(array):
    """Sort `array` in place in ascending order and return it.

    Early-exits as soon as a full pass performs no swap, which gives the
    O(n) best case on already-sorted input; O(n^2) worst/average time,
    O(1) extra space.
    """
    size = len(array)
    # the unsorted region shrinks from the right after every pass
    for last_unsorted in range(size - 1, 0, -1):
        dirty = False
        for idx in range(last_unsorted):
            # change > to < to sort in descending order
            if array[idx] > array[idx + 1]:
                array[idx], array[idx + 1] = array[idx + 1], array[idx]
                dirty = True
        # a pass without swaps means the array is already sorted
        if not dirty:
            break
    return array
if __name__ == "__main__":
data = [-2, 45, 0, 11, -9, 32, 43, 0, 92]
bubble_sort(data)
print(data)
| 23.34375 | 63 | 0.547523 |
7959e3cfe747cafab34b11a5c3faca4ee5ace902 | 21,547 | py | Python | tests/test_mhtml_parse.py | Querela/MHTML | b814ada1d1980cade05f47339625fdd61036bbb0 | [
"MIT"
] | 7 | 2019-06-11T14:57:46.000Z | 2022-01-29T18:29:52.000Z | tests/test_mhtml_parse.py | Querela/MHTML | b814ada1d1980cade05f47339625fdd61036bbb0 | [
"MIT"
] | 1 | 2020-12-18T00:16:28.000Z | 2020-12-18T00:16:28.000Z | tests/test_mhtml_parse.py | Querela/MHTML | b814ada1d1980cade05f47339625fdd61036bbb0 | [
"MIT"
] | 2 | 2020-01-04T01:19:56.000Z | 2021-04-25T18:54:04.000Z | # pylint: disable=missing-docstring,invalid-name
# pylint: disable=protected-access
import pytest
import mhtml
def test_get_content_type():
    """get_content_type: extract the mime type from a (resource) header."""
    # verbose construction via item assignment
    headers = mhtml.ResourceHeader()
    headers['Content-Type'] = 'text/html'
    assert mhtml.get_content_type(headers) == 'text/html'
    # header name lookup is case insensitive
    headers = mhtml.ResourceHeader([('conTent-TyPe', 'text/html')])
    assert mhtml.get_content_type(headers) == 'text/html'
    # parameters after ';' are stripped (multipart/related)
    headers = mhtml.ResourceHeader([('conTent-TyPe',
                                     'multipart/related;\r\n\t...')])
    assert mhtml.get_content_type(headers) == 'multipart/related'
    # empty headers -> None
    assert mhtml.get_content_type(mhtml.ResourceHeader()) is None
    # no headers object at all
    with pytest.raises(AttributeError):
        mhtml.get_content_type(None)
    # plain dicts also work, but their keys are case sensitive
    assert mhtml.get_content_type({'Content-Type': 'text/abc'}) == 'text/abc'
    assert mhtml.get_content_type({'conTent-TyPe': 'text/abc'}) is None
def test_get_boundary():
    """get_boundary: only a multipart content-type yields a boundary."""
    # no headers object at all
    with pytest.raises(AttributeError):
        mhtml.get_boundary(None)
    # no content-type header
    assert mhtml.get_boundary(mhtml.ResourceHeader()) is None
    def boundary_of(ctype):
        # header name lookup is case insensitive, see test_get_content_type
        headers = mhtml.ResourceHeader([('conTent-TyPe', ctype)])
        return mhtml.get_boundary(headers)
    # not multipart: no boundary, even if one is declared
    assert boundary_of('text/html') is None
    assert boundary_of('text/html;\r\n\tabc\r\n\tboundary="'
                       '---test-boundary---'
                       '"') is None
    # multipart but without a boundary declaration
    assert boundary_of('multipart/related;\r\n\tabc\r\n'
                       '\tnothing-here') is None
    # multipart with a declared boundary
    assert boundary_of('multipart/related;\r\n\tabc\r\n\tboundary="'
                       '---test-boundary---'
                       '"') == '---test-boundary---'
def test_make_filename():
    """make_filename: derive a file name from the Content-Location header,
    guessing the extension from Content-Type or the default when needed."""
    # no headers given: fall back to the default
    assert mhtml.make_filename(None, default='abc') == 'abc'
    # empty header / no Content-Location: default again
    assert mhtml.make_filename(mhtml.ResourceHeader(), default='abd') == 'abd'
    assert mhtml.make_filename(mhtml.ResourceHeader([('CH', 'CV')]),
                               default='abd') == 'abd'
    # assume we have extensions
    mock_headers = mhtml.ResourceHeader()
    mock_headers['Content-Location'] = 'proto://path/to/file.ext'
    assert mhtml.make_filename(mock_headers,
                               guess_extension=False) == 'file.ext'
    # folder is prepended when given
    assert mhtml.make_filename(mock_headers, folder='abc',
                               guess_extension=False) == 'abc/file.ext'
    assert mhtml.make_filename(mock_headers,
                               guess_extension=True) == 'file.ext'
    assert mhtml.make_filename(mock_headers) == 'file.ext'
    # test guessing extensions (location without extension)
    del mock_headers['Content-Location']
    mock_headers['Content-Location'] = 'proto://path/to/file'
    assert mhtml.make_filename(mock_headers, default='abc.hhh') == 'file.hhh'
    # if not extension, then .bin ?
    assert mhtml.make_filename(mock_headers, default=None) == 'file.bin'
    assert mhtml.make_filename(mock_headers, default='ooo') == 'file.bin'
    assert mhtml.make_filename(mock_headers, default='lolo.olo',
                               ext_from_default=True) == 'file.olo'
    # add content-type: its subtype wins over the default extension
    mock_headers['Content-Type'] = 'myster/lexi'
    assert mhtml.make_filename(mock_headers, default='ooo.hhh') == 'file.lexi'
    assert mhtml.make_filename(mock_headers, folder='ddd/bbb/',
                               default='ooo.hhh') == 'ddd/bbb/file.lexi'
    # content-type without a '/' is used as the extension as-is
    del mock_headers['Content-Type']
    mock_headers['Content-Type'] = 'mystery'
    assert mhtml.make_filename(mock_headers) == 'file.mystery'
    # force use of default extension
    del mock_headers['Content-Location']
    mock_headers['Content-Location'] = 'proto://path/to/file'
    assert mhtml.make_filename(mock_headers, default='lolo.olo',
                               ext_from_default=True) == 'file.olo'
def test_make_uniq_filename(monkeypatch):
    """make_uniq_filename: append a counter until the name is unused."""
    import os.path
    # pretend only 'abc' exists on disk
    monkeypatch.setattr(os.path, 'exists', lambda fn: fn == 'abc')
    assert mhtml.make_uniq_filename('abc', pre_dup_str='dpp_') == 'abc.dpp_1'
    assert mhtml.make_uniq_filename('abc', pre_dup_str='') == 'abc.1'
    assert mhtml.make_uniq_filename('abc', pre_dup_str=None) == 'abc.1'
    # for names with an extension the counter goes before the extension
    name_with_ext = '/kljklk/jkllj/abcd.bi'
    monkeypatch.setattr(os.path, 'exists', lambda fn: fn == name_with_ext)
    assert mhtml.make_uniq_filename(name_with_ext, pre_dup_str=None) \
        == name_with_ext[:-2] + '1.bi'
    # first numbered candidate is also taken: counter is incremented
    monkeypatch.setattr(os.path, 'exists',
                        lambda fn: fn in ('abc', 'abc.dpd_1'))
    assert mhtml.make_uniq_filename('abc', pre_dup_str='dpd_') == 'abc.dpd_2'
    # nothing exists: the name is returned unchanged
    monkeypatch.setattr(os.path, 'exists', lambda _: False)
    assert mhtml.make_uniq_filename('abc', pre_dup_str='dpd_') == 'abc'
    assert mhtml.make_uniq_filename('abcd', pre_dup_str='dpd_') == 'abcd'
# ---------------------------------------------------------------------------
def test_find_next_linebreak():
    """find_next_linebreak: index right after the next CRLF, or -1."""
    find = mhtml.find_next_linebreak
    # cases in which no linebreak can be found:
    # empty input, start past the break, lone CR or LF, negative start
    for content, start in ((b'', 0),
                           (b'abc\r\ndef', 6),
                           (b'abc\rdef', 0),
                           (b'abc\ndef', 0),
                           (b'abc\r\ndef', -1)):
        assert find(content, start) == -1
    # returns the index after the CRLF, i.e. the start of the new content
    assert find(b'abc\r\ndef', 0) == 5
    # works on bytes only
    with pytest.raises(TypeError):
        find('abc\r\ndef', 0)
def test_next_line():
    """next_line: return (line incl. CRLF, position after the line)."""
    assert mhtml.next_line(b'', 0) == (b'', -1)
    assert mhtml.next_line(b'abc\r\ndef', 0) == (b'abc\r\n', 5)
    # the start offset is honored
    assert mhtml.next_line(b'abc\r\ndef', 1) == (b'bc\r\n', 5)
    # a header continuation (tab after CRLF) is consumed as one line
    assert mhtml.next_line(b'abc;\r\n\tcba\r\ndef', 1) == \
        (b'bc;\r\n\tcba\r\n', 12)
    # start -1 degenerates to the last byte until the end
    # really should not happen -> so ignore it
    assert mhtml.next_line(b'abc\r\ndef', -1) == (b'f', -1)
    # None content is rejected
    with pytest.raises(AttributeError):
        mhtml.next_line(None, -1)
def test_parse_header():
    """parse_header: parse 'Name: value' lines into a ResourceHeader,
    returning the header and the offset after the blank separator line
    (-1 when the input ends with the header)."""
    assert mhtml.parse_header(b'', 0) == (mhtml.ResourceHeader(), -1)
    # needs two linebreaks (a single empty line) after the header fields
    with pytest.raises(AssertionError):
        assert mhtml.parse_header(b'CH: CV\r\n', 0) == \
            (mhtml.ResourceHeader([('CH', 'CV')]), -1)
    # really short header
    assert mhtml.parse_header(b'CH: CV\r\n\r\n', 0) == \
        (mhtml.ResourceHeader([('CH', 'CV')]), -1)
    assert mhtml.parse_header(b'CH: CV\r\nCH2: CV2\r\nCH3: CV3\r\n\r\n', 0) \
        == (mhtml.ResourceHeader([('CH', 'CV'), ('CH2', 'CV2'),
                                  ('CH3', 'CV3')]), -1)
    # TODO: how to handle multiple spaces -> trim()?
    # NOTE(review): the expected value ' CV' implies the input here has
    # two spaces after the colon - verify against the raw file
    assert mhtml.parse_header(b'CH: CV\r\n\r\n', 0) == \
        (mhtml.ResourceHeader([('CH', ' CV')]), -1)
    # needs at least a single space
    assert mhtml.parse_header(b'CH:CV\r\n\r\n', 0) == \
        (mhtml.ResourceHeader([]), -1)
    # content after the header: the offset of the content is returned
    assert mhtml.parse_header(b'CH: CV\r\n\r\n\r\n-----boundary---', 0) == \
        (mhtml.ResourceHeader([('CH', 'CV')]), 10)
    # value with linebreaks (continuation lines)
    assert mhtml.parse_header(b'CH: CV;\r\n\tCV2\r\n\r\n', 0) == \
        (mhtml.ResourceHeader([('CH', 'CV;\r\n\tCV2')]), -1)
    assert mhtml.parse_header(b'CH: CV;\r\n\tCV2\r\nCH2: CV3\r\n\r\n', 0) == \
        (mhtml.ResourceHeader([('CH', 'CV;\r\n\tCV2'), ('CH2', 'CV3')]), -1)
def test_find_next_boundary():
    """find_next_boundary: locate '--<boundary>' preceded by a CRLF."""
    bndry = '---boundary---'
    # no boundary in empty content
    assert mhtml.find_next_boundary(b'', bndry, 0) == (-1, -1)
    # a boundary not preceded by a linebreak is not recognized
    assert mhtml.find_next_boundary(b'--'
                                    b'---boundary---'
                                    b'\r\n', bndry, 0) == (-1, -1)
    # part boundary: (start of boundary, position after it)
    assert mhtml.find_next_boundary(b'\r\n'
                                    b'--'
                                    b'---boundary---'
                                    b'\r\n', bndry, 0) == (2, 20)
    # end-of-parts boundary (trailing dashes): end position is -1
    assert mhtml.find_next_boundary(b'\r\n'
                                    b'--'
                                    b'---boundary---'
                                    b'--\r\n', bndry, 0) == (2, -1)
def test_parse_part():
    """parse_part: one part -> ((headers, start, content-start, end), next)."""
    bndry = '---boundary---'
    part_bndry = bytes('--' + bndry + '\r\n', 'ascii')
    file_bndry = bytes('--' + bndry + '--\r\n', 'ascii')
    # the boundary must be a string (it comes from a parsed header)
    with pytest.raises(TypeError):
        mhtml.parse_part(b'', b'', 0)
    # empty content: should not happen, since the function is only
    # called when a part was announced
    assert mhtml.parse_part(b'', bndry, 0) == \
        ((mhtml.ResourceHeader(), 0, -1, 0), -1)
    part_bytes = b'CH: CV\r\n\r\ncontent\r\n'
    expected = (mhtml.ResourceHeader([('CH', 'CV')]), 0, 10, 19)
    # last part: closed by the end-of-parts boundary (extra dashes)
    assert mhtml.parse_part(part_bytes + file_bndry, bndry, 0) == \
        (expected, -1)
    # intermediate part: the next part starts at the returned offset
    assert mhtml.parse_part(part_bytes + part_bndry, bndry, 0) == \
        (expected, 37)
def test_parse_parts_missing_head_boundary():
    """Without a head boundary no parts are recognized."""
    bndry = '---boundary---'
    file_bndry = bytes('--' + bndry + '--\r\n', 'ascii')
    assert mhtml.parse_parts(b'', bndry, 0) == ([], -1)
    # missing head boundary - should not happen
    # TODO: raise Error on missing boundary?
    for content in (b'CH: CV\r\n\r\n',
                    b'CH: CV\r\n\r\n' + file_bndry,
                    b'CH: CV\r\n\r\n'
                    b'content\r\n' + file_bndry):
        assert mhtml.parse_parts(content, bndry, 0) == ([], -1)
def test_parse_parts_with_head_boundary():
    """parse_parts with a head boundary announcing each part; each part
    is a (headers, part-start, content-start, content-end) tuple of
    offsets into the whole byte content."""
    bndry = '---boundary---'
    part_bndry = bytes('--' + bndry + '\r\n', 'ascii')
    file_bndry = bytes('--' + bndry + '--\r\n', 'ascii')
    # head boundary - announce part
    assert mhtml.parse_parts(b'\r\n' + part_bndry +
                             b'CH: CV\r\n\r\n'
                             b'content\r\n', bndry, 2) \
        == ([(mhtml.ResourceHeader([('CH', 'CV')]),
              20, 30, 39)], -1)
    # TODO: work with monkeypatching?
    # TODO: should recognize empty part?
    # something like first part, then another follows but is somewhat vague ...
    # the trailing part boundary produces a second, empty part
    assert mhtml.parse_parts(b'\r\n' + part_bndry +
                             b'CH: CV\r\n\r\n'
                             b'content\r\n'
                             + part_bndry, bndry, 2) \
        == ([(mhtml.ResourceHeader([('CH', 'CV')]),
              20, 30, 39),
             (mhtml.ResourceHeader(),
              57, -1, 57)], -1)
    # single part (use-case: last-part before file boundary)
    assert mhtml.parse_parts(b'\r\n' + part_bndry +
                             b'CH: CV\r\n\r\n'
                             b'content\r\n'
                             + file_bndry, bndry, 0) \
        == ([(mhtml.ResourceHeader([('CH', 'CV')]),
              20, 30, 39)], -1)
def test_parse_mhtml(mocker):
    """parse_mhtml wiring: header parsing, boundary detection and part
    parsing are called in the right order (all collaborators mocked)."""
    content = b'content'
    bndry = '--bndry--'
    header_end_pos = 5
    line1 = b'\r\n'
    line2 = b'other\r\n'
    next_pos = 10
    parts = [1, 2, 4]
    mock_meth_parse_header = mocker.Mock()
    mock_meth_next_line = mocker.Mock()
    mock_meth_get_boundary = mocker.Mock()
    mock_meth_parse_parts = mocker.Mock()
    mocker.patch('mhtml.parse_header', mock_meth_parse_header)
    mocker.patch('mhtml.next_line', mock_meth_next_line)
    mocker.patch('mhtml.get_boundary', mock_meth_get_boundary)
    mocker.patch('mhtml.parse_parts', mock_meth_parse_parts)
    # no boundary in header: only the headers are returned, no parts
    mock_meth_parse_header.return_value = (mocker.sentinel.headers,
                                           header_end_pos)
    mock_meth_next_line.return_value = (line1, next_pos)
    mock_meth_get_boundary.return_value = None
    assert mhtml.parse_mhtml(content) == (mocker.sentinel.headers, None)
    mock_meth_parse_header.assert_called_once_with(content, 0)
    mock_meth_next_line.assert_called_once_with(content, header_end_pos)
    mock_meth_get_boundary.assert_called_once_with(mocker.sentinel.headers)
    mock_meth_parse_parts.assert_not_called()
    # with boundary: parts are parsed starting after the empty line
    mock_meth_parse_header.reset_mock()
    mock_meth_next_line.reset_mock()
    mock_meth_get_boundary.reset_mock()
    mock_meth_next_line.return_value = (line1, next_pos)
    mock_meth_get_boundary.return_value = bndry
    mock_meth_parse_parts.return_value = (parts, -1)
    assert mhtml.parse_mhtml(content) == (mocker.sentinel.headers, parts)
    mock_meth_parse_header.assert_called_once_with(content, 0)
    mock_meth_next_line.assert_called_once_with(content, header_end_pos)
    mock_meth_get_boundary.assert_called_once_with(mocker.sentinel.headers)
    mock_meth_parse_parts.assert_called_once_with(content, bndry, next_pos)
    # only single empty line after header: parsing starts at header end
    # TODO: should fail if not two empty lines after header?
    mock_meth_next_line.reset_mock()
    mock_meth_get_boundary.reset_mock()
    mock_meth_parse_parts.reset_mock()
    mock_meth_next_line.return_value = (line2, next_pos)
    mock_meth_parse_parts.return_value = (parts, -1)
    assert mhtml.parse_mhtml(content) == (mocker.sentinel.headers, parts)
    mock_meth_next_line.assert_called_once_with(content, header_end_pos)
    mock_meth_get_boundary.assert_called_once_with(mocker.sentinel.headers)
    mock_meth_parse_parts.assert_called_once_with(content, bndry,
                                                  header_end_pos)
    # invalid parts parse: a leftover position must trigger an assertion
    mock_meth_parse_parts.reset_mock()
    mock_meth_parse_parts.return_value = (parts, 9001)
    with pytest.raises(AssertionError,
                       match='file should be completly parsed'):
        mhtml.parse_mhtml(content)
    mock_meth_parse_parts.assert_called_once_with(content, bndry,
                                                  header_end_pos)
    # TODO: check if not bytes content?
# ---------------------------------------------------------------------------
def test_parse_mhtml_struct_no_parts(mocker):
    """parse_mhtml_struct should build an MHTMLArchive even without body parts.

    All collaborators (parse_header, next_line, get_boundary, parse_parts,
    MHTMLArchive) are mocked; the test pins the exact call sequence and the
    arguments forwarded to the MHTMLArchive constructor.
    """
    content = b'content'
    bndry = '---bndry---'
    header_end_pos = 6
    next_pos = 55
    mock_mhtarc_class = mocker.patch('mhtml.MHTMLArchive', spec=True)
    mock_meth_parse_header = mocker.patch('mhtml.parse_header')
    mock_meth_next_line = mocker.patch('mhtml.next_line')
    mock_meth_get_boundary = mocker.patch('mhtml.get_boundary')
    mock_meth_parse_parts = mocker.patch('mhtml.parse_parts')
    # Case 1: only a header followed by an empty line; with only_header=True
    # the archive starts after the empty line (next_pos) and parse_parts is
    # never invoked.
    mock_mhtarc_class.return_value = mocker.sentinel.mhtarc
    mock_meth_parse_header.return_value = (mocker.sentinel.headers,
                                           header_end_pos)
    mock_meth_next_line.return_value = (b'\r\n', next_pos)
    mock_meth_get_boundary.return_value = bndry
    assert mhtml.parse_mhtml_struct(content, True) == mocker.sentinel.mhtarc
    mock_meth_parse_header.assert_called_once_with(content, 0)
    mock_meth_next_line.assert_called_once_with(content, header_end_pos)
    mock_meth_get_boundary.assert_called_once_with(mocker.sentinel.headers)
    mock_mhtarc_class.assert_called_once_with(content, mocker.sentinel.headers,
                                              next_pos, bndry)
    mock_meth_parse_parts.assert_not_called()
    # Case 2: no extra free line after the header -- the archive body then
    # starts directly at header_end_pos instead of next_pos.
    mock_mhtarc_class.reset_mock()
    mock_meth_parse_header.reset_mock()
    mock_meth_next_line.reset_mock()
    mock_meth_get_boundary.reset_mock()
    mock_meth_next_line.return_value = (b'start of content or bndry', next_pos)
    assert mhtml.parse_mhtml_struct(content, True) == mocker.sentinel.mhtarc
    mock_meth_parse_header.assert_called_once_with(content, 0)
    mock_meth_next_line.assert_called_once_with(content, header_end_pos)
    mock_meth_get_boundary.assert_called_once_with(mocker.sentinel.headers)
    mock_mhtarc_class.assert_called_once_with(content, mocker.sentinel.headers,
                                              header_end_pos, bndry)
    mock_meth_parse_parts.assert_not_called()
    # Case 3: no boundary in the headers -- the archive is still constructed,
    # with None forwarded in place of the boundary string.
    mock_mhtarc_class.reset_mock()
    mock_meth_parse_header.reset_mock()
    mock_meth_next_line.reset_mock()
    mock_meth_get_boundary.reset_mock()
    mock_meth_get_boundary.return_value = None
    mock_meth_next_line.return_value = (b'\r\n', next_pos)
    assert mhtml.parse_mhtml_struct(content, True) == mocker.sentinel.mhtarc
    mock_meth_parse_header.assert_called_once_with(content, 0)
    mock_meth_next_line.assert_called_once_with(content, header_end_pos)
    mock_meth_get_boundary.assert_called_once_with(mocker.sentinel.headers)
    mock_mhtarc_class.assert_called_once_with(content, mocker.sentinel.headers,
                                              next_pos, None)
    mock_meth_parse_parts.assert_not_called()
def test_parse_mhtml_struct_with_parts(mocker):
    """parse_mhtml_struct with only_header=False should parse the body parts.

    Each 4-tuple returned by parse_parts is expanded into a Resource bound to
    the archive, and the resulting list is handed to the archive's
    _set_resources method. A parse_parts end offset other than -1 must abort
    with an AssertionError before any Resource is created.
    """
    content = b'content'
    bndry = '---bndry---'
    header_end_pos = 6
    next_pos = 55
    parts = [(1, 2, 3, 4), (11, 22, 33, 44), (111, 222, 333, 444)]  # dummies
    mock_mhtarc_class = mocker.patch('mhtml.MHTMLArchive', spec=True)
    mock_res_class = mocker.patch('mhtml.Resource', spec=True)
    mock_meth_parse_header = mocker.patch('mhtml.parse_header')
    mock_meth_next_line = mocker.patch('mhtml.next_line')
    mock_meth_get_boundary = mocker.patch('mhtml.get_boundary')
    mock_meth_parse_parts = mocker.patch('mhtml.parse_parts')
    # successful parse: header, boundary and three dummy parts
    mock_mhtarc_class.return_value = mocker.sentinel.mhtarc
    mock_meth_parse_header.return_value = (mocker.sentinel.headers,
                                           header_end_pos)
    mock_meth_set_res = mocker.Mock()
    mocker.sentinel.mhtarc._set_resources = mock_meth_set_res
    mock_meth_next_line.return_value = (b'\r\n', next_pos)
    mock_meth_get_boundary.return_value = bndry
    mock_meth_parse_parts.return_value = (parts, -1)
    mock_res_class.side_effect = [mocker.sentinel.res1, mocker.sentinel.res2,
                                  mocker.sentinel.res3]
    assert mhtml.parse_mhtml_struct(content, False) == mocker.sentinel.mhtarc
    mock_meth_parse_header.assert_called_once_with(content, 0)
    mock_meth_next_line.assert_called_once_with(content, header_end_pos)
    mock_meth_get_boundary.assert_called_once_with(mocker.sentinel.headers)
    mock_mhtarc_class.assert_called_once_with(content, mocker.sentinel.headers,
                                              next_pos, bndry)
    mock_meth_parse_parts.assert_called_once_with(content, bndry, next_pos)
    mock_meth_set_res.assert_called_once_with([mocker.sentinel.res1,
                                               mocker.sentinel.res2,
                                               mocker.sentinel.res3])
    # each part tuple is unpacked as positional args after the archive
    mock_res_class.assert_has_calls([
        mocker.call(mocker.sentinel.mhtarc, 1, 2, 3, 4),
        mocker.call(mocker.sentinel.mhtarc, 11, 22, 33, 44),
        mocker.call(mocker.sentinel.mhtarc, 111, 222, 333, 444)])
    # failure: parse_parts did not consume the whole file (end pos != -1)
    mock_res_class.reset_mock()
    mock_meth_set_res.reset_mock()
    mock_meth_parse_parts.return_value = (parts, 2)
    with pytest.raises(AssertionError,
                       match='file should be completly parsed'):
        mhtml.parse_mhtml_struct(content, False)
    mock_res_class.assert_not_called()
    mock_meth_set_res.assert_not_called()
def _get_open_ref(): # pragma: no cover
'''
see: https://github.com/andras-tim/octoconf/blob/master/tests/common.py
:rtype str
'''
# noqa: E501 pylint: disable=import-error,redefined-builtin,unused-import,unused-variable
try:
from builtins import open
return 'builtins.open'
except ImportError:
from __builtin__ import open # noqa: F401
return '__builtin__.open'
def test_MHTMLArchive_from_file(mocker):  # noqa: N80
    """MHTMLArchive_from_file should read the file in binary mode and delegate
    the raw bytes plus the only_header flag to parse_mhtml_struct."""
    mock_open = mocker.mock_open(read_data=b'abc')
    mocker.patch(_get_open_ref(), mock_open)
    mock_parse = mocker.patch('mhtml.parse_mhtml_struct')
    mhtml.MHTMLArchive_from_file('somefilename', only_header=True)
    mock_open.assert_called_once_with('somefilename', 'rb')
    mock_parse.assert_called_once_with(b'abc', only_header=True)
def test_MHTMLArchive_to_file(mocker):  # noqa: N80
    """MHTMLArchive_to_file should open the target in binary write mode and
    dump the archive's raw content into it."""
    mock_open = mocker.mock_open()
    mock_mhtarc = mocker.Mock()
    mock_mhtarc.content = b'abc2'
    mocker.patch(_get_open_ref(), mock_open)
    mhtml.MHTMLArchive_to_file(mock_mhtarc, 'somefilename')
    mock_open.assert_called_once_with('somefilename', 'wb')
    # mock_open returns the same handle on every call, so calling it again
    # yields the handle the function wrote to
    mock_handle = mock_open()
    mock_handle.write.assert_called_once_with(b'abc2')
| 40.199627 | 93 | 0.626955 |
7959e3ee79a5b53bed592c56126d39aab3130f92 | 2,083 | py | Python | sortingview/config/job_handler.py | garrettmflynn/sortingview | 0bb3df40d5d031ec651c4821f928787bbee71fbb | [
"Apache-2.0"
] | 2 | 2021-11-19T04:51:42.000Z | 2022-03-12T23:36:19.000Z | sortingview/config/job_handler.py | magland/sortingview | 0b1be9d55048cd4b8a0b6b6733bd7d35cb440aa7 | [
"Apache-2.0"
] | 172 | 2021-05-10T17:39:15.000Z | 2022-03-18T21:46:15.000Z | sortingview/config/job_handler.py | garrettmflynn/sortingview | 0bb3df40d5d031ec651c4821f928787bbee71fbb | [
"Apache-2.0"
] | 2 | 2021-08-29T20:13:57.000Z | 2022-03-12T23:36:34.000Z | import os
import hither2 as hi
import yaml
from copy import deepcopy
default_config_yaml = '''
job_handlers:
correlograms:
type: parallel
params:
num_workers: 4
timeseries:
type: parallel
params:
num_workers: 4
waveforms:
type: parallel
params:
num_workers: 4
clusters:
type: parallel
params:
num_workers: 4
metrics:
type: parallel
params:
num_workers: 4
misc:
type: parallel
params:
num_workers: 4
extract_snippets:
type: parallel
params:
num_workers: 4
'''
# Start from the built-in defaults, then overlay any user-provided config.
default_config = yaml.safe_load(default_config_yaml)
config = deepcopy(default_config)
# Optional override: a YAML file whose 'job_handlers' section replaces
# matching entries of the default configuration (shallow, per-handler merge).
job_handler_config_path = os.getenv('SORTINGVIEW_JOB_HANDLER_CONFIG', None)
if job_handler_config_path is not None:
    print(f'Using job handler config file: {job_handler_config_path}')
    with open(job_handler_config_path, 'r') as f:
        config0 = yaml.safe_load(f)
    config['job_handlers'].update(config0['job_handlers'])
else:
    print('Using default job handler config. To override, set SORTINGVIEW_JOB_HANDLER_CONFIG to path of a yaml file.')
def _job_handler_from_config(x):
    """Instantiate a hither2 job handler from one config entry.

    :param x: dict with keys 'type' (currently only 'parallel') and
        'params' (for 'parallel': {'num_workers': int})
    :raises Exception: if the handler type is not recognized
    """
    handler_type = x['type']
    handler_params = x['params']
    if handler_type != 'parallel':
        raise Exception(f'Invalid type for job handler: {handler_type}')
    return hi.ParallelJobHandler(handler_params['num_workers'])
print(yaml.safe_dump(config))
class job_handler:
correlograms = _job_handler_from_config(config['job_handlers']['correlograms'])
timeseries = _job_handler_from_config(config['job_handlers']['timeseries'])
waveforms = _job_handler_from_config(config['job_handlers']['waveforms'])
clusters = _job_handler_from_config(config['job_handlers']['clusters'])
metrics = _job_handler_from_config(config['job_handlers']['metrics'])
misc = _job_handler_from_config(config['job_handlers']['misc'])
extract_snippets = _job_handler_from_config(config['job_handlers']['extract_snippets']) | 31.089552 | 118 | 0.677868 |
7959e454f949137ed80aaf6199f4b81aab8235c8 | 1,069 | py | Python | fastNLP/modules/__init__.py | KuNyaa/fastNLP | 22f9b87c54a4eebec7352c7ff772cd24685c7186 | [
"Apache-2.0"
] | 1 | 2019-10-05T06:02:44.000Z | 2019-10-05T06:02:44.000Z | fastNLP/modules/__init__.py | awesomemachinelearning/fastNLP | 945b30bb6174751130744231aa26119bf9bb2601 | [
"Apache-2.0"
] | 1 | 2019-12-09T06:34:44.000Z | 2019-12-09T06:34:44.000Z | fastNLP/modules/__init__.py | awesomemachinelearning/fastNLP | 945b30bb6174751130744231aa26119bf9bb2601 | [
"Apache-2.0"
] | null | null | null | """
.. image:: figures/text_classification.png

Most neural networks for NLP tasks can be seen as composed of three kinds of modules:
:mod:`embedding<fastNLP.embeddings>`, :mod:`~fastNLP.modules.encoder` and
:mod:`~fastNLP.modules.decoder`. This module implements the many building blocks
provided by fastNLP, helping users quickly assemble the networks they need.
The purpose of each module kind and its common components are listed below:

.. csv-table::
   :header: "Kind", "Purpose", "Common components"

   "embedding", see :doc:`/fastNLP.embeddings` , "Elmo, Bert"
   "encoder", "encode the input into vectors with representational power", "CNN, LSTM, Transformer"
   "decoder", "decode vectors carrying some representation into the desired output form", "MLP, CRF"
   "other", "components used in combination with the others", "Dropout"
"""
# Public API of this package; names re-exported from the encoder/decoder
# submodules below.
__all__ = [
    # "BertModel",
    "ConvolutionCharEncoder",
    "LSTMCharEncoder",
    "ConvMaxpool",
    "LSTM",
    "StarTransformer",
    "TransformerEncoder",
    "VarRNN",
    "VarLSTM",
    "VarGRU",
    "MaxPool",
    "MaxPoolWithMask",
    "AvgPool",
    "AvgPoolWithMask",
    "MultiHeadAttention",
    "MLP",
    "ConditionalRandomField",
    "viterbi_decode",
    "allowed_transitions",
    "TimestepDropout",
]
from . import decoder
from . import encoder
from .decoder import *
from .dropout import TimestepDropout
from .encoder import *
| 18.754386 | 96 | 0.657624 |
7959e5159cd012ca1c61cd94a3cb357a67b1becb | 2,157 | py | Python | examples/linear_time_model.py | pupil-labs/pupil-invisible-lsl-relay | 68f31b5408479d5324e69063e67e517c6354b31d | [
"MIT"
] | null | null | null | examples/linear_time_model.py | pupil-labs/pupil-invisible-lsl-relay | 68f31b5408479d5324e69063e67e517c6354b31d | [
"MIT"
] | 2 | 2022-01-31T08:32:16.000Z | 2022-01-31T08:32:24.000Z | examples/linear_time_model.py | pupil-labs/pupil-invisible-lsl-relay | 68f31b5408479d5324e69063e67e517c6354b31d | [
"MIT"
] | null | null | null | # imports for the full pipeline
import numpy as np
import pandas as pd
import pyxdf
from sklearn import linear_model

# --- Load the LSL (XDF) recording -----------------------------------------
# Name of the event stream recorded by the Pupil Invisible LSL relay.
stream_name = 'pupil_invisible_Event'
path_to_recording = './lsl_recordings/recorded_xdf_file.xdf'
data, header = pyxdf.load_xdf(path_to_recording, select_streams=[{'name': stream_name}])
# When recording from one device there is only one event stream;
# take the first (and only) matching stream.
event_stream = data[0]
# Collect event names and their LSL timestamps into a DataFrame.
event_column_name = 'name'
event_column_timestamp = 'timestamp [s]'
lsl_event_data = pd.DataFrame(columns=[event_column_name, event_column_timestamp])
lsl_event_data[event_column_name] = [name[0] for name in event_stream['time_series']]
lsl_event_data[event_column_timestamp] = event_stream['time_stamps']

# --- Load the Pupil Cloud export ------------------------------------------
path_to_cloud_events = './cloud_recordings/events.csv'
cloud_event_data = pd.read_csv(path_to_cloud_events)
# Cloud timestamps are in nanoseconds; convert to seconds.
cloud_event_data[event_column_timestamp] = cloud_event_data['timestamp [ns]'] * 1e-9

# --- Match events present in both recordings ------------------------------
# Only events recorded both in the LSL stream and in Cloud can anchor the
# time mapping.
name_intersection = np.intersect1d(
    cloud_event_data[event_column_name], lsl_event_data[event_column_name]
)
filtered_cloud_event_data = cloud_event_data[
    cloud_event_data[event_column_name].isin(name_intersection)
]
filtered_lsl_event_data = lsl_event_data[
    lsl_event_data[event_column_name].isin(name_intersection)
]

# --- Fit a linear clock mapping: cloud time [s] -> LSL time [s] -----------
# NOTE(review): this assumes matched events appear in the same order in both
# frames (one row per shared event name) -- verify for recordings with
# duplicate event names.
time_mapper = linear_model.LinearRegression()
time_mapper.fit(
    filtered_cloud_event_data[[event_column_timestamp]],
    filtered_lsl_event_data[event_column_timestamp],
)

# --- Convert gaze timestamps from Cloud time to LSL time ------------------
cloud_gaze_data = pd.read_csv('./cloud_recordings/gaze.csv')
# nanoseconds -> seconds
cloud_gaze_data[event_column_timestamp] = cloud_gaze_data['timestamp [ns]'] * 1e-9
# Predicted LSL time in seconds for every gaze sample.
cloud_gaze_data['lsl_time [s]'] = time_mapper.predict(
    cloud_gaze_data[[event_column_timestamp]]
)
| 33.184615 | 88 | 0.802503 |
7959e669b8396eaf596d1b0acac84f517f98711f | 2,723 | py | Python | cctbx/covariance/tests/tst_covariance.py | dperl-sol/cctbx_project | b9e390221a2bc4fd00b9122e97c3b79c632c6664 | [
"BSD-3-Clause-LBNL"
] | 155 | 2016-11-23T12:52:16.000Z | 2022-03-31T15:35:44.000Z | cctbx/covariance/tests/tst_covariance.py | dperl-sol/cctbx_project | b9e390221a2bc4fd00b9122e97c3b79c632c6664 | [
"BSD-3-Clause-LBNL"
] | 590 | 2016-12-10T11:31:18.000Z | 2022-03-30T23:10:09.000Z | cctbx/covariance/tests/tst_covariance.py | dperl-sol/cctbx_project | b9e390221a2bc4fd00b9122e97c3b79c632c6664 | [
"BSD-3-Clause-LBNL"
] | 115 | 2016-11-15T08:17:28.000Z | 2022-02-09T15:30:14.000Z | from __future__ import absolute_import, division, print_function
from cctbx.array_family import flex
from cctbx import covariance, crystal, xray
from libtbx.test_utils import approx_equal, Exception_expected
from scitbx import matrix
from six.moves import range
def exercise_covariance():
  """Exercise the cctbx covariance utilities on a small quartz-like structure.

  Checks extraction of site covariance sub-matrices, orthogonalization of the
  covariance matrix (fractional -> cartesian), and the u_iso/u_aniso accessors,
  including that they raise RuntimeError when queried for the wrong ADP kind.
  """
  xs = xray.structure(
    crystal_symmetry=crystal.symmetry(
      (5.01,5.01,5.47,90,90,120), "P6222"),
    scatterers=flex.xray_scatterer([
      xray.scatterer("Si", (1/2.,1/2.,1/3.)),
      xray.scatterer("O", (0.197,-0.197,0.83333))]))
  uc = xs.unit_cell()
  flags = xs.scatterer_flags()
  for f in flags:
    f.set_grad_site(True)
  xs.set_scatterer_flags(flags)
  # Upper-triangle packed 6x6 covariance matrix for the two sites (3 fractional
  # coordinates each).
  cov = flex.double((1e-8,1e-9,2e-9,3e-9,4e-9,5e-9,
                          2e-8,1e-9,2e-9,3e-9,4e-9,
                               3e-8,1e-9,2e-9,3e-9,
                                    2e-8,1e-9,2e-9,
                                         3e-8,1e-9,
                                              4e-8))
  param_map = xs.parameter_map()
  # Extracting the sub-matrix for all sites must return the full matrix.
  assert approx_equal(cov,
    covariance.extract_covariance_matrix_for_sites(flex.size_t([0,1]), cov, param_map))
  cov_cart = covariance.orthogonalize_covariance_matrix(cov, uc, param_map)
  O = matrix.sqr(uc.orthogonalization_matrix())
  # Per-scatterer check: orthogonalization must agree with O * C * O^T.
  for i in range(param_map.n_scatterers):
    cov_i = covariance.extract_covariance_matrix_for_sites(flex.size_t([i]), cov, param_map)
    cov_i_cart = covariance.extract_covariance_matrix_for_sites(flex.size_t([i]), cov_cart, param_map)
    assert approx_equal(
      O * matrix.sym(sym_mat3=cov_i) * O.transpose(),
      matrix.sym(sym_mat3=cov_i_cart).as_mat3())
  # Switch scatterer 0 to anisotropic ADPs and scatterer 1 to isotropic.
  for f in flags: f.set_grads(False)
  flags[0].set_grad_u_aniso(True)
  flags[0].set_use_u_aniso(True)
  flags[1].set_grad_u_iso(True)
  flags[1].set_use_u_iso(True)
  xs.set_scatterer_flags(flags)
  param_map = xs.parameter_map()
  # 7 parameters: 6 u_aniso components + 1 u_iso; diagonal is 0..6.
  cov = flex.double(7*7, 0)
  cov.reshape(flex.grid(7,7))
  cov.matrix_diagonal_set_in_place(flex.double([i for i in range(7)]))
  cov = cov.matrix_symmetric_as_packed_u()
  assert approx_equal([i for i in range(6)],
                      covariance.extract_covariance_matrix_for_u_aniso(
                        0, cov, param_map).matrix_packed_u_diagonal())
  assert covariance.variance_for_u_iso(1, cov, param_map) == 6
  # Querying u_iso variance of an anisotropic scatterer must fail...
  try: covariance.variance_for_u_iso(0, cov, param_map)
  except RuntimeError: pass
  else: raise Exception_expected
  # ...and vice versa for the u_aniso matrix of an isotropic scatterer.
  try: covariance.extract_covariance_matrix_for_u_aniso(1, cov, param_map)
  except RuntimeError: pass
  else: raise Exception_expected
  # BUG FIX: the result of approx_equal was previously discarded, so this
  # final check (no site parameters refined => zero site covariance) was
  # never enforced.
  assert approx_equal(covariance.extract_covariance_matrix_for_sites(
    flex.size_t([1]), cov, param_map), (0,0,0,0,0,0))
def run():
  """Run all exercises in this module and report success."""
  exercise_covariance()
  print("OK")

if __name__ == '__main__':
  run()
| 40.641791 | 102 | 0.678663 |
7959e6c5b32d19b211aa33701f64566ebe4f615a | 9,732 | py | Python | highway_env/envs/common/action.py | boschresearch/highway-env | 19770b9e2a4a4e740b1aec6680d14d36fd4da3c2 | [
"MIT"
] | 2 | 2021-08-13T02:02:10.000Z | 2021-08-14T14:16:36.000Z | highway_env/envs/common/action.py | boschresearch/highway-env | 19770b9e2a4a4e740b1aec6680d14d36fd4da3c2 | [
"MIT"
] | null | null | null | highway_env/envs/common/action.py | boschresearch/highway-env | 19770b9e2a4a4e740b1aec6680d14d36fd4da3c2 | [
"MIT"
] | 1 | 2022-03-04T23:14:15.000Z | 2022-03-04T23:14:15.000Z | import functools
from itertools import product
from typing import TYPE_CHECKING, Optional, Union, Tuple, Callable
from gym import spaces
import numpy as np
from highway_env import utils
from highway_env.utils import Vector
from highway_env.vehicle.dynamics import BicycleVehicle
from highway_env.vehicle.kinematics import Vehicle
from highway_env.vehicle.controller import MDPVehicle
if TYPE_CHECKING:
from highway_env.envs.common.abstract import AbstractEnv
Action = Union[int, np.ndarray]
class ActionType(object):

    """A type of action specifies its definition space, and how actions are executed in the environment"""

    def __init__(self, env: 'AbstractEnv', **kwargs) -> None:
        self.env = env
        # Explicitly selected vehicle; None means "fall back to env.vehicle".
        self.__controlled_vehicle = None

    def space(self) -> spaces.Space:
        """The action space."""
        raise NotImplementedError

    @property
    def vehicle_class(self) -> Callable:
        """
        The class of a vehicle able to execute the action.

        Must return a subclass of :py:class:`highway_env.vehicle.kinematics.Vehicle`.
        """
        raise NotImplementedError

    def act(self, action: Action) -> None:
        """
        Execute the action on the ego-vehicle.

        Most of the action mechanics are actually implemented in vehicle.act(action), where
        vehicle is an instance of the specified :py:class:`highway_env.envs.common.action.ActionType.vehicle_class`.

        Some pre-processing can be applied to the action based on the ActionType configurations.

        :param action: the action to execute
        """
        raise NotImplementedError

    @property
    def controlled_vehicle(self):
        """The vehicle acted upon.

        If not set, the first controlled vehicle is used by default."""
        # falsy (unset) private reference falls back to the env's ego vehicle
        return self.__controlled_vehicle or self.env.vehicle

    @controlled_vehicle.setter
    def controlled_vehicle(self, vehicle):
        self.__controlled_vehicle = vehicle
class ContinuousAction(ActionType):

    """
    A continuous action space for throttle and/or steering angle.

    If both throttle and steering are enabled, they are set in this order: [throttle, steering]

    The space intervals are always [-1, 1], but are mapped to throttle/steering intervals through configurations.
    """

    ACCELERATION_RANGE = (-5, 5.0)
    """Acceleration range: [-x, x], in m/s²."""

    STEERING_RANGE = (-np.pi / 4, np.pi / 4)
    """Steering angle range: [-x, x], in rad."""

    def __init__(self,
                 env: 'AbstractEnv',
                 acceleration_range: Optional[Tuple[float, float]] = None,
                 steering_range: Optional[Tuple[float, float]] = None,
                 longitudinal: bool = True,
                 lateral: bool = True,
                 dynamical: bool = False,
                 clip: bool = True,
                 **kwargs) -> None:
        """
        Create a continuous action space.

        :param env: the environment
        :param acceleration_range: the range of acceleration values [m/s²]
        :param steering_range: the range of steering values [rad]
        :param longitudinal: enable throttle control
        :param lateral: enable steering control
        :param dynamical: whether to simulate dynamics (i.e. friction) rather than kinematics
        :param clip: clip action to the defined range
        """
        super().__init__(env)
        self.acceleration_range = acceleration_range if acceleration_range else self.ACCELERATION_RANGE
        self.steering_range = steering_range if steering_range else self.STEERING_RANGE
        self.lateral = lateral
        self.longitudinal = longitudinal
        if not self.lateral and not self.longitudinal:
            raise ValueError("Either longitudinal and/or lateral control must be enabled")
        self.dynamical = dynamical
        self.clip = clip
        # Dimension of the action vector: [throttle, steering] or a single axis.
        self.size = 2 if self.lateral and self.longitudinal else 1
        self.last_action = np.zeros(self.size)

    def space(self) -> spaces.Box:
        return spaces.Box(-1., 1., shape=(self.size,), dtype=np.float32)

    @property
    def vehicle_class(self) -> Callable:
        # Dynamical (friction) simulation requires the bicycle model.
        return Vehicle if not self.dynamical else BicycleVehicle

    def act(self, action: np.ndarray) -> None:
        if self.clip:
            action = np.clip(action, -1, 1)
        # lmap linearly rescales the normalized [-1, 1] command onto the
        # configured physical range.
        if self.longitudinal and self.lateral:
            self.controlled_vehicle.act({
                "acceleration": utils.lmap(action[0], [-1, 1], self.acceleration_range),
                "steering": utils.lmap(action[1], [-1, 1], self.steering_range),
            })
        elif self.longitudinal:
            self.controlled_vehicle.act({
                "acceleration": utils.lmap(action[0], [-1, 1], self.acceleration_range),
                "steering": 0,
            })
        elif self.lateral:
            self.controlled_vehicle.act({
                "acceleration": 0,
                "steering": utils.lmap(action[0], [-1, 1], self.steering_range)
            })
        # Keep the (possibly clipped) normalized action for observers/rewards.
        self.last_action = action
class DiscreteAction(ContinuousAction):

    """
    A discretized version of :py:class:`ContinuousAction`: each continuous
    control axis is quantized into ``actions_per_axis`` evenly spaced values,
    and a discrete action indexes one combination of axis values.
    """

    def __init__(self,
                 env: 'AbstractEnv',
                 acceleration_range: Optional[Tuple[float, float]] = None,
                 steering_range: Optional[Tuple[float, float]] = None,
                 longitudinal: bool = True,
                 lateral: bool = True,
                 dynamical: bool = False,
                 clip: bool = True,
                 actions_per_axis: int = 3,
                 **kwargs) -> None:
        """
        Create a discretized continuous action space.

        :param env: the environment
        :param acceleration_range: the range of acceleration values [m/s²]
        :param steering_range: the range of steering values [rad]
        :param longitudinal: enable throttle control
        :param lateral: enable steering control
        :param dynamical: whether to simulate dynamics (i.e. friction) rather than kinematics
        :param clip: clip action to the defined range
        :param actions_per_axis: number of discrete values per control axis
        """
        super().__init__(env, acceleration_range=acceleration_range, steering_range=steering_range,
                         longitudinal=longitudinal, lateral=lateral, dynamical=dynamical, clip=clip)
        self.actions_per_axis = actions_per_axis

    def space(self) -> spaces.Discrete:
        return spaces.Discrete(self.actions_per_axis**self.size)

    def act(self, action: int) -> None:
        cont_space = super().space()
        # np.linspace broadcasts over the Box bounds, producing an
        # (actions_per_axis, size) grid; transpose so that axes[i] holds the
        # candidate values of axis i.
        axes = np.linspace(cont_space.low, cont_space.high, self.actions_per_axis).T
        # BUG FIX: take the Cartesian product *across* axes, yielding
        # actions_per_axis ** size combinations as declared by space().
        # The previous product(axes) iterated the grid rows, so only
        # actions_per_axis actions existed and any larger index raised
        # IndexError.
        all_actions = list(product(*axes))
        super().act(all_actions[action])
class DiscreteMetaAction(ActionType):

    """
    An discrete action space of meta-actions: lane changes, and cruise control set-point.
    """

    ACTIONS_ALL = {
        0: 'LANE_LEFT',
        1: 'IDLE',
        2: 'LANE_RIGHT',
        3: 'FASTER',
        4: 'SLOWER'
    }
    """A mapping of action indexes to labels."""

    ACTIONS_LONGI = {
        0: 'SLOWER',
        1: 'IDLE',
        2: 'FASTER'
    }
    """A mapping of longitudinal action indexes to labels."""

    ACTIONS_LAT = {
        0: 'LANE_LEFT',
        1: 'IDLE',
        2: 'LANE_RIGHT'
    }
    """A mapping of lateral action indexes to labels."""

    def __init__(self,
                 env: 'AbstractEnv',
                 longitudinal: bool = True,
                 lateral: bool = True,
                 target_speeds: Optional[Vector] = None,
                 **kwargs) -> None:
        """
        Create a discrete action space of meta-actions.

        :param env: the environment
        :param longitudinal: include longitudinal actions
        :param lateral: include lateral actions
        :param target_speeds: the list of speeds the vehicle is able to track
        """
        super().__init__(env)
        self.longitudinal = longitudinal
        self.lateral = lateral
        self.target_speeds = np.array(target_speeds) if target_speeds is not None else MDPVehicle.DEFAULT_TARGET_SPEEDS
        # Pick the action table matching the enabled control axes.
        self.actions = self.ACTIONS_ALL if longitudinal and lateral \
            else self.ACTIONS_LONGI if longitudinal \
            else self.ACTIONS_LAT if lateral \
            else None
        if self.actions is None:
            raise ValueError("At least longitudinal or lateral actions must be included")
        # Reverse lookup: label -> index.
        self.actions_indexes = {v: k for k, v in self.actions.items()}

    def space(self) -> spaces.Space:
        return spaces.Discrete(len(self.actions))

    @property
    def vehicle_class(self) -> Callable:
        # Bind the configured speed set-points into the vehicle constructor.
        return functools.partial(MDPVehicle, target_speeds=self.target_speeds)

    def act(self, action: int) -> None:
        # Forward the meta-action label (e.g. 'FASTER') to the vehicle.
        self.controlled_vehicle.act(self.actions[action])
class MultiAgentAction(ActionType):
    """Replicates a per-agent action configuration over every controlled vehicle,
    exposing one action sub-space per agent as a tuple."""

    def __init__(self,
                 env: 'AbstractEnv',
                 action_config: dict,
                 **kwargs) -> None:
        super().__init__(env)
        self.action_config = action_config
        self.agents_action_types = []
        # One action type per controlled vehicle, each bound to its vehicle.
        for controlled_vehicle in self.env.controlled_vehicles:
            agent_action_type = action_factory(self.env, self.action_config)
            agent_action_type.controlled_vehicle = controlled_vehicle
            self.agents_action_types.append(agent_action_type)

    def space(self) -> spaces.Space:
        # Sub-spaces are ordered like the controlled vehicles.
        sub_spaces = [agent_type.space() for agent_type in self.agents_action_types]
        return spaces.Tuple(sub_spaces)

    @property
    def vehicle_class(self) -> Callable:
        # Every agent shares the vehicle class of the underlying action type.
        return action_factory(self.env, self.action_config).vehicle_class

    def act(self, action: Action) -> None:
        assert isinstance(action, tuple)
        # Dispatch each sub-action to the matching agent's action type.
        for sub_action, agent_type in zip(action, self.agents_action_types):
            agent_type.act(sub_action)
def action_factory(env: 'AbstractEnv', config: dict) -> ActionType:
    """Instantiate the action type named by ``config["type"]``.

    :param env: the environment the action type will act upon
    :param config: configuration dict; must contain a "type" key, remaining
        entries are forwarded to the action type constructor
    :raises ValueError: if the action type name is not recognized
    """
    action_classes = {
        "ContinuousAction": ContinuousAction,
        "DiscreteAction": DiscreteAction,
        "DiscreteMetaAction": DiscreteMetaAction,
        "MultiAgentAction": MultiAgentAction,
    }
    action_name = config["type"]
    if action_name not in action_classes:
        raise ValueError("Unknown action type")
    return action_classes[action_name](env, **config)
| 36.313433 | 119 | 0.632552 |
7959e755f67aa8ba96d4b1eb23eaad82464b94bc | 3,334 | py | Python | smartpages/models.py | zemogle/astroEDU | 8d240ff35a288c9e920f6527f1cd3957d116e6ae | [
"MIT"
] | 1 | 2021-09-09T12:32:34.000Z | 2021-09-09T12:32:34.000Z | smartpages/models.py | zemogle/astroEDU | 8d240ff35a288c9e920f6527f1cd3957d116e6ae | [
"MIT"
] | 4 | 2021-09-09T19:53:18.000Z | 2021-09-24T09:11:26.000Z | smartpages/models.py | zemogle/astroEDU | 8d240ff35a288c9e920f6527f1cd3957d116e6ae | [
"MIT"
] | null | null | null | from django.db import models
from django.utils.translation import ugettext_lazy as _
from django.urls import get_script_prefix, reverse
from django.utils.encoding import iri_to_uri
from parler.models import TranslatableModel, TranslatedFieldsModel
from parler.managers import TranslatableManager, TranslatableQuerySet
from activities.models import PublishingModel, PublishingManager
class SmartPageQuerySet(TranslatableQuerySet):
    """Queryset for SmartPage; no extra behavior beyond translation support."""
    pass
class SmartPageManager(PublishingManager, TranslatableManager):
    """Manager combining publishing filters with parler translations."""
    queryset_class = SmartPageQuerySet
class SmartPage(PublishingModel, TranslatableModel):
    """A publishable, translatable flat page addressed by URL.

    Translated fields (url, title, content) live on SmartPageTranslation.
    """
    # Stable internal identifier, used to look pages up from code.
    code = models.CharField(unique=True, max_length=100, blank=True, db_index=True, help_text='Internal code to identify the page; if set, do not modify. When in doubt, leave empty.')

    # template_name = models.CharField(_('template name'), max_length=70, blank=True,
    #     help_text="Example: 'smartpages/contact_page.html'. If this isn't provided, the system will use 'smartpages/default.html'."
    #     ),
    # )

    registration_required = models.BooleanField(
        _('registration required'),
        help_text='If this is checked, only logged-in users will be able to view the page.',
        default=False)
    creation_date = models.DateTimeField(auto_now_add=True, null=True)
    modification_date = models.DateTimeField(auto_now=True, null=True)

    objects = SmartPageManager()

    class Meta:
        # ordering = ('translations__url',)
        verbose_name = 'page'

    def __str__(self):
        # url and title resolve through the active translation
        return 'SmartPage: %s -- %s' % (self.url, self.title)

    # def get_absolute_url(self):
    #     # Handle script prefix manually because we bypass reverse()
    #     return iri_to_uri(get_script_prefix().rstrip('/') + self.url)

    def get_absolute_url(self):
        # The 'smartpage' URL pattern expects the path without a leading slash.
        return reverse('smartpage', kwargs={'url': self.url.lstrip('/'), })
class SmartPageTranslation(TranslatedFieldsModel):
    """Per-language fields of SmartPage (one row per page per language)."""
    master = models.ForeignKey(SmartPage, related_name='translations', null=True, on_delete=models.CASCADE)
    url = models.CharField('URL', max_length=100, db_index=True, help_text='Example: "/about/contact/". Make sure to have leading and trailing slashes.')
    title = models.CharField('title', max_length=200)
    content = models.TextField('content', blank=True)

    class Meta:
        # One translation per language per page, and URLs unique per language.
        unique_together = (
            ('language_code', 'master'),
            ('language_code', 'url'),
        )
        verbose_name = 'page translation'
class SmartEmbed(TranslatableModel):
    """A translatable content snippet embeddable in other pages, keyed by code."""
    # Stable internal identifier, used to look embeds up from code/templates.
    code = models.CharField(unique=True, max_length=100, blank=True, db_index=True, help_text='Internal code to identify the embed; if set, do not modify. When in doubt, leave empty.')
    creation_date = models.DateTimeField(auto_now_add=True, null=True)
    modification_date = models.DateTimeField(auto_now=True, null=True)

    class Meta:
        ordering = ('code',)
        verbose_name = 'embed'

    def __str__(self):
        return "SmartEmbed: %s" % self.code
class SmartEmbedTranslation(TranslatedFieldsModel):
    """Per-language content of SmartEmbed (one row per embed per language)."""
    master = models.ForeignKey(SmartEmbed, related_name='translations', null=True, on_delete=models.CASCADE)
    content = models.TextField('content', blank=True)

    class Meta:
        # One translation per language per embed.
        unique_together = (
            ('language_code', 'master'),
        )
        verbose_name = 'embed translation'
| 38.767442 | 184 | 0.711758 |
7959e7a854486794cb4679db562b765559722ab5 | 2,070 | py | Python | test/record/parser/test_response_whois_nic_am_status_registered.py | huyphan/pyyawhois | 77fb2f73a9c67989f1d41d98f37037406a69d136 | [
"MIT"
] | null | null | null | test/record/parser/test_response_whois_nic_am_status_registered.py | huyphan/pyyawhois | 77fb2f73a9c67989f1d41d98f37037406a69d136 | [
"MIT"
] | null | null | null | test/record/parser/test_response_whois_nic_am_status_registered.py | huyphan/pyyawhois | 77fb2f73a9c67989f1d41d98f37037406a69d136 | [
"MIT"
] | null | null | null |
# This file is autogenerated. Do not edit it manually.
# If you want change the content of this file, edit
#
# spec/fixtures/responses/whois.nic.am/status_registered
#
# and regenerate the tests with the following script
#
# $ scripts/generate_tests.py
#
from nose.tools import *
from dateutil.parser import parse as time_parse
import yawhois
class TestWhoisNicAmStatusRegistered(object):
    """Autogenerated tests for parsing a whois.nic.am 'registered' response.

    The fixture text is parsed once in setUp; each test checks a single
    attribute of the resulting record against the known fixture values.
    """

    def setUp(self):
        fixture_path = "spec/fixtures/responses/whois.nic.am/status_registered.txt"
        host = "whois.nic.am"
        part = yawhois.record.Part(open(fixture_path, "r").read(), host)
        self.record = yawhois.record.Record(None, [part])

    def test_status(self):
        eq_(self.record.status, 'registered')

    def test_available(self):
        eq_(self.record.available, False)

    def test_nameservers(self):
        eq_(self.record.nameservers.__class__.__name__, 'list')
        eq_(len(self.record.nameservers), 4)
        eq_(self.record.nameservers[0].__class__.__name__, 'Nameserver')
        eq_(self.record.nameservers[0].name, "ns1.google.com")
        eq_(self.record.nameservers[1].__class__.__name__, 'Nameserver')
        eq_(self.record.nameservers[1].name, "ns2.google.com")
        eq_(self.record.nameservers[2].__class__.__name__, 'Nameserver')
        eq_(self.record.nameservers[2].name, "ns3.google.com")
        eq_(self.record.nameservers[3].__class__.__name__, 'Nameserver')
        eq_(self.record.nameservers[3].name, "ns4.google.com")

    def test_registered(self):
        eq_(self.record.registered, True)

    def test_created_on(self):
        eq_(self.record.created_on.__class__.__name__, 'datetime')
        eq_(self.record.created_on, time_parse('1999-06-05'))

    def test_updated_on(self):
        eq_(self.record.updated_on.__class__.__name__, 'datetime')
        eq_(self.record.updated_on, time_parse('2014-02-13'))

    def test_expires_on(self):
        eq_(self.record.expires_on.__class__.__name__, 'datetime')
        eq_(self.record.expires_on, time_parse('2014-04-15'))
| 36.964286 | 83 | 0.688889 |
7959e88c765803b7c712b3a856cfe3c6ecfa0dd2 | 447 | py | Python | idz.py | Timofej8971/lab-9 | 68f41fca3f2750e76e1d25d9a2f08c80075964c5 | [
"MIT"
] | null | null | null | idz.py | Timofej8971/lab-9 | 68f41fca3f2750e76e1d25d9a2f08c80075964c5 | [
"MIT"
] | null | null | null | idz.py | Timofej8971/lab-9 | 68f41fca3f2750e76e1d25d9a2f08c80075964c5 | [
"MIT"
] | null | null | null | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
# Вариант 7
if __name__ == "__main__":
U = set("abcdefghijklmnopqrstuvwxyz")
A = {'b', 'f', 'g', 'm', 'o'}
B = {'b', 'g', 'h', 'l', 'u'}
C = {'e', 'f', 'm'}
D = {'e', 'g', 'l', 'p', 'q', 'u', 'v'}
X = (A.intersection(C)).union(B)
print(f'X= {X}')
BB = U.difference(B)
Y = (A.intersection(BB)).union(C.difference(D))
print(f'Y = {Y}')
| 21.285714 | 52 | 0.425056 |
7959e917eb323864b9df591c1d1ddf26a93f7b59 | 2,932 | py | Python | Lib/test/test_threadsignals.py | ystk/debian-python3.1 | 6241444a6994140621d1b143a2d6b311b184366a | [
"PSF-2.0"
] | 1 | 2020-11-26T18:53:46.000Z | 2020-11-26T18:53:46.000Z | Lib/test/test_threadsignals.py | ystk/debian-python3.1 | 6241444a6994140621d1b143a2d6b311b184366a | [
"PSF-2.0"
] | null | null | null | Lib/test/test_threadsignals.py | ystk/debian-python3.1 | 6241444a6994140621d1b143a2d6b311b184366a | [
"PSF-2.0"
] | 2 | 2018-08-06T04:37:38.000Z | 2022-02-27T18:07:12.000Z | """PyUnit testing that threads honor our signal semantics"""
import unittest
import _thread as thread
import signal
import os
import sys
from test.support import run_unittest
# SIGUSR1/SIGUSR2 are POSIX-only; skip on platforms without them.
if sys.platform[:3] in ('win', 'os2') or sys.platform=='riscos':
    raise unittest.SkipTest("Can't test signal on %s" % sys.platform)

# Target of os.kill: this very process.
process_pid = os.getpid()
# Released by the signalling thread once both signals have been sent.
signalled_all=thread.allocate_lock()
def registerSignals(for_usr1, for_usr2, for_alrm):
    """Install handlers for SIGUSR1/SIGUSR2/SIGALRM and return the previous
    handlers as a (usr1, usr2, alrm) tuple, so they can be restored later."""
    previous_usr1 = signal.signal(signal.SIGUSR1, for_usr1)
    previous_usr2 = signal.signal(signal.SIGUSR2, for_usr2)
    previous_alrm = signal.signal(signal.SIGALRM, for_alrm)
    return previous_usr1, previous_usr2, previous_alrm
# The signal handler.  Just note that the signal occurred and
# from who.  The blackboard (a module global set up in test_main) records,
# per signal number, how many times it fired and which thread ran the handler.
def handle_signals(sig,frame):
    signal_blackboard[sig]['tripped'] += 1
    signal_blackboard[sig]['tripped_by'] = thread.get_ident()
# a function that will be spawned as a separate thread.
# Sends both user signals to this process, then releases the lock the main
# thread is blocked on so it knows both signals have been dispatched.
def send_signals():
    os.kill(process_pid, signal.SIGUSR1)
    os.kill(process_pid, signal.SIGUSR2)
    signalled_all.release()
class ThreadSignals(unittest.TestCase):
    """Test signal handling semantics of threads.
    We spawn a thread, have the thread send two signals, and
    wait for it to finish. Check that we got both signals
    and that they were run by the main thread.
    """
    def test_signals(self):
        # First acquire takes the lock; the spawned thread releases it after
        # sending SIGUSR1/SIGUSR2, so the second acquire blocks until then.
        signalled_all.acquire()
        self.spawnSignallingThread()
        signalled_all.acquire()
        # the signals that we asked the kernel to send
        # will come back, but we don't know when.
        # (it might even be after the thread exits
        # and might be out of order.)  If we haven't seen
        # the signals yet, send yet another signal and
        # wait for it return.
        if signal_blackboard[signal.SIGUSR1]['tripped'] == 0 \
           or signal_blackboard[signal.SIGUSR2]['tripped'] == 0:
            # SIGALRM in 1s gives the pending USR signals time to be delivered;
            # signal.pause() returns once any signal handler has run.
            signal.alarm(1)
            signal.pause()
            signal.alarm(0)
        # Each signal must have been delivered exactly once, in this (main) thread.
        self.assertEqual( signal_blackboard[signal.SIGUSR1]['tripped'], 1)
        self.assertEqual( signal_blackboard[signal.SIGUSR1]['tripped_by'],
                           thread.get_ident())
        self.assertEqual( signal_blackboard[signal.SIGUSR2]['tripped'], 1)
        self.assertEqual( signal_blackboard[signal.SIGUSR2]['tripped_by'],
                           thread.get_ident())
        signalled_all.release()

    def spawnSignallingThread(self):
        # Fire-and-forget worker; synchronisation happens via signalled_all.
        thread.start_new_thread(send_signals, ())
def test_main():
    """Install the test handlers, run the suite, then restore the originals."""
    global signal_blackboard

    # Per-signal counters shared with handle_signals().
    signal_blackboard = { signal.SIGUSR1 : {'tripped': 0, 'tripped_by': 0 },
                          signal.SIGUSR2 : {'tripped': 0, 'tripped_by': 0 },
                          signal.SIGALRM : {'tripped': 0, 'tripped_by': 0 } }

    oldsigs = registerSignals(handle_signals, handle_signals, handle_signals)
    try:
        run_unittest(ThreadSignals)
    finally:
        # Always restore the previous handlers, even if the tests failed.
        registerSignals(*oldsigs)

if __name__ == '__main__':
    test_main()
| 34.494118 | 77 | 0.662005 |
7959eabd6f3a5c48a968d152be80784f00175f91 | 1,271 | py | Python | interpro7dw/interpro/oracle/taxa.py | matthiasblum/i7dw | b40e5b9984dec2895956828ddf9db8af4a8ec932 | [
"Apache-2.0"
] | null | null | null | interpro7dw/interpro/oracle/taxa.py | matthiasblum/i7dw | b40e5b9984dec2895956828ddf9db8af4a8ec932 | [
"Apache-2.0"
] | null | null | null | interpro7dw/interpro/oracle/taxa.py | matthiasblum/i7dw | b40e5b9984dec2895956828ddf9db8af4a8ec932 | [
"Apache-2.0"
] | null | null | null | import pickle
import cx_Oracle
def export_taxa(url: str, file: str):
    """Export the INTERPRO.ETAXI taxonomy table to `file` as a pickle.

    Each taxon is stored as a dict keyed by its (string) taxon id with its
    parent, names, rank, direct children and full root-to-node lineage.

    :param url: cx_Oracle connection string
    :param file: output pickle path
    """
    con = cx_Oracle.connect(url)
    cur = con.cursor()
    cur.execute(
        """
        SELECT TO_CHAR(TAX_ID), TO_CHAR(PARENT_ID), SCIENTIFIC_NAME,
          FULL_NAME, RANK
        FROM INTERPRO.ETAXI
        """
    )
    taxa = {}
    for row in cur:
        taxon_id = row[0]
        taxa[taxon_id] = {
            "id": taxon_id,
            "parent": row[1],  # None for the root (NULL PARENT_ID)
            "sci_name": row[2],
            "full_name": row[3],
            "rank": row[4],
            "children": set(),  # filled in during the traversal below
            "lineage": [taxon_id]  # built child -> root, reversed later
        }
    cur.close()
    con.close()
    for taxon_id, taxon in taxa.items():
        node_id = taxon_id
        parent_id = taxon["parent"]
        # Traverse lineage from child to parent
        while parent_id is not None:
            taxon["lineage"].append(parent_id)
            # Register the child edge on every ancestor visit; the set
            # de-duplicates edges added when other descendants walk up.
            taxa[parent_id]["children"].add(node_id)
            # We move to the parent
            node_id = parent_id
            parent_id = taxa[parent_id]["parent"]
    for info in taxa.values():
        info["children"] = list(info["children"])
        # Reverse so lineage reads root -> ... -> taxon (ids already strings).
        info["lineage"] = list(map(str, reversed(info["lineage"])))
    with open(file, "wb") as fh:
        pickle.dump(taxa, fh)
| 23.981132 | 69 | 0.526357 |
7959eb73d28985defeb9d8e3496000e24ea8b7f0 | 767 | py | Python | examples/example_02.py | cfangmeier/matplotboard | a6e2c638f611c0cc9206ac18dd0ea827f367dc3a | [
"MIT"
] | 3 | 2019-05-12T04:04:39.000Z | 2020-12-15T20:25:15.000Z | examples/example_02.py | cfangmeier/matplotboard | a6e2c638f611c0cc9206ac18dd0ea827f367dc3a | [
"MIT"
] | 1 | 2018-08-15T20:18:45.000Z | 2018-08-15T20:18:45.000Z | examples/example_02.py | cfangmeier/matplotboard | a6e2c638f611c0cc9206ac18dd0ea827f367dc3a | [
"MIT"
] | null | null | null | from itertools import product
import numpy as np
import matplotlib.pyplot as plt
import matplotboard as mpb
@mpb.decl_fig
def cool_fig(func, scale, color="b"):
    """Plot `func` ("sin", "tan" or "exp") over [-scale, scale] in `color`."""
    sample_xs = np.linspace(-scale, scale, 100)
    # Dispatch straight to the numpy ufuncs; raises KeyError for an
    # unknown function name, just like an unmatched dict lookup would.
    table = {"sin": np.sin, "tan": np.tan, "exp": np.exp}
    plt.plot(sample_xs, table[func](sample_xs), color=color)
if __name__ == "__main__":
    # Render figures in parallel worker processes.
    mpb.configure(multiprocess=True)

    figures = {}
    # One figure per (color, function, scale) combination: 4 * 3 * 20 = 240.
    for color, function, scale in product(
        "rbgk", ["sin", "tan", "exp"], np.linspace(1, 20, 20)
    ):
        figures[f"{function}_{color}_{int(scale)}"] = cool_fig(
            function, scale, color=color
        )

    mpb.render(figures)
    mpb.generate_report(figures, "Report")
7959ebde8cebee6dc0c9d674c8896eb1850a2bda | 3,678 | py | Python | core/hash/hash.py | awesome-archive/OAG | 551a237e8aa1fd6642b6c89f0fdb545104c09712 | [
"MIT"
] | 50 | 2019-08-02T05:46:55.000Z | 2022-03-28T02:01:52.000Z | core/hash/hash.py | awesome-archive/OAG | 551a237e8aa1fd6642b6c89f0fdb545104c09712 | [
"MIT"
] | 1 | 2019-08-14T07:51:49.000Z | 2019-08-16T07:22:24.000Z | core/hash/hash.py | awesome-archive/OAG | 551a237e8aa1fd6642b6c89f0fdb545104c09712 | [
"MIT"
] | 15 | 2019-07-30T07:32:58.000Z | 2022-01-09T13:28:29.000Z | from os.path import join
import os
import numpy as np
import time
from collections import defaultdict as dd
from core.hash.title2vec import Title2Vec
from core.utils import feature_utils
from core.utils import data_utils
from core.utils import eval_utils
from core.utils import settings
import logging
logger = logging.getLogger(__name__)
logging.basicConfig(level=logging.INFO, format='%(asctime)s %(message)s') # include timestamp
class HashMatch:
    """Match noisy paper titles to clean papers via LSH over title embeddings.

    Titles are embedded with Title2Vec, projected onto `title_bit` random
    hyperplanes (sign -> bit) and looked up in an exact hash table; only
    buckets holding a single clean paper produce a prediction.
    """
    title_bit = 128  # number of LSH bits per title
    title2vec_model = Title2Vec()  # NOTE: built at class-definition (import) time
    vector_dim = title2vec_model.dim
    proj = None  # vector_dim x title_bit projection matrix, lazily loaded

    def prepare_LSH_projection_matrix(self):
        # Draw a fresh random Gaussian projection and persist it for reuse.
        proj = np.random.normal(size=(self.vector_dim, self.title_bit))
        fname = 'LSH_proj_matrix.pkl'
        data_utils.dump_large_obj(proj, settings.PAPER_DATA_DIR, fname)
        self.proj = proj

    def load_LSH_projection_matrix(self):
        # Load the projection matrix written by prepare_LSH_projection_matrix().
        fname = 'LSH_proj_matrix.pkl'
        proj = data_utils.load_large_obj(settings.PAPER_DATA_DIR, fname)
        self.proj = proj

    def title_vectors_to_binary_codes_LSH(self, vectors, proj):
        """Hash each title vector: bit = 1 where its projection is negative."""
        proj_vectors = np.dot(vectors, proj)
        B = np.zeros(proj_vectors.shape, dtype=np.bool_)
        # NOTE(review): np.where(cond, bool_array, 1) may promote the dtype
        # beyond bool_; encode_binary_codes downstream appears to accept it.
        B = np.where(proj_vectors >= 0, B, 1)
        return B

    def two_domain_title_vectors_to_binary_codes(self):
        """Embed both domains' titles and hash them with the same projection."""
        src_vectors, dst_vectors = self.title2vec_model.prepare_paper_title_to_vectors()
        if self.proj is None:
            self.load_LSH_projection_matrix()
        src_binary_codes = self.title_vectors_to_binary_codes_LSH(src_vectors, self.proj)
        dst_binary_codes = self.title_vectors_to_binary_codes_LSH(dst_vectors, self.proj)
        return src_binary_codes, dst_binary_codes

    def dump_dst_hash_tables(self):
        """Build and save hash -> [clean paper ids] over train+test clean papers."""
        src_binary_codes_test, dst_binary_codes = self.two_domain_title_vectors_to_binary_codes()
        hash_to_dst_idx = dd(list)
        cpapers_train = data_utils.load_json_lines(settings.PAPER_DATA_DIR, 'clean-papers-train.dat')
        cpapers_test = data_utils.load_json_lines(settings.PAPER_DATA_DIR, 'clean-papers-test.dat')
        # Order must match the vector order produced by the title2vec model.
        cpapers = cpapers_train + cpapers_test
        for i, h in enumerate(dst_binary_codes):
            h = feature_utils.encode_binary_codes(h)
            hash_to_dst_idx[h].append(str(cpapers[i]['id']))
        data_utils.dump_json(hash_to_dst_idx, settings.OUT_PAPER_DIR, 'hash_to_dst_paper_id.json')

    def eval_hash_table(self):
        """Predict a clean paper for each noisy test paper and log P/R/F1 and timing."""
        start_test_time = time.time()
        src_binary_codes_test, dst_binary_codes = self.two_domain_title_vectors_to_binary_codes()
        npapers_test = data_utils.load_json_lines(settings.PAPER_DATA_DIR, 'noisy-papers-test.dat')
        labels = [str(item['id']) for item in npapers_test]
        hash_to_dst_idx = data_utils.load_json(settings.OUT_PAPER_DIR, 'hash_to_dst_paper_id.json')
        preds = []
        before_loop_time = time.time()
        for i, h in enumerate(src_binary_codes_test):
            h = feature_utils.encode_binary_codes(h)
            # Only unambiguous buckets (exactly one candidate) yield a prediction.
            if h in hash_to_dst_idx and len(hash_to_dst_idx[h]) == 1:
                preds.append(hash_to_dst_idx[h][0])
            else:
                preds.append(None)
        end_time = time.time()
        pred_time = end_time - before_loop_time
        test_time = end_time - start_test_time
        r = eval_utils.eval_prec_rec_f1_ir(preds, labels)
        logger.info('eval results: Prec. %.4f, Rec. %.4f, F1. %.4f', r[0], r[1], r[2])
        logger.info('test time %.2fs, predict time %.2fs', test_time, pred_time)
if __name__ == '__main__':
    # End-to-end run: build projection, index clean papers, evaluate matching.
    hash_match = HashMatch()
    hash_match.prepare_LSH_projection_matrix()
    hash_match.dump_dst_hash_tables()
    hash_match.eval_hash_table()
    logger.info('done')
| 41.325843 | 101 | 0.706906 |
7959ecdd6dd69031a04f935d7b85e2434f6d5bb4 | 768 | py | Python | flaskrestful/FlaskProject/migrations/versions/8a3c7a3d27e9_.py | riverstation/project-all | c56f1879e1303d561e95a3ff3a70f94fb5fa2191 | [
"Apache-2.0"
] | null | null | null | flaskrestful/FlaskProject/migrations/versions/8a3c7a3d27e9_.py | riverstation/project-all | c56f1879e1303d561e95a3ff3a70f94fb5fa2191 | [
"Apache-2.0"
] | null | null | null | flaskrestful/FlaskProject/migrations/versions/8a3c7a3d27e9_.py | riverstation/project-all | c56f1879e1303d561e95a3ff3a70f94fb5fa2191 | [
"Apache-2.0"
] | null | null | null | """empty message
Revision ID: 8a3c7a3d27e9
Revises: 5f2c5108ef26
Create Date: 2018-08-14 15:36:10.352682
"""
from alembic import op
import sqlalchemy as sa
# revision identifiers, used by Alembic.
revision = '8a3c7a3d27e9'  # id of this migration
down_revision = '5f2c5108ef26'  # migration this one builds on
branch_labels = None
depends_on = None
def upgrade():
    """Apply the migration: create the `movie` table."""
    # ### commands auto generated by Alembic - please adjust! ###
    op.create_table('movie',
    sa.Column('m_id', sa.Integer(), autoincrement=True, nullable=False),
    sa.Column('m_name', sa.String(length=16), nullable=True),
    sa.PrimaryKeyConstraint('m_id')
    )
    # ### end Alembic commands ###
def downgrade():
    """Revert the migration: drop the `movie` table."""
    # ### commands auto generated by Alembic - please adjust! ###
    op.drop_table('movie')
    # ### end Alembic commands ###
| 23.272727 | 72 | 0.686198 |
7959ed24d2cc11b25f3b89bc7f2fd3e458eb264b | 1,696 | py | Python | mutation.py | DilipIITBHU/MLST-IMPLEMENTATION | 6ecfaab85f954171fc5aa9694a511a9e44a4ffa8 | [
"MIT"
] | 1 | 2020-02-26T17:28:37.000Z | 2020-02-26T17:28:37.000Z | mutation.py | DilipIITBHU/MLST-IMPLEMENTATION | 6ecfaab85f954171fc5aa9694a511a9e44a4ffa8 | [
"MIT"
] | null | null | null | mutation.py | DilipIITBHU/MLST-IMPLEMENTATION | 6ecfaab85f954171fc5aa9694a511a9e44a4ffa8 | [
"MIT"
] | 1 | 2020-02-26T17:29:00.000Z | 2020-02-26T17:29:00.000Z | from encode_decode import encode,decode
from freq_element import *
from collections import defaultdict
from check_spanning import *
"""graph = {
'a': {'b': 1, 'e': 4,'c':2},
'b': {'a':1,'c': 3, 'd':3, 'e':4},
'c': {'a':2,'d':1,'b':3},
'd': {'b':3,'e':1, 'c':1},
'e': {'d':1,'a':4,'b':4}
}"""
def _mutation(s):
n = len(s)
if s[0]=='0':
return '1'+s[1:]
for i in range(0,n-1):
if s[i]=='0':
s = s[:i]+'1'+s[i+1:]
return s
if s[n-1]=='0':
return s[:n-1]+'1'
return s
def sortSecond(val):
    """Sort key: the second element of a (label, frequency) pair."""
    return val[1]
def mutation_sorting(s, graph):
    """Decode chromosome `s` to labels, then order the labels by how often
    each occurs in the subset graph induced on `graph` (most frequent first).
    """
    labels = decode(s)
    subset = create_subset_graph(graph, labels)
    frequencies = freq_element(subset)
    # Pair each label with its frequency (labels are 1-based indices).
    scored = [(label, frequencies[label - 1]) for label in labels]
    scored.sort(key=lambda pair: pair[1], reverse=True)
    return [label for label, _ in scored]
def label_remove(graph,label):
    """Greedily drop labels whose removal still leaves a spanning tree.

    Pops candidates from the tail of `label`; a popped label is discarded
    when the remainder still spans `graph`, otherwise it is re-inserted
    at the front.  Returns the (mutated) label list.
    """
    count = len(label)
    x=0
    while x<count:
        q = label.pop()
        if is_spanning_tree(graph,label)==True:
            # q was redundant: leave it out.  NOTE(review): `continue`
            # skips the x+=1 below, so x only counts *kept* labels while
            # `count` stays at the original length — confirm this
            # termination logic is intended.
            continue
        else:
            # q is required: put it back at the front so the tail keeps
            # offering not-yet-tested labels.
            label.insert(0,q)
            x+=1
    return label
#print(mutation('1010'))
#print(mutation_sorting('1010',graph))
#print(label_remove(graph,mutation_sorting('1010',graph)))
def mutation(graph, s):
    """Mutate chromosome `s`: flip a bit, then prune redundant labels.

    Pipeline: _mutation (bit flip) -> mutation_sorting (order labels by
    frequency) -> label_remove (drop labels not needed for spanning).
    """
    flipped = _mutation(s)
    ordered = mutation_sorting(flipped, graph)
    return label_remove(graph, ordered)
| 22.918919 | 58 | 0.520047 |
7959ed7847ba0776dea1f4cd61d7c37cd15cc066 | 12,265 | py | Python | nnunet/preprocessing/sanity_checks.py | zyw19980410/avm | a00e9ac09a5ca394eb18b4f55fc9adeeb2c0f1ec | [
"Apache-2.0"
] | null | null | null | nnunet/preprocessing/sanity_checks.py | zyw19980410/avm | a00e9ac09a5ca394eb18b4f55fc9adeeb2c0f1ec | [
"Apache-2.0"
] | null | null | null | nnunet/preprocessing/sanity_checks.py | zyw19980410/avm | a00e9ac09a5ca394eb18b4f55fc9adeeb2c0f1ec | [
"Apache-2.0"
] | null | null | null | # Copyright 2020 Division of Medical Image Computing, German Cancer Research Center (DKFZ), Heidelberg, Germany
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from multiprocessing import Pool
import SimpleITK as sitk
import nibabel as nib
import numpy as np
from batchgenerators.utilities.file_and_folder_operations import *
from nnunet.configuration import default_num_threads
def verify_all_same_orientation(folder):
    """
    This should run after cropping
    :param folder:
    :return: (all_same, unique_orientations) where all_same is True when every
        .nii.gz file in `folder` shares one axis orientation
    """
    nii_files = subfiles(folder, suffix=".nii.gz", join=True)
    # One axis-code triple (e.g. ('R','A','S')) per file, derived from the affine.
    orientations = np.array(
        [nib.aff2axcodes(nib.load(fname).affine) for fname in nii_files]
    )
    unique_orientations = np.unique(orientations, axis=0)
    return len(unique_orientations) == 1, unique_orientations
def verify_same_geometry(img_1: sitk.Image, img_2: sitk.Image):
    """Check that two images share origin, spacing, direction and size.

    All four attributes are always compared (and mismatches printed) so the
    user sees every discrepancy in one run.  Returns True only when all match.
    """
    def _matches(values_1, values_2, message):
        # np.isclose tolerates tiny floating point differences in the metadata.
        same = np.all(np.isclose(values_1, values_2))
        if not same:
            print(message)
            print(values_1)
            print(values_2)
        return same

    same_ori = _matches(img_1.GetOrigin(), img_2.GetOrigin(),
                        "the origin does not match between the images:")
    same_spac = _matches(img_1.GetSpacing(), img_2.GetSpacing(),
                         "the spacing does not match between the images")
    same_dir = _matches(img_1.GetDirection(), img_2.GetDirection(),
                        "the direction does not match between the images")
    same_size = _matches(img_1.GetSize(), img_2.GetSize(),
                         "the size does not match between the images")

    # bool() keeps the return type a plain Python bool, as before.
    return bool(same_ori and same_spac and same_dir and same_size)
def verify_contains_only_expected_labels(itk_img: str, valid_labels: (tuple, list)):
    """Return (ok, invalid_values): ok is True when the image at `itk_img`
    contains no voxel values outside `valid_labels`."""
    img_npy = sitk.GetArrayFromImage(sitk.ReadImage(itk_img))
    invalid_uniques = [value for value in np.unique(img_npy)
                       if value not in valid_labels]
    return len(invalid_uniques) == 0, invalid_uniques
def verify_dataset_integrity(folder):
    """
    folder needs the imagesTr, imagesTs and labelsTr subfolders. There also needs to be a dataset.json
    checks if all training cases and labels are present
    checks if all test cases (if any) are present
    for each case, checks whether all modalities are present
    for each case, checks whether the pixel grids are aligned
    checks whether the labels really only contain values they should
    :param folder:
    :return:
    """
    assert isfile(join(folder, "dataset.json")), "There needs to be a dataset.json file in folder, folder=%s" % folder
    assert isdir(join(folder, "imagesTr")), "There needs to be a imagesTr subfolder in folder, folder=%s" % folder
    assert isdir(join(folder, "labelsTr")), "There needs to be a labelsTr subfolder in folder, folder=%s" % folder
    dataset = load_json(join(folder, "dataset.json"))
    training_cases = dataset['training']
    num_modalities = len(dataset['modality'].keys())
    test_cases = dataset['test']
    # Case identifier = filename without the 7-char ".nii.gz" extension.
    expected_train_identifiers = [i['image'].split("/")[-1][:-7] for i in training_cases]
    expected_test_identifiers = [i.split("/")[-1][:-7] for i in test_cases]

    ## check training set
    nii_files_in_imagesTr = subfiles((join(folder, "imagesTr")), suffix=".nii.gz", join=False)
    nii_files_in_labelsTr = subfiles((join(folder, "labelsTr")), suffix=".nii.gz", join=False)

    label_files = []
    geometries_OK = True
    has_nan = False

    # check all cases
    if len(expected_train_identifiers) != len(np.unique(expected_train_identifiers)): raise RuntimeError("found duplicate training cases in dataset.json")

    print("Verifying training set")
    for c in expected_train_identifiers:
        print("checking case", c)
        # check if all files are present
        expected_label_file = join(folder, "labelsTr", c + ".nii.gz")
        label_files.append(expected_label_file)

        # One image per modality, suffixed _0000, _0001, ...
        expected_image_files = [join(folder, "imagesTr", c + "_%04.0d.nii.gz" % i) for i in range(num_modalities)]
        assert isfile(expected_label_file), "could not find label file for case %s. Expected file: \n%s" % (
            c, expected_label_file)
        assert all([isfile(i) for i in
                    expected_image_files]), "some image files are missing for case %s. Expected files:\n %s" % (
            c, expected_image_files)

        # verify that all modalities and the label have the same shape and geometry.
        label_itk = sitk.ReadImage(expected_label_file)

        nans_in_seg = np.any(np.isnan(sitk.GetArrayFromImage(label_itk)))
        has_nan = has_nan | nans_in_seg
        if nans_in_seg:
            print("There are NAN values in segmentation %s" % expected_label_file)

        images_itk = [sitk.ReadImage(i) for i in expected_image_files]
        for i, img in enumerate(images_itk):
            nans_in_image = np.any(np.isnan(sitk.GetArrayFromImage(img)))
            has_nan = has_nan | nans_in_image
            same_geometry = verify_same_geometry(img, label_itk)
            if not same_geometry:
                # Geometry problems are collected and reported at the end
                # (raise Warning below) instead of aborting immediately.
                geometries_OK = False
                print("The geometry of the image %s does not match the geometry of the label file. The pixel arrays "
                      "will not be aligned and nnU-Net cannot use this data. Please make sure your image modalities "
                      "are coregistered and have the same geometry as the label" % expected_image_files[0][:-12])
            if nans_in_image:
                print("There are NAN values in image %s" % expected_image_files[i])

        # now remove checked files from the lists nii_files_in_imagesTr and nii_files_in_labelsTr
        for i in expected_image_files:
            nii_files_in_imagesTr.remove(os.path.basename(i))
        nii_files_in_labelsTr.remove(os.path.basename(expected_label_file))
    # check for stragglers
    assert len(
        nii_files_in_imagesTr) == 0, "there are training cases in imagesTr that are not listed in dataset.json: %s" % nii_files_in_imagesTr
    assert len(
        nii_files_in_labelsTr) == 0, "there are training cases in labelsTr that are not listed in dataset.json: %s" % nii_files_in_labelsTr

    # verify that only properly declared values are present in the labels
    print("Verifying label values")
    expected_labels = list(int(i) for i in dataset['labels'].keys())

    # check if labels are in consecutive order
    # NOTE: relies on dataset.json listing the labels in ascending order.
    assert expected_labels[0] == 0, 'The first label must be 0 and maps to the background'
    labels_valid_consecutive = np.ediff1d(expected_labels) == 1
    assert all(labels_valid_consecutive), f'Labels must be in consecutive order (0, 1, 2, ...). The labels {np.array(expected_labels)[1:][~labels_valid_consecutive]} do not satisfy this restriction'

    # Scan all label files for unexpected values in parallel.
    p = Pool(default_num_threads)
    results = p.starmap(verify_contains_only_expected_labels, zip(label_files, [expected_labels] * len(label_files)))
    p.close()
    p.join()

    fail = False
    print("Expected label values are", expected_labels)
    for i, r in enumerate(results):
        if not r[0]:
            print("Unexpected labels found in file %s. Found these unexpected values (they should not be there) %s" % (
                label_files[i], r[1]))
            fail = True

    if fail:
        raise AssertionError(
            "Found unexpected labels in the training dataset. Please correct that or adjust your dataset.json accordingly")
    else:
        print("Labels OK")

    # check test set, but only if there actually is a test set
    if len(expected_test_identifiers) > 0:
        print("Verifying test set")
        nii_files_in_imagesTs = subfiles((join(folder, "imagesTs")), suffix=".nii.gz", join=False)

        for c in expected_test_identifiers:
            # check if all files are present
            expected_image_files = [join(folder, "imagesTs", c + "_%04.0d.nii.gz" % i) for i in range(num_modalities)]
            assert all([isfile(i) for i in
                        expected_image_files]), "some image files are missing for case %s. Expected files:\n %s" % (
                c, expected_image_files)

            # verify that all modalities and the label have the same geometry. We use the affine for this
            if num_modalities > 1:
                images_itk = [sitk.ReadImage(i) for i in expected_image_files]
                reference_img = images_itk[0]
                for i, img in enumerate(images_itk[1:]):
                    assert verify_same_geometry(img, reference_img), "The modalities of the image %s do not seem to be " \
                                                                     "registered. Please coregister your modalities." % (
                                                                         expected_image_files[i])

            # now remove checked files from the lists nii_files_in_imagesTr and nii_files_in_labelsTr
            for i in expected_image_files:
                nii_files_in_imagesTs.remove(os.path.basename(i))

        # Bug fix: the error message previously interpolated
        # nii_files_in_imagesTr (the training leftovers, always empty here)
        # instead of the actual imagesTs stragglers being asserted on.
        assert len(
            nii_files_in_imagesTs) == 0, "there are training cases in imagesTs that are not listed in dataset.json: %s" % nii_files_in_imagesTs

    all_same, unique_orientations = verify_all_same_orientation(join(folder, "imagesTr"))
    if not all_same:
        print(
            "WARNING: Not all images in the dataset have the same axis ordering. We very strongly recommend you correct that by reorienting the data. fslreorient2std should do the trick")
    # save unique orientations to dataset.json
    if not geometries_OK:
        raise Warning("GEOMETRY MISMATCH FOUND! CHECK THE TEXT OUTPUT! This does not cause an error at this point but you should definitely check whether your geometries are alright!")
    else:
        print("Dataset OK")

    if has_nan:
        raise RuntimeError("Some images have nan values in them. This will break the training. See text output above to see which ones")
def reorient_to_RAS(img_fname: str, output_fname: str = None):
    """Reorient a NIfTI image to its closest canonical (RAS) orientation.

    Overwrites the input file when no output filename is given.
    """
    target = img_fname if output_fname is None else output_fname
    nib.save(nib.as_closest_canonical(nib.load(img_fname)), target)
if __name__ == "__main__":
    # Ad-hoc debugging script (hardcoded local paths): round-trips an image
    # through numpy and MINC to investigate geometry issues.
    import SimpleITK as sitk

    # load image
    gt_itk = sitk.ReadImage(
        "/media/fabian/Results/nnUNet/3d_fullres/Task064_KiTS_labelsFixed/nnUNetTrainerV2__nnUNetPlansv2.1/gt_niftis/case_00085.nii.gz")

    # get numpy array
    pred_npy = sitk.GetArrayFromImage(gt_itk)

    # create new image from numpy array
    prek_itk_new = sitk.GetImageFromArray(pred_npy)
    # copy geometry
    prek_itk_new.CopyInformation(gt_itk)
    # prek_itk_new = copy_geometry(prek_itk_new, gt_itk)

    # save
    sitk.WriteImage(prek_itk_new, "test.mnc")

    # load images in nib
    gt = nib.load(
        "/media/fabian/Results/nnUNet/3d_fullres/Task064_KiTS_labelsFixed/nnUNetTrainerV2__nnUNetPlansv2.1/gt_niftis/case_00085.nii.gz")
    pred_nib = nib.load("test.mnc")

    new_img_sitk = sitk.ReadImage("test.mnc")

    np1 = sitk.GetArrayFromImage(gt_itk)
    np2 = sitk.GetArrayFromImage(prek_itk_new)
| 44.277978 | 198 | 0.678924 |
7959ede9bd294129626eb2c1d5eebe14afd9e379 | 855 | py | Python | node_launcher/node_set/tor/tor_node.py | ryan-lingle/node-launcher | 4f1f7087a28d76f5b8153adac548d09b0558f6d5 | [
"MIT"
] | null | null | null | node_launcher/node_set/tor/tor_node.py | ryan-lingle/node-launcher | 4f1f7087a28d76f5b8153adac548d09b0558f6d5 | [
"MIT"
] | null | null | null | node_launcher/node_set/tor/tor_node.py | ryan-lingle/node-launcher | 4f1f7087a28d76f5b8153adac548d09b0558f6d5 | [
"MIT"
] | null | null | null | from node_launcher.node_set.lib.managed_process import ManagedProcess
from node_launcher.node_set.lib.network_node import NetworkNode
from node_launcher.node_set.lib.node_status import NodeStatus
from .tor_configuration import TorConfiguration
from .tor_software import TorSoftware
class TorNode(NetworkNode):
    """A Tor node run as a locally managed process.

    Sync status is derived from Tor's bootstrap progress log lines.
    """
    software: TorSoftware
    configuration: TorConfiguration
    process: ManagedProcess

    def __init__(self):
        super().__init__(
            network='tor',
            Software=TorSoftware,
            Configuration=TorConfiguration,
            Process=ManagedProcess
        )

    def handle_log_line(self, log_line: str):
        # Map bootstrap markers to node status; only the first match applies.
        markers = (
            ('Bootstrapped 0%: Starting', NodeStatus.SYNCING),
            ('Bootstrapped 100%: Done', NodeStatus.SYNCED),
        )
        for marker, status in markers:
            if marker in log_line:
                self.update_status(status)
                break
7959ee7071f93dad476f1add115c1bf827ed4961 | 13,014 | py | Python | elmclient/_typesystem.py | IBM/ELM-Python-Client | cd61ae6a253cc7ebffcfce78c9c6d67c93864ac6 | [
"MIT"
] | 10 | 2021-10-21T12:23:41.000Z | 2022-03-30T22:43:30.000Z | elmclient/_typesystem.py | IBM/ELM-Python-Client | cd61ae6a253cc7ebffcfce78c9c6d67c93864ac6 | [
"MIT"
] | 6 | 2021-11-16T10:37:23.000Z | 2022-03-14T11:38:46.000Z | elmclient/_typesystem.py | IBM/ELM-Python-Client | cd61ae6a253cc7ebffcfce78c9c6d67c93864ac6 | [
"MIT"
] | 4 | 2021-11-15T23:40:46.000Z | 2022-03-28T19:41:51.000Z | ##
## © Copyright 2021- IBM Inc. All rights reserved
# SPDX-License-Identifier: MIT
##
import logging
from . import rdfxml
from . import utils
logger = logging.getLogger(__name__)
#################################################################################################
class No_Type_System_Mixin():
    """Mixin for applications that do not expose a type system."""
    def __init__(self,*args,**kwargs):
        # Flag consulted by callers to decide whether type-system APIs exist.
        self.has_typesystem=False
class Type_System_Mixin():
    def __init__(self,*args,**kwargs):
        """Initialise an empty typesystem (shapes, properties, enums, values)."""
        self.typesystem_loaded = False
        self.has_typesystem=True
        self.clear_typesystem()
    def clear_typesystem(self):
        """Discard all registered shapes/properties/enums and mark unloaded."""
        self.shapes = {}       # shape uri -> {'name','shape','properties':[...]}
        self.properties = {}   # property uri -> {'name','shape','enums','value_type','altname'}
        self.enums = {}        # enum uri -> {'name','id','property'}
        self.values = {}
        self.typesystem_loaded = False
        self._gettypecache = {}
def textreport(self):
def quote(s):
if " " in s:
return f"'{s}'"
else:
return s
rows = []
report = ""
def addtoreport(s, end='\n'):
nonlocal report
report += s + end
reportedproperties = []
# print a nicely sorted report with shapes at left, then properties (with type, if defined) in that shape, then enumerations in that property
for shapeuri in sorted(self.shapes.keys(),key=lambda k: self.shapes[k]['name'].lower()):
rows.append( [f"{quote(self.shapes[shapeuri]['name']):25}"] )
for propertyuri in sorted(self.shapes[shapeuri]['properties'], key=lambda k: self.properties[k]['name'].lower()):
reportedproperties.append(propertyuri)
rows.append( [ "",f"{quote(self.properties[propertyuri]['name'])}"] )
if self.properties[propertyuri]['altname'] is not None:
rows[-1].append( f"{self.properties[propertyuri]['altname']}" )
else:
rows[-1].append("")
rows[-1].append( f"{rdfxml.uri_to_default_prefixed_tag(propertyuri)}" )
if self.properties[propertyuri].get('value_type'):
rows[-1].append( f"{self.properties[propertyuri]['value_type']}" )
else:
rows[-1].append( "" )
newrowlen = len(rows[-1])-3
# add enums as additional rows
for enum_uri in sorted(self.properties[propertyuri]['enums'],key=lambda k:self.enums[k]['name'].lower()):
eid = self.enums[enum_uri].get('id') or enum_uri
rows.append( [""]*newrowlen+[f"{quote(self.enums[enum_uri]['name'])}",eid,enum_uri ] )
logger.info( f"appended for enum {rows[-1]}" )
if len(rows)>0:
addtoreport( "<h2>Shapes<h2>\n" )
report += utils.print_in_html( rows,['Shape','Property Name','Property label','URI'] )
# now report properties without shape
rows = []
for propertyuri in sorted(self.properties, key=lambda k: self.properties[k]['name'].lower()):
if propertyuri not in reportedproperties:
rows.append( [ f"{quote(self.properties[propertyuri]['name'])}" ] )
if self.properties[propertyuri]['altname'] is not None:
rows[-1].append( f"{self.properties[propertyuri]['altname']}" )
else:
rows[-1].append("")
rows[-1].append( f"{rdfxml.uri_to_default_prefixed_tag(propertyuri)}" )
# addtoreport( f"{INDENT}{propertyuri}", end="" )
if self.properties[propertyuri].get('value_type'):
rows[-1].append( f"{self.properties[propertyuri]['value_type']}" )
else:
rows[-1].append( "" )
newrowlen = len(rows[-1])-3
# add enums as additional rows
for enum_uri in sorted(self.properties[propertyuri]['enums'],key=lambda k:self.enums[k]['name'].lower()):
eid = self.enums[enum_uri].get('id') or enum_uri
rows.append( [""]*newrowlen+[f"{quote(self.enums[enum_uri]['name'])}",eid,enum_uri ] )
logger.info( f"appended for enum {rows[-1]}" )
if len(rows)>0:
addtoreport( "<h2>Properties with no shape</h2>\n" )
report += utils.print_in_html( rows,['Shape','Property Name','Property label','URI'] )
return report
    # normalise results to either a URI or if a tag expand it, or the name
    def normalise_uri( self, uri, exception_if_name=False ):
        """Normalise `uri` to a full URI: pass full URIs through, expand
        prefixed tags, pass None through; anything else raises.

        NOTE(review): the exception_if_name parameter is accepted but never
        consulted — non-URI, non-tag input always raises.  Confirm whether
        plain names were ever meant to be passed through.
        """
        if uri is None:
            result = None
        elif uri.startswith( 'http://') or uri.startswith( 'https://'):
            # Already a full URI.
            result = uri
        elif ':' in uri:
            # A prefixed tag like "dcterms:title" - expand via the prefix map.
            result = rdfxml.tag_to_uri( uri )
            logger.info( f"tag_to_uri {uri=} {result=}" )
        else:
            raise Exception( f"Expecting a uri but this doesn't look like a URI {uri}" )
        return result
    def is_known_shape_uri(self,shape_uri ):
        """Return True if shape_uri (full URI or prefixed tag) is a registered shape."""
        logger.info( f"is_known_shape_uri {shape_uri=}" )
        shape_uri = self.normalise_uri( shape_uri )
        result = self.shapes.get(shape_uri) is not None
        logger.info( f"is_known_shape_uri {shape_uri=} returning {result=}" )
        return result
    def register_shape( self, shape_name, shape_uri ):
        """Register a shape under its (normalised) URI; raises if already present."""
        logger.info( f"register_shape {shape_name=} {shape_uri=}" )
        shape_uri = self.normalise_uri( shape_uri)
        if shape_uri in self.shapes:
            raise Exception( f"Shape {shape_uri} already defined!" )
        # add the URI as the main registration for the shape
        self.shapes[shape_uri] = {'name':shape_name,'shape':shape_uri,'properties':[]}
        # NOTE(review): __init__/clear_typesystem track `typesystem_loaded`,
        # but registration sets `loaded` - confirm which attribute callers read.
        self.loaded = True
def get_shape_uri( self, shape_name ):
logger.info( f"get_shape_uri {shape_name=}" )
shapes = [k for k,v in self.shapes.items() if v['name']==shape_name ]
if len(shapes)==1:
result = shapes[0]
else:
result = None
return result
def get_shape_name( self, shape_uri ):
shape_uri = self.normalise_uri( shape_uri)
result = self.shapes.get(shape_uri)
return result
    def is_known_property_uri( self, property_uri, *, shape_uri=None, raiseifnotfound=True ):
        """Return True if property_uri is registered against shape_uri.

        When the property exists but under a different shape, raises unless
        raiseifnotfound is False (in which case False is returned).
        """
        logger.info( f"is_known_property_uri {property_uri=} {shape_uri=}" )
        property_uri = self.normalise_uri( property_uri )
        shape_uri = self.normalise_uri( shape_uri )
        if property_uri in self.properties:
            if self.properties[property_uri]['shape']==shape_uri:
                result = True
            else:
                # Known property, wrong shape.
                if raiseifnotfound:
                    raise Exception( f"Property {property_uri} not registered with shape {shape_uri}" )
                result = False
        else:
            result = False
        logger.info( f"is_known_property_uri {property_uri=} {shape_uri=} returning {result=}" )
        return result
    def register_property( self, property_name, property_uri, *, property_value_type=None, shape_uri=None, altname = None, do_not_overwrite=True, property_definition_uri=None ):
        """Register a property (and optionally its definition URI under altname).

        With do_not_overwrite (default) an existing registration for the same
        URI is kept.  When altname and property_definition_uri are given, the
        definition is also registered under both its full URI and its
        default-prefixed tag, named by altname.
        """
        logger.info( f"register_property {property_name=} {property_uri=} {shape_uri=}" )
        property_uri = self.normalise_uri( property_uri )
        shape_uri = self.normalise_uri( shape_uri )
        if not do_not_overwrite or property_uri not in self.properties:
            self.properties[property_uri] = {'name': property_name, 'shape': shape_uri, 'enums': [], 'value_type': property_value_type, 'altname':altname}
        if altname and property_definition_uri and ( not do_not_overwrite or property_definition_uri not in self.properties):
            self.properties[property_definition_uri] = {'name': altname, 'shape': shape_uri, 'enums': [], 'value_type': property_value_type, 'altname':None}
            # Also register under the prefixed-tag spelling for lookups by tag.
            self.properties[rdfxml.uri_to_default_prefixed_tag(property_definition_uri)] = {'name': altname, 'shape': shape_uri, 'enums': [], 'value_type': property_value_type, 'altname':None}
        if shape_uri is not None:
            self.shapes[shape_uri]['properties'].append(property_uri)
        # NOTE(review): sets `loaded` while __init__ tracks `typesystem_loaded`.
        self.loaded = True
    def get_property_uri( self, property_name, *, shape_uri=None ):
        """Resolve a property name to its URI.

        Lookup order: (1) unique name+shape match, (2) unique altname+shape
        match, (3) any-shape match when all candidates normalise to the same
        URI.  Returns None when nothing matches; raises when the altname
        lookup is ambiguous.
        """
        logger.info( f"get_property_uri {property_name=} {shape_uri=}" )
        shape_uri = self.normalise_uri( shape_uri )
        properties = [k for k,v in self.properties.items() if v['name']==property_name and v['shape']==shape_uri]
        if len(properties)==1:
            result = properties[0]
        else:
            # try using altname
            altproperties = [k for k,v in self.properties.items() if v['altname']==property_name and v['shape']==shape_uri]
            if len(altproperties)==1:
                result = altproperties[0]
                logger.info( f"Property {property_name} found using altname" )
            else:
                if len(altproperties)>1:
                    # NOTE(review): altnames is built from `properties` here,
                    # although the ambiguity detected was among `altproperties`
                    # - confirm which list the message should describe.
                    altnames = [self.properties[k]['altname'] for k in properties]
                    raise Exception( f"Property {property_name} is ambiguous - maybe use the altname - {altnames}" )
                else:
                    # try for a property ignoring the shape - as long as all the ones with the name have the same URI after normalising to a uri if tag/prefix present
                    properties = [k for k,v in self.properties.items() if v['name']==property_name]
                    if len(properties)==1 or (len(properties)>1 and all([rdfxml.tag_to_uri(k)==rdfxml.tag_to_uri(properties[0]) for k in properties[1:]]) ):
                        result = properties[0]
                    else:
                        result = None
        logger.info( f"get_property_uri {property_name=} {shape_uri=} returning {result=}" )
        return result
def get_property_name( self, property_uri, shapeuri=None ):
logger.info( f"get_property_name {property_uri=} {shape_uri=}" )
property_uri = self.normalise_uri( property_uri )
result = self.properties.get(property_uri)
return result
def is_known_enum_uri( self, enum_uri ):
enum_uri = self.normalise_uri( enum_uri )
result = self.enums.get(enum_uri)
logger.info( f"is_known_enum_uri {enum_uri=} returning {result=}" )
return result
def register_enum( self, enum_name, enum_uri, property_uri, *, id=None ):
logger.info( f"register_enum {enum_name=} {enum_uri=} {property_uri=} {id=}" )
# add the enum to the property
enum_uri = self.normalise_uri( enum_uri )
property_uri = self.normalise_uri( property_uri )
self.enums[enum_uri] = {'name': enum_name, 'id':id, 'property': property_uri}
self.properties[property_uri]['enums'].append(enum_uri)
self.loaded = True
def get_enum_uri(self, enum_name, property_uri):
property_uri = self.normalise_uri( property_uri )
result = None
for enumuri in self.properties[property_uri]['enums']:
if self.enums[enumuri]['name']==enum_name:
result = enumuri
break
return result
def get_enum_name( self, enum_uri ):
property_uri = self.normalise_uri( property_uri )
return self.enums[enum_uri]['name']
def get_enum_id( self, enum_name, property_uri ):
logger.info( f"get_enum_id {enum_name=} {property_uri=}" )
property_uri = self.normalise_uri( property_uri )
result = None
logger.info( f"{self.properties[property_uri]=}" )
logger.info( f"{self.properties[property_uri]['enums']=}" )
for enum_uri in self.properties[property_uri]['enums']:
if self.enums[enum_uri]['name']==enum_name:
result = self.enums[enum_uri]['id'] or enum_uri
break
logger.info( f"get_enum_id {enum_name=} {property_uri=} {result=}" )
return result
# generic uri/name
def is_known_uri( self, uri ):
logger.debug( f"iku {uri}" )
uri = self.normalise_uri( uri )
result = ( self.shapes.get(uri) or self.properties.get(uri) or self.enums.get(uri) or self.values.get(uri) ) is not None
logger.info( f"is_known_uri {uri=} returning {result=} s={self.shapes.get(uri)} p={self.properties.get(uri)} e={self.enums.get(uri)} v={self.values.get(uri)}" )
return result
def register_name( self, name, uri ):
uri = self.normalise_uri( uri )
self.values[uri]={'name': name }
self.loaded = True
def get_uri_name( self, uri ):
uri = self.normalise_uri( uri )
result = self.shapes.get(uri) or self.properties.get(uri) or self.enums.get(uri) or self.values.get(uri)
if result is not None:
result = result['name']
logger.info( f"get_uri_name {uri=} returning {result=}" )
return result
    def get_name_uri( self, name ):
        """Resolve *name* to a URI by trying shapes, then properties, enums and values.

        NOTE(review): get_enum_uri() takes (enum_name, property_uri); calling it
        with a single argument as done here would raise TypeError whenever the
        first two lookups return a falsy result - confirm the intended enum
        fallback before relying on this path.
        """
        result = self.get_shape_uri(name) or self.get_property_uri(name) or self.get_enum_uri(name) or self.get_value_uri(name)
        return result
| 47.49635 | 192 | 0.599201 |
7959eec07eacea13bb66c51db35b2ef7f591b124 | 924 | py | Python | migrations/versions/06600f194fb3_observation_model.py | rajhiren/test | b60572e505a79a1aed18fbffd3924a05d3f18a0c | [
"Apache-2.0"
] | null | null | null | migrations/versions/06600f194fb3_observation_model.py | rajhiren/test | b60572e505a79a1aed18fbffd3924a05d3f18a0c | [
"Apache-2.0"
] | null | null | null | migrations/versions/06600f194fb3_observation_model.py | rajhiren/test | b60572e505a79a1aed18fbffd3924a05d3f18a0c | [
"Apache-2.0"
] | null | null | null | """Observation Model
Revision ID: 06600f194fb3
Revises: bc494540d50e
Create Date: 2020-01-11 16:33:33.237128
"""
from alembic import op
import sqlalchemy as sa
# revision identifiers, used by Alembic.
revision = '06600f194fb3'
down_revision = 'bc494540d50e'
branch_labels = None
depends_on = None
def upgrade():
    """Create the ``observation`` table (one row per survey observation)."""
    # ### commands auto generated by Alembic - please adjust! ###
    columns = (
        sa.Column('id', sa.Integer(), nullable=False),
        sa.Column('survey_id', sa.Integer(), nullable=True),
        sa.Column('value', sa.Float(), nullable=True),
        sa.Column('frequency', sa.Integer(), nullable=True),
    )
    constraints = (
        sa.ForeignKeyConstraint(['survey_id'], ['survey.id']),
        sa.PrimaryKeyConstraint('id'),
    )
    op.create_table('observation', *columns, *constraints)
    # ### end Alembic commands ###
def downgrade():
    """Drop the ``observation`` table, reversing upgrade()."""
    # ### commands auto generated by Alembic - please adjust! ###
    op.drop_table('observation')
    # ### end Alembic commands ###
| 25.666667 | 65 | 0.678571 |
7959ef26ed4dcd2c446e4738847c18c0d9bae0b0 | 9,367 | py | Python | python2/prac/pracmodules/achieved_by/src/achievedBy.py | danielnyga/prac-dev | 107855cb9ddc294467098334725065b3937af150 | [
"BSD-2-Clause"
] | 3 | 2018-10-04T05:13:02.000Z | 2022-01-18T15:06:05.000Z | python2/prac/pracmodules/achieved_by/src/achievedBy.py | danielnyga/prac-dev | 107855cb9ddc294467098334725065b3937af150 | [
"BSD-2-Clause"
] | 2 | 2017-03-01T07:17:14.000Z | 2019-06-26T14:28:57.000Z | python2/prac/pracmodules/achieved_by/src/achievedBy.py | danielnyga/prac-dev | 107855cb9ddc294467098334725065b3937af150 | [
"BSD-2-Clause"
] | 2 | 2018-12-18T23:01:11.000Z | 2020-12-15T08:57:19.000Z | # PROBABILISTIC ROBOT ACTION CORES
#
# (C) 2012-2015 by Daniel Nyga (nyga@cs.tum.edu)
# (C) 2015 by Sebastian Koralewski (seba@informatik.uni-bremen.de)
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
# IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY
# CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
# TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
# SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
import os
from dnutils import logs
from prac.core import locations as pracloc
from prac.core.base import PRACModule, PRACDatabase
from prac.core.inference import PRACInferenceStep, FrameNode
from prac.db.ies.models import Frame
from prac.pracutils.utils import prac_heading
from pracmln.mln.base import parse_mln
from pracmln.mln.errors import NoConstraintsError
from pracmln.mln.util import colorize
from pracmln.utils.project import MLNProject
from pracmln.utils.visualization import get_cond_prob_png
logger = logs.getlogger(__name__, logs.DEBUG)
class AchievedBy(PRACModule):
'''
PRACModule used to perform action core refinement. If there exist no
robot-executable plan for a given action core, this module will find an
action by which this action core can be achieved.
'''
def extendDBWithAchievedByEvidence(self, db, querymln, actioncore):
'''
TODO
:param db:
:param querymln:
:return:
'''
# It will be assumed that there is only one true action_core
# predicate per database
acdomain = querymln.domains.get("actioncore")
acdomain.extend(db.domains.get("actioncore"))
acdomain = set(acdomain)
db_ = PRACDatabase(self.prac)
for ac1 in acdomain:
for ac2 in acdomain:
if ac1 == actioncore:
continue
db_["achieved_by({},{})".format(ac1, ac2)] = 0
for atom, truth in sorted(db.evidence.iteritems()):
db_ << (atom, truth)
return db_
    # @PRACPIPE
    def __call__(self, node, **params):
        """Refine the node's action core via achieved_by inference (generator).

        First delegates to the 'complex_achieved_by' module and yields whatever
        nodes it produces; only when that yields nothing, loads the
        actioncore-specific '<actioncore>.pracmln' project, runs the
        achieved_by MLN query over the node's databases and yields a new
        FrameNode for the inferred achieving action core.

        NOTE(review): everything below 'pngs = {}' is indented under the
        'for olddb in dbs' loop, and the postprocessing loop returns after the
        first yielded FrameNode - confirm both are intentional.
        """
        # ======================================================================
        # Initialization
        # ======================================================================
        logger.debug('inference on {}'.format(self.name))
        if self.prac.verbose > 0:
            print prac_heading('Refining Actioncores')
        dbs = node.outdbs
        infstep = PRACInferenceStep(node, self)
        # if node.previous_module == 'achieved_by':
        #     raise ActionKnowledgeError('I don\'t know how to %s' % node.frame.sentence)
        # ======================================================================
        # Preprocessing
        # ======================================================================
        for olddb in dbs:
            infstep.indbs.append(olddb.copy())
            #To handle multiple acs in one task, we have to check if the single
            # dbs contain achieved_bys which representing already plans
            pngs = {}
            actioncore = node.frame.actioncore
            mod = self.prac.module('complex_achieved_by')
            newnodes = list(mod(node))
            n = None
            parentframes = [p.frame for p in node.parentspath() if isinstance(p, FrameNode)]
            if any(n.frame in parentframes for n in newnodes):
                logger.error('aborting reasoning because of infinite loop. (%s)' % node.frame)
                node.children = []
            else:
                for n in newnodes:
                    yield n
                if n is not None: return
            if n is None:
                # This list is used to avoid an infinite loop during the
                # achieved by inference.
                # To avoid this infinite loop, the list contains the pracmlns
                # which were inferenced during the process.
                # Every pracmln should be used only once during the process
                # because the evidence for the inference will always remain
                # the same.
                # So if the pracmln hadnt inferenced a plan in the first time,
                # it will never do it.
                # Need to remove possible achieved_by predicates from
                # previous achieved_by inferences
                db_ = PRACDatabase(self.prac)
                for atom, truth in sorted(olddb.evidence.iteritems()):
                    if 'achieved_by' in atom: continue
                    db_ << (atom,truth)
                if params.get('project', None) is None:
                    logger.debug('Loading Project: {}.pracmln'.format(colorize(actioncore, (None, 'cyan', True), True)))
                    projectpath = os.path.join(pracloc.pracmodules, self.name, '{}.pracmln'.format(actioncore))
                    if os.path.exists(projectpath):
                        project = MLNProject.open(projectpath)
                    else:
                        infstep.outdbs.append(olddb)
                        logger.error(actioncore + ".pracmln does not exist.")
                        return
                else:
                    logger.debug(colorize('Loading Project from params', (None, 'cyan', True), True))
                    projectpath = os.path.join(params.get('projectpath', None) or os.path.join(pracloc.pracmodules, self.name), params.get('project').name)
                    project = params.get('project')
                mlntext = project.mlns.get(project.queryconf['mln'], None)
                mln = parse_mln(mlntext, searchpaths=[self.module_path],
                                projectpath=projectpath,
                                logic=project.queryconf.get('logic', 'FirstOrderLogic'),
                                grammar=project.queryconf.get('grammar', 'PRACGrammar'))
                known_concepts = mln.domains.get('concept', [])
                wnmod = self.prac.module('wn_senses')
                #Merge domains of db and given mln to avoid errors due to role inference and the resulting missing fuzzy perdicates
                known_concepts = list(set(known_concepts).union(set(db_.domains.get('concept', []))))
                db = wnmod.get_senses_and_similarities(db_, known_concepts)
                unified_db = db_.union(db)
                dbnew = wnmod.add_sims(unified_db, unified_db)
                # Inference achieved_by predicate
                db_ = self.extendDBWithAchievedByEvidence(dbnew, mln, actioncore)
                # ==============================================================
                # Inference
                # ==============================================================
                # db_.write()
                try:
                    infer = self.mlnquery(config=project.queryconf,
                                          verbose=self.prac.verbose > 2,
                                          db=db_, mln=mln)
                except NoConstraintsError:
                    logger.error('achieved_by inference failed due to NoConstraintsError: %s' % node.frame)
                    return
                result_db = infer.resultdb
                if self.prac.verbose == 2:
                    print
                    print prac_heading('INFERENCE RESULTS')
                    infer.write()
                # ==============================================================
                # Postprocessing
                # ==============================================================
                # unified_db = result_db.union(kb.query_mln, db_)
                # only add inferred achieved_by atoms, leave out
                # 0-evidence atoms
                for qa in result_db.query('achieved_by(?ac1,?ac2)'):
                    if qa['?ac2'] == 'Complex': continue
                    unified_db << 'achieved_by({},{})'.format(qa['?ac1'], qa['?ac2'])
                    pngs[qa['?ac2']] = get_cond_prob_png(project.queryconf.get('queries', ''), dbs, filename=self.name)
                    newframe = Frame(self.prac, node.frame.sidx, '', words=[], syntax=[], actioncore=qa['?ac2'], actionroles={})
                    # out('->', newframe)
                    infstep.outdbs.append(unified_db)
                    yield FrameNode(node.pracinfer, newframe, node, pred=None, indbs=[unified_db], prevmod=self.name)
                    return
            infstep.outdbs.append(unified_db)
# raise ActionKnowledgeError('I don\'t know how to %s' % node.frame.sentence) | 48.283505 | 155 | 0.551724 |
7959ef43530445df8f1a8cd21a3d76e6f28be9f6 | 7,049 | py | Python | d_parser/d_spider_30ssd.py | Holovin/D_GrabDemo | 6adb03fb42ae03e7896eb2eacb342cf9660feb92 | [
"MIT"
] | null | null | null | d_parser/d_spider_30ssd.py | Holovin/D_GrabDemo | 6adb03fb42ae03e7896eb2eacb342cf9660feb92 | [
"MIT"
] | 2 | 2018-03-28T19:47:46.000Z | 2021-12-13T20:56:31.000Z | d_parser/d_spider_30ssd.py | Holovin/D_GrabDemo | 6adb03fb42ae03e7896eb2eacb342cf9660feb92 | [
"MIT"
] | null | null | null | from d_parser.d_spider_common import DSpiderCommon
from d_parser.helpers.re_set import Ree
from d_parser.helpers.stat_counter import StatCounter as SC
from helpers.url_generator import UrlGenerator
VERSION = 29
# Warn: Don't remove task argument even if not use it (it's break grab and spider crashed)
# Warn: noinspection PyUnusedLocal
class DSpider(DSpiderCommon):
    def __init__(self, thread_number, try_limit=0):
        # Delegate all setup to DSpiderCommon; this subclass only adds task handlers.
        super().__init__(thread_number, try_limit)
# fetch categories
def task_initial(self, grab, task):
try:
if self.check_body_errors(grab, task):
yield self.check_errors(task)
return
# catalog
catalog = grab.doc.select('//div[@class="main__sidebar"]/ol//a[not(@href="#")]')
for link in catalog:
link = UrlGenerator.get_page_params(self.domain, link.attr('href'), {})
yield self.do_task('parse_cat_page', link, DSpider.get_next_task_priority(task))
except Exception as e:
self.process_error(grab, task, e)
finally:
self.process_finally(task)
# parse page categories
def task_parse_cat_page(self, grab, task):
try:
if self.check_body_errors(grab, task):
yield self.check_errors(task)
return
catalog = grab.doc.select('//div[@class="cats-wrap"]//a')
for link in catalog:
link = UrlGenerator.get_page_params(self.domain, link.attr('href'), {})
yield self.do_task('parse_page_items', link, DSpider.get_next_task_priority(task))
except Exception as e:
self.process_error(grab, task, e)
finally:
self.process_finally(task)
# parse page items in category
def task_parse_page_items(self, grab, task):
try:
if self.check_body_errors(grab, task):
yield self.check_errors(task)
return
items = grab.doc.select('//div[@class="items-wrap"]//div[@class="item__title"]//a')
for link in items:
link = UrlGenerator.get_page_params(self.domain, link.attr('href'), {})
yield self.do_task('parse_item', link, DSpider.get_next_task_priority(task))
except Exception as e:
self.process_error(grab, task, e)
finally:
self.process_finally(task)
# parse single item
def task_parse_item(self, grab, task):
try:
if self.check_body_errors(grab, task):
yield self.check_errors(task)
return
# common block with info
product_info = grab.doc.select('//div[@class="main__content"]')
# parse fields
# A = name
product_name = product_info.select('.//h1').text()
# B = count (quantity)
# C = status (delivery)
product_count_string = product_info.select('(.//div[@class="data-store"])[last()]')
product_count = None
store_count = {
'data-msk': 0,
'data-nsb': 0,
'data-krd': 0,
}
# for each city
for city in store_count:
temp = product_count_string.attr(city, '').replace(' ', '')
if temp != '' or not Ree.float.match(temp):
if product_count is None:
product_count = 0
# convert
temp = float(temp)
# check valid
if temp >= 0:
# replace
if temp == 0:
store_count[city] = -1
else:
store_count[city] = temp
else:
self.log_warn(SC.MSG_POSSIBLE_WARN, f'Unknown count status (>=0) {product_count_string.html()} skip...', task)
continue
# D = unit (measure)
product_unit = product_info.select('.//input[contains(@class, "product_count")]').attr('placeholder', 'ед.')
# E = price
product_price = product_info.select('.//strong[@id="item_price1"]').attr('content', '')
if not product_price or not Ree.float.match(product_price):
self.log_warn(SC.MSG_UNKNOWN_PRICE, f'Unknown price status {product_price}, skip...', task)
return
# F = vendor code (sku)
product_vendor_code = product_info.select('.//div[@class="item-number"]/strong').text('')
# G = vendor (manufacture) [const]
product_vendor = ''
# H = photo url
product_photo_url_raw = product_info.select('.//div[@class="fotorama"]/a[1]').attr('href', '')
if product_photo_url_raw:
product_photo_url = UrlGenerator.get_page_params(self.domain, product_photo_url_raw, {})
else:
product_photo_url = ''
# ID
product_id = product_info.select('.//a[@id="btn_buy"]').attr('data-id', '')
# I = description (properties)
product_description = {
'Описание': product_info.select('.//div[@itemprop="description"]').text('')
}
# I - first table
table_characteristics = product_info.select('.//div[@data-id="#characteristics"]')
for row in table_characteristics.select('.//tr'):
key = row.select('./td[1]').text('')
value = row.select('./td[2]').text('')
# default save
if key:
product_description[key] = value
# I - second table
table_log = product_info.select('.//div[contains(@class, "logistick")]')
for row in table_log.select('.//tr'):
key = row.select('./td[1]').text('')
value = row.select('./td[2]').text('')
# default save
if key:
product_description[key] = value
# save
for store_name, value in store_count.items():
# skip if still default value
if value == 0:
continue
self.result.add({
'name': product_name,
'quantity': str(value),
'delivery': '0' if value != -1 else '-1',
'measure': product_unit,
'price': product_price,
'sku': product_vendor_code,
'manufacture': product_vendor,
'photo': product_photo_url,
'id': product_id,
'properties': product_description,
'place': store_name
})
except Exception as e:
self.process_error(grab, task, e)
finally:
self.process_finally(task)
| 35.245 | 134 | 0.520925 |
7959ef60ae23deff7672bbd5b3058693cbcdd161 | 2,366 | py | Python | docs/update_req_for_rtd.py | borellim/aiida_core | eebef392c81e8b130834a92e1d7abf5e2e30b3ce | [
"BSD-2-Clause"
] | 1 | 2019-03-15T10:37:53.000Z | 2019-03-15T10:37:53.000Z | docs/update_req_for_rtd.py | odarbelaeze/aiida_core | 934b4ccdc73a993f2a6656caf516500470e3da08 | [
"BSD-2-Clause"
] | null | null | null | docs/update_req_for_rtd.py | odarbelaeze/aiida_core | 934b4ccdc73a993f2a6656caf516500470e3da08 | [
"BSD-2-Clause"
] | null | null | null | #!/usr/bin/env python
# -*- coding: utf-8 -*-
###########################################################################
# Copyright (c), The AiiDA team. All rights reserved. #
# This file is part of the AiiDA code. #
# #
# The code is hosted on GitHub at https://github.com/aiidateam/aiida_core #
# For further information on the license, see the LICENSE.txt file #
# For further information please visit http://www.aiida.net #
###########################################################################
"""
Whenever the requirements in ../setup.json are updated, run
also this script to update the requirements for Read the Docs.
"""
from __future__ import division
from __future__ import print_function
from __future__ import absolute_import
import os
import json
import click
@click.command()
@click.option('--pre-commit', is_flag=True)
def update_req_for_rtd(pre_commit):
    """Update the separate requirements file for Read the Docs"""
    docs_dir = os.path.abspath(os.path.dirname(__file__))
    root_dir = os.path.join(docs_dir, os.pardir)
    with open(os.path.join(root_dir, 'setup.json'), 'r') as info:
        setup_json = json.load(info)
    extras = setup_json['extras_require']
    # psycopg2 is excluded so the docs build does not require the postgres libraries
    install = [p for p in setup_json['install_requires'] if not p.startswith('psycopg2')]
    reqs = set(extras['testing'] + extras['docs'] + extras['rest'] + extras['atomic_tools'] + install)
    basename = 'requirements_for_rtd.txt'
    with open(os.path.join(docs_dir, basename), 'w') as reqs_file:
        reqs_file.write("\n".join(sorted(reqs)))
    click.echo("File '{}' written.".format(basename))
    if pre_commit:
        msg = 'Some requirements for Read the Docs have changed, {}'
        local_help = 'please add the changes and commit again'
        travis_help = 'please run aiida/docs/update_req_for_rtd.py locally and commit the changes it makes'
        click.echo(msg.format(travis_help if os.environ.get('TRAVIS') else local_help), err=True)
if __name__ == '__main__':
    # CLI entry point; click parses the options from sys.argv.
    update_req_for_rtd()  # pylint: disable=no-value-for-parameter
| 40.793103 | 107 | 0.610313 |
7959f0b4ce229f6608ed4fd354830364822e8bf8 | 1,727 | py | Python | gcloud/core/migrations/0004_environmentvariables.py | dtlisir/bk_sops | c39a23681e1fb2408ae93cebea20eb2a7dcec8ea | [
"Apache-2.0"
] | 1 | 2019-05-21T06:44:18.000Z | 2019-05-21T06:44:18.000Z | gcloud/core/migrations/0004_environmentvariables.py | dtlisir/bk_sops | c39a23681e1fb2408ae93cebea20eb2a7dcec8ea | [
"Apache-2.0"
] | 9 | 2020-06-05T21:18:43.000Z | 2021-06-10T21:34:38.000Z | gcloud/core/migrations/0004_environmentvariables.py | dtlisir/bk_sops | c39a23681e1fb2408ae93cebea20eb2a7dcec8ea | [
"Apache-2.0"
] | null | null | null | # -*- coding: utf-8 -*-
"""
Tencent is pleased to support the open source community by making 蓝鲸智云PaaS平台社区版 (BlueKing PaaS Community Edition) available.
Copyright (C) 2017-2019 THL A29 Limited, a Tencent company. All rights reserved.
Licensed under the MIT License (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at
http://opensource.org/licenses/MIT
Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License.
""" # noqa
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
    """Create the EnvironmentVariables model (unique key, description and value)."""
    # Must be applied after core.0003_business_executor.
    dependencies = [
        ('core', '0003_business_executor'),
    ]
    operations = [
        migrations.CreateModel(
            name='EnvironmentVariables',
            fields=[
                ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
                # 'key' uniquely identifies a variable; the verbose_name texts are Chinese UI labels.
                ('key', models.CharField(unique=True, max_length=255, verbose_name='\u53d8\u91cfKEY')),
                ('name', models.CharField(max_length=255, verbose_name='\u53d8\u91cf\u63cf\u8ff0', blank=True)),
                ('value', models.CharField(max_length=1000, verbose_name='\u53d8\u91cf\u503c', blank=True)),
            ],
            options={
                'verbose_name': '\u73af\u5883\u53d8\u91cf EnvironmentVariables',
                'verbose_name_plural': '\u73af\u5883\u53d8\u91cf EnvironmentVariables',
            },
        ),
    ]
| 49.342857 | 305 | 0.68674 |
7959f166e296b20d0278c4aab0e7c6817d62d5ae | 2,559 | py | Python | azure-mgmt-network/azure/mgmt/network/v2015_06_15/models/route_table_py3.py | JonathanGailliez/azure-sdk-for-python | f0f051bfd27f8ea512aea6fc0c3212ee9ee0029b | [
"MIT"
] | 1 | 2021-09-07T18:36:04.000Z | 2021-09-07T18:36:04.000Z | azure-mgmt-network/azure/mgmt/network/v2015_06_15/models/route_table_py3.py | JonathanGailliez/azure-sdk-for-python | f0f051bfd27f8ea512aea6fc0c3212ee9ee0029b | [
"MIT"
] | 2 | 2019-10-02T23:37:38.000Z | 2020-10-02T01:17:31.000Z | azure-mgmt-network/azure/mgmt/network/v2015_06_15/models/route_table_py3.py | JonathanGailliez/azure-sdk-for-python | f0f051bfd27f8ea512aea6fc0c3212ee9ee0029b | [
"MIT"
] | 1 | 2018-08-28T14:36:47.000Z | 2018-08-28T14:36:47.000Z | # coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
from .resource_py3 import Resource
class RouteTable(Resource):
    """Route table resource.
    Variables are only populated by the server, and will be ignored when
    sending a request.
    :param id: Resource Identifier.
    :type id: str
    :ivar name: Resource name.
    :vartype name: str
    :ivar type: Resource type.
    :vartype type: str
    :param location: Resource location.
    :type location: str
    :param tags: Resource tags.
    :type tags: dict[str, str]
    :param routes: Collection of routes contained within a route table.
    :type routes: list[~azure.mgmt.network.v2015_06_15.models.Route]
    :param subnets: A collection of references to subnets.
    :type subnets: list[~azure.mgmt.network.v2015_06_15.models.Subnet]
    :param provisioning_state: The provisioning state of the resource.
    Possible values are: 'Updating', 'Deleting', and 'Failed'.
    :type provisioning_state: str
    :param etag: Gets a unique read-only string that changes whenever the
    resource is updated.
    :type etag: str
    """
    # Server-populated fields; marked read-only so they are not sent in requests.
    _validation = {
        'name': {'readonly': True},
        'type': {'readonly': True},
    }
    # Attribute -> JSON key path and wire type, presumably consumed by the
    # Resource base class for (de)serialization - auto-generated, do not edit.
    _attribute_map = {
        'id': {'key': 'id', 'type': 'str'},
        'name': {'key': 'name', 'type': 'str'},
        'type': {'key': 'type', 'type': 'str'},
        'location': {'key': 'location', 'type': 'str'},
        'tags': {'key': 'tags', 'type': '{str}'},
        'routes': {'key': 'properties.routes', 'type': '[Route]'},
        'subnets': {'key': 'properties.subnets', 'type': '[Subnet]'},
        'provisioning_state': {'key': 'properties.provisioningState', 'type': 'str'},
        'etag': {'key': 'etag', 'type': 'str'},
    }
    def __init__(self, *, id: str=None, location: str=None, tags=None, routes=None, subnets=None, provisioning_state: str=None, etag: str=None, **kwargs) -> None:
        # Forward the common Resource fields to the base class, keep the rest here.
        super(RouteTable, self).__init__(id=id, location=location, tags=tags, **kwargs)
        self.routes = routes
        self.subnets = subnets
        self.provisioning_state = provisioning_state
        self.etag = etag
| 38.772727 | 162 | 0.602188 |
7959f16b6e69165322389dda166417064963412b | 2,633 | py | Python | ucsmsdk/mometa/ape/ApeMc.py | Kego/ucsmsdk | 244f283a5c295cf746110bb96686d079b19927ce | [
"Apache-2.0"
] | 78 | 2015-11-30T14:10:05.000Z | 2022-02-13T00:29:08.000Z | ucsmsdk/mometa/ape/ApeMc.py | Kego/ucsmsdk | 244f283a5c295cf746110bb96686d079b19927ce | [
"Apache-2.0"
] | 113 | 2015-11-20T09:42:46.000Z | 2022-03-16T16:53:29.000Z | ucsmsdk/mometa/ape/ApeMc.py | Kego/ucsmsdk | 244f283a5c295cf746110bb96686d079b19927ce | [
"Apache-2.0"
] | 86 | 2015-12-12T08:22:18.000Z | 2022-01-23T03:56:34.000Z | """This module contains the general information for ApeMc ManagedObject."""
from ...ucsmo import ManagedObject
from ...ucscoremeta import MoPropertyMeta, MoMeta
from ...ucsmeta import VersionMeta
class ApeMcConsts:
    """Allowed values for the ApeMc ``update_type`` property (see prop_meta below)."""
    UPDATE_TYPE_DELTA = "delta"
    UPDATE_TYPE_PERIODIC = "periodic"
    UPDATE_TYPE_SYNC = "sync"
class ApeMc(ManagedObject):
    """This is ApeMc class."""
    # Auto-generated managed-object metadata; 'ip' names the object ("mc-[ip]" rn).
    consts = ApeMcConsts()
    naming_props = set(['ip'])
    mo_meta = MoMeta("ApeMc", "apeMc", "mc-[ip]", VersionMeta.Version101e, "InputOutput", 0xff, [], ["read-only"], ['apeManager'], ['apeMcTable'], [None])
    # Per-property metadata: XML name, type, version introduced, access kind,
    # dirty mask bit, min/max length, validation regex and allowed values.
    prop_meta = {
        "child_action": MoPropertyMeta("child_action", "childAction", "string", VersionMeta.Version101e, MoPropertyMeta.INTERNAL, 0x2, None, None, r"""((deleteAll|ignore|deleteNonPresent),){0,2}(deleteAll|ignore|deleteNonPresent){0,1}""", [], []),
        "dn": MoPropertyMeta("dn", "dn", "string", VersionMeta.Version101e, MoPropertyMeta.READ_ONLY, 0x4, 0, 256, None, [], []),
        "ip": MoPropertyMeta("ip", "ip", "string", VersionMeta.Version101e, MoPropertyMeta.NAMING, 0x8, 0, 256, r"""((([0-9]){1,3}\.){3}[0-9]{1,3})""", [], []),
        "rn": MoPropertyMeta("rn", "rn", "string", VersionMeta.Version101e, MoPropertyMeta.READ_ONLY, 0x10, 0, 256, None, [], []),
        "sacl": MoPropertyMeta("sacl", "sacl", "string", VersionMeta.Version302c, MoPropertyMeta.READ_ONLY, None, None, None, r"""((none|del|mod|addchild|cascade),){0,4}(none|del|mod|addchild|cascade){0,1}""", [], []),
        "status": MoPropertyMeta("status", "status", "string", VersionMeta.Version101e, MoPropertyMeta.READ_WRITE, 0x20, None, None, r"""((removed|created|modified|deleted),){0,3}(removed|created|modified|deleted){0,1}""", [], []),
        "type": MoPropertyMeta("type", "type", "string", VersionMeta.Version101e, MoPropertyMeta.READ_WRITE, 0x40, 0, 510, None, [], []),
        "update_type": MoPropertyMeta("update_type", "updateType", "string", VersionMeta.Version211a, MoPropertyMeta.READ_WRITE, 0x80, None, None, None, ["delta", "periodic", "sync"], []),
    }
    # XML attribute name -> Python attribute name.
    prop_map = {
        "childAction": "child_action",
        "dn": "dn",
        "ip": "ip",
        "rn": "rn",
        "sacl": "sacl",
        "status": "status",
        "type": "type",
        "updateType": "update_type",
    }
    def __init__(self, parent_mo_or_dn, ip, **kwargs):
        """Create an ApeMc managed object named by *ip* under the given parent."""
        self._dirty_mask = 0
        self.ip = ip
        self.child_action = None
        self.sacl = None
        self.status = None
        self.type = None
        self.update_type = None
        ManagedObject.__init__(self, "ApeMc", parent_mo_or_dn, **kwargs)
| 48.759259 | 247 | 0.628181 |
7959f239a3e21b67545f0cf9fd4ce2049af02049 | 1,471 | py | Python | oaei2nt.py | insight-centre/naisc | 01e13e35b6aaace98606e8ac56d7f9f21ee51ff1 | [
"Apache-2.0"
] | 6 | 2019-09-11T12:48:23.000Z | 2022-03-22T14:04:34.000Z | oaei2nt.py | insight-centre/naisc | 01e13e35b6aaace98606e8ac56d7f9f21ee51ff1 | [
"Apache-2.0"
] | 9 | 2020-10-22T15:35:07.000Z | 2021-09-01T08:01:03.000Z | oaei2nt.py | insight-centre/naisc | 01e13e35b6aaace98606e8ac56d7f9f21ee51ff1 | [
"Apache-2.0"
] | 1 | 2021-10-31T14:38:58.000Z | 2021-10-31T14:38:58.000Z | ##############################################################################
### Converts an OAEI file into an RDF alignment file as required by Naisc
import xml.etree.ElementTree as ET
import sys
def main():
    """Convert an OAEI alignment file (argv[1]) to N-Triples on stdout.

    Only cells whose relation is '=' are emitted, as skos:exactMatch triples
    with the measure appended as a trailing comment; any other relation is
    reported as unsupported.
    """
    # BUG FIX: the guard was `len(sys.argv) < 1`, which is never true (argv
    # always contains the script name), so a missing filename crashed with
    # IndexError instead of printing the usage message.
    if len(sys.argv) < 2:
        print("Usage:\n\t python oaei2nt.py oaei.rdf > datasets/name/align.rdf")
        sys.exit(-1)
    align = "{http://knowledgeweb.semanticweb.org/heterogeneity/alignment}"
    rdf_resource = "{http://www.w3.org/1999/02/22-rdf-syntax-ns#}resource"
    # ET.parse accepts a filename directly, so the file is closed properly
    # (the original passed an open() handle that was never closed).
    data = ET.parse(sys.argv[1])
    for mapping in data.find(align + "Alignment").findall(align + "map"):
        cell = mapping.find(align + "Cell")
        e1 = cell.find(align + "entity1").attrib[rdf_resource]
        e2 = cell.find(align + "entity2").attrib[rdf_resource]
        probability = cell.find(align + "measure").text
        if cell.find(align + "property").text == "=":
            print("<%s> <http://www.w3.org/2004/02/skos/core#exactMatch> <%s> . # %s" % (e1, e2, probability))
        else:
            print("Unsupported " + cell.find(align + "property").text)
if __name__ == "__main__":
    # Run only when executed as a script, not on import.
    main()
| 61.291667 | 175 | 0.654657 |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.